blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
72d7294ff5ad1f06d357da58ada0fb115ed59c6e | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /ABC/ABC151-200/ABC190/A.py | 2dc4094c45ab83aaac5f6866e3c4f197b3e157a5 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 502 | py | def solve(a, b, c):
if c == 0:
if b >= a:
return 'Aoki'
else:
return 'Takahashi'
else:
if a >= b:
return 'Takahashi'
else:
return 'Aoki'
def main():
    """Read a, b, c from standard input and print the winner's name."""
    a, b, c = (int(token) for token in input().split())
    print(solve(a, b, c))
def test():
    """Sanity-check solve() against the problem's sample cases."""
    cases = [
        ((2, 1, 0), 'Takahashi'),
        ((2, 2, 0), 'Aoki'),
        ((2, 2, 1), 'Takahashi'),
    ]
    for args, expected in cases:
        assert solve(*args) == expected
# Script entry point: run the built-in assertions first, then handle real input.
if __name__ == "__main__":
    test()
    main()
| [
"cashfeg@gmail.com"
] | cashfeg@gmail.com |
bc18007a717e92dad6c5f793ceb31f91ad7bb8a8 | eff0422ed21d7b1b6a870efbc1b969e30b9d2897 | /fabtools/tests/test_vagrant_version.py | 8fec58e230befa79ac31805d7300e4b42022080d | [
"BSD-2-Clause"
] | permissive | fabtools/fabtools | 561ecec02227f48d84e0ff9c5e659d32819e6413 | 5fdc7174c3fae5e93a16d677d0466f41dc2be175 | refs/heads/master | 2023-08-01T15:55:56.871793 | 2019-09-16T09:19:00 | 2019-09-16T09:19:00 | 2,325,793 | 308 | 55 | BSD-2-Clause | 2021-06-04T01:02:55 | 2011-09-05T01:44:24 | Python | UTF-8 | Python | false | false | 1,268 | py | import unittest
from mock import patch
class _Success(str):
@property
def failed(self):
return False
class TestVagrantVersion(unittest.TestCase):
    """Check that fabtools.vagrant.version() parses various banner formats."""

    def _assert_version(self, banner, expected):
        # Stub out the local() call so no real vagrant binary is required,
        # then import version() lazily (after the patch is in place).
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = _Success(banner)
            from fabtools.vagrant import version
            self.assertEqual(version(), expected)

    def test_vagrant_version_1_3_0(self):
        self._assert_version("Vagrant version 1.3.0\n", (1, 3, 0))

    def test_vagrant_version_1_3_1(self):
        self._assert_version("Vagrant v1.3.1\n", (1, 3, 1))

    def test_vagrant_version_1_4_3(self):
        self._assert_version("Vagrant 1.4.3\n", (1, 4, 3))

    def test_vagrant_version_1_5_0_dev(self):
        self._assert_version("Vagrant 1.5.0.dev\n", (1, 5, 0, 'dev'))
| [
"ronan.amicel@gmail.com"
] | ronan.amicel@gmail.com |
9fa570042bd60ed44e495699919da044b67f6599 | dd3b8bd6c9f6f1d9f207678b101eff93b032b0f0 | /basis/AbletonLive10.1_MIDIRemoteScripts/pushbase/clip_control_component.py | d03b661d1f355bccee1a18a5179912ab0e409c38 | [] | no_license | jhlax/les | 62955f57c33299ebfc4fca8d0482b30ee97adfe7 | d865478bf02778e509e61370174a450104d20a28 | refs/heads/master | 2023-08-17T17:24:44.297302 | 2019-12-15T08:13:29 | 2019-12-15T08:13:29 | 228,120,861 | 3 | 0 | null | 2023-08-03T16:40:44 | 2019-12-15T03:02:27 | Python | UTF-8 | Python | false | false | 27,188 | py | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/clip_control_component.py
# Compiled at: 2019-04-09 19:23:45
from __future__ import absolute_import, print_function, unicode_literals
import Live
from ableton.v2.base import EventObject, clamp, forward_property, listenable_property, listens, liveobj_valid, nop
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ButtonControl, control_list, EncoderControl, StepEncoderControl
from ableton.v2.control_surface.mode import ModesComponent
from ableton.v2.control_surface.elements import DisplayDataSource
# Note lengths expressed in quarter-note beats.
ONE_THIRTYSECOND_IN_BEATS = 0.125
ONE_SIXTEENTH_IN_BEATS = 0.25
# Used as an "effectively forever" duration: one year of beats at 120 BPM.
ONE_YEAR_AT_120BPM_IN_BEATS = 63072000.0
# Grid quantization settings ordered from no grid up to an 8-bar grid.
GRID_QUANTIZATION_LIST = [
 Live.Clip.GridQuantization.no_grid,
 Live.Clip.GridQuantization.g_thirtysecond,
 Live.Clip.GridQuantization.g_sixteenth,
 Live.Clip.GridQuantization.g_eighth,
 Live.Clip.GridQuantization.g_quarter,
 Live.Clip.GridQuantization.g_half,
 Live.Clip.GridQuantization.g_bar,
 Live.Clip.GridQuantization.g_2_bars,
 Live.Clip.GridQuantization.g_4_bars,
 Live.Clip.GridQuantization.g_8_bars]
# Human-readable display labels for Live's audio warp modes.
WARP_MODE_NAMES = {Live.Clip.WarpMode.beats: 'Beats',
 Live.Clip.WarpMode.tones: 'Tones',
 Live.Clip.WarpMode.texture: 'Texture',
 Live.Clip.WarpMode.repitch: 'Repitch',
 Live.Clip.WarpMode.complex: 'Complex',
 Live.Clip.WarpMode.complex_pro: 'Pro',
 Live.Clip.WarpMode.rex: 'Rex'}
def convert_beat_time_to_bars_beats_sixteenths(signature, beat_time):
    """Format an absolute beat time as a 1-based 'bars.beats.sixteenths' string.

    signature is a (numerator, denominator) time-signature tuple and beat_time
    is measured in quarter-note beats.  Returns '-' when beat_time is None.

    Fix: the original used Python-2-only tuple parameter unpacking in the
    signature (removed by PEP 3113); callers already pass the signature as a
    single positional tuple, so unpacking it in the body is call-compatible.
    """
    if beat_time is None:
        return '-'
    numerator, denominator = signature
    # One bar in quarter-note beats (equivalent to one_bar_in_note_values(signature, 4.0)).
    beats_per_bar = 4.0 * numerator / denominator
    musical_beats_per_beat = denominator / 4.0
    if beat_time >= 0:
        bars = 1 + int(beat_time / beats_per_bar)
    else:
        # Negative times snap down to the previous bar unless exactly on a bar line.
        bars = int(beat_time / beats_per_bar) if beat_time % beats_per_bar == 0 else int(beat_time / beats_per_bar) - 1
    beats = 1 + int(beat_time % beats_per_bar * musical_beats_per_beat)
    sixteenths = 1 + int(beat_time % (1.0 / musical_beats_per_beat) * 4.0)
    return '%i.%i.%i' % (bars, beats, sixteenths)
def convert_beat_length_to_bars_beats_sixteenths(signature, beat_length):
    """Format a duration in beats as a 0-based 'bars.beats.sixteenths' string.

    signature is a (numerator, denominator) time-signature tuple and
    beat_length is measured in quarter-note beats.  Returns '-' for None.

    Fix: the original used Python-2-only tuple parameter unpacking in the
    signature (removed by PEP 3113); callers already pass the signature as a
    single positional tuple, so unpacking it in the body is call-compatible.
    """
    if beat_length is None:
        return '-'
    numerator, denominator = signature
    # One bar in quarter-note beats (equivalent to one_bar_in_note_values(signature, 4.0)).
    beats_per_bar = 4.0 * numerator / denominator
    musical_beats_per_beat = denominator / 4.0
    bars = int(beat_length / beats_per_bar)
    beats = int(beat_length % beats_per_bar * musical_beats_per_beat)
    sixteenths = int(beat_length % (1.0 / musical_beats_per_beat) * 4.0)
    return '%i.%i.%i' % (bars, beats, sixteenths)
def is_new_recording(clip):
    """True while *clip* is laying down fresh material (recording, not overdubbing)."""
    recording = clip.is_recording
    return recording and not clip.is_overdubbing
def one_bar_in_note_values(signature, note_value=4.0):
    """Length of one bar of *signature* in note_value units.

    signature is a (numerator, denominator) time-signature tuple; note_value
    picks the unit (4.0 = quarter notes, 16.0 = sixteenth notes).

    Fix: the original used Python-2-only tuple parameter unpacking in the
    signature (removed by PEP 3113); callers already pass the signature as a
    single positional tuple, so unpacking it in the body is call-compatible.
    """
    numerator, denominator = signature
    return note_value * numerator / denominator
class LoopSettingsModel(EventObject):
    """Observable wrapper around the loop-related properties of a Live clip.

    Re-broadcasts the clip's loop events to its own listeners and maintains a
    derived ``loop_length`` (loop_end - loop_start) that notifies on change.
    """
    __events__ = (u'looping', u'loop_start', u'loop_end', u'loop_length', u'position',
                  u'start_marker')

    def __init__(self, song, *a, **k):
        super(LoopSettingsModel, self).__init__(*a, **k)
        self.clip = None
        self._song = song
        return

    @listenable_property
    def clip(self):
        # The Live clip currently observed; may be None or an invalid liveobj.
        return self._clip

    @clip.setter
    def clip(self, clip):
        # Re-target every listener at the new clip and refresh the cached length.
        self._clip = clip
        self._loop_length = self._get_loop_length()
        self._on_looping_changed.subject = clip
        self._on_start_marker_changed.subject = clip
        self._on_loop_start_changed.subject = clip
        self._on_loop_end_changed.subject = clip
        self._on_position_changed.subject = clip
        self.notify_clip()

    # Plain pass-throughs to the underlying clip object.
    loop_start = forward_property('clip')('loop_start')
    start_marker = forward_property('clip')('start_marker')
    loop_end = forward_property('clip')('loop_end')
    looping = forward_property('clip')('looping')
    position = forward_property('clip')('position')

    @listens('looping')
    def _on_looping_changed(self):
        self.notify_looping()

    @listens('start_marker')
    def _on_start_marker_changed(self):
        self.notify_start_marker()

    @listens('loop_start')
    def _on_loop_start_changed(self):
        self._update_loop_length()
        self.notify_loop_start()

    @listens('loop_end')
    def _on_loop_end_changed(self):
        self._update_loop_length()
        self.notify_loop_end()

    @listens('position')
    def _on_position_changed(self):
        self.notify_position()

    @property
    def loop_length(self):
        # Cached length in beats; kept current by the loop start/end listeners.
        return self._loop_length

    def _get_loop_length(self):
        if liveobj_valid(self._clip):
            return self.loop_end - self.loop_start
        return 0

    def _update_loop_length(self):
        # Only notify when the derived value actually changed.
        loop_length = self._get_loop_length()
        if self._loop_length != loop_length:
            self._loop_length = loop_length
            self.notify_loop_length()

    @property
    def can_loop(self):
        # MIDI clips can always loop; audio clips only when warping is on.
        return self.clip.is_midi_clip or self.clip.is_audio_clip and self.clip.warping

    def move_start_marker(self, value, fine_grained):
        """Shift the start marker (or loop start when not looping) by *value* bars.

        When fine_grained, steps are sixteenth notes instead of whole bars.
        The new value is capped so at least one bar remains before loop_end.
        """
        marker = self.clip.start_marker if self.looping else self.clip.loop_start
        new_value = marker + self._adjusted_offset(value, fine_grained)
        signature = (
         self.clip.signature_numerator, self.clip.signature_denominator)
        measure_in_beats = one_bar_in_note_values(signature)
        measure_in_sixteenths = one_bar_in_note_values(signature, 16.0)
        # In fine mode, allow the marker up to the last sixteenth of the final bar.
        additional_offset = measure_in_beats / measure_in_sixteenths * (measure_in_sixteenths - 1) if fine_grained else 0.0
        new_value = min(new_value, self.clip.loop_end - measure_in_beats + additional_offset)
        if self.looping:
            if new_value >= self.clip.end_marker:
                self.clip.end_marker = self.clip.loop_end
            self.clip.start_marker = new_value
        else:
            self.clip.loop_start = new_value

    def move_position(self, value, fine_grained):
        """Move the loop position; the start marker follows if they coincide."""
        if not is_new_recording(self.clip):
            new_value = self.clip.position + self._adjusted_offset(value, fine_grained)
            should_update_start_marker = self.clip.position == self.clip.start_marker
            self.clip.position = new_value
            if should_update_start_marker:
                self.clip.start_marker = new_value
            self.clip.view.show_loop()

    def move_loop_end(self, value, fine_grained):
        """Move the loop end, never onto or before the loop start."""
        if not is_new_recording(self.clip):
            new_end = self.clip.loop_end + self._adjusted_offset(value, fine_grained)
            if new_end > self.loop_start:
                self.clip.loop_end = new_end

    def _adjusted_offset(self, value, fine_grained):
        # Convert an encoder delta into beats (one step = one bar, or 1/16 bar).
        return value * self._encoder_factor(fine_grained) * one_bar_in_note_values((
         self.clip.signature_numerator,
         self.clip.signature_denominator))

    def _encoder_factor(self, fine_grained):
        # Fine mode scales a step down to one sixteenth of a bar.
        if fine_grained:
            return 1.0 / one_bar_in_note_values((self.clip.signature_numerator, self.clip.signature_denominator), 16.0)
        return 1.0
class LoopSettingsControllerComponent(Component):
    """Maps four step encoders onto a clip's loop parameters.

    The encoder-to-action mapping depends on whether the clip loops: looped
    clips expose position/end/start-marker/looping; unlooped clips disable
    the third encoder.  Subclasses hook ``_on_clip_changed`` and the
    touched/released callbacks, which are no-ops here.
    """
    encoders = control_list(StepEncoderControl, control_count=4)
    shift_button = ButtonControl()  # held for fine-grained adjustment

    def __init__(self, *a, **k):
        super(LoopSettingsControllerComponent, self).__init__(*a, **k)
        # Callback tables indexed by encoder position; one set per loop state.
        self._encoder_callbacks_looped = [
         self._on_clip_position_value,
         self._on_clip_end_value,
         self._on_clip_start_marker_value,
         self._on_clip_looping_value]
        self._encoder_callbacks_unlooped = [
         self._on_clip_start_marker_value,
         self._on_clip_end_value,
         nop,
         self._on_clip_looping_value]
        self._touched_encoder_callbacks_looped = [
         self._on_clip_position_touched,
         self._on_clip_end_touched,
         self._on_clip_start_marker_touched,
         self._on_clip_looping_touched]
        self._touched_encoder_callbacks_unlooped = [
         self._on_clip_position_touched,
         self._on_clip_end_touched,
         nop,
         self._on_clip_looping_touched]
        self._released_encoder_callbacks_looped = [
         self._on_clip_position_released,
         self._on_clip_end_released,
         self._on_clip_start_marker_released,
         self._on_clip_looping_released]
        self._released_encoder_callbacks_unlooped = [
         self._on_clip_position_released,
         self._on_clip_end_released,
         nop,
         self._on_clip_looping_released]
        self._loop_model = self.register_disconnectable(LoopSettingsModel(self.song))
        self._update_encoder_state()

    def _get_clip(self):
        return self._loop_model.clip

    def _set_clip(self, clip):
        self._loop_model.clip = clip
        self.update()
        self._update_encoder_state()
        self._on_clip_changed()

    clip = property(_get_clip, _set_clip)

    def _on_clip_changed(self):
        # Subclass hook invoked after the clip assignment settles.
        pass

    @encoders.value
    def encoders(self, value, encoder):
        # Dispatch the turn to the callback matching the current loop state.
        callback_set = self._encoder_callbacks_looped if self._loop_model.looping else self._encoder_callbacks_unlooped
        callback_set[encoder.index](value)

    @encoders.touched
    def encoders(self, encoder):
        callback_set = self._touched_encoder_callbacks_looped if self._loop_model.looping else self._touched_encoder_callbacks_unlooped
        callback_set[encoder.index]()

    @encoders.released
    def encoders(self, encoder):
        callback_set = self._released_encoder_callbacks_looped if self._loop_model.looping else self._released_encoder_callbacks_unlooped
        callback_set[encoder.index]()

    def _update_encoder_state(self):
        # Encoders are only active while a valid clip is assigned.
        enable_encoders = liveobj_valid(self.clip)
        for encoder in self.encoders:
            encoder.enabled = enable_encoders

    def _on_clip_position_value(self, value):
        self._loop_model.move_position(value, self.shift_button.is_pressed)

    def _on_clip_end_value(self, value):
        self._loop_model.move_loop_end(value, self.shift_button.is_pressed)

    def _on_clip_start_marker_value(self, value):
        self._loop_model.move_start_marker(value, self.shift_button.is_pressed)

    def _on_clip_looping_value(self, value):
        # Turning right enables looping, turning left disables it.
        if self._loop_model.can_loop:
            currently_looping = self._loop_model.looping
            if value >= 0 and not currently_looping or value < 0 and currently_looping:
                self._loop_model.looping = not currently_looping

    # Touch/release hooks; subclasses may override for display feedback.
    def _on_clip_start_marker_touched(self):
        pass

    def _on_clip_end_touched(self):
        pass

    def _on_clip_position_touched(self):
        pass

    def _on_clip_looping_touched(self):
        pass

    def _on_clip_start_marker_released(self):
        pass

    def _on_clip_end_released(self):
        pass

    def _on_clip_position_released(self):
        pass

    def _on_clip_looping_released(self):
        pass
class LoopSettingsComponent(LoopSettingsControllerComponent):
    u"""
    Component for managing loop settings of a clip
    """

    def __init__(self, *a, **k):
        super(LoopSettingsComponent, self).__init__(*a, **k)
        # Four name/value display segments shown on the controller's screen.
        self._name_sources = [ DisplayDataSource() for _ in xrange(4) ]
        self._value_sources = [ DisplayDataSource() for _ in xrange(4) ]
        self.__on_looping_changed.subject = self._loop_model
        self.__on_start_marker_changed.subject = self._loop_model
        self.__on_loop_start_changed.subject = self._loop_model
        self.__on_loop_end_changed.subject = self._loop_model

    def set_name_display(self, display):
        if display:
            display.set_data_sources(self._name_sources)

    def set_value_display(self, display):
        if display:
            display.set_data_sources(self._value_sources)

    def convert_beat_time_to_bars_beats_sixteenths(self, clip, beat_time):
        # Delegate to the module-level formatter using the clip's signature.
        return convert_beat_time_to_bars_beats_sixteenths((
         clip.signature_numerator, clip.signature_denominator), beat_time)

    def convert_beat_length_to_bars_beats_sixteenths(self, clip, beat_length):
        return convert_beat_length_to_bars_beats_sixteenths((
         clip.signature_numerator, clip.signature_denominator), beat_length)

    def _on_clip_changed(self):
        # Track signature changes of the new clip and refresh the display.
        self.__on_signature_denominator_changed.subject = self._loop_model.clip
        self.__on_signature_denominator_changed()
        self.__on_signature_numerator_changed.subject = self._loop_model.clip
        self.__on_signature_numerator_changed()

    @listens('signature_denominator')
    def __on_signature_denominator_changed(self):
        self.__update_position_sources()

    @listens('signature_numerator')
    def __on_signature_numerator_changed(self):
        self.__update_position_sources()

    def __update_position_sources(self):
        # All position displays depend on the time signature; refresh them all.
        self._update_start_marker_source()
        self._update_loop_start_source()
        self._update_loop_end_source()
        self._update_position_source()

    @listens('looping')
    def __on_looping_changed(self):
        if self.is_enabled():
            self._update_is_looping_source()
            self._update_loop_end_source()
            self._update_start_marker_source()

    @listens('start_marker')
    def __on_start_marker_changed(self):
        self._update_start_marker_source()

    @listens('loop_start')
    def __on_loop_start_changed(self):
        self._update_loop_start_source()
        self._update_position_source()
        self._update_loop_end_source()

    @listens('loop_end')
    def __on_loop_end_changed(self):
        self._update_position_source()
        self._update_loop_end_source()

    def _update_start_marker_source(self):
        # The start-marker (offset) column is only shown for looping clips.
        looping = self._loop_model.looping if liveobj_valid(self.clip) else False
        self._value_sources[2].set_display_string(self.convert_beat_time_to_bars_beats_sixteenths(self.clip, self._loop_model.start_marker) if looping else '')

    def _update_is_looping_source(self):
        # Column labels change meaning depending on the loop state.
        looping = self._loop_model.looping if liveobj_valid(self.clip) else False
        self._name_sources[0].set_display_string('Position' if looping else 'Start')
        self._name_sources[1].set_display_string('Length' if looping else 'End')
        self._name_sources[2].set_display_string('Offset' if looping else '')

    def _update_loop_start_source(self):
        self._value_sources[0].set_display_string(self.convert_beat_time_to_bars_beats_sixteenths(self.clip, self._loop_model.loop_start) if self.clip else '-')

    def _update_loop_end_source(self):
        if liveobj_valid(self.clip) and not is_new_recording(self.clip):
            # Looping clips show a length; unlooped clips show the absolute end.
            looping = self._loop_model.looping
            self._value_sources[1].set_display_string(self.convert_beat_length_to_bars_beats_sixteenths(self.clip, self._loop_model.loop_length) if looping else self.convert_beat_time_to_bars_beats_sixteenths(self.clip, self._loop_model.loop_end))
            self._value_sources[3].set_display_string('On' if looping else 'Off')
        else:
            self._value_sources[1].set_display_string('-')
            self._value_sources[3].set_display_string('-')

    def _update_position_source(self):
        self._value_sources[0].set_display_string(self.convert_beat_time_to_bars_beats_sixteenths(self.clip, self._loop_model.position) if liveobj_valid(self.clip) else '-')

    def update(self):
        super(LoopSettingsComponent, self).update()
        if self.is_enabled():
            # Reset labels to the looped defaults, then re-derive everything.
            for index, label in enumerate(['Position', 'Length', 'Offset', 'Loop']):
                self._name_sources[index].set_display_string(label)

            self.__on_loop_start_changed()
            self.__on_loop_end_changed()
            self.__on_looping_changed()
            self.__on_start_marker_changed()
class AudioClipSettingsModel(EventObject):
    """Observable wrapper around the audio-specific properties of a Live clip.

    Re-broadcasts pitch/gain/warp events and offers encoder-friendly setters
    that clamp values into Live's valid ranges.
    """
    __events__ = (u'pitch_fine', u'pitch_coarse', u'gain', u'warp_mode', u'warping')

    def __init__(self, *a, **k):
        super(AudioClipSettingsModel, self).__init__(*a, **k)
        self.clip = None
        return

    def _get_clip(self):
        return self._clip

    def _set_clip(self, clip):
        # Re-target every listener at the newly assigned clip.
        self._clip = clip
        self.__on_pitch_fine_changed.subject = self._clip
        self.__on_pitch_coarse_changed.subject = self._clip
        self.__on_gain_changed.subject = self._clip
        self.__on_warp_mode_changed.subject = self._clip
        self.__on_warping_changed.subject = self._clip

    clip = property(_get_clip, _set_clip)
    # Plain pass-throughs to the underlying clip object.
    pitch_fine = forward_property('clip')('pitch_fine')
    pitch_coarse = forward_property('clip')('pitch_coarse')
    gain = forward_property('clip')('gain')
    warping = forward_property('clip')('warping')

    def _get_warp_mode(self):
        return self.clip.warp_mode

    def _set_warp_mode(self, value):
        # *value* is a relative step; clamp within the clip's available modes.
        if self.clip.warping:
            available_warp_modes = self.available_warp_modes
            warp_mode_index = available_warp_modes.index(self.clip.warp_mode)
            new_warp_mode_index = clamp(warp_mode_index + value, 0, len(available_warp_modes) - 1)
            self.clip.warp_mode = available_warp_modes[new_warp_mode_index]

    warp_mode = property(_get_warp_mode, _set_warp_mode)

    def set_clip_gain(self, value, fine_grained):
        # Gain is normalized to [0.0, 1.0].
        self.clip.gain = clamp(self.clip.gain + value * self._encoder_factor(fine_grained), 0.0, 1.0)

    def set_clip_pitch_coarse(self, value, fine_grained):
        # Coarse pitch in semitones, limited to +/- 48.
        self.clip.pitch_coarse = int(clamp(self.clip.pitch_coarse + value * self._encoder_factor(fine_grained), -48.0, 48.0))

    def set_clip_pitch_fine(self, value, fine_grained):
        # Fine pitch in cents; one full encoder step covers 100 cents.
        self.clip.pitch_fine = int(self.clip.pitch_fine + value * 100.0 * self._encoder_factor(fine_grained))

    def _encoder_factor(self, fine_grained):
        # Fine-grained mode scales encoder steps down to a tenth.
        if fine_grained:
            return 0.1
        return 1.0

    @listens('pitch_fine')
    def __on_pitch_fine_changed(self):
        self.notify_pitch_fine()

    @listens('pitch_coarse')
    def __on_pitch_coarse_changed(self):
        self.notify_pitch_coarse()

    @listens('gain')
    def __on_gain_changed(self):
        self.notify_gain()

    @listens('warp_mode')
    def __on_warp_mode_changed(self):
        self.notify_warp_mode()

    @listens('warping')
    def __on_warping_changed(self):
        self.notify_warping()

    @property
    def available_warp_modes(self):
        if liveobj_valid(self.clip):
            return list(self.clip.available_warp_modes)
        return []
class AudioClipSettingsControllerComponent(Component):
    u"""
    Component for managing settings of an audio clip
    """
    warp_mode_encoder = StepEncoderControl()
    transpose_encoder = StepEncoderControl()
    detune_encoder = EncoderControl()
    gain_encoder = EncoderControl()
    shift_button = ButtonControl()  # held for fine-grained adjustment

    def __init__(self, *a, **k):
        super(AudioClipSettingsControllerComponent, self).__init__(*a, **k)
        self._audio_clip_model = self.register_disconnectable(AudioClipSettingsModel())

    def _get_clip(self):
        return self._audio_clip_model.clip

    def _set_clip(self, clip):
        self._audio_clip_model.clip = clip
        self._update_encoder_enabled_state()
        self._on_clip_changed()

    clip = property(_get_clip, _set_clip)

    def _update_encoder_enabled_state(self):
        # All four encoders are only active while a valid clip is assigned.
        enabled = liveobj_valid(self.clip)
        self.warp_mode_encoder.enabled = self.transpose_encoder.enabled = self.detune_encoder.enabled = self.gain_encoder.enabled = enabled

    @warp_mode_encoder.value
    def warp_mode_encoder(self, value, encoder):
        self._on_clip_warp_mode_value(value)

    def _on_clip_warp_mode_value(self, value):
        self._audio_clip_model.warp_mode = value

    @transpose_encoder.value
    def transpose_encoder(self, value, encoder):
        self._on_transpose_encoder_value(value)

    def _on_transpose_encoder_value(self, value):
        self._audio_clip_model.set_clip_pitch_coarse(value, self.shift_button.is_pressed)

    @detune_encoder.value
    def detune_encoder(self, value, encoder):
        self._on_detune_encoder_value(value)

    def _on_detune_encoder_value(self, value):
        self._audio_clip_model.set_clip_pitch_fine(value, self.shift_button.is_pressed)

    @gain_encoder.value
    def gain_encoder(self, value, encoder):
        self._audio_clip_model.set_clip_gain(value, self.shift_button.is_pressed)
class AudioClipSettingsComponent(AudioClipSettingsControllerComponent):
    """Display layer on top of the audio-clip controller.

    Mirrors warp mode, transpose, detune and gain into four name/value
    display segments.
    """

    def __init__(self, *a, **k):
        super(AudioClipSettingsComponent, self).__init__(*a, **k)
        self._name_sources = [ DisplayDataSource() for _ in xrange(4) ]
        self._value_sources = [ DisplayDataSource() for _ in xrange(4) ]
        self.__on_pitch_fine_changed.subject = self._audio_clip_model
        self.__on_pitch_coarse_changed.subject = self._audio_clip_model
        self.__on_gain_changed.subject = self._audio_clip_model
        self.__on_warping_changed.subject = self._audio_clip_model
        self.__on_warp_mode_changed.subject = self._audio_clip_model

    def _on_clip_changed(self):
        self.update()

    def set_name_display(self, display):
        if display:
            display.set_data_sources(self._name_sources)

    def set_value_display(self, display):
        if display:
            display.set_data_sources(self._value_sources)

    @listens('warp_mode')
    def __on_warp_mode_changed(self):
        if self.is_enabled():
            self._update_warp_mode_source()

    @listens('warping')
    def __on_warping_changed(self):
        if self.is_enabled():
            self._update_warp_mode_source()

    @listens('gain')
    def __on_gain_changed(self):
        if self.is_enabled():
            self._update_gain_source()

    @listens('pitch_fine')
    def __on_pitch_fine_changed(self):
        if self.is_enabled():
            self._update_pitch_fine_source()

    @listens('pitch_coarse')
    def __on_pitch_coarse_changed(self):
        if self.is_enabled():
            self._update_pitch_coarse_source()

    def _update_warp_mode_source(self):
        # Shows the warp-mode label, or 'Off' when warping is disabled.
        display_value = '-'
        if liveobj_valid(self.clip):
            display_value = WARP_MODE_NAMES[self.clip.warp_mode] if liveobj_valid(self.clip) and self.clip.warping else 'Off'
        self._value_sources[0].set_display_string(display_value)

    def _update_gain_source(self):
        value = self.clip.gain_display_string if liveobj_valid(self.clip) else '-'
        self._value_sources[3].set_display_string(value)

    def _update_pitch_fine_source(self):
        # Fine pitch is shown in cents.
        value = str(int(self.clip.pitch_fine)) + ' ct' if liveobj_valid(self.clip) else '-'
        self._value_sources[2].set_display_string(value)

    def _update_pitch_coarse_source(self):
        # Coarse pitch is shown in semitones.
        value = str(int(self.clip.pitch_coarse)) + ' st' if liveobj_valid(self.clip) else '-'
        self._value_sources[1].set_display_string(value)

    def update(self):
        super(AudioClipSettingsComponent, self).update()
        if self.is_enabled():
            for index, label in enumerate(['WarpMode', 'Transpose', 'Detune', 'Gain']):
                self._name_sources[index].set_display_string(label)

            self._update_warp_mode_source()
            self._update_gain_source()
            self._update_pitch_fine_source()
            self._update_pitch_coarse_source()
class ClipNameComponent(Component):
    u"""
    Component for showing the clip name
    """
    # Number of display segments reserved for the clip-name row.
    num_label_segments = 4

    def __init__(self, *a, **k):
        super(ClipNameComponent, self).__init__(*a, **k)
        self._clip = None
        self._name_data_sources = [ DisplayDataSource() for _ in xrange(self.num_label_segments)
                                  ]
        # Segment 0 is a fixed caption; segment 1 carries the clip name.
        self._name_data_sources[0].set_display_string('Clip Selection:')
        return

    def _get_clip(self):
        return self._clip

    def _set_clip(self, clip):
        self._clip = clip
        self._update_clip_name()
        self._on_name_changed.subject = clip
        self.update()

    clip = property(_get_clip, _set_clip)

    def set_display(self, display):
        if display:
            display.set_num_segments(self.num_label_segments)
            for idx in xrange(self.num_label_segments):
                display.segment(idx).set_data_source(self._name_data_sources[idx])

    @listens('name')
    def _on_name_changed(self):
        if self.is_enabled():
            self._update_clip_name()

    def _name_for_clip(self, clip):
        # Placeholder strings for missing or unnamed clips.
        if clip:
            if clip.name:
                return clip.name
            return '[unnamed]'
        return '[none]'

    def _update_clip_name(self):
        self._name_data_sources[1].set_display_string(self._name_for_clip(self._clip))

    def update(self):
        super(ClipNameComponent, self).update()
        if self.is_enabled():
            self._update_clip_name()
class ClipControlComponent(ModesComponent):
    u"""
    Component that modifies clip properties
    """

    def __init__(self, loop_layer=None, audio_layer=None, clip_name_layer=None, *a, **k):
        super(ClipControlComponent, self).__init__(*a, **k)
        self._audio_clip_settings = AudioClipSettingsComponent(parent=self, is_enabled=False, layer=audio_layer)
        self._loop_settings = LoopSettingsComponent(parent=self, is_enabled=False, layer=loop_layer)
        self._clip_name = ClipNameComponent(parent=self, is_enabled=False, layer=clip_name_layer)
        # Modes decide which sub-components are active for the selected track.
        self.add_mode('no_clip', (self._clip_name,))
        self.add_mode('midi', (self._loop_settings,
         self._clip_name))
        self.add_mode('audio', (self._loop_settings,
         self._audio_clip_settings,
         self._clip_name))
        self.selected_mode = 'no_clip'
        self._update_clip()
        self._on_detail_clip_changed.subject = self.song.view
        self._on_selected_scene_changed.subject = self.song.view
        self._on_selected_track_changed.subject = self.song.view

    @listens('selected_scene')
    def _on_selected_scene_changed(self):
        self._update_clip()

    @listens('selected_track')
    def _on_selected_track_changed(self):
        self._update_clip()

    @listens('detail_clip')
    def _on_detail_clip_changed(self):
        self._update_clip()

    def update(self):
        super(ClipControlComponent, self).update()
        if self.is_enabled():
            self._update_clip()

    def _update_mode(self):
        # Choose the mode from the selected track's input type.
        track = self.song.view.selected_track
        new_mode = 'no_clip'
        if track.clip_slots and (track.has_midi_input or track.has_audio_input):
            new_mode = 'midi' if track.has_midi_input else 'audio'
        self.selected_mode = new_mode

    def _update_clip(self):
        # Push the currently detailed clip into all sub-components; only
        # audio clips go to the audio-settings component.
        self._update_mode()
        clip = self.song.view.detail_clip if self.is_enabled() else None
        audio_clip = clip if liveobj_valid(clip) and clip.is_audio_clip else None
        self._clip_name.clip = clip
        self._loop_settings.clip = clip
        self._audio_clip_settings.clip = audio_clip
return | [
"jharrington@transcendbg.com"
] | jharrington@transcendbg.com |
8d91955a195e9bf88115635ed8997b216f7814d9 | 4ebeaa3d8dbe4f5ecb1c9d69530f2cee2539c7d4 | /datahub/ninwen.py | 0abeb78571b72ca68f4d80cbad33158b720b9307 | [
"BSD-2-Clause"
] | permissive | tianhm/stock | 0867fb7ce0cef4bf75d38650096ffe026580f0a0 | ed31ff4df90e70c58572121bc75f447e5ce108a1 | refs/heads/master | 2023-09-05T20:08:01.286323 | 2023-08-22T15:07:04 | 2023-08-22T15:07:04 | 108,801,947 | 0 | 0 | BSD-3-Clause | 2023-09-11T23:55:16 | 2017-10-30T04:34:12 | Python | UTF-8 | Python | false | false | 9,807 | py | # -*- coding: utf-8 -*-
# @Time : 2021/9/6 8:21
# @File : ninwen.py
# @Author : Rocky C@www.30daydo.com
# 宁稳网
import json
import os
import random
import time
from parsel import Selector
import requests
import warnings
import datetime
import re
import pandas as pd
import validate_key
import pickle
import loguru
warnings.filterwarnings("ignore")
logger = loguru.logger
class NinwenSpider():
def __init__(self):
super(NinwenSpider, self).__init__()
self.session = requests.Session()
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
logger.info(f'{self.today} start to crawl....')
@property
def headers(self):
_header = {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Host": "www.ninwin.cn",
"Origin": "http://www.ninwin.cn",
"Referer": "http://www.ninwin.cn/index.php?m=u&c=login",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
return _header
@property
def json_headers(self):
headers = {
"Host": "www.ninwin.cn",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36",
"Origin": "https://www.ninwin.cn",
"Referer": "https://www.ninwin.cn/index.php?m=u&c=login",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh,en;q=0.9,en-US;q=0.8,zh-CN;q=0.7",
}
return headers
def get_image(self):
rand = int(time.time())
url = f'http://www.ninwin.cn/index.php?m=verify&a=get&rand={rand}'
_headers = {"Referer": "http://www.ninwin.cn/index.php?m=u&c=login",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36"}
r = self.session.get(url=url, headers=_headers)
with open('code.png', 'wb') as fp:
fp.write(r.content)
return r.content
def convert(self, float_str):
try:
return_float = float(float_str)
except:
return_float = None
return return_float
def login(self, code, csrf):
url = 'https://www.ninwin.cn/index.php?m=u&c=login&a=dorun'
data = {
'username': validate_key.username,
'password': validate_key.password,
'code': code,
'backurl': 'https://www.ninwin.cn/',
'invite': '',
'csrf_token': csrf
}
r = self.session.post(url=url, headers=self.json_headers,
data=data
)
ret_js = r.json()
if ret_js.get('state') == 'success':
return ret_js.get('referer')
def get_csrf_token(self):
url = 'http://www.ninwin.cn/index.php?m=u&c=login'
content = self.visit_page(url)
if re.search('value="(.*?)"', content):
csrf = re.search('value="(.*?)"', content).group(1)
return csrf
return None
def get_bond_data(self):
url = 'http://www.ninwin.cn/index.php?m=cb&a=cb_all&show_cb_only=Y&show_listed_only=Y'
content = self.visit_page(url)
if '回售起始日' in content:
logger.info("\n获取数据成功\n")
return content
else:
logger.error('获取数据失败')
return None
def visit_page(self, url, _headers=None):
if _headers is None:
_headers = self.headers
resp = self.session.get(url=url, headers=_headers)
content = resp.text
return content
    @property
    def columns_name(self):
        # (column label, XPath relative to a table row) pairs describing how
        # each field of the bond table is extracted; order matters for parse().
        columns_name_ = [("转债代码", ".//td[2]/text()"),
         ("转债名称", ".//td[3]/a/text()"),
         ("满足", ".//td[3]/a/span/@title"),
         ("发行日期", ".//td[4]/text()"),
         ("股票代码", ".//td[5]/text()"),
         ("股票名称", ".//td[6]/text()"),
         ("行业", ".//td[7]/text()"),
         ("子行业", ".//td[8]/text()"),
         ("转债价格", ".//td[9]/text()"),
         ("本息", ".//td[9]/@title"),
         ("涨跌", ".//td[10]/spand/text()"),
         ("日内套利", ".//td[11]/spand/text()"),
         ("股价", ".//td[12]/text()"),
         ("正股涨跌", ".//td[13]/spand/text()"),
         ("剩余本息", ".//td[14]/text()"),
         ("转股价格", ".//td[15]/text()"),
         ("转股溢价率", ".//td[16]/text()"),
         # ("转股期", ".//td[18]/@title"),
         ("转股价值", ".//td[17]/text()"),
         ("距离转股日", ".//td[18]/text()"),
         ("剩余年限", ".//td[19]/text()"),
         ("回售年限", ".//td[20]/text()"),
         ("剩余余额", ".//td[21]/text()"),
         # ("余额", ".//td[20]/text()"),
         ("成交额(百万)", ".//td[22]/text()"),
         ("转债换手率", ".//td[23]/text()"),
         ("余额/市值", ".//td[24]/@title"),
         ("余额/股本", ".//td[25]/text()"),
         ("股票市值(亿)", ".//td[26]/text()"),
         ("P/B", ".//td[27]/text()"),
         ("税前收益率", ".//td[28]/text()"),
         ("税后收益率", ".//td[29]/text()"),
         ("税前回售收益", ".//td[30]/text()"),
         ("税后回售收益", ".//td[31]/text()"),
         ("回售价值", ".//td[32]/text()"),
         ("纯债价值", ".//td[33]/text()"),
         ("弹性", ".//td[34]/text()"),
         ("信用", ".//td[35]/text()"),
         ("折现率", ".//td[36]/text()"),
         ("老式双低", ".//td[37]/text()"),
         ("老式排名", ".//td[38]/text()"),
         ("新式双低", ".//td[39]/text()"),
         ("新式排名", ".//td[40]/text()"),
         ("热门度", ".//td[41]/text()"),
         ]
        return columns_name_
def patch_fix(self, name, v, node):
if name == '转股价格' and v is None:
return True, node.xpath('.//td[15]/a/text()').extract_first()
return False, None
    def parse(self, content):
        """Extract one dict per bond row from the listing HTML.

        Each dict maps the Chinese column label (see ``columns_name``) to
        the cell's text, stripped of surrounding whitespace.  *content* is
        the page returned by :meth:`get_bond_data`.
        """
        resp = Selector(text=content)
        columns = resp.xpath('//table[@id="cb_hq"]/tbody/tr')
        bond_result_list = []
        for col in columns:
            d = {}
            for item in self.columns_name:
                v = col.xpath(item[1]).extract_first()
                # Some cells need a fallback XPath; see patch_fix.
                patch, _v = self.patch_fix(item[0], v, col)
                if patch:
                    v = _v
                if isinstance(v, str):
                    v = v.strip()
                d[item[0]] = v
            bond_result_list.append(d)
        return bond_result_list
def dump_excel(self, bond_info_list):
df = pd.DataFrame(bond_info_list)
df.to_excel(f'../data/{self.today}_宁稳.xlsx', encoding="utf8")
    def image_recognize(self, img):
        """Send the captcha image to the external recognition service.

        Parameters
        ----------
        img : bytes
            Raw PNG bytes of the captcha downloaded by ``get_image``.

        Returns the recognized code string on success; logs and re-raises
        any error from decoding the service's JSON response.  Service URL
        and signing key come from the external ``validate_key`` module.
        """
        files = {'file': img}
        data = {'sign': validate_key.sign}
        url = validate_key.url
        r = requests.post(url=url, files=files, data=data, timeout=20)
        try:
            code = r.json().get('code')
            print(r.json())
        except Exception as e:
            logger.error(e)
            raise e
        else:
            return code
def check_name(self, csrf_token):
url = 'https://www.ninwin.cn/index.php?m=u&c=login&a=checkname'
data = {'csrf_token': csrf_token,
'username': validate_key.username}
r = self.session.post(url=url, headers=self.json_headers, data=data)
    def check_cookies(self, csrf, code):
        """Hit the server-side captcha-check endpoint before logging in.

        NOTE(review): the response body is fetched but never used —
        presumably only the session-cookie side effect matters; confirm.
        """
        url = f'https://www.ninwin.cn/index.php?m=verify&a=check&csrf_token={csrf}&code={code}'
        time.sleep(0.5)
        content = self.visit_page(url, _headers=self.json_headers)
    def run(self):
        """Main loop: solve the captcha, log in, scrape, dump to Excel.

        Retries the whole captcha/login sequence until the site accepts
        the credentials, then fetches the bond table once, writes the
        xlsx, removes the temporary captcha file and exits.
        """
        csrf = self.get_csrf_token()
        while 1:
            img = self.get_image()
            code = self.image_recognize(img)
            print(code)
            # Order matters: the site expects the name-check and captcha
            # verification requests before the login POST.
            self.check_name(csrf)
            self.check_cookies(csrf, code)
            time.sleep(0.5)
            ref_url = self.login(code, csrf)
            if ref_url is None:
                # Captcha misread or bad password: back off and retry.
                logger.info('识别错误或者密码错误,正在重试.....')
                time.sleep(random.randint(1, 5))
                continue
            # Follow the post-login redirect to finish session setup.
            self.visit_page(ref_url)
            content = self.get_bond_data()
            bond_info_list = self.parse(content)
            self.dump_excel(bond_info_list)
            logger.info('获取结束')
            os.remove('code.png')
            break
if __name__ == '__main__':
    # Script entry point: run the spider once and exit.
    app = NinwenSpider()
    app.run()
| [
"jinweizsu@gmail.com"
] | jinweizsu@gmail.com |
bedb1ebe76b92f643cf76628ca2fed73920daa9e | b49e31fefcdd6acb5eb791f2fd3ba2ec80b84e2f | /DEMO/python/antchain_sdk_demo/__init__.py | 54660c33b26f64e6d666ac1e179909cf50da19cd | [
"MIT",
"Apache-2.0"
] | permissive | sdk-team/antchain-openapi-prod-sdk | e2e58d41ef7197ad7b0f0e1777e437ff019131da | 4af1da6f11a2771e373f4a8904904427f06f1887 | refs/heads/master | 2023-03-16T01:44:59.860216 | 2021-03-07T05:46:38 | 2021-03-07T05:46:38 | 276,055,425 | 0 | 0 | MIT | 2020-06-30T09:29:03 | 2020-06-30T09:29:02 | null | UTF-8 | Python | false | false | 21 | py | __version__ = '2.4.0' | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
c8640b5fccd56d423cdbf764864f28b14e1d8e59 | abfa9440fb67805d710c2d70fac354444c22b5f8 | /services/rest/main.py | 14e541f0e9431eb314ddf60fa7a80077ea9a0274 | [
"MIT"
] | permissive | KINGdotNET/chainbb | 1e9373442676f4fea96f8a5d5ece444c95392487 | 67fc1749b721d6950835e6c45042880842181850 | refs/heads/master | 2021-05-06T23:41:25.224868 | 2017-11-21T22:52:38 | 2017-11-21T22:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,614 | py | from flask import Flask, jsonify, request
from pprint import pprint
from pymongo import MongoClient
from bson.json_util import dumps
from flask_cors import CORS, cross_origin
from mongodb_jsonencoder import MongoJsonEncoder
from steem import Steem
import os
ns = os.environ['namespace'] if 'namespace' in os.environ else 'chainbb'
mongo = MongoClient("mongodb://mongo", connect=False)
db = mongo[ns]
nodes = [
os.environ['steem_node'] if 'steem_node' in os.environ else 'localhost:5090',
]
s = Steem(nodes)
app = Flask(__name__)
app.json_encoder = MongoJsonEncoder
CORS(app)
def response(json, forum=False, children=False, meta=False, status='ok'):
    """Wrap *json* in the standard API envelope.

    Every reply carries the current network-status documents plus the
    optional ``forum`` / ``children`` / ``meta`` sections when supplied.
    """
    # Refreshed on every call (NYI: should be cached for ~3 seconds).
    network = {str(doc['_id']): doc['value'] for doc in db.status.find()}
    payload = {
        'status': status,
        'network': network,
        'data': json,
    }
    if forum:
        payload['forum'] = forum
    if children:
        payload['children'] = list(children)
    if meta:
        payload['meta'] = meta
    return jsonify(payload)
def load_post(author, permlink):
    """Fetch one post and flatten ``active_votes`` into a ``votes`` dict.

    Returns the post document (with ``votes`` mapping voter -> weight),
    or ``None`` when no such post is indexed.
    """
    post = db.posts.find_one({'author': author, 'permlink': permlink})
    if post and 'active_votes' in post:
        flattened = {voter: weight for voter, weight in post['active_votes']}
        post.pop('active_votes', None)
        post['votes'] = flattened
    return post
def load_replies(query, sort):
    """Return replies matching *query*, sorted by *sort*.

    Each document's ``active_votes`` list of ``[voter, weight]`` pairs is
    flattened into a ``votes`` mapping ``{voter: weight}``.
    """
    replies = []
    # The original enumerated the cursor but never used the index.
    for post in db.replies.find(query).sort(sort):
        if post and 'active_votes' in post:
            votes = {voter: weight for voter, weight in post['active_votes']}
            post.pop('active_votes', None)
            post['votes'] = votes
        replies.append(post)
    return replies
@app.route("/")
def index():
    """Front page: forum list for the public groups plus active-user stats."""
    query = {
        "group": {"$in": [
            "localtesting",  # localtesting never exists on live, only in dev
            "projects",
            "crypto",
            "community"
        ]}
    }
    sort = [("group_order", 1), ("forum_order", 1)]
    results = db.forums.find(query).sort(sort)
    # Users currently active through this app's namespace only.
    appusers = db.activeusers.find({'app': ns}, {'_id': 1})
    return response({
        'forums': list(results),
        'users': {
            'stats': {
                'total': db.activeusers.count(),
                'app': db.activeusers.count({'app': ns}),
            },
            'list': list(appusers)
        }
    })
@app.route("/forums")
def forums():
    """List every forum, highlighted ones first."""
    ordering = [("highlight", -1), ("_id", 1), ("parent", 1)]
    cursor = db.forums.find({}).sort(ordering)
    return response({'forums': list(cursor)})
@app.route("/@<username>")
def account(username):
    """Paginated list of a user's root posts, newest first (20 per page)."""
    query = {'author': username}
    fields = {
        'author': 1, 'category': 1, 'created': 1, 'children': 1,
        'json_metadata': 1, 'last_reply': 1, 'last_reply_by': 1,
        'permlink': 1, 'title': 1, 'url': 1,
    }
    page = int(request.args.get('page', 1))
    per_page = 20
    cursor = (db.posts.find(query, fields)
              .sort([("created", -1)])
              .skip((page - 1) * per_page)
              .limit(per_page))
    return response({
        'posts': list(cursor),
        'total': db.posts.count(query),
        'page': page,
    })
@app.route("/@<username>/replies")
def replies(username):
    """Paginated feed of replies *to* ``username`` by other authors.

    Aggregation joins each reply with its parent document (which may
    live in either the ``posts`` or ``replies`` collection), trims both
    to the fields the UI needs, flattens vote lists, and attaches a
    summary of the forum the thread belongs to.  10 per page.
    """
    sort = {"created": -1}
    page = int(request.args.get('page', 1))
    perPage = 10
    skip = (page - 1) * perPage
    limit = perPage
    pipeline = [
        # Replies addressed to the user, excluding self-replies.
        {'$match': {
            'parent_author': username,
            'author': {'$ne': username},
        }},
        {'$sort': sort},
        # Build the "author/permlink" id of the parent document.
        {'$project': {
            'parent_id': {'$concat': ['$parent_author', '/', '$parent_permlink']},
            'reply': '$$ROOT'
        }},
        # The parent may be a root post or itself a reply; look in both.
        {'$lookup': {
            'from': 'posts',
            'localField': 'parent_id',
            'foreignField': '_id',
            'as': 'parent_post'
        }},
        {'$lookup': {
            'from': 'replies',
            'localField': 'parent_id',
            'foreignField': '_id',
            'as': 'parent_reply'
        }},
        # Prefer the replies-collection hit when present.
        {'$project': {
            'reply': 1,
            'parent': {
                '$cond': {
                    'if': {'$eq': ["$parent_reply", []]},
                    'then': '$parent_post',
                    'else': '$parent_reply'
                }
            }
        }},
        {'$unwind': '$parent'},
        # Trim both documents down to the fields the UI needs.
        {'$project': {
            'reply': {
                '_id': 1,
                'active_votes': 1,
                'author': 1,
                'body': 1,
                'category': 1,
                'created': 1,
                'depth': 1,
                'json_metadata': 1,
                'parent_author': 1,
                'parent_permlink': 1,
                'permlink': 1,
                'root_namespace': 1,
                'root_post': 1,
                'root_title': 1,
                'title': 1,
                'url': 1,
            },
            'parent': {
                '_id': 1,
                'active_votes': 1,
                'author': 1,
                'body': 1,
                'category': 1,
                'created': 1,
                'depth': 1,
                'parent_author': 1,
                'parent_permlink': 1,
                'permlink': 1,
                'namespace': 1,
                'root_namespace': 1,
                'root_title': 1,
                'title': 1,
                'url': 1,
            }
        }},
        # Pagination: take (skip+limit) then drop the first `skip`.
        {'$limit': limit + skip},
        {'$skip': skip},
    ]
    total = db.replies.count({'parent_author': username})
    replies = db.replies.aggregate(pipeline)
    results = []
    for idx, reply in enumerate(replies):
        # Format parent votes: [voter, weight] pairs -> {voter: weight}.
        parent_votes = {}
        for vote in reply['parent']['active_votes']:
            parent_votes.update({vote[0]: vote[1]})
        reply['parent'].pop('active_votes', None)
        reply['parent'].update({
            'votes': parent_votes
        })
        # Format reply votes the same way.
        reply_votes = {}
        for vote in reply['reply']['active_votes']:
            reply_votes.update({vote[0]: vote[1]})
        reply['reply'].pop('active_votes', None)
        reply['reply'].update({
            'votes': reply_votes
        })
        # Temporary way to retrieve the forum summary for the thread.
        if 'root_namespace' in reply['reply']:
            reply['forum'] = db.forums.find_one({
                '_id': reply['reply']['root_namespace']
            }, {
                '_id': 1,
                'creator': 1,
                'exclusive': 1,
                'funded': 1,
                'name': 1,
                'tags': 1,
            })
        results.append(reply)
    return response({
        'replies': results,
        'total': total,
        'page': page,
    })
@app.route("/@<username>/responses")
def accountResponses(username):
    """Paginated list of the comments *username* has authored, newest first.

    Unlike ``/@<username>/replies`` (comments *to* the user), this returns
    the user's own reply documents, 20 per page.
    """
    query = {
        'author': username
    }
    fields = {
        'author': 1,
        'category': 1,
        'created': 1,
        'children': 1,
        'json_metadata': 1,
        'last_reply': 1,
        'last_reply_by': 1,
        'parent_author': 1,
        'parent_permlink': 1,
        'permlink': 1,
        'root_post': 1,
        'root_title': 1,
        'url': 1
    }
    sort = [("created", -1)]
    page = int(request.args.get('page', 1))
    perPage = 20
    skip = (page - 1) * perPage
    limit = perPage
    total = db.replies.count(query)
    responses = db.replies.find(query, fields).sort(
        sort).skip(skip).limit(limit)
    return response({
        'responses': list(responses),
        'total': total,
        'page': page
    })
@app.route("/tags")
def tags():
    """All topics, ordered by most recent reply."""
    cursor = db.topics.find({}).sort([("last_reply", -1)])
    return response(list(cursor))
@app.route("/search")
def search():
    """Full-text search over posts; returns the top five matches."""
    pipeline = [
        {
            '$match': {
                # Requires a MongoDB text index on the posts collection.
                '$text': {
                    '$search': request.args.get('q')
                }
            }
        },
        {
            # Best text-match first.
            '$sort': {
                'score': {
                    '$meta': "textScore"
                }
            }
        },
        {
            # Only the title and its URL (as "description") are returned.
            '$project': {
                'title': '$title',
                'description': '$url'
            }
        },
        {
            '$limit': 5
        }
    ]
    results = db.posts.aggregate(pipeline)
    return response(list(results))
@app.route('/forum/<slug>')
def forum(slug):
    """Forum view: forum metadata, child forums, and a page of posts.

    Bug fix: inside the ``exclusive`` branch the original guard read
    ``postFilter == False and postFilter == 'all'``, which a single value
    can never satisfy; ``or`` is the evident intent (drop the category
    constraint when no specific tag filter was requested).
    """
    # Load the specified forum
    query = {
        '_id': slug
    }
    forum = db.forums.find_one(query)
    # No forum? Look for a reservation
    if not forum:
        reservation = db.forum_requests.find_one(query)
        return response({}, meta={'reservation': reservation}, status='not-found')
    # No tags or authors? It's unconfigured
    if 'tags' not in forum and 'accounts' not in forum:
        return response(list(), forum=forum, meta={'configured': False})
    # Load children forums
    query = {
        'parent': str(forum['_id'])
    }
    children = db.forums.find(query)
    # Load the posts
    query = {}
    # ?filter=all will allow display of all posts
    postFilter = request.args.get('filter', False)
    if postFilter != 'all':
        query['_removedFrom'] = {
            '$nin': [slug]
        }
    if 'tags' in forum and len(forum['tags']) > 0:
        query.update({
            'category': {
                '$in': forum['tags']
            }
        })
    if 'accounts' in forum and len(forum['accounts']) > 0:
        query.update({
            'author': {
                '$in': forum['accounts']
            }
        })
    if postFilter != False and postFilter != 'all':
        query.update({
            'category': postFilter
        })
    if 'exclusive' in forum and forum['exclusive'] == True:
        # FIX: was ``and`` in the original (unsatisfiable condition).
        if postFilter == False or postFilter == 'all':
            query.pop('category', None)
        query.update({'namespace': slug})
    # If we have an empty query, it's an unconfigured forum
    fields = {
        'author': 1,
        'category': 1,
        'cbb': 1,
        'created': 1,
        'children': 1,
        'funded': 1,
        'json_metadata': 1,
        'last_reply': 1,
        'last_reply_by': 1,
        'last_reply_url': 1,
        'max_accepted_payout': 1,
        'percent_steem_dollars': 1,
        'permlink': 1,
        'title': 1,
        'url': 1
    }
    # ?filter=all should also display the _removedFrom field
    if postFilter == 'all':
        fields['_removedFrom'] = 1
    sort = [("active", -1)]
    page = int(request.args.get('page', 1))
    perPage = 20
    skip = (page - 1) * perPage
    limit = perPage
    results = db.posts.find(query, fields).sort(sort).skip(skip).limit(limit)
    return response(list(results), forum=forum, children=children, meta={'query': query, 'sort': sort})
@app.route('/status/<slug>')
def status(slug):
    """Funding status for a forum: contribution history plus per-donor totals."""
    forum = db.forums.find_one({'_id': slug})
    # Full contribution history, most recent first.
    history = db.funding.find({'ns': slug}).sort([('timestamp', -1)])
    # Aggregate per-donor counts and STEEM totals, biggest donors first.
    by_donor = db.funding.aggregate([
        {'$match': {'ns': slug}},
        {'$group': {'_id': '$from', 'count': {'$sum': 1}, 'total': {'$sum': '$steem_value'}}},
        {'$sort': {'total': -1}}
    ])
    return response({
        'history': list(history),
        'contributors': list(by_donor)
    }, forum=forum)
@app.route('/topics/<category>')
def topics(category):
    """Latest 20 topics in *category*, most recently active first."""
    fields = {
        'author': 1, 'created': 1, 'json_metadata': 1, 'last_reply': 1,
        'last_reply_by': 1, 'permlink': 1, 'title': 1, 'url': 1,
    }
    cursor = (db.posts.find({'category': category}, fields)
              .sort([("last_reply", -1), ("created", -1)])
              .limit(20))
    return response(list(cursor))
@app.route('/<category>/@<author>/<permlink>')
def post(category, author, permlink):
    """Single post view.

    Serves the indexed copy when available; otherwise falls back to
    fetching the content live from the steem node (``s``).
    """
    # Load the specified post
    post = load_post(author, permlink)
    if post:
        # Load the specified forum
        query = {
            'tags': {'$in': [post['category']]}
        }
        forum = db.forums.find_one(query)
        return response(post, forum=forum)
    else:
        # Not indexed locally — ask the node directly; .copy() presumably
        # detaches the result from the client's internal state (confirm).
        post = s.get_content(author, permlink).copy()
        return response(post)
@app.route('/<category>/@<author>/<permlink>/responses')
def responses(category, author, permlink):
    """Every reply under the given root post, oldest first."""
    root = '{}/{}'.format(author, permlink)
    ordering = [('created', 1)]
    return response(list(load_replies({'root_post': root}, ordering)))
@app.route('/active')
def active():
    """Twenty most recently active posts across all forums."""
    fields = {
        'author': 1, 'category': 1, 'children': 1, 'created': 1,
        'last_reply': 1, 'last_reply_by': 1, 'permlink': 1,
        'title': 1, 'url': 1,
    }
    cursor = (db.posts.find({}, fields)
              .sort([("last_reply", -1), ("created", -1)])
              .limit(20))
    return response(list(cursor))
@app.route('/api/ns_lookup')
def ns_lookup():
    """Report whether a namespace is taken (existing forum or reservation)."""
    ns = request.args.get('ns', False)
    taken = bool(
        db.forums.find_one({'_id': ns}) or
        db.forum_requests.find_one({'_id': ns})
    )
    return response({'exists': taken})
@app.route('/height')
def height():
    """Current block height recorded by the indexer."""
    return response(db.status.find_one({'_id': 'height'}))
@app.route("/config")
def config():
    """Dump every forum document (used by indexer/config tooling)."""
    every_forum = db.forums.find()
    return response(list(every_forum))
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| [
"aaron.cox@greymass.com"
] | aaron.cox@greymass.com |
bba4fd67e94ae71583cf7f433709f6cad7bacfc7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_luxury.py | e4dee89c8349b6b4fdb26c26725c56453c0f4872 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py |
# class header
class _LUXURY():
def __init__(self,):
self.name = "LUXURY"
self.definitions = [u'great comfort, especially as provided by expensive and beautiful things: ', u'something expensive that is pleasant to have but is not necessary: ', u'something that gives you a lot of pleasure but cannot be done often: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0d23c9eb3813079255485d6d28ebbb4baf1276dc | a512ee52b68f4058d25e4c74972087d20e81965c | /tests/services/test_storage_service.py | 81177f8f54fabec8bdb1b0e1797a14be8193130f | [] | no_license | joshmarshall/tornadorax | 359e236f0bc3f5da81309ee21b610e7f387f2d8d | fc35a08dca8cce8e6c7fa38091c0840a5285790e | refs/heads/master | 2023-08-14T09:42:37.061544 | 2020-02-10T01:18:48 | 2020-02-10T01:18:48 | 33,112,657 | 1 | 0 | null | 2016-01-19T04:55:34 | 2015-03-30T08:53:49 | Python | UTF-8 | Python | false | false | 15,257 | py | import json
import hashlib
import random
try:
from string import letters
except ImportError:
from string import ascii_letters as letters
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from tornado.testing import AsyncTestCase, gen_test
from testnado.service_case_helpers import ServiceCaseHelpers
from tests.helpers.service_helpers import fetch_token
from tornadorax.services.storage_service import StorageService
from tornadorax.services.storage_service import SegmentWriter
from tornadorax.services.storage_service import MissingTempURLKey
from tornadorax.services.storage_service import StreamError
OBJECT_BODY = "".join([
random.choice(letters) for i in range(2048)
]).encode("utf8")
class TestStorage(ServiceCaseHelpers, AsyncTestCase):
def setUp(self):
super(TestStorage, self).setUp()
self.storage_service = self.add_service()
self.storage_service.add_method(
"PUT", "/v1/container/object", object_write_handle)
self.storage_service.add_method(
"GET", "/v1/container/object", object_read_handle)
self.storage_service.add_method(
"HEAD", "/v1/container/object", object_info_handle)
self.storage_service.add_method(
"PUT", "/v1/container/manifest", object_write_handle)
self.storage_service.add_method(
"PUT", r"/v1/container/manifest/segments/\d+", object_write_handle)
self.client = StorageService(
self.storage_service.url("/v1"), fetch_token=fetch_token,
ioloop=self.io_loop)
@gen_test
async def test_generate_tempurl_requires_key(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
with self.assertRaises(MissingTempURLKey):
obj.generate_tempurl(method="GET", expires=1000)
@gen_test
async def test_generate_tempurl(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object", tempurl_key="foobar")
url = obj.generate_tempurl(method="GET", expires=1000)
url, params = url.split("?")
self.assertEqual(url, obj.object_url)
params = dict(urlparse.parse_qsl(params))
# pregenerated based on parameters and 'foobar' key
expected = "a42206ca0c6e654e46cd5e33b2b1f92aab81194d"
self.assertEqual("1000", params["temp_url_expires"])
self.assertEqual(expected, params["temp_url_sig"])
@gen_test
async def test_generate_tempurl_forces_integer_expiration(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object", tempurl_key="foobar")
url = obj.generate_tempurl(method="GET", expires=1000.112)
params = dict(urlparse.parse_qsl(url.split("?")[1]))
# pregenerated based on parameters and 'foobar' key
expected = "a42206ca0c6e654e46cd5e33b2b1f92aab81194d"
self.assertEqual("1000", params["temp_url_expires"])
self.assertEqual(expected, params["temp_url_sig"])
@gen_test
async def test_upload_stream_stores_contents(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
writer = await obj.upload_stream(mimetype="text/html")
await writer.write("CON".encode("ascii"))
await writer.write("TENTS".encode("ascii"))
result = await writer.finish()
self.assertEqual("success", result["status"])
self.assertEqual(8, result["length"])
self.assertEqual(
hashlib.md5(b"CONTENTS").hexdigest(), result["md5sum"])
request = self.storage_service.assert_requested(
"PUT", "/v1/container/object", headers={
"X-Auth-Token": "TOKEN", "Content-type": "text/html"})
self.assertEqual(b"CONTENTS", request.body)
@gen_test
async def test_upload_stream_allows_extra_metadata(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
writer = await obj.upload_stream(
mimetype="text/html", metadata={"foo": "bar", "cat": "mouse"})
await writer.write(b"CONTENTS")
result = await writer.finish()
self.assertEqual("success", result["status"])
request = self.storage_service.assert_requested(
"PUT", "/v1/container/object")
self.assertEqual("bar", request.headers["X-Object-Meta-foo"])
self.assertEqual("mouse", request.headers["X-Object-Meta-cat"])
@gen_test
async def test_upload_stream_raises_error(self):
self.storage_service.add_method(
"PUT", "/v1/container/object", object_write_error_handle)
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
writer = await obj.upload_stream(mimetype="text/html")
await writer.write(b"CONTENTS")
result = await writer.finish()
self.assertEqual("error", result["status"])
self.assertEqual(401, result["code"])
self.assertEqual(b"ERROR", result["body"])
@gen_test
async def test_upload_stream_allows_content_length(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
writer = await obj.upload_stream(
mimetype="text/html", content_length=8)
await writer.write(b"CONTENTS")
await writer.finish()
request = self.storage_service.assert_requested(
"PUT", "/v1/container/object")
self.assertEqual("8", request.headers["Content-length"])
@gen_test
async def test_upload_stream_allows_custom_metadata(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("manifest")
segment_writer = SegmentWriter.with_defaults(segment_size=10)
writer = await obj.upload_stream(
mimetype="text/html", writer=segment_writer,
metadata={"cat": "dog"})
await writer.write(b"lincoln")
result = await writer.finish()
self.assertEqual("success", result["status"])
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest")
self.assertEqual("dog", request.headers["X-Object-Meta-cat"])
@gen_test
async def test_upload_stream_allows_segmentation(self):
# big, nasty segment test. should be broken up later, especially
# with retry, etc.
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("manifest")
segment_writer = SegmentWriter.with_defaults(segment_size=4)
writer = await obj.upload_stream(
mimetype="text/html", writer=segment_writer)
await writer.write(b"abe")
await writer.write(b" lincoln")
await writer.write(b" wins")
result = await writer.finish()
self.assertEqual("success", result["status"])
expected = [
("abe ", "/container/manifest/segments/000001"),
("linc", "/container/manifest/segments/000002"),
("oln ", "/container/manifest/segments/000003"),
("wins", "/container/manifest/segments/000004")
]
for content, segment_path in expected:
request = self.storage_service.assert_requested(
"PUT", "/v1{}".format(segment_path),
headers={"X-Auth-Token": "TOKEN"})
self.assertEqual(content, request.body.decode("utf8"))
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest",
headers={"X-Auth-Token": "TOKEN"})
self.assertEqual(b"put", request.arguments["multipart-manifest"][0])
self.assertEqual("text/html", request.headers["Content-type"])
body = json.loads(request.body.decode("utf8"))
self.assertEqual(4, len(body))
for i in range(len(body)):
segment_info = body[i]
expected_body, expected_path = expected[i]
expected_etag = hashlib.md5(
expected_body.encode("ascii")).hexdigest()
self.assertEqual(expected_path, segment_info["path"])
self.assertEqual(expected_etag, segment_info["etag"])
self.assertEqual(4, segment_info["size_bytes"])
@gen_test
async def test_upload_stream_allows_custom_segments(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("manifest")
# bad segment size so we can ensure it's not using it
segment_writer = SegmentWriter.with_defaults(segment_size=1)
writer = await obj.upload_stream(
mimetype="text/html", writer=segment_writer)
segment1 = writer.create_segment()
await segment1.write(b"foo")
await segment1.write(b"bar")
await segment1.write(b"one")
await writer.close_segment(segment1)
segment2 = writer.create_segment()
await segment2.write(b"foobar2")
await writer.close_segment(segment2)
result = await writer.finish()
self.assertEqual("success", result["status"])
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest/segments/000001")
self.assertEqual(b"foobarone", request.body)
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest/segments/000002")
self.assertEqual(b"foobar2", request.body)
@gen_test
async def test_upload_segment_allows_dynamic_segments(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("manifest")
writer = await obj.upload_stream(
mimetype="text/html",
writer=SegmentWriter.with_defaults(dynamic=True))
segment1 = writer.create_segment("001")
await segment1.write(b"foo")
await writer.close_segment(segment1)
segment2 = writer.create_segment("005")
await segment2.write(b"bar")
await writer.close_segment(segment2)
result = await writer.finish()
self.assertEqual("success", result["status"])
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest/segments/001")
self.assertEqual(b"foo", request.body)
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest/segments/005")
self.assertEqual(b"bar", request.body)
request = self.storage_service.assert_requested(
"PUT", "/v1/container/manifest")
self.assertEqual(
"container/manifest/segments",
request.headers["X-Object-Manifest"])
self.assertEqual("text/html", request.headers["Content-type"])
self.assertEqual(b"", request.body)
# Need to add tests that verify etags, retry manifests, etc.
@gen_test
async def test_read_chunk(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
chunk1 = await obj.read(0, 1023)
chunk2 = await obj.read(1024)
total = await obj.read()
self.assertEqual(chunk1, OBJECT_BODY[:1024])
self.assertEqual(chunk2, OBJECT_BODY[1024:])
self.assertEqual(OBJECT_BODY, total)
@gen_test
async def test_read_stream_returns_body_in_chunks(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
reader = await obj.read_stream()
body = bytearray()
for read_chunk in reader:
chunk = await read_chunk
body.extend(chunk)
self.assertEqual(OBJECT_BODY, body)
@gen_test
async def test_read_stream_raises_with_uncomsumed_chunk(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
reader = await obj.read_stream()
next(reader)
with self.assertRaises(StreamError):
next(reader)
@gen_test
async def test_read_stream_raises_with_bad_response(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object2")
reader = await obj.read_stream()
with self.assertRaises(StreamError):
for read_chunk in reader:
await read_chunk
@gen_test
async def test_info_returns_metadata_about_object(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object")
info = await obj.info()
self.assertEqual("success", info["status"])
self.assertEqual({"foo": "foo", "bar": "bar"}, info["metadata"])
self.assertEqual("value", info["x-foobar"])
self.assertEqual(1024, info["length"])
self.assertEqual("text/plain", info["type"])
self.assertEqual("md5sum", info["etag"])
@gen_test
async def test_info_returns_error_with_bad_response(self):
self.start_services()
container = await self.client.fetch_container("container")
obj = await container.fetch_object("object2")
info = await obj.info()
self.assertEqual("error", info["status"])
self.assertEqual(404, info["code"])
def object_write_handle(handler):
    """Service stub: acknowledge an object PUT with the body's MD5 as ETag."""
    checksum = hashlib.md5(handler.request.body).hexdigest()
    handler.set_status(201)
    handler.set_header("ETag", checksum)
    handler.finish()
def object_write_error_handle(handler):
    """Service stub: reject an object PUT with HTTP 401 and body "ERROR"."""
    status_code, body = 401, "ERROR"
    handler.set_status(status_code)
    handler.write(body)
def object_read_handle(handler):
    """Service stub: stream OBJECT_BODY back in 1 KiB chunks, honouring an
    optional HTTP ``Range: bytes=start-end`` request header."""
    handler.set_status(200)
    # Default to the full body when no Range header is present.
    range_string = handler.request.headers.get("Range", "bytes=0-")
    range_parts = range_string.split("=")[1].rsplit("-", 1)
    start, end = range_parts
    if not end:
        # Open-ended range ("bytes=N-"): serve through the last byte.
        end = len(OBJECT_BODY) - 1
    # HTTP ranges are inclusive; convert to a Python-style exclusive end.
    start, end = (int(start), int(end) + 1)
    for i in range(0, end-start, 1024):
        offset = start + i
        # Clamp the final chunk so we never read past `end`.
        offset_end = offset + 1024 if end > offset + 1024 else end
        handler.write(OBJECT_BODY[offset:offset_end])
        handler.flush()
    handler.finish()
def object_info_handle(handler):
    """Service stub: answer a HEAD request with canned object metadata."""
    handler.set_status(200)
    canned_headers = [
        ("X-Object-Meta-Foo", "foo"),
        ("X-Object-Meta-Bar", "bar"),
        ("Etag", "md5sum"),
        ("Content-length", "1024"),
        ("Content-type", "text/plain"),
        ("X-Foobar", "value"),
    ]
    for name, value in canned_headers:
        handler.set_header(name, value)
    handler.finish()
| [
"catchjosh@gmail.com"
] | catchjosh@gmail.com |
04df1bf0937573a45683f42a2a2164d6ce0d1f49 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/poetry/core/_vendor/packaging/__about__.py | d6a5b918dbed4d5f6e27b58b4269032a25063f14 | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/87/d4/0e/025817939d425545df0f67630cef17ef3d832a39c8c6411c9821da9537 | [
"37465112+JawshyJ@users.noreply.github.com"
] | 37465112+JawshyJ@users.noreply.github.com |
7f45c773c3dd9aefea08cb9a9711902e9d32e7e8 | 3adf1035314c70514e7acefb13c5489e41fab30e | /stock/migrations/0001_initial.py | a3d8387afb1f6899b25333d2103a84a4be428dba | [
"Apache-2.0"
] | permissive | nowanys/GreaterWMS | 9597bcb2eee25e5c803355d9e7373b62c03af909 | 51baefe3a10016575411133bbc6eb4625d794d82 | refs/heads/master | 2023-02-24T06:14:04.318108 | 2021-01-26T06:57:45 | 2021-01-26T06:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,077 | py | # Generated by Django 3.1.4 on 2021-01-18 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``stockbin`` and ``stocklist`` tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Per-bin stock record: quantities of one goods code held in one bin.
        migrations.CreateModel(
            name='StockBinModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bin_name', models.CharField(max_length=255, verbose_name='Bin Name')),
                ('goods_code', models.CharField(max_length=255, verbose_name='Goods Code')),
                ('goods_desc', models.CharField(max_length=255, verbose_name='Goods Description')),
                ('goods_qty', models.IntegerField(default=0, verbose_name='Binstock Qty')),
                ('pick_qty', models.IntegerField(default=0, verbose_name='BinPick Qty')),
                ('picked_qty', models.IntegerField(default=0, verbose_name='BinPicked Qty')),
                ('bin_size', models.CharField(max_length=255, verbose_name='Bin size')),
                ('bin_property', models.CharField(max_length=255, verbose_name='Bin Property')),
                # openid scopes rows to a tenant/user; timestamps are auto-managed.
                ('openid', models.CharField(max_length=255, verbose_name='Openid')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
            ],
            options={
                'verbose_name': 'data id',
                'verbose_name_plural': 'data id',
                'db_table': 'stockbin',
                'ordering': ['-id'],
            },
        ),
        # Aggregated stock record per goods code, broken down by lifecycle state.
        migrations.CreateModel(
            name='StockListModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('goods_code', models.CharField(max_length=32, verbose_name='Goods Code')),
                ('goods_desc', models.CharField(max_length=255, verbose_name='Goods Description')),
                ('goods_qty', models.BigIntegerField(default=0, verbose_name='Total Qty')),
                ('onhand_stock', models.BigIntegerField(default=0, verbose_name='On Hand Stock')),
                ('can_order_stock', models.BigIntegerField(default=0, verbose_name='Can Order Stock')),
                ('ordered_stock', models.BigIntegerField(default=0, verbose_name='Ordered Stock')),
                ('inspect_stock', models.BigIntegerField(default=0, verbose_name='Inspect Stock')),
                ('hold_stock', models.BigIntegerField(default=0, verbose_name='Holding Stock')),
                ('damage_stock', models.BigIntegerField(default=0, verbose_name='Damage Stock')),
                ('asn_stock', models.BigIntegerField(default=0, verbose_name='ASN Stock')),
                ('dn_stock', models.BigIntegerField(default=0, verbose_name='DN Stock')),
                ('pre_load_stock', models.BigIntegerField(default=0, verbose_name='Pre Load Stock')),
                ('pre_sort_stock', models.BigIntegerField(default=0, verbose_name='Pre Sort Stock')),
                ('sorted_stock', models.BigIntegerField(default=0, verbose_name='Sorted Stock')),
                ('pick_stock', models.BigIntegerField(default=0, verbose_name='Pick Stock')),
                ('picked_stock', models.BigIntegerField(default=0, verbose_name='Picked Stock')),
                ('back_order_stock', models.BigIntegerField(default=0, verbose_name='Back Order Stock')),
                ('openid', models.CharField(max_length=255, verbose_name='Openid')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
            ],
            options={
                'verbose_name': 'data id',
                'verbose_name_plural': 'data id',
                'db_table': 'stocklist',
                'ordering': ['-id'],
            },
        ),
    ]
| [
"singosgu@gmail.com"
] | singosgu@gmail.com |
2b4b4aa93d84385b2cdcabc1169aa211a0ecf359 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/12_Logging/a_builtin_logging/16_logging_json.py | e96163fa788ef205f6df9a6c57247576641c0168 | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,015 | py | import json
import logging
import logging.config
class JsonFormatter:
    """Render a ``logging.LogRecord`` as an indented JSON object.

    Duck-types ``logging.Formatter``: handlers only call ``format(record)``.
    """

    # LogRecord attributes copied verbatim into the JSON payload.
    ATTR_TO_JSON = [
        "created",
        "filename",
        "funcName",
        "levelname",
        "lineno",
        "module",
        "msecs",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "thread",
        "threadName",
    ]

    def format(self, record):
        """Return *record* serialized as a pretty-printed JSON string."""
        payload = {}
        for attr_name in self.ATTR_TO_JSON:
            payload[attr_name] = getattr(record, attr_name)
        return json.dumps(payload, indent=4)
import os

# Ensure the target directory exists: logging.FileHandler raises
# FileNotFoundError when the parent directory is missing.
os.makedirs("logs", exist_ok=True)

# Route all records through the JSON formatter, to console and to file.
console_handler = logging.StreamHandler()
console_handler.formatter = JsonFormatter()

file_handler = logging.FileHandler(filename="logs/17_logging_json.json", mode="w")
file_handler.formatter = JsonFormatter()

logger = logging.getLogger(__name__)
# Without an explicit level the effective level is WARNING (root default),
# so the debug/info demo messages below would be silently dropped.
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)

# Emit one message per severity level to demonstrate the JSON output.
logger.debug("Debug message")
logger.info("info message")
logger.warning("warning message")
logger.error("error message")
logger.critical("critical message")
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
99a7a7f6f37e668b259ef80193a305341646509f | 0818a9020adc6e25b86060a8e84171d0b4958625 | /test_demo/learn_mxnet/main.py | 32fe35b55545b1f1f14cb4d73be8713c45578381 | [] | no_license | wgwangang/mycodes | 2107becb6c457ed88b46426974a8f1fa07ed37dd | 9fa48ca071eacf480034d1f69d3c05171d8a97d2 | refs/heads/master | 2020-03-28T07:58:45.017910 | 2018-03-14T07:21:14 | 2018-03-14T07:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | import mxnet as mx
from mxnet import sym
from mxnet import symbol

# Build a tiny symbolic graph. `mx.sym` and `mx.symbol` are the same module
# exposed under two names, as the prints below demonstrate.
data_var = mx.sym.Variable('data')
# NOTE(review): passing the input symbol as `weight` is unusual -- presumably
# just API experimentation; behavior kept as-is.
fc_first = mx.sym.FullyConnected(data=data_var, weight=data_var, name='fc1', num_hidden=128)
fc_second = symbol.FullyConnected(data=fc_first, weight=fc_first, name='fc1', num_hidden=128)
print(sym)
print(symbol)
mx.viz.plot_network(symbol=fc_first).render()
| [
"yinpenghhz@hotmail.com"
] | yinpenghhz@hotmail.com |
bea8ea31a5a292c4e98de4927510a0929e68ac71 | c247293cbd83e8b70b23e658b7ae69c05210669a | /test/test_transformers.py | 54b6125eba7b5a6718df8ef7fb25d9cab7e11bb5 | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | mtmd/pytorch | cdc26d2ae4afb159d7e805be73ea691aff511e2e | 5e6c1246490e6c9934ba426c6ad8adab2387b5ce | refs/heads/main | 2023-07-20T10:02:14.721772 | 2023-07-17T22:14:59 | 2023-07-17T22:15:03 | 314,725,853 | 0 | 0 | NOASSERTION | 2020-11-21T03:39:59 | 2020-11-21T03:39:58 | null | UTF-8 | Python | false | false | 122,023 | py | # Owner(s): ["module: nn"]
import contextlib
from functools import partial
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import unittest
from unittest.mock import patch, MagicMock, ANY
import math
from torch.backends.cuda import sdp_kernel, SDPBackend
import torch.optim as optim
from torch.testing._internal.common_dtype import floating_types_and_half
from torch.testing._internal.common_device_type import instantiate_device_type_tests, onlyCUDA, onlyCPU
from typing import List, Tuple, Union, Optional
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
TEST_FAIRSEQ,
run_tests,
parametrize,
freeze_rng_state,
TEST_WITH_CROSSREF,
slowTest,
set_default_dtype,
gradcheck
)
from torch.testing._internal.common_methods_invocations import wrapper_set_seed
from torch.testing._internal.common_cuda import SM80OrLater, PLATFORM_SUPPORTS_FUSED_SDPA
if TEST_FAIRSEQ:
import fairseq.models.transformer as fairseq_transformer
@contextlib.contextmanager
def use_deterministic_algorithims(mode: bool, warn_only: bool):
    r"""Temporarily set torch's deterministic-algorithms flags.

    The flags are restored to their previous values on exit, even if the
    body (or the set call itself) raises.
    """
    saved_mode: bool = torch.are_deterministic_algorithms_enabled()
    saved_warn_only: bool = torch.is_deterministic_algorithms_warn_only_enabled()
    try:
        torch.use_deterministic_algorithms(mode, warn_only=warn_only)
        yield {}
    finally:
        torch.use_deterministic_algorithms(saved_mode, warn_only=saved_warn_only)
# Found in torch/testing/_comparison.py
# Per-dtype default absolute/relative tolerances (mirrors torch/testing/_comparison.py).
default_atol = {torch.float16: 1e-3, torch.bfloat16: 1e-3, torch.float32: 1e-5}
default_rtol = {torch.float16: 1e-3, torch.bfloat16: 1.6e-2, torch.float32: 1.3e-6}
# CUDA compute-capability flags used to gate SDPA kernel expectations below.
isSM86or89Device = torch.cuda.is_available() and torch.cuda.get_device_capability() in [(8, 6), (8, 9)]
isSM90Device = torch.cuda.is_available() and torch.cuda.get_device_capability() == (9, 0)
isSM5xDevice = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 5
def get_rtol(true_value: torch.Tensor, computed_value: torch.Tensor) -> float:
    """Largest elementwise relative error of *computed_value* w.r.t. *true_value*."""
    rel_err = torch.abs((true_value - computed_value) / true_value)
    # 0/0 entries come out as NaN; replace them with the dtype's default rtol.
    torch.nan_to_num_(rel_err, nan=default_rtol[computed_value.dtype])
    return rel_err.max().item()
def get_atol(true_value: torch.Tensor, computed_value: torch.Tensor) -> float:
    """Largest elementwise absolute error between the two tensors."""
    return torch.abs(true_value - computed_value).max().item()
def get_tolerances(
    true_value: torch.Tensor,
    computed_value: torch.Tensor,
    fudge_factor: Optional[float] = None,
) -> Tuple[float, float]:
    """Returns the absolute and relative tolerances for comparing two tensors."""
    scale = 1.0 if fudge_factor is None else fudge_factor
    # Never go below the dtype's default tolerance.
    atol = scale * max(get_atol(true_value, computed_value), default_atol[computed_value.dtype])
    rtol = scale * max(get_rtol(true_value, computed_value), default_rtol[computed_value.dtype])
    # torch.isclose() has weird behavior around see:
    # https://github.com/pytorch/pytorch/issues/102400
    if rtol > 1e30:
        rtol = default_rtol[computed_value.dtype]
    return atol, rtol
# Keyword arguments for torch.backends.cuda.sdp_kernel() that force each
# scaled-dot-product-attention backend exclusively.
backend_map = {
    SDPBackend.MATH: {"enable_math": True, "enable_flash": False, "enable_mem_efficient": False},
    SDPBackend.FLASH_ATTENTION: {"enable_math": False, "enable_flash": True, "enable_mem_efficient": False},
    SDPBackend.EFFICIENT_ATTENTION: {
        "enable_math": False, "enable_flash": False, "enable_mem_efficient": True}
}
def rand_sdpa_tensor(shape: Tuple[Union[int, List[int]]], device: str, dtype: torch.dtype, type: str,
                     requires_grad: bool = False, packed: bool = False) -> torch.Tensor:
    """Creates rand dense or nested tensor with given shape and type.

    Args:
        shape (Tuple[int]): (batch, seq_len, num_heads, head_dim); seq_len may be a
            list of per-batch lengths when type == "nested"
        device (str): which device to create tensor on
        dtype (torch.dtype): Tensors' dtype
        type (str): "nested" or dense
        requires_grad (bool, optional): Tensors grad status. Defaults to False.
        packed (bool, optional): Whether to create a single QKV packed or not. Defaults to False.

    Returns:
        torch.Tensor: A new tensor
    """
    batch, seq_len, num_heads, head_dim = shape

    def _entry_size(n):
        # Unpacked keeps heads separate; packed fuses q/k/v into one trailing dim.
        return (n, num_heads, head_dim) if not packed else (n, 3 * num_heads * head_dim)

    if type == "nested":
        # Uniform length when a single int is given, otherwise one entry per length.
        lengths = seq_len if isinstance(seq_len, list) else [seq_len] * batch
        return torch.nested.nested_tensor([
            torch.randn(_entry_size(n), device=device, dtype=dtype, requires_grad=requires_grad)
            for n in lengths])
    assert (isinstance(seq_len, int))
    return torch.randn((batch,) + _entry_size(seq_len),
                       device=device, dtype=dtype, requires_grad=requires_grad)
class TestTransformers(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
    @onlyCUDA
    @unittest.skip("4D mask not supported yet - activate when 4D mask supported")
    def test_self_attn_TxT_attn_mask(self, device):
        """A [T, T] attn_mask and its [N, H, T, T] broadcast expansion should give identical MHA outputs.

        Currently skipped: MultiheadAttention does not accept 4D masks yet.
        """
        embed_dim = 16
        num_heads = 4
        batch_size = 10
        tgt_len = 16
        query = torch.rand(batch_size, tgt_len, embed_dim, device=device)  # [N, T, D]
        # Random 0/1 mask converted to additive form: -inf blocks, 0.0 allows.
        attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float()  # [T, T]
        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))
        attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
        mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda()
        mta_model.eval()
        # Generate 3D results
        with torch.inference_mode():
            output_mask_4d = mta_model(query, query, query, attn_mask=attn_mask_4d)[0]
            output_mask_4d = output_mask_4d.transpose(0, 1)  # [N, T, D]
            output_mask_TxT = mta_model(query, query, query, attn_mask=attn_mask)[0]
            output_mask_TxT = output_mask_TxT.transpose(0, 1)  # [N, T, D]
            self.assertEqual(output_mask_4d, output_mask_TxT)
@slowTest
def test_train_with_pad_and_catch_error(self, device):
iters = 100
pad_mask = torch.tensor([[1, 1, 0, 0]], dtype=torch.bool).to(device)
layer = nn.TransformerEncoderLayer(
d_model=2,
dim_feedforward=4,
nhead=2,
batch_first=True,
activation="gelu",
dropout=0,
)
criterion = nn.MSELoss()
encoder = nn.TransformerEncoder(layer, 2).to(device)
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
encoder.train()
for i in range(iters):
encoder.train()
optimizer.zero_grad()
inputs = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)
outputs = encoder(inputs, src_key_padding_mask=pad_mask)
loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
loss.backward()
optimizer.step()
with torch.no_grad():
test = torch.cat([torch.randn(1, 2, 2), torch.zeros(1, 2, 2)], dim=1).to(device)
# Expect uint8 type not supported
ex = None
try:
test_train_uint8 = encoder(test, src_key_padding_mask=pad_mask.to(torch.uint8))
except AssertionError as e:
continue
self.assertFalse(e, "Failed to catch unsupported uint8 type exception")
test_train_bool = encoder(test, src_key_padding_mask=pad_mask)
encoder.eval()
# Expect long type not supported
ex = None
try:
test_eval_uint8 = encoder(test, src_key_padding_mask=pad_mask.to(torch.int64))
except AssertionError as e:
continue
self.assertFalse(e, "Failed to catch unsupported Long type exception")
test_eval_bool = encoder(test, src_key_padding_mask=pad_mask)
l1_bool = nn.L1Loss()(test_train_bool[:, 0:2, :], test_eval_bool[:, 0:2, :]).item()
self.assertTrue(l1_bool < 1e-4, "Eval/Train difference in pad_mask BOOL")
    @parametrize("attn_mask_dim", [2, 3, None])
    @parametrize("key_padding_mask_dim", [2, None])
    def test_multiheadattention_fastpath_attn_mask(self, device, attn_mask_dim, key_padding_mask_dim):
        """MHA must accept every combination of 2D/3D/absent attn_mask with a
        2D/absent key_padding_mask on both the train (slow) and eval (fast) paths."""
        with torch.no_grad():
            B = 2
            L = 4
            D = 8
            H = 4
            # Build bool masks of the parametrized rank (3D masks are per-head: B*H rows).
            if attn_mask_dim == 2:
                attn_mask = torch.randn(L, L, device=device) > 0
            elif attn_mask_dim == 3:
                attn_mask = torch.randn(B * H, L, L, device=device) > 0
            elif attn_mask_dim is None:
                attn_mask = None
            if key_padding_mask_dim == 2:
                key_padding_mask = torch.randn(B, L, device=device) > 0
            elif key_padding_mask_dim is None:
                key_padding_mask = None
            mha = nn.MultiheadAttention(D, H, batch_first=True, device=device)
            X = torch.randn(B, L, D, device=device)
            mha.train()  # disable fast path
            out, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
            mha.eval()  # enable fast path
            out, _ = mha(X, X, X, attn_mask=attn_mask, key_padding_mask=key_padding_mask, need_weights=False)
@parametrize("nhead", [1, 4, 8])
def test_transformerencoderlayer_src_mask(self, device, nhead):
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
model = torch.nn.TransformerEncoderLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True).to(device)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
model(src, src_mask=src_mask)
model.eval()
with torch.no_grad():
model(src, src_mask=src_mask)
    @parametrize("use_torchscript", [False])
    @parametrize("enable_nested_tensor", [True, False])
    @parametrize("use_autocast", [True, False])
    @parametrize("d_model", [12, 256])
    def test_transformerencoder_fastpath(self, device, use_torchscript, enable_nested_tensor, use_autocast, d_model):
        """
        Test TransformerEncoder fastpath output matches slowpath output.

        The fast path runs under no_grad in eval mode; the slow path is forced by
        keeping grad enabled. Outputs are compared only on unpadded positions.
        """
        torch.manual_seed(1234)
        nhead = 4
        dim_feedforward = d_model
        batch_first = True
        model = torch.nn.TransformerEncoder(
            torch.nn.TransformerEncoderLayer(
                d_model=d_model,
                nhead=nhead,
                dim_feedforward=dim_feedforward,
                batch_first=batch_first),
            num_layers=2,
            enable_nested_tensor=enable_nested_tensor
        ).to(device).eval()
        if use_torchscript:
            model = torch.jit.script(model)
        # each input is (input, mask); mask rows list 0 = keep, 1 = padded
        input_mask_pairs = [
            (
                torch.rand(3, 2, d_model),
                [
                    [0, 1],
                    [0, 1],
                    [1, 1]
                ]
            ),
            (
                torch.rand(2, 100, d_model),
                [
                    [0] * 98 + [1] * 2,
                    [0] * 90 + [1] * 10
                ]
            ),
            # softmax.cu switches from fast->slowpath at masked seqlen 1024. test 1024.
            (
                torch.rand(2, 1024, d_model),
                [
                    [0] * 1020 + [1] * 4,
                    [0] * 1024,
                ]
            ),
            (
                torch.rand(1, 1026, d_model),
                [[0] * 1024 + [1] * 2]
            ),
            # softmax.cu switches from fast->slowpath at masked seqlen 1024. test range of masks above 1024.
            (
                torch.rand(4, 1040, d_model),
                [
                    [0] * 1024 + [1] * 16,
                    [0] * 1025 + [1] * 15,
                    [0] * 1031 + [1] * 9,
                    [0] * 1040,
                ]
            )
        ]
        input_mask_pairs = [
            (
                torch.tensor(pair[0], device=device, dtype=torch.get_default_dtype()),  # float input
                torch.tensor(pair[1], device=device, dtype=torch.bool)  # bool mask
            ) for pair in input_mask_pairs
        ]
        maybe_autocast = torch.autocast("cuda", dtype=torch.float16) if use_autocast else contextlib.nullcontext()
        with maybe_autocast:
            for input, src_key_padding_mask in input_mask_pairs:
                with torch.no_grad():
                    fastpath_output = model(input, src_key_padding_mask=src_key_padding_mask)
                slowpath_output = model(input, src_key_padding_mask=src_key_padding_mask)  # reference
                # Make sure fastpath_output is same shape as slowpath_output and mask.
                # When enable_nested_tensor=true, fastpath_output may be smaller than input tensor.
                # Eg if input bs=1, seqlen=6, and we mask out 2 tokens, fastpath_output will have bs=1, seqlen=4.
                # Expand back to old size to match.
                bs, true_seqlen, embed_dim = fastpath_output.shape
                expanded_seqlen = src_key_padding_mask.shape[1]
                fastpath_output_expanded = torch.zeros(bs, expanded_seqlen, embed_dim, device=device)
                fastpath_output_expanded[:, :true_seqlen, :] = fastpath_output
                # no garauntees on output corresponding to masked tokens, so they may vary between slow/fast path. set all to 0.
                fastpath_output_expanded = fastpath_output_expanded.masked_fill(src_key_padding_mask.unsqueeze(-1), 0)
                slowpath_output = slowpath_output.masked_fill(src_key_padding_mask.unsqueeze(-1), 0)
                torch.testing.assert_close(fastpath_output_expanded, slowpath_output, rtol=1e-7, atol=1e-5)
    @parametrize("with_no_grad", [True, False])
    @parametrize("training", [True, False])
    @parametrize("enable_nested_tensor", [False])
    def test_transformerencoder_square_input(self, with_no_grad, training, enable_nested_tensor, device):
        """
        Test for edge cases when input of shape (batch size, sequence length, embedding dimension) has
        batch size == sequence length.

        Weights are set to a deterministic cosine ramp so the output can be
        checked against a hard-coded reference tensor.
        """
        model = torch.nn.TransformerEncoder(
            torch.nn.TransformerEncoderLayer(d_model=4, nhead=2, dim_feedforward=16, dropout=0.0, batch_first=True),
            num_layers=2,
            enable_nested_tensor=enable_nested_tensor
        ).to(device)
        with torch.no_grad():
            # set constant weights of the model
            for idx, p in enumerate(model.parameters()):
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape
                x = torch.cos(torch.arange(0, sz).float().view(shape))
                p.data.copy_(x)
        if training:
            model = model.train()
        else:
            model = model.eval()
        x = torch.arange(0, 16).reshape(2, 2, 4).to(torch.get_default_dtype()).to(device)
        src_mask = torch.Tensor([[0, 1], [0, 0]]).to(torch.bool).to(device)
        if with_no_grad:
            cm = torch.no_grad()
        else:
            cm = contextlib.nullcontext()
        with cm:
            result = model(x, mask=src_mask)
        ref_output = torch.Tensor([[[2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351],
                                    [2.420306205749512, 0.017629241570830, -0.607857942581177, -0.085519507527351]],
                                   [[2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689],
                                    [2.419836044311523, 0.017548924311996, -0.608187675476074, -0.085347734391689]]]
                                  ).to(device)
        self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
        torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
    @parametrize("batch_first", [True, False])
    @parametrize("training", [True, False])
    @parametrize("enable_nested_tensor", [True, False])
    def test_transformerencoder(self, batch_first, training, enable_nested_tensor, device):
        """Deterministic TransformerEncoder test: with cosine-ramp weights and a fixed
        input, outputs must match hard-coded reference tensors across layer counts,
        masks, norm, batch_first layouts, and train/eval modes."""
        def get_a_test_layer(activation, batch_first=False):
            # Build an encoder layer whose parameters are a deterministic cosine ramp.
            d_model = 4
            nhead = 2
            dim_feedforward = 16
            dropout = 0.0
            layer = nn.TransformerEncoderLayer(
                d_model,
                nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation=activation,
                batch_first=batch_first,
            ).to(device)
            with torch.no_grad():
                # set constant weights of the model
                for idx, p in enumerate(layer.parameters()):
                    x = p.data
                    sz = x.view(-1).size(0)
                    shape = x.shape
                    x = torch.cos(torch.arange(0, sz).float().view(shape))
                    p.data.copy_(x)
            return layer
        # this is a deterministic test for TransformerEncoder
        activation = F.relu
        def _test(batch_first, training, enable_nested_tensor):
            def perm_fn(x):
                # References are written seq-first; transpose them for batch_first runs.
                return x.transpose(1, 0) if batch_first else x
            encoder_layer = get_a_test_layer(activation=activation,
                                             batch_first=batch_first)
            model = nn.TransformerEncoder(
                encoder_layer, 1, enable_nested_tensor=enable_nested_tensor
            ).to(device)
            if not training:
                model = model.eval()
            # deterministic input
            encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                   [0.5387, 0.1655, 0.3565, 0.0471]],
                                                  [[0.8335, 0.2799, 0.5031, 0.2947],
                                                   [0.1402, 0.0318, 0.7636, 0.1346]],
                                                  [[0.6333, 0.9344, 0.1376, 0.9938],
                                                   [0.8924, 0.2872, 0.6692, 0.2944]],
                                                  [[0.9897, 0.6915, 0.3154, 0.1733],
                                                   [0.8645, 0.3513, 0.3064, 0.0767]],
                                                  [[0.8117, 0.2366, 0.4838, 0.7881],
                                                   [0.3718, 0.4945, 0.9511, 0.0864]]]
                                                 )).to(device)
            result = model(encoder_input)
            ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],
                                                [2.427987, 0.021213, -0.602496, -0.084103]],
                                               [[2.424689, 0.019155, -0.604793, -0.085672],
                                                [2.413863, 0.022211, -0.612486, -0.072490]],
                                               [[2.433774, 0.021598, -0.598343, -0.087548],
                                                [2.425104, 0.019748, -0.604515, -0.084839]],
                                               [[2.436185, 0.022682, -0.596625, -0.087261],
                                                [2.433556, 0.021891, -0.598509, -0.086832]],
                                               [[2.416246, 0.017512, -0.610712, -0.082961],
                                                [2.422901, 0.024187, -0.606178, -0.074929]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # all 0 src_mask
            src_mask = torch.zeros([5, 5]).to(device) == 1
            result = model(encoder_input, mask=src_mask)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # all 0
            mask = torch.zeros([2, 5]).to(device) == 1
            result = model(encoder_input, src_key_padding_mask=mask)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # Pad out a few positions and compare against new references.
            mask[0, 1] = 1
            mask[1, 3] = 1
            mask[1, 4] = 1
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],
                                                [2.428811, 0.021445, -0.601912, -0.084252]],
                                               [[2.425009, 0.019155, -0.604566, -0.085899],
                                                [2.415408, 0.02249, -0.611415, -0.073]],
                                               [[2.434199, 0.021682, -0.598039, -0.087699],
                                                [2.42598, 0.019941, -0.603896, -0.085091]],
                                               [[2.436457, 0.022736, -0.59643, -0.08736],
                                                [2.434021, 0.022093, -0.598179, -0.08679]],
                                               [[2.416531, 0.017498, -0.610513, -0.083181],
                                                [2.4242, 0.024653, -0.605266, -0.074959]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # test case 2, multiple layers no norm
            model = nn.TransformerEncoder(encoder_layer, 2, enable_nested_tensor=enable_nested_tensor).to(device)
            if not training:
                model = model.eval()
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],
                                                [2.419102, 0.017452, -0.608703, -0.085026]],
                                               [[2.419043, 0.017445, -0.608744, -0.084999],
                                                [2.419052, 0.017446, -0.608738, -0.085004]],
                                               [[2.419067, 0.017448, -0.608727, -0.085010],
                                                [2.419098, 0.017452, -0.608706, -0.085024]],
                                               [[2.419072, 0.017449, -0.608724, -0.085012],
                                                [2.419119, 0.017455, -0.608691, -0.085034]],
                                               [[2.419019, 0.017442, -0.608761, -0.084989],
                                                [2.419075, 0.017449, -0.608722, -0.085014]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            model = nn.TransformerEncoder(encoder_layer, 6, enable_nested_tensor=enable_nested_tensor).to(device)
            if not training:
                model = model.eval()
            result = model(encoder_input, src_key_padding_mask=mask)
            # Six layers converge to (numerically) identical rows.
            ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]],
                                               [[2.419101, 0.017453, -0.608703, -0.085025],
                                                [2.419101, 0.017453, -0.608704, -0.085025]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            # test case 3, multiple layers with norm
            # d_model = 4
            norm = nn.LayerNorm(4)
            model = nn.TransformerEncoder(encoder_layer, 2, norm=norm,
                                          enable_nested_tensor=enable_nested_tensor).to(device)
            if not training:
                model = model.eval()
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],
                                                [1.695955, -0.357639, -0.893050, -0.445266]],
                                               [[1.695948, -0.357634, -0.893082, -0.445233],
                                                [1.695950, -0.357635, -0.893077, -0.445238]],
                                               [[1.695951, -0.357636, -0.893069, -0.445246],
                                                [1.695955, -0.357639, -0.893052, -0.445264]],
                                               [[1.695952, -0.357636, -0.893066, -0.445249],
                                                [1.695957, -0.357641, -0.893041, -0.445276]],
                                               [[1.695946, -0.357632, -0.893095, -0.445220],
                                                [1.695952, -0.357637, -0.893065, -0.445251]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
            model = nn.TransformerEncoder(encoder_layer, 6, norm=norm,
                                          enable_nested_tensor=enable_nested_tensor).to(device)
            if not training:
                model = model.eval()
            result = model(encoder_input, src_key_padding_mask=mask)
            ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]],
                                               [[1.695955, -0.357639, -0.893051, -0.445265],
                                                [1.695955, -0.357639, -0.893051, -0.445265]]]
                                              )).to(device)
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)
        # TODO: remove set default dtype to double by making ref_output more precise.
        # Added because this test was copied from test_nn.py, which has default
        # dtype double. If default dtype is float, tests will say tensors not close because
        # ref output precision too low
        with set_default_dtype(torch.double):
            if training:
                cm = contextlib.nullcontext()
            else:
                cm = torch.no_grad()  # transformer fast path requires no grad
            with cm:
                _test(batch_first, training, enable_nested_tensor)
    @unittest.skipIf(sys.version_info < (3, 11), "not supported on pre-3.11 Python")
    def test_encoder_padding_and_src_mask_bool(self):
        """Combining a bool causal src mask with a bool key-padding mask must not
        emit any warnings/log records (checked via assertNoLogs, 3.11+ only)."""
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=16,
            nhead=2,
            dim_feedforward=32,
            dropout=0.1,
            activation='relu',
            batch_first=True,
        )
        encoder_norm = nn.LayerNorm(16)
        encoder = nn.TransformerEncoder(
            encoder_layer, 2, encoder_norm
        )
        inputs = torch.randn(2, 3, 16)
        # Upper-triangular True entries block future positions (causal mask).
        src_mask = torch.ones(3, 3, dtype=torch.bool).triu_(diagonal=1)
        input_seq_len = torch.tensor([3, 2])
        # True where position index exceeds the sequence's valid length.
        padding_mask = (
            torch.arange(3)[None, :].cpu() >= input_seq_len[:, None]
        )
        with self.assertNoLogs(None):
            encoder(
                inputs,
                mask=src_mask,
                src_key_padding_mask=padding_mask,
            )
    @unittest.skipIf(sys.version_info < (3, 11), "not supported on pre-3.11 Python")
    def test_decoder_padding_and_src_mask_bool(self):
        """Decoder counterpart of the test above: bool causal tgt mask plus bool
        padding masks must run without emitting any log records (3.11+ only)."""
        def transformer_decoder(inputs, input_seq_len, memory):
            decoder_layer = nn.TransformerDecoderLayer(
                d_model=16,
                nhead=2,
                dim_feedforward=32,
                dropout=0.1,
                activation='relu',
                batch_first=True,
            )
            decoder_norm = nn.LayerNorm(16)
            decoder = nn.TransformerDecoder(
                decoder_layer, 2, decoder_norm
            )
            # Upper-triangular True entries block future positions (causal mask).
            src_mask = torch.ones(
                inputs.shape[1], inputs.shape[1], dtype=torch.bool
            ).triu_(diagonal=1)
            # True where position index exceeds the sequence's valid length.
            padding_mask = (
                torch.arange(inputs.shape[1])[None, :].cpu()
                >= input_seq_len[:, None]
            )
            return decoder(
                inputs,
                memory,
                tgt_mask=src_mask,
                tgt_key_padding_mask=padding_mask,
                memory_key_padding_mask=padding_mask,
            )
        inputs = torch.randn(2, 3, 16)
        memory = torch.randn(2, 3, 16)
        input_seq_len = torch.tensor([3, 2])
        with self.assertNoLogs(None):
            transformer_decoder(inputs, input_seq_len, memory)
def test_encoder_is_causal(self):
d_model = 3
layer = torch.nn.TransformerEncoderLayer(d_model, 1, 6, batch_first=True)
layer.eval()
x = torch.randn(1, 5, d_model)
unmasked_output = layer(x)
mask = torch.nn.Transformer.generate_square_subsequent_mask(x.size(1))
is_causal_output = layer(x, src_mask=mask, is_causal=True)
masked_output = layer(x, src_mask=mask)
self.assertEqual(masked_output, is_causal_output)
    @onlyCUDA
    @parametrize("nb_heads", [1, 8])
    @parametrize("bias", [True, False])
    def test_mha_native_args(self, nb_heads, bias):
        """Smoke test: MHA fast path (eval + no_grad) accepts keyword-style
        query/key/value with and without bias and an optional padding mask."""
        B, L, F = 8, 100, 128
        batch_first = True
        fast_path = True
        # Reuses the bias parametrization to also toggle the padding mask
        # (bias % 2 == 1 is equivalent to bool(bias) here).
        use_pad_mask = (bias % 2) == 1
        mha = nn.MultiheadAttention(
            embed_dim=F,
            num_heads=nb_heads,
            batch_first=batch_first,
            bias=bias
        ).cuda()
        mha.eval()
        # no_grad enables the native fast path in eval mode.
        ctx = torch.no_grad if fast_path else contextlib.nullcontext
        with ctx():
            x = torch.randn(B, L, F).cuda()
            if not batch_first:
                x = x.transpose(0, 1)
            pad_mask = None
            if use_pad_mask:
                pad_mask = torch.zeros((B, L), dtype=torch.bool).cuda()
            mha(query=x, key=x, value=x, key_padding_mask=pad_mask)
    def test_kpm_mask_trailing_column_with_nested_tensor(self, device):
        """A key-padding mask that pads out whole batch rows must not change the
        batch dimension of the output when nested tensors are enabled."""
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=256,
            nhead=4,
            dim_feedforward=512,
            activation='gelu',
            norm_first=False,
            batch_first=False,
        )
        transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, enable_nested_tensor=True).to(device)
        x = torch.randn(10, 6, 256).to(device)
        mask = torch.ones(6, 10)
        mask[0, :] = 0  # first batch element fully unmasked; all others fully padded
        mask = mask.bool().to(device)
        out = transformer_encoder(src=x, src_key_padding_mask=mask)
        # Batch dimension (dim 1, since batch_first=False) must be preserved.
        self.assertEqual(out.shape[1], 6)
    # CPU unit test has_torch_functions in test environment,
    # preventing successful completion
    @onlyCUDA
    def test_with_nested_tensor_input(self, device):
        """Feeding a nested tensor (built from a padding mask) through the encoder
        should return a nested tensor, with no separate src_key_padding_mask."""
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=256,
            nhead=4,
            dim_feedforward=512,
            activation='gelu',
            norm_first=False,
            batch_first=True,
        )
        transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3, enable_nested_tensor=True).to(device)
        transformer_encoder.eval()
        with torch.no_grad():
            x = torch.randn(6, 10, 256).to(device)
            # Vary the valid length per batch element (0 marks kept positions
            # here; the mask is inverted below via logical_not).
            mask = torch.ones(6, 10)
            mask[0, 0:] = 0  # batch element 0: all 10 positions kept
            mask[2, 2:] = 0  # batch element 2: positions 2.. kept
            mask[4, 4:] = 0  # batch element 4: positions 4.. kept
            mask[5, 8:] = 0  # batch element 5: positions 8.. kept
            mask = mask.bool().to(device)
            x = torch._nested_tensor_from_mask(x, mask.logical_not(), mask_check=False)
            out = transformer_encoder(src=x, src_key_padding_mask=None)
        self.assertEqual(out.is_nested, True)
def test_script_encoder_subclass(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
encoder = nn.TransformerEncoder(
MyCustomLayer(d_model=256, nhead=8), num_layers=6
).to(device=device)
torch.jit.script(encoder)
    # brazenly adapted from test_transformerencoderlayer_src_mask to test execution of
    # torchscripted transformerencoderlayer subclass
    def test_transformerencoderlayer_subclass(self, device):
        """A scripted TransformerEncoderLayer subclass must match the eager module's
        output in both training mode (same RNG seed for dropout) and eval mode."""
        class MyCustomLayer(nn.TransformerEncoderLayer):
            pass
        nhead = 4
        batch_size = 2
        seqlen = 4
        d_model = 8
        dim_feedforward = 32
        model = MyCustomLayer(
            d_model=d_model,
            nhead=nhead,
            dim_feedforward=dim_feedforward,
            batch_first=True).to(device)
        script_model = torch.jit.script(model)
        src = torch.rand(batch_size, seqlen, d_model).to(device)  # bs, seqlen, d_model
        src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
        # Re-seed before each forward so dropout draws match between eager/scripted.
        torch.manual_seed(42)
        result = model(src, src_mask=src_mask)
        torch.manual_seed(42)
        scripted_result = script_model(src, src_mask=src_mask)
        self.assertEqual(result, scripted_result)
        model.eval()
        script_model = torch.jit.script(model)
        with torch.no_grad():
            result = model(src, src_mask=src_mask)
            scripted_result = script_model(src, src_mask=src_mask)
            self.assertEqual(result, scripted_result)
def test_transformerencoderlayer_subclass_model(self, device):
class MyCustomLayer(nn.TransformerEncoderLayer):
pass
nhead = 4
batch_size = 2
seqlen = 4
d_model = 8
dim_feedforward = 32
layer = MyCustomLayer(
d_model=d_model,
nhead=nhead,
dim_feedforward=dim_feedforward,
batch_first=True)
model = nn.TransformerEncoder(
layer, num_layers=6
).to(device=device)
script_model = torch.jit.script(model)
src = torch.rand(batch_size, seqlen, d_model).to(device) # bs, seqlen, d_model
src_mask = torch.zeros(seqlen, seqlen).to(torch.bool).to(device)
torch.manual_seed(42)
result = model(src, mask=src_mask)
torch.manual_seed(42)
scripted_result = script_model(src, mask=src_mask)
self.assertEqual(result, scripted_result)
model.eval()
script_model = torch.jit.script(model)
with torch.no_grad():
result = model(src, mask=src_mask)
scripted_result = script_model(src, mask=src_mask)
self.assertEqual(result, scripted_result)
@onlyCUDA
@unittest.skipIf(not TEST_FAIRSEQ, "Fairseq not found")
def test_decoder_only_layer(self):
    """Wrap fairseq's TransformerDecoder (decoder-only, no encoder attention) for comparison tests."""
    # Fairseq convention: embedding index 0 is the padding token.
    DEFAULT_PADDING_IDX = 0

    class FairseqDecoder(torch.nn.Module):
        def __init__(
            self,
            embed_dim,
            attention_heads,
            ffn_embed_dim,
            num_layers,
            embedding_layer,  # torch.nn.Embedding. Must have a padding_idx field
            dropout=0,
            normalize_before=False,
            torch_encoder=None,  # torch encoder that you can map weights from
            activation="relu",
        ):
            super().__init__()

            cfg = fairseq_transformer.TransformerConfig()
            cfg.decoder.embed_dim = embed_dim
            cfg.decoder.output_dim = embed_dim
            cfg.decoder.attention_heads = attention_heads
            cfg.decoder.ffn_embed_dim = ffn_embed_dim
            cfg.dropout = dropout
            cfg.decoder.normalize_before = normalize_before
            cfg.decoder.layers = num_layers
            # make embedding behavior same as other encoders
            cfg.no_token_positional_embeddings = True
            cfg.no_scale_embedding = True
            cfg.activation_fn = activation

            dictionary = {}  # TODO: verify what this is

            self.decoder = fairseq_transformer.TransformerDecoder(
                cfg,
                dictionary,
                embedding_layer,
                no_encoder_attn=True,
                output_projection=None,
            )

            # Optionally copy weights from a torch-native encoder for parity checks.
            if torch_encoder is not None:
                self.decoder = torch_to_fairseq(torch_encoder, self.decoder)
            self.decoder = self.decoder.eval().cuda().half()

        def forward(
            self,
            tokens,
            src_lengths=None,
            with_triangle_mask=False,
            incremental_state=None,
        ):
            # features_only=True returns hidden states; [0] drops the extras dict.
            return self.decoder(
                prev_output_tokens=tokens,
                encoder_out=None,
                incremental_state=incremental_state,
                features_only=True,
                full_context_alignment=not with_triangle_mask,
                alignment_layer=None,
                alignment_heads=None,
                src_lengths=src_lengths,
                return_all_hiddens=False,
            )[0]
@parametrize("input_dim,attn_mask_dim,is_causal",
[(3, None, False), (3, 2, False), (3, 2, True), (3, 3, False), (3, 3, True),
(4, None, False), (4, 2, False), (4, 2, True), (4, 4, False), (4, 4, True)],
name_fn=lambda input_dim, attn_dim, is_causal: (
f"{input_dim}D_input_dim_" + (
f"{attn_dim}D_{'causal_' if is_causal else ''}attn_mask"
if attn_dim is not None else "no_attn_mask")))
@parametrize("dropout_p", [0.0, 0.2, 0.5])
@sdp_kernel(enable_flash=False, enable_mem_efficient=False)
def test_scaled_dot_product_attention(self, device, input_dim, attn_mask_dim, is_causal, dropout_p):
def sdp_ref(
q,
k,
v,
attn_mask=None,
dropout_p=0.0):
E = q.size(-1)
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
if attn_mask is not None:
attn = torch.baddbmm(attn_mask, q, k.transpose(-2, -1))
else:
attn = torch.bmm(q, k.transpose(-2, -1))
attn = torch.nn.functional.softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = torch.nn.functional.dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output
# TODO: Support cross-device / dtype testing properly when instantiate_device_type_tests() is used.
dtypes = [torch.double, torch.float]
for dtype in dtypes:
def rand_tensor(*shape):
return torch.randn(shape, device=device, dtype=dtype)
# This test compares python and C++ implementations of SDP.
N, N_prime, L, S, E = 5, 2, 4, 3, 6
if input_dim == 3:
query = rand_tensor(N, L, E)
key = rand_tensor(N, S, E)
value = rand_tensor(N, S, E)
elif input_dim == 4:
query = rand_tensor(N, N_prime, L, E)
key = rand_tensor(N, N_prime, S, E)
value = rand_tensor(N, N_prime, S, E)
else:
self.fail(f'Invalid input_dim {input_dim} encountered in SDP test')
attn_mask = None
if attn_mask_dim is not None:
assert attn_mask_dim in [2, input_dim]
mask_size = (L, S) if attn_mask_dim == 2 else ((N, L, S) if input_dim == 3 else (N, N_prime, L, S))
attn_mask = (torch.ones(mask_size, device=device, dtype=torch.bool).tril() if is_causal
else torch.randint(0, 2, size=mask_size, device=device, dtype=torch.bool))
with freeze_rng_state():
# Python impl only supports float mask and 3D inputs.
attn_mask_float = attn_mask
if attn_mask_float is not None:
attn_mask_float = torch.zeros_like(attn_mask, dtype=query.dtype)
attn_mask_float.masked_fill_(attn_mask.logical_not(), float("-inf"))
q, k, v = query.view(-1, L, E), key.view(-1, S, E), value.view(-1, S, E)
a = attn_mask_float
if a is not None and attn_mask_dim > 3:
a = a.view(-1, L, S)
expected = sdp_ref(q, k, v, attn_mask=a, dropout_p=dropout_p)
if input_dim > 3:
expected = expected.view(-1, N_prime, L, E)
with freeze_rng_state():
if is_causal:
# NB: Don't pass attn_mask here
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, None, dropout_p, is_causal)
# Error case: both explicit attn_mask and is_causal are set
with self.assertRaisesRegex(RuntimeError,
"Explicit attn_mask should not be set when is_causal=True"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
else:
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask, dropout_p, is_causal)
self.assertEqual(actual, expected)
if attn_mask_dim is None:
q = q.double().clone()
k = k.double().clone()
v = v.double().clone()
q.requires_grad_()
k.requires_grad_()
v.requires_grad_()
assert gradcheck(lambda *args, **kwargs: wrapper_set_seed(sdp_ref, *args, **kwargs),
(q, k, v, attn_mask, dropout_p))
assert gradcheck(lambda *args, **kwargs:
wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs),
(q, k, v, attn_mask, dropout_p))
def test_incompatible_mask(self, device):
def ones_tensor(*shape):
return torch.ones(shape, dtype=torch.float32)
S, L, E, H = 1, 2, 4, 1
qkv = ones_tensor(S, L, E)
mha = nn.MultiheadAttention(E, H)
mha.in_proj_weight = Parameter(torch.ones((E * 3, E)))
mha.out_proj.weight = Parameter(torch.ones((E, E)))
qkv = qkv.to(float)
kpm = ones_tensor(S, L) * float("-inf")
am = ones_tensor(L, L).to(bool)
def func():
return mha(qkv, qkv, qkv, need_weights=False, key_padding_mask=kpm, attn_mask=am)
self.assertRaises(RuntimeError, func)
@unittest.skipIf(TEST_WITH_CROSSREF, 'Fastpath not available with crossref')
@torch.no_grad()
def test_mask_check_fastpath(self):
    """
    Test that fastpath is executed independently of the masks that are passed.
    If the passed key padding mask is left aligned or mask_check=False, test that nested tensors are used
    (sparsity fastpath), otherwise use fastpath with traditional tensors.
    Also test that fast path is executed with both key padding mask and attention mask passed at the same time.
    """

    x = torch.Tensor([[[1, 2], [3, 4], [5, 6]]]).to(torch.float)

    # Patch the fastpath entry point and check whether (and with what kind of
    # tensor) it was invoked.
    def _test_fastpath(model, key_padding_mask, mock_return_value, attn_mask=None, nested_tensors=True):
        with patch('torch._transformer_encoder_layer_fwd') as fastpath_mock:
            fastpath_mock.return_value = mock_return_value
            model(x, src_key_padding_mask=key_padding_mask, mask=attn_mask)

            # If mock was called, fastpath was taken
            self.assertTrue(fastpath_mock.called)

            # If mock was called with nested tensors, sparsity fastpath was taken
            for call_args, _ in fastpath_mock.call_args_list:
                self.assertEqual(call_args[0].is_nested, nested_tensors)

    encoder_layer = torch.nn.TransformerEncoderLayer(d_model=2, nhead=2, dim_feedforward=8, batch_first=True)

    model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=True)
    model.eval()

    # True == padded position; the first mask pads only the trailing position.
    aligned_key_padding_mask = torch.Tensor([[0, 0, 1]]).to(torch.bool)
    not_aligned_key_padding_mask = torch.Tensor([[1, 0, 1]]).to(torch.bool)
    attn_mask = torch.Tensor([[1, 0, 1], [0, 1, 0], [1, 0, 1]]).to(torch.bool)
    nested_tensor_return_value = torch.nested.nested_tensor([torch.ones((2, 2), dtype=torch.float)])
    tensor_return_value = torch.ones((1, 3, 2), dtype=torch.float)

    # Left aligned mask results in sparsity fastpath
    _test_fastpath(model, aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)

    # Not aligned mask results in fastpath
    _test_fastpath(model, not_aligned_key_padding_mask, tensor_return_value, nested_tensors=False)

    model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=False, mask_check=True)
    model.eval()

    # If nested tensor disabled, fastpath is always taken
    _test_fastpath(model, aligned_key_padding_mask, tensor_return_value, nested_tensors=False)
    _test_fastpath(model, not_aligned_key_padding_mask, tensor_return_value, nested_tensors=False)
    # Fast path is taken if both attention mask and key padding mask are present
    _test_fastpath(model, aligned_key_padding_mask, tensor_return_value, attn_mask=attn_mask, nested_tensors=False)

    model = torch.nn.TransformerEncoder(encoder_layer, num_layers=2, enable_nested_tensor=True, mask_check=False)
    model.eval()

    # Mask check disabled results in sparsity fastpath, independently of the mask
    _test_fastpath(model, aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)
    _test_fastpath(model, not_aligned_key_padding_mask, nested_tensor_return_value, nested_tensors=True)
# Test failing MHA when bias was NoneType
def test_bias_is_none(self):
x = torch.rand((1, 5, 10))
model = torch.nn.modules.activation.MultiheadAttention(10, 1, bias=False, batch_first=True)
model.eval()
model(x, x, x)
# completes without error
def test_train_with_is_causal(self, device):
    """Exercise is_causal in training (TransformerEncoder) and inference (MultiheadAttention),
    and check that passing a causal mask makes the encoder forward is_causal=True to its layers.
    """
    # training with is_causal
    S, L, E, H = 1, 2, 2, 1
    layer = nn.TransformerEncoderLayer(
        d_model=2,
        dim_feedforward=4,
        nhead=H,
        batch_first=True,
        activation="gelu",
        dropout=0,
    )
    criterion = nn.MSELoss()
    encoder = nn.TransformerEncoder(layer, 2).to(device)
    optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
    # (fix: encoder.train() was called twice; once is sufficient)
    encoder.train()

    optimizer.zero_grad()
    inputs = torch.randn(S, L, E).to(device)
    mask = torch.nn.Transformer.generate_square_subsequent_mask(
        inputs.size(1), device=device
    )

    outputs = encoder(inputs, mask=mask, is_causal=True)

    loss = criterion(outputs[:, 0:2, :], inputs[:, 0:2, :])
    loss.backward()
    optimizer.step()

    # inference with is_causal
    t_qvk = torch.randn((S, L, E), device=device, dtype=torch.float32)
    mha = nn.MultiheadAttention(E, H).to(device)
    mask = torch.nn.Transformer.generate_square_subsequent_mask(
        S, device=device
    )

    attn_out, _ = mha(t_qvk, t_qvk, t_qvk, attn_mask=mask, is_causal=True)

    # Can't give only is_causal: an attn_mask must accompany it.
    attn_mask = torch.randint(0, 2, size=(L, L), device=device, dtype=torch.bool)
    with self.assertRaises(RuntimeError):
        _ = mha(t_qvk, t_qvk, t_qvk, is_causal=True)

    # Passing a causal mask sets is_causal=True on the per-layer call.
    causal_mask = torch.triu(
        torch.ones(L, L, device=inputs.device) * float('-inf'), diagonal=1
    ).to(torch.bool)

    mock_layer = MagicMock(torch.nn.MultiheadAttention(E, H), return_value=inputs)
    encoder.layers[0] = mock_layer
    outputs = encoder(inputs, mask=causal_mask)
    mock_layer.assert_called_with(ANY, src_mask=ANY, is_causal=True, src_key_padding_mask=ANY)

    # check expected numerical values with all kernels
    self.is_causal_kernels(["math"], device)
def is_causal_kernels(self, kernels, device):
    """Verify MHA output under is_causal=True, for each requested SDP kernel.

    With all-ones inputs and all-ones projection weights the attended output
    is a known constant (16), so exact equality can be asserted.
    """
    def all_ones(*shape):
        return torch.ones(shape, device=device, dtype=torch.float32).to(device)

    S, L, E, H = 1, 2, 4, 1
    qkv = all_ones(S, L, E)
    mha = nn.MultiheadAttention(E, H).to(device)
    mha.in_proj_weight = Parameter(torch.ones((E * 3, E), device=device))
    mha.out_proj.weight = Parameter(torch.ones((E, E), device=device))
    expected = torch.ones(size=(S, L, E)).to(device) * 16
    mask = torch.nn.Transformer.generate_square_subsequent_mask(
        qkv.size(1), device=device
    )

    for kernel in kernels:
        with torch.backends.cuda.sdp_kernel(
            enable_math=(kernel == 'math'),
            enable_flash=(kernel == 'flash'),
            enable_mem_efficient=(kernel == 'meff')
        ):
            actual, _ = mha(qkv, qkv, qkv, attn_mask=mask, need_weights=False, is_causal=True)
            self.assertTrue(torch.equal(actual, expected))

            if kernel != 'math':
                # fails with embedding size not multiple of 4
                with self.assertRaisesRegex(RuntimeError, "No available kernel"):
                    small_qkv = all_ones(S, L, 2)
                    small_mha = nn.MultiheadAttention(2, H).to(device)
                    small_mask = torch.nn.Transformer.generate_square_subsequent_mask(
                        small_qkv.size(1), device=device
                    )
                    _ = small_mha(small_qkv, small_qkv, small_qkv, attn_mask=small_mask,
                                  need_weights=False, is_causal=True)
                    torch.cuda.synchronize()
@unittest.skipIf(
    # fix: skip-reason string had a typo ("supposrt")
    not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Platform does not support fused SDPA or pre-SM80 hardware"
)
def test_is_causal_gpu(self):
    """Run the is_causal numerical checks on the math and mem-efficient kernels on CUDA."""
    device = 'cuda'
    self.is_causal_kernels(["math", "meff"], device)
def test_script_mha_in_proj_weight_none(self):
mha = torch.nn.MultiheadAttention(
embed_dim=128, num_heads=8, kdim=256, vdim=256
).eval()
torch.jit.script(mha)
class TestSDPAFailureModes(NNTestCase):
    """ Used to test the failure modes of scaled_dot_product_attention
    """
    # Enable CUDA-specific leak/stream checking from NNTestCase.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not isSM86or89Device,
                     "Does not support fused SDPA or not SM86+ hardware")
    @parametrize("head_dim", [72, 96, 128])
    def test_flash_backward_failure_sm86plus(self, device, head_dim: int):
        """On SM86/89, flash attention with head_dim > 64 forbids backward (requires_grad inputs)."""
        dtype = torch.float16
        make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=dtype)
        # See check_requires_grad_and_head_dim_gt64_and_sm_ge86 in pytorch/aten/src/ATen/native/transformers/cuda/sdp_utils.h
        size = (2, 2, 4, head_dim)
        q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)

        with sdp_kernel(enable_mem_efficient=False, enable_flash=False, enable_math=True):
            math_ref = torch.nn.functional.scaled_dot_product_attention(q, k, v, None, 0.0, False)

        with sdp_kernel(enable_mem_efficient=False, enable_flash=True, enable_math=False):
            # Should not fail because inputs don't require grad
            flash_ref = torch.nn.functional.scaled_dot_product_attention(q, k, v, None, 0.0, False)

            self.assertEqual(math_ref, flash_ref, atol=1e-3, rtol=1e-3)

            # Should fail because inputs require grad
            q = make_tensor(size, requires_grad=True)
            k = make_tensor(size, requires_grad=True)
            v = make_tensor(size, requires_grad=True)
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))

    @onlyCUDA
    def test_dispatch_fails_no_backend(self, device):
        """With every backend disabled, both dispatch and the op itself must raise."""
        dtype = torch.float16
        with sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=False):
            size = (2, 3, 4)
            q = torch.randn(size, device=device, dtype=dtype)
            k = torch.randn(size, device=device, dtype=dtype)
            v = torch.randn(size, device=device, dtype=dtype)
            self.assertRaisesRegex(RuntimeError, "No viable backend for scaled_dot_product_attention was found.",
                                   lambda: torch._fused_sdp_choice(q, k, v))
            self.assertRaisesRegex(RuntimeError, "No viable backend for scaled_dot_product_attention was found.",
                                   lambda: torch.nn.functional.scaled_dot_product_attention(q, k, v))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Does not support fused scaled dot product attention")
    @parametrize(
        "kernel",
        [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
        if SM80OrLater
        else [SDPBackend.EFFICIENT_ATTENTION],
    )
    def test_invalid_fused_inputs_dim_3(self, device, kernel: SDPBackend):
        """Fused kernels require 4D inputs; 3D must warn and raise."""
        with sdp_kernel(**backend_map[kernel]):
            # Dim is not 4
            size = (2, 3, 8)
            dtype = torch.float16
            q = torch.randn(size, device=device, dtype=dtype)
            k = torch.randn(size, device=device, dtype=dtype)
            v = torch.randn(size, device=device, dtype=dtype)
            with self.assertWarnsRegex(UserWarning, "Both fused kernels requires query, key and value to be 4 dimensional"):
                self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Does not support fused scaled dot product attention")
    @parametrize(
        "kernel",
        [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
        if SM80OrLater
        else [SDPBackend.EFFICIENT_ATTENTION],
    )
    def test_invalid_fused_inputs_broadcast(self, device, kernel: SDPBackend):
        """Fused kernels reject batch-dim broadcasting between q and k/v for dense inputs."""
        with sdp_kernel(**backend_map[kernel]):
            #  Fused Kernels don't support broadcasting for dense inputs
            dtype = torch.float16
            size = (2, 4, 3, 8)
            size_broadcast = (1, 4, 3, 8)
            q = torch.randn(size_broadcast, device=device, dtype=dtype)
            k = torch.randn(size, device=device, dtype=dtype)
            v = torch.randn(size, device=device, dtype=dtype)
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support fused scaled dot product attention")
    @parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_invalid_fused_inputs_head_dim(self, device, kernel: SDPBackend):
        """Head dim must satisfy kernel divisibility constraints (9 is invalid for flash)."""
        with sdp_kernel(**backend_map[kernel]):
            # The embed dim per head is not divisible by 8 for flash attention
            dtype = torch.float16
            make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=dtype)
            size = (2, 2, 3, 9)
            q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Does not support fused scaled dot product attention")
    @parametrize(
        "kernel",
        [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
        if SM80OrLater
        else [SDPBackend.EFFICIENT_ATTENTION],
    )
    def test_invalid_fused_inputs_invalid_dtype(self, device, kernel: SDPBackend):
        """float64 is not accepted by either fused kernel."""
        with sdp_kernel(**backend_map[kernel]):
            # Invalid dtype for both Flash Attention and Mem Efficient Attention
            size = (2, 2, 3, 16)
            make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float64)
            q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Does not support fused scaled dot product attention")
    @parametrize(
        "kernel",
        [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION]
        if SM80OrLater
        else [SDPBackend.EFFICIENT_ATTENTION],
    )
    def test_invalid_fused_inputs_attn_mask_present(self, device, kernel: SDPBackend):
        """An explicit attention mask is unsupported by the fused kernels."""
        with sdp_kernel(**backend_map[kernel]):
            # Failures for unsupported SDP args
            size = (2, 2, 3, 16)
            make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float16)
            q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
            # Non-None attention mask
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, torch.ones_like(q), 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support fused SDPA or pre-SM80 hardware")
    def test_unaligned_tensors(self, device):
        """Mem-efficient attention rejects last-dim sizes that break its alignment requirements."""
        # Alignment requirements are arch-dependent, so we restrict to SM80OrLater.
        dtype = torch.float16
        shape = (2, 2, 8, 5)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with sdp_kernel(enable_flash=False, enable_mem_efficient=True, enable_math=False):
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support fused SDPA or pre-SM80 hardware")
    def test_flash_fail_fp32(self, device):
        """Flash attention rejects float32 inputs (requires Half/BFloat16)."""
        dtype = torch.float
        shape = (16, 16, 32, 32)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with sdp_kernel(enable_flash=True, enable_mem_efficient=False, enable_math=False):
            with self.assertWarnsRegex(UserWarning, "Expected query, key and value to all be of dtype: {Half, BFloat16}"):
                self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support SDPA or pre-SM80 hardware")
    def test_flash_autocast_fp32_float16(self, device):
        """Under float16 autocast, float32 inputs are cast and flash attention succeeds."""
        dtype = torch.float
        shape = (16, 16, 32, 32)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with torch.autocast(device_type='cuda', dtype=torch.float16):
            with sdp_kernel(enable_flash=True, enable_mem_efficient=False, enable_math=False):
                _ = torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, None, 0.0, False)

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support SDPA or pre-SM80 hardware")
    def test_flash_autocast_fp32_bfloat16(self, device):
        """Under bfloat16 autocast, float32 inputs are cast and flash attention succeeds."""
        dtype = torch.float
        shape = (16, 16, 32, 32)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
            with sdp_kernel(enable_flash=True, enable_mem_efficient=False, enable_math=False):
                _ = torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, None, 0.0, False)

    @parametrize("kernel", [SDPBackend.MATH, SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_invalid_inputs_different_datatypes(self, device, kernel: SDPBackend):
        """q/k/v with mismatched dtypes must raise for every backend."""
        with sdp_kernel(**backend_map[kernel]):
            # Different datatypes
            shape = (1, 4, 8, 16)
            query = torch.randn(shape, dtype=torch.float32, device=device)
            key = torch.randn(shape, dtype=torch.float16, device=device)
            value = torch.randn(shape, dtype=torch.float16, device=device)
            self.assertRaises(RuntimeError, lambda: F.scaled_dot_product_attention(query, key, value))

    @onlyCUDA
    @parametrize("kernel", [SDPBackend.MATH, SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_invalid_inputs_different_devices(self, device, kernel: SDPBackend):
        """q on CUDA with k/v on CPU must raise."""
        # Different devices
        shape = (1, 4, 8, 16)
        query = torch.randn(shape, dtype=torch.float32, device=device)
        key = torch.randn(shape, dtype=torch.float16, device='cpu')
        value = torch.randn(shape, dtype=torch.float16, device='cpu')
        self.assertRaises(RuntimeError, lambda: F.scaled_dot_product_attention(query, key, value))

    @parametrize("kernel", [SDPBackend.MATH, SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_invalid_inputs_1_dimensional_inputs(self, device, kernel: SDPBackend):
        """A 1D query with 2D key/value must raise for every backend."""
        with sdp_kernel(**backend_map[kernel]):
            # 1 dimensional input
            shape = (1, 4)
            query = torch.randn(4, dtype=torch.float16, device=device)
            key = torch.randn(shape, dtype=torch.float16, device=device)
            value = torch.randn(shape, dtype=torch.float16, device=device)
            self.assertRaises(RuntimeError, lambda: F.scaled_dot_product_attention(query, key, value))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
    def test_fused_kernels_nested_broadcasting_error_cases(self, device):
        """Nested tensors: broadcasting k with an inconsistent v seq-len dim must fail."""
        # one of k,v needs to be broadcasted and other has non consistent seq_len dim
        rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float32)
        batch, num_heads, head_dim = 32, 8, 64
        seq_lens_q = torch.randint(low=1, high=32, size=(batch,)).tolist()
        seq_lens_v = torch.randint(low=1, high=32, size=(batch,)).tolist()

        q_shape = (batch, seq_lens_q, num_heads, head_dim)
        k_shape = (1, 1, num_heads, head_dim)
        v_shape = (batch, seq_lens_v, num_heads, head_dim)

        query = rand_nested_tensor(q_shape).transpose(1, 2)
        key = rand_nested_tensor(k_shape).transpose(1, 2)
        value = rand_nested_tensor(v_shape).transpose(1, 2)

        with sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=True):
            with self.assertRaisesRegex(RuntimeError, "No available kernel"):
                torch.nn.functional.scaled_dot_product_attention(
                    query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not isSM5xDevice, "Does not support fused SDPA or not SM50 hardware")
    def test_mem_efficient_fail_bfloat16_sm50(self, device):
        """SM5x mem-efficient attention rejects bfloat16 (Half/Float only)."""
        dtype = torch.bfloat16
        shape = (16, 16, 32, 32)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]):
            with self.assertWarnsRegex(UserWarning, "Expected query, key and value to all be of dtype: {Half, Float}"):
                self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                    q, k, v, None, 0.0, False))

    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not isSM90Device, "Does not support fused SDPA or pre-SM90 hardware")
    def test_mem_efficient_fail_sm90(self, device):
        """Mem-efficient attention is expected to fail on SM90 devices."""
        dtype = torch.float16
        shape = (16, 16, 32, 32)
        make_tensor = partial(rand_sdpa_tensor, shape=shape, type=type, device=device, dtype=dtype)
        q, k, v = make_tensor(), make_tensor(), make_tensor()
        with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]):
            self.assertRaises(RuntimeError, lambda: torch.nn.functional.scaled_dot_product_attention(
                q, k, v, None, 0.0, False))
class TestSDPA(NNTestCase):
    """ Used to test the functionality of scaled_dot_product_attention
    Quirks:
        There is some trickiness with this function. Its runtime behavior
        is dependent on the CUDA architecture you are testing it on. See
        `PLATFORM_SUPPORTS_FUSED_SDPA` at the top of the file.
        Summary:
            Math: always supported
            FlashAttention: Supported on sm80 or newer hardware
            MemEfficientAttention: Supported on sm50 or newer hardware
    """
    # Enable CUDA-specific leak/stream checking from NNTestCase.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
def convert_flash_attn_S_to_softmax(self, S, query_padding_mask, key_padding_mask, head_dim, causal=False):
    """FlashAttention stores the S matrix in a different way.
    Arguments:
        S: (batch_size, nheads, seqlen_q, seqlen_k)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
    Returns the S matrix permuted back to standard (row-major) layout, with
    padded (and, if `causal`, upper-triangular) entries zeroed out.
    """
    def _get_block_size(head_dim):
        # Kernel block size depends on head_dim; only multiples of 8 up to 128 supported.
        assert head_dim % 8 == 0 and head_dim <= 128
        return 256 if head_dim <= 64 else 128

    S_flat = S.view(S.shape[0], S.shape[1], S.shape[2] * S.shape[3])
    seqlen_q, seqlen_k = S.shape[-2:]
    block_size = _get_block_size(head_dim)
    loop_steps = math.ceil(seqlen_k / block_size)
    warps_n = 4
    mmas_n = (seqlen_k // warps_n //
              16) if seqlen_k <= block_size else (block_size // warps_n // 16)

    # Undo the kernel's tiled storage layout: view as the tile structure, then
    # permute the tile axes back into row-major order.
    # NOTE(review): the exact factorization mirrors the flash-attn kernel layout —
    # do not reorder these axes without consulting the kernel.
    S_converted = S_flat.view(S_flat.shape[0], S_flat.shape[1], loop_steps,
                              seqlen_q // 16, mmas_n, warps_n, 8, 4, 2, 2, 2)
    S_converted = S_converted.permute(0, 1, 3, 8, 6, 2, 4, 5, 9, 7, 10)
    S_converted = S_converted.reshape(S_flat.shape[0],
                                      S_flat.shape[1], (seqlen_q // 16 * 2 * 8), (loop_steps * mmas_n * warps_n * 2 * 4 * 2))

    # Need to zero out things not in attention_mask in case S was initialized with random values
    # and some of those values aren't overwritten.
    seqlen_q_og = query_padding_mask.shape[-1]
    if seqlen_q_og < seqlen_q:
        query_padding_mask = F.pad(
            query_padding_mask, (0, seqlen_q - seqlen_q_og))
    else:
        query_padding_mask = query_padding_mask[:, :seqlen_q]
    q_mask_fill = ~query_padding_mask.view(query_padding_mask.shape[0], 1, query_padding_mask.shape[1], 1)
    S_converted = S_converted.masked_fill(q_mask_fill, 0.0)
    seqlen_k_og = key_padding_mask.shape[-1]
    if seqlen_k_og < seqlen_k:
        key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k - seqlen_k_og))
    else:
        key_padding_mask = key_padding_mask[:, :seqlen_k]
    k_mask_fill = ~key_padding_mask.view(key_padding_mask.shape[0], 1, 1, key_padding_mask.shape[1])
    S_converted = S_converted.masked_fill(k_mask_fill, 0.0)

    # Zero out the strictly-upper-triangular part for causal attention.
    if causal:
        causal_mask = torch.triu(torch.ones(
            seqlen_q, seqlen_k, dtype=torch.bool, device=S.device), 1)
        S_converted.masked_fill_(causal_mask, 0.0)

    # Trim (or pad) back to the original, pre-rounded sequence lengths.
    if seqlen_q_og < seqlen_q:
        S_converted = S_converted[:, :, :seqlen_q_og, :]
    else:
        S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q))
    if seqlen_k_og < seqlen_k:
        S_converted = S_converted[:, :, :, :seqlen_k_og]
    else:
        S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k))
    return S_converted
def query_key_value_clones(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, dtype: torch.dtype):
    """Return detached copies of (query, key, value) cast to `dtype`, preserving each requires_grad flag."""
    def _cast_clone(t: torch.Tensor) -> torch.Tensor:
        return t.clone().detach().to(dtype).requires_grad_(t.requires_grad)

    return _cast_clone(query), _cast_clone(key), _cast_clone(value)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("is_contiguous", [True, False])
@parametrize("head_dims_match", [True, False])
def test_scaled_dot_product_attention_fused_kernels(self, device, type: str, is_contiguous: bool, head_dims_match: bool):
    """Mem-efficient kernel must match the math reference for dense/nested, (non)contiguous inputs."""
    make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=torch.float16)

    batch, seq_len, num_heads, head_dim = 32, 64, 16, 64

    shape = (batch, seq_len, num_heads, head_dim)
    if head_dims_match:
        shape_v = shape
    else:
        # Value head dim may legitimately differ from q/k head dim.
        head_dim_v = 96
        shape_v = (batch, seq_len, num_heads, head_dim_v)

    query = make_tensor(shape)
    key = make_tensor(shape)
    value = make_tensor(shape_v)

    # Lets switch seq_len and num_heads
    # B x S X H X D -> B x H x S x D
    query = query.transpose(1, 2)
    key = key.transpose(1, 2)
    value = value.transpose(1, 2)

    if is_contiguous:
        query = query.contiguous()
        key = key.contiguous()
        value = value.contiguous()

    with sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=True):
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
    with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
        math_ref = torch.nn.functional.scaled_dot_product_attention(
            query.contiguous(), key.contiguous(), value.contiguous(),
            attn_mask=None, dropout_p=0.0, is_causal=False)

    self.assertEqual(actual[0].contiguous(), math_ref[0].contiguous(), atol=1e-3, rtol=1e-2)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
@parametrize("type", ["dense", "nested"])
@parametrize("is_contiguous", [True, False])
def test_scaled_dot_product_attention_fused_kernels_packed(self, device, type: str, is_contiguous: bool):
    """Mem-efficient kernel must match math reference when q/k/v are chunked from one packed tensor."""
    make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=torch.float16, packed=True)

    batch_size, seq_len, num_heads, head_dim = 32, 64, 16, 64
    shape = (batch_size, seq_len, num_heads, head_dim)

    # Test Packed
    qkv = make_tensor(shape)
    query, key, value = qkv.chunk(3, dim=-1)

    # B x S x (H*D) -> B x H x S x D
    query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

    if is_contiguous:
        query = query.contiguous()
        key = key.contiguous()
        value = value.contiguous()

    with sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=True):
        actual = torch.nn.functional.scaled_dot_product_attention(
            query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
    with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
        math_ref = torch.nn.functional.scaled_dot_product_attention(
            query.contiguous(), key.contiguous(), value.contiguous(),
            attn_mask=None, dropout_p=0.0, is_causal=False)

    self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=2e-3, rtol=1e-2)
    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
    @parametrize("type", ["dense", "nested"])
    @parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_scaled_dot_product_attention_fused_kernels_packed_accuracy(self, device, type: str, fused_kernel: str):
        """Compare a fused kernel's fp16 output on packed QKV against fp32/fp16 math references."""
        # Flash attention requires SM80+; silently skip this combination otherwise
        if (not SM80OrLater) and fused_kernel == SDPBackend.FLASH_ATTENTION:
            return

        def rand_nt(shape):
            # Build the SAME random values as both an fp32 and an fp16 nested tensor
            batch, seq_len, num_heads, head_dim = shape
            tensors = [6 * torch.rand((seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
                       for _ in range(batch)]
            return (torch.nested.nested_tensor(tensors, device=device, dtype=torch.float32),
                    torch.nested.nested_tensor(tensors, device=device, dtype=torch.float16))

        def rand_tensor(shape):
            # Dense analogue of rand_nt: identical values in fp32 and fp16
            batch, seq_len, num_heads, head_dim = shape
            tensor = 6 * torch.rand((batch, seq_len, 3 * num_heads * head_dim), device=device, dtype=torch.float32) - 3
            return tensor, tensor.to(dtype=torch.float16)

        batch_size, seq_len, num_heads, head_dim = 16, 8, 4, 64
        shape = (batch_size, seq_len, num_heads, head_dim)

        # Test Packed
        qkv, qkv_low_precision = rand_tensor(shape) if type == "dense" else rand_nt(shape)
        query, key, value = qkv.chunk(3, dim=-1)
        query_lp, key_lp, value_lp = qkv_low_precision.chunk(3, dim=-1)

        # Reshape each projection to (batch, heads, seq, head_dim)
        query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

        query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

        # Fused kernel runs on the fp16 copies
        with sdp_kernel(**backend_map[fused_kernel]):
            actual = torch.nn.functional.scaled_dot_product_attention(
                query_lp, key_lp, value_lp, attn_mask=None, dropout_p=0.0, is_causal=False)

        with sdp_kernel(**backend_map[SDPBackend.MATH]):
            # fp16 math reference, used to calibrate the comparison tolerance
            math_ref_lp = torch.nn.functional.scaled_dot_product_attention(
                query_lp.contiguous(), key_lp.contiguous(), value_lp.contiguous(),
                attn_mask=None, dropout_p=0.0, is_causal=False)

            # fp32 math reference on the same values
            math_query = query.contiguous()
            math_key = key.contiguous()
            math_value = value.contiguous()

            math_ref = torch.nn.functional.scaled_dot_product_attention(
                math_query, math_key, math_value, attn_mask=None, dropout_p=0.0, is_causal=False)

        actual_test = actual
        math_ref_test = math_ref
        math_ref_lp_test = math_ref_lp

        if actual_test.is_nested:
            # Pad nested outputs to dense so assertEqual can compare them
            actual_test = torch.nested.to_padded_tensor(actual_test.contiguous(), padding=0.0)

            math_ref_test = torch.nested.to_padded_tensor(math_ref_test, padding=0.0)
            math_ref_lp_test = torch.nested.to_padded_tensor(math_ref_lp_test, padding=0.0)

        # Compare everything in fp32
        actual_test = actual_test.to(dtype=torch.float32).contiguous()
        math_ref_test = math_ref_test.to(dtype=torch.float32).contiguous()
        math_ref_lp_test = math_ref_lp_test.to(dtype=torch.float32).contiguous()

        self.assertEqual(math_ref_test, math_ref_lp_test, atol=7e-3, rtol=7e-3)
        self.assertEqual(actual_test, math_ref_test, atol=5e-3, rtol=5e-3)
@parametrize("contiguous_inputs", [True, False])
def test_sdp_math_gradcheck(self, device, contiguous_inputs: bool):
batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
dtype=torch.float64, requires_grad=True, packed=True)
qkv = make_tensor((batch_size, seq_len, num_heads, head_dim))
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if contiguous_inputs:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
with sdp_kernel(enable_math=True, enable_mem_efficient=False, enable_flash=False):
assert gradcheck(lambda *args, **kwargs:
wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs),
(query, key, value, None, 0.0, False)
)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Flash Attention was not built for this system")
@parametrize("contiguous_inputs", [True, False])
@parametrize("is_causal", [True, False])
def test_sdp_mem_efficient_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool):
batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
dtype=torch.float64, requires_grad=True, packed=True)
qkv = make_tensor((batch_size, seq_len, num_heads, head_dim))
qkv_lp = qkv.detach().clone().to(torch.float32).requires_grad_()
query, key, value = qkv.chunk(3, dim=-1)
query_lp, key_lp, value_lp = qkv_lp.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if contiguous_inputs:
query = query.contiguous()
key = key.contiguous()
value = value.contiguous()
query_lp = query_lp.contiguous()
key_lp = key_lp.contiguous()
value_lp = value_lp.contiguous()
with sdp_kernel(enable_math=True, enable_mem_efficient=False, enable_flash=False):
out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)
with sdp_kernel(enable_math=False, enable_mem_efficient=True, enable_flash=False):
out_lp = torch.nn.functional.scaled_dot_product_attention(
query_lp, key_lp, value_lp, None, 0.0, is_causal)
rand_upward = torch.rand_like(out)
rand_upward_lp = rand_upward.to(torch.float32)
out.backward(rand_upward)
out_lp.backward(rand_upward_lp)
# Cast up and compare
self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=1e-5, rtol=1e-5)
    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Flash Attention was not built for this system")
    @parametrize("contiguous_inputs", [True, False])
    @parametrize("is_causal", [True, False])
    @parametrize("dtype", [torch.float16, torch.bfloat16])
    def test_sdp_flash_attention_grad_against_math(self, device, contiguous_inputs: bool, is_causal: bool, dtype: torch.dtype):
        """Compare flash-attention gradients (fp16/bf16) against the fp64 math reference."""
        batch_size, seq_len, num_heads, head_dim = 4, 4, 2, 16
        make_tensor = partial(rand_sdpa_tensor, type="dense", device=device,
                              dtype=torch.float64, requires_grad=True, packed=True)

        qkv = make_tensor((batch_size, seq_len, num_heads, head_dim))
        # Low-precision copy of the same values, fed to the flash kernel
        qkv_lp = qkv.detach().clone().to(dtype).requires_grad_()

        query, key, value = qkv.chunk(3, dim=-1)
        query_lp, key_lp, value_lp = qkv_lp.chunk(3, dim=-1)

        # Reshape each projection to (batch, heads, seq, head_dim)
        query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

        query_lp = query_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        key_lp = key_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
        value_lp = value_lp.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)

        if contiguous_inputs:
            query = query.contiguous()
            key = key.contiguous()
            value = value.contiguous()

            query_lp = query_lp.contiguous()
            key_lp = key_lp.contiguous()
            value_lp = value_lp.contiguous()

        with sdp_kernel(enable_math=True, enable_mem_efficient=False, enable_flash=False):
            out = torch.nn.functional.scaled_dot_product_attention(query, key, value, None, 0.0, is_causal)

        with sdp_kernel(enable_math=False, enable_mem_efficient=False, enable_flash=True):
            out_lp = torch.nn.functional.scaled_dot_product_attention(
                query_lp, key_lp, value_lp, None, 0.0, is_causal)

        # Same upstream gradient through both paths
        rand_upward = torch.rand_like(out)
        rand_upward_lp = rand_upward.to(dtype)

        out.backward(rand_upward)
        out_lp.backward(rand_upward_lp)

        # Cast up and compare
        # Since we are doing the compute in half precision we have to bump the tolerance;
        # bfloat16 has fewer mantissa bits than fp16, so it gets a looser tolerance
        atol = 7e-4 if dtype == torch.float16 else 7e-3
        rtol = 7e-4 if dtype == torch.float16 else 7e-3
        self.assertEqual(qkv.grad, qkv_lp.grad.to(torch.float64), atol=atol, rtol=rtol)
@onlyCPU
@parametrize("type", ["dense", "nested"])
def test_fused_sdp_choice_cpu(self, device, type: str):
# Test that cpu and nestedtensor cpu return MATH backend
for dtype in floating_types_and_half():
make_tensor = partial(rand_sdpa_tensor, type=type, device=device, dtype=dtype)
size = (2, 2, 3, 4)
q, k, v = make_tensor(size), make_tensor(size), make_tensor(size)
assert torch._fused_sdp_choice(q, k, v) == SDPBackend.MATH
@onlyCUDA
@parametrize("type", ["dense", "nested"])
def test_fused_sdp_choice(self, device, type: str):
if PLATFORM_SUPPORTS_FUSED_SDPA:
batch_size, seq_len, num_heads, head_dim = 2, 128, 8, 64
shape = (batch_size, seq_len, num_heads, head_dim)
make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float16, packed=True, requires_grad=True)
qkv = make_tensor(shape, type=type)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
if SM80OrLater and not type == "nested":
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.FLASH_ATTENTION
else:
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.EFFICIENT_ATTENTION
# Change dtype to float32 so that efficient attention should get chosen
make_tensor = partial(rand_sdpa_tensor, device=device, dtype=torch.float32, packed=True)
qkv = make_tensor(shape, type=type)
query, key, value = qkv.chunk(3, dim=-1)
query = query.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, num_heads, head_dim).transpose(1, 2)
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.EFFICIENT_ATTENTION
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Platform does not support fused SDPA")
@parametrize("warn_only", [True, False])
def test_sdp_choice_with_determinism(self, device, warn_only):
batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
shape = (batch_size, seq_len, num_heads, head_dim)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, packed=False)
query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)
with use_deterministic_algorithims(True, warn_only=warn_only):
with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=True):
assert torch._fused_sdp_choice(query, key, value) == SDPBackend.EFFICIENT_ATTENTION
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Platform does not support fused SDPA")
@parametrize("warn_only", [True, False])
def test_mem_eff_backwards_throws_determinism_warning(self, device, warn_only):
batch_size, seq_len, num_heads, head_dim = 1, 64, 8, 64
shape = (batch_size, seq_len, num_heads, head_dim)
make_tensor = partial(rand_sdpa_tensor, type="dense", device=device, dtype=torch.float32, packed=False, requires_grad=True)
query, key, value = make_tensor(shape), make_tensor(shape), make_tensor(shape)
warning_context = (
self.assertWarnsRegex(
UserWarning,
"Memory Efficient attention defaults to a non-deterministic algorithm.",
)
if warn_only
else contextlib.nullcontext()
)
with use_deterministic_algorithims(True, warn_only=warn_only):
with sdp_kernel(**backend_map[SDPBackend.EFFICIENT_ATTENTION]):
with warning_context:
torch.nn.functional.scaled_dot_product_attention(query, key, value).sum().backward()
@onlyCUDA
@unittest.skip("This test is not behaving deterministaclly non-deterministaclly on CI/CD")
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Platform does not support fused SDPA")
def test_mem_eff_backwards_determinism(self, device):
# Need big seq_len to ensure that num_splits > 1
dtype = torch.float32
batch_size, seq_len, n_heads, head_dim = 1, 1024, 8, 64
query = torch.rand(batch_size, n_heads, seq_len, head_dim,
device=device, dtype=dtype, requires_grad=True)
key = torch.rand(batch_size, n_heads, seq_len, head_dim, device=device,
dtype=dtype, requires_grad=True)
value = torch.rand(batch_size, n_heads, seq_len, head_dim,
device=device, dtype=dtype, requires_grad=True)
with sdp_kernel(enable_mem_efficient=True, enable_math=False, enable_flash=False):
# Run once to establish baseline
out = F.scaled_dot_product_attention(query, key, value)
upward_grad = torch.rand_like(out)
out.backward(upward_grad)
intial_query_grad = query.grad
# Re-run the op with the same upward grad and check that the backward is
# not deterministic
diff_anwser_once = False
for _ in range(100):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
out.backward(upward_grad)
if not torch.equal(intial_query_grad, query.grad):
diff_anwser_once = True
break
self.assertTrue(diff_anwser_once)
with use_deterministic_algorithims(True, warn_only=False):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
upward_grad = torch.rand_like(out)
out.backward(upward_grad)
intial_query_grad = query.grad
# Re-run the op with the same upward grad and check that the backward is
# deterministic now that we have enforced it
diff_anwser_once = False
for _ in range(100):
query.grad = None
out = F.scaled_dot_product_attention(query, key, value)
out.backward(upward_grad)
if not torch.equal(intial_query_grad, query.grad):
diff_anwser_once = True
break
self.assertFalse(diff_anwser_once)
    # verified passing successfully on H100
    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Does not support SDPA")
    @parametrize("batch_size", [1, 8])
    @parametrize("seq_len_q", [4, 8, 64, 128, 256, 512, 1024, 2048] if SM80OrLater else [4, 8, 64, 128, 256, 512])
    @parametrize("seq_len_k", [4, 8, 64, 128, 256, 512, 1024, 2048] if SM80OrLater else [4, 8, 64, 128, 256, 512])
    @parametrize("head_dim", [8, 16, 32, 64, 72, 96, 128] if SM80OrLater else [8, 16, 32, 64])
    @parametrize("is_causal", [False, True])
    @parametrize("dropout_p", [0.0, 0.22])
    @parametrize("dtype", [torch.float16, torch.bfloat16, torch.float32] if
                 SM80OrLater else [torch.float16, torch.float32])
    @parametrize("scale", [None, "l1"])
    def test_mem_efficient_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
                                                       head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
                                                       scale: str):
        """Forward and backward of the mem-efficient kernel vs. high/low precision math references."""
        def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, p, seed, offset, device=device):
            # Reproduce the kernel's dropout mask so the math reference can reuse it
            mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
            rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, p, seed, offset)
            mask = (rand_uniform > p).to(torch.float32)
            return mask
        seed = 42
        # The "l1" parametrization maps to an explicit 1/head_dim scale; None keeps the default
        scale = scale if scale is None else (1 / head_dim)
        n_heads = 4
        query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                           device=device, dtype=dtype, requires_grad=True)
        key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                         dtype=dtype, requires_grad=True)
        value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                           device=device, dtype=dtype, requires_grad=True)

        # Run the math kernel on low precision references
        query_ref_lp, key_ref_lp, value_ref_lp = self.query_key_value_clones(query, key, value, dtype=dtype)

        higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
        query_ref, key_ref, value_ref = self.query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

        # Create real output
        with sdp_kernel(enable_mem_efficient=True, enable_flash=False, enable_math=False):
            # Set the seed and run the kernel
            torch.manual_seed(seed)
            out = F.scaled_dot_product_attention(query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale)

        if dropout_p == 0.0:
            with sdp_kernel(enable_math=True, enable_flash=False, enable_mem_efficient=False):
                # High Precision Math Reference
                out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
                                                         dropout_p=dropout_p, is_causal=is_causal, scale=scale)
                # Low Precision Math Reference
                out_lp_ref = F.scaled_dot_product_attention(query_ref_lp, key_ref_lp, value_ref_lp,
                                                            dropout_p=dropout_p, is_causal=is_causal, scale=scale)
        else:
            if seq_len_q > 1024:
                self.skipTest("Will call _fill_mem_eff_dropout_mask with too many threads!")
            # Create the dropout_mask
            torch.manual_seed(seed)
            dropout_mask = _get_mem_eff_drop_mask(batch_size, n_heads, seq_len_q, seq_len_k, dropout_p, seed, 0, device=device)
            # High Precision Math Reference
            out_ref = torch.ops.aten._scaled_dot_product_attention_math(
                query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal, scale=scale, dropout_mask=dropout_mask)[0]
            # Low Precision Math Reference
            out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
                query_ref_lp, key_ref_lp, value_ref_lp, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
                dropout_mask=dropout_mask)[0]

        # Same upstream gradient through all three paths
        upstream_grad = torch.rand_like(out, requires_grad=False)

        out.backward(upstream_grad)
        out_ref.backward(upstream_grad.to(out_ref.dtype))
        out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))

        # [Note] Fused Tolerances
        # Establish the numerical error between the "true" high precision math output
        # and the low precision math reference. We use this reference for the atol
        # And we use the default rtol for the low precision type.
        # We then provide a fudge factor for gradients respectively to account
        # for the use of the fused kernel rather than the eager implementation.
        output_ref_atol, output_ref_rtol = get_tolerances(out_ref, out_lp_ref)

        # Fudge Factor when dropout is enabled
        dropout_fudge_factor = 1.0 if dropout_p == 0.0 else 1.5

        query_fudge_factor = dropout_fudge_factor
        grad_q_ref_atol, grad_q_ref_rtol = get_tolerances(query_ref.grad, query_ref_lp.grad, query_fudge_factor)

        # TODO: Investigate why grad_k needs larger tolerances
        key_fudge_factor = 8 * dropout_fudge_factor
        grad_k_ref_atol, grad_k_ref_rtol = get_tolerances(key_ref.grad, key_ref_lp.grad, key_fudge_factor)

        value_fudge_factor = 7 if not SM80OrLater and dtype == torch.float16 else 1.0
        grad_v_ref_atol, grad_v_ref_rtol = get_tolerances(value_ref.grad, value_ref_lp.grad, value_fudge_factor)

        self.assertEqual(out, out_ref.to(out.dtype), atol=output_ref_atol, rtol=output_ref_rtol)
        self.assertEqual(query.grad, query_ref.grad.to(query.grad.dtype),
                         atol=grad_q_ref_atol, rtol=grad_q_ref_rtol)
        self.assertEqual(key.grad, key_ref.grad.to(key.grad.dtype),
                         atol=grad_k_ref_atol, rtol=grad_k_ref_rtol)
        self.assertEqual(value.grad, value_ref.grad.to(value.grad.dtype),
                         atol=grad_v_ref_atol, rtol=grad_v_ref_rtol)
    # verified passing successfully on H100
    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support SDPA or pre-SM80 hardware")
    @parametrize("batch_size", [1, 8])
    @parametrize("seq_len_q", [4, 8, 64, 128, 256, 512, 1024, 2048])
    @parametrize("seq_len_k", [4, 8, 64, 128, 256, 512, 1024, 2048])
    @parametrize("head_dim", [8, 16, 32, 64, 72, 96, 128])
    @parametrize("is_causal", [True, False])
    @parametrize("dropout_p", [0.0, 0.22, 0.48])
    @parametrize("dtype", [torch.float16, torch.bfloat16])
    @parametrize("scale", [None, "l1"])
    def test_flash_attention_vs_math_ref_grads(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
                                               head_dim: int, is_causal: bool, dropout_p: float, dtype: torch.dtype,
                                               scale: str):
        """Forward and backward of the flash-attention kernel vs. high/low precision math references."""
        # The "l1" parametrization maps to an explicit 1/head_dim scale; None keeps the default
        scale = scale if scale is None else (1 / head_dim)
        n_heads = 4
        query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                           device=device, dtype=dtype, requires_grad=True)
        key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                         dtype=dtype, requires_grad=True)
        value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                           device=device, dtype=dtype, requires_grad=True)

        # Run the math kernel on low precision references
        query_ref_lp, key_ref_lp, value_ref_lp = self.query_key_value_clones(query, key, value, dtype=dtype)
        higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
        query_ref, key_ref, value_ref = self.query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

        is_dropout = dropout_p > 0.0

        # Create real output; return_debug_mask=True exposes the kernel's softmax/dropout mask
        output_tuple = torch.ops.aten._scaled_dot_product_flash_attention(
            query, key, value, dropout_p=dropout_p, is_causal=is_causal, scale=scale, return_debug_mask=True)
        out = output_tuple[0]
        dbug_mask = output_tuple[-1]

        query_padding_mask = torch.ones(
            1, seq_len_q, device=device, dtype=torch.bool)
        key_padding_mask = torch.ones(
            1, seq_len_k, device=device, dtype=torch.bool)

        softmax_mask = self.convert_flash_attn_S_to_softmax(
            dbug_mask, query_padding_mask, key_padding_mask, head_dim=head_dim, causal=is_causal)
        # Non-negative entries mark positions the kernel kept (not dropped)
        dropout_mask = softmax_mask >= 0

        if not is_dropout:
            with sdp_kernel(enable_math=True, enable_flash=False, enable_mem_efficient=False):
                # High Precision Math Reference
                out_ref = F.scaled_dot_product_attention(
                    query_ref, key_ref, value_ref, is_causal=is_causal, scale=scale)
                # Low Precision Math Reference
                out_lp_ref = F.scaled_dot_product_attention(
                    query_ref_lp, key_ref_lp, value_ref_lp, is_causal=is_causal, scale=scale)
        else:
            # High Precision Math Reference
            out_ref = torch.ops.aten._scaled_dot_product_attention_math(
                query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal, scale=scale, dropout_mask=dropout_mask)[0]
            # Low Precision Math Reference
            out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
                query_ref_lp, key_ref_lp, value_ref_lp, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
                dropout_mask=dropout_mask)[0]

        upstream_grad = torch.rand_like(out, requires_grad=False)

        # backward for flash attention on sm86 and sm89 for headdim > 64 currently disabled
        if isSM86or89Device and head_dim in range(65, 129):
            self.assertRaises(RuntimeError, lambda: out.backward(upstream_grad))
            return
        out.backward(upstream_grad)
        out_ref.backward(upstream_grad.to(out_ref.dtype))
        out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))

        # See [Note] Fused Tolerances above
        output_ref_atol, output_ref_rtol = get_tolerances(out_ref, out_lp_ref)

        # TODO: Investigate why grad_q needs larger tolerances
        query_fudge_factor = 4
        grad_q_ref_atol, grad_q_ref_rtol = get_tolerances(query_ref.grad, query_ref_lp.grad, query_fudge_factor)

        grad_k_ref_atol, grad_k_ref_rtol = get_tolerances(key_ref.grad, key_ref_lp.grad)
        grad_v_ref_atol, grad_v_ref_rtol = get_tolerances(value_ref.grad, value_ref_lp.grad)

        self.assertEqual(out, out_ref.to(out.dtype), atol=output_ref_atol, rtol=output_ref_rtol)
        self.assertEqual(query.grad, query_ref.grad.to(query.grad.dtype),
                         atol=grad_q_ref_atol, rtol=grad_q_ref_rtol)
        self.assertEqual(key.grad, key_ref.grad.to(key.grad.dtype),
                         atol=grad_k_ref_atol, rtol=grad_k_ref_rtol)
        self.assertEqual(value.grad, value_ref.grad.to(value.grad.dtype),
                         atol=grad_v_ref_atol, rtol=grad_v_ref_rtol)
    @onlyCUDA
    @unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA or not SM80OrLater, "Does not support SDPA or pre-SM80 hardware")
    @parametrize("batch_size", [1, 8])
    @parametrize("seq_len_q", [256, 512, 1024])
    @parametrize("seq_len_k", [256, 512, 1024])
    @parametrize("head_dim", [32, 64])
    @parametrize("is_causal", [True, False])
    @parametrize("dropout_p", [0.0, 0.22])
    @parametrize("dtype", [torch.float16,])
    @parametrize("scale", [None, "l1"])
    @parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
    def test_fused_attention_vs_math_ref_grads_cudagraph(self, device, batch_size: int, seq_len_q: int, seq_len_k: int,
                                                         head_dim: int,
                                                         is_causal: bool,
                                                         dropout_p: float,
                                                         dtype: torch.dtype,
                                                         scale: str,
                                                         fused_kernel: SDPBackend):
        """Run the fused SDPA forward and backward captured in CUDA graphs and compare
        replayed results (and gradients) against the math reference."""
        def _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len, dropout_p, seed, offset, device=device):
            # Reproduce the mem-efficient kernel's dropout mask from its seed/offset
            mask = torch.empty((batch_size, n_heads, q_len, kv_len), device=device, dtype=torch.float32)
            rand_uniform = torch._fill_mem_eff_dropout_mask_(mask, dropout_p, seed, offset)
            mask = (rand_uniform > dropout_p).to(torch.float32)
            return mask

        def get_dropout_mask(output, fused_kernel, batch_size, n_heads, q_len, kv_len, dropout_p, device=device):
            # Recover the dropout mask that the fused kernel actually applied
            if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
                # NOTE(review): this branch reads the enclosing `output_tuple` rather
                # than the `output` parameter. They are the same object at the only
                # call site, so behavior is unaffected — confirm intent before relying
                # on the parameter.
                output_seed, output_offset = output_tuple[2], output_tuple[3]
                output_seed = output_seed.item()
                output_offset = output_offset.item()
                return _get_mem_eff_drop_mask(batch_size, n_heads, q_len, kv_len,
                                              dropout_p, output_seed, output_offset, device=device)
            else:
                # Flash attention: decode the kernel's returned debug mask instead
                dbug_mask = output[-1]

                query_padding_mask = torch.ones(
                    1, seq_len_q, device="cuda", dtype=torch.bool)
                key_padding_mask = torch.ones(
                    1, seq_len_k, device="cuda", dtype=torch.bool)

                softmax_mask = self.convert_flash_attn_S_to_softmax(
                    dbug_mask, query_padding_mask, key_padding_mask, head_dim=head_dim, causal=is_causal)
                # Non-negative entries mark positions the kernel kept (not dropped)
                dropout_mask = softmax_mask >= 0
                return dropout_mask

        seed = 42
        # The "l1" parametrization maps to an explicit 1/head_dim scale; None keeps the default
        scale = scale if scale is None else (1 / head_dim)
        n_heads = 4
        query = torch.rand(batch_size, n_heads, seq_len_q, head_dim,
                           device=device, dtype=dtype, requires_grad=True)
        key = torch.rand(batch_size, n_heads, seq_len_k, head_dim, device=device,
                         dtype=dtype, requires_grad=True)
        value = torch.rand(batch_size, n_heads, seq_len_k, head_dim,
                           device=device, dtype=dtype, requires_grad=True)

        fused_op = (torch.ops.aten._scaled_dot_product_efficient_attention
                    if fused_kernel == SDPBackend.EFFICIENT_ATTENTION else torch.ops.aten._scaled_dot_product_flash_attention)

        # Run the math kernel on low precision references
        query_ref_lp, key_ref_lp, value_ref_lp = self.query_key_value_clones(query, key, value, dtype=dtype)
        higher_precision_dtype = torch.float64 if dtype == torch.float32 else torch.float32
        query_ref, key_ref, value_ref = self.query_key_value_clones(query, key, value, dtype=higher_precision_dtype)

        # warmup
        s = torch.cuda.Stream()
        s.wait_stream(torch.cuda.current_stream())
        # Set the global seed before capture
        torch.manual_seed(seed)
        kwargs = {"dropout_p": dropout_p, "is_causal": is_causal, "scale": scale}
        if fused_kernel == SDPBackend.EFFICIENT_ATTENTION:
            kwargs["compute_log_sumexp"] = True
        if fused_kernel == SDPBackend.FLASH_ATTENTION:
            kwargs['return_debug_mask'] = True
        with torch.cuda.stream(s):
            # Create real output
            output_tuple = fused_op(query, key, value, **kwargs)

        torch.cuda.current_stream().wait_stream(s)
        out = output_tuple[0]
        upstream_grad = torch.rand_like(out, requires_grad=False)
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            out.backward(upstream_grad)
        for x in (query, key, value):
            x.grad = None
        g = torch.cuda.CUDAGraph()
        # Create real output
        with torch.cuda.graph(g):
            tmp = torch.rand_like(query, device=query.device)  # test non-zero intragraph offset
            # Create real output
            output_tuple = fused_op(query, key, value, **kwargs)
            assert all(not isinstance(o, torch.Tensor) or o.is_cuda for o in output_tuple)
        g.replay()
        out_first = output_tuple[0].clone()
        g.replay()
        out = output_tuple[0]
        if dropout_p == 0.0:
            # Without dropout, replays must be bit-identical
            self.assertEqual(out_first, out, atol=0, rtol=0)
        else:
            # replays produce different results
            self.assertNotEqual(out_first, out)

        with sdp_kernel(enable_math=True, enable_flash=False, enable_mem_efficient=False):
            if dropout_p == 0.0:
                # High Precision Math Reference
                out_ref = F.scaled_dot_product_attention(query_ref, key_ref, value_ref,
                                                         dropout_p=dropout_p, is_causal=is_causal, scale=scale)
                # Low Precision Math Reference
                out_lp_ref = F.scaled_dot_product_attention(query_ref_lp, key_ref_lp, value_ref_lp,
                                                            dropout_p=dropout_p, is_causal=is_causal, scale=scale)
            else:
                # Create the dropout_mask
                dropout_mask = get_dropout_mask(output_tuple, fused_kernel, batch_size,
                                                n_heads, seq_len_q, seq_len_k, dropout_p, device)
                # High Precision Math Reference
                out_ref = torch.ops.aten._scaled_dot_product_attention_math(
                    query_ref, key_ref, value_ref, dropout_p=dropout_p, is_causal=is_causal,
                    scale=scale, dropout_mask=dropout_mask)[0]
                # Low Precision Math Reference
                out_lp_ref = torch.ops.aten._scaled_dot_product_attention_math(
                    query_ref_lp, key_ref_lp, value_ref_lp, dropout_p=dropout_p, is_causal=is_causal, scale=scale,
                    dropout_mask=dropout_mask)[0]

        # Capture and replay the backward in its own graph
        g1 = torch.cuda.CUDAGraph()
        with torch.cuda.graph(g1):
            out.backward(upstream_grad)
        g1.replay()
        out_ref.backward(upstream_grad.to(out_ref.dtype))
        out_lp_ref.backward(upstream_grad.to(out_lp_ref.dtype))

        # [Note] Fused Tolerances
        # Establish the numerical error between the "true" high precision math output
        # and the low precision math reference. We use this reference for the atol
        # And we use the default rtol for the low precision type.
        # We then provide a fudge factor for gradients respectively to account
        # for the use of the fused kernel rather than the eager implementation.
        output_ref_atol, output_ref_rtol = get_tolerances(out_ref, out_lp_ref)

        # Fudge Factor when dropout is enabled
        dropout_fudge_factor = 1.0 if dropout_p == 0.0 else 1.5

        query_fudge_factor = dropout_fudge_factor
        grad_q_ref_atol, grad_q_ref_rtol = get_tolerances(query_ref.grad, query_ref_lp.grad, query_fudge_factor)

        # TODO: Investigate why grad_k needs larger tolerances
        key_fudge_factor = 8 * dropout_fudge_factor
        grad_k_ref_atol, grad_k_ref_rtol = get_tolerances(key_ref.grad, key_ref_lp.grad, key_fudge_factor)

        value_fudge_factor = 7 if not SM80OrLater and dtype == torch.float16 else 1.0
        grad_v_ref_atol, grad_v_ref_rtol = get_tolerances(value_ref.grad, value_ref_lp.grad, value_fudge_factor)

        self.assertEqual(out, out_ref.to(out.dtype), atol=output_ref_atol, rtol=output_ref_rtol)
        self.assertEqual(query.grad, query_ref.grad.to(query.grad.dtype),
                         atol=grad_q_ref_atol, rtol=grad_q_ref_rtol)
        self.assertEqual(key.grad, key_ref.grad.to(key.grad.dtype),
                         atol=grad_k_ref_atol, rtol=grad_k_ref_rtol)
        self.assertEqual(value.grad, value_ref.grad.to(value.grad.dtype),
                         atol=grad_v_ref_atol, rtol=grad_v_ref_rtol)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
def test_fused_kernels_seq_len_1_inputs(self, device, fused_kernel):
if (not SM80OrLater) and fused_kernel == SDPBackend.FLASH_ATTENTION:
return
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float16)
batch, num_heads, head_dim = 32, 16, 64
seq_lens = torch.randint(low=1, high=32, size=(batch,))
# make sure some seq_lens are 1
num_ones = 10
indices = torch.randint(low=0, high=batch, size=(num_ones,))
seq_lens.scatter_(0, indices, 1)
shape = (batch, seq_lens.tolist(), num_heads, head_dim)
query = rand_nested_tensor(shape)
key = rand_nested_tensor(shape)
value = rand_nested_tensor(shape)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdp_kernel(**backend_map[fused_kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query.contiguous().to(torch.float32),
key.contiguous().to(torch.float32),
value.contiguous().to(torch.float32),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(torch.float16), atol=1e-3, rtol=1e-2)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
@parametrize("fused_kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
def test_fused_kernels_seq_len_0_inputs(self, device, fused_kernel):
if (not SM80OrLater) and fused_kernel == SDPBackend.FLASH_ATTENTION:
return
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float16)
batch, num_heads, head_dim = 32, 16, 64
seq_lens = torch.randint(low=1, high=32, size=(batch,))
# make sure some seq_lens are 0
num_zeros = 10
indices = torch.randint(low=0, high=batch, size=(num_zeros,))
seq_lens.scatter_(0, indices, 0)
shape = (batch, seq_lens.tolist(), num_heads, head_dim)
query = rand_nested_tensor(shape)
key = rand_nested_tensor(shape)
value = rand_nested_tensor(shape)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdp_kernel(**backend_map[fused_kernel]):
with self.assertRaisesRegex(RuntimeError, "No available kernel"):
torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
@parametrize("kernel", [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION])
@parametrize("expand_q_batch", [True, False])
@parametrize("expand_k_batch", [True, False])
@parametrize("expand_v_batch", [True, False])
@parametrize("expand_q_num_heads", [True, False])
@parametrize("expand_k_num_heads", [True, False])
@parametrize("expand_v_num_heads", [True, False])
def test_fused_kernels_nested_broadcasting(
self,
device,
kernel,
expand_q_batch,
expand_k_batch,
expand_v_batch,
expand_q_num_heads,
expand_k_num_heads,
expand_v_num_heads,
):
if (not SM80OrLater) and kernel == SDPBackend.FLASH_ATTENTION:
return
is_efficient = kernel == SDPBackend.EFFICIENT_ATTENTION
dtype = torch.float32 if is_efficient else torch.float16
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=dtype)
batch, num_heads, head_dim = 32, 8, 64
head_dim_v = 32 if is_efficient else head_dim
seq_lens_q = (torch.randint(low=1, high=5, size=(1,)).item()
if expand_q_batch
else torch.randint(low=1, high=32, size=(batch,)).tolist())
seq_lens_kv = (torch.randint(low=1, high=5, size=(1,)).item()
if (expand_k_batch or expand_v_batch)
else torch.randint(low=1, high=32, size=(batch,)).tolist())
batch_q = 1 if expand_q_batch else batch
batch_k = 1 if expand_k_batch else batch
batch_v = 1 if expand_v_batch else batch
# handle case where all batch_sizes are 1
batch = max(batch_q, batch_k, batch_v)
num_heads_q = 1 if expand_q_num_heads else num_heads
num_heads_k = 1 if expand_k_num_heads else num_heads
num_heads_v = 1 if expand_v_num_heads else num_heads
# handle case where all num_heads are 1
num_heads = max(num_heads_q, num_heads_k, num_heads_v)
q_shape = (batch_q, seq_lens_q, num_heads_q, head_dim)
k_shape = (batch_k, seq_lens_kv, num_heads_k, head_dim)
v_shape = (batch_v, seq_lens_kv, num_heads_v, head_dim_v)
query = rand_nested_tensor(q_shape)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
def _broadcast(t, batch_broadcasted, num_heads_broadcasted):
if batch_broadcasted and num_heads_broadcasted:
# (1, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor(
[t[0].expand(-1, num_heads, t.size(-1)) for _ in range(batch)], dtype=torch.float32)
elif batch_broadcasted:
# (1, seq_len, num_heads, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([t[0] for _ in range(batch)], dtype=torch.float32)
elif num_heads_broadcasted:
# (batch, seq_len, 1, head_dim) -> (batch, seq_len, num_heads, head_dim)
result = torch.nested.nested_tensor([x.expand(-1, num_heads, t.size(-1))
for x in t.unbind()], dtype=torch.float32)
else:
result = t.to(torch.float32)
return result
query_expanded = _broadcast(query, expand_q_batch, expand_q_num_heads).transpose(1, 2)
key_expanded = _broadcast(key, expand_k_batch, expand_k_num_heads).transpose(1, 2)
value_expanded = _broadcast(value, expand_v_batch, expand_v_num_heads).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdp_kernel(**backend_map[kernel]):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key_expanded.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous().to(dtype), atol=1e-3, rtol=1e-2)
@onlyCUDA
@unittest.skipIf(not PLATFORM_SUPPORTS_FUSED_SDPA, "Fused SDPA was not built for this system")
def test_fused_kernels_nested_broadcasting_query_dense(self, device):
rand_nested_tensor = partial(rand_sdpa_tensor, type="nested", device=device, dtype=torch.float32)
batch, num_heads, head_dim, head_dim_v = 32, 16, 64, 96
seq_lens = torch.randint(low=1, high=32, size=(batch,)).tolist()
q_shape = (1, 1, num_heads, head_dim)
k_shape = (batch, seq_lens, num_heads, head_dim)
v_shape = (batch, seq_lens, 1, head_dim_v)
# create a dense query
query = torch.randn(q_shape, device=device, dtype=torch.float32)
key = rand_nested_tensor(k_shape)
value = rand_nested_tensor(v_shape)
# (1, 1, num_heads, head_dim) -> (batch, 1, num_heads, head_dim)
query_expanded = torch.nested.nested_tensor([query.squeeze(0) for _ in range(batch)]).transpose(1, 2)
# (batch, seq_lens, 1, head_dim) -> (batch, seq_lens, num_heads, head_dim)
value_expanded = torch.nested.nested_tensor(
[t.expand(-1, num_heads, head_dim_v) for t in value.unbind()]).transpose(1, 2)
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
with sdp_kernel(enable_flash=False, enable_math=False, enable_mem_efficient=True):
actual = torch.nn.functional.scaled_dot_product_attention(
query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False)
with sdp_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
math_ref = torch.nn.functional.scaled_dot_product_attention(
query_expanded.contiguous(), key.contiguous(), value_expanded.contiguous(),
attn_mask=None, dropout_p=0.0, is_causal=False)
self.assertEqual(actual.contiguous(), math_ref.contiguous(), atol=1e-3, rtol=1e-2)
device_types = ("cpu", "cuda")
instantiate_device_type_tests(TestTransformers, globals(), only_for=device_types)
instantiate_device_type_tests(TestSDPA, globals(), only_for=device_types)
instantiate_device_type_tests(TestSDPAFailureModes, globals(), only_for=device_types)
if __name__ == '__main__':
run_tests()
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
699c42fa3bf5aa3e7036983a7ba52eef403977dd | 2f638d47a9681cbb2caab865702ddca39a0456d3 | /djangocms_misc/basic/app_template/views.py | 34e07fb3adb9556429404f527e27f54219269bc9 | [
"MIT"
] | permissive | bnzk/djangocms-misc | b0d1a1950b3d8c7752ea661c74bc08bfbd0360a6 | 8869384305ef7ff8538af986f4854bcfde7257de | refs/heads/develop | 2023-06-08T10:12:11.275012 | 2023-05-30T13:00:34 | 2023-05-30T13:00:34 | 66,085,267 | 1 | 1 | MIT | 2023-02-04T07:49:28 | 2016-08-19T13:43:34 | Python | UTF-8 | Python | false | false | 410 | py | from django.views.generic import ListView, DetailView
from .models import AppTemplate
from .views_utils import PublishedViewMixin, AutoSlugMixin, LanguageChooserEnhancerMixin
class AppTemplateListView(PublishedViewMixin, ListView):
model = AppTemplate
class AppTemplateDetailView(
AutoSlugMixin,
PublishedViewMixin,
LanguageChooserEnhancerMixin,
DetailView,
):
model = AppTemplate
| [
"bnzk@bnzk.ch"
] | bnzk@bnzk.ch |
d1ddbd8e8f4dfdb9b410d931a174d498c0ea422f | 84f1fea102aeb2d324e8ad3908e1765d04a0a730 | /manage.py | 61ffa52ca3f12cdb094726d77de51bf55c8f649f | [
"Apache-2.0"
] | permissive | Natsoye/explorer | c205f8eb8d08705c2c4ee4ee45c28f7d0a534b10 | 638c70204d6001d9c5c56701917a6273a02c90cf | refs/heads/master | 2021-08-30T10:42:56.371192 | 2021-08-17T15:43:04 | 2021-08-17T15:43:04 | 181,131,891 | 2 | 0 | Apache-2.0 | 2021-08-17T15:43:05 | 2019-04-13T06:43:15 | Python | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blockexplorer.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"mflaxman@gmail.com"
] | mflaxman@gmail.com |
f5ac3e553479901461c99d71fd770afbce1fe15f | 45f93a9d47204d76b8bf25a71dfb79403e75c33c | /Trees_and_Graphs/Bellmen-Ford-Algorithm.py | 147fdc8e118ce15efcd43136f824d40cbe359e4f | [] | no_license | tahmid-tanzim/problem-solving | 0173bce1973ac3e95441a76c10324c0e1b0a57c3 | 6ddb51de6772130f209474e76f39ca2938f444f0 | refs/heads/master | 2023-06-25T02:18:03.690263 | 2023-06-20T06:58:46 | 2023-06-20T06:58:46 | 137,173,850 | 4 | 1 | null | 2022-03-30T08:28:41 | 2018-06-13T06:44:25 | Python | UTF-8 | Python | false | false | 1,960 | py | #!/usr/bin/python3
"""
Bellman Ford Algorithm
Single-Source - Shortest Path (SSSP)
Dynamic Programming
Time complexity - O(n^2)
"""
class Graph:
def __int__(self):
self.adjacencyList = []
if __name__ == "__main__":
# inputs = (
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F"),
# "edges": [
# ("A", "B", 2),
# ("A", "C", 4),
# ("B", "C", 1),
# ("B", "D", 7),
# ("C", "E", 3),
# ("D", "F", 1),
# ("E", "F", 5),
# ("E", "D", 2),
# ],
# "type": "Directed Graph"
# },
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F"),
# "edges": [
# ("A", "B", 50),
# ("A", "D", 10),
# ("A", "C", 45),
# ("B", "C", 10),
# ("B", "D", 15),
# ("C", "E", 30),
# ("D", "A", 10),
# ("D", "E", 15),
# ("E", "B", 20),
# ("E", "C", 35),
# ("F", "E", 3),
# ],
# "type": "Directed Graph"
# },
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F", "G", "H", "I"),
# "edges": [
# ("A", "B", 4),
# ("A", "H", 8),
# ("H", "B", 11),
# ("B", "C", 8),
# ("H", "I", 7),
# ("H", "G", 1),
# ("I", "G", 6),
# ("I", "C", 2),
# ("D", "C", 7),
# ("D", "E", 9),
# ("D", "F", 14),
# ("E", "F", 10),
# ("C", "F", 4),
# ("G", "F", 2),
# ],
# "type": "Undirected Graph"
# },
# )
pass
| [
"tahmid.tanzim@gmail.com"
] | tahmid.tanzim@gmail.com |
b8572b1bb7ec82b1467e1ed2653aca1c2a3d07b4 | caa10e2a97ebbeea69027b3f35f8fe096d9a6ef2 | /backend/manage.py | 8439f43d3ecddada36719d83f8f0baecb2c891d6 | [] | no_license | crowdbotics-apps/m-201-oct-dev-13649 | 0da5d7457951e5853a55f2ce700e0ecb40cd0a8d | 042c7e9a73c160138b0d69ef294da4af0e9f1e54 | refs/heads/master | 2022-12-30T00:28:43.972381 | 2020-10-20T13:16:56 | 2020-10-20T13:16:56 | 305,697,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "m_20_oct_dev_13649.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
e58d118d8660ed80dce0203b57e55c19fe6d55fb | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/collective/test_communication_stream_allreduce_api.py | 60386a6262ff257ec272578a19e1b583f84a3960 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 1,685 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import test_communication_api_base as test_base
class TestCommunicationStreamAllreduceAPI(test_base.CommunicationTestDistBase):
def setUp(self):
super().setUp(num_of_devices=2, timeout=120)
self._default_envs = {
"backend": "nccl",
"shape": "(100, 200)",
"dtype": "float32",
"seeds": str(self._seeds),
}
self._changeable_envs = {
"sync_op": ["True", "False"],
"use_calc_stream": ["True", "False"],
}
def test_allreduce_stream(self):
envs_list = test_base.gen_product_envs_list(
self._default_envs, self._changeable_envs
)
for envs in envs_list:
if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]):
continue
self.run_test_case(
"communication_stream_allreduce_api_dygraph.py",
user_defined_envs=envs,
)
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
f8bc55e2cd75960e261d70dec8161f5a44faaa63 | 494af1db6ac6a72b738d79053a8084c3afd1dbd2 | /smartapp/urls.py | ea82d10e5d8b824dba8bfc8c28204ba5502ada1c | [
"MIT"
] | permissive | ae200/SmartApp | b21f0e031a558341b18ea7ca5787f726e1d09fb5 | b24bb7139e65976428ceec1e9d082f2eac52fd24 | refs/heads/master | 2023-01-15T05:26:31.005542 | 2020-07-16T21:52:31 | 2020-07-16T21:52:31 | 132,465,331 | 1 | 1 | MIT | 2023-01-06T05:24:56 | 2018-05-07T13:34:30 | Python | UTF-8 | Python | false | false | 3,510 | py | """smartapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path, re_path, include
from django.contrib import admin
from django.views.generic.base import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/', admin.site.urls),
#path('register/', register),
# url (r'^$', TemplateView.as_view(template_name="ang_home.html"), name='home'),
path('api/movies/', include('movies.api.urls')),
# path('streamapi/streammovies/', include('streammovies.streamapi.urls')),
path('actionapi/actionmovies/', include('actionmovies.actionapi.urls')),
path('actionthrillerapi/actionthriller/', include('actionthriller.actionthrillerapi.urls')),
path('actionrealapi/actionreal/', include('actionreal.actionrealapi.urls')),
path('adventureapi/adventuremovies/', include('adventuremovies.adventureapi.urls')),
path('adventurethrillerapi/adventurethriller/', include('adventurethriller.adventurethrillerapi.urls')),
path('adventurerealapi/adventurereal/', include('adventurereal.adventurerealapi.urls')),
path('comedyapi/comedymovies/', include('comedymovies.comedyapi.urls')),
path('comedythrillerapi/comedythriller/', include('comedythriller.comedythrillerapi.urls')),
path('comedyrealapi/comedyreal/', include('comedyreal.comedyrealapi.urls')),
path('dramathrillerapi/dramathriller/', include('dramathriller.dramathrillerapi.urls')),
path('dramarealapi/dramareal/', include('dramareal.dramarealapi.urls')),
path('dramaapi/dramamovies/', include('dramamovies.dramaapi.urls')),
path('fictionthrillerapi/fictionthriller/', include('fictionthriller.fictionthrillerapi.urls')),
path('fictionrealapi/fictionreal/', include('fictionreal.fictionrealapi.urls')),
path('fictionapi/fictionmovies/', include('fictionmovies.fictionapi.urls')),
path('historicalapi/historicalmovies/', include('historicalmovies.historicalapi.urls')),
path('historicalthrillerapi/historicalthriller/', include('historicalthriller.historicalthrillerapi.urls')),
path('historicalrealapi/historicalreal/', include('historicalreal.historicalrealapi.urls')),
url(r'^rest-auth/', include('rest_auth.urls')),
url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
url(r'^users/', include('users.urls')),
url(r'^account/', include('allauth.urls')),
url(r'^accounts-rest/registration/account-confirm-email/(?P<key>.+)/', confirm_email, name='account_confirm_email')
]
urlpatterns += [
re_path(r'^(?P<path>.*)', TemplateView.as_view(template_name="ang_movies.html"), name='movies'),
re_path(r'^(?P<path>.*)', TemplateView.as_view(template_name="ang_home.html"), name='home'),
] | [
"dandaoluks@gmail.com"
] | dandaoluks@gmail.com |
b4b9f787181c6c79a53b128b8e22d735c4638e6c | afa52cfab070818eb08fb9a456b0defcf2df5ebd | /tools/upgrade/errors.py | 26d6437bea9c810823d82964c742119199b1b3a1 | [
"MIT"
] | permissive | vkoukoutsas/pyre-check | a128d77a6d56b50639496025cc458873db7b21c5 | 73fa0dda836c413a86879eb9ef8ba0538e29d615 | refs/heads/master | 2020-07-22T11:09:00.988050 | 2019-09-08T19:56:23 | 2019-09-08T19:57:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import itertools
import json
import sys
from typing import Any, Dict, List, Optional, Tuple
from .postprocess import LOG
def json_to_errors(json_string: Optional[str]) -> List[Dict[str, Any]]:
if json_string:
try:
return json.loads(json_string)
# pyre-fixme[18]: Undefined name [18]: Global name `json.decoder` is not defined
except json.decoder.JSONDecodeError:
LOG.error(
"Recevied invalid JSON as input."
"If piping from `pyre check` be sure to use `--output=json`."
)
else:
LOG.error(
"Recevied no input."
"If piping from `pyre check` be sure to use `--output=json`."
)
return []
def sort_errors(errors: List[Dict[str, Any]]) -> List[Tuple[str, List[Any]]]:
def error_path(error):
return error["path"]
return itertools.groupby(sorted(errors, key=error_path), error_path)
def filter_errors(arguments, errors) -> List[Dict[str, Any]]:
def matches_error_code(error) -> bool:
return error["code"] == arguments.only_fix_error_code
if arguments.only_fix_error_code:
errors = list(filter(matches_error_code, errors))
return errors
def errors_from_stdin(_arguments) -> List[Dict[str, Any]]:
input = sys.stdin.read()
errors = json_to_errors(input)
return filter_errors(_arguments, errors)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1eb27ae6c2701ba03d0d277c21d91755f6868825 | 48c62c3693e419257d1e26fd065bf92801f4ef4d | /django_lets_go/custom_xml_emitter.py | 16d19cabf07e9e99990e9f976ae2d4747bf3a91a | [
"MIT"
] | permissive | callhub/django-lets-go | 0da70777331adb9c0637f7b03154bdff7dd8a026 | 70e9016a91b1db06685a7d0cf9ee414e49375fe8 | refs/heads/master | 2021-01-25T13:11:21.140612 | 2014-09-23T16:17:29 | 2014-09-23T16:17:29 | 123,537,598 | 0 | 0 | null | 2018-03-02T06:03:26 | 2018-03-02T06:03:25 | null | UTF-8 | Python | false | false | 2,153 | py | #
# Django-Lets-go License
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from django.utils.encoding import smart_unicode
from django.utils.xmlutils import SimplerXMLGenerator
from piston.emitters import Emitter
from piston.utils import Mimer
from django.contrib.auth import authenticate
from django.http import HttpResponse
from django.conf import settings
class CustomXmlEmitter(Emitter):
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
self._to_xml(xml, item)
elif isinstance(data, dict):
for key, value in data.iteritems():
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key.split()[0])
else:
xml.characters(smart_unicode(data))
def render(self, request):
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, "utf-8")
xml.startDocument()
xml.startElement("Response", {})
self._to_xml(xml, self.construct())
xml.endElement("Response")
xml.endDocument()
return stream.getvalue()
Emitter.register('custom_xml', CustomXmlEmitter, 'text/xml; charset=utf-8')
Mimer.register(lambda *a: None, ('text/xml',))
class IpAuthentication(object):
"""IP Authentication handler
"""
def __init__(self, auth_func=authenticate, realm='API'):
self.auth_func = auth_func
self.realm = realm
def is_authenticated(self, request):
try:
settings.API_ALLOWED_IP.index(request.META['REMOTE_ADDR'])
return True
except:
return False
def challenge(self):
resp = HttpResponse("Not Authorized")
resp.status_code = 401
return resp
| [
"areski@gmail.com"
] | areski@gmail.com |
d382ac9e7cbdb1e20a269d0296a00a0cd13c0279 | b96f1bad8a74d31d8ff79bc955813bfcd17d7b26 | /Longest Valid Parentheses3.py | 96a38c4d67c9185d78a04d0b65099873753c94b3 | [] | no_license | brianhu0716/LeetCode-Solution | e7177af15e84e833ce8ab05027683ed4ac489643 | 158a4359c90b723545b22c4898047274cc1b80a6 | refs/heads/main | 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 21:37:21 2021
@author: Brian
"""
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 21:37:21 2021
@author: Brian
"""
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
import numpy as np
class Solution:
def longestValidParentheses(self, s) -> int:
i = 0
self.s = s
self.pairs = []
self.c = 0
self.flag = []
if len(s) <= 1: # in case s = '','('...
return 0
while True:
if self.s[i] == '(' and self.s[i+1] == ')':
self.c += 1
self.flag += [(i,i+1)]
i += 2
elif self.s[i] == '(' and self.s[i+1] == '(':
self.checkconsecutive(i)
i = self.fi
else:
i += 1
# self.pairs += [self.c]
self.c = 0
self.pairs += [self.c]
if i > len(self.s) - 2:
break
if len(self.flag) > 1:
for i in range(len(self.flag)-1):
if self.flag[i+1][0] - self.flag[i][1] != 1:
break
else:
if '(' in self.s[0:self.flag[0][0]] and ')' in self.s[self.flag[-1][1] + 1:]:
self.pairs += [len(self.flag) + len(self.s[0:self.flag[0][0]])]
# condition = True
# if condition and len(self.s[0:self.flag[0][0]]) == len(self.s[self.flag[-1][1]:]):
# if '(' in self.s[0:self.flag[0][0]] and ')' in self.s[self.flag[-1][1]:]:
# self.pairs += [len(self.flag) + len(self.s[0:self.flag[0][0]])]
# return max(self.pairs) * 2
return max(self.pairs) * 2
def checkconsecutive(self,fi):
self.fi = fi
for i in range(fi,len(self.s)):
if self.s[i] == ')':
break
shift = i-self.fi
if len(self.s[self.fi:i]) == len(self.s[i:i+shift]) and (np.array([item for item in self.s[i:i+shift]]) == ')').all():
self.c += i - self.fi
self.fi = i + shift
else:
self.c = 0
self.fi = fi + 1
s = ['()((())))', # 8
'()()', # 4
'()(()', # 2
'())()', # 2
')(', # 0
'())((()))', # 6
'()(((()))', # 6
'(()())', # 6
")()())", # 4
"(()()", # 4
"((()))())"]
test = Solution()
for i in range(len(s)):
test.longestValidParentheses(s[i])
print(max(test.pairs) * 2)
| [
"85205343+brianhu0716@users.noreply.github.com"
] | 85205343+brianhu0716@users.noreply.github.com |
11749c8f04b7a5cd3b1187f627182c30f5104806 | 03ff89c04cd325d3f7b4266c59e39011f5b466ba | /ogs5_transectplot/venv/lib/python2.7/site-packages/pip/_vendor/urllib3/packages/ordered_dict.py | 21e600c6185e1e25a66b80ef799d7a187d428236 | [] | no_license | timohouben/python_scripts | dabd35a7778fc459beddd5286141d405af32be1c | 15c622645725560c6450cd31ff194aa95394bfc9 | refs/heads/master | 2021-02-03T21:14:29.916525 | 2020-02-27T15:02:04 | 2020-02-27T15:02:04 | 243,537,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,197 | py | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
"Dictionary that remembers insertion order"
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
"""Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
"""
if len(args) > 1:
raise TypeError("expected at most 1 arguments, got %d" % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
"od.__setitem__(i, y) <==> od[i]=y"
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
"od.__delitem__(y) <==> del od[y]"
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
"od.__iter__() <==> iter(od)"
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
"od.__reversed__() <==> reversed(od)"
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
"od.clear() -> None. Remove all items from od."
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
"""od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
"""
if not self:
raise KeyError("dictionary is empty")
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
"od.keys() -> list of keys in od"
return list(self)
def values(self):
"od.values() -> list of values in od"
return [self[key] for key in self]
def items(self):
"od.items() -> list of (key, value) pairs in od"
return [(key, self[key]) for key in self]
def iterkeys(self):
"od.iterkeys() -> an iterator over the keys in od"
return iter(self)
def itervalues(self):
"od.itervalues -> an iterator over the values in od"
for k in self:
yield self[k]
def iteritems(self):
"od.iteritems -> an iterator over the (key, value) items in od"
for k in self:
yield (k, self[k])
def update(*args, **kwds):
"""od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
"""
if len(args) > 2:
raise TypeError(
"update() takes at most 2 positional "
"arguments (%d given)" % (len(args),)
)
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
"""od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
"""
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
"od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od"
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
"od.__repr__() <==> repr(od)"
call_key = id(self), _get_ident()
if call_key in _repr_running:
return "..."
_repr_running[call_key] = 1
try:
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
"Return state information for pickling"
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
"od.copy() -> a shallow copy of od"
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
"""OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
"""
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
"""od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
"""
if isinstance(other, OrderedDict):
return len(self) == len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| [
"timo.houben@ufz.de"
] | timo.houben@ufz.de |
3032790ef87d1235ba183234e61a2382394daf55 | 7bcec8a9c6a240ec0888bec4179f536046464005 | /moviesys/moviesys/.history/library/views_20210325010105.py | 41322bcdbacdcc67f0951a400f57f5fc6504fbbb | [] | no_license | yifanzhang13/MovieManagementSystem_group5 | c64e5810914c3d33ae6cd94e8eed5dc5a3962181 | 4cca1a4299311681d69b2347ca8d7b02e0846ebc | refs/heads/main | 2023-03-29T08:30:26.655108 | 2021-04-01T15:42:52 | 2021-04-01T15:42:52 | 344,417,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,720 | py | from django.shortcuts import render
from .models import Movies, Users, Ratings, Links, Tags
from django.db import connection
from django.views import generic
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from library.forms import SearchMovieForm
# Create your views here.
def index(request):
    """Render the landing page with the total movie count and the number
    of ratings at each score (1-5).

    Bug fix: the original used the return value of ``cursor.execute()``
    as a row count, which is driver-dependent (MySQLdb returns a row
    count, sqlite3 returns the cursor object itself).  ``COUNT(*)`` plus
    ``fetchone()`` is correct on every DB-API backend.
    """
    cursor = connection.cursor()
    try:
        def scalar(sql):
            # Run an aggregate query and return its single scalar result.
            cursor.execute(sql)
            return cursor.fetchone()[0]
        num_movies = scalar('SELECT COUNT(*) FROM library_movies')
        rating_5 = scalar('SELECT COUNT(*) FROM library_ratings WHERE RatingScore = 5')
        rating_4 = scalar('SELECT COUNT(*) FROM library_ratings WHERE RatingScore = 4')
        rating_3 = scalar('SELECT COUNT(*) FROM library_ratings WHERE RatingScore = 3')
        rating_2 = scalar('SELECT COUNT(*) FROM library_ratings WHERE RatingScore = 2')
        rating_1 = scalar('SELECT COUNT(*) FROM library_ratings WHERE RatingScore = 1')
    finally:
        cursor.close()
    context = {
        'num_movies': num_movies,
        'rating_5': rating_5,
        'rating_4': rating_4,
        'rating_3': rating_3,
        'rating_2': rating_2,
        'rating_1': rating_1,
    }
    return render(request, 'index.html', context=context)
def MoviesView(request):
    """List every row of library_movies on the Movies.html template."""
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies')
        rows = cursor.fetchall()
    finally:
        # Always release the cursor, even if the query raised.
        cursor.close()
    # Templates consume dicts, so convert each (id, title, genres) row.
    movie_dicts = [
        {
            'MovieID': row[0],
            'MovieTitle': row[1],
            'MovieGenres': row[2],
        }
        for row in rows
    ]
    return render(request, 'Movies.html', context={'movies': movie_dicts})
class MovieDetailView(generic.DetailView):
    # Generic class-based detail view for a single Movies record; Django
    # derives the template name and context variable from the model.
    # NOTE(review): the function-based MovieDetail below renders the same
    # template -- presumably only one of the two is routed; confirm in urls.
    model = Movies
def MovieDetail(request, pk):
    """Render the detail page for one movie, handling the search form POST.

    Fixes over the original:
      * The raw SQL concatenated ``pk`` into the query string; it now uses
        a parameterized query (defense in depth, even though the URLconf
        only passes ints).
      * Removed debug ``print`` calls and the ``movie`` binding that was
        immediately overwritten (``get_object_or_404`` is kept purely for
        its 404 side effect).
      * Renamed the local ``all`` so it no longer shadows the builtin.
    """
    form = SearchMovieForm()
    if request.method == 'POST':
        form = SearchMovieForm(request.POST)
        if form.is_valid():
            # NOTE(review): hard-coded redirect to movie 2 kept from the
            # original -- looks like a placeholder; confirm intent.
            return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/'+str(2))
    # Raise 404 early if the movie does not exist.
    get_object_or_404(Movies, pk=pk)
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies WHERE MovieID = %s', [pk])
        results = cursor.fetchall()
    finally:
        cursor.close()
    movies = [
        {
            'MovieID': row[0],
            'MovieTitle': row[1],
            'MovieGenres': row[2],
        }
        for row in results
    ]
    context = {
        'movies': movies,
    }
    return render(request, 'library/movies_detail.html', context=context)
def search(request):
    """Render the empty movie-search form page."""
    return render(request, "library/search.html", context={})
def handle(request):
    """Search handler: find movies whose title matches or contains the
    user's query and, for an exact match, attach that movie's rating stats.

    Renders library/resp.html with:
      * ``resp``   -- dicts for every movie whose title contains the query
      * ``report`` -- dict describing an exact-title match (empty if none)
    """
    text = request.POST["search_content"] # user input text
    movie_report = {}
    po_list = []
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies')
        results = cursor.fetchall()
        for row in results:
            # row layout: id title genres
            if text == row[1]: # exact title match found
                movie_report['MovieTitle'] = row[1]
                movie_report['MovieID'] = row[0]
                movie_report['MovieGenres'] = str(row[2]).replace("|"," & ")
                movie_report['test'] = 'test11'
            if text in row[1]: # title merely contains the search keyword
                # The JS template layer cannot consume tuples, so build dicts.
                dic = {
                    'MovieID':row[0],
                    'MovieTitle':row[1],
                    'MovieGenres':row[2],
                }
                po_list.append(dic)
        if movie_report:
            # Number of users who have watched and rated this movie.
            cursor.execute('SELECT count(*) FROM library_ratings WHERE MovieID_id = %s', [movie_report['MovieID']])
            results = cursor.fetchall()
            for row in results:
                movie_report['number_of_ratings'] = row[0]
            # The movie's rating scores -- fetched but only printed; the
            # average is apparently not computed yet (unfinished feature).
            cursor.execute('SELECT RatingScore FROM library_ratings WHERE MovieID_id = %s', [movie_report['MovieID']])
            results = cursor.fetchall()
            print(results)
    finally:
        cursor.close()
    context = {
        'resp':po_list,
        'report':movie_report,
    }
    return render(request, "library/resp.html", context=context)
def handle_backup(request):
    """Legacy ORM-based variant of handle(): exact match goes in ``report``,
    substring matches in ``resp``.

    Bug fix: the original built ``context`` but never returned an
    HttpResponse, so Django raised on every request routed here.  It now
    renders the same template as handle().
    """
    text = request.POST["search_content"]  # user input text
    db = Movies.objects.all()
    movie_report = []
    po_list = []
    for i in db:
        if text == i.MovieTitle:
            movie_report = i
        if text in i.MovieTitle:
            po_list.append(i)
    context = {
        'resp': po_list,
        'report': movie_report,
    }
    # Previously missing: a view must return a response object.
    return render(request, "library/resp.html", context=context)
# def MovieDetail(request, pk):
# movie = get_object_or_404(Movies, pk=pk)
# print(pk) # pk等于14 http://127.0.0.1:8000/library/movies/14
# # form = SearchMovieForm()
# # if request.method == 'POST':
# # form = SearchMovieForm(request.POST)
# # if form.is_valid():
# # return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/'+str(2))
# context = {
# 'movie': movie,
# }
# return render(request, 'library/movies_detail.html', context)
class MoviesListView(generic.ListView):
    # Generic list view: queries all records of the specified model (Movies)
    # and renders the default template (<app>/movies_list.html).  In the
    # template the queryset is available as ``object_list`` or
    # ``movies_list`` (generically "the_model_name_list").
    # NOTE(review): the original comment was copied from the Django tutorial's
    # "books" example; paths mentioning locallibrary/catalog do not apply here.
    model = Movies
"yifancheung13@gmail.com"
] | yifancheung13@gmail.com |
f53fbcd0fb56c46f2908629aafc5eecc2a3e9377 | 2af74fe66cd24eb63814477bc93b409ff971dcc2 | /backend/test_24647/settings.py | 05b71697ee5df0aaf60fdeda09c89b9f8ffc8e31 | [] | no_license | crowdbotics-apps/test-24647 | 3852044942b8e31a2f2636984015112369a67559 | 403ab427de342341739bd821fe7facebe14629f7 | refs/heads/master | 2023-03-05T16:23:50.487393 | 2021-02-21T19:30:25 | 2021-02-21T19:30:25 | 340,983,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,017 | py | """
Django settings for test_24647 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# Environment-driven configuration via django-environ.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
# NOTE(review): defaulting to ["*"] accepts any Host header -- acceptable
# behind a trusted proxy, otherwise set HOST explicitly in the environment.
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's X-Forwarded-Proto header to detect HTTPS requests;
# only safe when the app is always deployed behind such a proxy.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_24647.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_24647.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c33900bba020c0bda8c2bba886b7750ff883dd66 | 141545126466a00f32247dfa40e067ec049b0fa4 | /Programming Fundamentals Python/07 Data Types and Variables/Pounds to Dollars.py | 2887f8e7bcfe398e6a0111998e1cf147e5f3e69a | [] | no_license | RadkaValkova/SoftUni-Web-Developer | 83314367172a18f001e182b4e57f7ca0502ad1fc | 61d3414373498bb6009ae70e8d17f26cd2d88ea5 | refs/heads/main | 2023-06-01T02:11:06.606370 | 2021-06-29T19:39:19 | 2021-06-29T19:39:19 | 325,611,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | british_pound_value = float(input())
usd = british_pound_value * 1.31
print(f'{usd:.3f}') | [
"radka_valkova@abv.bg"
] | radka_valkova@abv.bg |
c87233b316d1cde9beb727f774ac0a744257d918 | e1ffebca6a0f185663c779462e3ca27866f557b8 | /week1/Project1/api/urls.py | 6d299dbdab8ff4ebcaebb95a5661e680c6a213b5 | [] | no_license | asselyer/Backend2019 | d8d85d7850261880fe4aeef9092b0a8c7b1b6767 | ec5931e2bd22ec62e68592a4199c00184f4dacc3 | refs/heads/master | 2020-07-24T13:38:21.246351 | 2019-12-04T03:16:27 | 2019-12-04T03:16:27 | 207,944,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from django.urls import path, include
from api import views
urlpatterns = [
    # Project collection and single-project detail endpoints.
    path('projects/', views.ProjectList.as_view(), name='project_list'),
    path('projects/<int:pk>', views.ProjectDetail.as_view(), name='project_detail'),
    # Task collection and single-task detail endpoints.
    path('tasks/', views.TaskList.as_view(), name='task_list'),
    path('tasks/<int:pk>', views.TaskDetail.as_view(), name='task_detail'),
    # NOTE(review): this route has no ``name=`` -- confirm whether reverse()
    # lookups are needed for it.
    path('block/', views.BlockList.as_view())
]
"asel.yer98@gmail.com"
] | asel.yer98@gmail.com |
49ffa16b8391b7ab4ce364bdc4d7a5b5e37759e8 | d73e73bad3d797bbfa82971ac44e424e7e163fe6 | /mimic_learner/comparsion_learners/cart_scratch.py | 257f6261c6c76b2d76a50064ea52f7e72e36c284 | [] | no_license | Guiliang/statistical-DRL-interpreter | 80bdd885c1d3029a48654118667c0071f3e4a8d8 | 033d79e45579fb7ddd824c8e04d245d0285741f3 | refs/heads/master | 2023-07-24T21:50:56.323738 | 2021-01-04T05:28:20 | 2021-01-04T05:28:20 | 241,288,171 | 1 | 0 | null | 2023-07-23T06:00:02 | 2020-02-18T06:22:52 | Python | UTF-8 | Python | false | false | 6,793 | py | import numpy as np
class CART(object):
    """Minimal CART (Classification And Regression Tree).

    Every node of the tree is itself a ``CART`` instance: the public
    estimator holds the hyper-parameters and the fitted ``root`` node,
    while internal nodes only use the per-node fields (feature, threshold,
    left/right, label, ...).

    Parameters
    ----------
    tree : 'cls' for classification, anything else for regression.
    criterion : 'gini' or 'entropy' for classification; regression always
        uses 'mse' regardless of this value.
    prune : 'depth' (cut splits at/below max_depth) or 'impurity' (cut
        splits whose weighted gain is below min_criterion).
    """

    def __init__(self, tree='cls', criterion='gini', prune='depth', max_depth=4, min_criterion=0.05):
        # Per-node state.
        self.feature = None      # split column; None means this node is a leaf
        self.label = None        # majority class (cls) or mean target (reg)
        self.n_samples = None    # number of training rows that reached this node
        self.gain = None         # impurity gain achieved by the chosen split
        self.left = None         # subtree for feature <= threshold
        self.right = None        # subtree for feature > threshold
        self.threshold = None
        self.depth = 0
        self.root = None
        # Estimator-level hyper-parameters.
        self.criterion = criterion
        self.prune = prune
        self.max_depth = max_depth
        self.min_criterion = min_criterion
        self.tree = tree

    def fit(self, features, target):
        """Grow a tree on (features, target), then prune it in place."""
        self.root = CART()
        if (self.tree == 'cls'):
            self.root._grow_tree(features, target, self.criterion)
        else:
            # Regression always uses mean-squared-error impurity.
            self.root._grow_tree(features, target, 'mse')
        self.root._prune(self.prune, self.max_depth, self.min_criterion, self.root.n_samples)

    def predict(self, features):
        """Return one prediction per row of ``features``."""
        return np.array([self.root._predict(f) for f in features])

    def print_tree(self):
        """Pretty-print the fitted tree to stdout."""
        self.root._show_tree(0, ' ')

    def _grow_tree(self, features, target, criterion='gini'):
        """Recursively grow the subtree rooted at this node."""
        self.n_samples = features.shape[0]

        # Pure node: nothing left to split.
        if len(np.unique(target)) == 1:
            self.label = target[0]
            return

        best_gain = 0.0
        best_feature = None
        best_threshold = None

        # Leaf prediction in case this node stays (or is pruned into) a leaf.
        if criterion in {'gini', 'entropy'}:
            self.label = max([(c, len(target[target == c])) for c in np.unique(target)], key=lambda x: x[1])[0]
        else:
            self.label = np.mean(target)

        impurity_node = self._calc_impurity(criterion, target)

        # Exhaustive search over all features and midpoint thresholds.
        for col in range(features.shape[1]):
            feature_level = np.unique(features[:, col])
            thresholds = (feature_level[:-1] + feature_level[1:]) / 2.0

            for threshold in thresholds:
                target_l = target[features[:, col] <= threshold]
                impurity_l = self._calc_impurity(criterion, target_l)
                n_l = float(target_l.shape[0]) / self.n_samples

                target_r = target[features[:, col] > threshold]
                impurity_r = self._calc_impurity(criterion, target_r)
                n_r = float(target_r.shape[0]) / self.n_samples

                impurity_gain = impurity_node - (n_l * impurity_l + n_r * impurity_r)
                if impurity_gain > best_gain:
                    best_gain = impurity_gain
                    best_feature = col
                    best_threshold = threshold

        # Bug fix: if no candidate split improves impurity (e.g. every
        # feature column is constant while the target is not), the original
        # fell through to _split_tree with feature=None and crashed on
        # ``features[:, None] <= None``.  Keep the node as a leaf instead.
        if best_feature is None:
            return

        self.feature = best_feature
        self.gain = best_gain
        self.threshold = best_threshold
        self._split_tree(features, target, criterion)

    def _split_tree(self, features, target, criterion):
        """Partition on (self.feature, self.threshold); grow both children."""
        features_l = features[features[:, self.feature] <= self.threshold]
        target_l = target[features[:, self.feature] <= self.threshold]
        self.left = CART()
        self.left.depth = self.depth + 1
        self.left._grow_tree(features_l, target_l, criterion)

        features_r = features[features[:, self.feature] > self.threshold]
        target_r = target[features[:, self.feature] > self.threshold]
        self.right = CART()
        self.right.depth = self.depth + 1
        self.right._grow_tree(features_r, target_r, criterion)

    def _calc_impurity(self, criterion, target):
        """Impurity of ``target`` under 'gini', 'mse' or entropy (default)."""
        if criterion == 'gini':
            return 1.0 - sum(
                [(float(len(target[target == c])) / float(target.shape[0])) ** 2.0 for c in np.unique(target)])
        elif criterion == 'mse':
            return np.mean((target - np.mean(target)) ** 2.0)
        else:
            entropy = 0.0
            for c in np.unique(target):
                p = float(len(target[target == c])) / target.shape[0]
                if p > 0.0:
                    entropy -= p * np.log2(p)
            return entropy

    def _prune(self, method, max_depth, min_criterion, n_samples):
        """Bottom-up pruning: collapse split nodes into leaves per ``method``."""
        if self.feature is None:
            return

        self.left._prune(method, max_depth, min_criterion, n_samples)
        self.right._prune(method, max_depth, min_criterion, n_samples)

        pruning = False
        # 'impurity': drop splits whose weighted gain is below the cutoff,
        # but only once both children are leaves.
        if method == 'impurity' and self.left.feature is None and self.right.feature is None:
            if (self.gain * float(self.n_samples) / n_samples) < min_criterion:
                pruning = True
        # 'depth': drop any split at or below the depth limit.
        elif method == 'depth' and self.depth >= max_depth:
            pruning = True

        if pruning is True:
            self.left = None
            self.right = None
            self.feature = None

    def _predict(self, d):
        """Route one sample ``d`` down the tree; return the leaf label."""
        if self.feature is not None:
            if d[self.feature] <= self.threshold:
                return self.left._predict(d)
            else:
                return self.right._predict(d)
        else:
            return self.label

    def _show_tree(self, depth, cond):
        """Print this subtree, indented by ``depth`` and prefixed by ``cond``."""
        base = '    ' * depth + cond
        if self.feature is not None:
            print(base + 'if X[' + str(self.feature) + '] <= ' + str(self.threshold))
            self.left._show_tree(depth + 1, 'then ')
            self.right._show_tree(depth + 1, 'else ')
        else:
            print(base + '{value: ' + str(self.label) + ', samples: ' + str(self.n_samples) + '}')
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import tree as sktree
def classification_example():
    """Compare this CART implementation against sklearn on the iris data."""
    print('\n\nClassification Tree')
    data = load_iris()
    features, labels = data.data, data.target
    train_x, test_x, train_y, test_y = train_test_split(features, labels, random_state=42)

    our_tree = CART(tree='cls', criterion='entropy', prune='depth', max_depth=3)
    our_tree.fit(train_x, train_y)
    our_tree.print_tree()
    our_pred = our_tree.predict(test_x)
    print("This Classification Tree Prediction Accuracy: {}".format(sum(our_pred == test_y) / len(our_pred)))

    sk_clf = sktree.DecisionTreeClassifier(criterion='entropy').fit(train_x, train_y)
    sk_pred = sk_clf.predict(test_x)
    print("Sklearn Library Tree Prediction Accuracy: {}".format(sum(sk_pred == test_y) / len(our_pred)))
def regression_example():
    """Fit a noisy sine curve and compare against sklearn's regressor."""
    print('\n\nRegression Tree')
    # RNG call order is significant for reproducibility: rand(80,1),
    # rand(16), then one rand(1,1) per prediction below.
    rng = np.random.RandomState(1)
    inputs = np.sort(5 * rng.rand(80, 1), axis=0)
    targets = np.sin(inputs).ravel()
    targets[::5] += 3 * (0.5 - rng.rand(16))

    # Fit regression model
    our_tree = CART(tree='reg', criterion='mse', prune='depth', max_depth=2)
    our_tree.fit(inputs, targets)
    our_tree.print_tree()
    our_pred = our_tree.predict(np.sort(5 * rng.rand(1, 1), axis=0))
    print('This Regression Tree Prediction: {}'.format(our_pred))

    sk_reg = sktree.DecisionTreeRegressor(max_depth=3)
    sk_reg.fit(inputs, targets)
    sk_pred = sk_reg.predict(np.sort(5 * rng.rand(1, 1), axis=0))
    print('Sklearn Library Regression Tree Prediction: {}'.format(sk_pred))
| [
"gla68@sfu.ca"
] | gla68@sfu.ca |
c1a9d7614dee9add2fb785fc83ec7bedfbff2655 | 1419418226b6ba0f510649daaf62b71554cc2284 | /amatrice/make_gps.py | f3e7202422146a69bb69df5e38665b74a0eaa03f | [] | no_license | shineusn/mylife | 2ef48a777e39be2ef746c3dad16ea963d5b23e5e | 61dfa72d9047551746d26b7fe01fb5c2f1f0657a | refs/heads/master | 2020-03-22T13:44:42.422127 | 2018-02-13T18:09:43 | 2018-02-13T18:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from numpy import genfromtxt,sqrt
# Build a GPS station list and per-station .neu offset files for the
# Oct 26th event, keeping only stations whose horizontal offset reaches
# the threshold.  Offsets in the source file are in mm; outputs are in m.
out = '/Users/dmelgar/Amatrice2016/GPS/neu_Oct26th/'
stafile = '/Users/dmelgar/Amatrice2016/GPS/gps_Oct26th.sta'
threshold = 0.005  # In m
# Earlier runs read offsets.txt / the Oct 30th solution; only the
# Oct 26th GdL V1 solution is active now.
offsets_file = '/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat'
sta = genfromtxt(offsets_file, usecols=0, dtype='S')
lonlat = genfromtxt(offsets_file, usecols=[1, 2])
e = genfromtxt(offsets_file, usecols=4)
n = genfromtxt(offsets_file, usecols=6)
u = genfromtxt(offsets_file, usecols=8)
# Make station file
f = open(stafile, 'w')
for name, coords in zip(sta, lonlat):
    f.write('%s\t%.4f\t%.4f\n' % (name, coords[0], coords[1]))
f.close()
# Make neu files
for name, north, east, up in zip(sta, n, e, u):
    offset = sqrt((north / 1000.) ** 2 + (east / 1000.) ** 2)
    if offset >= threshold:
        f = open(out + name + '.neu', 'w')
        f.write('%.6f\n' % (north / 1000.))
        f.write('%.6f\n' % (east / 1000.))
        f.write('%.6f' % (up / 1000.))
        f.close()
"dmelgar@berkeley.edu"
] | dmelgar@berkeley.edu |
af5d3aef501bd267cbd20147636139c54f4ecdca | 42fe2827d14a82043ade9393beaedf53e22a69f5 | /bebop_ws/devel/lib/python2.7/dist-packages/mav_state_machine_msgs/msg/__init__.py | 577c4b1523728ce85c4360d739c38db39e97fb3c | [] | no_license | cjbanks/bebop-software-framework | a3714646545e9d7d71299a365814bc87437f5e14 | 7da1bbdef4e84aa0ed793cfaad9fe133959ebe21 | refs/heads/master | 2023-04-30T17:52:23.255302 | 2020-11-18T18:32:41 | 2020-11-18T18:32:41 | 368,626,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | /home/chris/bebop-software-framework/bebop_ws/devel/.private/mav_state_machine_msgs/lib/python2.7/dist-packages/mav_state_machine_msgs/msg/__init__.py | [
"Chewie_Alex@nder1"
] | Chewie_Alex@nder1 |
1463098d36cf7bdefe89981ed0bf1c123c701674 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03569/s302751603.py | 4c25973ce80b184d1269be33c4a741981577c714 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import sys
from collections import Counter
s = input()
n = len(s)
ns = []
no = []
for i in range(n):
if s[i] != "x":
ns.append(s[i])
no.append(i)
no = [-1] + no + [n]
m = len(no)
sa = []
for i in range(m-1):
sa.append(no[i+1] - no[i] - 1)
if ns != ns[::-1]:
print(-1)
sys.exit()
ans = 0
if m%2 == 1:
mm = m//2
te = no[mm]
ans = 0
for i in range(mm):
ans += abs(sa[i]-sa[m-2-i])
else:
mm = m//2
te = no[mm]
ans = 0
for i in range(mm-1):
ans += abs(sa[i]-sa[m-2-i])
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0a1bbb4c953fe1a11da5f00cdc77fb7901b5af27 | 75452de12ec9eea346e3b9c7789ac0abf3eb1d73 | /build/zircon/populate_zircon_public.py | 7c7d07c57d69742b00f01e9cb616b92c007143f3 | [
"BSD-3-Clause"
] | permissive | oshunter/fuchsia | c9285cc8c14be067b80246e701434bbef4d606d1 | 2196fc8c176d01969466b97bba3f31ec55f7767b | refs/heads/master | 2022-12-22T11:30:15.486382 | 2020-08-16T03:41:23 | 2020-08-16T03:41:23 | 287,920,017 | 2 | 2 | BSD-3-Clause | 2022-12-16T03:30:27 | 2020-08-16T10:18:30 | C++ | UTF-8 | Python | false | false | 4,949 | py | #!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
FUCHSIA_ROOT = os.path.dirname( # $root
os.path.dirname( # build
SCRIPT_DIR)) # zircon
ZIRCON_PUBLIC = os.path.join(FUCHSIA_ROOT, 'zircon', 'public')
EXPORT_TEMPLATE_FILE = os.path.join(SCRIPT_DIR, 'lib_template.gn')
TOOL_TEMPLATE_FILE = os.path.join(SCRIPT_DIR, 'tool_template.gn')
UNIFICATION_DIR = os.path.join(FUCHSIA_ROOT, 'build', 'unification')
MAPPINGS_FILE = os.path.join(UNIFICATION_DIR, 'zircon_library_mappings.json')
FORWARD_TEMPLATE_FILE = os.path.join(UNIFICATION_DIR,
'zircon_library_forward.gn')
DIRS = {
'lib': True,
'tool': False,
}
PUBLIC_DIRS = set(DIRS.keys())
MARKER = 'ONLY EDIT IT BY THAT NAME!'
def is_template(build_file):
    """Return True if ``build_file`` is one of our generated template links,
    recognized by the MARKER string embedded in the template text."""
    with open(build_file, 'r') as file:
        contents = file.read()
    return MARKER in contents
def has_sources(top_dir):
    # Whether directories under this top-level dir ('lib' or 'tool') may
    # contain hand-written BUILD.gn files, per the DIRS table above.
    return DIRS[top_dir]
def main():
    """Regenerate //zircon/public/{lib,tool}: create one directory per
    exported Zircon library/tool and per forwarding target, each holding a
    hard-linked template BUILD.gn.  Stale generated directories are removed;
    hand-written ("source") directories are left alone.

    Reads the legacy export list from the JSON file given as argv[1] and the
    forwarding mappings from MAPPINGS_FILE.  Returns 0 on success, 1 on a
    naming conflict.
    """
    with open(sys.argv[1]) as f:
        legacy_dirs = json.load(f)
    with open(MAPPINGS_FILE, 'r') as f:
        content = json.load(f)
    mapped_lib_dirs = dict([('lib/' + i['name'], i['label'])
                            for i in content])
    # Verify that we're not trying to create a forwarding target and an exported
    # library under the same alias.
    common_dirs = set(mapped_lib_dirs) & set(legacy_dirs)
    if common_dirs:
        print('The following paths cannot be both exports from Zircon and '
              'forwarding targets:')
        for dir in common_dirs:
            print('//zircon/public/' + dir)
        return 1
    # Create a data structure holding all generated paths.
    # Maps "top/name" -> the template file to hard-link as its BUILD.gn.
    all_dirs = {}
    for dir in legacy_dirs:
        top_dir = os.path.dirname(dir)
        if top_dir == 'tool':
            all_dirs[dir] = TOOL_TEMPLATE_FILE
        else:
            all_dirs[dir] = EXPORT_TEMPLATE_FILE
    for dir in mapped_lib_dirs:
        all_dirs[dir] = FORWARD_TEMPLATE_FILE
    # Regroup by top-level dir: top -> ([names], {name: template}).
    dirs = {}
    for dir, template in all_dirs.items():
        top_dir = os.path.dirname(dir)
        name = os.path.basename(dir)
        subdirs, templates = dirs.setdefault(top_dir, ([], {}))
        templates[name] = template
        dirs[top_dir] = (subdirs + [name], templates)
    assert set(dirs.keys()).issubset(PUBLIC_DIRS), (
        "%r from JSON should be a subset of %r" %
        (set(dirs.keys()), PUBLIC_DIRS))
    # lstat each template once; used below to detect whether an existing
    # BUILD.gn is already a hard link to the right template.
    stats = dict([(f, os.lstat(f))
                  for f in [EXPORT_TEMPLATE_FILE, FORWARD_TEMPLATE_FILE,
                            TOOL_TEMPLATE_FILE]])
    for top_dir in dirs:
        subdirs, templates = dirs[top_dir]
        top_dir_name = top_dir
        top_dir = os.path.join(ZIRCON_PUBLIC, top_dir)
        subdirs = set(subdirs)
        if not os.path.exists(top_dir):
            os.mkdir(top_dir)
        else:
            # Go over the existing contents of the directory.
            for existing in os.listdir(top_dir):
                existing_dir = os.path.join(top_dir, existing)
                if not os.path.isdir(existing_dir):
                    # Disregard files (e.g. .gitignore).
                    continue
                build_file = os.path.join(existing_dir, 'BUILD.gn')
                is_source = (has_sources(top_dir_name) and
                             os.path.exists(build_file) and
                             not is_template(build_file))
                if existing in subdirs:
                    if is_source:
                        print('%s cannot be both a source and generated' %
                              existing_dir)
                        return 1
                    # An existing directory might already have the link.
                    # If the link doesn't exist or doesn't match, make it.
                    template = templates[existing]
                    if not os.path.exists(build_file):
                        os.link(template, build_file)
                    elif not os.path.samestat(os.lstat(build_file),
                                              stats[template]):
                        os.remove(build_file)
                        os.link(template, build_file)
                    subdirs.remove(existing)
                else:
                    if not is_source:
                        # A stale directory that shouldn't exist any more.
                        shutil.rmtree(existing_dir)
        # Make and populate any directories that don't exist yet.
        for subdir in subdirs:
            template = templates[subdir]
            subdir = os.path.join(top_dir, subdir)
            os.mkdir(subdir)
            os.link(template, os.path.join(subdir, 'BUILD.gn'))
    return 0
if __name__ == '__main__':
sys.exit(main())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
7305f6254d6dca7956dbd70d1542a09a54f2670c | b6559661c06efc7e69096c4fedc08509545c2602 | /tests/test_dh_prime_generators/test_two_ton.py | 5e0dd6ee048d2e57478030c20a9579c501c1be2b | [
"MIT"
] | permissive | Zapix/mtpylon | ed29d8c67ab2b6b3fd8c6d84fc129e6e143600ff | b268a4e2d1bc641cace1962ea68de73c1156e44c | refs/heads/dev | 2023-06-27T09:13:20.268691 | 2021-08-05T20:08:18 | 2021-08-05T20:08:18 | 306,233,058 | 9 | 0 | MIT | 2021-08-05T20:07:23 | 2020-10-22T05:34:36 | Python | UTF-8 | Python | false | false | 9,108 | py | # -*- coding: utf-8 -*-
import pytest
from aioresponses import aioresponses # type: ignore
from mtpylon.dh_prime_generators.two_ton import generate, SAFE_PRIME_URL
two_ton_responses = [
{
'p': {
'human': '29,749,001,914,257,905,533,821,621,796,385,130,351,804,895,306,933,420,788,210,276,585,886,958,566,862,068,664,893,110,742,225,612,922,013,234,436,000,245,423,022,562,771,036,853,331,665,435,557,277,665,063,771,848,177,560,502,741,407,281,408,484,151,469,660,500,958,296,085,388,973,483,178,014,179,732,266,242,619,813,925,063,449,598,165,775,942,500,856,204,646,488,420,308,821,849,667,058,157,810,452,033,475,105,946,282,270,811,313,880,835,685,772,931,277,950,553,348,846,490,568,527,518,933,918,916,667,230,934,637,434,337,124,944,282,911,141,632,417,789,855,581,296,558,775,411,230,285,859,641,916,736,062,019,009,644,168,595,562,935,482,246,547,041,775,380,520,570,293,174,319,558,572,826,618,381,946,383,745,972,377,225,066,496,310,875,285,319,890,435,100,838,729,684,421,317,153,499,439,999,621,368,817,127,895,187', # noqa
'base10': '29749001914257905533821621796385130351804895306933420788210276585886958566862068664893110742225612922013234436000245423022562771036853331665435557277665063771848177560502741407281408484151469660500958296085388973483178014179732266242619813925063449598165775942500856204646488420308821849667058157810452033475105946282270811313880835685772931277950553348846490568527518933918916667230934637434337124944282911141632417789855581296558775411230285859641916736062019009644168595562935482246547041775380520570293174319558572826618381946383745972377225066496310875285319890435100838729684421317153499439999621368817127895187', # noqa
'base16': '0xeba8512c5eec4678332e1ba1f3b68722fe2fe9580fd3a282736e6ac39ea533dbb9a5a7a3cf6ea9cf7d189cc549f2d947a10fd05e6bf0a32b4d95d68b99527d0f38d40d7af4fdaa89d3c4afc281486cd34603902a109360349a4d6dd785be0e373126187dbe41031b656ee1a9efcc7840915a713fece90548cae930d8891433bea7381e226b60b6e301ee13b97bc32106e459f429e1b32e10962d237576e4f3570b52d2227b2baea3e81be407ca825b7b00110abd0f59587562c79ad6c3fb90c89fe53ded3a16786ebffdc14dea031d6f8971d778161d3340760d4588a63afc6271e4e70ad1f87b968f28280abed7e43c2dcc077e1f3f3bf5d80714635a05b093' # noqa
},
'q': {
'human': '14,874,500,957,128,952,766,910,810,898,192,565,175,902,447,653,466,710,394,105,138,292,943,479,283,431,034,332,446,555,371,112,806,461,006,617,218,000,122,711,511,281,385,518,426,665,832,717,778,638,832,531,885,924,088,780,251,370,703,640,704,242,075,734,830,250,479,148,042,694,486,741,589,007,089,866,133,121,309,906,962,531,724,799,082,887,971,250,428,102,323,244,210,154,410,924,833,529,078,905,226,016,737,552,973,141,135,405,656,940,417,842,886,465,638,975,276,674,423,245,284,263,759,466,959,458,333,615,467,318,717,168,562,472,141,455,570,816,208,894,927,790,648,279,387,705,615,142,929,820,958,368,031,009,504,822,084,297,781,467,741,123,273,520,887,690,260,285,146,587,159,779,286,413,309,190,973,191,872,986,188,612,533,248,155,437,642,659,945,217,550,419,364,842,210,658,576,749,719,999,810,684,408,563,947,593', # noqa
'base10': '14874500957128952766910810898192565175902447653466710394105138292943479283431034332446555371112806461006617218000122711511281385518426665832717778638832531885924088780251370703640704242075734830250479148042694486741589007089866133121309906962531724799082887971250428102323244210154410924833529078905226016737552973141135405656940417842886465638975276674423245284263759466959458333615467318717168562472141455570816208894927790648279387705615142929820958368031009504822084297781467741123273520887690260285146587159779286413309190973191872986188612533248155437642659945217550419364842210658576749719999810684408563947593', # noqa
'base16': '0x75d428962f76233c19970dd0f9db43917f17f4ac07e9d14139b73561cf5299eddcd2d3d1e7b754e7be8c4e62a4f96ca3d087e82f35f85195a6caeb45cca93e879c6a06bd7a7ed544e9e257e140a43669a301c8150849b01a4d26b6ebc2df071b98930c3edf20818db2b770d4f7e63c2048ad389ff67482a46574986c448a19df539c0f1135b05b7180f709dcbde19083722cfa14f0d997084b1691babb7279ab85a969113d95d751f40df203e5412dbd8008855e87acac3ab163cd6b61fdc8644ff29ef69d0b3c375ffee0a6f5018eb7c4b8ebbc0b0e99a03b06a2c4531d7e3138f2738568fc3dcb479414055f6bf21e16e603bf0f9f9dfaec038a31ad02d849' # noqa
},
'g': {
'human': 3,
'base10': 3,
'base16': '0x3'
}
},
{
'p': {
'human': '31,161,765,328,438,092,131,704,438,370,975,398,580,821,692,130,803,368,431,778,929,149,884,882,248,080,338,555,754,456,437,415,514,685,332,636,535,971,759,564,666,992,135,039,210,523,666,002,908,162,177,171,251,196,664,376,275,755,296,274,443,811,412,410,965,741,346,783,054,875,939,808,046,695,924,233,789,224,571,587,114,748,376,481,638,193,324,834,547,206,610,016,759,562,172,788,691,963,510,153,251,526,148,119,308,207,358,866,434,515,043,003,799,698,415,624,427,104,276,896,584,373,774,133,046,237,369,191,999,563,020,638,610,697,530,093,120,097,208,325,279,667,654,115,732,048,868,585,544,285,213,908,808,386,200,627,388,041,599,348,031,948,308,083,996,881,367,198,287,970,302,550,361,092,400,932,274,926,611,302,353,988,847,013,818,884,979,881,157,440,943,643,923,535,392,279,846,293,558,893,212,997,610,073,745,979', # noqa
'base10': '31161765328438092131704438370975398580821692130803368431778929149884882248080338555754456437415514685332636535971759564666992135039210523666002908162177171251196664376275755296274443811412410965741346783054875939808046695924233789224571587114748376481638193324834547206610016759562172788691963510153251526148119308207358866434515043003799698415624427104276896584373774133046237369191999563020638610697530093120097208325279667654115732048868585544285213908808386200627388041599348031948308083996881367198287970302550361092400932274926611302353988847013818884979881157440943643923535392279846293558893212997610073745979', # noqa
'base16': '0xf6d94667ddff69f32c097d92e23b05aa244a988fc9b11152a12bda50fe1f8ed8c7add62e90f4417ea806f4fd580644c5cbb322403578bfc285b687036496c71237e408c4f7b9fdc7da23ac07557a63fe1afe9e866b19d332578db4bea86b652a906fc649a964d9f3a540ce0d00df26198841f1b29d60c3afbed57d584393d4898e3eeb4493bc27cddf2a9f135dde03f5e2d83ab4abd69ddc4c8e9ab2716fc5bc043b735ede391f8392ce8fca92c889f7ec9428e794c05c7ea86f7f4482b883873086ebf9628144fe184d1478155e59eb52d14c6c03ee5be64822a6587da245be1a2dd0d402f11e71e03d9e2531e3192e29271feb819ee2773b212af88407ea3b' # noqa
},
'q': {
'human': '15,580,882,664,219,046,065,852,219,185,487,699,290,410,846,065,401,684,215,889,464,574,942,441,124,040,169,277,877,228,218,707,757,342,666,318,267,985,879,782,333,496,067,519,605,261,833,001,454,081,088,585,625,598,332,188,137,877,648,137,221,905,706,205,482,870,673,391,527,437,969,904,023,347,962,116,894,612,285,793,557,374,188,240,819,096,662,417,273,603,305,008,379,781,086,394,345,981,755,076,625,763,074,059,654,103,679,433,217,257,521,501,899,849,207,812,213,552,138,448,292,186,887,066,523,118,684,595,999,781,510,319,305,348,765,046,560,048,604,162,639,833,827,057,866,024,434,292,772,142,606,954,404,193,100,313,694,020,799,674,015,974,154,041,998,440,683,599,143,985,151,275,180,546,200,466,137,463,305,651,176,994,423,506,909,442,489,940,578,720,471,821,961,767,696,139,923,146,779,446,606,498,805,036,872,989', # noqa
'base10': '15580882664219046065852219185487699290410846065401684215889464574942441124040169277877228218707757342666318267985879782333496067519605261833001454081088585625598332188137877648137221905706205482870673391527437969904023347962116894612285793557374188240819096662417273603305008379781086394345981755076625763074059654103679433217257521501899849207812213552138448292186887066523118684595999781510319305348765046560048604162639833827057866024434292772142606954404193100313694020799674015974154041998440683599143985151275180546200466137463305651176994423506909442489940578720471821961767696139923146779446606498805036872989', # noqa
'base16': '0x7b6ca333eeffb4f99604bec9711d82d512254c47e4d888a95095ed287f0fc76c63d6eb17487a20bf54037a7eac032262e5d991201abc5fe142db4381b24b63891bf204627bdcfee3ed11d603aabd31ff0d7f4f43358ce9992bc6da5f5435b2954837e324d4b26cf9d2a06706806f930cc420f8d94eb061d7df6abeac21c9ea44c71f75a249de13e6ef954f89aeef01faf16c1d5a55eb4eee26474d5938b7e2de021db9af6f1c8fc1c96747e5496444fbf64a1473ca602e3f5437bfa2415c41c3984375fcb140a27f0c268a3c0aaf2cf5a968a63601f72df32411532c3ed122df0d16e86a01788f38f01ecf1298f18c9714938ff5c0cf713b9d90957c4203f51d', # noqa
},
'g': {
'human': 3,
'base10': 3,
'base16': '0x3'
}
},
]
@pytest.mark.asyncio
async def test_generate():
with aioresponses() as m:
for payload in two_ton_responses:
m.get(SAFE_PRIME_URL, status=200, payload=payload)
gen = generate()
for payload in two_ton_responses:
expected_value = int(payload['p']['base10'])
assert await gen.asend(None) == expected_value
| [
"zap.aibulatov@gmail.com"
] | zap.aibulatov@gmail.com |
de63cc8f0fc0605459e386f4e4681be42967ca58 | 93f47ba04fc18c4e537f0a48fe6232e2a89a4d30 | /examples/adspygoogle/adwords/v201406/campaign_management/add_experiment.py | 0016c83287c0eebaeadcbcc187dafc3aba38b067 | [
"Apache-2.0"
] | permissive | jasonshih/googleads-python-legacy-lib | c56dc52a1dab28b9de461fd5db0fcd6020b84a04 | 510fad41ecf986fe15258af64b90f99a96dc5548 | refs/heads/master | 2021-04-30T22:12:12.900275 | 2015-03-06T15:35:21 | 2015-03-06T15:35:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an experiment using a query percentage of 10, which
defines what fraction of auctions should go to the control split (90%) vs. the
experiment split (10%), then adds experimental bid changes for an ad group, and
adds an experiment-only keyword. To get campaigns, run get_campaigns.py. To
get ad groups, run get_ad_groups.py. To get keywords, run get_keywords.py.
Tags: ExperimentService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import datetime
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.common import Utils
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, campaign_id, ad_group_id):
# Initialize appropriate service.
experiment_service = client.GetExperimentService(version='v201406')
ad_group_service = client.GetAdGroupService(version='v201406')
ad_group_criterion_service = client.GetAdGroupCriterionService(
version='v201406')
# Construct operations and add experiment.
tomorrow = datetime.datetime.now() + datetime.timedelta(1)
thirty_days = datetime.datetime.now() + datetime.timedelta(30)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Interplanetary Experiment #%s' % Utils.GetUniqueName(),
'queryPercentage': '10',
'startDateTime': tomorrow.strftime('%Y%m%d %H%M%S'),
# Optional fields.
'status': 'ENABLED',
'endDateTime': thirty_days.strftime('%Y%m%d %H%M%S')
}
}]
result = experiment_service.Mutate(operations)[0]
# Display results.
for experiment in result['value']:
print ('Experiment with name \'%s\' and id \'%s\' was added.'
% (experiment['name'], experiment['id']))
# Construct operations and update ad group.
operations = [{
'operator': 'SET',
'operand': {
'id': ad_group_id,
'experimentData': {
'xsi_type': 'AdGroupExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'MODIFIED',
'experimentBidMultipliers': {
'xsi_type': 'ManualCPCAdGroupExperimentBidMultipliers',
'maxCpcMultiplier': {
'multiplier': '0.5'
}
}
}
}
}]
result = ad_group_service.Mutate(operations)[0]
# Display results.
for ad_group in result['value']:
print ('Ad group with name \'%s\' and id \'%s\' was updated in the '
'experiment.' % (ad_group['name'], ad_group['id']))
# Construct operations and add ad group crierion.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group['id'],
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
},
'experimentData': {
'xsi_type': 'BiddableAdGroupCriterionExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'EXPERIMENT_ONLY'
}
}
}]
result = ad_group_criterion_service.Mutate(operations)[0]
# Display results.
for criterion in result['value']:
print ('Ad group criterion with ad group id \'%s\' and criterion '
'id \'%s\' was added to the experiment.'
% (criterion['adGroupId'], criterion['criterion']['id']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, campaign_id, ad_group_id)
| [
"msaniscalchi@google.com"
] | msaniscalchi@google.com |
19c43377525c88e3507724542a581786aad55373 | 6188f8ef474da80c9e407e8040de877273f6ce20 | /python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/test_asset_defs.py | b7b03f9b2586e8537bfca4c70fd41ec34123525b | [
"Apache-2.0"
] | permissive | iKintosh/dagster | 99f2a1211de1f3b52f8bcf895dafaf832b999de2 | 932a5ba35263deb7d223750f211c2ddfa71e6f48 | refs/heads/master | 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 | Apache-2.0 | 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null | UTF-8 | Python | false | false | 5,327 | py | import pytest
import responses
from dagster import AssetKey, DagsterStepOutputNotFoundError
from dagster._legacy import build_assets_job
from dagster_fivetran import fivetran_resource
from dagster_fivetran.asset_defs import build_fivetran_assets
from dagster_fivetran.resources import (
FIVETRAN_API_BASE,
FIVETRAN_API_VERSION_PATH,
FIVETRAN_CONNECTOR_PATH,
)
from .utils import (
DEFAULT_CONNECTOR_ID,
get_sample_connector_response,
get_sample_connector_schema_config,
get_sample_sync_response,
get_sample_update_response,
)
def test_fivetran_asset_keys():
ft_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID, destination_tables=["x.foo", "y.bar"]
)
assert ft_assets[0].keys == {AssetKey(["x", "foo"]), AssetKey(["y", "bar"])}
@pytest.mark.parametrize(
"group_name,expected_group_name",
[
(None, "default"),
("my_group_name", "my_group_name"),
],
)
def test_fivetran_group_label(group_name, expected_group_name):
ft_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID,
destination_tables=["x.foo", "y.bar"],
group_name=group_name,
)
group_names = set(ft_assets[0].group_names_by_key.values())
assert len(group_names) == 1
assert list(group_names)[0] == expected_group_name
@pytest.mark.parametrize("schema_prefix", ["", "the_prefix"])
@pytest.mark.parametrize(
"tables,should_error",
[
([], False),
(["schema1.tracked"], False),
(["schema1.tracked", "schema2.tracked"], False),
(["does.not_exist"], True),
(["schema1.tracked", "does.not_exist"], True),
],
)
def test_fivetran_asset_run(tables, should_error, schema_prefix):
ft_resource = fivetran_resource.configured({"api_key": "foo", "api_secret": "bar"})
final_data = {"succeeded_at": "2021-01-01T02:00:00.0Z"}
api_prefix = f"{FIVETRAN_API_BASE}/{FIVETRAN_API_VERSION_PATH}{FIVETRAN_CONNECTOR_PATH}{DEFAULT_CONNECTOR_ID}"
if schema_prefix:
tables = [f"{schema_prefix}_{t}" for t in tables]
fivetran_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID,
destination_tables=tables,
poll_interval=0.1,
poll_timeout=10,
)
# expect the multi asset to have one asset key and one output for each specified asset key
assert fivetran_assets[0].keys == {AssetKey(table.split(".")) for table in tables}
assert len(fivetran_assets[0].op.output_defs) == len(tables)
fivetran_assets_job = build_assets_job(
name="fivetran_assets_job",
assets=fivetran_assets,
resource_defs={"fivetran": ft_resource},
)
with responses.RequestsMock() as rsps:
rsps.add(rsps.PATCH, api_prefix, json=get_sample_update_response())
rsps.add(rsps.POST, f"{api_prefix}/force", json=get_sample_sync_response())
# connector schema
rsps.add(
rsps.GET,
f"{api_prefix}/schemas",
json=get_sample_connector_schema_config(
tables=[
("schema1", "tracked"),
("schema1", "untracked"),
("schema2", "tracked"),
]
),
)
# initial state
rsps.add(
rsps.GET,
api_prefix,
json=get_sample_connector_response(),
)
final_json = get_sample_connector_response(data=final_data)
if schema_prefix:
final_json["data"]["config"]["schema_prefix"] = schema_prefix
# final state will be updated
rsps.add(rsps.GET, api_prefix, json=final_json)
if should_error:
with pytest.raises(DagsterStepOutputNotFoundError):
fivetran_assets_job.execute_in_process()
else:
result = fivetran_assets_job.execute_in_process()
assert result.success
# make sure we only have outputs for the explicit asset keys
outputs = [
event
for event in result.events_for_node(f"fivetran_sync_{DEFAULT_CONNECTOR_ID}")
if event.event_type_value == "STEP_OUTPUT"
]
assert len(outputs) == len(tables)
# make sure we have asset materializations for all the schemas/tables that were actually sync'd
asset_materializations = [
event
for event in result.events_for_node(f"fivetran_sync_{DEFAULT_CONNECTOR_ID}")
if event.event_type_value == "ASSET_MATERIALIZATION"
]
assert len(asset_materializations) == 3
found_asset_keys = set(
mat.event_specific_data.materialization.asset_key for mat in asset_materializations
)
if schema_prefix:
assert found_asset_keys == {
AssetKey(["the_prefix_schema1", "tracked"]),
AssetKey(["the_prefix_schema1", "untracked"]),
AssetKey(["the_prefix_schema2", "tracked"]),
}
else:
assert found_asset_keys == {
AssetKey(["schema1", "tracked"]),
AssetKey(["schema1", "untracked"]),
AssetKey(["schema2", "tracked"]),
}
| [
"noreply@github.com"
] | iKintosh.noreply@github.com |
67dbe2876b4fdc551b2fc9988b78fa0932c92363 | 8067ca3d96d00080df5d54aa5bc2ec33b7fc3035 | /Hexagonal Grid.py | bae5ce1e331b29f864651624619e26579cdf6bc1 | [
"Apache-2.0"
] | permissive | Ashutosh-gupt/HackerRankAlgorithms | 9620bd12c66a9f26f08949a292b3baad79387227 | 439bf2e31fd395d19d40f79e969153e50e5358b5 | refs/heads/master | 2022-12-24T18:56:53.263797 | 2016-10-05T03:14:07 | 2016-10-05T03:14:07 | 376,810,303 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | # -*- coding: utf-8 -*-
"""
You are given a hexagonal grid of size 2xN. Your task is to construct the grid with 2x1 dominoes. The dominoes can be
arranged in any of the three orientations shown below. To add to the woes, certain cells of the hexogonal grid are
blackened i.e., no domino can occupy that cell. Can you construct such a hexagonal grid?
"""
__author__ = 'Danyang'
class Solution(object):
def __init__(self):
self.delta = [(0, 1), (1, 0), (1, -1)] # dominoes delta, coordinate: x downward, y rightward,
# need consistent directions
def solve(self, cipher):
"""
recursive solution, brute force, starting from top left
:param cipher: the cipher
"""
ret = self.rec(cipher)
if ret:
return "YES"
else:
return "NO"
def rec(self, grid):
changed = False
m = len(grid)
n = len(grid[0])
for i in xrange(m):
for j in xrange(n):
if not changed: # control the start from top, left
if grid[i][j] == 0:
changed = True
grid[i][j] = 1
for d in self.delta:
i2 = i + d[0]
j2 = j + d[1]
if 0 <= i2 < m and 0 <= j2 < n and grid[i2][j2] == 0:
grid[i2][j2] = 1
if self.rec(grid):
return True
grid[i2][j2] = 0
grid[i][j] = 0
if not changed:
return True
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
solution = Solution()
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
int(f.readline().strip())
cipher = []
for _ in xrange(2):
cipher.append(map(int, list(f.readline().strip())))
# solve
s = "%s\n" % (solution.solve(cipher))
print s,
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
059e84c2f1ff6af24c13aa2c403890209360ddbc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /WLKF79mxKnhjtrFRB_13.py | 19738ce84620bcbc808363c11292586b2032ab08 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py |
def is_good_match(lst):
if len(lst) % 2 != 0:
return "bad match"
lst1 = [x for x in lst[0::2]]
lst2 = [x for x in lst[1::2]]
return [x + y for x,y in zip(lst1, lst2)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
065966e1fbabe141ee422c8cc29b6acefaf67a49 | 3545c3a5ede04aeb229c3da9792f1430959bbb0e | /BLOGGER/users/forms.py | f0c26fca518965f8952077f9858abb3d3c76ff61 | [] | no_license | Gourav2000/DJ3 | 6050315e4a65501b3f59617ad8bf174fbdaa8074 | bab01fa5fda0f8c274ed9e75d32306ff8d087355 | refs/heads/master | 2022-12-01T02:47:06.222790 | 2020-07-13T09:00:56 | 2020-07-13T09:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
email=forms.EmailField();
class Meta:
model=User
fields=["username","email","password1",'password2'];
| [
"parajbhattacharjee123@gmail.com"
] | parajbhattacharjee123@gmail.com |
fef662ef6fd908b5b68e87d622023d04aa854e13 | 13f55c8fc102c64a8924d83579aeb0bd563daeb9 | /src/aria2p/cli/commands/add_magnet.py | d5e2d71da5711321591a2f8000b059a88e037584 | [
"ISC"
] | permissive | wqcsim/aria2p | e67b8b7d5c5afffc22d3728d3f8c89e5b24bfc29 | 6cdc9a1ef5ed0413fffa3be4885f4b5325177660 | refs/heads/master | 2023-07-26T20:02:05.134854 | 2021-08-26T20:47:20 | 2021-08-26T20:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | """Command to add magnets."""
import sys
from typing import List
from aria2p.api import API
from aria2p.utils import read_lines
def add_magnets(
api: API,
uris: List[str] = None,
from_file: str = None,
options: dict = None,
position: int = None,
) -> int:
"""
Add magnet subcommand.
Arguments:
api: The API instance to use.
uris: The URIs of the magnets.
from_file: Path to the file to read uris from.
options: String of aria2c options to add to download.
position: Position to add new download in the queue.
Returns:
int: Always 0.
"""
ok = True
if not uris:
uris = []
if from_file:
try:
uris.extend(read_lines(from_file))
except OSError:
print(f"Cannot open file: {from_file}", file=sys.stderr)
ok = False
for uri in uris:
new_download = api.add_magnet(uri, options=options, position=position)
print(f"Created download {new_download.gid}")
return 0 if ok else 1
| [
"pawamoy@pm.me"
] | pawamoy@pm.me |
9f8044eff2a26ebffb2ef7e386df954dd5e218e3 | d0a2ff39d48dbcf6b019c7c7530bcda1a398e2be | /python_for_data_analysis/Chapter_2/chapter2.py | a60592d6087c79b204769f0779d3261e7e4605b2 | [] | no_license | foxcodenine/books | 2711fd8be596bc7fcbd4c00d6a0573acb29dee3f | 100624b41484f853ab3e850fb33d99d0dd65d4f3 | refs/heads/master | 2023-03-08T05:54:18.270243 | 2022-06-21T19:22:40 | 2022-06-21T19:22:40 | 205,091,163 | 0 | 0 | null | 2023-03-05T12:57:12 | 2019-08-29T05:57:41 | JavaScript | UTF-8 | Python | false | false | 1,689 | py | # IPYTHON $ JUPYTER NOTEBOOK
# from ipython shell you can do #run to run it:
# %run hello_world.py
# ______________________________________________________________________
# ipython use pretty-print.
from numpy.random import randn
data = {i: randn() for i in range(7)}
# ______________________________________________________________________
# to open jupyter notebook enter j.. n.. while in conda base env.
# $ jupyter notebook
# ______________________________________________________________________
# Tab Completion
_secret_key = 'xjfjhsdbfjvhbsdjbfv'
# _<Tab> <- auto comlete variables
# _secret_ket.<Tab> <- auto comlete function
# path = '/home/foxcodenine/git/'<Tab> <- auto comlete path
# %run ch<Tab> <- combined with %run
# ______________________________________________________________________
# Introspection (using ?)
b = [1, 2, 3]
# b?
# print?
# ------------------------------
def add_numbers(a, b):
'''
Add two numbers together.
Returns
_______
the sum : tye of arguments
'''
return a + b
# add_numbers? <- shows docstring
# add_numbers?? <- shows docstring & source code
# ------------------------------
# also combined with * it will show all names matching the wildcard:
# >> import numpy as np
# >> np.*load*?
# np.__loader__
# np.load
# np.loads
# np.loadtxt
# ______________________________________________________________________
# The %run Command
# %run script_test.py
# %run -i script_test.py <-scripy file can assess variables aleady defined in ipython
print(a) | [
"foxcode9@gmail.com"
] | foxcode9@gmail.com |
d14bab7577070de6f6d2a27551adc98ce5c40c9f | a663eb28d9c5e05d505a94aba511649b68e951cf | /pytype/tests/test_attr.py | f25b8569123f084b7e28653c08ae1bcd5c3a0fc2 | [
"Apache-2.0",
"MIT"
] | permissive | CyberFlameGO/pytype | 603240757291e066aad4259ce0b93221d90a4710 | c8cbeea997634455b5abcb27c76c58aa0dfc25ae | refs/heads/master | 2023-07-02T14:39:55.343774 | 2021-08-06T21:55:02 | 2021-08-06T21:55:02 | 393,544,239 | 0 | 0 | NOASSERTION | 2021-08-07T01:12:39 | 2021-08-07T01:10:49 | null | UTF-8 | Python | false | false | 19,895 | py | # Lint as: python3
"""Tests for attrs library in attr_overlay.py."""
from pytype.tests import test_base
class TestAttrib(test_base.TargetIndependentTest):
"""Tests for attr.ib."""
def test_basic(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
x = attr.ib()
y = attr.ib(type=int)
z = attr.ib(type=str)
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
attr: module
@attr.s
class Foo(object):
x: Any
y: int
z: str
def __init__(self, x, y: int, z: str) -> None: ...
""")
def test_interpreter_class(self):
ty = self.Infer("""
import attr
class A(object): pass
@attr.s
class Foo(object):
x = attr.ib(type=A)
""")
self.assertTypesMatchPytd(ty, """
attr: module
class A(object): ...
@attr.s
class Foo(object):
x: A
def __init__(self, x: A) -> None: ...
""")
def test_typing(self):
ty = self.Infer("""
from typing import List
import attr
@attr.s
class Foo(object):
x = attr.ib(type=List[int])
""")
self.assertTypesMatchPytd(ty, """
from typing import List
attr: module
@attr.s
class Foo(object):
x: List[int]
def __init__(self, x: List[int]) -> None: ...
""")
def test_union_types(self):
ty = self.Infer("""
from typing import Union
import attr
@attr.s
class Foo(object):
x = attr.ib(type=Union[str, int])
""")
self.assertTypesMatchPytd(ty, """
from typing import Union
attr: module
@attr.s
class Foo(object):
x: Union[str, int]
def __init__(self, x: Union[str, int]) -> None: ...
""")
def test_comment_annotations(self):
ty = self.Infer("""
from typing import Union
import attr
@attr.s
class Foo(object):
x = attr.ib() # type: Union[str, int]
y = attr.ib(type=str)
""")
self.assertTypesMatchPytd(ty, """
from typing import Union
attr: module
@attr.s
class Foo(object):
x: Union[str, int]
y: str
def __init__(self, x: Union[str, int], y: str) -> None: ...
""")
def test_late_annotations(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
x = attr.ib() # type: 'Foo'
y = attr.ib() # type: str
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
x: Foo
y: str
def __init__(self, x: Foo, y: str) -> None: ...
""")
def test_late_annotation_in_type(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
x = attr.ib(type='Foo')
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
x: Foo
def __init__(self, x: Foo) -> None: ...
""")
def test_classvar(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
x = attr.ib() # type: int
y = attr.ib(type=str)
z = 1 # class var, should not be in __init__
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
x: int
y: str
z: int
def __init__(self, x: int, y: str) -> None: ...
""")
def test_type_clash(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo(object): # invalid-annotation
x = attr.ib(type=str) # type: int
y = attr.ib(type=str, default="") # type: int
Foo(x="") # should not report an error
""")
def test_bad_type(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo(object):
x = attr.ib(type=10) # invalid-annotation
""")
def test_name_mangling(self):
# NOTE: Python itself mangles names starting with two underscores.
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
_x = attr.ib(type=int)
__y = attr.ib(type=int)
___z = attr.ib(type=int)
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
_x: int
_Foo__y: int
_Foo___z: int
def __init__(self, x: int, Foo__y: int, Foo___z: int) -> None: ...
""")
def test_defaults(self):
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo(object):
x = attr.ib(default=42)
y = attr.ib(type=int, default=6)
z = attr.ib(type=str, default=28) # annotation-type-mismatch[e]
a = attr.ib(type=str, default=None)
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
x: int
y: int
z: str
a: str
def __init__(self, x: int = ..., y: int = ..., z: str = ...,
a: str = ...) -> None: ...
""")
self.assertErrorRegexes(err, {"e": "annotation for z"})
def test_defaults_with_typecomment(self):
# Typecomments should override the type of default
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo(object):
x = attr.ib(default=42) # type: int
y = attr.ib(default=42) # type: str # annotation-type-mismatch[e]
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class Foo(object):
x: int
y: str
def __init__(self, x: int = ..., y: str = ...) -> None: ...
""")
self.assertErrorRegexes(err, {"e": "annotation for y"})
def test_factory_class(self):
ty = self.Infer("""
import attr
class CustomClass(object):
pass
@attr.s
class Foo(object):
x = attr.ib(factory=list)
y = attr.ib(factory=CustomClass)
""")
self.assertTypesMatchPytd(ty, """
from typing import List
attr: module
class CustomClass(object): ...
@attr.s
class Foo(object):
x: list
y: CustomClass
def __init__(self, x: list = ..., y: CustomClass = ...) -> None: ...
""")
def test_factory_function(self):
ty = self.Infer("""
import attr
class CustomClass(object):
pass
def unannotated_func():
return CustomClass()
@attr.s
class Foo(object):
x = attr.ib(factory=locals)
y = attr.ib(factory=unannotated_func)
""")
self.assertTypesMatchPytd(ty, """
from typing import Any, Dict
attr: module
class CustomClass(object): ...
def unannotated_func() -> CustomClass: ...
@attr.s
class Foo(object):
x: Dict[str, Any]
y: Any # b/64832148: the return type isn't inferred early enough
def __init__(self, x: Dict[str, object] = ..., y = ...) -> None: ...
""")
def test_verbose_factory(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
x = attr.ib(default=attr.Factory(list))
""")
self.assertTypesMatchPytd(ty, """
from typing import List
attr: module
@attr.s
class Foo(object):
x: list
def __init__(self, x: list = ...) -> None: ...
""")
def test_bad_factory(self):
errors = self.CheckWithErrors("""
import attr
@attr.s
class Foo(object):
x = attr.ib(default=attr.Factory(42)) # wrong-arg-types[e1]
y = attr.ib(factory=42) # wrong-arg-types[e2]
""")
self.assertErrorRegexes(errors, {"e1": r"Callable.*int",
"e2": r"Callable.*int"})
def test_default_factory_clash(self):
errors = self.CheckWithErrors("""
import attr
@attr.s
class Foo(object):
x = attr.ib(default=None, factory=list) # duplicate-keyword-argument[e]
""")
self.assertErrorRegexes(errors, {"e": r"default"})
  def test_takes_self(self):
    """attr.Factory(..., takes_self=True) still yields the callable's return type."""
    ty = self.Infer("""
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(default=attr.Factory(len, takes_self=True))
    """)
    self.assertTypesMatchPytd(ty, """
      attr: module
      @attr.s
      class Foo(object):
        x: int
        def __init__(self, x: int = ...) -> None: ...
    """)
  def test_default_none(self):
    """A bare default=None leaves the attribute type as Any."""
    ty = self.Infer("""
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(default=None)
    """)
    self.assertTypesMatchPytd(ty, """
      from typing import Any
      attr: module
      @attr.s
      class Foo(object):
        x: Any
        def __init__(self, x: Any = ...) -> None: ...
    """)
  def test_annotation_type(self):
    """A bare typing construct in type= (List) degrades to the plain class (list)."""
    ty = self.Infer("""
      from typing import List
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(type=List)
      x = Foo([]).x
    """)
    self.assertTypesMatchPytd(ty, """
      attr: module
      @attr.s
      class Foo(object):
        x: list
        def __init__(self, x: list) -> None: ...
      x: list
    """)
  def test_instantiation(self):
    """Attributes typed via type=, comments and factory= are all usable after init."""
    self.Check("""
      import attr
      class A(object):
        def __init__(self):
          self.w = None
      @attr.s
      class Foo(object):
        x = attr.ib(type=A)
        y = attr.ib() # type: A
        z = attr.ib(factory=A)
      foo = Foo(A(), A())
      foo.x.w
      foo.y.w
      foo.z.w
    """)
  def test_init(self):
    """init=False attributes are excluded from the __init__ call signature."""
    self.Check("""
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(init=False, default='') # type: str
        y = attr.ib() # type: int
      foo = Foo(42)
      foo.x
      foo.y
    """)
  def test_init_type(self):
    """init=False attributes keep their type but are dropped from __init__ in pytd."""
    ty = self.Infer("""
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(init=False, default='') # type: str
        y = attr.ib() # type: int
    """)
    self.assertTypesMatchPytd(ty, """
      attr: module
      @attr.s
      class Foo(object):
        x: str
        y: int
        def __init__(self, y: int) -> None: ...
    """)
  def test_init_bad_constant(self):
    """A non-bool constant for init= is flagged as wrong-arg-types."""
    err = self.CheckWithErrors("""
      import attr
      @attr.s
      class Foo(object):
        x = attr.ib(init=0) # wrong-arg-types[e]
    """)
    self.assertErrorRegexes(err, {"e": r"bool.*int"})
def test_init_bad_kwarg(self):
self.CheckWithErrors("""
import attr
@attr.s
class Foo:
x = attr.ib(init=__random__) # type: str # not-supported-yet
""")
  def test_class(self):
    """attr.make_class as a base class must not crash the analyzer."""
    self.assertNoCrash(self.Check, """
      import attr
      class X(attr.make_class('X', {'y': attr.ib(default=None)})):
        pass
    """)
  def test_base_class_attrs(self):
    """Attributes inherited from attrs base classes are constructable and readable."""
    self.Check("""
      import attr
      @attr.s
      class A(object):
        a = attr.ib() # type: int
      @attr.s
      class B(object):
        b = attr.ib() # type: str
      @attr.s
      class C(A, B):
        c = attr.ib() # type: int
      x = C(10, 'foo', 42)
      x.a
      x.b
      x.c
    """)
def test_base_class_attrs_type(self):
ty = self.Infer("""
import attr
@attr.s
class A(object):
a = attr.ib() # type: int
@attr.s
class B(object):
b = attr.ib() # type: str
@attr.s
class C(A, B):
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class A(object):
a: int
def __init__(self, a: int) -> None: ...
@attr.s
class B(object):
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
c: int
def __init__(self, a: int, b: str, c: int) -> None: ...
""")
def test_base_class_attrs_override_type(self):
ty = self.Infer("""
import attr
@attr.s
class A(object):
a = attr.ib() # type: int
@attr.s
class B(object):
b = attr.ib() # type: str
@attr.s
class C(A, B):
a = attr.ib() # type: str
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class A(object):
a: int
def __init__(self, a: int) -> None: ...
@attr.s
class B(object):
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
a: str
c: int
def __init__(self, b: str, a: str, c: int) -> None: ...
""")
def test_base_class_attrs_init(self):
ty = self.Infer("""
import attr
@attr.s
class A(object):
a = attr.ib(init=False) # type: int
@attr.s
class B(object):
b = attr.ib() # type: str
@attr.s
class C(A, B):
c = attr.ib() # type: int
""")
self.assertTypesMatchPytd(ty, """
attr: module
@attr.s
class A(object):
a: int
def __init__(self) -> None: ...
@attr.s
class B(object):
b: str
def __init__(self, b: str) -> None: ...
@attr.s
class C(A, B):
c: int
def __init__(self, b: str, c: int) -> None: ...
""")
def test_base_class_attrs_abstract_type(self):
ty = self.Infer("""
import attr
@attr.s
class Foo(__any_object__):
a = attr.ib() # type: int
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
attr: module
@attr.s
class Foo(Any):
a: int
def __init__(self, a: int) -> None: ...
""")
def test_method_decorators(self):
# Test for:
# - validator decorator does not throw an error
# - default decorator sets type if it isn't set
# - default decorator does not override type
ty, err = self.InferWithErrors("""
import attr
@attr.s
class Foo(object):
a = attr.ib()
b = attr.ib()
c = attr.ib(type=str) # annotation-type-mismatch[e]
@a.validator
def validate(self, attribute, value):
pass
@a.default
def default_a(self):
# type: (...) -> int
return 10
@b.default
def default_b(self):
return 10
@c.default
def default_c(self):
# type: (...) -> int
return 10
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
attr: module
@attr.s
class Foo(object):
a: int
b: int
c: str
def __init__(self, a: int = ..., b: int = ..., c: str = ...) -> None: ...
def default_a(self) -> int: ...
def default_b(self) -> int: ...
def default_c(self) -> int: ...
def validate(self, attribute, value) -> None: ...
""")
self.assertErrorRegexes(err, {"e": "annotation for c"})
def test_default_decorator_using_self(self):
# default_b refers to self.a; the method itself will be annotated with the
# correct type, but since this happens after the attribute defaults have
# been processed, b will have an inferred default types of `Any` rather than
# `int`.
#
# default_c refers to self.b, which has been inferred as `Any`, so default_c
# gets a type of `-> Any`, but since the type annotation for c is more
# specific it overrides that.
ty = self.Infer("""
import attr
@attr.s
class Foo(object):
a = attr.ib(default=42)
b = attr.ib()
c = attr.ib(type=str)
@b.default
def default_b(self):
return self.a
@c.default
def default_c(self):
return self.b
""")
self.assertTypesMatchPytd(ty, """
from typing import Any
attr: module
@attr.s
class Foo(object):
a: int
b: Any
c: str
def __init__(self, a: int = ..., b = ..., c: str = ...) -> None: ...
def default_b(self) -> int: ...
def default_c(self) -> Any: ...
""")
def test_repeated_default(self):
# Regression test for a bug where `params` and `calls` shared an underlying
# list object, so modifying one affected the type of the other.
self.Check("""
import attr
class Call(object):
pass
@attr.s
class Function(object):
params = attr.ib(factory=list)
calls = attr.ib(factory=list)
class FunctionMap(object):
def __init__(self, index):
self.fmap = {"": Function()}
def print_params(self):
for param in self.fmap[""].params:
print(param.name)
def add_call(self, call):
self.fmap[""].calls.append(Call())
""")
  def test_empty_factory(self):
    """A factory taken from an empty container yields Any, not an error."""
    ty = self.Infer("""
      import attr
      FACTORIES = []
      @attr.s
      class Foo:
        x = attr.ib(factory=FACTORIES[0])
      Foo(x=0) # should not be an error
    """)
    self.assertTypesMatchPytd(ty, """
      from typing import Any, List
      attr: module
      FACTORIES: List[nothing]
      @attr.s
      class Foo:
        x: Any
        def __init__(self, x = ...) -> None: ...
    """)
  def test_empty_tuple_default(self):
    """An empty tuple default infers the attribute as plain tuple."""
    ty = self.Infer("""
      import attr
      @attr.s
      class Foo:
        x = attr.ib(default=())
    """)
    self.assertTypesMatchPytd(ty, """
      attr: module
      @attr.s
      class Foo:
        x: tuple
        def __init__(self, x: tuple = ...) -> None: ...
    """)
def test_long_alias(self):
# Tests an [annotation-type-mismatch] bug that appears when the
# "serious-business alias" for attr.ib is used.
self.Check("""
import attr
@attr.s
class Foo:
x= attr.attrib(default=0) # type: int
""")
def test_typevar_in_type_arg(self):
self.Check("""
import attr
from typing import Callable, TypeVar
T = TypeVar('T')
@attr.s
class Foo:
f = attr.ib(type=Callable[[T], T])
assert_type(Foo(__any_object__).f(0), int)
""")
def test_bad_typevar_in_type_arg(self):
self.CheckWithErrors("""
import attr
from typing import TypeVar
T = TypeVar('T')
@attr.s
class Foo:
x = attr.ib(type=T) # invalid-annotation
""")
class TestAttrs(test_base.TargetIndependentTest):
  """Tests for attr.s."""

  def test_basic(self):
    """attr.s() called with parentheses behaves like the bare decorator."""
    ty = self.Infer("""
      import attr
      @attr.s()
      class Foo(object):
        x = attr.ib()
        y = attr.ib(type=int)
        z = attr.ib(type=str)
    """)
    self.assertTypesMatchPytd(ty, """
      from typing import Any
      attr: module
      @attr.s
      class Foo(object):
        x: Any
        y: int
        z: str
        def __init__(self, x, y: int, z: str) -> None: ...
    """)

  def test_no_init(self):
    """attr.s(init=False) suppresses __init__ generation."""
    ty = self.Infer("""
      import attr
      @attr.s(init=False)
      class Foo(object):
        x = attr.ib()
        y = attr.ib(type=int)
        z = attr.ib(type=str)
    """)
    self.assertTypesMatchPytd(ty, """
      from typing import Any
      attr: module
      @attr.s
      class Foo(object):
        x: Any
        y: int
        z: str
    """)

  def test_init_bad_constant(self):
    """A non-bool constant for init= on the decorator is wrong-arg-types."""
    err = self.CheckWithErrors("""
      import attr
      @attr.s(init=0) # wrong-arg-types[e]
      class Foo:
        pass
    """)
    self.assertErrorRegexes(err, {"e": r"bool.*int"})

  def test_bad_kwarg(self):
    """A non-constant decorator kwarg is not supported yet."""
    self.CheckWithErrors("""
      import attr
      @attr.s(init=__random__) # not-supported-yet
      class Foo:
        pass
    """)

  def test_depth(self):
    """The decorator is still processed under a shallow maximum_depth."""
    self.Check("""
      import attr
      def f():
        @attr.s
        class Foo:
          pass
    """, maximum_depth=1)

  def test_signature(self):
    """converter/on_setattr keywords are accepted without errors."""
    self.Check("""
      import attr
      @attr.s()
      class A:
        id = attr.ib(
            default='', converter=str,
            on_setattr=attr.setters.convert)
    """)
# Run this module's tests when executed directly.
test_base.main(globals(), __name__ == "__main__")
| [
"rechen@google.com"
] | rechen@google.com |
d3911e4c9f0cb924b32844dc531ca096d2def61c | 06c2bc496f9e285f06e4c3c71f14d5716f411d89 | /source/webapp/migrations/0007_auto_20210504_1243.py | 10b149d9e4237d16899b45386bb911ca394c7078 | [] | no_license | Beknasar/Coin_collection | 37a9e77cc00270dfcb9d0cb5916f985cec4c591d | 091860f98e7dc81d460ab0cbcb6ca1d7fdeffda8 | refs/heads/master | 2023-06-09T16:25:30.473134 | 2021-06-25T09:31:13 | 2021-06-25T09:31:13 | 365,229,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # Generated by Django 2.2 on 2021-05-04 12:43
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Ensure Coin.size and Coin.weight carry a MinValueValidator(0).

    Schema-compatible field alteration only (validators are enforced at the
    form/model level), so no data migration is required.
    """

    dependencies = [
        ('webapp', '0006_auto_20210503_1327'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coin',
            name='size',
            # verbose_name is Russian for "Size".
            field=models.FloatField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Размер'),
        ),
        migrations.AlterField(
            model_name='coin',
            name='weight',
            # verbose_name is Russian for "Weight".
            field=models.FloatField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Вес'),
        ),
    ]
| [
"680633@gmail.com"
] | 680633@gmail.com |
2726900ca710ad9b236b6180dcd2909b84e4d9e7 | 2b2af3a4924f74d0be10370f25121c015f37aba0 | /EVLA_pipe_statwt.py | 0def7573ebcbc6c607a05ab4529f67a8883704f1 | [] | no_license | tomr-stargazer/VLA_pipeline_custom | 98ba3cec311ccc8fa37e0d3424a3c97da1816669 | cf5720588ded8c7dd88cf5ecda4df82824183078 | refs/heads/master | 2020-04-02T08:01:25.574938 | 2018-10-22T23:54:22 | 2018-10-22T23:54:22 | 154,225,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# CALCULATE DATA WEIGHTS BASED ON ST. DEV. WITHIN EACH SPW
# use statwt
#
# NOTE(review): this is a CASA pipeline fragment executed inside a CASA
# session (via execfile); logprint, runtiming, pipeline_save, ms_active and
# the bare task-parameter names (vis, spw, ...) are globals provided by the
# surrounding pipeline -- assigning to them configures the statwt task.
logprint ("Starting EVLA_pipe_statwt.py", logfileout='logs/statwt.log')
time_list=runtiming('checkflag', 'start')
QA2_statwt='Pass'
logprint ("Calculate data weights per spw using statwt", logfileout='logs/statwt.log')
# Run on all calibrators
default(statwt)
vis=ms_active
dorms=False
fitspw=''
fitcorr=''
combine=''
minsamp=2
field=''
spw='7~38'
intent='*CALIBRATE*'
datacolumn='corrected'
statwt()
# Run on all targets
# set spw to exclude strong science spectral lines
default(statwt)
vis=ms_active
dorms=False
fitspw=''
fitcorr=''
combine=''
minsamp=2
field=''
spw='7~38'
intent='*TARGET*'
datacolumn='corrected'
statwt()
# Until we understand better the failure modes of this task, leave QA2
# score set to "Pass".
logprint ("QA2 score: "+QA2_statwt, logfileout='logs/statwt.log')
logprint ("Finished EVLA_pipe_statwt.py", logfileout='logs/statwt.log')
time_list=runtiming('targetflag', 'end')
pipeline_save()
######################################################################
| [
"t.rice90@gmail.com"
] | t.rice90@gmail.com |
5f80c5d519dbfcbcd70511459f0737348f4fd5b2 | 1566f14c336e67c77001b620df55f68f14b4e2c5 | /tests/bench.py | aeb901e3a963ea48ce486f3ec163f0a3bbe2d442 | [
"BSD-3-Clause"
] | permissive | wolfmetr/django-cacheops | 9645088c2f20f12aad955fc5ec7aaa2742ab4f41 | ce56df88f341c3a4c22a58d0cd0557e92838d89a | refs/heads/master | 2021-01-18T08:46:22.312893 | 2013-12-03T12:58:06 | 2013-12-03T12:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from cacheops import invalidate_obj
from cacheops.conf import redis_client
from .models import Category, Post
# Redis key under which the cached Category count() result is stored.
count_key = Category.objects.all()._cache_key(extra='count')
def invalidate_count():
    """Drop the cached count so the next do_count() is a cache miss."""
    redis_client.delete(count_key)
def do_count():
    """Issue count() through the caching queryset."""
    Category.objects.cache().count()
def do_count_no_cache():
    """Issue count() bypassing the cache (baseline measurement)."""
    Category.objects.nocache().count()
# Redis key under which the cached full-queryset fetch result is stored.
fetch_key = Category.objects.all()._cache_key()
def invalidate_fetch():
    """Drop the cached fetch result so the next do_fetch() is a cache miss."""
    redis_client.delete(fetch_key)
def do_fetch():
    """Materialize the full queryset through the cache."""
    list(Category.objects.cache().all())
def do_fetch_no_cache():
    """Materialize the full queryset bypassing the cache (baseline)."""
    list(Category.objects.nocache().all())
def do_fetch_construct():
    """Only build the queryset object -- measures construction overhead."""
    Category.objects.all()
def prepare_obj():
    """Fetch a fixture object (pk=1) for the invalidate/save benchmarks."""
    return Category.objects.cache().get(pk=1)
def do_invalidate_obj(obj):
    """Invalidate all cached querysets touching ``obj``."""
    invalidate_obj(obj)
def do_save_obj(obj):
    """Save ``obj`` (which triggers cacheops invalidation via signals)."""
    obj.save()
# Benchmark matrix: (name, spec) pairs where spec may define
#   prepare_once -- run once before all iterations,
#   prepare      -- run before every iteration (its return value is passed
#                   to `run` when present),
#   run          -- the measured callable.
# NOTE(review): presumably consumed by an external benchmark runner -- confirm.
TESTS = [
    ('count_no_cache', {'run': do_count_no_cache}),
    ('count_hit', {'prepare_once': do_count, 'run': do_count}),
    ('count_miss', {'prepare': invalidate_count, 'run': do_count}),
    ('fetch_construct', {'run': do_fetch_construct}),
    ('fetch_no_cache', {'run': do_fetch_no_cache}),
    ('fetch_hit', {'prepare_once': do_fetch, 'run': do_fetch}),
    ('fetch_miss', {'prepare': invalidate_fetch, 'run': do_fetch}),
    ('invalidate_obj', {'prepare': prepare_obj, 'run': do_invalidate_obj}),
    ('save_obj', {'prepare': prepare_obj, 'run': do_save_obj}),
]
| [
"suor.web@gmail.com"
] | suor.web@gmail.com |
065fe50031eb0f2ee2bd0f6c17af9744ac523539 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L82/82-bs_wat_20Abox/set_1ns_equi.py | 28657cb7707bdfb592da52d7aa486e0d2ea88321 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import os
# Per-lambda TI setup: for each lambda window, create a fresh work directory,
# instantiate the equilibration input and PBS templates (replacing the XXX
# placeholder with the lambda value), copy topology/restart files, and submit
# the job with qsub.
# NOTE(review): `dir` shadows the builtin of the same name; harmless here but
# worth renaming if this script grows.
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/wat_20Abox/ti_one-step/82_bs/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
# 12-point Gaussian-quadrature lambda schedule.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Start from a clean per-lambda directory named after the lambda value.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    #equiin
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    #PBS
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #top
    os.system("cp ../82-bs_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")
    #submit pbs
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
f41f0aeac0150a5016682fef4713dad35084986b | 0b40458397355319e74f421b5903b6bdbdb5ee9c | /accounts/migrations/0007_remove_userprofile_friends.py | 1acf280c35ce724833ff78fa83c547be53db7390 | [] | no_license | OllyDorvelus/visumic | bae61c7768ed1fa0b76134dbd715e2f1ece3143d | 884a7c89bd562ef7e2e33a01a3239a48b038ac40 | refs/heads/master | 2022-12-09T10:02:06.921966 | 2018-03-01T03:12:43 | 2018-03-01T03:12:43 | 122,236,678 | 1 | 0 | null | 2022-12-08T00:39:38 | 2018-02-20T18:17:26 | HTML | UTF-8 | Python | false | false | 398 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-29 23:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the UserProfile.friends field (no longer used)."""

    dependencies = [
        ('accounts', '0006_auto_20170401_2124'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='friends',
        ),
    ]
| [
"ollydorvelus@gmail.com"
] | ollydorvelus@gmail.com |
968ecbcdd00c75509c462e5effc5495acb927ec4 | e41b0bb4f8f835082f8c559101b94dc5f64976ae | /exp/exp34.py | ce1607b45bd2258286f1bebbbc233ab9b7fc5b4e | [] | no_license | voytekresearch/pacological | 5e6f5aba0ede883594863a56b4702f907d458a90 | 306f953f456e87298322065308ad4e2fbbe6d7f7 | refs/heads/master | 2021-01-21T15:34:35.003245 | 2018-12-03T01:00:26 | 2018-12-03T01:00:26 | 37,220,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""PAC as selective amplification and information transmission."""
import numpy as np
import matplotlib.pyplot as plt; plt.ion()
if __name__ == "__main__":
from pacological.exp.exp6 import run
import sys
import pandas as pd
import os
from itertools import product
from collections import defaultdict
path = sys.argv[1]
# -- USER SETTINGS --------------------------------------------------------
n = 1000
t = 5
dt = 0.001
f = 10
Sstim = .05
# This ratio of k to excitability gives mean rates
# equivilant to Poisson
k_base = 1
excitability_base = 0.0001
bin_multipliers = range(2, 32, 2)
# Drives and iteration counter
Ioscs = [5, 30]
Istims = [5, 30]
iterations = range(200)
params = product(Ioscs, Istims, bin_multipliers)
for Iosc, Istim, b_mult in params:
# Create basename for the data
basename = "Iosc-{0}_Istim-{1}_k{2}".format(
Iosc, Istim, b_mult * k_base)
print(basename)
basepath = os.path.join(path, basename)
# Tmp dicts for each param set
d_H = defaultdict(list)
d_MI = defaultdict(list)
d_PAC = defaultdict(list)
d_rate = defaultdict(list)
# -- Run
k = k_base * b_mult
excitability = excitability_base / b_mult
for i in iterations:
print(i)
res = run(n, t, Iosc, f, Istim, Sstim * Istim, dt, k, excitability)
# Process the result
hys = {}
for b in res['H'].keys():
hys[b] = res['H'][b]['HY']
for b in hys.keys():
d_H[b].append(hys[b])
for b in res['MI'].keys():
d_MI[b].append(res['MI'][b])
for b in res['PAC'].keys():
d_PAC[b].append(res['PAC'][b])
for b in res['spikes'].keys():
mrate = np.mean(res['spikes'][b].sum(0) / float(t))
d_rate[b].append(mrate)
# -- Save
# H
df_H = pd.DataFrame(d_H)
df_H.to_csv(basepath + "_H.csv", index=False)
sum_H = df_H.describe(percentiles=[.05, .25, .75, .95]).T
sum_H.to_csv(basepath + "_H_summary.csv")
# MI
df_MI = pd.DataFrame(d_MI)
df_MI.to_csv(basepath + "_MI.csv", index=False)
sum_MI = df_MI.describe(percentiles=[.05, .25, .75, .95]).T
sum_MI.to_csv(basepath + "_MI_summary.csv")
# PAC
df_PAC = pd.DataFrame(d_PAC)
df_PAC.to_csv(basepath + "_PAC.csv", index=False)
sum_PAC = df_PAC.describe(percentiles=[.05, .25, .75, .95]).T
sum_PAC.to_csv(basepath + "_PAC_summary.csv")
# rate
df_rate = pd.DataFrame(d_rate)
df_rate.to_csv(basepath + "_rate.csv", index=False)
sum_rate = df_rate.describe(percentiles=[.05, .25, .75, .95]).T
sum_rate.to_csv(basepath + "_rate_summary.csv")
| [
"Erik.Exists@gmail.com"
] | Erik.Exists@gmail.com |
c30f6710203806ad57e0d9cfcaad2b3e8c7ed1fb | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/程序员练级+Never/xuef code/xuef_code_python/python_cookbook_code/1. 数据结构和算法/1.6 字典中的键映射多个值.py | 735e481d02cb5f19c48205d6c10befff0a5121b4 | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | """
问题
怎样实现一个键对应多个值的字典(也叫 multidict)?
解决方案
一个字典就是一个键对应一个单值的映射。
如果你想要一个键映射多个值,那么你就需要将这多个值放到另外的容器中, 比如列表或者集合里面。
比如,你可以像下面这样构造这样的字典:
d = {
'a' : [1, 2, 3],
'b' : [4, 5]
}
e = {
'a' : {1, 2, 3},
'b' : {4, 5}
}
"""
"""
选择使用列表还是集合取决于你的实际需求。
如果你想保持元素的插入顺序就应该使用列表,
如果想去掉重复元素就使用集合(并且不关心元素的顺序问题)。
"""
"""
你可以很方便的使用 collections 模块中的 defaultdict 来构造这样的字典。
defaultdict 的一个特征是它会自动初始化每个 key 刚开始对应的值,
所以你只需要关注添加元素操作了。比如:
"""
from collections import defaultdict
d = defaultdict(list) # list 表示值类型
d['a'].append(1)
d['a'].append(2)
d['b'].append(4)
d = defaultdict(set)
d['a'].add(1)
d['a'].add(2)
d['b'].add(4)
##d = defaultdict(list)
##for key, value in pairs:
## d[key].append(value)
| [
"643472092@qq.com"
] | 643472092@qq.com |
8e000e5070188bf89462da25f306d558f36ec373 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_1551_Minimum_Operations_to_Make_Array_Equal.py | c1ce0ef2b3b5d7928801d2d186042277770336b7 | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | class Solution(object):
def minOperations(self, n):
"""
:type n: int
:rtype: int
"""
last = 2 * (n - 1) + 1
avg = (last + 1) / 2
res = 0
for i in xrange(n/2):
res += avg - (i*2 + 1)
return res
| [
"hemingwei2017@gmail.com"
] | hemingwei2017@gmail.com |
031545508eb0cde90949a94355460d23108404d2 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/Other/1018_체스판 다시 칠하기.py | 15e56f6ea95ea6eb5167092167e29d02dcf8c5b5 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | n, m = map(int, input().split()) # m*n
arr = [list(input()) for i in range(n)]
# 왼쪽 맨 위가 W일때, B일때를 나누어 구함.
# 최대 row = (n - 8) index까지 가능.
max_r = n - 8 # 2
max_c = m - 8
s = ['W', 'B']
count = []
for char in s:
for r in range(0, max_r + 1):
for c in range(0, max_c + 1):
test = [row[c:c+8] for row in arr[r:r+8]]
cnt = 0
for i in range(8):
for j in range(8):
if r % 2 == c % 2:
if i % 2 == j % 2 and test[i][j] != char:
cnt += 1
elif i % 2 != j % 2 and test[i][j] == char:
cnt += 1
elif r % 2 != c % 2:
if i % 2 == j % 2 and test[i][j] == char:
cnt += 1
elif i % 2 != j % 2 and test[i][j] != char:
cnt += 1
count.append(cnt)
print(min(count)) | [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
5662acc9dfbb5eccbc2f255113862a6a02a9523d | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Pad2D_25.py | 43af560c2452752f47a1cc5b0ee2f2d14862faf8 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 608 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
# Load the shared nn-op case configuration; ``yml`` resolves individual
# test cases by name.
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Pad2D_25():
    """Run the jit consistency check for the Pad2D_25 case from nn.yml."""
    case_info = yml.get_case_info("Pad2D_25")
    JitTrans(case=case_info).jit_run()
| [
"825276847@qq.com"
] | 825276847@qq.com |
ae24899f5d7f723d07cb58f1c053b40315c20c77 | d5c67ac21a5210d36c74bfd0a4d45c91ab3c1879 | /Spyder/python机器学习应用/学生上网分析/学生上网分析之上网时长聚类.py | 39c78009f51ebda23804e3ee7586778083623f24 | [] | no_license | HanKin2015/ACM | 93036222eb5e382e5a1269c0208c58bba4ad5af7 | 040779ce4a3e88c40c7beb9cba6a33aa3695bf50 | refs/heads/master | 2022-03-03T05:15:51.053240 | 2021-12-20T14:18:14 | 2021-12-20T14:21:11 | 57,268,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 16:03:02 2017
@author: HanKin
"""
# -*- coding: utf-8 -*-
'''
对上网时长时间进行聚类
'''
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics#计算方法
import matplotlib.pyplot as plt
# Cluster students' daily online duration with DBSCAN.
# Input: TestData.txt, comma-separated records; one (start hour, duration)
# pair is kept per MAC address (later records overwrite earlier ones).
mac2id=dict()           # MAC address -> index into onlinetimes
onlinetimes=[]          # list of (start hour, online duration) tuples
f=open('TestData.txt',encoding='utf-8')
for line in f:
    items = line.strip().split(",")
    # MAC address
    mac=items[2]
    # online duration
    onlinetime=int(items[6])
    # timestamp example: 2014-07-20 22:44:18.540000000 -- keep only the hour
    starttime=int(items[4].split(' ')[1].split(':')[0])
    # keep exactly one record per MAC address
    if mac not in mac2id:
        mac2id[mac]=len(onlinetimes)
        onlinetimes.append((starttime,onlinetime))
    else:
        onlinetimes[mac2id[mac]]=(starttime,onlinetime)
real_X=np.array(onlinetimes).reshape((-1,2)) # -1 lets numpy infer the row count
# Use only the duration column; +1 guards log against zero durations.
X=np.log(1+real_X[:,1:])
# Fit DBSCAN; `labels` holds each sample's cluster id (-1 marks noise).
db=DBSCAN(eps=0.14,min_samples=10).fit(X)
labels = db.labels_
print('Labels:\n',labels)
# Fraction of samples labelled -1 (noise).
raito=len(labels[labels[:] == -1]) / len(labels)
print('Noise raito:',format(raito, '.2%'))
# Number of clusters, excluding the noise label.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
# Clustering quality: silhouette coefficient.
'''
A silhouette near 1 means sample i is well clustered;
near -1 means it should belong to a different cluster;
near 0 means it lies on the boundary between two clusters.
'''
print("Silhouette Coefficient: %0.3f"% metrics.silhouette_score(X, labels))
# Print each cluster id and its size.
for i in range(n_clusters_):
    print('number of data in Cluster %s is : %s'%(i,len(X[labels==i])))
    #print(list(X[labels == i].flatten()))
# Histogram of the (log) durations for visual inspection.
plt.hist(X,24) | [
"1058198502@qq.com"
] | 1058198502@qq.com |
5154a4e9ad8557de0e8b54229abfaae3972c2128 | 6c9b8812e1f5e1f6bc881265ce9de9efeb22869d | /model_zoo/obj_detection/rpn.py | 63b8ffae98cd091045ca49ae3bcdde701b367885 | [] | no_license | maxme1/model_zoo | a194582362ab07e2b40dfd43fc5e0aa60f7289cf | ef34432cbd7eb912ba976cd1448427f3cc3ff6f5 | refs/heads/master | 2022-07-12T10:08:43.279906 | 2022-06-23T18:37:57 | 2022-06-23T18:37:57 | 90,151,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import numpy as np
import torch.nn as nn
from torch.nn import functional
class RPN(nn.Module):
    """Region Proposal Network head.

    A shared convolution maps the backbone feature map to ``mid_channels``;
    two 1x1 sibling convolutions then predict, per spatial position, one
    objectness score per anchor and four box-regression offsets per anchor.
    """

    def __init__(self, in_channels, mid_channels, kernel_size, num_anchors):
        super().__init__()
        # "same" padding so the spatial size is preserved for odd kernels.
        padding = kernel_size // 2
        self.standardise = nn.Conv2d(in_channels, mid_channels, kernel_size, padding=padding)
        self.classifier = nn.Conv2d(mid_channels, num_anchors, 1)
        self.regressor = nn.Conv2d(mid_channels, num_anchors * 4, 1)

    def forward(self, x):
        shared = self.standardise(x)
        return self.classifier(shared), self.regressor(shared)
class ROIPooling(nn.Module):
    """Pool a feature map down to (approximately) a fixed spatial size.

    The stride and kernel size are derived from the ratio between the
    input's spatial shape and ``output_shape``; max pooling by default.
    """

    def __init__(self, output_shape, pooling=functional.max_pool2d):
        super().__init__()
        self.pooling = pooling
        self.output_shape = output_shape

    def forward(self, x):
        spatial = np.array(x.shape[2:])
        ratio = spatial / self.output_shape
        # floor for the stride, ceil for the kernel, so windows cover the map.
        stride = tuple(int(s) for s in np.floor(ratio))
        kernel_size = tuple(int(k) for k in np.ceil(ratio))
        return self.pooling(x, kernel_size=kernel_size, stride=stride)
class ResNetC4(nn.Module):
    """Wrap a ResNet-style model, keeping only its convolutional trunk.

    The classification head (avgpool, fc) is deleted; forward runs the
    stem and the four residual stages and returns the feature map.
    """

    def __init__(self, model):
        super().__init__()
        # Drop the classification head -- not needed for feature extraction.
        del model.avgpool, model.fc
        self.model = model

    def forward(self, x):
        net = self.model
        for stage in (net.conv1, net.bn1, net.relu, net.maxpool,
                      net.layer1, net.layer2, net.layer3, net.layer4):
            x = stage(x)
        return x
| [
"maxs987@gmail.com"
] | maxs987@gmail.com |
e732fc6870ede2af1dff4d1fd378f33fa5b4cb66 | 485412a223a244544e8e099c0425b580224b1026 | /iCount/tests/test_summary.py | 0c46bf3fbe8f0cfcef2b527b37eee20122d2c826 | [
"MIT"
] | permissive | dblenkus/iCount | c18de739e0a16385fd023202d05645ae7ea80be8 | 9d54a49f98f7393fa9003834ba0173bb5f70b61a | refs/heads/master | 2020-06-23T10:44:43.894222 | 2016-11-23T09:41:12 | 2016-11-23T09:41:12 | 74,653,274 | 0 | 0 | null | 2016-11-24T08:32:09 | 2016-11-24T08:32:08 | null | UTF-8 | Python | false | false | 12,726 | py | # pylint: disable=missing-docstring, protected-access
import unittest
import tempfile
import warnings
from iCount.analysis import summary
from iCount.tests.utils import make_file_from_list, make_list_from_file, get_temp_file_name
def _make_types_length(annotation, subtype='biotype', excluded_types=None):
    """
    Run function `make_types_length_file` with data from `annotation`.
    """
    gtf_file = make_file_from_list(annotation)
    fai_file = make_file_from_list(bedtool=False, data=[
        ['1', '100'],
        ['6', '100'],
        ['20', '100'],
    ])
    out_file = get_temp_file_name()
    result, _ = summary.make_types_length_file(
        gtf_file, fai_file, out_file, subtype=subtype, excluded_types=excluded_types)
    return make_list_from_file(result, fields_separator='\t')
def _make_summary_report(annotation, cross_links, chrom_lengths,
                         subtype='biotype', excluded_types=None):
    """
    Run function `make_summary_report` with input/output data as lists.
    """
    gtf_file = make_file_from_list(annotation)
    bed_file = make_file_from_list(cross_links)
    lengths_file = make_file_from_list(chrom_lengths, bedtool=False)
    out_file = tempfile.NamedTemporaryFile(delete=False).name
    report = summary.make_summary_report(
        gtf_file, bed_file, out_file, lengths_file,
        subtype=subtype, excluded_types=excluded_types)
    return make_list_from_file(report, fields_separator='\t')
class TestMakeTypesLengthFile(unittest.TestCase):
    """Tests for `summary.make_types_length_file` via `_make_types_length`."""

    def setUp(self):
        # Temporary files created by the helpers trigger ResourceWarning
        # noise that is irrelevant to these tests.
        warnings.simplefilter("ignore", ResourceWarning)

    def test_merge_same_types(self):
        """
        Test that merging same type is done as expected.

        Confirm that:

        * same interval is not counted twice
        * overlapping/touching intervals are merged and only then counted
        """
        annotation = [
            ['1', '.', 'CDS', '10', '20', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '10', '20', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '15', '25', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '20', '29', '.', '+', '.', 'biotype "A";']]
        expected = [
            ['CDS A', '20'],
            ['unannotated', '580'],
        ]
        self.assertEqual(expected, _make_types_length(annotation))

    def test_merge_respect_strand(self):
        """
        Test that merging is sensitive to strand.

        Intervals differing only in strand are counted separately
        """
        annotation = [
            ['1', '.', 'CDS', '10', '19', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '10', '19', '.', '-', '.', 'biotype "A";']]
        expected = [
            ['CDS A', '20'],
            ['unannotated', '580'],
        ]
        self.assertEqual(expected, _make_types_length(annotation))

    def test_mixed_types(self):
        """
        Defect different types in same position correctly.
        """
        annotation = [
            ['1', '.', 'intron', '10', '19', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'intron', '20', '29', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'intron', '10', '19', '.', '+', '.', 'biotype "B";'],
            ['1', '.', 'ncRNA', '10', '19', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'ncRNA', '10', '19', '.', '+', '.', 'biotype "C";']]
        expected = [
            ['intron A', '20'],
            ['intron B', '10'],
            ['ncRNA A', '10'],
            ['ncRNA C', '10'],
            ['unannotated', '580'],
        ]
        self.assertEqual(expected, _make_types_length(annotation))

    def test_shuffled_input(self):
        """
        Unsorted input does not make difference.
        """
        annotation = [
            ['20', '.', 'intron', '20', '29', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'intron', '20', '29', '.', '+', '.', 'biotype "B";'],
            ['1', '.', 'intron', '10', '19', '.', '+', '.', 'biotype "A";'],
            ['6', '.', 'ncRNA', '10', '19', '.', '+', '.', 'biotype "C";']]
        expected = [
            ['intron A', '20'],
            ['intron B', '10'],
            ['ncRNA C', '10'],
            ['unannotated', '560'],
        ]
        self.assertEqual(expected, _make_types_length(annotation))

    def test_subtype_param1(self):
        """
        Subtype parameter can be empty: type is just 3rd column.
        """
        annotation = [
            ['20', '.', 'intron', '20', '29', '.', '+', '.', 'biotype "A";'],
            ['6', '.', 'ncRNA', '10', '19', '.', '+', '.', 'biotype "C";']]
        expected = [
            ['intron', '10'],
            ['ncRNA', '10'],
            ['unannotated', '580']
        ]
        self.assertEqual(expected, _make_types_length(
            annotation, subtype=None))

    def test_subtype_param2(self):
        """
        Subtype can have any value - not just the default biotype.
        """
        annotation = [
            ['20', '.', 'intron', '20', '29', '.', '+', '.', 'attr42 "A";'],
            ['6', '.', 'ncRNA', '10', '19', '.', '+', '.', 'attr42 "C";']]
        expected = [
            ['intron A', '10'],
            ['ncRNA C', '10'],
            ['unannotated', '580'],
        ]
        self.assertEqual(expected, _make_types_length(
            annotation, subtype='attr42'))

    def test_excluded_types(self):
        """
        Exclude some annotation intervals by 3rd column value.
        """
        annotation = [
            ['20', '.', 'intron', '20', '29', '.', '+', '.', 'biotype "A";'],
            ['6', '.', 'ncRNA', '10', '19', '.', '+', '.', 'biotype "C";']]
        expected = [
            ['ncRNA C', '10'],
            ['unannotated', '590'],
        ]
        self.assertEqual(expected, _make_types_length(
            annotation, excluded_types=['intron']))
class TestMakeSummaryReport(unittest.TestCase):
    """Unit tests for _make_summary_report: per-type length / cross-link
    site / event counts with percentages and enrichment columns."""
    def setUp(self):
        # Silence ResourceWarning noise raised during the tests.
        warnings.simplefilter("ignore", ResourceWarning)
        # Single 100 bp chromosome shared by all tests below.
        self.chrom_lengths = [['1', '100']]
        # Expected report header (first output row).
        self.header = ['type', 'length', 'length %',
                       'sites #', 'sites %', 'sites enrichment',
                       'events #', 'events %', 'events enrichment']
    def test_diff_chromosome_naming(self):
        """
        Exception is raised if chromosome naming is inconsistent.
        """
        cross_links = [
            ['chr1', '15', '16', '.', '5', '+']]
        annotation = [
            ['1', '.', 'CDS', '10', '20', '.', '+', '.', 'biotype "A";']]
        message = r"No intersections found. This may be caused by .*"
        with self.assertRaisesRegex(ValueError, message):
            _make_summary_report(annotation, cross_links, self.chrom_lengths)
    def test_diff_only_strand1(self):
        """
        Same coords but diff strand and same type.
        """
        cross_links = [
            ['1', '5', '6', '.', '3', '+'],
            ['1', '5', '6', '.', '2', '-']]
        annotation = [
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '1', '10', '.', '-', '.', 'biotype "A";']]
        expected = [
            self.header,
            ['CDS A', '20', '0.1', '2', '1.0', '10.0', '5', '1.0', '10.0'],
            ['unannotated', '180', '0.9', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links, self.chrom_lengths)
        self.assertEqual(out, expected)
    def test_diff_only_strand2(self):
        """
        Same coords but diff strand and diff type.
        """
        cross_links = [
            ['1', '5', '6', '.', '3', '+'],
            ['1', '5', '6', '.', '1', '-']]
        annotation = [
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '1', '10', '.', '-', '.', 'biotype "B";']]
        expected = [
            self.header,
            ['CDS A', '10', '0.05', '1', '0.5', '10.0', '3', '0.75', '15.0'],
            ['CDS B', '10', '0.05', '1', '0.5', '10.0', '1', '0.25', '5.0'],
            ['unannotated', '180', '0.9', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links, self.chrom_lengths)
        self.assertEqual(out, expected)
    def test_many_regions(self):
        """
        Multiple annotation regions intersecting one crosslink.
        """
        cross_links = [
            ['1', '5', '6', '.', '1', '+']]
        annotation = [
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '1', '20', '.', '+', '.', 'biotype "B";'],
            ['1', '.', 'ncRNA', '1', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'ncRNA', '1', '20', '.', '+', '.', 'biotype "C";']]
        expected = [
            self.header,
            ['CDS A', '10', '0.05', '1', '0.25', '5.0', '1', '0.25', '5.0'],
            ['CDS B', '20', '0.1', '1', '0.25', '2.5', '1', '0.25', '2.5'],
            ['ncRNA A', '10', '0.05', '1', '0.25', '5.0', '1', '0.25', '5.0'],
            ['ncRNA C', '20', '0.1', '1', '0.25', '2.5', '1', '0.25', '2.5'],
            ['unannotated', '180', '0.9', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links, self.chrom_lengths)
        self.assertEqual(out, expected)
    def test_unsorted_input(self):
        """
        Unsoreted input should make no difference.
        """
        cross_links = [
            ['1', '7', '8', '.', '2', '+'],
            ['1', '5', '6', '.', '1', '-']]
        annotation = [
            ['1', '.', 'CDS', '20', '29', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '6', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '1', '10', '.', '-', '.', 'biotype "A";']]
        expected = [
            self.header,
            ['CDS A', '25', '0.125', '2', '1.0', '8.0', '3', '1.0', '8.0'],
            ['unannotated', '175', '0.875', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links, self.chrom_lengths)
        self.assertEqual(out, expected)
    def test_subtype_param1(self):
        """
        Subtype parameter can be empty: type is just 3rd column.
        """
        cross_links = [
            ['1', '5', '6', '.', '1', '+']]
        annotation = [
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'CDS', '2', '10', '.', '+', '.', 'biotype "B";']]
        expected = [
            self.header,
            ['CDS', '10', '0.05', '1', '1.0', '20.0', '1', '1.0', '20.0'],
            ['unannotated', '190', '0.95', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links,
                                   self.chrom_lengths, subtype=None)
        self.assertEqual(out, expected)
    def test_subtype_param2(self):
        """
        Subtype can have any value - not just the default biotype.
        """
        cross_links = [
            ['1', '5', '6', '.', '1', '+']]
        annotation = [
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'attr42 "A";'],
            ['1', '.', 'CDS', '1', '10', '.', '+', '.', 'attr42 "B";'],
            ['1', '.', 'intron', '7', '10', '.', '+', '.', 'attr42 "B";']]
        expected = [
            self.header,
            ['CDS A', '10', '0.05', '1', '0.5', '10.0', '1', '0.5', '10.0'],
            ['CDS B', '10', '0.05', '1', '0.5', '10.0', '1', '0.5', '10.0'],
            ['intron B', '4', '0.02', '0', '0.0', '0.0', '0', '0.0', '0.0'],
            ['unannotated', '190', '0.95', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links,
                                   self.chrom_lengths, subtype='attr42')
        self.assertEqual(out, expected)
    def test_excluded_types(self):
        """
        Exclude some annotation intervals by 3rd column value.
        """
        cross_links = [
            ['1', '5', '6', '.', '1', '+']]
        annotation = [
            ['1', '.', 'intron', '1', '20', '.', '+', '.', 'biotype "A";'],
            ['1', '.', 'ncRNA', '1', '20', '.', '+', '.', 'biotype "C";']]
        expected = [
            self.header,
            ['ncRNA C', '20', '0.1', '1', '1.0', '10.0', '1', '1.0', '10.0'],
            ['unannotated', '180', '0.9', '0', '0.0', '0.0', '0', '0.0', '0.0'],
        ]
        out = _make_summary_report(annotation, cross_links,
                                   self.chrom_lengths, excluded_types=['intron'])
        self.assertEqual(out, expected)
if __name__ == '__main__':
unittest.main()
| [
"zmrzlikar.jure@gmail.com"
] | zmrzlikar.jure@gmail.com |
cf0cb9b68aefd4e7d4cc1752125b10ee68486cd7 | c9ffc4b4d2bec921d7f7acbdcd3b2dda85c62a07 | /example_taxi/example/serializers.py | e707a860f0c63fb6bb37fda6952de28f402e2544 | [] | no_license | gridl/taxi-app-channels2 | 1c07f7e6832b743e4593653c633c377579756313 | 7958a071a37b0b90c2847a521f4196d4443318a2 | refs/heads/master | 2020-03-30T19:48:05.692197 | 2018-04-19T13:19:49 | 2018-04-19T13:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Trip
class UserSerializer(serializers.ModelSerializer):
    """Sign-up serializer: accepts two password fields, checks they match,
    and creates the user via the model manager's create_user."""
    # write_only so raw passwords are never echoed back in responses.
    password1 = serializers.CharField(write_only=True)
    password2 = serializers.CharField(write_only=True)
    def validate(self, data):
        """Reject the payload unless both password fields are identical."""
        if data['password1'] != data['password2']:
            raise serializers.ValidationError('Passwords must match.')
        return data
    def create(self, validated_data):
        """Create the user, collapsing password1/password2 into 'password'."""
        # Copy everything except the two raw password fields.
        data = {
            key: value for key, value in validated_data.items()
            if key not in ('password1', 'password2')
        }
        data['password'] = validated_data['password1']
        # create_user is Django's manager method (handles password hashing).
        return self.Meta.model.objects.create_user(**data)
    class Meta:
        model = get_user_model()
        fields = (
            'id', 'username', 'password1', 'password2',
            'first_name', 'last_name',
        )
        read_only_fields = ('id',)
class TripSerializer(serializers.ModelSerializer):
    """Serializer exposing every Trip field; identifiers and timestamps
    are read-only."""
    class Meta:
        model = Trip
        fields = '__all__'
        read_only_fields = ('id', 'nk', 'created', 'updated',)
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
1be012a64757e907d83446fe8297da8be07d1ca7 | 879ac03dd910d152170d6d1e3ff4d5e522b14d79 | /Tutorial/02. 30 Days of Code/025. Day 24; More Linked Lists.py | d5beb71d16b8a7bc1ee33d88692932b22a6858e9 | [] | no_license | dispe1/Hackerrank-Solutions | ae47920d7761546fd2ef753c1b4f9ae087aaed2a | 67b792dc2cb2933eb1f1565100ea13b0c9783fba | refs/heads/master | 2020-07-11T21:25:39.824667 | 2019-12-10T12:00:12 | 2019-12-10T12:00:12 | 204,646,756 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # Problem: https://www.hackerrank.com/challenges/30-linked-list-deletion/problem
# Difficulty : Easy
# Score : 30
class Node:
    """A singly linked list node: a payload plus a link to the successor."""

    def __init__(self, data):
        self.data = data   # payload value
        self.next = None   # successor node; None terminates the list
class Solution:
    """Basic operations on a singly linked list of Node objects."""

    def insert(self, head, data):
        """Append a new node holding ``data`` at the tail; return the head."""
        node = Node(data)
        if head is None:
            return node
        tail = head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
        return head

    def display(self, head):
        """Print the node values on one line, separated by spaces."""
        node = head
        while node is not None:
            print(node.data, end=' ')
            node = node.next

    def removeDuplicates(self, head):
        """Drop nodes whose value equals their predecessor's; return head."""
        node = head
        while node is not None and node.next is not None:
            if node.data == node.next.data:
                node.next = node.next.next
            else:
                node = node.next
        return head
# Driver: read T integers from stdin, build the list in insertion order,
# collapse equal adjacent values, then print the remaining values.
mylist= Solution()
T=int(input())  # number of values to read
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);
| [
"lkjim0757@naver.com"
] | lkjim0757@naver.com |
dc343080a3e6d41edc7e4dd758414b296f10372e | 0a1742760b617db58d13bec3d715d83d4f552bdb | /scripts/delnopm.py | e6d88e6657644f1097cd0eee3bc9096c03d5e1e6 | [
"MIT"
] | permissive | rezvorck/vkbot | 1a97709e4bf3ec51e02af17ecc88bc0ceac59058 | f0e3a9ce4c6384bca3939960996e449d98d6ae46 | refs/heads/master | 2021-01-01T16:53:12.262398 | 2017-07-20T15:48:15 | 2017-07-20T15:48:15 | 97,942,346 | 1 | 0 | null | 2017-07-21T11:48:01 | 2017-07-21T11:48:00 | null | UTF-8 | Python | false | false | 436 | py | import logging
import scriptlib
# noinspection PyUnusedLocal
def main(a, args):
friends = scriptlib.getFriends(a, fields='can_write_private_message')
to_del = []
for j in friends:
if not j['can_write_private_message']:
to_del.append(str(j['id']))
logging.info('Found id{} ({} {})'.format(j['id'], j['first_name'], j['last_name']))
scriptlib.createFriendController().appendNoadd(to_del)
| [
"kalinochkind@gmail.com"
] | kalinochkind@gmail.com |
c54de06cec40dc1412ffe45eae83c0d130d2c7ec | e7efae2b83216d9621bd93390959d652de779c3d | /hyperv/datadog_checks/hyperv/check.py | aa4d44f2a269738f8b3d15872dcfd9dd47ce87f9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 427 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport
from .metrics import METRICS_CONFIG
class HypervCheckV2(PerfCountersBaseCheckWithLegacySupport):
    """Hyper-V check based on Windows performance counters; all metrics
    are emitted under the 'hyperv' namespace."""
    __NAMESPACE__ = 'hyperv'
    def get_default_config(self):
        # Default to the static metric definitions shipped with the check.
        return {'metrics': METRICS_CONFIG}
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
c5de719c57b09e8105dec4d270d58fd962cd0482 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/frameratedistribution/frameratedistribution.py | 9e82189eba96293027ad24081a8cab44a50a6e6d | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class FrameRateDistribution(Base):
    """This object provides the options for Frame Rate distribution.
    The FrameRateDistribution class encapsulates a required frameRateDistribution resource which will be retrieved from the server every time the property is accessed.
    """
    __slots__ = ()
    _SDM_NAME = 'frameRateDistribution'
    # Maps the Python property names to their server-side attribute names.
    _SDM_ATT_MAP = {
        'PortDistribution': 'portDistribution',
        'StreamDistribution': 'streamDistribution',
    }
    # Allowed enum values per server-side attribute.
    _SDM_ENUM_MAP = {
        'portDistribution': ['applyRateToAll', 'splitRateEvenly'],
        'streamDistribution': ['applyRateToAll', 'splitRateEvenly'],
    }
    def __init__(self, parent, list_op=False):
        super(FrameRateDistribution, self).__init__(parent, list_op)
    @property
    def PortDistribution(self):
        # type: () -> str
        """
        Returns
        -------
        - str(applyRateToAll | splitRateEvenly): At the port level, apply the target configuration transmission rate for each encapsulation.
        """
        return self._get_attribute(self._SDM_ATT_MAP['PortDistribution'])
    @PortDistribution.setter
    def PortDistribution(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['PortDistribution'], value)
    @property
    def StreamDistribution(self):
        # type: () -> str
        """
        Returns
        -------
        - str(applyRateToAll | splitRateEvenly): At the flow group level, apply the target rate of each port.
        """
        return self._get_attribute(self._SDM_ATT_MAP['StreamDistribution'])
    @StreamDistribution.setter
    def StreamDistribution(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['StreamDistribution'], value)
    def update(self, PortDistribution=None, StreamDistribution=None):
        # type: (str, str) -> FrameRateDistribution
        """Updates frameRateDistribution resource on the server.
        Args
        ----
        - PortDistribution (str(applyRateToAll | splitRateEvenly)): At the port level, apply the target configuration transmission rate for each encapsulation.
        - StreamDistribution (str(applyRateToAll | splitRateEvenly)): At the flow group level, apply the target rate of each port.
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
ab5a2cf7646ee4c873d6f8e1b936ef25d22e2781 | 39f9cdff9eca95b1018d2b869cb08c1b71905ead | /Lesson03/re_lang.py | 994273c73af1c078d431f1a4978dca42dcde6449 | [] | no_license | alexbaryzhikov/design-of-computer-programs | a900ec246a1d174da7fba4f209471aa44dfa7486 | 7b4b212b528a0164cbd283110426bb7e0a0f46ce | refs/heads/master | 2020-03-23T03:57:44.165186 | 2018-07-15T21:06:28 | 2018-07-15T21:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py | # Specifications
def test_search():
    """Specification suite for the pattern language: exercises match/search
    over literals, sequences, stars, dot and eol; every assert must hold."""
    a, b, c = lit('a'), lit('b'), lit('c')
    abcstars = seq(star(a), seq(star(b), star(c)))
    dotstar = star(dot)
    assert search(lit('def'), 'abcdefg') == 'def'
    assert search(seq(lit('def'), eol), 'abcdefg') == None
    assert search(a, 'not the start') == 'a'
    assert match(a, 'not the start') == None
    assert match(abcstars, 'aaabbbccccccccccdef') == 'aaabbbcccccccccc'
    assert match(abcstars, 'junk') == ''
    assert all(match(seq(abcstars, eol), s) == s
               for s in 'abc aaabbccc aaaabcccc'.split())
    assert all(match(seq(abcstars, eol), s) == None
               for s in 'cab aaabbcccd aaaa-b-cccc'.split())
    r = seq(lit('ab'), seq(dotstar, seq(lit('aca'), seq(dotstar, seq(a, eol)))))
    assert all(search(r, s) is not None
               for s in 'abracadabra abacaa about-acacia-flora'.split())
    assert all(match(seq(c, seq(dotstar, b)), s) is not None
               for s in 'cab cob carob cb carbuncle'.split())
    assert not any(match(seq(c, seq(dot, b)), s)
                   for s in 'crab cb across scab'.split())
    return 'test_search passes'
# Implementation -- Interpreter
def search(pattern, text):
    """Match pattern anywhere in text; return longest earliest match or None.

    BUG FIX: the loop previously ran over range(len(text)), which never
    attempts a match at the position just past the last character (or at
    all when text is empty), so patterns that match only the empty string
    (e.g. ``eol``) could never be found by search even when a match
    exists.  range(len(text) + 1) tries every start position including
    the end of text.
    """
    for i in range(len(text) + 1):
        m = match(pattern, text[i:])
        if m is not None:
            return m
    return None
def match(pattern, text):
    """Match pattern against start of text; return longest match found or None."""
    remainders = matchset(pattern, text)
    if not remainders:
        return None  # no way to match at position 0
    # The shortest leftover corresponds to the longest consumed prefix.
    shortest = min(remainders, key=len)
    return text[:len(text) - len(shortest)]
def components(pattern):
    """Return the op, x, and y arguments; x and y are None if missing."""
    op = pattern[0]
    # Pad with None so 1-, 2- and 3-element patterns all unpack uniformly.
    padded = tuple(pattern[1:3]) + (None, None)
    return op, padded[0], padded[1]
def matchset(pattern, text):
    """Match pattern at start of text; return a set of remainders of text.

    An empty result means "no match"; each remainder is the suffix of
    text left over after one possible way of matching the pattern.
    """
    op, x, y = components(pattern)
    if 'lit' == op:
        # Literal: consume x if text starts with it.
        return set([text[len(x):]]) if text.startswith(x) else null
    elif 'seq' == op:
        # Sequence: match y against every remainder produced by x.
        return set(t2 for t1 in matchset(x, text) for t2 in matchset(y, t1))
    elif 'alt' == op:
        # Alternation: union of both branches' remainders.
        return matchset(x, text) | matchset(y, text)
    elif 'dot' == op:
        # Any single character (requires non-empty text).
        return set([text[1:]]) if text else null
    elif 'oneof' == op:
        # One character from the tuple x (str.startswith accepts a tuple).
        return set([text[1:]]) if text.startswith(x) else null
    elif 'eol' == op:
        # End of text: matches only when nothing is left.
        return set(['']) if text == '' else null
    elif 'star' == op:
        # Zero or more x: either stop here, or consume one x and recurse.
        # The "t1 != text" guard avoids infinite recursion when x can
        # match the empty string.
        return (set([text]) |
                set(t2 for t1 in matchset(x, text)
                    for t2 in matchset(pattern, t1) if t1 != text))
    else:
        raise ValueError('unknown pattern: %s' % pattern)
# The empty set of remainders, meaning "no match".
null = frozenset()
# Constructors for the tuple-encoded pattern language interpreted by matchset.
def lit(string): return ('lit', string)           # literal string
def seq(x, y): return ('seq', x, y)               # x followed by y
def alt(x, y): return ('alt', x, y)               # x or y
def star(x): return ('star', x)                   # zero or more x
def plus(x): return seq(x, star(x))               # one or more x
def opt(x): return alt(lit(''), x)                # zero or one x
def oneof(chars): return ('oneof', tuple(chars))  # any one char from chars
dot = ('dot',)  # any single character
eol = ('eol',)  # end of text
# Run the specification suite on import/execution.
print(test_search())
| [
"aleksiarts@gmail.com"
] | aleksiarts@gmail.com |
7b31f1cabe8e6e6e069065a085ffe735af6feec8 | 17575d8276d36cf5b32d0b6645fb5dd1b5c0962a | /algorithm/elements/height_balanced.py | 8ea07d761515626c6b9a234ae01c1f9b9e141ba4 | [] | no_license | upul/WhiteBoard | 2f720acc1b1c1e0002f8e0d7842c23707c58debe | e81feb8172add6b893fb4496a590c43f863a0346 | refs/heads/master | 2022-09-26T21:07:25.271461 | 2021-05-13T13:31:27 | 2021-05-13T13:31:27 | 47,049,709 | 8 | 20 | null | 2022-09-23T22:34:42 | 2015-11-29T04:20:21 | Jupyter Notebook | UTF-8 | Python | false | false | 1,267 | py |
from collections import namedtuple
class BinaryTreeNode:
def __init__(self, data=None, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_balanced_binary_tree(tree):
    """Return True if every node's left/right subtree heights differ by <= 1.

    Post-order traversal: each recursive call reports (balanced, height)
    for its subtree, so the whole check is one pass.
    """
    BalancedStatusWithHeight = namedtuple(
        'BalancedStatusWithHeight', ('balanced', 'height'))

    def check_balance(tree):
        if not tree:
            # Empty subtree: balanced, with the conventional height of -1.
            return BalancedStatusWithHeight(True, -1)
        left_result = check_balance(tree.left)
        if not left_result.balanced:
            return BalancedStatusWithHeight(False, 0)
        right_result = check_balance(tree.right)
        if not right_result.balanced:
            return BalancedStatusWithHeight(False, 0)
        is_balanced = abs(left_result.height - right_result.height) <= 1
        # BUG FIX: a node's height is one more than its tallest child.
        # Without the "+ 1" every height stayed -1, the difference was
        # always 0, and the function reported every tree as balanced.
        height = max(left_result.height, right_result.height) + 1
        return BalancedStatusWithHeight(is_balanced, height)

    return check_balance(tree).balanced
if __name__ == '__main__':
    # Demo: build a small three-level example tree and print the check result.
    L21 = BinaryTreeNode(271)
    L22 = BinaryTreeNode(561)
    L23 = BinaryTreeNode(2)
    L24 = BinaryTreeNode(271)
    L11 = BinaryTreeNode(6, L21, L22)
    L12 = BinaryTreeNode(6, L23, L24)
    root = BinaryTreeNode(314, L11, L12)
    print(is_balanced_binary_tree(root))
| [
"upulbandara@gmail.com"
] | upulbandara@gmail.com |
69422396b91bb1ec63437d92310092788a861501 | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/hall/src/hall/entity/hall_friend_table.py | c51ba891d473e4cdeb55af31c3bcbe92f12a95d2 | [] | no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,494 | py | # -*- coding=utf-8
'''
Created on 2016年9月5日
好友/熟人自建桌模板
负责好友桌的建桌/进入流程
本模块不负责配置,由各个游戏管理自建桌需要的配合
只需要配置符合规范即可
@author: zhaol
'''
import random
import freetime.util.log as ftlog
import poker.util.timestamp as pktimestamp
from hall.entity.hallconf import HALL_GAMEID
from hall.entity.todotask import TodoTaskHelper, \
TodoTaskPopTip
from poker.entity.configure import gdata
from poker.entity.dao import daobase
from poker.entity.game.game import TYGame
from poker.entity.game.tables.table_player import TYPlayer
# Redis key prefix for per-table hashes (used as FRIEND_TABLE_MAIN_KEY + ftId).
FRIEND_TABLE_MAIN_KEY = "friend_table"
# Hash field holding the auto-increment allocation cursor.
FRIEND_TABLE_ROOM_INDEX = "friend_table_index"
# Hash fields stored for each friend table.
FRIEND_TABLE_PLUGIN = "plugin"
FRIEND_TABLE_ROOMID = "roomId"
FRIEND_TABLE_TABLEID = "tableId"
# Redis list of pre-shuffled candidate room numbers, and its fixed size.
FRIEND_TABLE_RANDOM_KEY = "friend_table_random"
FRIEND_TABLE_RANDOM_COUNT = 1000000
def getStringFTId(roomNumber):
    """Render a friend-table room number as a zero-padded 6-digit string."""
    return format(roomNumber, '06d')
def createFriendTable(pluginId):
    '''Create a friend (self-built) table and return its 6-digit room id.

    pluginId - id of the plugin game that will own the table.
    Returns the room id string, or None when allocation fails (id already
    in use by the plugin, or the random-id list in redis is missing).
    '''
    # Pick the next candidate slot from the shuffled room-number list,
    # wrapping around at FRIEND_TABLE_RANDOM_COUNT.
    index = daobase.executeMixCmd('hincrby', FRIEND_TABLE_MAIN_KEY, FRIEND_TABLE_ROOM_INDEX, 1)
    index = index % FRIEND_TABLE_RANDOM_COUNT
    ftlog.debug('hall_friend_table.createFriendTable index: ', index)
    ftNumStr = daobase.executeMixCmd('LINDEX', FRIEND_TABLE_RANDOM_KEY, index)
    if ftNumStr:
        ftId = getStringFTId(int(ftNumStr))
        ftlog.debug('hall_friend_table.createFriendTable ftId:', ftId)
        check = TYGame(pluginId).checkFriendTable(ftId)
        if check:
            # The id is already taken within this plugin: report failure.
            ftlog.info('hall_friend_table.createFriendTable ftId:', ftId, ' already used by plugin:', pluginId)
            return None
        else:
            daobase.executeMixCmd('HSET', FRIEND_TABLE_MAIN_KEY + ftId, FRIEND_TABLE_PLUGIN, pluginId)
            # Expire the mapping after one week (604800 s) so abandoned
            # tables give their room number back automatically.
            daobase.executeMixCmd('expire', FRIEND_TABLE_MAIN_KEY + ftId, 604800)
            ftlog.info('hall_friend_table.createFriendTable distribution ftId:', ftId, ' to plugin:', pluginId)
            return ftId
    else:
        # No entry at this index: the random-number list was not initialized.
        ftlog.info('hall_friend_table.createFriendTable get ftId index:', index, ' error, please check redis!!')
        return None
def enterFriendTable(userId, gameId, clientId, ftId):
    """Route a user into the friend table identified by ftId.

    Looks up which plugin game owns the room, rejects the request with a
    pop-up tip when the room does not exist or the user's client package
    does not support that plugin, then delegates to the owning game's
    own enterFriendTable implementation.
    """
    if ftlog.is_debug():
        ftlog.debug('hall_friend_table.enterFriendTable userId:', userId, ' pluginId:', gameId, ' clientId:', clientId,
                    ' ftId:', ftId)
    pluginId = queryFriendTable(ftId)
    if not pluginId:
        # Unknown room number: tell the user the room does not exist.
        TodoTaskHelper.sendTodoTask(HALL_GAMEID, userId, TodoTaskPopTip('该房间不存在'))
        return
    if TYPlayer.isRobot(userId):
        # Robots skip the client-package capability check.
        isValidPluginId = True
    else:
        # Humans may only enter if their client's UI template lists a game
        # version matching the owning plugin.
        isValidPluginId = False
        from hall.entity import hallgamelist2
        template = hallgamelist2.getUITemplate(gameId, userId, clientId)
        for version in template.versionList:
            if pluginId == version.game.gameId:
                isValidPluginId = True
                break
    if not isValidPluginId:
        TodoTaskHelper.sendTodoTask(HALL_GAMEID, userId, TodoTaskPopTip('该安装包不支持此房间号所对应的玩法'))
        return
    ftlog.info('hall_friend_table.enterFriendTable userId:', userId, ' lead to pluginId:', pluginId)
    pluginId = int(pluginId)
    TYGame(pluginId).enterFriendTable(userId, gameId, clientId, ftId)
def queryFriendTable(ftId):
    """Return the plugin game id owning room ftId (None/empty if unused)."""
    pluginId = daobase.executeMixCmd('HGET', FRIEND_TABLE_MAIN_KEY + ftId, FRIEND_TABLE_PLUGIN)
    return pluginId
def releaseFriendTable(pluginId, ftId):
    '''Release (recycle) a lobby friend-table room number.

    pluginId - plugin game id the room belonged to (used for logging only)
    ftId     - 6-digit lobby room number to free
    '''
    ftlog.info('hall_friend_table.releaseFriendTable pluginId:', pluginId
               , ' ftId:', ftId)
    # Deleting the hash frees the room number for future allocation.
    daobase.executeMixCmd('DEL', FRIEND_TABLE_MAIN_KEY + ftId)
def _initialize():
    """Seed redis with the full shuffled pool of friend-table room numbers.

    Runs only in the CENTER server process.  If the stored list length
    differs from FRIEND_TABLE_RANDOM_COUNT the list is rebuilt: numbers
    0..FRIEND_TABLE_RANDOM_COUNT-1 are shuffled and LPUSHed in batches.

    Improvement over the original: the LPUSH of each 100-element batch was
    written out as 100 hand-indexed arguments (rList[begin], ...,
    rList[begin + 99]); it is replaced by slice unpacking, which passes
    exactly the same arguments in the same order.
    """
    if gdata.serverType() != gdata.SRV_TYPE_CENTER:
        return
    ftlog.debug('hall_friend_table._initialize check friend table randoms in process CT')
    randomLen = daobase.executeMixCmd('LLEN', FRIEND_TABLE_RANDOM_KEY)
    ftlog.debug('hall_friend_table._initialize randomLen:', randomLen)
    if randomLen != FRIEND_TABLE_RANDOM_COUNT:
        ftlog.debug('hall_friend_table._initialize push random begin:', pktimestamp.getCurrentTimestamp())
        daobase.executeMixCmd('DEL', FRIEND_TABLE_RANDOM_KEY)
        rList = list(range(FRIEND_TABLE_RANDOM_COUNT))
        random.shuffle(rList)
        # Push in batches of 100 to keep each redis command reasonably small.
        batchSize = 100
        for begin in range(0, FRIEND_TABLE_RANDOM_COUNT, batchSize):
            daobase.executeMixCmd('LPUSH', FRIEND_TABLE_RANDOM_KEY,
                                  *rList[begin:begin + batchSize])
        ftlog.debug('hall_friend_table._initialize push random end:', pktimestamp.getCurrentTimestamp())
"""
自建桌日志
"""
def getFriendTableInfo(tableNo):
    """Collect the stored parameters of a friend table as a dict.

    Returns whichever of 'pluginId', 'roomId' and 'tableId' are present in
    redis for this room number; missing fields are simply omitted.
    """
    params = {}
    pluginId = daobase.executeMixCmd('HGET', FRIEND_TABLE_MAIN_KEY + tableNo, FRIEND_TABLE_PLUGIN)
    if pluginId:
        params['pluginId'] = pluginId
    roomId = daobase.executeMixCmd('HGET', FRIEND_TABLE_MAIN_KEY + tableNo, FRIEND_TABLE_ROOMID)
    if roomId:
        params['roomId'] = roomId
    tableId = daobase.executeMixCmd('HGET', FRIEND_TABLE_MAIN_KEY + tableNo, FRIEND_TABLE_TABLEID)
    if tableId:
        params['tableId'] = tableId
    ftlog.debug('hall_friend_table.getFriendTableInfo params:', params)
    return params
def gameBegin(tableNo, seats, gameId, roomId, tableId):
    """Log the start of a friend-table game and record in redis which
    concrete room/table the friend table is bound to."""
    ftlog.info('hall_friend_table.gameBegin tableNo=', tableNo
               , ' seats=', seats
               , ' gameId=', gameId
               , ' roomId=', roomId
               , ' tableId=', tableId
               , ' time=', pktimestamp.getCurrentTimestamp())
    daobase.executeMixCmd('HSET', FRIEND_TABLE_MAIN_KEY + tableNo, FRIEND_TABLE_ROOMID, roomId)
    daobase.executeMixCmd('HSET', FRIEND_TABLE_MAIN_KEY + tableNo, FRIEND_TABLE_TABLEID, tableId)
def addOneResult(tableNo, seats, deltaScore, totalScore, curRound, totalRound, gameId, roomId, tableId):
    """Log the result of one round of a friend-table game (log-only)."""
    ftlog.info('hall_friend_table.one_game_result tableNo=', tableNo
               , ' seats=', seats
               , ' deltaScore=', deltaScore
               , ' totalScore=', totalScore
               , ' curRound=', curRound
               , ' totalRound=', totalRound
               , ' gameId=', gameId
               , ' roomId=', roomId
               , ' tableId=', tableId
               , ' time=', pktimestamp.getCurrentTimestamp())
def gameEnd(tableNo, seats, totalScore, totalRound, gameId, roomId, tableId):
    """Log the final result once all rounds of the game are finished."""
    ftlog.info('hall_friend_table.gameEnd tableNo=', tableNo
               , ' seats=', seats
               , ' totalScore=', totalScore
               , ' totalRound=', totalRound
               , ' gameId=', gameId
               , ' roomId=', roomId
               , ' tableId=', tableId
               , ' time=', pktimestamp.getCurrentTimestamp())
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
545e9c52abe5d51cda54bc57e2d7a1abfa427f33 | 6db97ab761d59452c05611354637dfb2ce693c96 | /setup.py | 373a4c95fc18def50d4a44b1db4c7823a36f17d9 | [
"MIT"
] | permissive | Mahdi-Soheyli/compas_fab | e885efbdd5531ae5f245bf02b2f1acce0a308680 | 0e7d426903a5d9a1bca947cd7a1251031c4c71b4 | refs/heads/master | 2020-05-02T16:53:13.265526 | 2019-03-20T13:37:37 | 2019-03-20T13:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import abspath, basename, dirname, join, splitext
from setuptools import find_packages, setup
# Runtime dependencies (compas is pinned to an exact version).
requirements = [
    'compas==0.4.10',
    'roslibpy>=0.4.0',
    'pyserial',
]
# PyPI search keywords.
keywords_list = ['robotic fabrication', 'digital fabrication', 'architecture', 'robotics', 'ros']
# Absolute path of the directory containing this setup.py.
here = abspath(dirname(__file__))
def read(*names, **kwargs):
    """Read a file located relative to this setup.py and return its text.

    *names   -- path segments joined onto the package root
    encoding -- optional keyword, defaults to 'utf8'
    """
    return io.open(
        join(here, *names),
        encoding=kwargs.get('encoding', 'utf8')
    ).read()
about = {}
# Execute __version__.py to populate the metadata dict (title, version, ...).
exec(read('src', 'compas_fab', '__version__.py'), about)
setup(
    name=about['__title__'],
    version=about['__version__'],
    license=about['__license__'],
    description=about['__description__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    url=about['__url__'],
    # Long description: README with the badges section stripped, followed by
    # the changelog with Sphinx roles downgraded to plain literals.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M |
                   re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: IronPython',
        'Topic :: Scientific/Engineering',
    ],
    keywords=keywords_list,
    install_requires=requirements,
    extras_require={},
    entry_points={},
)
| [
"casas@arch.ethz.ch"
] | casas@arch.ethz.ch |
d80e039f2b571e04b30e70764e043f515fe553ee | f8fe280d9ebd953e8a5f65aa2d81bbfc0472b7fd | /cachalot/tests/write.py | a227f5b55ca01c3380c1ea1c7dc128cba17c4d1e | [] | no_license | androidsx/django-cachalot | c33acfe06c3943c8f4fd30cf32f6fa29d56a377b | 39597a2658d700516ccb238ca8f83f2c94980d30 | refs/heads/master | 2020-12-14T09:48:18.837681 | 2016-08-07T19:32:36 | 2016-08-07T19:32:36 | 65,150,484 | 0 | 0 | null | 2016-08-07T19:30:57 | 2016-08-07T19:30:55 | Python | UTF-8 | Python | false | false | 37,022 | py | # coding: utf-8
from __future__ import unicode_literals
from django.contrib.auth.models import User, Permission, Group
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.core.management import call_command
from django.db import connection, transaction
from django.db.models import Count
from django.test import TransactionTestCase, skipUnlessDBFeature
from .models import Test, TestParent, TestChild
class WriteTestCase(TransactionTestCase):
    """
    Tests if every SQL request writing data is not cached and invalidates the
    implied data.
    """
    def setUp(self):
        # Writes cost one extra SQL query on SQLite — hence the recurring
        # ``2 if self.is_sqlite else 1`` query counts in the tests below.
        self.is_sqlite = connection.vendor == 'sqlite'
        if connection.vendor == 'mysql':
            # We need to reopen the connection or Django
            # will execute an extra SQL request below.
            connection.cursor()
    def test_create(self):
        """INSERT queries are never cached and invalidate cached SELECTs."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.all())
        self.assertListEqual(data1, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t1 = Test.objects.create(name='test1')
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t2 = Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.all())
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t3 = Test.objects.create(name='test3')
        with self.assertNumQueries(1):
            data3 = list(Test.objects.all())
        self.assertListEqual(data2, [t1, t2])
        self.assertListEqual(data3, [t1, t2, t3])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t3_copy = Test.objects.create(name='test3')
        self.assertNotEqual(t3_copy, t3)
        with self.assertNumQueries(1):
            data4 = list(Test.objects.all())
        self.assertListEqual(data4, [t1, t2, t3, t3_copy])
    def test_get_or_create(self):
        """
        Tests if the ``SELECT`` query of a ``QuerySet.get_or_create``
        is cached, but not the ``INSERT`` one.
        """
        with self.assertNumQueries(1):
            data1 = list(Test.objects.all())
        self.assertListEqual(data1, [])
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            t, created = Test.objects.get_or_create(name='test')
        self.assertTrue(created)
        with self.assertNumQueries(1):
            t_clone, created = Test.objects.get_or_create(name='test')
        self.assertFalse(created)
        self.assertEqual(t_clone, t)
        # Second identical call: the SELECT is now served from the cache.
        with self.assertNumQueries(0):
            t_clone, created = Test.objects.get_or_create(name='test')
        self.assertFalse(created)
        self.assertEqual(t_clone, t)
        with self.assertNumQueries(1):
            data2 = list(Test.objects.all())
        self.assertListEqual(data2, [t])
    def test_update_or_create(self):
        """``update_or_create`` always writes, so it is never fully cached."""
        with self.assertNumQueries(1):
            self.assertListEqual(list(Test.objects.all()), [])
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            t, created = Test.objects.update_or_create(
                name='test', defaults={'public': True})
        self.assertTrue(created)
        self.assertEqual(t.name, 'test')
        self.assertEqual(t.public, True)
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            t, created = Test.objects.update_or_create(
                name='test', defaults={'public': False})
        self.assertFalse(created)
        self.assertEqual(t.name, 'test')
        self.assertEqual(t.public, False)
        # The number of SQL queries doesn’t decrease because update_or_create
        # always calls an UPDATE, even when data wasn’t changed.
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            t, created = Test.objects.update_or_create(
                name='test', defaults={'public': False})
        self.assertFalse(created)
        self.assertEqual(t.name, 'test')
        self.assertEqual(t.public, False)
        with self.assertNumQueries(1):
            self.assertListEqual(list(Test.objects.all()), [t])
    def test_bulk_create(self):
        """``bulk_create`` invalidates cached querysets over the table."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.all())
        self.assertListEqual(data1, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            unsaved_tests = [Test(name='test%02d' % i) for i in range(1, 11)]
            Test.objects.bulk_create(unsaved_tests)
        self.assertEqual(Test.objects.count(), 10)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            unsaved_tests = [Test(name='test%02d' % i) for i in range(1, 11)]
            Test.objects.bulk_create(unsaved_tests)
        self.assertEqual(Test.objects.count(), 20)
        with self.assertNumQueries(1):
            data2 = list(Test.objects.all())
        self.assertEqual(len(data2), 20)
        # Names come back sorted, each name occurring twice (two batches).
        self.assertListEqual([t.name for t in data2],
                             ['test%02d' % (i // 2) for i in range(2, 22)])
    def test_update(self):
        """Both ``Model.save`` and ``QuerySet.update`` invalidate caches."""
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t = Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            t1 = Test.objects.get()
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t.name = 'test2'
            t.save()
        with self.assertNumQueries(1):
            t2 = Test.objects.get()
        self.assertEqual(t1.name, 'test1')
        self.assertEqual(t2.name, 'test2')
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.update(name='test3')
        with self.assertNumQueries(1):
            t3 = Test.objects.get()
        self.assertEqual(t3.name, 'test3')
    def test_delete(self):
        """Single-object and bulk deletes invalidate cached querysets."""
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t1 = Test.objects.create(name='test1')
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t2 = Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            data1 = list(Test.objects.values_list('name', flat=True))
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t2.delete()
        with self.assertNumQueries(1):
            data2 = list(Test.objects.values_list('name', flat=True))
        self.assertListEqual(data1, [t1.name, t2.name])
        self.assertListEqual(data2, [t1.name])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.bulk_create([Test(name='test%s' % i)
                                      for i in range(2, 11)])
        with self.assertNumQueries(1):
            self.assertEqual(Test.objects.count(), 10)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.all().delete()
        with self.assertNumQueries(1):
            self.assertEqual(Test.objects.count(), 0)
    def test_invalidate_exists(self):
        """A cached ``.exists()`` result is invalidated by an INSERT."""
        with self.assertNumQueries(1):
            self.assertFalse(Test.objects.exists())
        Test.objects.create(name='test')
        # NOTE(review): this asserts create() (always truthy) under a
        # write-query count; given the test name, ``Test.objects.exists()``
        # under assertNumQueries(1) may have been intended — verify upstream.
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            self.assertTrue(Test.objects.create())
    def test_invalidate_count(self):
        """A cached ``.count()`` is invalidated by each INSERT."""
        with self.assertNumQueries(1):
            self.assertEqual(Test.objects.count(), 0)
        Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            self.assertEqual(Test.objects.count(), 1)
        Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            self.assertEqual(Test.objects.count(), 2)
    def test_invalidate_get(self):
        """Cached ``.get()`` results (including exceptions) are invalidated."""
        with self.assertNumQueries(1):
            with self.assertRaises(Test.DoesNotExist):
                Test.objects.get(name='test')
        Test.objects.create(name='test')
        with self.assertNumQueries(1):
            Test.objects.get(name='test')
        Test.objects.create(name='test')
        with self.assertNumQueries(1):
            with self.assertRaises(MultipleObjectsReturned):
                Test.objects.get(name='test')
    def test_invalidate_values(self):
        """``.values()`` querysets are invalidated by INSERT and DELETE."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.values('name', 'public'))
        self.assertListEqual(data1, [])
        Test.objects.bulk_create([Test(name='test1'),
                                  Test(name='test2', public=True)])
        with self.assertNumQueries(1):
            data2 = list(Test.objects.values('name', 'public'))
        self.assertEqual(len(data2), 2)
        self.assertDictEqual(data2[0], {'name': 'test1', 'public': False})
        self.assertDictEqual(data2[1], {'name': 'test2', 'public': True})
        Test.objects.all()[0].delete()
        with self.assertNumQueries(1):
            data3 = list(Test.objects.values('name', 'public'))
        self.assertEqual(len(data3), 1)
        self.assertDictEqual(data3[0], {'name': 'test2', 'public': True})
    def test_invalidate_foreign_key(self):
        """Writes on either side of a foreign key invalidate cached reads."""
        with self.assertNumQueries(1):
            data1 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data1, [])
        u1 = User.objects.create_user('user1')
        Test.objects.bulk_create([Test(name='test1', owner=u1),
                                  Test(name='test2')])
        with self.assertNumQueries(2):
            data2 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data2, ['user1'])
        Test.objects.create(name='test3')
        with self.assertNumQueries(1):
            data3 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data3, ['user1'])
        t2 = Test.objects.get(name='test2')
        t2.owner = u1
        t2.save()
        with self.assertNumQueries(1):
            data4 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data4, ['user1', 'user1'])
        u2 = User.objects.create_user('user2')
        Test.objects.filter(name='test3').update(owner=u2)
        with self.assertNumQueries(3):
            data5 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data5, ['user1', 'user1', 'user2'])
        User.objects.filter(username='user2').update(username='user3')
        with self.assertNumQueries(2):
            data6 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data6, ['user1', 'user1', 'user3'])
        u2 = User.objects.create_user('user2')
        Test.objects.filter(name='test2').update(owner=u2)
        with self.assertNumQueries(4):
            data7 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data7, ['user1', 'user2', 'user3'])
        # Everything is now cached: no queries at all.
        with self.assertNumQueries(0):
            data8 = [t.owner.username for t in Test.objects.all() if t.owner]
        self.assertListEqual(data8, ['user1', 'user2', 'user3'])
    def test_invalidate_many_to_many(self):
        """Updating a related object invalidates cached M2M querysets."""
        u = User.objects.create_user('test_user')
        ct = ContentType.objects.get_for_model(User)
        discuss = Permission.objects.create(
            name='Can discuss', content_type=ct, codename='discuss')
        touch = Permission.objects.create(
            name='Can touch', content_type=ct, codename='touch')
        cuddle = Permission.objects.create(
            name='Can cuddle', content_type=ct, codename='cuddle')
        u.user_permissions.add(discuss, touch, cuddle)
        with self.assertNumQueries(1):
            data1 = [p.codename for p in u.user_permissions.all()]
        self.assertListEqual(data1, ['cuddle', 'discuss', 'touch'])
        touch.name = 'Can lick'
        touch.codename = 'lick'
        touch.save()
        with self.assertNumQueries(1):
            data2 = [p.codename for p in u.user_permissions.all()]
        self.assertListEqual(data2, ['cuddle', 'discuss', 'lick'])
        Permission.objects.filter(pk=discuss.pk).update(
            name='Can finger', codename='finger')
        with self.assertNumQueries(1):
            data3 = [p.codename for p in u.user_permissions.all()]
        self.assertListEqual(data3, ['cuddle', 'finger', 'lick'])
    def test_invalidate_aggregate(self):
        """Aggregates over a relation invalidate on writes to either table."""
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.aggregate(n=Count('test'))['n'], 0)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            u = User.objects.create_user('test')
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.aggregate(n=Count('test'))['n'], 0)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.aggregate(n=Count('test'))['n'], 0)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test2', owner=u)
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.aggregate(n=Count('test'))['n'], 1)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test3')
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.aggregate(n=Count('test'))['n'], 1)
    def test_invalidate_annotate(self):
        """Annotated querysets invalidate on writes to related tables."""
        with self.assertNumQueries(1):
            data1 = list(User.objects.annotate(n=Count('test')).order_by('pk'))
        self.assertListEqual(data1, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            data2 = list(User.objects.annotate(n=Count('test')).order_by('pk'))
        self.assertListEqual(data2, [])
        with self.assertNumQueries(4 if self.is_sqlite else 2):
            user1 = User.objects.create_user('user1')
            user2 = User.objects.create_user('user2')
        with self.assertNumQueries(1):
            data3 = list(User.objects.annotate(n=Count('test')).order_by('pk'))
        self.assertListEqual(data3, [user1, user2])
        self.assertListEqual([u.n for u in data3], [0, 0])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test2', owner=user1)
        with self.assertNumQueries(1):
            data4 = list(User.objects.annotate(n=Count('test')).order_by('pk'))
        self.assertListEqual(data4, [user1, user2])
        self.assertListEqual([u.n for u in data4], [1, 0])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.bulk_create([
                Test(name='test3', owner=user1),
                Test(name='test4', owner=user2),
                Test(name='test5', owner=user1),
                Test(name='test6', owner=user2),
            ])
        with self.assertNumQueries(1):
            data5 = list(User.objects.annotate(n=Count('test')).order_by('pk'))
        self.assertListEqual(data5, [user1, user2])
        self.assertListEqual([u.n for u in data5], [3, 2])
    def test_invalidate_subquery(self):
        """Writes to tables referenced only in subqueries still invalidate."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.filter(owner__in=User.objects.all()))
        self.assertListEqual(data1, [])
        u = User.objects.create_user('test')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.filter(owner__in=User.objects.all()))
        self.assertListEqual(data2, [])
        t = Test.objects.create(name='test', owner=u)
        with self.assertNumQueries(1):
            data3 = list(Test.objects.filter(owner__in=User.objects.all()))
        self.assertListEqual(data3, [t])
        with self.assertNumQueries(1):
            data4 = list(
                Test.objects.filter(
                    owner__groups__permissions__in=Permission.objects.all()
                ).distinct())
        self.assertListEqual(data4, [])
        g = Group.objects.create(name='test_group')
        with self.assertNumQueries(1):
            data5 = list(
                Test.objects.filter(
                    owner__groups__permissions__in=Permission.objects.all()
                ).distinct())
        self.assertListEqual(data5, [])
        p = Permission.objects.first()
        g.permissions.add(p)
        with self.assertNumQueries(1):
            data6 = list(
                Test.objects.filter(
                    owner__groups__permissions__in=Permission.objects.all()
                ).distinct())
        self.assertListEqual(data6, [])
        u.groups.add(g)
        with self.assertNumQueries(1):
            data7 = list(
                Test.objects.filter(
                    owner__groups__permissions__in=Permission.objects.all()
                ).distinct())
        self.assertListEqual(data7, [t])
        with self.assertNumQueries(1):
            data8 = list(
                User.objects.filter(user_permissions__in=g.permissions.all())
            )
        self.assertListEqual(data8, [])
        u.user_permissions.add(p)
        with self.assertNumQueries(1):
            data9 = list(
                User.objects.filter(user_permissions__in=g.permissions.all())
            )
        self.assertListEqual(data9, [u])
        g.permissions.remove(p)
        with self.assertNumQueries(1):
            data10 = list(
                User.objects.filter(user_permissions__in=g.permissions.all())
            )
        self.assertListEqual(data10, [])
        with self.assertNumQueries(1):
            data11 = list(User.objects.exclude(user_permissions=None))
        self.assertListEqual(data11, [u])
        u.user_permissions.clear()
        with self.assertNumQueries(1):
            data12 = list(User.objects.exclude(user_permissions=None))
        self.assertListEqual(data12, [])
    def test_invalidate_nested_subqueries(self):
        """M2M writes invalidate querysets built from nested subqueries."""
        with self.assertNumQueries(1):
            data1 = list(
                User.objects.filter(
                    pk__in=User.objects.filter(
                        user_permissions__in=Permission.objects.all()
                    )
                )
            )
        self.assertListEqual(data1, [])
        u = User.objects.create_user('test')
        with self.assertNumQueries(1):
            data2 = list(
                User.objects.filter(
                    pk__in=User.objects.filter(
                        user_permissions__in=Permission.objects.all()
                    )
                )
            )
        self.assertListEqual(data2, [])
        p = Permission.objects.first()
        u.user_permissions.add(p)
        with self.assertNumQueries(1):
            data3 = list(
                User.objects.filter(
                    pk__in=User.objects.filter(
                        user_permissions__in=Permission.objects.all()
                    )
                )
            )
        self.assertListEqual(data3, [u])
        with self.assertNumQueries(1):
            data4 = list(
                User.objects.filter(
                    pk__in=User.objects.filter(
                        pk__in=User.objects.filter(
                            user_permissions__in=Permission.objects.all()
                        )
                    )
                )
            )
        self.assertListEqual(data4, [u])
        u.user_permissions.remove(p)
        with self.assertNumQueries(1):
            data5 = list(
                User.objects.filter(
                    pk__in=User.objects.filter(
                        pk__in=User.objects.filter(
                            user_permissions__in=Permission.objects.all()
                        )
                    )
                )
            )
        self.assertListEqual(data5, [])
    def test_invalidate_select_related(self):
        """``select_related`` querysets invalidate on writes to both tables."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.select_related('owner'))
        self.assertListEqual(data1, [])
        with self.assertNumQueries(4 if self.is_sqlite else 2):
            u1 = User.objects.create_user('test1')
            u2 = User.objects.create_user('test2')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.select_related('owner'))
        self.assertListEqual(data2, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.bulk_create([
                Test(name='test1', owner=u1),
                Test(name='test2', owner=u2),
                Test(name='test3', owner=u2),
                Test(name='test4', owner=u1),
            ])
        with self.assertNumQueries(1):
            data3 = list(Test.objects.select_related('owner'))
        # Owners were fetched by the JOIN, so no extra queries here.
        self.assertEqual(data3[0].owner, u1)
        self.assertEqual(data3[1].owner, u2)
        self.assertEqual(data3[2].owner, u2)
        self.assertEqual(data3[3].owner, u1)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.filter(name__in=['test1', 'test2']).delete()
        with self.assertNumQueries(1):
            data4 = list(Test.objects.select_related('owner'))
        self.assertEqual(data4[0].owner, u2)
        self.assertEqual(data4[1].owner, u1)
    def test_invalidate_prefetch_related(self):
        """``prefetch_related`` caches invalidate on writes along the chain."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertListEqual(data1, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t1 = Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertListEqual(data2, [t1])
        self.assertEqual(data2[0].owner, None)
        with self.assertNumQueries(4 if self.is_sqlite else 2):
            u = User.objects.create_user('user')
            t1.owner = u
            t1.save()
        with self.assertNumQueries(2):
            data3 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertListEqual(data3, [t1])
        self.assertEqual(data3[0].owner, u)
        self.assertListEqual(list(data3[0].owner.groups.all()), [])
        with self.assertNumQueries(9 if self.is_sqlite else 6):
            group = Group.objects.create(name='test_group')
            permissions = list(Permission.objects.all()[:5])
            group.permissions.add(*permissions)
            u.groups.add(group)
        with self.assertNumQueries(2):
            data4 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertListEqual(data4, [t1])
        owner = data4[0].owner
        self.assertEqual(owner, u)
        # All related accesses below hit the prefetch cache: no queries.
        groups = list(owner.groups.all())
        self.assertListEqual(groups, [group])
        self.assertListEqual(list(groups[0].permissions.all()),
                             permissions)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t2 = Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            data5 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertListEqual(data5, [t1, t2])
        owners = [t.owner for t in data5 if t.owner is not None]
        self.assertListEqual(owners, [u])
        groups = [g for o in owners for g in o.groups.all()]
        self.assertListEqual(groups, [group])
        data5_permissions = [p for g in groups
                             for p in g.permissions.all()]
        self.assertListEqual(data5_permissions, permissions)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            permissions[0].save()
        with self.assertNumQueries(1):
            list(Test.objects.select_related('owner')
                 .prefetch_related('owner__groups__permissions'))
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            group.name = 'modified_test_group'
            group.save()
        with self.assertNumQueries(2):
            data6 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        g = list(data6[0].owner.groups.all())[0]
        self.assertEqual(g.name, 'modified_test_group')
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            User.objects.update(username='modified_user')
        with self.assertNumQueries(2):
            data7 = list(Test.objects.select_related('owner')
                         .prefetch_related('owner__groups__permissions'))
        self.assertEqual(data7[0].owner.username, 'modified_user')
    @skipUnlessDBFeature('has_select_for_update')
    def test_invalidate_select_for_update(self):
        """``select_for_update`` querysets are cached and invalidated too."""
        with self.assertNumQueries(1):
            Test.objects.bulk_create([Test(name='test1'), Test(name='test2')])
        with self.assertNumQueries(1):
            with transaction.atomic():
                data1 = list(Test.objects.select_for_update())
        self.assertListEqual([t.name for t in data1],
                             ['test1', 'test2'])
        with self.assertNumQueries(1):
            with transaction.atomic():
                qs = Test.objects.select_for_update()
                qs.update(name='test3')
        with self.assertNumQueries(1):
            with transaction.atomic():
                data2 = list(Test.objects.select_for_update())
        self.assertListEqual([t.name for t in data2], ['test3'] * 2)
    def test_invalidate_extra_select(self):
        """``extra(select=...)`` invalidates on writes to any referenced table."""
        user = User.objects.create_user('user')
        t1 = Test.objects.create(name='test1', owner=user, public=True)
        username_length_sql = """
        SELECT LENGTH(%(user_table)s.username)
        FROM %(user_table)s
        WHERE %(user_table)s.id = %(test_table)s.owner_id
        """ % {'user_table': User._meta.db_table,
               'test_table': Test._meta.db_table}
        with self.assertNumQueries(1):
            data1 = list(Test.objects.extra(
                select={'username_length': username_length_sql}))
        self.assertListEqual(data1, [t1])
        self.assertListEqual([o.username_length for o in data1], [4])
        Test.objects.update(public=False)
        with self.assertNumQueries(1):
            data2 = list(Test.objects.extra(
                select={'username_length': username_length_sql}))
        self.assertListEqual(data2, [t1])
        self.assertListEqual([o.username_length for o in data2], [4])
        admin = User.objects.create_superuser('admin',
                                              'admin@test.me', 'password')
        with self.assertNumQueries(1):
            data3 = list(Test.objects.extra(
                select={'username_length': username_length_sql}))
        self.assertListEqual(data3, [t1])
        self.assertListEqual([o.username_length for o in data3], [4])
        t2 = Test.objects.create(name='test2', owner=admin)
        with self.assertNumQueries(1):
            data4 = list(Test.objects.extra(
                select={'username_length': username_length_sql}))
        self.assertListEqual(data4, [t1, t2])
        self.assertListEqual([o.username_length for o in data4], [4, 5])
    def test_invalidate_having(self):
        """Aggregation-filtered (HAVING) querysets invalidate on M2M writes."""
        with self.assertNumQueries(1):
            data1 = list(User.objects.annotate(n=Count('user_permissions'))
                         .filter(n__gte=1))
        self.assertListEqual(data1, [])
        u = User.objects.create_user('user')
        with self.assertNumQueries(1):
            data2 = list(User.objects.annotate(n=Count('user_permissions'))
                         .filter(n__gte=1))
        self.assertListEqual(data2, [])
        p = Permission.objects.first()
        p.save()
        with self.assertNumQueries(1):
            data3 = list(User.objects.annotate(n=Count('user_permissions'))
                         .filter(n__gte=1))
        self.assertListEqual(data3, [])
        u.user_permissions.add(p)
        with self.assertNumQueries(1):
            data3 = list(User.objects.annotate(n=Count('user_permissions'))
                         .filter(n__gte=1))
        self.assertListEqual(data3, [u])
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.annotate(n=Count('user_permissions'))
                             .filter(n__gte=1).count(), 1)
        u.user_permissions.clear()
        with self.assertNumQueries(1):
            self.assertEqual(User.objects.annotate(n=Count('user_permissions'))
                             .filter(n__gte=1).count(), 0)
    def test_invalidate_extra_where(self):
        """``extra(where=...)`` invalidates on writes to referenced tables."""
        sql_condition = ("owner_id IN "
                         "(SELECT id FROM auth_user WHERE username = 'admin')")
        with self.assertNumQueries(1):
            data1 = list(Test.objects.extra(where=[sql_condition]))
        self.assertListEqual(data1, [])
        admin = User.objects.create_superuser('admin',
                                              'admin@test.me', 'password')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.extra(where=[sql_condition]))
        self.assertListEqual(data2, [])
        t = Test.objects.create(name='test', owner=admin)
        with self.assertNumQueries(1):
            data3 = list(Test.objects.extra(where=[sql_condition]))
        self.assertListEqual(data3, [t])
        admin.username = 'modified'
        admin.save()
        with self.assertNumQueries(1):
            data4 = list(Test.objects.extra(where=[sql_condition]))
        self.assertListEqual(data4, [])
    def test_invalidate_extra_tables(self):
        """``extra(tables=...)`` cross joins invalidate on writes to any table."""
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            User.objects.create_user('user1')
        with self.assertNumQueries(1):
            data1 = list(Test.objects.all().extra(tables=['auth_user']))
        self.assertListEqual(data1, [])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t1 = Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.all().extra(tables=['auth_user']))
        self.assertListEqual(data2, [t1])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            t2 = Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            data3 = list(Test.objects.all().extra(tables=['auth_user']))
        self.assertListEqual(data3, [t1, t2])
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            User.objects.create_user('user2')
        with self.assertNumQueries(1):
            data4 = list(Test.objects.all().extra(tables=['auth_user']))
        # Cartesian product: each Test row appears once per user.
        self.assertListEqual(data4, [t1, t1, t2, t2])
    def test_invalidate_extra_order_by(self):
        """``extra(order_by=...)`` querysets invalidate on INSERT."""
        with self.assertNumQueries(1):
            data1 = list(Test.objects.extra(order_by=['-cachalot_test.name']))
        self.assertListEqual(data1, [])
        t1 = Test.objects.create(name='test1')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.extra(order_by=['-cachalot_test.name']))
        self.assertListEqual(data2, [t1])
        t2 = Test.objects.create(name='test2')
        with self.assertNumQueries(1):
            data2 = list(Test.objects.extra(order_by=['-cachalot_test.name']))
        self.assertListEqual(data2, [t2, t1])
    def test_invalidate_table_inheritance(self):
        """Writes to parent or child table invalidate cached child querysets."""
        with self.assertNumQueries(1):
            with self.assertRaises(TestChild.DoesNotExist):
                TestChild.objects.get()
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            t_child = TestChild.objects.create(name='test_child')
        with self.assertNumQueries(1):
            self.assertEqual(TestChild.objects.get(), t_child)
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            TestParent.objects.filter(pk=t_child.pk).update(name='modified')
        with self.assertNumQueries(1):
            modified_t_child = TestChild.objects.get()
        self.assertEqual(modified_t_child.pk, t_child.pk)
        self.assertEqual(modified_t_child.name, 'modified')
        with self.assertNumQueries(3 if self.is_sqlite else 2):
            TestChild.objects.filter(pk=t_child.pk).update(name='modified2')
        with self.assertNumQueries(1):
            modified2_t_child = TestChild.objects.get()
        self.assertEqual(modified2_t_child.pk, t_child.pk)
        self.assertEqual(modified2_t_child.name, 'modified2')
    def test_raw_insert(self):
        """Raw SQL INSERTs executed through a cursor also invalidate caches."""
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                [])
        with self.assertNumQueries(1):
            with connection.cursor() as cursor:
                cursor.execute(
                    "INSERT INTO cachalot_test (name, public) "
                    "VALUES ('test1', %s)", [1 if self.is_sqlite else 'true'])
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['test1'])
        with self.assertNumQueries(1):
            with connection.cursor() as cursor:
                cursor.execute(
                    "INSERT INTO cachalot_test (name, public) "
                    "VALUES ('test2', %s)", [1 if self.is_sqlite else 'true'])
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['test1', 'test2'])
        with self.assertNumQueries(1):
            with connection.cursor() as cursor:
                cursor.executemany(
                    "INSERT INTO cachalot_test (name, public) "
                    "VALUES ('test3', %s)", [[1 if self.is_sqlite else 'true']])
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['test1', 'test2', 'test3'])
    def test_raw_update(self):
        """Raw SQL UPDATEs executed through a cursor also invalidate caches."""
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test')
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['test'])
        with self.assertNumQueries(1):
            with connection.cursor() as cursor:
                cursor.execute("UPDATE cachalot_test SET name = 'new name';")
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['new name'])
    def test_raw_delete(self):
        """Raw SQL DELETEs executed through a cursor also invalidate caches."""
        with self.assertNumQueries(2 if self.is_sqlite else 1):
            Test.objects.create(name='test')
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                ['test'])
        with self.assertNumQueries(1):
            with connection.cursor() as cursor:
                cursor.execute("DELETE FROM cachalot_test;")
        with self.assertNumQueries(1):
            self.assertListEqual(
                list(Test.objects.values_list('name', flat=True)),
                [])
class DatabaseCommandTestCase(TransactionTestCase):
    """
    Tests that management commands writing to the database
    (``flush``, ``loaddata``) invalidate cached querysets.
    """
    def setUp(self):
        self.t = Test.objects.create(name='test1')
    def test_flush(self):
        """``manage.py flush`` empties the tables and invalidates the cache."""
        with self.assertNumQueries(1):
            self.assertListEqual(list(Test.objects.all()), [self.t])
        call_command('flush', verbosity=0, interactive=False)
        if connection.vendor == 'mysql':
            # We need to reopen the connection or Django
            # will execute an extra SQL request below.
            connection.cursor()
        with self.assertNumQueries(1):
            self.assertListEqual(list(Test.objects.all()), [])
    def test_loaddata(self):
        """``manage.py loaddata`` inserts fixtures and invalidates the cache."""
        with self.assertNumQueries(1):
            self.assertListEqual(list(Test.objects.all()), [self.t])
        call_command('loaddata', 'cachalot/tests/loaddata_fixture.json',
                     verbosity=0, interactive=False)
        if connection.vendor == 'mysql':
            # We need to reopen the connection or Django
            # will execute an extra SQL request below.
            connection.cursor()
        with self.assertNumQueries(1):
            self.assertListEqual([t.name for t in Test.objects.all()],
                                 ['test1', 'test2'])
| [
"bordage.bertrand@gmail.com"
] | bordage.bertrand@gmail.com |
58f5a3c0db2393623589440672e2123e1cb50dd6 | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/get_all_companies.py | f8f7073f8e259464c0e996abd799d0c872d58852 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all companies. To create companies, run
create_companies.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp import DfpUtils
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
company_service = client.GetCompanyService(
'https://sandbox.google.com', 'v201103')
# Get companies by statement.
companies = DfpUtils.GetAllEntitiesByStatement(client, 'Company')
# Display results.
for company in companies:
print ('Company with id \'%s\', name \'%s\', and type \'%s\' was found.'
% (company['id'], company['name'], company['type']))
print
print 'Number of results found: %s' % len(companies)
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
070406b840ca91f2dc2ea9342d6a45aed96919b4 | abf4bfa1db4b9bacac3ffb7ab6aeee2e85b8667d | /minerva/controllers/system.py | 5562535ac1c0260f71bec732ad9ab09bb101cf6a | [
"MIT"
] | permissive | gitter-badger/minerva-1 | 0fac217c37992329fe83b1e4b366696ccc97a6aa | 9825c5494e83051afcfdec20771b64475fa35c84 | refs/heads/master | 2022-12-13T05:53:01.959423 | 2020-09-09T02:33:18 | 2020-09-09T02:33:18 | 293,982,383 | 1 | 0 | null | 2020-09-09T02:35:10 | 2020-09-09T02:35:10 | null | UTF-8 | Python | false | false | 1,252 | py | # pylint: disable=unidiomatic-typecheck
import json
from d3rlpy.gpu import get_gpu_count
from flask import Blueprint, jsonify
from ..models.experiment import Experiment, ExperimentSchema
from .project import _process_metrics
system_route = Blueprint('system', __name__)
@system_route.route('/status', methods=['GET'])
def get_system_status():
    """Return device availability and the active experiments on each device.

    Response JSON shape::

        {
            'gpu': {'total': <number of GPUs>, 'jobs': {<device id>: [job, ...]}},
            'cpu': {'jobs': [job, ...]}
        }

    where each ``job`` is a serialized active experiment with its metrics
    processed by ``_process_metrics``.
    """
    n_gpus = get_gpu_count()

    # get all active experiments
    experiments = Experiment.create_query().filter(Experiment.is_active).all()

    gpu_jobs = {}
    cpu_jobs = []
    for experiment in experiments:
        # identify device: the experiment's JSON config may carry a GPU id
        config = json.loads(experiment.config)
        device_id = config.get('use_gpu')

        # make response data
        data = ExperimentSchema().dump(experiment)

        # update status
        _process_metrics(experiment, data)

        # type() rather than isinstance() is deliberate (see the pylint
        # disable at the top of this module): bool is a subclass of int, and
        # a boolean use_gpu value must not be treated as a GPU device id.
        if type(device_id) == int:
            gpu_jobs.setdefault(device_id, []).append(data)
        else:
            cpu_jobs.append(data)

    res = {
        'gpu': {
            'total': n_gpus,
            'jobs': gpu_jobs
        },
        'cpu': {
            'jobs': cpu_jobs
        }
    }

    return jsonify(res)
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
ce66b322309d1345c6a6b04b0e3be71b501b9bbd | 868ac4e558cf5fe945e8b557564f34f79b3ad01e | /purity_fb/purity_fb_1dot8dot1/apis/arrays_api.py | fa76199d59e1896ea44edf7fa710b2e552b63c86 | [
"Apache-2.0"
] | permissive | mabdelhafez/purity_fb_python_client | f4253ce8497fb3cff648e0a0cd1e567f48129fa7 | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | refs/heads/master | 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 | NOASSERTION | 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null | UTF-8 | Python | false | false | 33,413 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8.1 Python SDK
Pure Storage FlashBlade REST 1.8.1 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8.1
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ArraysApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def list_arrays(self, **kwargs):
"""
List arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_with_http_info(**kwargs)
else:
(data) = self.list_arrays_with_http_info(**kwargs)
return data
def list_arrays_with_http_info(self, **kwargs):
"""
List arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_http_specific_performance(self, **kwargs):
"""
List instant or historical http specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_http_specific_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayHttpPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_http_specific_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_http_specific_performance_with_http_info(**kwargs)
return data
def list_arrays_http_specific_performance_with_http_info(self, **kwargs):
"""
List instant or historical http specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_http_specific_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayHttpPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_http_specific_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays/http-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayHttpPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_performance(self, **kwargs):
"""
List instant or historical array performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str protocol: to sample performance of a certain protocol
:return: ArrayPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_performance_with_http_info(**kwargs)
return data
def list_arrays_performance_with_http_info(self, **kwargs):
"""
List instant or historical array performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str protocol: to sample performance of a certain protocol
:return: ArrayPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution', 'protocol']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'protocol' in params:
query_params.append(('protocol', params['protocol']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_s3_specific_performance(self, **kwargs):
"""
List instant or historical object store specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_s3_specific_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayS3PerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_s3_specific_performance_with_http_info(**kwargs)
else:
(data) = self.list_arrays_s3_specific_performance_with_http_info(**kwargs)
return data
def list_arrays_s3_specific_performance_with_http_info(self, **kwargs):
"""
List instant or historical object store specific performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_s3_specific_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:return: ArrayS3PerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_s3_specific_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays/s3-specific-performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayS3PerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_arrays_space(self, **kwargs):
"""
List instant or historical array space
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_space(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str type: to sample space of either file systems, object store, or all
:return: ArraySpaceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_arrays_space_with_http_info(**kwargs)
else:
(data) = self.list_arrays_space_with_http_info(**kwargs)
return data
def list_arrays_space_with_http_info(self, **kwargs):
"""
List instant or historical array space
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_arrays_space_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int start_time: Time to start sample in milliseconds since epoch.
:param int end_time: Time to end sample in milliseconds since epoch.
:param int resolution: sample frequency in milliseconds
:param str type: to sample space of either file systems, object store, or all
:return: ArraySpaceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['start_time', 'end_time', 'resolution', 'type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_arrays_space" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'type' in params:
query_params.append(('type', params['type']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays/space', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArraySpaceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_clients_performance(self, **kwargs):
"""
List client performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_clients_performance(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int limit: limit, should be >= 0
:return: ClientPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_clients_performance_with_http_info(**kwargs)
else:
(data) = self.list_clients_performance_with_http_info(**kwargs)
return data
def list_clients_performance_with_http_info(self, **kwargs):
"""
List client performance
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_clients_performance_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str filter: The filter to be used for query.
:param str sort: The way to order the results.
:param int limit: limit, should be >= 0
:return: ClientPerformanceResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['names', 'filter', 'sort', 'limit']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_clients_performance" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays/clients/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ClientPerformanceResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_arrays(self, array_settings, **kwargs):
"""
Update arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_arrays(array_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PureArray array_settings: (required)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_arrays_with_http_info(array_settings, **kwargs)
else:
(data) = self.update_arrays_with_http_info(array_settings, **kwargs)
return data
def update_arrays_with_http_info(self, array_settings, **kwargs):
"""
Update arrays
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_arrays_with_http_info(array_settings, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param PureArray array_settings: (required)
:return: ArrayResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['array_settings']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_arrays" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'array_settings' is set
if ('array_settings' not in params) or (params['array_settings'] is None):
raise ValueError("Missing the required parameter `array_settings` when calling `update_arrays`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'array_settings' in params:
body_params = params['array_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.8.1/arrays', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ArrayResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
c796a865d390b4c419a61d3e70f78ed63e05345c | 0d043a8bf4f1dbeaadba8690f028016487ce8f6d | /actor/apps.py | d886f2eb326b367e6b994b7a1a5e76c64581c27a | [] | no_license | neldomarcelino/parque | 74db6d750c74dce81f53f09cab19e5a0ecf862ac | 273ec6ec8a67fb78f8068f212f61e73ca2fd754c | refs/heads/main | 2022-06-20T01:36:09.232552 | 2022-06-09T21:04:08 | 2022-06-09T21:04:08 | 226,638,782 | 0 | 0 | null | 2022-04-22T23:25:01 | 2019-12-08T08:47:20 | Python | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class ActorConfig(AppConfig):
name = 'actor'
| [
"guerzeneldo@gmail.com"
] | guerzeneldo@gmail.com |
6ebe68c85e467dcf90b2f16afae750ba710e29da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02900/s183018985.py | 26a0dfad96b01c45de7664128068ededb63e7367 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from fractions import gcd
A,B=map(int,input().split())
p=set([1])
g=gcd(A,B)
for d in range(2,g):
while g%d==0:
p.add(d)
g//=d
if (d*d>=g): break
if g>=2:
p.add(g)
print(len(p))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
15fa11d56b13546651fcfbffea19ed57d799c6fe | fb6d4b48f836b002605ffac15a48b6721a3f5508 | /day4/loop5.py | 5531ff28820af3cb650f002476dd5fa2b61287ae | [] | no_license | aravindanath/GlobeMasterAutomation | 72c7fa1854309a67fc988bf1724f36c1270d1e46 | e02fe2361ec965f8429c32d4b814f647e15fe425 | refs/heads/master | 2021-05-19T17:30:53.932495 | 2020-04-01T17:05:40 | 2020-04-01T17:05:40 | 252,048,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | i = 1
while i < 10:
print(i)
if i == 3:
break
i += 1
| [
"aravindanath86@gmail.com"
] | aravindanath86@gmail.com |
6889e85cc3123edb30ca517b5957ce75bd2bac61 | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /swagger_client/models/i_console_user.py | f159b1559b5b8777fa48a6f57cad912d863bbf0f | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,000 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IConsoleUser(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'links': 'ILinks',
'id': 'str',
'type': 'str'
}
attribute_map = {
'name': 'name',
'links': 'links',
'id': 'id',
'type': 'type'
}
def __init__(self, name=None, links=None, id=None, type=None): # noqa: E501
"""IConsoleUser - a model defined in Swagger""" # noqa: E501
self._name = None
self._links = None
self._id = None
self._type = None
self.discriminator = None
if name is not None:
self.name = name
if links is not None:
self.links = links
if id is not None:
self.id = id
if type is not None:
self.type = type
@property
def name(self):
"""Gets the name of this IConsoleUser. # noqa: E501
:return: The name of this IConsoleUser. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this IConsoleUser.
:param name: The name of this IConsoleUser. # noqa: E501
:type: str
"""
self._name = name
@property
def links(self):
"""Gets the links of this IConsoleUser. # noqa: E501
:return: The links of this IConsoleUser. # noqa: E501
:rtype: ILinks
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this IConsoleUser.
:param links: The links of this IConsoleUser. # noqa: E501
:type: ILinks
"""
self._links = links
@property
def id(self):
"""Gets the id of this IConsoleUser. # noqa: E501
:return: The id of this IConsoleUser. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this IConsoleUser.
:param id: The id of this IConsoleUser. # noqa: E501
:type: str
"""
self._id = id
@property
def type(self):
"""Gets the type of this IConsoleUser. # noqa: E501
:return: The type of this IConsoleUser. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this IConsoleUser.
:param type: The type of this IConsoleUser. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(IConsoleUser, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IConsoleUser):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        # Explicit negation of ==; written out because this generated code
        # also targets interpreters that do not derive __ne__ from __eq__.
        return not self == other
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
cf7a9e665afaffe4e6462083af381e40b8547bc4 | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /AtCoder/ABC/001/a.py | 3d32bd925bddb7222fc59c4b5b344e37ab26caf4 | [] | no_license | takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | Python | UTF-8 | Python | false | false | 105 | py | # https://atcoder.jp/contests/abc001/tasks/abc001_1
# Read two heights from stdin and print how much taller the first one is.
height_yesterday = int(input())
height_today = int(input())
print(height_yesterday - height_today)
| [
"takecian@gmail.com"
] | takecian@gmail.com |
5a06f3632960fa777547ed5e03bf7d29df9e7fe3 | 6a2bfeaec1998675bb539d4ecd64aba04a4f047e | /manage.py | daa4358e274988b74194f48776764498d254ac50 | [] | no_license | arifbd2221/E-commerce-Django-Tokenized-Restful-Api | 38412b979a3b3f28e537080387e8eaceea363a44 | c54b25da74b1c365c2633eafe98925f97d3abd43 | refs/heads/master | 2020-07-18T10:40:13.558785 | 2019-09-04T04:29:59 | 2019-09-04T04:29:59 | 206,231,082 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    # Default settings module for this project; an existing
    # DJANGO_SETTINGS_MODULE environment variable takes precedence.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_react_auth.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` preserves the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to Django using the CLI arguments (e.g. runserver, migrate).
    execute_from_command_line(sys.argv)
# Standard script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| [
"mohidulhoque216@gmail.com"
] | mohidulhoque216@gmail.com |
9af971e9e11904e32f099a08e326fd7f04508864 | 06096594942104b976e3c4dc785320517128a1ba | /tests/test_space/test_skopt_space.py | 6f2ee86ff599fdbb5acff81b7c5f3aa54b1d09ea | [
"MIT"
] | permissive | shaoeric/hyperparameter_hunter | 082fc556fb313235c747ac08b2ba5a9a1907bb40 | 3709d5e97dd23efa0df1b79982ae029789e1af57 | refs/heads/master | 2021-04-08T05:13:42.653573 | 2020-03-21T01:22:12 | 2020-03-21T01:22:12 | 248,743,697 | 0 | 0 | MIT | 2020-03-20T11:55:29 | 2020-03-20T11:55:28 | null | UTF-8 | Python | false | false | 21,450 | py | """Tests the tools defined by :mod:`hyperparameter_hunter.space.dimensions` and
:mod:`hyperparameter_hunter.space.space_core`
Notes
-----
Many of the tests defined herein (although substantially modified) are based on those provided by
the excellent [Scikit-Optimize](https://github.com/scikit-optimize/scikit-optimize) library. See
:mod:`hyperparameter_hunter.optimization.backends.skopt` for a copy of SKOpt's license"""
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.optimization.backends.skopt.engine import Optimizer
from hyperparameter_hunter.space.dimensions import Real, Integer, Categorical
from hyperparameter_hunter.space.space_core import Space, check_dimension, normalize_dimensions
##################################################
# Import Miscellaneous Assets
##################################################
import numbers
import numpy as np
import pytest
##################################################
# Import Learning Assets
##################################################
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal, assert_equal
def check_limits(value, low, high):
    """Assert that `value` lies within the inclusive range [`low`, `high`]."""
    assert low <= value <= high
##################################################
# Dimension Smoke Tests
##################################################
@pytest.mark.parametrize("dimension", [Real, Integer])
@pytest.mark.parametrize("bounds", [(1, 4), (1.0, 4.0)])
def test_numerical_dimension_equality(dimension, bounds: tuple):
    """Numerical dimensions compare equal iff their bounds match exactly."""
    low, high = bounds
    reference = dimension(low, high)
    # Identical bounds -> equal
    assert reference == dimension(low, high)
    # Changing either bound breaks equality
    assert reference != dimension(low, high + 1)
    assert reference != dimension(low + 1, high)
@pytest.mark.parametrize("categories", [("a", "b", "c", "d"), (1.0, 2.0, 3.0, 4.0)])
def test_categorical_dimension_equality(categories):
    """Categorical equality depends on the exact tuple of categories."""
    reference = Categorical(categories)
    assert reference == Categorical(categories)
    # Swapping out the final category must break equality
    assert reference != Categorical(categories[:-1] + ("zzz",))
@pytest.mark.parametrize(
    ["dimension", "random_val"],
    [
        (Real(1.0, 4.0), 2.251066014107722),
        (Real(1, 4), 2.251066014107722),
        (Integer(1, 4), 2),
        (Integer(1.0, 4.0), 2),
        (Categorical([1.0, 2.0, 3.0, 4.0]), 2.0),
        (Categorical(["a", "b", "c", "d"]), "b"),
        (Categorical(["foo", "bar", "baz"]), "bar"),
    ],
)
def test_dimension_rvs(dimension, random_val):
    """Assert random sample is expected.
    A fixed seed makes each draw deterministic; the expected values were
    captured from these exact dimension definitions.
    """
    assert dimension.rvs(random_state=1) == random_val
@pytest.mark.fast_test
@pytest.mark.parametrize(
    ["dim", "expected_repr"],
    [
        (Categorical([1, 2, 3, 4, 5]), "Categorical(categories=(1, 2, 3, 4, 5))"),
        (Categorical([1, 2, 3, 4, 5, 6, 7, 8]), "Categorical(categories=(1, 2, 3, ..., 6, 7, 8))"),
        (Real(0.4, 0.9), "Real(low=0.4, high=0.9, prior='uniform', transform='identity')"),
        (Real(4, 23), "Real(low=4, high=23, prior='uniform', transform='identity')"),
        (Integer(4, 23), "Integer(low=4, high=23)"),
    ],
)
def test_dimension_repr(dim, expected_repr):
    """Each dimension type renders a readable repr; long `Categorical`
    category lists are elided with an ellipsis."""
    assert dim.__repr__() == expected_repr
@pytest.mark.fast_test
def test_real_log_sampling_in_bounds():
    """Values at (and just inside) the upper bound of a log-uniform,
    normalized `Real` survive a transform round-trip and stay in bounds."""
    dim = Real(low=1, high=32, prior="log-uniform", transform="normalize")
    # Round-trip values that sit exactly on / just inside the upper bound;
    # the normalize transform must not push them outside the space.
    for n in (32.0, 31.999999999999999):
        round_tripped = dim.inverse_transform(dim.transform([n]))
        assert np.allclose([n], round_tripped)
        assert n in dim
        assert round_tripped in dim
@pytest.mark.fast_test
def test_real():
    """Uniform and log-uniform `Real` dimensions: samples stay in bounds and
    transforms round-trip."""
    # TODO: Refactor - Use PyTest
    a = Real(1, 25)
    for i in range(50):
        r = a.rvs(random_state=i)
        check_limits(r, 1, 25)
        assert r in a
    random_values = a.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10))
    # Uniform prior uses the identity transform, so transform is a no-op.
    assert_array_equal(a.transform(random_values), random_values)
    assert_array_equal(a.inverse_transform(random_values), random_values)
    log_uniform = Real(10 ** -5, 10 ** 5, prior="log-uniform")
    # A different prior makes the dimension unequal to the uniform one.
    assert log_uniform != Real(10 ** -5, 10 ** 5)
    for i in range(50):
        random_val = log_uniform.rvs(random_state=i)
        check_limits(random_val, 10 ** -5, 10 ** 5)
    random_values = log_uniform.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10))
    transformed_vals = log_uniform.transform(random_values)
    # Log-uniform transform is a base-10 log.
    assert_array_equal(transformed_vals, np.log10(random_values))
    assert_array_equal(log_uniform.inverse_transform(transformed_vals), random_values)
@pytest.mark.fast_test
def test_real_bounds():
    """`Real` membership is inclusive of both endpoints."""
    dim = Real(1.0, 2.1)
    # Inside / on the boundary
    assert 1.0 in dim
    assert 2.09 in dim
    assert 2.1 in dim
    # Just outside either end
    assert 0.99 not in dim
    assert np.nextafter(2.1, 3.0) not in dim
@pytest.mark.fast_test
def test_integer():
    """`Integer(1, 10)` samples stay in bounds and transforms round-trip."""
    a = Integer(1, 10)
    for i in range(50):
        r = a.rvs(random_state=i)
        # Bug fix: the upper bound is the dimension's high (10), not 11.
        # `r in a` below already proves r <= 10, so tightening is safe;
        # using check_limits also matches test_real's style.
        check_limits(r, 1, 10)
        assert r in a
    random_values = a.rvs(random_state=0, n_samples=10)
    assert_array_equal(random_values.shape, (10))
    # Integers use the identity transform by default.
    assert_array_equal(a.transform(random_values), random_values)
    assert_array_equal(a.inverse_transform(random_values), random_values)
@pytest.mark.fast_test
def test_categorical_transform():
    """One-hot transform of a 7-way `Categorical` round-trips exactly."""
    # TODO: Refactor - Use PyTest
    categories = ["apple", "orange", "banana", None, True, False, 3]
    cat = Categorical(categories)
    # Expected one-hot encodings, one row per category above.
    apple = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    orange = [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    banana = [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
    none = [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
    true = [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0]
    false = [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    three = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]
    # One-hot width equals the number of categories.
    assert_equal(cat.transformed_size, 7)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(cat.transform(categories), [apple, orange, banana, none, true, false, three])
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.transform(["apple", "banana"]), [apple, banana])
    assert_array_equal(cat.inverse_transform([apple, orange]), ["apple", "orange"])
    assert_array_equal(cat.inverse_transform([apple, banana]), ["apple", "banana"])
    ent_inverse = cat.inverse_transform([apple, orange, banana, none, true, false, three])
    assert_array_equal(ent_inverse, categories)
@pytest.mark.fast_test
def test_categorical_transform_binary():
    """A two-way `Categorical` is encoded as a single 0/1 column, not one-hot."""
    # TODO: Refactor - Use PyTest
    categories = ["apple", "orange"]
    cat = Categorical(categories)
    apple = [0.0]
    orange = [1.0]
    # Binary case collapses to one transformed column.
    assert_equal(cat.transformed_size, 1)
    assert_equal(cat.transformed_size, cat.transform(["apple"]).size)
    assert_array_equal(cat.transform(categories), [apple, orange])
    assert_array_equal(cat.transform(["apple", "orange"]), [apple, orange])
    assert_array_equal(cat.inverse_transform([apple, orange]), ["apple", "orange"])
    ent_inverse = cat.inverse_transform([apple, orange])
    assert_array_equal(ent_inverse, categories)
@pytest.mark.fast_test
def test_space_consistency():
    """Equivalent dimension specifications (class instances, tuples, numpy
    scalars, explicit prior strings) build equal `Space`s that draw identical
    samples under the same seed."""
    # TODO: Refactor - Use PyTest
    # Reals (uniform)
    s1 = Space([Real(0.0, 1.0)])
    s2 = Space([Real(0.0, 1.0)])
    s3 = Space([Real(0, 1)])
    s4 = Space([(0.0, 1.0)])
    s5 = Space([(0.0, 1.0, "uniform")])
    s6 = Space([(0, 1.0)])
    s7 = Space([(np.float64(0.0), 1.0)])
    s8 = Space([(0, np.float64(1.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    a5 = s5.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_equal(s1, s6)
    assert_equal(s1, s7)
    assert_equal(s1, s8)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)
    assert_array_equal(a1, a5)
    # Reals (log-uniform)
    s1 = Space([Real(10 ** -3.0, 10 ** 3.0, prior="log-uniform")])
    s2 = Space([Real(10 ** -3.0, 10 ** 3.0, prior="log-uniform")])
    s3 = Space([Real(10 ** -3, 10 ** 3, prior="log-uniform")])
    s4 = Space([(10 ** -3.0, 10 ** 3.0, "log-uniform")])
    s5 = Space([(np.float64(10 ** -3.0), 10 ** 3.0, "log-uniform")])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    a4 = s4.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    assert_array_equal(a1, a4)
    # Integers
    s1 = Space([Integer(1, 5)])
    s2 = Space([Integer(1.0, 5.0)])
    s3 = Space([(1, 5)])
    s4 = Space([(np.int64(1.0), 5)])
    s5 = Space([(1, np.int64(5.0))])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_equal(s1, s3)
    assert_equal(s1, s4)
    assert_equal(s1, s5)
    assert_array_equal(a1, a2)
    assert_array_equal(a1, a3)
    # Categoricals
    s1 = Space([Categorical(["a", "b", "c"])])
    s2 = Space([Categorical(["a", "b", "c"])])
    s3 = Space([["a", "b", "c"]])
    a1 = s1.rvs(n_samples=10, random_state=0)
    a2 = s2.rvs(n_samples=10, random_state=0)
    a3 = s3.rvs(n_samples=10, random_state=0)
    assert_equal(s1, s2)
    assert_array_equal(a1, a2)
    assert_equal(s1, s3)
    assert_array_equal(a1, a3)
    s1 = Space([(True, False)])
    s2 = Space([Categorical([True, False])])
    s3 = Space([np.array([True, False])])
    assert s1 == s2 == s3
@pytest.mark.fast_test
def test_space_api():
    """End-to-end smoke test of `Space`: dimension inference, sampling,
    transform/inverse-transform round trips, and bounds reporting."""
    # TODO: Refactor - Use PyTest - Break this up into multiple tests
    space = Space([(0.0, 1.0), (-5, 5), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")])
    # Tuples mixing numbers and strings are inferred as Categorical.
    cat_space = Space([(1, "r"), (1.0, "r")])
    assert isinstance(cat_space.dimensions[0], Categorical)
    assert isinstance(cat_space.dimensions[1], Categorical)
    assert_equal(len(space.dimensions), 5)
    assert isinstance(space.dimensions[0], Real)
    assert isinstance(space.dimensions[1], Integer)
    assert isinstance(space.dimensions[2], Categorical)
    assert isinstance(space.dimensions[3], Real)
    assert isinstance(space.dimensions[4], Categorical)
    samples = space.rvs(n_samples=10, random_state=0)
    assert_equal(len(samples), 10)
    assert_equal(len(samples[0]), 5)
    assert isinstance(samples, list)
    for n in range(4):
        assert isinstance(samples[n], list)
    # Each sampled point carries one native-typed value per dimension.
    assert isinstance(samples[0][0], numbers.Real)
    assert isinstance(samples[0][1], numbers.Integral)
    assert isinstance(samples[0][2], str)
    assert isinstance(samples[0][3], numbers.Real)
    assert isinstance(samples[0][4], str)
    samples_transformed = space.transform(samples)
    assert_equal(samples_transformed.shape[0], len(samples))
    # Transformed width: 1 (Real) + 1 (Integer) + 3 (one-hot) + 1 (Real) + 1 (binary).
    assert_equal(samples_transformed.shape[1], 1 + 1 + 3 + 1 + 1)
    # our space contains mixed types, this means we can't use
    # `array_allclose` or similar to check points are close after a round-trip
    # of transformations
    for orig, round_trip in zip(samples, space.inverse_transform(samples_transformed)):
        assert space.distance(orig, round_trip) < 1.0e-8
    samples = space.inverse_transform(samples_transformed)
    assert isinstance(samples[0][0], numbers.Real)
    assert isinstance(samples[0][1], numbers.Integral)
    assert isinstance(samples[0][2], str)
    assert isinstance(samples[0][3], numbers.Real)
    assert isinstance(samples[0][4], str)
    for b1, b2 in zip(
        space.bounds,
        [(0.0, 1.0), (-5, 5), np.asarray(["a", "b", "c"]), (1.0, 5.0), np.asarray(["e", "f"])],
    ):
        assert_array_equal(b1, b2)
    for b1, b2 in zip(
        space.transformed_bounds,
        [
            (0.0, 1.0),
            (-5, 5),
            (0.0, 1.0),
            (0.0, 1.0),
            (0.0, 1.0),
            (np.log10(1.0), np.log10(5.0)),
            (0.0, 1.0),
        ],
    ):
        assert_array_equal(b1, b2)
@pytest.mark.fast_test
def test_space_from_space():
    """Test that a `Space` instance can be passed to a `Space` constructor"""
    original = Space([(0.0, 1.0), (-5, 5), ("a", "b", "c"), (1.0, 5.0, "log-uniform"), ("e", "f")])
    rebuilt = Space(original)
    assert_equal(original, rebuilt)
@pytest.mark.fast_test
def test_normalize():
    """The 'normalize' transform maps uniform, log-uniform, and integer
    dimensions into [0, 1] and inverts exactly."""
    # TODO: Refactor - Use PyTest
    a = Real(2.0, 30.0, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    rng = np.random.RandomState(0)
    X = rng.randn(100)
    # Rescale the gaussian draws into the dimension's [2, 30] range.
    X = 28 * (X - X.min()) / (X.max() - X.min()) + 2
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)
    # log-uniform prior
    a = Real(10 ** 2.0, 10 ** 4.0, prior="log-uniform", transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 10 ** 2, 10 ** 4)
    rng = np.random.RandomState(0)
    X = np.clip(10 ** 3 * rng.randn(100), 10 ** 2.0, 10 ** 4.0)
    # Check transform
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    assert_array_almost_equal(a.inverse_transform(a.transform(X)), X)
    a = Integer(2, 30, transform="normalize")
    for i in range(50):
        check_limits(a.rvs(random_state=i), 2, 30)
    assert_array_equal(a.transformed_bounds, (0, 1))
    X = rng.randint(2, 31)
    # Check transformed values are in [0, 1]
    assert np.all(a.transform(X) <= np.ones_like(X))
    assert np.all(np.zeros_like(X) <= a.transform(X))
    # Check inverse transform
    X_orig = a.inverse_transform(a.transform(X))
    # Integer round trips must come back as integers, not floats.
    assert_equal(X_orig.dtype, "int64")
    assert_array_equal(X_orig, X)
@pytest.mark.parametrize("dim", [Real, Integer])
@pytest.mark.parametrize("transform", ["normalize", "identity"])
def test_valid_numerical_transformation(dim, transform):
    """Both supported transform names construct a (truthy) dimension."""
    instance = dim(2, 30, transform=transform)
    assert instance
@pytest.mark.parametrize("dim", [Real, Integer])
@pytest.mark.parametrize("transform", ["not a valid transform name"])
def test_invalid_numerical_transformation(dim, transform):
    """Unknown transform names are rejected by `Real` and `Integer`."""
    expected = r"`transform` must be in \['normalize', 'identity'\].*"
    with pytest.raises(ValueError, match=expected):
        dim(2, 30, transform=transform)
@pytest.mark.fast_test
def test_categorical_identity():
    """The identity transform on a `Categorical` is a no-op round trip."""
    categories = ["cat", "dog", "rat"]
    cat = Categorical(categories, transform="identity")
    samples = cat.rvs(100)
    # Note: this draws a second, independent batch of 100 samples.
    assert all(sample in categories for sample in cat.rvs(100))
    transformed = cat.transform(samples)
    assert_array_equal(transformed, samples)
    assert_array_equal(samples, cat.inverse_transform(transformed))
@pytest.mark.fast_test
def test_categorical_distance():
    """Categorical distance is 0 for identical categories, 1 otherwise."""
    categories = ["car", "dog", "orange"]
    cat = Categorical(categories)
    for first in categories:
        for second in categories:
            expected = 0 if first == second else 1
            assert cat.distance(first, second) == expected
@pytest.mark.parametrize("dimension", [Real(1, 10), Integer(1, 10)])
@pytest.mark.parametrize("points", [(11, 10)])
def test_numerical_distance_out_of_range(dimension, points: tuple):
    """Distance involving a point outside the dimension must raise."""
    expected = "Distance computation requires values within space. Received {} and {}".format(*points)
    with pytest.raises(RuntimeError, match=expected):
        dimension.distance(*points)
@pytest.mark.fast_test
def test_integer_distance():
    """Integer distance is the absolute difference of the two points."""
    dim = Integer(1, 10)
    for point in range(1, 10 + 1):
        assert_equal(dim.distance(4, point), abs(4 - point))
@pytest.mark.fast_test
def test_real_distance():
    """Real distance is the absolute difference of the two points."""
    dim = Real(1, 10)
    for point in range(1, 10 + 1):
        assert_equal(dim.distance(4.1234, point), abs(4.1234 - point))
@pytest.mark.parametrize("dimension", [Real, Integer])
@pytest.mark.parametrize("bounds", [(2, 1), (2, 2)])
def test_dimension_bounds(dimension, bounds: tuple):
    """Constructing a dimension whose low >= high raises a ValueError."""
    low, high = bounds
    expected = r"Lower bound \({}\) must be less than the upper bound \({}\)".format(low, high)
    with pytest.raises(ValueError, match=expected):
        dimension(low, high)
@pytest.mark.parametrize(
    "dimension, name",
    [
        (Real(1, 2, name="learning rate"), "learning rate"),
        (Integer(1, 100, name="no of trees"), "no of trees"),
        (Categorical(["red, blue"], name="colors"), "colors"),
    ],
)
def test_dimension_name(dimension, name):
    """A name given at construction is exposed via the `name` attribute."""
    assert dimension.name == name
@pytest.mark.parametrize("dimension", [Real(1, 2), Integer(1, 100), Categorical(["red, blue"])])
def test_dimension_name_none(dimension):
    """Dimensions constructed without a name default to `name is None`."""
    assert dimension.name is None
@pytest.mark.parametrize("name", [1, 1.0, True])
def test_dimension_with_invalid_names(name):
    """Non-string, non-tuple names are rejected at construction time."""
    expected = "Dimension's name must be one of: string, tuple, or None"
    with pytest.raises(ValueError, match=expected):
        Real(1, 2, name=name)
@pytest.mark.fast_test
def test_purely_categorical_space():
    """Regression test for scikit-optimize#908: telling a result for a space
    made up exclusively of `Categorical` dimensions used to raise."""
    dimensions = [Categorical(["a", "b", "c"]), Categorical(["A", "B", "C"])]
    optimizer = Optimizer(dimensions, n_initial_points=1, random_state=3)
    suggestion = optimizer.ask()
    # Before the fix this call raised an exception
    optimizer.tell(suggestion, 1.0)
##################################################
# `space_core.normalize_dimensions` Tests
##################################################
@pytest.mark.fast_test
@pytest.mark.parametrize("dimensions", [(["a", "b", "c"], ["1", "2", "3"])])
def test_normalize_dimensions_all_categorical(dimensions):
    """`normalize_dimensions` on purely-categorical input yields a space whose
    `is_categorical` attribute is True."""
    assert normalize_dimensions(dimensions).is_categorical
@pytest.mark.fast_test
@pytest.mark.parametrize(
    "dimensions, normalizations",
    [
        (((1, 3), (1.0, 3.0)), ("normalize", "normalize")),
        (((1, 3), ("a", "b", "c")), ("normalize", "onehot")),
    ],
)
def test_normalize_dimensions_transform(dimensions, normalizations):
    """After `normalize_dimensions`, each dimension's `transform_` matches the
    expected normalization for its type."""
    space = normalize_dimensions(dimensions)
    for dim, expected in zip(space, normalizations):
        assert dim.transform_ == expected
@pytest.mark.fast_test
@pytest.mark.parametrize(
    "dimension, name",
    [
        (Real(1, 2, name="learning rate"), "learning rate"),
        (Integer(1, 100, name="no of trees"), "no of trees"),
        (Categorical(["red, blue"], name="colors"), "colors"),
    ],
)
def test_normalize_dimensions_name(dimension, name):
    """`normalize_dimensions` leaves each dimension's `name` unchanged."""
    normalized = normalize_dimensions([dimension])
    assert normalized.dimensions[0].name == name
@pytest.mark.parametrize(
    "dimensions",
    [
        ((1, 3), (1.0, 3.0)),
        ((1, 3), ("a", "b", "c")),
        (["a", "b", "c"], ["1", "2", "3"]),
        (["a", "b", "c"], ["1", "2", "3"], ["foo", "bar"]),
        ((1, 3), (1.0, 3.0), ["a", "b", "c"], ["1", "2", "3"]),
    ],
)
def test_normalize_dimensions_consecutive_calls(dimensions):
    """Test that :func:`normalize_dimensions` can be safely invoked consecutively on the space each
    invocation returns. This doesn't test that the result of :func:`normalize_dimensions` is
    actually correct - Only that the result remains unchanged after multiple invocations"""
    # Chain of repeated normalizations starting from the raw tuples...
    spaces = [normalize_dimensions(dimensions)]
    for _ in range(2):
        spaces.append(normalize_dimensions(spaces[-1]))
    # ...and the same chain starting from an explicit `Space` instance.
    spaces.append(normalize_dimensions(Space(dimensions)))
    for _ in range(2):
        spaces.append(normalize_dimensions(spaces[-1]))
    reference = spaces[0]
    assert all(space == reference for space in spaces[1:])
##################################################
# `space_core.check_dimension` Tests
##################################################
@pytest.mark.fast_test
@pytest.mark.parametrize("dim", ["23"])
def test_invalid_check_dimension(dim):
    """A bare string is not a valid dimension specification."""
    with pytest.raises(ValueError, match="Dimension has to be a list or tuple"):
        # Bug fix: use the parametrized `dim` instead of a hard-coded literal,
        # so additional invalid values can be added to the parametrize list.
        check_dimension(dim)
@pytest.mark.parametrize("dim", [(23,)])
def test_valid_check_dimension(dim):
    """A single-value tuple fixes that dimension of the space, so it is valid."""
    check_dimension(dim)
| [
"hunter@mcgushion.com"
] | hunter@mcgushion.com |
461981baa6d5e7e3e901b71df7640e0723b32d40 | 719853613b5b96f02072be1fde736d883e799f02 | /server/accounts/urls.py | 106d5275a4cf4eb733a549e205bf7f2113d2753e | [
"MIT"
] | permissive | anmolkabra/opensurfaces | 5ba442123586533a93eb29890fa1694e3efdbfe8 | a42420083a777d7e1906506cc218f681c5cd145b | refs/heads/master | 2020-03-20T01:11:05.182880 | 2018-06-13T14:55:45 | 2018-06-13T14:55:45 | 137,068,945 | 0 | 0 | MIT | 2018-06-12T12:32:53 | 2018-06-12T12:32:52 | null | UTF-8 | Python | false | false | 178 | py | from django.conf.urls import patterns, url
from accounts.views import admin_shell
# URLconf for the accounts app: exposes only the admin shell view.
# NOTE(review): `patterns('', ...)` is the legacy Django URLconf API
# (removed in Django 1.10); a plain list of url()/path() entries is the
# modern equivalent — confirm the project's Django version before changing.
urlpatterns = patterns(
    '',
    url(r'^admin-shell/$', admin_shell, name='admin-shell'),
)
| [
"sbell@cs.cornell.edu"
] | sbell@cs.cornell.edu |
c7cfcaf109b00f593c5d0e7c165d89cac1516f38 | b289a2c1b42e17a2338c5414b6831f9cd44cb2dd | /valarie/executor/system.py | 496768e72ee769b6de032fc723323797ee8f093f | [
"MIT"
] | permissive | phnomcobra/valarie | fa2d3136092c80aeaca5474afe0ce726e36ade25 | 83bedeb50be5ab385c0851bf53044ee583e1adfd | refs/heads/master | 2022-10-27T21:38:23.669304 | 2022-10-20T12:44:03 | 2022-10-20T12:44:03 | 163,562,318 | 0 | 0 | MIT | 2022-10-09T03:30:39 | 2018-12-30T04:49:11 | JavaScript | UTF-8 | Python | false | false | 898 | py | #!/usr/bin/python3
"""This module implements system for the executor module."""
from subprocess import Popen, PIPE
from valarie.controller import logging
def system(command: str) -> int:
    """Execute a command on the local system.

    The command's standard output is logged at debug level and its
    standard error at error level; the command itself and its return
    code are logged at info level.

    Args:
        command:
            The command line to execute.

    Returns:
        The process's return code as an integer.
    """
    # NOTE(review): shell=True runs `command` through the shell — it must
    # never be built from untrusted input (shell-injection risk).
    logging.info(command)
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    output_buffer, stderr_buffer = process.communicate()
    # Fix: drop the redundant str() wrappers — bytes.decode() already
    # returns a str.
    stdout = output_buffer.decode().strip()
    stderr = stderr_buffer.decode().strip()
    if len(stdout) > 0:
        logging.debug(stdout)
    if len(stderr) > 0:
        logging.error(stderr)
    logging.info(f'returned {process.returncode}')
    return process.returncode
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
c91dfca5f72120badcf9148ba66d5d8015240b3d | 056863ff17af7121b0348db31bf28836ff584e4a | /53.py | 157faab99a5c9c28d7742036e8fdb561f6a72687 | [] | no_license | saranya258/python | 1face016cdd52c55df6fd6493f5f2aa5bcae9212 | df2274ad72bd36b7eb8cf4a4d2360e40dc902ee0 | refs/heads/master | 2020-06-07T14:19:32.476586 | 2019-07-10T08:53:32 | 2019-07-10T08:53:32 | 193,040,334 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | r=int(input())
# Sum the decimal digits of r (read from stdin above) and print the total.
m = 0
# Fix: removed the unused pre-initialization `i = 0` — the loop assigned
# `i` before every use, so it was dead code.
while r > 0:
    m = m + r % 10
    r = r // 10
print(m)
| [
"noreply@github.com"
] | saranya258.noreply@github.com |
15c9115c35dcba355e97459f453a66e4f5821cd0 | cc9cf69b1534dc0d9530b4ff485084162a404e34 | /leetcode/without/leetcode_70.py | 1f25e342333eabed2950b304d24f79fb7ef1c40e | [] | no_license | NASA2333/study | 99a58b2c9979201e9a4fae0c797391a538de6f45 | ba63bc18f3c788090e43406315497329b00ec0a5 | refs/heads/master | 2021-05-03T22:26:52.541760 | 2018-02-07T02:24:55 | 2018-02-07T02:24:55 | 104,988,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | '''
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Note: Given n will be a positive integer.
Example 1:
Input: 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
Example 2:
Input: 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step
'''
class Solution(object):
    def climbStairs(self, n):
        """Return the number of distinct ways to climb a staircase of `n`
        steps taking 1 or 2 steps at a time (the Fibonacci recurrence:
        ways(i) = ways(i-1) + ways(i-2)).

        :type n: int
        :rtype: int
        """
        # Base cases: 1 step -> 1 way, 2 steps -> 2 ways.
        if n <= 2:
            return n
        # Improvement: keep only the last two counts instead of an O(n)
        # dp list — same answers, O(1) auxiliary space.
        two_back, one_back = 1, 2
        for _ in range(3, n + 1):
            two_back, one_back = one_back, two_back + one_back
        return one_back
# Smoke test when run as a script: climbStairs(6) must equal 13.
if __name__ == "__main__":
    print(Solution().climbStairs(6))
    assert Solution().climbStairs(6) == 13
"422282539@qq.com"
] | 422282539@qq.com |
430b5945a07208c04556532b109363bdb908ea52 | 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36 | /homeassistant/components/balboa/binary_sensor.py | b73872b664710068013ef6e8b5b0aeb2da9c9692 | [
"Apache-2.0"
] | permissive | jason0x43/home-assistant | 9114decaa8f7c2f1582f84e79dc06736b402b008 | 8bf6aba1cf44ee841de063755c935ea78040f399 | refs/heads/dev | 2023-03-04T01:14:10.257593 | 2022-01-01T12:11:56 | 2022-01-01T12:11:56 | 230,622,861 | 1 | 1 | Apache-2.0 | 2023-02-22T06:15:07 | 2019-12-28T14:45:43 | Python | UTF-8 | Python | false | false | 1,768 | py | """Support for Balboa Spa binary sensors."""
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from .const import CIRC_PUMP, DOMAIN, FILTER
from .entity import BalboaEntity
# Truth table indexed by the spa's reported filter mode: each row is
# [filter 1 running, filter 2 running].
FILTER_STATES = [
    [False, False],  # self.FILTER_OFF
    [True, False],  # self.FILTER_1
    [False, True],  # self.FILTER_2
    [True, True],  # self.FILTER_1_2
]
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the spa's binary sensors."""
    spa = hass.data[DOMAIN][entry.entry_id]
    # Every spa exposes two filter cycles (indices 1 and 2).
    sensors = [BalboaSpaFilter(entry, spa, FILTER, index) for index in (1, 2)]
    # A circulation-pump sensor is only added when the spa reports one.
    if spa.have_circ_pump():
        sensors.append(BalboaSpaCircPump(entry, spa, CIRC_PUMP))
    async_add_entities(sensors)
class BalboaSpaBinarySensor(BalboaEntity, BinarySensorEntity):
    """Representation of a Balboa Spa binary sensor entity."""
    # All Balboa binary sensors in this module describe moving water
    # (pump/filter activity), hence the shared MOVING device class.
    _attr_device_class = BinarySensorDeviceClass.MOVING
class BalboaSpaCircPump(BalboaSpaBinarySensor):
    """Binary sensor reporting the spa's circulation pump state."""
    @property
    def is_on(self) -> bool:
        """Return true while the circulation pump is running."""
        return self._client.get_circ_pump()
    @property
    def icon(self):
        """Return a pump icon matching the current on/off state."""
        if self.is_on:
            return "mdi:water-pump"
        return "mdi:water-pump-off"
class BalboaSpaFilter(BalboaSpaBinarySensor):
    """Binary sensor reporting whether one of the spa's filter cycles is on."""
    @property
    def is_on(self) -> bool:
        """Return true if this filter (1 or 2) is running in the current mode."""
        mode = self._client.get_filtermode()
        return FILTER_STATES[mode][self._num - 1]
    @property
    def icon(self):
        """Return a sync icon matching the current on/off state."""
        if self.is_on:
            return "mdi:sync"
        return "mdi:sync-off"
| [
"noreply@github.com"
] | jason0x43.noreply@github.com |
cea2ec05a0670b294d79a17e8de73449644a3eb6 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/authorization/v20190901/get_policy_definition_at_management_group.py | 287a80a7cda013c3317067e3b4fba4c14062bbe9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public names exported by this generated module.
__all__ = [
    'GetPolicyDefinitionAtManagementGroupResult',
    'AwaitableGetPolicyDefinitionAtManagementGroupResult',
    'get_policy_definition_at_management_group',
    'get_policy_definition_at_management_group_output',
]
@pulumi.output_type
class GetPolicyDefinitionAtManagementGroupResult:
    """
    The policy definition.
    Auto-generated Pulumi output type: each field is validated for its
    expected Python type in __init__ and exposed read-only via a
    @pulumi.getter property below.
    """
    def __init__(__self__, description=None, display_name=None, id=None, metadata=None, mode=None, name=None, parameters=None, policy_rule=None, policy_type=None, type=None):
        # Each field follows the same generated pattern: reject truthy
        # values of the wrong type, then store via pulumi.set.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if metadata and not isinstance(metadata, dict):
            raise TypeError("Expected argument 'metadata' to be a dict")
        pulumi.set(__self__, "metadata", metadata)
        if mode and not isinstance(mode, str):
            raise TypeError("Expected argument 'mode' to be a str")
        pulumi.set(__self__, "mode", mode)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if parameters and not isinstance(parameters, dict):
            raise TypeError("Expected argument 'parameters' to be a dict")
        pulumi.set(__self__, "parameters", parameters)
        if policy_rule and not isinstance(policy_rule, dict):
            raise TypeError("Expected argument 'policy_rule' to be a dict")
        pulumi.set(__self__, "policy_rule", policy_rule)
        if policy_type and not isinstance(policy_type, str):
            raise TypeError("Expected argument 'policy_type' to be a str")
        pulumi.set(__self__, "policy_type", policy_type)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The policy definition description.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        The display name of the policy definition.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the policy definition.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def metadata(self) -> Optional[Any]:
        """
        The policy definition metadata. Metadata is an open ended object and is typically a collection of key value pairs.
        """
        return pulumi.get(self, "metadata")
    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        The policy definition mode. Some examples are All, Indexed, Microsoft.KeyVault.Data.
        """
        return pulumi.get(self, "mode")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the policy definition.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parameters(self) -> Optional[Mapping[str, 'outputs.ParameterDefinitionsValueResponse']]:
        """
        The parameter definitions for parameters used in the policy rule. The keys are the parameter names.
        """
        return pulumi.get(self, "parameters")
    @property
    @pulumi.getter(name="policyRule")
    def policy_rule(self) -> Optional[Any]:
        """
        The policy rule.
        """
        return pulumi.get(self, "policy_rule")
    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> Optional[str]:
        """
        The type of policy definition. Possible values are NotSpecified, BuiltIn, Custom, and Static.
        """
        return pulumi.get(self, "policy_type")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource (Microsoft.Authorization/policyDefinitions).
        """
        return pulumi.get(self, "type")
class AwaitableGetPolicyDefinitionAtManagementGroupResult(GetPolicyDefinitionAtManagementGroupResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Allow the already-resolved result to be used with ``await``.

        The unreachable ``yield self`` makes this method a generator, which
        is the protocol ``await`` requires; the wrapped result is returned
        immediately without suspending.
        """
        if False:
            yield self
        return GetPolicyDefinitionAtManagementGroupResult(
            description=self.description,
            display_name=self.display_name,
            id=self.id,
            metadata=self.metadata,
            mode=self.mode,
            name=self.name,
            parameters=self.parameters,
            policy_rule=self.policy_rule,
            policy_type=self.policy_type,
            type=self.type)
def get_policy_definition_at_management_group(management_group_id: Optional[str] = None,
                                              policy_definition_name: Optional[str] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyDefinitionAtManagementGroupResult:
    """
    The policy definition.

    :param str management_group_id: The ID of the management group.
    :param str policy_definition_name: The name of the policy definition to get.
    """
    invoke_args = {
        'managementGroupId': management_group_id,
        'policyDefinitionName': policy_definition_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-native:authorization/v20190901:getPolicyDefinitionAtManagementGroup', invoke_args, opts=opts, typ=GetPolicyDefinitionAtManagementGroupResult).value
    return AwaitableGetPolicyDefinitionAtManagementGroupResult(
        description=result.description,
        display_name=result.display_name,
        id=result.id,
        metadata=result.metadata,
        mode=result.mode,
        name=result.name,
        parameters=result.parameters,
        policy_rule=result.policy_rule,
        policy_type=result.policy_type,
        type=result.type)
# NOTE: the body is intentionally empty (`...`): lift_output_func wraps
# get_policy_definition_at_management_group and supplies the implementation,
# lifting plain inputs to pulumi Outputs.
@_utilities.lift_output_func(get_policy_definition_at_management_group)
def get_policy_definition_at_management_group_output(management_group_id: Optional[pulumi.Input[str]] = None,
                                                     policy_definition_name: Optional[pulumi.Input[str]] = None,
                                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPolicyDefinitionAtManagementGroupResult]:
    """
    The policy definition.

    :param str management_group_id: The ID of the management group.
    :param str policy_definition_name: The name of the policy definition to get.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
d140cf5c10646c09460c5fb577904cce75daf311 | cf3e9398e4a1a8b41aa12e3ef42aa2a73bff2507 | /src/compiler/frontend_test.py | bf75f5b52ccb47538806e8ac975c5e988e14b86b | [
"Apache-2.0",
"MIT"
] | permissive | fritzo/pomagma | fb207e8bfd77c7ac592ddb27d5fd3213da50a532 | ad2bf9c12eb58190f2761608c053ac89d3ddf305 | refs/heads/master | 2023-02-24T16:54:31.981623 | 2023-02-10T23:17:42 | 2023-02-10T23:17:42 | 4,943,857 | 12 | 0 | NOASSERTION | 2023-02-10T23:17:43 | 2012-07-08T05:22:16 | C++ | UTF-8 | Python | false | false | 465 | py | import pomagma.util
from pomagma.compiler import __main__ as main
from pomagma.compiler.util import find_theories
from pomagma.util.testing import for_each
@for_each(find_theories())
def test_compile(filename):
    """Smoke-test the compiler on every bundled theory file.

    Runs inside a temporary directory so the emitted artifacts
    (symbols/facts/programs) do not pollute the working tree.
    """
    with pomagma.util.in_temp_dir():
        main.compile(
            filename,
            symbols_out='temp.symbols',
            facts_out='temp.facts',
            programs_out='temp.programs',
            optimized_out='temp.optimized.programs')
| [
"fritz.obermeyer@gmail.com"
] | fritz.obermeyer@gmail.com |
e03fbd20f95a8dd0e7069256533d364d13b34279 | 35f9def6e6d327d3a4a4f2959024eab96f199f09 | /developer/lab/tools/NVIDIA/FasterTransformer/sample/PyTorch/utils/gpt.py | dd5efbb375c41e6f402ef254744b89e62b291c41 | [
"Apache-2.0",
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown"
] | permissive | arXiv-research/DevLab-III-1 | ec10aef27e1ca75f206fea11014da8784752e454 | c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2 | refs/heads/main | 2023-04-16T19:24:58.758519 | 2021-04-28T20:21:23 | 2021-04-28T20:21:23 | 362,599,929 | 2 | 0 | MIT | 2021-04-28T20:36:11 | 2021-04-28T20:36:11 | null | UTF-8 | Python | false | false | 12,159 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import torch
import torch.nn as nn
import numpy as np
import torch.distributed as dist
class GPTWeights(object):
    """Container for all GPT weight tensors, sharded for tensor/layer parallelism.

    Tensors are accumulated into ``self.w`` in a fixed order; that order is
    splatted into the C++ ``FasterTransformer.GPT`` op (see ``GPT.cuda``), so
    entries must not be reordered.
    """
    def __init__(self, head_num, size_per_head, layer_num, vocab_size, max_seq_len, tensor_para_size, layer_para_size):
        # Heads must shard evenly across tensor-parallel ranks.
        assert(head_num % tensor_para_size == 0)
        self.head_num = head_num
        self.size_per_head = size_per_head
        self.layer_num = layer_num
        self.vocab_size = vocab_size
        self.max_seq_len = max_seq_len
        self.tensor_para_size = tensor_para_size
        self.layer_para_size = layer_para_size
        self.layers_per_device = layer_num // layer_para_size
        # "local" sizes are per tensor-parallel rank; "global" spans all ranks.
        local_head_num = head_num // tensor_para_size
        global_head_num = head_num
        local_hidden_units = local_head_num * size_per_head
        global_hidden_units = global_head_num * size_per_head
        local_inner_size = local_hidden_units * 4  # FFN expansion factor 4
        self.local_head_num = local_head_num
        self.global_head_num = global_head_num
        self.local_hidden_units = local_hidden_units
        self.global_hidden_units = global_hidden_units
        self.local_inner_size = local_inner_size
        self.w = []
        # Before Transformer blocks
        self.w.append(torch.zeros(vocab_size, global_hidden_units)) # embedding_table
        self.w.append(torch.zeros(max_seq_len, global_hidden_units)) # position_encoding_table
        # Transformer blocks
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # self_layernorm_gamma
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # self_layernorm_beta
        self.w.append([torch.zeros(global_hidden_units, local_hidden_units * 3)] * layer_num) # self_kernel
        self.w.append([torch.zeros(local_hidden_units * 3)] * layer_num) # self_bias
        self.w.append([torch.zeros(local_hidden_units, global_hidden_units)] * layer_num) # self_output_kernel
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # self_output_bias
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # ffn_layernorm_gamma
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # ffn_layernorm_beta
        self.w.append([torch.zeros(global_hidden_units, local_inner_size)] * layer_num) # ffn_kernel1
        self.w.append([torch.zeros(local_inner_size, global_hidden_units)] * layer_num) # ffn_kernel2
        self.w.append([torch.zeros(local_inner_size)] * layer_num) # ffn_bias1
        self.w.append([torch.zeros(global_hidden_units)] * layer_num) # ffn_bias2
        # After Transformer blocks
        self.w.append(torch.zeros(global_hidden_units)) # layernorm_gamma
        self.w.append(torch.zeros(global_hidden_units)) # layernorm_beta
        # Initialization (overwritten by load() when a checkpoint is used)
        self._map(lambda w : torch.nn.init.normal_(w, mean=0., std=1.))
    def __getitem__(self, idx):
        return self.w[idx]
    def __setitem__(self, idx, val):
        self.w[idx] = val
    def __len__(self):
        return len(self.w)
    def _map(self, func):
        # Apply `func` to every tensor in self.w, descending into the
        # per-layer lists.
        for i in range(len(self.w)):
            if isinstance(self.w[i], list):
                for j in range(len(self.w[i])):
                    self.w[i][j] = func(self.w[i][j])
            else:
                self.w[i] = func(self.w[i])
    def load(self, ckpt_path, tensor_para_rank, layer_para_rank):
        """Load checkpoint binaries, keeping only this rank's shard/stage.

        File names encode the layer index and (where sharded) the
        tensor-parallel rank. The append order below must mirror __init__.
        """
        w = []
        # Load
        w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.wte.bin", dtype=np.single)))
        wpe = torch.from_numpy(np.fromfile(ckpt_path + "/model.wpe.bin", dtype=np.single)).reshape(-1, self.global_hidden_units)
        assert self.max_seq_len <= wpe.size(0), "max_seq_len must not exceed the value of maximum sequence length during traning."
        wpe = wpe[:self.max_seq_len] # excludes weights not to really use.
        w.append(wpe)
        # Only layers owned by this pipeline stage are materialized; other
        # layers are left as empty tensors.
        is_load = lambda i: i >= self.layers_per_device * layer_para_rank and i < self.layers_per_device * (layer_para_rank + 1)
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.input_layernorm.weight.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.input_layernorm.bias.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.query_key_value.weight.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.query_key_value.bias.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.dense.weight.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.attention.dense.bias.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.post_attention_layernorm.weight.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.post_attention_layernorm.bias.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_h_to_4h.weight.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_4h_to_h.weight.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_h_to_4h.bias.{}.bin".format(i, tensor_para_rank), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append([torch.from_numpy(np.fromfile(ckpt_path + "/model.layers.{}.mlp.dense_4h_to_h.bias.bin".format(i), dtype=np.single)) if is_load(i) else torch.empty(0) for i in range(self.layer_num)])
        w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.weight.bin", dtype=np.single)))
        w.append(torch.from_numpy(np.fromfile(ckpt_path + "/model.final_layernorm.bias.bin", dtype=np.single)))
        # Reshape the flat binary blobs to the shapes allocated in __init__;
        # a mismatch means the model hyperparameters differ from training.
        try:
            for i in range(len(w)):
                if isinstance(w[i], list):
                    for j in range(len(w[i])):
                        self.w[i][j] = w[i][j].reshape(self.w[i][j].shape) if w[i][j].nelement() > 0 else self.w[i][j]
                else:
                    self.w[i] = w[i].reshape(self.w[i].shape)
        except RuntimeError:
            raise RuntimeError("head_num, size_per_head, vocab_size, and max_seq_len must be the same as the ones during training.")
class GPT(nn.Module):
    """PyTorch wrapper around the C++ FasterTransformer GPT decoding op.

    Sets up tensor/pipeline parallel ranks over MPI, owns the sharded
    weights, and delegates generation to a TorchScript class loaded from
    ``lib_path`` (built in ``cuda()``).
    """
    def __init__(self, head_num, size_per_head, vocab_size, start_id, end_id, layer_num, top_k, top_p, temperature,
                 output_len, max_seq_len, tensor_para_size, layer_para_size, layer_para_batch_size, is_fuse_QKV, max_batch_size, lib_path):
        super().__init__()
        self.head_num = head_num
        self.size_per_head = size_per_head
        self.vocab_size = vocab_size
        self.start_id = start_id
        self.end_id = end_id
        self.layer_num = layer_num
        self.top_k = top_k
        self.top_p = top_p
        self.temperature = temperature
        self.output_len = output_len
        self.max_seq_len = max_seq_len
        self.tensor_para_size = tensor_para_size
        self.layer_para_size = layer_para_size
        self.layer_para_batch_size = layer_para_batch_size
        self.is_fuse_QKV = is_fuse_QKV
        self.max_batch_size = max_batch_size
        assert torch.cuda.is_available(), "CUDA is required for this model."
        assert head_num % tensor_para_size == 0, "head_num must be a multiple of tensor_para_size."
        assert layer_num % layer_para_size == 0, "layer_num must be a multiple of layer_para_size."
        # Prepare weights (randomly initialized until load() is called)
        self.weights = GPTWeights(head_num, size_per_head, layer_num, vocab_size, max_seq_len, tensor_para_size, layer_para_size)
        # Prepare for tensor/pipeline parallel: one MPI rank per GPU, ranks
        # mapped round-robin onto the local devices.
        dist.init_process_group(backend='mpi')
        self.rank = dist.get_rank()
        self.device_count = torch.cuda.device_count()
        self.device = self.rank % self.device_count
        torch.cuda.set_device(self.device)
        world_size = dist.get_world_size()
        assert world_size == tensor_para_size * layer_para_size, "tensor_para_size * layer_para_size must be equal to world_size."
        # Ranks are laid out tensor-parallel-major.
        self.tensor_para_rank = self.rank % self.tensor_para_size
        self.layer_para_rank = self.rank // self.tensor_para_size
        # Load the C++ model into Pytorch model.
        torch.classes.load_library(os.path.abspath(lib_path))
        self.model = None  # created lazily by cuda()
    def load(self, ckpt_path):
        """Load this rank's weight shard from a checkpoint directory."""
        self.weights.load(ckpt_path, tensor_para_rank=self.tensor_para_rank, layer_para_rank=self.layer_para_rank)
    def half(self):
        # Convert weights to fp16; rebuild the C++ model if it already exists
        # so it picks up the converted tensors.
        self.weights._map(lambda w : w.half())
        if self.model is not None:
            self.cuda()
    def cuda(self):
        # Move weights to this rank's GPU and (re)instantiate the TorchScript
        # GPT op with them.
        self.weights._map(lambda w : w.cuda(self.device))
        self.model = torch.classes.FasterTransformer.GPT(self.head_num, self.size_per_head, self.vocab_size,
            self.start_id, self.end_id, self.layer_num, self.top_k, self.top_p, self.temperature, self.max_seq_len,
            self.tensor_para_size, self.layer_para_size, self.layer_para_batch_size,
            self.is_fuse_QKV, self.max_batch_size, *self.weights.w)
    def forward(self, start_ids, start_lengths, attn_mask, batch_first=True):
        """Generate output_len tokens after the prompts.

        Returns the generated ids on rank 0; other ranks return None.
        """
        batch_size = start_ids.size(0)
        assert batch_size <= self.max_batch_size, "batch_size must not exceed max_batch_size."
        assert batch_size >= self.layer_para_batch_size, "batch_size must be equal to or larger than layer_para_batch_size."
        assert batch_size % self.layer_para_batch_size == 0, "batch_size must be a multiple of layer_para_batch_size."
        input_len = min(start_lengths)
        assert input_len > 0, "input_len must be larger than zero. For an unconditional case, use start_id as the first token."
        assert input_len + self.output_len <= self.max_seq_len, "input_len + output_len must not exceed max_seq_len."
        # Inputs to device
        start_ids = start_ids.cuda(self.device)
        start_lengths = start_lengths.cuda(self.device)
        attn_mask = attn_mask.cuda(self.device)
        assert self.model is not None, "The model must be copied to the device(s) through cuda()."
        output_ids, = self.model.forward(start_ids, start_lengths, attn_mask, self.output_len)
        if batch_first:
            # C++ op returns time-major output; transpose to batch-major.
            output_ids = output_ids.T
        if self.rank == 0:
            return output_ids
| [
"noreply@github.com"
] | arXiv-research.noreply@github.com |
f44507616b1b518b8c703ba0fcb821c34c829762 | f9ff9ffc9d5e48b63c137382e1d3299a518215ec | /microbenchmarks/simple_rcnn/graph_rewrites_LP.py | 1678cb7a1ba243db64bb23867ba9f7518894425a | [] | no_license | mkuchnik/PlumberApp | 240b760cf0250a68253153950bf9a85014c35499 | 6123f5bce36eec7dc75b6b9298054b493d930bdc | refs/heads/main | 2023-04-16T08:32:54.829454 | 2022-03-13T04:08:37 | 2022-03-13T04:08:37 | 459,011,430 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,559 | py | from absl import app
from absl import flags
import shutil
import pprint
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import graphsurgeon
import gen_util
import pandas as pd
try:
import dataloader
except ImportError:
try:
import resnet_flags
except ImportError:
import dataset_flags
import random
import copy
import networkx as nx
import convex_solver
import numpy as np
# Node-selection strategies accepted by --strategy (None = recommended order).
STRATEGIES = [None, "random", "random_valid"]
# Mode used when computing rate upper bounds.
# NOTE(review): the "p_busy" assignment is immediately overridden below, so
# the effective default is None -- confirm which value is intended.
DEFAULT_MODE = "p_busy"
DEFAULT_MODE = None
FLAGS = flags.FLAGS
flags.DEFINE_bool('rebench_baseline',
                  default=False,
                  help=('Run benchmarking on pipeline again for baseline.'))
flags.DEFINE_bool('sweep_nodes',
                  default=False,
                  help=('Run benchmarking on individual nodes.'))
flags.DEFINE_bool('skip_baseline',
                  default=False,
                  help=('Don\'t evalute first run.'))
flags.DEFINE_bool('skip_LP_baseline',
                  default=False,
                  help=('Don\'t evalute LP baseline.'))
flags.DEFINE_integer('num_deviations',
                     default=1,
                     help=('The number of deviations (non-recommendations) to '
                           'run per step. Set to 1 for none.'))
flags.DEFINE_integer('num_steps',
                     default=55,
                     help=('The number of steps (max) to take.'))
flags.DEFINE_integer('time_limit_s',
                     default=42,
                     help=('The number of seconds (max) to run.'))
flags.DEFINE_string('strategy',
                    default=None,
                    help=('The strategy to run. One of {}'.format(STRATEGIES)))
# Batch ops are optimized via map_and_batch_fusion instead of a parallelism
# knob (see optimize_slowest_node).
BATCH_NODE_OPS = set(["BatchDataset", "BatchDatasetV2"])
def is_dataset_node(node_name):
    """Return True if `node_name` looks like a tf.data dataset node name."""
    return any(tag in node_name for tag in ("Dataset", "dataset"))
def get_dataset_node_names(graphdef):
    """Names of every graph node whose op is a Dataset op."""
    static_graph = graphsurgeon.StaticGraph(graphdef)
    return [node.name for node in static_graph if "Dataset" in node.op]
def get_node_parallelism(node):
    """Read the parallelism recorded on a Plumber node object."""
    return getattr(node, "parallelism")
def parallelism_parameter_index(surgeon_node):
    """Index within node.input of the num_parallel_calls argument.

    Raises RuntimeError for ops without a known parallelism parameter.
    """
    op_to_index = {
        "MapAndBatchDataset": 2,
        "ParallelMapDatasetV2": -1,
        "ParallelInterleaveDatasetV4": -1,
    }
    try:
        return op_to_index[surgeon_node.op]
    except KeyError:
        raise RuntimeError("Don't know how to handle"
                           " {}".format(surgeon_node.name))
def cycle_length_parameter_index(surgeon_node):
    """Index within node.input of the cycle_length argument (interleave only)."""
    if surgeon_node.op != "ParallelInterleaveDatasetV4":
        raise RuntimeError("Don't know how to handle"
                           " {}".format(surgeon_node.name))
    return -5
def parallelism_parameter_name(surgeon_node):
    """Name of the input node feeding the node's parallelism parameter."""
    return surgeon_node.input[parallelism_parameter_index(surgeon_node)]
def find_datasets_in_f(graph_def, f_name, datasets=None):
    """Find datasets in a function.

    Recursively collects every dataset NodeDef reachable from the function
    named `f_name` in `graph_def.library`, descending into nested
    function-valued attributes (e.g. an interleave's `f` that itself builds a
    dataset). Returns the accumulated list; the passed-in `datasets` list, if
    any, is mutated in place.
    """
    # TODO(mkuchnik): Use
    def is_tfdata_node(node):
        # Heuristic: tf.data ops carry "Dataset"/"dataset" in their op name.
        return "Dataset" in node.op or "dataset" in node.op
    def find_fs_of_f(f):
        """Find nested function nodes e.g., f1 calls f2."""
        # TODO(mkuchnik): Add support for groupbywindowdataset
        fs_nodes = []
        for node in f.node_def:
            # Any node with a function-valued attr may reference another
            # library function.
            if ("f" in node.attr or "key_func" in node.attr or "reduce_func" in
                node.attr or "window_size_func" in node.attr):
                fs_nodes.append(node)
        return fs_nodes
    datasets = [] if datasets is None else datasets
    for f in graph_def.library.function:
        if f.signature.name == f_name:
            for node in f.node_def:
                if is_tfdata_node(node):
                    datasets.append(node)
            child_f_nodes = find_fs_of_f(f)
            for child_node in child_f_nodes:
                # NOTE(review): only the "f" attr is followed here even though
                # key_func/reduce_func matches are collected above -- confirm.
                child_f_name = child_node.attr["f"].func.name
                find_datasets_in_f(graph_def, child_f_name, datasets)
    return datasets
def find_function_by_name(graph_def, f_name):
for f in graph_def.library.function:
if f.signature.name == f_name:
return f
raise RuntimeError(
"Expected to find 1 node for {}, but found {}".format(
f_name, 0))
def find_functions_of_node(graph_def, surgeon_node):
    """Return the FunctionDefs referenced by a node's function-valued attrs.

    GroupByWindowDataset carries three functions (key/reduce/window_size);
    interleave/map variants carry a single `f`. Any other op yields [].
    """
    fs = []
    if surgeon_node.op == "GroupByWindowDataset":
        for key_str in ["key_func", "reduce_func", "window_size_func"]:
            key_f = surgeon_node.attr[key_str].func
            # TODO(mkuchnik): For some reason, encoding adds "\nF" or "\nA" etc.
            # NOTE(review): decoding the serialized NameAttrList and stripping
            # the leading framing byte is a workaround; it assumes the function
            # name survives the round-trip intact -- confirm.
            key_f_str = key_f.SerializeToString().decode()
            key_f_str = key_f_str.strip("\n")
            key_f_str = key_f_str[1:]
            f = find_function_by_name(graph_def, key_f_str)
            fs.append(f)
    elif (surgeon_node.op == "ParallelInterleaveDatasetV4" or
          surgeon_node.op == "MapDataset" or
          surgeon_node.op == "ParallelMapDatasetV2"):
        for key_str in ["f"]:
            key_f = surgeon_node.attr[key_str].func
            key_f_str = key_f.SerializeToString().decode()
            key_f_str = key_f_str.strip("\n")
            # TODO(mkuchnik): For some reason, encoding adds "\nF" or "\nA" etc.
            key_f_str = key_f_str[1:]
            f = find_function_by_name(graph_def, key_f_str)
            fs.append(f)
    return fs
def graphdef_to_networkx(graphdef, keep_const=False):
    """Convert a tf.data GraphDef (and its function library) to a DiGraph.

    Edges point from producer (input) to consumer. Const nodes are skipped
    unless `keep_const` is True. Datasets built inside function attributes
    (e.g. an interleave's `f`) are linked through the function's signature
    name.
    """
    # NOTE(mkuchnik): Can also use from_pydot
    G = nx.DiGraph()
    surgeon = graphsurgeon.StaticGraph(graphdef)
    retval = surgeon.find_nodes_by_op("_Retval")
    assert len(retval) == 1
    retval = retval[0]
    # BUG FIX: memoize visited nodes. The original recursion re-walked shared
    # subgraphs (worst-case exponential on diamond-shaped graphs). Edges and
    # nodes are still added before the visited check, and networkx add_node/
    # add_edge are idempotent, so the resulting graph is unchanged.
    visited = set()
    def descend(node):
        if node.name in visited:
            return
        visited.add(node.name)
        for i_name in node.input:
            i = find_node_by_name(surgeon, i_name)
            if i.op != "Const" or (i.op == "Const" and keep_const):
                G.add_node(i.name)
                G.add_edge(i.name, node.name)
                descend(i)
        fs = find_functions_of_node(graphdef, node)
        for f in fs:
            G.add_node(f.signature.name)
            G.add_edge(f.signature.name, node.name)
            f_datasets = find_datasets_in_f(graphdef, f.signature.name)
            for ff in f_datasets:
                G.add_node(ff.name)
                G.add_edge(ff.name, f.signature.name)
    G.add_node(retval.name)
    descend(retval)
    return G
def find_retvals(graphdef):
    """All _Retval nodes (the graph's outputs)."""
    return graphsurgeon.StaticGraph(graphdef).find_nodes_by_op("_Retval")
def find_node_by_name(surgeon, node_name, raise_on_fail=True):
surgeon_node = surgeon.find_nodes_by_name(node_name)
if not surgeon_node:
surgeon_node = [n for n in surgeon if n.name == node_name]
if len(surgeon_node) != 1:
if raise_on_fail:
raise RuntimeError(
"Expected to find 1 node for {}, but found {}".format(
node_name, len(surgeon_node)))
else:
return None
surgeon_node = surgeon_node[0]
return surgeon_node
def fork_node(surgeon, surgeon_node):
    """Deep-copy a node under a fresh generated name, append it to the graph,
    and return the appended copy."""
    clone = copy.deepcopy(surgeon_node)
    clone.name = generate_new_name(surgeon, "Added_{}/".format(clone.op))
    surgeon.append(clone)
    return find_node_by_name(surgeon, clone.name)
def set_node_parallelism(graphdef, node_name: str, parallelism: int):
    """Rewrite `graphdef` so `node_name` uses the given parallelism.

    Returns (new_graphdef, debug_string). debug_string is None when the
    node's op has no known parallelism parameter (the rewrite is skipped).
    """
    # NOTE(mkuchnik): Take node_name and not node
    surgeon = graphsurgeon.DynamicGraph(graphdef)
    surgeon_node = find_node_by_name(surgeon, node_name)
    try:
        parallelism_node = parallelism_parameter_name(surgeon_node)
    except RuntimeError as ex:
        # Unsupported op: return the graph unchanged.
        print(ex)
        print("IGNORING")
        graph_def = surgeon.as_graph_def()
        return graph_def, None
    parallelism_surgeon_node = [k for k in surgeon if
                                k.name == parallelism_node]
    assert len(parallelism_surgeon_node) == 1, \
        "Expected to find 1 node for {}, but found {}".format(
            parallelism_node, len(parallelism_surgeon_node))
    parallelism_surgeon_node = parallelism_surgeon_node[0]
    # Fork the Const feeding the parallelism argument so any other consumer
    # of the original Const is unaffected by the new value.
    new_parallelism_surgeon_node = fork_node(surgeon, parallelism_surgeon_node)
    i = parallelism_parameter_index(surgeon_node)
    node_input = surgeon_node.input[i]
    assert(node_input == parallelism_node)
    surgeon_node.input[i] = new_parallelism_surgeon_node.name
    parallelism_surgeon_node = new_parallelism_surgeon_node
    # Overwrite the forked Const's scalar int64 payload with the new value.
    parallelism_param = parallelism_surgeon_node.attr["value"].tensor
    parallelism_param.int64_val[:] = [parallelism]
    debug_string = "{}.parallelism={}".format(node_name,
                                              parallelism)
    graph_def = surgeon.as_graph_def()
    return graph_def, debug_string
def increase_node_parallelism(graphdef, node, up_parallelism=1):
    """Bump `node`'s parallelism by `up_parallelism`.

    Mirrors set_node_parallelism, but takes a Plumber node object (for its
    current parallelism) rather than a name and an absolute value.
    Returns (new_graphdef, debug_string); debug_string is None when the op
    has no known parallelism parameter.
    """
    surgeon = graphsurgeon.DynamicGraph(graphdef)
    surgeon_node = find_node_by_name(surgeon, node.name)
    parallelism = get_node_parallelism(node) + up_parallelism
    try:
        parallelism_node = parallelism_parameter_name(surgeon_node)
    except RuntimeError as ex:
        # Unsupported op: return the graph unchanged.
        print(ex)
        print("IGNORING")
        graph_def = surgeon.as_graph_def()
        return graph_def, None
    parallelism_surgeon_node = [k for k in surgeon if
                                k.name == parallelism_node]
    assert len(parallelism_surgeon_node) == 1, \
        "Expected to find 1 node for {}, but found {}".format(
            parallelism_node, len(parallelism_surgeon_node))
    parallelism_surgeon_node = parallelism_surgeon_node[0]
    # Fork the Const feeding the parallelism argument so other consumers of
    # the original Const keep their value.
    new_parallelism_surgeon_node = fork_node(surgeon, parallelism_surgeon_node)
    i = parallelism_parameter_index(surgeon_node)
    node_input = surgeon_node.input[i]
    assert(node_input == parallelism_node)
    surgeon_node.input[i] = new_parallelism_surgeon_node.name
    parallelism_surgeon_node = new_parallelism_surgeon_node
    parallelism_param = parallelism_surgeon_node.attr["value"].tensor
    parallelism_param.int64_val[:] = [parallelism]
    debug_string = "{}.parallelism={}".format(node.name,
                                              parallelism)
    graph_def = surgeon.as_graph_def()
    return graph_def, debug_string
def find_placeholders(graphdef):
    """All Placeholder nodes in the graph."""
    return graphsurgeon.StaticGraph(graphdef).find_nodes_by_op("Placeholder")
def apply_pipeline_options(dataset, map_and_batch_fusion, stats_filename):
    """Attach the experiment's standard tf.data options to `dataset`.

    Disables determinism, pins intra-op parallelism to 1, sizes the private
    threadpool from --dataset_threadpool_size, and optionally enables
    map_and_batch fusion and autotune stats dumping.
    """
    opts = tf.data.Options()
    opts.experimental_deterministic = False
    threading_opts = opts.experimental_threading
    threading_opts.max_intra_op_parallelism = 1
    threading_opts.private_threadpool_size = FLAGS.dataset_threadpool_size
    optimization_opts = opts.experimental_optimization
    optimization_opts.map_and_batch_fusion = map_and_batch_fusion
    if stats_filename:
        optimization_opts.autotune_stats_filename = stats_filename
    return dataset.with_options(opts)
def remove_extra_datasets(graphdef):
    """Removes nodes such as `ModelDataset` which are appended to the dataset"""
    extraneous_ops = [
        "ModelDataset",
        "MaxIntraOpParallelismDataset",
        "PrivateThreadPoolDataset",
    ]
    surgeon = graphsurgeon.DynamicGraph(graphdef)
    return remove_op_datasets(surgeon, extraneous_ops).as_graph_def()
def remove_op_datasets(surgeon, nodes_to_remove):
for n in nodes_to_remove:
nodes = surgeon.find_nodes_by_op(n)
surgeon.forward_inputs(nodes)
nodes = surgeon.find_nodes_by_op(n)
assert not nodes, "{} still found".format(n)
return surgeon
def remove_name_datasets(surgeon, nodes_to_remove, forward=False):
    """Delete each named node (or splice it out when forward=True)."""
    for node_name in nodes_to_remove:
        target = find_node_by_name(surgeon, node_name)
        if forward:
            surgeon.forward_inputs(target)
        else:
            surgeon.remove(target)
        leftover = find_node_by_name(surgeon, node_name, raise_on_fail=False)
        assert not leftover, "{} still found".format(node_name)
    return surgeon
def patch_retval(retval):
    """Retval nodes have only one input, but deleting nodes may have caused them
    to have more than one. Keep only the dataset-typed inputs, in place."""
    retval.input[:] = [name for name in retval.input if "Dataset" in name]
def remap_dataset_names(topo_dataset_names):
    """Map each dataset name to "<base>_<k>", numbering duplicates of the
    same base (the name up to the first '/') in order of appearance."""
    counts = {}
    remapping = {}
    for name in topo_dataset_names:
        base = name.split("/")[0] if "/" in name else name
        counts[base] = counts.get(base, -1) + 1
        remapping[name] = "{}_{}".format(base, counts[base])
    return remapping
def instantiate_pipeline(graphdef, element_spec, dataset_options):
    """Materialize a tf.data pipeline from a (possibly rewritten) GraphDef.

    Strips wrapper datasets (model/threadpool), patches the _Retval inputs,
    resumes the graph as a dataset, and applies the experiment's options.
    dataset_options keys used: take_amount, map_and_batch_fusion,
    stats_filename. Side effect: writes graphdef_rewritten.txt to the CWD
    for debugging.
    """
    # TODO(mkuchnik): Stats filename is stripped
    placeholders = find_placeholders(graphdef)
    assert not placeholders, \
        "No placeholders can exist in graph but found {}".format(placeholders)
    dataset_nodes = get_dataset_node_names(graphdef)
    print("Found dataset nodes: {}".format(dataset_nodes))
    retvals = find_retvals(graphdef)
    assert len(retvals) == 1
    graphdef = remove_extra_datasets(graphdef)
    # Re-check after surgery: there must still be exactly one output.
    retvals = find_retvals(graphdef)
    assert len(retvals) == 1
    patch_retval(retvals[0])
    print("Retval input: {}".format(retvals[0].input))
    with open("graphdef_rewritten.txt", "w") as f:
        f.write(str(graphdef))
    graph_def = tf.constant(graphdef.SerializeToString())
    ds = tf.data.experimental.analysis.ResumeDataset(graph_def, element_spec)
    if dataset_options["take_amount"]:
        # Bound the pipeline to a fixed number of elements (repeat so take
        # never exhausts early).
        ds = ds.repeat()
        ds = ds.take(dataset_options["take_amount"])
    ds = apply_pipeline_options(
        ds,
        map_and_batch_fusion=dataset_options["map_and_batch_fusion"],
        stats_filename=dataset_options["stats_filename"])
    return ds
def reinstantiate_pipeline(dataset, dataset_options):
    """Use python side graphdef for instantiation.

    Serializes `dataset` to a GraphDef and rebuilds it through
    instantiate_pipeline, so rewrites round-trip through serialization.
    """
    graph_def = dataset._as_serialized_graph()
    graph_def = bytes(graph_def.numpy())
    graphdef = tf1.GraphDef()
    graphdef.ParseFromString(graph_def)
    # BUG FIX: `element_spec` was an unbound name here (NameError at call
    # time); take the spec from the dataset being re-instantiated.
    element_spec = dataset.element_spec
    dataset = instantiate_pipeline(graphdef, element_spec, dataset_options)
    return dataset
def clear_graph():
    """Reset the TF1 default graph and the Keras session between runs."""
    tf.compat.v1.reset_default_graph()
    tf.keras.backend.clear_session()
def span_context_to_networkx(graphdef, span_context):
    """Joins graph with events.

    Builds the dataset graph, then attaches one node per observed span,
    labeled with its duration. Outlier spans (duration > mean + 2*std for
    that span name) are colored red and always drawn; at most ~10 normal
    (blue) spans are drawn per name.
    """
    G = graphdef_to_networkx(graphdef)
    def time_delta(span):
        return span.end_time - span.start_time
    # Collect all durations per span name.
    name_average = dict()
    for span in span_context.spans:
        if span.name in name_average:
            name_average[span.name].append(time_delta(span))
        else:
            name_average[span.name] = [time_delta(span)]
    # Reduce to (mean, std) per name; remapper_dict is kept for the
    # (currently disabled) relabeling at the bottom.
    remapper_dict = dict()
    for k in name_average:
        v = name_average[k]
        v_pd = pd.Series(v)
        mean = v_pd.mean()
        std = v_pd.std()
        name_average[k] = (mean, std)
        remapper_dict[k] = "{}\nmean:{}\nstd:{}".format(k, mean, std)
    def is_outlier(span):
        # Duration more than 2 standard deviations above the mean.
        if span.name in name_average:
            mean, std = name_average[span.name]
            if time_delta(span) > (2 * std + mean):
                return True
        return False
    name_counter = dict()
    for span in span_context.spans:
        if span.name in name_counter:
            count = name_counter[span.name]
        else:
            count = 0
        name_counter[span.name] = count
        outlier = is_outlier(span)
        if outlier:
            color = "red"
        else:
            color = "blue"
        if count > 10 and not outlier:
            continue
        name = "{}_span_{}".format(span.name, count)
        mean, std = name_average[span.name]
        attrs = {"mean": mean,
                 "std": std}
        G.add_node(name, color=color, **attrs)
        G.add_edge(name, span.name, label=time_delta(span), color=color)
        # NOTE(review): the counter is only advanced for drawn spans, so
        # skipped spans reuse the same index -- confirm intended.
        name_counter[span.name] += 1
    # Apply relabeling to add means
    #G = nx.relabel_nodes(G, remapper_dict, copy=True)
    #isolated_nodes = nx.isolates(G)
    #G.remove_nodes_from(isolated_nodes)
    return G
def get_runtime_data(model, mode=DEFAULT_MODE):
    """Collect Plumber model statistics and LP rate bounds into a flat dict.

    `model` is a Plumber performance model; `mode` is forwarded to the
    recommendation's upper-bound calculations.
    """
    CPU_Util = model.CPU_Util()
    CPU_Util_clock = model.CPU_Util(calculation_mode="CPU_clock")
    process_CPU_Util_clock = model.CPU_Util(calculation_mode="process_CPU_clock")
    Disk_Util = model.Disk_Util()
    Disk_Throughput = model.disk_throughput()
    recommendation = model.recommendation()
    # Disk-rate bound assuming a 100 MB/s device.
    Disk_max_rate_100mb = recommendation.disk_upper_bounds(100e6)
    Disk_bytes_per_root_element = recommendation.disk_bytes_per_root_element()
    max_rate = recommendation.upper_bounds(mode=mode)
    max_rate_p_busy = recommendation.upper_bounds(keep_p_busy=True, mode=mode)
    # LP-based bounds: the native (Plumber) solver vs. the local
    # convex_solver implementation; they should agree.
    max_rate_convex_native, convex_theta = recommendation.LP_upper_bounds()
    max_rate_convex_native_naive, _ = recommendation.LP_upper_bounds(naive=True)
    max_rate_convex, _ = convex_solver.LP_upper_bounds_inner(
        model)
    if max_rate_convex_native != max_rate_convex:
        print("Convex implementations have different rates!")
    max_rate_convex_existing, convex_theta_existing = \
        convex_solver.LP_upper_bounds_inner(model,
                                            use_existing_usage=True)
    cores_remaining = recommendation.remaining_CPU_cores()
    total_dataset_size = model.dataset_working_set_size()
    total_free_memory = model.memory_free()
    iter_duration = recommendation.iterator_duration()
    iter_variance = recommendation.iterator_variance()
    runtime_data = {
        "CPU_Util": CPU_Util,
        "CPU_Util_clock": CPU_Util_clock,
        "Process_CPU_Util_clock": process_CPU_Util_clock,
        "Disk_Util": Disk_Util,
        "Disk_Throughput": Disk_Throughput,
        "Disk Bytes Per Minibatch": Disk_bytes_per_root_element,
        "Total Dataset Size": total_dataset_size,
        "Total Free Memory": total_free_memory,
        "Estimated_Disk_Max_Rate_100MB": Disk_max_rate_100mb,
        "Estimated_Max_Rate": max_rate,
        "Estimated_Max_Rate_p_busy": max_rate_p_busy,
        "Estimated_Max_Rate_Convex": max_rate_convex,
        "Estimated_Max_Rate_Convex_Existing": max_rate_convex_existing,
        "Estimated_Max_Rate_Convex_Native": max_rate_convex_native,
        "Estimated_Max_Rate_Convex_Native_Naive": max_rate_convex_native_naive,
        "Cores_Remaining": cores_remaining,
        "Iterator_Duration": iter_duration,
        "Iterator_Variance": iter_variance,
        "Convex_Theta": convex_theta,
        "Convex_Theta_Existing": convex_theta_existing,
    }
    return runtime_data
def output_shape_types_to_element_spec(output_shapes, output_types):
    """Zip parallel shape/type attr lists into a tuple of tf.TensorSpec."""
    n_shapes = len(output_shapes.shape)
    n_types = len(output_types.type)
    assert n_shapes == n_types, \
        "output shape is len={} but type is={}".format(n_shapes, n_types)
    return tuple(tf.TensorSpec(shape=shape, dtype=dtype)
                 for shape, dtype in zip(output_shapes.shape,
                                         output_types.type))
def element_spec_from_graph(surgeon):
    """Infer element_spec from the node feeding the graph's _Retval."""
    retvals = surgeon.find_nodes_by_op("_Retval")
    assert len(retvals) == 1
    terminal_node = find_node_by_name(surgeon, retvals[0].input[0])
    shapes = terminal_node.attr["output_shapes"].list
    types = terminal_node.attr["output_types"].list
    return output_shape_types_to_element_spec(shapes, types)
def load_pipeline(filename, dataset_options, plot_span_ctxs=False):
    """Load a Plumber stats file and re-instantiate its tf.data pipeline.

    Returns a (dataset, runtime_data) pair, optionally dumping one Graphviz
    .dot file per recommendation span context.
    """
    perf_model = tf.data.experimental.analysis.PlumberPerformanceModel(filename)
    model = perf_model.model()
    recommendation = model.recommendation()
    runtime_data = get_runtime_data(model)
    graphdef = model.graphdef()
    if plot_span_ctxs:
        for idx, span_ctx in enumerate(recommendation.span_contexts()):
            span_graph = span_context_to_networkx(graphdef, span_ctx)
            nx.drawing.nx_pydot.write_dot(span_graph, "span_{}.dot".format(idx))
    static_graph = graphsurgeon.StaticGraph(graphdef)
    spec = element_spec_from_graph(static_graph)
    ds = instantiate_pipeline(graphdef, spec, dataset_options)
    return ds, runtime_data
def apply_thetas_recommendation(graphdef, thetas):
    """Apply a {node_name: parallelism} plan to the graph, node by node."""
    for name in thetas:
        updated_graphdef, _ = set_node_parallelism(graphdef, name, thetas[name])
        graphdef = updated_graphdef
    return graphdef
def optimize_slowest_node(graphdef, slowest_node, dataset_options):
    """Dispatch the optimization that fits the bottleneck node's op type.

    Batch-type nodes are handled by turning on map_and_batch fusion in the
    dataset options (note: mutates ``dataset_options`` in place); all other
    nodes get their parallelism parameter increased in the graph.
    """
    if slowest_node.op not in BATCH_NODE_OPS:
        graphdef, debug_string = increase_node_parallelism(graphdef,
                                                           slowest_node)
        return graphdef, dataset_options, debug_string
    dataset_options["map_and_batch_fusion"] = True
    debug_string = "{}.map_and_batch_fusion=True".format(slowest_node.name)
    return graphdef, dataset_options, debug_string
def ranked_nodes_to_df(ranked_nodes):
    """Flatten Plumber's ranked bottleneck-node analysis into a DataFrame.

    Each row is one analyzed node; columns mix derived rate estimates with raw
    counters pulled from ``x.node.state``. Column names and source attributes
    are matched purely by position, and several do NOT match by name — see the
    trailing notes below (e.g. the "CPU_time" column is filled from
    ``processing_time_clock``).

    Args:
        ranked_nodes: iterable of Plumber node-analysis objects, e.g. from
            ``recommendation.ranked_list_bottleneck_nodes_analysis()``.

    Returns:
        pd.DataFrame with one row per node, in ``ranked_nodes`` order.
    """
    ranked_nodes_cols = ["name",
                         "expected_core_max_rate",  # from expected_per_core_max_rate
                         "expected_parallel_max_rate",
                         "observed_rate",
                         "p_busy",
                         "scheduling_delay",  # from aggregate_scheduling_delay_time
                         "element_ratio",
                         "processing_time",
                         "CPU_time",  # from processing_time_clock
                         "aggregate_processing_time",
                         "aggregate_CPU_time",  # from aggregate_processing_time_clock
                         "parallelism",
                         "aggregate_elements_produced",
                         "aggregate_udf_processing_time",
                         "aggregate_udf_processing_time_clock",
                         "p_udf",
                         "p_udf_clock",
                         "aggregate_avg_number_active_threads",
                         "aggregate_inter_op_parallelism",
                         "aggregate_wait_time",
                         "aggregate_elements_consumed",
                         "avg_wait_time",  # from x.wait_time
                         "wait_time_diff",
                         "p_wait",
                         "p_scheduling",
                         "num_cores_used",
                         "cardinality",
                         "expected_dataset_size",
                         "dataset_record_ratio",
                         "average_bytes_per_element_produced",
                         "average_bytes_per_element_consumed",
                         "parent_name",
                         ]
    # Fraction of a node's total processing time spent inside its UDF
    # (CPU-time based). May divide by zero if no processing time was recorded.
    def p_udf_f(x):
        return (x.node.state.aggregate_udf_processing_time
                / x.node.state.aggregate_processing_time)
    # Same fraction but measured with wall-clock counters.
    def p_udf_clock_f(x):
        return (x.node.state.aggregate_udf_processing_time_clock
                / x.node.state.aggregate_processing_time_clock)
    # NOTE: tuple order must stay aligned with ranked_nodes_cols above.
    ranked_nodes_data = [(x.name,
                          x.expected_per_core_max_rate,
                          x.expected_parallel_max_rate(),
                          x.observed_rate,
                          x.p_busy,
                          x.node.state.aggregate_scheduling_delay_time,
                          x.element_ratio,
                          x.node.state.processing_time,
                          x.node.state.processing_time_clock,
                          x.node.state.aggregate_processing_time,
                          x.node.state.aggregate_processing_time_clock,
                          x.parallelism,
                          x.node.state.aggregate_elements_produced,
                          x.node.state.aggregate_udf_processing_time,
                          x.node.state.aggregate_udf_processing_time_clock,
                          p_udf_f(x),
                          p_udf_clock_f(x),
                          x.node.state.aggregate_avg_number_active_threads,
                          x.node.state.aggregate_inter_op_parallelism,
                          x.node.state.aggregate_wait_time,
                          x.node.state.aggregate_elements_consumed,
                          x.wait_time,
                          x.wait_time_diff,
                          x.p_wait,
                          x.p_scheduling,
                          x.num_cores_used,
                          x.cardinality,
                          x.expected_dataset_size,
                          x.dataset_record_ratio,
                          x.average_bytes_per_element_produced,
                          x.average_bytes_per_element_consumed,
                          x.parent.name if x.parent else "",  # root nodes have no parent
                          )
                         for x in ranked_nodes]
    df = pd.DataFrame(ranked_nodes_data, columns=ranked_nodes_cols)
    return df
def step_pipeline(filename, dataset_options, strategy=None,
                  mode=DEFAULT_MODE, plot_span_ctxs=False):
    """Perform one optimization step on the pipeline stored in ``filename``.

    Loads the Plumber model, picks a bottleneck node according to
    ``strategy`` (None = Plumber's recommendation; "random";
    "random_valid"; "random_valid_deviation" = random valid node excluding
    the recommendation), applies one optimization to it, and re-instantiates
    the resulting pipeline. Also writes several debug artifacts
    (networkx*.dot, graphdef*.txt) into the working directory.

    Returns:
        (ds, debug_string, dataset_options, df, runtime_data) where ``df`` is
        the ranked-node analysis DataFrame and ``debug_string`` describes the
        change that was applied.
    """
    plumber = tf.data.experimental.analysis.PlumberPerformanceModel(filename)
    model = plumber.model()
    runtime_data = get_runtime_data(model)
    recommendation = model.recommendation()
    # NOTE(review): reaches into a private attribute of the recommendation —
    # fragile against Plumber API changes.
    num_cores = recommendation._analysis.global_state.machine_info.num_cores
    print("num_cores: {}".format(num_cores))
    ranked_nodes = \
        recommendation.ranked_list_bottleneck_nodes_analysis(mode=mode)
    if strategy is None:
        slowest_node = recommendation.bottleneck_node(mode=mode)
    elif strategy == "random":
        # Emulate randomly permuting choice
        slowest_node = random.choice(ranked_nodes)
    elif strategy == "random_valid":
        # Emulate randomly permuting choice
        # "Valid" = parallelizable, or a batch node when fusion is still off.
        valid_ranked_nodes = [x for x in ranked_nodes if x.is_parallel_node() or
                              (not dataset_options["map_and_batch_fusion"] and x.op
                               in BATCH_NODE_OPS)]
        slowest_node = random.choice(valid_ranked_nodes)
    elif strategy == "random_valid_deviation":
        # Emulate randomly permuting choice with recommendation removed
        _slowest_node = recommendation.bottleneck_node(mode=mode)
        valid_ranked_nodes = [x for x in ranked_nodes if
                              (x.is_parallel_node() or
                               (not dataset_options["map_and_batch_fusion"] and
                                x.op in BATCH_NODE_OPS)) and
                              x.name != _slowest_node.name]
        print("Valid nodes {} without {}: ".format(
            [x.name for x in valid_ranked_nodes],
            _slowest_node.name))
        # NOTE(review): raises IndexError if no valid node remains after
        # excluding the recommendation — TODO confirm intended.
        slowest_node = random.choice(valid_ranked_nodes)
        print("Deviation: {} -> {}".format(_slowest_node.name,
                                           slowest_node.name))
    else:
        raise RuntimeError("Unknown strategy: {}".format(strategy))
    df = ranked_nodes_to_df(ranked_nodes)
    graphdef = model.graphdef()
    if plot_span_ctxs:
        for i, span_context in enumerate(recommendation.span_contexts()):
            span_G = span_context_to_networkx(graphdef, span_context)
            nx.drawing.nx_pydot.write_dot(span_G, "span_{}.dot".format(i))
    # Dump the dataset graph (raw and with canonical node names) for debugging.
    G = graphdef_to_networkx(graphdef)
    nx.drawing.nx_pydot.write_dot(G, "networkx.dot")
    topo_sort = nx.topological_sort(G)
    topo_sort_dataset = filter(is_dataset_node, topo_sort)
    remapper = remap_dataset_names(topo_sort_dataset)
    G_remapped = nx.relabel_nodes(G, remapper)
    nx.drawing.nx_pydot.write_dot(G_remapped, "networkx_remapped.dot")
    df["canonical_name"] = df["name"].map(lambda x: remapper[x])
    with pd.option_context('display.max_rows', None,
                           'display.max_columns', None):
        print("Ranked_nodes:\n{}".format(df))
    current_parallelism = get_node_parallelism(slowest_node)
    print("Current parallelism for node {} is {}".format(slowest_node.name,
                                                         current_parallelism))
    with open("graphdef.txt", "w") as f:
        f.write(str(graphdef))
    # Apply the actual optimization (may mutate dataset_options).
    graphdef, dataset_options, debug_string = \
        optimize_slowest_node(graphdef, slowest_node, dataset_options)
    surgeon = graphsurgeon.DynamicGraph(graphdef)
    graphdef = surgeon.as_graph_def()
    element_spec = element_spec_from_graph(surgeon)
    with open("graphdef2.txt", "w") as f:
        f.write(str(graphdef))
    ds = instantiate_pipeline(graphdef, element_spec, dataset_options)
    runtime_data.update(dataset_options)
    return ds, debug_string, dataset_options, df, runtime_data
def generate_new_name(surgeon, prefix: str) -> str:
    """Return "<prefix>_<i>" for the smallest i not already used as a node name."""
    suffix = 0
    while True:
        candidate = "{}_{}".format(prefix, suffix)
        if not find_node_by_name(surgeon, candidate, raise_on_fail=False):
            return candidate
        suffix += 1
def add_const_node(surgeon, dtype: str, value):
    """ Add a constant node to graph_def.

    Args:
        surgeon: mutable graph wrapper supporting ``append``.
        dtype: type name string; only "DT_INT64" is supported.
        value: the scalar to store in the constant's tensor.

    Returns:
        The newly appended Const node (already added to ``surgeon``).

    Raises:
        RuntimeError: if ``dtype`` is not "DT_INT64".

    For example:
    name: "Const/_7"
    op: "Const"
    attr {
      key: "dtype"
      value {
        type: DT_INT64
      }
    }
    attr {
      key: "value"
      value {
        tensor {
          dtype: DT_INT64
          tensor_shape {
          }
          int64_val: 3
        }
      }
    }
    """
    if dtype == "DT_INT64":
        new_name = generate_new_name(surgeon, "Added_Const/")
        # TODO(mkuchnik): Consider tf.make_tensor_proto or using tf.Constant
        surgeon_node = graphsurgeon.create_node(new_name,
                                                op="Const",
                                                dtype=tf.int64,
                                                )
        # Mirror the node-level dtype attr into the tensor proto itself.
        surgeon_node.attr["value"].tensor.dtype = \
            surgeon_node.attr["dtype"].type
        surgeon_node.attr["value"].tensor.int64_val[:] = [value]
        # Scalar constant: empty tensor shape.
        surgeon_node.attr["value"].tensor.tensor_shape.CopyFrom(
            tf.TensorShape([]).as_proto())
    else:
        raise RuntimeError("Dtype {} is unsupported".format(dtype))
    surgeon.append(surgeon_node)
    return surgeon_node
def add_take_node(surgeon, input_ds_name, const_node_name, output_shapes,
                  output_types):
    """Append a TakeDataset node consuming ``input_ds_name``.

    Args:
        surgeon: mutable graph wrapper supporting ``append``.
        input_ds_name: name of the upstream dataset node.
        const_node_name: name of an int64 Const node holding the take count.
        output_shapes: output_shapes attr value to copy onto the new node.
        output_types: output_types attr value to copy onto the new node.

    Returns:
        The newly appended TakeDataset node (already added to ``surgeon``).
    """
    take_name = generate_new_name(surgeon, "Added_TakeDataset/")
    take_node = graphsurgeon.create_node(take_name, op="TakeDataset")
    take_node.input[:] = [input_ds_name, const_node_name]
    for attr_key, attr_value in (("output_shapes", output_shapes),
                                 ("output_types", output_types)):
        take_node.attr[attr_key].CopyFrom(attr_value)
    surgeon.append(take_node)
    return take_node
def add_repeat_node(surgeon, input_ds_name, const_node_name, output_shapes,
                    output_types):
    """Append a RepeatDataset node consuming ``input_ds_name``.

    Args:
        surgeon: mutable graph wrapper supporting ``append``.
        input_ds_name: name of the upstream dataset node.
        const_node_name: name of an int64 Const node holding the repeat count
            (-1 means repeat forever).
        output_shapes: output_shapes attr value to copy onto the new node.
        output_types: output_types attr value to copy onto the new node.

    Returns:
        The newly appended RepeatDataset node (already added to ``surgeon``).
    """
    repeat_name = generate_new_name(surgeon, "Added_RepeatDataset/")
    repeat_node = graphsurgeon.create_node(repeat_name, op="RepeatDataset")
    repeat_node.input[:] = [input_ds_name, const_node_name]
    for attr_key, attr_value in (("output_shapes", output_shapes),
                                 ("output_types", output_types)):
        repeat_node.attr[attr_key].CopyFrom(attr_value)
    surgeon.append(repeat_node)
    return repeat_node
def copy_node_into_graph(surgeon, surgeon_node):
    """Deep-copy a node into the graph under a fresh, collision-free name."""
    clone = copy.deepcopy(surgeon_node)
    clone.name = generate_new_name(surgeon, "Added_{}/".format(clone.op))
    surgeon.append(clone)
    return clone
def find_unreferenced_nodes(surgeon, root_node):
    """Return the names of all nodes not reachable from ``root_node``.

    Walks the graph from ``root_node`` along each node's ``input`` edges and
    removes every reachable name from the set of all node names; whatever
    remains is unreferenced (dangling).

    Fix: the previous recursive version re-descended into a node once per
    path reaching it (exponential blow-up on DAGs with shared inputs), could
    hit Python's recursion limit on deep pipelines, and would loop forever on
    a cyclic graph. This version is iterative and visits each node once.

    Args:
        surgeon: graph wrapper exposing ``node_map`` (name -> node).
        root_node: node from which reachability is computed.

    Returns:
        set[str] of unreachable node names (excluding ``root_node`` itself).
    """
    unreferenced = set(surgeon.node_map.keys())
    unreferenced.discard(root_node.name)
    visited = {root_node.name}
    stack = [root_node]
    while stack:
        node = stack.pop()
        for input_name in node.input:
            unreferenced.discard(input_name)
            if input_name not in visited:
                visited.add(input_name)
                stack.append(find_node_by_name(surgeon, input_name))
    return unreferenced
def add_take_and_cache_node_after_node(surgeon, surgeon_node,
                                       take_amount: int = 500):
    """Adds a take, cache, and repeat dataset node after the given node.
    There is likely one consumer as dataset is DAG.

    The inserted chain is: surgeon_node -> Take(take_amount) -> Cache ->
    Repeat(-1). The CacheDatasetV2 node (and its file/memory-resource const
    inputs) is harvested from a tiny throwaway tf.data pipeline because the
    memory-resource handle cannot be constructed by hand.

    Returns the (mutated) ``surgeon``.
    """
    take_amount_node = add_const_node(surgeon, "DT_INT64", take_amount)
    output_shapes = surgeon_node.attr["output_shapes"]
    output_types = surgeon_node.attr["output_types"]
    take_node = add_take_node(surgeon, surgeon_node.name,
                              take_amount_node.name, output_shapes,
                              output_types)
    # NOTE(mkuchnik): We don't know hashcode of memory resource, so we copy it
    # Build a trivial cached dataset just to serialize a valid CacheDatasetV2.
    ds = tf.data.Dataset.from_tensor_slices([1])
    ds = ds.cache()
    graphdef = ds._as_serialized_graph()
    graph_def = bytes(graphdef.numpy())
    graphdef = tf1.GraphDef()
    graphdef.ParseFromString(graph_def)
    temp_surgeon = graphsurgeon.StaticGraph(graphdef)
    cache_node = temp_surgeon.find_nodes_by_op("CacheDatasetV2")
    assert len(cache_node) == 1
    cache_node = cache_node[0]
    # CacheDatasetV2 inputs: [dataset, filename const, memory-resource const].
    assert len(cache_node.input) == 3
    file_const_node_name = cache_node.input[1]
    file_const_node = find_node_by_name(temp_surgeon, file_const_node_name)
    mem_const_node_name = cache_node.input[2]
    mem_const_node = find_node_by_name(temp_surgeon, mem_const_node_name)
    # Transplant the const inputs into the target graph under fresh names.
    file_const_node = copy_node_into_graph(surgeon, file_const_node)
    mem_const_node = copy_node_into_graph(surgeon, mem_const_node)
    # Rewire the harvested cache node to our take node and transplanted consts,
    # then fix up its output signature to match the wrapped node.
    cache_node.input[0] = take_node.name
    cache_node.input[1] = file_const_node.name
    cache_node.input[2] = mem_const_node.name
    cache_node.attr["output_shapes"].CopyFrom(output_shapes)
    cache_node.attr["output_types"].CopyFrom(output_types)
    cache_node = copy_node_into_graph(surgeon, cache_node)
    # Repeat indefinitely (-1) so the cached prefix can be read repeatedly.
    repeat_amount_node = add_const_node(surgeon, "DT_INT64", -1)
    repeat_node = add_repeat_node(surgeon, cache_node.name,
                                  repeat_amount_node.name, output_shapes,
                                  output_types)
    return surgeon
def add_retval_after_node(surgeon, surgeon_node):
    """Make ``surgeon_node`` the pipeline's output and prune dangling nodes.

    Rewires the single _Retval sink to consume ``surgeon_node``, then removes
    every node that is no longer reachable from the sink.
    """
    retval_nodes = surgeon.find_nodes_by_op("_Retval")
    assert len(retval_nodes) == 1
    sink = retval_nodes[0]
    sink.input[0] = surgeon_node.name
    dangling = find_unreferenced_nodes(surgeon, sink)
    print("Found unreferenced nodes: {}".format(dangling))
    return remove_name_datasets(surgeon, dangling)
def create_benchmark_node_dataset(surgeon, node_name: str, take_amount: int):
    """Build a graph that isolates ``node_name`` for throughput testing.

    Inserts take/cache/repeat nodes in front of the target node (so its
    input is a fixed, cached prefix) and truncates the pipeline right after
    it by rewiring the _Retval sink.
    """
    target = find_node_by_name(surgeon, node_name)
    dataset_inputs = [name for name in target.input if "Dataset" in name]
    assert len(dataset_inputs) == 1
    upstream = find_node_by_name(surgeon, target.input[0])
    surgeon = add_take_and_cache_node_after_node(surgeon, upstream,
                                                 take_amount)
    return add_retval_after_node(surgeon, target)
def maybe_inject_cache_optimization(surgeon, model):
# TODO(mkuchnik): Check if remainining memory is greater than 0
total_dataset_size = model.dataset_working_set_size()
total_free_memory = model.memory_free()
percentage_data_cacheable = total_free_memory / total_dataset_size
print("Percentage of data cacheable: {}".format(percentage_data_cacheable))
def benchmark_node_dataset(surgeon, node_name: str, dataset_options: dict,
                           bench_options: dict,
                           take_amount: int = 500,
                           prefetch_amount: int = 300):
    """Benchmark a single dataset node in isolation.

    Operates on a deep copy of ``surgeon``, so the caller's graph is left
    untouched. The target node's input is capped to ``take_amount`` cached
    elements, and the pipeline is truncated right after the node; an optional
    prefetch decouples the benchmark consumer from the pipeline.
    """
    bench_surgeon = copy.deepcopy(surgeon)
    clear_graph()
    bench_surgeon = create_benchmark_node_dataset(bench_surgeon, node_name,
                                                  take_amount)
    graph = bench_surgeon.as_graph_def()
    spec = element_spec_from_graph(bench_surgeon)
    ds = instantiate_pipeline(graph, spec, dataset_options)
    if prefetch_amount:
        ds = ds.prefetch(prefetch_amount)
    return gen_util.benchmark_dataset(ds, **bench_options)
def benchmark_all_nodes_dataset(surgeon, dataset_options: dict,
                                bench_options: dict,
                                take_amount: int = 500,
                                parallelism_grid = None,
                                record_model = True):
    """Sweeps through nodes in the dataset and runs a benchmark over their
    parallelism parameter.

    For every (non-ignored) dataset node, the node is benchmarked in
    isolation at its current parallelism, then re-benchmarked at every value
    in ``parallelism_grid`` (cycle_length is kept in lock-step for
    ParallelInterleaveDatasetV4). Results are returned as a list of summary
    dicts, each tagged with ``name``, ``canonical_name`` and ``parallelism``.

    NOTE(review): mutates ``bench_options`` (adds "skip_first_n" when the
    default grid is used) and ``dataset_options`` (sets "stats_filename"
    when ``record_model``) in place — callers should pass copies.
    """
    # Canonicalize node names via a topological order of the dataset graph.
    G = graphdef_to_networkx(surgeon.as_graph_def())
    topo_sort = nx.topological_sort(G)
    topo_sort_dataset = filter(is_dataset_node, topo_sort)
    remapper = remap_dataset_names(topo_sort_dataset)
    if parallelism_grid is None:
        parallelism_grid = range(1, 20)
        # Only applied with the default grid: warm-up elements to skip.
        bench_options["skip_first_n"] = 10
    all_benchmark_summary = []
    # Ops that are cheap/structural and not worth benchmarking.
    IGNORE_LIST_OPS = ["TensorSliceDataset", "ShardDataset",
                       "MaxIntraOpParallelismDataset",
                       "PrivateThreadPoolDataset",
                       "ModelDataset",
                       ]
    dataset_nodes = [n for n in surgeon if "Dataset"
                     in n.op and n.op not
                     in IGNORE_LIST_OPS]
    # Shuffle for less bias
    random.shuffle(dataset_nodes)
    for node in dataset_nodes:
        # Fresh copy per node so parallelism edits don't leak across nodes.
        _surgeon = copy.deepcopy(surgeon)
        # TODO(mkuchnik): Remove
        # Temporary filter: only sweep nodes with "Parallel" in their name.
        if "Parallel" not in node.name:
            continue
        print("Benchmarking {}".format(node.name))
        if record_model:
            filename = "stats_node.pb"
            dataset_options["stats_filename"] = filename
        # Baseline benchmark at the node's current parallelism.
        benchmark_summary = benchmark_node_dataset(
            _surgeon, node.name, dataset_options, bench_options, take_amount)
        # TODO(mkuchnik): End Remove
        benchmark_summary["name"] = node.name
        benchmark_summary["canonical_name"] = remapper[node.name]
        all_benchmark_summary.append(benchmark_summary)
        if record_model:
            # Best-effort: dump the recorded Plumber stats for this run.
            def try_record_model():
                try:
                    plumber = tf.data.experimental.analysis.PlumberPerformanceModel(
                        filename)
                    model = plumber.model()
                    runtime_data = get_runtime_data(model)
                    print(runtime_data)
                    recommendation = model.recommendation()
                    ranked_nodes = recommendation.ranked_list_bottleneck_nodes_analysis()
                    df = ranked_nodes_to_df(ranked_nodes)
                    print(df)
                except:
                    # NOTE(review): bare except silently swallows all errors
                    # (including KeyboardInterrupt) — diagnostics only.
                    pass
            try_record_model()
        # TODO(mkuchnik): Remove surgeon_node
        surgeon_node = find_node_by_name(_surgeon, node.name)
        try:
            parallelism_node = parallelism_parameter_name(surgeon_node)
        except RuntimeError as ex:
            # Node has no tunable parallelism input; record and move on.
            print(ex)
            benchmark_summary["parallelism"] = None
            continue
        parallelism_surgeon_node = [k for k in _surgeon if
                                    k.name == parallelism_node]
        assert len(parallelism_surgeon_node) == 1, \
            "Expected to find 1 node for {}, but found {}".format(
                parallelism_node, len(parallelism_surgeon_node))
        parallelism_surgeon_node = parallelism_surgeon_node[0]
        parallelism_tensor = parallelism_surgeon_node.attr["value"].tensor
        assert len(parallelism_tensor.int64_val) == 1
        parallelism_param = parallelism_tensor.int64_val[0]
        benchmark_summary["parallelism"] = int(parallelism_param)
        # Fork the parallelism const so edits don't affect other consumers.
        new_parallelism_surgeon_node = fork_node(_surgeon,
                                                 parallelism_surgeon_node)
        i = parallelism_parameter_index(surgeon_node)
        node_input = surgeon_node.input[i]
        assert(node_input == parallelism_surgeon_node.name)
        surgeon_node.input[i] = new_parallelism_surgeon_node.name
        parallelism_tensor = new_parallelism_surgeon_node.attr["value"].tensor
        if surgeon_node.op == "ParallelInterleaveDatasetV4":
            # Adjust cycle length to match parallelism
            i = cycle_length_parameter_index(surgeon_node)
            node_input = surgeon_node.input[i]
            cycle_surgeon_node = [k for k in _surgeon if
                                  k.name == node_input]
            assert len(cycle_surgeon_node) == 1
            cycle_surgeon_node = cycle_surgeon_node[0]
            new_cycle_surgeon_node = fork_node(_surgeon,
                                               cycle_surgeon_node)
            surgeon_node.input[i] = new_cycle_surgeon_node.name
            cycle_tensor = new_cycle_surgeon_node.attr["value"].tensor
        else:
            cycle_tensor = None
        for p in parallelism_grid:
            # Skip the value that was already measured as the baseline.
            if p != parallelism_tensor.int64_val[0]:
                print("Benchmarking {} with parallelism={}".format(
                    node.name, p))
                parallelism_tensor.int64_val[:] = [p]
                if cycle_tensor:
                    cycle_tensor.int64_val[:] = [p]
                benchmark_summary = benchmark_node_dataset(
                    _surgeon, node.name, dataset_options, bench_options,
                    take_amount)
                benchmark_summary["name"] = node.name
                benchmark_summary["canonical_name"] = remapper[node.name]
                benchmark_summary["parallelism"] = int(p)
                all_benchmark_summary.append(benchmark_summary)
                if record_model:
                    try_record_model()
    return all_benchmark_summary
def benchmark_all_nodes_dataset_from_plumber(filename, dataset_options: dict,
                                             bench_options: dict,
                                             take_amount: int = 500):
    """Load a Plumber stats file and benchmark every dataset node in its graph.

    Args:
        filename: path to the serialized Plumber stats (.pb) file.
        dataset_options: options forwarded to pipeline instantiation.
        bench_options: options forwarded to the per-node benchmark.
        take_amount: number of cached elements each node is benchmarked over.

    Returns:
        List of per-node benchmark summary dicts.
    """
    plumber = \
        tf.data.experimental.analysis.PlumberPerformanceModel(filename)
    model = plumber.model()
    graphdef = model.graphdef()
    surgeon = graphsurgeon.DynamicGraph(graphdef)
    # Bug fix: take_amount was previously accepted but never forwarded, so
    # callers could not actually control the benchmark's cached-prefix size.
    all_benchmark_summary = benchmark_all_nodes_dataset(
        surgeon, dataset_options, bench_options, take_amount=take_amount)
    return all_benchmark_summary
def extract_theta_from_runtime_data(runtime_data):
    """Pop the LP theta solutions out of ``runtime_data``.

    Removes "Convex_Theta" and "Convex_Theta_Existing" from the mapping
    (mutating it in place) and returns them as a new dict. Raises KeyError
    if either key is missing.
    """
    return {key: runtime_data.pop(key)
            for key in ("Convex_Theta", "Convex_Theta_Existing")}
def main(_):
    """Iteratively optimize a tf.data pipeline using Plumber statistics.

    Workflow: load the baseline "stats.pb", optionally benchmark the
    unmodified pipeline and the LP (convex-solver) recommendation, then run
    ``num_steps`` optimization steps. Each step may also evaluate
    ``num_deviations - 1`` random "deviation" choices for comparison. Rates,
    per-node stats, thetas, and benchmark summaries are written to CSV files
    in the working directory after every step.
    """
    num_steps = FLAGS.num_steps
    num_deviations = FLAGS.num_deviations # set to 1 for normal run
    assert num_deviations >= 1, "num_deviations has to be at least 1"
    dataset_options = {
        "stats_filename": "stats_new.pb",
        "map_and_batch_fusion": False,
        "take_amount": None,
    }
    bench_options = {
        "time_limit_s": FLAGS.time_limit_s,
    }
    if FLAGS.strategy not in STRATEGIES:
        # Bug fix: the message previously said "time_limit_s=" while
        # formatting the strategy value.
        raise ValueError("strategy={} not in {}".format(
            FLAGS.strategy, STRATEGIES))
    strategy = FLAGS.strategy
    ds, runtime_data = load_pipeline("stats.pb", dataset_options)
    thetas_dict = extract_theta_from_runtime_data(runtime_data)
    print("Runtime_data\n{}".format(pd.Series(data=runtime_data)))
    if not FLAGS.skip_baseline:
        benchmark_summary = gen_util.benchmark_dataset(ds, **bench_options)
    else:
        benchmark_summary = {"global_minibatch_rate": None}
    if not FLAGS.skip_LP_baseline:
        # Benchmark the pipeline with the LP solver's parallelism plan applied.
        plumber = tf.data.experimental.analysis.PlumberPerformanceModel(
            "stats.pb")
        model = plumber.model()
        graphdef = model.graphdef()
        # NOTE(review): this rebinds thetas_dict to the inner "Convex_Theta"
        # mapping, so the thetas_df built below sees the rounded subdict when
        # this branch runs — looks unintentional, TODO confirm.
        thetas_dict = thetas_dict["Convex_Theta"]
        # Round to integer parallelism
        for k in thetas_dict:
            thetas_dict[k] = max(int(round(thetas_dict[k])), 1)
        LP_graphdef = apply_thetas_recommendation(
            graphdef, thetas_dict)
        surgeon = graphsurgeon.StaticGraph(LP_graphdef)
        element_spec = element_spec_from_graph(surgeon)
        LP_ds = instantiate_pipeline(LP_graphdef,
                                     element_spec,
                                     dataset_options)
        LP_benchmark_summary = gen_util.benchmark_dataset(LP_ds,
                                                          **bench_options)
    else:
        LP_benchmark_summary = {"global_minibatch_rate": None}
    rate = benchmark_summary["global_minibatch_rate"]
    rates = [rate]
    changes = [None]
    del ds
    benchmark_summary["step"] = 0
    benchmark_summary["change"] = None
    benchmark_summary["deviation"] = 0
    benchmark_summary.update(runtime_data)
    clear_graph()
    # Start with original stats
    if not FLAGS.rebench_baseline:
        shutil.copyfile("stats.pb", "stats_new.pb")
    shutil.copyfile("stats_new.pb", "stats_new_0_0.pb")
    if FLAGS.sweep_nodes:
        # Per-node parallelism sweep (no stats recording, no profiling).
        _bench_options = copy.deepcopy(bench_options)
        _bench_options["profile_interval"] = None
        _dataset_options = copy.deepcopy(dataset_options)
        _dataset_options["stats_filename"] = None
        all_benchmark_summary = benchmark_all_nodes_dataset_from_plumber(
            "stats_new_0_0.pb", _dataset_options, _bench_options)
        def transform_to_df(data):
            df = pd.DataFrame(data=data, index=[0])
            return df
        all_benchmark_summary_df = [transform_to_df(s) for s in
                                    all_benchmark_summary]
        all_benchmark_summary_df = pd.concat(all_benchmark_summary_df)
        all_benchmark_summary_df.reset_index(inplace=True)
        all_benchmark_summary_df.to_csv("sweep_all_node_benchmark_stats.csv")
    thetas_df = pd.DataFrame(data=thetas_dict, index=[0])
    thetas_df["step"] = 0
    thetas_df["deviation"] = 0
    thetas_dfs = [thetas_df]
    dfs = []
    benchmark_dfs = [pd.DataFrame(benchmark_summary, index=[0])]
    del benchmark_summary
    for i in range(1, num_steps + 1):
        # NOTE(mkuchnik): Take is already applied
        _dataset_options = copy.deepcopy(dataset_options)
        _new_dataset_options = None
        for deviation in range(num_deviations):
            # deviation 0 uses the chosen strategy and advances the search;
            # deviations >= 1 explore random alternatives for comparison only.
            curr_dataset_options = copy.deepcopy(_dataset_options)
            curr_dataset_options["stats_filename"] = \
                "stats_new_{}_{}.pb".format(i, deviation)
            curr_strategy = strategy if not deviation else \
                "random_valid_deviation"
            ds, changed_node, curr_dataset_options, df, runtime_data = \
                step_pipeline("stats_new_{}_0.pb".format(i - 1),
                              curr_dataset_options,
                              curr_strategy)
            thetas_dict = extract_theta_from_runtime_data(runtime_data)
            thetas_df = pd.DataFrame(data=thetas_dict, index=[0])
            thetas_df["step"] = i
            thetas_df["deviation"] = deviation
            thetas_dfs.append(thetas_df)
            print("Runtime_data\n{}".format(pd.Series(data=runtime_data)))
            df["step"] = i
            df["deviation"] = deviation
            clear_graph()
            try:
                benchmark_summary = gen_util.benchmark_dataset(ds,
                                                               **bench_options)
            except TypeError as ex:
                # NOTE(review): breaking at deviation 0 leaves
                # _new_dataset_options as None for the next step — TODO confirm.
                print(ex)
                break
            rate = benchmark_summary["global_minibatch_rate"]
            del ds
            dfs.append(df)
            if not deviation:
                _new_dataset_options = curr_dataset_options
                rates.append(rate)
                changes.append(changed_node)
            # Persist cumulative results after every (step, deviation).
            global_df = pd.concat(dfs)
            global_df.to_csv("node_stats.csv")
            global_thetas_df = pd.concat(thetas_dfs).reset_index()
            global_thetas_df.to_csv("thetas.csv")
            benchmark_summary["step"] = i
            benchmark_summary["change"] = changed_node
            benchmark_summary["deviation"] = deviation
            benchmark_summary.update(runtime_data)
            benchmark_df = pd.DataFrame(benchmark_summary, index=[0])
            benchmark_dfs.append(benchmark_df)
            global_benchmark_df = \
                pd.concat(benchmark_dfs).reset_index(drop=True)
            global_benchmark_df.to_csv("benchmark_stats.csv")
            print("Rates:\n{}".format(pprint.pformat(rates)))
            print("Changes:\n{}".format(pprint.pformat(changes)))
        dataset_options = _new_dataset_options
    print("Rates:\n{}".format(pprint.pformat(rates)))
    print("Changes:\n{}".format(pprint.pformat(changes)))
if __name__ == '__main__':
app.run(main) | [
"michaelkuchnik@gmail.com"
] | michaelkuchnik@gmail.com |
44564d43c1934ab6625863de0fc90ab17d11dada | 9afbcb367de9bf055d531d285bc299a9ca3040fe | /next_partial.py | 6290584281b1e87c10e787a4fd4eb350d01744a1 | [] | no_license | mysqlplus163/aboutPython | a41a5bc2efd43b53d4acf96e7477e80c022cf657 | fa7c3e6f123158011d8726b28bfcd0dee02fa853 | refs/heads/master | 2020-03-21T05:06:19.949902 | 2018-03-14T16:04:54 | 2018-03-14T16:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Q1mi"
# Email: master@liwenzhou.com
"""
Idioms learned from "Python Web Programming in Practice" (the little green book).
"""
# 1. Use next() to get the first value in a loop that satisfies a condition.
# Both approaches below find the first multiple of 4 in 1..9 (i.e. 4).
a1 = -1
for i in range(1, 10):
    if not i % 4:
        a1 = i
        break
print(a1)
# next() with a generator expression and a default does the same in one line.
a2 = next((i for i in range(1, 10) if not i % 4), -1)
print(a2)
# 2. Call something repeatedly until a sentinel value ends the loop.
# Manual version:
"""
blocks = []
while True:
    block = f.read(32)
    if block == "":
        break
    blocks.append(block)
"""
# iter(callable, sentinel) version — stops when partial(f.read, 32) returns "":
"""
from functools import partial
blocks = []
for block in iter(partial(f.read, 32), ""):
    blocks.append(block)
"""
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
bf4ed8e20cfbcef2a691f202e56011fc09f79742 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/nlp/mass/src/transformer/self_attention.py | 5a21c5aaf31b296c7e83ef88ba3d0b095f8fca39 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 3,168 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Self-Attention block."""
import mindspore.common.dtype as mstype
from mindspore import nn
from .multi_head_attention import MultiHeadAttention
from .residual_conn import ResidualConnection
from .components import LayerNorm
class SelfAttention(nn.Cell):
    """
    Pre-norm self-attention block.

    Computation: LayerNorm -> Multi-Head Self-Attention -> Add & Dropout.

    Args:
        attn_embed_dim (int): Dimensions of attention weight, e.g. Q, K, V.
        num_attn_heads (int): Attention heads number. Default: 1.
        attn_dropout_prob (float): Dropout rate in attention. Default: 0.1.
        initializer_range (float): Initial range.
        dropout_prob (float): Dropout rate.
        has_attention_mask (bool): Whether has attention mask.
        compute_type (mstype): Mindspore data type. Default: mstype.float32.

    Returns:
        Tensor, shape (N, T, D).
    """

    def __init__(self,
                 attn_embed_dim,
                 num_attn_heads,
                 attn_dropout_prob=0.1,
                 initializer_range=0.02,
                 dropout_prob=0.1,
                 has_attention_mask=True,
                 compute_type=mstype.float32):
        super(SelfAttention, self).__init__()
        self.multi_head_self_attention = MultiHeadAttention(
            src_dim=attn_embed_dim,
            tgt_dim=attn_embed_dim,
            attn_embed_dim=attn_embed_dim,
            num_attn_heads=num_attn_heads,
            attention_dropout_prob=attn_dropout_prob,
            initializer_range=initializer_range,
            has_attention_mask=has_attention_mask,
            do_return_2d_tensor=False,
            compute_type=compute_type)
        self.layer_norm = LayerNorm(in_channels=attn_embed_dim)
        self.residual = ResidualConnection(dropout_prob=dropout_prob)

    def construct(self, queries, keys, values, attention_mask):
        """
        Run the self-attention block.

        Args:
            queries (Tensor): Shape (N, T, D).
            keys (Tensor): Shape (N, T', D).
            values (Tensor): Shape (N, T', D).
            attention_mask (Tensor): Shape (N, T, T').

        Returns:
            Tensor, shape (N, T, D).
        """
        # Normalize queries first (pre-norm); the residual uses the raw input.
        normed_queries = self.layer_norm(queries)  # (N, T, D)
        attn_output = self.multi_head_self_attention(
            normed_queries, keys, values, attention_mask)  # (N, T, D)
        return self.residual(attn_output, queries)
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
4b92a27efd3b3a79589583a27bfec2e8f52dbb20 | 888e79392cb660be5799cc5bd25d76bcfa9e2e2c | /doctorus/doctorus/doctype/estados_de_evaluacion/estados_de_evaluacion.py | 3ce28a77294e67d9b00cbd7cdc44cba53e700881 | [
"MIT"
] | permissive | Nirchains/doctorus | 269eadee5754612c521d1c6193d5fe7bbfdb3b8a | 38d39270742dfdae6597a06713952df01a2c3e9d | refs/heads/master | 2020-03-17T07:09:30.046005 | 2019-05-08T06:51:50 | 2019-05-08T06:51:50 | 133,386,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, HISPALIS DIGITAL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EstadosdeEvaluacion(Document):
    """Frappe DocType controller for "Estados de Evaluacion".

    No custom server-side behavior: fields and validation come entirely from
    the DocType definition and Frappe's base Document class.
    """
    pass
| [
"nirchains@gmail.com"
] | nirchains@gmail.com |
7443f4fff300825d8d672d4aa3125b64f6155161 | 5254c3a7e94666264120f26c87734ad053c54541 | /Entregas/Entrega Semana N°7/vida.py | e9cdc579900b67a214dad9a18b6c5ae8437809ea | [] | no_license | ccollado7/UNSAM---Python | 425eb29a2df8777e9f892b08cc250bce9b2b0b8c | f2d0e7b3f64efa8d03f9aa4707c90e992683672d | refs/heads/master | 2023-03-21T17:42:27.210599 | 2021-03-09T13:06:45 | 2021-03-09T13:06:45 | 286,613,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 14:23:33 2020
@author: Claudio Collado
"""
#Ejercicio 7.1
from datetime import datetime
def segundos_nacimiento(fecha):
    """Return the whole number of seconds lived since a birth date.

    Args:
        fecha: birth date as a 'dd/mm/YYYY' string (day, month, 4-digit
            year, separated by forward slashes).

    Returns:
        Seconds elapsed from that date until now, truncated to an int.
    """
    nacimiento = datetime.strptime(fecha, '%d/%m/%Y')
    transcurrido = datetime.now() - nacimiento
    return int(transcurrido.total_seconds())
# Manual checks: print seconds lived for a few sample birth dates.
fecha_1 = segundos_nacimiento('02/04/1985')
print(fecha_1)
fecha_2 = segundos_nacimiento('01/01/2020')
print(fecha_2)
fecha_3 = segundos_nacimiento('01/01/2010')
print(fecha_3) | [
"46108725+ccollado7@users.noreply.github.com"
] | 46108725+ccollado7@users.noreply.github.com |
f72f1f8975ef40e6fe9352bd48103f7ec16d903e | 12362aa3c315e2b72ed29193ee24e3fd7f1a57db | /LeetCode/0232-Implement Queue using Stacks/main.py | 2261e5e4e7a767227fb8f251fe8dd92dd750aff4 | [] | no_license | PRKKILLER/Algorithm_Practice | f2f4662352516965777605ccf116dd7945c4b94a | 73654b6567fdb282af84a868608929be234075c5 | refs/heads/master | 2023-07-03T23:24:15.081892 | 2021-08-09T03:55:12 | 2021-08-09T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | """
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
"""
class MyQueue:
    """FIFO queue backed by two LIFO stacks (amortized O(1) per operation).

    New elements land on ``_in_stack``; ``_out_stack`` holds elements in
    dequeue order. Elements migrate only when the output stack runs dry, so
    each element is moved at most once.
    """

    def __init__(self):
        """Initialize your data structure here."""
        self._in_stack = []
        self._out_stack = []

    def push(self, x: int) -> None:
        """Push element x to the back of queue."""
        self._in_stack.append(x)

    def pop(self) -> int:
        """Remove and return the element at the front of the queue."""
        self.shuffle()
        return self._out_stack.pop()

    def peek(self) -> int:
        """Return (without removing) the element at the front of the queue."""
        self.shuffle()
        return self._out_stack[-1]

    def empty(self) -> bool:
        """Return whether the queue is empty."""
        return not (self._in_stack or self._out_stack)

    def shuffle(self):
        """Refill the output stack from the input stack when it is empty."""
        if self._out_stack:
            return
        self._out_stack.extend(reversed(self._in_stack))
        self._in_stack.clear()
| [
"dw6000@163.com"
] | dw6000@163.com |
8bf3648d4e9800f14bf2caa630487f844621f7e0 | ec21d4397a1939ac140c22eca12491c258ed6a92 | /instances/sapl23/Products/ILSAPL/skins/sk_sapl/pysc/.svn/text-base/votacao_restaurar_situacao_inicial_pysc.py.svn-base | 3bea46fc54a0e53dd862c413918976db43459968 | [] | no_license | wpjunior/proled | dc9120eaa6067821c983b67836026602bbb3a211 | 1c81471295a831b0970085c44e66172a63c3a2b0 | refs/heads/master | 2016-08-08T11:59:09.748402 | 2012-04-17T07:37:43 | 2012-04-17T07:37:43 | 3,573,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | ## Script (Python) "votacao_restaurar_situacao_inicial_pysc"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=cod_materia
##title=
##
try:
context.zsql.votacao_restaurar_parlamentar_zsql(cod_materia=cod_materia)
context.zsql.votacao_restaurar_zsql(cod_materia=cod_materia)
except:
pass
return 1
| [
"root@cpro5106.publiccloud.com.br"
] | root@cpro5106.publiccloud.com.br | |
80cef7f808746a77dbe20e6a66357fe24b83d06b | 6819a924ee1cff66f508e85e26f826c1f0b08267 | /feeds.py | bc0b5e9d1d4101041835ec6fa1683aea521cf0da | [
"MIT"
] | permissive | joskid/snippify | 94245539a96a0327b8f431c51598673ef951d2ba | b692a941a7a46959df9aff064b7ad056d0125484 | refs/heads/master | 2021-01-18T05:25:51.818175 | 2011-06-16T20:13:05 | 2011-06-16T20:13:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | """ For now there are """
from django.contrib.syndication.feeds import Feed, FeedDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from taggit.models import Tag
from snippets.models import Snippet
class LatestSnippets(Feed):
""" Get latest global snippets """
title = u"Latest snippets"
link = "/snippets/"
description = "Updates on changes and additions to snippify"
def items(self):
return Snippet.objects.order_by('-created_date')[:10]
class LatestTag(Feed):
"""Get latest snippets for a specific tag"""
def get_object(self, bits):
if len(bits) != 2:
raise ObjectDoesNotExist
tag = Tag.objects.get(name=bits[0])
if tag is None:
raise FeedDoesNotExist
return tag
def title(self, obj):
return u"Latest snippets in %s" % obj.name
def link(self, obj):
if not obj:
raise FeedDoesNotExist
return '/tag/' + str(obj.name) + '/'
def description(self, obj):
return u"Updates on changes and additions to snippify in "
"%s tag" % obj.name
def items(self, obj):
return Snippet.objects.filter(tags__name__in=[obj.name]).\
order_by('-created_date')[:10]
class LatestUser(Feed):
"""Get latest snippets for a specific user"""
def get_object(self, bits):
if len(bits) != 2:
raise ObjectDoesNotExist
user = User.objects.get(username=bits[0])
if user is None:
raise FeedDoesNotExist
return user
def title(self, obj):
return "Latest snippets in %s" % obj.username
def link(self, obj):
if not obj:
raise FeedDoesNotExist
return '/account/' + str(obj.username) + '/'
def description(self, obj):
return "Updates on changes and additions to snippify.me in %s tag" % obj.username
def items(self, obj):
return Snippet.objects.filter(author=obj.id).\
order_by('-created_date')[:10]
| [
"alexandru.plugaru@gmail.com"
] | alexandru.plugaru@gmail.com |
4197891e02e3bd1e8943643b9c545370a44a5c14 | 73e7f93353ff6fa706ec644ac24d87de970b7115 | /src/keyboard_handler/__init__.py | 74f3084f3ed6eb49189a7a608a115df20629d888 | [
"MIT"
] | permissive | wafiqtaher/TheQube | 59c873bf7554088a8d436b58c2f0b6e72e6660d9 | fcfd8a68b15948e0740642d635db24adef8cc314 | refs/heads/master | 2022-04-07T10:52:50.469554 | 2020-01-27T14:07:33 | 2020-01-27T14:07:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # -*- coding: utf-8 -*-
from main import KeyboardHandler, KeyboardHandlerError
__all__ = ["KeyboardHandler", "KeyboardHandlerError", "WXKeyboardHandler", "WXPanelKeyboardHandler"]
| [
"andre@oire.org"
] | andre@oire.org |
07ddf970dc79e9698f5e51a218ff3857d2095bd4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03186/s117979946.py | 86c1d20329340bc1f926ab78103e45762038a3ec | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | a,b,c=map(int,input().split())
if c<=b:
print(c+b)
else:
if c-b<=a:
print(c+b)
else:
print(a+b+b+1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4f3ccf728dfe75aae7e2984b5f59f776001750f0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_372/ch22_2020_03_31_23_32_56_982191.py | ce25e75a074e8b3a8f3aa789366088cc04c78302 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | cigarros = int(input('Quantos cigarros você fuma por dia? ')
anos = int(input('Há quantos anos você fuma? ')
a=(365*cigarros)*anos | [
"you@example.com"
] | you@example.com |
90143fc733d7a98aaff3c395f14752f60520d6cf | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2021_07_01_preview/aio/operations/_access_review_instances_assigned_for_my_approval_operations.py | 84b325b2e4ec07044f743f0ce2f9663982f87889 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 9,720 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._access_review_instances_assigned_for_my_approval_operations import (
build_get_by_id_request,
build_list_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewInstancesAssignedForMyApprovalOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_07_01_preview.aio.AuthorizationManagementClient`'s
:attr:`access_review_instances_assigned_for_my_approval` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, schedule_definition_id: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.AccessReviewInstance"]:
"""Get access review instances assigned for my approval.
:param schedule_definition_id: The id of the access review schedule definition. Required.
:type schedule_definition_id: str
:param filter: The filter to apply on the operation. Other than standard filters, one custom
filter option is supported : 'assignedToMeToReview()'. When one specified
$filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
returned. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewInstance or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_07_01_preview.models.AccessReviewInstance]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2021-07-01-preview")
)
cls: ClsType[_models.AccessReviewInstanceListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
schedule_definition_id=schedule_definition_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("AccessReviewInstanceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances"
}
@distributed_trace_async
async def get_by_id(self, schedule_definition_id: str, id: str, **kwargs: Any) -> _models.AccessReviewInstance:
"""Get single access review instance assigned for my approval.
:param schedule_definition_id: The id of the access review schedule definition. Required.
:type schedule_definition_id: str
:param id: The id of the access review instance. Required.
:type id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessReviewInstance or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2021_07_01_preview.models.AccessReviewInstance
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2021-07-01-preview")
)
cls: ClsType[_models.AccessReviewInstance] = kwargs.pop("cls", None)
request = build_get_by_id_request(
schedule_definition_id=schedule_definition_id,
id=id,
api_version=api_version,
template_url=self.get_by_id.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("AccessReviewInstance", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {
"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/instances/{id}"
}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
b02c815984671705a915af7da9c06659b7232120 | 48d232cc6dcf57abf6fca9cbbef8943e189acb04 | /cake-thief.py | 4544306cd668b4d93270e1585e8aca5e3bb9d888 | [] | no_license | csusb-005411285/CodeBreakersCode | dae796ba4262770e0a568e9c27597a041db0775c | 8f218164e1b9e42c1a928d22ef5a76328abb66a2 | refs/heads/master | 2022-01-12T09:11:33.668338 | 2021-12-27T04:45:13 | 2021-12-27T04:45:13 | 232,490,141 | 1 | 1 | null | 2021-01-29T23:09:14 | 2020-01-08T06:02:11 | Python | UTF-8 | Python | false | false | 789 | py | def max_duffel_bag_value(cake_tuples, weight_capacity):
if weight_capacity == 0:
return 0
cache = {}
for i in range(weight_capacity + 1):
cache[i] = 0
for weight in range(weight_capacity + 1): # 4
max_value_at_weight = 0
for cake in cake_tuples: # (2, 1)
max_value_cake = 0
if cake[0] == 0 and cake[1] != 0:
return float('inf')
if cake[0] <= weight: # 2 <= 4
max_value_cake = cake[1] # 1
remaining_weight = weight - cake[0] # 2
max_value_at_weight = max(max_value_at_weight, max_value_cake + cache[remaining_weight]) # 2
cache[weight] = max_value_at_weight # {0: 0, 1: 0, 2: 1, 3: 1, 4: 2}
return cache[weight_capacity]
| [
"noreply@github.com"
] | csusb-005411285.noreply@github.com |
1d90f7404d9cf3abc78760441a556f49047fbccc | b0eddf070767a08ea41a474eb424c51b81b856a9 | /week-5/monday/character.py | e7a02b28a926a8b461f86f3774307c3cced10f91 | [] | no_license | green-fox-academy/florimaros | fdd645c8ed6620e2b5021a2feca056049438a951 | 39741ea40e18441877c61a7fdf20b832cccf247a | refs/heads/master | 2021-05-30T14:35:57.456067 | 2016-02-25T19:53:44 | 2016-02-25T19:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | class Character():
def __init__(self, name, hp, damage):
self.name = name
self.hp = hp
self.damage = damage
def drink_potion(self):
self.hp += 10
def strike(self, opponent):
opponent.hp -= self.damage
def get_status(self):
life_status = "dead"
if self.hp > 0:
life_status = "HP: " + str(self.hp)
return self.name + life_status
| [
"flori.maros.adw@gmail.com"
] | flori.maros.adw@gmail.com |
55366f7b56526ec007a05e15bef779b09f10e3bd | c175c4e3560c6c66ec2b0c4b439cd586878b44a5 | /prplatform/submissions/migrations/0016_answer_uploaded_file.py | aede969729e418c53d85a3ea901af39719f4650a | [
"MIT"
] | permissive | piehei/prplatform | fd30e2e388597583b9ef0e59462ea9643f7244ba | f3248b66019f207bb06a4681a62057e175408b3e | refs/heads/master | 2020-03-09T17:09:47.893706 | 2019-09-18T15:24:58 | 2019-09-18T15:24:58 | 128,902,940 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Generated by Django 2.1.2 on 2018-11-13 13:12
from django.db import migrations, models
import prplatform.submissions.models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0015_auto_20181109_1251'),
]
operations = [
migrations.AddField(
model_name='answer',
name='uploaded_file',
field=models.FileField(blank=True, upload_to=prplatform.submissions.models.answer_upload_fp),
),
]
| [
"ph@extreg.com"
] | ph@extreg.com |
966ea3d3546ffdf38dbbf81b30f804d781107f46 | 16076240644897ad0529a4cb7543e19dd5fc539a | /etl.py | ebf228080aabfd1519962b4734fac2b7a61292a0 | [] | no_license | MZ195/DEND-Data-Warehouse_AWS | a2eef32a6d372cd556868549c9422316dfeb2be0 | 47f96f91444a15e81386a9a4698dd5263fdb9f6e | refs/heads/master | 2022-10-14T18:12:34.309750 | 2020-06-10T09:33:30 | 2020-06-10T09:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | import configparser
import psycopg2
from logging import getLogger
from sql_queries import copy_table_queries, insert_table_queries
log = getLogger(__name__)
def load_staging_tables(cur, conn):
"""Loading the data from S3 buckets into the staging tables of Redshift
Keyword arguments:
cur -- the curser of the database
conn -- the connection to the database
"""
log.info("Loading staging tables...")
print("Loading staging tables...\n")
for query in copy_table_queries:
cur.execute(query)
conn.commit()
def insert_tables(cur, conn):
"""inserting the data into the facts and dimensional tables
Keyword arguments:
cur -- the curser of the database
conn -- the connection to the database
"""
log.info("inserting into dimensional and facts tables...")
print("inserting into dimensional and facts tables...\n")
for query in insert_table_queries:
cur.execute(query)
conn.commit()
def main():
config = configparser.ConfigParser()
config.read('dwh.cfg')
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
cur = conn.cursor()
log.info("Connection established")
print("Connection established\n")
load_staging_tables(cur, conn)
insert_tables(cur, conn)
conn.close()
if __name__ == "__main__":
main() | [
"40984264+MZ195@users.noreply.github.com"
] | 40984264+MZ195@users.noreply.github.com |
7387fc2c2fcc75e354b688ee8bb357bc9a6e723c | 4c0328c7fa7805cdd196cf890695ec1a8a438a5f | /devel/lib/python2.7/dist-packages/pal_navigation_msgs/msg/_JoyTurboActionFeedback.py | cc6cd8e2ac73cf99bf0acb00754793a923898078 | [] | no_license | SebsBarbas/iRob_KTH | d98dfce8692bdd4d32ce3a4d72daa8d022976c0a | c164c9d12efcab56b4871fc5bb5538df5849a42e | refs/heads/main | 2023-01-23T10:49:59.073113 | 2020-12-04T18:49:53 | 2020-12-04T18:49:53 | 318,595,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,040 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pal_navigation_msgs/JoyTurboActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import pal_navigation_msgs.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class JoyTurboActionFeedback(genpy.Message):
_md5sum = "aae20e09065c3809e8a8e87c4c8953fd"
_type = "pal_navigation_msgs/JoyTurboActionFeedback"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
JoyTurboFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: pal_navigation_msgs/JoyTurboFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
"""
__slots__ = ['header','status','feedback']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','pal_navigation_msgs/JoyTurboFeedback']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,feedback
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(JoyTurboActionFeedback, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_navigation_msgs.msg.JoyTurboFeedback()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.feedback = pal_navigation_msgs.msg.JoyTurboFeedback()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_navigation_msgs.msg.JoyTurboFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_2I().pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
_x = self.status.goal_id.id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(self.status.status))
_x = self.status.text
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.feedback is None:
self.feedback = pal_navigation_msgs.msg.JoyTurboFeedback()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 8
(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.goal_id.id = str[start:end].decode('utf-8')
else:
self.status.goal_id.id = str[start:end]
start = end
end += 1
(self.status.status,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.text = str[start:end].decode('utf-8')
else:
self.status.text = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| [
"ssbl@kth.se"
] | ssbl@kth.se |
be99e2028248897425d63985f3a164926c163f06 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatterpolargl/__init__.py | ca20fdc223e891433052b3060c7dc3fe90e4a0e9 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 1,266 | py | from ._visible import VisibleValidator
from ._unselected import UnselectedValidator
from ._uid import UidValidator
from ._thetaunit import ThetaunitValidator
from ._thetasrc import ThetasrcValidator
from ._theta import ThetaValidator
from ._textsrc import TextsrcValidator
from ._text import TextValidator
from ._subplot import SubplotValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._selected import SelectedValidator
from ._rsrc import RsrcValidator
from ._r import RValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._mode import ModeValidator
from ._marker import MarkerValidator
from ._line import LineValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoveron import HoveronValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._fillcolor import FillcolorValidator
from ._fill import FillValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._connectgaps import ConnectgapsValidator
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.