hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76cb577af9c67979a6febdbd56b344a5e668c7c4 | 1,086 | py | Python | data_operate_util.py | zs856/Second-hand-housing-guide-price-Shenzhen. | a4cbf0b3bc693d80d0ba64286ce9aeb0737c1698 | [
"MIT"
] | 2 | 2021-02-13T13:20:05.000Z | 2021-03-24T09:45:24.000Z | data_operate_util.py | zs856/Second-hand-housing-guide-price-Shenzhen. | a4cbf0b3bc693d80d0ba64286ce9aeb0737c1698 | [
"MIT"
] | null | null | null | data_operate_util.py | zs856/Second-hand-housing-guide-price-Shenzhen. | a4cbf0b3bc693d80d0ba64286ce9aeb0737c1698 | [
"MIT"
] | null | null | null | import re
import pandas as pd
from constant import shenzhen_data_csv_path
def shape_data(header, body):
    """Build a DataFrame from rows scraped out of a PDF.

    Also widens pandas' row-display limit so the whole table can be
    printed, matching the other helpers in this module.

    :param header: sequence of column names.
    :param body: iterable of rows (one sequence per row).
    :return: pandas.DataFrame whose columns are named per *header*.
    """
    pd.set_option('display.max_rows', None)
    frame = pd.DataFrame(body)
    frame.columns = header
    return frame
def to_csv(dataframe):
    '''
    Persist a DataFrame (extracted from the PDF) to the CSV file at
    `shenzhen_data_csv_path` (imported from `constant`), dropping the index.

    :param dataframe: pandas DataFrame to save.
    :return: None
    '''
    # User-facing progress message: "start writing data to the csv file".
    print("开始写入数据到csv文件")
    dataframe.to_csv(shenzhen_data_csv_path, index=False)
def read_data_from_csv(path):
    """Load the CSV file at *path* into a DataFrame.

    As a side effect, pandas is configured to display an unlimited number
    of rows (same behaviour as the other helpers in this module).

    :param path: path of the CSV file to read.
    :return: pandas.DataFrame holding the file's contents.
    """
    pd.set_option('display.max_rows', None)
    frame = pd.read_csv(path)
    return frame
def fuzzy_finder(user_input, collection):
    """Return every item of *collection* that fuzzily matches *user_input*.

    A match means the characters of *user_input* appear in the item in
    order, with anything in between (subsequence matching) — e.g. ``'mig'``
    matches ``'migration.py'``. Adapted from
    https://www.cnblogs.com/weiman3389/p/6047017.html

    :param user_input: the characters the user typed.
    :param collection: iterable of strings to search.
    :return: list of matching items, in their original order.
    """
    # Escape each character so regex metacharacters in the user's input
    # (e.g. '(', '.', '*') are matched literally; the previous version
    # crashed on unbalanced '(' and treated '.'/'*' as wildcards.
    pattern = '.*'.join(re.escape(ch) for ch in user_input)
    regex = re.compile(pattern)
    return [item for item in collection if regex.search(item)]
| 19.392857 | 59 | 0.643646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.444628 |
76cc225335a2213744e19bb14468fe461b9bf959 | 179 | py | Python | magi/agents/sac_ae/__init__.py | ethanluoyc/magi | 2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa | [
"Apache-2.0"
] | 86 | 2021-11-24T21:53:29.000Z | 2022-03-27T13:35:45.000Z | magi/agents/sac_ae/__init__.py | ethanluoyc/magi | 2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa | [
"Apache-2.0"
] | 7 | 2021-11-26T17:23:29.000Z | 2022-03-07T21:49:44.000Z | magi/agents/sac_ae/__init__.py | ethanluoyc/magi | 2ef2ba60989a55ccf8c90ba74c8e712fe301e2fa | [
"Apache-2.0"
] | 3 | 2021-11-27T11:13:18.000Z | 2022-01-24T14:38:53.000Z | """SAC-AE agent."""
from magi.agents.sac_ae.agent import SACAEAgent
from magi.agents.sac_ae.agent import SACAEConfig
from magi.agents.sac_ae.networks import make_default_networks
| 35.8 | 61 | 0.826816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.106145 |
76ce8679092b6c88a9cd642b89726a44b635faef | 86 | py | Python | test/lib-clay/externals/abi/newtypes/run.py | jb55/clay | db0bd2702ab0b6e48965cd85f8859bbd5f60e48e | [
"BSD-2-Clause"
] | 185 | 2015-01-04T09:33:17.000Z | 2022-03-18T04:53:38.000Z | test/externals/abi/complex/run.py | aep/clay | 92224d71c9d64a32d70a289593c13da7f970ec35 | [
"BSD-2-Clause"
] | 7 | 2015-03-22T06:13:42.000Z | 2015-12-28T19:07:24.000Z | test/externals/abi/complex/run.py | aep/clay | 92224d71c9d64a32d70a289593c13da7f970ec35 | [
"BSD-2-Clause"
] | 20 | 2015-01-09T22:24:46.000Z | 2022-03-18T04:54:59.000Z | import sys
# Make the shared test harness (one directory up) importable.
sys.path.append('..')
import external_test
# Delegate to the common external-test runner shared by these test dirs.
external_test.runExternalTest()
| 17.2 | 31 | 0.802326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.046512 |
76cf4f621c8a4523513d863e1b41164e09239127 | 34,495 | py | Python | luna/gateware/debug/ila.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | 1 | 2021-02-28T04:35:15.000Z | 2021-02-28T04:35:15.000Z | luna/gateware/debug/ila.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | null | null | null | luna/gateware/debug/ila.py | macdaliot/luna | 97e725d0af001a6d4c4811eefb43b6c31a9b45e1 | [
"BSD-3-Clause"
] | null | null | null | #
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Integrated logic analysis helpers. """
import io
import os
import sys
import math
import unittest
import tempfile
import subprocess
from abc import ABCMeta, abstractmethod
from nmigen import Signal, Module, Cat, Elaboratable, Memory, ClockDomain, DomainRenamer
from nmigen.hdl.ast import Rose
from nmigen.lib.cdc import FFSynchronizer
from nmigen.lib.fifo import AsyncFIFOBuffered
from vcd import VCDWriter
from vcd.gtkw import GTKWSave
from ..stream import StreamInterface
from ..interface.uart import UARTMultibyteTransmitter
from ..interface.spi import SPIDeviceInterface, SPIBus, SPIGatewareTestCase
from ..test.utils import LunaGatewareTestCase, sync_test_case
class IntegratedLogicAnalyzer(Elaboratable):
    """ Super-simple integrated-logic-analyzer generator class for LUNA.

    Captures ``sample_depth`` consecutive samples of the concatenation of
    ``signals`` into a block RAM, starting on the cycle ``trigger`` is strobed.

    Attributes
    ----------
    trigger: Signal(), input
        A strobe that determines when we should start sampling.
    sampling: Signal(), output
        Indicates when sampling is in progress.
    complete: Signal(), output
        Indicates when sampling is complete and ready to be read.
    captured_sample_number: Signal(), input
        Selects which sample the ILA will output. Effectively the address for the ILA's
        sample buffer.
    captured_sample: Signal(), output
        The sample corresponding to the relevant sample number.
        Can be broken apart by using Cat(*signals).

    Parameters
    ----------
    signals: iterable of Signals
        An iterable of signals that should be captured by the ILA.
    sample_depth: int
        The depth of the desired buffer, in samples.
    domain: string
        The clock domain in which the ILA should operate.
    sample_rate: float
        Cosmetic indication of the sample rate. Used to format output.
    samples_pretrigger: int
        The number of our samples which should be captured _before_ the trigger.
        This also can act like an implicit synchronizer; so asynchronous inputs
        are allowed if this number is >= 2. Note that the trigger strobe is read
        on the rising edge of the clock.
    """

    def __init__(self, *, signals, sample_depth, domain="sync", sample_rate=60e6, samples_pretrigger=1):
        self.domain = domain
        self.signals = signals
        # All captured signals are concatenated into one wide sample word.
        self.inputs = Cat(*signals)
        self.sample_width = len(self.inputs)
        self.sample_depth = sample_depth
        self.samples_pretrigger = samples_pretrigger
        self.sample_rate = sample_rate
        # Cosmetic only: used when formatting timestamps for output.
        self.sample_period = 1 / sample_rate
        #
        # Create a backing store for our samples.
        #
        self.mem = Memory(width=self.sample_width, depth=sample_depth, name="ila_buffer")
        #
        # I/O port
        #
        self.trigger = Signal()
        self.sampling = Signal()
        self.complete = Signal()
        self.captured_sample_number = Signal(range(0, self.sample_depth))
        self.captured_sample = Signal(self.sample_width)

    def elaborate(self, platform):
        m = Module()
        # TODO: switch this to a single-port RAM
        # Memory ports.
        write_port = self.mem.write_port()
        read_port = self.mem.read_port(domain='comb')
        m.submodules += [write_port, read_port]
        # If necessary, create synchronized versions of the relevant signals.
        # With >= 2 pretrigger stages this doubles as a 2FF synchronizer,
        # making asynchronous inputs safe to capture.
        if self.samples_pretrigger >= 2:
            delayed_inputs = Signal.like(self.inputs)
            m.submodules += FFSynchronizer(self.inputs, delayed_inputs,
                stages=self.samples_pretrigger)
        elif self.samples_pretrigger == 1:
            delayed_inputs = Signal.like(self.inputs)
            m.d.sync += delayed_inputs.eq(self.inputs)
        else:
            delayed_inputs = self.inputs
        # Counter that keeps track of our write position.
        write_position = Signal(range(0, self.sample_depth))
        # Set up our write port to capture the input signals,
        # and our read port to provide the output.
        m.d.comb += [
            write_port.data .eq(delayed_inputs),
            write_port.addr .eq(write_position),
            self.captured_sample .eq(read_port.data),
            read_port.addr .eq(self.captured_sample_number)
        ]
        # NOTE(review): `self.test` looks like leftover debug scaffolding —
        # it mirrors the read address and is never used elsewhere in this
        # file; confirm before removing.
        self.test = Signal()
        m.d.comb += self.test.eq(read_port.addr)
        # Don't sample unless our FSM asserts our sample signal explicitly.
        m.d.sync += write_port.en.eq(0)
        with m.FSM() as fsm:
            # `sampling` is high in every state except IDLE.
            m.d.comb += self.sampling.eq(~fsm.ongoing("IDLE"))
            # IDLE: wait for the trigger strobe
            with m.State('IDLE'):
                with m.If(self.trigger):
                    m.next = 'SAMPLE'
                    # Grab a sample as our trigger is asserted.
                    m.d.sync += [
                        write_port.en .eq(1),
                        write_position .eq(0),
                        self.complete .eq(0),
                    ]
            # SAMPLE: do our sampling
            with m.State('SAMPLE'):
                # Sample until we run out of samples.
                m.d.sync += [
                    write_port.en .eq(1),
                    write_position .eq(write_position + 1),
                ]
                # If this is the last sample, we're done. Finish up.
                with m.If(write_position + 1 == self.sample_depth):
                    m.next = "IDLE"
                    m.d.sync += [
                        self.complete .eq(1),
                        write_port.en .eq(0)
                    ]
        # Convert our sync domain to the domain requested by the user, if necessary.
        if self.domain != "sync":
            m = DomainRenamer({"sync": self.domain})(m)
        return m
class IntegratedLogicAnalyzerTest(LunaGatewareTestCase):
    """ Simulation tests for the core IntegratedLogicAnalyzer. """

    def instantiate_dut(self):
        # Three inputs that together form a 32-bit sample word (1 + 30 + 1).
        self.input_a = Signal()
        self.input_b = Signal(30)
        self.input_c = Signal()
        return IntegratedLogicAnalyzer(
            signals=[self.input_a, self.input_b, self.input_c],
            sample_depth = 32
        )

    def initialize_signals(self):
        yield self.input_a .eq(0)
        yield self.input_b .eq(0)
        yield self.input_c .eq(0)

    def provide_all_signals(self, value):
        """ Drives all three captured inputs from one packed 32-bit value. """
        all_signals = Cat(self.input_a, self.input_b, self.input_c)
        yield all_signals.eq(value)

    def assert_sample_value(self, address, value):
        """ Helper that asserts an ILA sample has a given value. """
        yield self.dut.captured_sample_number.eq(address)
        yield
        try:
            self.assertEqual((yield self.dut.captured_sample), value)
            return
        except AssertionError:
            pass
        # Generate an appropriate exception.
        # (Re-read so the message reports the value actually observed.)
        actual_value = (yield self.dut.captured_sample)
        message = "assertion failed: at address 0x{:08x}: {:08x} != {:08x} (expected)".format(address, actual_value, value)
        raise AssertionError(message)

    @sync_test_case
    def test_sampling(self):
        # Quick helper that generates simple, repetitive samples.
        def sample_value(i):
            return i | (i << 8) | (i << 16) | (0xFF << 24)
        yield from self.provide_all_signals(0xDEADBEEF)
        yield
        # Before we trigger, we shouldn't be capturing any samples,
        # and we shouldn't be complete.
        self.assertEqual((yield self.dut.sampling), 0)
        self.assertEqual((yield self.dut.complete), 0)
        # Advance a bunch of cycles, and ensure we don't start sampling.
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.sampling), 0)
        # Set a new piece of data for a couple of cycles.
        yield from self.provide_all_signals(0x01234567)
        yield
        yield from self.provide_all_signals(0x89ABCDEF)
        yield
        # Finally, trigger the capture.
        yield from self.provide_all_signals(sample_value(0))
        yield from self.pulse(self.dut.trigger, step_after=False)
        yield from self.provide_all_signals(sample_value(1))
        yield
        # After we pulse our trigger strobe, we should be sampling.
        self.assertEqual((yield self.dut.sampling), 1)
        # Populate the memory with a variety of interesting signals;
        # and continue afterwards for a couple of cycles to make sure
        # these don't make it into our sample buffer.
        for i in range(2, 34):
            yield from self.provide_all_signals(sample_value(i))
            yield
        # We now should be done with our sampling.
        self.assertEqual((yield self.dut.sampling), 0)
        self.assertEqual((yield self.dut.complete), 1)
        # Validate the memory values that were captured.
        for i in range(32):
            yield from self.assert_sample_value(i, sample_value(i))
        # All of those reads shouldn't change our completeness.
        self.assertEqual((yield self.dut.sampling), 0)
        self.assertEqual((yield self.dut.complete), 1)
class SyncSerialILA(Elaboratable):
    """ Super-simple ILA that reads samples out over a simple unidirectional SPI.

    Create a receiver for this object by calling apollo.ila_receiver_for(<this>).

    This protocol is simple: every time CS goes low, we begin sending out a bit of
    sample on each rising edge. Once a new sample is complete, the next sample begins
    on the next 32-bit boundary.

    Attributes
    ----------
    trigger: Signal(), input
        A strobe that determines when we should start sampling.
    sampling: Signal(), output
        Indicates when sampling is in progress.
    complete: Signal(), output
        Indicates when sampling is complete and ready to be read.
    sck: Signal(), input
        Serial clock for the SPI lines.
    sdo: Signal(), output
        Serial data out for the SPI lines.
    cs: Signal(), input
        Chip select for the SPI lines.

    Parameters
    ----------
    signals: iterable of Signals
        An iterable of signals that should be captured by the ILA.
    sample_depth: int
        The depth of the desired buffer, in samples.
    domain: string
        The clock domain in which the ILA should operate.
    samples_pretrigger: int
        The number of our samples which should be captured _before_ the trigger.
        This also can act like an implicit synchronizer; so asynchronous inputs
        are allowed if this number is >= 2.
    clock_polarity: int, 0 or 1
        Clock polarity for the output SPI transciever. Optional.
    clock_phase: int, 0 or 1
        Clock phase for the output SPI transciever. Optional.
    cs_idles_high: bool, optional
        If True, the CS line will be assumed to be asserted when cs=0.
        If False or not provided, the CS line will be assumed to be asserted when cs=1.
        This can be used to share a simple two-device SPI bus, so two internal endpoints
        can use the same CS line, with two opposite polarities.
    """

    def __init__(self, *, signals, sample_depth, clock_polarity=0, clock_phase=1, cs_idles_high=False, **kwargs):
        # NOTE(review): `cs_idles_high` is accepted but never referenced in
        # this class's visible code — confirm whether it should configure the
        # SPI interface below.
        #
        # I/O port
        #
        self.spi = SPIBus()
        #
        # Init
        #
        self.clock_phase = clock_phase
        self.clock_polarity = clock_polarity
        # Extract the domain from our keyword arguments, and then translate it to sync
        # before we pass it back below. We'll use a DomainRenamer at the boundary to
        # handle non-sync domains.
        self.domain = kwargs.get('domain', 'sync')
        kwargs['domain'] = 'sync'
        # Create our core integrated logic analyzer.
        self.ila = IntegratedLogicAnalyzer(
            signals=signals,
            sample_depth=sample_depth,
            **kwargs)
        # Copy some core parameters from our inner ILA.
        self.signals = signals
        self.sample_width = self.ila.sample_width
        self.sample_depth = self.ila.sample_depth
        self.sample_rate = self.ila.sample_rate
        self.sample_period = self.ila.sample_period
        # Figure out how many bytes we'll send per sample.
        # We'll always send things squished into 32-bit chunks, as this is what the SPI engine
        # on our Debug Controller likes most.
        words_per_sample = (self.ila.sample_width + 31) // 32
        # Bolster our bits_per_word up to a power of two...
        self.bits_per_word = words_per_sample * 4 * 8
        self.bits_per_word = 2 ** ((self.bits_per_word - 1).bit_length())
        # ... and compute how many bits should be used.
        self.bytes_per_sample = self.bits_per_word // 8
        # Expose our ILA's trigger and status ports directly.
        self.trigger = self.ila.trigger
        self.sampling = self.ila.sampling
        self.complete = self.ila.complete

    def elaborate(self, platform):
        m = Module()
        m.submodules.ila = self.ila
        # Strobes on the cycle CS is first asserted (transaction begins).
        transaction_start = Rose(self.spi.cs)
        # Connect up our SPI transciever to our public interface.
        interface = SPIDeviceInterface(
            word_size=self.bits_per_word,
            clock_polarity=self.clock_polarity,
            clock_phase=self.clock_phase
        )
        m.submodules.spi = interface
        m.d.comb += [
            interface.spi .connect(self.spi),
            # Always output the captured sample.
            interface.word_out .eq(self.ila.captured_sample)
        ]
        # Count where we are in the current transmission.
        current_sample_number = Signal(range(0, self.ila.sample_depth))
        # Our first piece of data is latched in when the transaction
        # starts, so we'll move on to sample #1.
        with m.If(self.spi.cs):
            with m.If(transaction_start):
                m.d.sync += current_sample_number.eq(1)
            # From then on, we'll move to the next sample whenever we're finished
            # scanning out a word (and thus our current samples are latched in).
            with m.Elif(interface.word_accepted):
                m.d.sync += current_sample_number.eq(current_sample_number + 1)
        # Whenever CS is low, we should be providing the very first sample,
        # so reset our sample counter to 0.
        with m.Else():
            m.d.sync += current_sample_number.eq(0)
        # Ensure our ILA module outputs the right sample.
        # (Registered, so the read address lags the counter by one cycle.)
        m.d.sync += [
            self.ila.captured_sample_number .eq(current_sample_number)
        ]
        # Convert our sync domain to the domain requested by the user, if necessary.
        if self.domain != "sync":
            m = DomainRenamer({"sync": self.domain})(m)
        return m
class SyncSerialReadoutILATest(SPIGatewareTestCase):
    """ Simulation test that reads captured samples back out over SPI. """

    def instantiate_dut(self):
        self.input_signal = Signal(12)
        return SyncSerialILA(
            signals=[self.input_signal],
            sample_depth=16,
            clock_polarity=1,
            clock_phase=0
        )

    def initialize_signals(self):
        yield self.input_signal.eq(0xF00)

    @sync_test_case
    def test_spi_readout(self):
        input_signal = self.input_signal
        # Trigger the test while offering our first sample.
        yield
        yield from self.pulse(self.dut.trigger, step_after=False)
        # Provide the remainder of our samples.
        for i in range(1, 16):
            yield input_signal.eq(0xF00 | i)
            yield
        # Wait a few cycles to account for delays in
        # the sampling pipeline.
        yield from self.advance_cycles(5)
        # We've now captured a full set of samples.
        # We'll test reading them out.
        self.assertEqual((yield self.dut.complete), 1)
        # Start the transaction, and exchange 16 bytes of data.
        yield self.dut.spi.cs.eq(1)
        yield
        # Read out our result over SPI...
        data = yield from self.spi_exchange_data(b"\0" * 32)
        # ... and ensure it matches what was sampled:
        # each 32-bit word should be the big-endian sample 0x00000F00 | i.
        i = 0
        while data:
            datum = data[0:4]
            del data[0:4]
            expected = b"\x00\x00\x0f" + bytes([i])
            self.assertEqual(datum, expected)
            i += 1
class StreamILA(Elaboratable):
    """ Super-simple ILA that outputs its samples over a Stream.

    Create a receiver for this object by calling apollo.ila_receiver_for(<this>).

    This protocol is simple: we wait for a trigger; and then broadcast our samples.
    We broadcast one buffer of samples per each subsequent trigger.

    Attributes
    ----------
    trigger: Signal(), input
        A strobe that determines when we should start sampling.
    sampling: Signal(), output
        Indicates when sampling is in progress.
    complete: Signal(), output
        Indicates when sampling is complete and ready to be read.
    stream: output stream
        Stream output for the ILA.

    Parameters
    ----------
    signals: iterable of Signals
        An iterable of signals that should be captured by the ILA.
    sample_depth: int
        The depth of the desired buffer, in samples.
    domain: string
        The clock domain in which the ILA should operate.
    o_domain: string
        The clock domain in which the output stream will be generated.
        If omitted, defaults to the same domain as the core ILA.
    samples_pretrigger: int
        The number of our samples which should be captured _before_ the trigger.
        This also can act like an implicit synchronizer; so asynchronous inputs
        are allowed if this number is >= 2.
    """

    def __init__(self, *, signals, sample_depth, o_domain=None, **kwargs):
        # Extract the domain from our keyword arguments, and then translate it to sync
        # before we pass it back below. We'll use a DomainRenamer at the boundary to
        # handle non-sync domains.
        self.domain = kwargs.get('domain', 'sync')
        kwargs['domain'] = 'sync'
        self._o_domain = o_domain if o_domain else self.domain
        # Create our core integrated logic analyzer.
        self.ila = IntegratedLogicAnalyzer(
            signals=signals,
            sample_depth=sample_depth,
            **kwargs)
        # Copy some core parameters from our inner ILA.
        self.signals = signals
        self.sample_width = self.ila.sample_width
        self.sample_depth = self.ila.sample_depth
        self.sample_rate = self.ila.sample_rate
        self.sample_period = self.ila.sample_period
        # Bolster our bits per sample "word" up to a power of two.
        self.bits_per_sample = 2 ** ((self.ila.sample_width - 1).bit_length())
        self.bytes_per_sample = self.bits_per_sample // 8
        #
        # I/O port
        #
        self.stream = StreamInterface(payload_width=self.bits_per_sample)
        self.trigger = Signal()
        # Expose our ILA's trigger and status ports directly.
        self.sampling = self.ila.sampling
        self.complete = self.ila.complete

    def elaborate(self, platform):
        m = Module()
        m.submodules.ila = ila = self.ila
        # If capture and output share a domain, drive our public stream
        # directly; otherwise build an internal stream and bridge it across
        # an async FIFO below.
        if self._o_domain == self.domain:
            in_domain_stream = self.stream
        else:
            in_domain_stream = StreamInterface(payload_width=self.bits_per_sample)
        # Count where we are in the current transmission.
        current_sample_number = Signal(range(0, ila.sample_depth))
        # Always present the current sample number to our ILA, and the current
        # sample value to the UART.
        m.d.comb += [
            ila.captured_sample_number .eq(current_sample_number),
            in_domain_stream.payload .eq(ila.captured_sample)
        ]
        with m.FSM():
            # IDLE -- we're currently waiting for a trigger before capturing samples.
            with m.State("IDLE"):
                # Always allow triggering, as we're ready for the data.
                m.d.comb += self.ila.trigger.eq(self.trigger)
                # Once we're triggered, move onto the SAMPLING state.
                with m.If(self.trigger):
                    m.next = "SAMPLING"
            # SAMPLING -- the internal ILA is sampling; we're now waiting for it to
            # complete. This state is similar to IDLE; except we block triggers in order
            # to cleanly avoid a race condition.
            with m.State("SAMPLING"):
                # Once our ILA has finished sampling, prepare to read out our samples.
                with m.If(self.ila.complete):
                    m.d.sync += [
                        current_sample_number .eq(0),
                        in_domain_stream.first .eq(1)
                    ]
                    m.next = "SENDING"
            # SENDING -- we now have a valid buffer of samples to send up to the host;
            # we'll transmit them over our stream interface.
            with m.State("SENDING"):
                m.d.comb += [
                    # While we're sending, we're always providing valid data to the UART.
                    in_domain_stream.valid .eq(1),
                    # Indicate when we're on the last sample.
                    in_domain_stream.last .eq(current_sample_number == (self.sample_depth - 1))
                ]
                # Each time the UART accepts a valid word, move on to the next one.
                with m.If(in_domain_stream.ready):
                    m.d.sync += [
                        current_sample_number .eq(current_sample_number + 1),
                        in_domain_stream.first .eq(0)
                    ]
                    # If this was the last sample, we're done! Move back to idle.
                    # NOTE(review): this tests `self.stream.last`, which lives in
                    # the *output* domain when CDC is in use, while this FSM runs
                    # in the capture domain — confirm it shouldn't be
                    # `in_domain_stream.last`.
                    with m.If(self.stream.last):
                        m.next = "IDLE"
        # If we're not streaming out of the same domain we're capturing from,
        # we'll add some clock-domain crossing hardware.
        if self._o_domain != self.domain:
            in_domain_signals = Cat(
                in_domain_stream.first,
                in_domain_stream.payload,
                in_domain_stream.last
            )
            out_domain_signals = Cat(
                self.stream.first,
                self.stream.payload,
                self.stream.last
            )
            # Create our async FIFO...
            m.submodules.cdc = fifo = AsyncFIFOBuffered(
                width=len(in_domain_signals),
                depth=16,
                w_domain="sync",
                r_domain=self._o_domain
            )
            m.d.comb += [
                # ... fill it from our in-domain stream...
                fifo.w_data .eq(in_domain_signals),
                fifo.w_en .eq(in_domain_stream.valid),
                in_domain_stream.ready .eq(fifo.w_rdy),
                # ... and output it into our output stream.
                out_domain_signals .eq(fifo.r_data),
                self.stream.valid .eq(fifo.r_rdy),
                fifo.r_en .eq(self.stream.ready)
            ]
        # Convert our sync domain to the domain requested by the user, if necessary.
        if self.domain != "sync":
            m = DomainRenamer({"sync": self.domain})(m)
        return m
class AsyncSerialILA(Elaboratable):
    """ Super-simple ILA that reads samples out over a UART connection.

    Wraps a StreamILA and serializes its output stream with a multibyte
    UART transmitter.

    Create a receiver for this object by calling apollo.ila_receiver_for(<this>).

    This protocol is simple: we wait for a trigger; and then broadcast our samples.
    We broadcast one buffer of samples per each subsequent trigger.

    Attributes
    ----------
    trigger: Signal(), input
        A strobe that determines when we should start sampling.
    sampling: Signal(), output
        Indicates when sampling is in progress.
    complete: Signal(), output
        Indicates when sampling is complete and ready to be read.
    tx: Signal(), output
        Serial output for the ILA.

    Parameters
    ----------
    signals: iterable of Signals
        An iterable of signals that should be captured by the ILA.
    sample_depth: int
        The depth of the desired buffer, in samples.
    divisor: int
        The number of `sync` clock cycles per bit period.
    domain: string
        The clock domain in which the ILA should operate.
    samples_pretrigger: int
        The number of our samples which should be captured _before_ the trigger.
        This also can act like an implicit synchronizer; so asynchronous inputs
        are allowed if this number is >= 2.
    """

    def __init__(self, *, signals, sample_depth, divisor, **kwargs):
        self.divisor = divisor
        #
        # I/O port
        #
        self.tx = Signal()
        # Extract the domain from our keyword arguments, and then translate it to sync
        # before we pass it back below. We'll use a DomainRenamer at the boundary to
        # handle non-sync domains.
        self.domain = kwargs.get('domain', 'sync')
        kwargs['domain'] = 'sync'
        # Create our core integrated logic analyzer.
        self.ila = StreamILA(
            signals=signals,
            sample_depth=sample_depth,
            **kwargs)
        # Copy some core parameters from our inner ILA.
        self.signals = signals
        self.sample_width = self.ila.sample_width
        self.sample_depth = self.ila.sample_depth
        self.sample_rate = self.ila.sample_rate
        self.sample_period = self.ila.sample_period
        self.bits_per_sample = self.ila.bits_per_sample
        self.bytes_per_sample = self.ila.bytes_per_sample
        # Expose our ILA's trigger and status ports directly.
        self.trigger = self.ila.trigger
        self.sampling = self.ila.sampling
        self.complete = self.ila.complete

    def elaborate(self, platform):
        m = Module()
        m.submodules.ila = ila = self.ila
        # Create our UART transmitter, and connect it to our stream interface.
        m.submodules.uart = uart = UARTMultibyteTransmitter(
            byte_width=self.bytes_per_sample,
            divisor=self.divisor
        )
        m.d.comb +=[
            uart.stream .stream_eq(ila.stream),
            self.tx .eq(uart.tx)
        ]
        # Convert our sync domain to the domain requested by the user, if necessary.
        if self.domain != "sync":
            m = DomainRenamer({"sync": self.domain})(m)
        return m
class ILAFrontend(metaclass=ABCMeta):
    """ Class that communicates with an ILA module and emits useful output.

    Subclasses implement `_read_samples` for their transport (SPI, UART, ...);
    this base class handles parsing, timestamping, and VCD/GTKWave emission.
    """

    def __init__(self, ila):
        """
        Parameters:
            ila -- The ILA object to work with.
        """
        self.ila = ila
        # Lazily populated by refresh() / enumerate_samples().
        self.samples = None

    @abstractmethod
    def _read_samples(self):
        """ Read samples from the target ILA. Should return an iterable of samples. """

    def _parse_sample(self, raw_sample):
        """ Converts a single binary sample to a dictionary of names -> sample values. """
        position = 0
        sample = {}
        # Split our raw, bits(0) signal into smaller slices, and associate them with their names.
        for signal in self.ila.signals:
            signal_width = len(signal)
            signal_bits = raw_sample[position : position + signal_width]
            position += signal_width
            sample[signal.name] = signal_bits
        return sample

    def _parse_samples(self, raw_samples):
        """ Converts raw, binary samples to dictionaries of name -> sample. """
        return [self._parse_sample(sample) for sample in raw_samples]

    def refresh(self):
        """ Fetches the latest set of samples from the target ILA. """
        self.samples = self._parse_samples(self._read_samples())

    def enumerate_samples(self):
        """ Returns an iterator that returns pairs of (timestamp, sample). """
        # If we don't have any samples, fetch samples from the ILA.
        if self.samples is None:
            self.refresh()
        timestamp = 0
        # Iterate over each sample...
        for sample in self.samples:
            yield timestamp, sample
            # ... and advance the timestamp by the relevant interval.
            timestamp += self.ila.sample_period

    def print_samples(self):
        """ Simple method that prints each of our samples; for simple CLI debugging."""
        for timestamp, sample in self.enumerate_samples():
            timestamp_scaled = 1000000 * timestamp
            print(f"{timestamp_scaled:08f}us: {sample}")

    def emit_vcd(self, filename, *, gtkw_filename=None, add_clock=True):
        """ Emits a VCD file containing the ILA samples.

        Parameters:
            filename      -- The filename to write to, or '-' to write to stdout.
            gtkw_filename -- If provided, a gtkwave save file will be generated that
                             automatically displays all of the relevant signals in the
                             order provided to the ILA.
            add_clock     -- If true or not provided, adds a replica of the ILA's sample
                             clock to make change points easier to see.
        """
        # Select the file-like object we're working with.
        # NOTE(review): `close_after` is set but never used — when a real file
        # is opened here it is never explicitly closed; confirm and fix.
        if filename == "-":
            stream = sys.stdout
            close_after = False
        else:
            stream = open(filename, 'w')
            close_after = True
        # Create our basic VCD.
        with VCDWriter(stream, timescale=f"1 ns", date='today') as writer:
            # NOTE(review): first_timestamp/last_timestamp are never updated
            # or read below — they appear to be dead locals.
            first_timestamp = math.inf
            last_timestamp = 0
            signals = {}
            # If we're adding a clock...
            if add_clock:
                clock_value = 1
                clock_signal = writer.register_var('ila', 'ila_clock', 'integer', size=1, init=clock_value ^ 1)
            # Create named values for each of our signals.
            for signal in self.ila.signals:
                signals[signal.name] = writer.register_var('ila', signal.name, 'integer', size=len(signal))
            # Dump the each of our samples into the VCD.
            clock_time = 0
            for timestamp, sample in self.enumerate_samples():
                for signal_name, signal_value in sample.items():
                    # If we're adding a clock signal, add any changes necessary since
                    # the last value-change.
                    if add_clock:
                        while clock_time < timestamp:
                            writer.change(clock_signal, clock_time / 1e-9, clock_value)
                            clock_value ^= 1
                            clock_time += (self.ila.sample_period / 2)
                    # Register the signal change.
                    # (Timestamps are converted to the 1 ns timescale above.)
                    writer.change(signals[signal_name], timestamp / 1e-9, signal_value.to_int())
        # If we're generating a GTKW, delegate that to our helper function.
        if gtkw_filename:
            assert(filename != '-')
            self._emit_gtkw(gtkw_filename, filename, add_clock=add_clock)

    def _emit_gtkw(self, filename, dump_filename, *, add_clock=True):
        """ Emits a GTKWave save file to accompany a generated VCD.

        Parameters:
            filename      -- The filename to write the GTKW save to.
            dump_filename -- The filename of the VCD that should be opened with this save.
            add_clock     -- True iff a clock signal should be added to the GTKW save.
        """
        with open(filename, 'w') as f:
            gtkw = GTKWSave(f)
            # Comments / context.
            gtkw.comment("Generated by the LUNA ILA.")
            # Add a reference to the dumpfile we're working with.
            gtkw.dumpfile(dump_filename)
            # If we're adding a clock, add it to the top of the view.
            # NOTE(review): this trace is emitted unconditionally — `add_clock`
            # is never checked here; confirm whether it should gate this line.
            gtkw.trace('ila.ila_clock')
            # Add each of our signals to the file.
            for signal in self.ila.signals:
                gtkw.trace(f"ila.{signal.name}")

    def interactive_display(self, *, add_clock=True):
        """ Attempts to spawn a GTKWave instance to display the ILA results interactively. """
        # Hack: generate files in a way that doesn't trip macOS's fancy guards.
        try:
            vcd_filename = os.path.join(tempfile.gettempdir(), os.urandom(24).hex() + '.vcd')
            gtkw_filename = os.path.join(tempfile.gettempdir(), os.urandom(24).hex() + '.gtkw')
            self.emit_vcd(vcd_filename, gtkw_filename=gtkw_filename)
            subprocess.run(["gtkwave", "-f", vcd_filename, "-a", gtkw_filename])
        finally:
            os.remove(vcd_filename)
            os.remove(gtkw_filename)
class AsyncSerialILAFrontend(ILAFrontend):
    """ UART-based ILA transport.

    Parameters
    ------------
    port: string
        The serial port to use to connect. This is typically a path on *nix systems.
    ila: IntegratedLogicAnalyzer
        The ILA object to work with.
    """

    def __init__(self, *args, ila, **kwargs):
        # Imported lazily so pyserial is only required when this frontend is used.
        import serial
        # Positional/keyword arguments are passed straight through to pyserial.
        self._port = serial.Serial(*args, **kwargs)
        self._port.reset_input_buffer()
        super().__init__(ila)

    def _split_samples(self, all_samples):
        """ Returns an iterator that iterates over each sample in the raw binary of samples. """
        from apollo.support.bits import bits
        sample_width_bytes = self.ila.bytes_per_sample
        # Iterate over each sample, and yield its value as a bits object.
        for i in range(0, len(all_samples), sample_width_bytes):
            raw_sample = all_samples[i:i + sample_width_bytes]
            # Total bit-length of the concatenated capture signals.
            sample_length = len(Cat(self.ila.signals))
            yield bits.from_bytes(raw_sample, length=sample_length, byteorder='big')

    def _read_samples(self):
        """ Reads a set of ILA samples, and returns them. """
        sample_width_bytes = self.ila.bytes_per_sample
        total_to_read = self.ila.sample_depth * sample_width_bytes
        # Fetch all of our samples from the given device.
        all_samples = self._port.read(total_to_read)
        return list(self._split_samples(all_samples))
if __name__ == "__main__":
unittest.main()
| 35.020305 | 123 | 0.604464 | 33,544 | 0.972431 | 5,134 | 0.148833 | 3,289 | 0.095347 | 0 | 0 | 14,718 | 0.426671 |
4f113526def488c862f6b1e51745bcce4bcf9d63 | 3,400 | py | Python | SPD/lib/lib_IZZI_MD.py | yamamon75/PmagPy | fa5b189800a239683fc17c6b312cdfdd839a46c3 | [
"BSD-3-Clause"
] | 2 | 2020-07-05T01:11:33.000Z | 2020-07-05T01:11:39.000Z | SPD/lib/lib_IZZI_MD.py | yamamon75/PmagPy | fa5b189800a239683fc17c6b312cdfdd839a46c3 | [
"BSD-3-Clause"
] | null | null | null | SPD/lib/lib_IZZI_MD.py | yamamon75/PmagPy | fa5b189800a239683fc17c6b312cdfdd839a46c3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from builtins import range
from past.utils import old_div
from numpy import *
def rect_area(three_points):
xA,yA=three_points[0][0],three_points[0][1]
xB,yB=three_points[1][0],three_points[1][1]
xC,yC=three_points[2][0],three_points[2][1]
#area=abs((xB*yA-xA*yB)+(xC*yB-xB*yC)+(xA*yC-xC*yA))/2
area=old_div(abs((xB*yA-xA*yB)+(xC*yB-xB*yC)+(xA*yC-xC*yA)),2)
return area
def get_IZZI_MD(X_arai,Y_arai,Step,start,end):
if end-start <4:
return(9999.99)
else:
X_IZZI_MD,Y_IZZI_MD,Step_IZZI_MD=[],[],[]
step_tmp=Step[start]
X_IZZI_MD.append(X_arai[start])
Y_IZZI_MD.append(Y_arai[start])
Step_IZZI_MD.append(step_tmp)
for i in range(start,end+1,1):
#if i==0:
# step_tmp='ZI'
# X_IZZI_MD.append(X_arai[0])
# Y_IZZI_MD.append(Y_arai[0])
# Step_IZZI_MD.append('ZI')
# continue
if step_tmp=='ZI' and Step[i]=='IZ':
X_IZZI_MD.append(X_arai[i])
Y_IZZI_MD.append(Y_arai[i])
Step_IZZI_MD.append('IZ')
step_tmp='IZ'
continue
if step_tmp=='IZ' and Step[i]=='ZI':
X_IZZI_MD.append(X_arai[i])
Y_IZZI_MD.append(Y_arai[i])
Step_IZZI_MD.append('ZI')
step_tmp='ZI'
continue
# if ZI after ZI or IZ after IZ than
# take only the last one
if step_tmp=='ZI' and Step[i]=='ZI':
X_IZZI_MD[-1]=X_arai[i]
Y_IZZI_MD[-1]=Y_arai[i]
step_tmp='ZI'
continue
if step_tmp=='IZ' and Step[i]=='IZ':
X_IZZI_MD[-1]=X_arai[i]
Y_IZZI_MD[-1]=Y_arai[i]
step_tmp='IZ'
total_ZI_curve=0
total_Z_area=0
# calculate the area between the IZ and the ZI curve
# and the length of the ZI curve
# the IZZI parameter is: IZZI_area/ZI_length
if len(Step_IZZI_MD) <= 2:
return 0
for i in range(len(X_IZZI_MD)-2):
if Step_IZZI_MD[i]=='ZI' or Step_IZZI_MD[i]=='IZ':
A=array([X_IZZI_MD[i],Y_IZZI_MD[i]])
B=array([X_IZZI_MD[i+1],Y_IZZI_MD[i+1]])
C=array([X_IZZI_MD[i+2],Y_IZZI_MD[i+2]])
area=rect_area([A,B,C])
slope_A_C=old_div((C[1]-A[1]),(C[0]-A[0]))
intercept_A_C=A[1]-(slope_A_C*A[0])
#print 'slope_A_C,intercept_A_C', slope_A_C,intercept_A_C
#raw_input()
if B[1] < slope_A_C*B[0]+intercept_A_C:
down_triangle=True
else:
down_triangle=False
# negative for IZ below ZI
# positive for IZ above IZ
if (down_triangle and Step_IZZI_MD[i]=='ZI')or (not (down_triangle) and Step_IZZI_MD[i]=='IZ'):
area=-1*area
total_Z_area=total_Z_area+area
if Step_IZZI_MD[i]=='ZI':
total_ZI_curve=total_ZI_curve+sqrt( (C[0]-A[0])**2 + (C[1]-A[1])**2)
if total_ZI_curve == 0:
return 0
IZZI_MD=old_div(total_Z_area,total_ZI_curve)
return(IZZI_MD)
| 34.693878 | 131 | 0.518824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.174412 |
4f11fa446f5c128d63dab17202cbfa4c6043f4c7 | 3,135 | py | Python | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z | import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0: # ping operable AFTER any net loss
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
# test setup
ping_times = {"lost_connection_time": 0,
"reconnection_time": 0}
# ensure network is up before running test
net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": net_up})
sudo_ensure_net_up()
# run event observing "network down/up"
no_ping = unix1.get_event(event_name="ping_no_response")
no_ping.add_event_occurred_callback(callback=outage_callback,
callback_params={'device_name': 'MyMachine1',
'ping_times': ping_times})
no_ping.start()
ping_is_on = unix1.get_event(event_name="ping_response")
ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
callback_params={'ping_times': ping_times})
ping_is_on.start()
# run test
ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
ping.start(timeout=120)
time.sleep(3)
ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_down})
sudo_ifconfig_down()
time.sleep(5)
ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_up})
sudo_ifconfig_up()
time.sleep(3)
# test teardown
ping.cancel()
no_ping.cancel()
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** calculating network outage time ***
1. run it
2. see logs - look for "Network outage" and "Ping works"
- be carefull in logs analysis - what's wrong?
3. fix incorrect calculation by exchanging:
no_ping = unix1.get_event(event_name="ping_no_response")
into:
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
"""
| 38.231707 | 118 | 0.686124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,169 | 0.372887 |
4f15458885d2ae78814572df9599b9ae72b6ee6c | 34 | py | Python | app/mashaller/__init__.py | yntonfon/dashboard | 287e7b2d895916102236243c1051da1e5ee3756e | [
"MIT"
] | null | null | null | app/mashaller/__init__.py | yntonfon/dashboard | 287e7b2d895916102236243c1051da1e5ee3756e | [
"MIT"
] | null | null | null | app/mashaller/__init__.py | yntonfon/dashboard | 287e7b2d895916102236243c1051da1e5ee3756e | [
"MIT"
] | null | null | null | from .user import user_marshaller
| 17 | 33 | 0.852941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4f155a9ab823ba33ff35f3612ec2e684b44b8924 | 2,782 | py | Python | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | 2 | 2021-04-10T19:22:15.000Z | 2022-03-08T19:37:56.000Z | from abc import ABC
from typing import Union, Optional, Iterable, Any
try: # Assume we're a sub-module in a package.
from utils import arguments as arg
from base.interfaces.context_interface import ContextInterface
from base.interfaces.contextual_interface import ContextualInterface
from base.interfaces.data_interface import ContextualDataInterface
from base.abstract.abstract_base import AbstractBaseObject
from base.abstract.simple_data import SimpleDataWrapper
from base.abstract.contextual import Contextual
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ...utils import arguments as arg
from ..interfaces.context_interface import ContextInterface
from ..interfaces.contextual_interface import ContextualInterface
from ..interfaces.data_interface import ContextualDataInterface
from .abstract_base import AbstractBaseObject
from .simple_data import SimpleDataWrapper
from .contextual import Contextual
Data = Union[Iterable, Any]
OptionalFields = Optional[Union[str, Iterable]]
Source = Optional[ContextualInterface]
Context = Optional[ContextInterface]
DATA_MEMBER_NAMES = ('_data', )
DYNAMIC_META_FIELDS = tuple()
class ContextualDataWrapper(Contextual, ContextualDataInterface, ABC):
def __init__(
self, data, name: str,
source: Source = None,
context: Context = None,
check: bool = True,
):
self._data = data
super().__init__(name=name, source=source, context=context, check=check)
@classmethod
def _get_data_member_names(cls):
return DATA_MEMBER_NAMES
def get_data(self) -> Data:
return self._data
def set_data(self, data: Data, inplace: bool):
if inplace:
self._data = data
self.set_meta(**self.get_static_meta())
else:
return ContextualDataWrapper(data, **self.get_static_meta())
def apply_to_data(self, function, *args, dynamic=False, **kwargs):
return self.__class__(
data=function(self.get_data(), *args, **kwargs),
**self.get_static_meta() if dynamic else self.get_meta()
)
@staticmethod
def _get_dynamic_meta_fields() -> tuple:
return DYNAMIC_META_FIELDS
def get_static_meta(self, ex: OptionalFields = None) -> dict:
meta = self.get_meta(ex=ex)
for f in self._get_dynamic_meta_fields():
meta.pop(f, None)
return meta
def get_compatible_static_meta(self, other=arg.DEFAULT, ex=None, **kwargs) -> dict:
meta = self.get_compatible_meta(other=other, ex=ex, **kwargs)
for f in self._get_dynamic_meta_fields():
meta.pop(f, None)
return meta
| 37.093333 | 105 | 0.700935 | 1,533 | 0.551042 | 0 | 0 | 175 | 0.062904 | 0 | 0 | 132 | 0.047448 |
4f1563734b81e35e89fdc77fb21035b9e52c5dfc | 1,880 | py | Python | snypy/snippets/rest/filters.py | sterapps/snypy-backend | e4733a1b7bf041c79c66ce74e64cc428d3c6ba5d | [
"MIT"
] | 2 | 2018-06-21T07:51:30.000Z | 2019-06-01T14:17:07.000Z | snypy/snippets/rest/filters.py | nezhar/snypy-backend | 0673b7dc7dc8b730639e0f634dcaa8b8178151e0 | [
"MIT"
] | 33 | 2018-05-10T10:37:46.000Z | 2021-10-30T11:07:22.000Z | snypy/snippets/rest/filters.py | sterapps/snypy-backend | e4733a1b7bf041c79c66ce74e64cc428d3c6ba5d | [
"MIT"
] | 3 | 2019-06-12T08:53:37.000Z | 2020-10-28T17:21:02.000Z | import django_filters
from snippets.models import File, Snippet, Label, SnippetLabel
class FileFilter(django_filters.FilterSet):
class Meta:
model = File
fields = [
'snippet',
'language',
]
class SnippetFilter(django_filters.FilterSet):
favorite = django_filters.BooleanFilter(method='filter_is_favorite', label="Is favorite?", )
labeled = django_filters.BooleanFilter(method='filter_is_labeled', label="Is labeled?", )
team_is_null = django_filters.BooleanFilter(method='filter_team_is_null', label="Team is None", )
# ToDo: Add after shares app
# shared_to = django_filters.NumberFilter(field_name="shared__user")
# shared_from = django_filters.NumberFilter(field_name="shared__user")
class Meta:
model = Snippet
fields = [
'labels',
'visibility',
'files__language',
'user',
'team',
]
def filter_is_favorite(self, queryset, name, value):
pass
def filter_is_labeled(self, queryset, name, value):
if value:
return queryset.exclude(labels=None)
return queryset.filter(labels=None)
def filter_team_is_null(self, queryset, name, value):
return queryset.filter(
team__isnull=value,
)
class LabelFilter(django_filters.FilterSet):
user = django_filters.NumberFilter(method='filter_user', label="User", )
class Meta:
model = Label
fields = [
'user',
'team',
]
def filter_user(self, queryset, name, value):
return queryset.filter(
user=value,
team=None,
)
class SnippetLabelFilter(django_filters.FilterSet):
class Meta:
model = SnippetLabel
fields = [
'snippet',
'label',
]
| 24.102564 | 101 | 0.607447 | 1,782 | 0.947872 | 0 | 0 | 0 | 0 | 0 | 0 | 382 | 0.203191 |
4f15648cc786e557ca07ca52d78e1a5532cecf8b | 1,892 | py | Python | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | null | null | null | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | 51 | 2022-02-25T22:28:40.000Z | 2022-03-31T22:34:58.000Z | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | null | null | null | import json
import demistomock as demisto
from MyToDoTasksWidget import get_open_to_do_tasks_of_current_user
def test_open_to_do_tasks_of_current_user(mocker):
'''
Given:
- Mock response of 'internalHttpRequest' to '/v2/statistics/widgets/query' that includes an open task and
a close task
When:
- Running the MyToDoTasksWidget script
Then:
- Ensure the markdown table was generated correctly and includes only the open task
'''
res_body = {
'data': [
{
'assignee': 'admin',
'completed': '0001-01-01T00:00:00Z',
'dbotCreatedBy': 'admin',
'description': 'test_open_task',
'dueDate': '2021-11-30T15:49:11+02:00',
'id': '1@2',
'incidentId': '2',
'status': 'open',
'title': 'test open'
},
{
'assignee': 'admin',
'dbotCreatedBy': 'admin',
'description': 'test_close_task',
'dueDate': '2021-11-30T15:49:11+02:00',
'id': '1@3',
'incidentId': '3',
'status': 'close',
'title': 'test close'
}
]
}
mocker.patch.object(
demisto,
'internalHttpRequest',
return_value={
'statusCode': 200,
'body': json.dumps(res_body)
}
)
expected_table = [
{
'Task Name': 'test open',
'Task Description': 'test_open_task',
'Task ID': '1@2',
'SLA': '2021-11-30 15:49:11+0200',
'Opened By': 'admin',
'Incident ID': '[2](#/Custom/caseinfoid/2)'
}
]
table = get_open_to_do_tasks_of_current_user()
assert len(table) == 1
assert table == expected_table
| 28.238806 | 113 | 0.493129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.464588 |
4f16139d6db319752d1b3230dd073a3e1e47c8fd | 118 | py | Python | runners/mlcube_singularity/mlcube_singularity/__init__.py | johnugeorge/mlcube | 10bdfe859805aa8c868c5a4745259037e123e757 | [
"Apache-2.0"
] | 83 | 2020-12-03T18:53:11.000Z | 2022-03-24T11:58:11.000Z | runners/mlcube_singularity/mlcube_singularity/__init__.py | mlperf/mlbox | 5623826bd9c1d60f082170aeffc9ff1ccda7a656 | [
"Apache-2.0"
] | 100 | 2019-11-08T19:58:59.000Z | 2020-11-19T05:47:12.000Z | runners/mlcube_singularity/mlcube_singularity/__init__.py | johnugeorge/mlcube | 10bdfe859805aa8c868c5a4745259037e123e757 | [
"Apache-2.0"
] | 15 | 2019-10-30T17:53:39.000Z | 2020-10-31T15:07:38.000Z |
def get_runner_class():
from mlcube_singularity.singularity_run import SingularityRun
return SingularityRun
| 19.666667 | 65 | 0.813559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4f168cadf59113f2c21dd814c3d020d96791fe21 | 1,669 | py | Python | Contest/LeetCode/BiweeklyContest27/2.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | Contest/LeetCode/BiweeklyContest27/2.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | Contest/LeetCode/BiweeklyContest27/2.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # 1461. Check If a String Contains All Binary Codes of Size K
# User Accepted:2806
# User Tried:4007
# Total Accepted:2876
# Total Submissions:9725
# Difficulty:Medium
# Given a binary string s and an integer k.
# Return True if any binary code of length k is a substring of s. Otherwise, return False.
# Example 1:
# Input: s = "00110110", k = 2
# Output: true
# Explanation: The binary codes of length 2 are "00", "01", "10" and "11".
# They can be all found as substrings at indicies 0, 1, 3 and 2 respectively.
# Example 2:
# Input: s = "00110", k = 2
# Output: true
# Example 3:
# Input: s = "0110", k = 1
# Output: true
# Explanation: The binary codes of length 1 are "0" and "1", it is clear that both exist as a substring.
# Example 4:
# Input: s = "0110", k = 2
# Output: false
# Explanation: The binary code "00" is of length 2 and doesn't exist in the array.
# Example 5:
# Input: s = "0000000001011100", k = 4
# Output: false
# Constraints:
# 1 <= s.length <= 5 * 10^5
# s consists of 0's and 1's only.
# 1 <= k <= 20
class Solution:
def hasAllCodes(self, s: str, k: int) -> bool:
for i in range(2**k):
tmp = str(bin(i))[2:]
if len(tmp) < k:
tmp = '0' * (k-len(tmp)) + tmp
if tmp in s:
# print('fuck')
continue
else:
return False
return True
# Redo
rec = set()
tmp = 0
for i in range(len(s)):
tmp = tmp * 2 + int(s[i])
if i >= k:
tmp -= int(s[i-k]) << k
if i >= k-1:
rec.add(tmp)
return len(rec) == (1<<k) | 27.360656 | 105 | 0.547633 | 628 | 0.376273 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.614739 |
4f1af3d08cbf530d538f0749b68ac8ba48af17e8 | 2,840 | py | Python | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 47 | 2016-04-21T19:51:17.000Z | 2022-02-25T14:13:30.000Z | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 421 | 2016-04-20T18:45:24.000Z | 2022-03-14T14:50:41.000Z | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 20 | 2016-09-10T20:25:27.000Z | 2021-10-06T16:02:47.000Z | import logging
import textwrap
import uuid
from typing import Any, Dict, Tuple, Union
import click
from globus_sdk import GlobusHTTPResponse, TransferClient
from .data import display_name_or_cname
from .recursive_ls import RecursiveLsResponse
log = logging.getLogger(__name__)
class CustomTransferClient(TransferClient):
# TODO: Remove this function when endpoints natively support recursive ls
def recursive_operation_ls(
self,
endpoint_id: Union[str, uuid.UUID],
params: Dict[str, Any],
depth: int = 3,
) -> RecursiveLsResponse:
"""
Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls``
Does not preserve access to top level operation_ls fields, but
adds a "path" field for every item that represents the full
path to that item.
:rtype: iterable of :class:`GlobusResponse <globus_sdk.response.GlobusResponse>`
:param endpoint_id: The endpoint being recursively ls'ed. If no "path" is given
in params, the start path is determined by this endpoint.
:param params: Parameters that will be passed through as query params.
:param depth: The maximum file depth the recursive ls will go to.
"""
endpoint_id = str(endpoint_id)
log.info(
"TransferClient.recursive_operation_ls(%s, %s, %s)",
endpoint_id,
depth,
params,
)
return RecursiveLsResponse(self, endpoint_id, params, max_depth=depth)
def get_endpoint_w_server_list(
self, endpoint_id
) -> Tuple[GlobusHTTPResponse, Union[str, GlobusHTTPResponse]]:
"""
A helper for handling endpoint server list lookups correctly accounting
for various endpoint types.
- Raises click.UsageError when used on Shares
- Returns (<get_endpoint_response>, "S3") for S3 endpoints
- Returns (<get_endpoint_response>, <server_list_response>) for all other
Endpoints
"""
endpoint = self.get_endpoint(endpoint_id)
if endpoint["host_endpoint_id"]: # not GCS -- this is a share endpoint
raise click.UsageError(
textwrap.dedent(
"""\
{id} ({0}) is a share and does not have servers.
To see details of the share, use
globus endpoint show {id}
To list the servers on the share's host endpoint, use
globus endpoint server list {host_endpoint_id}
"""
).format(display_name_or_cname(endpoint), **endpoint.data)
)
if endpoint["s3_url"]: # not GCS -- legacy S3 endpoint type
return (endpoint, "S3")
else:
return (endpoint, self.endpoint_server_list(endpoint_id))
| 35.949367 | 88 | 0.634859 | 2,557 | 0.900352 | 0 | 0 | 0 | 0 | 0 | 0 | 1,562 | 0.55 |
4f1c750bec0093c0c520c5d3c41ab1cdad36460f | 4,740 | py | Python | examples/issue_789/app.py | davidnateberg/Flask-AppBuilder | 3a6b45b1c12a52a794de27910896cbae61270d6b | [
"BSD-3-Clause"
] | 1 | 2022-03-18T19:00:29.000Z | 2022-03-18T19:00:29.000Z | examples/issue_789/app.py | davidnateberg/Flask-AppBuilder | 3a6b45b1c12a52a794de27910896cbae61270d6b | [
"BSD-3-Clause"
] | null | null | null | examples/issue_789/app.py | davidnateberg/Flask-AppBuilder | 3a6b45b1c12a52a794de27910896cbae61270d6b | [
"BSD-3-Clause"
] | null | null | null | import sys
from flask_appbuilder import SQLA, AppBuilder, ModelView, Model
from flask_appbuilder.models.sqla.interface import SQLAInterface
from sqlalchemy import Column, Integer, String, ForeignKey, Table
from sqlalchemy.orm import relationship
from flask import Flask
from flask_appbuilder.actions import action
config = {
'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
'CSRF_ENABLED': True,
'SECRET_KEY': '\2\1thisismyscretkey\1\2\e\y\y\h',
'APP_NAME': 'Example of Filtering Many-to-many Relationships on a single field.'
}
app = Flask('single_filter_multi_value')
app.config.update(config)
db = SQLA(app)
appbuilder = AppBuilder(app, db.session)
program_registration = Table(
'program_registration',
Model.metadata,
Column('program_id', Integer, ForeignKey('program.id')),
Column('student_id', Integer, ForeignKey('student.id')))
course_registration = Table(
'course_registration',
Model.metadata,
Column('course_id', Integer, ForeignKey('course.id')),
Column('student_id', Integer, ForeignKey('student.id')))
class Teacher(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
def __repr__(self):
return self.name
class Program(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
def __repr__(self):
return self.name
class Student(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
program = relationship(Program, secondary=program_registration,
backref='students')
def __repr__(self):
return self.name
class Course(Model):
id = Column(Integer, primary_key=True)
title = Column(String, nullable=False)
teacher_id = Column(Integer, ForeignKey('teacher.id'), nullable=False)
teacher = relationship(Teacher, backref='courses')
students = relationship(Student, secondary=course_registration,
backref='courses')
def __repr__(self):
return self.title
class CourseView(ModelView):
datamodel = SQLAInterface(Course)
list_columns = ['title', 'teacher']
show_columns = ['title', 'teacher']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProgramView(ModelView):
datamodel = SQLAInterface(Program)
list_columns = ['name']
show_columns = ['name', 'students']
add_columns = ['name']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class StudentView(ModelView):
datamodel = SQLAInterface(Student)
related_views = [CourseView, ProgramView]
list_columns = ['name', 'courses']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class TeacherView(ModelView):
datamodel = SQLAInterface(Teacher)
related_views = [StudentView]
list_columns = ['name']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
db.create_all()
appbuilder.add_view(TeacherView, 'Teachers')
appbuilder.add_view(CourseView, 'Courses')
appbuilder.add_view(StudentView, 'Students')
appbuilder.add_view(ProgramView, 'Programs')
def add_data():
db.session.add(Program(name="Bachelor of Science IT"))
db.session.add(Program(name="Bachelor of Science Computer Science"))
mr_smith = Teacher(name='Jonathan Smith')
db.session.add(mr_smith)
rod = Student(name='Rod')
jane = Student(name='Jane')
freddy = Student(name='Freddy')
db.session.add(rod)
db.session.add(jane)
db.session.add(freddy)
db.session.add(Course(title="Introduction to Programming using Pyhon",
teacher=mr_smith,
students=[rod, jane, freddy]))
db.session.add(Course(title="Mathematics I",
teacher=mr_smith,
students=[rod, jane]))
db.session.commit()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '--add_data':
add_data()
else:
app.run(debug=True)
| 28.902439 | 84 | 0.665823 | 2,564 | 0.540928 | 0 | 0 | 960 | 0.202532 | 0 | 0 | 881 | 0.185865 |
4f1cf95cec38d5a7764829af7a8a6cfbf0efd568 | 2,760 | py | Python | code/zeroinsertion_aging/plot-outofframe.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | 2 | 2020-07-28T10:47:40.000Z | 2021-11-14T20:07:21.000Z | code/zeroinsertion_aging/plot-outofframe.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | null | null | null | code/zeroinsertion_aging/plot-outofframe.py | andim/paper-tcellimprint | e89605e51014fa3f347f96bab3d3d84c2b013a2f | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import palettable
import pandas as pd
import sys
sys.path.append('..')
from lib import *
plt.style.use('../custom.mplstyle')
agebinsize = 20.0
agebins = np.arange(0.0, 90.0, agebinsize)
bin_ts = agebins[:-1]+agebinsize/2
bins = np.array([1, 1000, 10000, 100000])
df_enrichments = pd.read_csv(data_directory +'emerson-enrichments.csv', index_col=0,
true_values=['True'], false_values=['False'])
for name in ['zeroInsertion', 'zeroInsertion_out', 'out']:
for nzeros in range(0,3):
zeros = '0'*nzeros
print(zeros)
df_enrichments[name+'1000'+zeros] = (5*df_enrichments[name+'1000'+zeros]
+ 3*df_enrichments[name+'500'+zeros]
+ 2*df_enrichments[name+'200'+zeros])/10.0
for rank in bins[1:]:
df_enrichments['outnorm'+'%s'%rank] = df_enrichments['zeroInsertion_out'+'%s'%rank]/df_enrichments['out'+'%s'%rank]
df_enrichments['innorm'+'%s'%rank] = (df_enrichments['zeroInsertion'+'%s'%rank]-df_enrichments['zeroInsertion_out'+'%s'%rank])/(1-df_enrichments['out'+'%s'%rank])
def aggregate(df, name):
grouped = df.groupby(pd.cut(df['Age'], bins=agebins))
meanfreq = grouped.agg('mean')
meanfreq = np.array([list(meanfreq[name+'%s'%rank]) for rank in bins[1:]])
semfreq = grouped.agg('sem')
semfreq = np.array([list(semfreq[name+'%s'%rank]) for rank in bins[1:]])
return meanfreq, semfreq
fig, ax = plt.subplots(figsize=(3.5, 2.5))
colors = np.asarray(palettable.cartocolors.sequential.BluGrn_3_r.mpl_colors)
nsizes = 3
for j, name in enumerate(['innorm', 'outnorm']):
meanfreq, semfreq = aggregate(df_enrichments, name)
for i in range(0, nsizes):
l, = ax.plot(bin_ts, meanfreq[i, :], '-o' if j == 0 else ':o', c=colors[i], label='%g'%bins[i+1] if j==0 else None)
if i == 0:
if j == 0:
lpos = l
else:
lneg = l
ax.fill_between(bin_ts,
meanfreq[i, :]-semfreq[i, :],
meanfreq[i, :]+semfreq[i, :], facecolor=colors[i], alpha=.5, edgecolor=None)
ax.set_xlabel('Age in years (binned)')
ax.set_xticks(agebins[1:-1])
ax.set_ylabel('Zero insertion clones')
legend_kwargs = dict(ncol=3)
legend = plt.legend(title='Clone size rank (binned)', loc='upper right', bbox_to_anchor=(1.0, 1.05), **legend_kwargs)
ax.add_artist(legend)
ax.legend([lpos, lneg], ['yes', 'no'], title='Productive sequence', loc='upper right', bbox_to_anchor=(1.0, 0.83), **legend_kwargs)
ax.set_ylim(0.0, 0.09)
ax.set_yticks(np.arange(0.0, 0.09, 0.02))
ax.set_xticks(agebins[1:-1])
fig.tight_layout()
plt.show()
fig.savefig(figure_directory+'figure_zeroinsertion_out.svg')
| 37.808219 | 166 | 0.640942 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.161232 |
4f1d60a2d4237bbd3efae93314b622ddcfa12232 | 408 | py | Python | examples/ivis_job/docker_build.py | smartarch/qoscloud | 13b11b0baaad0d9b234d7defccdbd8756c2618a1 | [
"MIT"
] | 2 | 2021-02-20T13:53:02.000Z | 2021-11-15T16:11:32.000Z | examples/ivis_job/docker_build.py | smartarch/qoscloud | 13b11b0baaad0d9b234d7defccdbd8756c2618a1 | [
"MIT"
] | null | null | null | examples/ivis_job/docker_build.py | smartarch/qoscloud | 13b11b0baaad0d9b234d7defccdbd8756c2618a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script builds the docker images of image client and recognizer server and pushes them to dockerhub.
"""
from subprocess import call
print("Building the default docker image")
call("docker build -t d3srepo/qoscloud-default -f Dockerfile ../..", shell=True)
print("Pushing images to DockerHub")
call("docker push d3srepo/qoscloud-default", shell=True)
| 31.384615 | 104 | 0.742647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.786765 |
4f1e7496b3483e64bf6a8b20b24344e523b362a7 | 808 | py | Python | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | null | null | null | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | 2 | 2015-01-06T09:29:29.000Z | 2015-01-06T09:50:15.000Z | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | null | null | null | # Py3 compatibility
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import os
import sys
import shutil
from .network import Network
from .util import Directory
from .shell import PyreneCmd
def main():
dot_pyrene = os.path.expanduser('~/.pyrene')
dot_pypirc = os.path.expanduser('~/.pypirc')
tempdir = tempfile.mkdtemp(suffix='.pyrene')
network = Network(dot_pyrene)
try:
if not os.path.exists(dot_pyrene):
network.add_known_repos(dot_pypirc)
cmd = PyreneCmd(network, Directory(tempdir), dot_pypirc)
line = ' '.join(sys.argv[1:])
if line:
cmd.onecmd(line)
else:
cmd.cmdloop()
finally:
shutil.rmtree(tempdir)
if __name__ == '__main__':
main()
| 21.263158 | 64 | 0.659653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.07797 |
4f1f838da92b0cfd903c819acc97f4f554c7837b | 22,204 | py | Python | Safe Marks/HelperLibrary/Student.py | mriduldhall/Safe-Marks | 3ea657c842fe30f084e0d70633d4319073bc82f0 | [
"MIT"
] | null | null | null | Safe Marks/HelperLibrary/Student.py | mriduldhall/Safe-Marks | 3ea657c842fe30f084e0d70633d4319073bc82f0 | [
"MIT"
] | null | null | null | Safe Marks/HelperLibrary/Student.py | mriduldhall/Safe-Marks | 3ea657c842fe30f084e0d70633d4319073bc82f0 | [
"MIT"
] | null | null | null | from Interface.StudentCommandLineInterface import CLI
from HelperLibrary.StorageFunctions import StorageFunctions
from HelperLibrary.MarkSheet import MarkSheet
from datetime import datetime
class StudentController:
    """Mediates between a Student model and the StorageFunctions layer.

    Handles CRUD for student rows, their per-term mark sheets, archiving,
    and the interactive (input()/print()) editing flows.
    """

    def __init__(self, student, table_name):
        # student: the Student instance this controller operates on.
        # table_name: backing table for student rows (normally "students").
        self.student = student
        self.table_name = table_name

    def retrieve_data(self):
        """Load the student's row plus per-term mark data into a flat list.

        Returns [name, age, year_group, date_of_birth, address, father_name,
        mother_name, leave_date] followed by one (term, math, science,
        english) group per term.
        NOTE(review): assumes the students row column order is
        (id, name, age, current_year_group, ...) — confirm against schema.
        """
        # Normalise the lookup key the same way names are stored.
        self.student.name = (self.student.name.lower()).capitalize()
        student_data = StorageFunctions("students").retrieve(["name"], [self.student.name])
        student_data = list(student_data[0])
        student_id = student_data[0]
        year_group_id = student_data[3]
        del student_data[0]
        term_id_list = StorageFunctions("terms").list("id")
        for term_id in term_id_list:
            mark_sheet_data = StorageFunctions("mark_sheets").retrieve(["student_id", "term_id", "year_group_id"], [student_id, term_id, year_group_id])
            term_data = StorageFunctions("terms").retrieve(["id"], [term_id])
            term = [(term_data[0])[1]]
            if mark_sheet_data:
                student_data = student_data + term + list((mark_sheet_data[0])[1:4])
            else:
                # No sheet stored for this term/year: pad with empty marks.
                student_data = student_data + term + [None, None, None]
        return student_data

    def validate_if_student_exists(self):
        """Return True if a row with the student's name exists in the table."""
        student_data = StorageFunctions(self.table_name).retrieve(["name"], [self.student.name])
        if not student_data:
            return False
        else:
            return True

    def check_archive_status(self):
        """Return True when the student has a leave date (i.e. is archived)."""
        if self.student.leave_date:
            return True
        else:
            return False

    def list_students(self):
        """Print a numbered, alphabetised list of all student names."""
        list_of_students = StorageFunctions(self.table_name).list("name")
        print("List of students:")
        if list_of_students:
            counter = 1
            for student_name in sorted(list_of_students):
                print(counter, ":", student_name, end="\n")
                counter += 1
        else:
            print("No available students")

    def list_archived_students(self):
        """Print a numbered list of students whose leave_date is set."""
        # negative=True: select rows where leave_date is NOT None.
        archived_students_data = StorageFunctions(self.table_name).retrieve(["leave_date"], [None], negative=True)
        print("List of old students:")
        if archived_students_data:
            counter = 1
            for student_data in archived_students_data:
                print(counter, ":", student_data[1], end="\n")
                counter += 1
        else:
            print("No available students")

    def create_student(self):
        """Insert the student row plus one mark sheet per term for the
        student's current year group."""
        StorageFunctions(self.table_name).append("(name, age, current_year_group, date_of_birth, address, father_name, mother_name, leave_date)", [self.student.name, self.student.age, self.student.year_group, self.student.date_of_birth, self.student.address, self.student.father_name, self.student.mother_name, self.student.leave_date])
        StorageFunctions(self.table_name).retrieve(["name"], [self.student.name])
        student_data = StorageFunctions(self.table_name).retrieve(["name"], [self.student.name])
        student_data = student_data[0]
        StorageFunctions("mark_sheets").append("(math_mark, science_mark, english_mark, student_id, term_id, year_group_id)", [self.student.summer_mark_sheet.math_grade, self.student.summer_mark_sheet.science_grade, self.student.summer_mark_sheet.english_grade, student_data[0], (StorageFunctions("terms").retrieve(["term"], ["Summer"])[0])[0], self.student.year_group])
        StorageFunctions("mark_sheets").append("(math_mark, science_mark, english_mark, student_id, term_id, year_group_id)", [self.student.spring_mark_sheet.math_grade, self.student.spring_mark_sheet.science_grade, self.student.spring_mark_sheet.english_grade, student_data[0], (StorageFunctions("terms").retrieve(["term"], ["Spring"])[0])[0], self.student.year_group])
        StorageFunctions("mark_sheets").append("(math_mark, science_mark, english_mark, student_id, term_id, year_group_id)", [self.student.autumn_mark_sheet.math_grade, self.student.autumn_mark_sheet.science_grade, self.student.autumn_mark_sheet.english_grade, student_data[0], (StorageFunctions("terms").retrieve(["term"], ["Autumn"])[0])[0], self.student.year_group])

    def create_mark_sheets(self):
        """Ensure a (zeroed) mark sheet row exists for every term of the
        student's current year group, and attach fresh MarkSheet objects."""
        student_id = StorageFunctions("students").retrieve(["name"], [self.student.name])[0][0]
        term_id_list = StorageFunctions("terms").list("id")
        term_name_list = StorageFunctions("terms").list("term")
        for term_id in term_id_list:
            mark_sheets_data = StorageFunctions("mark_sheets").retrieve(["student_id", "term_id", "year_group_id"], [student_id, term_id, self.student.year_group])
            if not mark_sheets_data:
                StorageFunctions("mark_sheets").append("(math_mark, science_mark, english_mark, student_id, term_id, year_group_id)", [0, 0, 0, student_id, term_id, self.student.year_group])
            # Attach e.g. self.student.summer_mark_sheet for each term name.
            self.student.__setattr__(term_name_list[term_id_list.index(term_id)].lower() + "_mark_sheet", MarkSheet(self.student.name, term_name_list[term_id_list.index(term_id)].lower(), self.student.year_group))

    def validate_student_details(self):
        """Validate age/year group, clamping year group into [1, 13].

        Returns (is_valid, message_or_None).
        NOTE(review): the messages talk about age but the checks test
        year_group — confirm intended semantics.
        """
        if self.student.age < 1:
            return False, "Invalid age!"
        elif self.student.year_group < 1:
            self.student.year_group = 1
            return True, "Age too low!\nSetting year group to 1"
        elif self.student.year_group > 13:
            self.student.year_group = 13
            return True, "Age too high!\nSetting year group to 13"
        else:
            return True, None

    @staticmethod
    def _choose_mark_sheet(activity):
        """Prompt until the user picks a term; return 'Summer'/'Spring'/'Autumn'."""
        mark_sheet_choice_dictionary = {'1': 'Summer', '2': 'Spring', '3': 'Autumn'}
        while True:
            print("Enter 1 to", activity, "the summer term mark sheet, 2 for spring term mark sheet and 3 for autumn term mark sheet", end='')
            mark_sheet_choice = mark_sheet_choice_dictionary.get(input())
            if not mark_sheet_choice:
                print("Please enter a valid choice")
            else:
                return mark_sheet_choice

    def get_student_details(self):
        """Print all personal details of the student."""
        print("Student name:", self.student.name)
        print("Student age:", self.student.age)
        print("Student year group:", self.student.year_group)
        print("Student date of birth:", self.student.date_of_birth)
        print("Student address:", self.student.address)
        print("Student father's name:", self.student.father_name)
        print("Student mother's name:", self.student.mother_name)
        print("Student leave date:", self.student.leave_date)

    def get_mark_sheet_details(self):
        """Prompt for a term and print that mark sheet's details."""
        mark_sheet_choice = self._choose_mark_sheet("get details of")
        MarkSheet.get_details(getattr(self.student, mark_sheet_choice.lower() + "_mark_sheet"))

    def archive_get_mark_sheet_detail(self):
        """Print mark sheet details for an archived student, for a chosen
        term and year group looked up directly from storage."""
        mark_sheet_choice = self._choose_mark_sheet("get details of")
        term_id = StorageFunctions("terms").retrieve(["term"], [mark_sheet_choice])[0][0]
        year_group = input("Enter year group to get data of:")
        year_group_id = StorageFunctions("year_groups").retrieve(["year_group"], [year_group])[0][0]
        student_id = StorageFunctions("students").retrieve(["name"], [self.student.name])[0][0]
        mark_sheet_data = StorageFunctions("mark_sheets").retrieve(["student_id", "term_id", "year_group_id"], [student_id, term_id, year_group_id])
        if mark_sheet_data:
            mark_sheet_data = mark_sheet_data[0]
            MarkSheet.get_details(MarkSheet(self.student.name, mark_sheet_choice, year_group, mark_sheet_data[1], mark_sheet_data[2], mark_sheet_data[3]))
        else:
            print("No data available!")

    def get_mark_sheet_marks(self):
        """Prompt for a term and print that mark sheet's marks."""
        mark_sheet_choice = self._choose_mark_sheet("get marks of")
        MarkSheet.get_marks(getattr(self.student, mark_sheet_choice.lower() + "_mark_sheet"))

    def archive_get_mark_sheet_marks(self):
        """Print mark sheet marks for an archived student, for a chosen
        term and year group looked up directly from storage."""
        mark_sheet_choice = self._choose_mark_sheet("get details of")
        term_id = StorageFunctions("terms").retrieve(["term"], [mark_sheet_choice])[0][0]
        year_group = input("Enter year group to get data of:")
        year_group_id = StorageFunctions("year_groups").retrieve(["year_group"], [year_group])[0][0]
        student_id = StorageFunctions("students").retrieve(["name"], [self.student.name])[0][0]
        mark_sheet_data = StorageFunctions("mark_sheets").retrieve(["student_id", "term_id", "year_group_id"], [student_id, term_id, year_group_id])
        if mark_sheet_data:
            mark_sheet_data = mark_sheet_data[0]
            MarkSheet.get_marks(MarkSheet(self.student.name, mark_sheet_choice, year_group, mark_sheet_data[1], mark_sheet_data[2], mark_sheet_data[3]))
        else:
            print("No data available!")

    def get_all_student_data(self):
        """Print personal details plus every stored mark sheet, grouped by
        year group and ordered by term."""
        self.get_student_details()
        print()
        student_id = StorageFunctions("students").retrieve(["name"], [self.student.name])[0][0]
        all_mark_sheets_data = StorageFunctions("mark_sheets").retrieve(['student_id'], [student_id])
        all_year_group_ids = []
        # Collect the distinct year groups this student has sheets for.
        for student_mark_sheet_data in all_mark_sheets_data:
            mark_sheet_year_group_id = student_mark_sheet_data[6]
            if mark_sheet_year_group_id not in all_year_group_ids:
                all_year_group_ids.append(mark_sheet_year_group_id)
        all_year_group_ids.sort()
        term_ids_list = StorageFunctions("terms").list("id")
        term_names_list = StorageFunctions("terms").list("term")
        for year_group_id in all_year_group_ids:
            year_group = StorageFunctions("year_groups").retrieve(['id'], [year_group_id])[0][1]
            print("Year", year_group, ":")
            for term_id in term_ids_list:
                mark_sheet_data = StorageFunctions("mark_sheets").retrieve(['student_id', 'year_group_id', 'term_id'], [student_id, year_group_id, term_id])[0]
                mark_sheet = MarkSheet(self.student.name, term_names_list[term_ids_list.index(term_id)], year_group, mark_sheet_data[1], mark_sheet_data[2], mark_sheet_data[3])
                mark_sheet.get_details()
                mark_sheet.get_marks()
                print()

    def edit_mark_sheet(self):
        """Prompt for a term and run that mark sheet's interactive editor."""
        mark_sheet_choice = self._choose_mark_sheet("edit")
        self.student.__getattribute__(mark_sheet_choice.lower() + "_mark_sheet").edit_mark_sheet()

    def edit_student_details(self):
        """Interactive loop dispatching to the individual edit_* methods."""
        attributes = {
            '1': self.edit_name,
            '2': self.edit_year_group,
            '3': self.edit_date_of_birth,
            '4': self.edit_address,
            '5': self.edit_father_name,
            '6': self.edit_mother_name,
        }
        exit_initiated = False
        while not exit_initiated:
            edit_option = input("Enter 1 to edit name, 2 to edit year group, 3 to edit date of birth, 4 to edit address, 5 to edit father name, 6 to edit mother name and 7 to exit:")
            if edit_option == str(len(attributes) + 1):
                exit_initiated = True
            # NOTE(review): lexicographic string comparison — only reliable
            # while all menu options are single digits.
            elif (edit_option > str(len(attributes) + 1)) or (edit_option < '1'):
                print("Please enter a valid choice!")
            else:
                attributes.get(edit_option)()

    def edit_name(self):
        """Rename the student, rejecting names that already exist, and
        persist the change under the old primary key."""
        valid_name = False
        while not valid_name:
            original_name = self.student.name
            print("Student's current name is", self.student.name)
            self.student.name = input("Enter new name for student:")
            if not self.validate_if_student_exists():
                valid_name = True
                self.save_student_data(original_name)
            else:
                print("Student already exists!")
                # Roll back to the previous name.
                self.student.name = original_name

    def edit_year_group(self):
        """Change the year group and create sheets for the new year."""
        print("Student's current year group is", self.student.year_group)
        self.student.year_group = int(input("Enter new year group for student:"))
        self.create_mark_sheets()

    def edit_date_of_birth(self):
        """Change the date of birth and recompute the derived age."""
        print("Student's current date of birth is", self.student.date_of_birth)
        birth_year = int(input("Enter new year of birth:"))
        birth_month = int(input("Enter new month of birth:"))
        birth_date = int(input("Enter new date of birth:"))
        self.student.date_of_birth = datetime(birth_year, birth_month, birth_date)
        self.student.age = self.student.calculate_age()

    def edit_address(self):
        """Change the student's address."""
        print("Student's current address is", self.student.address)
        self.student.address = input("Enter new address of student:")

    def edit_father_name(self):
        """Change the father's name."""
        print("Student's father's current name is", self.student.father_name)
        self.student.father_name = input("Enter new name for student's father:")

    def edit_mother_name(self):
        """Change the mother's name."""
        # NOTE(review): the prompt below says "father's" but this edits the
        # mother's name — likely a copy/paste mistake in the message.
        print("Student's father's current name is", self.student.mother_name)
        self.student.mother_name = input("Enter new name for student's mother:")

    def save_student_data(self, old_name=None, save_mark_sheet_data=True):
        """Persist the student row (and optionally all mark sheets).

        old_name: previous name to locate the row after a rename.
        save_mark_sheet_data: set False when no sheets exist (e.g. archiving).
        """
        if not old_name:
            student_data = StorageFunctions("students").retrieve(["name"], [self.student.name])
        else:
            student_data = StorageFunctions("students").retrieve(["name"], [old_name])
        student_id = (student_data[0])[0]
        StorageFunctions("students").update(["name", "age", "current_year_group", "date_of_birth", "address", "father_name", "mother_name", "leave_date"], [self.student.name, self.student.age, self.student.year_group, self.student.date_of_birth, self.student.address, self.student.father_name, self.student.mother_name, self.student.leave_date], student_id)
        if save_mark_sheet_data:
            term_id_list = StorageFunctions("terms").list("id")
            for term_id in term_id_list:
                mark_sheet_data = StorageFunctions("mark_sheets").retrieve(["student_id", "term_id", "year_group_id"], [student_id, term_id, self.student.year_group])
                mark_sheet_id = (mark_sheet_data[0])[0]
                term_data = StorageFunctions("terms").retrieve(["id"], [term_id])
                term = (term_data[0])[1]
                StorageFunctions("mark_sheets").update(["math_mark", "science_mark", "english_mark"], [getattr(self.student, term.lower() + "_mark_sheet").math_grade, getattr(self.student, term.lower() + "_mark_sheet").science_grade, getattr(self.student, term.lower() + "_mark_sheet").english_grade], mark_sheet_id)

    def delete_student(self):
        """Delete the student row and all of their mark sheet rows."""
        student_data = StorageFunctions("students").retrieve(["name"], [self.student.name])
        student_id = (student_data[0])[0]
        StorageFunctions("mark_sheets").delete(student_id, "student_id")
        StorageFunctions("students").delete(student_id)

    def archive(self):
        """Mark the student as left: set leave_date, clear year group, and
        save without touching mark sheets."""
        leave_year = int(input("Enter leave year:"))
        leave_month = int(input("Enter leave month:"))
        leave_date = int(input("Enter leave date:"))
        self.student.leave_date = datetime(leave_year, leave_month, leave_date)
        self.student.year_group = None
        self.save_student_data(save_mark_sheet_data=False)

    def unarchive(self):
        """Re-admit an archived student into a new year group.

        Returns a status message for display.
        """
        self.student.recreate_student()
        new_year_group = int(input("Enter new year group for student:"))
        new_year_group_data = StorageFunctions("year_groups").retrieve(["year_group"], [new_year_group])
        if new_year_group_data:
            self.student.year_group = new_year_group_data[0][0]
            self.student.leave_date = None
            self.create_mark_sheets()
            self.save_student_data()
            return "Student successfully added back"
        else:
            return "Invalid year group"
class Student:
    """In-memory representation of one student plus the interactive menus
    used to manage them; persistence is delegated to StudentController."""

    def __init__(self, name, date_of_birth, address, father_name, mother_name, table_name="students"):
        """Build a student; age and year group are derived from the date of
        birth when one is given (year group looked up from age - 4)."""
        self.name = name
        self.date_of_birth = date_of_birth
        if self.date_of_birth is not None:
            self.age = self.calculate_age()
            self.year_group = (StorageFunctions("year_groups").retrieve(["year_group"], [self.age-4])[0])[0]
        else:
            self.age = None
            self.year_group = None
        self.address = address
        self.father_name = father_name
        self.mother_name = mother_name
        self.leave_date = None
        self.summer_mark_sheet = MarkSheet(self.name, "Summer", self.year_group)
        self.spring_mark_sheet = MarkSheet(self.name, "Spring", self.year_group)
        # FIX: the autumn sheet was previously created with term "Spring"
        # (copy/paste bug) — it must be the "Autumn" term.
        self.autumn_mark_sheet = MarkSheet(self.name, "Autumn", self.year_group)
        self.student_controller = StudentController(self, table_name)
        # Menu option -> handler tables for the four CLI contexts.
        self.student_menu_dict = {'1': self.student_controller.edit_mark_sheet,
                                  '2': self.student_controller.get_student_details,
                                  '3': self.student_controller.get_mark_sheet_details,
                                  '4': self.student_controller.get_mark_sheet_marks,
                                  '5': self.student_controller.get_all_student_data,
                                  }
        self.admin_student_menu_dict = {'1': self.student_controller.edit_mark_sheet,
                                        '2': self.student_controller.get_student_details,
                                        '3': self.student_controller.get_mark_sheet_details,
                                        '4': self.student_controller.get_mark_sheet_marks,
                                        '5': self.student_controller.edit_student_details,
                                        '6': self.student_controller.get_all_student_data,
                                        '7': self.student_controller.archive,
                                        '8': self.delete,
                                        }
        self.archive_student_menu_dict = {'1': self.student_controller.get_student_details,
                                          '2': self.student_controller.archive_get_mark_sheet_detail,
                                          '3': self.student_controller.archive_get_mark_sheet_marks,
                                          '4': self.student_controller.get_all_student_data,
                                          }
        self.admin_archive_student_menu_dict = {'1': self.student_controller.get_student_details,
                                                '2': self.student_controller.archive_get_mark_sheet_detail,
                                                '3': self.student_controller.archive_get_mark_sheet_marks,
                                                '4': self.student_controller.get_all_student_data,
                                                '5': self.delete,
                                                }

    def recreate_student(self):
        """Reload all attributes and the three mark sheets from storage.

        Relies on the flat layout produced by StudentController.retrieve_data:
        8 personal fields followed by (term, math, science, english) per term.
        """
        student_data = self.student_controller.retrieve_data()
        self.name = student_data[0]
        self.age = student_data[1]
        self.year_group = student_data[2]
        self.date_of_birth = student_data[3]
        self.address = student_data[4]
        self.father_name = student_data[5]
        self.mother_name = student_data[6]
        self.leave_date = student_data[7]
        self.summer_mark_sheet.student = self.name
        self.summer_mark_sheet.term = student_data[8]
        self.summer_mark_sheet.year_group = self.year_group
        self.summer_mark_sheet.math_grade = student_data[9]
        self.summer_mark_sheet.science_grade = student_data[10]
        self.summer_mark_sheet.english_grade = student_data[11]
        self.spring_mark_sheet.student = self.name
        self.spring_mark_sheet.term = student_data[12]
        self.spring_mark_sheet.year_group = self.year_group
        self.spring_mark_sheet.math_grade = student_data[13]
        self.spring_mark_sheet.science_grade = student_data[14]
        self.spring_mark_sheet.english_grade = student_data[15]
        self.autumn_mark_sheet.student = self.name
        self.autumn_mark_sheet.term = student_data[16]
        self.autumn_mark_sheet.year_group = self.year_group
        self.autumn_mark_sheet.math_grade = student_data[17]
        self.autumn_mark_sheet.science_grade = student_data[18]
        self.autumn_mark_sheet.english_grade = student_data[19]

    def calculate_age(self):
        """Return the student's age in whole years as of today."""
        current_date = datetime.now()
        age = current_date.year - self.date_of_birth.year
        # Subtract one if this year's birthday has not happened yet.
        if current_date.month < self.date_of_birth.month or ((current_date.month == self.date_of_birth.month) and (current_date.day < self.date_of_birth.day)):
            age -= 1
        return age

    def create_new_student(self):
        """Persist this student if the name is not already taken.

        Returns a status message for display.
        """
        if not self.student_controller.validate_if_student_exists():
            self.student_controller.create_student()
            return "Student successfully created"
        else:
            return "Student already exists"

    def create_old_student(self):
        """Re-admit a previously archived student chosen by name.

        Returns a status message for display.
        """
        choice_list_of_students = bool(int(input("Enter 1 to get a list of all old students and 0 to continue without a list of students:")))
        if choice_list_of_students:
            self.student_controller.list_archived_students()
        self.name = input("Enter student name to add back into school:").capitalize()
        if self.student_controller.validate_if_student_exists():
            return self.student_controller.unarchive()
        else:
            return "Student does not exist"

    def manage(self, admin):
        """Open the interactive CLI for a student chosen by name.

        admin: True enables the admin menus (edit/archive/delete).
        Returns a status message for display.
        """
        choice_list_of_students = bool(int(input("Enter 1 to get a list of all students and 0 to continue without a list of students:")))
        if choice_list_of_students:
            self.student_controller.list_students()
        self.name = input("Enter student name to manage student:").capitalize()
        if self.student_controller.validate_if_student_exists():
            self.recreate_student()
            archive = self.student_controller.check_archive_status()
            CLI(self, admin, archive).initiate()
            return "Exiting..."
        else:
            return "Student does not exist"

    def delete(self):
        """Delete the student after confirmation; return True if deleted."""
        confirm_deletion = bool(int(input("Enter 1 to confirm deletion of student and 0 to cancel deletion:")))
        if confirm_deletion:
            self.student_controller.delete_student()
            return True
        else:
            return False
| 55.929471 | 370 | 0.650513 | 22,007 | 0.991128 | 0 | 0 | 523 | 0.023554 | 0 | 0 | 3,577 | 0.161097 |
4f1fdfc8f0d95cc1bec3dc09896b940a6526f117 | 4,930 | py | Python | zhaopin/zhaopin/middlewares.py | Bruceey/PythonSpider | 4d64c45ca41d449f4950915887ef786ae812e7fa | [
"MIT"
] | 3 | 2021-05-21T03:31:35.000Z | 2021-07-03T11:39:05.000Z | zhaopin/zhaopin/middlewares.py | Bruceey/PythonSpider | 4d64c45ca41d449f4950915887ef786ae812e7fa | [
"MIT"
] | null | null | null | zhaopin/zhaopin/middlewares.py | Bruceey/PythonSpider | 4d64c45ca41d449f4950915887ef786ae812e7fa | [
"MIT"
] | 3 | 2021-04-14T15:09:00.000Z | 2021-12-13T14:37:54.000Z | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from scrapy.http.response.html import HtmlResponse
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ActionChains
from PIL import Image
import base64
import json
import requests
import time
class ZhaopinDownloaderMiddleware:
    """Scrapy downloader middleware that renders Boss pages with Selenium,
    dismisses the login popup, and solves the click-captcha shown on IP
    verification via the ttshitu recognition service.
    """

    def __init__(self):
        options = webdriver.ChromeOptions()
        # Ignore certificate errors.
        options.add_argument('--ignore-certificate-errors')
        # Hide webdriver automation flags to evade bot detection.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_argument("--disable-blink-features=AutomationControlled")
        self.browser = webdriver.Chrome(options=options)
        self.wait = WebDriverWait(self.browser, 10)

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create the middleware instance.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Fetch pages for the 'boss' spider through Selenium and return an
        HtmlResponse; other spiders fall through to the default downloader."""
        from io import BytesIO  # local: only needed for captcha handling

        if spider.name == 'boss':
            self.browser.get(request.url)
            time.sleep(1)
            html = self.browser.page_source
            if '立即登录,享受优质服务' in html:
                # Close the login prompt dialog.
                self.browser.find_element_by_css_selector('.closeIcon').click()
                print("出现登录提示框,正在关闭...")
                time.sleep(1)
            elif '当前IP地址可能存在异常访问行为,完成验证后即可正常使用' in html:
                # IP verification page: trigger and solve the click captcha.
                self.browser.find_element_by_css_selector(".btn").click()
                captcha_element = self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.geetest_widget')))
                loc = captcha_element.location
                size = captcha_element.size
                left, top, right, bottom = loc['x'], loc['y'], loc['x'] + size['width'], loc['y'] + size['height']
                # Resize the window so the whole page fits in one screenshot.
                width = self.browser.execute_script('return document.documentElement.scrollWidth')
                height = self.browser.execute_script('return document.documentElement.scrollHeight')
                self.browser.set_window_size(width, height)
                screenshot_png = self.browser.get_screenshot_as_png()
                # FIX: Image.open() requires a file-like object, not raw
                # PNG bytes — wrap the screenshot in BytesIO.
                screenshot = Image.open(BytesIO(screenshot_png))
                captcha = screenshot.crop((left, top, right, bottom))
                # FIX: send an *encoded* PNG to the recognition API.
                # Image.tobytes() yields raw pixel data the API cannot decode.
                png_buffer = BytesIO()
                captcha.save(png_buffer, format='PNG')
                captcha_bytes = png_buffer.getvalue()
                result_str = ZhaopinDownloaderMiddleware.base64_api('xxx', 'xxx', captcha_bytes)
                print(result_str)
                for cor in result_str.split('|'):
                    # Each element is an "x,y" click coordinate inside the widget.
                    x, y = cor.split(',')
                    ActionChains(self.browser).move_to_element_with_offset(captcha_element, int(x), int(y)).click().perform()
                    time.sleep(.3)
                # Confirm the selection.
                self.browser.find_element_by_css_selector('.geetest_commit_tip').click()
                time.sleep(1)
            # NOTE(review): html was captured before popup/captcha handling,
            # so the returned body may still contain the overlay markup.
            return HtmlResponse(url=request.url, body=html, status=200, encoding='utf-8')
        return None

    @staticmethod
    def base64_api(uname, pwd, img: bytes, typeid=21):
        """Submit *img* (encoded image bytes) to the ttshitu recognition API;
        return the recognised string on success or the error message."""
        b64 = base64.b64encode(img).decode('utf8')
        data = {"username": uname, "password": pwd, "typeid": typeid, "image": b64}
        result = json.loads(requests.post("http://api.ttshitu.com/predict", json=data).text)
        if result['success']:
            return result["data"]["result"]
        else:
            return result["message"]

    def process_response(self, request, response, spider):
        """Pass responses through unchanged."""
        return response

    def process_exception(self, request, exception, spider):
        """No custom exception handling; let Scrapy continue processing."""
        pass

    def spider_opened(self, spider):
        """Log when a spider starts using this middleware."""
        spider.logger.info('Spider opened: %s' % spider.name)
| 42.869565 | 125 | 0.648479 | 4,433 | 0.860777 | 0 | 0 | 679 | 0.131845 | 0 | 0 | 1,688 | 0.327767 |
4f20762324b6b2fc448e38806dd6a4f57cbf8e18 | 119 | py | Python | example/test_problem/ELBDM/DiscHeating/plot_script/FieldList.py | CliffLinTw/gamer | 6974d0b19133f253f2b867542f97b2acf1e9d756 | [
"BSD-3-Clause"
] | null | null | null | example/test_problem/ELBDM/DiscHeating/plot_script/FieldList.py | CliffLinTw/gamer | 6974d0b19133f253f2b867542f97b2acf1e9d756 | [
"BSD-3-Clause"
] | null | null | null | example/test_problem/ELBDM/DiscHeating/plot_script/FieldList.py | CliffLinTw/gamer | 6974d0b19133f253f2b867542f97b2acf1e9d756 | [
"BSD-3-Clause"
] | null | null | null | import yt
ds = yt.load("/work1/clifflin/gamer-fork/bin/Plummer/Data_000000")
for i in sorted(ds.field_list):
print(i)
| 23.8 | 66 | 0.747899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.436975 |
4f21df34e1d98ce1e3adaa44158d7fed8c12ff92 | 1,121 | py | Python | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 1 | 2019-11-22T11:02:52.000Z | 2019-11-22T11:02:52.000Z | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 4 | 2019-11-21T14:13:32.000Z | 2020-02-11T15:15:23.000Z | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 1 | 2020-02-11T06:19:45.000Z | 2020-02-11T06:19:45.000Z | import sys,os
from numpy import *
from subprocess import *
from glob import glob
doResume=1
showisq=1
showlen=0
rdelete=1
fixmasks=1
vmsFix=lambda wholeFile: '\\;'.join(wholeFile.split(';'))
megsize=lambda fileName: os.path.getsize(fileName)/1e6
if len(sys.argv)<2:
for rt,drs,files in os.walk(os.getcwd(),topdown=False):
ffiles=filter(lambda x: x.find('.csv')>=0,files)
for cFile in ffiles:
#
if cFile.lower().find('lacun')>=0: cPre='lacun'
if cFile.lower().find('canal')>=0: cPre='canal'
if cFile.lower().find('edge')<0:
wholeFile=(rt+'/'+cFile)
try:
curDir='/'.join((rt+'/'+cFile).split('/')[:-2])
curSample='_'.join((rt+'/'+cFile).split('/')[-3].split('_')[1:])
os.chdir(curDir)
if showisq: os.system('ls -lh '+vmsFix(wholeFile))
if megsize(wholeFile)<0.1:
csvfiles=glob(rt+'/'+cPre+'_*.csv')
if showlen:
for acsvFile in csvfiles: os.system('wc -l '+acsvFile)
for ccsv in csvfiles:
execCmd='rm '+ccsv
print (ccsv,megsize(ccsv))
if rdelete: os.system(execCmd)
except:
print rt+'/'+cFile+' already gone'
| 29.5 | 69 | 0.619982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.110616 |
4f222ada9c9e67ab5a6afc3a9166ea01cae912d9 | 6,642 | py | Python | winactivities/activities.py | forensicmatt/ActivitiesCacheParser | fc78c2b7ccb915e479a8a11be489afa14a025277 | [
"Apache-2.0"
] | 3 | 2018-09-11T04:00:33.000Z | 2020-08-08T01:10:35.000Z | winactivities/activities.py | forensicmatt/ActivitiesCacheParser | fc78c2b7ccb915e479a8a11be489afa14a025277 | [
"Apache-2.0"
] | null | null | null | winactivities/activities.py | forensicmatt/ActivitiesCacheParser | fc78c2b7ccb915e479a8a11be489afa14a025277 | [
"Apache-2.0"
] | null | null | null | import ujson
import binascii
from collections import OrderedDict
from winactivities.helpers import DbHandler, datetime_decode_1970_str
# Known tables and views in a Windows 10 ActivitiesCache.db;
# ActivitiesDb.iter_records walks the tables listed here.
ACTIVITIES_SCHEMA = {
    "tables": [
        "Activity",
        "Activity_PackageId",
        "ActivityAssetCache",
        "ActivityOperation",
        "AppSettings",
        "ManualSequence",
        "Metadata"
    ],
    "views": [
        "SmartLookup"
    ]
}
class ActivitiesDb(object):
    """Reader for a Windows ActivitiesCache.db, yielding typed records."""

    def __init__(self, source):
        # source: path to the ActivitiesCache.db SQLite database.
        self._source = source
        self.db_handler = DbHandler(
            database=self._source
        )

    def iter_records(self):
        """Yield one record per row of every table in ACTIVITIES_SCHEMA,
        tagged with its source table via the `_table` attribute."""
        for table_name in ACTIVITIES_SCHEMA["tables"]:
            # Table names come from the fixed schema list above, so the
            # string interpolation cannot inject arbitrary SQL.
            query_str = """
            SELECT rowid, *
            FROM {}
            """.format(table_name)
            for row in self.db_handler.iter_rows(query_str):
                record = self.get_record(table_name, row)
                setattr(record, "_table", table_name)
                yield record

    def get_record(self, collection_name, row):
        """Wrap *row* in the record class matching *collection_name*."""
        if collection_name == "Activity":
            return ActivityRecord(row)
        elif collection_name == "Activity_PackageId":
            return PackageIdRecord(row)
        elif collection_name == "ActivityOperation":
            return ActivityOperationRecord(row)
        else:
            return GenericRecord(row)

    def get_activity_sequence(self):
        """Return the stored 'Activity' value from the ManualSequence table.

        NOTE(review): fetchone() returns None when the key is absent, which
        would raise TypeError on the subscript below.
        """
        sequence_query_str = """
        SELECT
            ManualSequence.Value
        FROM
            ManualSequence
        WHERE "Key" LIKE "Activity"
        """
        connection = self.db_handler.get_connection()
        cursor = connection.cursor()
        cursor.execute(sequence_query_str)
        sequence = cursor.fetchone()
        return sequence[0]

    def iter_activities(self, sequence=0):
        """Yield ActivityRecord rows with ETag greater than *sequence*,
        newest first."""
        query_str = """
        SELECT rowid, *
        FROM Activity
        WHERE ETag > {}
        ORDER BY ETag DESC
        """.format(sequence)
        for row in self.db_handler.iter_rows(query_str):
            yield ActivityRecord(row)
class GenericRecord(dict):
    """Catch-all wrapper for rows of tables without a dedicated record type."""

    def __init__(self, row):
        dict.__init__(self)
        self.update(row)

    def as_ordered_dict(self):
        """Return the record's fields as an OrderedDict, preserving order."""
        return OrderedDict(self)
class ActivityOperationRecord(dict):
    """Row wrapper for the ActivityOperation table (queued sync operations)."""

    def __init__(self, row):
        self.update(row)

    def as_ordered_dict(self):
        """Reformat the row for output.

        Binary ids are hexlified, AppId/Payload are JSON-decoded, and epoch
        timestamps are rendered through datetime_decode_1970_str.
        """
        record = OrderedDict([
            ("OperationOrder", self["OperationOrder"]),
            ("Id", binascii.b2a_hex(self["Id"])),
            ("OperationType", self["OperationType"]),
            ("AppId", ujson.loads(self["AppId"])),
            ("PackageIdHash", self["PackageIdHash"]),
            ("AppActivityId", self["AppActivityId"]),
            ("ActivityType", self["ActivityType"]),
            ("ParentActivityId", self["ParentActivityId"]),
            ("Tag", self["Tag"]),
            ("Group", self["Group"]),
            ("MatchId", self["MatchId"]),
            ("LastModifiedTime", datetime_decode_1970_str(self["LastModifiedTime"])),
            ("ExpirationTime", datetime_decode_1970_str(self["ExpirationTime"])),
            ("Payload", ujson.loads(self["Payload"])),
            ("Priority", self["Priority"]),
            ("CreatedTime", datetime_decode_1970_str(self["CreatedTime"])),
            ("Attachments", self["Attachments"]),
            ("PlatformDeviceId", self["PlatformDeviceId"]),
            ("CreatedInCloud", self["CreatedInCloud"]),
            ("StartTime", datetime_decode_1970_str(self["StartTime"])),
            ("EndTime", datetime_decode_1970_str(self["EndTime"])),
            ("LastModifiedOnClient", self["LastModifiedOnClient"]),
            ("CorrelationVector", self["CorrelationVector"]),
            ("GroupAppActivityId", self["GroupAppActivityId"]),
            ("ClipboardPayload", self["ClipboardPayload"]),
            ("EnterpriseId", self["EnterpriseId"]),
            ("OriginalPayload", self["OriginalPayload"]),
            ("OriginalLastModifiedOnClient", self["OriginalLastModifiedOnClient"]),
            ("ETag", self["ETag"])
        ])
        return record
class PackageIdRecord(dict):
    """Row wrapper for the Activity_PackageId table."""

    def __init__(self, row):
        self.update(row)

    def as_ordered_dict(self):
        """Reformat the row for output, hexlifying the binary ActivityId."""
        record = OrderedDict()
        record["_rowid"] = self["rowid"]
        record["ActivityId"] = binascii.b2a_hex(self["ActivityId"])
        record["Platform"] = self["Platform"]
        record["PackageName"] = self["PackageName"]
        record["ExpirationTime"] = self["ExpirationTime"]
        return record
class ActivityRecord(dict):
    """Row wrapper for the Activity table."""

    def __init__(self, row):
        self.update(row)

    def as_ordered_dict(self):
        """Reformat the row for output.

        Binary ids are hexlified, AppId/Payload are JSON-decoded, and epoch
        timestamps are rendered through datetime_decode_1970_str.
        """
        record = OrderedDict([
            ("_rowid", self["rowid"]),
            ("Id", binascii.b2a_hex(self["Id"])),
            ("AppId", ujson.loads(self['AppId'])),
            ("PackageIdHash", self["PackageIdHash"]),
            ("AppActivityId", self["AppActivityId"]),
            ("ActivityType", self["ActivityType"]),
            ("ActivityStatus", self["ActivityStatus"]),
            ("ParentActivityId", binascii.b2a_hex(self["ParentActivityId"])),
            ("Tag", self["Tag"]),
            ("Group", self["Group"]),
            ("MatchId", self["MatchId"]),
            ("LastModifiedTime", datetime_decode_1970_str(
                self["LastModifiedTime"])
            ),
            ("ExpirationTime", datetime_decode_1970_str(
                self["ExpirationTime"])
            ),
            ("Payload", ujson.loads(self['Payload'])),
            ("Priority", self["Priority"]),
            ("IsLocalOnly", self["IsLocalOnly"]),
            ("PlatformDeviceId", self["PlatformDeviceId"]),
            ("CreatedInCloud", self["CreatedInCloud"]),
            ("StartTime", datetime_decode_1970_str(
                self["StartTime"])
            ),
            ("EndTime", datetime_decode_1970_str(
                self["EndTime"])
            ),
            ("LastModifiedOnClient", datetime_decode_1970_str(
                self["LastModifiedOnClient"])
            ),
            ("GroupAppActivityId", self["GroupAppActivityId"]),
            ("ClipboardPayload", self["ClipboardPayload"]),
            ("EnterpriseId", self["EnterpriseId"]),
            ("OriginalPayload", self["OriginalPayload"]),
            ("OriginalLastModifiedOnClient", datetime_decode_1970_str(
                self["OriginalLastModifiedOnClient"])
            ),
            ("ETag", self["ETag"])
        ])
        return record
| 34.414508 | 85 | 0.560524 | 6,224 | 0.937067 | 698 | 0.105089 | 0 | 0 | 0 | 0 | 2,273 | 0.342216 |
4f239f097e9b19538c2da8b9e6eb10b642787d4e | 3,219 | py | Python | custom_components/tuneblade/tuneblade.py | spycle/tune_blade | bce9847531f410634765df7391565e2094549eb6 | [
"Apache-2.0"
] | null | null | null | custom_components/tuneblade/tuneblade.py | spycle/tune_blade | bce9847531f410634765df7391565e2094549eb6 | [
"Apache-2.0"
] | null | null | null | custom_components/tuneblade/tuneblade.py | spycle/tune_blade | bce9847531f410634765df7391565e2094549eb6 | [
"Apache-2.0"
] | null | null | null | """TuneBlade API Client."""
import logging
import asyncio
import socket
from typing import Optional
import aiohttp
import async_timeout
TIMEOUT = 10  # seconds allotted to each API call (used with async_timeout)
_LOGGER: logging.Logger = logging.getLogger(__package__)  # module logger
HEADERS = {"Content-type": "application/json; charset=UTF-8"}  # JSON PUT headers
class TuneBladeApiClient:
    """Async client for the TuneBlade HTTP remote-control API.

    Talks either to the master endpoint (/master) or to a single AirPlay
    device endpoint (/devices/<id>), depending on *device_id*.
    """

    def __init__(
        self, host: str, port: str, device_id: str, username: str, password: str, airplay_password: str, session: aiohttp.ClientSession, auth
    ) -> None:
        """Store connection settings and build the endpoint URL."""
        self._host = host
        self._port = port
        self._username = username
        self._password = password
        self._airplay_password = airplay_password
        self._session = session
        # NOTE(review): `auth` is accepted but currently unused/unstored.
        if device_id == "Master":
            self._url = "http://" + host + ":" + port + "/master"
        else:
            self._url = "http://" + host + ":" + port + "/devices/" + device_id

    async def async_get_data(self) -> dict:
        """Return the current status of the device (or master) as JSON."""
        return await self.api_wrapper("get", self._url)

    async def async_conn(self, value: str) -> None:
        """Set the device connection status (e.g. "Connect"/"Disconnect")."""
        await self.api_wrapper("put", self._url, data={"Password": self._airplay_password, "Status": value}, headers=HEADERS)

    async def async_set_volume(self, volume: float) -> None:
        """Set the device volume; *volume* is a 0.0-1.0 fraction sent as 0-100."""
        await self.api_wrapper("put", self._url, data={"Password": self._airplay_password, "Volume": str(int(volume*100))}, headers=HEADERS)

    async def async_set_volume_master(self, volume: float) -> None:
        """Set the master volume (0.0-1.0 fraction) and force Connect status."""
        await self.api_wrapper("put", self._url, data={"Status": "Connect", "Volume": str(int(volume*100))}, headers=HEADERS)

    async def api_wrapper(
        self, method: str, url: str, data: Optional[dict] = None, headers: Optional[dict] = None
    ) -> dict:
        """Perform an HTTP request, logging (not raising) common failures.

        "get" returns the parsed JSON body; other methods return None.
        """
        # FIX: avoid mutable default arguments — create fresh dicts per call.
        if data is None:
            data = {}
        if headers is None:
            headers = {}
        try:
            async with async_timeout.timeout(TIMEOUT):
                if method == "get":
                    response = await self._session.get(url, headers=headers)
                    return await response.json()
                elif method == "put":
                    await self._session.put(url, headers=headers, json=data)
                elif method == "patch":
                    await self._session.patch(url, headers=headers, json=data)
                elif method == "post":
                    await self._session.post(url, headers=headers, json=data)
        except asyncio.TimeoutError as exception:
            _LOGGER.error(
                "Timeout error fetching information from %s - %s",
                url,
                exception,
            )
        except (KeyError, TypeError) as exception:
            _LOGGER.error(
                "Error parsing information from %s - %s",
                url,
                exception,
            )
        except (aiohttp.ClientError, socket.gaierror) as exception:
            _LOGGER.error(
                "Error fetching information from %s - %s",
                url,
                exception,
            )
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.error("Something really wrong happened! - %s", exception)
| 36.579545 | 141 | 0.581858 | 2,945 | 0.91488 | 0 | 0 | 0 | 0 | 2,307 | 0.716682 | 600 | 0.186393 |
4f23e71ddb8ee1ace14dbb8e3bd3439af093af7d | 1,607 | py | Python | train.py | RuiShu/fast-style-transfer | abea698668aa070375afa488e490b93ba5bb9563 | [
"MIT"
] | 16 | 2017-09-30T21:13:27.000Z | 2019-08-31T19:16:44.000Z | train.py | RuiShu/fast-style-transfer | abea698668aa070375afa488e490b93ba5bb9563 | [
"MIT"
] | null | null | null | train.py | RuiShu/fast-style-transfer | abea698668aa070375afa488e490b93ba5bb9563 | [
"MIT"
] | 1 | 2021-08-05T08:59:58.000Z | 2021-08-05T08:59:58.000Z | from config import args
from utils import delete_existing, get_img, get_img_files
import tensorbayes as tb
import tensorflow as tf
import numpy as np
import os
def push_to_buffer(buf, data_files):
    """Fill *buf* in place with randomly chosen images from *data_files*.

    Selects ``len(buf)`` distinct files (no replacement) and loads each one
    as a 256x256x3 image.
    """
    chosen = np.random.choice(data_files, len(buf), replace=False)
    for slot, path in enumerate(chosen):
        buf[slot] = get_img(path, (256, 256, 3))
def train(M):
    """Train model *M*, logging summaries and saving periodic checkpoints.

    NOTE(review): this is Python 2 code (``xrange``, ``print`` statement).
    """
    # Start each run with a fresh TensorBoard log directory.
    delete_existing(args.log_dir)
    train_writer = tf.summary.FileWriter(args.log_dir)
    train_files = get_img_files(args.train_dir)
    validation_files = get_img_files(args.validation_dir)
    iterep = args.iter_visualize
    with M.graph.as_default():
        M.sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        # Reusable minibatch buffer, refilled in place every iteration.
        batch = np.zeros((args.batch_size, 256, 256, 3), dtype='float32')
        for i in xrange(len(train_files) * args.n_epochs):
            push_to_buffer(batch, train_files)
            summary, _ = M.sess.run(M.ops_main, {M.x: batch})
            train_writer.add_summary(summary, i + 1)
            train_writer.flush()
            message='i={:d}'.format(i + 1)
            end_viz, _ = tb.utils.progbar(i, iterep, message)
            # Periodically render stylized validation images to TensorBoard.
            if (i + 1) % args.iter_visualize == 0:
                for f, op in zip(validation_files, M.ops_images):
                    img = np.expand_dims(get_img(f), 0)
                    summary = M.sess.run(op, {M.x_test: img})
                    train_writer.add_summary(summary, i + 1)
            # Periodically checkpoint the model weights.
            if (i + 1) % args.iter_save == 0:
                path = saver.save(M.sess, os.path.join(args.model_dir, 'model'),
                                  global_step=i + 1)
                print "Saving model to {:s}".format(path)
| 35.711111 | 76 | 0.627878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.028625 |
4f24e90f804e495cead994aea15903dbefcdaca4 | 141 | py | Python | Semana 07/frequencia.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | Semana 07/frequencia.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | Semana 07/frequencia.py | heltonricardo/grupo-estudos-maratonas-programacao | 0c07d84a900858616647d07574ec56b0533cddfb | [
"MIT"
] | null | null | null | n = int(input())
v = []
for i in range(n): v.append(int(input()))
s = sorted(set(v))
for i in s: print(f'{i} aparece {v.count(i)} vez (es)')
| 23.5 | 55 | 0.574468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.255319 |
4f264c9af17a23440e1def56bb2ced5d575f4e4e | 1,543 | py | Python | TextGen/src/TextGen-2.py | fatemetkl/TextClassification-NLP-Forml_and_Informal | 4f79ecd7e860dabf83694230c2c52cd498869aaf | [
"MIT"
] | null | null | null | TextGen/src/TextGen-2.py | fatemetkl/TextClassification-NLP-Forml_and_Informal | 4f79ecd7e860dabf83694230c2c52cd498869aaf | [
"MIT"
] | null | null | null | TextGen/src/TextGen-2.py | fatemetkl/TextClassification-NLP-Forml_and_Informal | 4f79ecd7e860dabf83694230c2c52cd498869aaf | [
"MIT"
] | null | null | null | import random
import os
from decimal import *
# NOTE(review): the script assumes it is started two directory levels below
# the project root; the chdir calls walk up to it. TODO confirm layout.
os.getcwd()
os.chdir('..')
os.chdir('..')
parent=os.getcwd()
#seed = 15
# Bigram language model: one "word1|word2|probability" record per line.
path1="Model/label2.2gram.lm"
filename1=os.path.join(parent,path1)
with open(filename1,'r',encoding='utf-8') as f:
    text=f.read().split('\n')
dict_val={}   # "word1 word2" -> probability string
words=[]      # sentence-start bigrams ("<s> word")
new_text=[]
# Drop the last two lines of the model file (trailing blank/partial records).
for i in range (0,len(text)-2):
    new_text.append(text[i])
for line in new_text:
    list_t= line.split("|")
    word1=list_t[0]
    word2=list_t[1]
    value=list_t[2]
    if len(value)>0:
        # Only keep probabilities written as "0.xxx".
        if value[0]=='0':
            dict_val[word1+" "+word2]=value.replace('\n','')
            if word1=="<s>":
                words.append(word1+" "+word2)
seed=input("Enter seed : ")
random.seed( int(seed) )
n = input("Enter n : ")
output=''
# Generate n sentences by greedy bigram continuation from a random start
# bigram, stopping at the end-of-sentence marker.
for i in range(0,int(n)):
    rand_word=random.choice(words)
    word=rand_word.split(" ")[1]
    sentence=[]
    sentence.append(rand_word)
    while word!= "</s>":
        max_tt=0
        next_word=""
        # Pick the most probable successor not already used in this sentence.
        for toupels in dict_val:
            pre=toupels.split(" ")[0]
            now=toupels.split(" ")[1]
            if word == pre and now not in sentence :
                if Decimal(dict_val[toupels]) > Decimal(max_tt):
                    max_tt=dict_val[toupels]
                    next_word=now
        word=next_word
        sentence.append(next_word)
    out_text=''
    for word in sentence:
        out_text+= word+" "
    output += out_text +'\n'
# Write all generated sentences to the output file.
out_t=os.path.join(parent,'TextGen/label2.2gram.gen')
f = open(out_t,'w')
f.write(output)
f.close()
| 21.732394 | 71 | 0.562541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.108597 |
4f2700b6302c6b072cf15c607d2751cacf024b9d | 33,707 | py | Python | pyFAI/average.py | fpwg/pyFAI | cebf935b79333c9289a78b74ee99d550ebcdc549 | [
"MIT"
] | 1 | 2021-04-28T20:09:13.000Z | 2021-04-28T20:09:13.000Z | pyFAI/average.py | fpwg/pyFAI | cebf935b79333c9289a78b74ee99d550ebcdc549 | [
"MIT"
] | null | null | null | pyFAI/average.py | fpwg/pyFAI | cebf935b79333c9289a78b74ee99d550ebcdc549 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2003-2018 European Synchrotron Radiation Facility, Grenoble,
# France
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities, mainly for image treatment
"""
__authors__ = ["Jérôme Kieffer", "Valentin Valls"]
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "19/02/2019"
__status__ = "production"
import logging
import numpy
import fabio
import weakref
from scipy import ndimage
from scipy.interpolate import interp1d
from scipy.optimize.optimize import fmin
from scipy.optimize.optimize import fminbound
from .third_party import six
from .utils import stringutil
from .utils import header_utils
from ._version import calc_hexversion
if ("hexversion" not in dir(fabio)) or (fabio.hexversion < calc_hexversion(0, 4, 0, "dev", 5)):
# Short cut fabio.factory do not exists on older versions
fabio.factory = fabio.fabioimage.FabioImage.factory
logger = logging.getLogger(__name__)
class ImageReductionFilter(object):
    """Abstract base for filters reducing a set of images to a single one."""

    def init(self, max_images=None):
        """Prepare the filter before feeding it images.

        :param int max_images: Maximum number of images the filter must accept
        """
        pass

    def add_image(self, image):
        """Feed one image into the filter.

        :param numpy.ndarray image: image to accumulate
        """
        raise NotImplementedError()

    def get_parameters(self):
        """Return the filter parameters as a dictionary.

        :rtype: dict
        """
        return {"cutoff": None, "quantiles": None}

    def get_result(self):
        """Return the reduced image computed by the filter."""
        raise NotImplementedError()
class ImageAccumulatorFilter(ImageReductionFilter):
    """
    Reduction filter that folds images one by one into a single running
    accumulator, without keeping the whole stack in memory.
    """

    def init(self, max_images=None):
        self._count = 0
        self._accumulated_image = None

    def add_image(self, image):
        """Fold one image into the running accumulator.

        :param numpy.ndarray image: image to add
        """
        self._accumulated_image = self._accumulate(self._accumulated_image, image)
        self._count += 1

    def _accumulate(self, accumulated_image, added_image):
        """Combine the accumulator with a new image (subclass hook).

        :param numpy.ndarray accumulated_image: current accumulator
            (``None`` before the first image)
        :param numpy.ndarray added_image: image to fold in
        """
        raise NotImplementedError()

    def get_result(self):
        """Return the accumulated image and release the internal buffer.

        :rtype: numpy.ndarray
        """
        final_image = self._accumulated_image
        # Drop the reference so the memory can be reclaimed.
        self._accumulated_image = None
        return final_image
class MaxAveraging(ImageAccumulatorFilter):
    """Keep the pixel-wise maximum over all images."""
    name = "max"

    def _accumulate(self, accumulated_image, added_image):
        # The first image becomes the accumulator as-is.
        if accumulated_image is None:
            return added_image
        return numpy.maximum(accumulated_image, added_image)
class MinAveraging(ImageAccumulatorFilter):
    """Keep the pixel-wise minimum over all images."""
    name = "min"

    def _accumulate(self, accumulated_image, added_image):
        # The first image becomes the accumulator as-is.
        if accumulated_image is None:
            return added_image
        return numpy.minimum(accumulated_image, added_image)
class SumAveraging(ImageAccumulatorFilter):
    """Accumulate the pixel-wise sum of all images."""
    name = "sum"

    def _accumulate(self, accumulated_image, added_image):
        # The first image becomes the accumulator as-is.
        if accumulated_image is None:
            return added_image
        return accumulated_image + added_image
class MeanAveraging(SumAveraging):
    """Pixel-wise arithmetic mean: the accumulated sum divided by the count."""
    name = "mean"

    def get_result(self):
        summed = super(MeanAveraging, self).get_result()
        return summed / numpy.float32(self._count)
class ImageStackFilter(ImageReductionFilter):
    """
    Reduction filter that stores every image in a 3D stack and performs
    the whole computation once at the end.
    """

    def init(self, max_images=None):
        self._stack = None
        self._max_stack_size = max_images
        self._count = 0

    def add_image(self, image):
        """Append one image to the stack.

        :param numpy.ndarray image: image to add
        """
        if self._stack is None:
            # Lazily allocate the stack using the first image's geometry.
            full_shape = (self._max_stack_size, image.shape[0], image.shape[1])
            self._stack = numpy.zeros(full_shape, dtype=numpy.float32)
        self._stack[self._count] = image
        self._count += 1

    def _compute_stack_reduction(self, stack):
        """Reduce the filled stack to a single image (subclass hook)."""
        raise NotImplementedError()

    def get_result(self):
        if self._stack is None:
            raise Exception("No data to reduce")
        # Shrink the stack to the number of frames actually received.
        used_shape = (self._count, self._stack.shape[1], self._stack.shape[2])
        self._stack.resize(used_shape)
        reduced = self._compute_stack_reduction(self._stack)
        # Drop the reference so the memory can be reclaimed.
        self._stack = None
        return reduced
class AverageDarkFilter(ImageStackFilter):
    """
    Stack-based filter delegating the reduction to :func:`average_dark`.

    TODO: Must be split according to each filter_name, and removed
    """

    def __init__(self, filter_name, cut_off, quantiles):
        super(AverageDarkFilter, self).__init__()
        self._filter_name = filter_name
        self._cut_off = cut_off
        self._quantiles = quantiles

    @property
    def name(self):
        return self._filter_name

    def get_parameters(self):
        """Return a dictionary containing filter parameters"""
        return {"cutoff": self._cut_off, "quantiles": self._quantiles}

    def _compute_stack_reduction(self, stack):
        """Reduce the stack through :func:`average_dark`.

        :param numpy.ndarray stack: stack to reduce
        :return: reduced image
        :rtype: numpy.ndarray
        """
        return average_dark(stack, self._filter_name,
                            self._cut_off, self._quantiles)
# Accumulator-based filter classes available by name.
_FILTERS = [
    MaxAveraging,
    MinAveraging,
    MeanAveraging,
    SumAveraging,
]

# Lookup table: algorithm name -> accumulator filter class.
_FILTER_NAME_MAPPING = {}
for _f in _FILTERS:
    _FILTER_NAME_MAPPING[_f.name] = _f

# Names handled by the stack-based average_dark() implementation.
_AVERAGE_DARK_FILTERS = set(["min", "max", "sum", "mean", "std", "quantiles", "median"])
def is_algorithm_name_exists(filter_name):
    """Return true if the name is a name of a filter algorithm"""
    return (filter_name in _FILTER_NAME_MAPPING
            or filter_name in _AVERAGE_DARK_FILTERS)
class AlgorithmCreationError(RuntimeError):
    """Raised when an ImageReductionFilter cannot be created from the
    requested parameters."""
    pass
def create_algorithm(filter_name, cut_off=None, quantiles=None):
    """Factory to create algorithm according to parameters

    :param cutoff: keep all data where (I-center)/std < cutoff
    :type cutoff: float or None
    :param quantiles: 2-tuple of floats average out data between the two
        quantiles
    :type quantiles: tuple(float, float) or None
    :return: An algorithm
    :rtype: ImageReductionFilter
    :raise AlgorithmCreationError: If it is not possible to create the
        algorithm
    """
    if filter_name in _FILTER_NAME_MAPPING and cut_off is None:
        # Accumulator filters reduce frame by frame and use less memory.
        return _FILTER_NAME_MAPPING[filter_name]()
    if filter_name in _AVERAGE_DARK_FILTERS:
        # Stack-based filters need every frame in memory at once.
        if filter_name == "quantiles" and quantiles is None:
            raise AlgorithmCreationError("Quantiles algorithm expect quantiles parameters")
        return AverageDarkFilter(filter_name, cut_off, quantiles)
    raise AlgorithmCreationError("No algorithm available for the expected parameters")
def bounding_box(img):
    """
    Tries to guess the bounding box around a valid massif

    :param img: 2D array like
    :return: 4-tuple (d0_min, d1_min, d0_max, d1_max)
    """
    # numpy.int was removed in NumPy 1.24; the builtin int is equivalent.
    img = img.astype(int)
    # Binary profiles: 1 where the row/column contains any signal.
    img0 = (img.sum(axis=1) > 0).astype(int)
    img1 = (img.sum(axis=0) > 0).astype(int)
    # First-order differences locate the rising (0->1) and falling (1->0)
    # edges of the profiles.
    dimg0 = img0[1:] - img0[:-1]
    min0 = dimg0.argmax()
    max0 = dimg0.argmin() + 1
    dimg1 = img1[1:] - img1[:-1]
    min1 = dimg1.argmax()
    max1 = dimg1.argmin() + 1
    # No falling edge found: the massif reaches the image border.
    if max0 == 1:
        max0 = img0.size
    if max1 == 1:
        max1 = img1.size
    return (min0, min1, max0, max1)
def remove_saturated_pixel(ds, threshold=0.1, minimum=None, maximum=None):
    """
    Remove saturated fixes from an array inplace.

    :param ds: a dataset as ndarray
    :param float threshold: what is the upper limit?
        all pixel > max*(1-threshold) are discareded.
    :param float minimum: minumum valid value (or True for auto-guess)
    :param float maximum: maximum valid value
    :return: the input dataset
    """
    shape = ds.shape
    # Saturation level derives from the dtype's full-scale value for known
    # integer types, else from the data maximum (or the `maximum` argument).
    if ds.dtype == numpy.uint16:
        maxt = (1.0 - threshold) * 65535.0
    elif ds.dtype == numpy.int16:
        maxt = (1.0 - threshold) * 32767.0
    elif ds.dtype == numpy.uint8:
        maxt = (1.0 - threshold) * 255.0
    elif ds.dtype == numpy.int8:
        maxt = (1.0 - threshold) * 127.0
    else:
        if maximum is None:
            maxt = (1.0 - threshold) * ds.max()
        else:
            maxt = maximum
    if maximum is not None:
        maxt = min(maxt, maximum)
    # Pixels above the saturation level; handled zone by zone below.
    invalid = (ds > maxt)
    if minimum:
        if minimum is True:
            # automatic guess of the best minimum TODO: use the HWHM to guess the minumum...
            # Histogram the log intensities, then search for the valley
            # between the two dominant peaks.
            data_min = ds.min()
            x, y = numpy.histogram(numpy.log(ds - data_min + 1.0), bins=100)
            f = interp1d((y[1:] + y[:-1]) / 2.0, -x, bounds_error=False, fill_value=-x.min())
            max_low = fmin(f, y[1], disp=0)
            max_hi = fmin(f, y[-1], disp=0)
            if max_hi > max_low:
                f = interp1d((y[1:] + y[:-1]) / 2.0, x, bounds_error=False)
                min_center = fminbound(f, max_low, max_hi)
            else:
                min_center = max_hi
            minimum = float(numpy.exp(y[((min_center / y) > 1).sum() - 1])) - 1.0 + data_min
            logger.debug("removeSaturatedPixel: best minimum guessed is %s", minimum)
        # Clip and offset the data so the baseline sits at zero.
        ds[ds < minimum] = minimum
        ds -= minimum  # - 1.0
    if invalid.sum(dtype=int) == 0:
        logger.debug("No saturated area where found")
        return ds
    # Grow the saturated mask by one pixel, then label connected zones.
    gi = ndimage.morphology.binary_dilation(invalid)
    lgi, nc = ndimage.label(gi)
    if nc > 100:
        logger.warning("More than 100 saturated zones were found on this image !!!!")
    for zone in range(nc + 1):
        dzone = (lgi == zone)
        # Skip "zones" covering most of the image (e.g. the background label).
        if dzone.sum(dtype=int) > ds.size // 2:
            continue
        min0, min1, max0, max1 = bounding_box(dzone)
        ksize = min(max0 - min0, max1 - min1)
        # Median-filter a padded neighbourhood of the zone until no
        # saturated value remains, then write the result back in place.
        subset = ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)]
        while subset.max() > maxt:
            subset = ndimage.median_filter(subset, ksize)
        ds[max(0, min0 - 4 * ksize):min(shape[0], max0 + 4 * ksize), max(0, min1 - 4 * ksize):min(shape[1], max1 + 4 * ksize)] = subset
    return ds
def average_dark(lstimg, center_method="mean", cutoff=None, quantiles=(0.5, 0.5)):
    """
    Averages a serie of dark (or flat) images.
    Centers the result on the mean or the median ...
    but averages all frames within cutoff*std

    :param lstimg: list of 2D images or a 3D stack
    :param str center_method: is the center calculated by a "mean", "median",
        "quantile", "std"
    :param cutoff: keep all data where (I-center)/std < cutoff
    :type cutoff: float or None
    :param quantiles: 2-tuple of floats average out data between the two
        quantiles
    :type quantiles: tuple(float, float) or None
    :return: 2D image averaged
    """
    # Normalize the input to a float32 3D stack (frames, dim0, dim1).
    if "ndim" in dir(lstimg) and lstimg.ndim == 3:
        stack = lstimg.astype(numpy.float32)
        shape = stack.shape[1:]
        length = stack.shape[0]
    else:
        shape = lstimg[0].shape
        length = len(lstimg)
        if length == 1:
            # A single frame needs no reduction at all.
            return lstimg[0].astype(numpy.float32)
        stack = numpy.zeros((length, shape[0], shape[1]), dtype=numpy.float32)
        for i, img in enumerate(lstimg):
            stack[i] = img
    # Compute the "center" image according to the requested method.
    if center_method in dir(stack):
        # "mean", "max", "min", "std", ... delegate to the ndarray method.
        center = stack.__getattribute__(center_method)(axis=0)
    elif center_method == "median":
        logger.info("Filtering data (median)")
        center = numpy.median(stack, axis=0)
    elif center_method.startswith("quantil"):
        logger.info("Filtering data (quantiles: %s)", quantiles)
        sorted_ = numpy.sort(stack, axis=0)
        lower = max(0, int(numpy.floor(min(quantiles) * length)))
        upper = min(length, int(numpy.ceil(max(quantiles) * length)))
        if (upper == lower):
            # Degenerate selection: widen it by one frame when possible.
            if upper < length:
                upper += 1
            elif lower > 0:
                lower -= 1
            else:
                logger.warning("Empty selection for quantil %s, would keep points from %s to %s", quantiles, lower, upper)
        center = sorted_[lower:upper].mean(axis=0)
    else:
        raise RuntimeError("Cannot understand method: %s in average_dark" % center_method)
    if cutoff is None or cutoff <= 0:
        output = center
    else:
        # Sigma-clipping: mask pixels further than cutoff*std from the
        # center, then average the remaining values frame-wise.
        std = stack.std(axis=0)
        # Broadcast std/center over the stack without copying, via 0-strides.
        strides = 0, std.strides[0], std.strides[1]
        std.shape = 1, shape[0], shape[1]
        std.strides = strides
        center.shape = 1, shape[0], shape[1]
        center.strides = strides
        mask = ((abs(stack - center) / std) > cutoff)
        stack[numpy.where(mask)] = 0.0
        summed = stack.sum(axis=0)
        # Divide by the per-pixel number of kept frames (at least 1).
        output = summed / numpy.float32(numpy.maximum(1, (length - mask.sum(axis=0))))
    return output
def _normalize_image_stack(image_stack):
"""
Convert input data to a list of 2D numpy arrays or a stack
of numpy array (3D array).
:param image_stack: slice of images
:type image_stack: list or numpy.ndarray
:return: A stack of image (list of 2D array or a single 3D array)
:rtype: list or numpy.ndarray
"""
if image_stack is None:
return None
if isinstance(image_stack, numpy.ndarray) and image_stack.ndim == 3:
# numpy image stack (single 3D image)
return image_stack
if isinstance(image_stack, list):
# list of numpy images (multi 2D images)
result = []
for image in image_stack:
if isinstance(image, six.string_types):
data = fabio.open(image).data
elif isinstance(image, numpy.ndarray) and image.ndim == 2:
data = image
else:
raise Exception("Unsupported image type '%s' in image_stack" % type(image))
result.append(data)
return result
raise Exception("Unsupported type '%s' for image_stack" % type(image_stack))
class AverageWriter():
    """Interface for using writer in `Average` process.

    Concrete writers must implement all three methods; this base class
    only documents the contract.
    """

    def write_header(self, merged_files, nb_frames, monitor_name):
        """Write the header of the average

        :param list merged_files: List of files used to generate this output
        :param int nb_frames: Number of frames used
        :param str monitor_name: Name of the monitor used. Can be None.
        """
        raise NotImplementedError()

    def write_reduction(self, algorithm, data):
        """Write one reduction

        :param ImageReductionFilter algorithm: Algorithm used
        :param object data: Data of this reduction
        """
        raise NotImplementedError()

    def close(self):
        """Close the writer. Must not be used anymore."""
        raise NotImplementedError()
class MultiFilesAverageWriter(AverageWriter):
    """Write reductions into multi files. File headers are duplicated."""

    def __init__(self, file_name_pattern, file_format, dry_run=False):
        """
        :param str file_name_pattern: File name pattern for the output files.
            If it contains "{method_name}", it is updated for each
            reduction writing with the name of the reduction.
        :param str file_format: File format used. It is the default
            extension file.
        :param bool dry_run: If dry_run, the file is created on memory but not
            saved on the file system at the end
        """
        self._file_name_pattern = file_name_pattern
        self._global_header = {}
        # Keep produced images only as long as their algorithm is alive.
        self._fabio_images = weakref.WeakKeyDictionary()
        self._dry_run = dry_run

        # in case "edf.gz"
        if "." in file_format:
            file_format = file_format.split(".")[0]

        self._fabio_class = fabio.factory(file_format + "image")

    def write_header(self, merged_files, nb_frames, monitor_name):
        self._global_header["nfiles"] = len(merged_files)
        self._global_header["nframes"] = nb_frames
        if monitor_name is not None:
            self._global_header["monitor_name"] = monitor_name

        # Zero-padded key so the header entries sort in file order.
        pattern = "merged_file_%%0%ii" % len(str(len(merged_files)))
        for i, f in enumerate(merged_files):
            name = pattern % i
            self._global_header[name] = f.filename

    def _get_file_name(self, reduction_name):
        # Substitute {method_name} in the configured pattern.
        keys = {"method_name": reduction_name}
        return stringutil.safe_format(self._file_name_pattern, keys)

    def write_reduction(self, algorithm, data):
        file_name = self._get_file_name(algorithm.name)
        # overwrite the method
        header = fabio.fabioimage.OrderedDict()
        header["method"] = algorithm.name
        for name, value in self._global_header.items():
            header[name] = str(value)
        # Also record the algorithm-specific parameters (cutoff, quantiles).
        filter_parameters = algorithm.get_parameters()
        for name, value in filter_parameters.items():
            header[name] = str(value)
        image = self._fabio_class.__class__(data=data, header=header)
        if not self._dry_run:
            image.write(file_name)
            logger.info("Wrote %s", file_name)
        self._fabio_images[algorithm] = image

    def get_fabio_image(self, algorithm):
        """Get the constructed fabio image

        :rtype: fabio.fabioimage.FabioImage
        """
        return self._fabio_images[algorithm]

    def close(self):
        """Close the writer. Must not be used anymore."""
        self._header = None
def common_prefix(string_list):
    """Return the common prefix of a list of strings

    TODO: move it into utils package

    :param list(str) string_list: List of strings
    :rtype: str
    """
    prefix = ""
    # zip(*string_list) walks all strings in lockstep, one character
    # position at a time. The previous zip(string_list) iterated over the
    # whole strings, so the function returned the concatenation of every
    # input instead of the common prefix.
    for chars in zip(*string_list):
        first = chars[0]
        if all(c == first for c in chars):
            prefix += first
        else:
            break
    return prefix
class AverageObserver(object):
    """Interface for observers plugged into the `Average` process.

    Every callback is an optional no-op; subclasses override the ones
    they are interested in.
    """

    def image_loaded(self, fabio_image, image_index, images_count):
        """Called when an input image is loaded"""
        pass

    def process_started(self):
        """Called when the full processing is started"""
        pass

    def algorithm_started(self, algorithm):
        """Called when an algorithm is started"""
        pass

    def frame_processed(self, algorithm, frame_index, frames_count):
        """Called after providing a frame to an algorithm"""
        pass

    def result_processing(self, algorithm):
        """Called before the result of an algorithm is computed"""
        pass

    def algorithm_finished(self, algorithm):
        """Called when an algorithm is finished"""
        pass

    def process_finished(self):
        """Called when the full process is finished"""
        pass
class Average(object):
    """Process images to generate an average using different algorithms."""

    def __init__(self):
        """Constructor"""
        self._dark = None                 # averaged dark image (or None)
        self._raw_flat = None             # averaged flat, before dark correction
        self._flat = None                 # flat actually used (see _update_flat)
        self._monitor_key = None
        self._threshold = None
        self._minimum = None
        self._maximum = None
        self._fabio_images = []
        self._writer = None
        self._algorithms = []
        self._nb_frames = 0
        self._correct_flat_from_dark = False
        # Results are dropped automatically when an algorithm is collected.
        self._results = weakref.WeakKeyDictionary()
        self._observer = None

    def set_observer(self, observer):
        """Set an observer to the average process.

        :param AverageObserver observer: An observer
        """
        self._observer = observer

    def set_dark(self, dark_list):
        """Defines images used as dark.

        :param list dark_list: List of dark used
        """
        if dark_list is None:
            self._dark = None
            return
        darks = _normalize_image_stack(dark_list)
        # Sigma-clipped mean of the darks.
        self._dark = average_dark(darks, center_method="mean", cutoff=4)

    def set_flat(self, flat_list):
        """Defines images used as flat.

        :param list flat_list: List of flat used
        """
        if flat_list is None:
            self._raw_flat = None
            return
        flats = _normalize_image_stack(flat_list)
        # Sigma-clipped mean of the flats; dark correction applied later.
        self._raw_flat = average_dark(flats, center_method="mean", cutoff=4)

    def set_correct_flat_from_dark(self, correct_flat_from_dark):
        """Defines if the dark must be applied on the flat.

        :param bool correct_flat_from_dark: If true, the dark is applied.
        """
        self._correct_flat_from_dark = correct_flat_from_dark

    def get_counter_frames(self):
        """Returns the number of frames used for the process.

        :rtype: int
        """
        return self._nb_frames

    def get_fabio_images(self):
        """Returns source images as fabio images.

        :rtype: list(fabio.fabioimage.FabioImage)"""
        return self._fabio_images

    def set_images(self, image_list):
        """Defines the set of source images used to process an average.

        :param list image_list: List of filename, numpy arrays, fabio images
            used as source for the computation.
        """
        self._fabio_images = []
        self._nb_frames = 0
        if len(image_list) > 100:
            # if too many files are opened, it may crash. The har limit is 1024
            copy_data = True
        else:
            copy_data = False
        for image_index, image in enumerate(image_list):
            if isinstance(image, six.string_types):
                logger.info("Reading %s", image)
                fabio_image = fabio.open(image)
                if copy_data and fabio_image.nframes == 1:
                    # copy the data so that we can close the file right now.
                    fimg = fabio_image.convert(fabio_image.__class__)
                    fimg.filename = image
                    fabio_image.close()
                    fabio_image = fimg
            elif isinstance(image, fabio.fabioimage.fabioimage):
                fabio_image = image
            else:
                if fabio.hexversion < 262148:
                    logger.error("Old version of fabio detected, upgrade to 0.4 or newer")
                # Assume this is a numpy array like
                if not isinstance(image, numpy.ndarray):
                    raise RuntimeError("Not good type for input, got %s, expected numpy array" % type(image))
                fabio_image = fabio.numpyimage.NumpyImage(data=image)
            if self._observer:
                self._observer.image_loaded(fabio_image, image_index, len(image_list))
            self._fabio_images.append(fabio_image)
            self._nb_frames += fabio_image.nframes

    def set_monitor_name(self, monitor_name):
        """Defines the monitor name used to correct images before processing
        the average. This monitor must be part of the file header, else the
        image is skipped.

        :param str monitor_name: Name of the monitor available on the header
            file
        """
        self._monitor_key = monitor_name

    def set_pixel_filter(self, threshold, minimum, maximum):
        """Defines the filter applied on each pixels of the images before
        processing the average.

        :param threshold: what is the upper limit?
            all pixel > max*(1-threshold) are discareded.
        :param minimum: minimum valid value or True
        :param maximum: maximum valid value
        """
        self._threshold = threshold
        self._minimum = minimum
        self._maximum = maximum

    def set_writer(self, writer):
        """Defines the object write which will be used to store the result.

        :param AverageWriter writer: The writer to use."""
        self._writer = writer

    def add_algorithm(self, algorithm):
        """Defines another algorithm which will be computed on the source.

        :param ImageReductionFilter algorithm: An averaging algorithm.
        """
        self._algorithms.append(algorithm)

    def _get_corrected_image(self, fabio_image, image):
        """Returns an image corrected by pixel filter, saturation, flat, dark,
        and monitor correction. The internal computation is done in float
        64bits. The result is provided as float 32 bits.

        :param fabio.fabioimage.FabioImage fabio_image: Object containing the
            header of the data to process
        :param numpy.ndarray image: Data to process
        :rtype: numpy.ndarray
        """
        corrected_image = numpy.ascontiguousarray(image, numpy.float64)
        if self._threshold or self._minimum or self._maximum:
            corrected_image = remove_saturated_pixel(corrected_image, self._threshold, self._minimum, self._maximum)
        if self._dark is not None:
            corrected_image -= self._dark
        if self._flat is not None:
            corrected_image /= self._flat
        if self._monitor_key is not None:
            try:
                monitor = header_utils.get_monitor_value(fabio_image, self._monitor_key)
                corrected_image /= monitor
            except header_utils.MonitorNotFound as e:
                # A frame without its monitor is skipped, not fatal.
                logger.warning("Monitor not found in filename '%s', data skipped. Cause: %s", fabio_image.filename, str(e))
                return None
        return numpy.ascontiguousarray(corrected_image, numpy.float32)

    def _get_image_reduction(self, algorithm):
        """Returns the result of an averaging algorithm using all over
        parameters defined in this object.

        :param ImageReductionFilter algorithm: Averaging algorithm
        :rtype: numpy.ndarray
        """
        algorithm.init(max_images=self._nb_frames)
        frame_index = 0
        # Feed every frame of every input image to the algorithm.
        for fabio_image in self._fabio_images:
            for frame in range(fabio_image.nframes):
                if fabio_image.nframes == 1:
                    data = fabio_image.data
                else:
                    data = fabio_image.getframe(frame).data
                logger.debug("Intensity range for %s#%i is %s --> %s", fabio_image.filename, frame, data.min(), data.max())
                corrected_image = self._get_corrected_image(fabio_image, data)
                if corrected_image is not None:
                    algorithm.add_image(corrected_image)
                if self._observer:
                    self._observer.frame_processed(algorithm, frame_index, self._nb_frames)
                frame_index += 1
        if self._observer:
            self._observer.result_processing(algorithm)
        return algorithm.get_result()

    def _update_flat(self):
        """
        Update the flat according to the last process parameters

        :rtype: numpy.ndarray
        """
        if self._raw_flat is not None:
            flat = numpy.array(self._raw_flat)
            if self._correct_flat_from_dark:
                if self._dark is not None:
                    flat -= self._dark
                else:
                    logger.debug("No dark. Flat correction using dark skipped")
            # Avoid divisions by zero or negative values in flat correction.
            flat[numpy.where(flat <= 0)] = 1.0
        else:
            flat = None
        self._flat = flat

    def process(self):
        """Process source images to all defined averaging algorithms defined
        using defined parameters. To access to the results you have to define
        a writer (`AverageWriter`). To follow the process forward you have to
        define an observer (`AverageObserver`).
        """
        self._update_flat()
        writer = self._writer
        if self._observer:
            self._observer.process_started()
        if writer is not None:
            writer.write_header(self._fabio_images, self._nb_frames, self._monitor_key)
        for algorithm in self._algorithms:
            if self._observer:
                self._observer.algorithm_started(algorithm)
            image_reduction = self._get_image_reduction(algorithm)
            logger.debug("Intensity range in merged dataset : %s --> %s", image_reduction.min(), image_reduction.max())
            if writer is not None:
                writer.write_reduction(algorithm, image_reduction)
            self._results[algorithm] = image_reduction
            if self._observer:
                self._observer.algorithm_finished(algorithm)
        if self._observer:
            self._observer.process_finished()
        if writer is not None:
            writer.close()

    def get_image_reduction(self, algorithm):
        """Returns the result of an algorithm. The `process` must be already
        done.

        :param ImageReductionFilter algorithm: An averaging algorithm
        :rtype: numpy.ndarray
        """
        return self._results[algorithm]
def average_images(listImages, output=None, threshold=0.1, minimum=None,
                   maximum=None, darks=None, flats=None, filter_="mean",
                   correct_flat_from_dark=False, cutoff=None, quantiles=None,
                   fformat="edf", monitor_key=None):
    """
    Takes a list of filenames and creates an average frame discarding all
    saturated pixels.

    :param listImages: list of string representing the filenames
    :param output: name of the optional output file
    :param threshold: what is the upper limit? all pixel > max*(1-threshold)
        are discarded.
    :param minimum: minimum valid value or True
    :param maximum: maximum valid value
    :param darks: list of dark current images for subtraction
    :param flats: list of flat field images for division
    :param filter_: can be "min", "max", "median", "mean", "sum", "quantiles"
        (default='mean')
    :param correct_flat_from_dark: shall the flat be re-corrected ?
    :param cutoff: keep all data where (I-center)/std < cutoff
    :param quantiles: 2-tuple containing the lower and upper quantile (0<q<1)
        to average out.
    :param fformat: file format of the output image, default: edf
    :param monitor_key str: Key containing the monitor. Can be none.
    :return: filename with the data or the data ndarray in case format=None
    """
    # - Input sanitization: fall back to a known algorithm name.
    if not is_algorithm_name_exists(filter_):
        logger.warning("Filter %s not understood. switch to mean filter", filter_)
        filter_ = "mean"
    if quantiles is not None and filter_ != "quantiles":
        logger.warning("Set method to quantiles as quantiles parameters is defined.")
        filter_ = "quantiles"

    # - Configure the averaging engine.
    averager = Average()
    averager.set_images(listImages)
    averager.set_dark(darks)
    averager.set_flat(flats)
    averager.set_correct_flat_from_dark(correct_flat_from_dark)
    averager.set_monitor_name(monitor_key)
    averager.set_pixel_filter(threshold, minimum, maximum)
    algorithm = create_algorithm(filter_, cutoff, quantiles)
    averager.add_algorithm(algorithm)

    # - Writer setup: derive a default output name when a format is given.
    if fformat is not None:
        # lstrip removes every leading dot, so the startswith guard is redundant.
        fformat = fformat.lstrip(".")
        if output is None:
            prefix = common_prefix([i.filename for i in averager.get_fabio_images()])
            output = "{method_name}" + "filt%02i-%s.%s" % (averager.get_counter_frames(), prefix, fformat)
    writer = MultiFilesAverageWriter(output, fformat) if output is not None else None
    if writer is not None:
        averager.set_writer(writer)

    # - Run and report: filename when written to disk, ndarray otherwise.
    averager.process()
    if writer is not None:
        return writer.get_fabio_image(algorithm).filename
    return averager.get_image_reduction(algorithm)
| 34.929534 | 135 | 0.632213 | 19,389 | 0.575188 | 0 | 0 | 62 | 0.001839 | 0 | 0 | 12,662 | 0.375627 |
4f27e94364e886c37f053be2d331525eebc5847a | 74 | py | Python | constants.py | texdarkstar/py3bot | 05a60c49894415e00a59bbe086a58c4e6b331fb5 | [
"MIT"
] | 1 | 2021-11-21T02:02:44.000Z | 2021-11-21T02:02:44.000Z | constants.py | texdarkstar/py3bot | 05a60c49894415e00a59bbe086a58c4e6b331fb5 | [
"MIT"
] | null | null | null | constants.py | texdarkstar/py3bot | 05a60c49894415e00a59bbe086a58c4e6b331fb5 | [
"MIT"
] | 1 | 2021-11-21T02:02:47.000Z | 2021-11-21T02:02:47.000Z | from telnetlib import IAC, WILL, TTYPE, SB, SE
# b"\x00": the Telnet subnegotiation "IS" code, which telnetlib does not export
# (it defines IAC/SB/SE/TTYPE but no IS constant).
IS = chr(0).encode()
| 14.8 | 47 | 0.635135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4f2abc95f3e1e4c6346436b87adf5ac5fcac62da | 7,754 | py | Python | schedule/views.py | AndrewSaltz/Lab_Schedule | a71f3ddfd51661bac0dc1de72635df19a56f23c9 | [
"MIT"
] | null | null | null | schedule/views.py | AndrewSaltz/Lab_Schedule | a71f3ddfd51661bac0dc1de72635df19a56f23c9 | [
"MIT"
] | null | null | null | schedule/views.py | AndrewSaltz/Lab_Schedule | a71f3ddfd51661bac0dc1de72635df19a56f23c9 | [
"MIT"
] | null | null | null | from django.shortcuts import render
import datetime
from datetime import date
import calendar
from schedule.models import Event, period_choices, cart_choice
from django.views.generic import UpdateView, TemplateView, ListView
from schedule.forms import ReservationForm
from django.http import HttpResponseRedirect, HttpResponse
from django import forms
import json
import calendar
from django.http import JsonResponse
from django.core.serializers.json import DjangoJSONEncoder
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.db import IntegrityError
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.auth.models import User
class Home(LoginRequiredMixin, ListView):
    """Weekly schedule view for the logged-in user."""
    model = Event
    template_name = "schedule/home.html"
    login_url = '/login/'
    redirect_field_name = 'redirect_to'

    def get_context_data(self, **kwargs):
        """Build the context for the week selected by the 'adjuster' URL
        argument (0 = current week, +/-n = n weeks away from it)."""
        context = super(Home, self).get_context_data(**kwargs)
        week_arg = self.kwargs['adjuster']
        day_offset = int(week_arg) * 7  # whole weeks away from today's week
        today = date.today()
        monday = (today - datetime.timedelta(days=today.weekday())
                  + datetime.timedelta(days=day_offset))
        friday = monday + datetime.timedelta(days=4)
        user = self.request.user
        context['monday'] = monday
        context['q'] = week_arg
        context['this_week'] = Event.objects.filter(day__gte=monday, day__lte=friday)
        context['the_day'] = calendar.day_name[today.weekday()]
        context['my_reservation'] = Event.objects.filter(day__gte=monday,
                                                         day__lte=friday,
                                                         teacher=user.id)
        context['periods'] = [choice[1] for choice in period_choices]
        context['username'] = user.username
        return context
@ensure_csrf_cookie
def reserve(request):
    """Toggle the reservation state of a single Event slot (AJAX only).

    Expects ``request.POST['pk']`` identifying the Event. Responds with
    JSON ``{'result': n}`` where:
        1 - the caller released their own reservation
        2 - the slot was (re)assigned to the caller
        3 - the slot is held by somebody else and the caller may not override

    Non-AJAX requests fall through and return ``None`` (unchanged from the
    original behavior).
    """
    if request.is_ajax():
        pk = request.POST['pk']
        slot = Event.objects.get(pk=pk)
        user = request.user
        if slot.is_reserved:
            if user == slot.teacher:
                # Owner clicked their own slot: release it.
                slot.is_reserved = False
                slot.teacher = None
                slot.save()
                result = 1
            elif user.is_superuser and user != slot.teacher:  # Override as admin
                # BUGFIX: was `slot.is_reserved == True`, a no-op comparison
                # statement; the admin takeover clearly intends an assignment.
                slot.is_reserved = True
                slot.teacher = user
                slot.save()
                result = 2
            else:
                # Reserved by someone else; caller has no override rights.
                result = 3
        else:
            # Free slot: reserve it for the caller.
            slot.is_reserved = True
            slot.teacher = user
            slot.save()
            result = 2
        data = {'result': result}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder))
class Dashboard(PermissionRequiredMixin,TemplateView):
    """Staff dashboard showing today's and tomorrow's reservation slots."""
    template_name = "schedule/dashboard.html"
    permission_required = 'is_staff'

    def get_context_data(self, **kwargs):
        """Add period labels, cart names and the two-day event lists."""
        context = super(Dashboard, self).get_context_data(**kwargs)
        today = datetime.date.today()
        tomorrow = today + datetime.timedelta(days=1)
        context.update({
            'periods': [choice[1] for choice in period_choices],  # Make this a mixin
            'this_day': Event.objects.filter(day=today),
            'next_day': Event.objects.filter(day=tomorrow),
            'cart_list': [cart[0] for cart in cart_choice],
            'today': today,
            'tomorrow': tomorrow,
        })
        return context
@ensure_csrf_cookie
def create_week(request):
    """Create empty Event slots for every period/cart combination of the
    current school week (Monday through Friday), via AJAX.

    Slots that already exist trigger an IntegrityError (unique constraint)
    and are collected into ``dupe_list`` instead of being created. Responds
    with JSON containing the date range, the duplicates and the number of
    new slots; non-AJAX requests return ``None`` (unchanged behavior).
    """
    if request.is_ajax():
        to_monday = date.today().weekday()
        start = date.today() - datetime.timedelta(days=to_monday)  # this week's Monday
        end = start + datetime.timedelta(days=4)  # this week's Friday
        weekend = {5, 6}  # Python weekdays: Monday == 0
        dupe_list = []
        total = 0
        day = start
        while day <= end:
            if day.weekday() not in weekend:
                for period in period_choices:
                    for cart in cart_choice:
                        # Renamed from `open`, which shadowed the builtin.
                        slot = Event(day=day, period=period[0], cart=cart[0])
                        try:
                            slot.save()
                            total += 1
                        except IntegrityError:
                            # Slot already exists; record it as a duplicate.
                            dupe_list.append(str(slot))
            day += datetime.timedelta(days=1)  # advance until past the end day
        data = {'start': start, 'end': end, 'dupe_list': dupe_list, 'total': total}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder))
def redirect_root(request):
    """Redirect the site root to the current week's schedule view."""
    return HttpResponseRedirect('/week/0/')
@ensure_csrf_cookie
def create_month(request):
    """Create empty Event slots for every weekday of the current calendar
    month, for every period/cart combination, via AJAX.

    Existing slots raise IntegrityError (unique constraint) and are reported
    in ``dupe_list`` rather than created. Responds with JSON containing the
    date range, duplicates and count; non-AJAX requests return ``None``
    (unchanged behavior).
    """
    if request.is_ajax():
        month = date.today().month
        year = date.today().year
        start = date.today().replace(day=1)  # first day of the month
        # monthrange(...)[1] is the number of days in the month == last day.
        end = date.today().replace(day=(calendar.monthrange(year, month)[1]))
        weekend = {5, 6}  # Python weekdays: Monday == 0
        dupe_list = []
        total = 0
        day = start
        while day <= end:
            if day.weekday() not in weekend:
                for period in period_choices:
                    for cart in cart_choice:
                        # Renamed from `open`, which shadowed the builtin.
                        slot = Event(day=day, period=period[0], cart=cart[0])
                        try:
                            slot.save()
                            total += 1
                        except IntegrityError:
                            dupe_list.append(str(slot))
            day += datetime.timedelta(days=1)
        data = {'start': start, 'end': end, 'dupe_list': dupe_list, 'total': total}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder))
@ensure_csrf_cookie
def create_twelve(request):
    """Create empty Event slots for twelve school weeks starting this
    Monday, for every period/cart combination, via AJAX.

    The end date is snapped to the Friday of the final week. Existing slots
    raise IntegrityError and are reported in ``dupe_list``. Responds with
    JSON containing the date range, duplicates and count; non-AJAX requests
    return ``None`` (unchanged behavior).
    """
    if request.is_ajax():
        to_monday = date.today().weekday()
        start = date.today() - datetime.timedelta(days=to_monday)  # this week's Monday
        fake_end = start + datetime.timedelta(days=84)  # 12 weeks later (a Monday)
        if fake_end.weekday() != 4:
            # Snap to that week's Friday (weekday 4); for a Monday this
            # moves forward by 4 days (0 - 4 == -4, subtracted).
            end = fake_end - datetime.timedelta(days=(fake_end.weekday() - 4))
        else:
            end = fake_end
        weekend = {5, 6}  # Python weekdays: Monday == 0
        dupe_list = []
        total = 0
        day = start
        while day <= end:
            if day.weekday() not in weekend:
                for period in period_choices:
                    for cart in cart_choice:
                        # Renamed from `open`, which shadowed the builtin.
                        slot = Event(day=day, period=period[0], cart=cart[0])
                        try:
                            slot.save()
                            total += 1
                        except IntegrityError:
                            dupe_list.append(str(slot))
            day += datetime.timedelta(days=1)
        data = {'start': start, 'end': end, 'dupe_list': dupe_list, 'total': total}
        return HttpResponse(json.dumps(data, cls=DjangoJSONEncoder))
@ensure_csrf_cookie
def delete_all(request):
    """Delete EVERY Event in the database (AJAX requests only).

    WARNING: destructive -- any AJAX request to this view wipes the whole
    schedule; there is no permission check visible here beyond the view's
    URL routing. Always returns an empty HttpResponse, even for non-AJAX
    requests (which perform no deletion).
    """
    if request.is_ajax():
        Event.objects.all().delete()
    return HttpResponse()
| 39.764103 | 107 | 0.570415 | 2,149 | 0.277147 | 0 | 0 | 4,800 | 0.619035 | 0 | 0 | 800 | 0.103173 |
4f2add3dfacdc457e8a021fc99bae54faa461b6f | 13,569 | py | Python | scripts/sg-toolbox/SG-Glyph-CopyLayer.py | tphinney/science-gothic | b5e8a73778fdb62c38a0ee81cbe923ae9e15fc9a | [
"Apache-2.0"
] | 104 | 2019-08-08T20:18:18.000Z | 2022-03-23T21:08:24.000Z | scripts/sg-toolbox/SG-Glyph-CopyLayer.py | tphinney/science-gothic | b5e8a73778fdb62c38a0ee81cbe923ae9e15fc9a | [
"Apache-2.0"
] | 269 | 2019-08-06T22:12:53.000Z | 2022-03-23T18:05:07.000Z | scripts/FontLab/sg-toolbox/SG-Glyph-CopyLayer.py | tphinney/bank-gothic | 5857f71c207ff9fd54899423304267339196ef21 | [
"Apache-2.0"
] | 5 | 2019-08-09T21:51:45.000Z | 2020-04-26T18:09:01.000Z | #FLM: Glyph: Copy Layer (TypeRig)
# ----------------------------------------
# (C) Vassil Kateliev, 2019 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
import os
from collections import OrderedDict
import fontlab as fl6
from PythonQt import QtCore
from typerig.gui import QtGui
from typerig.gui.widgets import getProcessGlyphs
from typerig.proxy import *
# - Init --------------------------------
app_version = '1.97'
app_name = '[SG] Copy Layers'

# -- Copy Presets (by request)
# Each preset maps a name to a list of (source master, destination master)
# pairs; `dlg_CopyLayer.execute_preset` copies layer data along each pair.
copy_presets = {'contrast':[('Blk','Blk Ctr'),
					('Blk Cnd','Blk Cnd Ctr'),
					('Blk Exp','Blk Exp Ctr'),
					('Cnd','Cnd Ctr'),
					('Medium','Ctr'),
					('Exp','Exp Ctr'),
					('Lt','Lt Ctr'),
					('Lt Cnd','Lt Cnd Ctr'),
					('Lt Exp','Lt Exp Ctr')],
		'ctr_light':[('Lt','Lt Ctr'),
					('Lt Cnd','Lt Cnd Ctr'),
					('Lt Exp','Lt Exp Ctr')],
		'ctr_light_s':[('Lt','Lt Ctr'),
					('Lt Cnd','Lt Cnd Ctr'),
					('Lt Exp','Lt Exp Ctr'),
					('Lt S','Lt Ctr S'),
					('Lt Cnd S','Lt Cnd Ctr S'),
					('Lt Exp S','Lt Exp Ctr S')],
		'width': [('Blk','Blk Cnd'),
					('Medium','Cnd'),
					('Lt','Lt Cnd'),
					('Blk','Blk Exp'),
					('Medium','Exp'),
					('Lt','Lt Exp')],
		'weight':
					[('Medium','Lt'),
					('Medium','Blk')],
		'slant': [('Lt','Lt S'),
					('Medium','Medium S'),
					('Blk','Blk S'),
					('Lt Cnd','Lt Cnd S'),
					('Cnd','Cnd S'),
					('Blk Cnd','Blk Cnd S'),
					('Lt Exp','Lt Exp S'),
					('Exp','Exp S'),
					('Blk Exp','Blk Exp S'),
					('Lt','Lt Ctr S'),
					('Ctr','Ctr S'),
					('Blk Ctr','Blk Ctr S'),
					('Lt Cnd','Lt Cnd Ctr S'),
					('Cnd Ctr','Cnd Ctr S'),
					('Blk Cnd Ctr','Blk Cnd Ctr S'),
					('Lt Exp','Lt Exp Ctr S'),
					('Exp Ctr','Exp Ctr S'),
					('Blk Exp Ctr','Blk Exp Ctr S')]
		}

# -- GUI related
# Template row for the master table: name column plus SRC/DST checkboxes.
table_dict = {1:OrderedDict([('Master Name', None), ('SRC', False), ('DST', False)])}
spinbox_range = (-99, 99)
# - Widgets --------------------------------
class WTableView(QtGui.QTableWidget):
	"""Table widget listing font masters with SRC/DST checkbox columns.

	Column 0 holds the master name; columns 1 and 2 are user-checkable
	SRC (source) and DST (destination) selectors.
	"""
	def __init__(self, data):
		"""Build the table from `data`, a dict of row-index -> OrderedDict
		of column-name -> value (see `table_dict` at module level)."""
		super(WTableView, self).__init__()

		# - Init
		self.setColumnCount(max(map(len, data.values())))
		self.setRowCount(len(data.keys()))

		# - Set
		self.setTable(data)
		self.itemChanged.connect(self.markChange)

		# - Styling
		self.horizontalHeader().setStretchLastSection(True)
		self.setAlternatingRowColors(True)
		self.setShowGrid(False)
		#self.resizeColumnsToContents()
		self.resizeRowsToContents()

	def setTable(self, data, data_check=[], reset=False):
		"""(Re)populate the table from `data`.

		Rows whose first-column value is not found in `data_check` are
		highlighted red (only when `data_check` is non-empty).
		NOTE(review): mutable default `data_check=[]` is never mutated here,
		so it is harmless; the `reset` parameter is unused.
		"""
		name_row, name_column = [], []

		# Suppress itemChanged signals while repopulating so markChange
		# does not fire for programmatic edits.
		self.blockSignals(True)

		self.setColumnCount(max(map(len, data.values())))
		self.setRowCount(len(data.keys()))

		# - Populate
		for n, layer in enumerate(sorted(data.keys())):
			name_row.append(layer)
			for m, key in enumerate(data[layer].keys()):
				# -- Build name column
				name_column.append(key)

				# -- Add first data column
				newitem = QtGui.QTableWidgetItem(str(data[layer][key])) if m == 0 else QtGui.QTableWidgetItem()

				# -- Selectively colorize missing data
				if m == 0 and len(data_check) and data[layer][key] not in data_check: newitem.setBackground(QtGui.QColor('red'))

				# -- Build Checkbox columns
				if m > 0: newitem.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
				if m > 0: newitem.setCheckState(QtCore.Qt.Unchecked if not data[layer][key] else QtCore.Qt.Checked)

				self.setItem(n, m, newitem)

		self.setHorizontalHeaderLabels(name_column)
		self.setVerticalHeaderLabels(name_row)
		self.blockSignals(False)

	def getTable(self):
		"""Return {'SRC': [...], 'DST': [...]} with the master names whose
		respective checkbox is checked. Keys are absent when nothing is
		checked in that column."""
		returnDict = {}
		for row in range(self.rowCount):
			#returnDict[self.item(row, 0).text()] = (self.item(row, 1).checkState() == QtCore.Qt.Checked, self.item(row, 2).checkState() == QtCore.Qt.Checked)
			if self.item(row, 1).checkState() == QtCore.Qt.Checked:
				returnDict.setdefault('SRC',[]).append(self.item(row, 0).text())
			if self.item(row, 2).checkState() == QtCore.Qt.Checked:
				returnDict.setdefault('DST',[]).append(self.item(row, 0).text())

		return returnDict

	def markChange(self, item):
		"""Highlight a user-edited cell (connected to itemChanged)."""
		item.setBackground(QtGui.QColor('powderblue'))
# - Dialogs --------------------------------
class dlg_CopyLayer(QtGui.QDialog):
	"""Main dialog: copy outline/metrics/anchors between master layers.

	Offers a master table for ad-hoc single-source -> multi-destination
	copies plus one-click preset buttons (see module-level `copy_presets`).
	NOTE: this file is Python 2 (FontLab scripting) -- it uses `print`
	statements.
	"""
	def __init__(self):
		"""Build all widgets, wire signals and show the dialog."""
		super(dlg_CopyLayer, self).__init__()

		# - Init
		self.active_font = pFont()
		self.pMode = 0  # 0=Glyph, 1=Window, 2=Selection, 3=Font (see refreshMode)

		# - Basic Widgets
		self.tab_masters = WTableView(table_dict)
		self.table_populate()

		self.edt_checkStr = QtGui.QLineEdit()
		self.edt_checkStr.setPlaceholderText('DST string')
		self.edt_checkStr.setToolTip('Enter search criteria for selectively selecting destination masters.')

		self.btn_refresh = QtGui.QPushButton('Clear')
		self.btn_checkOn = QtGui.QPushButton('Select')
		self.btn_execute = QtGui.QPushButton('Execute Selection')
		self.btn_preset_contrast = QtGui.QPushButton('Copy to Contrast Masters')
		self.btn_preset_width = QtGui.QPushButton('Copy to Width Masters')
		self.btn_preset_weight = QtGui.QPushButton('Copy to Weight Masters')
		self.btn_preset_ctrlt = QtGui.QPushButton('Copy to Light Contrast Masters')
		self.btn_preset_ctrlts = QtGui.QPushButton('Copy to Light Contrast Masters (incl. Slant)')
		self.btn_preset_slant = QtGui.QPushButton('Copy to Slant Masters')

		self.btn_refresh.clicked.connect(self.table_populate)
		self.btn_checkOn.clicked.connect(lambda: self.table_populate(True))
		self.btn_execute.clicked.connect(self.execute_table)
		self.btn_preset_contrast.clicked.connect(lambda: self.execute_preset(copy_presets['contrast']))
		self.btn_preset_width.clicked.connect(lambda: self.execute_preset(copy_presets['width']))
		self.btn_preset_weight.clicked.connect(lambda: self.execute_preset(copy_presets['weight']))
		self.btn_preset_ctrlt.clicked.connect(lambda: self.execute_preset(copy_presets['ctr_light']))
		self.btn_preset_ctrlts.clicked.connect(lambda: self.execute_preset(copy_presets['ctr_light_s']))
		self.btn_preset_slant.clicked.connect(lambda: self.execute_preset(copy_presets['slant']))

		self.rad_glyph = QtGui.QRadioButton('Glyph')
		self.rad_window = QtGui.QRadioButton('Window')
		self.rad_selection = QtGui.QRadioButton('Selection')
		self.rad_font = QtGui.QRadioButton('Font')

		self.chk_outline = QtGui.QCheckBox('Outline')
		self.chk_guides = QtGui.QCheckBox('Guides')
		self.chk_anchors = QtGui.QCheckBox('Anchors')
		self.chk_lsb = QtGui.QCheckBox('LSB')
		self.chk_adv = QtGui.QCheckBox('Advance')
		self.chk_rsb = QtGui.QCheckBox('RSB')
		self.chk_lnk = QtGui.QCheckBox('Metric Links')
		self.chk_crlayer = QtGui.QCheckBox('Add layers')

		# -- Set States
		self.chk_outline.setCheckState(QtCore.Qt.Checked)
		self.chk_adv.setCheckState(QtCore.Qt.Checked)
		self.chk_lsb.setCheckState(QtCore.Qt.Checked)
		self.chk_anchors.setCheckState(QtCore.Qt.Checked)
		self.chk_lnk.setCheckState(QtCore.Qt.Checked)
		self.chk_crlayer.setCheckState(QtCore.Qt.Checked)
		self.chk_guides.setEnabled(False)  # guide copying not implemented

		self.rad_glyph.setChecked(True)
		self.rad_glyph.setEnabled(True)
		self.rad_window.setEnabled(True)
		self.rad_selection.setEnabled(True)
		self.rad_font.setEnabled(False)  # whole-font mode not enabled

		self.rad_glyph.toggled.connect(self.refreshMode)
		self.rad_window.toggled.connect(self.refreshMode)
		self.rad_selection.toggled.connect(self.refreshMode)
		self.rad_font.toggled.connect(self.refreshMode)

		# - Build layouts
		layoutV = QtGui.QGridLayout()
		layoutV.addWidget(QtGui.QLabel('Process Mode:'), 0, 0, 1, 8, QtCore.Qt.AlignBottom)
		layoutV.addWidget(self.rad_glyph, 1, 0, 1, 2)
		layoutV.addWidget(self.rad_window, 1, 2, 1, 2)
		layoutV.addWidget(self.rad_selection, 1, 4, 1, 2)
		layoutV.addWidget(self.rad_font, 1, 6, 1, 2)
		layoutV.addWidget(QtGui.QLabel('Copy Options:'), 2, 0, 1, 8, QtCore.Qt.AlignBottom)
		layoutV.addWidget(self.chk_outline, 3, 0, 1, 2)
		layoutV.addWidget(self.chk_guides, 3, 2, 1, 2)
		layoutV.addWidget(self.chk_anchors, 3, 4, 1, 2)
		layoutV.addWidget(self.chk_crlayer, 3, 6, 1, 2)
		layoutV.addWidget(self.chk_lsb, 4, 0, 1, 2)
		layoutV.addWidget(self.chk_adv, 4, 2, 1, 2)
		layoutV.addWidget(self.chk_rsb, 4, 4, 1, 2)
		layoutV.addWidget(self.chk_lnk, 4, 6, 1, 2)
		layoutV.addWidget(QtGui.QLabel('Master Layers: Single source to multiple destinations'), 5, 0, 1, 8, QtCore.Qt.AlignBottom)
		layoutV.addWidget(QtGui.QLabel('Search:'), 6, 0, 1, 1)
		layoutV.addWidget(self.edt_checkStr, 6, 1, 1, 3)
		layoutV.addWidget(self.btn_checkOn, 6, 4, 1, 2)
		layoutV.addWidget(self.btn_refresh, 6, 6, 1, 2)
		layoutV.addWidget(self.tab_masters, 7, 0, 15, 8)
		layoutV.addWidget(self.btn_execute, 22, 0, 1,8)
		layoutV.addWidget(QtGui.QLabel('Master Layers: Copy Presets'), 23, 0, 1, 8, QtCore.Qt.AlignBottom)
		layoutV.addWidget(self.btn_preset_weight, 24, 0, 1,8)
		layoutV.addWidget(self.btn_preset_width, 25, 0, 1,8)
		layoutV.addWidget(self.btn_preset_contrast, 26, 0, 1,8)
		layoutV.addWidget(self.btn_preset_ctrlt, 27, 0, 1,8)
		layoutV.addWidget(self.btn_preset_ctrlts, 28, 0, 1,8)
		layoutV.addWidget(self.btn_preset_slant, 29, 0, 1,8)

		# - Set Widget
		self.setLayout(layoutV)
		self.setWindowTitle('%s %s' %(app_name, app_version))
		self.setGeometry(300, 300, 300, 600)
		self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) # Always on top!!
		self.show()

	def refreshMode(self):
		"""Sync self.pMode with the checked process-mode radio button."""
		if self.rad_glyph.isChecked(): self.pMode = 0
		if self.rad_window.isChecked(): self.pMode = 1
		if self.rad_selection.isChecked(): self.pMode = 2
		if self.rad_font.isChecked(): self.pMode = 3

	def copyLayer(self, glyph, srcLayerName, dstLayerName, options, cleanDST=False, addLayer=False):
		"""Copy layer data of `glyph` from one master layer to another.

		`options` is the dict produced by getCopyOptions(); flags control
		which parts (outline, metrics, metric links, anchors) are copied.
		`cleanDST` clears destination shapes/anchors first; `addLayer`
		creates the destination layer if it is missing, otherwise the
		glyph is skipped with a warning.
		"""
		# -- Check if srcLayerExists
		if glyph.layer(srcLayerName) is None:
			print 'WARN:\tGlyph: %s\tMissing source layer: %s\tSkipped!' %(glyph.name, srcLayerName)
			return

		# -- Check if dstLayerExists
		if glyph.layer(dstLayerName) is None:
			print 'WARN:\tGlyph: %s\tMissing destination layer: %s\tAdd new: %s.' %(glyph.name, dstLayerName, addLayer)
			if addLayer:
				newLayer = fl6.flLayer()
				newLayer.name = str(dstLayerName)
				glyph.addLayer(newLayer)
			else:
				return

		# -- Outline
		if options['out']:
			# --- Get shapes
			srcShapes = glyph.shapes(srcLayerName)

			# --- Cleanup destination layers
			if cleanDST:
				glyph.layer(dstLayerName).removeAllShapes()

			# --- Copy/Paste shapes
			for shape in srcShapes:
				newShape = glyph.layer(dstLayerName).addShape(shape.cloneTopLevel())

			glyph.update()

		# -- Metrics
		if options['lsb']: glyph.setLSB(glyph.getLSB(srcLayerName), dstLayerName)
		if options['adv']: glyph.setAdvance(glyph.getAdvance(srcLayerName), dstLayerName)
		if options['rsb']: glyph.setRSB(glyph.getRSB(srcLayerName), dstLayerName)
		if options['lnk']:
			glyph.setLSBeq(glyph.getSBeq(srcLayerName)[0], dstLayerName)
			glyph.setRSBeq(glyph.getSBeq(srcLayerName)[1], dstLayerName)

		# -- Anchors
		# NOTE(review): options['gui'] (guides) and options['ref'] are not
		# consumed here; layer creation is driven by the addLayer argument.
		if options['anc']:
			if cleanDST:
				glyph.clearAnchors(dstLayerName)

			for src_anchor in glyph.anchors(srcLayerName):
				#glyph.layer(dstLayerName).addAnchor(src_anchor)
				glyph.addAnchor((src_anchor.point.x(), src_anchor.point.y()), src_anchor.name, dstLayerName)

	def table_populate(self, filterDST=False):
		"""(Re)fill the master table; with filterDST, pre-check DST for
		masters whose name contains the search-string widget's text."""
		if not filterDST:
			self.tab_masters.setTable({n:OrderedDict([('Master Name', master), ('SRC', False), ('DST', False)]) for n, master in enumerate(self.active_font.pMasters.names)})
			self.tab_masters.resizeColumnsToContents()
		else:
			#print ';'.join(sorted(self.active_font.pMasters.names))
			self.tab_masters.setTable({n:OrderedDict([('Master Name', master), ('SRC', False), ('DST', self.edt_checkStr.text in master)]) for n, master in enumerate(self.active_font.pMasters.names)})
			self.tab_masters.resizeColumnsToContents()

	def getCopyOptions(self):
		"""Collect the copy-option checkboxes into the flags dict consumed
		by copyLayer()."""
		options = {'out': self.chk_outline.isChecked(),
					'gui': self.chk_guides.isChecked(),
					'anc': self.chk_anchors.isChecked(),
					'lsb': self.chk_lsb.isChecked(),
					'adv': self.chk_adv.isChecked(),
					'rsb': self.chk_rsb.isChecked(),
					'lnk': self.chk_lnk.isChecked(),
					'ref': self.chk_crlayer.isChecked()
					}
		return options

	def execute_table(self):
		"""Copy from the single checked SRC master to all checked DST
		masters for every glyph selected by the current process mode.

		NOTE(review): raises KeyError/IndexError when no SRC or no DST
		row is checked (process_dict lacks those keys) -- unchanged.
		"""
		# - Init
		copy_options = self.getCopyOptions()
		process_glyphs = getProcessGlyphs(self.pMode)

		# - Process
		process_dict = self.tab_masters.getTable()
		process_src = process_dict['SRC'][0]
		process_dst = process_dict['DST']

		for wGlyph in process_glyphs:
			for dst_layer in process_dst:
				self.copyLayer(wGlyph, process_src, dst_layer, copy_options, True, self.chk_crlayer.isChecked())

			wGlyph.update()
			wGlyph.updateObject(wGlyph.fl, 'Glyph: /%s;\tCopy Layer | %s -> %s.' %(wGlyph.name, process_src, '; '.join(process_dst)))

	def execute_preset(self, preset_list):
		"""Run a list of (source, destination) master pairs (a preset from
		`copy_presets`) over every glyph in the current process mode."""
		# - Init
		copy_options = self.getCopyOptions()
		process_glyphs = getProcessGlyphs(self.pMode)
		print_preset = [' -> '.join(item) for item in preset_list]

		# - Process
		for wGlyph in process_glyphs:
			for process_src, process_dst in preset_list:
				self.copyLayer(wGlyph, process_src, process_dst, copy_options, True, self.chk_crlayer.isChecked())

			wGlyph.update()
			wGlyph.updateObject(wGlyph.fl, 'Glyph: /%s;\tCopy Layer Preset | %s.' %(wGlyph.name, ' | '.join(print_preset)))
# - RUN ------------------------------
dialog = dlg_CopyLayer()  # instantiate the dialog; __init__ calls self.show()
4f2c289d320d89598f93fc9d4f19d5ebb552c658 | 3,386 | py | Python | gen.nginx.py | Frozen12/OnlineIDE | be89c6f13846e48e0e97ed655ff13be414d326be | [
"Apache-2.0"
] | 2 | 2022-01-16T15:55:24.000Z | 2022-01-22T15:06:54.000Z | gen.nginx.py | Frozen12/OnlineIDE | be89c6f13846e48e0e97ed655ff13be414d326be | [
"Apache-2.0"
] | 1 | 2021-05-13T18:49:31.000Z | 2021-05-18T07:31:01.000Z | gen.nginx.py | Frozen12/OnlineIDE | be89c6f13846e48e0e97ed655ff13be414d326be | [
"Apache-2.0"
] | 6 | 2021-05-13T17:14:29.000Z | 2022-01-30T07:20:12.000Z | import os
print("""
##
# You should look at the following URL's in order to grasp a solid understanding
# of Nginx configuration files in order to fully unleash the power of Nginx.
# https://www.nginx.com/resources/wiki/start/
# https://www.nginx.com/resources/wiki/start/topics/tutorials/config_pitfalls/
# https://wiki.debian.org/Nginx/DirectoryStructure
#
# In most cases, administrators will remove this file from sites-enabled/ and
# leave it as reference inside of sites-available where it will continue to be
# updated by the nginx packaging team.
#
# This file will automatically load configuration files provided by other
# applications, such as Drupal or Wordpress. These applications will be made
# available underneath a path with that package name, such as /drupal8.
#
# Please see /usr/share/doc/nginx-doc/examples/ for more detailed examples.
##
# Default server configuration
#
server {
listen %s default_server;
listen [::]:%s default_server;
# SSL configuration
#
# listen 443 ssl default_server;
# listen [::]:443 ssl default_server;
#
# Note: You should disable gzip for SSL traffic.
# See: https://bugs.debian.org/773332
#
# Read up on ssl_ciphers to ensure a secure configuration.
# See: https://bugs.debian.org/765782
#
# Self signed certs generated by the ssl-cert package
# Don't use them in a production server!
#
# include snippets/snakeoil.conf;
root /var/www/html;
error_page 404 /notfound;
# Add index.php to the list if you are using PHP
index index.html index.htm index.nginx-debian.html;
server_name _;
location / {
proxy_pass http://0.0.0.0:8000/;
}
location /terminal {
proxy_pass http://0.0.0.0:8001/;
}
location /terminal/ws {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://0.0.0.0:8001/ws;
}
location /preview {
proxy_pass http://0.0.0.0:5000/;
}
location /notfound {
root /krypton/worker;
}
# pass PHP scripts to FastCGI server
#
#location ~ \.php$ {
# include snippets/fastcgi-php.conf;
#
# # With php-fpm (or other unix sockets):
# fastcgi_pass unix:/run/php/php7.3-fpm.sock;
# # With php-cgi (or other tcp sockets):
# fastcgi_pass 127.0.0.1:9000;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# Virtual Host configuration for example.com
#
# You can move that to a different file under sites-available/ and symlink that
# to sites-enabled/ to enable it.
#
#server {
# listen 80;
# listen [::]:80;
#
# server_name example.com;
#
# root /var/www/example.com;
# index index.html;
#
# location / {
# try_files $uri $uri/ =404;
# }
#}
"""%(os.environ.get("PORT"), os.environ.get("PORT"))) | 30.781818 | 80 | 0.603662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,332 | 0.984052 |
4f2d670c86eaa242941f2df0207696db4b4a23b2 | 2,202 | py | Python | pycmbs/tests/test_license.py | pygeo/pycmbs | 0df863e1575ffad21c1ea9790bcbd3a7982d99c6 | [
"MIT"
] | 9 | 2015-04-01T04:22:25.000Z | 2018-08-31T03:51:34.000Z | pycmbs/tests/test_license.py | pygeo/pycmbs | 0df863e1575ffad21c1ea9790bcbd3a7982d99c6 | [
"MIT"
] | 14 | 2015-01-27T20:33:10.000Z | 2016-06-02T07:23:25.000Z | pycmbs/tests/test_license.py | pygeo/pycmbs | 0df863e1575ffad21c1ea9790bcbd3a7982d99c6 | [
"MIT"
] | 8 | 2015-02-07T20:46:42.000Z | 2019-10-25T00:36:32.000Z | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import os
import glob
import unittest
class TestCodingStandards(unittest.TestCase):
    """Coding-standard checks: every Python file must carry the license text."""

    def test_PythonFiles_HaveLicenseText(self):
        """Fail with the list of offending files if any lacks the license."""
        # Removed the unused `file_basename` local and its dead commented-out
        # condition; license_missing() already handles all the skip rules.
        files_missing_license = [filename for filename in find_python_files()
                                 if license_missing(filename)]

        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(len(files_missing_license), 0,
                         str(files_missing_license))
def find_python_files():
    """Collect all *.py and *.pyx files of the pyCMBS installation.

    The search root is two directory levels above this file. The returned
    list contains all *.py paths first, followed by all *.pyx paths.
    """
    # Root path relative to the location of this file (trailing separator kept).
    root = os.sep.join([os.path.dirname(os.path.realpath(__file__)), '..', '..', ''])

    def _collect(suffix):
        # One os.walk pass per suffix preserves the original ordering:
        # every .py result before any .pyx result.
        return [os.path.join(dirpath, name)
                for dirpath, _dirnames, filenames in os.walk(root)
                for name in filenames if name.endswith(suffix)]

    return _collect('.py') + _collect('.pyx')
def license_missing(filename):
    """Return True if *filename* lacks the pyCMBS license header.

    Files are never reported when they live in a skipped directory
    ('docsrc'), are a skipped file ('emd.py'), or their path contains a
    skip tag ('cartopy').

    :param filename: path of the file to check
    :raises IOError: if the file exists but cannot be read
    """
    license_string = \
        "This file is part of pyCMBS." + "\n" + \
        "(c) 2012- Alexander Loew" + "\n" + \
        "For COPYING and LICENSE details, please refer to the LICENSE file"

    skip_dirs = ['docsrc']
    skip_files = ['emd.py']
    skip_tags = ['cartopy']

    # check first if directory shall be skipped
    if any(sd in os.path.dirname(filename) for sd in skip_dirs):
        return False

    # 'with' guarantees the handle is closed even if read() raises
    # (the original leaked the file handle on error).
    with open(filename, 'r') as fh:
        file_contents = fh.read()

    if license_string in file_contents:
        return False
    if os.path.basename(filename) in skip_files:
        return False
    if any(tag in filename for tag in skip_tags):
        return False
    return True
| 28.230769 | 95 | 0.637148 | 563 | 0.255677 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.288374 |
4f2d92497c994456f8956b4b8039fe058d34049f | 2,674 | py | Python | torch_geometric/utils/homophily.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | 1 | 2022-02-21T13:23:19.000Z | 2022-02-21T13:23:19.000Z | torch_geometric/utils/homophily.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | null | null | null | torch_geometric/utils/homophily.py | LingxiaoShawn/pytorch_geometric | 50b7bfc4a59b5b6f7ec547ff862985f3b2e22798 | [
"MIT"
] | null | null | null | from typing import Union
import torch
from torch import Tensor
from torch_scatter import scatter_mean
from torch_sparse import SparseTensor
from torch_geometric.typing import Adj, OptTensor
def homophily(edge_index: Adj, y: Tensor, batch: OptTensor = None,
              method: str = 'edge') -> Union[float, Tensor]:
    r"""The homophily of a graph characterizes how likely nodes with the same
    label are near each other in a graph.

    There are many measures of homophily that fits this definition.
    In particular:

    - In the `"Beyond Homophily in Graph Neural Networks: Current Limitations
      and Effective Designs" <https://arxiv.org/abs/2006.11468>`_ paper, the
      homophily is the fraction of edges in a graph which connects nodes
      that have the same class label:

      .. math::
        \text{homophily} = \frac{| \{ (v,w) : (v,w) \in \mathcal{E} \wedge
        y_v = y_w \} | } {|\mathcal{E}|}

      That measure is called the *edge homophily ratio*.

    - In the `"Geom-GCN: Geometric Graph Convolutional Networks"
      <https://arxiv.org/abs/2002.05287>`_ paper, edge homophily is normalized
      across neighborhoods:

      .. math::
        \text{homophily} = \frac{1}{|\mathcal{V}|} \sum_{v \in \mathcal{V}}
        \frac{ | \{ (w,v) : w \in \mathcal{N}(v) \wedge y_v = y_w \} | }
        { |\mathcal{N}(v)| }

      That measure is called the *node homophily ratio*.

    Args:
        edge_index (Tensor or SparseTensor): The graph connectivity.
        y (Tensor): The labels.
        batch (LongTensor, optional): Batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots,B-1\}}^N`, which assigns
            each node to a specific example. (default: :obj:`None`)
        method (str, optional): The method used to calculate the homophily,
            either :obj:`"edge"` (first formula) or :obj:`"node"`
            (second formula). (default: :obj:`"edge"`)
    """
    assert method in ['edge', 'node']
    # Flatten a trailing singleton label dimension, e.g. [N, 1] -> [N].
    y = y.squeeze(-1) if y.dim() > 1 else y

    if isinstance(edge_index, SparseTensor):
        # SparseTensor.coo() yields (row, col, value); assigning to
        # (col, row) swaps the convention so indices line up with the
        # dense branch below.
        # NOTE(review): assumed intentional -- confirm against callers.
        col, row, _ = edge_index.coo()
    else:
        row, col = edge_index

    if method == 'edge':
        # 1.0 for every edge whose endpoints share a label, else 0.0.
        out = torch.zeros(row.size(0), device=row.device)
        out[y[row] == y[col]] = 1.
        if batch is None:
            # Edge homophily ratio: fraction of label-preserving edges.
            return float(out.mean())
        else:
            # Per-graph ratio, grouping edges by the batch id of `col`.
            return scatter_mean(out, batch[col], dim=0)
    else:
        out = torch.zeros(row.size(0), device=row.device)
        out[y[row] == y[col]] = 1.
        # Node homophily: average the edge indicators per `col` node first,
        # then across nodes (or per graph when `batch` is given).
        out = scatter_mean(out, col, 0, dim_size=y.size(0))
        if batch is None:
            return float(out.mean())
        else:
            return scatter_mean(out, batch, dim=0)
| 36.135135 | 78 | 0.60546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,620 | 0.605834 |
4f2da43ba15b54e4aa7a9570b02b17a2b0fc1c6e | 22 | py | Python | tt/maxvol/__init__.py | rballester/ttpy | a2fdf08fae9d34cb1e5ba28482e82e04b249911b | [
"MIT"
] | null | null | null | tt/maxvol/__init__.py | rballester/ttpy | a2fdf08fae9d34cb1e5ba28482e82e04b249911b | [
"MIT"
] | null | null | null | tt/maxvol/__init__.py | rballester/ttpy | a2fdf08fae9d34cb1e5ba28482e82e04b249911b | [
"MIT"
] | 1 | 2021-01-10T07:02:09.000Z | 2021-01-10T07:02:09.000Z | from _maxvol import *
| 11 | 21 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4f2eed39bc85b82594221090c77cb7382ee39bd1 | 169 | py | Python | paxLibUL/convolution/__init__.py | PAX-ULaval/pax-libraries | 60e065ef480d85a3c03cfad4d2bbc1a70632c98b | [
"MIT"
] | null | null | null | paxLibUL/convolution/__init__.py | PAX-ULaval/pax-libraries | 60e065ef480d85a3c03cfad4d2bbc1a70632c98b | [
"MIT"
] | null | null | null | paxLibUL/convolution/__init__.py | PAX-ULaval/pax-libraries | 60e065ef480d85a3c03cfad4d2bbc1a70632c98b | [
"MIT"
] | null | null | null | # pylint: disable=wildcard-import
from .architectures import *
from .callbacks import *
from .datasets import *
from .visualisation import *
from .weights_init import *
| 24.142857 | 33 | 0.781065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.195266 |
4f309231478a6b77460cb55dce7f7772392ec78f | 11,543 | py | Python | mstrio/api/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | 1 | 2022-02-15T13:18:04.000Z | 2022-02-15T13:18:04.000Z | mstrio/api/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | null | null | null | mstrio/api/migration.py | czyzq/mstrio-py | b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import requests
from mstrio.connection import Connection
from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(err_msg='Error while creating the package holder')
def create_package_holder(connection: Connection, project_id: Optional[str] = None,
error_msg: Optional[str] = None) -> requests.Response:
"""Create a new in-memory metadata package holder.
Args:
connection (Connection): Object representation of connection to
MSTR Server.
project_id (Optional[str]): Optional ID of a project. Defaults to None.
error_msg (Optional[str]): Optional error message. Defaults to None.
Returns:
requests.Response: Response object containing all of the information
returned by the server.
"""
project_id = project_id if project_id is not None else connection.project_id
return connection.post(
url=f'{connection.base_url}/api/packages',
headers={'X-MSTR-ProjectID': project_id}
)
@ErrorHandler(err_msg='Error while updating the package holder with id: {id}')
def update_package_holder(connection: Connection, body: dict, id: str,
                          project_id: Optional[str] = None, prefer: str = "respond-async",
                          error_msg: Optional[str] = None) -> requests.Response:
    """Fill an in-memory metadata package holder from the given specification.

    Currently only supported while the holder is still empty.

    Args:
        connection (Connection): MSTR Server connection object.
        body (dict): dictionarized PackageConfig object (from `to_dict()`).
        id (str): ID of the package holder to fill.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        prefer (str, optional): The API only supports asynchronous mode, so
            the 'Prefer' header must be 'respond-async'. Defaults to
            "respond-async".
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id, 'Prefer': prefer}
    return connection.put(url=f'{connection.base_url}/api/packages/{id}',
                          headers=request_headers, json=body)
@ErrorHandler(err_msg='Error while downloading the package with id: {id}')
def download_package(connection: Connection, id: str, project_id: Optional[str] = None,
                     error_msg: Optional[str] = None) -> requests.Response:
    """Download the binary content of a package.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): ID of the package to download.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id}
    return connection.get(url=f'{connection.base_url}/api/packages/{id}/binary',
                          headers=request_headers)
@ErrorHandler(err_msg='Error while uploading the package with id: {id}')
def upload_package(connection: Connection, id: str, file: bytes, project_id: Optional[str] = None,
                   error_msg: Optional[str] = None) -> requests.Response:
    """Upload a package binary directly into the sandbox holder.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): ID of the package holder to upload into.
        file (bytes): package contents as a binary string.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id}
    return connection.put(url=f'{connection.base_url}/api/packages/{id}/binary',
                          headers=request_headers, files={'file': file})
@ErrorHandler(err_msg='Error while getting the package holder with id: {id}')
def get_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
                       show_content: bool = True,
                       error_msg: Optional[str] = None) -> requests.Response:
    """Get definition of a package, including package status and its detail
    content.
    Args:
        connection (Connection): Object representation of connection to
            MSTR Server.
        id (str): ID of the package to be retrieved.
        project_id (Optional[str]): Optional ID of a project. Defaults to None.
        show_content (bool, optional): Show package content or not. Defaults to
            True.
        error_msg (Optional[str]): Optional error message. Defaults to None.
    Returns:
        requests.Response: Response object containing all of the information
            returned by the server.
    """
    project_id = project_id if project_id is not None else connection.project_id
    return connection.get(
        url=f'{connection.base_url}/api/packages/{id}',
        headers={'X-MSTR-ProjectID': project_id},
        params={'showContent': show_content}
    )
@ErrorHandler(err_msg='Error while deleting the package holder with id: {id}')
def delete_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
                          prefer: str = 'respond-async',
                          error_msg: Optional[str] = None) -> requests.Response:
    """Delete an in-memory metadata package holder, releasing the associated
    Intelligence Server resources.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): ID of the package holder to delete.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        prefer (str, optional): The API only supports asynchronous mode, so
            the 'Prefer' header must be 'respond-async'. Defaults to
            'respond-async'.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id, 'Prefer': prefer}
    return connection.delete(url=f'{connection.base_url}/api/packages/{id}',
                             headers=request_headers)
@ErrorHandler(err_msg='Error while creating the import for package holder with id: {id}')
def create_import(connection: Connection, id: str, project_id: Optional[str] = None,
                  generate_undo: bool = False,
                  error_msg: Optional[str] = None) -> requests.Response:
    """Start an import process for the given package.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): ID of the package for which an import process will be
            created.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        generate_undo (bool, optional): Whether to generate an undo package.
            Defaults to False.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    # TODO: Change to a parameter when any other values are supported
    prefer = 'respond-async'
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id, 'Prefer': prefer}
    request_params = {'packageId': id, 'generateUndo': generate_undo}
    return connection.post(url=f'{connection.base_url}/api/packages/imports',
                           headers=request_headers, params=request_params)
@ErrorHandler(err_msg='Error while getting the import with id: {id}')
def get_import(connection: Connection, id: str, project_id: Optional[str] = None,
               error_msg: Optional[str] = None) -> requests.Response:
    """Fetch the result of a package import process.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): Import process ID.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id}
    return connection.get(url=f'{connection.base_url}/api/packages/imports/{id}',
                          headers=request_headers)
@ErrorHandler(err_msg='Error while deleting the import with id: {id}')
def delete_import(connection: Connection, id: str, project_id: Optional[str] = None,
                  error_msg: Optional[str] = None) -> requests.Response:
    """Close a previously created package import process.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): Import process ID.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    # TODO: Change to a parameter when any other values are supported
    prefer = 'respond-async'
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id, 'Prefer': prefer}
    return connection.delete(url=f'{connection.base_url}/api/packages/imports/{id}',
                             headers=request_headers)
@ErrorHandler(err_msg='Error while creating the undo for import with id: {id}')
def create_undo(connection: Connection, id: str, project_id: Optional[str] = None,
                error_msg: Optional[str] = None) -> requests.Response:
    """Download the undo package binary for an import process.

    Args:
        connection (Connection): MSTR Server connection object.
        id (str): Import process ID.
        project_id (Optional[str]): ID of the project; when None, the
            connection's current project is used. Defaults to None.
        error_msg (Optional[str]): Custom message for the error handler.
            Defaults to None.

    Returns:
        requests.Response: Raw HTTP response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    request_headers = {'X-MSTR-ProjectID': project_id}
    return connection.get(
        url=f'{connection.base_url}/api/packages/imports/{id}/undoPackage/binary',
        headers=request_headers)
| 40.644366 | 98 | 0.664559 | 0 | 0 | 0 | 0 | 11,389 | 0.985293 | 0 | 0 | 7,308 | 0.632235 |
4f32878e0e19ae616ff00d05ec4a7c3db599966a | 3,437 | py | Python | lomap/examples/ijrr2014_rec_hor/environment.py | xli4217/tltl_reward | b24653f26f588858b42abd4b225a9766ea8771db | [
"MIT"
] | null | null | null | lomap/examples/ijrr2014_rec_hor/environment.py | xli4217/tltl_reward | b24653f26f588858b42abd4b225a9766ea8771db | [
"MIT"
] | null | null | null | lomap/examples/ijrr2014_rec_hor/environment.py | xli4217/tltl_reward | b24653f26f588858b42abd4b225a9766ea8771db | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# Copyright (C) 2012-2015, Alphan Ulusoy (alphan@bu.edu)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Environment class
#
class Environment:
    """Service requests placed on grid cells for three predefined scenarios.

    Both dictionaries are keyed by (x, y) cell coordinates:
    - global_reqs: static requests with a 'reqs' set and a display 'color'.
    - local_reqs: dynamic requests additionally carrying an 'on' flag.
    """

    def __init__(self, case):
        """Defines regions in the environment for the given scenario name."""
        self.global_reqs = dict()
        self.local_reqs = dict()
        if case == 'case1':
            self.global_reqs = {
                (3, 1): {'reqs': {'photo'}, 'color': 'green'},
                (5, 10): {'reqs': {'upload'}, 'color': 'blue'},
                (9, 7): {'reqs': {'upload'}, 'color': 'blue'},
            }
            # A horizontal strip of unsafe cells at y == 7 for x in 1..7,
            # plus two service-request cells.
            self.local_reqs = {
                (x, 7): {'reqs': {'unsafe'}, 'on': True, 'color': 'yellow'}
                for x in range(1, 8)
            }
            self.local_reqs[(9, 4)] = {'reqs': {'extinguish'}, 'on': True, 'color': 'red'}
            self.local_reqs[(9, 2)] = {'reqs': {'assist'}, 'on': True, 'color': 'cyan'}
        elif case == 'case2':
            self.global_reqs = {
                (3, 3): {'reqs': {'photo1'}, 'color': 'LightGreen'},
                (19, 6): {'reqs': {'photo2'}, 'color': 'Green'},
                (11, 10): {'reqs': {'upload'}, 'color': 'blue'},
            }
            self.local_reqs = {
                (8, 8): {'reqs': {'pickup'}, 'on': True, 'color': 'red'},
                (6, 7): {'reqs': {'dropoff'}, 'on': True, 'color': 'cyan'},
                (9, 6): {'reqs': {'pickup'}, 'on': True, 'color': 'red'},
                (3, 5): {'reqs': {'dropoff'}, 'on': True, 'color': 'cyan'},
            }
        elif case == 'case3':
            self.global_reqs = {
                (3, 3): {'reqs': {'photo1'}, 'color': 'LightGreen'},
                (19, 6): {'reqs': {'photo2'}, 'color': 'DarkGreen'},
                (11, 10): {'reqs': {'upload'}, 'color': 'blue'},
            }
            self.local_reqs = {
                (14, 8): {'reqs': {'pickup1'}, 'on': True, 'color': 'Red'},
                (12, 7): {'reqs': {'dropoff1'}, 'on': True, 'color': 'Cyan'},
                (13, 4): {'reqs': {'pickup2'}, 'on': True, 'color': 'DarkRed'},
                (16, 6): {'reqs': {'dropoff2'}, 'on': True, 'color': 'DarkCyan'},
            }
        else:
            assert False, 'Case %s is not implemented' % case
| 48.408451 | 81 | 0.632528 | 2,621 | 0.762584 | 0 | 0 | 0 | 0 | 0 | 0 | 2,009 | 0.584521 |
4f343ede862d2971e0b163d9a064f189c0efa727 | 1,452 | py | Python | scripts/dialent/task3/test.py | victorbocharov/factRuEval-2016 | ad5ce5d753cee8307c79b0c4978caeed05ba7d07 | [
"MIT"
] | 52 | 2016-01-13T16:54:14.000Z | 2022-03-04T19:05:38.000Z | scripts/dialent/task3/test.py | bond005/factRuEval-2016 | 3a1b4540b1025fa73118d0e065c526437b37df12 | [
"MIT"
] | 147 | 2016-01-13T21:18:30.000Z | 2021-03-30T10:19:55.000Z | scripts/dialent/task3/test.py | bond005/factRuEval-2016 | 3a1b4540b1025fa73118d0e065c526437b37df12 | [
"MIT"
] | 37 | 2015-12-18T10:36:58.000Z | 2022-03-04T19:06:07.000Z | # This module deals with test data representation for the third task
#########################################################################################
import os
from dialent.common.util import normalize
from dialent.common.util import safeOpen
from dialent.objects.fact import Fact
#########################################################################################
class Test:
    """Test markup for the third track."""

    def __init__(self, name, dir='.'):
        """Load the markup for the given document.

        name - file to load the data from (without an extension)
        """
        try:
            self.name = name
            full_name = os.path.join(dir, name + '.task3')
            self.load(full_name)
        except Exception as e:
            print('Failed to load "{}"'.format(full_name))
            print(e)

    def load(self, filename):
        """Parse *filename*, collecting blank-line-separated fact records."""
        self.facts = []
        pending = ''
        with safeOpen(filename) as f:
            for raw_line in f:
                line = normalize(raw_line)
                if line:
                    # Still inside a fact record: keep accumulating.
                    pending += line + '\n'
                    continue
                # Blank line terminates the current record, if any.
                if pending:
                    self.facts.append(Fact.fromTest(pending))
                    pending = ''
        # Flush a trailing record with no terminating blank line.
        if pending:
            self.facts.append(Fact.fromTest(pending))
| 30.25 | 89 | 0.451791 | 1,044 | 0.719008 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.334022 |
4f3441ef7f4d1a2370cb25d0b0057052c0eb45a0 | 8,490 | py | Python | resources/namespace-check/index.py | aws-samples/eks-configrules-with-cdk | 735658a0fe92c414d20314176d91e2c797defbd7 | [
"MIT-0"
] | 1 | 2022-03-23T18:18:49.000Z | 2022-03-23T18:18:49.000Z | resources/namespace-check/index.py | aws-samples/eks-configrules-with-cdk | 735658a0fe92c414d20314176d91e2c797defbd7 | [
"MIT-0"
] | null | null | null | resources/namespace-check/index.py | aws-samples/eks-configrules-with-cdk | 735658a0fe92c414d20314176d91e2c797defbd7 | [
"MIT-0"
] | null | null | null |
import authutils as auth
import os
import kubernetes
from kubernetes.client.rest import ApiException
import boto3
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from botocore import session
from awscli.customizations.eks.get_token import STSClientFactory, TokenGenerator, TOKEN_EXPIRATION_MINS
import traceback
import logging
import json
def check_compliancechange(configrule, current_state):
    """Return True if the rule's compliance state differs from the last one.

    Args:
        configrule (str): name of the AWS Config rule to inspect.
        current_state (str): compliance value just computed (e.g.
            'COMPLIANT' / 'NON_COMPLIANT').

    Returns:
        bool: True when the state changed (or no prior evaluation exists),
        False when it matches; None when the Config API call fails
        (preserved from the original error path).
    """
    try:
        client = boto3.client('config')
        config_status = client.get_compliance_details_by_config_rule(ConfigRuleName=configrule)
        results = config_status['EvaluationResults']
        # BUG FIX: the original used `change_compliance_state == True`
        # (a no-op comparison) in every branch and indexed results[0]
        # even when the list was empty, raising IndexError.
        if not results:
            logging.info('no prior evaluation results recorded for rule')
            return True
        previous_state = results[0]['ComplianceType']
        if current_state == previous_state:
            logging.info('Compliance state matches')
            return False
        logging.info(f'Compliance state has changed, previous state was: {previous_state}, current state is: {current_state}')
        return True
    except ClientError as e:
        logging.error('issue determining change in compliance')
        logging.error(str(e))
        return None
# Configure the root logger so per-pod evaluation messages reach CloudWatch.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# NOTE(review): in the AWS Lambda runtime the root logger typically already
# has a handler, in which case basicConfig() is a no-op and only the
# setLevel() above takes effect -- confirm whether the custom format is used.
logging.basicConfig(
    format='%(levelname)s %(threadName)s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S',
    level=logging.INFO
)
def evaluate_compliance(configuration_item):
    """Connect to the EKS cluster and run the default-namespace check.

    Args:
        configuration_item (str): name of the EKS cluster to evaluate.

    Returns:
        dict: evaluation result from check_default_namespace(), or the
        exception text (str) when setup fails -- the str return is kept
        for backward compatibility with existing callers.
    """
    try:
        token = auth.get_k8s_cluster_token(configuration_item)
        logging.info('Token received')
        endpoint = auth.get_k8s_cluster_endpoint(configuration_item)
        logging.info('Endpoint received')
        k8s_api = auth.initialize_k8s_api(configuration_item, token, endpoint)
        logging.info('K8s API initialized')
        # FIX: previous message mentioned "privilege escalation", copy-pasted
        # from another rule; this rule checks the default namespace.
        logging.info(f'checking for pods running in the default namespace in cluster {configuration_item}')
        return check_default_namespace(k8s_api, cluster_name=configuration_item)
    except Exception as e:
        # FIX: log the full traceback (the `traceback` module was imported at
        # file level but never used); still return str(e) so callers behave
        # as before.
        logging.error('Error evaluating compliance for cluster %s', configuration_item)
        logging.error(traceback.format_exc())
        return str(e)
# def auth.get_k8s_cluster_token(cluster_name):
# work_session = session.get_session()
# client_factory = STSClientFactory(work_session)
# sts_client = client_factory.get_sts_client(role_arn=None)
# token = TokenGenerator(sts_client).get_token(cluster_name)
# token_expiration = datetime.utcnow() + timedelta(minutes=TOKEN_EXPIRATION_MINS)
# expiration = token_expiration.strftime('%Y-%m-%dT%H:%M:%SZ')
# response = {
# "kind": "ExecCredential",
# "apiVersion": "client.authentication.k8s.io/v1alpha1",
# "spec": {},
# "status": {
# "expirationTimestamp": expiration,
# "token": token
# }
# }
# token = response['status']['token']
# return token
# Check network policy is configured on namespacedef check_pods_default_namespace(k8s_api,cluster_name):
def check_default_namespace(k8s_api, cluster_name):
    """Evaluate whether any pods in the cluster run in the ``default`` namespace.

    Args:
        k8s_api: initialized Kubernetes CoreV1Api client for the cluster.
        cluster_name (str): name of the EKS cluster being evaluated.

    Returns:
        dict: evaluation with keys ``compliance_type`` and ``annotation``
        (plus ``clusterarn`` when the cluster could be described).
    """
    try:
        logging.info('checking cluster exists')
        clustercheck = describe_cluster(cluster_name)
        # describe_cluster returns the boto3 error code string on failure.
        errors = ['ResourceNotFoundException', 'ClientException',
                  'ServerException', 'ServiceUnavailableException']
        if clustercheck in errors:
            logging.error(f'error {clustercheck} encountered discovering cluster {cluster_name}')
            return {
                "compliance_type": 'NOT_APPLICABLE',
                "annotation": f"Validation was not run against cluster {cluster_name}, error encountered: {clustercheck}"
            }
        pods = k8s_api.list_pod_for_all_namespaces(watch=False)
        pods_default_namespace = []
        for pod in pods.items:
            if pod.metadata.namespace == 'default':
                logging.info(f'pod {pod.metadata.name} is running in default namepace')
                pods_default_namespace.append(pod.metadata.name)
            else:
                logging.info(f'pod {pod.metadata.name} is compliant, running in namspace {pod.metadata.namespace}')
        # (The original third "Unable to determine" branch was unreachable:
        # the two length checks covered every case.)
        if pods_default_namespace:
            return {
                "compliance_type": 'NON_COMPLIANT',
                "annotation": f"Pods: {pods_default_namespace} are running in the default namespace in cluster: {cluster_name}, ensure that pods are not running in the default namespace",
                "clusterarn": clustercheck
            }
        return {
            "compliance_type": 'COMPLIANT',
            "annotation": f"Cluster: {cluster_name} does not have any pods running in the default namespace",
            "clusterarn": clustercheck
        }
    except Exception as e:
        # BUG FIX: the handler previously only logged (with a message about
        # "network policies", copy-pasted from another rule) and implicitly
        # returned None, which made the caller crash when indexing the result.
        logging.error('issue encountered while checking pods in the default namespace')
        logging.error(e)
        return {
            "compliance_type": 'NOT_APPLICABLE',
            "annotation": f"Unable to determine status of Cluster: {cluster_name}, error: {e}"
        }
def describe_cluster(cluster_name):
    """Validate that the named EKS cluster exists and return its ARN.

    On an AWS API failure the boto3 error code string (for example
    'ResourceNotFoundException') is returned instead; callers compare the
    result against a known list of error codes.
    """
    try:
        eks = boto3.client('eks')
        details = eks.describe_cluster(name=cluster_name)
        return details['cluster']['arn']
    except ClientError as e:
        logging.error(f'Issue describing cluster {cluster_name}')
        logging.error(str(e))
        return e.response['Error']['Code']
# def auth.initialize_k8s_api(cluster_name, token, endpoint):
# configuration = kubernetes.client.Configuration()
# configuration.api_key['authorization'] = token
# configuration.api_key_prefix['authorization'] = 'Bearer'
# configuration.host = endpoint
# configuration.verify_ssl = False
# client = kubernetes.client.api_client.ApiClient(configuration)
# api = kubernetes.client.api.core_v1_api.CoreV1Api(client)
# return api
# def get_k8s_cluster_endpoint(cluster_name):
# client = boto3.client('eks')
# response = client.describe_cluster(name=cluster_name)
# endpoint = response['cluster']['endpoint']
# return endpoint
def lambda_handler(event, context):
    """AWS Lambda entry point for the Config custom rule.

    Parses the AWS Config invocation event, evaluates each in-scope EKS
    cluster with evaluate_compliance(), and reports results back to AWS
    Config via put_evaluations().

    Args:
        event: AWS Config invocation payload; must contain 'invokingEvent',
            'ruleParameters' (with key 'inscopeclusters') and 'resultToken'.
        context: Lambda context object (unused here).
    """
    try:
        logging.info(event)
        # decode the AWS Config payloads (both are JSON-encoded strings)
        invoking_event = json.loads(event['invokingEvent'])
        rule_params = json.loads(event['ruleParameters'])
        logging.info(rule_params)
        # NOTE(review): 'inscopeclusters' is wrapped in a one-element list,
        # so only a single cluster name per invocation is supported -- confirm.
        configuration_items = [rule_params['inscopeclusters']]
        config = boto3.client('config')
        logging.info('Setting up connection to EKS cluster')
        for configuration_item in configuration_items:
            logging.info(f'checking compliance for cluster {configuration_item}')
            evaluation = evaluate_compliance(configuration_item)
            logging.info('evaluation result')
            logging.info(evaluation)
            logging.info('putting compliance findings')
            # NOTE(review): evaluate_compliance can return a plain string on
            # failure, and NOT_APPLICABLE results may lack 'clusterarn';
            # either case raises here and is swallowed by the outer except --
            # confirm this is intended.
            try:
                response = config.put_evaluations(
                    Evaluations=[
                        {
                            'ComplianceResourceType': 'AWS::EKS::Cluster',
                            'ComplianceResourceId': evaluation['clusterarn'],
                            'ComplianceType': evaluation['compliance_type'],
                            'Annotation': evaluation['annotation'],
                            'OrderingTimestamp': invoking_event['notificationCreationTime']
                        },
                    ],
                    ResultToken=event['resultToken'])
            except ClientError as e:
                logging.error('error in putting config check results')
                logging.error(str(e))
    except Exception as e:
        logging.error('Error in compliance check operation')
        logging.error(str(e))
| 43.538462 | 195 | 0.634629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,715 | 0.437574 |
4f34a307cb9dfa5bbf4cfb31c065fa12965bf81c | 2,382 | py | Python | test/test_todos.py | rajasgs/flask-rest-math-simple | 3a4206be66f3e53e669daaca924eb32b34b28822 | [
"MIT"
] | null | null | null | test/test_todos.py | rajasgs/flask-rest-math-simple | 3a4206be66f3e53e669daaca924eb32b34b28822 | [
"MIT"
] | 2 | 2021-04-27T13:53:45.000Z | 2021-06-02T02:34:43.000Z | test/test_todos.py | rajasgs/flask-rest-math-simple | 3a4206be66f3e53e669daaca924eb32b34b28822 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# the above line is to avoid 'SyntaxError: Non-UTF-8 code starting with' error
'''
Created on
Course work:
@author: raja
Source:
https://realpython.com/testing-third-party-apis-with-mocks/
'''
# Standard library imports...
from unittest.mock import Mock, patch
# Third-party imports...
from nose.tools import assert_true
import requests
# Third-party imports...
from nose.tools import assert_is_not_none
# Local imports...
from services import get_todos
def test_request_response():
# Send a request to the API server and store the response.
response = requests.get('http://127.0.0.1:5000/placeholder/todos')
# Confirm that the request-response cycle completed successfully.
assert_true(response.ok)
def test_request_response_1():
# Call the service, which will send a request to the server.
response = get_todos()
# If the request is sent successfully, then I expect a response to be returned.
assert_is_not_none(response)
@patch('services.requests.get')
def test_getting_todos(mock_get):
# Configure the mock to return a response with an OK status code.
mock_get.return_value.ok = True
# Call the service, which will send a request to the server.
response = get_todos()
# If the request is sent successfully, then I expect a response to be returned.
assert_is_not_none(response)
def test_getting_todos_1():
with patch('services.requests.get') as mock_get:
# Configure the mock to return a response with an OK status code.
mock_get.return_value.ok = True
# Call the service, which will send a request to the server.
response = get_todos()
# If the request is sent successfully, then I expect a response to be returned.
assert_is_not_none(response)
def test_getting_todos_2():
mock_get_patcher = patch('services.requests.get')
# Start patching `requests.get`.
mock_get = mock_get_patcher.start()
# Configure the mock to return a response with an OK status code.
mock_get.return_value.ok = True
# Call the service, which will send a request to the server.
response = get_todos()
# Stop patching `requests.get`.
mock_get_patcher.stop()
# If the request is sent successfully, then I expect a response to be returned.
assert_is_not_none(response) | 26.764045 | 83 | 0.714945 | 0 | 0 | 0 | 0 | 387 | 0.162469 | 0 | 0 | 1,383 | 0.580605 |
4f353b795b25a90ac549d4048ded818484a8ed13 | 21,895 | py | Python | maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 16 | 2020-09-07T15:28:57.000Z | 2022-03-03T02:52:25.000Z | maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 3 | 2021-01-06T12:02:54.000Z | 2021-03-14T14:08:57.000Z | maskrcnn_benchmark/modeling/roi_heads/box_head/loss.py | RyanXLi/OneshotDet | 77f629978d9d1739787b08de8cccea81341507bf | [
"BSD-2-Clause"
] | 4 | 2020-11-13T09:21:36.000Z | 2021-05-27T02:12:19.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch.nn import functional as F
from maskrcnn_benchmark.layers import smooth_l1_loss, SigmoidFocalLoss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
BalancedPositiveNegativeSampler
)
from maskrcnn_benchmark.modeling.utils import cat
class FastRCNNLossComputation(object):
"""
Computes the loss for Faster R-CNN.
Also supports FPN
"""
    def __init__(
        self,
        cfg,
        proposal_matcher,
        fg_bg_sampler,
        box_coder,
        cls_agnostic_bbox_reg=False
    ):
        """
        Arguments:
            cfg: global config node; FEW_SHOT.* and MODEL.FCOS.* fields are
                read here and in the other methods.
            proposal_matcher (Matcher): assigns each proposal a ground-truth
                index (or a negative sentinel for unmatched proposals).
            fg_bg_sampler (BalancedPositiveNegativeSampler): samples a
                balanced set of positive/negative proposals.
            box_coder (BoxCoder): encodes/decodes box regression targets.
            cls_agnostic_bbox_reg (bool): whether box regression is shared
                across classes.
        """
        self.cfg = cfg
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.box_coder = box_coder
        self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
        # cls_loss_func is only created for the focal-loss configuration;
        # for other SECOND_STAGE_CLS_LOSS values the attribute is never set.
        if cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':
            self.cls_loss_func = SigmoidFocalLoss(
                cfg.MODEL.FCOS.LOSS_GAMMA,
                cfg.FEW_SHOT.SECOND_STAGE_LOSS_ALPHA
            )
    def match_targets_to_proposals(self, proposal, target):
        """Assign each proposal to a ground-truth box of the same image.

        Args:
            proposal: BoxList of N candidate boxes.
            target: BoxList of M ground-truth boxes carrying a "labels" field.

        Returns:
            BoxList of matched ground-truth boxes (one per proposal) with the
            fields "matched_idxs" (GT index, or a negative matcher sentinel)
            and, when cfg.FEW_SHOT.SOFT_LABELING is on, "soft_labels"
            (the IoU of each proposal with its matched GT, 0 if unmatched).
        """
        match_quality_matrix = boxlist_iou(target, proposal)  # (M, N) IoU table
        matched_idxs = self.proposal_matcher(match_quality_matrix)  # (N,), negative (-1/-2) for unmatched
        ###############################
        ### create soft labels  ###  currently only assume 1 class
        ###############################
        if self.cfg.FEW_SHOT.SOFT_LABELING:
            match_iou_matrix = match_quality_matrix.t()  # (N, M)
            # assert match_iou_matrix.size(1) == 2, 'only supporting 1 classes right now, but received {} classes'.format(match_iou_matrix.size(1))
            # assert torch.sum(match_iou_matrix[torch.nonzero(matched_idxs<1), 1]) == 0, \
            #     ['positive class column of non-positive prediction should all be zero ! ',
            #      torch.sum(match_iou_matrix[torch.nonzero(matched_idxs>0), 1])]
            # Clamp the negative sentinels to 0 so they are valid gather
            # indices, look up each proposal's IoU with its matched GT...
            matched_idxs_temp = matched_idxs.clone()
            matched_idxs_temp[matched_idxs_temp<0] = 0
            match_iou = match_iou_matrix[torch.arange(len(matched_idxs_temp)).long(), matched_idxs_temp].clone()
            # ...then zero the IoU of proposals that were not actually matched.
            matched_idxs_invalid_inds = torch.nonzero(matched_idxs<0)
            match_iou[matched_idxs_invalid_inds] = 0
            # match_iou_matrix_max, _ = match_iou_matrix.max(dim=1)
            # match_iou_big_iou = torch.nonzero(match_iou_matrix_max>0.5).squeeze(1)
            # print(match_iou_big_iou)
            # print('iiou', torch.cat([match_iou[match_iou_big_iou].unsqueeze(1), match_iou_matrix[match_iou_big_iou]], dim=1))
        # Fast RCNN only need "labels" field for selecting the targets
        target = target.copy_with_fields("labels")
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        if self.cfg.FEW_SHOT.SOFT_LABELING:
            matched_targets.add_field('soft_labels', match_iou)
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets
def soft_labeling_function(self, t):
'''
discrete: t >= 0.5
linear : t
transLinear: (0.2*t+0.8) * (t>=0.5) + 0.5 ~ 1 --> 0.9 ~ 1
(2.25*t-0.225) * (t>=0.1) * (t<0.5) + 0.1 ~ 0.5 --> 0 ~ 0.9
0 0 ~ 0.1
trans4thLinear: (0.2*t + 0.8) * (t>=0.5) + 0.5 ~ 1 --> 0.9 ~ 1
0.9*(2*t)**4 0 ~ 0.5 --> 0 ~ 0.9
'''
if self.cfg.FEW_SHOT.SOFT_LABELING_FUNC == 'discrete':
return (t>=0.5).float()
elif self.cfg.FEW_SHOT.SOFT_LABELING_FUNC == 'linear':
return t
elif self.cfg.FEW_SHOT.SOFT_LABELING_FUNC == 'transLinear': # transitional linear
upper = (0.2*t+0.8) * (t>=0.5).float()
middle = (2.25*t-0.225) * (t>=0.1).float() * (t<0.5).float()
lower = 0
return upper+middle+lower
elif self.cfg.FEW_SHOT.SOFT_LABELING_FUNC == 'trans4thLinear': # transitional 4th order linear
upper = (0.2*t+0.8) * (t>=0.5).float()
lower = 0.9*((2*t)**4) * (t<0.5).float()
return upper+lower
def prepare_targets(self, proposals, targets):
labels = []
regression_targets = []
if self.cfg.FEW_SHOT.SOFT_LABELING:
soft_labels = []
for proposals_per_image, targets_per_image in zip(proposals, targets):
matched_targets = self.match_targets_to_proposals( # -1, -2
proposals_per_image, targets_per_image
)
matched_idxs = matched_targets.get_field("matched_idxs")
if self.cfg.FEW_SHOT.SOFT_LABELING:
soft_labels_per_image = self.soft_labeling_function(matched_targets.get_field("soft_labels"))
soft_labels.append(soft_labels_per_image)
labels_per_image = matched_targets.get_field("labels")
labels_per_image = labels_per_image.to(dtype=torch.int64)
# Label background (below the low threshold)
bg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
labels_per_image[bg_inds] = 0
# Label ignore proposals (between low and high thresholds)
ignore_inds = matched_idxs == Matcher.BETWEEN_THRESHOLDS
labels_per_image[ignore_inds] = -1 # -1 is ignored by sampler
# compute regression targets
regression_targets_per_image = self.box_coder.encode(
matched_targets.bbox, proposals_per_image.bbox
)
labels.append(labels_per_image)
regression_targets.append(regression_targets_per_image)
if self.cfg.FEW_SHOT.SOFT_LABELING:
return labels, soft_labels, regression_targets
return labels, regression_targets
# def sample_proposal_per_box(self, box, image_size, iou_range=(0.5, 0.9), num=100):
# '''
# receive box (x1, y1, x2, y2)
# '''
# def uniSample(a, b):
# return torch.rand(1).squeeze()*(b-a) + a
# def b_area(box):
# return (box[2]-box[0])*(box[3]-box[1])
# def b_iou(box1, box2):
# l = torch.max(box1[0], box2[0])
# t = torch.max(box1[1], box2[1])
# r = torch.min(box1[2], box2[2])
# b = torch.min(box1[3], box2[3])
# area = (r-l)*(b-t)
# return area / ( b_area(box1) + b_area(box2) - area )
# x1, y1, x2, y2 = box
# W, H = image_size
# assert x1>=0 and x1<=x2 and x2<W and \
# y1>=0 and y1<=y2 and y2<H, 'input box not valid {} {} {} {}'.format(x1, y1, x2, y2)
# center_x, center_y = (x1+x2)/2, (y1+y2)/2
# box_w = x2-x1+1
# box_h = y2-y1+1
# proposals = []
# while len(proposals) < num:
# rand_iou = uniSample(*iou_range) # restrict to iou range
# min_iou_term = 2*rand_iou/(rand_iou+1)
# xc_prop = uniSample(0, 0.3333) # restrict to 0~1/3
# yc_prop_min = torch.max(0., min_iou_term)
# yc_prop_max = torch.min(0.3333, min_iou_term)
# if yc_prop_max <= yc_prop_min:
# continue
# yc_prop = uniSample(yc_prop_min, yc_prop_max)
# xc_dir = torch.randint(2).float()*2-1
# xc = center_x + xc_dir*box_w*xc_prop
# yc_dir = torch.randint(2).float()*2-1
# yc = center_y + yc_dir*box_h*yc_prop
# for i in range(100): # each center location only try at most 100 times, resort another center location if not
# width_min = rand_iou*box_w,
# width_max = box_w/rand_iou
# width = uniSample(width_min, width_max)
# height_min = rand_iou*box_h / min(width, box_w)
# height_max = box_h/rand_iou
# height = uniSample(height_min, height_max)
# rand_box = torch.tensor( [int(xc-width/2), int(yc-height/2), int(xc+width/2), int(yc+height/2)]).float()
# rand_box.clamp_(0, image_size-1)
# if b_iou(rand_box, box) >= rand_iou:
# break
# proposals.append(rand_box)
# proposals = torch.stack(proposals, dim=0) # (num, 4) xyxy
# return proposals
# def handcraft_sample_proposals(self, targets, num_per_image=100):
# proposals = []
# labels = []
# num_sample_per_box = 100
# for targets_per_image in targets:
# image_size = targets_per_image.size
# bboxes_per_image = targets_per_image.bbox
# labels_per_image = targets.get_field('labels')
# proposals_per_image = []
# labels_per_image = []
# for bbox, label in zip(bboxes_per_image, labels_per_image):
# sampled_bboxes = self.sample_proposal_per_box(bbox, image_size, iou_range=(0.5, 0.9), num=num_sample_per_box)
# sampled_labels = label.unsqueeze().repeat(len(sampled_bboxes))
# proposals_per_image.append(sampled_bboxes)
# labels_per_image.append(sampled_labels)
# proposals_per_image = torch.cat(proposals_per_image, dim=0)
# labels_per_image = torch.cat(labels_per_image, dim=0)
# if len(proposals_per_image) > num_per_image:
# inds = torch.randperm(len(proposals_per_image))
# selected_inds = inds[:num_per_image]
# proposals_per_image = proposals_per_image[selected_inds]
# labels_per_image = labels_per_image[selected_inds]
# assert len(proposals_per_image) == num_per_image, [ len(proposals_per_image), num_per_image ]
# proposals.append(proposals_per_image)
# labels.append(labels_per_image)
# return proposals, labels
    def subsample(self, proposals, targets):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state — the sampled proposals are
        cached on ``self._proposals`` and consumed later by ``__call__``.
        Arguments:
            proposals (list[BoxList]): one BoxList of RPN proposals per image.
            targets (list[BoxList]): ground-truth boxes per image.
        Returns:
            list[BoxList]: the subsampled proposals, annotated with the
            fields "labels", "regression_targets" and (when soft labeling
            is enabled) "soft_labels".
        """
        # handcrafted_proposals, handcrafted_labels = self.handcraft_sample_proposals(proposals, targets)
        # assert len(handcrafted_proposals) == len(proposals), [len(handcrafted_proposals), len(proposals)]
        # proposals = [torch.cat([proposals[i], handcrafted_proposals[i]]) for i in range(len(proposals))]
        if self.cfg.FEW_SHOT.SOFT_LABELING:
            labels, soft_labels, regression_targets = self.prepare_targets(proposals, targets)
        else:
            labels, regression_targets = self.prepare_targets(proposals, targets)
        # Per-image boolean masks choosing which fg/bg proposals to train on.
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels, neg_supp=self.cfg.FEW_SHOT.NEG_SUPPORT.TURN_ON)
        # print('pos:', len(sampled_pos_inds[0].nonzero()), 'neg', len(sampled_neg_inds[0].nonzero()))
        proposals = list(proposals)
        # add corresponding label and regression_targets information to the bounding boxes
        if self.cfg.FEW_SHOT.SOFT_LABELING:
            for labels_per_image, soft_labels_per_image, regression_targets_per_image, proposals_per_image in zip(
                labels, soft_labels, regression_targets, proposals
            ):
                proposals_per_image.add_field("labels", labels_per_image)
                proposals_per_image.add_field("soft_labels", soft_labels_per_image)
                proposals_per_image.add_field(
                    "regression_targets", regression_targets_per_image
                )
        else:
            for labels_per_image, regression_targets_per_image, proposals_per_image in zip(
                labels, regression_targets, proposals
            ):
                proposals_per_image.add_field("labels", labels_per_image)
                proposals_per_image.add_field(
                    "regression_targets", regression_targets_per_image
                )
        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            # Keep only the union of sampled positives and negatives.
            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
            proposals_per_image = proposals[img_idx][img_sampled_inds]
            # assert torch.sum(proposals_per_image.get_field('labels').float() - proposals_per_image.get_field('soft_labels'))==0, \
            #     [proposals_per_image.get_field('labels'), proposals_per_image.get_field('soft_labels')]
            proposals[img_idx] = proposals_per_image
        self._proposals = proposals
        return proposals
def CXE(self, predicted, target):
my_target = torch.stack([1-target, target], dim=1)
return -(my_target * torch.log(predicted)).mean()
def FOCAL_LOSS(self, predicted_diff):
'''
multi cls focal loss
'''
EPISILON = 1e-6
log_pt = torch.log(1-predicted_diff + EPISILON)
return - ((predicted_diff) * log_pt).mean()
    def __call__(self, class_logits, box_regression, neg_class_logits=None, rev_class_logits=None, gt_label=-1):
        """
        Computes the loss for Faster R-CNN.
        This requires that the subsample method has been called beforehand.
        Arguments:
            class_logits (list[Tensor]): per-level classification logits.
            box_regression (list[Tensor]): per-level box deltas.
            neg_class_logits (list[Tensor] or None): logits from the
                negative-support branch; when given, a third suppression
                loss is also returned.
            rev_class_logits (list[Tensor] or None): logits computed with a
                reversed support order; required when
                cfg.FEW_SHOT.REVERSE_ORDER is enabled.
            gt_label (int): -1 trains against all classes; any other value
                collapses the task to binary fg/bg for that single class.
        Returns:
            classification_loss (Tensor)
            box_loss (Tensor)
            (optionally reverse_cls_loss or cls_suppress_loss, see below)
        """
        # Concatenate per-image/per-level lists into single tensors.
        class_logits = cat(class_logits, dim=0)
        box_regression = cat(box_regression, dim=0)
        if self.cfg.FEW_SHOT.REVERSE_ORDER:
            assert rev_class_logits is not None
            rev_class_logits = cat(rev_class_logits, dim=0)
        device = class_logits.device
        if not hasattr(self, "_proposals"):
            raise RuntimeError("subsample needs to be called before")
        # Labels/targets were attached to the proposals by subsample().
        proposals = self._proposals
        if self.cfg.FEW_SHOT.SOFT_LABELING:
            soft_labels = cat([proposal.get_field("soft_labels") for proposal in proposals], dim=0)
        labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
        regression_targets = cat(
            [proposal.get_field("regression_targets") for proposal in proposals], dim=0
        )
        if gt_label == -1:
            N = labels.size(0)
            pos_inds = torch.nonzero(labels > 0)
            # Classification loss: variant selected by config.
            if self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':
                classification_loss = self.cls_loss_func(
                    class_logits,
                    labels.int()
                ) / max(pos_inds.numel(), 1)
            elif self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'ce_loss':
                if self.cfg.FEW_SHOT.LOSS_WEIGHTED:
                    # Up-weight foreground classes in the cross entropy.
                    fg_weight = 0.75
                    if class_logits.size(1) == 2:
                        weight = torch.tensor([1-fg_weight, fg_weight]).cuda(class_logits.get_device()).float()
                    elif class_logits.size(1) == 3:
                        weight = torch.tensor([1-fg_weight, fg_weight, fg_weight]).cuda(class_logits.get_device()).float()
                    else:
                        raise Exception('class logits dimention wrong, can only be 2 or 3 for softmax ce loss')
                    classification_loss = F.cross_entropy(class_logits, labels, weight=weight) # change to focal loss
                else:
                    classification_loss = F.cross_entropy(class_logits, labels) # change to focal loss
            elif self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'mse_loss' and self.cfg.FEW_SHOT.SOFT_LABELING:
                classification_loss = torch.mean((class_logits.sigmoid()-soft_labels)**2)
            elif self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'mse_loss' and not self.cfg.FEW_SHOT.SOFT_LABELING:
                classification_loss = torch.mean((class_logits.sigmoid()-labels.float())**2)
            elif self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'l1_loss' and self.cfg.FEW_SHOT.SOFT_LABELING:
                classification_loss = torch.mean(torch.abs(class_logits.sigmoid()-soft_labels))
            elif self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'cxe_loss' and self.cfg.FEW_SHOT.SOFT_LABELING:
                classification_loss = self.CXE(class_logits.softmax(dim=1), soft_labels)
            else:
                raise Exception('clasification loss of second stage not valid')
            if self.cfg.FEW_SHOT.REVERSE_ORDER:
                # Consistency penalty between normal- and reversed-order predictions.
                reverse_cls_loss = self.FOCAL_LOSS(
                    torch.abs(class_logits.softmax(dim=-1)-rev_class_logits.softmax(dim=-1))
                )
            # get indices that correspond to the regression targets for
            # the corresponding ground truth labels, to be used with
            # advanced indexing
            sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
            labels_pos = labels[sampled_pos_inds_subset]
            if self.cls_agnostic_bbox_reg:
                map_inds = torch.tensor([4, 5, 6, 7], device=device)
            else:
                # Each class owns a group of 4 regression channels.
                map_inds = 4 * labels_pos[:, None] + torch.tensor(
                    [0, 1, 2, 3], device=device)
            box_loss = smooth_l1_loss(
                box_regression[sampled_pos_inds_subset[:, None], map_inds],
                regression_targets[sampled_pos_inds_subset],
                size_average=False,
                beta=1,
            )
            box_loss = box_loss / labels.numel()
        else: # only calculating loss of bboxes belonging to gt_label and mse loss
            N = labels.size(0)
            # Binarize: 1 for the requested class, 0 for everything else.
            cls_labels = labels.clone()
            cls_labels[torch.nonzero(cls_labels!=gt_label)] = 0
            cls_labels[torch.nonzero(cls_labels==gt_label)] = 1
            pos_inds = torch.nonzero(cls_labels > 0)
            if self.cfg.FEW_SHOT.SECOND_STAGE_CLS_LOSS == 'focal_loss':
                classification_loss = self.cls_loss_func(
                    class_logits,
                    cls_labels.int()
                ) / ( pos_inds.numel() + N )
            else:
                classification_loss = F.cross_entropy(class_logits, cls_labels) # change to focal loss
            # get indices that correspond to the regression targets for
            # the corresponding ground truth labels, to be used with
            # advanced indexing
            if cls_labels.numel() == 0:
                box_loss = torch.tensor(0).float().cuda(device)
            else:
                sampled_pos_inds_subset = torch.nonzero(cls_labels > 0).squeeze(1)
                labels_pos = cls_labels[sampled_pos_inds_subset]
                if self.cls_agnostic_bbox_reg:
                    map_inds = torch.tensor([4, 5, 6, 7], device=device)
                else:
                    map_inds = 4 * labels_pos[:, None] + torch.tensor(
                        [0, 1, 2, 3], device=device)
                box_loss = smooth_l1_loss(
                    box_regression[sampled_pos_inds_subset[:, None], map_inds],
                    regression_targets[sampled_pos_inds_subset],
                    size_average=False,
                    beta=1,
                )
                box_loss = box_loss / cls_labels.numel()
        if self.cfg.FEW_SHOT.REVERSE_ORDER:
            # NOTE(review): reverse_cls_loss is only assigned in the
            # gt_label == -1 branch above; reaching this line with
            # REVERSE_ORDER on and gt_label != -1 raises NameError.
            return classification_loss, box_loss, reverse_cls_loss
        # add in new neg support linz
        if neg_class_logits is not None:
            neg_class_logits = cat(neg_class_logits, dim=0)
            # Restrict the suppression term to proposals labeled class 1.
            focus_neg_class_logits = neg_class_logits[labels==1]
            focus_pos_class_logits = class_logits[labels==1]
            # focus_labels = labels[labels==1]
            focus_neg_class_scores = focus_neg_class_logits.softmax(dim=1)[:,1]
            focus_pos_class_scores = focus_pos_class_logits.softmax(dim=1)[:,1]
            # Hinge: the positive branch should beat the negative branch by a 0.3 margin.
            cls_suppress_loss = F.relu(focus_neg_class_scores-focus_pos_class_scores+0.3).mean()
            # cls_suppress_loss = F.cross_entropy(focus_neg_class_logits, 1-focus_labels) # change to focal loss
            return classification_loss, box_loss, cls_suppress_loss
        return classification_loss, box_loss
def make_roi_box_loss_evaluator(cfg):
    """Factory: wire up a FastRCNNLossComputation from the config.

    Builds the IoU matcher, the box coder and the balanced fg/bg sampler
    from cfg.MODEL.ROI_HEADS and hands them to the loss evaluator.
    """
    proposal_matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    coder = BoxCoder(weights=cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS)
    sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
        cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
    )
    return FastRCNNLossComputation(
        cfg,
        proposal_matcher,
        sampler,
        coder,
        cfg.MODEL.CLS_AGNOSTIC_BBOX_REG,
    )
| 46.191983 | 147 | 0.602147 | 20,644 | 0.942864 | 0 | 0 | 0 | 0 | 0 | 0 | 8,287 | 0.378488 |
4f355e9a0ec2580f7a7a369727ffb40554a5d73b | 1,108 | py | Python | morse_code.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
] | null | null | null | morse_code.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
] | null | null | null | morse_code.py | shwetabhsharan/leetcode | 6630592b1f962bb4c4bb3c83162a8ff12b2074b3 | [
"MIT"
] | null | null | null | """
Morse Code Implementation to tell unique pattern
Example:
Input: words = ["gin", "zen", "gig", "msg"]
Output: 2
Explanation:
The transformation of each word is:
"gin" -> "--...-."
"zen" -> "--...-."
"gig" -> "--...--."
"msg" -> "--...--."
There are 2 different transformations, "--...-." and "--...--.".
Notes
The length of words will be at most 100.
Each words[i] will have length in range [1, 12].
words[i] will only consist of lowercase letters.
"""
def uniqueMorseRepresentations(words):
    """Return the number of distinct Morse-code transformations of `words`.

    Each word maps to the concatenation of the Morse codes of its letters;
    words that collide count once. Keeps the original contract of returning
    -1 when more than 100 words are supplied.

    Bug fix: the original referenced an undefined name `morse_code_list`
    and raised NameError on every call; the table is now defined here.
    """
    import string
    # International Morse code for 'a'..'z', in alphabetical order.
    morse_code_list = [
        ".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..",
        ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.",
        "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--..",
    ]
    if len(words) > 100:
        return -1
    letter_map = dict(zip(string.ascii_lowercase, morse_code_list))
    # A set de-duplicates the transformations directly.
    transformations = {"".join(letter_map[ch] for ch in word) for word in words}
    return len(transformations)
# Demo driver; fixed from the Python 2 print statement (SyntaxError in Python 3).
words = ["gin", "zen", "gig", "msg"]
print(uniqueMorseRepresentations(words))  # expected: 2
4f359427f72340bc616793906a52f0e28c61b07a | 5,622 | py | Python | nn.py | sumitsk/cspace_belief | 9cb9b14f39066461224690f2b815a9933cf58c89 | [
"MIT"
] | 3 | 2019-04-11T09:06:39.000Z | 2019-04-13T20:09:12.000Z | nn.py | sumitsk/cspace_belief | 9cb9b14f39066461224690f2b815a9933cf58c89 | [
"MIT"
] | null | null | null | nn.py | sumitsk/cspace_belief | 9cb9b14f39066461224690f2b815a9933cf58c89 | [
"MIT"
] | null | null | null | #!/usr/bin/env python -W ignore::DeprecationWarning
import numpy as np
import os
import knn
import warnings
warnings.filterwarnings("ignore")
from sklearn.neighbors import NearestNeighbors, LSHForest
if __name__ == '__main__':
    # Benchmark script: compares exact NN (NearestNeighbors) against
    # approximate NN (LSHForest), with and without a Mahalanobis inverse
    # covariance, over several environments / training-set sizes / k values.
    # NOTE(review): sklearn's LSHForest was deprecated in 0.19 and removed
    # in 0.21 — this script only runs on old scikit-learn versions.
    files = (['env_shelf01', 'env_table1', 'env_table3',
              'env_shelf02', 'env_kitchen1', 'env_kitchen2',
              'env_kitchen_refrigerator', 'env_kitchen_microwave'])
    local_path = os.getcwd()
    training_set_path = os.path.join(local_path, "imp_samples/sobol_samples_1_7/")
    test_set_path = os.path.join(local_path, "test_set/")
    results_path = os.path.join(local_path, "metric_results/")
    cfree_val = -1.0
    cobs_val = 1.0
    threshold = 0.0
    d = cobs_val - cfree_val
    dim = 7
    k_values = [1, 5, 10, 15, 20]
    N_values = [1000, 5000, 10000, 15000, 20000]
    # Accumulators for per-environment accuracy/error, averaged at the end.
    avg_accnn = np.zeros((len(N_values), len(k_values)))
    avg_errnn = np.zeros((len(N_values), len(k_values)))
    avg_accann = np.zeros((len(N_values), len(k_values)))
    avg_errann = np.zeros((len(N_values), len(k_values)))
    for i in range(len(files)):
        print('------------------', files[i], '------------------')
        accnn = np.zeros((len(N_values), len(k_values)))
        errnn = np.zeros((len(N_values), len(k_values)))
        accann = np.zeros((len(N_values), len(k_values)))
        errann = np.zeros((len(N_values), len(k_values)))
        accmnn = np.zeros((len(N_values), len(k_values)))
        errmnn = np.zeros((len(N_values), len(k_values)))
        accmann = np.zeros((len(N_values), len(k_values)))
        errmann = np.zeros((len(N_values), len(k_values)))
        for j in range(len(N_values)):
            training_set_size = N_values[j]
            # NOTE(review): under Python 3 this is a float ("/" not "//"),
            # and it is later used as an array shape / loop bound — likely
            # a leftover from Python 2 integer division.
            test_set_size = training_set_size / 10
            print("training_set_size:", training_set_size)
            fn = 'sobol_' + files[i] + '_' + str(training_set_size) + '.npz'
            n = np.load(os.path.join(training_set_path,fn))
            training_set = n['samples']
            training_set_ccr = n['ccr']
            sjw = n['sjw']
            # Inverse covariance of the training samples (Mahalanobis metric).
            S = np.cov(training_set.transpose())
            inv_cov = np.linalg.inv(S)
            fn1 = files[i] + '_' + str(N_values[j]) + '.npz'
            n1 = np.load(os.path.join(test_set_path, fn1))
            test_set = n1['test_set']
            ccr = n1['ccr']
            lshf = LSHForest()
            lshf.fit(training_set)
            nbrs = NearestNeighbors()
            nbrs.fit(training_set)
            # Predicted collision scores: (test sample, k value).
            cprnn = np.ones((test_set_size, len(k_values)))
            cprann = np.ones((test_set_size, len(k_values)))
            cprmnn = np.ones((test_set_size, len(k_values)))
            cprmann = np.ones((test_set_size, len(k_values)))
            idx = 0
            while idx < test_set_size:
                query = test_set[idx]
                for t in range(len(k_values)):
                    n_neighbors = k_values[t]
                    cprnn[idx,t] = knn.nn_predictor(query, training_set, training_set_ccr, nbrs, n_neighbors,
                                                    method='r', weights=sjw)
                    cprann[idx,t] = knn.nn_predictor(query, training_set, training_set_ccr, lshf, n_neighbors,
                                                     method='r', weights=sjw)
                    cprmnn[idx,t] = knn.nn_predictor(query, training_set, training_set_ccr, nbrs, n_neighbors,
                                                     method='r', weights=sjw, inv_cov=inv_cov)
                    cprmann[idx,t] = knn.nn_predictor(query, training_set, training_set_ccr, lshf, n_neighbors,
                                                      method='r', weights=sjw, inv_cov=inv_cov)
                    # print(cprmann[idx,t] , cprann[idx,t]
                    # raw_input("s")
                idx = idx + 1
            # Accuracy = fraction of correct obstacle/free decisions;
            # error = mean absolute score deviation normalized by d.
            for t in range(len(k_values)):
                accnn[j,t] = 1.0*np.sum((cprnn[:,t] > threshold) == (ccr == cobs_val)) / test_set_size
                accann[j,t] = 1.0*np.sum((cprann[:,t] > threshold) == (ccr == cobs_val)) / test_set_size
                errnn[j,t] = np.sum(np.absolute(cprnn[:,t] - ccr)) / d / test_set_size
                errann[j,t] = np.sum(np.absolute(cprann[:,t] - ccr)) / d / test_set_size
                accmnn[j,t] = 1.0*np.sum((cprmnn[:,t] > threshold) == (ccr == cobs_val)) / test_set_size
                accmann[j,t] = 1.0*np.sum((cprmann[:,t] > threshold) == (ccr == cobs_val)) / test_set_size
                errmnn[j,t] = np.sum(np.absolute(cprmnn[:,t] - ccr)) / d / test_set_size
                errmann[j,t] = np.sum(np.absolute(cprmann[:,t] - ccr)) / d / test_set_size
                # print(accnn[j,t] , errnn[j,t], accann[j,t] , errann[j,t]
                # print(accmnn[j,t] , errmnn[j,t], accmann[j,t] , errmann[j,t]
        avg_accnn = avg_accnn + accnn
        avg_errnn = avg_errnn + errnn
        avg_accann = avg_accann + accann
        avg_errann = avg_errann + errann
        np.savez(os.path.join(results_path, files[i]+'_NN_w'),
                 accnn=accnn, accann=accann, errnn=errnn, errann=errann)
        np.savez(os.path.join(results_path, files[i]+'_NN_m_w'),
                 accnn=accmnn, accann=accmann, errnn=errmnn, errann=errmann)
        print(accnn, accmnn)
    avg_accnn = avg_accnn / len(files)
    avg_errnn = avg_errnn / len(files)
    avg_accann = avg_accann / len(files)
    avg_errann = avg_errann / len(files)
    print("average_values:", avg_accnn, avg_accann, avg_errnn, avg_errann)
4f35e5d4ae5344e336395cce9d435d980c9b8a4f | 1,771 | py | Python | src/genie/libs/parser/iosxr/tests/ShowOspfVrfAllInclusiveShamLinks/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxr/tests/ShowOspfVrfAllInclusiveShamLinks/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxr/tests/ShowOspfVrfAllInclusiveShamLinks/cli/equal/golden_output_1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
"vrf": {
"VRF1": {
"address_family": {
"ipv4": {
"instance": {
"1": {
"areas": {
"0.0.0.1": {
"sham_links": {
"10.21.33.33 10.151.22.22": {
"cost": 111,
"dcbitless_lsa_count": 1,
"donotage_lsa": "not allowed",
"dead_interval": 13,
"demand_circuit": True,
"hello_interval": 3,
"hello_timer": "00:00:00:772",
"if_index": 2,
"local_id": "10.21.33.33",
"name": "SL0",
"link_state": "up",
"remote_id": "10.151.22.22",
"retransmit_interval": 5,
"state": "point-to-point,",
"transit_area_id": "0.0.0.1",
"transmit_delay": 7,
"wait_interval": 13,
}
}
}
}
}
}
}
}
}
}
}
| 42.166667 | 74 | 0.197628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.234331 |
4f39fc5e401ac2bf156ceb867519dfcc3f4064b2 | 1,416 | py | Python | day03/path-02/code.py | zeddarn/advent-of-code-2021 | 09f45bf45156ee8f8df567a8e0fe43fd7b98a9df | [
"Apache-2.0"
] | null | null | null | day03/path-02/code.py | zeddarn/advent-of-code-2021 | 09f45bf45156ee8f8df567a8e0fe43fd7b98a9df | [
"Apache-2.0"
] | null | null | null | day03/path-02/code.py | zeddarn/advent-of-code-2021 | 09f45bf45156ee8f8df567a8e0fe43fd7b98a9df | [
"Apache-2.0"
] | null | null | null | import os
filename = os.path.dirname(__file__) + "\\input"
arrayList = []
with open(filename) as file:
for line in file:
arrayList.append(line.rstrip())
width = len(arrayList[0].rstrip())
print(f'len {width}')
gamma_nums = arrayList
for r in range(width):
start = 0
x = []
for line in gamma_nums:
text = list(line.rstrip())
if start == 0:
x = [0] * len(text)
i = 0
for t in text:
if t == '1':
x[i] += 1
i += 1
start += 1
y = x[r]
if len(gamma_nums) == 1:
gamma_nums = gamma_nums
elif y >= start / 2:
gamma_nums = list(filter(lambda score: score[r] == '1', gamma_nums))
else:
gamma_nums = list(filter(lambda score: score[r] == '0', gamma_nums))
co2_nums = arrayList
for r in range(width):
start = 0
x = []
for line in co2_nums:
text = list(line.rstrip())
if start == 0:
x = [0] * len(text)
i = 0
for t in text:
if t == '1':
x[i] += 1
i += 1
start += 1
y = x[r]
if len(co2_nums) == 1:
co2_nums = co2_nums
elif y < start / 2:
co2_nums = list(filter(lambda score: score[r] == '1', co2_nums))
else:
co2_nums = list(filter(lambda score: score[r] == '0', co2_nums))
print(int(co2_nums[0], 2) * int(gamma_nums[0], 2))
| 24.413793 | 76 | 0.501412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.028955 |
4f3cddc3d48976524af037fa191a969e354eac83 | 193 | py | Python | keymaster/__main__.py | shiroyuki/spymaster | 1efee54427378394ab04d0e53247eb38c28bc97c | [
"Apache-2.0"
] | null | null | null | keymaster/__main__.py | shiroyuki/spymaster | 1efee54427378394ab04d0e53247eb38c28bc97c | [
"Apache-2.0"
] | null | null | null | keymaster/__main__.py | shiroyuki/spymaster | 1efee54427378394ab04d0e53247eb38c28bc97c | [
"Apache-2.0"
] | null | null | null | import os, sys
sys.path.insert(0, os.path.join(os.getcwd(), '..', 'Imagination'))
sys.path.insert(0, os.path.join(os.getcwd(), '..', 'xmode'))
from keymaster.starter import activate
activate() | 32.166667 | 66 | 0.689119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.145078 |
4f3d60508b961f3b5da0a3e216be2a3ee247021d | 198 | py | Python | Server/Python/src/dbs/dao/MySQL/DataTier/List.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/src/dbs/dao/MySQL/DataTier/List.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/src/dbs/dao/MySQL/DataTier/List.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | #!/usr/bin/env python
"""
This module provides DataTier.List data access object.
"""
from dbs.dao.Oracle.DataTier.List import List as OraDataTierList
class List(OraDataTierList):
    """MySQL DataTier.List DAO.

    Reuses the Oracle implementation unchanged — presumably the SQL is
    identical across both backends (confirm if the schemas diverge).
    """
    pass
| 19.8 | 64 | 0.727273 | 45 | 0.227273 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.419192 |
4f41108bae3999d613b16bce6c5067e9321c891a | 262 | py | Python | backoffice/utils/constant.py | MedPy-C/backend | 262834adb1f4f5714c4bd490595fdfa1f49c9675 | [
"MIT"
] | null | null | null | backoffice/utils/constant.py | MedPy-C/backend | 262834adb1f4f5714c4bd490595fdfa1f49c9675 | [
"MIT"
] | 1 | 2021-05-20T16:08:35.000Z | 2021-05-20T16:08:35.000Z | backoffice/utils/constant.py | MedPy-C/backend | 262834adb1f4f5714c4bd490595fdfa1f49c9675 | [
"MIT"
] | null | null | null | from enum import Enum
class RoleLevel(Enum):
OWNER = 0
ADMIN = 1
USER = 3
class Status(Enum):
ACTIVE = 1
INACTIVE = 0
class AccessLevel(Enum):
ADMIN = 0
USER = 1
class URL():
ACTIVATION = '/backoffice/invitation/activate/'
| 12.47619 | 51 | 0.614504 | 229 | 0.874046 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.129771 |
4f41b185490551ba17dfbc19cca9e3a24ae60285 | 1,309 | py | Python | pages/migrations/0018_auto_20171102_1809.py | Vicarium/amy_site | eeb779aae74dc3af96f2837d876bafb8e13522d2 | [
"MIT"
] | null | null | null | pages/migrations/0018_auto_20171102_1809.py | Vicarium/amy_site | eeb779aae74dc3af96f2837d876bafb8e13522d2 | [
"MIT"
] | null | null | null | pages/migrations/0018_auto_20171102_1809.py | Vicarium/amy_site | eeb779aae74dc3af96f2837d876bafb8e13522d2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-02 18:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    """Auto-generated: creates the TestimonialPage model (a wagtailcore
    Page subclass with a rich-text intro and an optional feed image) and
    alters Testimonial.text to a RichTextField."""

    dependencies = [
        ('wagtailimages', '0019_delete_filter'),
        ('wagtailcore', '0040_page_draft_title'),
        ('pages', '0017_standardindexpage_template_string'),
    ]

    operations = [
        migrations.CreateModel(
            name='TestimonialPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.AlterField(
            model_name='testimonial',
            name='text',
            field=wagtail.wagtailcore.fields.RichTextField(),
        ),
    ]
| 35.378378 | 191 | 0.624905 | 1,084 | 0.828113 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.247517 |
4f42b91985cac19e93890aa2355cfbf1f9b33619 | 5,647 | py | Python | baselineCorrection.py | sinaravi/Baseline-Correction | 9d2a016d07a65eeec40c464682ff4bd4aca44ad0 | [
"MIT"
] | 1 | 2022-03-12T03:46:18.000Z | 2022-03-12T03:46:18.000Z | baselineCorrection.py | sinaravi/Baseline-Correction | 9d2a016d07a65eeec40c464682ff4bd4aca44ad0 | [
"MIT"
] | null | null | null | baselineCorrection.py | sinaravi/Baseline-Correction | 9d2a016d07a65eeec40c464682ff4bd4aca44ad0 | [
"MIT"
] | null | null | null | import peakutils
from peakutils.plot import plot as pplot
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import sparse
import math
from scipy.sparse.linalg import spsolve
PATH = "PDha_1.csv" # csv or txt format
df = pd.read_csv( PATH, sep="\t", skiprows=[0, 1, 2, 3, 4],delimiter="," ) # in skiprows write the rows which contain text in order to elliminate them
df.to_numpy()
class baselinecorrection:
    """Baseline correction for spectral data loaded from a CSV/TXT export.

    Loads wavelengths into ``self.x`` and spectra into ``self.y`` (one row
    per spectrum), then offers polynomial-subtraction and asymmetric
    least-squares (ALS) baseline estimation plus plotting helpers.
    """
    def __init__(self,path):
        self.path=path
        self.read_data()
    def read_data(self):
        """Parse the export: first column is wavelength, the rest spectra."""
        df = pd.read_csv( self.path, sep="\t", skiprows=[0, 1, 2, 3, 4],delimiter="," ) # in skiprows write the rows which contain text in order to eliminate them
        df.to_numpy()
        data = df.values.T
        y = data
        x = data[0] # wavelength
        self.x = np.nan_to_num( x )
        y = np.nan_to_num( y )
        self.y = np.delete( y, 0, 0 ) #spectral data
    def baseline_subtraction(self,rawspc):
        """Subtract a peakutils polynomial baseline (degree read from stdin)
        from each spectrum; negative results are clipped to 0.

        Returns (corrected spectra, baseline of the LAST spectrum only).
        """
        b = len( rawspc )
        # Interactive: polynomial degree is read from the user.
        t = int( input( "plz enter polynomial degree " ) )
        c = np.zeros( rawspc.shape )
        for i in range( b ):
            base = peakutils.baseline(rawspc[i], t) # choose order of polynomial here
            c[i] = rawspc[i] - base
        c[c < 0] = 0
        return c, base
    def baseline_als(self, lam, p, niter):
        """
        Asymmetric least-squares (Eilers) baseline of the FIRST spectrum.

        :param lam: λ for smoothness default 10**5
        :param p: p for asymmetry default: 0.1
        :param niter: number of iteration default: 10
        :return: estimated baseline of self.y[0]

        NOTE(review): this always operates on self.y[0] and takes no
        spectrum argument — the region-select branch below calls it with a
        spectrum as the first positional argument, which collides with
        ``lam`` (TypeError).
        """
        y = self.y[0, :]
        l = len( y )
        eye = np.eye( l )
        D = sparse.csc_matrix( np.diff( eye, 2 ) )
        w = np.ones( l )
        for i in range( niter ):
            W = sparse.spdiags( w, 0, l, l )
            Z = W + lam * D.dot( D.transpose() )
            z = spsolve( Z, w * y )
            w = p * (y > z) + (1 - p) * (y < z)
        return(z)
    def find_index(self, wavelenght_arr, number):
        """Index of the first wavelength within 1e-7 of ``number``.

        NOTE(review): when no value matches, this falls through and
        returns the LAST loop index instead of signalling failure (and
        raises NameError on an empty array).
        """
        for index, value in enumerate(wavelenght_arr):
            if abs(value - number) < 0.0000001:
                index = float(index)
                return index
        return index
    def region_select(self):
        """
        Ask the user for upper/lower wavelength bounds (stdin) and slice
        the data accordingly.

        :return: new x and y due to selected region
        """
        wu = float( input( "plz enter upper boundry wavelenght " ) )
        ou = self.find_index(self.x, wu )
        wl = float( input( "plz enter lower boundry wavelenght " ) )
        ol = self.find_index(self.x, wl )
        ou=int(ou)
        ol=int(ol)
        xi = self.x[ol:ou, ]
        yi = self.y[:, ol:ou]
        return xi, yi
    def plot_sub(self,raw,wavelebgth):
        """Plot raw vs. subtraction-corrected spectra and the baseline.

        NOTE(review): reads the module-level globals ``c`` and ``base``
        (set by the driver script below), not values passed in.
        """
        fig, ax = plt.subplots(3)
        ax[0].plot(wavelebgth, raw.T)
        ax[0].set_title( "Raw spectra", fontsize=15 )
        ax[1].plot(wavelebgth, c.T)
        ax[1].set_title( "corrected spectra", fontsize=15 )
        ax[2].plot(wavelebgth, base)
        ax[2].set_title( "noise", fontsize=15 )
        plt.savefig( 'FTIR subtraction poly.png' )
        plt.tight_layout()
        plt.show()
        plt.plot(wavelebgth, c.T)
        plt.title("corrected graph")
        plt.savefig('corrected graph.png')
        plt.show()
        return
    def plot_als(self,raw,wavelebgth):
        """Estimate an ALS baseline, plot raw/corrected/baseline, save PNGs."""
        c = self.baseline_als(lam=10**5, p=0.1, niter=10)
        fig, ax = plt.subplots(3)
        ax[0].plot(wavelebgth, raw.T)
        ax[0].set_title("Raw spectra", fontsize=15)
        ax[1].plot(wavelebgth, (raw - c).T)
        ax[1].set_title("corrected spectra", fontsize=15)
        ax[2].plot(wavelebgth, c)
        ax[2].set_title("noise", fontsize=15)
        plt.savefig('corrected graph.png')
        plt.tight_layout()
        plt.show()
        plt.plot(wavelebgth, (raw - c).T)
        plt.title("FTIR corrected")
        plt.savefig('FTIR corrected.png')
        plt.show()
        return
# Interactive driver: load the spectra, show them, then baseline-correct
# either the whole spectrum or a user-selected region, by polynomial
# subtraction ("s") or asymmetric least squares ("a").
c1 = baselinecorrection(PATH)
wl = c1.x
raw = c1.y
plt.plot(wl, raw.T)
plt.title("raw spectra")
plt.show()
# Output buffer: column 0 = wavelength, remaining columns = corrected spectra.
r=np.zeros(df.shape)
l = input( "Do you need to select desire regions? yes =y no =n" ).lower()
if l == "n": # 1. whole spectrum fitting
    l1 = input( "least square or subtraction method? least square =a subtraction=s" ).lower()
    if l1 == "s":
        c, base = c1.baseline_subtraction( raw )
        r[:, 0] = wl.transpose()
        r[:, 1:] = c.transpose()
        np.savetxt( 'baseline corrected spectra.txt', r, delimiter='\t' ) # change plot title base on your poly degree
        c1.plot_sub(raw,wl)
    elif l1 == "a":
        G = c1.baseline_als( lam=10 ** 5, p=0.1, niter=10 )
        G1 = raw - G
        r[:, 0] = wl.transpose()
        r[:, 1:] = G1.transpose()
        np.savetxt( 'baseline corrected spectra.txt', r, delimiter='\t' ) # change plot title base on your poly degree
        c1.plot_als(raw,wl)
    else:
        raise ValueError( "this value is not valid for arg method(method must be s or a)" )
elif l == "y":
    l1 = input( "least square or subtraction method? least square =a subtraction=s" ).lower()
    if l1 == "s":
        wli, rawi = c1.region_select()
        c, base = c1.baseline_subtraction( rawi )
        c1.plot_sub(rawi,wli)
    elif l1 == "a":
        wli, rawi = c1.region_select()
        # NOTE(review): baseline_als(self, lam, p, niter) takes no spectrum;
        # passing rawi positionally collides with the lam= keyword and
        # raises TypeError ("multiple values for argument 'lam'").
        c = c1.baseline_als(rawi,lam=10**5,p=0.1,niter=10)
        c1.plot_als(rawi,wli)
    else:
        raise ValueError("this value is not valid for arg method(method must be s or a)")
else:
    raise ValueError("this value is not valid for arg method(method must be y or n)")
| 36.198718 | 164 | 0.554631 | 3,546 | 0.627833 | 0 | 0 | 0 | 0 | 0 | 0 | 1,452 | 0.257082 |
4f42f0318bdc9f9e318bf4c6ab8ff73477869c44 | 1,337 | py | Python | tests/core/test_base_component.py | strickvl/zenml | f1499e9c3fee00fd1d66de14cab66c4472c0085d | [
"Apache-2.0"
] | 1,275 | 2020-11-19T14:18:25.000Z | 2021-08-13T07:31:39.000Z | tests/core/test_base_component.py | strickvl/zenml | f1499e9c3fee00fd1d66de14cab66c4472c0085d | [
"Apache-2.0"
] | 62 | 2020-11-30T16:06:14.000Z | 2021-08-10T08:34:52.000Z | tests/core/test_base_component.py | strickvl/zenml | f1499e9c3fee00fd1d66de14cab66c4472c0085d | [
"Apache-2.0"
] | 75 | 2020-12-22T19:15:08.000Z | 2021-08-13T03:07:50.000Z | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Text
from zenml.core.base_component import BaseComponent
class MockComponent(BaseComponent):
    """Mocking the base component for testing."""
    # Directory returned as the fake serialization target; presumably a
    # pydantic-style field declared on zenml's BaseComponent -- set per test.
    tmp_path: str
    def get_serialization_dir(self) -> Text:
        """Mock serialization dir: return the injected tmp_path unchanged."""
        return self.tmp_path
def test_base_component_serialization_logic(tmp_path):
    """Tests the UUID serialization logic of BaseComponent"""
    # NOTE(review): the body below is entirely commented out, so this test
    # asserts nothing and always passes. The intended flow appears to be:
    # Application of the monkeypatch to replace Path.home
    # with the behavior of mockreturn defined above.
    # mc = MockComponent(tmp_path=str(tmp_path))
    # Calling getssh() will use mockreturn in place of Path.home
    # for this test with the monkeypatch.
    # print(mc.get_serialization_dir())
| 33.425 | 70 | 0.735228 | 216 | 0.161556 | 0 | 0 | 0 | 0 | 0 | 0 | 1,006 | 0.752431 |
4f43d6d7f8cc17aa055442affd0c29f290e5addd | 591 | py | Python | courses/migrations/0009_alter_skills_program_duration_and_more.py | sisekelohub/sisekelo | 7e1b0de6abf07e65ed746d0d929c3de37fb421c3 | [
"MIT"
] | 1 | 2022-02-20T16:03:04.000Z | 2022-02-20T16:03:04.000Z | courses/migrations/0009_alter_skills_program_duration_and_more.py | sisekelohub/sisekelo | 7e1b0de6abf07e65ed746d0d929c3de37fb421c3 | [
"MIT"
] | null | null | null | courses/migrations/0009_alter_skills_program_duration_and_more.py | sisekelohub/sisekelo | 7e1b0de6abf07e65ed746d0d929c3de37fb421c3 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-01-02 21:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Widens the `duration` field to a nullable CharField(max_length=50)
    # on both the skills_program and specialized_course models.
    dependencies = [
        ('courses', '0008_alter_learnership_duration'),
    ]
    operations = [
        migrations.AlterField(
            model_name='skills_program',
            name='duration',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='specialized_course',
            name='duration',
            field=models.CharField(max_length=50, null=True),
        ),
    ]
| 24.625 | 61 | 0.602369 | 500 | 0.846024 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.241963 |
4f44af0f9e10537a5a0a0cc8c62e5e6d33dff3dd | 12,427 | py | Python | model/utils.py | Gofinge/HF | 71baee1632cd0bf3562eed1e315b6a605699dbe9 | [
"MIT"
] | 7 | 2019-03-13T15:09:06.000Z | 2022-02-15T20:01:22.000Z | model/utils.py | Gofinge/HF | 71baee1632cd0bf3562eed1e315b6a605699dbe9 | [
"MIT"
] | null | null | null | model/utils.py | Gofinge/HF | 71baee1632cd0bf3562eed1e315b6a605699dbe9 | [
"MIT"
] | 5 | 2019-03-23T05:31:52.000Z | 2020-11-09T03:01:17.000Z | import numpy as np
from keras import backend as K
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import csv
from sklearn.neighbors import KDTree
import matplotlib.pyplot as plt
from model.config import *
from tensorflow.python.ops import *
import seaborn as sns
import pandas as pd
def data_transform_lstm(raw_data, time_step):
    """Slice a feature/label table into overlapping LSTM windows.

    Every window covers `time_step` consecutive rows of the feature
    columns (all but the last); its label is the last column of the
    window's final row.
    """
    table = np.array(raw_data)
    n_windows = table.shape[0] - time_step + 1
    features = [table[start:start + time_step, :-1] for start in range(n_windows)]
    labels = [table[start + time_step - 1, -1] for start in range(n_windows)]
    return np.array(features), np.array(labels)
def data_transform_lstm2(raw_data, time_step):
    """Windowing variant that pairs each window with the *next* row's label.

    Same slicing as `data_transform_lstm`, but the label is taken from the
    row immediately after the window (one-step-ahead target), so one fewer
    window is produced.
    """
    table = np.array(raw_data)
    last_start = table.shape[0] - time_step
    features, labels = [], []
    for start in range(last_start):
        features.append(table[start:start + time_step, :-1])
        labels.append(table[start + time_step, -1])
    return np.array(features), np.array(labels)
def data_transform_lstm_30s(raw_data, time_step):
    """Slice a feature/label table into overlapping LSTM windows.

    Parameters
    ----------
    raw_data : array-like, shape (rows, n_features + 1)
        Feature columns followed by a label column.
    time_step : int
        Window length in rows.

    Returns
    -------
    (x, y) : ndarray pair
        x has shape (n_windows, time_step, n_features); y holds the
        label of each window's final row.

    Fix: the original computed `window` and `window_mean_price` on every
    iteration and never used them -- dead work removed, making this
    identical to `data_transform_lstm`.
    """
    data = np.array(raw_data)
    window_num = data.shape[0] - time_step + 1
    x = []
    y = []
    for i in range(window_num):
        x.append(data[i:time_step + i, 0:data.shape[1] - 1])
        y.append(data[time_step + i - 1, -1])
    return np.array(x), np.array(y)
def data_transform_lstm_mv(raw_data, time_step):
    """Multi-target windowing: features plus two labels and two prices.

    Expected column layout per row: features..., mean_price, mid_price,
    y1, y2. Each `time_step`-row window yields the feature slice plus
    the final row's y1/y2 labels and its mid/mean prices.
    """
    table = np.array(raw_data)
    n_windows = table.shape[0] - time_step + 1
    features, y1, y2, mids, means = [], [], [], [], []
    for start in range(n_windows):
        tail = start + time_step - 1
        features.append(table[start:start + time_step, :-4])
        y1.append(table[tail, -2])
        y2.append(table[tail, -1])
        mids.append(table[tail, -3])
        means.append(table[tail, -4])
    return np.array(features), [np.array(y1), np.array(y2)], mids, means
def data_transform_cnn(raw_data, time_step):
    """Windowing for a CNN: features folded into (time_step, pairs, 2).

    The feature columns (all but the label) of each window are reshaped
    so consecutive column pairs form the last axis; the target is the
    sign of the label on the window's final row.
    """
    table = np.array(raw_data)
    n_pairs = (table.shape[1] - 1) // 2
    n_windows = table.shape[0] - time_step + 1
    samples, targets = [], []
    for start in range(n_windows):
        window = table[start:start + time_step, :-1]
        samples.append(window.reshape(time_step, n_pairs, 2))
        targets.append(np.sign(table[start + time_step - 1, -1]))
    return np.array(samples), np.array(targets)
def data_transform_for_xgboost(raw_data):
    """Split a table into (features, labels) for tree-based models.

    The last column is the label, everything before it is a feature.
    Vectorized slicing replaces the original per-row Python loop
    (same values, O(1) numpy views instead of O(rows) appends).
    """
    data = np.array(raw_data)
    return data[:, :-1], data[:, -1]
def feature_normalize(data, label_num=1):
    """Min-max scale every feature column of `data` in place to [0, 1],
    leaving the trailing `label_num` label columns untouched, and return
    the (mutated) array."""
    feature_block = data[:, 0:-label_num]
    data[:, 0:-label_num] = MinMaxScaler(feature_range=(0, 1)).fit_transform(feature_block)
    return data
def normalize(data):
    """Return `data` min-max scaled column-wise to [0, 1] (new array)."""
    return MinMaxScaler().fit_transform(data)
def inverse(dataSet, scaler):
    """Map scaled values back to original units via an already-fitted
    scaler (inverse of its fit_transform)."""
    return scaler.inverse_transform(dataSet)
def drop_zero(y_true, y_pred):
    """Keras/TF loss: squared error reweighted to suppress near-zero targets.

    100 * |clip(y_true, -0.01, 0.01)| equals 1 wherever |y_true| >= 0.01
    and shrinks linearly to 0 for smaller targets, so samples whose true
    value is (near) zero contribute (almost) nothing to the mean.
    """
    y_true_ind = 100 * tf.abs(tf.clip_by_value(y_true, -0.01, 0.01))
    square = tf.square(y_true - y_pred)
    sum = K.mean(tf.multiply(y_true_ind, square))
    return sum
def two_class_penalty(y_true, y_pred):
    """MSE that weighs the squared error by `penalty` (5x) whenever the
    prediction and target disagree in sign.

    sign(y_true * y_pred) is -1 on disagreement and +1 on agreement; the
    two clips turn that into a 0/1 penalty indicator and a 0/1 normal
    indicator. Note: if either value is exactly 0 the product's sign is
    0, so both indicators vanish and the sample gets weight 0.
    """
    penalty = 5
    penalty_ind = tf.abs(tf.clip_by_value(tf.sign(tf.multiply(y_true, y_pred)), -1, 0))
    normal_ind = tf.clip_by_value(tf.sign(tf.multiply(y_true, y_pred)), 0, 1)
    penalty_coef = penalty * penalty_ind + normal_ind
    square = tf.square(y_true - y_pred)
    sum = K.mean(tf.multiply(penalty_coef, square))
    return sum
# acc: p=1(MSE): 0.74 p=2: 0.80,0.74,0.78 p=3: 0.75,0.78,0.78,0.75 p=4: 0.76 p=5: 0.75 p=10: 0.69
def three_class_penalty(y_true, y_pred):
    """MSE with class-aware penalties over three rounded classes.

    Targets and predictions are scaled by 100, rounded and clipped to
    {-1, 0, 1}; the absolute class difference (0, 1 or 2) selects a
    squared-error weight of 1, penalty_1 (= 2) or penalty_1 + penalty_2
    (= 5) respectively before averaging.
    """
    penalty_1 = 2
    penalty_2 = penalty_1 * 1.5
    y_true_round = tf.round(100 * y_true)
    y_true_round = tf.clip_by_value(y_true_round, -1, 1)
    y_pred_round = tf.round(100 * y_pred)
    y_pred_round = tf.clip_by_value(y_pred_round, -1, 1)
    penalty_delta = tf.abs(y_true_round - y_pred_round)  # each element is 0, 1 or 2
    temp_0 = tf.abs(tf.clip_by_value(penalty_delta, 0, 1) - 1)  # 1 only where classes match
    temp_1 = penalty_1 * (tf.clip_by_value(penalty_delta, 0, 1))  # base penalty for any mismatch
    temp_2 = penalty_2 * (tf.clip_by_value(penalty_delta, 1, 2) - 1)  # extra penalty for opposite classes
    coef = temp_0 + temp_1 + temp_2
    square = tf.square(y_true - y_pred)
    sum = K.mean(tf.multiply(coef, square))
    return sum
def smooth(y_true, y_pred):
    """MSE plus a smoothness term built from shifted error differences.

    NOTE(review): `bitwise_ops.right_shift` normally takes two arguments
    (x, y); calling it with a single float tensor looks like it would
    raise at runtime, and bitwise-shifting an error tensor is suspect --
    possibly an element roll/shift (second difference of the error) was
    intended. Confirm before using this loss.
    """
    diff = y_true - y_pred
    mse = K.mean(tf.square(diff))
    rs = bitwise_ops.right_shift(diff)
    grad = rs - diff
    rs = bitwise_ops.right_shift(grad)
    grad = rs - grad
    sum = K.mean(tf.square(grad))
    return sum + mse
def one_hot_encode(y, category_num):
    """One-hot encode `y`, assigning category indices in order of first
    appearance.

    If more than `category_num` distinct values occur, the overflow
    samples get an all-zero row and an error line is printed.
    """
    seen = []
    encoded = []
    for value in y:
        if value not in seen:
            seen.append(value)
        row = [0] * category_num
        idx = seen.index(value)
        try:
            row[idx] = 1
        except IndexError:
            print('Error: index = ' + str(idx) + ' value = ' + str(value))
        encoded.append(row)
    return encoded
def one_hot_decode(y):
    """Map each one-hot row back to the position of its (first) 1.

    Raises ValueError (from list.index) if a row contains no 1.
    """
    return [row.index(1) for row in y]
def batch_labelize_prob_vector(y):
    """Apply `labelize_prob_vector` to every probability vector in `y`."""
    return [labelize_prob_vector(vector) for vector in y]
def labelize_prob_vector(vector):
    """Return a one-hot list marking the first maximum of `vector`."""
    values = list(vector)
    hot = values.index(max(values))
    encoded = [0] * len(values)
    encoded[hot] = 1
    return encoded
def sign(vector):
    """Elementwise sign: 1 for positive, -1 for negative, 0 for zero."""
    # (v > 0) - (v < 0) is the classic branch-free integer sign.
    return [(value > 0) - (value < 0) for value in vector]
def save_feature_selection(feature_list, acc):
    """Write `acc` followed by the selected feature names as one CSV row
    to 'feature_selection.csv' (overwriting any previous file).

    Fix: the file is now opened with a context manager so the handle is
    closed even if writing raises (the original leaked it on error).
    newline='' is required by the csv module to avoid blank lines on
    Windows.
    """
    with open('feature_selection.csv', 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow([acc] + feature_list)
    return
def over_sampling_naive(train_x, train_y):
    """Balance a 3-class set with labels {-1, 0, 1} by duplicating
    minority rows.

    The +1 and -1 samples are each replicated by `_over_sampling_naive`
    so that together they roughly match the 0-class count; the
    rebalanced (x, y) arrays are returned.
    """
    xs = np.array(train_x)
    pos = xs[find_all_indices(train_y, 1)]
    neg = xs[find_all_indices(train_y, -1)]
    zero_idx = find_all_indices(train_y, 0)
    n_zero = len(train_x) - len(pos) - len(neg)
    power = n_zero / (len(pos) + len(neg))
    pos = _over_sampling_naive(pos, power)
    neg = _over_sampling_naive(neg, power)
    new_x = list(xs[zero_idx])
    new_y = [0 for _ in range(n_zero)]
    new_x.extend(pos)
    new_y.extend([1 for _ in range(len(pos))])
    new_x.extend(neg)
    new_y.extend([-1 for _ in range(len(neg))])
    return np.array(new_x), np.array(new_y)
def over_sampling_smote(train_x, train_y):
    """Balance a 3-class set with labels {-1, 0, 1} via SMOTE-style
    interpolation.

    Minority (+1/-1) samples are augmented by `_over_sampling_smote`
    until together they roughly match the 0-class count; the rebalanced
    (x, y) arrays are returned.
    """
    xs = np.array(train_x)
    pos = xs[find_all_indices(train_y, 1)]
    neg = xs[find_all_indices(train_y, -1)]
    zero_idx = find_all_indices(train_y, 0)
    n_zero = len(train_x) - len(pos) - len(neg)
    power = n_zero / (len(pos) + len(neg))
    pos = _over_sampling_smote(pos, power)
    neg = _over_sampling_smote(neg, power)
    new_x = list(xs[zero_idx])
    new_y = [0 for _ in range(n_zero)]
    new_x.extend(pos)
    new_y.extend([1 for _ in range(len(pos))])
    new_x.extend(neg)
    new_y.extend([-1 for _ in range(len(neg))])
    return np.array(new_x), np.array(new_y)
def _over_sampling_smote(sample, power):
    """Grow `sample` roughly `power`-fold by SMOTE-style interpolation.

    For each row (visited in random order) the `int(power)` nearest
    neighbours are found with a KD-tree, and a new point is drawn
    uniformly on the segment between the row and each neighbour, until
    int(power * len(sample)) total rows exist.

    NOTE(review): unlike `_over_sampling_naive`, the `count < 0` check
    breaks only the inner loop, so once the quota is reached one extra
    sample is still generated per remaining seed row -- possibly an
    outer break was intended.
    """
    kdtree = KDTree(sample)
    indices = [i for i in range(len(sample))]
    np.random.shuffle(indices)
    new_sample_list = []
    count = int(power * len(sample)) - len(sample)  # rows still to create
    each = int(power)  # neighbours queried per seed row
    feature_num = len(sample[0])
    for ori_ind in indices:
        _, near_ind = kdtree.query([sample[ori_ind]], each)
        for i in near_ind[0]:
            coef = np.random.rand()
            # convex combination of the seed row and its neighbour
            new_sample = [coef * sample[i][j] + (1 - coef) * sample[ori_ind][j] for j in range(feature_num)]
            new_sample_list.append(new_sample)
            count -= 1
            if count < 0:
                break
    sample = list(sample)
    sample.extend(new_sample_list)
    return sample
def _over_sampling_naive(sample, power):
indices = [i for i in range(len(sample))]
np.random.shuffle(indices)
new_sample_list = []
count = int(power * len(sample)) - len(sample)
each = int(power)
for ind in indices:
for i in range(each):
new_sample_list.append(sample[ind])
count -= each
if count < 0:
break
sample = list(sample)
sample.extend(new_sample_list)
return sample
def find_all_indices(data_list, value):
    """Return every position in `data_list` whose element equals `value`."""
    return [pos for pos, item in enumerate(data_list) if item == value]
def show_feature_importance(clf, feature_list):
    """Print the classifier's feature importances in descending order and
    save [names, importances] to 'feature_importance.npy'."""
    importances = clf.feature_importances_
    order = np.argsort(importances)[::-1]  # descending
    names = list(np.array(feature_list)[order])
    scores = list(importances[order])
    for name, score in zip(names, scores):
        print(name, ': ', score)
    np.save('feature_importance', [names, scores])
def extract_feature_and_label(data, feature_name_list, label_name_list):
    """Select the feature columns followed by the label columns from a
    pandas DataFrame and return them as one ndarray."""
    columns = list(feature_name_list) + list(label_name_list)
    return data[columns].values
def divide_train_and_test(data, ratio):
    """Split a 2-D array row-wise at floor(len * ratio) into (train, test)."""
    cut = int(len(data) * ratio)
    head = data[:cut, :]
    tail = data[cut:, :]
    return head, tail
def bagging(*pred_list):
    """Average several prediction sequences elementwise (simple ensemble
    bagging).

    Each positional argument is one model's prediction list; the result
    is the per-index mean across models. Rewritten with zip, which also
    makes ragged inputs truncate to the shortest list instead of raising
    IndexError, and makes `bagging()` with no arguments return [].
    """
    n_models = len(pred_list)
    return [sum(values) / n_models for values in zip(*pred_list)]
def plot_scatter(y_true, y_pred, sample_size=50):
    """Scatter-plot true vs predicted values for the first `sample_size`
    points, skipping samples whose true value is 0 (true as dots,
    predictions as 'x' markers).

    Fix: the original immediately overwrote `sample_size` with a
    hard-coded 50, so the parameter had no effect; the default of 50
    preserves the old behaviour while making the argument usable. The
    bare `except:` is narrowed to `except Exception:` so KeyboardInterrupt
    is no longer swallowed.
    """
    x_list = []
    pred_list = []
    true_list = []
    for i in range(sample_size):
        if y_true[i] != 0:
            x_list.append(i)
            pred_list.append(y_pred[i])
            true_list.append(y_true[i])
    try:
        plt.scatter(x_list, true_list)
        plt.scatter(x_list, pred_list, marker='x')
    except Exception:
        # fall back to dumping the data so a plotting failure is debuggable
        print(x_list)
        print(true_list)
        print(pred_list)
    plt.show()
def plot_confidence_interval(true_mean_price, mean_list, std_list, sample_num=300):
    """Plot the predicted mean and its confidence band against the true
    mean price for the first `sample_num` points.

    The band is mean +/- z_95 * std; z_95 is presumably the 95% normal
    quantile pulled in via `from model.config import *` -- confirm
    against model/config.py.
    """
    true_mean_price = true_mean_price[0:sample_num]
    mean_list = mean_list[0:sample_num]
    std_list = std_list[0:sample_num]
    plt.figure(figsize=(100, 15))
    up, down = mean_list + std_list * z_95, mean_list - std_list * z_95
    plt.plot(true_mean_price, color='g')
    plt.plot(mean_list, color='y')
    plt.plot(up, color='r')
    plt.plot(down, color='r')
    plt.legend(['true_mean_price', 'true_price', 'predict_mean', 'confidence interval'], loc='upper left')
    plt.xlabel('time')
    plt.ylabel('price')
    plt.show()
def plot_classification(y_true, y_pred, sample_num=1000):
    """Strip-plot predicted class (hue) against true class (x axis) for
    the first `sample_num` samples, using random vertical jitter.

    NOTE(review): `correct` is computed as y_true XOR y_pred (which is 0
    where the integer labels agree) and stored in the frame, but it is
    never used by the plot -- only 'true', 'predicted' and 'height' are.
    Confirm whether it was meant to drive the hue or can be dropped.
    """
    y_true = y_true[0:sample_num]
    y_pred = y_pred[0:sample_num]
    correct = [y_true[i] ^ y_pred[i] for i in range(len(y_true))]
    height = np.random.random(len(y_true))
    dt = pd.DataFrame(data=list(zip(y_true, y_pred, correct, height)),
                      columns=['true', 'predicted', 'correct', 'height'])
    sns.stripplot(x='true', y='height', hue='predicted', data=dt, alpha=0.8)
    plt.title('XGBoost')
    plt.ylabel('')
    plt.yticks(range(1), [''])
    plt.xlabel('trend')
    plt.xticks(range(3), ['fall(-1)', 'unchanged', 'rise(1)'])
    plt.show()
def plot_regression(y_true, y_pred, sample_num=1000, title='', start=1000):
    """Plot true vs predicted series over `sample_num` points beginning
    at offset `start`.

    Generalization: `start` was hard-coded to 1000 inside the body; it
    is now a trailing keyword argument with the same default, so all
    existing calls behave exactly as before.
    """
    y_true = y_true[start:start + sample_num]
    y_pred = y_pred[start:start + sample_num]
    plt.figure(figsize=(12, 8))
    plt.plot(y_true)
    plt.plot(y_pred, color='green')
    plt.title(title)
    plt.legend(['true', 'predicted'])
    plt.show()
| 30.162621 | 108 | 0.641345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.037117 |
4f44d34e791f88d9e4cf37473472b92bbf0299db | 2,378 | py | Python | phasor/optics/space.py | mccullerlp/OpenLoop | fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d | [
"Apache-2.0"
] | 5 | 2018-02-28T00:43:37.000Z | 2020-01-21T11:39:15.000Z | phasor/optics/space.py | mccullerlp/OpenLoop | fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d | [
"Apache-2.0"
] | 1 | 2019-09-07T23:15:43.000Z | 2019-09-07T23:15:43.000Z | phasor/optics/space.py | mccullerlp/OpenLoop | fe86dc6dec3740d4b6be6b88d8eef8566e2aa78d | [
"Apache-2.0"
] | 1 | 2020-08-21T04:42:09.000Z | 2020-08-21T04:42:09.000Z | # -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
#from phasor.utilities.print import print
import declarative as decl
from . import bases
from . import ports
from . import standard_attrs
class Space(
    bases.OpticalCouplerBase,
    bases.SystemElementBase,
):
    """Free-space propagation element with two optical ports.

    Light entering po_Fr exits po_Bk (and vice versa) with a frequency-
    and detuning-dependent phase; lower/raise (conjugate) sidebands get
    opposite phase signs.
    """
    @decl.dproperty
    def po_Fr(self):
        # front optical port, chained to the back port
        return ports.OpticalPort(sname = 'po_Fr', pchain = 'po_Bk')
    @decl.dproperty
    def po_Bk(self):
        # back optical port, chained to the front port
        return ports.OpticalPort(sname = 'po_Bk', pchain = 'po_Fr')
    length = standard_attrs.generate_length()
    # default microscopic length detuning (attribute name, value) pair
    _L_detune_default = ('L_detune_m', 0)
    L_detune = standard_attrs.generate_L_detune()
    def phase_lower(self, iwavelen_m, F):
        """Propagation phase for a LOWER sideband at frequency offset F:
        exp(-i*2*pi*(F*L/c + L_detune/lambda))."""
        symbols = self.symbols
        return symbols.math.exp(-symbols.i2pi * (F * self.L_m.val / symbols.c_m_s + self.L_detune_m.val * iwavelen_m))
    def phase_raise(self, iwavelen_m, F):
        """Conjugate phase for a RAISE sideband (opposite sign of
        phase_lower)."""
        symbols = self.symbols
        return symbols.math.exp(symbols.i2pi * (F * self.L_m.val / symbols.c_m_s + self.L_detune_m.val * iwavelen_m))
    @decl.mproperty
    def ports_optical(self):
        # both ports participate in the optical graph
        return [
            self.po_Fr,
            self.po_Bk,
        ]
    @decl.mproperty
    def pmap(self):
        # port-to-port pass-through mapping (Fr <-> Bk)
        return {
            self.po_Fr : self.po_Bk,
            self.po_Bk : self.po_Fr,
        }
    def system_setup_ports(self, ports_algorithm):
        """Declare that any key arriving on one port couples straight
        through to the opposite port (both directions)."""
        for port in self.ports_optical:
            for kfrom in ports_algorithm.port_update_get(port.i):
                ports_algorithm.port_coupling_needed(self.pmap[port].o, kfrom)
            for kto in ports_algorithm.port_update_get(port.o):
                ports_algorithm.port_coupling_needed(self.pmap[port].i, kto)
        return
    def system_setup_coupling(self, matrix_algorithm):
        """Insert the pass-through coupling with the sideband-dependent
        propagation phase for every key on every port."""
        for port in self.ports_optical:
            for kfrom in matrix_algorithm.port_set_get(port.i):
                iwavelen_m, freq = self.system.optical_frequency_extract(kfrom)
                if kfrom.subkey_has(ports.LOWER):
                    cplg = self.phase_lower(iwavelen_m, freq)
                elif kfrom.subkey_has(ports.RAISE):
                    cplg = self.phase_raise(iwavelen_m, freq)
                else:
                    # keys must be tagged LOWER or RAISE; anything else is a bug
                    raise RuntimeError("Boo")
                matrix_algorithm.port_coupling_insert(port.i, kfrom, self.pmap[port].o, kfrom, cplg)
        return
| 32.135135 | 118 | 0.632044 | 2,136 | 0.898234 | 0 | 0 | 463 | 0.194701 | 0 | 0 | 116 | 0.04878 |
4f4626181d8dc695cfa9958137d926bcace410eb | 12,106 | py | Python | tests/ci/release.py | BanditDangzw/ClickHouse | 6b62049a852149e151e57962044982aa6ba1b2ff | [
"Apache-2.0"
] | null | null | null | tests/ci/release.py | BanditDangzw/ClickHouse | 6b62049a852149e151e57962044982aa6ba1b2ff | [
"Apache-2.0"
] | null | null | null | tests/ci/release.py | BanditDangzw/ClickHouse | 6b62049a852149e151e57962044982aa6ba1b2ff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from contextlib import contextmanager
from typing import List, Optional
import argparse
import logging
from git_helper import commit
from version_helper import (
FILE_WITH_VERSION_PATH,
ClickHouseVersion,
VersionType,
git,
get_abs_path,
get_version_from_repo,
update_cmake_version,
)
class Release:
    """Orchestrates a ClickHouse release via git and the GitHub CLI.

    Side-effecting steps (branches, tags, labels, GH releases, pushes)
    are wrapped in context managers that register rollback commands on
    a stack; on failure each manager undoes its own step, and the
    remaining manual rollback commands are logged at the end.
    """
    # release types that spin up a new release branch vs. patch-only types
    BIG = ("major", "minor")
    SMALL = ("patch",)
    def __init__(self, version: ClickHouseVersion):
        self._version = version
        self._git = version._git
        self._release_commit = ""
        self._rollback_stack = []  # type: List[str]
    def run(self, cmd: str, cwd: Optional[str] = None) -> str:
        """Log and execute a shell command through the git helper."""
        cwd_text = ""
        if cwd:
            cwd_text = f" (CWD='{cwd}')"
        logging.info("Running command%s:\n    %s", cwd_text, cmd)
        return self._git.run(cmd, cwd)
    def update(self):
        """Refresh git state and re-read the version from the repo."""
        self._git.update()
        self.version = get_version_from_repo()
    def do(self, args: argparse.Namespace):
        """Run the whole release flow for the parsed CLI arguments."""
        self.release_commit = args.commit
        if not args.no_check_dirty:
            logging.info("Checking if repo is clean")
            self.run("git diff HEAD --exit-code")
        if not args.no_check_branch:
            self.check_branch(args.release_type)
        if args.release_type in self.BIG:
            # Checkout to the commit, it will provide the correct current version
            with self._checkout(self.release_commit, True):
                if args.no_prestable:
                    logging.info("Skipping prestable stage")
                else:
                    with self.prestable(args):
                        logging.info("Prestable part of the releasing is done")
                with self.testing(args):
                    logging.info("Testing part of the releasing is done")
        self.log_rollback()
    def check_no_tags_after(self):
        """Fail if any tag already contains the release commit."""
        tags_after_commit = self.run(f"git tag --contains={self.release_commit}")
        if tags_after_commit:
            raise Exception(
                f"Commit {self.release_commit} belongs to following tags:\n"
                f"{tags_after_commit}\nChoose another commit"
            )
    def check_branch(self, release_type: str):
        """Validate the commit/branch combination for the release type:
        BIG releases must start from master, SMALL ones from the
        matching '$MAJOR.$MINOR' release branch."""
        if release_type in self.BIG:
            # Commit to spin up the release must belong to a main branch
            output = self.run(f"git branch --contains={self.release_commit} master")
            if "master" not in output:
                raise Exception(
                    f"commit {self.release_commit} must belong to 'master' for "
                    f"{release_type} release"
                )
        if release_type in self.SMALL:
            branch = f"{self.version.major}.{self.version.minor}"
            if self._git.branch != branch:
                raise Exception(f"branch must be '{branch}' for {release_type} release")
    def log_rollback(self):
        """Print the accumulated rollback commands in reverse order."""
        if self._rollback_stack:
            rollback = self._rollback_stack
            rollback.reverse()
            logging.info(
                "To rollback the action run the following commands:\n  %s",
                "\n  ".join(rollback),
            )
    @contextmanager
    def prestable(self, args: argparse.Namespace):
        """Create the release branch, mark the version PRESTABLE, and
        publish the draft GH release + release PR (all rolled back on
        failure)."""
        self.check_no_tags_after()
        # Create release branch
        self.update()
        release_branch = f"{self.version.major}.{self.version.minor}"
        with self._create_branch(release_branch, self.release_commit):
            with self._checkout(release_branch, True):
                self.update()
                self.version.with_description(VersionType.PRESTABLE)
                with self._create_gh_release(args):
                    with self._bump_prestable_version(release_branch, args):
                        # At this point everything will rollback automatically
                        yield
    @contextmanager
    def testing(self, args: argparse.Namespace):
        """Bump the version on a '-prepare' helper branch and open the
        version-update PR."""
        # Create branch for a version bump
        self.update()
        self.version = self.version.update(args.release_type)
        helper_branch = f"{self.version.major}.{self.version.minor}-prepare"
        with self._create_branch(helper_branch, self.release_commit):
            with self._checkout(helper_branch, True):
                self.update()
                self.version = self.version.update(args.release_type)
                with self._bump_testing_version(helper_branch, args):
                    yield
    @property
    def version(self) -> ClickHouseVersion:
        return self._version
    @version.setter
    def version(self, version: ClickHouseVersion):
        # guard against accidentally assigning a raw string/tuple
        if not isinstance(version, ClickHouseVersion):
            raise ValueError(f"version must be ClickHouseVersion, not {type(version)}")
        self._version = version
    @property
    def release_commit(self) -> str:
        return self._release_commit
    @release_commit.setter
    def release_commit(self, release_commit: str):
        # commit() normalizes/validates the sha before storing it
        self._release_commit = commit(release_commit)
    @contextmanager
    def _bump_prestable_version(self, release_branch: str, args: argparse.Namespace):
        """Commit the next patch version on the release branch, push it,
        create the backport labels and the release PR.

        NOTE(review): the description is set via the string "prestable"
        here while `prestable()` uses VersionType.PRESTABLE -- confirm
        the two spellings are equivalent in version_helper.
        """
        self._git.update()
        new_version = self.version.patch_update()
        new_version.with_description("prestable")
        update_cmake_version(new_version)
        cmake_path = get_abs_path(FILE_WITH_VERSION_PATH)
        self.run(
            f"git commit -m 'Update version to {new_version.string}' '{cmake_path}'"
        )
        with self._push(release_branch, args):
            with self._create_gh_label(
                f"v{release_branch}-must-backport", "10dbed", args
            ):
                with self._create_gh_label(
                    f"v{release_branch}-affected", "c2bfff", args
                ):
                    self.run(
                        f"gh pr create --repo {args.repo} --title 'Release pull "
                        f"request for branch {release_branch}' --head {release_branch} "
                        "--body 'This PullRequest is a part of ClickHouse release "
                        "cycle. It is used by CI system only. Do not perform any "
                        "changes with it.' --label release"
                    )
                    # Here the prestable part is done
                    yield
    @contextmanager
    def _bump_testing_version(self, helper_branch: str, args: argparse.Namespace):
        """Commit the new testing version on the helper branch, push it
        and open the 'Update version after release' PR."""
        self.version.with_description("testing")
        update_cmake_version(self.version)
        cmake_path = get_abs_path(FILE_WITH_VERSION_PATH)
        self.run(
            f"git commit -m 'Update version to {self.version.string}' '{cmake_path}'"
        )
        with self._push(helper_branch, args):
            body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
            self.run(
                f"gh pr create --repo {args.repo} --title 'Update version after "
                f"release' --head {helper_branch} --body-file '{body_file}'"
            )
            # Here the prestable part is done
            yield
    @contextmanager
    def _checkout(self, ref: str, with_checkout_back: bool = False):
        """Check out `ref`; on exception hard-reset and restore the
        original ref, optionally restoring it on success too."""
        orig_ref = self._git.branch or self._git.sha
        need_rollback = False
        if ref not in (self._git.branch, self._git.sha):
            need_rollback = True
            self.run(f"git checkout {ref}")
        # checkout is not put into rollback_stack intentionally
        rollback_cmd = f"git checkout {orig_ref}"
        try:
            yield
        except BaseException:
            logging.warning("Rolling back checked out %s for %s", ref, orig_ref)
            self.run(f"git reset --hard; git checkout {orig_ref}")
            raise
        else:
            if with_checkout_back and need_rollback:
                self.run(rollback_cmd)
    @contextmanager
    def _create_branch(self, name: str, start_point: str = ""):
        """Create a branch and register (or run on failure) its deletion."""
        self.run(f"git branch {name} {start_point}")
        rollback_cmd = f"git branch -D {name}"
        self._rollback_stack.append(rollback_cmd)
        try:
            yield
        except BaseException:
            logging.warning("Rolling back created branch %s", name)
            self.run(rollback_cmd)
            raise
    @contextmanager
    def _create_gh_label(self, label: str, color: str, args: argparse.Namespace):
        """Create a GitHub label and register (or run on failure) its
        deletion."""
        self.run(f"gh api repos/{args.repo}/labels -f name={label} -f color={color}")
        rollback_cmd = f"gh api repos/{args.repo}/labels/{label} -X DELETE"
        self._rollback_stack.append(rollback_cmd)
        try:
            yield
        except BaseException:
            logging.warning("Rolling back label %s", label)
            self.run(rollback_cmd)
            raise
    @contextmanager
    def _create_gh_release(self, args: argparse.Namespace):
        """Tag the release and publish a draft prerelease on GitHub,
        deleting the release on failure."""
        with self._create_tag(args):
            # Preserve tag if version is changed
            tag = self.version.describe
            self.run(
                f"gh release create --prerelease --draft --repo {args.repo} "
                f"--title 'Release {tag}' '{tag}'"
            )
            rollback_cmd = f"gh release delete --yes --repo {args.repo} '{tag}'"
            self._rollback_stack.append(rollback_cmd)
            try:
                yield
            except BaseException:
                logging.warning("Rolling back release publishing")
                self.run(rollback_cmd)
                raise
    @contextmanager
    def _create_tag(self, args: argparse.Namespace):
        """Create and push an annotated tag, deleting it on failure."""
        tag = self.version.describe
        self.run(f"git tag -a -m 'Release {tag}' '{tag}'")
        rollback_cmd = f"git tag -d '{tag}'"
        self._rollback_stack.append(rollback_cmd)
        try:
            with self._push(f"'{tag}'", args):
                yield
        except BaseException:
            logging.warning("Rolling back tag %s", tag)
            self.run(rollback_cmd)
            raise
    @contextmanager
    def _push(self, ref: str, args: argparse.Namespace):
        """Push `ref` to the target repo, deleting the remote ref on
        failure."""
        self.run(f"git push git@github.com:{args.repo}.git {ref}")
        rollback_cmd = f"git push -d git@github.com:{args.repo}.git {ref}"
        self._rollback_stack.append(rollback_cmd)
        try:
            yield
        except BaseException:
            logging.warning("Rolling back pushed ref %s", ref)
            self.run(rollback_cmd)
            raise
def parse_args() -> argparse.Namespace:
    """Build and parse the CLI for the release script.

    Flags cover the target repository, the release type, the starting
    commit, and switches to skip the dirty-tree and branch validations.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Script to release a new ClickHouse version, requires `git` and "
        "`gh` (github-cli) commands",
    )
    parser.add_argument(
        "--repo",
        default="ClickHouse/ClickHouse",
        help="repository to create the release",
    )
    parser.add_argument(
        "--type",
        default="minor",
        # choices=Release.BIG+Release.SMALL, # add support later
        choices=Release.BIG + Release.SMALL,
        dest="release_type",
        help="a release type, new branch is created only for 'major' and 'minor'",
    )
    parser.add_argument(
        "--no-prestable",
        action="store_true",
        help=f"for release types in {Release.BIG} skip creating prestable release and "
        "release branch",
    )
    parser.add_argument(
        "--commit",
        default=git.sha,
        type=commit,
        help="commit create a release, default to HEAD",
    )
    parser.add_argument(
        "--no-check-dirty",
        action="store_true",
        help="skip check repository for uncommited changes",
    )
    parser.add_argument(
        "--no-check-branch",
        action="store_true",
        help="by default, 'major' and 'minor' types work only for master, and 'patch' "
        "works only for a release branches, that name should be the same as "
        "'$MAJOR.$MINOR' version, e.g. 22.2",
    )
    return parser.parse_args()
def prestable():
    # NOTE(review): unused placeholder -- nothing in this file calls it;
    # the real prestable logic lives in Release.prestable().
    pass
def main():
    """Entry point: configure logging, parse CLI args and run the release."""
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    release = Release(get_version_from_repo())
    release.do(args)
if __name__ == "__main__":
    main()
| 35.922849 | 88 | 0.59392 | 10,001 | 0.826119 | 6,357 | 0.525112 | 7,089 | 0.585577 | 0 | 0 | 3,488 | 0.288122 |
4f474c8d10d4136625b4321c8b9558f07ac791aa | 1,938 | py | Python | src/controller.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | 28 | 2022-03-06T06:04:54.000Z | 2022-03-27T04:14:33.000Z | src/controller.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | null | null | null | src/controller.py | shaoeric/torch-atom | 7688fc38c0d19fe4d13a9773115df911ffe6eaaa | [
"MIT"
] | 3 | 2022-03-11T07:01:58.000Z | 2022-03-17T05:34:41.000Z | import torch
import torch.nn as nn
from torch import optim
from src.losses import LossWrapper
from typing import List
__all__ = ["Controller"]
class Controller(object):
    """Bundles a model with its optimizer and LossWrapper and defines the
    per-batch train/validation steps, keeping the outer loop generic and
    easy to extend to multiple models (one controller per model)."""
    def __init__(self,
                 loss_wrapper: LossWrapper,
                 model: nn.Module,
                 optimizer: optim.Optimizer
                 ) -> None:
        # loss_wrapper maps (model output, [labels]) ->
        # (total loss, per-term loss tuple, detached output)
        self.loss_wrapper = loss_wrapper
        self.model = model
        self.optimizer = optimizer
    def train_step(self, input: torch.Tensor, label: torch.Tensor, *args, **kwargs):
        """
        Define the training process for the model, easy for extension for multiple models

        Performs one optimization step: zero grads, forward, loss,
        backward, optimizer step.

        Args:
            input (torch.Tensor): input tensor of the model
            label (torch.Tensor): ground truth of the input tensor

        Returns:
            loss (torch.FloatTensor): train loss
            loss_tuple (tuple[torch.FloatTensor]): a tuple of loss item
            output_no_grad (torch.FloatTensor): model output without grad
        """
        self.optimizer.zero_grad()
        output = self.model(input)
        loss, loss_tuple, output_no_grad = self.loss_wrapper(output, [label])
        loss.backward()
        self.optimizer.step()
        return loss, loss_tuple, output_no_grad
    def validate_step(self, input: torch.Tensor, label: torch.Tensor, *args, **kwargs):
        """
        Define the validation process for the model

        NOTE(review): this runs a full forward pass with autograd enabled
        and only detaches the returned loss; wrapping it in
        torch.no_grad() would save memory -- confirm the wrapper does not
        need the graph before changing it.

        Args:
            input (torch.Tensor): input tensor for the model
            label (torch.Tensor): ground truth for the input tensor

        Returns:
            loss (torch.FloatTensor): validation loss item, without grad
            loss_tuple (tuple[torch.FloatTensor]): a tuple of loss item
            output_no_grad (torch.FloatTensor): model output without grad
        """
        output = self.model(input)
        loss, loss_tuple, output_no_grad = self.loss_wrapper(output, [label])
        return loss.detach(), loss_tuple, output_no_grad
4f4925b1458a2f02b808c03448a38b7a7bd10981 | 20 | py | Python | REDServer/gunicorn.conf.py | illusioneering/RED | 03ea52fabe318ef201c17943d293e95ff846baba | [
"MIT"
] | 1 | 2021-04-13T16:37:31.000Z | 2021-04-13T16:37:31.000Z | REDServer/gunicorn.conf.py | illusioneering/RED | 03ea52fabe318ef201c17943d293e95ff846baba | [
"MIT"
] | null | null | null | REDServer/gunicorn.conf.py | illusioneering/RED | 03ea52fabe318ef201c17943d293e95ff846baba | [
"MIT"
] | 1 | 2021-04-13T21:57:14.000Z | 2021-04-13T21:57:14.000Z | bind = "0.0.0.0:80"
| 10 | 19 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.6 |
4f4c269d8d1071e01fd3b6a4ed72884d440242d7 | 791 | py | Python | helper/cLog.py | aub-cp-training/Discord-Bot | 07d8a3c98fe9999131526624cad2f4598f60ddab | [
"MIT"
] | null | null | null | helper/cLog.py | aub-cp-training/Discord-Bot | 07d8a3c98fe9999131526624cad2f4598f60ddab | [
"MIT"
] | null | null | null | helper/cLog.py | aub-cp-training/Discord-Bot | 07d8a3c98fe9999131526624cad2f4598f60ddab | [
"MIT"
] | null | null | null | from helper.cTime import MyDate
# ------------------ [ elog() ] ------------------ #
# Logs the error message into the "error_log.log" file
# States what file, call and exception created the error log
def elog(ex, stk):
    """Append an error record to ./logs/error_log.log.

    `stk` is expected to be inspect.stack()-style frame records: the
    caller's frame repr is parsed to recover the source file (relative
    path with the first three components dropped) and the calling
    function name, which are written with the timestamp and the
    exception text.

    Fix: the log file is opened with a context manager, so the handle is
    closed even if parsing or writing raises (the original leaked it on
    error).
    """
    frame = str(stk[0][0]).strip('<>').split(',')
    path = frame[1][8:-1].split('/')
    _file = '/'.join(path[3:])
    call = frame[3][6:] + '()'
    with open("./logs/error_log.log", "a") as fs:
        fs.write(MyDate().footer() + " " + _file + " CALL " + call + " ERROR-MSG " + str(ex) + '\n')
# ------------------ [ alog() ] ------------------ #
# Appends to "activity_log.log", recording when the exception occurred
def alog(ex):
    """Append a timestamped activity record to ./logs/activity_log.log.

    The file is opened in append mode, so the log grows over time; it is
    not cleared between runs. Fix: a context manager now guarantees the
    handle is closed even if the write raises.
    """
    with open("./logs/activity_log.log", "a") as fs:
        fs.write(MyDate().footer() + " " + str(ex) + '\n')
4f4c95f74358defd6290b5df4ac0fc0d0ae3c8b4 | 867 | py | Python | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | null | null | null | src/pandas_profiling_study/report/structure/variables/__init__.py | lucasiscoviciMoon/pandas-profiling-study | 142d3b0f5e3139cdb531819f637a407682fa5684 | [
"MIT"
] | 1 | 2020-04-25T15:20:39.000Z | 2020-04-25T15:20:39.000Z | from ....report.structure.variables.render_boolean import render_boolean
from ....report.structure.variables.render_categorical import (
render_categorical,
)
from ....report.structure.variables.render_common import render_common
from ....report.structure.variables.render_complex import render_complex
from ....report.structure.variables.render_count import render_count
from ....report.structure.variables.render_date import render_date
from ....report.structure.variables.render_path import render_path
from ....report.structure.variables.render_path_image import (
render_path_image,
)
from ....report.structure.variables.render_real import render_real
from ....report.structure.variables.render_url import render_url
from ....report.structure.variables.render_generic import render_generic
from ....report.structure.variables.render_int import render_int | 54.1875 | 72 | 0.828143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4f4ce494fc5205b01c11abe3dcc62bccc1d2df8a | 1,273 | py | Python | venv/lib/python3.9/site-packages/py2app/bootstrap/virtualenv_site_packages.py | dequeb/asmbattle | 27e8b209de5787836e288a2f2f9b7644ce07563e | [
"MIT"
] | 1 | 2021-03-25T21:51:36.000Z | 2021-03-25T21:51:36.000Z | venv/lib/python3.9/site-packages/py2app/bootstrap/virtualenv_site_packages.py | dequeb/asmbattle | 27e8b209de5787836e288a2f2f9b7644ce07563e | [
"MIT"
] | null | null | null | venv/lib/python3.9/site-packages/py2app/bootstrap/virtualenv_site_packages.py | dequeb/asmbattle | 27e8b209de5787836e288a2f2f9b7644ce07563e | [
"MIT"
] | null | null | null | def _site_packages(prefix, real_prefix, global_site_packages):
import os
import site
import sys
paths = []
paths.append(
os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages")
)
if os.path.join(".framework", "") in os.path.join(prefix, ""):
home = os.environ.get("HOME")
if home:
paths.append(
os.path.join(
home, "Library", "Python", sys.version[:3], "site-packages"
)
)
# Work around for a misfeature in setuptools: easy_install.pth places
# site-packages way to early on sys.path and that breaks py2app bundles.
# NOTE: this is hacks into an undocumented feature of setuptools and
# might stop to work without warning.
sys.__egginsert = len(sys.path)
for path in paths:
site.addsitedir(path)
# Ensure that the global site packages get placed on sys.path after
# the site packages from the virtual environment (this functionality
# is also in virtualenv)
sys.__egginsert = len(sys.path)
if global_site_packages:
site.addsitedir(
os.path.join(
real_prefix, "lib", "python" + sys.version[:3], "site-packages"
)
)
| 31.825 | 80 | 0.602514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.404556 |
4f4d2cdfa9f38dc5e7ffe58506d96f6ab3e292e3 | 2,324 | py | Python | pytorch/fnetar.py | q1park/tempformer-xl | 05be261ba2221dd7fd7adc39df78941eaa4937c2 | [
"Apache-2.0"
] | 2 | 2021-09-29T11:12:02.000Z | 2021-09-30T08:37:14.000Z | pytorch/fnetar.py | q1park/tempformer-xl | 05be261ba2221dd7fd7adc39df78941eaa4937c2 | [
"Apache-2.0"
] | null | null | null | pytorch/fnetar.py | q1park/tempformer-xl | 05be261ba2221dd7fd7adc39df78941eaa4937c2 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from modules.xlmask import XlMask
from modules.xlmemory import XlMemory
from modules.xlposition import XlPosition
from xllayer import XlLayer
from fnetarlayer import FnetarLayer
class Fnetar(nn.Module):
    """Autoregressive hybrid of Transformer-XL and FNet.

    The first half of the stack consists of relative-attention ``XlLayer``
    blocks, the second half of Fourier-mixing ``FnetarLayer`` blocks; every
    layer consumes its slice of the recurrence memory.
    """

    def __init__(
        self,
        n_layer: int,
        d_model: int,
        n_head: int,
        d_head: int,
        d_inner: int,
        drop_out: float,
        drop_att: float,
        tgt_len: int=None,
        mem_len: int=None,
        same_length: bool=False,
        clamp_len: int=-1
    ):
        super(Fnetar, self).__init__()

        self.position = XlPosition(d_model=d_model, n_head=n_head, d_head=d_head, clamp_len=clamp_len)

        # Build the two halves of the stack separately for readability.
        attention_half = [
            XlLayer(
                d_model=d_model, n_head=n_head, d_head=d_head, d_inner=d_inner,
                drop_out=drop_out, drop_att=drop_att
            )
            for _ in range(n_layer // 2)
        ]
        fourier_half = [
            FnetarLayer(
                d_model=d_model, d_inner=d_inner, drop_out=drop_out,
                tgt_len=tgt_len, mem_len=mem_len, same_length=same_length
            )
            for _ in range(n_layer // 2)
        ]
        self.layers = nn.ModuleList(attention_half + fourier_half)

        self.attn_mask = XlMask(tgt_len=tgt_len, mem_len=mem_len, same_length=same_length)
        self.drop_out = nn.Dropout(drop_out)

    def forward(self, x, memory: XlMemory) -> (torch.Tensor, XlMemory):
        """Run the layer stack over ``x`` and refresh ``memory`` with the new hiddens."""
        qlen, mlen = x.size(1), memory.size(0)

        # Positional signal covering the full key length (memory + query).
        p = self.position.wave_grid(klen=mlen + qlen, device=x.device, dtype=x.dtype)
        p = self.drop_out(p)

        hids = [x]
        fourier_seen = False
        for i, layer in enumerate(self.layers):
            if isinstance(layer, XlLayer):
                x = layer(x=x, mem=memory[i], p=p, position=self.position, mask=self.attn_mask)
            elif isinstance(layer, FnetarLayer):
                # Only the first Fourier layer injects the position signal.
                x = layer(x=x, mem=memory[i], p=p, add_position=not fourier_seen)
                fourier_seen = True
            hids.append(x)

        memory.update_memory(hids, memory, mlen, qlen)
        return self.drop_out(x), memory
4f4f6e6b80c56c5a27d1f21b5dbb08a35977376c | 362 | py | Python | nomad/images/migrations/0003_auto_20181218_2248.py | jss8882/nomad | 914fefe3a9bd47c6a252829f4cf5a30796ea4885 | [
"MIT"
] | null | null | null | nomad/images/migrations/0003_auto_20181218_2248.py | jss8882/nomad | 914fefe3a9bd47c6a252829f4cf5a30796ea4885 | [
"MIT"
] | 7 | 2021-05-07T23:17:55.000Z | 2022-02-26T14:37:28.000Z | nomad/images/migrations/0003_auto_20181218_2248.py | jss8882/nomad | 914fefe3a9bd47c6a252829f4cf5a30796ea4885 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.9 on 2018-12-18 13:48
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``message`` field of the ``Like`` model to ``creator``."""

    dependencies = [
        ("images", "0002_auto_20181218_2029"),
    ]

    operations = [
        migrations.RenameField(
            model_name="like",
            old_name="message",
            new_name="creator",
        ),
    ]
| 19.052632 | 47 | 0.582873 | 277 | 0.765193 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.287293 |
4f5001e8746107e356347ba4628bb42d71a639be | 1,124 | py | Python | src/main.py | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | src/main.py | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | src/main.py | mmData/Hack4Good | 61d79a4c56872ed5fabb280b8e9f91c4c4f71ece | [
"RSA-MD"
] | null | null | null | """
Created on Wed Nov 07 2018
@author: Analytics Club at ETH internal@analytics-club.org
Example structure of the main file
"""
from src.data_extraction import load_data, save_data, merging, xml2df
from src.preprocessing import text_process, anonymization, clean_up, detect_language
def extract_data(program, mode):
    """Stub entry point for the data-extraction stage; only logs its arguments."""
    print('Starting data extraction ...')
    print(f'Program {program}, mode {mode}')
def preprocess(program, mode):
    """Stub entry point for the preprocessing stage; only logs its arguments."""
    print("Doing preprocessing")
    print(f'Program {program}, mode {mode}')
def train(program, mode):
    """Stub entry point for the training stage; only logs its arguments."""
    print(f'Program {program}, mode {mode}')
    print('Training the model...')
def predict(program, mode):
    """Stub entry point for the prediction stage; only logs its arguments."""
    print(f'Program {program}, mode {mode}')
    print('Do prediction...')
def test(program, mode):
"""
function to do tests
"""
print('Program {}, mode {}'.format(program, mode))
print('Doing tests...')
| 18.42623 | 84 | 0.636121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.494662 |
4f514c4b037c5ec11c6ed2a7b923cc3c118b32f5 | 378 | py | Python | dcodex_lectionary/migrations/0031_auto_20201119_2140.py | rbturnbull/dcodex_lectionary | 9a4787eb353d09fef023cd82af8859a7ee041aee | [
"Apache-2.0"
] | null | null | null | dcodex_lectionary/migrations/0031_auto_20201119_2140.py | rbturnbull/dcodex_lectionary | 9a4787eb353d09fef023cd82af8859a7ee041aee | [
"Apache-2.0"
] | 2 | 2021-08-09T01:11:59.000Z | 2021-08-09T01:12:49.000Z | dcodex_lectionary/migrations/0031_auto_20201119_2140.py | rbturnbull/dcodex_lectionary | 9a4787eb353d09fef023cd82af8859a7ee041aee | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.11 on 2020-11-19 10:40
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``period`` field of the ``MovableDay`` model to ``season``."""

    dependencies = [
        ("dcodex_lectionary", "0030_auto_20201119_2131"),
    ]

    operations = [
        migrations.RenameField(
            model_name="movableday",
            old_name="period",
            new_name="season",
        ),
    ]
| 19.894737 | 57 | 0.600529 | 292 | 0.772487 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.31746 |
4f530bbd9256fee9929035d49abd45c5563302e2 | 389 | py | Python | misc/context_processors.py | ilblackdragon/django-misc | 0accd2dc97de656a1c9e275be81e817f78a2eb9d | [
"MIT"
] | 6 | 2015-05-13T14:56:30.000Z | 2019-06-27T13:24:04.000Z | misc/context_processors.py | ilblackdragon/django-misc | 0accd2dc97de656a1c9e275be81e817f78a2eb9d | [
"MIT"
] | null | null | null | misc/context_processors.py | ilblackdragon/django-misc | 0accd2dc97de656a1c9e275be81e817f78a2eb9d | [
"MIT"
] | null | null | null | from django.conf import settings
def useful_constants(request):
    """Expose True/False/None and the Django settings object to templates.

    This workaround is useful if you want to use {% if var == None %}: with
    {% if not var %} First {% else %} Second {% endif %} the result would be:
    var = None => First
    var = False => First
    var = True => True
    """
    constants = {'True': True, 'False': False, 'None': None}
    constants['settings'] = settings
    return constants
4f54da94c5e8d003456c1c27b57ad6a8d761118e | 355 | py | Python | ocdskingfisher/sources/digiwhist_germany.py | odscjames/lhs-alpha | d882cadfcf3464fd29529cb862567dc311d892e2 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisher/sources/digiwhist_germany.py | odscjames/lhs-alpha | d882cadfcf3464fd29529cb862567dc311d892e2 | [
"BSD-3-Clause"
] | null | null | null | ocdskingfisher/sources/digiwhist_germany.py | odscjames/lhs-alpha | d882cadfcf3464fd29529cb862567dc311d892e2 | [
"BSD-3-Clause"
] | null | null | null | from ocdskingfisher.sources.digiwhist_base import DigiwhistBaseSource
class DigiwhistGermanyRepublicSource(DigiwhistBaseSource):
    """Digiwhist scraper configuration for the German opentender dataset."""

    source_id = 'digiwhist_germany'
    publisher_name = 'Digiwhist Germany'
    url = 'https://opentender.eu/download'

    def get_data_url(self):
        """Return the direct download URL of the German OCDS data archive."""
        return 'https://opentender.eu/data/files/DE_ocds_data.json.tar.gz'
| 32.272727 | 74 | 0.771831 | 282 | 0.794366 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.36338 |
4f55bc18c7fbfc94620218322c5899ca0d5ca398 | 622 | py | Python | catfacts/facts03.py | mikerauer/pyb-class | b7f6202c58df654eb81263d12c2634fa37a27e07 | [
"MIT"
] | null | null | null | catfacts/facts03.py | mikerauer/pyb-class | b7f6202c58df654eb81263d12c2634fa37a27e07 | [
"MIT"
] | null | null | null | catfacts/facts03.py | mikerauer/pyb-class | b7f6202c58df654eb81263d12c2634fa37a27e07 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#import always go at the top of your code
import requests
def main():
    """Fetch cat facts from the public API and print each fact's text."""
    # `response` is the requests object for the facts endpoint.
    response = requests.get('http://cat-fact.herokuapp.com/facts')
    # response.json()['all'] is a list of fact dictionaries; iterating it
    # visits each one in turn. .get('text') returns None (instead of
    # raising) when the key is missing.
    for fact in response.json()['all']:
        print(fact.get('text'))


main()
| 29.619048 | 87 | 0.673633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 465 | 0.747588 |
4f55f613cbd24d99e81b7a754fe8f317fb9a58a4 | 2,731 | py | Python | train_isic18.py | brieberg/keras-deeplab-v3-plus | f2c5c8f4c03ceef2395dbf1e55001d293e295ae0 | [
"MIT"
] | null | null | null | train_isic18.py | brieberg/keras-deeplab-v3-plus | f2c5c8f4c03ceef2395dbf1e55001d293e295ae0 | [
"MIT"
] | null | null | null | train_isic18.py | brieberg/keras-deeplab-v3-plus | f2c5c8f4c03ceef2395dbf1e55001d293e295ae0 | [
"MIT"
] | null | null | null | import numpy as np
import os
from utils import *
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from model import Deeplabv3
import keras
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.layers.convolutional import Deconvolution2D
from numpy import random
from random import seed, sample, randrange
from tensorflow.python.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from tensorflow.python.keras.optimizers import Adam
######## Train Model on ISIC
# Training-run hyperparameters: epoch count and batch size.
EPOCHS = 10
BS = 2
# Random run id used to namespace log/model artifacts under ./runs/.
runnr = randrange(1, 10000)
print("RUNNUMBER", runnr)
# DeepLab v3+ with a 2-class (lesion / background) segmentation head.
deeplab_model = Deeplabv3(input_shape=(256, 256, 3), classes=2)
test_percentage = 0.1
# Input images for ISIC 2018 task 1-2.
# NOTE(review): paths are hard-coded to one machine — consider a config/env var.
file_list = recursive_glob(
    "/home/bijan/Workspace/Python/keras-deeplab-v3-plus/data/ISIC2018_Task1-2_Training_Input",
    ".jpg"
)
# Fixed seed so sample() reproduces the same train/test split every run;
# the seed() call must precede sample() for this to hold.
seed(123)
# NOTE(review): 2594 is presumably the dataset size — verify it matches
# len(file_list); a mismatch would raise IndexError below.
test_indices = sample(range(0, 2594), int(2594 * test_percentage))
test = [sorted(file_list)[k] for k in test_indices]
# NOTE(review): sorted(file_list) is recomputed for every index above, and
# `k not in test` is a linear scan per element — fine at this size, but slow.
train = sorted([k for k in file_list if k not in test])
# Ground-truth masks; index positions must line up with the image split above.
file_list = recursive_glob(
    "/home/bijan/Workspace/Python/keras-deeplab-v3-plus/data/ISIC2018_Task1_Training_GroundTruth/",
    ".png"
)
test_labels = [sorted(file_list)[k] for k in test_indices]
train_labels = sorted(list(set(file_list) - set(test_labels)))
# Augmentation pipeline.
# NOTE(review): `aug` is never passed to the generators below — confirm whether
# augmentation is intended to happen inside isic_generator instead.
aug = ImageDataGenerator(zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")
# Callbacks: CSV metrics log, LR decay on val-loss plateau, best-weights checkpoint.
csv_logger = CSVLogger('./runs/'+str(runnr)+'_log.csv', append=True, separator=';')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
checkpointer = ModelCheckpoint(filepath='./runs/'+str(runnr)+'_model.hdf5',
                               verbose=1,
                               save_weights_only=True,
                               save_best_only=True)
deeplab_model.compile(optimizer=Adam(lr=0.0001), loss=keras.losses.binary_crossentropy, metrics=['binary_accuracy'])
# Train from generators; H.history collects per-epoch metrics.
H = deeplab_model.fit_generator(isic_generator(train, train_labels, batch_size=BS),
                                validation_data=isic_generator(test, test_labels, batch_size=BS),
                                validation_steps=len(test_labels),
                                steps_per_epoch=len(train) // BS,
                                epochs=EPOCHS,
                                max_queue_size=3,
                                callbacks=[checkpointer, csv_logger, reduce_lr])
# Persist the model architecture next to the weights checkpoint.
model_json = deeplab_model.to_json()
with open("./runs/"+str(runnr)+"_model.json", "w") as json_file:
    json_file.write(model_json)
print(H.history)
4f5630ab38660bf275aba0281e617c57be524011 | 6,769 | py | Python | 3rd_party/nek5000/short_tests/lib/nekBinRun.py | RonRahaman/nekRS | ffc02bca33ece6ba3330c4ee24565b1c6b5f7242 | [
"BSD-3-Clause"
] | 1 | 2022-01-06T16:16:08.000Z | 2022-01-06T16:16:08.000Z | 3rd_party/nek5000/short_tests/lib/nekBinRun.py | neams-th-coe/nekRS | 5d2c8ab3d14b3fb16db35682336a1f96000698bb | [
"BSD-3-Clause"
] | null | null | null | 3rd_party/nek5000/short_tests/lib/nekBinRun.py | neams-th-coe/nekRS | 5d2c8ab3d14b3fb16db35682336a1f96000698bb | [
"BSD-3-Clause"
] | 1 | 2019-09-10T20:12:48.000Z | 2019-09-10T20:12:48.000Z | import os
import sys
from warnings import warn
from subprocess import call, check_call, PIPE, STDOUT, Popen, CalledProcessError
from pathlib import Path
def run_meshgen(command, stdin, cwd, verbose=False):
base_command = Path(command).name
logfile = Path(cwd) / f"{base_command}.out"
print(
f'Running "{base_command}"...\n'
f' Using command "{command}"\n'
f' Using stdin "{stdin}"\n'
f' Using working directory "{cwd}"'
)
stdin = "\n".join(stdin) + "\n"
try:
(stdoutdata, stderrdata) = Popen(
[command], stdin=PIPE, stderr=STDOUT, stdout=PIPE, cwd=cwd, text=True
).communicate(stdin)
with open(logfile, "w") as file:
file.writelines(stdoutdata)
if verbose:
sys.stdout.write(stdoutdata)
except (OSError, CalledProcessError) as error:
warn(
f'Could not complete {command}! Caught error: "{error}". '
f'Check "{logfile}" for details.'
)
raise
else:
print(f"Successfully finished {base_command}!")
def run_nek_script(script, rea_file, cwd, log_suffix="", mpi_procs="1"):
cwd = Path(cwd)
try:
logs = (
cwd / "logfile",
cwd / f"{rea_file}.log.{mpi_procs}",
)
# Remove old logs
for l in logs:
if os.path.exists(l):
os.remove(l)
# Running 'script' through shell since it doesn't have a shebang at the top.
# Need to concatenate args into a string if shell=True
cmd = " ".join([script, rea_file, str(mpi_procs)])
print("Running nek5000...")
print(f' Using command "{cmd}"')
print(f' Using working directory "{cwd}"')
try:
# TODO: This doesn't work as intended. If the nek executable fails, the nek script doesn't return the error.
# Check doxygen to see what exit values there are (some succesful exit values there are!)
check_call(cmd, cwd=cwd, shell=True)
except Exception as error:
warn(f"Could not successfully run nek5000! Caught error: {error}")
else:
# print('Successfully ran nek5000!')
print("Finished running nek5000!")
# Rename logs
if log_suffix:
for l in logs:
os.rename(l, l + log_suffix)
# This are expected exceptions if 'check_call' or 'os.rename' fail.
# We issue a warning, not error, so subsequent tests can continue
except (OSError, CalledProcessError) as error:
warn(
'Could not complete command: "{}": {}'.format(
" ".join([script, rea_file, mpi_procs]), error
)
)
def run_nek(
cwd, rea_file, ifmpi, log_suffix="", n_procs=1, step_limit=None, verbose=False
):
# Paths to executables, files
cwd = Path(cwd)
nek5000 = str(cwd / "nek5000")
logfile = cwd / f"{rea_file}.log.{n_procs}{log_suffix}"
session_name = cwd / "SESSION.NAME"
ioinfo = cwd / "ioinfo"
if ifmpi:
command = ["mpiexec", "-np", str(n_procs), nek5000]
else:
command = [nek5000]
print("Running nek5000...")
print(' Using command "{}"'.format(" ".join(command)))
print(f' Using working directory "{cwd}"')
print(f' Using .rea file "{rea_file}"')
# Any error here is unexepected
try:
with open(session_name, "w") as file:
file.writelines(
[
"1\n",
f"{rea_file}\n",
f"{cwd}/\n",
]
)
if step_limit:
with open(ioinfo, "w") as file:
file.writelines([f"-{step_limit}"])
if verbose:
with open(logfile, "w") as file:
proc = Popen(command, cwd=cwd, stderr=STDOUT, stdout=PIPE, text=True)
for line in proc.stdout:
sys.stdout.write(line)
file.write(line)
else:
with open(logfile, "w") as file:
call(command, cwd=cwd, stdout=file)
except Exception as error:
warn(f"Could not successfully run nek5000! Caught error: {error}")
else:
print("Finished running nek5000!")
def run_neknek(
cwd,
inside,
np_inside,
outside,
np_outside,
coupled=True,
step_limit=None,
log_suffix="",
verbose=False,
):
# Paths to executables, files
cwd = Path(cwd)
nek5000 = str(cwd / "nek5000")
logfile = cwd / f"{inside}{np_inside}.{outside}{np_outside}.log{log_suffix}"
ifcoupled = "F"
if coupled:
ifcoupled = "T"
session_name = cwd / "SESSION.NAME"
ioinfo = cwd / "ioinfo"
command = ["mpiexec", "-np", str(int(np_inside) + int(np_outside)), nek5000]
print("Running nek5000...")
print(' Using command "{}"'.format(" ".join(command)))
print(f' Using working directory "{cwd}"')
print(f' Using .rea files "{inside}", "{outside}"')
# Any error here is unexpected
try:
# Create SESSION.NAME
with open(session_name, "w") as file:
file.writelines(
[
"{}\n".format(2),
f"{ifcoupled}\n",
f"{inside}\n",
f"{cwd}\n",
f"{np_inside}\n",
f"{outside}\n",
f"{cwd}\n",
f"{np_outside}\n",
]
)
# Write step limit
if step_limit:
with open(ioinfo, "w") as file:
file.writelines([f"-{step_limit}"])
if verbose:
with open(logfile, "w") as file:
proc = Popen(command, cwd=cwd, stderr=STDOUT, stdout=PIPE, text=True)
for line in proc.stdout:
sys.stdout.write(line)
file.write(line)
else:
with open(logfile, "w") as file:
call(command, cwd=cwd, stdout=file)
except Exception as error:
warn(f"Could not successfully run nek5000! Caught error: {error}")
else:
print("Finished running nek5000!")
def mvn(src_prefix, dst_prefix, cwd):
exts = (".box", ".rea", ".usr", ".map", ".sep", ".re2")
print("Running mvn...")
print(f' Using working directory "{cwd}"')
cwd = Path(cwd)
for x in exts:
src = cwd / (src_prefix + x)
dst = cwd / (dst_prefix + x)
try:
src.rename(dst)
except OSError as error:
warn(f" Could not move {src} to {dst}: {error}")
else:
print(f" Successfully moved {src} to {dst}")
print("Finished running mvn!")
| 30.628959 | 121 | 0.534495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,255 | 0.333136 |
4f56364fdae732161a7da9eee32d19ccc90073e6 | 909 | py | Python | main/cargo-bootstrap/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/cargo-bootstrap/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | main/cargo-bootstrap/template.py | RoastVeg/cports | 803c7f07af341eb32f791b6ec1f237edb2764bd5 | [
"BSD-2-Clause"
] | null | null | null | pkgname = "cargo-bootstrap"
pkgver = "1.60.0"
pkgrel = 0
# satisfy runtime dependencies
hostmakedepends = ["curl"]
depends = ["!cargo"]
pkgdesc = "Bootstrap binaries of Rust package manager"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT OR Apache-2.0"
url = "https://rust-lang.org"
source = f"https://ftp.octaforge.org/chimera/distfiles/cargo-{pkgver}-{self.profile().triplet}.tar.xz"
options = ["!strip"]
match self.profile().arch:
case "ppc64le":
sha256 = "29d19c5015d97c862af365cda33339619fb23ae9a2ae2ea5290765604f99e47d"
case "x86_64":
sha256 = "07ab0bdeaf14f31fe07e40f2b3a9a6ae18a4b61579c8b6fa22ecd684054a81af"
case _:
broken = f"not yet built for {self.profile().arch}"
def do_install(self):
self.install_bin("cargo")
self.install_license("LICENSE-APACHE")
self.install_license("LICENSE-MIT")
self.install_license("LICENSE-THIRD-PARTY")
| 33.666667 | 102 | 0.719472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.586359 |
4f5645014936668c159ef7897d13239bafc29c34 | 209 | py | Python | reagent/core/fb_checker.py | alexnikulkov/ReAgent | e404c5772ea4118105c2eb136ca96ad5ca8e01db | [
"BSD-3-Clause"
] | 1 | 2021-05-03T15:18:58.000Z | 2021-05-03T15:18:58.000Z | reagent/core/fb_checker.py | alexnikulkov/ReAgent | e404c5772ea4118105c2eb136ca96ad5ca8e01db | [
"BSD-3-Clause"
] | null | null | null | reagent/core/fb_checker.py | alexnikulkov/ReAgent | e404c5772ea4118105c2eb136ca96ad5ca8e01db | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import importlib.util
def is_fb_environment():
    """Return True when the 'fblearner' package is importable.

    Used to detect whether the code is running inside FB infrastructure.
    """
    # find_spec returns None when the top-level package is not installed,
    # so the comparison itself is the boolean we want — no if/else needed.
    return importlib.util.find_spec("fblearner") is not None


# Evaluated once at import time so callers can read a cheap constant.
IS_FB_ENVIRONMENT = is_fb_environment()
| 17.416667 | 57 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.157895 |
4f5660c2d05d0f34ee53240885a7bd79ea5cdbf5 | 5,284 | py | Python | src/looper.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | 1 | 2022-03-17T05:55:56.000Z | 2022-03-17T05:55:56.000Z | src/looper.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | 1 | 2022-02-23T11:31:43.000Z | 2022-02-23T11:31:43.000Z | src/looper.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | null | null | null | "module for background tasks in the loop"
import datetime
import discord
from discord.ext import commands, tasks
from threading import Thread
import asyncio
import time
from src.views import View
from src.data_model import DataModel
import src.settings as settings
from src.storage import Storage
import logging
from .message_sent_history import message_history
from .shuffler import shuffled_dict
class Looper(commands.Cog):
    """Background cog that periodically refreshes game data and re-renders
    per-channel info messages.

    The @tasks.loop below is declared with count=1, i.e. a single run; a
    daemon thread (see task_creator) re-submits the coroutine to the event
    loop every `delay` seconds, so the loop is effectively periodic.
    """

    def __init__(self, bot, storage: Storage, chanell_controller):
        self.bot = bot
        # Starts the (count=1) printer task immediately on cog creation.
        self.printer.start()
        self.storage = storage
        self.chanell_controller = chanell_controller
        api_data = self.storage.get_game_data()
        channels_ids = self.storage.get_channels_id()
        # Seed the sent-message history with every already-known forum record
        # for every channel, so they are not re-posted as "new" later.
        for record in api_data.new_forum_records:
            for channel_id in channels_ids:
                message_history.add_message(channel_id=channel_id, record=record)
        self.data = DataModel(api_data=api_data)

    async def cog_unload(self):
        # Cancel the scheduled task when the cog is removed from the bot.
        self.printer.cancel()
        print('unloading...')

    @tasks.loop(seconds=5.0, reconnect=True, count=1)
    async def printer(self):
        """One refresh pass: pull fresh API data, then render/update each
        reachable channel; errors are logged and never propagate."""
        try:
            logging.info('OK executing printer loop')
            updating_api_data = await self.storage.a_get_game_data()
            self.data.update(updating_api_data)
            # logging.info(f"context=new_forum_records type=looper, data={self.data.api_data.new_forum_records}")
            self.storage.save_channel_settings()
            channel_ids = self.storage.get_channels_id()
            # Split configured channels into ones the bot can see
            # (allowed) and ones it cannot resolve (forbidden).
            forbidden_channels = []
            allowed_channels = {}
            for channel_id in channel_ids:
                channel_info = self.bot.get_channel(channel_id)
                if channel_info is None:
                    forbidden_channels.append(channel_id)
                else:
                    allowed_channels[channel_id] = channel_info
            logging.info(f'context=allowed_channels, allowed_channels={allowed_channels.keys()}')
            logging.info(f'context=forbidden_channels, forbidden_channels={forbidden_channels}')
            # Shuffle so no single channel is always served first.
            shuffled_allowed_channels = shuffled_dict(allowed_channels)
            logging.info(f'context=shuffled_allowed_channels, shuffled_allowed_channels={shuffled_allowed_channels}')
            for channel_id, channel_info in shuffled_allowed_channels.items():
                # Per-channel errors are caught below so one bad channel
                # does not abort the whole pass.
                try:
                    logging.info(f'context=loop_begins_for_channel channel={channel_id} in guild={self.bot.get_channel(channel_id).guild}')
                    # delete expired messages
                    await self.chanell_controller.delete_exp_msgs(
                        channel_id, 40)
                    logging.info(f'context=channel_loop, channel={channel_id}, msg=deleting_old_msgs')
                    rendered_date, rendered_all, render_forum_records = await View(
                        self.data.api_data, self.storage,
                        channel_id).render_all()
                    logging.info(f'context=channel_loop, channel={channel_id}, msg=rendered_all')
                    # send final data update
                    try:
                        await self.chanell_controller.update_info(
                            channel_id,
                            rendered_all,
                            render_forum_records=render_forum_records)
                        logging.info(f'context=channel_loop, channel={channel_id}, msg=update_info_is_done')
                    except discord.errors.HTTPException:
                        # Message too large for Discord: fall back to the
                        # date header plus an advisory error text.
                        await self.chanell_controller.update_info(
                            channel_id, rendered_date +
                            '\n**ERR: you tried to render too much info!**' +
                            '\nremove some of the values from config' +
                            '\nor write them fully instead of tags')
                except Exception as error:
                    error_msg = f"ERR, loop_cycle, channel_id={str(channel_id)}, error={str(error)}"
                    logging.info(error_msg)
                    # Re-raise interrupts so shutdown is not swallowed.
                    if isinstance(error, KeyboardInterrupt):
                        raise KeyboardInterrupt(
                            "time to exit, KeyboardInterrupt")
        except Exception as error:
            logging.error(f"ERR, context=whole_loop, error={str(error)}")
            if isinstance(error, KeyboardInterrupt):
                print("gracefully exiting")

    def task(self, loop):
        # Submit one printer() run onto the bot's event loop from a thread.
        asyncio.run_coroutine_threadsafe(self.printer(), loop)

    def task_creator(self, loop, delay=5):
        """Endless scheduler: every `delay` seconds spawn a daemon thread
        that re-submits printer() to the event loop."""
        print("starting task creator")
        while True:
            thread = Thread(
                target=self.task,
                args=(loop, ),
                daemon=True,
            )
            thread.start()
            time.sleep(delay)

    def create_task_creator(self, loop):
        "launch background daemon process"
        thread = Thread(
            target=self.task_creator,
            args=(loop, ),
            daemon=True,
        )
        thread.start()

    @printer.before_loop
    async def before_printer(self):
        # Runs once before the first printer() execution: start the
        # background scheduler, then wait for the bot to be ready.
        print('waiting...')
        self.create_task_creator(asyncio.get_running_loop())
        await self.bot.wait_until_ready()
4f57c9a375f9390d8eda45711f1d8f3c5ab13554 | 349 | py | Python | Python/FromUniversity/sqlite3/select.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | null | null | null | Python/FromUniversity/sqlite3/select.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | null | null | null | Python/FromUniversity/sqlite3/select.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | 1 | 2021-09-16T14:24:29.000Z | 2021-09-16T14:24:29.000Z | import sqlite3 as slt
""" fetchone - tek tek alır. fetchmany - belirtilen sayı kadar alır. """
db = slt.connect("user.db")
print(db.cursor().execute("SELECT * FROM USERPASSWORDS").fetchall())
#print(db.cursor().execute("SELECT * FROM USERNAMES").fetchmany(2))
#print(db.cursor().execute("SELECT * FROM USERNAMES").fetchone())
db.commit();db.close()
| 43.625 | 72 | 0.710602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.696023 |
4f5a68bc57b03b9960b5f5024d7514c44569ccf0 | 18,957 | py | Python | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/joinprune/joinprune.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/joinprune/joinprune.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null | RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/joinprune/joinprune.py | ralfjon/IxNetwork | c0c834fbc465af69c12fd6b7cee4628baba7fff1 | [
"MIT"
] | null | null | null |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class JoinPrune(Base):
"""The JoinPrune class encapsulates a user managed joinPrune node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the JoinPrune property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server using the find method.
The internal properties list can be managed by the user by using the add and remove methods.
"""
_SDM_NAME = 'joinPrune'
def __init__(self, parent):
super(JoinPrune, self).__init__(parent)
@property
def LearnedMgrState(self):
"""An instance of the LearnedMgrState class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm.router.interface.joinprune.learnedmgrstate.learnedmgrstate.LearnedMgrState)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm.router.interface.joinprune.learnedmgrstate.learnedmgrstate import LearnedMgrState
return LearnedMgrState(self)
@property
def DiscardRegisterStates(self):
"""If checked, the Learned Join States sent by the RP (DUT) in response to this specific Register Message will be discarded - and will not be displayed in the table of the Register Range window.
Returns:
bool
"""
return self._get_attribute('discardRegisterStates')
@DiscardRegisterStates.setter
def DiscardRegisterStates(self, value):
self._set_attribute('discardRegisterStates', value)
@property
def Enabled(self):
"""Enables the use of this join/prune.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def EnabledDataMdt(self):
"""If enabled, pimsmLearnedDataMdt will be available. (default = disabled)
Returns:
bool
"""
return self._get_attribute('enabledDataMdt')
@EnabledDataMdt.setter
def EnabledDataMdt(self, value):
self._set_attribute('enabledDataMdt', value)
@property
def FlapEnabled(self):
"""Enables emulated flapping of this multicast group range. NOTE: Flapping is not supported for the Switchover (*, G) -> (S, G) range type.
Returns:
bool
"""
return self._get_attribute('flapEnabled')
@FlapEnabled.setter
def FlapEnabled(self, value):
self._set_attribute('flapEnabled', value)
@property
def FlapInterval(self):
"""Defines the join/prune flapping interval.
Returns:
number
"""
return self._get_attribute('flapInterval')
@FlapInterval.setter
def FlapInterval(self, value):
self._set_attribute('flapInterval', value)
@property
def GroupAddress(self):
"""An IPv4 or IPv6 address used with the group mask to create a range of multicast addresses.
Returns:
str
"""
return self._get_attribute('groupAddress')
@GroupAddress.setter
def GroupAddress(self, value):
self._set_attribute('groupAddress', value)
@property
def GroupCount(self):
"""The number of multicast group addresses to be included in the multicast group range. The maximum number of valid possible addresses depends on the values for the group address and the group mask width.
Returns:
number
"""
return self._get_attribute('groupCount')
@GroupCount.setter
def GroupCount(self, value):
self._set_attribute('groupCount', value)
@property
def GroupMappingMode(self):
"""Sets the type of mapping that occurs when routes are advertised. This only applies for (S, G) and switchover types for MGR and is meaningful for RR.
Returns:
str(fullyMeshed|oneToOne)
"""
return self._get_attribute('groupMappingMode')
@GroupMappingMode.setter
def GroupMappingMode(self, value):
self._set_attribute('groupMappingMode', value)
@property
def GroupMaskWidth(self):
"""The number of bits in the mask applied to the group address. (The masked bits in the group address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type.
Returns:
number
"""
return self._get_attribute('groupMaskWidth')
@GroupMaskWidth.setter
def GroupMaskWidth(self, value):
self._set_attribute('groupMaskWidth', value)
    @property
    def GroupRange(self):
        """The multicast group range type.

        Returns:
            str(rp|g|sg|sptSwitchOver|registerTriggeredSg)
        """
        return self._get_attribute('groupRange')
    @GroupRange.setter
    def GroupRange(self, value):
        """Set the 'groupRange' attribute."""
        self._set_attribute('groupRange', value)
    @property
    def NumRegToReceivePerSg(self):
        """If rangeType is set to pimsmJoinsPrunesTypeRegisterTriggeredSG, then this is the count of register messages received that will trigger transmission of a (S,G) message. (default = 10)

        Returns:
            number
        """
        return self._get_attribute('numRegToReceivePerSg')
    @NumRegToReceivePerSg.setter
    def NumRegToReceivePerSg(self, value):
        """Set the 'numRegToReceivePerSg' attribute."""
        self._set_attribute('numRegToReceivePerSg', value)
    @property
    def PackGroupsEnabled(self):
        """If enabled, multiple groups can be included within a single packet.

        Returns:
            bool
        """
        return self._get_attribute('packGroupsEnabled')
    @PackGroupsEnabled.setter
    def PackGroupsEnabled(self, value):
        """Set the 'packGroupsEnabled' attribute."""
        self._set_attribute('packGroupsEnabled', value)
    @property
    def PruneSourceAddress(self):
        """ONLY used for (*,G) Type to send (S,G,rpt) Prune Messages. (Multicast addresses are invalid.)

        Returns:
            str
        """
        return self._get_attribute('pruneSourceAddress')
    @PruneSourceAddress.setter
    def PruneSourceAddress(self, value):
        """Set the 'pruneSourceAddress' attribute."""
        self._set_attribute('pruneSourceAddress', value)
    @property
    def PruneSourceCount(self):
        """The number of prune source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width. The default value is 0. ONLY used for (*,G) type to send (S,G,rpt) prune messages.

        Returns:
            number
        """
        return self._get_attribute('pruneSourceCount')
    @PruneSourceCount.setter
    def PruneSourceCount(self, value):
        """Set the 'pruneSourceCount' attribute."""
        self._set_attribute('pruneSourceCount', value)
    @property
    def PruneSourceMaskWidth(self):
        """The number of bits in the mask applied to the prune source address. (The masked bits in the prune source address form the address prefix.)

        Returns:
            number
        """
        return self._get_attribute('pruneSourceMaskWidth')
    @PruneSourceMaskWidth.setter
    def PruneSourceMaskWidth(self, value):
        """Set the 'pruneSourceMaskWidth' attribute."""
        self._set_attribute('pruneSourceMaskWidth', value)
    @property
    def RpAddress(self):
        """The IP address of the Rendezvous Point (RP) router.

        Returns:
            str
        """
        return self._get_attribute('rpAddress')
    @RpAddress.setter
    def RpAddress(self, value):
        """Set the 'rpAddress' attribute."""
        self._set_attribute('rpAddress', value)
    @property
    def SourceAddress(self):
        """The Multicast Source Address. Used for (S,G) Type and (S,G, rpt) only. (Multicast addresses are invalid.)

        Returns:
            str
        """
        return self._get_attribute('sourceAddress')
    @SourceAddress.setter
    def SourceAddress(self, value):
        """Set the 'sourceAddress' attribute."""
        self._set_attribute('sourceAddress', value)
    @property
    def SourceCount(self):
        """The number of multicast source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width.

        Returns:
            number
        """
        return self._get_attribute('sourceCount')
    @SourceCount.setter
    def SourceCount(self, value):
        """Set the 'sourceCount' attribute."""
        self._set_attribute('sourceCount', value)
    @property
    def SourceMaskWidth(self):
        """The number of bits in the mask applied to the source address. (The masked bits in the source address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type. Used for (S,G) Type and (S,G, rpt) only.

        Returns:
            number
        """
        return self._get_attribute('sourceMaskWidth')
    @SourceMaskWidth.setter
    def SourceMaskWidth(self, value):
        """Set the 'sourceMaskWidth' attribute."""
        self._set_attribute('sourceMaskWidth', value)
    @property
    def SptSwitchoverInterval(self):
        """The time interval (in seconds) allowed for the switch from using the RP tree to using a Source-specific tree - from (*,G) to (S,G). The default value is 0.

        Returns:
            number
        """
        return self._get_attribute('sptSwitchoverInterval')
    @SptSwitchoverInterval.setter
    def SptSwitchoverInterval(self, value):
        """Set the 'sptSwitchoverInterval' attribute."""
        self._set_attribute('sptSwitchoverInterval', value)
    def add(self, DiscardRegisterStates=None, Enabled=None, EnabledDataMdt=None, FlapEnabled=None, FlapInterval=None, GroupAddress=None, GroupCount=None, GroupMappingMode=None, GroupMaskWidth=None, GroupRange=None, NumRegToReceivePerSg=None, PackGroupsEnabled=None, PruneSourceAddress=None, PruneSourceCount=None, PruneSourceMaskWidth=None, RpAddress=None, SourceAddress=None, SourceCount=None, SourceMaskWidth=None, SptSwitchoverInterval=None):
        """Adds a new joinPrune node on the server and retrieves it in this instance.

        Args:
            DiscardRegisterStates (bool): If checked, the Learned Join States sent by the RP (DUT) in response to this specific Register Message will be discarded - and will not be displayed in the table of the Register Range window.
            Enabled (bool): Enables the use of this join/prune.
            EnabledDataMdt (bool): If enabled, pimsmLearnedDataMdt will be available. (default = disabled)
            FlapEnabled (bool): Enables emulated flapping of this multicast group range. NOTE: Flapping is not supported for the Switchover (*, G) -> (S, G) range type.
            FlapInterval (number): Defines the join/prune flapping interval.
            GroupAddress (str): An IPv4 or IPv6 address used with the group mask to create a range of multicast addresses.
            GroupCount (number): The number of multicast group addresses to be included in the multicast group range. The maximum number of valid possible addresses depends on the values for the group address and the group mask width.
            GroupMappingMode (str(fullyMeshed|oneToOne)): Sets the type of mapping that occurs when routes are advertised. This only applies for (S, G) and switchover types for MGR and is meaningful for RR.
            GroupMaskWidth (number): The number of bits in the mask applied to the group address. (The masked bits in the group address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type.
            GroupRange (str(rp|g|sg|sptSwitchOver|registerTriggeredSg)): The multicast group range type.
            NumRegToReceivePerSg (number): If rangeType is set to pimsmJoinsPrunesTypeRegisterTriggeredSG, then this is the count of register messages received that will trigger transmission of a (S,G) message. (default = 10)
            PackGroupsEnabled (bool): If enabled, multiple groups can be included within a single packet.
            PruneSourceAddress (str): ONLY used for (*,G) Type to send (S,G,rpt) Prune Messages. (Multicast addresses are invalid.)
            PruneSourceCount (number): The number of prune source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width. The default value is 0. ONLY used for (*,G) type to send (S,G,rpt) prune messages.
            PruneSourceMaskWidth (number): The number of bits in the mask applied to the prune source address. (The masked bits in the prune source address form the address prefix.)
            RpAddress (str): The IP address of the Rendezvous Point (RP) router.
            SourceAddress (str): The Multicast Source Address. Used for (S,G) Type and (S,G, rpt) only. (Multicast addresses are invalid.)
            SourceCount (number): The number of multicast source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width.
            SourceMaskWidth (number): The number of bits in the mask applied to the source address. (The masked bits in the source address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type. Used for (S,G) Type and (S,G, rpt) only.
            SptSwitchoverInterval (number): The time interval (in seconds) allowed for the switch from using the RP tree to using a Source-specific tree - from (*,G) to (S,G). The default value is 0.

        Returns:
            self: This instance with all currently retrieved joinPrune data using find and the newly added joinPrune data available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() bundles every keyword argument above (plus self) into the
        # creation payload in one shot; _create() is expected to ignore self.
        return self._create(locals())
    def remove(self):
        """Deletes all the joinPrune data in this instance from server.

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Server-side delete only; nothing is returned to the caller.
        self._delete()
    def find(self, DiscardRegisterStates=None, Enabled=None, EnabledDataMdt=None, FlapEnabled=None, FlapInterval=None, GroupAddress=None, GroupCount=None, GroupMappingMode=None, GroupMaskWidth=None, GroupRange=None, NumRegToReceivePerSg=None, PackGroupsEnabled=None, PruneSourceAddress=None, PruneSourceCount=None, PruneSourceMaskWidth=None, RpAddress=None, SourceAddress=None, SourceCount=None, SourceMaskWidth=None, SptSwitchoverInterval=None):
        """Finds and retrieves joinPrune data from the server.

        All named parameters support regex and can be used to selectively retrieve joinPrune data from the server.
        By default the find method takes no parameters and will retrieve all joinPrune data from the server.

        Args:
            DiscardRegisterStates (bool): If checked, the Learned Join States sent by the RP (DUT) in response to this specific Register Message will be discarded - and will not be displayed in the table of the Register Range window.
            Enabled (bool): Enables the use of this join/prune.
            EnabledDataMdt (bool): If enabled, pimsmLearnedDataMdt will be available. (default = disabled)
            FlapEnabled (bool): Enables emulated flapping of this multicast group range. NOTE: Flapping is not supported for the Switchover (*, G) -> (S, G) range type.
            FlapInterval (number): Defines the join/prune flapping interval.
            GroupAddress (str): An IPv4 or IPv6 address used with the group mask to create a range of multicast addresses.
            GroupCount (number): The number of multicast group addresses to be included in the multicast group range. The maximum number of valid possible addresses depends on the values for the group address and the group mask width.
            GroupMappingMode (str(fullyMeshed|oneToOne)): Sets the type of mapping that occurs when routes are advertised. This only applies for (S, G) and switchover types for MGR and is meaningful for RR.
            GroupMaskWidth (number): The number of bits in the mask applied to the group address. (The masked bits in the group address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type.
            GroupRange (str(rp|g|sg|sptSwitchOver|registerTriggeredSg)): The multicast group range type.
            NumRegToReceivePerSg (number): If rangeType is set to pimsmJoinsPrunesTypeRegisterTriggeredSG, then this is the count of register messages received that will trigger transmission of a (S,G) message. (default = 10)
            PackGroupsEnabled (bool): If enabled, multiple groups can be included within a single packet.
            PruneSourceAddress (str): ONLY used for (*,G) Type to send (S,G,rpt) Prune Messages. (Multicast addresses are invalid.)
            PruneSourceCount (number): The number of prune source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width. The default value is 0. ONLY used for (*,G) type to send (S,G,rpt) prune messages.
            PruneSourceMaskWidth (number): The number of bits in the mask applied to the prune source address. (The masked bits in the prune source address form the address prefix.)
            RpAddress (str): The IP address of the Rendezvous Point (RP) router.
            SourceAddress (str): The Multicast Source Address. Used for (S,G) Type and (S,G, rpt) only. (Multicast addresses are invalid.)
            SourceCount (number): The number of multicast source addresses to be included. The maximum number of valid possible addresses depends on the values for the source address and the source mask width.
            SourceMaskWidth (number): The number of bits in the mask applied to the source address. (The masked bits in the source address form the address prefix.)The default value is 32. The valid range is 1 to 128, depending on address family type. Used for (S,G) Type and (S,G, rpt) only.
            SptSwitchoverInterval (number): The time interval (in seconds) allowed for the switch from using the RP tree to using a Source-specific tree - from (*,G) to (S,G). The default value is 0.

        Returns:
            self: This instance with matching joinPrune data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() forwards all keyword filters (plus self) as the selection
        # criteria; _select() is expected to ignore self.
        return self._select(locals())
    def read(self, href):
        """Retrieves a single instance of joinPrune data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the joinPrune data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Direct fetch by href, bypassing the attribute-based find().
        return self._read(href)
| 49.238961 | 444 | 0.752862 | 17,744 | 0.936013 | 0 | 0 | 7,897 | 0.416574 | 0 | 0 | 13,996 | 0.738302 |
4f5ab25c387c3616ec8ab3954a5dcc3b31c1386c | 2,022 | py | Python | tools/terminal.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 21 | 2017-10-08T23:19:47.000Z | 2020-01-16T20:02:08.000Z | tools/terminal.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 6 | 2020-06-03T05:30:52.000Z | 2022-01-13T00:44:26.000Z | tools/terminal.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 9 | 2017-10-09T22:58:31.000Z | 2021-11-20T15:20:18.000Z | # -*- coding: utf-8 -*-
import os
import sys
import time
import random
from subprocess import Popen, PIPE
from django.utils import termcolors
TYPING_SPEED = 50
def simulate_command_type(commands, shell=False):
    """Animate a '&&'-chained command line with fake keystrokes.

    Splits *commands* on '&& ' and replays each fragment through
    simulate_type, skipping fragments that start with 'source'.
    """
    for fragment in commands.split('&& '):
        if fragment.startswith('source'):
            continue
        simulate_type(fragment, shell=shell)
def simulate_type(command, shell=False, prompt='breno@localhost: ~$ '):
    """Print *command* one character at a time, as if typed at a shell prompt.

    Args:
        command: text to animate.
        shell: accepted for backward compatibility; currently unused.
        prompt: prompt text written before the command. Generalized from the
            previously hard-coded value; the default keeps the old output.
    """
    sys.stdout.write(prompt)
    for char in command:
        sys.stdout.write(char)
        sys.stdout.flush()
        # Random per-character delay so the "typing" looks human.
        time.sleep(random.random() * 10.0 / TYPING_SPEED)
    print('')
def bold(text):
    """Return *text* rendered bold black via Django's termcolors helper."""
    style = termcolors.make_style(fg='black', opts=('bold',))
    return style(text)
def info(text):
    """Return *text* rendered cyan via Django's termcolors helper."""
    style = termcolors.make_style(fg='cyan')
    return style(text)
def error(text):
    """Return *text* rendered bold red via Django's termcolors helper."""
    style = termcolors.make_style(fg='red', opts=('bold',))
    return style(text)
class Terminal(object):
    """Helper that replays shell commands with a typing animation.

    Attributes:
        proccess: last osascript subprocess (name kept, including the
            original typo, for backward compatibility).
        verbose: when True, the command is animated before being run.
        python: interpreter name substituted for a leading 'python'.
    """
    def __init__(self, verbose=True, python='python'):
        self.proccess = None
        self.verbose = verbose
        self.python = python
    def execute(self, command, clear=True, base_dir=None):
        """Run *command* through os.system, optionally animating it first."""
        if clear:
            os.system('clear')
            simulate_type('', shell=True)
        if self.verbose:
            simulate_command_type(command, shell=True)
        if command.startswith('python'):
            # BUGFIX: str.replace returns a new string; the previous code
            # discarded it, so the configured interpreter was never used.
            # Replace only the leading occurrence.
            command = self.python + command[len('python'):]
        if base_dir:
            command = 'cd {} && {}'.format(base_dir, command)
        if not self.verbose:
            command = '{} > /dev/null'.format(command)
        os.system(command)
    def show(self, visible=True):
        """On macOS, (un)minimize the Terminal window via AppleScript."""
        if os.path.exists('/usr/bin/osascript'):
            minimize_terminal_script = '''
            tell application "Terminal"
                set miniaturized of window 1 to {}
            end tell
            '''.format('false' if visible else 'true')
            self.proccess = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            self.proccess.communicate(minimize_terminal_script.encode())
    def hide(self):
        """Minimize the Terminal window (show(False))."""
        self.show(False)
| 27.69863 | 91 | 0.610287 | 1,200 | 0.593472 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.168645 |
4f5c130fd9494ec38edf458c7749a89304b4df07 | 8,012 | py | Python | tests/unit/local/lambdafn/test_config.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 859 | 2020-08-25T03:53:17.000Z | 2022-03-31T12:33:07.000Z | tests/unit/local/lambdafn/test_config.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 1,369 | 2020-08-25T10:57:03.000Z | 2022-03-31T23:00:25.000Z | tests/unit/local/lambdafn/test_config.py | torresxb1/aws-sam-cli | d307f2eb6e1a91a476a5e2ca6070f974b0c913f1 | [
"BSD-2-Clause",
"Apache-2.0"
] | 275 | 2020-08-25T19:33:50.000Z | 2022-03-26T08:32:52.000Z | from unittest import TestCase
from unittest.mock import Mock
from parameterized import parameterized
from samcli.lib.utils.packagetype import ZIP
from samcli.local.lambdafn.config import FunctionConfig
from samcli.commands.local.cli_common.user_exceptions import InvalidSamTemplateException
class TestFunctionConfig(TestCase):
    """Construction tests for FunctionConfig with a ZIP package type.

    The three previously duplicated 13-argument constructor calls and
    assertion blocks are factored into _make_config/_assert_static_fields.
    """

    DEFAULT_MEMORY = 128
    DEFAULT_TIMEOUT = 3

    def setUp(self):
        # Common fixture values shared by every test in this class.
        self.name = "name"
        self.runtime = "runtime"
        self.handler = "handler"
        self.imageuri = None
        self.imageconfig = None
        self.packagetype = ZIP
        self.code_path = "codepath"
        self.memory = 1234
        self.timeout = 34
        self.env_vars_mock = Mock()
        self.layers = ["layer1"]
        self.architecture = "arm64"

    def _make_config(self, **kwargs):
        """Build a FunctionConfig with the fixture's standard positional args."""
        return FunctionConfig(
            self.name,
            self.runtime,
            self.handler,
            self.imageuri,
            self.imageconfig,
            self.packagetype,
            self.code_path,
            self.layers,
            self.architecture,
            **kwargs
        )

    def _assert_static_fields(self, config):
        """Assert the fields that are identical in every construction case."""
        self.assertEqual(config.name, self.name)
        self.assertEqual(config.runtime, self.runtime)
        self.assertEqual(config.handler, self.handler)
        self.assertEqual(config.imageuri, self.imageuri)
        self.assertEqual(config.imageconfig, self.imageconfig)
        self.assertEqual(config.packagetype, self.packagetype)
        self.assertEqual(config.code_abs_path, self.code_path)
        self.assertEqual(config.layers, self.layers)

    def test_init_with_env_vars(self):
        config = self._make_config(
            memory=self.memory, timeout=self.timeout, env_vars=self.env_vars_mock
        )
        self._assert_static_fields(config)
        self.assertEqual(config.memory, self.memory)
        self.assertEqual(config.timeout, self.timeout)
        self.assertEqual(config.env_vars, self.env_vars_mock)
        # FunctionConfig must propagate these onto the env-vars object.
        self.assertEqual(self.env_vars_mock.handler, self.handler)
        self.assertEqual(self.env_vars_mock.memory, self.memory)
        self.assertEqual(self.env_vars_mock.timeout, self.timeout)

    def test_init_without_optional_values(self):
        config = self._make_config()
        self._assert_static_fields(config)
        self.assertEqual(config.memory, self.DEFAULT_MEMORY)
        self.assertEqual(config.timeout, self.DEFAULT_TIMEOUT)
        self.assertIsNotNone(config.env_vars)
        self.assertEqual(config.env_vars.handler, self.handler)
        self.assertEqual(config.env_vars.memory, self.DEFAULT_MEMORY)
        self.assertEqual(config.env_vars.timeout, self.DEFAULT_TIMEOUT)

    def test_init_with_timeout_of_int_string(self):
        # A numeric string timeout must be coerced to an int.
        config = self._make_config(
            memory=self.memory, timeout="34", env_vars=self.env_vars_mock
        )
        self._assert_static_fields(config)
        self.assertEqual(config.memory, self.memory)
        self.assertEqual(config.timeout, 34)
        self.assertEqual(config.env_vars, self.env_vars_mock)
        self.assertEqual(self.env_vars_mock.handler, self.handler)
        self.assertEqual(self.env_vars_mock.memory, self.memory)
        self.assertEqual(self.env_vars_mock.timeout, 34)
class TestFunctionConfigInvalidTimeouts(TestCase):
    """FunctionConfig must reject timeout values that are not whole numbers."""

    def setUp(self):
        self.name = "name"
        self.runtime = "runtime"
        self.handler = "handler"
        self.imageuri = None
        self.imageconfig = None
        self.packagetype = ZIP
        self.code_path = "codepath"
        self.memory = 1234
        self.env_vars_mock = Mock()
        self.layers = ["layer1"]
        self.architecture = "x86_64"

    @parameterized.expand(
        [
            ("none int string",),
            ({"dictionary": "is not a string either"},),
            ("/local/lambda/timeout",),
            ("3.2",),
            ("4.2",),
            ("0.123",),
        ]
    )
    def test_init_with_invalid_timeout_values(self, timeout):
        with self.assertRaises(InvalidSamTemplateException):
            # FIX: positional args now follow the same order used everywhere
            # else in this module (handler before imageuri, imageconfig before
            # packagetype); the old ordering silently passed swapped values.
            FunctionConfig(
                self.name,
                self.runtime,
                self.handler,
                self.imageuri,
                self.imageconfig,
                self.packagetype,
                self.code_path,
                self.layers,
                self.architecture,
                memory=self.memory,
                timeout=timeout,
                env_vars=self.env_vars_mock,
            )
class TestFunctionConfig_equals(TestCase):
    """Equality of FunctionConfig objects that differ only in name."""

    DEFAULT_MEMORY = 128
    DEFAULT_TIMEOUT = 3

    def setUp(self):
        self.name = "name"
        self.name2 = "name2"
        self.runtime = "runtime"
        self.handler = "handler"
        self.imageuri = None
        self.imageconfig = None
        self.packagetype = ZIP
        self.code_path = "codepath"
        self.memory = 1234
        self.timeout = 34
        self.env_vars_mock = Mock()
        self.layers = ["layer1"]
        self.architecture = "arm64"

    def _make_config(self, name):
        """Build a FunctionConfig whose only varying field is *name*."""
        return FunctionConfig(
            name,
            self.runtime,
            self.handler,
            self.imageuri,
            self.imageconfig,
            self.packagetype,
            self.code_path,
            self.layers,
            self.architecture,
            memory=self.memory,
            timeout=self.timeout,
            env_vars=self.env_vars_mock,
        )

    def test_equals_function_config(self):
        # Same name, same everything -> equal.
        config1 = self._make_config(self.name)
        config2 = self._make_config(self.name)
        self.assertTrue(config1 == config2)

    def test_not_equals_function_config(self):
        # Different name -> not equal.
        config1 = self._make_config(self.name)
        config2 = self._make_config(self.name2)
        self.assertTrue(config1 != config2)
| 31.920319 | 88 | 0.594109 | 7,710 | 0.962307 | 0 | 0 | 808 | 0.100849 | 0 | 0 | 252 | 0.031453 |
4f5c288f9e0f093a2231fc27d8381b1980cd1226 | 566 | py | Python | pronotepy/ent/test_ent.py | Bapt5/pronotepy | 8787d3f6b3d3062e598c74a40df4a3ebf759201b | [
"MIT"
] | null | null | null | pronotepy/ent/test_ent.py | Bapt5/pronotepy | 8787d3f6b3d3062e598c74a40df4a3ebf759201b | [
"MIT"
] | null | null | null | pronotepy/ent/test_ent.py | Bapt5/pronotepy | 8787d3f6b3d3062e598c74a40df4a3ebf759201b | [
"MIT"
] | 3 | 2022-03-11T20:51:23.000Z | 2022-03-31T20:03:13.000Z | import unittest
from inspect import getmembers, isfunction
from functools import partial
import pronotepy
from pronotepy import ent
class TestENT(unittest.TestCase):
    """Every ENT login helper must raise ENTLoginError for bogus credentials."""

    functions: list

    @classmethod
    def setUpClass(cls) -> None:
        def _is_ent_callable(member) -> bool:
            # ENT helpers are plain functions or functools.partial objects.
            return isfunction(member) or isinstance(member, partial)

        cls.functions = getmembers(ent, _is_ent_callable)

    def test_functions(self) -> None:
        for _name, function in self.functions:
            self.assertRaises(
                pronotepy.ENTLoginError, function, "username", "password"
            )
if __name__ == "__main__":
    # Allow running this test module directly: python test_ent.py
    unittest.main()
| 21.769231 | 87 | 0.680212 | 380 | 0.671378 | 0 | 0 | 158 | 0.279152 | 0 | 0 | 30 | 0.053004 |
4f5cb8549eb68a8c6eb2d4195211dbccec0f0258 | 214 | py | Python | examples/lolcode_rockstar.py | hoojaoh/rockstar | 2cb911be76fc93692c180d629f0b282d672ea8f7 | [
"MIT"
] | 4,603 | 2015-07-16T20:11:28.000Z | 2022-03-21T23:51:47.000Z | examples/lolcode_rockstar.py | hoojaoh/rockstar | 2cb911be76fc93692c180d629f0b282d672ea8f7 | [
"MIT"
] | 90 | 2015-07-18T11:51:33.000Z | 2021-05-10T02:45:58.000Z | examples/lolcode_rockstar.py | hoojaoh/rockstar | 2cb911be76fc93692c180d629f0b282d672ea8f7 | [
"MIT"
] | 436 | 2015-07-16T22:10:50.000Z | 2022-02-15T04:53:19.000Z | from rockstar import RockStar
# LOLCODE "Hello, World!" program used as the generated file's content.
lolcode_code = """HAI
CAN HAS STDIO?
VISIBLE "HAI WORLD!"
KTHXBYE"""
# Hand the snippet to the rockstar library (400 days, file helloworld.lol)
# and invoke its commit-history generator.
rock_it_bro = RockStar(days=400, file_name='helloworld.lol', code=lolcode_code)
rock_it_bro.make_me_a_rockstar()
| 21.4 | 79 | 0.780374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.32243 |
4f5f6798bc7cf3af00213c930680cdbb57013598 | 2,046 | py | Python | src/tests/integration/test_profile_admin_view.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 17 | 2019-05-11T22:15:34.000Z | 2022-03-26T22:45:33.000Z | src/tests/integration/test_profile_admin_view.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 390 | 2019-05-23T10:48:57.000Z | 2021-12-17T21:01:43.000Z | src/tests/integration/test_profile_admin_view.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
] | 40 | 2019-05-21T14:41:57.000Z | 2021-01-30T13:39:38.000Z | import humps
import pytest
from django import test
from django.contrib.auth.models import User
from django.urls import reverse
def test_profile_updates_correctly(
profile_admin_client: test.Client, user: User, update_profile_params
):
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = profile_admin_client.patch(url, humps.camelize(update_profile_params))
assert res.status_code == 200
user.refresh_from_db()
profile = user.profile
for key, val in update_profile_params.items():
assert getattr(profile, key) == val
@pytest.mark.parametrize(
argnames="method, status",
argvalues=[("get", 400), ("put", 400), ("post", 405), ("patch", 400)],
)
def test_requires_query_param(
profile_admin_client: test.Client, method: str, status: int
):
request_method = getattr(profile_admin_client, method)
url = f"{reverse('admin_update_profile')}"
res = request_method(url)
assert res.status_code == status
def test_missing_profile_returns_404(profile_admin_client: test.Client):
url = f"{reverse('admin_update_profile')}?email=abc"
res = profile_admin_client.get(url)
assert res.status_code == 404
@pytest.mark.parametrize(
argnames="method, status", argvalues=[("get", 200), ("post", 405), ("patch", 200)]
)
def test_staff_user_has_access(
authed_admin_client: test.Client, user: User, method: str, status: int
):
request_method = getattr(authed_admin_client, method)
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = request_method(url)
assert res.status_code == status
@pytest.mark.parametrize(
argnames="method, status",
argvalues=[("get", 403), ("put", 403), ("post", 405), ("patch", 403)],
)
def test_view_requires_profile_admin_group(
authed_client: test.Client, user: User, method: str, status: int
):
request_method = getattr(authed_client, method)
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = request_method(url)
assert res.status_code == status
| 29.652174 | 86 | 0.711144 | 0 | 0 | 0 | 0 | 1,256 | 0.613881 | 0 | 0 | 359 | 0.175464 |
4f5fad3d63a59d5dec2ec21bc023ae1c47bc19d2 | 371 | py | Python | Scripts/console/script.py | sporiyano/graphiti | 56d85e5262b76dc6ce4f213a4d80486e015de1b7 | [
"BSD-2-Clause"
] | 93 | 2015-01-01T17:49:53.000Z | 2022-02-24T21:25:15.000Z | Scripts/console/script.py | sporiyano/graphiti | 56d85e5262b76dc6ce4f213a4d80486e015de1b7 | [
"BSD-2-Clause"
] | 13 | 2015-03-30T18:01:05.000Z | 2018-05-28T03:47:33.000Z | Scripts/console/script.py | ThibaultReuille/graphiti | 56d85e5262b76dc6ce4f213a4d80486e015de1b7 | [
"BSD-2-Clause"
] | 31 | 2015-01-14T12:16:13.000Z | 2022-02-24T21:25:16.000Z | from Scripts import graphiti as og
from Scripts import std
from Scripts import nx
import sys
import argparse
import os.path
import glob
import fnmatch
import itertools
import random
import math
import json
class Script(object):
def __init__(self, console):
self.console = console
def run(self, args):
self.console.log("Error: run() method not implemented!") | 14.84 | 58 | 0.770889 | 158 | 0.425876 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.102426 |
4f5ff30ca61fd37831eb16f637a4b5ec27844a14 | 6,857 | py | Python | fca/algorithms/exploration/examples/bi_unars.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | 3 | 2015-09-07T00:16:16.000Z | 2019-01-11T20:27:56.000Z | fca/algorithms/exploration/examples/bi_unars.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | fca/algorithms/exploration/examples/bi_unars.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
import copy
import itertools
import fca
from fca.algorithms.exploration.exploration import (AttributeExploration,
ExplorationDB)
from fca import Implication
# from fca.algorithms.closure_operators import simple_closure as closure
class BiUnar(object):
def __init__(self, f, g):
self.f = f
self.g = g
def __str__(self):
return '%s%s' % (self.f, self.g)
def automorphic_copy(self):
return BiUnar(self.g, self.f)
class Term(object):
def __init__(self, name, function):
self._name = name
self._function = function
def __call__(self, bu):
return self._function(bu)
def __str__(self):
return self._name
class Equality(object):
def __init__(self, left_term, right_term):
self.left = left_term
self.right = right_term
def __call__(self, bu):
return self.left(bu) == self.right(bu)
def __str__(self):
return "%s = %s" % (self.left, self.right)
def automorphic_copy(self):
return Equality(get_symmetric_term(self.left),
get_symmetric_term(self.right))
#
# def __eq__(self, eq):
# return ((self.left == eq.left and self.right == eq.right) or
# (self.left == eq.right and self.right == eq.left))
#
# def __ne__(self, eq):
# return not self == eq
#
# def __hash__(self):
# if self.left < self.right:
# return hash((self.left, self.right))
# else:
# return hash((self.right, self.left))
class CommandLineExpert(object):
    """Interactive console expert for attribute exploration.

    NOTE: Python 2 code (`print` statements; `input` evaluates the typed
    expression, so the user literally types True/False or tuple literals).
    """
    def is_valid(self, imp):
        # Show the implication, then let the user accept or reject it.
        print "{0}".format(imp)
        return input('Is the following implication valid? Enter "True" or "False": {0}'.format(imp))
    def explore(self, exploration):
        # Keep asking while open implications remain; a confirmed implication
        # is added together with its f<->g symmetric counterpart so the
        # implication set stays closed under the automorphism.
        while exploration.get_open_implications():
            imp = exploration.get_open_implications()[0]
            if self.is_valid(imp):
                exploration.confirm_implications([imp,
                                                  get_symmetric_implication(imp)])
            else:
                exploration.reject_implication(imp)
    def provide_counterexample(self, imp):
        """Ask the user for a bi-unar counterexample to *imp*.

        Returns ((example, automorphic copy), (intents, symmetric intents)),
        keeping the example's orbit closed under the f<->g swap.
        """
        print 'Provide a counterexample by typing in two tuples.'
        bu = BiUnar(input('f: '), input('g: '))
        if input('Add as a partial example? Enter "True" or "False": '):
            intent = generate_partial_counterexample(imp, bu)
        else:
            intent = [bu_intent(bu)] * 2  # since our context is partial
        return ((bu, bu.automorphic_copy()),
                (intent, [get_symmetric_attribute_set(s) for s in intent]))
def compose(f, g):
    """Compose two self-maps given as 1-indexed tuples: apply g, then f."""
    return tuple(f[index - 1] for index in g)
def generate_examples(n):
    """Yield every bi-unar on {1..n}: all ordered pairs of self-maps (f, g)."""
    self_maps = itertools.product(range(1, n + 1), repeat=n)
    return (BiUnar(f_map, g_map)
            for f_map, g_map in itertools.product(self_maps, repeat=2))
def bu_intent(bu):
    """Return the set of (global) attributes that hold on the bi-unar *bu*."""
    return {a for a in attributes if a(bu)}
def generate_partial_counterexample(imp, bu):
    """Return (certain, possible) attribute sets for *bu* w.r.t. *imp*.

    Only attributes mentioned in the implication are evaluated; every other
    attribute remains merely "possible" in the partial context.
    """
    checked = imp.premise | imp.conclusion
    certain = set(a for a in checked if a(bu))
    possible = certain | (set(attributes) - checked)
    return certain, possible
def find_counter_example(n, imp):
    """Return the first bi-unar on {1..n} whose intent violates *imp* (or None)."""
    return next((bu for bu in generate_examples(n)
                 if not imp.is_respected(bu_intent(bu))), None)
def generate_context(n, attributes):
    """Build a partial formal context over all bi-unars on {1..n}."""
    objects = list(generate_examples(n))
    table = [[attribute(obj) for attribute in attributes] for obj in objects]
    # PartialContext takes two copies of the table (certain / possible).
    cxt = fca.partial_context.PartialContext(table,
                                             copy.deepcopy(table),
                                             objects,
                                             attributes)
    # TODO: reduce cxt
    return cxt
def generate_background_implications(attributes):
    """Implications that hold a priori between equality attributes.

    For equalities x, y, z (each relating two term objects via .left/.right,
    compared by identity), {x, y} -> {z} is background knowledge whenever z
    follows from x and y by transitivity.  The three disjuncts below match
    the three ways the shared term can sit on the left or right side.
    """
    return [Implication(set([x, y]), set([z]))
            for x in attributes
            for y in attributes
            for z in attributes
            if (x.left == z.left and      # x: a=b, y: b=c  =>  z: a=c
                x.right == y.left and
                y.right == z.right) or
               (x.left == z.left and     # x: a=b, y: c=b  =>  z: a=c
                x.right == y.right and
                y.left == z.right) or
               (x.left == y.left and     # x: a=b, y: a=c  =>  z: b=c
                x.right == z.left and
                y.right == z.right)
            ]
def is_orbit_maximal(eqs):
    """Return True iff *eqs* is the maximal set in its automorphism orbit.

    Compares *eqs* with its f<->g swapped image attribute by attribute, in
    the order of the global `attributes` list; the first attribute contained
    in one set but not the other decides.  Equal sets count as maximal.
    """
    symmetric_eqs = get_symmetric_attribute_set(eqs)
    for a in attributes:
        if a in eqs:
            if a not in symmetric_eqs:
                return True
        elif a in symmetric_eqs:
            return False
    return True
def get_symmetric_attribute_set(eqs):
    """Map a set of equality attributes through the f<->g automorphism.

    For each equality, both terms are swapped; the (left, right) pair is
    normalised by the global `terms` order, and the canonical Equality
    instance is looked up in the global `attributes` list (which only
    contains pairs with index(left) < index(right)).
    """
    symmetric_eqs = set([])
    for e in eqs:
        left = get_symmetric_term(e.left)
        right = get_symmetric_term(e.right)
        if terms.index(left) > terms.index(right):
            left, right = right, left
        for a in attributes:
            if a.left == left and a.right == right:
                symmetric_eqs.add(a)
                break
    return symmetric_eqs
def get_symmetric_term(term):
    """Return the global term whose name is *term*'s name with f and g swapped."""
    swapped = ''.join('g' if ch == 'f' else ('f' if ch == 'g' else ch)
                      for ch in str(term))
    for candidate in terms:
        if str(candidate) == swapped:
            return candidate
def get_symmetric_equality(e):
    """Find the canonical attribute equal (by name) to e's automorphic copy."""
    target = str(e.automorphic_copy())
    return next((a for a in attributes if str(a) == target), None)
def get_symmetric_implication(imp):
    """Return *imp* with premise and conclusion mapped to their symmetric sets."""
    premise = get_symmetric_attribute_set(imp.premise)
    conclusion = get_symmetric_attribute_set(imp.conclusion)
    return Implication(premise, conclusion)
# The term algebra: identity plus all compositions of f and g up to length 2.
# Each Term carries an evaluator mapping a bi-unar to the resulting self-map.
terms = [
    Term('id', lambda bu: tuple(range(1, len(bu.f) + 1))),
    Term('f', lambda bu: bu.f),
    Term('g', lambda bu: bu.g),
    Term('ff', lambda bu: compose(bu.f, bu.f)),
    Term('fg', lambda bu: compose(bu.f, bu.g)),
    Term('gf', lambda bu: compose(bu.g, bu.f)),
    Term('gg', lambda bu: compose(bu.g, bu.g))
]
# One attribute per unordered pair of distinct terms (canonical order i < j).
attributes = [Equality(terms[i], terms[j]) for i in range(len(terms))
              for j in range(i + 1, len(terms))]
# Exploration database seeded with size-3 examples, composition background
# knowledge, and the orbit-maximality pruning predicate.
db = ExplorationDB(generate_context(3, attributes),
                   generate_background_implications(attributes),
                   is_orbit_maximal)
expert = CommandLineExpert()
exploration = AttributeExploration(db, expert)
# expert.explore(exploration)
| 31.027149 | 100 | 0.530407 | 2,120 | 0.309173 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.123523 |
4f61cbde4e5bed88e3b050cf3c9d906546413d2f | 3,267 | py | Python | test/Uds-Config-Tool/Profiling/test_profilingTestsOfDecodeMethods.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 62 | 2019-02-13T20:26:12.000Z | 2022-02-23T19:47:34.000Z | test/Uds-Config-Tool/Profiling/test_profilingTestsOfDecodeMethods.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 58 | 2018-07-09T10:58:33.000Z | 2022-01-31T20:27:13.000Z | test/Uds-Config-Tool/Profiling/test_profilingTestsOfDecodeMethods.py | J3rome/python-uds | fe0f7a9505cb7b87f693ab736d713d7871dff288 | [
"MIT"
] | 33 | 2019-03-25T07:30:34.000Z | 2022-03-08T12:55:35.000Z | #!/usr/bin/env python
# Module metadata for the python-uds project.
__author__ = "Richard Clubb"
__copyrights__ = "Copyright 2018, the python-uds project"
__credits__ = ["Richard Clubb"]
__license__ = "MIT"
__maintainer__ = "Richard Clubb"
__email__ = "richard.clubb@embeduk.com"
__status__ = "Development"
import cProfile
import sys
from functools import reduce
# ----------------------------------------------------------------
# Profiler Code
# ----------------------------------------------------------------
def do_cprofile(func):
    """Decorator: run *func* under cProfile and print its stats after each call.

    The wrapped function's return value is passed through unchanged; stats
    are printed even when *func* raises.
    """
    from functools import wraps  # local import keeps this change self-contained

    @wraps(func)  # preserve the wrapped function's __name__/__doc__
    def profiled_func(*args, **kwargs):
        profile = cProfile.Profile()
        try:
            profile.enable()
            result = func(*args, **kwargs)
            profile.disable()
            return result
        finally:
            profile.print_stats()
    return profiled_func
# ----------------------------------------------------------------
# buildIntFromList Tests
# ----------------------------------------------------------------
@do_cprofile
def buildIntFromListNonRecursiveFunc(aList):
    """Profile the iterative (positional shift-and-add) big-endian int builder."""
    def buildIntFromList(values):
        # Intentionally left naive (len() re-evaluated each pass): this is
        # the implementation being benchmarked.
        total = 0
        for position, byte in enumerate(values):
            total += byte << (8 * (len(values) - (position + 1)))
        return total
    return buildIntFromList(aList)
@do_cprofile
def buildIntFromListRecursiveFunc(aList):
    """Profile the recursive (head/tail slicing) big-endian int builder."""
    def buildIntFromList(values):
        if len(values) == 1:
            return values[0]
        head_shift = 8 * (len(values) - 1)
        return (values[0] << head_shift) + buildIntFromList(values[1:])
    return buildIntFromList(aList)
@do_cprofile
def buildIntFromListReduceFunc(aList):
    """Profile the functools.reduce big-endian int builder."""
    def buildIntFromList(values):
        return reduce(lambda acc, byte: (acc << 8) + byte, values)
    return buildIntFromList(aList)
# ----------------------------------------------------------------
# byteListToString Tests
# ----------------------------------------------------------------
@do_cprofile
def byteListToStringNonRecursiveFunc(aList):
    """Profile the iterative string concatenation implementation."""
    def byteListToString(values):
        # Intentionally naive repeated += concatenation — the cost of this
        # pattern is exactly what the benchmark measures; do not replace
        # with str.join().
        text = ""
        for code in values:
            text += chr(code)
        return text
    return byteListToString(aList)
@do_cprofile
def byteListToStringRecursiveFunc(aList):
    """Profile the recursive (head/tail slicing) string builder."""
    def byteListToString(values):
        if len(values) == 1:
            return chr(values[0])
        return chr(values[0]) + byteListToString(values[1:])
    return byteListToString(aList)
@do_cprofile
def byteListToStringReduceFunc(aList):
    """Profile the reduce-over-map string builder."""
    def byteListToString(values):
        # list() kept deliberately: materializing the map is part of the
        # measured implementation.
        return reduce(lambda acc, piece: acc + piece, list(map(chr, values)))
    return byteListToString(aList)
if __name__ == "__main__":
    # The recursive variants consume one stack frame per element, so a
    # 2500-element input needs a limit comfortably above the default 1000.
    sys.setrecursionlimit(4000)

    testListA = [0x5a] * 2500  # arbitrary byte value for the int builders
    testListB = [0x30] * 2500  # ASCII '0' for the string builders

    # All three implementations must agree on the result.
    print("Testing the buildIntFromList methods")
    resultA = buildIntFromListNonRecursiveFunc(testListA)
    resultB = buildIntFromListRecursiveFunc(testListA)
    resultC = buildIntFromListReduceFunc(testListA)
    assert(resultA == resultB == resultC)

    print("Testing the byteListToString methods")
    resultA = byteListToStringNonRecursiveFunc(testListB)
    resultB = byteListToStringRecursiveFunc(testListB)
    resultC = byteListToStringReduceFunc(testListB)
    assert (resultA == resultB == resultC)
| 26.560976 | 86 | 0.582798 | 0 | 0 | 0 | 0 | 1,388 | 0.424855 | 0 | 0 | 698 | 0.213652 |
4f62f46e41b738f931ac8f3b73de56c9e3e4c4fe | 7,474 | py | Python | server/app/api/v2.py | chanzuckerberg/single-cell-explorer | 51402e8befeca61311e6bd7a4127fa24b9f6e7be | [
"MIT"
] | 2 | 2021-08-30T16:32:16.000Z | 2022-03-25T22:36:23.000Z | server/app/api/v2.py | chanzuckerberg/single-cell-explorer | 51402e8befeca61311e6bd7a4127fa24b9f6e7be | [
"MIT"
] | 194 | 2021-08-18T23:52:44.000Z | 2022-03-30T19:40:41.000Z | server/app/api/v2.py | chanzuckerberg/single-cell-explorer | 51402e8befeca61311e6bd7a4127fa24b9f6e7be | [
"MIT"
] | 1 | 2022-01-21T09:20:15.000Z | 2022-01-21T09:20:15.000Z | import logging
from functools import wraps
from flask import (
current_app,
Blueprint,
request,
send_from_directory,
redirect,
)
from flask_restful import Api, Resource
import server.common.rest as common_rest
from server.app.api import ONE_WEEK, cache_control
from server.app.api.util import get_dataset_artifact_s3_uri, get_data_adaptor
from server.common.errors import (
DatasetAccessError,
DatasetNotFoundError,
DatasetMetadataError,
TombstoneError,
)
def rest_get_data_adaptor(func):
    """Decorator for DatasetResource handler methods.

    Resolves the request's dataset name to an S3 URI, loads a data adaptor
    for it, and invokes ``func(self, data_adaptor)``.  Known dataset errors
    become HTTP aborts; tombstoned datasets redirect to the parent
    collection page.
    """
    @wraps(func)
    def wrapped_function(self, dataset=None):
        try:
            s3_uri = get_dataset_artifact_s3_uri(self.url_dataroot, dataset)
            data_adaptor = get_data_adaptor(s3_uri, app_config=current_app.app_config)
            # HACK: Used *only* to pass the dataset_explorer_location to DatasetMeta.get_dataset_and_collection_
            # metadata()
            data_adaptor.dataset_id = dataset
            return func(self, data_adaptor)
        except (DatasetAccessError, DatasetNotFoundError, DatasetMetadataError) as e:
            return common_rest.abort_and_log(
                e.status_code, f"Invalid s3_uri {dataset}: {e.message}", loglevel=logging.INFO, include_exc_info=True
            )
        except TombstoneError as e:
            # Tombstoned dataset: send the client to the collection page,
            # flagging which dataset was tombstoned.
            parent_collection_url = (
                f"{current_app.app_config.server_config.get_web_base_url()}/collections/{e.collection_id}"  # noqa E501
            )
            return redirect(f"{parent_collection_url}?tombstoned_dataset_id={e.dataset_id}")

    return wrapped_function
class DatasetResource(Resource):
    """Base class for all Resources that act on datasets."""

    def __init__(self, url_dataroot):
        # url_dataroot: base URL segment of the dataroot this resource
        # serves; consumed by rest_get_data_adaptor to resolve datasets.
        super().__init__()
        self.url_dataroot = url_dataroot
class SchemaAPI(DatasetResource):
    """Serves the dataset schema (long-lived, publicly cacheable)."""

    # TODO @mdunitz separate dataset schema and user schema
    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.schema_get(data_adaptor)
# TODO: This is being used by v3 API as well. It must always be provided the
# dataset_explorer_location, and never the dataset s3_uri. Move to app/app.py?
class DatasetMetadataAPI(DatasetResource):
    """Serves dataset + collection metadata (explicitly never cached)."""

    @cache_control(public=True, no_store=True, max_age=0)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.dataset_metadata_get(current_app.app_config, self.url_dataroot, data_adaptor.dataset_id)
class ConfigAPI(DatasetResource):
    """Serves the client configuration for a dataset."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.config_get(current_app.app_config, data_adaptor)
class AnnotationsObsAPI(DatasetResource):
    """Read (GET, cacheable) and write (PUT, no-store) obs-axis annotations."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.annotations_obs_get(request, data_adaptor)

    @cache_control(no_store=True)
    @rest_get_data_adaptor
    def put(self, data_adaptor):
        return common_rest.annotations_obs_put(request, data_adaptor)
class AnnotationsVarAPI(DatasetResource):
    """Read-only var-axis annotations (cacheable)."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.annotations_var_get(request, data_adaptor)
class DataVarAPI(DatasetResource):
    """Read (GET, cacheable) and write (PUT, no-store) var-dimension data."""

    @cache_control(no_store=True)
    @rest_get_data_adaptor
    def put(self, data_adaptor):
        return common_rest.data_var_put(request, data_adaptor)

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.data_var_get(request, data_adaptor)
class ColorsAPI(DatasetResource):
    """Serves the color table for categorical display (cacheable)."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.colors_get(data_adaptor)
class DiffExpObsAPI(DatasetResource):
    """Differential expression computation over obs selections (never cached)."""

    @cache_control(no_store=True)
    @rest_get_data_adaptor
    def post(self, data_adaptor):
        return common_rest.diffexp_obs_post(request, data_adaptor)
class LayoutObsAPI(DatasetResource):
    """Serves precomputed obs embeddings/layouts (cacheable)."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.layout_obs_get(request, data_adaptor)
class GenesetsAPI(DatasetResource):
    """Serves gene set definitions for the dataset (cacheable)."""

    @cache_control(public=True, max_age=ONE_WEEK)
    @rest_get_data_adaptor
    def get(self, data_adaptor):
        return common_rest.genesets_get(request, data_adaptor)
class SummarizeVarAPI(DatasetResource):
    """Var summarization: GET (cacheable) and POST (for large requests)."""

    # NOTE(review): decorator order is inverted relative to every other
    # resource in this module (rest_get_data_adaptor outermost instead of
    # cache_control). Confirm whether this difference is intentional.
    @rest_get_data_adaptor
    @cache_control(public=True, max_age=ONE_WEEK)
    def get(self, data_adaptor):
        return common_rest.summarize_var_get(request, data_adaptor)

    @rest_get_data_adaptor
    @cache_control(no_store=True)
    def post(self, data_adaptor):
        return common_rest.summarize_var_post(request, data_adaptor)
def get_api_dataroot_resources(bp_dataroot, url_dataroot=None):
    """Add resources that refer to a dataset.

    Registers every dataset-scoped endpoint on *bp_dataroot* and returns the
    configured flask_restful Api. *url_dataroot* is forwarded to each
    resource's constructor (see DatasetResource).
    """
    api = Api(bp_dataroot)

    def add_resource(resource, url):
        """convenience function to make the outer function less verbose"""
        api.add_resource(resource, url, resource_class_args=(url_dataroot,))

    # Initialization routes
    add_resource(SchemaAPI, "/schema")
    add_resource(DatasetMetadataAPI, "/dataset-metadata")
    add_resource(ConfigAPI, "/config")
    # Data routes
    add_resource(AnnotationsObsAPI, "/annotations/obs")
    add_resource(AnnotationsVarAPI, "/annotations/var")
    add_resource(DataVarAPI, "/data/var")
    add_resource(GenesetsAPI, "/genesets")
    add_resource(SummarizeVarAPI, "/summarize/var")
    # Display routes
    add_resource(ColorsAPI, "/colors")
    # Computation routes
    add_resource(DiffExpObsAPI, "/diffexp/obs")
    add_resource(LayoutObsAPI, "/layout/obs")
    return api
def register_api_v2(app, app_config, server_config, api_url_prefix):
    """Register the v0.2 API blueprints and static-asset routes on *app*.

    Multi-dataset deployments get one blueprint per configured dataroot
    (URL shape: <prefix>/<dataroot>/<dataset>/api/v0.2/...); single-dataset
    deployments get a single blueprint under <prefix>/api/v0.2.
    """
    api_version = "/api/v0.2"
    if app_config.is_multi_dataset():

        # NOTE: These routes only allow the dataset to be in the directory
        # of the dataroot, and not a subdirectory. We may want to change
        # the route format at some point
        for dataroot_dict in server_config.multi_dataset__dataroot.values():
            url_dataroot = dataroot_dict["base_url"]
            bp_dataroot = Blueprint(
                name=f"api_dataset_{url_dataroot}_{api_version}",
                import_name=__name__,
                url_prefix=(f"{api_url_prefix}/{url_dataroot}/<dataset>" + api_version).replace("//", "/"),
            )
            dataroot_resources = get_api_dataroot_resources(bp_dataroot, url_dataroot)
            app.register_blueprint(dataroot_resources.blueprint)
            # Per-dataroot static assets (shared static dir on disk).
            app.add_url_rule(
                f"/{url_dataroot}/<string:dataset>/static/<path:filename>",
                f"static_assets_{url_dataroot}",
                view_func=lambda dataset, filename: send_from_directory("../common/web/static", filename),
                methods=["GET"],
            )
    else:
        bp_api = Blueprint("api", __name__, url_prefix=f"{api_url_prefix}{api_version}")
        resources = get_api_dataroot_resources(bp_api)
        app.register_blueprint(resources.blueprint)
        app.add_url_rule(
            "/static/<path:filename>",
            "static_assets",
            view_func=lambda filename: send_from_directory("../common/web/static", filename),
            methods=["GET"],
        )
| 36.10628 | 119 | 0.713139 | 3,117 | 0.417046 | 0 | 0 | 3,408 | 0.455981 | 0 | 0 | 1,408 | 0.188386 |
4f63016bb9522a154445cec3ae6be11a7a1c7cfa | 8,649 | py | Python | marathon_acme/tests/test_cli.py | praekeltfoundation/marathon-acme | b1b71e3dde0ba30e575089280658bd32890e3325 | [
"MIT"
] | 8 | 2016-12-15T14:49:17.000Z | 2019-03-11T19:52:22.000Z | marathon_acme/tests/test_cli.py | praekeltfoundation/marathon-acme | b1b71e3dde0ba30e575089280658bd32890e3325 | [
"MIT"
] | 129 | 2016-05-27T11:41:14.000Z | 2018-10-19T15:02:06.000Z | marathon_acme/tests/test_cli.py | praekeltfoundation/marathon-acme | b1b71e3dde0ba30e575089280658bd32890e3325 | [
"MIT"
] | 3 | 2016-12-15T19:47:18.000Z | 2017-04-10T20:41:46.000Z | import os
from fixtures import TempDir
from testtools import ExpectedException, TestCase, run_test_with
from testtools.assertions import assert_that
from testtools.matchers import (
Contains, DirExists, Equals, FileContains, FileExists, MatchesStructure)
from testtools.twistedsupport import (
AsynchronousDeferredRunTest, flush_logged_errors)
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.error import CannotListenError, ConnectionRefusedError
from txacme.urls import LETSENCRYPT_STAGING_DIRECTORY
from marathon_acme.cli import init_storage_dir, main, parse_listen_addr
# Make sure we always use the Let's Encrypt Staging endpoint for these tests
# Make sure we always use the Let's Encrypt Staging endpoint for these tests
def main_t(reactor, **kwargs):
    """Run main() pinned to the Let's Encrypt staging directory."""
    return main(
        reactor,
        acme_url=LETSENCRYPT_STAGING_DIRECTORY.asText(),
        argv=kwargs.get('argv', []),
        env=kwargs.get('env', {}),
    )
class TestCli(TestCase):
    """End-to-end tests of the marathon-acme CLI entry point."""

    # These are testtools-style tests so we can run asynchronous tests
    def test_storage_dir_required(self):
        """
        When the program is run with no arguments, it should exit with code 2
        because there is one required argument.
        """
        with ExpectedException(SystemExit, MatchesStructure(code=Equals(2))):
            main_t(reactor, argv=[])

    @inlineCallbacks
    @run_test_with(AsynchronousDeferredRunTest.make_factory(timeout=10.0))
    def test_storage_dir_provided(self):
        """
        When the program is run with an argument, it should start up and run.
        The program is expected to fail because it is unable to connect to
        Marathon.

        This test takes a while because we have to let txacme go through its
        initial sync (registration + issuing of 0 certificates) before things
        can be halted.
        """
        temp_dir = self.useFixture(TempDir())
        yield main_t(reactor, argv=[
            temp_dir.path,
            '--acme', LETSENCRYPT_STAGING_DIRECTORY.asText(),
            '--marathon', 'http://localhost:28080'  # An address we can't reach
        ])

        # Expect a 'certs' directory to be created
        self.assertThat(os.path.isdir(temp_dir.join('certs')), Equals(True))

        # Expect an 'unmanaged-certs' directory to be created
        self.assertThat(
            os.path.isdir(temp_dir.join('unmanaged-certs')), Equals(True))

        # Expect a default certificate to be created
        self.assertThat(os.path.isfile(temp_dir.join('default.pem')),
                        Equals(True))

        # Expect to be unable to connect to Marathon
        flush_logged_errors(ConnectionRefusedError)

    @inlineCallbacks
    @run_test_with(AsynchronousDeferredRunTest.make_factory(timeout=5.0))
    def test_storage_dir_provided_vault(self):
        """
        When the program is run with an argument and the --vault option, it
        should start up and run. The program is expected to fail because it is
        unable to connect to Vault.

        Unlike the above test, this crashes immediately and returns because we
        never actually start up txacme or marathon-acme if we can't get/store
        an ACME client key.
        """
        with ExpectedException(ConnectionRefusedError,
                               r'Connection was refused by other side'):
            yield main_t(
                reactor,
                env={
                    # An address we can't reach
                    'VAULT_ADDR': 'http://localhost:28080'
                },
                argv=[
                    'secret',
                    '--vault',
                    '--acme', LETSENCRYPT_STAGING_DIRECTORY.asText(),
                ]
            )

        flush_logged_errors(ConnectionRefusedError)

    @inlineCallbacks
    @run_test_with(AsynchronousDeferredRunTest.make_factory(timeout=5.0))
    def test_cannot_listen(self):
        """
        When the program is run with an argument and a listen address specified
        with an address that we can't listen on (e.g. 1.1.1.1), a
        CannotListenError is expected to be logged and the program should stop.
        """
        temp_dir = self.useFixture(TempDir())
        yield main_t(reactor, argv=[
            temp_dir.path,
            '--listen', '1.1.1.1:8080',  # An address we can't listen on
        ])

        # Expect a 'certs' directory to be created
        self.assertThat(os.path.isdir(temp_dir.join('certs')), Equals(True))

        # Expect a default certificate to be created
        self.assertThat(os.path.isfile(temp_dir.join('default.pem')),
                        Equals(True))

        # Expect to be unable to listen
        flush_logged_errors(CannotListenError)
class TestParseListenAddr(object):
    """Unit tests for parse_listen_addr ('[ipaddress]:port' -> endpoint str)."""

    def test_parse_no_colon(self):
        """
        When a listen address is parsed with no ':' character, an error is
        raised.
        """
        with ExpectedException(
                ValueError,
                r"'foobar' does not have the correct form for a listen address: "
                r'\[ipaddress\]:port'):
            parse_listen_addr('foobar')

    def test_parse_no_ip_address(self):
        """
        When a listen address is parsed with no IP address, an endpoint
        description with the listen address's port but no interface is
        returned.
        """
        assert_that(parse_listen_addr(':8080'), Equals('tcp:8080'))

    def test_parse_ipv4(self):
        """
        When a listen address is parsed with an IPv4 address, an appropriate
        interface is present in the returned endpoint description.
        """
        assert_that(parse_listen_addr('127.0.0.1:8080'),
                    Equals('tcp:8080:interface=127.0.0.1'))

    def test_parse_ipv6(self):
        """
        When a listen address is parsed with an IPv6 address, an appropriate
        interface is present in the returned endpoint description.
        """
        assert_that(parse_listen_addr('[::]:8080'),
                    Equals('tcp6:8080:interface=\\:\\:'))

    def test_parse_invalid_ipaddress(self):
        """
        When a listen address is parsed with an invalid IP address, an error
        is raised.
        """
        with ExpectedException(
                ValueError,
                r"u?'hello' does not appear to be an IPv4 or IPv6 address"):
            parse_listen_addr('hello:8080')

    def test_parse_invalid_port(self):
        """
        When a listen address is parsed with an invalid port, an error is
        raised.
        """
        with ExpectedException(
                ValueError,
                r"'foo' does not appear to be a valid port number"):
            parse_listen_addr(':foo')

        with ExpectedException(
                ValueError,
                r"'0' does not appear to be a valid port number"):
            parse_listen_addr(':0')

        with ExpectedException(
                ValueError,
                r"'65536' does not appear to be a valid port number"):
            parse_listen_addr(':65536')

        with ExpectedException(
                ValueError,
                r"'' does not appear to be a valid port number"):
            parse_listen_addr(':')
class TestInitStorageDir(object):
    """pytest-style tests for init_storage_dir (uses the tmpdir fixture)."""

    def test_files_created_if_not_exist(self, tmpdir):
        """
        When the certificate directory does not contain a 'default.pem' file
        and a 'certs' directory, calling init_storage_dir() should create a
        'default.pem' file with x509 certificate data and create a 'certs'
        directory.
        """
        init_storage_dir(str(tmpdir))

        assert_that(str(tmpdir.join('default.pem')), FileExists())
        # Check that this *looks* like a x509 cert
        assert_that(str(tmpdir.join('default.pem')),
                    FileContains(matcher=Contains(
                        '-----BEGIN RSA PRIVATE KEY-----')))

        assert_that(str(tmpdir.join('certs')), DirExists())

    def test_files_not_created_if_exist(self, tmpdir):
        """
        When the certificate directory does contain a 'default.pem' file
        and a 'certs' directory, calling init_storage_dir() should not attempt
        to create those files.
        """
        tmpdir.join('default.pem').write('blah')
        tmpdir.join('certs').mkdir()
        tmpdir.join('unmanaged-certs').mkdir()

        init_storage_dir(str(tmpdir))

        assert_that(str(tmpdir.join('default.pem')), FileExists())
        # Check that the file hasn't changed
        assert_that(str(tmpdir.join('default.pem')), FileContains('blah'))

        assert_that(str(tmpdir.join('certs')), DirExists())
| 36.961538 | 79 | 0.623772 | 7,706 | 0.89097 | 3,095 | 0.357845 | 3,381 | 0.390912 | 0 | 0 | 3,888 | 0.449532 |
4f639ad546becd053f95f292acd5d75f4f3f1148 | 1,270 | py | Python | Lexical_Semantics/Repositories/Word2Vec_and_Fasttext/word2vec-and-fasttext.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | Lexical_Semantics/Repositories/Word2Vec_and_Fasttext/word2vec-and-fasttext.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | Lexical_Semantics/Repositories/Word2Vec_and_Fasttext/word2vec-and-fasttext.py | MWTA/Text-Mining | d64250ed9f7d8f999bb925ec01c041062b1f4145 | [
"MIT"
] | null | null | null | import numpy as np
import os
import re
import urllib.request
import zipfile
import lxml.etree
from gensim.models import FastText
from random import shuffle
# Download the TED talk subtitles corpus (XML release) used as training text.
urllib.request.urlretrieve(
    "https://wit3.fbk.eu/get.php?path=XML_releases/xml/ted_en-20160408.zip&filename=ted_en-20160408.zip", filename="ted_en-20160408.zip")
# extract subtitle
with zipfile.ZipFile('ted_en-20160408.zip', 'r') as z:
    doc = lxml.etree.parse(z.open('ted_en-20160408.xml', 'r'))
input_text = '\n'.join(doc.xpath('//content/text()'))

# Remove parenthesised asides such as "(Laughter)" / "(Applause)".
input_text_noparens = re.sub(r'\([^)]*\)', '', input_text)

# Split into sentences; an optional short "Speaker:" prefix (<= 20 chars
# before a colon) is stripped, keeping only the text after the colon.
sentences_strings_ted = []
for line in input_text_noparens.split('\n'):
    m = re.match(r'^(?:(?P<precolon>[^:]{,20}):)?(?P<postcolon>.*)$', line)
    sentences_strings_ted.extend(sent for sent in m.groupdict()[
        'postcolon'].split('.') if sent)

# Tokenize: lowercase, collapse non-alphanumerics to spaces, split.
sentences_ted = []
for sent_str in sentences_strings_ted:
    tokens = re.sub(r"[^a-z0-9]+", " ", sent_str.lower()).split()
    sentences_ted.append(tokens)

# Train a FastText skip-gram model (sg=1) on the tokenized sentences.
model_ted = FastText(sentences_ted, size=100, window=5,
                     min_count=5, workers=4, sg=1)

# NOTE(review): the corpus was lowercased above, so "Gastroenteritis" is not
# an in-vocabulary token; FastText answers via character n-grams. Confirm
# this out-of-vocabulary query is intentional.
model_ted.wv.most_similar("Gastroenteritis")
| 32.564103 | 137 | 0.685827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.333858 |
4f63a170f475f21430256af0098ee8caf3ec6b78 | 2,453 | py | Python | Source Code/Python API/multivent_client.py | D-TACQ/acq400_lv | 684e06a294ceb865511a7568e5038b209bdc3374 | [
"MIT"
] | null | null | null | Source Code/Python API/multivent_client.py | D-TACQ/acq400_lv | 684e06a294ceb865511a7568e5038b209bdc3374 | [
"MIT"
] | 2 | 2018-04-23T16:37:19.000Z | 2018-07-11T10:51:19.000Z | Source Code/Python API/multivent_client.py | D-TACQ/acq400_lv | 684e06a294ceb865511a7568e5038b209bdc3374 | [
"MIT"
] | 3 | 2018-04-20T11:53:29.000Z | 2018-04-25T15:25:55.000Z | #!/usr/local/bin/python
# UUT is running continuous pre/post snapshots
# subscribe to the snapshots and save all the data.
import threading
import epics
import argparse
import time
import datetime
import os
NCHAN = 16
# WF record, raw binary (shorts)
WFNAME = ":1:AI:WF:{:02d}"
# alt WF record, VOLTS. Kindof harder to store this in a portable way..
#WFNAME = ":1:AI:WF:{:02d}:V.VALA""
#1:AI:WF:08:V.VALA
class Uut:
    """Monitors one UUT's waveform PVs over Channel Access and writes each
    snapshot to a kst/dirfile-compatible directory tree."""

    # Default output root; multivent() overwrites this per instance.
    root = "DATA"

    def make_file_name(self, upcount):
        """Build <root>/YYYY/MM/DD/HH/MM/<upcount:06d> for the current time."""
        timecode = datetime.datetime.now().strftime("%Y/%m/%d/%H/%M/")
        return self.root+"/"+timecode +"{:06d}".format(upcount)

    def store_format(self, path):
        # created a kst / dirfile compatible format file
        fp = open(path+"/format", "w")
        fp.write ("# format file {}\n".format(path))
        # TODO enter start sample from event sample count
        fp.write ("START_SAMPLE CONST UINT32 0\n")
        fp.writelines(["CH{:02d} RAW s 1\n".format(ch) for ch in range(1,NCHAN+1)])
        fp.close()

    def on_update(self, **kws):
        """PV callback: fetch every channel and write one snapshot directory."""
        self.upcount = self.upcount + 1
        fn = self.make_file_name(self.upcount)
        print(fn)
        if not os.path.isdir(fn):
            os.makedirs(fn)
        for ch in range(1, NCHAN+1):
            yy = self.channels[ch-1].get()
            # raw int16 samples, one flat file per channel (matches 'RAW s 1')
            yy.astype('int16').tofile(fn+"/CH{:02d}".format(ch))
        self.store_format(fn)
        print("{} {}".format(kws['pvname'], kws['value']))
        print(self.channels[1])

    def monitor(self):
        """Create the per-channel PVs and subscribe to the UPDATES counter."""
        self.channels = [epics.PV(self.name+WFNAME.format(ch)) for ch in range(1, NCHAN+1)]
        # NOTE(review): `updates` is only a local; if pyepics does not retain
        # this PV internally it could be garbage-collected and the
        # subscription lost — confirm, or keep it on self.
        updates = epics.PV(self.name + ":1:AI:WF:01:UPDATES", auto_monitor=True, callback=self.on_update)

    def __init__(self, _name):
        self.name = _name
        self.upcount = 0
        # Runs monitor() off the main thread so construction returns quickly.
        threading.Thread(target=self.monitor).start()
def multivent(parser):
    """Start one Uut monitor per requested name, then block forever.

    The list is kept alive for the life of the process so the monitors'
    instances are never garbage-collected.
    """
    uut_list = [Uut(name) for name in parser.uuts]
    for uut in uut_list:
        uut.root = parser.root
    while True:
        time.sleep(0.5)
def run_main():
    """Parse the command line and start the multivent capture loop."""
    arg_parser = argparse.ArgumentParser(description='acq400 multivent')
    arg_parser.add_argument('--root', type=str, default="DATA", help="output root path")
    arg_parser.add_argument('uuts', nargs='+', help="uut names")
    args = arg_parser.parse_args()
    multivent(args)


# execution starts here
if __name__ == '__main__':
    run_main()
| 31.448718 | 105 | 0.593151 | 1,526 | 0.622095 | 0 | 0 | 0 | 0 | 0 | 0 | 671 | 0.273543 |
4f646c6d02363e90aa2470f4984748ec24fa0ff2 | 749 | py | Python | converter.py | almehady/Convert-Bijoy-to-Unicode-File | 0d3c5032c157d52c8153fbcc760e9644a5df8b5d | [
"MIT"
] | 1 | 2021-07-12T07:47:20.000Z | 2021-07-12T07:47:20.000Z | converter.py | almehady/Convert-Bijoy-to-Unicode-File | 0d3c5032c157d52c8153fbcc760e9644a5df8b5d | [
"MIT"
] | null | null | null | converter.py | almehady/Convert-Bijoy-to-Unicode-File | 0d3c5032c157d52c8153fbcc760e9644a5df8b5d | [
"MIT"
] | null | null | null | from bijoy2unicode import converter
from langdetect import detect
import unicodedata
def isLineEmpty(line):
    """Return True when *line* contains only whitespace (or nothing)."""
    return line.strip() == ""
# Convert Bijoy-encoded Bangla text to Unicode, copying English lines as-is.
# Context managers guarantee both files are closed even if a line fails.
# read_file = ["16. Òm~h©`xNj "]
with open("bijoy.txt", "r") as read_file, open('unicode.txt', 'w') as write_file:
    # The converter is stateless across lines, so construct it once
    # instead of per line as before.
    bijoy_converter = converter.Unicode()
    for line in read_file:
        if isLineEmpty(line):
            # Empty lines are skipped entirely (not written), as before.
            continue
        # Normalize to NFKD and drop characters that cannot round-trip UTF-8.
        nData = unicodedata.normalize('NFKD', line).encode('UTF-8', 'ignore')
        new_line = nData.decode()
        # Lines detected as English pass through unchanged; everything else
        # is assumed to be Bijoy-encoded Bangla and converted.
        eng_text_checker = detect(new_line)
        if eng_text_checker != 'en':
            toPrint = bijoy_converter.convertBijoyToUnicode(new_line)
        else:
            toPrint = new_line
        write_file.write(toPrint)
| 25.827586 | 77 | 0.643525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.118509 |
4f64e638900b111178c25bd88e02cdd0ab944ded | 5,904 | py | Python | django_client_framework/models/abstract/serializable.py | kaleido-public/django-client-framework | cd755261e001a0d446a85407550648563511f61b | [
"MIT"
] | null | null | null | django_client_framework/models/abstract/serializable.py | kaleido-public/django-client-framework | cd755261e001a0d446a85407550648563511f61b | [
"MIT"
] | 3 | 2021-06-28T20:36:39.000Z | 2021-11-11T02:12:35.000Z | django_client_framework/models/abstract/serializable.py | kaleido-public/django-client-framework | cd755261e001a0d446a85407550648563511f61b | [
"MIT"
] | null | null | null | from __future__ import annotations
from abc import abstractmethod
from logging import getLogger
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
Optional,
Type,
TypeVar,
cast,
)
from django.conf import settings
from django.core.cache import cache
from django.db import models as m
from .model import DCFModel, IDCFModel, __implements__
LOG = getLogger(__name__)
if TYPE_CHECKING:
from ...serializers.serializer import DCFSerializer
D = TypeVar("D")
T = TypeVar("T", bound="ISerializable")
class ISerializable(IDCFModel[DCFModel], Generic[T, D]):
    """Interface for models serializable to a representation of type D.

    T is the concrete model type; D is the serialized (wire) representation.
    """

    def to_serializable(self) -> Serializable[T, D]:
        # Pure type-level cast: implementors are expected to also mix in
        # Serializable, so no runtime conversion happens here.
        return cast(Serializable[T, D], self)

    @classmethod
    @abstractmethod
    def get_serializer_class(
        cls, *, version: str, context: Dict[str, Any]
    ) -> Type[DCFSerializer[T, D]]:
        """Return the serializer class for the given API version/context."""
        ...

    @abstractmethod
    def get_serializer(
        self, *, version: str, context: Dict[str, Any], **kwargs: Any
    ) -> DCFSerializer[T, D]:
        """Return a serializer instance bound to this object."""
        ...

    @abstractmethod
    def json(
        self,
        *,
        version: str,
        context: Dict[str, Any] = {},
        serializer: Optional[DCFSerializer[T, D]] = None,
        ignore_cache: bool = False,
    ) -> D:
        """Serialize, possibly via a cache (see implementations)."""
        ...

    @abstractmethod
    def get_json(
        self,
        *,
        version: str,
        context: Dict[str, Any] = {},
        serializer: Optional[DCFSerializer[T, D]] = None,
    ) -> D:
        """Serialize, always bypassing any cache."""
        ...
class Serializable(__implements__, ISerializable[T, D]):
    """Default ISerializable implementation with optional response caching.

    Subclasses must override :meth:`get_serializer_class`; everything else
    has a working default. Caching is disabled while
    :meth:`get_cache_timeout` returns 0 (the default).
    """

    @classmethod
    def get_serializer_class(
        cls, *, version: str, context: Dict[str, Any]
    ) -> Type[DCFSerializer[T, D]]:
        """Subclass hook: return the serializer class to use."""
        raise NotImplementedError(
            f"{cls} must implement .get_serializer_class(version, context)"
        )

    def get_serializer(
        self, *, version: str, context: Dict[str, Any], **kwargs: Any
    ) -> DCFSerializer[T, D]:
        """Instantiate the serializer class bound to this instance."""
        return self.get_serializer_class(version=version, context=context)(
            instance=self, **kwargs
        )

    def json(
        self,
        *,
        version: str,
        context: Dict[str, Any] = {},
        serializer: Optional[DCFSerializer[T, D]] = None,
        ignore_cache: bool = False,
    ) -> D:
        """Serialize this instance, consulting the cache unless disabled.

        The cache is bypassed when *ignore_cache* is set or the timeout is 0.
        """
        if ignore_cache or self.get_cache_timeout() == 0:
            return self.get_json(
                version=version,
                context=context,
                serializer=serializer,
            )
        return self.cached_json(
            version=version,
            context=context,
            serializer=serializer,
        )

    def get_json(
        self: T,
        *,
        version: str,
        context: Dict[str, Any] = {},
        serializer: Optional[DCFSerializer[T, D]] = None,
    ) -> D:
        """Serialize unconditionally (no cache)."""
        if serializer is None:
            serializer = self.get_serializer(version=version, context=context)
        return serializer.to_representation(instance=self)

    def get_extra_content_to_hash(self) -> List[Any]:
        """Extra values folded into the cache key; override to invalidate
        the cache when out-of-band state changes."""
        return []

    def values(self) -> Optional[T]:
        self._meta: Any
        # NOTE(review): QuerySet.values().first() returns a dict, not T —
        # the Optional[T] annotation looks inaccurate; confirm intent.
        return self._meta.model.objects.filter(pk=self.id).values().first()

    def __repr__(self) -> str:
        # Full field dump only in DEBUG; keep production repr cheap/terse.
        if settings.DEBUG:
            return f"<<{self.__class__.__name__}:{self.values()}>>"
        else:
            return f"<{self.__class__.__name__}:{self.id}>"

    def __str__(self) -> str:
        return f"<{self.__class__.__name__}:{self.id}>"

    def get_cache_timeout(self) -> int:
        """Return how long to cache the serialization in seconds"""
        return 0

    def cached_json(
        self,
        *,
        version: str,
        context: Dict[str, Any] = {},
        serializer: Optional[DCFSerializer[T, D]] = None,
    ) -> Any:
        """Serialize via the Django cache, respecting get_cache_timeout()."""
        timeout = self.get_cache_timeout()
        if timeout == 0:
            return self.get_json(
                version=version,
                context=context,
                serializer=serializer,
            )
        if result := cache.get(
            self.get_cache_key_for_serialization(version, context), None
        ):
            return result
        else:
            data = self.get_json(
                version=version,
                context=context,
                serializer=serializer,
            )
            cache.add(
                self.get_cache_key_for_serialization(version, context),
                data,
                timeout=timeout,
            )
            return data

    def get_cache_key_for_serialization(
        self, version: str, context: Dict[str, Any]
    ) -> str:
        # Whenever one of the hashed components changes, the cache misses and
        # a re-serialization is forced.
        # BUGFIX: the previous code called hash() on a list, which always
        # raises TypeError (lists are unhashable). Fold everything into a
        # hashable tuple instead; repr() is used for the free-form parts
        # (context dict, extra content list) to keep them hashable.
        # NOTE(review): builtin hash() of strings is salted per process
        # (PYTHONHASHSEED), so keys differ across workers sharing a cache
        # backend — consider hashlib for a stable digest.
        return "serialization_cache_" + str(
            hash(
                (
                    self._meta.model_name,
                    self.id,
                    version,
                    repr(sorted(context.items())),
                    repr(self.get_extra_content_to_hash()),
                )
            )
        )
def check_integrity() -> None:
    """Startup sanity checks over all Serializable subclasses.

    Verifies MRO ordering (Serializable must precede models.Model) and that
    each subclass's default serializer is a Serializer/DelegateSerializer.
    """
    from ...serializers import DelegateSerializer, Serializer

    for model in Serializable.__subclasses__():
        # NOTE(review): `break` stops scanning *all* remaining classes at the
        # first one failing a precondition; `continue` (skip just that class)
        # looks like the intent — confirm.
        if model.__module__ == "__fake__":
            break
        if Serializable not in model.__bases__:
            break
        if m.Model not in model.__bases__:
            break
        i = model.__bases__.index(Serializable)
        j = model.__bases__.index(m.Model)
        if i > j:
            raise AssertionError(
                f"{model} must extend {Serializable} before {m.Model}, current order: {model.__bases__}"
            )
    for model in Serializable.__subclasses__():
        sercls: Type[Serializer] = model.get_serializer_class(
            version="default", context={}
        )
        if not (
            issubclass(sercls, Serializer) or issubclass(sercls, DelegateSerializer)
        ):
            # (Runtime message contains a pre-existing typo "Serialzer";
            # left untouched here since it is program output.)
            raise NotImplementedError(
                f"{model}.get_serializer_class() does not return a Serialzer class "
            )
| 27.981043 | 104 | 0.568936 | 4,340 | 0.735095 | 0 | 0 | 1,000 | 0.169377 | 0 | 0 | 568 | 0.096206 |
4f662368d20be96e521c2f61005ae8b7c981fe81 | 1,592 | py | Python | tests/test_util.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | KikeM/watchpoints | d101035efef45bab7670e1a05a141c74c34c1f9e | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/watchpoints/blob/master/NOTICE.txt
import unittest
import inspect
from watchpoints.util import getline, getargnodes
class TestUtil(unittest.TestCase):
def test_getline(self):
def watch(*args):
frame = inspect.currentframe().f_back
return getline(frame)
a = []
b = {}
line = watch(a)
self.assertEqual(line, "line = watch ( a )")
line = watch(
a,
b
)
self.assertEqual(line, "line = watch ( a , b )")
def test_getargnodes(self):
def watch(*args):
frame = inspect.currentframe().f_back
return list(getargnodes(frame))
a = [0, 1]
b = {}
argnodes = watch(a)
self.assertEqual(len(argnodes), 1)
self.assertEqual(argnodes[0][1], "a")
argnodes = watch(
a,
b
)
self.assertEqual(len(argnodes), 2)
self.assertEqual(argnodes[0][1], "a")
self.assertEqual(argnodes[1][1], "b")
argnodes = watch(
a[0], # comments
b
)
self.assertEqual(len(argnodes), 2)
self.assertEqual(argnodes[0][1], "a[0]")
self.assertEqual(argnodes[1][1], "b")
with self.assertRaises(Exception):
argnodes = [i for i in watch(a)]
| 30.037736 | 83 | 0.502513 | 1,342 | 0.842965 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.146985 |
4f6763749fb354d4c4d0e7e61af9289c8cd231d4 | 1,762 | py | Python | extraction/vahadane_stain_extractor.py | sebastianffx/stainlib | 6d79f165fc69e7599d14310c6f4d26e3d0d01543 | [
"MIT"
] | 3 | 2021-09-15T00:58:11.000Z | 2022-03-09T21:19:41.000Z | extraction/vahadane_stain_extractor.py | sebastianffx/stainlib | 6d79f165fc69e7599d14310c6f4d26e3d0d01543 | [
"MIT"
] | 1 | 2021-11-30T16:52:38.000Z | 2021-11-30T16:52:38.000Z | extraction/vahadane_stain_extractor.py | sebastianffx/stainlib | 6d79f165fc69e7599d14310c6f4d26e3d0d01543 | [
"MIT"
] | 2 | 2020-12-29T10:00:52.000Z | 2021-11-30T16:53:19.000Z | """
Stain normalization inspired by method of:
A. Vahadane et al., ‘Structure-Preserving Color Normalization and Sparse Stain Separation for Histological Images’, IEEE Transactions on Medical Imaging, vol. 35, no. 8, pp. 1962–1971, Aug. 2016.
Uses the spams package:
http://spams-devel.gforge.inria.fr/index.html
Use with python via e.g https://anaconda.org/conda-forge/python-spams
"""
import spams
from stainlib.utils.stain_utils import ABCStainExtractor, is_uint8_image
from stainlib.utils.stain_utils import normalize_matrix_rows, convert_RGB_to_OD, LuminosityThresholdTissueLocator
class VahadaneStainExtractor(ABCStainExtractor):
@staticmethod
def get_stain_matrix(I, luminosity_threshold=0.8, regularizer=0.1):
"""
Stain matrix estimation via method of:
A. Vahadane et al. 'Structure-Preserving Color Normalization and Sparse Stain Separation for Histological Images'
:param I: Image RGB uint8.
:param luminosity_threshold:
:param regularizer:
:return:
"""
assert is_uint8_image(I), "Image should be RGB uint8."
# convert to OD and ignore background
tissue_mask = LuminosityThresholdTissueLocator.get_tissue_mask(I, luminosity_threshold=luminosity_threshold).reshape((-1,))
OD = convert_RGB_to_OD(I).reshape((-1, 3))
OD = OD[tissue_mask]
# do the dictionary learning
dictionary = spams.trainDL(X=OD.T, K=2, lambda1=regularizer, mode=2,
modeD=0, posAlpha=True, posD=True, verbose=False).T
# order H and E.
# H on first row.
if dictionary[0, 0] < dictionary[1, 0]:
dictionary = dictionary[[1, 0], :]
return normalize_matrix_rows(dictionary)
| 40.045455 | 195 | 0.694665 | 1,169 | 0.661199 | 0 | 0 | 1,115 | 0.630656 | 0 | 0 | 823 | 0.465498 |
4f69a0fe6181c30f0c8d3af0a05441d93724aee1 | 42 | py | Python | GithubApiDemo/__init__.py | eric-nord/githubApiDemo | ca9f05d60f099d6a35765f31cfdd1c6ad4618fde | [
"MIT"
] | null | null | null | GithubApiDemo/__init__.py | eric-nord/githubApiDemo | ca9f05d60f099d6a35765f31cfdd1c6ad4618fde | [
"MIT"
] | null | null | null | GithubApiDemo/__init__.py | eric-nord/githubApiDemo | ca9f05d60f099d6a35765f31cfdd1c6ad4618fde | [
"MIT"
] | null | null | null | """ Explicit is better than implicit """
| 14 | 40 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.952381 |
4f69d7b4a84ab28bb119ffd37322852c103af40b | 12,365 | py | Python | src/optim.py | CSI-ADS/OptimControl | 40c551348058335a27d022bb1938362b3ab68158 | [
"Apache-2.0"
] | 1 | 2022-02-28T16:22:26.000Z | 2022-02-28T16:22:26.000Z | src/optim.py | CSI-ADS/OptimControl | 40c551348058335a27d022bb1938362b3ab68158 | [
"Apache-2.0"
] | null | null | null | src/optim.py | CSI-ADS/OptimControl | 40c551348058335a27d022bb1938362b3ab68158 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
import torch
import numpy as np
from tqdm import tqdm, trange
from .utils import *
from .loss import compute_control_from_loss
import time
def get_cl_init(N, loc=-7, scale=1e-4, vals=None, device=None):
if vals is not None:
assert N == len(vals)
cl_normal = torch.tensor(vals)
else:
cl_normal = torch.normal(mean=loc, std=scale, size=(N,))
#cl[0] = 0
cl = cl_normal.clone().detach().requires_grad_(True)
cl = cl.to(device)
#cl = torch.tensor(cl_normal.clone().detach(), requires_grad=True, device=device, dtype=dtype)
return cl
def compute_total_shares(cl, g, source_mask=None): # cl is |S|, while ol is N
cl_soft = torch.sigmoid(cl) # !
shares_in_network = g.total_shares_in_network
# root nodes are taken to be fully controllable
shares_in_network[g.identify_uncontrollable()] = 1.0
if source_mask is not None:
assert sum(source_mask) == cl.shape[0], "mask not matching parameters"
shares_in_network = shares_in_network[source_mask] # mask size S
# core
ol = shares_in_network*cl_soft # ol is 0 for no external ownership!
assert torch.min(ol) >= 0 and torch.max(ol) <= 1, "strange: {} -- {}".format(torch.min(ol), torch.max(ol))
# size S to N
ol = pad_from_mask(ol, source_mask, ttype='torch')
assert ol.shape[0] == g.number_of_nodes, "should be N size"
return ol
def compute_value(fn, cl, g, *args, **kwargs):
ol = compute_total_shares(cl, g, source_mask=kwargs.get("source_mask"))
return fn(ol, g, *args, **kwargs)
def compute_value_and_grad(fn, cl, g, *args, **kwargs):
value = compute_value(fn, cl, g, *args, **kwargs)
value.backward()
grads = cl.grad
return value, grads
def get_params(optimizer):
for group_param in optimizer.param_groups:
for param in group_param["params"]:
return param # just one
def update(loss_fn, optimizer, params, *args, **kwargs):
optimizer.zero_grad()
params = get_params(optimizer)
cost, grads = compute_value_and_grad(loss_fn, params, *args, **kwargs)
optimizer.step()
return params, cost
def optimize_control(
loss_fn, cl, g,
lambd=0,
verbose=False, return_hist=False,
lr=0.1, scheduler=None, num_steps=10000,
device=None, save_params_every=100, save_loss_arr=False,
save_separate_losses=False,
loss_1_name="loss_control", loss_2_name="loss_cost",
loss_tol=1e-8,
es_wait=1,
**kwargs):
params = cl
g.to(device)
#print("Optimize for lambd={} on device={} and dtype={}".format(lambd, device, params.dtype))
optimizer = torch.optim.Adam([{"params" : params}], lr=lr)
if scheduler is not None:
scheduler = scheduler(optimizer)
hist = defaultdict(list)
target_mask = kwargs.get("target_mask", None)
source_mask = kwargs.get("source_mask", None)
i_last = 0
loss_prev = None
pbar = tqdm(range(num_steps), disable=not verbose)
n_wait = 0
start = time.time()
for i in pbar:
i_last = i
params, loss = update(loss_fn, optimizer, params, g, lambd=lambd, **kwargs)
#print("params = ", params)
#print("loss = ", loss)
with torch.no_grad():
hist["loss"].append(loss.detach().cpu().numpy())
hist["i_iter"].append(i)
if (i % save_params_every == 0) or (i == num_steps - 1):
hist["saved_at"].append(i)
hist["params"].append(params.detach().cpu().numpy())
hist["params_sm"].append(torch.sigmoid(params).detach().cpu().numpy())
hist["ol"].append(compute_total_shares(cl, g, source_mask=source_mask).detach().cpu().numpy())
if save_loss_arr:
losses = compute_value(loss_fn, params, g, lambd=lambd, as_separate=True, as_array=True, **kwargs)
hist["{}_arr".format(loss_1_name)].append(losses[0].detach().cpu().numpy())
hist["{}_arr".format(loss_2_name)].append(losses[1].detach().cpu().numpy())
if save_separate_losses:
losses = compute_value(loss_fn, params, g, lambd=lambd, as_separate=True, as_array=False, **kwargs)
hist[loss_1_name].append(losses[0].detach().cpu().numpy())
hist[loss_2_name].append(losses[1].detach().cpu().numpy())
if scheduler is not None:
scheduler.step()
hist["lr"].append(scheduler.get_lr()[0])
else:
hist["lr"] = lr
with torch.no_grad():
if (loss_prev is not None) and (torch.abs(loss - loss_prev) < loss_tol):
n_wait += 1
if n_wait >= es_wait:
if verbose:
print("breaking optimization at step {} with loss: {}".format(i, loss))
break
else:
if verbose:
print("waiting, but no improvement: {}".format(n_wait))
else:
n_wait = 0 # reset, must be subsequent
loss_prev = loss
pbar.set_postfix({'loss': loss.detach().cpu().numpy()})
end = time.time()
with torch.no_grad():
print("computing hist info")
hist["time"] = end - start
hist["final_iter"] = i_last
hist["final_params"] = params.detach().cpu().numpy()
hist["final_params_sm"] = torch.sigmoid(params).detach().cpu().numpy()
hist["final_ol"] = compute_total_shares(cl, g, source_mask=source_mask).detach().cpu().numpy()
hist["total_shares_in_network"] = g.total_shares_in_network.detach().cpu().numpy()
losses = compute_value(loss_fn, params, g, lambd=lambd, as_separate=True, as_array=True, **kwargs)
hist["final_{}_arr".format(loss_1_name)] = losses[0].detach().cpu().numpy()
hist["final_{}_arr".format(loss_2_name)] = losses[1].detach().cpu().numpy()
losses = compute_value(loss_fn, params, g, lambd=lambd, as_separate=True, as_array=False, **kwargs)
hist["final_{}".format(loss_1_name)]= losses[0].detach().cpu().numpy()
hist["final_{}".format(loss_2_name)] = losses[1].detach().cpu().numpy()
hist["final_control"] = compute_control_from_loss(losses[0].detach().cpu().numpy(), g, target_mask)
hist["final_control_shares"] = compute_control_from_loss(losses[0].detach().cpu().numpy(), g, target_mask, normalize='shares')
hist["final_control_nodes"] = compute_control_from_loss(losses[0].detach().cpu().numpy(), g, target_mask, normalize='nodes')
ret = (params, loss)
if return_hist:
ret += (dict(hist), )
return ret
def constraint_optimize_control(
loss_fns, cl, g, budget,
verbose=False, return_hist=False,
lr=0.1, scheduler=None,
max_iter=100, num_steps=10000,
device=None, save_params_every=100, save_loss_arr=False,
constr_tol = 1e-8,
loss_tol = 1e-8,
loss_1_name="loss_control", loss_2_name="loss_cost",
es_wait=1,
**kwargs
):
params = cl
rho, alpha, constr, constr_new = 1.0, 0.0, float("Inf"), float("Inf")
flag_max_iter = True
hist = defaultdict(list)
step_nr = -1
for i in range(max_iter):
print("Starting with iter i={}".format(i))
while rho < 1e+20:
# optimize the actual loss
step_nr += 1
def augm_loss(*loss_args, as_separate=False, **loss_kwargs):
l, c = loss_fns(*loss_args, as_separate=True, **loss_kwargs)
if loss_kwargs.get("as_array", False):
sm, tm = loss_kwargs.get("source_mask", None), loss_kwargs.get("target_mask", None)
if sm is not None:
l = pad_from_mask(l, tm, ttype='torch')
if tm is not None:
c = pad_from_mask(c, sm, ttype='torch')
#h_constr = torch.abs(c - budget)
h_constr = c - budget # can only be smaller than or equal to zero
# if c > budget, which is not allowed: h_constr is positive, and only then contribute to loss
h_constr = torch.clamp(h_constr/budget, min=0)
h_constr = torch.abs(h_constr) # just a check
augm_lagr = l + 0.5 * rho * h_constr**2 + alpha * h_constr # augmented lagrangian
# print("terms:", l.detach().cpu().numpy(), (0.5*rho*c**2).detach().cpu().numpy(), (alpha*c).detach().cpu().numpy())
# print("c = ", c)
#print("l, h:", l, h_constr)
if as_separate:
return augm_lagr, h_constr#*0
else:
return augm_lagr
params, augm_new, hist_new = optimize_control(augm_loss, params, g,
lambd=1,
verbose=verbose, return_hist=True,
lr=lr if step_nr == 0 else lr/10, scheduler=scheduler, num_steps=num_steps,
device=device, save_params_every=save_params_every, save_loss_arr=save_loss_arr,
loss_1_name="loss_augm", loss_2_name="loss_costr",
loss_tol=loss_tol,
es_wait=es_wait,
**kwargs
)
loss_new = hist_new["final_loss_augm"]
constr_new = hist_new["final_loss_costr"]
with torch.no_grad():
# print(kwargs)
losses_orig_new = compute_value(loss_fns, params, g, lambd=1, as_separate=True, as_array=False, **kwargs)
hist[loss_1_name].append(losses_orig_new[0].detach().cpu().numpy())
hist[loss_2_name].append(losses_orig_new[1].detach().cpu().numpy())
print("current required value (budget={}): {}".format(budget, losses_orig_new[1]))
hist["loss_augm"].append(loss_new)
hist["i_contr_iter"].append(i)
hist["step_nr"].append(step_nr)
hist["rho"].append(rho)
hist["alpha"].append(alpha)
hist["constr"].append(constr_new)
hist["tot_cost"].append(constr_new+budget)
hist["final_iter"].append(hist_new["final_iter"])
hist["total_shares_in_network"] = hist_new["total_shares_in_network"]
hist["hist_optim"].append(hist_new)
# print("iter:",hist["final_iter"][-1], " augm:", loss_new, "constr_new:", constr_new, "constr:", constr, "tot_cost:", hist["tot_cost"][-1])
# print("loss_control:", hist["loss_control"][-1], "loss_cost:", hist["loss_cost"][-1])
if np.abs(constr_new) > 0.25 * np.abs(constr):
rho *= 10
print("Increasing rho to: 10**", np.log10(rho))
else:
print("Break: ", constr_new, constr)
break
constr = constr_new
alpha += np.abs(rho * constr) # just to be sure, should go up!
if np.abs(constr) <= constr_tol:
flag_max_iter = False
break
with torch.no_grad():
print("computing final hist info")
hist["final_iter"] = step_nr
hist["final_params"] = params.detach().cpu().numpy()
hist["final_params_sm"] = torch.sigmoid(params).detach().cpu().numpy()
hist["final_ol"] = compute_total_shares(params, g, source_mask=kwargs.get("source_mask", None)).detach().cpu().numpy()
losses = compute_value(loss_fns, params, g, lambd=1, as_separate=True, as_array=True, **kwargs)
hist["final_{}_arr".format(loss_1_name)]= losses[0].detach().cpu().numpy()
hist["final_{}_arr".format(loss_2_name)] = losses[1].detach().cpu().numpy()
losses = compute_value(loss_fns, params, g, lambd=1, as_separate=True, as_array=False, **kwargs)
hist["final_{}".format(loss_1_name)]= losses[0].detach().cpu().numpy()
hist["final_{}".format(loss_2_name)] = losses[1].detach().cpu().numpy()
hist["final_control"] = hist_new["final_control"]
hist["final_control_shares"] = hist_new["final_control_shares"]
hist["final_control_nodes"] = hist_new["final_control_nodes"]
ret = (params, losses, constr)
if return_hist:
ret += (dict(hist), )
return ret
return params_est, constr, hist
| 44.638989 | 152 | 0.591751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,285 | 0.184796 |
4f69fd9cc8113e6ef8df3f55cd4920361ce2a21d | 1,578 | py | Python | herramientas/zapador/zapador/zapadorapp.py | ZR-TECDI/Framework_ZR | 0fa994d4b3cbf19affab84695705d022adae872c | [
"MIT"
] | 4 | 2019-09-07T19:31:27.000Z | 2020-06-13T21:41:45.000Z | herramientas/zapador/zapador/zapadorapp.py | ZR-TECDI/Framework_ZR | 0fa994d4b3cbf19affab84695705d022adae872c | [
"MIT"
] | 21 | 2019-09-09T00:55:40.000Z | 2021-08-09T20:50:56.000Z | herramientas/zapador/zapador/zapadorapp.py | ZR-TECDI/Framework_ZR | 0fa994d4b3cbf19affab84695705d022adae872c | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.lang import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
import zapador.constantes as cons
from zapador.metodos import pre_run
from zapador.clases import *
from kivy.factory import Factory
ubicacion = cons.DIR_SCRIPT + '/zapador/kv/'
with open('{}main.kv'.format(ubicacion), encoding='UTF-8') as f:
Builder.load_string(f.read())
with open('{}clases.kv'.format(ubicacion), encoding='UTF-8') as f:
Builder.load_string(f.read())
with open('{}contenido.kv'.format(ubicacion), encoding='UTF-8') as f:
Builder.load_string(f.read())
class Pantalla_Nueva(Screen):
pass
class Pantalla_Importar(Screen):
popup = Factory.CargarMision()
importar = None
def on_pre_enter(self):
self.importar = self.children[0].children[0]
self.popup.papi = self.importar
self.popup.open()
class Pantalla_Opciones(Screen):
pass
sm = ScreenManager(transition=FadeTransition())
sm.add_widget(Pantalla_Nueva(name='pantalla_nueva'))
sm.add_widget(Pantalla_Importar(name='pantalla_importar'))
sm.add_widget(Pantalla_Opciones(name='pantalla_opciones'))
def cambiar_pantalla(pantalla):
sm.current = pantalla
class ZapadorApp(App):
"""Entry point para Zapador app"""
def on_start(self):
pre_run()
def on_stop(self):
Descargando.stop.set()
def build(self):
self.title = 'Zapador v'+cons.VERSION
self.icon = 'zapador/assets/img/zapador.ico'
return sm
| 27.684211 | 72 | 0.707858 | 611 | 0.387199 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.130545 |
4f6a761a21c5baeb65fb9380c05c0978f29b2265 | 327 | py | Python | src/search_items/models.py | janakparajuli/Survey_Office | 1d5eb673eef67f923bf4c2b24156bea76f5fc32d | [
"Apache-2.0"
] | null | null | null | src/search_items/models.py | janakparajuli/Survey_Office | 1d5eb673eef67f923bf4c2b24156bea76f5fc32d | [
"Apache-2.0"
] | null | null | null | src/search_items/models.py | janakparajuli/Survey_Office | 1d5eb673eef67f923bf4c2b24156bea76f5fc32d | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.conf import settings
from django.db import models
# Create your models here.
class Search(models.Model):
name = models.CharField(max_length=120)
link = models.CharField(max_length=120)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
| 19.235294 | 40 | 0.776758 | 194 | 0.593272 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.079511 |
4f6aa31c1d76802df396a9fb26baa8334a0f406b | 656 | py | Python | test/unit/agent/common/util/math.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 308 | 2015-11-17T13:15:33.000Z | 2022-03-24T12:03:40.000Z | test/unit/agent/common/util/math.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 211 | 2015-11-16T15:27:41.000Z | 2022-03-28T16:20:15.000Z | test/unit/agent/common/util/math.py | dp92987/nginx-amplify-agent | 1b2eed6eab52a82f35974928d75044451b4bedaf | [
"BSD-2-Clause"
] | 80 | 2015-11-16T18:20:30.000Z | 2022-03-02T12:47:56.000Z | # -*- coding: utf-8 -*-
from amplify.agent.common.util.math import median
from unittest import TestCase
from hamcrest import *
__author__ = "Raymond Lau"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Raymond Lau"
__email__ = "raymond.lau@nginx.com"
class MathTestCase(TestCase):
def test_median(self):
# even length
assert_that(median([1, 3, 5, 7]), equal_to(4.0))
# unsorted
assert_that(median([1, 5, 7, 3]), equal_to(4.0))
# odd length
assert_that(median([1, 2, 3, 4, 5, 6, 7]), equal_to(4.0))
assert_that(median([]), equal_to(None))
| 26.24 | 65 | 0.641768 | 348 | 0.530488 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.239329 |
4f6ad6255e2873d522c35098c7488d053af1ac51 | 12,245 | py | Python | test_code/deep_vis/deep_vis_keras.py | mikaelbj/training-visualizer | e6b515672e77331a1d0eeb8469655004f13b0d42 | [
"MIT"
] | 1 | 2017-06-28T20:17:50.000Z | 2017-06-28T20:17:50.000Z | test_code/deep_vis/deep_vis_keras.py | mikaelbj/training-visualizer | e6b515672e77331a1d0eeb8469655004f13b0d42 | [
"MIT"
] | null | null | null | test_code/deep_vis/deep_vis_keras.py | mikaelbj/training-visualizer | e6b515672e77331a1d0eeb8469655004f13b0d42 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.misc import toimage
from scipy.ndimage.filters import gaussian_filter
from os import mkdir
from os.path import dirname, join
from time import time
from keras.models import Model
from keras.layers import Dense
from keras import backend as K
from keras.applications.vgg16 import VGG16
# define output path and make folder
output_path = join(dirname(__file__), 'output')
try:
mkdir(output_path)
except FileExistsError:
# folder exists, which is what we wanted
pass
# set channel dimension based on image data format from Keras backend
if K.image_data_format() == 'channels_last':
ch_dim = 3
else:
ch_dim = 1
# for VGG16 specific testing
is_VGG16 = True
VGG16_MEAN_VALUES = np.array([103.939, 116.779, 123.68])
# set learning rate
learning_rate = 2500.0
# how many times we update image
no_of_iterations = 500
# specify L2-decay
# used to prevent a small number of extreme pixel values from dominating the output image
l2_decay = 0.0001
# specify frequency of blurring and standard deviation for kernel for Gaussian blur
# used to penalize high frequency information in the output image
blur_interval = 4
# standard deviation values between 0.0 and 0.3 work poorly, according to yosinski
blur_std = 1.0
# specify value percentile limit
# used to induce sparsity by setting pixels with small absolute value to zero
value_percentile = 0
# specify norm percentile limit
# used to induce sparsity by setting pixels with small norm to zero
norm_percentile = 0
# specify contribution percentile limit
# used to induce sparsity by setting pixels with small contribution to zero
contribution_percentile = 0
# specify absolute contribution percentile limit
# used to induce sparsity by setting pixels with small absolute contribution to zero
abs_contribution_percentile = 0
# choose whether to include regularization
regularize = True
# utility function used to convert an array into a savable image array
def deprocess(vis_array):
# remove batch dimension, and alter color dimension accordingly
img_array = vis_array[0]
if K.image_data_format() == 'channels_first':
# alter dimensions from (color, height, width) to (height, width, color)
img_array = img_array.transpose((1, 2, 0))
if is_VGG16:
# add mean values
img_array += VGG16_MEAN_VALUES.reshape((1, 1, 3))
# change back to RGB
img_array = img_array[:, :, ::-1]
# clip in [0, 255], and convert to uint8
img_array = np.clip(img_array, 0, 255).astype('uint8')
return img_array
# creates a model to generate gradients from
def create_model():
base_model = VGG16(include_top=True, weights='imagenet')
# save weights from last layer (softmax)
softmax_weights = base_model.layers[-1].get_weights()
# create new last layer for model with linear activation and connect to same layer as old layer
out = Dense(1000, activation='linear', weights=softmax_weights)(base_model.layers[-1].input)
return Model(base_model.input, out)
# saves the visualization and a text file describing its creation environment
def save_visualization(img, layer_no, unit_index, loss_value):
# create appropriate name to identify image
if regularize:
img_name = 'regularized'
else:
img_name = 'vanilla'
img_name += '_{}_{}_{}'.format(layer_no, unit_index, time())
# save the resulting image to disk
# avoid scipy.misc.imsave because it will normalize the image pixel value between 0 and 255
toimage(img).save(join(output_path, img_name + '.png'))
# also save a txt-file containing information about creation environment and obtained loss
img_info = 'Image "{}.png" was created from unit {} in layer {}, using the following hyperparameters:\n\n' \
'Learning rate: {}\n' \
'Number of iterations: {}\n' \
'----------\n' \
''.format(img_name, unit_index, layer_no, learning_rate, no_of_iterations)
if regularize:
img_info += 'Regularization enabled\n\n' \
'L2-decay: {}\n' \
'Blur interval and std: {} & {}\n' \
'Value percentile: {}\n' \
'Norm percentile: {}\n' \
'Contribution percentile: {}\n' \
'Abs. contribution percentile: {}\n' \
''.format(l2_decay, blur_interval, blur_std, value_percentile, norm_percentile,
contribution_percentile, abs_contribution_percentile)
else:
img_info += 'Regularization disabled\n'
img_info += '----------\n' \
'Obtained loss value: {}\n' \
''.format(loss_value)
with open(join(output_path, img_name + '_info.txt'), 'w') as f:
f.write(img_info)
print('\nImage of unit {} from layer {} have been saved as {}.png\n'.format(unit_index, layer_no, img_name))
# returns a function for computing loss and gradients w.r.t. the activations for the chosen unit in the output tensor
def get_loss_and_gradient_function(input_tensor, output_tensor, unit_index):
# if unit index is specified as integer, convert to tuple
if isinstance(unit_index, int):
unit_index = (unit_index,)
if len(output_tensor.shape[1:]) != len(unit_index):
raise ValueError('Index mismatch: Unit indices should be of length {}, not {}'
.format(len(output_tensor.shape[1:]), len(unit_index)))
else:
tensor_min = np.array([0 for _ in output_tensor.shape[1:]])
tensor_max = np.array([int(dim) - 1 for dim in output_tensor.shape[1:]])
if np.any(np.array(unit_index) < tensor_min) or np.any(np.array(unit_index) > tensor_max):
raise ValueError('Invalid unit index {}: Unit indices should have values between {} and {}'
.format(np.array(unit_index), tensor_min, tensor_max))
# pad with batch index
unit_index = (0,) + unit_index
# loss is the activation of the unit in the chosen output tensor (chosen layer output)
loss = output_tensor[unit_index]
# compute gradients of the loss of the chosen unit w.r.t. the input image
gradients = K.gradients(loss, input_tensor)[0]
# return function returning the loss and gradients given a visualization image
# add a flag to disable the learning phase
return K.function([input_tensor, K.learning_phase()], [loss, gradients])
# creates an random, initial image to manipulate into a visualization
def create_initial_image(model_input_shape):
# add (1,) for batch dimension
return np.random.normal(0, 10, (1,) + model_input_shape[1:])
# regularizes visualization with various techniques
# each technique is activated by non-zero values for their respective global variables
def apply_ensemble_regularization(visualization, pixel_gradients, iteration_no):
# regularizer #1
# apply L2-decay
if l2_decay > 0:
visualization *= (1 - l2_decay)
# regularizer #2
# apply Gaussian blur
if blur_interval > 0 and blur_std > 0:
# only blur at certain iterations, as blurring is expensive
if not iteration_no % blur_interval:
# define standard deviations for blur kernel
blur_kernel_std = [0, blur_std, blur_std, blur_std]
# blur along height and width, but not along channel (color) dimension
blur_kernel_std[ch_dim] = 0
# perform blurring
visualization = gaussian_filter(visualization, sigma=blur_kernel_std)
# regularizer #3
# apply value limit
if value_percentile > 0:
# find absolute values
abs_visualization = abs(visualization)
# find mask of high values (values above chosen value percentile)
high_value_mask = abs_visualization >= np.percentile(abs_visualization, value_percentile)
# apply to image to set pixels with small values to zero
visualization *= high_value_mask
# regularizer #4
# apply norm limit
if norm_percentile > 0:
# compute pixel norms along channel (color) dimension
pixel_norms = np.linalg.norm(visualization, axis=ch_dim)
# find initial mask of high norms (norms above chosen norm percentile)
high_norm_mask = pixel_norms >= np.percentile(pixel_norms, norm_percentile)
# expand mask to account for color dimension
high_norm_mask = expand_for_color(high_norm_mask)
# apply to image to set pixels with small norms to zero
visualization *= high_norm_mask
# regularizer #5
# apply contribution limit
if contribution_percentile > 0:
# predict the contribution of each pixel
predicted_contribution = -visualization * pixel_gradients
# sum over channel (color) dimension
contribution = predicted_contribution.sum(ch_dim)
# find initial mask of high contributions (contr. above chosen contr. percentile)
high_contribution_mask = contribution >= np.percentile(contribution, contribution_percentile)
# expand mask to account for color dimension
high_contribution_mask = expand_for_color(high_contribution_mask)
# apply to image to set pixels with small contributions to zero
visualization *= high_contribution_mask
# regularizer #6
# apply absolute contribution limit
if abs_contribution_percentile > 0:
# alternative approach
# predict the contribution of each pixel
predicted_contribution = -visualization * pixel_gradients
# sum over channel (color) dimension, and find absolute value
abs_contribution = abs(predicted_contribution.sum(ch_dim))
# find initial mask of high absolute contributions (abs. contr. above chosen abs. contr. percentile)
high_abs_contribution_mask = abs_contribution >= np.percentile(abs_contribution, abs_contribution_percentile)
# expand mask to account for color dimension
high_abs_contribution_mask = expand_for_color(high_abs_contribution_mask)
# apply to image to set pixels with small absolute contributions to zero
visualization *= high_abs_contribution_mask
return visualization
# use to expand a (batch, height, width)-numpy array with a channel (color) dimension
def expand_for_color(np_array):
# expand at channel (color) dimension
np_array = np.expand_dims(np_array, axis=ch_dim)
# create tile repetition list, repeating thrice in channel (color) dimension
tile_reps = [1, 1, 1, 1]
tile_reps[ch_dim] = 3
# apply tile repetition
np_array = np.tile(np_array, tile_reps)
return np_array
def main():
# create model to generate gradients from
model = create_model()
# select units to visualize for by adding (layer number, unit index), where unit index is tuple for layers with
# 3D structured output, like convolutional and pooling layers
# units_to_visualize = [(22, 130), (2, 351), (22, 736), (22, 850)]
# units_to_visualize = [(22, 402), (22, 587), (22, 950)]
units_to_visualize = [(1, (112, 112, ch)) for ch in range(1)]
# unit indices in last layer represent the following classes:
# 130 flamingo, 351 hartebeest, 736 pool table, 850 teddy bear
# for the chosen layer number and unit index
for layer_no, unit_index in units_to_visualize:
print('\nProcessing unit {} in layer {}'.format(unit_index, layer_no))
# used to time generation of each image
start_time = time()
if layer_no < 0 or layer_no >= len(model.layers):
raise ValueError('Invalid layer number {}: Layer numbers should be between {} and {}'.format(layer_no, 0, len(model.layers) - 1))
# create and save loss and gradient function for current unit
compute_loss_and_gradients = get_loss_and_gradient_function(model.input, model.layers[layer_no].output, unit_index)
# create an initial visualization image
visualization = create_initial_image(model.input_shape)
# perform gradient ascent update with or without regularization for n steps
for i in range(1, no_of_iterations + 1):
# compute loss and gradient values (input 0 as arg. #2 to deactivate training layers, like dropout)
loss_value, pixel_gradients = compute_loss_and_gradients([visualization, 0])
# update visualization image
visualization += pixel_gradients * learning_rate
# if regularization has been activated, regularize image
if regularize:
visualization = apply_ensemble_regularization(visualization, pixel_gradients, i)
# print('Current loss value:', loss_value)
print('Round {} finished.'.format(i))
# process visualization to match with standard image dimensions
visualization_image = deprocess(visualization)
# save visualization image, complete with info about creation environment
save_visualization(visualization_image, layer_no, unit_index, loss_value)
print('Visualization for unit {} from layer {} completed in {:.4f} seconds'.format(unit_index, layer_no, time() - start_time))
main()
| 35.699708 | 132 | 0.748387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,180 | 0.504696 |
4f6b27f239470833a1e11bf6ba509f896f48248b | 10,131 | py | Python | neatsociety/population.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | 2 | 2018-03-03T16:30:09.000Z | 2022-01-02T17:02:22.000Z | neatsociety/population.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | null | null | null | neatsociety/population.py | machinebrains/neat-society | 395c092083aa62ac8ad2a93a9afde659ecbd85fd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import gzip
import pickle
import random
import time
import os
from neatsociety.config import Config
from neatsociety.indexer import Indexer, InnovationIndexer
from neatsociety.reporting import ReporterSet, StatisticsReporter, StdOutReporter
from neatsociety.species import Species
class CompleteExtinctionException(Exception):
    """Raised when the population has gone completely extinct (no species remain)."""
    pass
class Population(object):
    """
    This class implements the core NEAT algorithm. It maintains a list of Species instances,
    each of which contains a collection of Genome instances.
    """

    def __init__(self, config):
        """
        Build reporting, the indexers, the reproduction machinery, and the
        initial (already speciated) population.

        :param config: Either a config.Config object or path to a configuration file.
        """
        # If config is not a Config object, assume it is a path to the config file.
        if not isinstance(config, Config):
            config = Config(config)

        # Configure statistics and reporting as requested by the user.
        self.reporters = ReporterSet()
        if config.collect_statistics:
            self.statistics = StatisticsReporter()
            self.add_reporter(self.statistics)
        else:
            self.statistics = None

        if config.report:
            self.add_reporter(StdOutReporter())

        self.config = config

        # Check if we have a society directory defined -- this run may be a
        # continuation of an existing one.
        # NOTE(review): this branch only prints a message; no state is
        # actually restored from the directory here. Confirm intent.
        if self.config.society_directory is not None:
            # Check if latest society file is available.
            if os.path.isdir(self.config.society_directory):
                print("Society Directory")

        self.species_indexer = Indexer(1)
        self.genome_indexer = Indexer(1)
        self.innovation_indexer = InnovationIndexer(0)
        self.reproduction = config.reproduction_type(self.config, self.reporters,
                                                     self.genome_indexer, self.innovation_indexer)

        self.species = []
        self.generation = -1
        self.total_evaluations = 0

        # Create a population if one is not given, then partition into species.
        self.population = self._create_population()
        # BUG FIX: the original called self._speciate(initial_population),
        # but no name 'initial_population' exists in this scope, so every
        # construction raised NameError. The freshly created population is
        # what must be speciated.
        self._speciate(self.population)

    def add_reporter(self, reporter):
        """Register a reporter to receive lifecycle callbacks."""
        self.reporters.add(reporter)

    def remove_reporter(self, reporter):
        """Unregister a previously added reporter."""
        self.reporters.remove(reporter)

    def load_checkpoint(self, filename):
        '''Resumes the simulation from a previous saved point.'''
        self.reporters.loading_checkpoint(filename)
        with gzip.open(filename) as f:
            (self.species,
             self.generation,
             random_state) = pickle.load(f)
        # Restore the RNG so the resumed run continues deterministically.
        random.setstate(random_state)

    def save_checkpoint(self, filename=None, checkpoint_type="user"):
        """ Save the current simulation state (species, generation, RNG state). """
        if filename is None:
            # NOTE(review): checkpoint_time_interval is treated as a number
            # of minutes in run(); accessing .models_directory on it here
            # looks wrong -- confirm which config attribute holds the
            # checkpoint directory.
            filename = os.path.join(self.config.checkpoint_time_interval.models_directory,'neatsociety-checkpoint-{0}'.format(self.generation))
        self.reporters.saving_checkpoint(checkpoint_type, filename)
        with gzip.open(filename, 'w', compresslevel=5) as f:
            data = (self.species,
                    self.generation,
                    random.getstate())
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)

    def _create_population(self):
        """Create the initial genomes according to the configured connectivity scheme."""
        # Create a collection of unconnected genomes with no hidden nodes.
        new_population = []
        # TODO: The genotype class should know how to do everything below, based
        # solely on what's in the config object. This allows users to completely
        # replace the initial population creation scheme if they choose.
        for _ in range(self.config.init_pop_size):
            g_id = self.genome_indexer.next()
            g = self.config.genotype.create_unconnected(g_id, self.config)
            new_population.append(g)

        # Add hidden nodes if requested.
        if self.config.hidden_nodes > 0:
            for g in new_population:
                g.add_hidden_nodes(self.config.hidden_nodes)

        # Add connections based on initial connectivity type.
        if self.config.initial_connection == 'fully_connected':
            for g in new_population:
                g.connect_full(self.innovation_indexer)
        elif self.config.initial_connection == 'partial':
            for g in new_population:
                g.connect_partial(self.innovation_indexer, self.config.connection_fraction)
        elif self.config.initial_connection == 'fs_neat':
            for g in new_population:
                g.connect_fs_neat(self.innovation_indexer)
        else:
            raise Exception("Invalid initial connection type: {!r}".format(self.config.initial_connection))

        return new_population

    def _speciate(self, population):
        """
        Place genomes into species by genetic similarity.
        Note that this method assumes the current representatives of the species are from the old
        generation, and that after speciation has been performed, the old representatives should be
        dropped and replaced with representatives from the new generation. If you violate this
        assumption, you should make sure other necessary parts of the code are updated to reflect
        the new behavior.
        """
        for individual in population:
            # Find the species with the most similar representative.
            min_distance = None
            closest_species = None
            for s in self.species:
                distance = individual.distance(s.representative)
                if distance < self.config.compatibility_threshold:
                    if min_distance is None or distance < min_distance:
                        closest_species = s
                        min_distance = distance

            if closest_species:
                closest_species.add(individual)
            else:
                # No species is similar enough, create a new species for this individual.
                self.species.append(Species(individual, self.species_indexer.next()))

        # Only keep non-empty species.
        self.species = [s for s in self.species if s.members]

        # Select a random current member as the new representative.
        for s in self.species:
            s.representative = random.choice(s.members)

    def run(self, fitness_function, n):
        """
        Runs NEAT's genetic algorithm for n generations.
        The user-provided fitness_function should take one argument, a list of all genomes in the population,
        and its return value is ignored. This function is free to maintain external state, perform evaluations
        in parallel, and probably any other thing you want. The only requirement is that each individual's
        fitness member must be set to a floating point value after this function returns.
        It is assumed that fitness_function does not modify the list of genomes, or the genomes themselves, apart
        from updating the fitness member.
        """
        # Remember start time for saving timed checkpoints.
        last_checkpoint = time.time()

        for _ in range(n):
            self.generation += 1
            self.reporters.start_generation(self.generation)

            # Collect a list of all members from all species.
            population = []
            for s in self.species:
                population.extend(s.members)

            # Evaluate all individuals in the population using the user-provided function.
            # TODO: Add an option to only evaluate each genome once, to reduce number of
            # fitness evaluations in cases where the fitness is known to be the same if the
            # genome doesn't change--in these cases, evaluating unmodified elites in each
            # generation is a waste of time.
            # Style fix: identity comparison with None ('== None' before).
            unevaluated_population = [p for p in population if p.fitness is None]
            fitness_function(unevaluated_population)
            self.total_evaluations += len(unevaluated_population)

            # Gather and report statistics.
            # NOTE(review): assumes genomes order by fitness (Genome defines
            # comparison operators) -- confirm against the genome class.
            best = max(population)
            self.reporters.post_evaluate(population, self.species, best)

            # Save the best genome from the current generation if requested.
            if self.config.save_best:
                with open('best_genome_' + str(self.generation), 'wb') as f:
                    pickle.dump(best, f)

            # End if the fitness threshold is reached.
            if best.fitness >= self.config.max_fitness_threshold:
                self.reporters.found_solution(self.generation, best)
                break

            # Create the next generation from the current generation.
            self.species, new_population = self.reproduction.reproduce(self.species, self.config.pop_size)

            # Check for complete extinction -- deliberately disabled in the
            # original; kept commented so the intended behavior is documented.
            #if not self.species:
            #    self.reporters.complete_extinction()
            #    # If requested by the user, create a completely new population,
            #    # otherwise raise an exception.
            #    if self.config.reset_on_extinction:
            #        new_population = self._create_population()
            #    else:
            #        raise CompleteExtinctionException()

            # Update species age.
            for s in self.species:
                s.age += 1

            # Divide the new population into species.
            self._speciate(new_population)

            # Save checkpoints if necessary.
            if self.config.checkpoint_time_interval is not None:
                timed_checkpoint_due = last_checkpoint + 60 * self.config.checkpoint_time_interval
                if time.time() >= timed_checkpoint_due:
                    self.save_checkpoint(checkpoint_type="timed")
                    last_checkpoint = time.time()

            if self.config.checkpoint_gen_interval is not None \
                    and self.generation % self.config.checkpoint_gen_interval == 0:
                self.save_checkpoint(checkpoint_type="generation")

            self.reporters.end_generation()
| 41.35102 | 143 | 0.640509 | 9,804 | 0.967723 | 0 | 0 | 0 | 0 | 0 | 0 | 3,674 | 0.362649 |
4f6cf46631873c3214fc65b73ffdf48157b6c8cb | 11,450 | py | Python | prediction/main_sementics.py | Anukriti12/OptumStratethon2.0 | b66dba07735bfa47d99e9907eb8bccdd3b77075c | [
"MIT"
] | 1 | 2021-03-04T05:49:01.000Z | 2021-03-04T05:49:01.000Z | prediction/main_sementics.py | ankitpriyarup/optum_carewheel | f6c66f293b2980501e8bca8ab7e26ebd3b26cdd1 | [
"Apache-2.0"
] | null | null | null | prediction/main_sementics.py | ankitpriyarup/optum_carewheel | f6c66f293b2980501e8bca8ab7e26ebd3b26cdd1 | [
"Apache-2.0"
] | 1 | 2020-09-13T11:50:49.000Z | 2020-09-13T11:50:49.000Z | import os.path
import sys
import numpy as np
import pandas as pd
import sklearn
import argparse
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
from acumos.modeling import Model, List, create_namedtuple
from acumos.session import Requirements
from os import path
from image_mood_classifier._version import MODEL_NAME, __version__ as VERSION
from image_mood_classifier.prediction_formatter import Formatter
from image_mood_classifier._version import MODEL_NAME
def load_dataset(path_features=None):
    """Load a feature/prediction CSV into a pandas DataFrame.

    :param path_features: path to a CSV of prior predictions; when None,
        an empty frame with the formatter's canonical columns is returned.
    :return: pandas DataFrame
    """
    # Fix: dropped the redundant function-local re-import of Formatter --
    # it is already imported at module scope.
    if path_features is None:
        # Build a one-row template so the frame gets the canonical columns,
        # then drop the row to hand back an empty, correctly-shaped frame.
        dummy_sample = {Formatter.COL_NAME_IDX: 0,
                        Formatter.COL_NAME_CLASS: "toy",
                        Formatter.COL_NAME_PREDICTION: 0.243}
        df = pd.DataFrame([dummy_sample])
        return df.drop([0])
    return pd.read_csv(path_features)
def create_keras_model_(unit_size=32, input_size=8, label_size=1):
    """Assemble and compile a small fully-connected Keras classifier.

    Two ReLU hidden layers of ``unit_size`` units, each followed by 50%
    dropout, feed a softmax output of ``label_size`` units.  The model is
    compiled with SGD (lr 0.01, decay 1e-6, Nesterov momentum 0.9) and
    categorical cross-entropy loss, tracking accuracy.
    """
    net = Sequential()
    # First hidden block; declares the input dimensionality.
    net.add(Dense(unit_size, activation='relu', input_dim=input_size))
    net.add(Dropout(0.5))
    # Second hidden block.
    net.add(Dense(unit_size, activation='relu'))
    net.add(Dropout(0.5))
    # Softmax output over the label classes.
    net.add(Dense(label_size, activation='softmax'))

    optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    net.compile(loss='categorical_crossentropy',
                optimizer=optimizer, metrics=['accuracy'])
    return net
def classifier_train(X, y, method="svc"):
    """Grid-search hyper-parameters, report held-out performance, and
    return a classifier refit on the full dataset.

    :param X: feature matrix
    :param y: label vector
    :param method: 'rf' (random forest), 'svc' (SVM), or 'dnn' (small Keras net)
    :return: the best estimator found by grid search, refit on all of X/y
    :raises ValueError: if ``method`` is not one of the supported names
    """
    if method == "rf":
        classifier = RandomForestClassifier()
        param_grid = [
            {'n_estimators': [10, 100, 300, 500], 'min_samples_split': [2, 10]}
        ]
    elif method == "svc":
        classifier = SVC()
        param_grid = [
            {'kernel': ['rbf'], 'gamma': [
                1e-3, 1e-4], 'C': [1, 10, 100, 1000]},
            {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}
        ]
    elif method == "dnn":
        from keras.wrappers.scikit_learn import KerasClassifier
        from functools import partial  # partial function
        build_partial = partial(create_keras_model_,
                                input_size=X.shape[1], label_size=1)
        classifier = KerasClassifier(build_fn=build_partial, verbose=0)
        param_grid = [
            {'unit_size': [32, 64, 256, 512]},
        ]
    else:
        # BUG FIX: an unrecognized method previously fell through with
        # 'classifier'/'param_grid' unbound, crashing below with a
        # confusing NameError instead of a clear error.
        raise ValueError("Unknown classifier method: {!r}".format(method))

    # Pick hyper-parameters by 5-fold cross-validated grid search.
    clf = GridSearchCV(classifier, param_grid, cv=5, n_jobs=-1, verbose=2)
    clf.fit(X, y)
    classifier = clf.best_estimator_
    print([classifier, clf.best_params_, clf.best_score_])

    # Report held-out performance on a stratified 80/20 split...
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=1, stratify=y)
    classifier.fit(X_train, y_train)
    y_true, y_pred = y_test, classifier.predict(X_test)
    print(classification_report(y_true, y_pred))

    # ...then refit on the complete dataset before returning.
    classifier.fit(X, y)
    return classifier
def model_create_pipeline(formatter, clf):
    """Wrap the formatter + classifier into an Acumos model plus its
    runtime requirements.

    Builds the named-tuple input/output types from the formatter's declared
    output fields and returns (Model, Requirements).
    """
    formatter.set_params(classifier=clf)

    # Flatten the formatter's declared output fields into (name, type) pairs.
    tag_type = [(field, spec[field])
                for spec in formatter.output_types_
                for field in spec]

    name_in = "ImageTag"
    ImageTag = create_namedtuple(name_in, tag_type)
    name_multiple_in = name_in + "s"
    ImageTagSet = create_namedtuple(
        name_in + "Set", [(name_multiple_in, List[ImageTag])])

    def predict_class(val_wrapped: ImageTagSet) -> ImageTagSet:
        # Rehydrate the wrapped rows into a DataFrame, classify, and wrap
        # the resulting rows back into the named-tuple set type.
        df = pd.DataFrame(getattr(val_wrapped, name_multiple_in),
                          columns=ImageTag._fields)
        tags_df = formatter.predict(df)
        tags_parts = tags_df.to_dict('split')
        tags_list = [ImageTag(*row) for row in tags_parts['data']]
        print("[{} - {}:{}]: Input {} row(s) ({}), output {} row(s) ({}))".format(
            "classify", MODEL_NAME, VERSION, len(df), ImageTagSet, len(tags_df), ImageTagSet))
        return ImageTagSet(tags_list)

    package_path = path.dirname(path.realpath(__file__))
    return Model(classify=predict_class), Requirements(packages=[package_path], reqs=[pd, np, sklearn])
def model_archive(clf=None, debugging=False):
    """Debug helper to persist/restore a trained classifier via pickle.

    No-op (returns None) unless ``debugging`` is True.  With debugging on:
    if ``clf`` is None an existing 'model_cf.pkl' archive is loaded and
    returned; if a ``clf`` is supplied and no archive exists yet, it is
    saved first.

    :param clf: classifier to archive, or None to attempt a load
    :param debugging: gate flag; False disables all archive behavior
    :return: the (possibly loaded) classifier, or None
    """
    if not debugging:
        return None
    import pickle
    if clf is None:
        if os.path.exists('model_cf.pkl'):
            print("DEBUG ARCHIVE: Loading an old model...")
            with open("model_cf.pkl", "rb") as f:
                clf = pickle.load(f)
    elif not os.path.exists('model_cf.pkl'):
        # BUG FIX: the original opened a file literally named
        # "DEBUG ARCHIVE: model_cf.pkl" -- the log prefix had been pasted
        # into the filename instead of the print message, so saved models
        # were never found by the load branch above.
        print("DEBUG ARCHIVE: Saving a new model...")
        with open("model_cf.pkl", "wb") as f:
            pickle.dump(clf, f)
    return clf
def main(config={}):
    """CLI entry point for the image-mood classifier.

    Two modes, selected by the flags:
      * training mode (-i with -l, no -p): learn a classifier from image
        tags + labels, optionally push it to an Acumos server and/or dump
        it to a local directory.
      * prediction mode (-i with -d): run a previously dumped model over
        the input and optionally write predictions to CSV (-p).

    :param config: optional dict of preset options; parsed CLI arguments
        are merged into it.  NOTE(review): mutable default argument --
        state persists across calls; harmless only because main() runs
        once per process.
    """
    parser = argparse.ArgumentParser()
    submain = parser.add_argument_group(
        'main execution and evaluation functionality')
    submain.add_argument('-p', '--predict_path', type=str, default='',
                         help="Save predictions from model (model must be provided via 'dump_model')")
    submain.add_argument('-i', '--input', type=str, default='',
                         help='Absolute path to input training data file. (for now must be a header-less CSV)')
    submain.add_argument('-C', '--cuda_env', type=str, default='',
                         help='Anything special to inject into CUDA_VISIBLE_DEVICES environment string')
    subopts = parser.add_argument_group(
        'model creation and configuration options')
    subopts.add_argument('-l', '--labels', type=str, default='',
                         help="Path to label one-column file with one row for each input")
    # NOTE(review): choices allow 'svm', but classifier_train() only
    # understands 'svc'/'rf'/'dnn' -- selecting 'svm' would fail
    # downstream with an unbound-name error. Confirm intended spelling.
    subopts.add_argument('-m', '--model_type', type=str, default='rf',
                         help='specify the underlying classifier type (rf (randomforest), svc (SVM))', choices=['svm', 'rf'])
    subopts.add_argument('-f', '--feature_nomask', dest='feature_nomask',
                         default=False, action='store_true', help='create masked samples on input')
    subopts.add_argument('-n', '--add_softnoise', dest='softnoise', default=False,
                         action='store_true', help='do not add soft noise to classification inputs')
    subopts.add_argument('-a', '--push_address',
                         help='server address to push the model (e.g. http://localhost:8887/upload)', default=os.getenv('ACUMOS_PUSH', ""))
    subopts.add_argument('-A', '--auth_address',
                         help='server address for login and push of the model (e.g. http://localhost:8887/auth)', default=os.getenv('ACUMOS_AUTH', ""))
    subopts.add_argument(
        '-d', '--dump_model', help='dump model to a pickle directory for local running', default='')
    subopts.add_argument('-s', '--summary', type=int, dest='summary', default=0,
                         help='summarize top N image classes are strong for which label class (only in training)')
    # Merge parsed CLI flags into any caller-provided presets.
    config.update(vars(parser.parse_args()))

    # The input CSV is mandatory in both modes.
    if not os.path.exists(config['input']):
        print("The target input '{:}' was not found, please check input arguments.".format(
            config['input']))
        sys.exit(-1)
    print("Loading raw samples...")
    rawDf = pd.read_csv(config['input'], delimiter=",")

    # Optionally restrict visible GPUs for downstream libraries.
    if config['cuda_env']:
        os.environ['CUDA_VISIBLE_DEVICES'] = config['cuda_env']

    if not config['predict_path'] and config['labels']:
        # ---- training mode ----
        if not os.path.exists(config['labels']):
            print("The target labels '{:}' was not found, please check input arguments.".format(
                config['labels']))
            sys.exit(-1)
        formatter = Formatter(input_softnoise=config['softnoise'])
        print("Loading labels to train a new model...")
        rawLabel = pd.read_csv(config['labels'], header=None, delimiter=",")
        # Only single-label (one-column) training data is supported.
        if len(rawLabel.columns) != 1:
            print(
                "Error, not currently programmed to best-of class selection to a singleton.")
            sys.exit(-1)
        rawLabel = rawLabel[0].tolist()
        # Learn the mapping from the raw CSV columns to feature vectors.
        formatter.learn_input_mapping(rawDf, "tag", "image", "score")
        print("Converting block of {:} responses into training data, utilizing {:} images...".format(
            len(rawDf), len(rawLabel)))
        objRefactor = formatter.transform_raw_sample(
            rawDf, rawLabel, None if config['feature_nomask'] else Formatter.SAMPLE_GENERATE_MASKING)
        print("Generated {:} total samples (skip-masking: {:})".format(
            len(objRefactor['values']), config['feature_nomask']))
        # Debug hook: model_archive() defaults to a no-op and returns None,
        # so a fresh classifier is normally trained below.
        clf = model_archive()

        if config['summary']:
            # Report the top-N strongest image classes per label class.
            df_combined = pd.DataFrame(
                objRefactor['values'], columns=objRefactor['columns'])
            df_combined['_labels'] = objRefactor['labels']
            groupSet = df_combined.groupby('_labels')
            for nameG, rowsG in groupSet:
                df_sum = rowsG.sum(axis=0, numeric_only=True)
                series_top = df_sum.sort_values(ascending=False)
                print("Label: '{:}', top {:} classes...".format(
                    nameG, config['summary']))
                print(series_top[0:config['summary']])

        if config['push_address'] or config['dump_model']:
            # Train only if the debug archive did not supply a model.
            if clf is None:
                clf = classifier_train(
                    objRefactor['values'], objRefactor['labels'], config['model_type'])
            model, reqs = model_create_pipeline(formatter, clf)
            # Debug hook: no-op unless debugging is enabled inside model_archive().
            model_archive(clf)
            if config['push_address']:
                from acumos.session import AcumosSession
                session = AcumosSession(
                    push_api=config['push_address'], auth_api=config['auth_address'])
                session.push(model, MODEL_NAME, reqs)
                print("Pushing new model to '{:}'...".format(
                    config['push_address']))
            if config['dump_model']:
                from acumos.session import AcumosSession
                from os import makedirs
                if not os.path.exists(config['dump_model']):
                    makedirs(config['dump_model'])
                print("Dumping new model to '{:}'...".format(
                    config['dump_model']))
                session = AcumosSession()
                session.dump(model, MODEL_NAME, config['dump_model'], reqs)
    else:
        # ---- prediction mode (requires a previously dumped model) ----
        if not config['dump_model'] or not os.path.exists(config['dump_model']):
            # NOTE(review): .format() receives an argument but the message
            # has no placeholder, so the offending path is never shown.
            print("Attempting to predict from a dumped model, but model not found.".format(
                config['dump_model']))
            sys.exit(-1)
        print("Attempting predict/transform on input sample...")
        from acumos.wrapped import load_model
        model = load_model(config['dump_model'])
        # Rebuild the model's wrapped input type from the raw columns
        # (one named-tuple field per DataFrame column, passed column-wise).
        type_in = model.classify._input_type
        classify_in = type_in(*tuple(col for col in rawDf.values.T))
        out_wrapped = model.classify.from_wrapped(classify_in).as_wrapped()
        dfPred = pd.DataFrame(out_wrapped[0])
        if config['predict_path']:
            print("Writing prediction to file '{:}'...".format(
                config['predict_path']))
            dfPred.to_csv(config['predict_path'], sep=",", index=False)
        if dfPred is not None:
            print("Predictions:\n{:}".format(dfPred))
if __name__ == '__main__':
    # Make the repository root importable when this file runs as a script.
    repo_root = os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
    if repo_root not in sys.path:
        sys.path.append(repo_root)
    main()
| 47.510373 | 151 | 0.62655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,747 | 0.239913 |
4f6dcaec2d498a6e7a2ab48120ce085ef871eca3 | 1,219 | py | Python | PLC/PRC/TPCs/tpc8/construct.py | luis1ribeiro/MEI---Majors-Degree | c3b6660703e07eea0506eac29e7eb9af2264fc7a | [
"MIT"
] | 1 | 2021-07-19T22:09:47.000Z | 2021-07-19T22:09:47.000Z | PLC/PRC/TPCs/tpc8/construct.py | luis1ribeiro/MEI-MajorsDegree | c3b6660703e07eea0506eac29e7eb9af2264fc7a | [
"MIT"
] | null | null | null | PLC/PRC/TPCs/tpc8/construct.py | luis1ribeiro/MEI-MajorsDegree | c3b6660703e07eea0506eac29e7eb9af2264fc7a | [
"MIT"
] | null | null | null | import json, urllib.parse
import requests as reqs
# SPARQL prefix header shared by every request sent to the GraphDB endpoint.
prefixes = '''
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX noInferences: <http://www.ontotext.com/explicit>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX : <http://www.di.uminho.pt/prc2021/mapa-virtual#>
'''

getLink = "http://localhost:7200/repositories/mava-virtual?query="
upLink = "http://localhost:7200/repositories/mava-virtual/statements?update="

# Query written in class: derive a direct :temLigação edge from each
# connection's :origem/:destino pair.
query = '''CONSTRUCT { ?c1 :temLigação ?c2 . }
WHERE { ?l :origem ?c1. ?l :destino ?c2. } '''

resp = reqs.get(getLink + urllib.parse.quote(prefixes + query))
resp.raise_for_status()

# Each constructed triple comes back as one ".\n"-terminated line; strip the
# URI wrapping on subject/object and insert the corresponding :temLigação
# fact back into the repository.
for line in resp.text.split('.\n'):
    parts = line.split()
    if len(parts) == 3:
        c1 = parts[0].split('#')[1][:-1]
        c2 = parts[2].split('#')[1][:-1]
        insert = "INSERT DATA { :" + c1 + " :temLigação" + " :" + c2 + " . }"
        resp_ = reqs.post(upLink + urllib.parse.quote(prefixes + insert))
        resp_.raise_for_status()
4f6e2090924e84b3340baf9b6caee34a15b01328 | 4,518 | py | Python | src/gaze_estimation.py | gsamueil/OpenVino-Computer-Pointer-Controller | bb8c97a5ab4aac334b60f92b747cd3496c399841 | [
"MIT"
] | 1 | 2020-06-20T13:23:47.000Z | 2020-06-20T13:23:47.000Z | src/gaze_estimation.py | gsamueil/OpenVino-Computer-Pointer-Controller | bb8c97a5ab4aac334b60f92b747cd3496c399841 | [
"MIT"
] | null | null | null | src/gaze_estimation.py | gsamueil/OpenVino-Computer-Pointer-Controller | bb8c97a5ab4aac334b60f92b747cd3496c399841 | [
"MIT"
] | 4 | 2020-07-01T16:11:43.000Z | 2020-08-01T23:15:36.000Z | '''
This is a sample class for a model. You may choose to use it as-is or make any changes to it.
This has been provided just to give you an idea of how to structure your model class.
'''
# Standard library
import logging as log
import math
import sys
import warnings

# Third-party
import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IECore
warnings.filterwarnings("ignore")
class GazeEstimationClass:
    '''
    Class for the Gaze Estimation Model.

    Feeds preprocessed left/right eye crops plus head-pose angles through
    the network and converts the raw gaze vector into a screen-plane
    (x, y) mouse coordinate, correcting for head roll.
    '''

    def __init__(self, model_name, device='CPU', extensions=None):
        '''
        Store model paths / device settings and read the IR network.

        :param model_name: path prefix of the IR files (without .xml/.bin)
        :param device: inference device name (e.g. 'CPU')
        :param extensions: optional path to a device extension library
        :raises ValueError: if the IR files cannot be read
        '''
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extension = extensions

        try:
            self.model = IENetwork(self.model_structure, self.model_weights)
        except Exception as e:
            # BUG FIX: chain the original exception ('from e') so the real
            # cause is not swallowed; also fixed the 'enterred' typo.
            raise ValueError("Could not Initialise the network. Have you entered the correct model path?") from e

        # Cache the first declared input/output names and the output shape.
        self.input_name = next(iter(self.model.inputs))
        self.output_name = next(iter(self.model.outputs))
        self.output_shape = self.model.outputs[self.output_name].shape

    def load_model(self):
        '''
        Load the network onto the target device.

        If unsupported layers are found, the user-supplied extension is
        added and the layer support is re-checked; the process exits if
        unsupported layers remain.
        '''
        self.model = IENetwork(self.model_structure, self.model_weights)
        self.core = IECore()
        supported_layers = self.core.query_network(network=self.model, device_name=self.device)
        unsupported_layers = [layer for layer in self.model.layers.keys() if layer not in supported_layers]
        if len(unsupported_layers) != 0:
            log.error("Unsupported layers found ...")
            log.error("Adding specified extension")
            self.core.add_extension(self.extension, self.device)
            # Re-query after loading the extension.
            supported_layers = self.core.query_network(network=self.model, device_name=self.device)
            unsupported_layers = [layer for layer in self.model.layers.keys() if layer not in supported_layers]
            if len(unsupported_layers) != 0:
                log.error("ERROR: There are still unsupported layers after adding extension...")
                # Fix: sys.exit is reliable in scripts; bare exit() is a
                # site-module convenience intended for the interactive shell.
                sys.exit(1)
        self.net = self.core.load_network(network=self.model, device_name=self.device, num_requests=1)

    def predict(self, left_eye_image, right_eye_image, head_pose_output):
        '''
        Run gaze inference for one frame.

        :param left_eye_image: BGR crop of the left eye
        :param right_eye_image: BGR crop of the right eye
        :param head_pose_output: head-pose angles fed to 'head_pose_angles'
            (index 2 is treated as the roll angle in degrees)
        :return: ((x, y) mouse coordinate, raw gaze vector)
        '''
        self.left_eye_pre_image, self.right_eye_pre_image = self.preprocess_input(left_eye_image, right_eye_image)
        # Input blob names are fixed by the gaze-estimation IR definition.
        self.results = self.net.infer(
            inputs={'left_eye_image': self.left_eye_pre_image, 'right_eye_image': self.right_eye_pre_image,
                    'head_pose_angles': head_pose_output})
        self.mouse_coordinate, self.gaze_vector = self.preprocess_output(self.results, head_pose_output)
        return self.mouse_coordinate, self.gaze_vector

    def check_model(self):
        # Intentionally a no-op; kept for interface parity with the other
        # model-wrapper classes in this project.
        pass

    def preprocess_input(self, left_eye_image, right_eye_image):
        '''
        Resize both eye crops to 60x60 and reorder HWC -> NCHW.

        :return: (left, right) preprocessed blobs of shape (1, C, 60, 60)
        '''
        left_eye_pre_image = cv2.resize(left_eye_image, (60, 60))
        left_eye_pre_image = left_eye_pre_image.transpose((2, 0, 1))
        left_eye_pre_image = left_eye_pre_image.reshape(1, *left_eye_pre_image.shape)

        right_eye_pre_image = cv2.resize(right_eye_image, (60, 60))
        right_eye_pre_image = right_eye_pre_image.transpose((2, 0, 1))
        right_eye_pre_image = right_eye_pre_image.reshape(1, *right_eye_pre_image.shape)
        return left_eye_pre_image, right_eye_pre_image

    def preprocess_output(self, outputs, head_pose_estimation_output):
        '''
        Rotate the raw gaze vector by the head roll angle to get a
        screen-plane pointer offset.

        :param outputs: raw inference results keyed by output name
        :param head_pose_estimation_output: head-pose angles; index 2 is
            the roll angle in degrees
        :return: ((x, y) pointer offset, raw gaze vector)
        '''
        roll_value = head_pose_estimation_output[2]
        outputs = outputs[self.output_name][0]
        # 2-D rotation of (gaze_x, gaze_y) by the roll angle.
        cos_theta = math.cos(roll_value * math.pi / 180)
        sin_theta = math.sin(roll_value * math.pi / 180)
        x_value = outputs[0] * cos_theta + outputs[1] * sin_theta
        y_value = outputs[1] * cos_theta - outputs[0] * sin_theta
        return (x_value, y_value), outputs
| 43.442308 | 114 | 0.679062 | 4,157 | 0.920097 | 0 | 0 | 0 | 0 | 0 | 0 | 1,254 | 0.277556 |