blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90d70b3dd8d16e89f6c65db6ae6156717e7b9391 | b6b30fb06124883b074144c419b43d9182efcdff | /Time Series/multi_headed_multivariate_mlp.py | 4f65ad9be0b18a8c59829d143aa01a4171db2e67 | [] | no_license | JohnnySunkel/BlueSky | da9f5107034289bfbdd3ba40458f9b9bd8d01a13 | 5a20eba9ef7509a5a7b7af86e7be848242e1a72f | refs/heads/master | 2021-07-07T09:57:37.256950 | 2020-09-02T23:06:46 | 2020-09-02T23:06:46 | 166,883,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,063 | py | # Multi-headed multivariate MLP
from numpy import array, hstack
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.merge import concatenate
# Split a multivariate time series into samples
def split_sequences(sequences, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
# Find the end of this pattern
end_ix = i + n_steps
# Check if we are beyond the dataset
if end_ix > len(sequences) - 1:
break
# Gather input and output parts of the pattern
seq_x, seq_y = sequences[i: end_ix, :-1], sequences[end_ix, -1]
X.append(seq_x)
y.append(seq_y)
return array(X), array(y)
# Define input sequences
in_seq1 = array([10, 20, 30, 40, 50, 60, 70, 80, 90])
in_seq2 = array([15, 25, 35, 45, 55, 65, 75, 85, 95])
out_seq = array([in_seq1[i] + in_seq2[i] for i in range(len(in_seq1))])
# Reshape the data to [rows, columns]
in_seq1 = in_seq1.reshape((len(in_seq1), 1))
in_seq2 = in_seq2.reshape((len(in_seq2), 1))
out_seq = out_seq.reshape((len(out_seq), 1))
# Stack the columns horizontally
dataset = hstack((in_seq1, in_seq2, out_seq))
# Choose the number of time steps
n_steps = 3
# Convert into input/output
X, y = split_sequences(dataset, n_steps)
# Separate input data
X1 = X[:, :, 0]
X2 = X[:, :, 1]
# First input model
visible1 = Input(shape = (n_steps, ))
dense1 = Dense(100, activation = 'relu')(visible1)
# Second input model
visible2 = Input(shape = (n_steps, ))
dense2 = Dense(100, activation = 'relu')(visible2)
# Merge input models
merge = concatenate([dense1, dense2])
output = Dense(1)(merge)
# Connect input and output models
model = Model(inputs = [visible1, visible2], outputs = output)
model.compile(optimizer = 'adam', loss = 'mse')
# Fit the model
model.fit([X1, X2], y, epochs = 2000, verbose = 0)
# Make predictions
x_input = array([[70, 75], [80, 85], [90, 95]])
x1 = x_input[:, 0].reshape((1, n_steps))
x2 = x_input[:, 1].reshape((1, n_steps))
y_hat = model.predict([x1, x2], verbose = 0)
print(y_hat)
| [
"noreply@github.com"
] | JohnnySunkel.noreply@github.com |
ea2da028c601946bffd0f087cf1eb7e74cd23590 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /115_testing/examples/Unit Testing with Python/2-unit-exercise-files/demos/after/telemetry.py | 00fac72ab8ea49da2cf6e6458bbf863227ae1301 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 3,031 | py | DiagnosticChannelConnectionString = "*111#"
class TelemetryDiagnosticControls:
def __init__(self, telemetry_client=None):
self.telemetry_client = telemetry_client or TelemetryClient()
self.diagnostic_info = ""
def check_transmission(self):
telemetry_client = self.reconnect(DiagnosticChannelConnectionString)
self.diagnostic_info = ""
self.diagnostic_info = self.fetch_diagnostic_info(telemetry_client)
def reconnect(self, address):
self.telemetry_client.disconnect()
retryLeft = 3
while ((not self.telemetry_client.online_status) and retryLeft > 0):
self.telemetry_client.connect(address)
retryLeft -= 1
if not self.telemetry_client.online_status:
raise Exception("Unable to connect.")
return self.telemetry_client
def fetch_diagnostic_info(self, connected_client):
connected_client.send(TelemetryClient.DIAGNOSTIC_MESSAGE)
if not self.telemetry_client.online_status:
raise Exception("Unable to connect.")
return connected_client.receive()
class TelemetryClient(object):
DIAGNOSTIC_MESSAGE = "AT#UD"
def __init__(self):
self.online_status = False
self._diagnostic_message_result = ""
def connect(self, telemetry_server_connection_string):
if not telemetry_server_connection_string:
raise Exception()
# simulate the operation on a real modem
success = random.randint(0, 10) <= 8
self.online_status = success
def disconnect(self):
self.online_status = False
def send(self, message):
if not message:
raise Exception()
if message == TelemetryClient.DIAGNOSTIC_MESSAGE:
# simulate a status report
self._diagnostic_message_result = """\
LAST TX rate................ 100 MBPS\r\n
HIGHEST TX rate............. 100 MBPS\r\n
LAST RX rate................ 100 MBPS\r\n
HIGHEST RX rate............. 100 MBPS\r\n
BIT RATE.................... 100000000\r\n
WORD LEN.................... 16\r\n
WORD/FRAME.................. 511\r\n
BITS/FRAME.................. 8192\r\n
MODULATION TYPE............. PCM/FM\r\n
TX Digital Los.............. 0.75\r\n
RX Digital Los.............. 0.10\r\n
BEP Test.................... -5\r\n
Local Rtrn Count............ 00\r\n
Remote Rtrn Count........... 00"""
return
# here should go the real Send operation (not needed for this exercise)
def receive(self):
if not self._diagnostic_message_result:
# simulate a received message (just for illustration - not needed for this exercise)
message = ""
messageLength = random.randint(0, 50) + 60
i = messageLength
while(i >= 0):
message += chr((random.randint(0, 40) + 86))
i -= 1
else:
message = self._diagnostic_message_result
self._diagnostic_message_result = ""
return message
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
d92a522de4d6c6b8d5fc268e8e8c0dc7741b7399 | 887d5bdb9071ff5d1b1ef836a9638c790b43a377 | /tests.py | 289d3c1de37aafce37d16318d8cb3526b04d6682 | [
"MIT"
] | permissive | contraslash/blogs_engine-django | a1a01af49ceb676c6ecec75d9b376916bc3e36c6 | b353da29a6302c2a7d8b68d12ae30a18c9c1c405 | refs/heads/master | 2020-04-23T17:45:16.697388 | 2016-12-22T03:53:07 | 2016-12-22T03:53:07 | 171,343,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.test import TestCase
from django.views import generic
# Create your tests here.
class Asd(generic.CreateView):
template_name = "blo"
| [
"ma0@contraslash.com"
] | ma0@contraslash.com |
3b1018487b9446736ec9e425f983dfe31a1d82a0 | 1d2bbeda56f8fede69cd9ebde6f5f2b8a50d4a41 | /easy/python3/c0108_441_arranging-coins/00_leetcode_0108.py | a91b4229836ec589303b2d609b254240c1b7bc4a | [] | no_license | drunkwater/leetcode | 38b8e477eade68250d0bc8b2317542aa62431e03 | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | refs/heads/master | 2020-04-06T07:09:43.798498 | 2018-06-20T02:06:40 | 2018-06-20T02:06:40 | 127,843,545 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#441. Arranging Coins
#You have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins.
#Given n, find the total number of full staircase rows that can be formed.
#n is a non-negative integer and fits within the range of a 32-bit signed integer.
#Example 1:
#n = 5
#The coins can form the following rows:
#¤
#¤ ¤
#¤ ¤
#Because the 3rd row is incomplete, we return 2.
#Example 2:
#n = 8
#The coins can form the following rows:
#¤
#¤ ¤
#¤ ¤ ¤
#¤ ¤
#Because the 4th row is incomplete, we return 3.
#class Solution:
# def arrangeCoins(self, n):
# """
# :type n: int
# :rtype: int
# """
# Time Is Money | [
"Church.Zhong@audiocodes.com"
] | Church.Zhong@audiocodes.com |
5857d92b11a30c222fbbda0802a5c07c34474583 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03166/s381331424.py | 66f1fe1fea5bf2c543493316f5693b3ab94d4383 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import sys
sys.setrecursionlimit(10**6)
n, m = map(int, input().split())
G = [[] for _ in range(n)]
for i in range(m):
x, y = map(int, input().split())
x, y = x-1, y-1
G[x].append(y)
memo = [-1]*n
def dp(v):
if memo[v] != -1:
return memo[v]
res = 0
for nv in G[v]:
res = max(res, dp(nv) + 1)
memo[v] = res
return res
ans = 0
for v in range(n):
ans = max(ans, dp(v))
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
18c4de15db1d8c61e9d55a61cc302d60d137d358 | 3110707acf34609a40eceadaf7364a097f1486be | /pyasdf/tests/test_yaml.py | d3880278cfe9b622ec0b0c7975f84cd409d1c075 | [] | no_license | ejeschke/pyasdf | 79f1baa6c87c98bdb433859c8a882d74c944f45c | eda323a8a2c20ad66d64cda0cfc66cabb5a27e3c | refs/heads/master | 2021-01-18T17:59:23.208230 | 2014-07-30T17:40:48 | 2014-07-30T17:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,384 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import io
import numpy as np
from astropy.extern import six
from astropy import units as u
from astropy.utils.compat.odict import OrderedDict
from astropy.tests.helper import pytest
import yaml
from .. import asdf
from .. import tagged
from .. import treeutil
from . import helpers
def test_ordered_dict(tmpdir):
# Test that we can write out and read in ordered dicts.
tree = {
"ordered_dict": OrderedDict(
[('first', 'foo'),
('second', 'bar'),
('third', 'baz')]),
"unordered_dict": {
'first': 'foo',
'second': 'bar',
'third': 'baz'
}
}
def check_asdf(asdf):
tree = asdf.tree
assert isinstance(tree['ordered_dict'], OrderedDict)
assert list(tree['ordered_dict'].keys()) == ['first', 'second', 'third']
assert not isinstance(tree['unordered_dict'], OrderedDict)
assert isinstance(tree['unordered_dict'], dict)
def check_raw_yaml(content):
assert b'OrderedDict' not in content
helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf, check_raw_yaml)
def test_unicode_write(tmpdir):
# We want to write unicode out as regular utf-8-encoded
# characters, not as escape sequences
tree = {
"ɐʇɐp‾ǝpoɔıun": 42,
"ascii_only": "this is ascii"
}
def check_asdf(asdf):
assert "ɐʇɐp‾ǝpoɔıun" in asdf.tree
assert isinstance(asdf.tree['ascii_only'], six.text_type)
def check_raw_yaml(content):
# Ensure that unicode is written out as UTF-8 without escape
# sequences
assert "ɐʇɐp‾ǝpoɔıun".encode('utf-8') in content
# Ensure that the unicode "tag" is not used
assert b"unicode" not in content
helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf, check_raw_yaml)
def test_arbitrary_python_object():
# Putting "just any old" Python object in the tree should raise an
# exception.
class Foo(object):
pass
tree = {'object': Foo()}
buff = io.BytesIO()
ff = asdf.AsdfFile(tree)
with pytest.raises(yaml.YAMLError):
ff.write_to(buff)
def test_python_tuple(tmpdir):
# We don't want to store tuples as tuples, because that's not a
# built-in YAML data type. This test ensures that they are
# converted to lists.
tree = {
"val": (1, 2, 3)
}
def check_asdf(asdf):
assert isinstance(asdf.tree['val'], list)
def check_raw_yaml(content):
assert b'tuple' not in content
helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf, check_raw_yaml)
def test_tags_removed_after_load(tmpdir):
tree = {
"foo": ["bar", (1, 2, None)]
}
def check_asdf(asdf):
def assert_untagged(node):
if node != asdf.tree:
assert not isinstance(node, tagged.Tagged)
treeutil.walk(asdf.tree, assert_untagged)
helpers.assert_roundtrip_tree(tree, tmpdir, check_asdf)
def test_explicit_tags():
yaml = """%ASDF 0.1.0
%YAML 1.1
--- !<tag:stsci.edu:asdf/0.1.0/core/asdf>
unit: !<tag:stsci.edu:asdf/0.1.0/unit/unit> m
...
"""
# Check that fully-qualified explicit tags work
buff = helpers.yaml_to_asdf(yaml, yaml_headers=False)
ff = asdf.AsdfFile.read(buff)
assert isinstance(ff.tree['unit'], u.UnitBase)
def test_yaml_internal_reference(tmpdir):
# Test that YAML internal references (anchors and aliases) work,
# as well as recursive data structures.
d = {
'foo': '2',
}
d['bar'] = d
l = []
l.append(l)
tree = {
'first': d,
'second': d,
'list': l
}
def check_yaml(content):
assert b'list:--&id002-*id002' in b''.join(content.split())
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
def test_yaml_nan_inf():
tree = {
'a': np.nan,
'b': np.inf,
'c': -np.inf
}
buff = io.BytesIO()
ff = asdf.AsdfFile(tree).write_to(buff)
buff.seek(0)
ff = asdf.AsdfFile().read(buff)
assert np.isnan(ff.tree['a'])
assert np.isinf(ff.tree['b'])
assert np.isinf(ff.tree['c'])
| [
"mdboom@gmail.com"
] | mdboom@gmail.com |
20c9b51d41cdf40cd15823324d259e09f48949eb | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_scipy_numpy/source/sklearn/feature_selection/tests/test_chi2.py | baaa4907bad5e9c99a5c827aaf6328c145c57de5 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,398 | py | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| [
"master@MacBook-Pro-admin.local"
] | master@MacBook-Pro-admin.local |
efdce2940b1fe937faf087137eb9049b322a08fd | ffbfb86db9dac89c1cc24e648b199a8d3db9850f | /python/python_pingSweep_processing.py | 4b75cc8f862a353a47909c5c0aa12cab1ff14abd | [] | no_license | attikis/programming | e7ecef5c2bf2af71a3e89e6156a4a934fb2ed724 | 4b6215021d6ca1effd0f18ecfe8afc67056b7098 | refs/heads/master | 2021-05-09T21:51:40.425627 | 2019-02-19T07:22:56 | 2019-02-19T07:22:56 | 118,735,207 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | #!/usr/bin/env python
# Script docstrings
'''
Usage:
./fileName.py
Permissions:
chmod +x fileName.py
Description:
This is and example that uses the processing module to make a subnet discovery script. It is almost identical to the threading script
python_pingSweep_threading.py, but relies on processing instead of threading.
'''
# Import my own modules here
import python_myFunctions as myFunctions
mf = myFunctions.CreateObject()
# All other required modules here
from processing import Process, Queue, Pool
import time
import subprocess
from IPy import IP #sudo easy_install IPy
import sys
# Declarations here
queue = Queue()
# Create an instance of an IP object. If no size specification is given a size of 1 address (/32 for IPv4 and /128 for IPv6) is assumed.
#ipAddresses = IP("10.0.1.0/24") #generates 10.0.1.0 -> 10.0.1.255
ipAddresses = ["172.20.43.175", "194.42.7.189", "194.42.7.57", "127.0.0.1"]
nProcesses = 2
def f(iProcess, queue):
# Create an infinite loop!
while True:
# Place a conditional statement for exiting the loop
if queue.empty():
mf.Cout("Queue for Process #%s is empty. Exiting python shell." % (iProcess))
#print __doc__
sys.exit(1)
mf.Cout("Process #%s started." % (iProcess))
# Get an item from the queue in order to ping it (i.e. get an ip address)
ip = queue.get()
cmd = "ping -c 2 %s" % (ip)
retVal = subprocess.call(cmd, shell = True, stdout=open("/dev/null", "w"), stderr = subprocess.STDOUT)
# Check return value; if else than zero inform user
if retVal == 0:
mf.Cout("Process #%s is alive." % (iProcess))
else:
mf.Cout("Process #%s is not responding for IP Address %s." % (iProcess, ip))
if __name__ == "__main__":
mf.StopWatchStart()
# Loop over all IP addresses
for ip in ipAddresses:
# Put an item into the queue
queue.put(ip)
# Loop over a given number of processes;
for iProcess in range(nProcesses):
# Create process
p = Process(target = f, args=[iProcess, queue])
# Start process
p.start()
mf.Cout("Main process joins on queue.")
# Join procees to on queue so that all processes are gotten and processed before exiting the program.
p.join()
mf.Cout("Main program finished.")
#timer.sleep(5)
mf.StopWatchStop()
| [
"attikis@cern.ch"
] | attikis@cern.ch |
4b6529761b99b2168fa4f88eb8712c1b4c7d82f3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/169/usersdata/353/81527/submittedfiles/divisores.py | 2cc82eb011ec538b680e812a79745e972dd28fc2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
n =int(input())
a =int(input())
b =int(input())
i = 1
while n >= 0:
if i%a == 0:
print(i)
n = n - 1
if i%b == 0:
print(i)
n = n - 1
i = i + 1
import math
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b62819c394cf8f65af2b9fbaed69dd49c3d24c2c | 5af277b5819d74e61374d1d78c303ac93c831cf5 | /tcc/evaluation/event_completion.py | 370878db3a06ee8e4ef4a08821c9a765b1e54613 | [
"Apache-2.0"
] | permissive | Ayoob7/google-research | a2d215afb31513bd59bc989e09f54667fe45704e | 727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7 | refs/heads/master | 2022-11-11T03:10:53.216693 | 2020-06-26T17:13:45 | 2020-06-26T17:13:45 | 275,205,856 | 2 | 0 | Apache-2.0 | 2020-06-26T16:58:19 | 2020-06-26T16:58:18 | null | UTF-8 | Python | false | false | 6,956 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation on detecting key events using a RNN.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl import logging
import concurrent.futures as cf
import numpy as np
import sklearn
import tensorflow.compat.v2 as tf
from tcc.config import CONFIG
from tcc.dataset_splits import DATASET_TO_NUM_CLASSES
from tcc.evaluation.task import Task
from tcc.evaluation.task_utils import get_targets_from_labels
from tcc.evaluation.task_utils import unnormalize
FLAGS = flags.FLAGS
layers = tf.keras.layers
class VectorRegression(sklearn.base.BaseEstimator):
"""Class to perform regression on multiple outputs."""
def __init__(self, estimator):
self.estimator = estimator
def fit(self, x, y):
_, m = y.shape
# Fit a separate regressor for each column of y
self.estimators_ = [sklearn.base.clone(self.estimator).fit(x, y[:, i])
for i in range(m)]
return self
def predict(self, x):
# Join regressors' predictions
res = [est.predict(x)[:, np.newaxis] for est in self.estimators_]
return np.hstack(res)
def score(self, x, y):
# Join regressors' scores
res = [est.score(x, y[:, i]) for i, est in enumerate(self.estimators_)]
return np.mean(res)
def get_error(predictions, labels, seq_lens, global_step, num_classes, prefix):
"""Get error based on predictions."""
errs = []
for i in range(num_classes - 1):
abs_err = 0
for j in range(len(predictions)):
# Choose last seq_len steps as our preprocessing pads sequences in
# front with zeros.
unnorm_preds = unnormalize(predictions[j][:, i])
unnorm_labels = unnormalize(labels[j][:, i])
abs_err += abs(unnorm_labels - unnorm_preds) / seq_lens[j]
err = abs_err / len(predictions)
logging.info('[Global step: {}] {} {} Fraction Error: '
'{:.3f},'.format(global_step.numpy(), prefix, i, err))
tf.summary.scalar('event_completion/%s_%d_error' % (prefix, i),
err, step=global_step)
errs.append(err)
avg_err = np.mean(errs)
logging.info('[Global step: {}] {} Fraction Error: '
'{:.3f},'.format(global_step.numpy(), prefix, avg_err))
tf.summary.scalar('event_completion/avg_error_%s' % prefix,
avg_err, step=global_step)
return avg_err
def fit_model(train_embs, train_labels, val_embs, val_labels,
global_step, num_classes, prefix, report_error=False):
"""Linear Regression to regress to fraction completed."""
train_seq_lens = [len(x) for x in train_labels]
val_seq_lens = [len(x) for x in val_labels]
train_embs = np.concatenate(train_embs, axis=0)
train_labels = np.concatenate(train_labels, axis=0)
val_embs = np.concatenate(val_embs, axis=0)
val_labels = np.concatenate(val_labels, axis=0)
lin_model = VectorRegression(sklearn.linear_model.LinearRegression())
lin_model.fit(train_embs, train_labels)
train_score = lin_model.score(train_embs, train_labels)
val_score = lin_model.score(val_embs, val_labels)
# Not used for evaluation right now.
if report_error:
val_predictions = lin_model.predict(val_embs)
train_predictions = lin_model.predict(train_embs)
train_labels = np.array_split(train_labels,
np.cumsum(train_seq_lens))[:-1]
train_predictions = np.array_split(train_predictions,
np.cumsum(train_seq_lens))[:-1]
val_labels = np.array_split(val_labels,
np.cumsum(val_seq_lens))[:-1]
val_predictions = np.array_split(val_predictions,
np.cumsum(val_seq_lens))[:-1]
get_error(train_predictions, train_labels, train_seq_lens,
global_step, num_classes, 'train_' + prefix)
get_error(val_predictions, val_labels, val_seq_lens,
global_step, num_classes, 'val_' + prefix)
return train_score, val_score
class EventCompletion(Task):
"""Predict event completion using linear regression."""
def __init__(self):
super(EventCompletion, self).__init__(downstream_task=True)
def evaluate_embeddings(self, algo, global_step, datasets):
"""Labeled evaluation."""
fractions = CONFIG.EVAL.CLASSIFICATION_FRACTIONS
train_embs = datasets['train_dataset']['embs']
val_embs = datasets['val_dataset']['embs']
num_classes = DATASET_TO_NUM_CLASSES[datasets['name']]
if not train_embs or not val_embs:
logging.warn('All embeddings are NAN. Something is wrong with model.')
return 1.0
val_labels = get_targets_from_labels(datasets['val_dataset']['labels'],
num_classes)
num_samples = len(datasets['train_dataset']['embs'])
def worker(fraction_used):
num_samples_used = max(1, int(fraction_used * num_samples))
train_embs = datasets['train_dataset']['embs'][:num_samples_used]
train_labels = get_targets_from_labels(
datasets['train_dataset']['labels'][:num_samples_used], num_classes)
return fit_model(train_embs, train_labels, val_embs, val_labels,
global_step, num_classes, '%s_%s' % (datasets['name'],
str(fraction_used)))
val_scores = []
with cf.ThreadPoolExecutor(max_workers=len(fractions)) as executor:
results = executor.map(worker, fractions)
for (fraction, (train_score, val_score)) in zip(fractions, results):
prefix = '%s_%s' % (datasets['name'], str(fraction))
logging.info('[Global step: {}] Event Completion {} Fraction Train '
'Score: {:.3f},'.format(global_step.numpy(), prefix,
train_score))
logging.info('[Global step: {}] Event Completion {} Fraction Val '
'Score: {:.3f},'.format(global_step.numpy(), prefix,
val_score))
tf.summary.scalar('event_completion/train_%s_score' % prefix,
train_score, step=global_step)
tf.summary.scalar('event_completion/val_%s_score' % prefix,
val_score, step=global_step)
val_scores.append(val_score)
return val_scores[-1]
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
6b28777fed9961433778169cd0d61533f9754f15 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03072/s761698339.py | 37198675e9186e088b0b84fdd2bcd6035e9f1d57 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | n = int(input())
h = list(map(int,input().split()))
ans = 1
max_h = 0
for i in range(1,n):
if h[0] <= h[i] and max_h <= h[i] and h[i] >= h[i-1]:
ans += 1
max_h = max(h[i],max_h)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ba1e2d51b0bc7b1812a9b7129d3b5d6e4cbda31a | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/UrlMultiCollectionFormat/urlmulticollectionformat/aio/__init__.py | e8ac47b28c47913b3ea7375ba2b57eb7a802e649 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._auto_rest_url_mutli_collection_format_test_service_async import AutoRestUrlMutliCollectionFormatTestService
__all__ = ['AutoRestUrlMutliCollectionFormatTestService']
| [
"varad.meru@gmail.com"
] | varad.meru@gmail.com |
1c821b7951c61add7d55ff86abfbedc2065aac7f | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/044_Indexeur_texte/pyindex.py | 7a681e95191598a65fa065cecc0b4fa7dbb49ae1 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,927 | py | # Indexeur texte
# Variations sur le thème du ti et de l'indexation de texte
#
# 2018-08-14 PV
# 2018-09-01 PV Tried collections.Counter, replaced my own top function by itertools.islice
import collections
import functools
import itertools
import locale
import re
import unicodedata
import time
from typing import DefaultDict
# print(locale.getlocale(locale.LC_ALL)) # (None, None)
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
# print(locale.getlocale(locale.LC_ALL)) # ('en_US', 'UTF-8')
"""
l = ['a', 'à', 'A', 'Â', 'b', 'c', 'ç', 'C', 'Ç', 'd', 'boeuf', 'bœuf', 'boev']
l.sort(key=functools.cmp_to_key(locale.strcoll))
print(l)
"""
"""
# Dictionary filtering
d = {'trois':3, 'quatre':4, 'cinq':5, 'six':6}
print(list(filter(lambda key: d[key]>=5, d)))
e = {key: value for (key, value) in d.items() if value>=5}
print(e)
"""
class french_text_canonizer:
# This list can be customized to fit specific needs
# expansion_list = [('œ', 'oe'), ('Œ', 'OE'), ('æ', 'ae'), ('Æ', 'AE'),
# ("‘", "'"), ("’", "'"), ("‚", "'"), ("‛", "'"), ('‹', "'"), ('›', "'"),
# ('“', '"'), ('”', '"'), ('„', '"'), ('‟', '"'), ('«', '"'), ('»', '"'),
# ('ø', 'o\N{COMBINING SHORT SOLIDUS OVERLAY}'), ('Ø', 'O\N{COMBINING LONG SOLIDUS OVERLAY}'),
# ('‽', '?!'), # ('¿', '?'), ('¡', '!'),
# ("\N{NO-BREAK SPACE}", ' ')
# ]
# For this context
# expansion_list = [('œ', 'oe'), ("’", "'")]
expansion_list = [("œ", "oe")]
def __init__(self, case_significant: bool, accent_significant: bool):
self.case_significant = case_significant
self.accent_significant = accent_significant
# def canonize_ci_ai(self, s: str) -> str:
# return ''.join(c for c in list(unicodedata.normalize("NFKD", s.replace('œ', 'oe').replace("’", "'").upper())) if unicodedata.category(c) != 'Mn')
# def canonize_ci_as(self, s: str) -> str:
# return unicodedata.normalize("NFKD", s.replace('œ', 'oe').replace("’", "'").upper())
# Previously:
# def canonize(self, s: str) -> str:
# __call__=canonize
# Default method for objets of the class
def __call__(self, s: str) -> str:
# First some expansions not managed by normalization
for s1, s2 in french_text_canonizer.expansion_list:
if s1 in s:
s = s.replace(s1, s2)
# Enable expansion of ij IJ ff fi fl ffi ffl ſt st ‼ and others
s = unicodedata.normalize("NFKD", s)
if not self.case_significant:
s = s.upper()
if not self.accent_significant:
s = "".join(c for c in list(s) if unicodedata.category(c) != "Mn")
return s # unicodedata.normalize("NFC", s)
# Create all combinations for tests
fr_ci_ai = french_text_canonizer(False, False)
fr_ci_as = french_text_canonizer(False, True)
fr_cs_ai = french_text_canonizer(True, False)
fr_cs_as = french_text_canonizer(True, True)
# s = "Où ça? Écoute ‘ton’ cœur‼ «Dijsktra» Søråñ “ÆØRÅÑ”"
# print(s)
# print(fr_ci_ai.canonize(s))
# print(fr_ci_as.canonize(s))
# print(fr_cs_ai.canonize(s))
# print(fr_cs_as.canonize(s))
WORD_RE = re.compile(r"[\w]+")
WORD_QUOTE_RE = re.compile(r"[\w’]+")
DIGITS_ONLY_RE = re.compile(r"\d+")
# Tried to use a collections.Counter() instead of a collections.defaultdict(int)
# but performance is very bas (3.4s execution instead of 1.8s)
# Probable cause: collections.Counter() only support update(iterable) updte
# and has no single Add(item) method, so there is too much overhead building an
# iterable from a single element just to add one item to a counter.
class forms_locations:
def __init__(self):
self.forms: DefaultDict[str, int] = collections.defaultdict(int)
# self.forms: collections.Counter() = collections.Counter()
self.locations: list[tuple[int, int]] = []
def __str__(self):
# return self.forms.most_common(1)[0][0]
m = -1
s = ""
for form, count in self.forms.items():
if count > m:
m = count
s = form
return s
def count(self):
return sum(c for c in self.forms.values())
def index_file(
file: str, wordre, canonize
) -> tuple[DefaultDict[str, forms_locations], int]:
index: DefaultDict[str, forms_locations] = collections.defaultdict(forms_locations)
words_count = 0
with open(file, "r", encoding="utf-8-sig") as fp:
for line_no, line in enumerate(fp, 1):
for match in wordre.finditer(line):
if not DIGITS_ONLY_RE.fullmatch(match.group()):
words_count += 1
# word = match.group().lower()
word = canonize(match.group())
column_no = match.start() + 1
location = (line_no, column_no)
index[word].forms[match.group()] += 1
# index[word].forms.update([match.group()])
index[word].locations.append(location)
return index, words_count
def test_indexer():
# file = "hp5.txt"
file = "sda2.txt"
ix1 = fr_ci_as # Duration: 1.165
def ix2(s):
return locale.strxfrm(
unicodedata.normalize("NFKD", s).upper()
) # Duration: 1.106
"""
locale sort keys a à âge agit...
fr_ci_as sorts keys a agit... à âge
"""
start = time.time()
index, words_count = index_file(file, WORD_RE, ix2)
print("Duration: %.3f" % (time.time() - start))
print(f"Words: {words_count}, Index size: {len(index)}")
# Reduce index to entries seen at least 10 times
ir = {key: value for (key, value) in index.items() if len(value.locations) >= 10}
print(f"Index réduit: {len(ir)}")
# Print reduced index in alphabetical order
nl = 0
print(
"Sorted by alphabetical order (word, frequencey) reduced index (frequency≥10)"
)
with open("analysis.txt", "w", encoding="utf-8") as fo:
for key in sorted(ir):
l = f"{ir[key]}\t{len(ir[key].locations)}\t{dict(ir[key].forms)}\n"
nl += 1
if nl < 150:
print(l, end="")
fo.write(l)
# Print top 20 by frequency descending
print("\nSorted by decreasing frequency (word, frequency) full index")
nw = 0
for word, count in sorted(
[(str(fl), fl.count()) for fl in index.values()],
key=lambda tup: tup[1],
reverse=True,
):
print(f"{word}\t{count}")
nw += 1
if nw > 20:
break
print("\nSorted be decreasing length (word, length) full index")
nw = 0
for word, count in sorted(
[(str(fl), len(str(fl))) for fl in index.values()],
key=lambda tup: tup[1],
reverse=True,
):
print(f"{word}\t{count}")
nw += 1
if nw > 20:
break
def search_quote(file: str):
# Recherche les formes avec apostrophes identiques à un mot sans apostrophe
# comme d'écrire/décrire, l'éviter/léviter, l'aide/laide, d'avantage/davantage, l'imiter/limiter...
index, _ = index_file(file, WORD_RE, fr_ci_as)
index_quote, _ = index_file(file, WORD_QUOTE_RE, fr_ci_as)
for word in [w for w in index_quote.keys() if "’" in w]:
if word.replace("’", "") in index.keys():
print(word)
def count_letters(file: str):
dic: DefaultDict[str, int] = collections.defaultdict(int)
with open(file, "r", encoding="utf-8-sig") as fp:
for line in fp:
for char in line:
dic[char] += 1
for letter, count in sorted(dic.items(), key=lambda tup: tup[1], reverse=True):
print(f"{letter}\t{count}")
# count_letters("sda.txt")
test_indexer()
| [
"FrenchBear38@outlook.com"
] | FrenchBear38@outlook.com |
57dfd7e7cab29371021bd1197463eab3e4999c77 | 1facfd9d94b0f08ddde2834c717bda55359c2e35 | /Python programming for the absolute beginner - Michael Dawson/Chapter 5 - List & Dictionaries/5.2 ver.1.py | e60614e158a741eac49dc0053be2b2dc3d212008 | [] | no_license | echpochmak/ppftab | 9160383c1d34a559b039af5cd1451a18f2584549 | f5747d87051d837eca431f782491ec9ba3b44626 | refs/heads/master | 2021-09-15T07:23:06.581750 | 2018-05-28T14:33:13 | 2018-05-28T14:33:13 | 261,880,781 | 1 | 0 | null | 2020-05-06T21:18:13 | 2020-05-06T21:18:13 | null | UTF-8 | Python | false | false | 2,935 | py | #Character creator
#This app will create a hero with 4 atrributes and 30 points to dispatch between them
print("Welcome in to the Character creator. ")
print("You have 30 points to spend. ")
print("What would you like to do: ")
print("\n 1. Change the value of the Strengh. \
\n 2. Change the value of the Wisdom. \
\n 3. Change the value of the Health. \
\n 4. Change the value of the Flexibility. \
\n 5. Exit. ")
points = 30
strengh = 0
wisdom = 0
health = 0
flex = 0
choice = input("\n\n What is your choice? ")
while points >= 0:
if choice == "1":
str_choice = input("Would you like to add or remove? ")
str_choice = str_choice.lower()
if str_choice == "add":
str_value = int(input("How many point would you like to add? "))
strengh += str_value
points -= str_value
elif str_choice == "remove":
str_value = int(input("How many point would you like to remove? "))
strengh -= str_value
points += str_value
choice = input("What is your next choice? ")
elif choice == "2":
wisdom_choice = input("Would you like to add or remove? ")
wisdom_choice = wisdom_choice.lower()
if wisdom_choice == "add":
wisdom_value = int(input("How many point would you like to add? "))
wisdom += wisdom_value
points -= wisdom_value
elif wisdom_choice == "remove":
wisdom_value = int(input("How many point would you like to remove? "))
wisdom -= wisdom_value
points += wisdom_value
choice = input("What is your next choice? ")
elif choice == "3":
health_choice = input("Would you like to add or remove? ")
health_choice = health_choice.lower()
if health_choice == "add":
health_value = int(input("How many point would you like to add? "))
health += health_value
points -= health_value
elif health_choice == "remove":
health_value = int(input("How many point would you like to remove? "))
wisdom -= health_value
points += health_value
choice = input("What is your next choice? ")
elif choice == "4":
flex_choice = input("Would you like to add or remove? ")
flex_choice = flex_choice.lower()
if flex_choice == "add":
flex_value = int(input("How many point would you like to add? "))
flex += flex_value
points -= flex_value
elif flex_choice == "remove":
flex_value = int(input("How many point would you like to remove? "))
flex -= flex_value
points += flex_value
choice = input("What is your next choice? ")
elif choice == "5":
break
else:
print("Wrong value!")
choice = input("What is your next choice? ")
| [
"mateuszszpakowski@wp.pl"
] | mateuszszpakowski@wp.pl |
c43594a8f1f8638b38d27e1b9a137350537de436 | 06f65e7b12be94c6210b358aef06bb51f02e9ded | /pyscreenshot/childproc.py | 75394ec6719159d814ede90ecb674bbca125ea7f | [
"BSD-2-Clause"
] | permissive | ponty/pyscreenshot | 17a5f91eddbf6fcf7568a15f959749e7a8c09a74 | c9f6051fe339b4b4c59ef75aa17140211e51606f | refs/heads/master | 2023-03-12T10:28:08.054142 | 2023-03-12T06:38:57 | 2023-03-12T06:38:57 | 1,316,096 | 473 | 93 | BSD-2-Clause | 2023-03-11T13:36:12 | 2011-02-01T13:02:51 | Python | UTF-8 | Python | false | false | 1,220 | py | import logging
import os
from tempfile import TemporaryDirectory
from pyscreenshot.err import FailedBackendError
from pyscreenshot.imcodec import codec
from pyscreenshot.util import run_mod_as_subproc
log = logging.getLogger(__name__)
def childprocess_backend_version(backend):
p = run_mod_as_subproc("pyscreenshot.cli.print_backend_version", [backend])
if p.return_code != 0:
log.warning(p)
raise FailedBackendError(p)
return p.stdout
def childprocess_grab(backend, bbox):
with TemporaryDirectory(prefix="pyscreenshot") as tmpdirname:
filename = os.path.join(tmpdirname, "screenshot.png")
cmd = ["--filename", filename]
if bbox:
x1, y1, x2, y2 = map(str, bbox)
bbox = ":".join(map(str, (x1, y1, x2, y2)))
cmd += ["--bbox", bbox]
if backend:
cmd += ["--backend", backend]
if log.isEnabledFor(logging.DEBUG):
cmd += ["--debug"]
p = run_mod_as_subproc("pyscreenshot.cli.grab", cmd)
if p.return_code != 0:
# log.debug(p)
raise FailedBackendError(p)
data = open(filename, "rb").read()
data = codec[1](data)
return data
| [
"ponty@home"
] | ponty@home |
062d0a5eb9fb03396cb52837c2162b7ac0cb4978 | e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163 | /ScientificComputing/ch09/tvtk_contours.py | aac7ccf588a95a8b5792ef84e586bb10884c125b | [] | no_license | socrates77-sh/learn | a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b | ae50978023f6b098b168b8cca82fba263af444aa | refs/heads/master | 2022-12-16T16:53:50.231577 | 2019-07-13T13:52:42 | 2019-07-13T13:52:42 | 168,442,963 | 0 | 0 | null | 2022-12-08T05:18:37 | 2019-01-31T01:30:06 | HTML | UTF-8 | Python | false | false | 862 | py | # -*- coding: utf-8 -*-
from tvtk.api import tvtk
from utility import show_actors
from tvtk_cut_plane import read_data
if __name__ == "__main__":
plot3d = read_data()
contours = tvtk.ContourFilter(input=plot3d.output)
contours.generate_values(8, plot3d.output.point_data.scalars.range)
mapper = tvtk.PolyDataMapper(input=contours.output,
scalar_range=plot3d.output.point_data.scalars.range)
actor = tvtk.Actor(mapper=mapper)
actor.property.opacity = 0.3
# StructuredGrid网格的外框
outline = tvtk.StructuredGridOutlineFilter(input=plot3d.output)
outline_mapper = tvtk.PolyDataMapper(input=outline.output)
outline_actor = tvtk.Actor(mapper=outline_mapper)
outline_actor.property.color = 0.3, 0.3, 0.3
win, gui = show_actors([actor, outline_actor])
gui.start_event_loop()
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
23643c113416de2de8f60dc4e45b498e0a5ae736 | beaa8e9d6ec16c2ffe8a7d9f72fd6eea904083bb | /ttslearn/util.py | 8e1b4875816ed3a42f7e2682c5e42f2d77c9d161 | [
"MIT"
] | permissive | r9y9/ttslearn | 553f7a92c6160d4d379459bbfd5bf5924a4a4a70 | a970d4ee8aa1d9ce1603d8d3c06d5d67f26b639e | refs/heads/master | 2023-04-09T19:32:28.797819 | 2023-03-07T11:55:48 | 2023-03-07T11:55:48 | 378,789,439 | 220 | 43 | MIT | 2023-03-07T11:55:50 | 2021-06-21T02:54:03 | Jupyter Notebook | UTF-8 | Python | false | false | 7,646 | py | # Acknowledgements:
# mask-related functions were adapted from https://github.com/espnet/espnet
import importlib
import random
from functools import partial
from pathlib import Path
from typing import Any
import numpy as np
import pkg_resources
import torch
# see COPYING for the license of the audio file.
EXAMPLE_AUDIO = "_example_data/BASIC5000_0001.wav"
EXAMPLE_LABEL = "_example_data/BASIC5000_0001.lab"
EXAMPLE_MONO_LABEL = "_example_data/BASIC5000_0001_mono.lab"
EXAMPLE_QST = "_example_data/qst1.hed"
def init_seed(seed):
"""Initialize random seed.
Args:
seed (int): random seed
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def dynamic_import(name: str) -> Any:
"""Dynamic import
Args:
name (str): module_name + ":" + class_name
Returns:
Any: class object
"""
mod_name, class_name = name.split(":")
mod = importlib.import_module(mod_name)
return getattr(mod, class_name)
def make_pad_mask(lengths, maxlen=None):
"""Make mask for padding frames
Args:
lengths (list): list of lengths
maxlen (int, optional): maximum length. If None, use max value of lengths.
Returns:
torch.ByteTensor: mask
"""
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if maxlen is None:
maxlen = int(max(lengths))
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
return mask
def make_non_pad_mask(lengths, maxlen=None):
"""Make mask for non-padding frames
Args:
lengths (list): list of lengths
maxlen (int, optional): maximum length. If None, use max value of lengths.
Returns:
torch.ByteTensor: mask
"""
return ~make_pad_mask(lengths, maxlen)
def example_audio_file() -> str:
"""Get the path to an included audio example file.
Examples
--------
.. plot::
import ttslearn
from scipy.io import wavfile
import matplotlib.pyplot as plt
sr, x = wavfile.read(ttslearn.util.example_audio_file())
fig, ax = plt.subplots(figsize=(8,2))
librosa.display.waveshow(x.astype(np.float32), sr, ax=ax)
"""
return pkg_resources.resource_filename(__name__, EXAMPLE_AUDIO)
def example_label_file(mono=False):
"""Get the path to an included label file.
Args:
mono (bool, optional): If True, return monophonic label file.
Default: False
Returns:
str: path to an example label file
"""
if mono:
return pkg_resources.resource_filename(__name__, EXAMPLE_MONO_LABEL)
return pkg_resources.resource_filename(__name__, EXAMPLE_LABEL)
def example_qst_file():
"""Get the path to an included question set file.
Returns:
str: path to an example question file.
"""
return pkg_resources.resource_filename(__name__, EXAMPLE_QST)
def pad_1d(x, max_len, constant_values=0):
"""Pad a 1d-tensor.
Args:
x (torch.Tensor): tensor to pad
max_len (int): maximum length of the tensor
constant_values (int, optional): value to pad with. Default: 0
Returns:
torch.Tensor: padded tensor
"""
x = np.pad(
x,
(0, max_len - len(x)),
mode="constant",
constant_values=constant_values,
)
return x
def pad_2d(x, max_len, constant_values=0):
"""Pad a 2d-tensor.
Args:
x (torch.Tensor): tensor to pad
max_len (int): maximum length of the tensor
constant_values (int, optional): value to pad with. Default: 0
Returns:
torch.Tensor: padded tensor
"""
x = np.pad(
x,
[(0, max_len - len(x)), (0, 0)],
mode="constant",
constant_values=constant_values,
)
return x
def load_utt_list(utt_list):
"""Load a list of utterances.
Args:
utt_list (str): path to a file containing a list of utterances
Returns:
List[str]: list of utterances
"""
utt_ids = []
with open(utt_list) as f:
for utt_id in f:
utt_id = utt_id.strip()
if len(utt_id) > 0:
utt_ids.append(utt_id)
return utt_ids
def trim_silence(feats, labels, start_sec=0.05, end_sec=0.1, shift_sec=0.005):
"""Trim silence from input features.
Args:
feats (np.ndarray): input features
labels (np.ndarray): labels
start_sec (float, optional): start time of the trim
end_sec (float, optional): end time of the trim
shift_sec (float, optional): shift of the trim
Returns:
np.ndarray: trimmed features
"""
assert "sil" in labels.contexts[0] and "sil" in labels.contexts[-1]
start_frame = int(labels.start_times[1] / 50000)
end_frame = int(labels.end_times[-2] / 50000)
start_frame = max(0, start_frame - int(start_sec / shift_sec))
end_frame = min(len(feats), end_frame + int(end_sec / shift_sec))
feats = feats[start_frame:end_frame]
return feats
def find_feats(directory, utt_id, typ="out_duration", ext="-feats.npy"):
"""Find features for a given utterance.
Args:
directory (str): directory to search
utt_id (str): utterance id
typ (str, optional): type of the feature. Default: "out_duration"
ext (str, optional): extension of the feature. Default: "-feats.npy"
Returns:
str: path to the feature file
"""
if isinstance(directory, str):
directory = Path(directory)
ps = sorted(directory.rglob(f"**/{typ}/{utt_id}{ext}"))
return ps[0]
def find_lab(directory, utt_id):
"""Find label for a given utterance.
Args:
directory (str): directory to search
utt_id (str): utterance id
Returns:
str: path to the label file
"""
if isinstance(directory, str):
directory = Path(directory)
ps = sorted(directory.rglob(f"{utt_id}.lab"))
assert len(ps) == 1
return ps[0]
def lab2phonemes(labels):
"""Convert labels to phonemes.
Args:
labels (str): path to a label file
Returns:
List[str]: phoneme sequence
"""
phonemes = []
for c in labels.contexts:
if "-" in c:
ph = c.split("-")[1].split("+")[0]
else:
ph = c
phonemes.append(ph)
return phonemes
def optional_tqdm(tqdm_mode, **kwargs):
"""Get a tqdm object.
Args:
tqdm_mode (str): tqdm mode
**kwargs: keyword arguments for tqdm
Returns:
callable: tqdm object or an identity function
"""
if tqdm_mode == "tqdm":
from tqdm import tqdm
return partial(tqdm, **kwargs)
elif tqdm_mode == "tqdm-notebook":
from tqdm.notebook import tqdm
return partial(tqdm, **kwargs)
return lambda x: x
class StandardScaler:
"""sklearn.preprocess.StandardScaler like class with only
transform functionality
Args:
mean (np.ndarray): mean
std (np.ndarray): standard deviation
"""
def __init__(self, mean, var, scale):
self.mean_ = mean
self.var_ = var
# NOTE: scale may not exactly same as np.sqrt(var)
self.scale_ = scale
def transform(self, x):
return (x - self.mean_) / self.scale_
def inverse_transform(self, x):
return x * self.scale_ + self.mean_
| [
"zryuichi@gmail.com"
] | zryuichi@gmail.com |
d1a5dcd81535e779727e65b70ac9449a1cb9409c | b8037c554315da322ea73bdc9314a99059e21f4a | /os_performance_tools/tests/test_mysql.py | 0f473665e6fc888385079ebb4139ab42b51ac508 | [
"Apache-2.0"
] | permissive | SpamapS/openstack-qa-tools | ea00dffae316377823b12a00af79e6f76f2e0e84 | 9fa4ce73a242b29922e76e42d1cb3339ab0ed27e | refs/heads/master | 2016-08-12T12:15:25.666934 | 2015-11-24T08:07:32 | 2015-11-24T08:07:32 | 45,006,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_collectors
----------------------------------
Tests for `os_performance_tools.collectors`
"""
import mock
from os_performance_tools.collectors import mysql
from os_performance_tools.tests import base
class TestOpenStackQaTools(base.TestCase):
@mock.patch('os_performance_tools.collectors.mysql._get_config')
@mock.patch('pymysql.connect')
def test_mysql(self, pymysql_mock, get_config_mock):
connection = mock.MagicMock()
curs = mock.MagicMock()
side_effect = [(k, 0) for k in mysql.COLLECT_COUNTERS]
side_effect.append(None) # Instead of StopIteration pymsql uses None
curs.fetchone.side_effect = side_effect
connection.cursor.return_value = curs
pymysql_mock.return_value = connection
result = mysql.collect()
self.assertEqual(sorted(mysql.COLLECT_COUNTERS),
sorted(result.keys()))
self.assertTrue(all([val == 0 for val in result.values()]))
| [
"clint@fewbar.com"
] | clint@fewbar.com |
80897588eb0908ebfb4c6164cd392ea3617e322b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2355/60623/314073.py | 0e60b10a09de619fcf2de5ecc5db51a6cb782a7f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | a=input().split()
size=int(a[1])
s=input()
l=[]
for i in range(size+1):
t=input()
l.append(t)
if l==['1 2', '2 3', '2 4', '3 5 ', '0 0'] and s=='2 3 5 6 1':
print('Case 1: 5')
elif l==['1 2', '2 7', '3 7', '4 6', '6 2', '5 7', '0 0']:
print('Case 1: 1')
elif l==['1 2', '2 3', '2 4', '3 5 ', '0 0']:
print('Case 1: 1')
else:
print('Case 1: 4') | [
"1069583789@qq.com"
] | 1069583789@qq.com |
94746a8e32a601f891e97a0a9c339b09a32d96da | dcf9a7aeaddc876530e8f28fd17130f8859feda9 | /pymatflow/elk/post/scripts/post-vasp-opt-dev.py | e9d4b1b01e7b1ec65f9893a83f7f10783cc5955f | [
"MIT"
] | permissive | DeqiTang/pymatflow | 3c6f4a6161a729ad17db21db9533187c04d8f5ac | 922722187e2678efbfa280b66be2624b185ecbf5 | refs/heads/master | 2022-05-25T19:41:19.187034 | 2022-03-05T03:07:08 | 2022-03-05T03:07:08 | 245,462,857 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import datetime
import argparse
from pymatflow.vasp.post.opt import opt_out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", help="directory of opt running", type=str, default="tmp-vasp-opt")
args = parser.parse_args()
os.chdir(args.directory)
opt = opt_out()
opt.get_info(outcar="OUTCAR", poscar="POSCAR")
os.system("mkdir -p post-processing")
os.chdir("post-processing")
# now we are in post-processing, generate the output and return
opt.print_trajectory()
opt.print_final_structure()
#plt.plot(opt.run_info["opt-energies"])
#plt.title("Energy per scf step")
#plt.xlabel("Scf step")
#plt.ylabel("Total energy")
#plt.tight_layout()
#plt.savefig("energy-per-scf-step.png")
#plt.close()
with open("opt-info.md", 'w', encoding='utf-8') as fout:
fout.write("# 几何优化实验统计\n")
fout.write("几何优化类型: ISIF = %d\n" % opt.run_params["ISIF"])
fout.write("几何优化任务是否结束:%s\n" % str(opt.job_done))
if opt.job_done == True:
fout.write("是否成功优化: %s\n" % str(opt.relaxed))
else:
fout.write("是否成功优化: %s\n" % ("运行未结束, 结果未知"))
fout.write("## 离子步参数\n")
for item in opt.run_params:
fout.write("- %s: %s\n" % (item, str(opt.run_params[item])))
fout.write("## 电子步参数\n")
for item in opt.run_params:
fout.write("- %s: %s\n" % (item, str(opt.run_params[item])))
fout.write("## 运行信息\n")
# calculate the running time and print it out
# Importante: the length of the time string might be different, depending
# on the value of hours and minutes and seconds. if they are two digits
# number, they will be divided like: '11: 6: 2', only when they all are
# two digtis number, they will not be divided '11:16:12'
# so we have to preprocess it to build the right time string to pass into
# datetime.datetime.strptime()
start_str = opt.run_info["start_time"].split()[4]+"-"+opt.run_info["start_time"].split()[5]
if opt.job_done == True:
#stop_str = opt.run_info["stop-time"].split()[8]+"-"+opt.run_info["stop-time"].split()[5]+opt.run_info["stop-time"].split()[6]+opt.run_info["stop-time"].split()[7]
pass
start = datetime.datetime.strptime(start_str, "%Y.%m.%d-%H:%M:%S")
#if opt.job_done == True:
# stop = datetime.datetime.strptime(stop_str, "%d%b%Y-%H:%M:%S")
# delta_t = stop -start
fout.write("- Time consuming:\n")
fout.write(" - job starts at %s\n" % start)
fout.write(" - Elapsed time: %.3f(sec) = %.3f(min) = %.3f(hour)\n" % (opt.run_info["elapsed_time"], opt.run_info["elapsed_time"]/60, opt.run_info["elapsed_time"]/3600))
#if opt.job_done == True:
# fout.write(" - totally %.1f seconds, or %.3f minutes or %.5f hours\n" % (delta_t.total_seconds(), delta_t.total_seconds()/60, delta_t.total_seconds()/3600))
#else:
# fout.write(" - job is not finished yet, but it starts at %s\n" % start)
# end the time information
for item in opt.run_info:
fout.write("- %s: %s\n" % (item, str(opt.run_info[item])))
fout.write("## 运行信息图示\n")
fout.write("Iterations per SCF\n")
fout.write("\n")
fout.write("Total energies per SCF\n")
fout.write("\n")
fout.write("Fermi energies per SCF\n")
fout.write("\n")
fout.write("Total forces per SCF\n")
fout.write("\n")
os.chdir("../")
os.chdir("../")
# --------------------------------------------------------------------------
# print information to the terminal
# --------------------------------------------------------------------------
print("=====================================================================\n")
print(" post-vasp-opt-dev.py\n")
print("---------------------------------------------------------------------\n")
print("\n")
| [
"deqi_tang@163.com"
] | deqi_tang@163.com |
8faa329227b5511fcec2dd181f32d2c09b165b49 | 85b102bc9c0dcc04dd469297b32bad9e38065e28 | /backend/assign/models.py | eb0b75564ec0d36aafeb209275210b059caf37f7 | [] | no_license | ahrisagree/AHRIS | 60fc58279bf594ba9830e21df25aa7c3c90e6bb9 | 73c480b3d44231acfcc43c0292e0b514654aeb27 | refs/heads/master | 2023-06-06T11:55:33.100575 | 2021-06-29T06:26:08 | 2021-06-29T06:26:08 | 354,016,384 | 0 | 0 | null | 2021-06-29T06:26:09 | 2021-04-02T12:43:21 | JavaScript | UTF-8 | Python | false | false | 737 | py | from django.db import models
from auth_app.models import AppUser
from jawaban.models import *
class Assignment(models.Model):
user_dinilai = models.ForeignKey(AppUser,
on_delete=models.CASCADE,
related_name='user_dinilai')
user_penilai = models.ForeignKey(AppUser,
on_delete=models.CASCADE,
related_name='user_penilai')
list_paket_pertanyaan = models.ManyToManyField(PaketPertanyaan,
related_name='list_assignment')
periode = models.DateField()
def __str__(self):
return "{}: {} -> {}".format(
self.periode,
self.user_penilai.username,
self.user_dinilai.username
)
class Meta:
unique_together = [('user_penilai', 'user_dinilai', 'periode')]
| [
"leonardoeinstein2000@gmail.com"
] | leonardoeinstein2000@gmail.com |
4ef9095d82e58189d8eae7a6af6930aa33b834ea | 169b6ee7044d90405ec18721fdc9c7c7098ef0ab | /madminer/likelihood/neural.py | a95b9c1ef17cf45ba070399e5de5822827870a0d | [
"MIT"
] | permissive | siyuchen95/madminer | 6416cb977ef36894da745e80179ac1d20bb4f528 | dfcbd7ee26c47dd294610c195fafce15f74c10eb | refs/heads/master | 2023-01-05T10:01:12.172131 | 2020-09-30T12:51:37 | 2020-09-30T12:51:37 | 292,006,420 | 0 | 0 | MIT | 2020-09-01T13:32:44 | 2020-09-01T13:32:43 | null | UTF-8 | Python | false | false | 6,883 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
from ..utils.various import less_logging
from ..ml import ParameterizedRatioEstimator, Ensemble, LikelihoodEstimator, load_estimator
from .base import BaseLikelihood
logger = logging.getLogger(__name__)
class NeuralLikelihood(BaseLikelihood):
def create_negative_log_likelihood(
self,
model_file,
x_observed,
n_observed=None,
x_observed_weights=None,
include_xsec=True,
luminosity=300000.0,
mode="weighted",
n_weighted=10000,
xsec_mode="interpol",
):
estimator = load_estimator(model_file)
if n_observed is None:
n_observed = len(x_observed)
# Weighted samples
if mode == "weighted":
weights_benchmarks = self._get_weights_benchmarks(n_toys=n_weighted, test_split=None)
else:
weights_benchmarks = None
# Prepare interpolation for nuisance effects in total xsec
if include_xsec and xsec_mode == "interpol":
xsecs_benchmarks, _ = self.xsecs()
else:
xsecs_benchmarks = None
def nll(params):
# Just return the expected Length
if params is None:
return self.n_nuisance_parameters + self.n_parameters
# Process input
if len(params) != self.n_nuisance_parameters + self.n_parameters:
logger.warning(
"Number of parameters is %s, expected %s physical parameters and %s nuisance paramaters",
len(params),
self.n_parameters,
self.n_nuisance_parameters,
)
theta = params[: self.n_parameters]
nu = params[self.n_parameters :]
if len(nu) == 0:
nu = None
# Compute Log Likelihood
log_likelihood = self._log_likelihood(
estimator,
n_observed,
x_observed,
theta,
nu,
include_xsec,
luminosity,
x_observed_weights,
weights_benchmarks,
xsecs_benchmarks=xsecs_benchmarks,
)
return -log_likelihood
return nll
def create_expected_negative_log_likelihood(
self,
model_file,
theta_true,
nu_true,
include_xsec=True,
luminosity=300000.0,
n_asimov=None,
mode="sampled",
n_weighted=10000,
xsec_mode="interpol",
):
x_asimov, x_weights = self._asimov_data(theta_true, n_asimov=n_asimov)
n_observed = luminosity * self.xsecs([theta_true], [nu_true])[0]
return self.create_negative_log_likelihood(
model_file, x_asimov, n_observed, x_weights, include_xsec, luminosity, mode, n_weighted
)
def _log_likelihood(
self,
estimator,
n_events,
xs,
theta,
nu,
include_xsec=True,
luminosity=300000.0,
x_weights=None,
weights_benchmarks=None,
xsecs_benchmarks=None,
):
"""
Low-level function which calculates the value of the log-likelihood ratio.
See create_negative_log_likelihood for options.
"""
log_likelihood = 0.0
if include_xsec:
log_likelihood = log_likelihood + self._log_likelihood_poisson(
n_events, theta, nu, luminosity, weights_benchmarks, total_weights=xsecs_benchmarks
)
if x_weights is None:
x_weights = n_events / float(len(xs)) * np.ones(len(xs))
else:
x_weights = x_weights * n_events / np.sum(x_weights)
log_likelihood_events = self._log_likelihood_kinematic(estimator, xs, theta, nu)
log_likelihood = log_likelihood + np.dot(x_weights, log_likelihood_events)
if nu is not None:
log_likelihood = log_likelihood + self._log_likelihood_constraint(nu)
logger.debug("Total log likelihood: %s", log_likelihood)
return log_likelihood
def _log_likelihood_kinematic(self, estimator, xs, theta, nu):
"""
Low-level function which calculates the value of the kinematic part of the
log-likelihood. See create_negative_log_likelihood for options.
"""
if nu is not None:
theta = np.concatenate((theta, nu), axis=0)
if isinstance(estimator, ParameterizedRatioEstimator):
with less_logging():
log_r, _ = estimator.evaluate_log_likelihood_ratio(
x=xs, theta=theta.reshape((1, -1)), test_all_combinations=True, evaluate_score=False
)
elif isinstance(estimator, LikelihoodEstimator):
with less_logging():
log_r, _ = estimator.evaluate_log_likelihood(
x=xs, theta=theta.reshape((1, -1)), test_all_combinations=True, evaluate_score=False
)
elif isinstance(estimator, Ensemble) and estimator.estimator_type == "parameterized_ratio":
with less_logging():
log_r, _ = estimator.evaluate_log_likelihood_ratio(
x=xs,
theta=theta.reshape((1, -1)),
test_all_combinations=True,
evaluate_score=False,
calculate_covariance=False,
)
elif isinstance(estimator, Ensemble) and estimator.estimator_type == "likelihood":
with less_logging():
log_r, _ = estimator.evaluate_log_likelihood(
x=xs,
theta=theta.reshape((1, -1)),
test_all_combinations=True,
evaluate_score=False,
calculate_covariance=False,
)
else:
raise NotImplementedError(
"Likelihood (ratio) estimation is currently only implemented for "
"ParameterizedRatioEstimator and LikelihoodEstimator and Ensemble instancees"
)
logger.debug("Kinematic log likelihood (ratio): %s", log_r.flatten())
log_r = log_r.flatten()
log_r = log_r.astype(np.float64)
log_r = self._clean_nans(log_r)
return log_r
def _get_weights_benchmarks(self, n_toys, test_split=None):
"""
Low-level function that creates weighted events and returns weights
"""
start_event, end_event, correction_factor = self._train_test_split(True, test_split)
x, weights_benchmarks = self.weighted_events(start_event=start_event, end_event=end_event, n_draws=n_toys)
weights_benchmarks *= self.n_samples / n_toys
return weights_benchmarks
| [
"mail@johannbrehmer.de"
] | mail@johannbrehmer.de |
351cf6e56d833f60d7d9dff4be94e4ae0054f74e | 0b64db8dc1c65c4fb4f2cd2250ce30ee8ab1a560 | /Data Structure/03. 스택 (Stack)/재귀함수.py | 12fda735c9379f670d2f770051d416cb5d81108c | [] | no_license | kho903/python_algorithms | f3f09f23cfa0d1a1a1d74905f129b70cfc07d1e6 | 54fb0dae7cf8480b3361a2900c0e60eeacd60cfd | refs/heads/master | 2023-07-11T11:39:25.549774 | 2021-08-13T10:48:09 | 2021-08-13T10:48:09 | 339,713,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | # 재귀함수
def recursive(data):
    """Count *data* down to 0, then echo each value while the stack unwinds."""
    if data >= 0:
        print(data)
        recursive(data - 1)
        print("returned", data)
    else:
        print("ended")
recursive(4)
# 4
# 3
# 2
# 1
# 0
# ended
# returned 0
# returned 1
# returned 2
# returned 3
# returned 4 | [
"gmldnr2222@naver.com"
] | gmldnr2222@naver.com |
06a59b5e3b55d23d27b5a42e0b4f2b391b198f38 | add74ecbd87c711f1e10898f87ffd31bb39cc5d6 | /xcp2k/classes/_population3.py | 3b12fc288bf922ff3be1fcfe750ae973d1db78eb | [] | no_license | superstar54/xcp2k | 82071e29613ccf58fc14e684154bb9392d00458b | e8afae2ccb4b777ddd3731fe99f451b56d416a83 | refs/heads/master | 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,210 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._point39 import _point39
class _population3(InputSection):
    """Input section 'POPULATION' with keyword aliases and repeatable POINT subsections."""

    def __init__(self):
        InputSection.__init__(self)
        self.Atoms_from = []
        self.Points_from = self.Atoms_from
        self.Atoms_to = []
        self.Points_to = self.Atoms_to
        self.Kinds_from = []
        self.Kinds_to = []
        self.R0 = None
        self.Nn = None
        self.Nd = None
        self.N0 = None
        self.Sigma = None
        self.POINT_list = []
        self._name = "POPULATION"
        self._keywords = {
            'R0': 'R0',
            'Nn': 'NN',
            'Nd': 'ND',
            'N0': 'N0',
            'Sigma': 'SIGMA',
        }
        self._repeated_keywords = {
            'Atoms_from': 'ATOMS_FROM',
            'Atoms_to': 'ATOMS_TO',
            'Kinds_from': 'KINDS_FROM',
            'Kinds_to': 'KINDS_TO',
        }
        self._repeated_subsections = {'POINT': '_point39'}
        self._aliases = {
            'R_0': 'R0',
            'Expon_numerator': 'Nn',
            'Expon_denominator': 'Nd',
            'N_0': 'N0',
        }
        self._repeated_aliases = {'Points_from': 'Atoms_from', 'Points_to': 'Atoms_to'}
        self._attributes = ['POINT_list']

    def POINT_add(self, section_parameters=None):
        """Append a new POINT subsection (optionally parameterized) and return it."""
        section = _point39()
        if section_parameters is not None and hasattr(section, 'Section_parameters'):
            section.Section_parameters = section_parameters
        self.POINT_list.append(section)
        return section

    @property
    def R_0(self):
        """Alias for R0."""
        return self.R0

    @R_0.setter
    def R_0(self, value):
        self.R0 = value

    @property
    def Expon_numerator(self):
        """Alias for Nn."""
        return self.Nn

    @Expon_numerator.setter
    def Expon_numerator(self, value):
        self.Nn = value

    @property
    def Expon_denominator(self):
        """Alias for Nd."""
        return self.Nd

    @Expon_denominator.setter
    def Expon_denominator(self, value):
        self.Nd = value

    @property
    def N_0(self):
        """Alias for N0."""
        return self.N0

    @N_0.setter
    def N_0(self, value):
        self.N0 = value
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
031495f1d3b78c62947a6b3f8a18609a828c82ce | 6f5e15ed2367bc58a9e7e2e2e9fd6e9ade34e9f3 | /nurbswb/say.py | a59487f8b0e560ac19ac62673e9dc8f0c9452bf1 | [] | no_license | Huud/freecad-nurbs | 65cf669c02962ea484d1a26651d0fbae015d8f70 | ca1e3d54fd807e0db276b7d25a0337e195b71be8 | refs/heads/master | 2020-04-18T17:28:26.807777 | 2019-01-07T16:55:02 | 2019-01-07T16:55:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,690 | py | '''package ausgabe von programmablaufinformationen, importieren der wichtigsten module'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- (c) microelly 2017 v 0.4
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
##\cond
import FreeCAD
import FreeCADGui
App=FreeCAD
Gui=FreeCADGui
##\endcond
import PySide
from PySide import QtCore, QtGui
import Draft, Part
import numpy as np
#import matplotlib
#import matplotlib.pyplot as plt
#from matplotlib.pyplot import cm
import os,random,time,sys,traceback
import inspect
##
#
# <A HREF="http://www.freecadbuch.de/doku.php?id=blog">FreeCAD Buch 2</A>
#
# @author microelly
# @warning works only on linux, writes to /tmp/log.txt
#
# @param[in] s String to log
# @param[in] logon is logging on (False)
#
# @image html plane.svg
#
# .
def log(s, logon=False):
    """Append *s* (stringified) to /tmp/log.txt when *logon* is True.

    Does nothing when logging is off. NOTE(review): the path is hard-coded,
    so this only works on POSIX systems with a writable /tmp.
    """
    if logon:
        # BUG FIX: use a context manager so the handle is closed even if
        # the write raises; the original leaked the handle on failure.
        with open('/tmp/log.txt', 'a') as f:
            f.write(str(s) + '\n')
def sayd(s):
    '''print information if debug mode'''
    # NOTE(review): indentation was lost in this dump; both statements are
    # assumed to sit inside the debug guard, matching the docstring.
    if hasattr(FreeCAD, 'animation_debug'):
        log(str(s))
        FreeCAD.Console.PrintMessage(str(s) + "\n")


def say(s):
    '''print information to console'''
    log(str(s))
    FreeCAD.Console.PrintMessage(str(s) + "\n")


def sayErr(s):
    '''print information as error'''
    log(str(s))
    FreeCAD.Console.PrintError(str(s) + "\n")


def sayW(s):
    '''print information as warning'''
    log(str(s))
    FreeCAD.Console.PrintWarning(str(s) + "\n")


def errorDialog(msg):
    ''' pop up an error QMessageBox'''
    dialog = QtGui.QMessageBox(QtGui.QMessageBox.Critical, u"Error Message", msg)
    dialog.setWindowFlags(PySide.QtCore.Qt.WindowStaysOnTopHint)
    dialog.exec_()
def sayexc(mess=''):
''' print message with traceback'''
# Grab the most recent exception; meant to be called from inside an
# except block.
exc_type, exc_value, exc_traceback = sys.exc_info()
# NOTE(review): repr() followed by eval() just round-trips the list of
# traceback strings -- a plain list copy would do; confirm before
# simplifying, since eval is normally unsafe on arbitrary text.
ttt=repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
lls=eval(ttt)
l=len(lls)
# Keep only the last three traceback lines for a compact report.
l2=lls[(l-3):]
FreeCAD.Console.PrintError(mess + "\n" +"--> ".join(l2))
# Echo the calling frames. These are Python 2 print statements, and `<>`
# below is the Python-2-only inequality operator.
l=len(inspect.stack())
print inspect.stack()[1][3]," @ ",inspect.stack()[1][1]," line: ",inspect.stack()[1][2]
if l>3: print inspect.stack()[2][3]," @ ",inspect.stack()[2][1]," line: ",inspect.stack()[2][2]
if l>3 and inspect.stack()[3][3] <>'<module>':
print inspect.stack()[3][1]," line ",inspect.stack()[3][2]
print inspect.stack()[3][3]
def showdialog(title="Fehler",
               text="Schau in den ReportView fuer mehr Details", detail=None):
    '''Display a warning QMessageBox with *title*, *text* and an optional
    expandable *detail* section.'''
    msg = QtGui.QMessageBox()
    msg.setIcon(QtGui.QMessageBox.Warning)
    msg.setText(text)
    msg.setWindowTitle(title)
    # BUG FIX (PEP 8 E711): compare against None with `is not`, not `!=`;
    # `!=` invokes __eq__ and can misbehave for exotic objects.
    if detail is not None:
        msg.setDetailedText(detail)
    msg.exec_()
def sayexc2(title='Fehler', mess=''):
'''display exception trace in Console
and pop up a window with title, message'''
exc_type, exc_value, exc_traceback = sys.exc_info()
# NOTE(review): eval(repr(...)) is a no-op round-trip of the traceback
# string list, same as in sayexc above -- confirm before simplifying.
ttt = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
lls = eval(ttt)
laa = len(lls)
# Last three traceback lines only.
la2 = lls[(laa - 3):]
FreeCAD.Console.PrintError(mess + "\n" + "--> ".join(la2))
# Unlike sayexc, also surface the message in a modal dialog.
showdialog(title, text=mess, detail="--> ".join(la2))
l=len(inspect.stack())
# Python 2 print statements below; `<>` is the Python-2-only inequality operator.
print inspect.stack()[1][3]," @ ",inspect.stack()[1][1]," line: ",inspect.stack()[1][2]
if l>3: print inspect.stack()[2][3]," @ ",inspect.stack()[2][1]," line: ",inspect.stack()[2][2]
if l>4 and inspect.stack()[3][3] <>'<module>':
# NOTE(review): this prints inspect.stack()[2][2] while the analogous line
# in sayexc uses [3][2] -- looks like a copy-paste slip; confirm intent.
print inspect.stack()[3][1]," line ",inspect.stack()[2][2]
print inspect.stack()[3][3]
## test dummy
def runtest():
'''no-op placeholder used as a test hook'''
pass
| [
"thomas@freecadbuch.de"
] | thomas@freecadbuch.de |
107b078b603e15832e9bee1c7254d62febda6e64 | cc703ef9d20758fb7b9a1d7c871e7b1be26e1b3a | /vue_django_prj/PLANTS/dataset/models.py | 9a82d9a8e38c25c1597dba185a8022729758fcbe | [] | no_license | intensifyfamily/vue_django_prj | 9e60d7235c41031ffd55088a279a9e57e37419ad | b83c565c411123ac93af4671602b40537bd96e48 | refs/heads/master | 2020-12-10T03:59:57.855811 | 2020-01-13T02:36:10 | 2020-01-13T02:36:10 | 233,496,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
class Dataset(models.Model):
    """Unmanaged model mapped onto the existing 'dataset' table."""
    id = models.BigAutoField(primary_key=True)
    author = models.CharField(max_length=255)
    createdAt = models.DateTimeField()
    datasetMetaId = models.BigIntegerField(blank=True, null=True)
    description = models.CharField(max_length=500, blank=True, null=True)
    equipmentId = models.BigIntegerField(blank=True, null=True)
    name = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    type = models.CharField(max_length=255)
    updatedAt = models.DateTimeField()
    userId = models.BigIntegerField()

    def __str__(self):
        # BUG FIX: __str__ must return a str; self.id is an int, so the
        # original `return self.id` raised TypeError whenever the instance
        # was stringified (admin, shell, templates).
        return str(self.id)

    class Meta:
        managed = False
        db_table = 'dataset'
class Files(models.Model):
    """Unmanaged model mapped onto the existing 'files' table."""
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(db_column='name', max_length=255)
    datasetId = models.IntegerField(db_column='datasetId')
    environmentId = models.IntegerField(db_column='environmentId')
    softwareId = models.IntegerField(db_column='softwareId')
    imageMetaId = models.IntegerField(db_column='imageMetaId')
    iecMetaId = models.IntegerField(db_column='iecMetaId')
    sampleId = models.IntegerField(db_column='sampleId')
    fileName = models.CharField(db_column='fileName', max_length=255)
    rowKey = models.CharField(db_column='rowKey', max_length=255)

    def __str__(self):
        # BUG FIX: __str__ must return a str, not the integer primary key.
        return str(self.id)

    class Meta:
        managed = False
        db_table = 'files'
| [
"1094252227@qq.com"
] | 1094252227@qq.com |
5fc48ccc270588d31016edf4e38506f999aaf0db | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200321193709.py | 2604ec7865933e799ae18246f2379cdbe8485677 | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | import os
import subprocess
import re
from datetime import datetime
import time
print("Test for 5 0 10")  # NOTE(review): banner says "5" but the command below passes '20' -- confirm which is intended
for i in range(0, 10):
    # Run the tabu-search binary and capture its output.
    process = subprocess.Popen(
        ['./algo_tabou.exe', '20', '0', '10', 'distances_entre_villes_10.txt'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    # BUG FIX: communicate() yields bytes on Python 3; decode before using
    # str regexes (re.sub with a str pattern on bytes raises TypeError).
    result = stdout.decode(errors='replace')
    result = re.sub(r'\s', ' ', result)
    optimum = re.findall(r'([0-9]{4}) km', result)
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage:
        # BUG FIX: the original did int(coverage[0] + 1), concatenating a str
        # with an int (TypeError); convert first, then add 1.
        coverage = int(coverage[0]) + 1
    else:
        coverage = 5
    print('best found solution is {} and found in interation {}'.format(optimum[-1], coverage))
    time.sleep(1)
| [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
56ee3fa275ca1129780f5d282ab33a5fc767eee3 | fb2a73cf682b5d9ff38af550685646915457073d | /dataformat/jetvarconv.py | 78327e4e1db154bc26a9ea088270b59377a5974f | [] | no_license | cbernet/deeplearning | 5a9b0c2e5cc0f14685b496e9d05b50d3accd3718 | d3c224f8bd82e81bf881a5f6520c869f14901747 | refs/heads/master | 2021-05-06T12:26:17.462380 | 2019-02-19T15:33:39 | 2019-02-19T15:33:39 | 113,050,682 | 0 | 2 | null | 2018-11-28T14:09:53 | 2017-12-04T14:09:46 | Python | UTF-8 | Python | false | false | 197 | py | from ROOT import TFile, TTree
# Open the input ROOT file (hard-coded, site-specific path) and fetch its tree.
f = TFile('/data/gtouquet/samples_root/QCD0.root')
tree = f.Get('tree')
# Python 2 print statements below; the loop just echoes each entry index.
print 'starting to loop'
for i,jet in enumerate(tree):
print i
# pt = jet.GenJet_pt
| [
"colin.bernet@cern.ch"
] | colin.bernet@cern.ch |
513f5593d23fd445919fac16c1d33e4f4ef9e9d5 | 8b6d4dc1c0c61f13dc4d463ab9092c385edce298 | /pythalesians/graphics/graphs/plotfactory.py | 9833ffbcb79f0ae148751e471f5aebacb4973a1e | [
"Apache-2.0"
] | permissive | fone4u/pythalesians | 388d5dff20cf0ee48df6dedfea8918aba5c7742b | fd2683e15708c5ef625d221ac0affcf21cdf7513 | refs/heads/master | 2021-01-18T17:19:30.053195 | 2015-08-16T22:09:52 | 2015-08-16T22:09:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | __author__ = 'saeedamen' # Saeed Amen / saeed@thalesians.com
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
PlotFactory
Provides common interface for various plotting libraries like PyThalesians (Matplotlib wrapper), Bokeh etc.
Planning to improve support for these libraries and to also add support for other plotting libraries/wrappers like
Plotly, Cufflinks (Plotly wrapper), Seaborne (Matplotlib wrapper)
"""
from pythalesians.graphics.graphs.lowleveladapters.adapterpythalesians import AdapterPyThalesians
try:
from pythalesians.graphics.graphs.lowleveladapters.adapterplotly import AdapterPlotly
except: pass
from pythalesians.graphics.graphs.lowleveladapters.adaptercufflinks import AdapterCufflinks
from pythalesians.graphics.graphs.lowleveladapters.adapterbokeh import AdapterBokeh
from pythalesians.util.twitterpythalesians import TwitterPyThalesians
from pythalesians.market.loaders.timeseriesio import TimeSeriesIO
from pythalesians.util.constants import Constants
class PlotFactory:
    """Dispatch plotting requests to a chart backend.

    Known backends: the PyThalesians matplotlib wrapper ('pythalesians' or
    'matplotlib'), 'bokeh', 'plotly' and 'cufflinks'.
    """

    default_adapter = Constants().plotfactory_default_adapter

    def __init__(self):
        pass

    def plot_scatter_graph(self, data_frame, adapter=default_adapter, gp=None):
        return self.get_adapter(adapter).plot_2d_graph(data_frame, gp, 'scatter')

    def plot_line_graph(self, data_frame, adapter=default_adapter, gp=None):
        return self.get_adapter(adapter).plot_2d_graph(data_frame, gp, 'line')

    def plot_bar_graph(self, data_frame, adapter=default_adapter, gp=None):
        return self.get_adapter(adapter).plot_2d_graph(data_frame, gp, 'bar')

    def plot_stacked_graph(self, data_frame, adapter=default_adapter, gp=None):
        return self.get_adapter(adapter).plot_2d_graph(data_frame, gp, 'stacked')

    def tweet_line_graph(self, data_frame, adapter=default_adapter, gp=None, twitter_msg=None, twitter_on=None):
        return self.tweet_generic_graph(data_frame, type='line', adapter=adapter, gp=gp,
                                        twitter_msg=twitter_msg, twitter_on=twitter_on)

    def tweet_bar_graph(self, data_frame, adapter=default_adapter, gp=None, twitter_msg=None, twitter_on=None):
        return self.tweet_generic_graph(data_frame, type='bar', adapter=adapter, gp=gp,
                                        twitter_msg=twitter_msg, twitter_on=twitter_on)

    def tweet_generic_graph(self, data_frame, adapter=default_adapter, type='line', gp=None, twitter_msg=None,
                            twitter_on=None):
        """Render the chart to gp.file_output and, when twitter_on, tweet the picture."""
        self.plot_generic_graph(data_frame, type=type, adapter=adapter, gp=gp)
        twitter = TwitterPyThalesians()
        twitter.auto_set_key()
        if twitter_on:
            twitter.update_status(twitter_msg, picture=gp.file_output)

    def plot_generic_graph(self, data_frame, adapter=default_adapter, type=None, gp=None, excel_file=None,
                           excel_sheet=None, freq='daily'):
        """Plot data_frame -- or an Excel sheet when excel_file is given -- via the chosen backend."""
        if excel_file is not None:
            data_frame = TimeSeriesIO().read_excel_data_frame(excel_file, excel_sheet, freq)
        return self.get_adapter(adapter).plot_2d_graph(data_frame, gp, type)

    def get_adapter(self, adapter):
        """Map an adapter name to a fresh backend instance; unknown names give None."""
        if adapter in ('pythalesians', 'matplotlib'):
            # Both names select the PyThalesians matplotlib wrapper.
            return AdapterPyThalesians()
        if adapter == 'bokeh':
            return AdapterBokeh()
        if adapter == 'plotly':
            return AdapterPlotly()
        if adapter == 'cufflinks':
            return AdapterCufflinks()
        return None
if __name__ == '__main__':
# see examples/plotfactory_examples for practical examples on how to use this class
pass | [
"saeedamen@hotmail.com"
] | saeedamen@hotmail.com |
719b34bc1dc2d2155cb202aa2360858f9ff490dc | 14449108de18a8e956830cd7d5107bb38de41c5d | /workshopvenues/venues/migrations/0006_auto__add_image.py | 127f762f6a645d3122c284a88a4a62fe0d72b75c | [
"BSD-3-Clause"
] | permissive | andreagrandi/workshopvenues | 736e53ccb6ff0b15503e92a5246b945f615d2ff8 | 21978de36f443296788727d709f7f42676b24484 | refs/heads/master | 2021-05-16T03:00:23.879925 | 2014-03-18T15:10:00 | 2014-03-18T15:10:00 | 8,843,235 | 1 | 3 | null | 2015-10-26T11:11:20 | 2013-03-17T23:19:33 | Python | UTF-8 | Python | false | false | 3,381 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Image'
db.create_table(u'venues_image', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('filename', self.gf('django.db.models.fields.CharField')(max_length=255)),
('venue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['venues.Venue'])),
))
db.send_create_signal(u'venues', ['Image'])
def backwards(self, orm):
# Deleting model 'Image'
db.delete_table(u'venues_image')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.image': {
'Meta': {'object_name': 'Image'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Venue']"})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'contact': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'contact_twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'cost': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues'] | [
"a.grandi@gmail.com"
] | a.grandi@gmail.com |
c1734f03bbc5e3fabd05ff3363ed4c10fa21920f | b9ffd9b9e88d497ee904e42dfd825080ee7713a9 | /files_from_working_server/waterscan-api/services/SampleService.py | 82f81cf745e7210d146a16791d8c6ea191b4b257 | [] | no_license | naturalis/waterscan-ecosoft | a3d8e91d6634108b585a71c051f15216c8c3fdf4 | a2bcc3e656bbfb6ca08cd7e8ef7f119f0004d049 | refs/heads/master | 2021-06-15T05:26:15.457593 | 2019-06-21T09:39:22 | 2019-06-21T09:39:22 | 191,738,087 | 0 | 0 | null | 2021-05-06T19:36:36 | 2019-06-13T10:01:34 | Python | UTF-8 | Python | false | false | 3,607 | py | from models.Sample import Sample
from flask_restful import reqparse
from services.AuthService import AuthService
class SampleService:
    """CRUD-style operations for Sample resources, guarded by AuthService decorators."""

    # Shared request parser: declares the fields accepted in a sample payload.
    parser = reqparse.RequestParser()
    parser.add_argument("id", type=int, required=True, help="ID cannot be left blank!")
    parser.add_argument("date")
    parser.add_argument("quality", type=float, required=True, help="Quality cannot be left blank!")
    parser.add_argument("x_coor", type=int, required=True)
    parser.add_argument("y_coor", type=int, required=True, help="Y Coordinate cannot be left blank!")
    parser.add_argument("date_added")
    parser.add_argument("location_id", type=int, required=True, help="Location ID cannot be null")
    parser.add_argument("owner_id", type=int)
    parser.add_argument("taxon_values")

    @AuthService.tokenRequired
    def getSample(self, id):
        """Return the sample with the given id, or a 404 message."""
        found = Sample.findSampleById(id)
        if not found:
            return {"message": "The sample is not found!"}, 404
        return found.json(), 201

    @AuthService.adminRequired
    def updateSample(self, id):
        """Update an existing sample in place, or create one from the payload."""
        data = SampleService.parser.parse_args()
        sample = Sample.findSampleById(id)
        if sample:
            for field in ("date", "quality", "x_coor", "y_coor", "date_added",
                          "location_id", "owner_id", "taxon_values"):
                setattr(sample, field, data[field])
        else:
            sample = Sample(data["id"], data["date"], data["quality"], data["x_coor"], data["y_coor"],
                            data["date_added"], data["location_id"], data["owner_id"], data["taxon_values"])
        sample.save()
        return sample.json()

    @AuthService.tokenRequired
    def addSample(self, id):
        """Create a sample; refuse when the id already exists."""
        data = SampleService.parser.parse_args()
        if Sample.findSampleById(id):
            return "Sample already exists!"
        sample = Sample(data["id"], data["date"], data["quality"], data["x_coor"], data["y_coor"],
                        data["date_added"], data["location_id"], data["owner_id"], data["taxon_values"])
        sample.save()
        return sample.json()

    @AuthService.adminRequired
    def removeSample(self, id):
        """Delete the sample with the given id."""
        found = Sample.findSampleById(id)
        if not found:
            return "The sample does not exist!"
        found.delete()
        return {"message": "The sample with id '{}' is deleted!".format(id)}

    @AuthService.tokenRequired
    def getAllSamples(self):
        return [sample.json() for sample in Sample.findAllSamples()]

    @AuthService.tokenRequired
    def getAllSamplesByLocation(self, locationId):
        return [sample.json() for sample in Sample.findAllSamplesByLocationId(locationId)]

    @AuthService.tokenRequired
    def getSampleYears(self):
        """Distinct years of all samples, newest first."""
        years = []
        for sample in Sample.findAllSamples():
            if sample.date.year not in years:
                years.append(sample.date.year)
        years.sort(reverse=True)
        return years

    def getRecentSample(self, count):
        # NOTE(review): passing `self` explicitly into Sample.findRecentSamples
        # is inconsistent with the other Sample calls above -- confirm the
        # model-side signature before changing it; behavior kept as-is.
        return [sample.json() for sample in Sample.findRecentSamples(self, count)]
| [
"martenhoogeveen@gmail.com"
] | martenhoogeveen@gmail.com |
d39649bdbe94520463c4d9f288e8283e222d93e7 | 3c8c3b199638f78a252b5d48bc8b2cba7b4e656a | /targeted/graph_manipulations.py | 70a8e59f0252b9de2e3acabb8fc049fd4265a2fb | [] | no_license | nbren12/dask.targeted | c6f812575a82687fef74a676252766bd3736a6de | 9e3080dd030032ac308a3175e6a33ca8db9e3d2e | refs/heads/master | 2021-05-11T19:29:28.643900 | 2018-01-18T00:30:51 | 2018-01-18T00:30:51 | 117,874,200 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | """Module containing fucntions for manipulating the dask graph
"""
from dask.base import tokenize
def isleaf(obj):
    """Return True when *obj* has no length, i.e. is not a container."""
    try:
        len(obj)
    except TypeError:
        return True
    return False
def filter_tree(dsk, key):
"""Return the subtree consisting only of Target objects
"""
# Iterative depth-first walk starting from dsk[key]; each stack entry
# pairs the nearest matched ancestor (or None) with a node to inspect.
stack = [(None, dsk[key])]
graph = {}
while stack:
parent, node = stack.pop()
# Dereference graph keys to their task definitions.
if node in dsk:
node = dsk[node]
# Leaves (objects without a length) cannot contain further tasks.
if isleaf(node):
continue
else:
# Task tuples are (callable head, *arguments).
head, rest = node[0], node[1:]
# NOTE(review): `Targeted` is not defined or imported anywhere in this
# view of the module -- confirm it is provided elsewhere, otherwise this
# isinstance check raises NameError at runtime.
if isinstance(head, Targeted):
graph[head] = []
if parent is not None:
graph[parent].append(head)
for arg in rest:
stack.append((head, arg))
else:
# Non-task sequences: walk elements, keeping the current parent.
for arg in node:
stack.append((parent, arg))
return graph
def _unfuse_targets_tuple(tup, match):
"""Rewrite nested task tuple *tup*, replacing matched sub-tuples by token keys.

Returns (rewritten_tuple, new_keys) where new_keys maps each token to the
original sub-tuple it replaced.
"""
# NOTE(review): top_head is never used below -- confirm it is dead code.
top_head = tup[0]
out = []
# Stack entries: (tuple to process, output list, (parent, idx, make_tuple), top flag).
stack = [(tup, out, (None, None, None), True)]
new_keys = {}
while stack:
tup, new_tup, (parent, idx, make_tuple), top = stack.pop()
# A make_tuple entry finalizes a previously built list into a tuple.
if make_tuple:
parent[idx] = tuple(new_tup)
continue
if isinstance(tup, tuple):
# Matched sub-tuples (but never the outermost one) are replaced by a
# deterministic token and recorded for promotion to top-level keys.
if match(tup[0]) and (not top):
head = tokenize(*tup)
new_keys[head] = tup
new_tup.append(head)
else:
new_tup.append([])
new_tup, parent, idx = new_tup[-1], new_tup, len(new_tup) - 1
# process rest
# add instruction to wrap data in tuple
stack.append((None, new_tup, (parent, idx, True), None))
# Reversed so elements come off the LIFO stack in original order.
for elm in tup[::-1]:
stack.append((elm, new_tup, (None, None, None), False))
else:
# Non-tuple values pass through unchanged.
new_tup.append(tup)
out = tuple(out[0])
return out, new_keys
def unfuse_match(dsk, match):
    """Promote every task whose head satisfies *match* to its own top-level key.

    Returns a new graph in which matched sub-tuples are replaced by token
    keys, with the original tuples stored under those keys. This makes the
    matched tasks much easier to process individually.
    """
    result = dict(dsk)
    pending = list(dsk)
    while pending:
        key = pending.pop()
        value = result[key]
        if isinstance(value, tuple):
            rewritten, promoted = _unfuse_targets_tuple(value, match)
            result[key] = rewritten
            result.update(promoted)
            # Newly promoted tasks may themselves contain matches.
            pending.extend(promoted)
    return result
def flatten(x):
    """Flatten a nested list structure whose leaves are tuples.

    The stack-based traversal emits leaves in reverse order relative to a
    left-to-right walk; callers rely on the current ordering.
    """
    pending = [x]
    leaves = []
    while pending:
        item = pending.pop()
        if isinstance(item, tuple):
            leaves.append(item)
        else:
            pending.extend(item)
    return leaves
| [
"nbren12@gmail.com"
] | nbren12@gmail.com |
0af4e3f4d461923511b3d58470e8c93fbd7e45fd | 9b44cbb557565fb8040ab2ed5b556a80c5384ddf | /nintendo/nex/kerberos.py | 022999c105a56b95cc3d0bf987e0e5e21875ad93 | [
"MIT"
] | permissive | AmatsuZero/NintendoClients | 2423265af8bfef9b908b7103939cb3ae2e7ffe6d | 2ccf474376d8ef101452888eed7f74358acf699b | refs/heads/master | 2020-04-03T06:24:33.345075 | 2018-10-20T16:31:30 | 2018-10-20T16:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py |
from nintendo.common import crypto
from nintendo.nex import streams
import hashlib
import hmac
class KeyDerivationOld:
    """Legacy key derivation: iterated MD5 with a PID-dependent round count."""

    def __init__(self, base_count, pid_count):
        self.base_count = base_count
        self.pid_count = pid_count

    def derive_key(self, password, pid):
        """Hash *password* with MD5 (base_count + pid % pid_count) times."""
        rounds = self.base_count + pid % self.pid_count
        derived = password
        for _ in range(rounds):
            derived = hashlib.md5(derived).digest()
        return derived
class KeyDerivationNew:
    """Key derivation that mixes the PID into an iterated-MD5 chain.

    The password is hashed base_count times, the little-endian 64-bit PID
    is appended, and the result is hashed pid_count more times.
    """

    def __init__(self, base_count, pid_count):
        self.base_count = base_count
        self.pid_count = pid_count

    def derive_key(self, password, pid):
        # BUG FIX: `struct` was referenced without being imported anywhere in
        # the module, so this method always raised NameError. Imported locally
        # to keep the fix self-contained.
        import struct
        key = password
        for _ in range(self.base_count):
            key = hashlib.md5(key).digest()
        key += struct.pack("<Q", pid)
        for _ in range(self.pid_count):
            key = hashlib.md5(key).digest()
        return key
class KerberosEncryption:
    """RC4 encryption with an HMAC-MD5 integrity trailer (NEX Kerberos)."""

    def __init__(self, key):
        self.key = key
        self.rc4 = crypto.RC4(key, True)

    def check_hmac(self, buffer):
        """Verify the 16-byte HMAC-MD5 trailer of *buffer*."""
        data = buffer[:-0x10]
        checksum = buffer[-0x10:]
        # BUG FIX: hmac.HMAC requires an explicit digestmod on Python 3.8+;
        # omitting it raises TypeError. The protocol uses HMAC-MD5, which was
        # the implicit default this code originally relied on.
        mac = hmac.HMAC(self.key, data, digestmod=hashlib.md5)
        return checksum == mac.digest()

    def decrypt(self, buffer):
        """Verify the trailer, then RC4-decrypt the payload (trailer stripped)."""
        if not self.check_hmac(buffer):
            raise ValueError("Invalid Kerberos checksum (incorrect password)")
        return self.rc4.crypt(buffer[:-0x10])

    def encrypt(self, buffer):
        """RC4-encrypt *buffer* and append its HMAC-MD5 trailer."""
        encrypted = self.rc4.crypt(buffer)
        mac = hmac.HMAC(self.key, encrypted, digestmod=hashlib.md5)
        return encrypted + mac.digest()
class Ticket:
    """Encrypted Kerberos ticket; call decrypt() to populate its fields."""

    def __init__(self, encrypted):
        self.encrypted = encrypted
        self.key = None
        self.pid = None  # PID of the target server, filled in by decrypt()
        self.data = None

    def decrypt(self, kerberos, settings):
        """Decrypt with *kerberos* and parse session key, server PID and data."""
        plaintext = kerberos.decrypt(self.encrypted)
        stream = streams.StreamIn(plaintext, settings)
        self.key = stream.read(settings.get("kerberos.key_size"))
        self.pid = stream.uint()
        self.data = stream.buffer()
| [
"ymarchand@me.com"
] | ymarchand@me.com |
30425982bad1d9a2fe7330a5b784d63625c82679 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Simulation/Digitization/share/RunNumberOverride.py | 3324b6919b5a54dd9b7383b7988ed639587eb1d0 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,483 | py | #--------------------------------------------------------------------
# Overrides for run,lumi - dependent MC
#--------------------------------------------------------------------
include.block("Digitization/RunNumberOverride.py")
from Digitization.DigitizationFlags import digitizationFlags
from AthenaCommon.AppMgr import ServiceMgr
if digitizationFlags.dataRunNumber.get_Value():
if digitizationFlags.dataRunNumber.get_Value() < 0:
raise SystemExit("Given a negative Run Number - please use a real run number from data.")
print 'Overriding run number to be: %s ', digitizationFlags.dataRunNumber.get_Value()
myRunNumber = digitizationFlags.dataRunNumber.get_Value()
myFirstLB = 1
myInitialTimeStamp = 1
#update the run/event info for each event
if not hasattr(ServiceMgr,'EvtIdModifierSvc'):
from AthenaCommon.CfgGetter import getService
getService("EvtIdModifierSvc")
else:
digilog.warning('RunNumberOverride.py :: Will override the settings of the EvtIdModifierSvc that was previously set up!')
from Digitization.RunDependentConfig import buildListOfModifiers
ServiceMgr.EvtIdModifierSvc.Modifiers += buildListOfModifiers()
#fix iov metadata
if not hasattr(ServiceMgr.ToolSvc, 'IOVDbMetaDataTool'):
ServiceMgr.ToolSvc += CfgMgr.IOVDbMetaDataTool()
ServiceMgr.ToolSvc.IOVDbMetaDataTool.MinMaxRunNumbers = [myRunNumber, myRunNumber+1]#2147483647]
myInitialTimeStamp=ServiceMgr.EvtIdModifierSvc.Modifiers[2]
ServiceMgr.EventSelector.OverrideRunNumberFromInput=True
ServiceMgr.EventSelector.OverrideRunNumber=True
ServiceMgr.EventSelector.RunNumber=myRunNumber
ServiceMgr.EventSelector.FirstLB = myFirstLB
try:
from RunDependentSimComps.RunDMCFlags import runDMCFlags
myInitialTimeStamp = runDMCFlags.RunToTimestampDict.getTimestampForRun(myRunNumber)
#print "FOUND TIMESTAMP ", str(myInitialTimeStamp)
except:
myInitialTimeStamp = 1
ServiceMgr.EventSelector.InitialTimeStamp = myInitialTimeStamp
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
if not athenaCommonFlags.DoFullChain:
if digitizationFlags.simRunNumber.get_Value() < 0:
raise SystemExit("Failed to read HIT file Run Number - please check input file for corruption.")
##FIXME need to do some error checking at this point
ServiceMgr.EventSelector.OldRunNumber=digitizationFlags.simRunNumber.get_Value()
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
aa3a52f4f69a6e1eeff72b327ac53efb3ec99be5 | c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79 | /features/himario/mmediting/mmedit/models/backbones/encoder_decoders/pconv_encoder_decoder.py | 939bfbdb854e9aae14d3662cabfd025b40d854bd | [
"MIT",
"Apache-2.0"
] | permissive | obarnard99/vilio | 275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b | 77aac226c3a0910410f11a5999f8908181f57ccd | refs/heads/master | 2023-06-29T17:02:02.282457 | 2021-06-22T09:50:11 | 2021-06-22T09:50:11 | 337,738,373 | 0 | 0 | MIT | 2021-06-22T09:50:12 | 2021-02-10T13:50:49 | Python | UTF-8 | Python | false | false | 1,786 | py | import torch.nn as nn
from mmcv.runner import auto_fp16, load_checkpoint
from mmedit.models.builder import build_component
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class PConvEncoderDecoder(nn.Module):
    """Encoder-decoder built from partial-convolution components.

    Args:
        encoder (dict): Config of the encoder.
        decoder (dict): Config of the decoder.
    """

    def __init__(self, encoder, decoder):
        super(PConvEncoderDecoder, self).__init__()
        self.encoder = build_component(encoder)
        self.decoder = build_component(decoder)
        # Flag consulted by the fp16 training utilities.
        self.fp16_enabled = False

    @auto_fp16()
    def forward(self, x, mask_in):
        """Run the masked input through encoder and decoder.

        Args:
            x (torch.Tensor): Input tensor of shape (n, c, h, w).
            mask_in (torch.Tensor): Mask tensor of shape (n, c, h, w).

        Returns:
            tuple: Output tensor of shape (n, c, h', w') and the final mask.
        """
        encoded = self.encoder(x, mask_in)
        out, final_mask = self.decoder(encoded)
        return out, final_mask

    def init_weights(self, pretrained=None):
        """Load pretrained weights when *pretrained* is a path.

        Args:
            pretrained (str, optional): Checkpoint path. When None, the
                default initialization from `ConvModule` is kept.
        """
        if pretrained is None:
            # Keep ConvModule's default initialization.
            return
        if isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=False, logger=get_root_logger())
        else:
            raise TypeError('pretrained must be a str or None')
| [
"obarnard99@gmail.com"
] | obarnard99@gmail.com |
5ea468993d1d3b99c52367a6dab9133b2462b529 | 3a0430831f3f9fc551ce02f625318754c17a5357 | /app/api.py | 91a21f3023a63a5dd892ed65b19d556b369c1e3d | [
"Apache-2.0",
"MIT"
] | permissive | victor-iyi/heart-disease | 8589409388495029a2219c08fad57e0941bfbff1 | 06540b582e8752d2bb6a32366077872d32d7c0e4 | refs/heads/master | 2023-08-03T11:18:37.711933 | 2021-09-19T16:30:05 | 2021-09-19T16:30:05 | 363,746,469 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | # Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fastapi import Depends, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import RedirectResponse
from app.database import Base, engine
from app.dependencies import get_db
from app.routers import model, predict, users
# Create the database.
Base.metadata.create_all(bind=engine)
# CORS origins.
origins = [
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost",
"http://localhost:8080",
]
# App object.
app = FastAPI(
title='heart-disease',
version='1.0',
description='Predict heart disease with different ML algorithms.',
dependencies=[Depends(get_db)],
)
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
# Add routers.
app.include_router(model.router)
app.include_router(predict.router)
app.include_router(users.router)
# @app.middleware("http")
# async def db_session_middleware(
# request: Request, call_next: Callable[[Request], Response]
# ) -> Response:
# response = Response('Internal server error', status_code=500)
# try:
# request.state.db = SessionLocal()
# response = await call_next(request)
# finally:
# request.state.db.close()
# return response
@app.get('/', include_in_schema=False)
async def docs_redirect() -> RedirectResponse:
return RedirectResponse('/docs')
| [
"javafolabi@gmail.com"
] | javafolabi@gmail.com |
e926ecb68316ba60a8c39cdb7f311e8f6322902c | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/1607bca81baacc0ce7ea79cb24ca3d55d5b5c036signals.py | 1607bca81baacc0ce7ea79cb24ca3d55d5b5c036 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 829 | py | # django imports
import django.dispatch
# Shop
shop_changed = django.dispatch.Signal()
# Catalog
cart_changed = django.dispatch.Signal()
category_changed = django.dispatch.Signal()
product_changed = django.dispatch.Signal()
muecke_sorting_changed = django.dispatch.Signal()
# Marketing
topseller_changed = django.dispatch.Signal()
featured_changed = django.dispatch.Signal()
# Order
order_created = django.dispatch.Signal()
order_paid = django.dispatch.Signal()
order_sent = django.dispatch.Signal()
order_submitted = django.dispatch.Signal()
# Property
property_type_changed = django.dispatch.Signal()
# TODO: Replace this with "m2m_changed" when available, or think about to use
# an explicit relation ship class
product_removed_property_group = django.dispatch.Signal()
# User
customer_added = django.dispatch.Signal()
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
4c103cd4c5846c3bc1cdaf7b14167b8cbc20344f | 7698a74a06e10dd5e1f27e6bd9f9b2a5cda1c5fb | /zzz.scripts/mk.ROC_pdb.csv.mod2012_07.py | 4d4f757e03bf34551f74f75d00a76184ac6e0d69 | [] | no_license | kingbo2008/teb_scripts_programs | ef20b24fe8982046397d3659b68f0ad70e9b6b8b | 5fd9d60c28ceb5c7827f1bd94b1b8fdecf74944e | refs/heads/master | 2023-02-11T00:57:59.347144 | 2021-01-07T17:42:11 | 2021-01-07T17:42:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,073 | py | #! /usr/bin/python
import sys
import gzip
#f = gzip.open('/home/joe/file.txt.gz', 'rb')
#file_content = f.read()
#f.close()
class LIG_DATA:
    """One docked molecule: its DOCK energy score and known-binder flag."""
    def __init__(self, score, ligand_bind):
        # score: DOCK grid energy; ligand_bind: 1 = known active, 0 = decoy.
        self.score = score
        self.ligand_bind = ligand_bind
    def __cmp__(self, other):
        # Python 2 comparison hook: order records by score (best energy first).
        return cmp(self.score, other.score)
def byScore(x, y):
    """Python 2 cmp-style comparator: sort LIG_DATA records ascending by score."""
    return cmp(x.score, y.score)
#################################################################################################################
def read_multiPDB_files_data(mPDB_decoy,mPDB_ligand):
    """Read DOCK6 multi-PDB decoy and known-binder files and collect scores.

    Each file may be a plain .pdb or a gzip-compressed .gz multi-PDB file.
    Scores are taken from header lines of the form '... <x> energy <y> <score>'.

    Returns (ligand_info, count_false, count_true):
      ligand_info -- LIG_DATA records for all molecules, sorted ascending
                     by score (best DOCK energy first)
      count_false -- number of decoys read
      count_true  -- number of known binders read

    NOTE(review): neither file handle is closed, and an unrecognized
    extension leaves file1/file2 unbound (NameError on the readlines below).
    """
    print "decoy file ="+mPDB_decoy
    print "ligand file ="+mPDB_ligand
    namesplit = mPDB_decoy.split('.') #split on '.'
    l = len(namesplit)
    print l
#    for i in range(l):
#        print str(i) + " : " + namesplit[i]
    print namesplit[l-1] + '== gz'
    # Open the decoy file, choosing gzip or plain open by extension.
    if ( namesplit[l-1] == 'gz'):
        print "reading in gziped file"
        file1 = gzip.open(mPDB_decoy,'r')
    elif (namesplit[l-1] == 'pdb'):
        print "reading in multiPDB file"
        file1 = open(mPDB_decoy,'r')
    else:
        print mPDB_decoy+"::wrong file formate"
    namesplit = mPDB_ligand.split('.') #split on '.'
    l = len(namesplit)
    print namesplit[l-1] + '== gz'
    # Same extension dispatch for the known-binder file.
    if ( namesplit[l-1] == 'gz'):
        print "reading in gziped file"
        file2 = gzip.open(mPDB_ligand,'r')
    elif (namesplit[l-1] == 'pdb'):
        print "reading in multiPDB file"
        file2 = open(mPDB_ligand,'r')
    else:
        print mPDB_ligand+"::wrong file formate"
    ligand_info = []
    count_lig = 0
    count_true = 0
    count_false = 0
    # put scores for all molecules in decoys in array
    #print str(mPDB_decoy)+"\n"
    lines = file1.readlines()
    for line in lines:
        linesplit = line.split() #split on white space
        if (len(linesplit) >= 5):
            if (linesplit[2] == "energy"):
                ligand_score = float(linesplit[4])
                ligand_bind = int(0)#not known to binded
                ligand_data = LIG_DATA(ligand_score,ligand_bind)
                ligand_info.append(ligand_data)
                count_lig = count_lig + 1
                count_false = count_false + 1
    del lines
    # put scores for all molecules in ligands in array
    #file2 = open(mPDB_ligand,'r')
    lines = file2.readlines()
    for line in lines:
        linesplit = line.split() #split on white space
        if (len(linesplit) >= 5):
            if (linesplit[2] == "energy"):
                ligand_score = float(linesplit[4])
                ligand_bind = int(1)# known to binded
                ligand_data = LIG_DATA(ligand_score,ligand_bind)
                ligand_info.append(ligand_data)
                count_lig = count_lig + 1
                count_true = count_true + 1
    # Ascending by score so the best-scoring molecules come first for the ROC.
    ligand_info.sort(byScore)
    return ligand_info, count_false, count_true
#################################################################################################################
def write_ROC(OUTPUTFILE, list, num_false, num_true):
    """Write a ROC-curve CSV to OUTPUTFILE.

    Columns:
      NUM_kept -- 1-based rank cutoff (how many top-scored molecules kept)
      TPRate   -- true positives at that cutoff / num_true
      FPRate   -- false positives at that cutoff / num_false

    Arguments:
      list      -- score-sorted records exposing .ligand_bind (1 = known
                   binder, 0 = decoy); best score first
      num_false -- total number of decoys docked (FP denominator)
      num_true  -- total number of known binders docked (TP denominator)
    """
    # Fix: mode must be 'w' -- the original passed the invalid mode string
    # 'write', which only worked by accident on Python 2 (it checked just
    # the first character) and raises ValueError on Python 3.  The with
    # block also guarantees the handle is flushed and closed.
    with open(OUTPUTFILE, 'w') as out:
        out.write("NUM_kept,TPRate,FPRate\n")
        true_positive = 0
        false_positive = 0
        for i, record in enumerate(list):
            if record.ligand_bind == 1:
                true_positive = true_positive + 1
            elif record.ligand_bind == 0:
                false_positive = false_positive + 1
            frac_true = float(true_positive) / float(num_true)
            frac_false = float(false_positive) / float(num_false)
            out.write(str(i + 1) + "," + str(frac_true) + "," + str(frac_false) + "\n")
#################################################################################################################
#################################################################################################################
def main():
    """CLI entry point: validate argv, read both multi-PDB score files,
    then write the ROC curve CSV.

    Usage: script decoys.pdb[.gz] ligands.pdb[.gz] n_decoys n_ligands out.csv
    """
    if len(sys.argv) != 6: # if no input
        print "This function take input files names produced by DOCK6:"
        print "     (1) multiPDB of decoys"
        print "     (2) multPDB file of known binders"
        print "     (3) # of decoys docked"
        print "     (4) # of known binders docked"
        print "     (5) CSV file for a ROC curve"
        print len(sys.argv)
        return
    decoy_file = sys.argv[1]
    ligand_file = sys.argv[2]
    num_dec = int(sys.argv[3])
    num_lig = int(sys.argv[4])
    outputfile = sys.argv[5]
    list, num_false, num_true = read_multiPDB_files_data(decoy_file,ligand_file)
    # Sanity prints: user-supplied totals vs. counts actually parsed.
    print "Decoys:"+str(num_dec)+" vs. "+str(num_false)
    print "Actives:"+str(num_lig)+" vs. "+str(num_true)
    # NOTE(review): the user-supplied totals (not the parsed counts) are used
    # as the ROC denominators -- confirm this is intentional.
    write_ROC(outputfile,list,num_dec, num_lig)
#################################################################################################################
main()
| [
"tbalius@gimel.cluster.ucsf.bkslab.org"
] | tbalius@gimel.cluster.ucsf.bkslab.org |
72423dc435e59236a6497abcfeba64001677d08e | 9d9fcf401bb47ccaaa6c3fd3fe7a8be255762855 | /libs/fire/multi_cmd.py | d7bfc1dbb0d78512a3939b6ba75c0df5c1a1f8c3 | [] | no_license | hanhiver/PythonBasic | f05ef9fe713f69610860c63e5223317decee09ad | 8e012855cce61fb53437758021416e5f6deb02ea | refs/heads/master | 2022-10-11T22:57:47.931313 | 2020-12-30T12:32:44 | 2020-12-30T12:32:44 | 148,477,052 | 0 | 3 | null | 2022-10-01T05:35:03 | 2018-09-12T12:29:33 | Python | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/env python
import fire
def add(x, y):
return x + y
def mul(x, y):
return x * y
if __name__ == '__main__':
fire.Fire()
| [
"handongfr@163.com"
] | handongfr@163.com |
43cb85d31aea27cd755319a97b5c1bc07960a374 | 4f6fdd0effc474226b75ccc5d247509121b90fdf | /dictionary/Count the Frequency of Words Appearing in a String Using a Dictionary.py | 9da1146a61549d106450571ac2250f3ae462780d | [] | no_license | tuhiniris/Python-ShortCodes-Applications | 1580785a6922c70df3b7375cb81e98f4a684d86f | f3fe7ac1c11a631fcd05b9d19b25b1a841d94550 | refs/heads/main | 2023-04-07T21:42:21.094545 | 2021-04-21T07:47:46 | 2021-04-21T07:47:46 | 358,605,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | '''
Problem Description
The program takes a string and counts the frequency of words appearing in that string using a dictionary.
Problem Solution
1. Enter a string and store it in a variable.
2. Declare a list variable and initialize it to an empty list.
3. Split the string into words and store it in the list.
4. Count the frequency of each word and store it in another list.
5. Using the zip() function, merge the lists containing the words and the word counts into a dictionary.
3. Print the final dictionary.
4. Exit.
'''
Text=input('Enter string: ')  # read one line of text from the user
l=[]
l=Text.split()  # split on whitespace into a list of words
wordcounter=[l.count(p) for p in l]  # frequency of each word (O(n^2) count)
print(dict(zip(l,wordcounter)))  # word -> frequency mapping
"noreply@github.com"
] | tuhiniris.noreply@github.com |
99d9daa991e32028fe6782e04f797c37b5627ec7 | 89b45e528f3d495f1dd6f5bcdd1a38ff96870e25 | /pyneng/exercises/12_useful_modules/test_task_12_3.py | 681feeabf02e5611c1c37009e0ce1a26846469ed | [] | no_license | imatyukin/python | 2ec6e712d4d988335fc815c7f8da049968cc1161 | 58e72e43c835fa96fb2e8e800fe1a370c7328a39 | refs/heads/master | 2023-07-21T13:00:31.433336 | 2022-08-24T13:34:32 | 2022-08-24T13:34:32 | 98,356,174 | 2 | 0 | null | 2023-07-16T02:31:48 | 2017-07-25T22:45:29 | Python | UTF-8 | Python | false | false | 1,191 | py | import sys
import task_12_3
sys.path.append("..")
from pyneng_common_functions import (check_function_exists, check_pytest,
unified_columns_output)
check_pytest(__loader__, __file__)
def test_function_created():
    """
    Check that the function has been created
    """
    check_function_exists(task_12_3, "print_ip_table")
"""
Проверка работы задания
"""
reach_ip = ["10.10.1.7", "10.10.1.8", "10.10.1.9", "10.10.1.15"]
unreach_ip = ["10.10.2.1", "10.10.1.2"]
return_value = task_12_3.print_ip_table(reach_ip, unreach_ip)
stdout, err = capsys.readouterr()
correct_stdout = unified_columns_output(
"Reachable Unreachable\n"
"----------- -------------\n"
"10.10.1.7 10.10.2.1\n"
"10.10.1.8 10.10.1.2\n"
"10.10.1.9\n"
"10.10.1.15\n"
)
assert None == return_value, "Функция должна возвращать None"
assert correct_stdout == unified_columns_output(
stdout
), "Функция возвращает неправильное значение"
| [
"i.matyukin@gmail.com"
] | i.matyukin@gmail.com |
04c6cd398b0c0efc787bf89339958f5396d5ce78 | 108f1096b8b0b9e5449bd35b882f5fb689f7d683 | /fetchapp/lib/python2.7/site-packages/fetchcore/resources/tasks/actions/definitions/survey/survey_action.py | 111e18635efe3d88526b824d7b1d1bb0854e7a00 | [] | no_license | JasonVranek/jason_fetchcore | 868b1918aed450d241787ecd63f77881fa9c10d8 | 7c30438f145c5e59522bb0f27a3914ce21a13c33 | refs/heads/master | 2021-09-28T17:47:58.163099 | 2018-08-29T17:31:19 | 2018-08-29T17:31:19 | 116,185,829 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | # Copyright 2017 Fetch Robotics Inc.
# Author(s): Cappy Pitts
# Fetchcore SDK Python
from fetchcore.definitions import ActionStatus, ActionPreemption
from fetchcore.resources import Action
from fetchcore.resources.tasks.actions.definitions import BaseSurvey
class SurveyAction(Action, BaseSurvey):
    """
    The SURVEY action allows the robot to perform data survey activities.
    """
    # Fields a task may optionally set; survey_path_id is the one mandatory field.
    optional_fields = ["intermediate_task_template_id", "max_velocity", "max_angular_velocity", "limit_velocity"]
    required_fields = ["survey_path_id"]
    def __init__(
            self, id=None, action_definition="SURVEY", preemptable=ActionPreemption.NONE, task=None,
            status=ActionStatus.NEW, start=None, end=None, survey_path_id=None, intermediate_task_template_id=None,
            max_velocity=None, max_angular_velocity=None, limit_velocity=None,
            inputs=None, outputs=None, states=None, on_complete=None, on_pause=None, on_resume=None, created=None,
            modified=None, **kwargs
    ):
        """Forward every field unchanged to the Action/BaseSurvey initializer."""
        super(SurveyAction, self).__init__(
            id=id, action_definition=action_definition, preemptable=preemptable, task=task, status=status, start=start,
            end=end, survey_path_id=survey_path_id, intermediate_task_template_id=intermediate_task_template_id,
            max_velocity=max_velocity, max_angular_velocity=max_angular_velocity, limit_velocity=limit_velocity,
            inputs=inputs, outputs=outputs, states=states, on_complete=on_complete, on_pause=on_pause,
            on_resume=on_resume, created=created, modified=modified, **kwargs
        )
| [
"jvranek@ucsc.edu"
] | jvranek@ucsc.edu |
78718a541e7e9247a27b366867c649f24bd1a6b3 | e9f2ab8952f91b5f908a14256120827bb2ea98df | /makeproject/templates/kaggle/src/submissions/base.py | 78cef16c5fb5453845842cfa9dea5d6cdae2f85b | [] | no_license | mkurnikov/project_templates | dc77445595f96b7d25afc5efc513e8cbd645dda3 | e94ad7abbcdcef2aaecaa86b4df511edca1a8b17 | refs/heads/master | 2021-01-10T21:49:49.777814 | 2015-08-05T16:17:46 | 2015-08-05T16:17:46 | 38,043,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from __future__ import division, print_function, \
unicode_literals, absolute_import
# noinspection PyUnresolvedReferences
from py3compatibility import *
import pandas as pd
import numpy as np
from kaggle_tools.submission import BaseSubmittion
import settings
#TODO: create subclass for BaseSubmission
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
0547c276fbecc0137a54fe83996856e7a20eaa9b | 3adec884f06eabfe50d4ab3456123e04d02b02ff | /347. Top K Frequent Elements.py | 8058bcdc3acf8930a4db5d97988697177aa0d38b | [] | no_license | windmzx/pyleetcode | c57ecb855c8e560dd32cf7cf14616be2f91ba50e | d0a1cb895e1604fcf70a73ea1c4b1e6b283e3400 | refs/heads/master | 2022-10-05T17:51:08.394112 | 2020-06-09T09:24:28 | 2020-06-09T09:24:28 | 250,222,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from typing import List
class Solution:
    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent values in nums (order unspecified).

        The original hand-rolled a recursive min-heap over the distinct
        keys; this replaces it with the standard library -- Counter for
        the frequency table and heapq.nlargest for the top-k selection --
        which is equivalent, O(n log k), and far easier to verify.
        """
        from collections import Counter
        import heapq
        freq = Counter(nums)
        # nlargest over the distinct keys, ranked by their counts.
        return heapq.nlargest(k, freq, key=freq.get)
if __name__ == "__main__":
    # Quick manual check of Solution.topKFrequent.
    solver = Solution()
    # Renamed from 're' -- that name shadowed the stdlib re module.
    data = [6, 0, 1, 4, 9, 7, -3, 1, -4, -8, 4, -7, -3, 3, 2, -3, 9, 5, -4, 0]
    result = solver.topKFrequent(data, 6)
    print(result)
| [
"2281927774@qq.com"
] | 2281927774@qq.com |
9a864a23b31aaeea5bac77efb0d209b18048ccce | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/javaclass/format/attributes/LocalVariableJava.pyi | c0e9bdf926d2aa623bb2ba1e6666bf5ed2a9c568 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | pyi | import ghidra.app.util.bin
import ghidra.program.model.data
import java.lang
class LocalVariableJava(object, ghidra.app.util.bin.StructConverter):
    """Generated .pyi type stub for Ghidra's LocalVariableJava, a Java
    class-file LocalVariableTable entry (name/descriptor indices into the
    constant pool plus the start_pc/length bytecode range and slot index).
    """
    # Shorthand DataType constants inherited from StructConverter.
    ASCII: ghidra.program.model.data.DataType = char
    BYTE: ghidra.program.model.data.DataType = byte
    DWORD: ghidra.program.model.data.DataType = dword
    IBO32: ghidra.program.model.data.DataType = ImageBaseOffset32
    POINTER: ghidra.program.model.data.DataType = pointer
    QWORD: ghidra.program.model.data.DataType = qword
    STRING: ghidra.program.model.data.DataType = string
    UTF16: ghidra.program.model.data.DataType = unicode
    UTF8: ghidra.program.model.data.DataType = string-utf8
    VOID: ghidra.program.model.data.DataType = void
    WORD: ghidra.program.model.data.DataType = word
    def __init__(self, __a0: ghidra.app.util.bin.BinaryReader): ...
    def equals(self, __a0: object) -> bool: ...
    def getClass(self) -> java.lang.Class: ...
    def getDescriptorIndex(self) -> int: ...
    def getIndex(self) -> int: ...
    def getLength(self) -> int: ...
    def getNameIndex(self) -> int: ...
    def getStartPC(self) -> int: ...
    def hashCode(self) -> int: ...
    def notify(self) -> None: ...
    def notifyAll(self) -> None: ...
    def toDataType(self) -> ghidra.program.model.data.DataType: ...
    def toString(self) -> unicode: ...
    @overload
    def wait(self) -> None: ...
    @overload
    def wait(self, __a0: long) -> None: ...
    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
    # Property mirrors of the getters above.
    @property
    def descriptorIndex(self) -> int: ...
    @property
    def index(self) -> int: ...
    @property
    def length(self) -> int: ...
    @property
    def nameIndex(self) -> int: ...
    @property
    def startPC(self) -> int: ...
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
97aecc33ded12b846d35b67a7626508f6f3e7a9f | e6f73cc3398050b23df28e3f11a10afbb46ee38b | /python/coroutines/grep.py | 2cef424b464aa5bee0f207cc6bdbc8aa24bae288 | [] | no_license | monarin/divelite | 3db262bf07a0de870d0bfe650ebdf21225b88c1b | 0d297bda7368c5295336565431fbfa18a5686f15 | refs/heads/master | 2023-06-29T23:42:34.541874 | 2023-06-08T17:59:59 | 2023-06-08T17:59:59 | 120,695,376 | 0 | 1 | null | 2018-09-06T00:03:30 | 2018-02-08T01:41:19 | C++ | UTF-8 | Python | false | false | 218 | py | def grep(pattern):
print("Looking for %s" % pattern)
while True:
line = (yield)
if pattern in line:
print(line)
if __name__ == "__main__":
    # Drop into an interactive IPython shell with grep() in scope so the
    # coroutine can be primed (next) and driven (send) by hand.
    from IPython import embed
    embed()
| [
"monarin@gmail.com"
] | monarin@gmail.com |
c99921b89fbd79ffd6ef633acf75fd991446f18b | 14f4d045750f7cf45252838d625b2a761d5dee38 | /argo/argo/models/io_k8s_api_core_v1_container_state_running.py | 38cc7bc2b3bdfd1bb30654da433179e5b368fb21 | [] | no_license | nfillot/argo_client | cf8d7413d728edb4623de403e03d119fe3699ee9 | c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa | refs/heads/master | 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,534 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from argo.models.io_k8s_apimachinery_pkg_apis_meta_v1_time import IoK8sApimachineryPkgApisMetaV1Time # noqa: F401,E501
class IoK8sApiCoreV1ContainerStateRunning(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # attribute name -> swagger type name
    swagger_types = {
        'started_at': 'IoK8sApimachineryPkgApisMetaV1Time'
    }
    # attribute name -> JSON key in the API definition
    attribute_map = {
        'started_at': 'startedAt'
    }
    def __init__(self, started_at=None):  # noqa: E501
        """IoK8sApiCoreV1ContainerStateRunning - a model defined in Swagger"""  # noqa: E501
        self._started_at = None
        self.discriminator = None
        if started_at is not None:
            self.started_at = started_at
    @property
    def started_at(self):
        """Gets the started_at of this IoK8sApiCoreV1ContainerStateRunning.  # noqa: E501
        :return: The started_at of this IoK8sApiCoreV1ContainerStateRunning.  # noqa: E501
        :rtype: IoK8sApimachineryPkgApisMetaV1Time
        """
        return self._started_at
    @started_at.setter
    def started_at(self, started_at):
        """Sets the started_at of this IoK8sApiCoreV1ContainerStateRunning.
        :param started_at: The started_at of this IoK8sApiCoreV1ContainerStateRunning.  # noqa: E501
        :type: IoK8sApimachineryPkgApisMetaV1Time
        """
        self._started_at = started_at
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested swagger models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Include extra keys when the generated model subclasses dict.
        if issubclass(IoK8sApiCoreV1ContainerStateRunning, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IoK8sApiCoreV1ContainerStateRunning):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"nfillot@weborama.com"
] | nfillot@weborama.com |
958bbb8fa3877cb6cd1c0cffd21fa211640c5f87 | 69a60cdf962de532d63aa6111ddd7e3f9663abf3 | /wagtail/tests/snippets/models.py | dbb8a274bd0064d213a3cb10b1bbfa72d87386fd | [
"BSD-3-Clause"
] | permissive | JoshBarr/wagtail | 47b827dc7394a8ebda76a7cc40e343fcd181ad96 | 7b8fbf89dac69386dfeb57dd607f43ab42d1ffab | refs/heads/master | 2021-01-09T06:35:13.010607 | 2016-02-08T13:06:05 | 2016-02-08T13:06:05 | 33,353,097 | 1 | 2 | null | 2016-02-23T20:14:13 | 2015-04-03T07:49:11 | Python | UTF-8 | Python | false | false | 1,397 | py | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.models import register_snippet
# AlphaSnippet and ZuluSnippet are for testing ordering of
# snippets when registering. They are named as such to ensure
# thier ordering is clear. They are registered during testing
# to ensure specific [in]correct register ordering
# AlphaSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class AlphaSnippet(models.Model):
    """Snippet named to sort first; registered during TestSnippetOrdering."""
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# ZuluSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class ZuluSnippet(models.Model):
    """Snippet named to sort last; registered during TestSnippetOrdering."""
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# Register model as snippet using register_snippet as both a function and a decorator
class RegisterFunction(models.Model):
    """Model registered as a snippet via a plain register_snippet() call below."""
    pass
register_snippet(RegisterFunction)  # function-call form of registration
@register_snippet
class RegisterDecorator(models.Model):
    """Model registered as a snippet via the decorator form."""
    pass
# A snippet model that inherits from index.Indexed can be searched on
@register_snippet
class SearchableSnippet(models.Model, index.Indexed):
    """Snippet mixing in index.Indexed so its text field can be searched."""
    text = models.CharField(max_length=255)
    # Expose `text` to the wagtailsearch backend.
    search_fields = (
        index.SearchField('text'),
    )
    def __str__(self):
        return self.text
| [
"karlhobley10@gmail.com"
] | karlhobley10@gmail.com |
ccae41156260c5b9dad887bcd88cb8509c6bb0db | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py | 08efa014f10f08c2f519021bbee6e4eebfdda389 | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 2,970 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/AvatarInputHandler/AimingSystems/StrategicAimingSystem.py
import math
import BigWorld
import Math
from Math import Vector3
from AvatarInputHandler import mathUtils, AimingSystems
from AvatarInputHandler.AimingSystems import IAimingSystem
from AvatarInputHandler.cameras import _clampPoint2DInBox2D
class StrategicAimingSystem(IAimingSystem):
    """Aiming system for the strategic (top-down artillery) camera mode.

    Tracks a 2D position on the arena plane (_planePosition), keeps it
    clamped inside the arena bounding box, and derives the camera matrix
    from the terrain height under that position plus a fixed offset.
    """
    # Near-vertical look direction (pitched 0.001 rad off straight down).
    _LOOK_DIR = Vector3(0, -math.cos(0.001), math.sin(0.001))
    # Read-only views of the internal state.
    heightFromPlane = property(lambda self: self.__heightFromPlane)
    planePosition = property(lambda self: self._planePosition)
    def __init__(self, height, yaw):
        """height: camera offset above terrain; yaw: initial rotation."""
        super(StrategicAimingSystem, self).__init__()
        self._matrix = mathUtils.createRotationMatrix((yaw, 0, 0))
        self._planePosition = Vector3(0, 0, 0)
        self.__camDist = 0.0
        self.__height = height
        self.__heightFromPlane = 0.0
    def destroy(self):
        pass
    def enable(self, targetPos):
        self.updateTargetPos(targetPos)
    def disable(self):
        pass
    def getDesiredShotPoint(self, terrainOnlyCheck=False):
        # Cast straight down from the camera position to find the aim point.
        return AimingSystems.getDesiredShotPoint(self._matrix.translation, Vector3(0, -1, 0), True, True, terrainOnlyCheck)
    def handleMovement(self, dx, dy):
        # Move in the camera's yaw frame, projected onto the ground plane.
        shift = self._matrix.applyVector(Vector3(dx, 0, dy))
        self._planePosition += Vector3(shift.x, 0, shift.z)
        self._updateMatrix()
    def updateTargetPos(self, targetPos):
        self._planePosition.x = targetPos.x
        self._planePosition.z = targetPos.z
        self._updateMatrix()
    def setYaw(self, yaw):
        self._matrix = mathUtils.createRotationMatrix((yaw, 0, 0))
        self._updateMatrix()
    def getCamDist(self):
        return self.__camDist
    def overrideCamDist(self, camDist):
        self.__camDist = camDist
        return camDist
    def getShotPoint(self):
        desiredShotPoint = self.getDesiredShotPoint()
        return Vector3(desiredShotPoint.x, self.getCamDist(), desiredShotPoint.z)
    def getZoom(self):
        pass
    @property
    def height(self):
        return self.__height
    @height.setter
    def height(self, value):
        self.__height = value
    def _clampToArenaBB(self):
        # Keep the tracked point inside the arena's 2D bounding box.
        bb = BigWorld.player().arena.arenaType.boundingBox
        pos2D = _clampPoint2DInBox2D(bb[0], bb[1], Math.Vector2(self._planePosition.x, self._planePosition.z))
        self._planePosition.x = pos2D[0]
        self._planePosition.z = pos2D[1]
    def _updateMatrix(self):
        self._clampToArenaBB()
        # Probe terrain height with a vertical collision segment (+1000 .. -250).
        collPoint = BigWorld.wg_collideSegment(BigWorld.player().spaceID, self._planePosition + Math.Vector3(0, 1000.0, 0), self._planePosition + Math.Vector3(0, -250.0, 0), 3)
        self.__heightFromPlane = 0.0 if collPoint is None else collPoint.closestPoint[1]
        self._matrix.translation = self._planePosition + Vector3(0, self.__heightFromPlane + self.__height, 0)
        return
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
da398b4bf6ce07b7dccedfe62090db6d3e267595 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/287/89507/submittedfiles/testes.py | 795ba4f86f78ee3cf4a8b30b24f3d9231a1fef26 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # -*- coding: utf-8 -*-
from minha_bib import*
import time
import os
import random
board=[" "," "," "," "," "," "," "," "," "," "]  # index 0 unused; cells 1-9 left-to-right, top-to-bottom
# Draw the empty 3x3 grid.  NOTE(review): mixes print("...") with bare
# Python 2 print ""+... statements -- this script only runs under Python 2.
print(" | | ")
print""+board[1]+"|"+board[2]+"|"+board[3]+""
print(" | | ")
print("---|---|---")
print(" | | ")
print(""+board[4]+"|"+board[5]+"|"+board[6]+"")
print(" | | ")
print("---|---|---")
print(" | | ")
print(""+board[7]+"|"+board[8]+"|"+board[9]+"")
print(" | | ")
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
28065f6e9056ed0090c0a0c6bedbaf7df7761d86 | 8ba907676e912550aca4490e44ba838abb9058f3 | /utils/lib_geo_trans.py | 964a95e666e4c6c8eeba6bc5fedb788d7ff5ac9e | [
"MIT"
] | permissive | Michael-lhh/ros_detect_planes_from_depth_img | 8276a97a05bcbaa1c917ebb95557e14ef3fb2d82 | 8593cca2014129e0115ac8a01966ef03c7bd418d | refs/heads/master | 2023-05-27T09:22:19.369969 | 2021-06-16T03:03:37 | 2021-06-16T03:03:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,591 | py |
'''
Geometric and camera related transformations
'''
import numpy as np
import copy
import cv2
''' =============================================================================== '''
''' Basic maths. '''
def form_T(R, p):
    """Assemble a 4x4 homogeneous transform from rotation R and translation p."""
    transform = np.identity(4)
    transform[0:3, 0:3] = R
    transform[0:3, 3:4] = np.array(p).reshape((3, 1))
    return transform
def get_Rp_from_T(T):
    """Split a 4x4 homogeneous transform into (R, p): 3x3 rotation, 3x1 translation."""
    return T[0:3, 0:3], T[0:3, 3:4]
def inv_R_p(R, p):
    """Invert the transform (R, p) via a full 4x4 matrix inverse.

    Returns (R_inv, p_inv) such that composing with the original gives
    the identity transform.
    """
    T = form_T(R, p)
    T = np.linalg.inv(T)
    R_inv, p_inv = get_Rp_from_T(T)
    return R_inv, p_inv
def xyz_to_T(x=None, y=None, z=None):
    ''' Build a pure-translation 4x4 homogeneous transform.
        Components left as None contribute no translation (stay 0).
    '''
    T = np.identity(4)
    for row, value in enumerate((x, y, z)):
        if value is not None:
            T[row, 3] = value
    return T
def rot3x3_to_4x4(R):
    """Embed a 3x3 rotation in a 4x4 homogeneous matrix (zero translation)."""
    T = np.identity(4)
    T[:3, :3] = R
    return T
def rot(axis, angle, matrix_len=4):
    """Rotation about `axis` by `angle` radians via OpenCV's Rodrigues formula.

    Returns a 3x3 rotation matrix, or its 4x4 homogeneous embedding when
    matrix_len == 4 (the default).
    """
    R_vec = np.array(axis).astype(float)*angle
    R, _ = cv2.Rodrigues(R_vec)
    if matrix_len == 4:
        R = rot3x3_to_4x4(R)
    return R
def rotx(angle, matrix_len=4):
    """Rotation about the x-axis by `angle` radians (4x4 by default)."""
    return rot([1, 0, 0], angle, matrix_len)
def roty(angle, matrix_len=4):
    """Rotation about the y-axis by `angle` radians (4x4 by default)."""
    return rot([0, 1, 0], angle, matrix_len)
def rotz(angle, matrix_len=4):
    """Rotation about the z-axis by `angle` radians (4x4 by default)."""
    return rot([0, 0, 1], angle, matrix_len)
def euler2matrix(x, y, z, order='rxyz'):
    """Euler angles (radians) -> 4x4 rotation, computed as Rx(x)*Ry(y)*Rz(z).

    NOTE(review): the `order` parameter is accepted but ignored -- the
    rotation order is always x, then y, then z.
    """
    return rotx(x).dot(roty(y)).dot(rotz(z))
''' =============================================================================== '''
''' Camera related transformations between world/camera/image. '''
def distortPoint(x, y, distortion_coeffs=None):
    ''' Apply radial/tangential lens distortion to one point.

    Arguments:
        x {float}, y {float}: Point's position on the camera normalized
            plane (z=1).
        distortion_coeffs {array}: 5 parameters (k1, k2, p1, p2, k3) of
            the radial/tangential model, as in OpenCV's calib3d docs,
            where distortion is applied when projecting world -> image.
    Return:
        (x_distort, y_distort) {float, float}; unchanged when no
        coefficients are supplied.
    '''
    if distortion_coeffs is None:
        return x, y
    d = distortion_coeffs
    k1, k2, p1, p2, k3 = d[0], d[1], d[2], d[3], d[4]
    r2 = x * x + y * y
    r4 = r2 * r2
    r6 = r4 * r2
    # Radial term shared by both axes, plus the tangential terms.
    x_distort = x * (1 + k1 * r2 + k2 * r4 + k3 * r6) + \
        2 * p1 * x * y + p2 * (r2 + 2 * x * x)
    y_distort = y * (1 + k1 * r2 + k2 * r4 + k3 * r6) + \
        p1 * (r2 + 2 * y * y) + 2 * p2 * x * y
    return x_distort, y_distort
def world2cam(pts_3d_world, T_cam_to_world):
    ''' Transform points from world coordinates into camera coordinates.

    Arguments:
        pts_3d_world {np.ndarray or list}: 3xN (or a single 3-vector).
        T_cam_to_world {np.ndarray}: 4x4 homogeneous transform.
    Return:
        pts_3d_camera {np.ndarray}: 3xN points in camera coordinates.
    '''
    pts = pts_3d_world
    if type(pts) == list:
        pts = np.array(pts)
    if len(pts.shape) == 1:  # (3,) --> (3, 1)
        pts = pts[:, np.newaxis]
    if pts.shape[0] == 3:  # append homogeneous row of ones: (3, N) --> (4, N)
        pts = np.vstack((pts, np.ones((1, pts.shape[1]))))
    # Apply the transform, then drop the homogeneous coordinate.
    return T_cam_to_world.dot(pts)[0:3, :]
def cam2pixel(pts_3d, camera_intrinsics, distortion_coeffs=None):
    ''' Project camera-frame points onto the image plane.

    Arguments:
        pts_3d {np.ndarray or list}: 3xN points in camera coordinates
            (or a single 3-vector).
        camera_intrinsics {np.ndarray}: 3x3 intrinsic matrix.
        distortion_coeffs: optional 5-parameter radial/tangential model.
    Return:
        image_points_xy {np.ndarray}: 2xN pixel coordinates.
    '''
    pts = pts_3d
    if type(pts) == list:
        pts = np.array(pts)
    if len(pts.shape) == 1:  # (3,) --> (3, 1)
        pts = pts[:, np.newaxis]
    # Normalize onto the z=1 plane.
    pts = pts / pts[2, :]
    if distortion_coeffs is not None:
        # Distort each normalized point before applying the intrinsics.
        for col in range(pts.shape[1]):
            pts[0, col], pts[1, col] = distortPoint(
                pts[0, col], pts[1, col], distortion_coeffs)
    # Apply intrinsics and keep only the (u, v) rows.
    return camera_intrinsics.dot(pts)[0:2, :]
def world2pixel(pts_3d_in_world, T_cam_to_world, camera_intrinsics, distortion_coeffs=None):
    ''' Combination of `world2cam` and `cam2pixel`.
    Arguments:
        pts_3d_in_world {np.ndarray}: 3xN (an Nx3 array is transposed).
        T_cam_to_world {np.ndarray}: 4x4.
        camera_intrinsics {np.ndarray}: 3x3.
    Return:
        image_points_xy {np.ndarray}: 2xN pixel coordinates.
    '''
    # -- Validate the intrinsics matrix.
    K = np.array(camera_intrinsics)
    if K.shape != (3, 3):
        raise RuntimeError("The camera_intrinsics needs to be a 3x3 matrix.")
    pts = pts_3d_in_world
    # Accept an Nx3 array by transposing it to the expected 3xN layout.
    if isinstance(pts, np.ndarray) and pts.ndim == 2 and pts.shape[0] != 3:
        pts = pts.T
    # -- World -> camera -> pixel.
    return cam2pixel(world2cam(pts, T_cam_to_world), K, distortion_coeffs)
''' =============================================================================== '''
''' Unit tests. '''


def test_basic_maths():
    # Smoke test only: exercises the rotation helpers and prints the result.
    R = euler2matrix(np.pi/2, 0, 0)
    # NOTE(review): the euler2matrix result is immediately overwritten --
    # presumably both calls are only checked for "does not crash"; confirm.
    R = rotz(np.pi/2)
    print(R)


if __name__ == "__main__":
    # Run the smoke test when executed as a script.
    test_basic_maths()
    pass
| [
"felixchenfy@gmail.com"
] | felixchenfy@gmail.com |
1035c714671c2047f5a945d1214a62dc28e0f491 | f23572d0c916ed3325480de2daca15d9d16f9186 | /queues/min_sliding_window.py | ed57f373c395238e96dc1dfc4456da85752a956e | [] | no_license | iCodeIN/data_structures | 08067aa661ea1dac01c70ac5a3e53f5e146fc82c | bbd01fb1785827c64ea28636352c6e0d7c8d62f4 | refs/heads/master | 2023-02-21T06:40:05.336522 | 2021-01-22T05:41:27 | 2021-01-22T05:41:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from collections import deque
# also contains appendleft in addition to popleft for deque
# produce minimum value for every sliding window of size k
def MinSlidingWindow(A, k):
    """Return the minimum of every contiguous window of size k in A.

    Monotonic-deque technique: the deque holds indices into A whose values
    are strictly increasing front-to-back, so the front is always the index
    of the current window's minimum. Overall O(n).
    """
    window = deque()  # indices into A; A[window[0]] is the current minimum
    mins = []
    for idx, val in enumerate(A):
        # Drop indices whose values can never be a window minimum again.
        while window and A[window[-1]] >= val:
            window.pop()
        window.append(idx)
        # Evict the front index once it falls out of [idx-k+1, idx].
        if window[0] <= idx - k:
            window.popleft()
        # Record a result once the first full window is available.
        if idx >= k - 1:
            mins.append(A[window[0]])
    return mins
if __name__ == "__main__":
    # Demo run; the trace below walks through it. Expected: [2, 1, 1, 1, 5, 6, 6]
    print(MinSlidingWindow([4, 3, 2, 1, 5, 7, 6, 8, 9], 3))
# 0. D = [(4,2)]
# i = 0
# res = []
# 1. D = [(3,3)] - 4>3
# i = 1
# res = []
# 2. D = [(2,4)] - 3>2
# i = 2 >= 2
# res = [2]
# 3. D = [(1,5)] - 2>1
# i = 3 >= 2
# res = [2,1]
# 4. D = [(1,5),(5,6)] - 1<5
# i = 4 >= 2
# res = [2,1,1]
# 5. D = [(5,6),(7,7)] - 5<7
# i = 5 >= 2
# res = [2,1,1,1]
# popleft
# 6. D = [(6,8)] - 7>6
# i = 6 >= 2
# res = [2,1,1,1,5]
# pop left
# 7. D = [(6,8),(8,9)] - 6<8
# i = 7 >= 2
# res = [2,1,1,1,5,6]
# 8. D = [(6,8),(8,9),(9,10)] - 8<9
# i = 8 >= 2
# res = [2,1,1,1,5,6,6]
| [
"chosun41"
] | chosun41 |
7928546bbbc4a5f18a10b92de79c3c75e25eb4a1 | 9d1238fb0e4a395d49a7b8ff745f21476c9d9c00 | /framework/Tests/PAS/PAS/SecuritySettings/HealthCheck/API/test_add_system_discovery.py | 9aa0afb1c625fc1578efac3eeb6629f4607d9939 | [] | no_license | jaspalsingh92/TestAutomation-1 | a48ee1d3b73386f1bf8f53328a5b55444238e054 | e631c67255b10f150e0012991fb1474ede904417 | refs/heads/master | 2023-04-18T14:52:08.836221 | 2021-04-07T12:01:07 | 2021-04-07T12:01:07 | 357,175,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,080 | py | import logging
import pytest
from Shared.API.discovery import Discovery
from Shared.API.redrock import RedrockController
from Shared.API.server import ServerManager
# Module-level logger shared by the tests in this file.
logger = logging.getLogger("test")
@pytest.mark.api
@pytest.mark.pas_failed
@pytest.mark.bhavna
def test_add_system_discovery(core_session, ad_discovery_profile, pas_ad_discovery_config, core_value,
                              delete_discovered_system):
    """
    Test Case: Add System (C1555)

    End-to-end flow: clean up any previously discovered objects, run an AD
    discovery profile, then verify the health-check fields on the discovered
    accounts, the configured domains, and the discovered systems.

    :param core_session: Authenticated Centrify Session
    :param ad_discovery_profile: Fixture to create AD profile for system discovery
    :param pas_ad_discovery_config: fixture to read yaml file to create profile
    :param core_value: fixture to read core.yaml
    :param delete_discovered_system: fixture for cleanup before and after discovery
    """
    config_profile = pas_ad_discovery_config
    profile_data = config_profile['ad_profile_data'][0]
    domain_details = config_profile['domains']
    # Resolve each configured domain name to its VaultDomain row id.
    domain_name = []
    domain_id_list = []
    for domains in domain_details:
        domain_name.append(domains['Name'])
    for domain in domain_name:
        domain_id = RedrockController.get_id_from_name(core_session, domain, 'VaultDomain')
        domain_id_list.append(domain_id)
    # Delete System and account before discovery
    delete_discovered_system(profile_data['discovered_system'], domain_id_list)
    profile_list, profile_name, profile_id, account_list, account_id = ad_discovery_profile(domain_name,
                                                                                            profile_data['account_in_domain'])
    # Run Discovery to discover window system
    result_run, success, response = Discovery.run_discovery_profile(core_session, profile_id, wait_for_run=True)
    assert success, f"Failed to run discovery profile: {response}"
    logger.info(f"Discovery ran successfully, API response: {result_run} ")
    # Check Last Verify and Last Verify Result for Account
    last_verify = None
    last_verify_result = None
    account_list = RedrockController.get_accounts(core_session)
    discovered_account = []
    for account in account_list:
        if account['AccountDiscoveredTime'] is not None:
            discovered_account.append(account['ID'])
            # NOTE(review): last_verify/last_verify_result end up holding the
            # values of the *last* discovered account iterated -- presumably
            # intentional (all discovered accounts share the status); confirm.
            last_verify = account['Status']
            last_verify_result = account['LastHealthCheck']
    assert len(discovered_account) != 0, f"Failed to discover system, account and services {discovered_account}"
    logger.info(f"Discovered Account is: {discovered_account}")
    assert last_verify_result is not None, "Failed to test account"
    assert last_verify == "Missing Password", "Failed to test account"
    logger.info(f"Last Test:{last_verify} and Last Test Result: {last_verify_result}")
    # Check Last Test Result and Last Test for Domain
    result = ServerManager.get_all_domains(core_session)
    for domain in result:
        if domain['Name'] in domain_name:
            last_test = domain['LastHealthCheck']
            last_test_result = domain['HealthCheckInterval']
    # NOTE(review): if no returned domain matches domain_name, last_test is
    # unbound here and the assert raises NameError instead of a clean failure.
    assert last_test is not None, "Failed to test Domain"
    assert last_test_result is None, "Failed to test Domain"
    logger.info(f"Domain Name: {domain['Name']}, Last Test:{last_test} and Last Test Result: {last_test_result}")
    # Check Last Test Result and Last Test for Discovered System
    result = RedrockController.get_computers(core_session)
    last_test = None
    last_test_result = None
    discovered_system = []
    for system in result:
        if system['DiscoveredTime'] is not None:
            discovered_system.append(system['ID'])
            last_test = system['LastHealthCheck']
            last_test_result = system['HealthCheckInterval']
    assert last_test is not None, "Failed to test system"
    assert last_test_result is None, "Failed to test system"
    logger.info(f"Last Test:{last_test} and Last Test Result: {last_test_result}")
    # Cleanup after discovery
    delete_discovered_system(profile_data['discovered_system'], domain_id_list)
| [
"singh.jaspal92@gmail.com"
] | singh.jaspal92@gmail.com |
4cfeb664ff12b359f9dbeec012b98f619b3bd7b8 | 2f9eddc071b87698e27a3fd5c6066a0db99f6f92 | /tree/_min_max_dep.py | a4f206380a87d640134f78dd0375fabd1afa8581 | [] | no_license | Lancher/coding-challenge | d8c5ec528ef802fef9817e64be94fc246d95e84f | 4c8f833a6e3375c6cbb31ff487ad766c53a74755 | refs/heads/master | 2022-01-20T03:19:11.452412 | 2022-01-10T05:23:47 | 2022-01-10T05:23:47 | 137,972,744 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | import Queue
# LEETCODE@ 104. Maximum Depth of Binary Tree
#
# --END--
def max_dep_dfs(node):
    # Recursive depth: an empty subtree contributes 0, otherwise one plus
    # the deeper of the two children (LeetCode 104).
    if not node:
        return 0
    left = max_dep_dfs(node.left)
    right = max_dep_dfs(node.right)
    return max(left, right) + 1
def max_dep_bfs(root):
    """Maximum depth of a binary tree via level-order (BFS) traversal.

    Counts levels: one outer iteration consumes exactly one level of the
    frontier. An empty tree has depth 0.
    """
    # collections.deque works on both Python 2 and 3, unlike the
    # Python-2-only Queue module used at the top of this file, and avoids
    # Queue.Queue's needless locking overhead for single-threaded BFS.
    from collections import deque
    if not root:
        return 0
    frontier = deque([root])
    depth = 0
    while frontier:
        depth += 1
        # Snapshot the level size before appending children.
        for _ in range(len(frontier)):
            node = frontier.popleft()
            if node.left:
                frontier.append(node.left)
            if node.right:
                frontier.append(node.right)
    return depth
# LEETCODE@ 111. Minimum Depth of Binary Tree
#
# --END--
def min_dep_dfs(node):
    """Minimum depth (root to nearest *leaf*) of a binary tree (LeetCode 111).

    A node with a single child must recurse through that child: the missing
    side contributes 0 but is not a leaf, so it cannot terminate a path.
    """
    if not node:
        return 0
    left_h = min_dep_dfs(node.left)
    right_h = min_dep_dfs(node.right)
    # BUGFIX: the original tested `right_h == 0 or right_h == 0`, ignoring a
    # missing *left* child and undercounting right-only chains (e.g. a root
    # whose only child is on the right returned 1 instead of 2).
    if left_h == 0 or right_h == 0:
        return left_h + right_h + 1
    return min(left_h, right_h) + 1
def min_dep_bfs(node):
    """Minimum depth via BFS: the first leaf encountered is at minimum depth.

    Returns 0 for an empty tree.
    """
    # collections.deque replaces the Python-2-only Queue module (see
    # max_dep_bfs); behaviour is identical for this single-threaded BFS.
    from collections import deque
    if not node:
        return 0
    frontier = deque([node])
    depth = 0
    while frontier:
        depth += 1
        # Consume one level per outer iteration; stop at the first leaf.
        for _ in range(len(frontier)):
            cur = frontier.popleft()
            if not cur.left and not cur.right:
                return depth
            if cur.left:
                frontier.append(cur.left)
            if cur.right:
                frontier.append(cur.right)
| [
"steve.liushihao@gmail.com"
] | steve.liushihao@gmail.com |
277283bf7f5bf92b021cd55232e5da438ada4b97 | f513c794fd95cb72ee776029ece38a08c4b4da0b | /corehq/ex-submodules/casexml/apps/stock/migrations/0007_auto.py | bcf40e679f31c5b41290da11d6c2ba9b816f7a20 | [] | no_license | bglar/commcare-hq | a92f034a0c2faf787da8321b4d79e55f098bd89f | 972129fc26864c08c7bef07874bd2a7218550bff | refs/heads/master | 2021-05-28T20:44:12.876151 | 2015-01-16T16:23:52 | 2015-01-16T16:23:52 | 29,391,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add a DB index on DocDomainMapping.domain_name."""

    def forwards(self, orm):
        # Adding index on 'DocDomainMapping', fields ['domain_name']
        db.create_index(u'stock_docdomainmapping', ['domain_name'])

    def backwards(self, orm):
        # Removing index on 'DocDomainMapping', fields ['domain_name']
        db.delete_index(u'stock_docdomainmapping', ['domain_name'])

    # Frozen ORM model snapshot that South uses while running this migration.
    models = {
        u'stock.docdomainmapping': {
            'Meta': {'object_name': 'DocDomainMapping'},
            'doc_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'primary_key': 'True', 'db_index': 'True'}),
            'doc_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'domain_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
        },
        u'stock.stockreport': {
            'Meta': {'object_name': 'StockReport'},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'form_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'stock.stocktransaction': {
            'Meta': {'object_name': 'StockTransaction'},
            'case_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '5'}),
            'report': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['stock.StockReport']"}),
            'section_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'stock_on_hand': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '5'}),
            'subtype': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        }
    }

    complete_apps = ['stock']
| [
"czue@dimagi.com"
] | czue@dimagi.com |
ebbf80eac2e785abd9f92fc22c63373e21f2c3dd | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/566/566.reshape-the-matrix.py | 6fd988aa17f03fcca01f758968c8067302f63bc8 | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 1,723 | py | #
# @lc app=leetcode id=566 lang=python3
#
# [566] Reshape the Matrix
#
# https://leetcode.com/problems/reshape-the-matrix/description/
#
# algorithms
# Easy (58.29%)
# Total Accepted: 70K
# Total Submissions: 120K
# Testcase Example: '[[1,2],[3,4]]\n1\n4'
#
# In MATLAB, there is a very useful function called 'reshape', which can
# reshape a matrix into a new one with different size but keep its original
# data.
#
#
#
# You're given a matrix represented by a two-dimensional array, and two
# positive integers r and c representing the row number and column number of
# the wanted reshaped matrix, respectively.
#
# The reshaped matrix need to be filled with all the elements of the original
# matrix in the same row-traversing order as they were.
#
#
#
# If the 'reshape' operation with given parameters is possible and legal,
# output the new reshaped matrix; Otherwise, output the original matrix.
#
#
# Example 1:
#
# Input:
# nums =
# [[1,2],
# [3,4]]
# r = 1, c = 4
# Output:
# [[1,2,3,4]]
# Explanation:The row-traversing of nums is [1,2,3,4]. The new reshaped matrix
# is a 1 * 4 matrix, fill it row by row by using the previous list.
#
#
#
# Example 2:
#
# Input:
# nums =
# [[1,2],
# [3,4]]
# r = 2, c = 4
# Output:
# [[1,2],
# [3,4]]
# Explanation:There is no way to reshape a 2 * 2 matrix to a 2 * 4 matrix. So
# output the original matrix.
#
#
#
# Note:
#
# The height and width of the given matrix is in range [1, 100].
# The given r and c are all positive.
#
#
#
class Solution:
    def matrixReshape(self, nums, r, c):
        """Reshape `nums` into r rows by c columns, preserving row-major order.

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]]

        If the element count does not match r * c the reshape is illegal and
        the original matrix is returned unchanged (per the problem statement).
        The stub body was previously unimplemented (returned None).
        """
        rows = len(nums)
        cols = len(nums[0]) if rows else 0
        # Illegal reshape: element counts differ.
        if rows * cols != r * c:
            return nums
        # Flatten row-major, then slice into c-sized rows.
        flat = [value for row in nums for value in row]
        return [flat[i * c:(i + 1) * c] for i in range(r)]
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
207b59c4cdeb20cd6a73191000974cb26fb23684 | cba0f1286e4271ac35101a25d5040b2e4f405bde | /cgi-bin/admin/carlson/temp.py | 8800bc51f31f9109b6f30307d006c9efdb185bf6 | [] | no_license | akrherz/pals | 271c92d098909abb5b912db4ae08f0c3589e5ec7 | adc213333fb23dc52d6784ce160c4ff8a8f193e3 | refs/heads/master | 2021-01-10T15:01:59.570168 | 2019-12-18T16:59:08 | 2019-12-18T16:59:08 | 45,484,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | #!/usr/local/bin/python
# This will give the option of which file they want to edit
# Daryl Herzmann 5-28-99
import os, cgi, style, string
from pgext import *
# Database handle; connect() comes from the `pgext` wildcard import above.
mydb = connect('carlson')
# Web root of the carlson site and the directory whose files get indexed.
base_fref = '/home/httpd/html/carlson'
images_dir = base_fref+"/images"
def Main():
    # Insert one row into the `images` table for every file found in the
    # images directory, keyed by the filename without its extension.
    files = os.listdir(images_dir)
    for file in files:
        # NOTE(review): 'file' shadows the Python 2 builtin of the same name.
        file = string.split(file, ".")
        thisfile = file[0]
        # NOTE(review): the filename is interpolated into the SQL string
        # unescaped -- SQL injection risk if filenames are not trusted.
        insert = mydb.query("INSERT into images values('"+thisfile+"')")
        print thisfile

Main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
2d04c58334bb9b3903567d3a60527efcb56eed64 | b2174c63eec5164ec01b35c57c278d2753859d3d | /src_working_6actions_same/helpers.py | 6ef2e8d81229ebb170d9ad0e7068b37b6109f5bd | [] | no_license | koriavinash1/Brain_Tumor_Localization_using_RL_agent | 4bbc923b1ce1b3b58e4d24489b1d305c49a4ad33 | ad89e2ec483cb6da87eef437f67ac79dcfc53a8d | refs/heads/master | 2020-03-18T02:05:50.258001 | 2018-06-26T23:52:48 | 2018-06-26T23:52:48 | 134,174,420 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | import os
import random
import numpy as np
import cv2
from PIL import Image
import matplotlib
# Select the non-interactive Agg backend before importing pyplot, so the
# module also works on headless machines (no display server).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from torchvision.models.resnet import model_urls as resnetmodel_urls
from tqdm import tqdm
def calculate_iou(img_mask, gt_mask):
    """Overlap score between two binary masks, used as the agent's reward.

    NOTE(review): despite the name this computes the Dice coefficient
    2*|A∩B| / (|A| + |B|), not intersection-over-union; the small 1e-3
    term in the denominator guards against division by zero.
    """
    pred = np.uint8(img_mask) > 0
    truth = np.uint8(gt_mask) > 0
    # print (np.unique(img_mask), np.unique(gt_mask))
    intersection = np.sum(pred & truth)
    total = np.sum(pred) + np.sum(truth)
    return 2.0 * float(intersection) / (float(total) + 1e-3)
def calculate_overlapping(img_mask, gt_mask):
    """Fraction of the ground-truth mask covered by the predicted mask.

    NOTE(review): `gt_mask *= 1.0` mutates the caller's array in place when
    gt_mask is a float ndarray -- confirm callers do not rely on its value.
    cv2.bitwise_and operates on the raw element bytes, so both masks are
    presumably same-shape, same-dtype arrays -- verify at the call sites.
    """
    gt_mask *= 1.0
    img_and = cv2.bitwise_and(img_mask, gt_mask)
    # Pixels set in both masks.
    j = np.count_nonzero(img_and)
    # Pixels set in the ground truth.
    i = np.count_nonzero(gt_mask)
    # abs() is defensive; counts are always non-negative. 1e-3 avoids /0.
    overlap = float(j)/(abs(float(i)) + 1e-3)
    return overlap
| [
"koriavinash1@gmail.com"
] | koriavinash1@gmail.com |
9a9347f24537bb74e652cd0909cfb0ca463974dc | 7c99ea5b1ffe089c97615336daf4b6ceed9a5b00 | /Configurations/VBSOS/SignalRegions/2018/comb_v6/structure.py | 5de54cae5b45b2abbefc158f669d666f3f1a613c | [] | no_license | flaviacetorelli/PlotsConfigurations | 948faadba356e1d5e6f546dc11dd8dacfe1c1910 | aa5cf802c86902378617f566186bc638e69f9936 | refs/heads/master | 2022-05-02T00:37:56.070453 | 2022-03-18T10:14:23 | 2022-03-18T10:14:23 | 235,580,894 | 0 | 1 | null | 2020-01-22T13:37:30 | 2020-01-22T13:37:29 | null | UTF-8 | Python | false | false | 2,258 | py | structure['DY'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Dyemb'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Dyveto'] = {
'isSignal' : 0,
'isData' : 0,
'removeFromCuts' : [ k for k in cuts ],
}
structure['Zjj'] = {
'isSignal' : 0,
'isData' : 0
}
structure['top'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VVV'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VZ'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Vg'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VgS_H'] = {
'isSignal' : 0,
'isData' : 0
}
structure['VgS_L'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggWW'] = {
'isSignal' : 0,
'isData' : 0
}
structure['Fake_e'] = {
'isSignal' : 0,
'isData' : 0,
}
structure['Fake_m'] = {
'isSignal' : 0,
'isData' : 0,
}
structure['qqH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ZH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ggZH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['ttH_hww'] = {
'isSignal' : 0,
'isData' : 0
}
structure['WWewk'] = {
'isSignal' : 1,
'isData' : 0 ,
}
structure['DATA'] = {
'isSignal' : 0,
'isData' : 1
} | [
"flavia.cetorelli@cern.ch"
] | flavia.cetorelli@cern.ch |
a1be2df230d5addeea923afeee386b9b80c8f561 | 3767a04b2448048ef0c4163aa337608298db1e7d | /20190703/김주완 BOJ1907.py | bc4675c5efd83192fb8db64d1474b9a3301ac882 | [] | no_license | joowankim/algorithm_w | 5faff48e512e86700de0cf7f617ea280149bc7b9 | 09d23056e34583e9d3fafc8c396308c8ceac039b | refs/heads/master | 2022-02-28T09:13:28.938741 | 2019-10-05T01:13:52 | 2019-10-05T01:13:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 953 | py | M = ['', '', '']
M[0], tmp = map(str, input().split('+'))
M[1], M[2] = map(str, tmp.split('='))
m = [{'C': 0, 'H': 0, 'O': 0},
{'C': 0, 'H': 0, 'O': 0},
{'C': 0, 'H': 0, 'O': 0}]
for i in range(3):
for j in range(len(M[i])):
if M[i][j] in m[0].keys():
m[i][M[i][j]] += 1
else:
m[i][M[i][j-1]] += int(M[i][j]) - 1
def permutation(depth, coef):
    """Search coefficients in 1..10 that balance the parsed equation.

    depth is the number of coefficients already chosen, coef the partial
    list. At depth 3 the candidate is checked against the global atom
    counts `m`: coef[0]*mol0 + coef[1]*mol1 must equal coef[2]*mol2 for
    each of C, H and O. Returns the first balancing [a, b, c] found, or
    False if no extension of this prefix balances.

    Changes from the original: the unused local `s` was removed, and `seq`
    is initialised before the loop so it can never be unbound.
    """
    if depth == 3:
        for atom in ('C', 'H', 'O'):
            if coef[0] * m[0][atom] + coef[1] * m[1][atom] != coef[2] * m[2][atom]:
                return False
        return coef
    seq = False
    for i in range(1, 11):
        seq = permutation(depth + 1, coef + [i])
        if seq:
            break
    return seq
# The problem guarantees a balanced solution exists, so permutation()
# returns a 3-element coefficient list here (never False).
ans = permutation(0, [])
print(str(ans[0]) + ' ' + str(ans[1]) + ' ' + str(ans[2]))
"rlawndhks217@gmail.com"
] | rlawndhks217@gmail.com |
ed7a274b8b8aaf1c1ad7d1a96678951f153b63ae | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/numpy/distutils/fcompiler/intel.py | f3e616e1de0c8cb8934fa23da8ffad899f6c0444 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 6,626 | py | # http://developer.intel.com/software/products/compilers/flin/
from __future__ import division, absolute_import, print_function
import sys
from numpy.distutils.ccompiler import simple_version_match
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
# Class names this module exposes to numpy.distutils' compiler registry.
compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
             'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
             'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
def intel_version_match(type):
    """Build a version matcher keyed on the Intel compiler flavour token."""
    # Only the flavour (e.g. '32-bit', 'Itanium|IA-64') differs between the
    # Intel compiler families; anchor on the surrounding banner boilerplate.
    pattern = r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,)
    return simple_version_match(start=pattern)
class BaseIntelFCompiler(FCompiler):
    # Behaviour shared by all Intel Fortran compiler flavours below.

    def update_executables(self):
        # Build the version-probe command line around a throwaway
        # Fortran source file.
        f = dummy_fortran_file()
        self.executables['version_cmd'] = ['<F77>', '-FI', '-V', '-c',
                                           f + '.f', '-o', f + '.o']

    def runtime_library_dir_option(self, dir):
        # Embed a runtime shared-library search path (rpath) via the linker.
        return '-Wl,-rpath="%s"' % dir
class IntelFCompiler(BaseIntelFCompiler):
    # ifort/ifc targeting 32-bit (IA-32) applications.
    compiler_type = 'intel'
    compiler_aliases = ('ifort',)
    description = 'Intel Fortran Compiler for 32-bit apps'
    version_match = intel_version_match('32-bit|IA-32')

    possible_executables = ['ifort', 'ifc']

    executables = {
        'version_cmd' : None,          # set by update_executables
        'compiler_f77' : [None, "-72", "-w90", "-w95"],
        'compiler_f90' : [None],
        'compiler_fix' : [None, "-FI"],
        'linker_so' : ["<F90>", "-shared"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }

    pic_flags = ['-fPIC']
    module_dir_switch = '-module '  # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_free(self):
        # Free-form Fortran source.
        return ['-FR']

    def get_flags(self):
        return ['-fPIC']

    def get_flags_opt(self):  # Scipy test failures with -O2
        # A single space-separated string passed as one list entry.
        return ['-xhost -openmp -fp-model strict -O1']

    def get_flags_arch(self):
        return []

    def get_flags_linker_so(self):
        opt = FCompiler.get_flags_linker_so(self)
        v = self.get_version()
        # NOTE(review): lexicographic string comparison ('10.0' < '8.0') --
        # presumably tolerated upstream; confirm before relying on it.
        if v and v >= '8.0':
            opt.append('-nofor_main')
        if sys.platform == 'darwin':
            # Here, it's -dynamiclib
            try:
                idx = opt.index('-shared')
                opt.remove('-shared')
            except ValueError:
                idx = 0
            opt[idx:idx] = ['-dynamiclib', '-Wl,-undefined,dynamic_lookup']
        return opt
class IntelItaniumFCompiler(IntelFCompiler):
    # ifort/efort/efc targeting Itanium (IA-64) applications.
    compiler_type = 'intele'
    compiler_aliases = ()
    description = 'Intel Fortran Compiler for Itanium apps'

    version_match = intel_version_match('Itanium|IA-64')

    possible_executables = ['ifort', 'efort', 'efc']

    executables = {
        'version_cmd' : None,
        'compiler_f77' : [None, "-FI", "-w90", "-w95"],
        'compiler_fix' : [None, "-FI"],
        'compiler_f90' : [None],
        'linker_so' : ['<F90>', "-shared"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }
class IntelEM64TFCompiler(IntelFCompiler):
    # ifort targeting 64-bit (Intel 64 / EM64T) applications.
    compiler_type = 'intelem'
    compiler_aliases = ()
    description = 'Intel Fortran Compiler for 64-bit apps'

    version_match = intel_version_match('EM64T-based|Intel\\(R\\) 64|64|IA-64|64-bit')

    possible_executables = ['ifort', 'efort', 'efc']

    executables = {
        'version_cmd' : None,
        'compiler_f77' : [None, "-FI"],
        'compiler_fix' : [None, "-FI"],
        'compiler_f90' : [None],
        'linker_so' : ['<F90>', "-shared"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
        }

    def get_flags(self):
        return ['-fPIC']

    def get_flags_opt(self):  # Scipy test failures with -O2
        # Single space-separated flag string, as in IntelFCompiler.
        return ['-openmp -fp-model strict -O1']

    def get_flags_arch(self):
        return ['']
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
class IntelVisualFCompiler(BaseIntelFCompiler):
    # Intel Visual Fortran on Windows, 32-bit target; uses MSVC-style
    # slash-prefixed flags and lib.exe as the archiver.
    compiler_type = 'intelv'
    description = 'Intel Visual Fortran Compiler for 32-bit apps'
    version_match = intel_version_match('32-bit|IA-32')

    def update_executables(self):
        # Windows flavour of the version probe (slash-style options).
        f = dummy_fortran_file()
        self.executables['version_cmd'] = ['<F77>', '/FI', '/c',
                                           f + '.f', '/o', f + '.o']

    ar_exe = 'lib.exe'
    possible_executables = ['ifort', 'ifl']

    executables = {
        'version_cmd' : None,
        'compiler_f77' : [None],
        'compiler_fix' : [None],
        'compiler_f90' : [None],
        'linker_so' : [None],
        'archiver' : [ar_exe, "/verbose", "/OUT:"],
        'ranlib' : None
        }

    compile_switch = '/c '
    object_switch = '/Fo'  # No space after /Fo!
    library_switch = '/OUT:'  # No space after /OUT:!
    module_dir_switch = '/module:'  # No space after /module:
    module_include_switch = '/I'

    def get_flags(self):
        opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore']
        return opt

    def get_flags_free(self):
        return []

    def get_flags_debug(self):
        return ['/4Yb', '/d2']

    def get_flags_opt(self):
        return ['/O1']  # Scipy test failures with /O2

    def get_flags_arch(self):
        return ["/arch:IA32", "/QaxSSE3"]

    def runtime_library_dir_option(self, dir):
        # rpath has no equivalent on Windows.
        raise NotImplementedError
class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
    # Intel Visual Fortran targeting Itanium on Windows.
    compiler_type = 'intelev'
    description = 'Intel Visual Fortran Compiler for Itanium apps'

    version_match = intel_version_match('Itanium')

    possible_executables = ['efl']  # XXX this is a wild guess
    ar_exe = IntelVisualFCompiler.ar_exe

    executables = {
        'version_cmd' : None,
        'compiler_f77' : [None, "-FI", "-w90", "-w95"],
        'compiler_fix' : [None, "-FI", "-4L72", "-w"],
        'compiler_f90' : [None],
        'linker_so' : ['<F90>', "-shared"],
        'archiver' : [ar_exe, "/verbose", "/OUT:"],
        'ranlib' : None
        }
class IntelEM64VisualFCompiler(IntelVisualFCompiler):
    # Intel Visual Fortran targeting 64-bit Windows.
    compiler_type = 'intelvem'
    description = 'Intel Visual Fortran Compiler for 64-bit apps'

    # Raw string: '\(' is an invalid escape in a plain string literal
    # (DeprecationWarning on modern Python, a SyntaxError eventually);
    # the resulting regex text is byte-identical to the original.
    version_match = simple_version_match(start=r'Intel\(R\).*?64,')

    def get_flags_arch(self):
        # No extra architecture flags for this target.
        return ['']
if __name__ == '__main__':
    # Manual smoke test: detect the Intel compiler and print its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='intel')
    compiler.customize()
    print(compiler.get_version())
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
513598f526cd14a5936675ff73cb3496d0a35d39 | acff427a36d6340486ff747ae9e52f05a4b027f2 | /main/server/database/postgresql/actions.py | 437615bb7b7084027884e1c5e5f6ec42df5622b7 | [] | no_license | jeremie1112/pisilinux | 8f5a03212de0c1b2453132dd879d8c1556bb4ff7 | d0643b537d78208174a4eeb5effeb9cb63c2ef4f | refs/heads/master | 2020-03-31T10:12:21.253540 | 2018-10-08T18:53:50 | 2018-10-08T18:53:50 | 152,126,584 | 2 | 1 | null | 2018-10-08T18:24:17 | 2018-10-08T18:24:17 | null | UTF-8 | Python | false | false | 2,773 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
autotools.configure("--with-python \
--with-perl \
--include=/usr/include/postgresql \
--with-includes=/usr/include/libxml2/ \
--with-tcl \
--with-krb5 \
--with-openssl \
--enable-nls \
--with-pam \
--with-libxml \
--with-libxslt \
--with-ldap \
--enable-integer-datetimes \
--enable-thread-safety \
--enable-depend \
--host=%s \
--libdir=/usr/lib \
--disable-rpath \
--with-docdir=/usr/share/doc/postgresql" % get.CHOST())
def build():
if get.LDFLAGS():
ld = "-j1 LD=%s %s" % (get.LD(), get.LDFLAGS())
else:
ld = "-j1 LD=%s" % get.LD()
autotools.make(ld)
shelltools.cd("contrib")
autotools.make(ld)
shelltools.cd("..")
shelltools.cd("contrib/xml2")
autotools.make(ld)
shelltools.cd("../..")
shelltools.cd("src/interfaces/libpq")
autotools.make(ld)
shelltools.cd("../../..")
shelltools.cd("doc")
autotools.make(ld)
shelltools.cd("..")
def install():
autotools.rawInstall("DESTDIR=%s LIBDIR=%s/usr/lib" % (get.installDIR(), get.installDIR()))
shelltools.cd("contrib")
autotools.rawInstall("DESTDIR=%s LIBDIR=%s/usr/lib" % (get.installDIR(), get.installDIR()))
shelltools.cd("..")
shelltools.cd("contrib/xml2")
autotools.rawInstall("DESTDIR=%s LIBDIR=%s/usr/lib" % (get.installDIR(), get.installDIR()))
shelltools.cd("../..")
shelltools.cd("src/interfaces/libpq")
autotools.rawInstall("DESTDIR=%s LIBDIR=%s/usr/lib" % (get.installDIR(), get.installDIR()))
shelltools.cd("../../..")
shelltools.cd("doc")
autotools.rawInstall("DESTDIR=%s LIBDIR=%s/usr/lib" % (get.installDIR(), get.installDIR()))
shelltools.cd("..")
# No static libs
pisitools.remove("/usr/lib/*.a")
pisitools.dodoc("README", "HISTORY", "COPYRIGHT")
pisitools.dodoc("doc/MISSING_FEATURES", "doc/KNOWN_BUGS", "doc/TODO", "doc/bug.template")
pisitools.dodir("/var/lib/postgresql")
pisitools.dodir("/var/lib/postgresql/data")
pisitools.dodir("/var/lib/postgresql/backups")
| [
"erkanisik@yahoo.com"
] | erkanisik@yahoo.com |
795eb9f2e13d466e4bc7976c8dacf91423a78183 | a730e6c54cd99beb00fe7b9c08ff92c5e1800bbf | /accounts/views.py | 7921d24f44f6bc7d4f8cf316e05d136d7c5d326b | [
"BSD-3-Clause"
] | permissive | pombredanne/dezede | 3d2f4b7bdd9163ae1c0b92593bbd5fae71d1cd91 | 22756da8f949e28b9d789936d58eabe813ef4278 | refs/heads/master | 2021-01-17T15:29:30.382243 | 2013-11-09T17:35:42 | 2013-11-09T17:35:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,294 | py | # coding: utf-8
from __future__ import unicode_literals, division
import datetime
from django.contrib.auth import get_user_model
from django.contrib.sites.models import get_current_site
from django.core.exceptions import PermissionDenied
from django.db import connection
from django.db.models import Count
from django.http import Http404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.views.generic import DetailView, TemplateView
from registration.backends.default.views import RegistrationView
from cache_tools import cached_ugettext_lazy as _
from libretto.models import AncrageSpatioTemporel
from .models import HierarchicUser
from .forms import UserRegistrationForm
class GrantToAdmin(DetailView):
model = get_user_model()
template_name = 'accounts/grant_to_admin.html'
def grant_user(self, user):
user.is_staff = True
user.save()
site_url = 'http://' + get_current_site(self.request).domain
email_content = render_to_string(
'accounts/granted_to_admin_email.txt',
{'user': user, 'site_url': site_url})
user.email_user(
'[Dezède] Accès autorisé à l’administration',
email_content)
def get_context_data(self, **kwargs):
context = super(GrantToAdmin, self).get_context_data(**kwargs)
current_user = self.request.user
user_to_be_granted = self.object
if current_user != user_to_be_granted.mentor:
raise PermissionDenied
if user_to_be_granted.is_staff:
context['already_staff'] = True
else:
self.grant_user(user_to_be_granted)
return context
class MyRegistrationView(RegistrationView):
form_class = UserRegistrationForm
def form_valid(self, request, form):
# Taken from the overriden method.
new_user = self.register(request, **form.cleaned_data)
form.save(request, new_user)
success_url = self.get_success_url(request, new_user)
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
class EvenementsGraph(TemplateView):
template_name = 'accounts/include/evenements_graph.svg'
content_type = 'image/svg+xml'
displayed_range = 150 # years
row_length = 20 # years
rect_size = 16 # pixels
rect_margin = 4 # pixels
# When we set the font size, the figures are smaller than
# the specified size.
figure_size_factor = 0.8
hue = 0 # degrees
legend_levels = 6
def get_context_data(self, **kwargs):
context = super(EvenementsGraph, self).get_context_data(**kwargs)
qs = AncrageSpatioTemporel.objects.all()
username = self.request.GET.get('username')
if username is not None:
User = get_user_model()
if not User.objects.filter(username=username).exists():
raise Http404
qs = qs.filter(evenements_debuts__owner__username=username)
else:
qs = qs.filter(evenements_debuts__isnull=False)
context['data'] = data = list(
qs
.extra({'year': connection.ops.date_trunc_sql('year', 'date')})
.values('year').annotate(n=Count('evenements_debuts'))
.order_by('year'))
for d in data:
d['year'] = d['year'].year
years = [d['year'] for d in data]
current_year = datetime.datetime.now().year
if not years:
years = [current_year]
data = [{'year': current_year, 'n': 0}]
min_year = min(years)
max_year = max(years)
margin = max(0, (self.displayed_range - (max_year - min_year)) // 2)
max_upper_margin = max(0, current_year - max_year)
margin_offset = max(0, margin - max_upper_margin)
context['min_year'] = min_year = min_year - (margin + margin_offset)
context['max_year'] = max_year = max_year + (margin - margin_offset)
counts = [d['n'] for d in data] + [0]
context['max_n'] = max(counts)
# Fill data with missing years between min_year and max_year.
for year in range(min_year, max_year + 1):
if year not in years:
data.append({'year': year, 'n': 0})
context['data'] = sorted(data, key=lambda d: d['year'])
def get_float(attr):
return float(self.request.GET.get(attr) or getattr(self, attr))
row_length = self.row_length
context['row_length'] = row_length
context['size'] = get_float('rect_size')
context['margin'] = get_float('rect_margin')
context['figure_size_factor'] = get_float('figure_size_factor')
context['hue'] = get_float('hue')
isolated_starting_years = min_year % row_length
isolated_ending_years = max_year % row_length
min_year -= isolated_starting_years
max_year -= isolated_ending_years
n_rows = (max_year - min_year) // row_length
if isolated_ending_years > 0:
n_rows += 1
context['n_rows'] = n_rows
def levels_iterator(start, end, n_steps):
step = (end - start) / (n_steps - 1)
i = start
while i <= end:
yield i
i += step
context['legend_levels'] = list(
levels_iterator(0, 1, self.legend_levels))
return context
class HierarchicUserDetail(DetailView):
model = HierarchicUser
slug_url_kwarg = 'username'
slug_field = 'username'
def get_context_data(self, **kwargs):
context = super(HierarchicUserDetail, self).get_context_data(**kwargs)
if self.object.legal_person:
context['verboses'] = {
'disciple': _('membre'),
'disciples': _('membres'),
}
else:
context['verboses'] = {
'disciple': _('disciple'),
'disciples': _('disciples'),
}
if getattr(self.object.mentor, 'legal_person', False):
context['verboses']['mentor'] = _('appartenance')
else:
context['verboses']['mentor'] = _('mentor')
return context
| [
"bordage.bertrand@gmail.com"
] | bordage.bertrand@gmail.com |
acb86f015cf6960f819f6dc00d2d369bc70c0fdb | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /TZXG9RfcZ7T3o43QF_7.py | 9943dc3731e9a63743d8ec74a3b86010cc66d355 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py |
def same_length(txt):
oCache = 0
zCache = 0
charCache = None
for i in txt:
if i == '1':
if charCache == '0':
if oCache != zCache:
return False
oCache = 0
zCache = 0
oCache +=1
charCache = '1'
elif i == '0':
zCache += 1
charCache = '0'
else:
return False
if oCache == zCache:
return True
else:
return False
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f760fb13f796457f91598dadd8ec11118dea7015 | 23345cb27e6a8310001dd367c87b6c59af0b5931 | /Practics/combination.py | 324fe0aa17ee8c4e6ba42b6ffa3952d4d4823e7b | [] | no_license | ccsandhanshive/Practics-Code | 20840423385e7cd26672236cfbb565ff432af4cb | 048f2d2cd4bd76391e4578c18f20467f4526d137 | refs/heads/master | 2021-04-23T17:44:15.003384 | 2020-04-16T07:06:54 | 2020-04-16T07:06:54 | 249,952,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
n=int(input())
a=[]
for i in range(n):
a1=int(input())
a.append(a1)
for i in range(len(a)):
for j in range(len(a)-1):
temp=a[i]
a[i]=a[j]
a[j]=temp
print(a)
| [
"noreply@github.com"
] | ccsandhanshive.noreply@github.com |
e0d3567933a7012b0be5d51b4011aef35b15b1b6 | 33c0dc58266d6d1f4e64604ebadcc580c7bd8b38 | /alembic/versions/b29cbd0bb078_initial_revision.py | e163b22334a715d5c976d50cd3ea34be6cddd5cd | [
"Apache-2.0"
] | permissive | bcgov/wps-api | 67ee86b1e3055b5f0b505c8b818a09b8f7fcd453 | 1392ae87434428b10854bf67dd8b7517da6b3a02 | refs/heads/main | 2020-12-23T10:39:31.739567 | 2020-08-14T21:26:26 | 2020-08-14T21:26:26 | 237,125,626 | 1 | 3 | Apache-2.0 | 2020-08-19T21:40:00 | 2020-01-30T02:37:50 | Python | UTF-8 | Python | false | false | 6,129 | py | """Initial revision
Revision ID: b29cbd0bb078
Revises:
Create Date: 2020-07-27 11:28:42.510750
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import geoalchemy2
# revision identifiers, used by Alembic.
revision = 'b29cbd0bb078'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('prediction_models',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('abbreviation', sa.String(), nullable=False),
sa.Column('projection', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('abbreviation', 'projection'),
comment='Identifies the Weather Prediction model'
)
op.create_index(op.f('ix_prediction_models_id'),
'prediction_models', ['id'], unique=False)
op.create_table('processed_model_run_files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sa.String(), nullable=False),
sa.Column('create_date', sa.TIMESTAMP(
timezone=True), nullable=False),
sa.Column('update_date', sa.TIMESTAMP(
timezone=True), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('url'),
comment='Record to indicate that a particular model run file has been processed.'
)
op.create_index(op.f('ix_processed_model_run_files_id'),
'processed_model_run_files', ['id'], unique=False)
op.create_table('prediction_model_grid_subsets',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('prediction_model_id',
sa.Integer(), nullable=False),
sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='POLYGON',
from_text='ST_GeomFromEWKT', name='geometry'), nullable=False),
sa.ForeignKeyConstraint(['prediction_model_id'], [
'prediction_models.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('prediction_model_id', 'geom'),
comment='Identify the vertices surrounding the area of interest'
)
op.create_index(op.f('ix_prediction_model_grid_subsets_id'),
'prediction_model_grid_subsets', ['id'], unique=False)
op.create_table('prediction_model_runs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('prediction_model_id',
sa.Integer(), nullable=False),
sa.Column('prediction_run_timestamp', sa.TIMESTAMP(
timezone=True), nullable=False),
sa.ForeignKeyConstraint(['prediction_model_id'], [
'prediction_models.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint(
'prediction_model_id', 'prediction_run_timestamp'),
comment='Identify which prediction model run (e.g. 2020 07 07 12:00).'
)
op.create_index(op.f('ix_prediction_model_runs_id'),
'prediction_model_runs', ['id'], unique=False)
op.create_table('model_run_grid_subset_predictions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('prediction_model_run_id',
sa.Integer(), nullable=False),
sa.Column('prediction_model_grid_subset_id',
sa.Integer(), nullable=False),
sa.Column('prediction_timestamp', sa.TIMESTAMP(
timezone=True), nullable=False),
sa.Column('tmp_tgl_2', postgresql.ARRAY(
sa.Float()), nullable=True),
sa.Column('rh_tgl_2', postgresql.ARRAY(
sa.Float()), nullable=True),
sa.ForeignKeyConstraint(['prediction_model_grid_subset_id'], [
'prediction_model_grid_subsets.id'], ),
sa.ForeignKeyConstraint(['prediction_model_run_id'], [
'prediction_model_runs.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('prediction_model_run_id',
'prediction_model_grid_subset_id', 'prediction_timestamp'),
comment='The prediction for a grid subset of a particular model run.'
)
op.create_index(op.f('ix_model_run_grid_subset_predictions_id'),
'model_run_grid_subset_predictions', ['id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_model_run_grid_subset_predictions_id'),
table_name='model_run_grid_subset_predictions')
op.drop_table('model_run_grid_subset_predictions')
op.drop_index(op.f('ix_prediction_model_runs_id'),
table_name='prediction_model_runs')
op.drop_table('prediction_model_runs')
op.drop_index(op.f('ix_prediction_model_grid_subsets_id'),
table_name='prediction_model_grid_subsets')
op.drop_table('prediction_model_grid_subsets')
op.drop_index(op.f('ix_processed_model_run_files_id'),
table_name='processed_model_run_files')
op.drop_table('processed_model_run_files')
op.drop_index(op.f('ix_prediction_models_id'),
table_name='prediction_models')
op.drop_table('prediction_models')
# ### end Alembic commands ###
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
d9645630fc062ba3cbac14e1fd15210df2feb750 | 21d5222ad6c9bdbbc6dec049ececa7f7559b1c7c | /backend/config.py | d437712413d64daebd8de7840c5a7f2a5e2e2502 | [] | no_license | RachitBhargava99/HackVenture-Sparta | 50cec057f07980af25e9f9adbdc60a4efd4bfbec | c69407fcff88fddba86c52c783ce4b5064b072fd | refs/heads/master | 2020-04-17T10:27:17.588040 | 2019-01-19T07:55:27 | 2019-01-19T07:55:27 | 166,501,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import os
class Config:
SECRET_KEY = '0917b13a9091915d54b6336f45909539cce452b3661b21f386418a257883b30a'
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI')
SQLALCHEMY_TRACK_MODIFICATIONS = False
ENDPOINT_ROUTE = ''
CURRENT_URL = '127.0.0.2'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'rachitbhargava99@gmail.com'
MAIL_PASSWORD = 'Ananya88#'
| [
"rachitbhargava99@gmail.com"
] | rachitbhargava99@gmail.com |
9358c316dfba80b8159e31b590a3628c52e4c55b | 2947efe1efd6e19981d0aa5c55dfc5f3700b8a1b | /segm/utils/distributed.py | 7c9c06bfe277feb41c9b0124b395d351b7498771 | [] | no_license | srdg/segmenter | f71effdade6d11da5ab041cadcb283123e9f1126 | 4f8a4435ea67c8611c5180edc7bec1d24f7342ad | refs/heads/master | 2023-08-01T12:55:21.474549 | 2021-09-14T16:19:46 | 2021-09-14T16:19:46 | 402,280,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | import os
import hostlist
from pathlib import Path
import torch
import torch.distributed as dist
import segm.utils.torch as ptu
def init_process(backend="nccl"):
print(f"Starting process with rank {ptu.dist_rank}...", flush=True)
if "SLURM_STEPS_GPUS" in os.environ:
gpu_ids = os.environ["SLURM_STEP_GPUS"].split(",")
os.environ["MASTER_PORT"] = str(12345 + int(min(gpu_ids)))
else:
os.environ["MASTER_PORT"] = str(12345)
if "SLURM_JOB_NODELIST" in os.environ:
hostnames = hostlist.expand_hostlist(os.environ["SLURM_JOB_NODELIST"])
os.environ["MASTER_ADDR"] = hostnames[0]
else:
os.environ["MASTER_ADDR"] = "127.0.0.1"
dist.init_process_group(
backend,
rank=ptu.dist_rank,
world_size=ptu.world_size,
)
print(f"Process {ptu.dist_rank} is connected.", flush=True)
dist.barrier()
silence_print(ptu.dist_rank == 0)
if ptu.dist_rank == 0:
print(f"All processes are connected.", flush=True)
def silence_print(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def sync_model(sync_dir, model):
# https://github.com/ylabbe/cosypose/blob/master/cosypose/utils/distributed.py
sync_path = Path(sync_dir).resolve() / "sync_model.pkl"
if ptu.dist_rank == 0 and ptu.world_size > 1:
torch.save(model.state_dict(), sync_path)
dist.barrier()
if ptu.dist_rank > 0:
model.load_state_dict(torch.load(sync_path))
dist.barrier()
if ptu.dist_rank == 0 and ptu.world_size > 1:
sync_path.unlink()
return model
def barrier():
dist.barrier()
def destroy_process():
dist.destroy_process_group()
| [
"rstrudel@gmail.com"
] | rstrudel@gmail.com |
b1517533ad31f610116c02800cabf177e1f87fdb | 38c35956be6343855914b1c58b8fbd2e40c6e615 | /Iniciantes/2670.py | 98cda4c7d8f4a66c7f1fbff27177d9696c61e8c4 | [] | no_license | LucasBarbosaRocha/URI | b43e4f4a6b3beed935f24839001bea354411c4bd | 2c9bcc13300a9f6243242e483c8f9ec3296a88ad | refs/heads/master | 2020-06-25T05:06:51.297824 | 2019-08-22T04:50:11 | 2019-08-22T04:50:11 | 199,210,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def min(A, B):
if A <= B:
return A
else:
return B
A = input()
B = input()
C = input()
auxA = 0
auxB = 0
auxC = 0
auxA = (B*2)+(C*4)
auxB = (A*2)+(C*2)
auxC = (A*4)+(B*2)
print min(auxC, min(auxA, auxB))
| [
"lucas.lb.rocha@gmail.com"
] | lucas.lb.rocha@gmail.com |
f42f45a5005e4c4967dbd161ea0a0e13f45c2788 | 56584187544e07225736d0b5399cf6aa7a0442aa | /lie_ti/__main__.py | 5f9a1ca05f82b4678da264c383fa9f3d895e7d71 | [
"Apache-2.0"
] | permissive | MD-Studio/lie_ti | 81835626c637e5c8cee6ba33e3df5ee1cd910c90 | 0b3ae93cd9a8b46a6caa40c4f0bed3cb5c6d2b2b | refs/heads/master | 2020-03-19T08:14:16.042990 | 2018-06-11T08:24:37 | 2018-06-11T08:24:37 | 136,187,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | from mdstudio.runner import main
from lie_ti.ti_endpoints import Gromacs_ti_wamp_api
if __name__ == '__main__':
main(Gromacs_ti_wamp_api)
| [
"tifonzafel@gmail.com"
] | tifonzafel@gmail.com |
99b1e1591a75c724da468af44076c6e4b92e6f52 | 6444622ad4a150993955a0c8fe260bae1af7f8ce | /djangoenv/lib/python2.7/site-packages/django/conf/locale/fi/formats.py | 0f9279ec6239856de019d9dd11e7144ad0546ce7 | [] | no_license | jeremyrich/Lesson_RestAPI_jeremy | ca965ef017c53f919c0bf97a4a23841818e246f9 | a44263e45b1cc1ba812059f6984c0f5be25cd234 | refs/heads/master | 2020-04-25T23:13:47.237188 | 2019-03-22T09:26:58 | 2019-03-22T09:26:58 | 173,138,073 | 0 | 0 | null | 2019-03-22T09:26:59 | 2019-02-28T15:34:19 | Python | UTF-8 | Python | false | false | 1,327 | py | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = "j. E Y"
TIME_FORMAT = "G.i"
DATETIME_FORMAT = r"j. E Y \k\e\l\l\o G.i"
YEAR_MONTH_FORMAT = "F Y"
MONTH_DAY_FORMAT = "j. F"
SHORT_DATE_FORMAT = "j.n.Y"
SHORT_DATETIME_FORMAT = "j.n.Y G.i"
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = ["%d.%m.%Y", "%d.%m.%y"] # '20.3.2014' # '20.3.14'
DATETIME_INPUT_FORMATS = [
"%d.%m.%Y %H.%M.%S", # '20.3.2014 14.30.59'
"%d.%m.%Y %H.%M.%S.%f", # '20.3.2014 14.30.59.000200'
"%d.%m.%Y %H.%M", # '20.3.2014 14.30'
"%d.%m.%Y", # '20.3.2014'
"%d.%m.%y %H.%M.%S", # '20.3.14 14.30.59'
"%d.%m.%y %H.%M.%S.%f", # '20.3.14 14.30.59.000200'
"%d.%m.%y %H.%M", # '20.3.14 14.30'
"%d.%m.%y", # '20.3.14'
]
TIME_INPUT_FORMATS = [
"%H.%M.%S", # '14.30.59'
"%H.%M.%S.%f", # '14.30.59.000200'
"%H.%M", # '14.30'
]
DECIMAL_SEPARATOR = ","
THOUSAND_SEPARATOR = "\xa0" # Non-breaking space
NUMBER_GROUPING = 3
| [
"jeremyrich@free.fr"
] | jeremyrich@free.fr |
2dcc61c8a1e88fc015b1c956cea058aa5ec4e17d | f7ba2a286ff32023042fc619fc20a9749133cbd2 | /venv/lib/python2.7/site-packages/awscli/customizations/s3uploader.py | e31b380af48a7a55bd0903cfe496ce90d3e163fd | [
"MIT",
"Apache-2.0"
] | permissive | FreeFlowOrg/researchflo | 6295dbcd3bdd385bac86cb0d1dac8ed337902c19 | 75f22360b8d69bd4c24ae00e0b79734f7c3fab11 | refs/heads/master | 2022-12-11T20:52:01.276263 | 2018-10-01T18:24:55 | 2018-10-01T18:24:55 | 135,716,220 | 0 | 1 | Apache-2.0 | 2022-11-23T05:41:03 | 2018-06-01T12:41:18 | Python | UTF-8 | Python | false | false | 7,341 | py | # Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import hashlib
import logging
import threading
import os
import sys
import botocore
import botocore.exceptions
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
LOG = logging.getLogger(__name__)
class NoSuchBucketError(Exception):
def __init__(self, **kwargs):
msg = self.fmt.format(**kwargs)
Exception.__init__(self, msg)
self.kwargs = kwargs
fmt = ("S3 Bucket does not exist. "
"Execute the command to create a new bucket"
"\n"
"aws s3 mb s3://{bucket_name}")
class S3Uploader(object):
"""
Class to upload objects to S3 bucket that use versioning. If bucket
does not already use versioning, this class will turn on versioning.
"""
def __init__(self, s3_client,
bucket_name,
region,
prefix=None,
kms_key_id=None,
force_upload=False,
transfer_manager=None):
self.bucket_name = bucket_name
self.prefix = prefix
self.kms_key_id = kms_key_id or None
self.force_upload = force_upload
self.s3 = s3_client
self.region = region
self.transfer_manager = transfer_manager
if not transfer_manager:
self.transfer_manager = TransferManager(self.s3)
def upload(self, file_name, remote_path):
"""
Uploads given file to S3
:param file_name: Path to the file that will be uploaded
:param remote_path: be uploaded
:return: VersionId of the latest upload
"""
if self.prefix and len(self.prefix) > 0:
remote_path = "{0}/{1}".format(self.prefix, remote_path)
# Check if a file with same data exists
if not self.force_upload and self.file_exists(remote_path):
LOG.debug("File with same data is already exists at {0}. "
"Skipping upload".format(remote_path))
return self.make_url(remote_path)
try:
# Default to regular server-side encryption unless customer has
# specified their own KMS keys
additional_args = {
"ServerSideEncryption": "AES256"
}
if self.kms_key_id:
additional_args["ServerSideEncryption"] = "aws:kms"
additional_args["SSEKMSKeyId"] = self.kms_key_id
print_progress_callback = \
ProgressPercentage(file_name, remote_path)
future = self.transfer_manager.upload(file_name,
self.bucket_name,
remote_path,
additional_args,
[print_progress_callback])
future.result()
return self.make_url(remote_path)
except botocore.exceptions.ClientError as ex:
error_code = ex.response["Error"]["Code"]
if error_code == "NoSuchBucket":
raise NoSuchBucketError(bucket_name=self.bucket_name)
raise ex
def upload_with_dedup(self, file_name, extension=None):
"""
Makes and returns name of the S3 object based on the file's MD5 sum
:param file_name: file to upload
:param extension: String of file extension to append to the object
:return: S3 URL of the uploaded object
"""
# This construction of remote_path is critical to preventing duplicate
# uploads of same object. Uploader will check if the file exists in S3
# and re-upload only if necessary. So the template points to same file
# in multiple places, this will upload only once
filemd5 = self.file_checksum(file_name)
remote_path = filemd5
if extension:
remote_path = remote_path + "." + extension
return self.upload(file_name, remote_path)
def file_exists(self, remote_path):
"""
Check if the file we are trying to upload already exists in S3
:param remote_path:
:return: True, if file exists. False, otherwise
"""
try:
# Find the object that matches this ETag
self.s3.head_object(
Bucket=self.bucket_name, Key=remote_path)
return True
except botocore.exceptions.ClientError:
# Either File does not exist or we are unable to get
# this information.
return False
def make_url(self, obj_path):
return "s3://{0}/{1}".format(
self.bucket_name, obj_path)
def file_checksum(self, file_name):
with open(file_name, "rb") as file_handle:
md5 = hashlib.md5()
# Read file in chunks of 4096 bytes
block_size = 4096
# Save current cursor position and reset cursor to start of file
curpos = file_handle.tell()
file_handle.seek(0)
buf = file_handle.read(block_size)
while len(buf) > 0:
md5.update(buf)
buf = file_handle.read(block_size)
# Restore file cursor's position
file_handle.seek(curpos)
return md5.hexdigest()
def to_path_style_s3_url(self, key, version=None):
"""
This link describes the format of Path Style URLs
http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
"""
base = "https://s3.amazonaws.com"
if self.region and self.region != "us-east-1":
base = "https://s3-{0}.amazonaws.com".format(self.region)
result = "{0}/{1}/{2}".format(base, self.bucket_name, key)
if version:
result = "{0}?versionId={1}".format(result, version)
return result
class ProgressPercentage(BaseSubscriber):
# This class was copied directly from S3Transfer docs
def __init__(self, filename, remote_path):
self._filename = filename
self._remote_path = remote_path
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def on_progress(self, future, bytes_transferred, **kwargs):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_transferred
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\rUploading to %s %s / %s (%.2f%%)" %
(self._remote_path, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
| [
"saicharan.reddy1@gmail.com"
] | saicharan.reddy1@gmail.com |
e1ffa202e57145658c28e1264023274ecab93315 | 48eb1676712e570f42a19122e186b392b21d99dc | /origin/test_v.py | 55ce8975b839bbf1e0ba32ac931c06526e1acd10 | [] | no_license | AmberzzZZ/unet_vnet_keras | 1e731420522bef74cda77972f47b328bbb868f53 | 19c7d59b89853cb044aa9897729a09961e74a110 | refs/heads/master | 2020-09-09T07:34:21.602302 | 2020-08-27T14:57:05 | 2020-08-27T14:57:05 | 221,389,142 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | from vnet import *
import cv2
import numpy as np
if __name__ == '__main__':
model = vnet(input_size=(256,256,1))
model.load_weights("vnet_membrane_06_dice_0.921.h5", by_name=True)
test_img = cv2.imread("data/membrane/test/0.png", 0)
tmp = cv2.resize(test_img, (256, 256))
tmp = tmp / 255.
tmp = np.reshape(tmp, (1,256,256,1))
mask = model.predict(tmp)
print(np.min(mask), np.max(mask))
# postprocess
mask[mask>=0.5] = 1
mask[mask<0.5] = 0
cv2.imshow("mask", mask[0,:,:,0])
cv2.waitKey(0)
| [
"774524217@qq.com"
] | 774524217@qq.com |
b01a58208ca56810f38a27ed8409cd78a6e8a37b | 90c570444d48308b5e680c497600a3180425a2ff | /buildDataForUI.py | 7446479fbe025f7017193e79a5a4330a1c85cb57 | [
"MIT"
] | permissive | amnh-sciviz/amnh-time-machine | cd55810fe08db69fa06c12113f9db5276aeeeea6 | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | refs/heads/master | 2020-04-22T06:26:14.683159 | 2020-02-20T23:15:32 | 2020-02-20T23:15:32 | 170,190,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,996 | py | # -*- coding: utf-8 -*-
import argparse
import glob
import json
import os
from pprint import pprint
import sys
import lib.eac_utils as eac
import lib.io_utils as io
# input
parser = argparse.ArgumentParser()
parser.add_argument('-fdir', dest="FLOOR_PLANS_DIR", default="img/floor_plans/%s/*.png", help="Directory with floor plans")
parser.add_argument('-rdir', dest="REPORTS_DIR", default="img/annual_reports/%s.jpg", help="Directory with reports")
parser.add_argument('-ldir', dest="LOGOS_DIR", default="img/logos/*.png", help="Directory with logos")
parser.add_argument('-idir', dest="ITEMS_DIR", default="img/historic_thumbnails/%s.jpg", help="Directory with items")
parser.add_argument('-reports', dest="REPORTS_FILE", default="data/annual_reports.csv", help="File with annual report data (from scrapeAnnualReports.py)")
parser.add_argument('-dates', dest="EAC_DATES_FILE", default="data/eac_expeditions.csv", help="File with EAC dates data (from collectDates.py)")
parser.add_argument('-items', dest="ITEMS_FILE", default="data/historic_images.csv", help="File with digital items data (from scrapeDigitalItems.py)")
parser.add_argument('-start', dest="START_YEAR", default=1869, type=int, help="Start year")
parser.add_argument('-end', dest="END_YEAR", default=2019, type=int, help="End year")
parser.add_argument('-out', dest="OUTPUT_FILE", default="data/ui.json", help="File for output")
a = parser.parse_args()
FLOORS = 4
# Make sure output dirs exist
io.makeDirectories(a.OUTPUT_FILE)
def addRanges(items, startYear, endYear):
itemCount = len(items)
sortedItems = sorted(items, key=lambda k:k["year"])
for i, item in enumerate(sortedItems):
fromYear = item["year"] if i > 0 else startYear
toYear = endYear
j = i+1
while j < itemCount:
nextItem = sortedItems[j]
if nextItem["year"] > fromYear:
toYear = nextItem["year"]-1
break
j += 1
sortedItems[i].update({
"yearFrom": fromYear,
"yearTo": toYear
})
return sortedItems
# Retrieve floor plans
floorPlans = []
for i in range(FLOORS):
floor = i + 1
floorDir = a.FLOOR_PLANS_DIR % floor
floorPlanFiles = glob.glob(floorDir)
thisFloorPlans = []
for fn in floorPlanFiles:
year = int(io.getFileBasename(fn))
thisFloorPlans.append({
"image": fn,
"floor": floor,
"year": year
})
thisFloorPlans = addRanges(thisFloorPlans, a.START_YEAR, a.END_YEAR)
floorPlans += thisFloorPlans
# Retrieve logos
logos = []
logoFiles = glob.glob(a.LOGOS_DIR)
for fn in logoFiles:
year = int(io.getFileBasename(fn))
logos.append({
"image": fn,
"year": year
})
logos = addRanges(logos, a.START_YEAR, a.END_YEAR)
# Retrieve reports
reports = []
_, reportData = io.readCsv(a.REPORTS_FILE)
for r in reportData:
year = eac.stringToYear(r["dateIssued"])
if year > 0:
reports.append({
"title": r["title"],
"url": r["url"],
"image": a.REPORTS_DIR % r["id"],
"year": year
})
else:
print("No year found for annual report %s" % r["id"])
reports = addRanges(reports, a.START_YEAR, a.END_YEAR)
# retrieve items
items = []
_, itemData = io.readCsv(a.ITEMS_FILE)
for item in itemData:
year = eac.stringToYear(item["date"])
if year > 0:
items.append({
"title": item["title"],
"url": item["url"],
"image": a.ITEMS_DIR % item["id"],
"year": year
})
else:
print("No year found for item %s" % item["id"])
# retrieve expeditions
expeditions = []
_, eacData = io.readCsv(a.EAC_DATES_FILE)
expeditionData = [e for e in eacData if e["type"]=="Expedition"]
for e in expeditionData:
entry = {
"url": "http://data.library.amnh.org/archives-authorities/id/" + e["id"],
"title": e["name"],
"event": e["dateevent"],
"place": e["dateplace"],
"lon": e["lon"],
"lat": e["lat"]
}
yearFrom, yearTo = eac.stringsToDateRange(e["date"], e["fromdate"], e["todate"], e["name"])
if yearFrom > 0 and yearTo > 0:
if yearFrom == yearTo:
entry["year"] = yearFrom
else:
entry["yearFrom"] = yearFrom
entry["yearTo"] = yearTo
expeditions.append(entry)
# Retrieve museum events
museumData = [e for e in eacData if e["type"]=="Museum"]
museumEvents = []
for e in museumData:
year = eac.stringToYear(e["date"])
if year > 0:
museumEvents.append({
"year": year,
"title": e["dateevent"],
"url": "http://data.library.amnh.org/archives-authorities/id/amnhc_0000001"
})
data = {
"logos": logos,
"floorPlans": floorPlans,
"reports": reports,
"items": items,
"expeditions": expeditions,
"events": museumEvents
}
dataKeys = list(data.keys())
# Create a table of years with indices
years = [[[] for k in range(len(dataKeys))] for y in range(a.END_YEAR-a.START_YEAR+1)]
for dataKey in data:
items = data[dataKey]
dataIndex = dataKeys.index(dataKey)
for itemIndex, item in enumerate(items):
if "yearFrom" in item and "yearTo" in item:
year = item["yearFrom"]
while year <= item["yearTo"]:
yearIndex = year - a.START_YEAR
if 0 <= yearIndex <= (a.END_YEAR-a.START_YEAR):
years[yearIndex][dataIndex].append(itemIndex)
year += 1
else:
yearIndex = item["year"] - a.START_YEAR
if 0 <= yearIndex <= (a.END_YEAR-a.START_YEAR):
years[yearIndex][dataIndex].append(itemIndex)
outData = {
"yearStart": a.START_YEAR,
"yearEnd": a.END_YEAR,
"years": years,
"dataKeys": dataKeys
}
outData.update(data)
with open(a.OUTPUT_FILE, 'w') as f:
json.dump(outData, f)
print("Created %s" % a.OUTPUT_FILE)
| [
"brian@youaremyjoy.org"
] | brian@youaremyjoy.org |
2455a1fc0abe09984ea82a7fa3ee4c6bb8822fb5 | c4fa1ebcdd413c4ab3f0979ee3beead8a8809870 | /providers/edu/pcom/apps.py | 9e3a87e36866e646b8834a248e2321b01069fea9 | [] | no_license | terroni/SHARE | e47f291db7cf100d29a7904fe820e75d29db1472 | a5631f441da1288722c68785b86128c854cbe7c1 | refs/heads/develop | 2020-12-03T02:29:47.381341 | 2016-07-11T19:40:27 | 2016-07-11T19:40:27 | 63,097,148 | 1 | 0 | null | 2016-07-11T19:45:51 | 2016-07-11T19:45:50 | null | UTF-8 | Python | false | false | 487 | py | from share.provider import OAIProviderAppConfig
class AppConfig(OAIProviderAppConfig):
name = 'providers.edu.pcom'
version = '0.0.1'
title = 'pcom'
long_title = 'DigitalCommons@PCOM'
home_page = 'http://digitalcommons.pcom.edu'
url = 'http://digitalcommons.pcom.edu/do/oai/'
approved_sets = [
'biomed',
'pa_systematic_reviews',
'psychology_dissertations',
'scholarly_papers',
'research_day',
'posters',
]
| [
"icereval@gmail.com"
] | icereval@gmail.com |
9e34e216ecf6c0070d75d8934b660ffa00c13928 | f9223fa852ee24d438ef472af33a9afa7135a7f4 | /tests/projects_v0/test_simplelife.py | 10a557a633fcfd69c04e1c5c1c85d4bc3fa657d8 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | data-atuarial/lifelib | c6ea8d51d4bac5dbea167f98e27feecdabfcb8a7 | 7bfcc0c9d0a419502e47be123b6f080862974875 | refs/heads/master | 2023-05-08T01:35:31.292486 | 2021-05-23T04:32:15 | 2021-05-23T04:32:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import sys
import os.path
import pickle
import pathlib
from lifelib.projects_v0.simplelife.scripts import simplelife
from tests.data.generate_testdata import round_signif
if '' not in sys.path:
sys.path.insert(0, '')
model = simplelife.build(load_saved=False)
testdata = \
str(pathlib.Path(__file__).parents[1].joinpath('data/data_simplelife'))
def test_simpleflie():
data = []
proj = model.Projection
for i in range(10, 301, 10):
data.append(round_signif(proj(i).PV_NetCashflow(0), 10))
with open(testdata, 'rb') as file:
data_saved = pickle.load(file)
assert data == data_saved
| [
"fumito.ham@gmail.com"
] | fumito.ham@gmail.com |
e5e9debcef00111d47e3694faca763dcc766c031 | 0d996a22154d2d5fa2096e7676a9cc4e1927b638 | /tests/test_klaxon.py | 687ce7f88ab43b4724a3a6da4c5f28fee3b5e653 | [
"Apache-2.0"
] | permissive | hercules261188/klaxon | 0990bc4458999ac0e6ae6d93a38411ea7976c409 | 2275287ab4a4461314d66ab575050da9675e0478 | refs/heads/master | 2022-08-07T09:11:31.404306 | 2020-05-24T06:39:36 | 2020-05-24T06:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import subprocess as sp
import time
from klaxon import klaxon
import pytest
strings = ["What's good?", "hello, klaxon", ""]
@pytest.mark.parametrize("title", strings)
@pytest.mark.parametrize("subtitle", strings)
@pytest.mark.parametrize("message", strings)
def test_klaxon(title, subtitle, message):
klaxon(title=title, subtitle=subtitle, message=message, sound=None)
sp.run(
f"klaxon "
f'--title "{title}" '
f'--subtitle "{subtitle}" '
f'--message "{message}" '
f'--sound ""',
shell=True,
)
def test_klaxon_invoke_success():
sp.run(["inv", "succeed"])
def test_klaxon_invoke_normal_failure():
sp.run(["inv", "fail-normally"])
def test_klaxon_catastrophic_failure():
sp.run(["inv", "fail-badly"])
| [
"knowsuchagency@gmail.com"
] | knowsuchagency@gmail.com |
85451d9ce547f07b9f10e0e5eb83f5b11247059e | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /autosar/models/application_data_type_subtypes_enum.py | 4c5b231b2294b1b68fa4aa1f1a70aea8a2ac7476 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 587 | py | from enum import Enum
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
# Functional Enum construction: every member's value is exactly its name with
# underscores swapped for hyphens, so the (name, value) pairs are generated
# mechanically instead of being written out twice.
ApplicationDataTypeSubtypesEnum = Enum(
    "ApplicationDataTypeSubtypesEnum",
    [
        (member, member.replace("_", "-"))
        for member in (
            "APPLICATION_ARRAY_DATA_TYPE",
            "APPLICATION_ASSOC_MAP_DATA_TYPE",
            "APPLICATION_COMPOSITE_DATA_TYPE",
            "APPLICATION_DATA_TYPE",
            "APPLICATION_DEFERRED_DATA_TYPE",
            "APPLICATION_PRIMITIVE_DATA_TYPE",
            "APPLICATION_RECORD_DATA_TYPE",
        )
    ],
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
c0f61075ae11487003d1c0360901fcdd23b5b01b | 88ea6ae5a8f97e3771490583d8acecdbe2877fd8 | /zips/plugin.video.vistatv/resources/lib/sources/en/iwatchonline.py | bf61f6ad92e4d2076da1889734fbb8a69b27ef19 | [] | no_license | staycanuca/PersonalDataVistaTV | 26497a29e6f8b86592609e7e950d6156aadf881c | 4844edbfd4ecfc1d48e31432c39b9ab1b3b1a222 | refs/heads/master | 2021-01-25T14:46:25.763952 | 2018-03-03T10:48:06 | 2018-03-03T10:48:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,384 | py | # -*- coding: utf-8 -*-
'''
Cerebro ShowBox Scraper
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['watchonline.to']
self.base_link = 'https://www.watchonline.to'
self.search_link = 'https://www.watchonline.to/search/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self._search(imdb, 'm')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self._search(imdb, 't')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = url.replace('tv-shows/','episode/')
url += '-s%02de%02d' % (int(season), int(episode))
return url
except:
return
def _search(self, imdb, type):
try:
post = ('searchquery=%s&searchin=%s' % (imdb, type))
r = client.request(self.search_link, post=post)
r = dom_parser2.parse_dom(r, 'div', {'class': ['widget','search-page']})[0]
r = dom_parser2.parse_dom(r, 'tbody')[0]
r = dom_parser2.parse_dom(r, 'a', req='href')[0]
if r:
url = r.attrs['href']
return url
else: return
except:
return
    def sources(self, url, hostDict, hostprDict):
        '''
        Scrape the host table on a movie/episode page and build the list of
        source dicts expected by the resolver framework.

        url       -- page URL returned by movie()/episode(); None yields [].
        hostDict  -- list of known hosts used to validate each entry.
        hostprDict -- premium hosts (unused here).
        Returns a list of dicts with keys source/quality/language/url/info/
        direct/debridonly; empty on any scrape failure (errors are swallowed).
        '''
        try:
            sources = []
            if url == None: return sources
            r = client.request(url)
            # Each <tr> of the table is one candidate host link.
            r = dom_parser2.parse_dom(r, 'tbody')[0]
            r = dom_parser2.parse_dom(r, 'tr')
            # Collect (anchor, host icon, raw row HTML) per row.
            r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
                dom_parser2.parse_dom(i, 'img', req='src'), \
                i.content) for i in r if i]
            r = [(i[0][0].attrs['href'], i[1][0].attrs['src'], i[2]) for i in r if i[0] and i[1] and i[2]]
            for i in r:
                try:
                    # NOTE: `url` is rebound here, shadowing the parameter.
                    url = i[0]
                    # Host name is encoded in the icon URL's domain= query arg.
                    host = i[1].split('domain=')[-1]
                    info = []
                    # Case-sensitive check: only the literal "3D" cell tags info.
                    if '<td>3D</td>' in i[2]: info.append('3D')
                    # Map the quality cell to the framework's quality labels.
                    if '<td>hd</td>' in i[2].lower(): quality = '1080p'
                    elif '<td>hdtv</td>' in i[2].lower(): quality = '720p'
                    elif '<td>cam</td>' in i[2].lower(): quality = 'CAM'
                    elif '<td>dvd</td>' in i[2].lower(): quality = 'SD'
                    elif '<td>3d</td>' in i[2].lower(): quality = '1080p'
                    else: quality = 'SD'
                    info = ' | '.join(info)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    # Skip rows whose host is not in the known-host list.
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': False})
                except:
                    # Best-effort per row: a bad row never aborts the scrape.
                    pass
            return sources
        except:
            return sources
def resolve(self, url):
try:
u = client.request(url, output='extended')
url = re.findall("url=([^']+)", str(u[2]))[0]
return url
except:
return | [
"biglad@mgawow.co.uk"
] | biglad@mgawow.co.uk |
d3562d2150e67d8aeb1359b305bff4ce8985af8a | 5f1c3a2930b20c3847496a249692dc8d98f87eee | /Pandas/Pandas_PivotTable/Question7.py | 065d21ee31a997fe8414a8a57701000112d4e603 | [] | no_license | AmbyMbayi/CODE_py | c572e10673ba437d06ec0f2ae16022d7cbe21d1c | 5369abf21a8db1b54a5be6cbd49432c7d7775687 | refs/heads/master | 2020-04-24T05:01:46.277759 | 2019-02-22T08:26:04 | 2019-02-22T08:26:04 | 171,723,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | """write a pandas program to create a pivot table and count the manager wise sale and mean value of sale amount
"""
import pandas as pd
import numpy as np
# Load the sales workbook, then pivot it: one row per manager carrying the
# mean sale amount and the number of sales (len) attributed to that manager.
df = pd.read_excel('SaleData.xlsx')
table = pd.pivot_table(
    df,
    index=["Manager"],
    values=["Sale_amt"],
    aggfunc=[np.mean, len],
)
print(table)
"root@localhost.localdomain"
] | root@localhost.localdomain |
3c99e8da58ddb542648a4aa073fb61d093b199e9 | a9df288b1b05b93504dcd3cdccec5b7db859043c | /geometory/core_geometry_medial_axis_offset.py | 7e9887c21c1c4e11632c10fbaf811622933ba13b | [] | no_license | tnakaicode/pythonocct-demos | 16bf60ba1490b3a68c4954972ec4d6a890bef2c8 | 11e14089541e4310528d1b9c9571daf3f95b1845 | refs/heads/master | 2022-09-27T13:09:52.129514 | 2020-06-07T02:39:04 | 2020-06-07T02:39:04 | 270,162,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,845 | py | #!/usr/bin/env python
##Copyright 2009-2016 Jelle Feringa (jelleferinga@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
# this example was ported from: http://heekscnc.blogspot.nl/2009/09/occ-offset.html, by Dan Heeks
from OCCT.BRepOffsetAPI import BRepOffsetAPI_MakeOffset
from OCCT.Visualization import BasicViewer
from OCCT.GeomAbs import GeomAbs_Arc
from OCCT.gp import gp_Pnt
from OCC.Extend.ShapeFactory import make_edge, make_vertex, make_wire, make_face
from OCC.Extend.TopologyUtils import TopologyExplorer
# NOTE(review): init_display is not imported anywhere in this file — in
# pythonocc examples it normally comes from OCC.Display.SimpleGui; confirm
# the missing import, otherwise this line raises NameError at import time.
display, start_display, add_menu, add_function_to_menu = init_display()
def boolean_cut(shapeToCutFrom, cuttingShape):
    """Return the boolean difference shapeToCutFrom minus cuttingShape."""
    from OCCT.BRepAlgoAPI import BRepAlgoAPI_Cut
    return BRepAlgoAPI_Cut(shapeToCutFrom, cuttingShape).Shape()
def make_face_to_contour_from():
    """Build the planar demo face: a notched hexagonal outline with a
    quadrilateral hole subtracted from it."""
    # Outer boundary: six corners joined into a closed loop (last -> first).
    outer_pts = [(0, 0, 0), (10, 0, 0), (7, 10, 0),
                 (10, 20, 0), (0, 20, 0), (3, 10, 0)]
    outer_verts = [make_vertex(gp_Pnt(*p)) for p in outer_pts]
    outer_edges = [
        make_edge(outer_verts[i], outer_verts[(i + 1) % len(outer_verts)])
        for i in range(len(outer_verts))
    ]
    outer_face = make_face(make_wire(outer_edges))
    # Inner boundary: the four-point hole, also a closed loop.
    inner_pts = [(2, 2, 0), (8, 2, 0), (7, 3, 0), (3, 3, 0)]
    inner_verts = [make_vertex(gp_Pnt(*p)) for p in inner_pts]
    inner_edges = [
        make_edge(inner_verts[i], inner_verts[(i + 1) % len(inner_verts)])
        for i in range(len(inner_verts))
    ]
    hole_face = make_face(make_wire(*inner_edges))
    # Subtract the hole from the outline.
    return boolean_cut(outer_face, hole_face)
def create_offsets(face, nr_of_counters, distance_between_contours):
    """Lazily yield successive inward offset contours of ``face``.

    A single BRepOffsetAPI_MakeOffset builder is seeded with every wire of
    the face, then re-run at increasing inward (negative) distances; each
    successfully computed offset shape is yielded.
    """
    builder = BRepOffsetAPI_MakeOffset()
    builder.Init(GeomAbs_Arc)
    for wire in TopologyExplorer(face).wires():
        builder.AddWire(wire)
    for step in range(nr_of_counters):
        builder.Perform(-distance_between_contours * step)
        if builder.IsDone():
            yield builder.Shape()
# Build the holed face, draw it with 50 inward offset contours spaced 0.12
# apart, then hand control to the viewer's event loop.
face = make_face_to_contour_from()
display.DisplayShape(face)
for contour in create_offsets(face, 50, 0.12):
    display.DisplayShape(contour)
display.FitAll()
start_display()
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
b7431460ba56f58a97bd43540f84c61cbb64945d | 12a8cc08189cbaf84f4a3fd3a54595097a03ef3c | /migrations/versions/943ad2ba54d5_initial_migration.py | 7e33fdf88b1fb68660e5f1c5f0e8bbec72c5ba6b | [] | no_license | kepha-okari/watchlist-2 | 723acc9a616f10b1caab4c245763856b5c055c54 | 0cadf9f905d8788dc0999d4addd506d03949d33c | refs/heads/master | 2022-03-27T21:34:43.102901 | 2017-12-16T08:58:45 | 2017-12-16T08:58:45 | 114,447,275 | 0 | 1 | null | 2020-01-28T18:48:26 | 2017-12-16T08:57:20 | Python | UTF-8 | Python | false | false | 793 | py | """initial migration
Revision ID: 943ad2ba54d5
Revises:
Create Date: 2017-12-13 14:18:22.541000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '943ad2ba54d5'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles')
# ### end Alembic commands ###
| [
"kephaokari@gmail.com"
] | kephaokari@gmail.com |
ac25654f162b59399b03208a81b615468f5ada8b | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/advisory_1.py | cb00e56b170435c05611e390c7fc8fa9d50ac8ad | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 3,346 | py | from __future__ import annotations
from dataclasses import dataclass, field
from xsdata.models.datatype import XmlDate
from travelport.models.type_advisory_type_1 import TypeAdvisoryType1
from travelport.models.type_geo_political_area_type_1 import TypeGeoPoliticalAreaType1
from travelport.models.type_key_element_1 import TypeKeyElement1
__NAMESPACE__ = "http://www.travelport.com/schema/sharedUprofile_v20_0"
@dataclass
class Advisory1(TypeKeyElement1):
"""A categorization of travel documents and other identification, or other
warnings that an agency may need to share with agents.
Examples include visas requirements, travel permit requirements,
passport requirements, etc. May also include government travel or
health advisories.
Parameters
----------
type_value
A categorization of travel documents and other identification, or
other warnings that an agency may need to share with agents.
Examples include visas requirements, travel permit requirements,
passport requirements, etc. May also include government travel or
health advisories.
start_date
The start date of the advisory
end_date
The end date of the advisory
summary
A summary of this Advisory
description
priority_order
Priority order associated with this Advisory.
geo_political_area_type
The type of the geographical location.
geo_political_area_code
The location code of the geographical location.
"""
class Meta:
name = "Advisory"
namespace = "http://www.travelport.com/schema/sharedUprofile_v20_0"
type_value: None | TypeAdvisoryType1 = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
"required": True,
}
)
start_date: None | XmlDate = field(
default=None,
metadata={
"name": "StartDate",
"type": "Attribute",
"required": True,
}
)
end_date: None | XmlDate = field(
default=None,
metadata={
"name": "EndDate",
"type": "Attribute",
}
)
summary: None | str = field(
default=None,
metadata={
"name": "Summary",
"type": "Attribute",
"min_length": 1,
"max_length": 128,
}
)
description: None | str = field(
default=None,
metadata={
"name": "Description",
"type": "Attribute",
"min_length": 1,
"max_length": 1000,
}
)
priority_order: None | int = field(
default=None,
metadata={
"name": "PriorityOrder",
"type": "Attribute",
"min_inclusive": 1,
"max_inclusive": 99,
}
)
geo_political_area_type: None | TypeGeoPoliticalAreaType1 = field(
default=None,
metadata={
"name": "GeoPoliticalAreaType",
"type": "Attribute",
"required": True,
}
)
geo_political_area_code: None | str = field(
default=None,
metadata={
"name": "GeoPoliticalAreaCode",
"type": "Attribute",
"required": True,
"max_length": 6,
}
)
| [
"chris@komposta.net"
] | chris@komposta.net |
184fe1d15a17d8f13b48f3ddab37b627f68c0173 | cd142a4e15d3576546fcb44841417039f0b8fb00 | /build/hector_slam/hector_compressed_map_transport/catkin_generated/pkg.develspace.context.pc.py | a761e8917371071b2ce04b889b66afc8b28d3b26 | [] | no_license | mgou123/rplidar | 4389819eb1998d404d1066c7b4a983972d236ce7 | 608c1f6da2d3e5a8bac06e8d55d8569af828a40b | refs/heads/master | 2022-11-10T05:51:56.403293 | 2020-06-29T04:16:14 | 2020-06-29T04:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_compressed_map_transport"
PROJECT_SPACE_DIR = "/home/xu/dogkin_ws/devel"
PROJECT_VERSION = "0.3.5"
| [
"492798337@qq.com"
] | 492798337@qq.com |
dd96b7881c8ba1ba5d736d3c23ed1b9aa28a2385 | 10d74a59b8c3bcada4709ad2985b917b88cb1838 | /tennis/migrations/0005_match_klaar.py | 2ef87553fc9ba9d9f2030d588b442a8d83522059 | [] | no_license | specialunderwear/tennis | 994f76b5cb34523e4f8478360ca2679626b88cb4 | e00e38eb42fd3bee2f20dc7bf67e94df37e07b7a | refs/heads/master | 2021-01-23T16:27:56.296951 | 2017-06-04T08:03:43 | 2017-06-04T08:03:43 | 93,298,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-15 11:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tennis', '0004_auto_20160515_1051'),
]
operations = [
migrations.AddField(
model_name='match',
name='klaar',
field=models.BooleanField(default=False),
),
]
| [
"lars@permanentmarkers.nl"
] | lars@permanentmarkers.nl |
789859cc20a96446a1452c168e05ce06801b3a1d | 72ea5f014282c4d1a4a0c70f9553bde7882962b4 | /mufins-project/mufins/tests/common/dataset/dataset_memory/test_data_set_memory.py | 8dfcd686d74efa0e4fd1db72376ef703432cd3ba | [
"MIT"
] | permissive | werywjw/mBERT-FineTuning | 6eb2b93a0f328f4af3cbd1ba5e042baadcb67428 | 7b64d0a91e9aa23eb3ace8c5c19262e2574f24d7 | refs/heads/master | 2023-08-08T01:41:06.716441 | 2021-09-14T20:21:25 | 2021-09-14T20:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | '''
Unit test for DatasetMemory class in dataset module.
'''
import unittest
from mufins.common.dataset.dataset_memory import DatasetMemory
from mufins.tests.common.dataset.mock_data_spec import MockDataSpec
from mufins.common.error.incompatible_existing_data import IncompatibleExistingDataException
from mufins.common.error.invalid_state import InvalidStateException
#########################################
class TestDatasetMemory(unittest.TestCase):
    '''
    Unit tests for DatasetMemory: initialisation constraints and
    read/write behaviour across writable and read-only sessions.
    '''
    #########################################
    def test_init(
        self,
    ) -> None:
        '''
        init() succeeds (returns False) and is idempotent for the same size,
        but rejects a conflicting size with IncompatibleExistingDataException.
        '''
        spec = MockDataSpec()
        dset = DatasetMemory[str](spec)
        # First and repeated init with size 3 both return False.
        self.assertFalse(dset.init(3))
        self.assertFalse(dset.init(3))
        # Re-initialising with a different size must be rejected.
        with self.assertRaises(IncompatibleExistingDataException):
            dset.init(10)
    ########################################
    def test_readwrite(
        self,
    ) -> None:
        '''
        Test the Dataset class when it is read from and modified:
        writes in a writable session, state errors after close(), and
        read-only enforcement plus data retrieval in a second session.
        '''
        spec = MockDataSpec()
        dset = DatasetMemory[str](spec)
        dset.init(3)
        dset.load(as_readonly=False)
        # Populate rows 0-1 whole-row and row 2 field-by-field.
        dset.set_row(0, {'a': [0,1,2], 'b':[10]})
        dset.set_row(1, {'a': [3,4,5], 'b':[11]})
        dset.set_field(2, 'a', [6,7,8])
        dset.set_field(2, 'b', [12])
        dset.close()
        # After close(), any access must raise InvalidStateException.
        with self.assertRaises(InvalidStateException):
            dset.get_row(0)
        with self.assertRaises(InvalidStateException):
            dset.get_field(0, 'a')
        # Reopen read-only: writes are rejected, earlier data is preserved.
        dset.init(3)
        dset.load(as_readonly=True)
        with self.assertRaises(InvalidStateException):
            dset.set_row(0, {'a': [0,0,0], 'b': [0]})
        with self.assertRaises(InvalidStateException):
            dset.set_field(0, 'a', [0,0,0])
        self.assertEqual(
            {name: value.tolist() for (name, value) in dset.get_row(0).items()},
            {'a': [0,1,2], 'b': [10]},
        )
        self.assertEqual(
            {name: value.tolist() for (name, value) in dset.get_row(1).items()},
            {'a': [3,4,5], 'b': [11]},
        )
        self.assertEqual(
            {'a': dset.get_field(2, 'a').tolist(), 'b': dset.get_field(2, 'b').tolist()},
            {'a': [6,7,8], 'b': [12]},
        )
        # get_data: the filter drops row 1; the mapper projects field 'a'.
        self.assertEqual(
            list(dset.get_data(
                batch_size=2,
                value_filter = lambda i, x: i != 1,
                value_mapper = lambda i, x: x['a'].tolist(),
            )),
            [
                [0, 1, 2],
                [6, 7, 8],
            ]
        )
        dset.close()
#########################################
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| [
"marctanti@gmail.com"
] | marctanti@gmail.com |
8a7991a27e16210731133a2c8a7913e4692d4f07 | 22e53332f0f1023a202e3094a08257d7da140c07 | /app/App.py | 5db136e94ee13d74c43b4ace3339566ff3d8ea0e | [] | no_license | generaldave/10_PRINT | 1e1ef44b46c0c264505f002a42b353642f439c9d | 5bfe21bc6f6ee762f61a6513c145fe83b1e2dab1 | refs/heads/master | 2021-05-04T18:40:14.941229 | 2017-10-06T03:32:44 | 2017-10-06T03:32:44 | 105,950,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,264 | py | ################################################################################
# #
# David Fuller #
# #
# App class: App initializer #
# #
# Created on 2016-12-29 #
# #
################################################################################
################################################################################
# #
# IMPORT STATEMENTS #
# #
################################################################################
from .Constants import * # Constants file
from .processing import Processing # Processing style package
import pygame # For GUI
import random
################################################################################
# #
# APP CLASS #
# #
################################################################################
class App(object):
    """Top-level application: opens a pygame window and draws the classic
    Commodore-64 "10 PRINT" maze (random diagonal lines on a grid).

    ``screen_resolution``, ``title``, ``black``, ``white``, ``fifty_percent``
    and ``fps`` come from the star-import of ``.Constants`` at the top of the
    file; ``fifty_percent`` is presumably 0.5 — confirm in Constants.
    """
    # -- Constructor --------------------------------------------------------
    def __init__(self, appDirectory: str) -> None:
        """Store the app directory, build the GUI, then enter the main loop.

        Note: this blocks until the window is closed, because runApp() is
        called directly from the constructor.
        """
        self.appDirectory = appDirectory
        # Set up GUI
        self.setupGUI()
        # Run app
        self.runApp()
    # -- Methods ------------------------------------------------------------
    # Mehtod sets up GUI
    def setupGUI(self) -> None:
        """Initialise pygame, the window, the frame clock, and the
        Processing-style drawing helper."""
        # Screen attributes
        pygame.init()
        self.screen = pygame.display.set_mode(screen_resolution)
        pygame.display.set_caption(title)
        self.clock = pygame.time.Clock()
        self.processing = Processing(self.screen)
    # Method runs app
    def runApp(self) -> None:
        """Main loop: draw one random diagonal per frame, left-to-right,
        top-to-bottom, until the grid is full; F5 clears and restarts."""
        x = 0
        y = 0
        spacing = 20  # grid cell size in pixels
        running = True
        while running:
            for event in pygame.event.get():
                # Handle quit event
                if event.type == pygame.QUIT:
                    running = False
                # Handle keyboard input: F5 clears the screen and restarts
                if event.type == pygame.KEYUP:
                    if event.key == pygame.K_F5:
                        self.screen.fill(black)
                        x = 0
                        y = 0
            # 10 PRINT: draw "\" or "/" in the next cell, chosen at random
            self.processing.stroke(white)
            if y < screen_resolution.height:
                if random.random() < fifty_percent:
                    self.processing.line(x, y, x + spacing, y + spacing)
                else:
                    self.processing.line(x, y + spacing, x + spacing, y)
                # Advance to the next cell, wrapping to the next row at the edge
                x = x + spacing
                if x >= screen_resolution.width:
                    x = 0
                    y = y + spacing
            # Update Screen
            pygame.display.update()
            self.clock.tick(fps)
        # Close app cleanly
        pygame.quit()
| [
"icepicktoeye@gmail.com"
] | icepicktoeye@gmail.com |
1f8bfbd0ccb8664b2485049b0c5c54e8e745b320 | 2315b73e3cb4d166de6f5fa81db5a8637b091d44 | /runtests.py | de992cb6d630b513eaf8c9093e9e369a7acb9cc3 | [
"MIT"
] | permissive | elpatiostudio/wagtail-geo-widget | e8f64b213665b00265152b8ee5cd012f5efda11c | c7892f089344cb49a529d0a5e2b175d9064b649f | refs/heads/main | 2023-08-11T14:48:17.369751 | 2021-10-14T16:20:39 | 2021-10-14T16:20:39 | 411,716,231 | 0 | 0 | MIT | 2021-09-29T14:53:03 | 2021-09-29T14:53:03 | null | UTF-8 | Python | false | false | 1,365 | py | #!/usr/bin/env python
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
params = dict(
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'wagtailgeowidget': {
'handlers': ['console'],
'level': 'ERROR',
'propagate': True,
},
},
},
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sites',
'wagtail.core',
'wagtail.sites',
'wagtail.users',
'wagtail.images',
'taggit',
'wagtailgeowidget',
"tests",
],
MIDDLEWARE_CLASSES=[],
ROOT_URLCONF='tests.urls',
)
settings.configure(**params)
def runtests():
argv = sys.argv[:1] + ["test"] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == "__main__":
runtests()
| [
"martin@marteinn.se"
] | martin@marteinn.se |
c397b9f629c7bef937b021875fdf2c11594d8082 | 99ba551645dc9beed36f0478b396977c50c3e7ef | /leetcode-vscode/653.两数之和-iv-输入-bst.py | 2e4246abd4fd777e34a74dca4789cff647f07de3 | [] | no_license | wulinlw/leetcode_cn | 57381b35d128fb3dad027208935d3de3391abfd0 | b0f498ebe84e46b7e17e94759dd462891dcc8f85 | refs/heads/master | 2021-08-09T17:26:45.688513 | 2021-07-15T14:38:30 | 2021-07-15T14:38:30 | 134,419,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | #
# @lc app=leetcode.cn id=653 lang=python3
#
# [653] 两数之和 IV - 输入 BST
#
# https://leetcode-cn.com/problems/two-sum-iv-input-is-a-bst/description/
#
# algorithms
# Easy (54.00%)
# Likes: 116
# Dislikes: 0
# Total Accepted: 12.1K
# Total Submissions: 22.4K
# Testcase Example: '[5,3,6,2,4,null,7]\n9'
#
# 给定一个二叉搜索树和一个目标结果,如果 BST 中存在两个元素且它们的和等于给定的目标结果,则返回 true。
#
# 案例 1:
#
#
# 输入:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
#
# Target = 9
#
# 输出: True
#
#
#
#
# 案例 2:
#
#
# 输入:
# 5
# / \
# 3 6
# / \ \
# 2 4 7
#
# Target = 28
#
# 输出: False
#
#
#
#
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findTarget(self, root: TreeNode, k: int) -> bool:
if not root:return False
m = {}
def dfs(root):
if not root:return
if k-root.val in m:
return True
else:
m[root.val] = 1
l = dfs(root.left)
r = dfs(root.right)
return l or r
return dfs(root)
# @lc code=end
t1 = TreeNode(5)
t2 = TreeNode(3)
t3 = TreeNode(6)
t4 = TreeNode(2)
t5 = TreeNode(4)
t6 = TreeNode(7)
root = t1
root.left = t2
root.right = t3
t2.left = t4
t2.right = t5
t3.right = t6
o = Solution()
print(o.findTarget(root, 9))
| [
"wulinlw@gmail.com"
] | wulinlw@gmail.com |
9b634cf9201e6e3d17131b2195aa525fa33e3247 | aec185bb962d42a3068be486ecff9ead0e0fd310 | /setup.py | b3dc308ed05dd4aedd48407024768d1cef877d99 | [] | no_license | collective/collective.thememanager | c7b2975d7959ab67c1e081469cac9bfcd2ebc87e | 730095b3703063d8fb324cb74b86483962a44edb | refs/heads/master | 2023-03-22T10:58:46.289286 | 2011-05-25T14:00:23 | 2011-05-25T14:00:23 | 2,137,663 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | from setuptools import setup, find_packages
import os
version = '1.0dev'

# Long description shown on PyPI: the README followed by the changelog.
long_description = (
    open("README.txt").read()
    + "\n"
    + open(os.path.join("docs", "HISTORY.txt")).read()
)

setup(
    name='collective.thememanager',
    version=version,
    description="A diazo theme manager for Plone",
    long_description=long_description,
    # Get more strings from
    # http://pypi.python.org/pypi?:action=list_classifiers
    classifiers=[
        "Framework :: Plone",
        "Programming Language :: Python",
    ],
    keywords='',
    author='JeanMichel FRANCOIS aka toutpt',
    author_email='toutpt@gmail.com',
    url='http://svn.plone.org/svn/collective/',
    license='GPL',
    packages=find_packages(exclude=['ez_setup']),
    namespace_packages=['collective'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'setuptools',
        'plone.app.theming',
        'plone.app.dexterity',
        # -*- Extra requirements: -*-
    ],
    entry_points="""
      # -*- Entry points: -*-
      [z3c.autoinclude.plugin]
      target = plone
      """,
    setup_requires=["PasteScript"],
    paster_plugins=["ZopeSkel"],
)
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
7a8d1cf47eba4fc1d127c9ff50c5c5d717298517 | 31a766fcae3779b05796534c354286083502f74a | /python/onshape_client/models/bt_metrics_list_params.py | 1aad5194f2df3e330bb95037ef729e71ce2c68ec | [] | no_license | nychang/onshape-clients | 5ea21e73a05948f5e232d4851eb8ae8a6b8c75c8 | 9c97baae57f80e3922726443584e4cc50b99623f | refs/heads/master | 2020-05-06T20:35:28.212953 | 2019-04-05T20:38:19 | 2019-04-05T20:38:19 | 180,243,972 | 0 | 0 | null | 2019-04-08T22:43:59 | 2019-04-08T22:43:59 | null | UTF-8 | Python | false | false | 3,248 | py | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
OpenAPI spec version: 1.96
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BTMetricsListParams(object):
    """Generated OpenAPI model holding a list of collected metrics.

    Attributes:
        openapi_types: attribute name -> declared OpenAPI type.
        attribute_map: attribute name -> JSON key in the API payload.
    """

    openapi_types = {
        'collected_metrics': 'list[BTMetricParams]'
    }

    attribute_map = {
        'collected_metrics': 'collectedMetrics'
    }

    def __init__(self, collected_metrics=None):  # noqa: E501
        """Build a BTMetricsListParams, optionally seeding collected_metrics."""
        self._collected_metrics = None
        self.discriminator = None
        if collected_metrics is not None:
            self.collected_metrics = collected_metrics

    @property
    def collected_metrics(self):
        """The list of BTMetricParams gathered for this request."""
        return self._collected_metrics

    @collected_metrics.setter
    def collected_metrics(self, collected_metrics):
        """Set the list of BTMetricParams gathered for this request."""
        self._collected_metrics = collected_metrics

    def to_dict(self):
        """Returns the model properties as a dict"""
        def serialize(value):
            # Recurse into nested models (anything exposing to_dict),
            # lists of them, and dicts whose values may be models.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, "to_dict") else v
                        for k, v in value.items()}
            return value

        return {attr: serialize(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, BTMetricsListParams):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"ethan.keller@gmail.com"
] | ethan.keller@gmail.com |
3410e22e98bb2aad51b258dd3ec739ebc0213874 | cb35df97989fcc46831a8adb8de3434b94fd2ecd | /tests/implicitron/test_eval_demo.py | 37df395db1cb871ae2e0f54f224d404056079fb1 | [
"MIT",
"BSD-3-Clause"
] | permissive | facebookresearch/pytorch3d | 6d93b28c0f36a4b7efa0a8143726200c252d3502 | a3d99cab6bf5eb69be8d5eb48895da6edd859565 | refs/heads/main | 2023-09-01T16:26:58.756831 | 2023-08-26T20:55:56 | 2023-08-26T20:55:56 | 217,433,767 | 7,964 | 1,342 | NOASSERTION | 2023-08-25T10:00:26 | 2019-10-25T02:23:45 | Python | UTF-8 | Python | false | false | 775 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from pytorch3d.implicitron import eval_demo
from tests.common_testing import interactive_testing_requested
from .common_resources import CO3D_MANIFOLD_PATH
"""
This test runs a single sequence eval_demo, useful for debugging datasets.
It only runs interactively.
"""
class TestEvalDemo(unittest.TestCase):
    """Single-sequence eval_demo smoke test; runs only interactively."""

    def test_a(self):
        # Only exercise the (dataset-heavy) demo when interactive testing
        # has been explicitly requested; otherwise do nothing.
        if interactive_testing_requested():
            os.environ["CO3D_DATASET_ROOT"] = CO3D_MANIFOLD_PATH
            eval_demo.evaluate_dbir_for_category("donut", single_sequence_id=0)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
2c06c9127680468e5c1970dad5384c5d306a10bd | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /cascaded_networks/models/tdl.py | 39692f4cabcdbc128210b1583adf0c111ffb6f2d | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 2,301 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tapped Delay Line handler."""
import torch
import torch.nn as nn
class OneStepDelayKernel(nn.Module):
  """One-step tapped delay line (single-slot queue).

  Each forward call returns the tensor stored on the previous call and
  stores a clone of the current input. The very first call (empty slot)
  returns a zeros tensor shaped like the input, with requires_grad set.
  """

  def __init__(self, *args, **kwargs):
    """Create the kernel with an empty delay slot."""
    super().__init__()
    self.reset()

  def reset(self):
    """Empty the delay slot."""
    self.state = None

  def forward(self, current_state):
    """Emit the previously stored tensor and remember ``current_state``."""
    if self.state is None:
      delayed = torch.zeros_like(current_state)
      delayed.requires_grad = True
    else:
      delayed = self.state
    self.state = current_state.clone()
    return delayed
class ExponentiallyWeightedSmoothingKernel(nn.Module):
  """Exponentially Weighted Smoothing Kernel.

  state(t) = alpha * state(t-1) + (1 - alpha) * input(t)

  alpha=0.0
    --> state(t) = current input; functionally a sequential ResNet.
  alpha=1.0
    --> state(t) = previous state; functionally a 1-step tapped delay line.
  0.0 < alpha < 1.0
    --> continuous interpolation between the two.
  """

  def __init__(self, alpha=0.0):
    """Create the kernel; ``alpha`` weights the previous state."""
    super().__init__()
    self._alpha = alpha
    self.reset()

  def reset(self):
    """Forget any accumulated state."""
    self.state = None

  def forward(self, current_state):
    """Blend the stored state with ``current_state``; return the new state."""
    if self.state is None:
      # Empty history: start from zeros, matching the input's shape.
      previous = torch.zeros_like(current_state)
      previous.requires_grad = True
    else:
      previous = self.state
    self.state = (self._alpha * previous
                  + (1 - self._alpha) * current_state.clone())
    return self.state
return self.state
def setup_tdl_kernel(tdl_mode, kwargs):
  """Temporal kernel factory.

  Args:
    tdl_mode: 'OSD' for a one-step delay line, or 'EWS' for exponentially
      weighted smoothing (reads kwargs['tdl_alpha']).
    kwargs: dict of extra kernel options.

  Returns:
    The constructed kernel module.

  Raises:
    ValueError: if ``tdl_mode`` is not a recognised mode. (The original
      code fell through to an UnboundLocalError instead, which hid the
      real problem from the caller.)
  """
  if tdl_mode == 'OSD':
    tdline = OneStepDelayKernel()
  elif tdl_mode == 'EWS':
    tdline = ExponentiallyWeightedSmoothingKernel(kwargs['tdl_alpha'])
  else:
    raise ValueError('Unknown tdl_mode: %r' % (tdl_mode,))
  return tdline
return tdline
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
8022a5f6da175cacb32efa541bcfe8e34d814d45 | c5a627121168e9be7c1aabb04e41c9f84faa6cc6 | /scoring.py | 3b81e4f45e7aa64040e53d66af3c744caf332b56 | [] | no_license | etture/SDSS_custom | cf58bd8cb526203c653d960f893dc64beb18ab0e | 1a4a4ad2b5ca7c3e0114e7893017a1a15f8cef19 | refs/heads/master | 2021-01-16T04:52:00.121540 | 2020-02-25T16:48:25 | 2020-02-25T16:48:25 | 242,981,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | import pandas as pd
from sklearn.metrics import log_loss
import pickle
import fire
# Mapping from SDSS spectral class name to the integer label id used both
# for the pickled ground-truth labels and for the submission's probability
# columns (so predictions and truth share one label space for log_loss).
rename_dict = {'STAR_WHITE_DWARF': 0, 'STAR_CATY_VAR':1, 'STAR_BROWN_DWARF':2,
               'SERENDIPITY_RED':3, 'REDDEN_STD':4, 'STAR_BHB':5, 'GALAXY':6,
               'SERENDIPITY_DISTANT':7, 'QSO':8, 'SKY':9, 'STAR_RED_DWARF':10, 'ROSAT_D':11,
               'STAR_PN':12, 'SERENDIPITY_FIRST':13, 'STAR_CARBON':14, 'SPECTROPHOTO_STD':15,
               'STAR_SUB_DWARF':16, 'SERENDIPITY_MANUAL':17, 'SERENDIPITY_BLUE':18}
def get_ground_truth(filename):
    """Load pickled ground-truth class names and map them to integer ids.

    Args:
        filename: path to a pickle file holding an iterable of class-name
            strings (each must be a key of ``rename_dict``).

    Returns:
        list of int label ids, in the same order as the pickled labels.
    """
    # Bug fix: the original never closed the file handle it opened; use a
    # context manager so it is closed even if pickle.load raises.
    # NOTE(review): pickle.load executes arbitrary code — only use this on
    # trusted files.
    with open(filename, "rb") as pickle_in:
        labels = pickle.load(pickle_in)
    return [rename_dict[label] for label in labels]
def get_submission_file(filename):
    """Read a submission CSV and rename its class-name columns to int ids.

    Columns whose headers are keys of ``rename_dict`` come back keyed by
    the corresponding integer label id; other columns are untouched.
    """
    frame = pd.read_csv(filename)
    return frame.rename(columns=rename_dict)
def get_score(sub_file, ground_truth_filename='ybigta_validate_full.pickle'):
    """Compute multi-class log loss of a submission against ground truth.

    Args:
        sub_file: path to the submission CSV (first column is an id; the
            remaining columns are per-class probabilities).
        ground_truth_filename: path to the pickled ground-truth labels.

    Returns:
        float log loss from sklearn.metrics.log_loss.
    """
    truth = get_ground_truth(ground_truth_filename)
    predictions = get_submission_file(sub_file)
    # Drop the leading id column; score only the probability columns.
    probability_columns = predictions.columns[1:]
    return log_loss(truth, predictions[probability_columns].values)
if __name__ == '__main__':
    # Expose get_score as a command-line interface via python-fire:
    #   python scoring.py <sub_file> [--ground_truth_filename=...]
    fire.Fire(get_score)
| [
"etture@gmail.com"
] | etture@gmail.com |
6e945e92ea47195f26e854a33ba13c01069e73dc | c5759366f8b2cb2e129df0637b62774225a0c41a | /code/tensor2tensor/tensor2tensor/problems.py | 1337c2a3388e967df697d25012cf4c0fa55d7d15 | [
"Apache-2.0"
] | permissive | cake-lab/transient-deep-learning | f8646a4386528aa147d8d3dcdff8089985870041 | 87c6717e4026801623cf0327e78ad57f51cb1461 | refs/heads/master | 2022-11-02T20:02:29.642997 | 2022-02-08T16:51:09 | 2022-02-08T16:51:09 | 227,036,173 | 11 | 1 | Apache-2.0 | 2022-10-05T13:01:38 | 2019-12-10T05:27:50 | Python | UTF-8 | Python | false | false | 1,009 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access T2T Problems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.data_generators import all_problems
from tensor2tensor.utils import registry
def problem(name):
  """Return the registered T2T Problem instance for `name`."""
  return registry.problem(name)
def available():
  """Return the sorted list of all registered problem names."""
  return sorted(registry.list_problems())
# Import every problem module at load time so their problems self-register
# with the registry and become visible to problem()/available().
all_problems.import_modules(all_problems.ALL_MODULES)
| [
"ozymandias@OzymandiasdeMacBook-Pro.local"
] | ozymandias@OzymandiasdeMacBook-Pro.local |
5e7ba5ce60b0d77c7ac37ee8cd88e7e084d1121f | 77d4d5a1881297dce3003560e04a2e39a97d4465 | /code_chef/CATFEED.py | 1820aa32f5462be36d82be05eec3c74f977bdd70 | [] | no_license | gomsterX/competitive_programming | c34820032c24532d62325a379590a22fa812159a | 72ac1fe61604e5a5e41f336bb40377fd7e4738d7 | refs/heads/master | 2023-07-19T21:28:16.205718 | 2021-09-02T14:18:44 | 2021-09-02T14:18:44 | 271,074,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | #Problem ID: CATFEED
#Problem Name: Chef Feeds Cats
# For each test case: n cats are fed in rounds of n bowls; the feeding is
# "fair" exactly when every window of n consecutive bowls (including a
# short final window, or the whole list when m < n) contains no duplicate
# food types.
test_cases = int(input())
for _ in range(test_cases):
    n, m = map(int, input().split())
    foods = list(map(int, input().split()))
    if m < n:
        # Fewer bowls than cats: all bowls must be pairwise distinct.
        is_fair = len(set(foods)) == m
    else:
        # Every chunk of n bowls (last chunk may be shorter) must have
        # all-distinct food types.
        is_fair = all(
            len(set(foods[start:start + n])) == len(foods[start:start + n])
            for start in range(0, m, n)
        )
    print("YES" if is_fair else "NO")
| [
"mohamedmoussaa7@gmail.com"
] | mohamedmoussaa7@gmail.com |
040a18170e65a07634548419034581f3de4202cf | 8dea334c5f7c960eee18087cded70ac140b23bd5 | /mac_os_scripts_tests/set_background_test.py | 6513dc920db9164a8b0c2309e4febf574bea0841 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | initialed85/mac_os_scripts | c94c2100995237133f9b8211c5ac56e4d67df7ee | aa8a2c1dc9193dbce796985f5f125c82f6f90bed | refs/heads/master | 2021-08-22T04:50:25.828232 | 2017-11-29T10:06:51 | 2017-11-29T10:06:51 | 107,938,438 | 32 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | import unittest
from hamcrest import assert_that, equal_to
from mock import MagicMock, call
from mac_os_scripts.set_background import BackgroundSetter
from mac_os_scripts_tests.test_common import _NO_OUTPUT
class BackgroundSetterTest(unittest.TestCase):
    """Unit tests for BackgroundSetter with shell execution mocked out."""

    def setUp(self):
        self._subject = BackgroundSetter(
            sudo_password=None,
        )
        # Replace the shell runner so no real commands ever execute.
        self._subject.run_command = MagicMock()

    def test_change_background(self):
        self._subject.run_command.return_value = _NO_OUTPUT

        result = self._subject.change_background('/usr/local/zetta/background.jpg')
        assert_that(result, equal_to(True))

        # change_background must shell out to osascript with exactly these
        # arguments and flags.
        expected_call = call(
            command_line='/usr/bin/osascript /usr/local/zetta/mac_os_scripts/external/change_background.scpt /usr/local/zetta/background.jpg',
            quiet=True, sudo_password_override=False, timeout=None, send_lines=None
        )
        assert_that(
            self._subject.run_command.mock_calls,
            equal_to([expected_call])
        )

    def test_run_pass(self):
        # run() should just propagate change_background's success.
        self._subject.change_background = MagicMock()
        self._subject.change_background.return_value = True

        result = self._subject.run('/usr/local/zetta/background.jpg')
        assert_that(result, equal_to(True))
| [
"initialed85@gmail.com"
] | initialed85@gmail.com |
6bb620950242059cf4f51b1fa8c30bff7f8fa224 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 10344 23 out of 5/main.py | 193c8366725bb72d7afc7608162ac4e7127668e9 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | import sys
import itertools
sys.stdin = open('input.txt')
while True:
num = map(int, raw_input().split())
if not any(num):
break
flag = False
for t in itertools.permutations(num):
for o1, o2, o3, o4 in itertools.product(['+', '-', '*'], repeat=4):
s = '(((%d %s %d) %s %d) %s %d) %s %d' % (
t[0], o1, t[1], o2, t[2], o3, t[3], o4, t[4])
if eval(s) == 23:
flag = True
break
if flag:
break
if flag:
print 'Possible'
else:
print 'Impossible'
| [
"xenron@outlook.com"
] | xenron@outlook.com |
d8904f94775773453dfab4d7b2b3d0369228d234 | bd8bc7abe0f774f84d8275c43b2b8c223d757865 | /16_3SumClosest/threeSumClosest.py | 53c3978a270f37f6e4651504a4d8800baafe9ce0 | [
"MIT"
] | permissive | excaliburnan/SolutionsOnLeetcodeForZZW | bde33ab9aebe9c80d9f16f9a62df72d269c5e187 | 64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7 | refs/heads/master | 2023-04-07T03:00:06.315574 | 2021-04-21T02:12:39 | 2021-04-21T02:12:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
n = len(nums)
nearest = 10 ** 7
for i in range(n - 2):
lo, hi = i + 1, n - 1
# 双指针
while lo < hi:
total = nums[i] + nums[lo] + nums[hi]
if total == target:
return total
elif total < target:
lo += 1
else:
hi -= 1
# 直接根据"最近"的定义取结果
nearest = min(nearest, total, key=lambda x: abs(x - target))
return nearest
| [
"noreply@github.com"
] | excaliburnan.noreply@github.com |
4a228b16eeedd8cffd1be719945dd6849aca0089 | 20627b99d5658933dab7892a42576762a22d9f6b | /example_set-2/script_ex-2_3.py | 17ae4507a7ade1b544d608d9670dc26b9049313b | [] | no_license | rsp-esl/python_examples_learning | 07bd44a42c45d091bc3ead31241df319e73d2a6e | da8b35bccbaafe855bf544011272e550e5ec9a0a | refs/heads/master | 2021-09-23T11:59:29.388759 | 2018-09-22T11:27:43 | 2018-09-22T11:27:43 | 111,050,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Author: Rawat S. (Dept. of Electrical & Computer Engineering, KMUTNB)
# Date: 2017-11-17
##############################################################################
from __future__ import print_function # for Python 2.6 or higher
## Data structures, Lists, List manipulation
count = 5
odds = [2 * idx + 1 for idx in range(count)]  # [1, 3, 5, 7, 9]
print (odds[:])    # full copy of the list
print (odds[0:])   # full copy (explicit start index)
print (odds[:-1])  # everything but the last element
print (odds[:-2])  # everything but the last two elements
print (odds[-1:])  # only the last element
print (odds[-2:])  # only the last two elements
print (odds[1:-1]) # everything but the first and last elements
# output:
# [1, 3, 5, 7, 9]
# [1, 3, 5, 7, 9]
# [1, 3, 5, 7]
# [1, 3, 5]
# [9]
# [7, 9]
# [3, 5, 7]
##############################################################################
| [
"noreply@github.com"
] | rsp-esl.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.