blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9e045519004f73f6c2d4f4a1252bf2571792b29d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/topics/DataFormats/79/community.py | 6f0da38f033e3bd7c46dae372d33bce22a7c7e20 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 772 | py | _______ c__
____ c.. _______ C..
_______ r__
CSV_URL 'https://bites-data.s3.us-east-2.amazonaws.com/community.csv'
___ get_csv
"""Use requests to download the csv and return the
decoded content"""
w__ r__.S.. __ s
download s.g.. ?
decoded_content download.c__.d.. utf-8
cr c__.reader(decoded_content.s.. , d.._',')
next(cr)
my_list l..(cr)
r.. my_list
___ create_user_bar_chart(content
"""Receives csv file (decoded) content and print a table of timezones
and their corresponding member counts in pluses to standard output
"""
counter C..(user[2] ___ user __ content)
___ tz __ s..(counter
print _*{tz: <20} | {"+"*counter[tz]}')
create_user_bar_chart(get_csv
#get_csv() | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
18bf9c0ccb23bcdc9920c3056bd3f9bf0ba9e519 | c8fbb8abd6f00a53607773aba706f2893861c50c | /5-html.py | 51cbaa634f58a2ea8395201091e1a0678808ca5b | [] | no_license | abelsonlive/intro-to-webscraping | f160e80678ac0a007b1f633bb3d8658fcfc35d77 | 27aaea56990dd9ccc45c4ca4ba93d49d3d73cec0 | refs/heads/master | 2016-09-05T08:51:45.877565 | 2015-02-11T01:32:55 | 2015-02-11T01:32:55 | 30,586,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | import requests
from bs4 import BeautifulSoup

# Google search endpoint; the query string is passed via the 'q' parameter.
url = 'http://google.com/search'
search_parameters = {
    'q': 'hack the gibson'
}

# `requests` is imported at the top of this file.
response = requests.get(url, params=search_parameters)
soup = BeautifulSoup(response.content)

# NOTE(review): Python 2 script (print statement) that depends on Google's
# old result markup (<h3 class="r"> wrapping the result link) -- confirm
# the selectors still match before reuse.
for search_result in soup.find_all('h3', {'class':'r'}):
    anchor_tag = search_result.find('a')
    raw_link = anchor_tag.attrs.get('href')
    # Google wraps the destination in a redirect URL; keep what follows 'q='.
    clean_link = raw_link.split('q=')[-1]
    print clean_link
| [
"brianabelson@gmail.com"
] | brianabelson@gmail.com |
3a696fce02ad18bb6f0d805193e9a19fec2c84c5 | 7f4b1d5e9963d63dd45b31c6cad8ced70d823217 | /interview-prep/geeks_for_geeks/greedy/huffman_decoding.py | cab62acfcb088effd9506ec8cb43182cc73ee544 | [] | no_license | mdhatmaker/Misc-python | b8be239619788ed343eb55b24734782e227594dc | 92751ea44f4c1d0d4ba60f5a1bb9c0708123077b | refs/heads/master | 2023-08-24T05:23:44.938059 | 2023-08-09T08:30:12 | 2023-08-09T08:30:12 | 194,360,769 | 3 | 4 | null | 2022-12-27T15:19:06 | 2019-06-29T03:39:13 | Python | UTF-8 | Python | false | false | 4,322 | py | import sys
import heapq
# https://practice.geeksforgeeks.org/problems/huffman-decoding-1/1
# https://www.geeksforgeeks.org/huffman-coding-greedy-algo-3/
# Given an encoded string, your task is to print the decoded String.
###############################################################################
def decode(arr):
    """Unimplemented stub from the exercise template.

    Always returns -1; the actual Huffman decoding is performed by
    decode_file() below.  Kept as-is to preserve the published interface.
    """
    return -1
def decode_file(root, s):
    """Decode the bit-string ``s`` against the Huffman tree rooted at ``root``.

    A '0' descends to the left child, anything else to the right.  Whenever
    a leaf (no children) is reached its symbol is emitted and the walk
    restarts from the root.  A trailing NUL character is appended to match
    the original C-style implementation this was ported from.
    """
    decoded = []
    node = root
    for bit in s:
        node = node.left if bit == '0' else node.right
        # A node with no children is a leaf carrying a symbol.
        if not node.left and not node.right:
            decoded.append(node.data)
            node = root
    return ''.join(decoded) + '\0'
"""
Steps to build Huffman Tree
Input is an array of unique characters along with their frequency
of occurrences and output is Huffman Tree.
1. Create a leaf node for each unique character and build a min
heap of all leaf nodes. (Min Heap is used as a priority queue.
The value of frequency field is used to compare two nodes in min
heap. Initially, the least frequent character is at root)
2. Extract two nodes with the minimum frequency from the min heap.
3. Create a new internal node with a frequency equal to the sum of
the two nodes frequencies. Make the first extracted node as its
left child and the other extracted node as its right child. Add
this node to the min heap.
4. Repeat steps #2 and #3 until the heap contains only one node.
The remaining node is the root node and the tree is complete.
"""
class Node:
    """A Huffman-tree node, ordered by frequency (ties compare equal)."""

    def __init__(self, data, freq):
        self.data = data    # symbol carried by this node ('$' for internal nodes)
        self.freq = freq    # occurrence count used for heap ordering
        self.left = None
        self.right = None

    def __repr__(self):
        return '{}:{}'.format(self.data, self.freq)

    # Comparisons are by frequency only; comparing against a falsy value
    # (e.g. None) is always False for <, >, == -- mirroring the original.
    def __lt__(self, other):
        return bool(other) and self.freq < other.freq

    def __gt__(self, other):
        return bool(other) and self.freq > other.freq

    def __eq__(self, other):
        return bool(other) and self.freq == other.freq

    def __ne__(self, other):
        return not self.__eq__(other)
def build_huffman_tree(arr):
    """Collapse (symbol, frequency) pairs into the single Huffman root entry.

    ``arr`` is a flat list alternating symbols and frequency strings, e.g.
    ``['a', '5', 'b', '9']``.  Entries live on a min-heap as
    ``(frequency, symbol)`` tuples; the two cheapest entries are repeatedly
    merged into an internal ``'$'`` entry until only one remains, which is
    returned as ``(total_frequency, symbol)``.
    """
    heap = [(int(freq), symbol)
            for symbol, freq in zip(arr[::2], arr[1::2])]
    heapq.heapify(heap)

    # Merge the two least-frequent entries until a single root remains.
    while len(heap) > 1:
        lo = heapq.heappop(heap)
        hi = heapq.heappop(heap)
        heapq.heappush(heap, (lo[0] + hi[0], '$'))

    return heapq.heappop(heap)
###############################################################################
if __name__ == "__main__":
    # Each entry is (space-separated "symbol freq symbol freq ..." string,
    # expected output).  The expected values were never filled in for the
    # frequency-based case.
    test_inputs = []
    #test_inputs.append( ("abc", "abc") )
    #test_inputs.append( ("geeksforgeeks", "geeksforgeeks") )
    test_inputs.append( ("a 5 b 9 c 12 d 13 e 16 f 45", "") )

    """ Run process on sample inputs
    """
    for inputs, results in test_inputs:
        # Tokenize into the flat [symbol, freq, ...] list the builder expects.
        arr = [s for s in inputs.split()]
        print(f'{inputs}')
        rv = build_huffman_tree(arr)
        # rv is the surviving heap entry: (total_frequency, '$').
        print(f"{rv} expected: {results}")
        #minHeap = rv
        #while minHeap:
        #    print(heapq.heappop(minHeap), end=' ')
        #print()
| [
"hatmanmd@yahoo.com"
] | hatmanmd@yahoo.com |
be7a3abe18aa4a85e2d13bdc988a23498b3a315b | a31edaf4843ff669d459d3150a7eebcd24f7e579 | /BSI/crypto/rand.py | 0689fc72aa12a374349c2dcb4e0b772d57a1c50b | [] | no_license | Ginkooo/PolibudaCode | 1d57b68cd7f62feb933bc90342f6128ff991f35d | 583c2b07b6947df782a412f26b224efc5c9e4180 | refs/heads/master | 2021-01-11T01:17:09.642285 | 2018-12-13T23:50:00 | 2018-12-13T23:50:00 | 70,732,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | import subprocess
import binascii
from argparse import ArgumentParser

# CLI: algorithm name plus input/output paths, passed straight to openssl.
ap = ArgumentParser(description='Encrypt file using system entrophy')
ap.add_argument('algorithm', help='Algorithm to use (For full list of ciphertypes please see: man enc')
ap.add_argument('input_file', help='File to cipher')
ap.add_argument('output_file', help='Output file')

args = ap.parse_args()

# Draw 14 random bytes each for the key and IV from the kernel CSPRNG.
rand_key = open('/dev/urandom', 'rb').read(14)
rand_iv = open('/dev/urandom', 'rb').read(14)

# openssl's -K / -iv options take hex strings, so hex-encode the raw bytes.
rand_key = binascii.hexlify(rand_key).decode('ascii')
rand_iv = binascii.hexlify(rand_iv).decode('ascii')

algo = args.algorithm
ifile = args.input_file
ofile = args.output_file

# NOTE(review): the command is built by string interpolation and run with
# shell=True, so file names containing shell metacharacters are unsafe --
# prefer passing an argument list with shell=False.
cmd = 'openssl enc -{} -e -in {} -out {} -K {} -iv {}'.format(algo, ifile, ofile, rand_key, rand_iv)
print(cmd)

subprocess.check_call(cmd, shell=True)

# Print the key/IV so the file can be decrypted later.
print('File enciphered with key:')
print(rand_key)
print('iv was:')
print(rand_iv)
| [
"piotr_czajka@outlook.com"
] | piotr_czajka@outlook.com |
085522d19e1404567e8c4300ce0fa4ea1ef34023 | 008ea0c503829f33840495373ad3d60794575af3 | /PYDayByDay/common/list.py | 6262e957b92bfdffbe3d3158524d64346d6e2d3c | [] | no_license | JyHu/PYStudy | 6515bea47ca6f80e336f3b6a7a14b1159fde872f | ec0855c414237bdd7d0cb28f79a81c02ccd52d45 | refs/heads/master | 2016-08-12T19:44:06.723361 | 2016-04-11T10:38:59 | 2016-04-11T10:38:59 | 45,384,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | #coding = utf-8
__author__ = 'JinyouHU'

# Python 2 tutorial script demonstrating list indexing and slicing.
word = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# Plain indexing (0-based).
a = word[2]
print 'a is :' +a
# Slice [start:stop] -- stop is exclusive.
b = word[1:3]
print 'b is :'
print b
# Omitted start defaults to 0.
c = word[:2]
print 'c is :'
print c
# Omitted stop defaults to the end of the list.
d = word[0:]
print 'd is :'
print d
# Concatenating two complementary slices reproduces the whole list.
e = word[:2] + word[2:]
print 'e is :'
print e
# Negative indices count from the end (-1 is the last element).
f = word[-1]
print 'f is :'
print f
g = word[-4:2]
print 'g is :'
print g
h = word[-2:]
print 'h is :'
print h
i = word[:-2]
print 'i is :'
print i
l = len(word)
print 'length of word is :' + str(l)
print 'add new element...'
word.append('h')
print word
'''
List(列表) 是 Python 中使用最频繁的数据类型。
列表可以完成大多数集合类的数据结构实现。它支持字符,数字,字符串甚至可以包含列表(所谓嵌套)。
列表用[ ]标识。是python最通用的复合数据类型。看这段代码就明白。
列表中的值得分割也可以用到变量[头下标:尾下标],就可以截取相应的列表,从左到右索引默认0开始的,从右到左索引默认-1开始,下标可以为空表示取到头或尾。
'''
####################################################
'''
Python元组
元组是另一个数据类型,类似于List(列表)。
元组用"()"标识。内部元素用逗号隔开。但是元素不能二次赋值,相当于只读列表。
'''
# NOTE(review): 'tuple' shadows the builtin type -- a tutorial anti-pattern.
tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )
tinytuple = (123, 'john')

# NOTE(review): BUG -- no variable named 'list' is defined above, so these
# four statements reference the builtin 'list' type and raise a TypeError
# when subscripted; they presumably meant to print the 'tuple' defined above.
print list # print the complete list
print list[0] # print the first element of the list
print list[1:3] # print the second and third elements
print list[2:] # print everything from the third element onward
"auu.aug@gmail.com"
] | auu.aug@gmail.com |
0d981572ba1a4d26a11b7a95c3f10693250d3020 | ac216a2cc36f91625e440247986ead2cd8cce350 | /packages/infra_libs/infra_libs/ts_mon/common/test/targets_test.py | 43e9246c2e49ae906e0fb13174e107561e482faa | [
"BSD-3-Clause"
] | permissive | xinghun61/infra | b77cdc566d9a63c5d97f9e30e8d589982b1678ab | b5d4783f99461438ca9e6a477535617fadab6ba3 | refs/heads/master | 2023-01-12T21:36:49.360274 | 2019-10-01T18:09:22 | 2019-10-01T18:09:22 | 212,168,656 | 2 | 1 | BSD-3-Clause | 2023-01-07T10:18:03 | 2019-10-01T18:22:44 | Python | UTF-8 | Python | false | false | 4,291 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from infra_libs.ts_mon.common import targets
from infra_libs.ts_mon.protos import metrics_pb2
class TargetTest(unittest.TestCase):
    """Equality and hashing semantics shared by task and device targets."""

    def setUp(self):
        # Two identical tasks, plus one differing only in task_num.
        self.task0 = targets.TaskTarget('serv', 'job', 'reg', 'host', 0)
        self.task1 = targets.TaskTarget('serv', 'job', 'reg', 'host', 0)
        self.task2 = targets.TaskTarget('serv', 'job', 'reg', 'host', 1)
        # Two identical devices, plus one differing only in hostname.
        self.device0 = targets.DeviceTarget('reg', 'role', 'net', 'host0')
        self.device1 = targets.DeviceTarget('reg', 'role', 'net', 'host0')
        self.device2 = targets.DeviceTarget('reg', 'role', 'net', 'host1')

    def test_eq(self):
        # Structurally identical targets compare equal.
        for left, right in [(self.task0, self.task1),
                            (self.device0, self.device1)]:
            self.assertTrue(left == right)
        # Any field difference, or a kind mismatch, breaks equality.
        for left, right in [(self.task0, self.task2),
                            (self.device0, self.device2),
                            (self.task0, self.device0)]:
            self.assertFalse(left == right)

    def test_hash(self):
        # Equal targets must hash identically: inserting with an equal key
        # overwrites the earlier dict entry, so lookups below see the value
        # written by the *last* equal target.
        lookup = {}
        ordered = [self.task0, self.task1, self.task2,
                   self.device0, self.device1, self.device2]
        for value, target in enumerate(ordered, start=1):
            lookup[target] = value
        self.assertEqual(2, lookup[self.task0])
        self.assertEqual(2, lookup[self.task1])
        self.assertEqual(3, lookup[self.task2])
        self.assertEqual(5, lookup[self.device0])
        self.assertEqual(5, lookup[self.device1])
        self.assertEqual(6, lookup[self.device2])
class DeviceTargetTest(unittest.TestCase):
    """Tests for targets.DeviceTarget proto serialization and field updates."""

    def test_populate_target_pb(self):
        """Constructor fields (plus fixed realm/alertable) reach the proto."""
        pb = metrics_pb2.MetricsCollection()
        target = targets.DeviceTarget('reg', 'role', 'net', 'host')
        target.populate_target_pb(pb)
        self.assertEqual(pb.network_device.metro, 'reg')
        self.assertEqual(pb.network_device.role, 'role')
        self.assertEqual(pb.network_device.hostgroup, 'net')
        self.assertEqual(pb.network_device.hostname, 'host')
        # realm and alertable are hard-coded by DeviceTarget, not arguments.
        self.assertEqual(pb.network_device.realm, 'ACQ_CHROME')
        self.assertEqual(pb.network_device.alertable, True)

    def test_update_to_dict(self):
        """update() overwrites only the named public fields."""
        target = targets.DeviceTarget('reg', 'role', 'net', 'host')
        self.assertEqual({
            'region': 'reg',
            'role': 'role',
            'network': 'net',
            'hostname': 'host'}, target.to_dict())
        target.update({'region': 'other', 'hostname': 'guest'})
        self.assertEqual({
            'region': 'other',
            'role': 'role',
            'network': 'net',
            'hostname': 'guest'}, target.to_dict())

    def test_update_private_field(self):
        """Attributes outside the public field set are rejected."""
        target = targets.DeviceTarget('reg', 'role', 'net', 'host')
        with self.assertRaises(AttributeError):
            target.update({'realm': 'boo'})

    def test_update_nonexistent_field(self):
        """A field listed in _fields but absent on the object raises."""
        target = targets.DeviceTarget('reg', 'role', 'net', 'host')
        # Simulate a bug: exporting a non-existent field.
        target._fields += ('bad',)
        with self.assertRaises(AttributeError):
            target.update({'bad': 'boo'})
class TaskTargetTest(unittest.TestCase):
    """Tests for targets.TaskTarget proto serialization and field updates."""

    def test_populate_target_pb(self):
        """Constructor fields reach the task proto; task_num defaults to 0."""
        pb = metrics_pb2.MetricsCollection()
        target = targets.TaskTarget('serv', 'job', 'reg', 'host')
        target.populate_target_pb(pb)
        self.assertEqual(pb.task.service_name, 'serv')
        self.assertEqual(pb.task.job_name, 'job')
        self.assertEqual(pb.task.data_center, 'reg')
        self.assertEqual(pb.task.host_name, 'host')
        self.assertEqual(pb.task.task_num, 0)

    def test_update_to_dict(self):
        """update() overwrites only the named public fields."""
        target = targets.TaskTarget('serv', 'job', 'reg', 'host', 5)
        self.assertEqual({
            'service_name': 'serv',
            'job_name': 'job',
            'region': 'reg',
            'hostname': 'host',
            'task_num': 5}, target.to_dict())
        target.update({'region': 'other', 'hostname': 'guest'})
        self.assertEqual({
            'service_name': 'serv',
            'job_name': 'job',
            'region': 'other',
            'hostname': 'guest',
            'task_num': 5}, target.to_dict())

    def test_update_private_field(self):
        """Attributes outside the public field set are rejected."""
        target = targets.TaskTarget('serv', 'job', 'reg', 'host')
        with self.assertRaises(AttributeError):
            target.update({'realm': 'boo'})

    def test_update_nonexistent_field(self):
        """A field listed in _fields but absent on the object raises."""
        target = targets.TaskTarget('serv', 'job', 'reg', 'host')
        # Simulate a bug: exporting a non-existent field.
        target._fields += ('bad',)
        with self.assertRaises(AttributeError):
            target.update({'bad': 'boo'})
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
aa78a93cfcec310c321ca4d89d37d1d191768dec | 4a3dcb0cd8b271ca4e54eac077c286bfec399ff0 | /graphstats/embed/ase.py | d691563dac3fb5de1feec978498fea21de797f39 | [
"Apache-2.0"
] | permissive | tpsatish95/graspy | b5e42faccbb2cc0bdd607f8229e1cf83c9060530 | 3360e217a3f4200381a805c2b95b5de282d146ac | refs/heads/master | 2020-03-30T04:14:07.606769 | 2018-09-26T10:41:13 | 2018-09-26T10:41:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py |
# ase.py
# Created by Ben Pedigo on 2018-09-15.
# Email: bpedigo@jhu.edu
# Adapted from Disa Mhembere
from embed import BaseEmbed
from utils import import_graph, check_square
from svd import SelectSVD
from sklearn.decomposition import TruncatedSVD
import numpy as np
class ASEEmbedder(BaseEmbed):
    """Computes the adjacency spectral embedding (ASE) of a graph.

    The embedding is a truncated SVD of the adjacency matrix, with each
    singular vector scaled by its singular value raised to ``eig_scale``.
    """

    def __init__(self, n_components=2, eig_scale=0.5):
        """
        Adjacency spectral embedding of a graph.

        Parameters
        ----------
        n_components : int, optional (default 2)
            Number of embedding dimensions. If None, the dimension is
            chosen automatically via SelectSVD.
        eig_scale : float, optional (default 0.5)
            Exponent applied to the singular values when scaling the
            singular vectors.
        """
        # BUG FIX: the original called ``super.__init__(...)`` -- invoking
        # __init__ on the builtin ``super`` type itself, which raises a
        # TypeError. It must be ``super().__init__(...)``.
        super().__init__(n_components=n_components, eig_scale=eig_scale)

    def _reduce_dim(self, A):
        """Project the square matrix A onto its leading singular subspace."""
        if self.n_components is None:
            # Automatic dimension selection.
            tsvd = SelectSVD()  # TODO: forward other parameters here?
        else:
            # Cannot request more components than n - 1.
            tsvd = TruncatedSVD(n_components=min(self.n_components, A.shape[0] - 1))
        tsvd.fit(A)
        eig_vectors = tsvd.components_.T
        eig_values = tsvd.singular_values_
        # Scale each singular vector by sigma ** eig_scale.
        embedding = eig_vectors.dot(np.diag(eig_values ** self.eig_scale))
        return embedding

    def fit(self, graph):
        """Compute and store the embedding of ``graph`` on ``self.embedding``.

        Returns self to allow chaining.
        """
        A = import_graph(graph)
        check_square(A)
        self.embedding = self._reduce_dim(A)
        return self
| [
"benjamindpedigo@gmail.com"
] | benjamindpedigo@gmail.com |
f92bef4524810441c31b2e1f8fe242bfa96e8225 | 83d9b630bdc5535d67e35d69768b4d41fc4c9653 | /assignment1/assignment1/cs231n/gradient_check.py | 31257e96b4cf83c96ae458e56c1b435f19990bfc | [] | no_license | chenshaobin/assignment_CS231n | 2c8820f748fca6fb41cdb272a81c940f8c0a0e52 | 132c670d22dd37e6b4c1bd9da944c1f62a639d64 | refs/heads/main | 2022-12-30T21:05:12.500255 | 2020-10-18T04:49:40 | 2020-10-18T04:49:40 | 301,309,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,041 | py | from __future__ import print_function
from builtins import range
from past.builtins import xrange
import numpy as np
from random import randrange
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
    """Numerically estimate the gradient of ``f`` at ``x``.

    Applies the centered-difference formula (f(x+h) - f(x-h)) / (2h) along
    every coordinate of ``x``, perturbing ``x`` in place and restoring it
    afterwards.

    - f: function of a single numpy-array argument returning a scalar
    - x: numpy array, the point at which to evaluate the gradient
    - verbose: when True, print each (index, partial derivative) pair
    - h: perturbation step size
    """
    fx = f(x)  # evaluate function value at original point
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        saved = x[idx]

        x[idx] = saved + h
        f_plus = f(x)
        x[idx] = saved - h
        f_minus = f(x)
        x[idx] = saved  # restore the original coordinate

        # Centered difference: second-order accurate in h.
        grad[idx] = (f_plus - f_minus) / (2 * h)
        if verbose:
            print(idx, grad[idx])
        it.iternext()

    return grad
def eval_numerical_gradient_array(f, x, df, h=1e-5):
    """Numerically estimate the gradient of an array-valued function.

    ``f`` maps a numpy array to a numpy array and ``df`` is the upstream
    gradient with respect to f's output.  Returns the downstream gradient
    d(sum(f(x) * df))/dx computed with centered differences.
    """
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        saved = x[idx]

        x[idx] = saved + h
        out_plus = f(x).copy()
        x[idx] = saved - h
        out_minus = f(x).copy()
        x[idx] = saved  # restore the original coordinate

        # Chain rule: contract the output-space difference with df.
        grad[idx] = np.sum((out_plus - out_minus) * df) / (2 * h)
        it.iternext()
    return grad
def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
    """
    Compute numeric gradients for a function that operates on input
    and output blobs.

    We assume that f accepts several input blobs as arguments, followed by a
    blob where outputs will be written. For example, f might be called like:

    f(x, w, out)

    where x and w are input Blobs, and the result of f will be written to out.

    Inputs:
    - f: function
    - inputs: tuple of input blobs
    - output: output blob
    - h: step size

    Returns a list with one gradient array per input blob, in order.
    """
    numeric_diffs = []
    for input_blob in inputs:
        diff = np.zeros_like(input_blob.diffs)
        it = np.nditer(input_blob.vals, flags=['multi_index'],
                       op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            orig = input_blob.vals[idx]

            # Perturb one input coordinate, re-run f, and capture the output.
            input_blob.vals[idx] = orig + h
            f(*(inputs + (output,)))
            pos = np.copy(output.vals)
            input_blob.vals[idx] = orig - h
            f(*(inputs + (output,)))
            neg = np.copy(output.vals)
            input_blob.vals[idx] = orig  # restore before moving on

            # Contract the centered difference with the upstream gradient
            # stored on the output blob.
            diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)
            it.iternext()
        numeric_diffs.append(diff)
    return numeric_diffs
def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
    """Numeric gradients for a net object, delegating to the blob version.

    The lambda discards its arguments and calls net.forward(), which is
    expected to read ``inputs`` and write ``output`` as side effects.
    """
    return eval_numerical_gradient_blobs(lambda *args: net.forward(),
                                         inputs, output, h=h)
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
    """
    Sample a few random coordinates of x and print, for each, the numeric
    gradient, the supplied analytic gradient, and their relative error.
    Used as a spot-check instead of evaluating the full gradient.
    """
    for i in range(num_checks):
        # Pick one random index into x (one coordinate per dimension).
        ix = tuple([randrange(m) for m in x.shape])
        # print('ix:', ix)
        oldval = x[ix]
        x[ix] = oldval + h # increment by h
        fxph = f(x) # evaluate f(x + h)
        x[ix] = oldval - h # increment by h
        fxmh = f(x) # evaluate f(x - h)
        x[ix] = oldval # reset

        # Centered difference vs. the caller-provided analytic value.
        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        # NOTE(review): divides by |num| + |analytic| -- raises
        # ZeroDivisionError if both are exactly zero at the sampled index.
        rel_error = (abs(grad_numerical - grad_analytic) /
                     (abs(grad_numerical) + abs(grad_analytic)))
        print('numerical: %f analytic: %f, relative error: %e'
              %(grad_numerical, grad_analytic, rel_error))
| [
"13531194616@163.com"
] | 13531194616@163.com |
2310f6ba6d69d7d143a9f93b55954ca5c691f398 | 5d06a33d3685a6f255194b13fd2e615e38d68850 | /tests/opytimark/utils/test_constants.py | ee4d47bdb169e5811cb942a7bbb70115af9b72a6 | [
"Apache-2.0"
] | permissive | sarikoudis/opytimark | 617a59eafaabab5e67bd4040473a99f963df7788 | cad25623f23ce4b509d59381cf7bd79e41a966b6 | refs/heads/master | 2023-07-24T04:19:55.869169 | 2021-09-03T13:09:45 | 2021-09-03T13:09:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | import sys
from opytimark.utils import constants
def test_constants():
    """Verify the package-level constants keep their published values."""
    expected = {
        'DATA_FOLDER': 'data/',
        'EPSILON': 1e-32,
        'FLOAT_MAX': sys.float_info.max,
    }
    for name, value in expected.items():
        assert getattr(constants, name) == value
| [
"gth.rosa@uol.com.br"
] | gth.rosa@uol.com.br |
dfdbbbdf80ff3a131f9a789153624a55f21f9c20 | aa4b80cf7e7ac0028d0c7f67ade982d9b740a38b | /python/list/list_max.py | 0272716fe25776fbf18ced846eea450e85342060 | [] | no_license | ratularora/python_code | 9ac82492b8dc2e0bc2d96ba6df6fdc9f8752d322 | ddce847ba338a41b0b2fea8a36d49a61aa0a5b13 | refs/heads/master | 2021-01-19T04:34:22.038909 | 2017-09-27T08:14:45 | 2017-09-27T08:14:45 | 84,435,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | list1, list2 = [123, 565654, 'A','Z','gdgf'], [456, 700, 200]
# Print the largest element of each list defined above (Python 2 print
# statement).  NOTE(review): list1 mixes ints and strings, which Python 2's
# max() permits (numbers order before strings) but Python 3 rejects with a
# TypeError -- confirm the intended runtime before reuse.
print "Max value element : ", max(list1)
print "Max value element : ", max(list2)
| [
"arora.ratul@gmail.com"
] | arora.ratul@gmail.com |
ff455dd0b1d99aba94e9c35e313ed4aa46e522f1 | fb65b7c000642dca68c93ee85a87795b3f30fe21 | /Core_Python/pgm11.py | 652850cfd01fd7bd255185ef1e708fbf72d76138 | [] | no_license | toncysara17/luminarpythonprograms | f41b446251feba641e117d87ce235dc556086f8f | 17bc37c3f83c0e9792aaa8bccd901371a6413f14 | refs/heads/master | 2023-04-17T18:51:31.493118 | 2021-04-20T05:25:02 | 2021-04-20T05:25:02 | 358,550,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | #pgm to swap two variables
# Program to swap the values of two variables.
num1 = 5
num2 = 10

print("value before swapping")
print("Number1 is", num1)
print("Number2 is", num2)

# Tuple unpacking swaps the two values without a temporary variable.
num1, num2 = num2, num1

print("Value after swapping")
print("Number1 is", num1)
print("Number2 is", num2)
"toncysara12@gmail.com"
] | toncysara12@gmail.com |
4f338929596b6be67843874be8412f875486b877 | 0e5291f09c5117504447cc8df683ca1506b70560 | /test/test_writable_tenant_group.py | 735b459c6d0339a0fcabd4fa29ee9bc034db512a | [
"MIT"
] | permissive | nrfta/python-netbox-client | abd0192b79aab912325485bf4e17777a21953c9b | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | refs/heads/master | 2022-11-13T16:29:02.264187 | 2020-07-05T18:06:42 | 2020-07-05T18:06:42 | 277,121,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.models.writable_tenant_group import WritableTenantGroup # noqa: E501
from netbox_client.rest import ApiException
class TestWritableTenantGroup(unittest.TestCase):
    """WritableTenantGroup unit test stubs.

    Auto-generated by swagger-codegen; the single test is a placeholder
    awaiting example attribute values (see the FIXME below).
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testWritableTenantGroup(self):
        """Test WritableTenantGroup"""
        # FIXME: construct object with mandatory attributes with example values
        # model = netbox_client.models.writable_tenant_group.WritableTenantGroup()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"67791576+underline-bot@users.noreply.github.com"
] | 67791576+underline-bot@users.noreply.github.com |
fb54501652083103c6154e7fb4d55f828df3fc3a | b0d5e423f09181a322a0166b06bf7fe45a3befc0 | /MetioTube/asgi.py | e2cd92f47887e46c1b286179c5713a337df86ce6 | [
"MIT"
] | permissive | Sheko1/MetioTube | f5da4184bb1590565ba34cef2fff02b379ab3e56 | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | refs/heads/main | 2023-07-04T12:54:57.500778 | 2021-08-14T19:41:56 | 2021-08-14T19:41:56 | 383,907,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for MetioTube project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built
# (only sets the variable if the environment has not already defined it).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MetioTube.settings')

# The module-level ASGI callable that servers (uvicorn/daphne) look up.
application = get_asgi_application()
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
2a4782180f8375d960ca9e96805817fe1a9d44db | 35a253595e158085dbb40d33d44dde026269c8a7 | /198 House Robber.py | 562340100cf91741af888da5d9b11ebcbb944cf3 | [
"MIT"
] | permissive | ee08b397/LeetCode-4 | 7a8174275fbe7e0e667575aedd1ff1a8647776c3 | 3b26870e946b510797b6b284822a1011ce048fbe | refs/heads/master | 2020-12-24T15:13:22.899164 | 2015-09-22T02:41:13 | 2015-09-22T02:41:13 | 43,003,940 | 1 | 0 | null | 2015-09-23T13:52:25 | 2015-09-23T13:52:25 | null | UTF-8 | Python | false | false | 891 | py | """
You are a professional robber planning to rob houses along a street. Each house has a certain amount of money stashed,
the only constraint stopping you from robbing each of them is that adjacent houses have security system connected and it
will automatically contact the police if two adjacent houses were broken into on the same night.
Given a list of non-negative integers representing the amount of money of each house, determine the maximum amount of
money you can rob tonight without alerting the police.
"""
__author__ = 'Daniel'
class Solution:
    def rob(self, nums):
        """Return the maximum amount robbable from non-adjacent houses.

        DP recurrence: f_i = max(f_{i-1}, f_{i-2} + nums[i]).  Only the two
        previous values are ever needed, so this keeps O(1) extra space
        instead of the original O(n) table.  Also fixes Python 3
        compatibility: the original iterated with the Python-2-only xrange.

        :param nums: list of non-negative house values
        :return: int, maximum total without robbing two adjacent houses
        """
        prev2 = prev1 = 0  # f_{i-2} and f_{i-1}
        for amount in nums:
            # Either skip this house (prev1) or rob it on top of f_{i-2}.
            prev2, prev1 = prev1, max(prev1, prev2 + amount)
        return prev1
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
0b8470e562b21979ccc4ab4da93335262b2d9c86 | 74e15a8246fff5fd65a4169a0908c2639912992a | /pykeg/plugin/datastore_test.py | ddd244646f429ac166ff2b9aa5dd7b32765e5fa0 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Kegbot/kegbot-server | 5abf5fedf102aa9000e9dbe0ae90f9d4a70b93f3 | e0f953137bc261519444cb769455317074c7253e | refs/heads/main | 2023-02-16T08:57:50.931256 | 2022-08-18T17:21:13 | 2022-08-18T17:21:13 | 1,886,336 | 77 | 68 | MIT | 2023-02-15T19:01:41 | 2011-06-12T22:25:29 | JavaScript | UTF-8 | Python | false | false | 1,397 | py | from django.test import TransactionTestCase
from pykeg.core import models
from pykeg.plugin import datastore
class DatastoreTestCase(TransactionTestCase):
    """Exercises the two plugin datastore backends (DB-backed and in-memory)."""

    def test_model_datastore(self):
        """ModelDatastore persists entries as PluginData rows."""
        ds = datastore.ModelDatastore(plugin_name="test")
        self.assertEqual(0, models.PluginData.objects.all().count())

        # A set() creates one row keyed by (plugin_name, key).
        ds.set("foo", "bar")
        q = models.PluginData.objects.all()
        self.assertEqual(1, q.count())
        self.assertEqual("test", q[0].plugin_name)
        self.assertEqual("foo", q[0].key)
        self.assertEqual("bar", q[0].value)

        # Setting to 'None' clears value
        ds.set("foo", None)
        self.assertEqual(0, models.PluginData.objects.all().count())

        # Complex types survive.
        ds.set("obj", {"asdf": 123, "foo": None})
        self.assertEqual({"asdf": 123, "foo": None}, ds.get("obj"))

    def test_in_memory_datastore(self):
        """InMemoryDatastore stores entries in a dict keyed 'plugin:key'."""
        ds = datastore.InMemoryDatastore(plugin_name="test")
        self.assertEqual(0, len(ds.data))

        ds.set("foo", "bar")
        self.assertEqual(1, len(ds.data))
        self.assertEqual("bar", ds.data["test:foo"])

        # Setting to 'None' clears value
        ds.set("foo", None)
        self.assertEqual(0, len(ds.data))

        # Complex types survive.
        ds.set("obj", {"asdf": 123, "foo": None})
        self.assertEqual({"asdf": 123, "foo": None}, ds.get("obj"))
| [
"opensource@hoho.com"
] | opensource@hoho.com |
bd87f7ffff21edcd8780035ad4b9bd302bfb6a72 | 0df898bf192b6ad388af160ecbf6609445c34f96 | /middleware/backend/app/magnet/research/schemas.py | 14784fcd60e81faf427db23b928fed336d7760bc | [] | no_license | sasano8/magnet | a5247e6eb0a7153d6bbca54296f61194925ab3dc | 65191c877f41c632d29133ebe4132a0bd459f752 | refs/heads/master | 2023-01-07T10:11:38.599085 | 2020-11-13T02:42:41 | 2020-11-13T02:42:41 | 298,334,432 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from typing import List, Optional
from magnet import config, BaseModel
class CaseNode(BaseModel):
    """Schema for a node in the research case tree (ORM-backed)."""

    # NOTE(review): assigning ``Config = config.ORM`` relies on config.ORM
    # being a pydantic-style Config class; pydantic normally expects a
    # nested ``class Config`` -- confirm the project BaseModel honours this.
    Config = config.ORM
    id: int
    name: str
class Target(BaseModel):
    """Schema for a research target, linked to its parent node by node_id."""

    # NOTE(review): same ``Config = config.ORM`` pattern as CaseNode --
    # confirm the project BaseModel honours a plain class attribute here.
    Config = config.ORM
    id: int
    name: str
    node_id: int
| [
"y-sasahara@ys-method.com"
] | y-sasahara@ys-method.com |
1d030a1a87fa78e0fb8f511029f3f2a4218a0f6b | 551b75f52d28c0b5c8944d808a361470e2602654 | /examples/IoTDA/V5/TagManagement.py | cb3d92b2c88cd64d06bf9647b4b17b19d38a2d7f | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 2,632 | py | # coding: utf-8
from huaweicloudsdkcore.http.http_config import HttpConfig
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkiotda.v5 import *
def getResourcesByTags(client):
    """List IoTDA device resources carrying a fixed test tag.

    Prints the API response on success; on a client-side API error prints
    the HTTP status, request id, error code and message instead of raising.
    """
    try:
        resource_type = "device"
        # Hard-coded demo tag used across this example script.
        tags = [
            {
                "tag_key": "testTagName",
                "tag_value": "testTagValue"
            }
        ]
        body = QueryResourceByTagsDTO(resource_type=resource_type, tags=tags)
        request = ListResourcesByTagsRequest(body=body)
        response = client.list_resources_by_tags(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
def bindTagsToResource(client):
    """Attach the demo tag to one hard-coded IoTDA device.

    Prints the API response on success; on a client-side API error prints
    the HTTP status, request id, error code and message instead of raising.
    """
    try:
        # Hard-coded demo device id and tag, matching the other examples.
        resource_id = "5e25d39a3b7c24fa3638804b_nb_0403_1"
        resource_type = "device"
        tags = [
            {
                "tag_key": "testTagName",
                "tag_value": "testTagValue"
            }
        ]
        body = BindTagsDTO(resource_id=resource_id, resource_type=resource_type, tags=tags)
        request = TagDeviceRequest(body=body)
        response = client.tag_device(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
def unbindTagsToResource(client):
    """Remove the demo tag (by key) from the hard-coded IoTDA device.

    Prints the API response on success; on a client-side API error prints
    the HTTP status, request id, error code and message instead of raising.
    """
    try:
        resource_id = "5e25d39a3b7c24fa3638804b_nb_0403_1"
        resource_type = "device"
        # Unbinding is keyed by tag key only; values are not needed.
        tag_keys = ["testTagName"]
        body = UnbindTagsDTO(resource_id=resource_id, resource_type=resource_type, tag_keys=tag_keys)
        request = UntagDeviceRequest(body=body)
        response = client.untag_device(request)
        print(response)
    except exceptions.ClientRequestException as e:
        print(e.status_code)
        print(e.request_id)
        print(e.error_code)
        print(e.error_msg)
if __name__ == '__main__':
    # Placeholder credentials/endpoint -- replace before running.
    ak = "{your ak string}"
    sk = "{your sk string}"
    endpoint = "{your endpoint}"
    project_id = "{your project id}"

    config = HttpConfig.get_default_config()
    # NOTE(review): disables TLS certificate verification -- acceptable for
    # a local demo only, never in production.
    config.ignore_ssl_verification = True
    credentials = BasicCredentials(ak, sk, project_id)

    iotda_client = IoTDAClient().new_builder(IoTDAClient) \
        .with_http_config(config) \
        .with_credentials(credentials) \
        .with_endpoint(endpoint) \
        .build()

    # Demonstrate the three tag operations in sequence: query, bind, unbind.
    getResourcesByTags(iotda_client)
    bindTagsToResource(iotda_client)
    unbindTagsToResource(iotda_client)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
38cef241ffcbaddf58cb3d75921e6a5ce7fd5e7b | 89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04 | /tools/cygprofile/check_orderfile.py | ea48127db476c6b8a4b449d1e0b3a84efcb28195 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"MIT"
] | permissive | bino7/chromium | 8d26f84a1b6e38a73d1b97fea6057c634eff68cb | 4666a6bb6fdcb1114afecf77bdaa239d9787b752 | refs/heads/master | 2022-12-22T14:31:53.913081 | 2016-09-06T10:05:11 | 2016-09-06T10:05:11 | 67,410,510 | 1 | 3 | BSD-3-Clause | 2022-12-17T03:08:52 | 2016-09-05T10:11:59 | null | UTF-8 | Python | false | false | 4,024 | py | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Check that symbols are ordered into a binary as they appear in the orderfile.
"""
import logging
import optparse
import sys
import cyglog_to_orderfile
import cygprofile_utils
import patch_orderfile
import symbol_extractor
_MAX_WARNINGS_TO_PRINT = 200
def _IsSameMethod(name1, name2):
  """Returns true if name1 or name2 are split method forms of the other."""
  # Two names denote the same method when they agree after stripping the
  # compiler-generated split suffixes.
  stripped = [patch_orderfile.RemoveSuffixes(n) for n in (name1, name2)]
  return stripped[0] == stripped[1]
def _CountMisorderedSymbols(symbols, symbol_infos):
  """Count the number of misordered symbols, and log them.

  Args:
    symbols: ordered sequence of symbols from the orderfile
    symbol_infos: ordered list of SymbolInfo from the binary

  Returns:
    (misordered_pairs_count, matched_symbols_count, unmatched_symbols_count)
  """
  name_to_symbol_info = symbol_extractor.CreateNameToSymbolInfo(symbol_infos)
  matched_symbol_infos = []
  missing_count = 0
  misordered_count = 0
  # Find the SymbolInfo matching the orderfile symbols in the binary.
  for symbol in symbols:
    if symbol in name_to_symbol_info:
      matched_symbol_infos.append(name_to_symbol_info[symbol])
    else:
      missing_count += 1
      if missing_count < _MAX_WARNINGS_TO_PRINT:
        # Lazy %-args: logging formats the message only if the record is
        # actually emitted.
        logging.warning('Symbol "%s" is in the orderfile, not in the binary',
                        symbol)
  logging.info('%d matched symbols, %d un-matched (Only the first %d unmatched'
               ' symbols are shown)', len(matched_symbol_infos), missing_count,
               _MAX_WARNINGS_TO_PRINT)
  # In the order of the orderfile, find all the symbols that are at an offset
  # smaller than their immediate predecessor, and record the pair.
  previous_symbol_info = symbol_extractor.SymbolInfo(
      name='', offset=-1, size=0, section='')
  for symbol_info in matched_symbol_infos:
    if symbol_info.offset < previous_symbol_info.offset and not (
        _IsSameMethod(symbol_info.name, previous_symbol_info.name)):
      # %s invokes str() on the SymbolInfo tuples, as before.
      logging.warning('Misordered pair: %s - %s',
                      previous_symbol_info, symbol_info)
      misordered_count += 1
    previous_symbol_info = symbol_info
  return (misordered_count, len(matched_symbol_infos), missing_count)
def main():
  """Check that the binary's symbol layout matches the orderfile.

  Returns a shell-style status: truthy (failure) when the number of
  misordered symbol pairs exceeds the threshold, or when no orderfile
  symbol could be matched in the binary at all.
  """
  parser = optparse.OptionParser(usage=
      'usage: %prog [options] <binary> <orderfile>')
  parser.add_option('--target-arch', action='store', dest='arch',
                    choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
                    help='The target architecture for the binary.')
  # type='int' is required: without it optparse stores a user-supplied
  # threshold as a string, and the numeric comparison at the bottom of this
  # function would silently compare an int against a str.
  parser.add_option('--threshold', action='store', dest='threshold',
                    type='int', default=1,
                    help='The maximum allowed number of out-of-order symbols.')
  options, argv = parser.parse_args(sys.argv)
  if not options.arch:
    options.arch = cygprofile_utils.DetectArchitecture()
  if len(argv) != 3:
    parser.print_help()
    return 1
  (binary_filename, orderfile_filename) = argv[1:]
  symbol_extractor.SetArchitecture(options.arch)
  obj_dir = cygprofile_utils.GetObjDir(binary_filename)
  # Map orderfile sections back to symbols so split sections resolve.
  symbol_to_sections_map = \
      cyglog_to_orderfile.GetSymbolToSectionsMapFromObjectFiles(obj_dir)
  section_to_symbols_map = cygprofile_utils.InvertMapping(
      symbol_to_sections_map)
  symbols = patch_orderfile.GetSymbolsFromOrderfile(orderfile_filename,
                                                    section_to_symbols_map)
  symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
  # Missing symbols is not an error since some of them can be eliminated
  # through inlining.
  (misordered_pairs_count, matched_symbols, _) = _CountMisorderedSymbols(
      symbols, symbol_infos)
  return (misordered_pairs_count > options.threshold) or (matched_symbols == 0)
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  # Exit status is truthy (non-zero) when the orderfile check fails.
  sys.exit(main())
| [
"bino.zh@gmail.com"
] | bino.zh@gmail.com |
a2bed2194305ab6bc2efcb6e7da0d2fcc9b5db94 | f063232b59eb7535e4212ec2b6b477c472fdb56e | /intersection-of-two-linked-lists.py | b97bd5fdd82b5d68474a1634ab27021e37453d30 | [] | no_license | xzjh/OJ_LeetCode | a01d43f6925bb8888bb79ca8a03a75dd8a6eac07 | fa2cfe2ec7774ab4a356520668d5dbee9d63077c | refs/heads/master | 2021-01-20T11:13:36.291125 | 2015-10-01T09:04:47 | 2015-10-01T09:04:47 | 25,239,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param two ListNodes
    # @return the intersected ListNode
    def getIntersectionNode(self, headA, headB):
        """Return the first node shared by both lists, or None.

        Two-pointer walk: each cursor traverses its own list and then the
        other one, so both cover lenA + lenB steps and meet either at the
        intersection node or at None when the lists are disjoint.
        """
        if not headA or not headB:
            return None
        first, second = headA, headB
        while first is not second:
            first = first.next if first is not None else headB
            second = second.next if second is not None else headA
        return first
| [
"jsxzjh@gmail.com"
] | jsxzjh@gmail.com |
ea606e1ffd245c9b3b6dbda9d9727b9c71c0c48f | 7fd898850480206395eba9878ef5316d5bd4dbcf | /Trakttv.bundle/Contents/Code/plex/media_server.py | 6ab79fe9cc16257e514934593086d7ceee34966c | [] | no_license | Qwaint/Plex-Trakt-Scrobbler | cdfbef4566b8db3a05e72a46ae92c655a8f697e5 | 383ffa338ad64e481bd14c71950af42f2f9edd83 | refs/heads/master | 2020-12-11T04:10:13.332420 | 2014-02-01T13:31:58 | 2014-02-01T13:31:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,836 | py | from core.helpers import add_attribute
from core.network import request
# Regular Expressions for GUID parsing
MOVIE_REGEXP = Regex('com.plexapp.agents.*://(?P<imdb_id>tt[-a-z0-9\.]+)')
MOVIEDB_REGEXP = Regex('com.plexapp.agents.themoviedb://(?P<tmdb_id>[0-9]+)')
STANDALONE_REGEXP = Regex('com.plexapp.agents.standalone://(?P<tmdb_id>[0-9]+)')
TVSHOW_REGEXP = Regex(
'com.plexapp.agents.(thetvdb|thetvdbdvdorder|abstvdb|xbmcnfotv|mcm)://'
'(MCM_TV_A_)?' # For Media Center Master
'(?P<tvdb_id>[-a-z0-9\.]+)/'
'(?P<season>[-a-z0-9\.]+)/'
'(?P<episode>[-a-z0-9\.]+)'
)
TVSHOW1_REGEXP = Regex(
'com.plexapp.agents.(thetvdb|thetvdbdvdorder|abstvdb|xbmcnfotv|mcm)://'
'(MCM_TV_A_)?' # For Media Center Master
'(?P<tvdb_id>[-a-z0-9\.]+)'
)
MOVIE_PATTERNS = [
MOVIE_REGEXP,
MOVIEDB_REGEXP,
STANDALONE_REGEXP
]
PMS_URL = 'http://localhost:32400%s' # TODO remove this, replace with PMS.base_url
class PMS(object):
    """Thin HTTP client for a local Plex Media Server (all classmethods).

    Wraps core.network.request with the server's base URL and exposes
    helpers for metadata lookup, active sessions, library sections,
    scrobbling and rating.
    """
    base_url = 'http://localhost:32400'
    @classmethod
    def request(cls, path='/', response_type='xml', raise_exceptions=False, retry=True, timeout=3, **kwargs):
        # Normalize the path and delegate to core.network.request; returns
        # the parsed payload on success, None on failure.
        if not path.startswith('/'):
            path = '/' + path
        response = request(
            cls.base_url + path,
            response_type,
            raise_exceptions=raise_exceptions,
            retry=retry,
            timeout=timeout,
            **kwargs
        )
        return response.data if response else None
    @classmethod
    def metadata(cls, item_id):
        # Prepare a dict that contains all the metadata required for trakt.
        result = cls.request('library/metadata/%s' % item_id)
        if not result:
            return None
        # Only the first <Video> element is consumed: the loop returns on
        # its first iteration.
        for section in result.xpath('//Video'):
            metadata = {}
            # Add attributes if they exist
            add_attribute(metadata, section, 'duration', float, lambda x: int(x / 60000))
            add_attribute(metadata, section, 'year', int)
            add_attribute(metadata, section, 'lastViewedAt', int, target_key='last_played')
            add_attribute(metadata, section, 'viewCount', int, target_key='plays')
            add_attribute(metadata, section, 'type')
            if metadata['type'] == 'movie':
                metadata['title'] = section.get('title')
            elif metadata['type'] == 'episode':
                # For episodes the show name is the grandparent title.
                metadata['title'] = section.get('grandparentTitle')
                metadata['episode_title'] = section.get('title')
            # Add guid match data
            cls.add_guid(metadata, section)
            return metadata
        Log.Warn('Unable to find metadata for item %s' % item_id)
        return None
    @staticmethod
    def add_guid(metadata, section):
        # Parse the agent GUID and merge the captured ids (imdb_id/tmdb_id/
        # tvdb_id/season/episode) into the metadata dict in place.
        guid = section.get('guid')
        if not guid:
            return
        if section.get('type') == 'movie':
            # Cycle through patterns and try get a result
            for pattern in MOVIE_PATTERNS:
                match = pattern.search(guid)
                # If we have a match, update the metadata
                if match:
                    metadata.update(match.groupdict())
                    return
            Log('The movie %s doesn\'t have any imdb or tmdb id, it will be ignored.' % section.get('title'))
        elif section.get('type') == 'episode':
            match = TVSHOW_REGEXP.search(guid)
            # If we have a match, update the metadata
            if match:
                metadata.update(match.groupdict())
            else:
                Log('The episode %s doesn\'t have any tmdb id, it will not be scrobbled.' % section.get('title'))
        else:
            Log('The content type %s is not supported, the item %s will not be scrobbled.' % (
                section.get('type'), section.get('title')
            ))
    @classmethod
    def client(cls, client_id):
        # Look up a connected client <Server> element by machine identifier.
        if not client_id:
            Log.Warn('Invalid client_id provided')
            return None
        result = cls.request('clients')
        if not result:
            return None
        found_clients = []
        for section in result.xpath('//Server'):
            found_clients.append(section.get('machineIdentifier'))
            if section.get('machineIdentifier') == client_id:
                return section
        Log.Info("Unable to find client '%s', available clients: %s" % (client_id, found_clients))
        return None
    @classmethod
    def set_logging_state(cls, state):
        # TODO PUT METHOD
        result = cls.request(':/prefs?logDebug=%s' % int(state), 'text', method='PUT')
        if result is None:
            return False
        Log.Debug('Response: %s' % result)
        return True
    @classmethod
    def get_logging_state(cls):
        # True when the server's logDebug preference is set to "true".
        result = cls.request(':/prefs')
        if result is None:
            return False
        for setting in result.xpath('//Setting'):
            if setting.get('id') == 'logDebug' and setting.get('value'):
                value = setting.get('value').lower()
                return True if value == 'true' else False
        Log.Warn('Unable to determine logging state, assuming disabled')
        return False
    @classmethod
    def get_server_info(cls):
        return cls.request()
    @classmethod
    def get_server_version(cls, default=None):
        # Version attribute from the server root document, or `default`.
        server_info = cls.get_server_info()
        if server_info is None:
            return default
        return server_info.attrib.get('version') or default
    @classmethod
    def get_sessions(cls):
        return cls.request('status/sessions')
    @classmethod
    def get_video_session(cls, session_key):
        # Find the active <Video> session with the given sessionKey whose
        # key points into the library.
        sessions = cls.get_sessions()
        if sessions is None:
            Log.Warn('Status request failed, unable to connect to server')
            return None
        for section in sessions.xpath('//MediaContainer/Video'):
            if section.get('sessionKey') == session_key and '/library/metadata' in section.get('key'):
                return section
        Log.Warn('Session not found')
        return None
    @classmethod
    def get_metadata(cls, key):
        return cls.request('library/metadata/%s' % key)
    @classmethod
    def get_metadata_guid(cls, key):
        # GUID of the first <Directory> in the item's metadata document.
        metadata = cls.get_metadata(key)
        if metadata is None:
            return None
        return metadata.xpath('//Directory')[0].get('guid')
    @classmethod
    def get_metadata_leaves(cls, key):
        return cls.request('library/metadata/%s/allLeaves' % key)
    @classmethod
    def get_sections(cls):
        return cls.request('library/sections')
    @classmethod
    def get_section(cls, name):
        return cls.request('library/sections/%s/all' % name)
    @classmethod
    def get_section_directories(cls, section_name):
        # All <Directory> elements of a section, or [] on request failure.
        section = cls.get_section(section_name)
        if section is None:
            return []
        return section.xpath('//Directory')
    @classmethod
    def get_section_videos(cls, section_name):
        # All <Video> elements of a section, or [] on request failure.
        section = cls.get_section(section_name)
        if section is None:
            return []
        return section.xpath('//Video')
    @classmethod
    def scrobble(cls, video):
        # NOTE(review): XML attributes are strings, so `video.get('viewCount')`
        # presumably returns a str (or None) here and the `> 0` comparison
        # relies on Python 2 cross-type ordering — confirm intended behavior.
        if video.get('viewCount') > 0:
            Log.Debug('video has already been marked as seen')
            return False
        result = cls.request(
            ':/scrobble?identifier=com.plexapp.plugins.library&key=%s' % (
                video.get('ratingKey')
            ),
            response_type='text'
        )
        return result is not None
    @classmethod
    def rate(cls, video, rating):
        # Push a rating for the item; True when the server answered.
        result = cls.request(
            ':/rate?key=%s&identifier=com.plexapp.plugins.library&rating=%s' % (
                video.get('ratingKey'), rating
            ),
            response_type='text'
        )
        return result is not None
| [
"gardiner91@gmail.com"
] | gardiner91@gmail.com |
208f8c6609bfaa29d9f350584a72d47b067aac36 | 011157c49983db38489f26f51db7fe22f8519afc | /problems/812.py | 15ae89e58fec6eb2f61e83be7ee0790ec867c328 | [] | no_license | chasecolford/Leetcode | c0054774d99e7294419039f580c1590495f950b3 | dded74e0c6e7a6c8c8df58bed3640864d0ae3b91 | refs/heads/master | 2023-08-04T11:33:18.003570 | 2021-09-10T21:06:55 | 2021-09-10T21:06:55 | 283,154,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | class Solution:
def largestTriangleArea(self, points: List[List[int]]) -> float:
return max(0.5 * abs(i[0] * j[1] + j[0] * k[1] + k[0] * i[1]- j[0] * i[1] - k[0] * j[1] - i[0] * k[1])
for i, j, k in itertools.combinations(points, 3)) | [
"56804717+ChaseSinify@users.noreply.github.com"
] | 56804717+ChaseSinify@users.noreply.github.com |
b05cf9848fef04d671c3f3771010b9614cef8003 | 741ee09b8b73187fab06ecc1f07f46a6ba77e85c | /AutonomousSourceCode/data/raw/squareroot/8dcd69f5-11fa-4c5f-8331-cd8f0db1aa54__Sarah01.py | b15d6db979b93090c7a22e978745c9ece3dc2371 | [] | no_license | erickmiller/AutomatousSourceCode | fbe8c8fbf215430a87a8e80d0479eb9c8807accb | 44ee2fb9ac970acf7389e5da35b930d076f2c530 | refs/heads/master | 2021-05-24T01:12:53.154621 | 2020-11-20T23:50:11 | 2020-11-20T23:50:11 | 60,889,742 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | def Adder(N1,N2):
MyResult = N1 + N2
return MyResult
def Subtractor(N1,N2):
    # Difference of the two inputs; kept as a named result for symmetry
    # with the other helpers in this script.
    difference = N1 - N2
    return difference
def Main():
X = input("Enter a value for X ---")
Y = input("Enter a value for Y ---")
if (X >= Y):
print "Subtraction happened"
MyResult = Subtractor(X,Y)
else:
print "Addition happened"
MyResult = Adder(X,Y)
Result1 = math.sqrt(MyResult)
print "the square root of ", MyResult, " is ", Result1
return
def Frog():
    # Celebration helper: prints a message and returns None.
    print "Yay!"
    return
| [
"erickmiller@gmail.com"
] | erickmiller@gmail.com |
8e584b42af7aa4d3ca68a587b7979edf6ce05e75 | 0ac6eeac34c65a200d66be256593f3e064ab1a1a | /TagReadWrite/Utils.py | bb6601e96c8acd37b34652192770527b134782da | [] | no_license | flokli/CrossMgr | cc6c2476def8868a9fce14a6f2a08dd5eea79612 | 21d542edacfd89f645a3ebb426fb16635c1f452e | refs/heads/master | 2020-09-26T08:34:21.857072 | 2019-11-30T20:28:53 | 2019-11-30T20:28:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,988 | py | import datetime
import wx
import os
import re
import sys
import math
import socket
import subprocess
timeoutSecs = None
DEFAULT_HOST = None
def GetDefaultHost():
global DEFAULT_HOST
DEFAULT_HOST = socket.gethostbyname(socket.gethostname())
if DEFAULT_HOST == '127.0.0.1':
reSplit = re.compile('[: \t]+')
try:
co = subprocess.Popen(['ifconfig'], stdout = subprocess.PIPE)
ifconfig = co.stdout.read()
for line in ifconfig.split('\n'):
line = line.strip()
try:
if line.startswith('inet addr:'):
fields = reSplit.split( line )
addr = fields[2]
if addr != '127.0.0.1':
DEFAULT_HOST = addr
break
except:
pass
except:
pass
return DEFAULT_HOST
GetDefaultHost()
'''
wx.ICON_EXCLAMATION Shows an exclamation mark icon.
wx.ICON_HAND Shows an error icon.
wx.ICON_ERROR Shows an error icon - the same as wxICON_HAND.
wx.ICON_QUESTION Shows a question mark icon.
wx.ICON_INFORMATION Shows an information (i) icon.
'''
def MessageOK( parent, message, title = '', iconMask = wx.ICON_INFORMATION, pos = wx.DefaultPosition ):
dlg = wx.MessageDialog(parent, message, title, wx.OK | iconMask, pos)
dlg.ShowModal()
dlg.Destroy()
return True
def MessageOKCancel( parent, message, title = '', iconMask = wx.ICON_QUESTION):
dlg = wx.MessageDialog(parent, message, title, wx.OK | wx.CANCEL | iconMask )
response = dlg.ShowModal()
dlg.Destroy()
return True if response == wx.ID_OK else False
def SetValue( st, value ):
if st.GetValue() != value:
st.SetValue( value )
return True
return False
def SetLabel( st, label ):
if st.GetLabel() != label:
st.SetLabel( label )
return True
return False
def formatTime( secs, highPrecision = False ):
if secs is None:
secs = 0
if secs < 0:
sign = '-'
secs = -secs
else:
sign = ''
f, ss = math.modf(secs)
secs = int(ss)
hours = int(secs // (60*60))
minutes = int( (secs // 60) % 60 )
secs = secs % 60
if highPrecision:
decimal = '.%02d' % int( f * 100 )
else:
decimal = ''
if hours > 0:
return "%s%d:%02d:%02d%s" % (sign, hours, minutes, secs, decimal)
else:
return "%s%02d:%02d%s" % (sign, minutes, secs, decimal)
def formatTimeGap( secs, highPrecision = False ):
if secs is None:
secs = 0
if secs < 0:
sign = '-'
secs = -secs
else:
sign = ''
f, ss = math.modf(secs)
secs = int(ss)
hours = int(secs // (60*60))
minutes = int( (secs // 60) % 60 )
secs = secs % 60
if highPrecision:
decimal = '.%02d' % int( f * 100 )
else:
decimal = ''
if hours > 0:
return "%s%dh%d'%02d%s\"" % (sign, hours, minutes, secs, decimal)
else:
return "%s%d'%02d%s\"" % (sign, minutes, secs, decimal)
def formatTimeCompressed( secs, highPrecision = False ):
f = formatTime( secs, highPrecision )
if f[0] == '0':
return f[1:]
return f
def formatDate( date ):
y, m, d = date.split('-')
d = datetime.date( int(y,10), int(m,10), int(d,10) )
return d.strftime( '%B %d, %Y' )
def StrToSeconds( str = '' ):
secs = 0.0
for f in str.split(':'):
try:
n = float(f)
except ValueError:
n = 0.0
secs = secs * 60.0 + n
return secs
def SecondsToStr( secs = 0 ):
secs = int(secs)
return '%02d:%02d:%02d' % (secs // (60*60), (secs // 60)%60, secs % 60)
def SecondsToMMSS( secs = 0 ):
secs = int(secs)
return '%02d:%02d' % ((secs // 60)%60, secs % 60)
def ordinal( value ):
"""
Converts zero or a *postive* integer (or their string
representations) to an ordinal value.
>>> for i in range(1,13):
... ordinal(i)
...
'1st'
'2nd'
'3rd'
'4th'
'5th'
'6th'
'7th'
'8th'
'9th'
'10th'
'11th'
'12th'
>>> for i in (100, '111', '112',1011):
... ordinal(i)
...
'100th'
'111th'
'112th'
'1011th'
"""
try:
value = int(value)
except ValueError:
return value
if (value % 100)//10 != 1:
return "%d%s" % (value, ['th','st','nd','rd','th','th','th','th','th','th'][value%10])
return "%d%s" % (value, "th")
def getHomeDir():
sp = wx.StandardPaths.Get()
homedir = sp.GetUserDataDir()
try:
if os.path.basename(homedir) == '.CrossMgr':
homedir = os.path.join( os.path.dirname(homedir), '.CrossMgrApp' )
except:
pass
if not os.path.exists(homedir):
os.makedirs( homedir )
return homedir
def getDocumentsDir():
sp = wx.StandardPaths.Get()
return sp.GetDocumentsDir()
#------------------------------------------------------------------------
try:
dirName = os.path.dirname(os.path.abspath(__file__))
except:
dirName = os.path.dirname(os.path.abspath(sys.argv[0]))
if os.path.basename(dirName) == 'library.zip':
dirName = os.path.dirname(dirName)
if 'CrossMgr?' in os.path.basename(dirName):
dirName = os.path.dirname(dirName)
if os.path.isdir( os.path.join(dirName, 'CrossMgrImages') ):
pass
elif os.path.isdir( '/usr/local/CrossMgrImages' ):
dirName = '/usr/local'
imageFolder = os.path.join(dirName, 'CrossMgrImages')
htmlFolder = os.path.join(dirName, 'CrossMgrHtml')
helpFolder = os.path.join(dirName, 'CrossMgrHtmlDoc')
def getDirName(): return dirName
def getImageFolder(): return imageFolder
def getHtmlFolder(): return htmlFolder
def getHelpFolder(): return helpFolder
#------------------------------------------------------------------------
def disable_stdout_buffering():
fileno = sys.stdout.fileno()
temp_fd = os.dup(fileno)
sys.stdout.close()
os.dup2(temp_fd, fileno)
os.close(temp_fd)
sys.stdout = os.fdopen(fileno, "w", 0)
def readDelimitedData( s, delim ):
buffer = s.recv( 4096 )
while 1:
nl = buffer.find( delim )
if nl >= 0:
yield buffer[:nl]
buffer = buffer[nl+len(delim):]
else:
more = s.recv( 4096 )
if more:
buffer = buffer + more
else:
break
yield buffer
#------------------------------------------------------------------------------------------------
reIP = re.compile( '^[0-9.]+$' )
def GetAllIps():
addrInfo = socket.getaddrinfo( socket.gethostname(), None )
ips = []
for a in addrInfo:
try:
ip = a[4][0]
except:
continue
if reIP.search(ip):
ips.append( ip )
return ips
| [
"edward.sitarski@gmail.com"
] | edward.sitarski@gmail.com |
c4dae4c9dd4656c58295539cf766d16b915310a7 | b122b723c2fbadef6f19e8c9ec4e485d48c03dec | /Python/Binary Tree Level Order Traversal II.py | 80a09e56530fae38885f1f95c1f7d46bc5644087 | [] | no_license | zhanglintc/leetcode | 5ba3977172679fde8cdcd3f4940057d55d8112eb | 8edbd2fbad8b10a497c7a10e8cd09cc91eeba079 | refs/heads/master | 2020-12-13T08:49:57.244106 | 2018-09-06T13:52:43 | 2018-09-06T13:52:43 | 18,562,111 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | # Binary Tree Level Order Traversal II
# for leetcode problems
# 2014.09.04 by zhanglin
# Problem Link:
# https://leetcode.com/problems/binary-tree-level-order-traversal-ii/
# Problem:
# Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).
# For example:
# Given binary tree {3,9,20,#,#,15,7},
# 3
# / \
# 9 20
# / \
# 15 7
# return its bottom-up level order traversal as:
# [
# [15,7],
# [9,20],
# [3]
# ]
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a list of lists of integers
def levelOrderBottom(self, root):
dikt = {}
self.levelOrderBottom_helper(root, 1, dikt)
lst = []
for i in dikt:
lst.append(dikt[i])
return lst[::-1] # the only different from "Binary Tree Level Order Traversal"
def levelOrderBottom_helper(self, root, dept, dikt):
if root == None:
return root
if dept not in dikt:
dikt[dept] = []
dikt[dept].append(root.val)
self.levelOrderBottom_helper(root.left, dept + 1, dikt)
self.levelOrderBottom_helper(root.right, dept + 1, dikt)
| [
"zhanglintc623@gmail.com"
] | zhanglintc623@gmail.com |
db56147bf913f8b9dbc17b88ca38061e95d481cc | 628ec414b7807fc50de67345361e41cc68ba3720 | /mayan/apps/sources/serializers.py | 5da2f6af0ab247cf37c2dbf2e97eaeccfabfbd1c | [
"Apache-2.0"
] | permissive | TestingCodeReview/Mayan-EDMS | aafe144424ffa8128a4ff7cee24d91bf1e1f2750 | d493ec34b2f93244e32e1a2a4e6cda4501d3cf4e | refs/heads/master | 2020-05-27T23:34:44.118503 | 2019-04-05T02:04:18 | 2019-04-05T02:04:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | from __future__ import unicode_literals
import logging
from rest_framework import serializers
from rest_framework.reverse import reverse
from .models import StagingFolderSource, WebFormSource
logger = logging.getLogger(__name__)
class StagingFolderFileSerializer(serializers.Serializer):
filename = serializers.CharField(max_length=255)
image_url = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
def get_image_url(self, obj):
return reverse(
'stagingfolderfile-image-view',
args=(obj.staging_folder.pk, obj.encoded_filename,),
request=self.context.get('request')
)
def get_url(self, obj):
return reverse(
'stagingfolderfile-detail',
args=(obj.staging_folder.pk, obj.encoded_filename,),
request=self.context.get('request')
)
class StagingFolderSerializer(serializers.HyperlinkedModelSerializer):
files = serializers.SerializerMethodField()
class Meta:
fields = ('files',)
model = StagingFolderSource
def get_files(self, obj):
try:
return [
StagingFolderFileSerializer(entry, context=self.context).data for entry in obj.get_files()
]
except Exception as exception:
logger.error('unhandled exception: %s', exception)
return []
class WebFormSourceSerializer(serializers.Serializer):
class Meta:
model = WebFormSource
class NewDocumentSerializer(serializers.Serializer):
source = serializers.IntegerField()
document_type = serializers.IntegerField(required=False)
description = serializers.CharField(required=False)
expand = serializers.BooleanField(default=False)
file = serializers.FileField()
filename = serializers.CharField(required=False)
use_file_name = serializers.BooleanField(default=False)
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
1828f84475f59d71a7e93bde5e4b60ce50d63686 | 6bec763c8553ad9e85bef147014b2ddcc934dde0 | /access_control/models/permission_create.py | 8a436eaf9ab1837d186a20136e7c458b25939706 | [
"BSD-3-Clause"
] | permissive | girleffect/core-management-layer | a0257d73c562ef89d38762aa6a4de892c4fc995c | 22eda532984616cf92b07bfdd9a1fffaee6c813c | refs/heads/develop | 2021-07-18T09:40:44.172628 | 2019-01-31T13:04:27 | 2019-01-31T13:04:27 | 112,724,847 | 0 | 1 | BSD-3-Clause | 2019-01-31T11:09:29 | 2017-12-01T10:04:04 | Python | UTF-8 | Python | false | false | 4,234 | py | # coding: utf-8
"""
Access Control API
# The Access Control API ## Overview The Access Control API is an API exposed to other core components. It uses an API Key in an HTTP header to perform authentication and authorisation. Most of the API calls facilitates CRUD of the entities defined in the Access Control component. Others calls allows the retrieval of information in a form that is convenient for other components (most notably the Management Layer) to consume. # noqa: E501
OpenAPI spec version:
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PermissionCreate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'description': 'str'
}
attribute_map = {
'name': 'name',
'description': 'description'
}
def __init__(self, name=None, description=None): # noqa: E501
"""PermissionCreate - a model defined in OpenAPI""" # noqa: E501
self._name = None
self._description = None
self.discriminator = None
self.name = name
if description is not None:
self.description = description
@property
def name(self):
"""Gets the name of this PermissionCreate. # noqa: E501
:return: The name of this PermissionCreate. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this PermissionCreate.
:param name: The name of this PermissionCreate. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 50:
raise ValueError("Invalid value for `name`, length must be less than or equal to `50`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this PermissionCreate. # noqa: E501
:return: The description of this PermissionCreate. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this PermissionCreate.
:param description: The description of this PermissionCreate. # noqa: E501
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PermissionCreate):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"cobus.carstens@gmail.com"
] | cobus.carstens@gmail.com |
b49f37735e7b3e05cbe5e2ddc69c4518b3e7cdba | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/service-integration/tests/test_osparc_config.py | 77348df499a6c482c24c537408717bc0bf04e8c1 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 3,130 | py | # pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
from pathlib import Path
from pprint import pformat
from typing import Any
import pytest
import yaml
from models_library.service_settings_labels import SimcoreServiceSettingLabelEntry
from service_integration.osparc_config import MetaConfig, RuntimeConfig, SettingsItem
@pytest.fixture
def labels(tests_data_dir: Path, labels_fixture_name: str) -> dict[str, str]:
data = yaml.safe_load((tests_data_dir / "docker-compose-meta.yml").read_text())
service_name = {
"legacy": "dy-static-file-server",
"service-sidecared": "dy-static-file-server-dynamic-sidecar",
"compose-sidecared": "dy-static-file-server-dynamic-sidecar-compose-spec",
"rocket": "rocket",
}
labels_annotations = data["services"][service_name[labels_fixture_name]]["build"][
"labels"
]
# patch -> replaces some environs
if compose_spec := labels_annotations.get("simcore.service.compose-spec"):
if compose_spec == "${DOCKER_COMPOSE_SPECIFICATION}":
labels_annotations["simcore.service.compose-spec"] = json.dumps(
yaml.safe_load((tests_data_dir / "compose-spec.yml").read_text())
)
return labels_annotations
@pytest.mark.parametrize(
"labels_fixture_name",
["legacy", "service-sidecared", "compose-sidecared", "rocket"],
)
def test_load_from_labels(
labels: dict[str, str], labels_fixture_name: str, tmp_path: Path
):
meta_cfg = MetaConfig.from_labels_annotations(labels)
runtime_cfg = RuntimeConfig.from_labels_annotations(labels)
print(meta_cfg.json(exclude_unset=True, indent=2))
print(runtime_cfg.json(exclude_unset=True, indent=2))
# create yamls from config
for model in (runtime_cfg, meta_cfg):
config_path = (
tmp_path / f"{model.__class__.__name__.lower()}-{labels_fixture_name}.yml"
)
with open(config_path, "wt") as fh:
data = json.loads(
model.json(exclude_unset=True, by_alias=True, exclude_none=True)
)
yaml.safe_dump(data, fh, sort_keys=False)
# reload from yaml and compare
new_model = model.__class__.from_yaml(config_path)
assert new_model == model
@pytest.mark.parametrize(
"example_data", SimcoreServiceSettingLabelEntry.Config.schema_extra["examples"]
)
def test_settings_item_in_sync_with_service_settings_label(
example_data: dict[str, Any]
):
print(pformat(example_data))
# First we parse with SimcoreServiceSettingLabelEntry since it also supports backwards compatibility
# and will upgrade old version
example_model = SimcoreServiceSettingLabelEntry.parse_obj(example_data)
# SettingsItem is exclusively for NEW labels, so it should not support backwards compatibility
new_model = SettingsItem(
name=example_model.name,
type=example_model.setting_type,
value=example_model.value,
)
# check back
SimcoreServiceSettingLabelEntry.parse_obj(new_model.dict(by_alias=True))
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
68be7d203cae104cbd639acae6fa2fd0e9babfc9 | d602881821bf49fe9ac246b9cc58e46440314725 | /src/utils.py | e1081d7a31a5597ee1b0573aba279a050dc1183c | [
"MIT"
] | permissive | nilax97/HTML2LaTeX-Convertor | 46775ab8b870d7ab609a92fa071efa1e54db22d2 | a7bdc8f53773f920dd6291575c77ecffee910fdf | refs/heads/master | 2022-06-12T04:04:00.584387 | 2020-05-05T08:03:38 | 2020-05-05T08:03:38 | 247,183,707 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | def node_from_tag(text):
clean = text.split("<")[1].split(">")[0]
if(clean[-1]=='/'):
clean = clean[:-2]
elif(clean[0]=='/'):
clean = clean[1:]
clean = clean.split()
tag = clean[0].upper()
attr = []
values = []
i = 0
while(True and len(clean)>1):
i = i + 1
if(i==len(clean)):
break
if "=" in clean[i]:
temp = clean[i].split("=")
attr.append(temp[0].strip())
if(temp[1]!=""):
values.append(temp[1].replace("\"","").replace("\'","").strip())
else:
values.append(clean[i+1].replace("\"","").replace("\'","").strip())
i = i+1
else:
attr.append(clean[i])
temp = clean[i+1].split("=")
if(temp[1]!=""):
values.append(temp[1].replace("\"","").replace("\'","").strip())
i = i+1
else:
values.append(clean[i+2].replace("\"","").replace("\'","").strip())
i = i+2
return tag,attr,values | [
"agarwal.nilaksh@gmail.com"
] | agarwal.nilaksh@gmail.com |
3dac9cd531fdff6070c6f84ff2603f3c5ed04258 | 01bb8cdc7b8a0baa6e345e5bdc1a663b2a44a3f7 | /Chapter_7_User_Input_And_While_Loop/Practice2/9.counting.py | 3262a0a4021f7ba91a7f0a92bad8ee7c7ce740fa | [] | no_license | rishabhchopra1096/Python_Crash_Course_Code | 1e8076b38f89565fad9d9b68de879a972b0c96c8 | c1e1b0c90371d5913c201f46a0c8ceaec19b5af0 | refs/heads/master | 2021-04-25T21:25:37.351986 | 2017-11-03T08:11:13 | 2017-11-03T08:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | c_n = 0
while c_n < 10:
c_n += 1
if c_n % 2 == 0:
continue
else:
print(c_n)
# Starting from 0 , we add 1 and then
# We check weather the number is even or not.
# 1 is noteven , so it is printed.
# Loop starts again , 1 is added to 1 = 2
# If the number is even , we go back to the first line of the loop.
# The loop is entered as 2 < 10.
# We add 1 to 2 , which becomes 3.
| [
"noreply@github.com"
] | rishabhchopra1096.noreply@github.com |
76831e371c436f3e90d22d6a2e80b3045e8d2c8f | 16fe74651e6692ea3d8d0302b40ac42f3d58e0ca | /minimum_height_trees.py | 56664999a98a2eefaa414b7e83a4f5222312baa2 | [
"MIT"
] | permissive | Ahmed--Mohsen/leetcode | 7574f71b10dfb9582f62e856bbc2559d3b21b2a1 | ad8967a5d85ac54f53b3fcce04df1b4bdec5fd9e | refs/heads/master | 2021-01-18T14:34:06.987665 | 2015-12-23T21:17:27 | 2015-12-23T21:17:27 | 33,744,104 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,530 | py | # -*- coding: utf-8 -*-
"""
For a undirected graph with tree characteristics, we can choose any node as the root. The result graph is then a rooted tree. Among all possible rooted trees, those with minimum height are called minimum height trees (MHTs). Given such a graph, write a function to find all the MHTs and return a list of their root labels.
Format
The graph contains n nodes which are labeled from 0 to n - 1. You will be given the number n and a list of undirected edges (each edge is a pair of labels).
You can assume that no duplicate edges will appear in edges. Since all edges are undirected, [0, 1] is the same as [1, 0] and thus will not appear together in edges.
Example 1:
Given n = 4, edges = [[1, 0], [1, 2], [1, 3]]
0
|
1
/ \
2 3
return [1]
Example 2:
Given n = 6, edges = [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]]
0 1 2
\ | /
3
|
4
|
5
return [3, 4]
Hint:
How many MHTs can a graph have at most?
Note:
(1) According to the definition of tree on Wikipedia: “a tree is an undirected graph in which any two vertices are connected by exactly one path. In other words, any connected graph without simple cycles is a tree.”
(2) The height of a rooted tree is the number of edges on the longest downward path between the root and a leaf.
"""
class Solution(object):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
def findMinHeightTrees(self, n, edges):
# the idea is to move from the leave nodes and move
# in-ward till we end up with either one or two roots
# same idea as topological sort
# base case
if n == 1: return [0]
# keep track of the the undirected edges
adj = [set() for i in range(n)]
for i, j in edges:
adj[i].add(j)
adj[j].add(i)
# leaves are those nodes that have in-degree of length 1
leaves = [i for i in range(n) if len(adj[i]) == 1]
# do BFS topological sorting
while n > 2:
n -= len(leaves)
# next level to the current leaves
next_leaves = []
# visit all neighbors to each leave
for i in leaves:
# no need to visit all i neighbors, we are only insterested
# in the shortest path so any neighbor is valid
j = adj[i].pop()
adj[j].remove(i)
# new leave found
if len(adj[j]) == 1:
next_leaves.append(j)
# set next level to be visited
leaves = next_leaves
return leaves
s = Solution()
print s.findMinHeightTrees(4, [[1,0],[1,2],[1,3]]) | [
"ahmed7890@gmail.com"
] | ahmed7890@gmail.com |
f4905f97c634eff4c0a17d3953c22ba496f165dd | af177f43b9e879b849cae739073bb63d2fae96f5 | /Core/migrations/0013_alter_book_isbn_number.py | a6698229dc641a5774d3eddfd67cbac6e5601dcb | [] | no_license | conradylx/STX_Library | 9d5ac5399f7d9402c00908f13712d228cfd9b412 | 7edffe5d7c433bbe4e163d664706bba5f15918b8 | refs/heads/master | 2023-07-12T01:13:01.189052 | 2021-08-17T12:36:53 | 2021-08-17T12:36:53 | 390,417,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.2.5 on 2021-08-01 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Core', '0012_auto_20210731_1434'),
]
operations = [
migrations.AlterField(
model_name='book',
name='isbn_number',
field=models.CharField(max_length=40, verbose_name='ISBN'),
),
]
| [
"50596942+conradylx@users.noreply.github.com"
] | 50596942+conradylx@users.noreply.github.com |
972772172fb486be96c1e5a2785a3676c73ab5c0 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/A_02/save_model.py | eec597448c0a8c70db1c341abf5a7e609ebe8f37 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # save and load an ARIMA model that causes an error
from pandas import read_csv
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults
# load data
series = read_csv('daily-total-female-births.csv', header=0, index_col=0, parse_dates=True, squeeze=True)
# prepare data
X = series.values
X = X.astype('float32')
# fit model
model = ARIMA(X, order=(1, 1, 1))
model_fit = model.fit()
# save model
model_fit.save('model.pkl')
# load model
loaded = ARIMAResults.load('model.pkl')
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
8a874264aad962b142b5d59bcc9e2b52791eec44 | 50f4d2bb1b1222bcb2eb0122c48a0dd254deddfc | /Algorithm Concept/Quicksort.py | 4e4c886e297de61f0a86b4de7d19a28a4d1c6f92 | [] | no_license | yejinee/Algorithm | 9ae1c40382e9dcd868a28d42fe1cc543b790c7f5 | 81d409c4d0ea76cf152a5f334e53a870bc0656a7 | refs/heads/master | 2023-04-13T01:29:44.579635 | 2023-04-05T06:23:26 | 2023-04-05T06:23:26 | 235,014,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | """
Q) NO.2751 수 정렬하기2 - 퀵정렬로 해결
[Heap Sort]
-Time complexity
(1) Worst case= O(n^2)
(2) Average case = O(nlogn)
(1) pivot을 정하고 고정시키는 방식으로 정렬
(2) pivot을 기준으로 left, right부분으로 나눠서 이 과정을 반복
- Partition process
: list에서 제일 첫번째 원소를 pivot으로 정하고 pivot을 고정시키는 과정
<순서>
1. i은 첫번째 원소, j는 마지막 원소 부터 시작
2.
(1) i의 경우 : 하나씩 index을 늘려가면서 pivot보다 큰 값 나오면 stop
(2) j의 경우 : 하나씩 index를 줄여가면서 pivot보다 작은 값나오면 stop
3.
IF i<j : i가 가리키는 값과 j가 가리키는 값을 서로 바꿈
ELSE: pivot값과 j가 가리키는 값을 서로 바꾸고 pivot을 고정시킨다.
"""
def partition(Arr,l,h):
pivot=Arr[l]
i=l
j=h
count=0
while i<j:
while Arr[i]<=pivot and i<h:
i+=1
while Arr[j]>=pivot and j>l:
j-=1
if i<j:
Arr[i],Arr[j]=Arr[j],Arr[i]
Arr[l], Arr[j]=Arr[j],Arr[l]
return Arr,j
def quicksort(Arr,l, h):
if l<h:
Arr,fix=partition(Arr,l,h)
quicksort(Arr,l,fix-1)
quicksort(Arr,fix+1,h)
A=[15,22,13,27,12,10,20,25]
quicksort(A,0,len(A)-1)
print(A)
| [
"kimyj9609@gmail.com"
] | kimyj9609@gmail.com |
241bfa7d4854c270c2e7607c981567bfa15c8063 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_lactated.py | e6bcd5dc984c751dcdeb351346c6cd15ead3cc72 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _LACTATED():
def __init__(self,):
self.name = "LACTATED"
self.definitions = lactate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lactate']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
889aa45605c9e36c5387d79620b32ed507e15049 | 1ab243320bc1f1ee9dde3b0a1e3f1a418e6d5299 | /apps/save/views.py | 00b3d01a4bc5ea6a4679803c23838e86289c276f | [] | no_license | sjweil9/Djangos_Dungeon | a6bdd9fd4b8012ebdfb14caf2de41635a53ab089 | 2d2f1ceceddd6200db3de591c8f93926d973a704 | refs/heads/master | 2021-09-05T07:05:17.403789 | 2018-01-25T02:18:07 | 2018-01-25T02:18:07 | 108,317,359 | 0 | 1 | null | 2018-01-25T02:17:13 | 2017-10-25T19:39:19 | JavaScript | UTF-8 | Python | false | false | 4,624 | py | from django.shortcuts import render, redirect, HttpResponse
from .models import *
from django.contrib import messages
import json, ast
def game(req):
return render(req, 'save/game.html')
def signin(req):
if 'id' in req.session:
return redirect('/dashboard')
return render(req, 'save/signin.html')
def save(req):
if req.method == "POST":
if 'id' not in req.session:
return redirect('/')
if 'char' not in req.session:
return redirect('/dashboard')
Character.objects.save_char(req.session['char'], req.POST)
return HttpResponse()
def load(req):
if 'id' not in req.session:
return redirect('/')
if 'char' not in req.session:
return redirect('/dashboard')
loaded = Character.objects.get(id=req.session['char'])
if not loaded.character:
data = {'name': req.session['charname']}
data = json.dumps(data)
return HttpResponse(data, content_type='application/json')
character = loaded.character
return HttpResponse(character, content_type='application/json')
def register(req):
if req.method == "POST":
errors = User.objects.validate_user(req.POST)
if len(errors):
for tag, error in errors.iteritems():
messages.error(req, error, extra_tags=tag)
else:
user = User.objects.create_user(req.POST)
if user:
req.session['id'] = user.id
return redirect('/intro')
else:
messages.error(req, "That username is already taken!", extra_tags="usernamelen")
req.session['status'] = "register"
return redirect('/')
def login(req):
if req.method == "POST":
res = User.objects.login(req.POST)
if res['status']:
req.session['id'] = res['user'].id
return redirect('/dashboard')
else:
messages.error(req, res['error'], extra_tags="login")
req.session['status'] = "login"
return redirect('/')
def logout(req):
req.session.clear()
return redirect('/')
def intro(req):
if 'id' not in req.session:
return redirect('/')
return render(req, 'save/intro.html')
def dashboard(req):
if 'id' not in req.session:
return redirect('/')
context = {
'user': User.objects.get(id=req.session['id']),
'characters': [],
'totalchars': []
}
characters = list(Character.objects.filter(user=context['user']))
for character in characters:
if character.character:
loaded = json.loads(character.character)
loaded['id'] = character.id
context['characters'].append(loaded)
totalchars = list(Character.objects.all())
for character in totalchars:
if character.character:
loaded = json.loads(character.character)
loaded['id'] = character.id
context['totalchars'].append(loaded)
context['totalchars'] = sorted(context['totalchars'], key=lambda k: k['xp'], reverse=True)
return render(req, 'save/dashboard.html', context)
def newchar(req):
if 'id' not in req.session:
return redirect('/')
if req.method == "POST":
new = Character.objects.new_char(req.session['id'])
if new:
req.session['char'] = new.id
req.session['charname'] = req.POST['charname']
return redirect('/game')
else:
messages.error(req, "You may only have 3 characters. Please delete one to make another.", extra_tags="char")
return redirect('/dashboard')
def start(req, charid):
if 'id' not in req.session:
return redirect('/')
if Character.objects.filter(id=charid).exists():
character = Character.objects.get(id=charid)
if character.user.id == req.session['id']:
req.session['char'] = character.id
return redirect('/game')
return redirect('/dashboard')
def character(req, charid):
if 'id' not in req.session:
return redirect('/')
if Character.objects.filter(id=charid).exists():
loaded = Character.objects.get(id=charid)
chardata = json.loads(loaded.character)
return render(req, 'save/char.html', {'character': chardata})
return redirect('/dashboard')
def delete(req, charid):
if 'id' not in req.session:
return redirect('/')
if Character.objects.filter(id=charid).exists():
loaded = Character.objects.get(id=charid)
if loaded.user.id == req.session['id']:
Character.objects.delete_char(charid)
return redirect('/dashboard') | [
"stephen.weil@gmail.com"
] | stephen.weil@gmail.com |
13be5404a48b9269fddcac2f7bcee5c857ff102f | fd0d8b010d45f959f0660afb192c7349e266a329 | /competitive/AtCoder/ABC216/B.py | 079182f20988da04944c1de663e36a16a784ef31 | [
"MIT"
] | permissive | pn11/benkyokai | b650f5957545fdefbea7773aaae3f61f210f69ce | 9ebdc46b529e76b7196add26dbc1e62ad48e72b0 | refs/heads/master | 2023-01-28T01:38:29.566561 | 2021-10-03T04:20:14 | 2021-10-03T04:20:14 | 127,143,471 | 0 | 0 | MIT | 2023-01-07T07:19:05 | 2018-03-28T13:20:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,168 | py | import bisect
from collections import deque
from copy import deepcopy
from fractions import Fraction
from functools import reduce
import heapq as hq
import io
from itertools import combinations, permutations
import math
from math import factorial
import re
import sys
sys.setrecursionlimit(10000)
#from numba import njit
import numpy as np
_INPUT_1 = """\
3
tanaka taro
sato hanako
tanaka taro
"""
_INPUT_2 = """\
3
saito ichiro
saito jiro
saito saburo
"""
_INPUT_3 = """\
4
sypdgidop bkseq
sypdgidopb kseq
ozjekw mcybmtt
qfeysvw dbo
"""
def solve():
N = int(input())
D = {}
for _ in range(N):
s, t = [x for x in input().split()]
S = D.get(s, set())
S.add(t)
D[s] = S
num = 0
for k, v in D.items():
num += len(v)
if num != N:
print('Yes')
else:
print('No')
if __file__ != './Main.py':
if '_INPUT_1' in globals():
sys.stdin = io.StringIO(_INPUT_1)
solve()
if '_INPUT_2' in globals():
sys.stdin = io.StringIO(_INPUT_2)
solve()
if '_INPUT_3' in globals():
sys.stdin = io.StringIO(_INPUT_3)
solve()
else:
solve()
| [
"pn11@users.noreply.github.com"
] | pn11@users.noreply.github.com |
84dab7469a92a1687a7871963fbe15489cf73d99 | cefab48dff8fc40786f0a45f3df272646365e9f5 | /python/magnetics/probe_g2.py | d0e6c52d313b1d86545c376d95c08c12931dea54 | [] | no_license | shaunhaskey/pyMARS | d40265bd2d445f0429ae7177f2e75d83f0ba8b30 | e2424088492a8ab2f34acf62db42a77e44d5bc3b | refs/heads/master | 2020-12-25T17:24:28.392539 | 2016-08-01T22:14:27 | 2016-08-01T22:14:27 | 17,684,575 | 0 | 0 | null | 2014-03-13T03:41:59 | 2014-03-12T21:21:08 | Python | UTF-8 | Python | false | false | 4,291 | py | '''
Appears to be older than probe_g.py - this one doesn't contain my Biot-Savart calcuations
Just does probe_g and MARS comparison.
SH:14Sept2012
'''
import numpy as num
import os, time
import PythonMARS_funcs as pyMARS
import results_class as res
import RZfuncs
N = 6; n = 2; I = num.array([1.,-1.,0.,1,-1.,0.])
#print 'phi_location %.2f deg'%(phi_location)
template_dir = '/u/haskeysr/mars/templates/PROBE_G_TEMPLATE/'
base_run_dir = '/u/haskeysr/PROBE_G_RUNS/'
project_name = 'phi_scan/'
run_dir = base_run_dir + project_name
print run_dir
os.system('mkdir '+run_dir)
os.system('cp -r ' + template_dir +'* ' + run_dir)
print 'go to new directory'
os.chdir(run_dir + 'PROBE_G')
probe_g_template = file('probe_g.in', 'r')
probe_g_template_txt = probe_g_template.read()
probe_g_template.close()
diiid = file('diiid.in', 'r')
diiid_txt = diiid.read()
diiid.close()
#a, b = coil_responses6(1,1,1,1,1,1,Navg=120,default=1)
probe = [ '67A', '66M', '67B', 'ESL', 'ISL','UISL','LISL','Inner_pol','Inner_rad']
# probe type 1: poloidal field, 2: radial field
probe_type = num.array([ 1, 1, 1, 0, 0, 0, 0, 1,0])
# Poloidal geometry
Rprobe = num.array([ 2.265, 2.413, 2.265, 2.477, 2.431, 2.300, 2.300,1.,1.])
Zprobe = num.array([ 0.755, 0.0,-0.755, 0.000, 0.000, 0.714,-0.714,0.,0.])
tprobe = num.array([ -67.5, -90.0,-112.5, 0.000, 0.000, 22.6, -22.6,-90.,0.])*2*num.pi/360 #DTOR # poloidal inclination
lprobe = num.array([ 0.155, 0.140, 0.155, 1.194, 0.800, 0.680, 0.680, 0.05,0.05]) # Length of probe
probe_name = 'UISL'
k = probe.index(probe_name)
#Generate interpolation points
Rprobek, Zprobek = pyMARS.pickup_interp_points(Rprobe[k], Zprobe[k], lprobe[k], tprobe[k], probe_type[k], 800)
#Generate the points string and modify the .in file
r_flattened = Rprobek.flatten()
z_flattened = Zprobek.flatten()
phi_flattened = num.linspace(0,360,num=800)
r_flattened = phi_flattened * 0 + num.average(Rprobek.flatten())
z_flattened = phi_flattened * 0 + num.average(Zprobek.flatten())
print 'r',r_flattened
print 'z', z_flattened
print 'phi', phi_flattened
points_string = ''
print len(r_flattened)
for i in range(0,len(r_flattened)):
points_string+='%.3f %.3f %.3f\n'%(r_flattened[i], phi_flattened[i], z_flattened[i])
changes = {'<<npts>>' : str(len(r_flattened)),
'<<points>>' : points_string}
for tmp_key in changes.keys():
probe_g_template_txt = probe_g_template_txt.replace(tmp_key, changes[tmp_key])
probe_g_template = file('probe_g.in', 'w')
probe_g_template.write(probe_g_template_txt)
probe_g_template.close()
diiid_changes = {'<<upper>>': '1000 -1000 0 1000 -1000 0',
'<<lower>>': '1000 -1000 0 1000 -1000 0'}
for tmp_key in diiid_changes:
diiid_txt = diiid_txt.replace(tmp_key, diiid_changes[tmp_key])
diiid = file('diiid.in', 'w')
diiid.write(diiid_txt)
diiid.close()
#run probe_g
os.system('./probe_g')
#Read the output file
results = num.loadtxt('probe_gb.out', skiprows=8)
B_R = results[:,4]
B_phi =results[:,3]
B_Z= results[:,5]
phi_out = results[:,0]
R_out = results[:,1]
Z_out = results[:,2]
'''
print 'get the answer from MARS'
I0EXP = RZfuncs.I0EXP_calc(N,n,I)
base_dir = '/u/haskeysr/mars/grid_check10/qmult1.000/exp1.000/marsrun/'
Nchi=513
#plas_run = res.data(base_dir + 'RUNrfa.p', I0EXP = I0EXP, Nchi=Nchi)
vac_run = res.data(base_dir + 'RUNrfa.vac', I0EXP = I0EXP, Nchi=Nchi)
grid_r = vac_run.R*vac_run.R0EXP
grid_z = vac_run.Z*vac_run.R0EXP
'''
import matplotlib.pyplot as pt
fig = pt.figure()
ax = fig.add_subplot(111)
ax.plot(phi_flattened, B_R*10000., 'r-')
ax.plot(phi_flattened, B_Z*10000., 'k-')
fig.canvas.draw()
fig.show()
'''
Brprobek, Bzprobek = pyMARS.pickup_field_interpolation(grid_r, grid_z, vac_run.Br, vac_run.Bz, vac_run.Bphi, num.array(Rprobek), num.array(Zprobek))
import matplotlib.pyplot as pt
fig = pt.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax.plot(Rprobek, num.abs(Brprobek), 'b-')
ax2.plot(Rprobek, num.abs(B_R*10000), 'b--')
ax.plot(Zprobek, num.abs(Bzprobek), 'k-')
ax2.plot(Zprobek, num.abs(B_Z*10000), 'k--')
fig.canvas.draw()
fig.show()
fig = pt.figure()
ax = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax.plot(Rprobek, R_out, 'b-')
ax2.plot(Zprobek, Z_out, 'b--')
fig.canvas.draw()
fig.show()
'''
| [
"shaunhaskey@gmail.com"
] | shaunhaskey@gmail.com |
d9f4f1cea64200274bfa01806a671624371f6713 | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /simulationsOFC/pareto2/arch5_pod100_old/copyfile.py | bf351a7f1acedff879227fffabadbccab5ef7f28 | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,672 | py | # -*- coding: utf-8 -*-
"""
Created on Sat May 28 23:27:05 2016
@author: li
"""
import os
#dirname = os.path.dirname(os.path.realpath(__file__))
dirname=''
for i in range(20):
src = os.path.join(dirname, 'template_runsimu_connections.py')
dst = 'pareto'+str(i)+'.py'
dst = os.path.join(dirname, dst)
newline = "i = "+str(i)+" \n"
destination = open( dst, "w" )
source = open( src, "r" )
for l, line in enumerate(source):
if l!=22:
destination.write(line)
else:
destination.write(newline)
source.close()
destination.close()
# bash files
for i in range(20):
src = os.path.join(dirname, 'template_runsimu_connections.sh')
dst = 'pareto'+str(i)+'.sh'
dst = os.path.join(dirname, dst)
newline3 = "#SBATCH -J arch5_old_"+str(i)+"\n"
newline6 = "#SBATCH -o arch5_old_"+str(i)+".stdout\n"
newline7 = "#SBATCH -e arch5_old_"+str(i)+".stderr\n"
newline17 = "pdcp pareto"+str(i)+".py $TMPDIR\n"
newline21 = "python pareto"+str(i)+".py\n"
destination = open( dst, "w" )
source = open( src, "r" )
for l, line in enumerate(source):
if l==3:
destination.write(newline3)
elif l==6:
destination.write(newline6)
elif l==7:
destination.write(newline7)
elif l==17:
destination.write(newline17)
elif l==21:
destination.write(newline21)
else:
destination.write(line)
source.close()
destination.close()
f = open('run_sbatch.txt', 'w')
for i in range(20):
line = 'sbatch pareto'+str(i)+'.sh\n'
f.write(line)
f.close() | [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
b7af641910c25e095ec765b876ffc1ff2b93a6f5 | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-Advanced/Multidimensional-Lists/Lab/01_sum_matrix_elements.py | 8b2f7f607ffc3ecd86832a616a452d0d289331c1 | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | def get_matrix():
row, column = [int(n) for n in input().split(", ")]
result = []
for r_1 in range(row):
el = [int(el) for el in input().split(", ")]
result.append(el)
return result
matrix = get_matrix()
matrix_sum = 0
for r in range(len(matrix)):
for c in range(len(matrix[r])):
matrix_sum += matrix[r][c]
print(matrix_sum)
print(matrix)
| [
"martinkypar@gmail.com"
] | martinkypar@gmail.com |
b4e8ec957d1b648d015a016c4e06df18db2ebfb7 | 504d6796ed53540b57532f3c85a148bf6ddce2fc | /button.py | f7dc0e9f31bc5f463498cb67e13c6936fb840d96 | [] | no_license | YGragon/AlienInvasion | 1633d8319ee40400f50f236a904295eeae725886 | 81cf5a7988333e7a26a2934af66d571a26ade3c1 | refs/heads/master | 2021-08-23T15:56:11.077293 | 2017-12-05T14:43:25 | 2017-12-05T14:43:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | import pygame.font
class Button():
"""docstring for Button"""
def __init__(self, ai_settings, screen, msg):
"""初始化按钮的属性"""
self.screen = screen
self.screen_rect = screen.get_rect()
# 设置按钮的尺寸和其他属性
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# 创建按钮的rect, 并使其居中
self.rect = pygame.Rect(0, 0, self.width, self. height)
self.rect.center = self.screen_rect.center
# 按钮的标签只需创建一次
self.prep_msg(msg)
def prep_msg(self, msg):
"""将msg渲染为图像,并失去在按钮上居中"""
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_botton(self):
# 绘制一个用颜色填充的按钮再绘制文本
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
| [
"1105894953@qq.com"
] | 1105894953@qq.com |
6d09ae55a40604788f8a470b1e4e72fbee35e4cb | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/examples/Learning Python/030_010_Bound Methods and Other Callable Objects.py | 42e5cbd1bb55eb8bcbbd19028cb76cacc9337cb0 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,994 | py | class Number:
def __init__(self, base):
self.base = base
def double(self):
return self.base * 2
def triple(self):
return self.base * 3
x = Number(2) # Class instance objects
y = Number(3) # State + methods
z = Number(4)
print('#' * 23 + ' Normal immediate calls')
print(x.double()) # Normal immediate calls
print('#' * 23 + ' List of bound methods')
acts = [x.double, y.double, y.triple, z.double] # List of bound methods
for act in acts: # Calls are deferred
print(act()) # Call as though functions
#
bound = x.double
print(bound.__self__, bound.__func__)
# (<__main__.Number object at 0x0278F610>, <function double at 0x027A4ED0>)
print(bound.__self__.base)
#
print('#' * 23 + ' Calls bound.__func__(bound.__self__, ...)')
print(bound()) # Calls bound.__func__(bound.__self__, ...)
#
def square(arg):
return arg ** 2 # Simple functions (def or lambda)
#
class Sum:
def __init__(self, val): # Callable instances
self.val = val
def __call__(self, arg):
return self.val + arg
#
class Product:
def __init__(self, val): # Bound methods
self.val = val
def method(self, arg):
return self.val * arg
#
sobject = Sum(2)
pobject = Product(3)
actions = [square, sobject, pobject.method] # Function, instance, method
#
print('#' * 23 + ' Function, instance, method. All 3 called same way. Call any 1-arg callable')
for act in actions: # All 3 called same way
print(act(5)) # Call any 1-arg callable
#
#
print('#' * 23 + ' Index, comprehensions, maps')
actions[-1](5) # Index, comprehensions, maps
#
[act(5) for act in actions]
list(map(lambda act: act(5), actions))
class Negate:
def __init__(self, val): # Classes are callables too
self.val = -val # But called for object, not work
def __repr__(self): # Instance print format
return str(self.val)
print('#' * 23 + ' Call a class too')
actions = [square, sobject, pobject.method, Negate] # Call a class too
for act in actions:
print(act(5))
#
print('#' * 23 + ' Runs __repr__ not __str__!')
print([act(5) for act in actions]) # Runs __repr__ not __str__!
#
#
table = {act(5): act for act in actions} # 2.6/3.0 dict comprehension
print('#' * 23 + ' 2.6/3.0 str.format')
for (key, value) in table.items():
print('{0:2} => {1}'.format(key, value)) # 2.6/3.0 str.format
# -5 => <class '__main__.Negate'>
# 25 => <function square at 0x025D4978>
# 15 => <bound method Product.method of <__main__.Product object at 0x025D0F90>>
# 7 => <__main__.Sum object at 0x025D0F70>
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
4994d7a5ad3a9ce58cc29a553e7a2db8735d7e33 | fe31602a910e70fa77d89fcd4c705cc677b0a898 | /pipeline/type/tmodel.py | eb11ade00b352a0abf20821c7d2f3f85e2555a78 | [] | no_license | WUT-IDEA/Y2019_CZH_GraduationDesignCode | e0748b4412bc6c8d160584dff7faf3d6f3395d90 | 83b807060c68b3edef574532b32e8ae7a759d63f | refs/heads/master | 2020-06-05T04:25:15.351883 | 2019-06-17T09:05:28 | 2019-06-17T09:05:28 | 192,312,562 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,022 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
import keras
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
from keras.layers import Input, Dense, Embedding, LSTM, Bidirectional
from keras.models import Model
import numpy as np
import datetime
class LossHistory(keras.callbacks.Callback):
def __init__(self, logpath, modelpath):
super().__init__()
self.logpath = logpath
self.modelpath = modelpath
if not os.path.exists(modelpath):
os.mkdir(modelpath)
def set_model(self, model):
self.model = model
self.writer = open(self.logpath, "w")
def on_epoch_end(self, epoch, logs=None):
#if epoch == 0 or (epoch + 1) % 5 == 0:
self.model.save("{}/model_{}.h5".format(self.modelpath, epoch+1))
# self.writer.write("epoch {}, loss:{}, valid_loss:{}\n".format(epoch+1, logs['loss'], logs['val_loss']))
self.writer.write("epoch {}, loss:{}\n".format(epoch + 1, logs['loss']))
# read file by line
def read_byline(filepath):
q = []
t = []
y = []
with open(filepath, 'r') as reader:
for line in reader:
parts = line.strip().split("\t")
q.append(parts[0].split(" "))
t.append(parts[1].split(" "))
y.append(parts[2].strip())
q = np.asarray(q, dtype='int32')
y = np.asarray(y, dtype="int32")
t = np.asarray(t, dtype='int32')
return q, t, y
# Question
embedding_matrix_q = np.loadtxt("../../data/glove_test.txt", dtype=np.float32)
print(embedding_matrix_q.shape)
EMBEDDING_DIM_Q=300
MAX_SEQUENCE_LENGTH_Q=24
# define model 58968
sequence_input_q=Input(shape=(MAX_SEQUENCE_LENGTH_Q,), dtype='int32')
embedding_layer_q=Embedding(input_dim=62957,
output_dim=EMBEDDING_DIM_Q,
weights=[embedding_matrix_q],
input_length=MAX_SEQUENCE_LENGTH_Q,
trainable=False)
embedded_sequences_q=embedding_layer_q(sequence_input_q)
q_bilstm=Bidirectional(LSTM(100))(embedded_sequences_q)
# subject type
embedding_matrix_t=np.loadtxt("data/glove_type.txt")
print(embedding_matrix_t.shape)
EMBEDDING_DIM_T=300
MAX_SEQUENCE_LENGTH_T=6
# define model
sequence_input_t=Input(shape=(MAX_SEQUENCE_LENGTH_T,), dtype='int32')
embedding_layer_t=Embedding(input_dim=1053,
output_dim=EMBEDDING_DIM_T,
weights=[embedding_matrix_t],
input_length=MAX_SEQUENCE_LENGTH_T,
mask_zero=True,
trainable=False)
embedded_sequences_t=embedding_layer_t(sequence_input_t)
t_lstm=Bidirectional(LSTM(100))(embedded_sequences_t)
from keras.layers import concatenate
concatenatecon_layer=concatenate([q_bilstm, t_lstm],axis=-1)
dense1=Dense(100,activation="sigmoid")(concatenatecon_layer)
output=Dense(1,activation="sigmoid")(dense1)
# output=Dense(1,activation="sigmoid")(concatenatecon_layer)
model=Model(inputs=[sequence_input_q,sequence_input_t],outputs=output)
model.compile(optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"])
print(model.summary())
input_q, input_t, y = read_byline("training_data/train_data.txt")
BATCH_SIZE=100
EPOCHS=60
history = LossHistory("log_t_binary.txt", "t_binary_model")
start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
model.fit(x=[input_q,input_t],
y=y,
batch_size=BATCH_SIZE,
callbacks=[history],
epochs=EPOCHS)
model.save("e_model.h5")
endTime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("startTime:"+start_time)
print("endTime:"+endTime) | [
"zhengyunpei@zhengyunpeideMacBook-Pro.local"
] | zhengyunpei@zhengyunpeideMacBook-Pro.local |
e57efe4423fa8d3f59604669b12ba4f71a8595b6 | 6d154b8fdea96187fe12c6c4324ec4f8980dcdfe | /Shortner/migrations/0004_alter_url_key.py | bc907321bb8551cfa5e5e3a7cf0af62000b338ed | [] | no_license | mohammad-osoolian/UrlShortner | ba245a5aa1595e43044cfb93badbae76d293f616 | 8ffbe0b1a951997a420381f64492bc38deb98c05 | refs/heads/master | 2023-07-18T11:02:55.092219 | 2021-08-29T11:12:55 | 2021-08-29T11:12:55 | 400,747,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # Generated by Django 3.2.6 on 2021-08-28 11:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Shortner', '0003_url_created'),
]
operations = [
migrations.AlterField(
model_name='url',
name='key',
field=models.CharField(max_length=255, null=True),
),
]
| [
"="
] | = |
ccd817fac9a6c0e11148d92497b0f5a6c2934cb8 | cdbea65d6127779075759bf30ba2cd97d2feb3bc | /petstagram/accounts/migrations/0001_initial.py | a1d63cc03012845babc42670df6fb87da874de1c | [
"MIT"
] | permissive | DimAntDim/SoftUni_Petstagram_Workshop | 9285bbded707d0ef5d467314ebcba1a7df69b370 | b4d6da5fa0d19de4b434046d0b7c73a40c8343b5 | refs/heads/main | 2023-08-21T10:21:37.328351 | 2021-11-01T10:17:16 | 2021-11-01T10:17:16 | 375,528,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | # Generated by Django 3.2.3 on 2021-07-19 15:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='PetstagramUser',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"66394357+DimAntDim@users.noreply.github.com"
] | 66394357+DimAntDim@users.noreply.github.com |
1cc53a307474feed659f6c0dba91367c367f464a | 1a1d61424d83663318b8f1ba30712538680a135a | /apps/payinfo/migrations/0003_auto_20181130_2120.py | c7c3df32d3bce51fd10c0d3036e65cc70133f6d8 | [] | no_license | htzs12/django_online | 411ba5c4a20544a07ce6a644306b1c127e6311be | 5c9c2a1a742d3dd97a430651f2bd14012f6eb3a2 | refs/heads/master | 2022-12-24T15:11:39.747641 | 2018-12-01T13:46:39 | 2018-12-01T13:46:39 | 154,823,539 | 0 | 0 | null | 2022-12-02T15:19:33 | 2018-10-26T11:23:21 | Python | UTF-8 | Python | false | false | 412 | py | # Generated by Django 2.0.5 on 2018-11-30 13:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payinfo', '0002_auto_20181130_2118'),
]
operations = [
migrations.AlterField(
model_name='payinfo',
name='path',
field=models.FilePathField(path='/media', verbose_name='路径'),
),
]
| [
"www.htzs@qq.com"
] | www.htzs@qq.com |
0493b6b20bc6c75d3b668bdb0e66d23160bc5ba8 | 80e1a973c97c13fd63afc347409ca0d7fcff2795 | /pic/migrations/0004_images_date_posted.py | bb27ab1575016f858bc4cf761dbe6a5113d41c84 | [] | no_license | prathmesh2048/cyberboxer-assignment | d22584f60870560d4fd1cc7b62bfe8b377b55a3c | c0eb91e289b72f7f254a072d7d166ac42076859d | refs/heads/master | 2023-08-11T10:19:49.315590 | 2021-09-19T09:10:00 | 2021-09-19T09:10:00 | 408,081,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | # Generated by Django 3.2.7 on 2021-09-18 19:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('pic', '0003_alter_images_image_name'),
]
operations = [
migrations.AddField(
model_name='images',
name='date_posted',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| [
"prathmeshnandurkar123@gmail.com"
] | prathmeshnandurkar123@gmail.com |
c616242aab638f27aa212ca80de8c1162b0f3f38 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/2d0891e0897159d0010afa9be18d1421fcab47c2-<get_device_facts>-fix.py | 561f9e445087fabeb1ba7535484f2e8f2b8bf73f | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,938 | py | def get_device_facts(self):
device_facts = {
}
device_facts['devices'] = {
}
lspci = self.module.get_bin_path('lspci')
if lspci:
(rc, pcidata, err) = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
else:
pcidata = None
try:
block_devs = os.listdir('/sys/block')
except OSError:
return device_facts
devs_wwn = {
}
try:
devs_by_id = os.listdir('/dev/disk/by-id')
except OSError:
pass
else:
for link_name in devs_by_id:
if link_name.startswith('wwn-'):
try:
wwn_link = os.readlink(os.path.join('/dev/disk/by-id', link_name))
except OSError:
continue
devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
links = self.get_all_device_links()
device_facts['device_links'] = links
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join('/sys/block/', block))
except OSError:
e = sys.exc_info()[1]
if (e.errno == errno.EINVAL):
path = block
sysfs_no_links = 1
else:
continue
sysdir = os.path.join('/sys/block', path)
if (sysfs_no_links == 1):
for folder in os.listdir(sysdir):
if ('device' in folder):
virtual = 0
break
d = {
}
d['virtual'] = virtual
d['links'] = {
}
for (link_type, link_values) in iteritems(links):
d['links'][link_type] = link_values.get(block, [])
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
d[key] = get_file_content(((sysdir + '/device/') + key))
sg_inq = self.module.get_bin_path('sg_inq')
if sg_inq:
device = ('/dev/%s' % block)
(rc, drivedata, err) = self.module.run_command([sg_inq, device])
if (rc == 0):
serial = re.search('Unit serial number:\\s+(\\w+)', drivedata)
if serial:
d['serial'] = serial.group(1)
for (key, test) in [('removable', '/removable'), ('support_discard', '/queue/discard_granularity')]:
d[key] = get_file_content((sysdir + test))
if (diskname in devs_wwn):
d['wwn'] = devs_wwn[diskname]
d['partitions'] = {
}
for folder in os.listdir(sysdir):
m = re.search((('(' + diskname) + '[p]?\\d+)'), folder)
if m:
part = {
}
partname = m.group(1)
part_sysdir = ((sysdir + '/') + partname)
part['links'] = {
}
for (link_type, link_values) in iteritems(links):
part['links'][link_type] = link_values.get(partname, [])
part['start'] = get_file_content((part_sysdir + '/start'), 0)
part['sectors'] = get_file_content((part_sysdir + '/size'), 0)
part['sectorsize'] = get_file_content((part_sysdir + '/queue/logical_block_size'))
if (not part['sectorsize']):
part['sectorsize'] = get_file_content((part_sysdir + '/queue/hw_sector_size'), 512)
part['size'] = bytes_to_human((float(part['sectors']) * 512.0))
part['uuid'] = get_partition_uuid(partname)
self.get_holders(part, part_sysdir)
d['partitions'][partname] = part
d['rotational'] = get_file_content((sysdir + '/queue/rotational'))
d['scheduler_mode'] = ''
scheduler = get_file_content((sysdir + '/queue/scheduler'))
if (scheduler is not None):
m = re.match('.*?(\\[(.*)\\])', scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content((sysdir + '/size'))
if (not d['sectors']):
d['sectors'] = 0
d['sectorsize'] = get_file_content((sysdir + '/queue/logical_block_size'))
if (not d['sectorsize']):
d['sectorsize'] = get_file_content((sysdir + '/queue/hw_sector_size'), 512)
d['size'] = bytes_to_human((float(d['sectors']) * 512.0))
d['host'] = ''
m = re.match('.+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\\.[0-7])/', sysdir)
if (m and pcidata):
pciid = m.group(1)
did = re.escape(pciid)
m = re.search((('^' + did) + '\\s(.*)$'), pcidata, re.MULTILINE)
if m:
d['host'] = m.group(1)
self.get_holders(d, sysdir)
device_facts['devices'][diskname] = d
return device_facts | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
27fa27939f9f0ed47d071348ff6a30e7f3939e4b | 6045f8519065f17b9d832a8e051723a520b58e3c | /09. Volleyball.py | adb6b6ba2810dd04ef0de6ab78c52138001659bf | [] | no_license | a-angeliev/Python-Fundamentals-SoftUni | a308a6c94eb705a3319f6e081543c1cad0b1b37d | a9a5eba0376ebc7395daeda527408d1e59d58316 | refs/heads/master | 2023-07-19T05:55:28.104160 | 2021-09-11T18:25:58 | 2021-09-11T18:25:58 | 399,575,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from math import floor
year = input()
holydays = int(input())
selo_weekends = int(input())
sofia_weekends = (48 - selo_weekends) * 3/4
sofia_weekends_p = sofia_weekends
holydays_p = holydays* 2/3
if year == "leap":
all_game = (sofia_weekends_p + holydays_p+selo_weekends)*115/100
else:
all_game = sofia_weekends_p+holydays_p+selo_weekends
print(floor(all_game)) | [
"nachko01@gmail.com"
] | nachko01@gmail.com |
a4bc595b22f210716af0ffe15d947d0da8517d34 | b68fea9d645de59ee31da970d3dc435460fde9de | /exercise/__init__.py | 97d19822dd916b50911cb1201bb3b888c295e0b9 | [
"BSD-3-Clause"
] | permissive | shagun30/djambala-2 | 03fde4d1a5b2a17fce1b44f63a489c30d0d9c028 | 06f14e3dd237d7ebf535c62172cfe238c3934f4d | refs/heads/master | 2021-01-10T04:20:30.735479 | 2008-05-22T05:02:08 | 2008-05-22T05:02:08 | 54,959,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | """
/dms/exercise/
Aufgaben mit Abgabemoeglichkeit innerhalb des Django Content Management Systems
Hans Rauch
hans.rauch@gmx.net
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
"""
| [
"hans.rauch@gmx.net"
] | hans.rauch@gmx.net |
05fbc00b1e537495eebafabde55fad0c2743994b | cb61ba31b27b232ebc8c802d7ca40c72bcdfe152 | /leetcode/3. Longest Substring Without Repeating Characters/soln.py | b6def6444faec318c2e67438c047fdc782f1f20e | [
"Apache-2.0"
] | permissive | saisankargochhayat/algo_quest | c7c48187c76b5cd7c2ec3f0557432606e9096241 | a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc | refs/heads/master | 2021-07-04T15:21:33.606174 | 2021-02-07T23:42:43 | 2021-02-07T23:42:43 | 67,831,927 | 5 | 1 | Apache-2.0 | 2019-10-28T03:51:03 | 2016-09-09T20:51:29 | Python | UTF-8 | Python | false | false | 785 | py | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
from collections import defaultdict
c_chars = defaultdict(int)
l, r, res = 0, 0, 0
contract = False
while r < len(s):
# Expand
if s[r] in c_chars and c_chars[s[r]] > 0: # check if contract is needed
contract = True
c_chars[s[r]] += 1
r += 1
# Contract
if contract:
d_char = s[r-1] # Char to remove
while s[l] != d_char and l < r:
c_chars[s[l]] -= 1
l += 1
c_chars[s[l]] -= 1
l += 1
contract = False
res = max(res, r-l)
return res | [
"saisankargochhayat@gmail.com"
] | saisankargochhayat@gmail.com |
257da03094424402f2f0aa6083bd458537c6060c | ca552cedf457ab4ad455b089f31b9fc13882c2aa | /app/core/migrations/0001_initial.py | 678152fef9b44a3515f6e92dd97d1c86f63db474 | [
"MIT"
] | permissive | akashjadhav3/django-recipe-app-api | 8837f45dbaacf502a57e90f10dca9b936d7eb893 | 3124c1d6a9c3b8badc02ef9f1a0acb2a779c86dd | refs/heads/master | 2023-03-24T13:10:37.909128 | 2020-08-02T18:07:51 | 2020-08-02T18:07:51 | 283,174,460 | 0 | 0 | MIT | 2021-03-19T23:51:08 | 2020-07-28T10:07:20 | Python | UTF-8 | Python | false | false | 1,709 | py | # Generated by Django 2.1.15 on 2020-08-01 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
d8ad11dced85b9f98cd2e9948220ded7a12f67e4 | 68a088346090ae4e929c208906b14181da0f92f6 | /第一阶段/2. Python01/day03/exercise/04_order.py | ec663c7ecb7258a59854c76da7d2f1df58f90655 | [] | no_license | LONG990122/PYTHON | d1530e734ae48416b5f989a4d97bd1d66d165b91 | 59a2a2a0b033c8ad0cb33d6126c252e9d574eff7 | refs/heads/master | 2020-07-07T09:38:03.501705 | 2019-09-23T16:28:31 | 2019-09-23T16:28:31 | 203,316,565 | 0 | 0 | null | 2019-10-23T15:02:33 | 2019-08-20T06:47:44 | HTML | UTF-8 | Python | false | false | 270 | py | # 1. 写一个程序,输入一段字符串,如果字符串不为
# 空,则把第一个字符的编码打印出来
s = input("请输入一段字符串: ")
if s != '':
# code = ord('s')
code = ord(s[0])
print('第一个字符的编码是:', code)
| [
"54302090+LONG990122@users.noreply.github.com"
] | 54302090+LONG990122@users.noreply.github.com |
702fc0631f68e8c5ce509203a5dfb60626bb656f | 210ecd63113ce90c5f09bc2b09db3e80ff98117a | /AbletonX1Mk2/APC40/TransportComponent.py | 33d1f976439e8a980d7df0f90542991af7714aed | [] | no_license | ajasver/MidiScripts | 86a765b8568657633305541c46ccc1fd1ea34501 | f727a2e63c95a9c5e980a0738deb0049363ba536 | refs/heads/master | 2021-01-13T02:03:55.078132 | 2015-07-16T18:27:30 | 2015-07-16T18:27:30 | 38,516,112 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | #Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/APC40/TransportComponent.py
import Live
from _Framework.Control import ButtonControl
from _Framework.TransportComponent import TransportComponent as TransportComponentBase
from _Framework.SubjectSlot import subject_slot
class TransportComponent(TransportComponentBase):
""" TransportComponent that only uses certain buttons if a shift button is pressed """
rec_quantization_button = ButtonControl()
def __init__(self, *a, **k):
super(TransportComponent, self).__init__(*a, **k)
self._last_quant_value = Live.Song.RecordingQuantization.rec_q_eight
self._on_quantization_changed.subject = self.song()
self._update_quantization_state()
self.set_quant_toggle_button = self.rec_quantization_button.set_control_element
@rec_quantization_button.pressed
def rec_quantization_button(self, value):
if not self._last_quant_value != Live.Song.RecordingQuantization.rec_q_no_q:
raise AssertionError
quant_value = self.song().midi_recording_quantization
self._last_quant_value = quant_value != Live.Song.RecordingQuantization.rec_q_no_q and quant_value
self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_no_q
else:
self.song().midi_recording_quantization = self._last_quant_value
@subject_slot('midi_recording_quantization')
def _on_quantization_changed(self):
if self.is_enabled():
self._update_quantization_state()
def _update_quantization_state(self):
quant_value = self.song().midi_recording_quantization
quant_on = quant_value != Live.Song.RecordingQuantization.rec_q_no_q
if quant_on:
self._last_quant_value = quant_value
self.rec_quantization_button.color = 'DefaultButton.On' if quant_on else 'DefaultButton.Off' | [
"admin@scoopler.com"
] | admin@scoopler.com |
a5d637c2475a685c654bafa50d05cf531743adfe | bb3ae8193289e98e01bea265646f7c77f20558af | /venv/Scripts/pisa-script.py | 30f2451464da560f085e14fd18b91fdb2fbe6889 | [] | no_license | chrisstianandres/almacen_yamaha | 4edbbc827bba7143f466d11c066e522cb8357b25 | 711096cd958e92cb6ec9423730a92120ac614337 | refs/heads/master | 2023-05-13T14:15:30.184461 | 2021-06-07T15:02:46 | 2021-06-07T15:02:46 | 370,217,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #!D:\PycharmProjects\almacen_yamaha\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'xhtml2pdf==0.2.5','console_scripts','pisa'
__requires__ = 'xhtml2pdf==0.2.5'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('xhtml2pdf==0.2.5', 'console_scripts', 'pisa')()
)
| [
"chrisstianandres@gmail.com"
] | chrisstianandres@gmail.com |
ddbde340dbd330040e85b9a914a191527fc52717 | e905abd9bb7bd7017657d0a0c4d724d16e37044c | /.history/article/spiders/sciencedirect_20201230010225.py | 787c70ae8c70e5f466306bdf5a5c7e93e8e15343 | [] | no_license | tabdelbari/articles | a8b921841f84fb473f5ed1cdcda743863e6bc246 | f0e1dfdc9e818e43095933139b6379a232647898 | refs/heads/main | 2023-03-05T10:21:35.565767 | 2021-02-10T13:35:14 | 2021-02-10T13:35:14 | 325,654,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import scrapy
import logging
import re
from scrapy_splash import SplashRequest
from article.items import ArticleItem
class SciencedirectSpider(scrapy.Spider):
name = 'sciencedirect'
allowed_domains = ['scienced.com']
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'}
def __init__(self, topic='', keywords='', **kwargs):
super().__init__(**kwargs)
self.start_urls = ['https://www.sciencedirect.com/search?qs=%s' %keywords]
self.topic = topic
def start_requests(self):
for url in self.start_urls:
yield SplashRequest(url, callback=self.find_articles, args={ 'wait': 4 })
def find_articles(self, response):
# logging.info(response.text)
articles_urls = response.xpath('//*/div/h2/span/a/@href').getall()
logging.info(f'{len(articles_urls)} articles found')
for article_url in articles_urls:
article_url = 'https://www.sciencedirect.com' + article_url
yield SplashRequest(article_url, callback=self.parse_article, args={ 'wait': 4 })
next_page = response.xpath('//*[@id="srp-pagination"]/li[@class="pagination-link next-link"]/a/@href').get(default='')
logging.info('Next page found:')
if next_page != '':
next_page = 'https://www.sciencedirect.com' + next_page
yield SplashRequest(next_page, callback=self.find_articles)
def parse_article(self, response):
article = ArticleItem()
logging.info('Processing --> ' + response.url)
article.title = response.xpath('//*/article/h1/span').get(default='')
authors = []
authors_surnames = response.xpath('//*/div[@class="author-group"]/a/span/span[@class="text surname"]').getall()
authors_givennames = response.xpath('//*/div[@class="author-group"]/a/span/span[@class="text given-name"]').getall()
for i in range(0, len(authors_givennames)):
authors.append(authors_surnames[i] + ' ' + authors_givennames[i])
article.authors = '|'.join(authors)
article.country = ''
article.abstract = response.xpath('//*/div[@class="abstract author"]/div/p').get(default='')
article.date_pub = response.xpath('//*/div[@class="Publication"]/div/div').get(default='').split(',')[1]
article.journal = response.xpath('//*/div[@class="Publication"]/div/h2').get(default='')
article.topic = self.topic
article.latitude = ''
article.longitude = ''
yield article
| [
"abdelbari1996@hotmail.com"
] | abdelbari1996@hotmail.com |
b332da6e7afdf60dcfe5a0805699a6f76bed5000 | 1f70e6c069074d848347cfb6674b1376a323aae2 | /design/observer.py | eaf9c1e098a2f388539bda2ee30218482dd76cd3 | [] | no_license | TalentBoy2333/python_study | 5b3bf172a4bb04bd0ee05c24af7a223470ff78ca | 703d2ff4d9fe18c9c5b801c3784e5e8f0845a3a7 | refs/heads/master | 2023-05-25T15:27:22.315664 | 2021-06-14T08:16:50 | 2021-06-14T08:16:50 | 357,243,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | ''' 观察者模式
'''
from abc import ABCMeta, abstractclassmethod
class Observer(metaclass=ABCMeta): # 订阅者
@abstractclassmethod
def update(self, notice):
pass
class Notice: # 发布者
def __init__(self):
self.observers = []
def attach(self, obs):
self.observers.append(obs)
def detach(self, obs):
self.observers.remove(obs)
def notify(self):
for obs in self.observers:
obs.update(self)
class StaffNotice(Notice):
def __init__(self, company_info=None):
super().__init__()
self.__company_info = company_info
@property
def company_info(self):
return self.__company_info
@company_info.setter
def company_info(self, info):
self.__company_info = info
self.notify()
class Staff(Observer):
def __init__(self):
self.company_info = None
def update(self, notice):
self.company_info = notice.company_info
notice = StaffNotice('init')
s1 = Staff()
s2 = Staff()
notice.attach(s1)
notice.attach(s2)
print(s1.company_info)
print(s2.company_info)
notice.company_info = 'money'
print(s1.company_info)
print(s2.company_info)
notice.detach(s2)
notice.company_info = 'holiday'
print(s1.company_info)
print(s2.company_info) | [
"957498562@qq.com"
] | 957498562@qq.com |
300dff82aea77ac86bbac47b0ee397a4503a22cc | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_20/models/replica_link_performance_replication.py | f928f8dbcd694fd51ffcb2543e1012ad8e850d8a | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,750 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class ReplicaLinkPerformanceReplication(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_sec_from_remote': 'int',
'bytes_per_sec_to_remote': 'int',
'bytes_per_sec_total': 'int',
'direction': 'str',
'local_pod': 'FixedReference',
'remote_pod': 'FixedReference',
'remotes': 'list[FixedReference]',
'time': 'int'
}
attribute_map = {
'bytes_per_sec_from_remote': 'bytes_per_sec_from_remote',
'bytes_per_sec_to_remote': 'bytes_per_sec_to_remote',
'bytes_per_sec_total': 'bytes_per_sec_total',
'direction': 'direction',
'local_pod': 'local_pod',
'remote_pod': 'remote_pod',
'remotes': 'remotes',
'time': 'time'
}
required_args = {
}
def __init__(
self,
bytes_per_sec_from_remote=None, # type: int
bytes_per_sec_to_remote=None, # type: int
bytes_per_sec_total=None, # type: int
direction=None, # type: str
local_pod=None, # type: models.FixedReference
remote_pod=None, # type: models.FixedReference
remotes=None, # type: List[models.FixedReference]
time=None, # type: int
):
"""
Keyword args:
bytes_per_sec_from_remote (int): The number of bytes received per second from a remote array.
bytes_per_sec_to_remote (int): The number of bytes transmitted per second to a remote array.
bytes_per_sec_total (int): Total bytes transmitted and received per second.
direction (str): The direction of replication. Valid values are `inbound` and `outbound`.
local_pod (FixedReference): Reference to a local pod.
remote_pod (FixedReference): Reference to a remote pod.
remotes (list[FixedReference]): Reference to a remote array.
time (int): Sample time in milliseconds since the UNIX epoch.
"""
if bytes_per_sec_from_remote is not None:
self.bytes_per_sec_from_remote = bytes_per_sec_from_remote
if bytes_per_sec_to_remote is not None:
self.bytes_per_sec_to_remote = bytes_per_sec_to_remote
if bytes_per_sec_total is not None:
self.bytes_per_sec_total = bytes_per_sec_total
if direction is not None:
self.direction = direction
if local_pod is not None:
self.local_pod = local_pod
if remote_pod is not None:
self.remote_pod = remote_pod
if remotes is not None:
self.remotes = remotes
if time is not None:
self.time = time
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
if key == "bytes_per_sec_from_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_from_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_to_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_to_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_total" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_total`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicaLinkPerformanceReplication, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicaLinkPerformanceReplication):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
57b9200cadbd23bee5cbfb3057e293fd71831270 | c46407b9924351d794a0c28f498b4c74063f9b7b | /setup.py | 081220dbbcf8b8027b461700db64f086626ca1eb | [] | no_license | cprecioso/lektor-surge | fb317ae55c92230fdd607a173d17eecd71916e82 | da7cacda356f045cb71663c90c3139ff0e4b5451 | refs/heads/master | 2021-01-13T09:35:47.945391 | 2016-10-27T00:26:06 | 2016-10-27T00:26:06 | 72,056,084 | 0 | 0 | null | 2016-10-27T00:25:41 | 2016-10-27T00:25:40 | null | UTF-8 | Python | false | false | 409 | py | from setuptools import setup
setup(
name='lektor-surge',
version='0.2+',
author=u'A. Jesse Jiryu Davis',
author_email='jesse@emptysquare.net',
license='MIT',
py_modules=['lektor_surge'],
install_requires=['Lektor'],
url='https://github.com/ajdavis/lektor-surge',
entry_points={
'lektor.plugins': [
'surge = lektor_surge:SurgePlugin',
]
}
)
| [
"jesse@mongodb.com"
] | jesse@mongodb.com |
3f40a60b63fa2afebd92f23a45a3e7f418ae4644 | a3746020cf091f433beb41bde1b62818b4de569b | /past/rule_analysis/rule/text/check_lob_using.py | 7f1b4038034ddcd1a4e51f098a17ade8e31b68f3 | [] | no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 747aaa02573a9c2b46a9e14415d27c0ab8e6158c | refs/heads/master | 2023-02-04T18:38:46.125746 | 2020-06-05T09:49:46 | 2020-06-05T09:49:46 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # Author: kk.Fang(fkfkbill@gmail.com)
import re
from .utils import judge_if_ddl
def execute_rule(sql, db_model=None, **kwargs):
if not judge_if_ddl(sql):
return False
if not re.search(r"create\s+table", sql, re.I) and not re.search(r"alter\s+table", sql, re.I):
return False
if any([x in sql.lower() for x in ['blob', 'clob', 'bfile', 'xmltype']]):
#return "高频表上不推荐使用LOB字段"
return True
return False
| [
"fkfkbill@gmail.com"
] | fkfkbill@gmail.com |
f1115291106bbc3302ed73f4d698bd8e138e850f | bafb1c203362a9711f783115c7c573fdcd00a3d4 | /venv/Lib/site-packages/kivy/tests/test_clipboard.py | e9d8617a21f5b50010b3650d2a9d6d4bfb0df15d | [] | no_license | santokalayil/kivy_android_test_project | 0c41c40f6c8869767729cd153f4ce31ac09c0f1c | a4283ba4f4ca8961b2689ee7150297349aedb897 | refs/heads/main | 2023-04-14T08:18:40.453585 | 2021-04-27T19:15:21 | 2021-04-27T19:15:21 | 362,220,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | from kivy.tests.common import GraphicUnitTest
class ClipboardTestCase(GraphicUnitTest):
def setUp(self):
from kivy.core.clipboard import Clipboard
self._clippy = Clipboard
clippy_types = Clipboard.get_types()
cliptype = clippy_types[0]
if 'UTF8_STRING' in clippy_types:
cliptype = 'UTF8_STRING'
self._cliptype = cliptype
super(ClipboardTestCase, self).setUp()
def test_clipboard_not_dummy(self):
clippy = self._clippy
if clippy.__class__.__name__ == 'ClipboardDummy':
self.fail('Something went wrong "dummy" clipboard is being used')
def test_clipboard_paste(self):
clippy = self._clippy
try:
clippy.paste()
except:
self.fail(
'Can not get data from clipboard')
def test_clipboard_copy(self):
clippy = self._clippy
try:
clippy.copy(u"Hello World")
except:
self.fail(
'Can not get put data to clipboard')
def test_clipboard_copy_paste(self):
clippy = self._clippy
txt1 = u"Hello 1"
clippy.copy(txt1)
ret = clippy.paste()
self.assertEqual(txt1, ret)
| [
"49450970+santokalayil@users.noreply.github.com"
] | 49450970+santokalayil@users.noreply.github.com |
cfef430b6bdd42f1a86c075b9f5b7da9eb77d3f5 | cf58614c12802286e4e416ef7b651ab6431f5b68 | /src/zojax/persistentlayout/information.py | b18f07a7c83d8d58666fa61a62cd8b7b1d61296b | [
"ZPL-2.1"
] | permissive | Zojax/zojax.persistentlayout | 3a31df0ac17e5b633a681b0bcf038c951fcba1fd | 94f05af015d69a1452a4bdc7f34d90db5acabc64 | refs/heads/master | 2021-01-01T18:55:56.560619 | 2011-08-09T19:31:12 | 2011-08-09T19:31:12 | 2,026,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import interface
from interfaces import ILayoutInformation
class LayoutInformation(object):
interface.Interface(ILayoutInformation)
def __init__(self, uid, name, view, context, layer, layoutclass):
self.uid = uid
self.name = name
self.view = view
self.context = context
self.layer = layer
self.layoutclass = layoutclass
| [
"andrey.fedoseev@gmail.com"
] | andrey.fedoseev@gmail.com |
25471751b2db8edcf8582fdedc9dee2bd88e1a36 | ab15c38891f26888e4dd4f192b42e5d171437d98 | /ch07-improving-classification-with-a-meta-algorithm-adaboost/adaboost.py | 092232466bf7224513c3b52d9ed68f6b401b53a8 | [] | no_license | zzy1120716/machine-learning-in-action | f262b1c6aea3a262c25d9a56102466d73024dd0a | a46b0b1f9e134d85f4f28bef1de30cdf329d8653 | refs/heads/master | 2020-03-28T21:07:59.224931 | 2018-10-12T08:51:15 | 2018-10-12T08:51:15 | 149,131,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,701 | py | from numpy import *
"""
创建简单数据集
"""
def loadSimpData():
datMat = matrix([[ 1. , 2.1],
[ 2. , 1.1],
[ 1.3, 1. ],
[ 1. , 1. ],
[ 2. , 1. ]])
classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
return datMat,classLabels
"""
单层决策树生成函数
"""
def stumpClassify(dataMatrix,dimen,threshVal,threshIneq):#just classify the data
retArray = ones((shape(dataMatrix)[0],1))
if threshIneq == 'lt':
retArray[dataMatrix[:,dimen] <= threshVal] = -1.0
else:
retArray[dataMatrix[:,dimen] > threshVal] = -1.0
return retArray
def buildStump(dataArr,classLabels,D):
dataMatrix = mat(dataArr); labelMat = mat(classLabels).T
m,n = shape(dataMatrix)
numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))
minError = inf #init error sum, to +infinity
for i in range(n):#loop over all dimensions
rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max();
stepSize = (rangeMax-rangeMin)/numSteps
for j in range(-1,int(numSteps)+1):#loop over all range in current dimension
for inequal in ['lt', 'gt']: #go over less than and greater than
threshVal = (rangeMin + float(j) * stepSize)
predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal)#call stump classify with i, j, lessThan
errArr = mat(ones((m,1)))
errArr[predictedVals == labelMat] = 0
# 计算加权错误率
weightedError = D.T*errArr #calc total error multiplied by D
print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
if weightedError < minError:
minError = weightedError
bestClasEst = predictedVals.copy()
bestStump['dim'] = i
bestStump['thresh'] = threshVal
bestStump['ineq'] = inequal
return bestStump,minError,bestClasEst
"""
基于单层决策树的AdaBoost训练过程
"""
def adaBoostTrainDS(dataArr,classLabels,numIt=40):
weakClassArr = []
m = shape(dataArr)[0]
D = mat(ones((m,1))/m) #init D to all equal
aggClassEst = mat(zeros((m,1)))
for i in range(numIt):
bestStump,error,classEst = buildStump(dataArr,classLabels,D)#build Stump
print("D:",D.T)
alpha = float(0.5*log((1.0-error)/max(error,1e-16)))#calc alpha, throw in max(error,eps) to account for error=0
bestStump['alpha'] = alpha
weakClassArr.append(bestStump) #store Stump Params in Array
print("classEst: ",classEst.T)
# 为下一次迭代计算*D*
expon = multiply(-1*alpha*mat(classLabels).T,classEst) #exponent for D calc, getting messy
D = multiply(D,exp(expon)) #Calc New D for next iteration
D = D/D.sum()
# 错误率累加计算
#calc training error of all classifiers, if this is 0 quit for loop early (use break)
aggClassEst += alpha*classEst
print("aggClassEst: ",aggClassEst.T)
aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1)))
errorRate = aggErrors.sum()/m
print("total error: ",errorRate)
if errorRate == 0.0: break
return weakClassArr, aggClassEst
"""
AdaBoost分类函数
"""
def adaClassify(datToClass, classifierArr):
    """Classify samples with a trained AdaBoost stump ensemble.

    Each stump votes via stumpClassify, votes are weighted by the stump's
    alpha, and the sign of the accumulated vote is the final prediction.
    """
    sample_mat = mat(datToClass)
    accumulated = mat(zeros((shape(sample_mat)[0], 1)))
    for stump in classifierArr:
        vote = stumpClassify(sample_mat, stump['dim'], stump['thresh'], stump['ineq'])
        accumulated += stump['alpha'] * vote
        print(accumulated)
    return sign(accumulated)
"""
自适应数据加载函数
"""
def loadDataSet(fileName):
    """Parse a tab-delimited text file of floats.

    Every column except the last is treated as a feature; the last column
    is the class label.

    Args:
        fileName: path to the tab-separated data file.

    Returns:
        (dataMat, labelMat): list of per-row feature lists and the parallel
        list of float labels.
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the handle is closed; the original opened the file
    # twice (once just to count columns) and never closed either handle.
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            if curLine == ['']:
                continue  # skip blank lines (e.g. a trailing newline)
            *features, label = curLine
            dataMat.append([float(v) for v in features])
            labelMat.append(float(label))
    return dataMat, labelMat
"""
ROC曲线的绘制及AUC计算函数
"""
def plotROC(predStrengths, classLabels):
    """Plot the ROC curve for a set of classifier scores and print the AUC.

    Args:
        predStrengths: prediction strengths; the .argsort().tolist()[0]
            below assumes a 1xN numpy matrix (e.g. aggClassEst.T).
        classLabels: true labels where +1.0 marks the positive class.
    """
    import matplotlib.pyplot as plt
    cur = (1.0,1.0) #cursor: current plotting position, starts at top-right
    ySum = 0.0 #running sum of rectangle heights, used for the AUC
    numPosClas = sum(array(classLabels)==1.0)
    # One y-step per positive sample, one x-step per negative sample.
    yStep = 1/float(numPosClas); xStep = 1/float(len(classLabels)-numPosClas)
    # Indices of scores sorted ascending, i.e. weakest prediction first.
    sortedIndicies = predStrengths.argsort()#get sorted index, it's reverse
    fig = plt.figure()
    fig.clf()
    ax = plt.subplot(111)
    #loop through all the values, drawing a line segment at each point
    for index in sortedIndicies.tolist()[0]:
        if classLabels[index] == 1.0:
            # True positive: step down.
            delX = 0; delY = yStep;
        else:
            # False positive: step left; accumulate the height for the AUC.
            delX = xStep; delY = 0;
            ySum += cur[1]
        #draw line from cur to (cur[0]-delX,cur[1]-delY)
        ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b')
        cur = (cur[0]-delX,cur[1]-delY)
    ax.plot([0,1],[0,1],'b--')
    plt.xlabel('False positive rate'); plt.ylabel('True positive rate')
    plt.title('ROC curve for AdaBoost horse colic detection system')
    ax.axis([0,1,0,1])
    plt.show()
    # AUC = sum of unit-rectangle heights times the common rectangle width.
    print("the Area Under the Curve is: ",ySum*xStep)
"zzy1120716@126.com"
] | zzy1120716@126.com |
3a171d3084131166e554195f7c2a2b3bca3b65f8 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/resources/v20190801/get_resource_group.py | a589cd0c39a411c4afa98dc37e3ceaceedc2c156 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,330 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetResourceGroupResult',
'AwaitableGetResourceGroupResult',
'get_resource_group',
]
# NOTE: emitted by the Pulumi SDK generator (see the file header); prefer
# regenerating over hand-editing this class.
@pulumi.output_type
class GetResourceGroupResult:
    """
    Resource group information.
    """
    def __init__(__self__, location=None, managed_by=None, name=None, properties=None, tags=None, type=None):
        # Each argument is type-checked only when truthy, then stored via
        # pulumi.set so the matching @property getters below can read it back.
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if managed_by and not isinstance(managed_by, str):
            raise TypeError("Expected argument 'managed_by' to be a str")
        pulumi.set(__self__, "managed_by", managed_by)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        The location of the resource group. It cannot be changed after the resource group has been created. It must be one of the supported Azure locations.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="managedBy")
    def managed_by(self) -> Optional[str]:
        """
        The ID of the resource that manages this resource group.
        """
        return pulumi.get(self, "managed_by")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> 'outputs.ResourceGroupPropertiesResponse':
        """
        The resource group properties.
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        The tags attached to the resource group.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource group.
        """
        return pulumi.get(self, "type")
class AwaitableGetResourceGroupResult(GetResourceGroupResult):
    # Generated wrapper that lets the result be awaited.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable 'yield' makes this a generator (awaitable) that
        # never actually suspends; awaiting resolves immediately to a plain
        # GetResourceGroupResult copy of this object's fields.
        if False:
            yield self
        return GetResourceGroupResult(
            location=self.location,
            managed_by=self.managed_by,
            name=self.name,
            properties=self.properties,
            tags=self.tags,
            type=self.type)
def get_resource_group(resource_group_name: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResourceGroupResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: The name of the resource group to get. The name is case insensitive.
    :param opts: invoke options forwarded to pulumi.runtime.invoke.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    # Invoke the provider function and unwrap its raw .value payload.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:resources/v20190801:getResourceGroup', __args__, opts=opts, typ=GetResourceGroupResult).value
    return AwaitableGetResourceGroupResult(
        location=__ret__.location,
        managed_by=__ret__.managed_by,
        name=__ret__.name,
        properties=__ret__.properties,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
dd64a7d5af16dc250d7f9f0558cf36a08ff22cb4 | f7630fd6c829cb306e72472296e3a513844d99af | /lib/python3.8/site-packages/ansible_collections/check_point/mgmt/plugins/modules/cp_mgmt_add_domain.py | 90c360467afab6cc661742573a653989786f573a | [] | no_license | baltah666/automation | 6eccce20c83dbe0d5aa9a82a27937886e3131d32 | 140eb81fe9bacb9a3ed1f1eafe86edeb8a8d0d52 | refs/heads/master | 2023-03-07T10:53:21.187020 | 2023-02-10T08:39:38 | 2023-02-10T08:39:38 | 272,007,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,822 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_add_domain
short_description: Create new object
description:
- Create new object
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
servers:
description:
- Domain servers. When this field is provided, 'set-domain' command is executed asynchronously.
type: list
suboptions:
name:
description:
- Object name. Must be unique in the domain.
type: str
ip_address:
description:
- IPv4 or IPv6 address. If both addresses are required use ipv4-address and ipv6-address fields explicitly.
type: str
ipv4_address:
description:
- IPv4 address.
type: str
ipv6_address:
description:
- IPv6 address.
type: str
multi_domain_server:
description:
- Multi Domain server name or UID.
type: str
active:
description:
- Activate domain server. Only one domain server is allowed to be active
type: bool
skip_start_domain_server:
description:
- Set this value to be true to prevent starting the new created domain.
type: bool
type:
description:
- Domain server type.
type: str
choices: ['management server', 'log server', 'smc']
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: check_point.mgmt.checkpoint_commands
"""
EXAMPLES = """
- name: add-domain
cp_mgmt_add_domain:
name: domain1
servers:
ip_address: 192.0.2.1
multi_domain_server: MDM_Server
name: domain1_ManagementServer_1
"""
RETURN = """
cp_mgmt_domain:
description: The checkpoint add-domain output.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.check_point.mgmt.plugins.module_utils.checkpoint import checkpoint_argument_spec_for_commands, api_command
def main():
    """Build the module argument spec and execute the 'add-domain' API command."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        # elements='dict' tells Ansible that each list item is a dict, so the
        # suboptions below are actually validated per element; the original
        # spec omitted it, leaving the server entries unvalidated.
        servers=dict(type='list', elements='dict', options=dict(
            name=dict(type='str'),
            ip_address=dict(type='str'),
            ipv4_address=dict(type='str'),
            ipv6_address=dict(type='str'),
            multi_domain_server=dict(type='str'),
            active=dict(type='bool'),
            skip_start_domain_server=dict(type='bool'),
            type=dict(type='str', choices=['management server', 'log server', 'smc'])
        )),
        color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
                                        'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
                                        'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral', 'sea green',
                                        'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
                                        'yellow']),
        comments=dict(type='str'),
        details_level=dict(type='str', choices=['uid', 'standard', 'full']),
        ignore_warnings=dict(type='bool'),
        ignore_errors=dict(type='bool'),
    )
    # Merge in the connection/session parameters shared by all Check Point
    # command modules.
    argument_spec.update(checkpoint_argument_spec_for_commands)
    module = AnsibleModule(argument_spec=argument_spec)
    command = 'add-domain'
    result = api_command(module, command)
    module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"baltah666@gmail.com"
] | baltah666@gmail.com |
2bf87000c434e7db266d0577faff3d8da61f2e6c | b42f4f67e71dee0f0cd95ee4ec0b781f1d27de4c | /Yr12 - to do list task, python revision.py | bbf3e7ee18240ea6e3b82e42ea335b675c47d09b | [] | no_license | Botany-Downs-Secondary-College/todo_list-Rishab-Lal | fa4e30669713fa178546597786463e98d59fc40c | 4e421cf74bb6fafecfc8c67b151a05741effcee8 | refs/heads/main | 2023-03-07T22:59:01.908938 | 2021-02-21T07:23:37 | 2021-02-21T07:23:37 | 338,934,435 | 0 | 0 | null | 2021-02-15T00:45:25 | 2021-02-15T00:45:20 | null | UTF-8 | Python | false | false | 2,022 | py | def command_operator(order):
if order == options_list[0] or order == options_list[3]:
task = input("what task would you like to add to your list?: ")
task_list.append(task)
elif order == options_list[1] or order == options_list[4]:
print("your tasks:")
tasks_in_list = len(task_list)
x = range(0, tasks_in_list, 1)
b = 0
for n in x:
b += 1
print("{}.".format(b), task_list[n])
# --- interactive to-do list menu (module-level script) ---
user_name = input("greetings user, what is your name?: ")
print("hello user {}. It's nice to meet you.".format(user_name))
# 'global' at module level is a no-op; both names are module globals anyway.
global task_list
global options_list
# First three entries are the word commands, last three their numeric aliases.
options_list = ["addtask", "viewtasks", "exitprogram","1","2","3"]
task_list = []
print("what would you like to do out of the following options {}?".format(user_name))
print("1. add a task to your to do list \n2. view current tasks in list \n3. exit the program")
order = input("to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
loop = 0
# Normalise input: strip, lower-case, drop spaces ("Add Task" -> "addtask").
order = str(order).strip().lower().replace(" ","")
# NOTE(review): options_list[2] and options_list[5] are STRINGS, so the
# 'in'/'not in' tests against them are substring checks, not list membership
# (e.g. "exit" in "exitprogram" is True). The loop only terminates correctly
# because of the explicit break statements below.
while order not in options_list[2] or order not in options_list[5]:
    if order in options_list[2] or order in options_list[5]:
        break
    if loop > 0:
        # Re-prompt on every pass after the first.
        order = input("please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
        order = str(order).strip().lower().replace(" ","")
    loop += 1
    # Inner loop: keep asking until the input is one of the six known commands.
    while order not in options_list:
        print("command {} unrecognised".format(order))
        order = input("to select one of the following, please enter the number of your objective or the following comands: ['add task', 'view tasks', 'exit program']: ")
        order = str(order).strip().lower().replace(" ","")
        if order in options_list[2] or order in options_list[5]:
            break
    command_operator(order)
print("bye bye.")
| [
"noreply@github.com"
] | Botany-Downs-Secondary-College.noreply@github.com |
02533f4a8fe865e8035777f87750a925e2a26d76 | 5d3c8af513c3ff3f39ee09b78b36d6a0ad3f22e8 | /day10/main.py | 62a27bac0f3619cdd65d3b40306451bb6724c006 | [] | no_license | jason9075/aoc_2020_puzzle | 9a46efb781f3878eaf93854e97e001cb1fef1d31 | dc2f4c1fa52330cd7b29911d9211cb87fdd82ceb | refs/heads/master | 2023-02-03T20:41:11.447718 | 2020-12-25T04:06:47 | 2020-12-25T04:06:47 | 319,540,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import numpy as np
def p1(file):
    """Solve AoC 2020 day 10 part 1: count joltage gaps of size 1 and 3.

    Reads one adapter rating per line from *file*, walks the ratings in
    descending order, and tallies the size-1 and size-3 gaps between
    consecutive ratings.

    Returns:
        (threes, ones, ratings_descending)
    """
    with open(file) as fp:
        ratings = [int(line) for line in fp]
    ratings.sort(reverse=True)
    threes = 0
    ones = 0
    for higher, lower in zip(ratings, ratings[1:]):
        gap = higher - lower
        if gap == 3:
            threes += 1
        elif gap == 1:
            ones += 1
        else:
            print(f"strange diff: {gap}")
    # The device's built-in adapter always sits 3 jolts above the maximum.
    threes += 1
    return threes, ones, ratings
def p2_wrong():
    """Abandoned brute-force attempt at day 10 part 2 (kept for reference).

    Recursively walks down from the device rating, budgeting the number of
    size-3 and size-1 gaps counted in part 1, and counts every chain that
    reaches the outlet (0). Named _wrong by the author; superseded by the
    dynamic-programming solution in p2().
    """
    num_of_thr, num_of_one, jol_num = p1('input_test.txt')
    print(jol_num)
    print(num_of_thr)
    print(num_of_one)
    print(jol_num[0] + 3)
    high = jol_num[0] + 3
    jol_set = set(jol_num)
    remain = high
    # warning: storing every full path in a set costs huge memory; this set
    # is kept for debugging only (see the commented s.add calls below).
    sol_set = set()
    sol_list = []
    def find_path(path, s, r, usage_thr, usage_one):
        # Try a -3 step: pays either one 3-gap or three 1-gaps of budget.
        if 1 <= usage_thr or 3 <= usage_one:
            next_value = r - 3
            if next_value in jol_set:
                if next_value == 0:
                    # s.add(f'{path}-0')
                    sol_list.append(1)  # reached the outlet: count one chain
                if 1 <= usage_thr:
                    find_path(f'{path}-{next_value}', s, next_value, usage_thr - 1, usage_one)
                else:
                    find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 3)
        # Try a -2 step: pays two 1-gaps of budget.
        if 2 <= usage_one:
            next_value = r - 2
            if next_value in jol_set:
                if next_value == 0:
                    # s.add(f'{path}-0')
                    sol_list.append(1)
                find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 2)
        # Try a -1 step: pays one 1-gap of budget.
        if 1 <= usage_one:
            next_value = r - 1
            if next_value in jol_set:
                if next_value == 0:
                    # s.add(f'{path}-0')
                    sol_list.append(1)
                find_path(f'{path}-{next_value}', s, next_value, usage_thr, usage_one - 1)
    find_path(f"{remain}", sol_set, remain, num_of_thr, num_of_one)
    # print(f'sol_set: {sol_set}')
    print(f'sol_count: {len(sol_list)}')
def p2():
    """Solve AoC 2020 day 10 part 2: count distinct adapter arrangements.

    Dynamic programming over joltage values: data[j] is the number of valid
    chains from the wall outlet (0 jolts) up to joltage j; the answer is
    data[max rating].
    """
    _, _, jol_num = p1('input.txt')
    jol_num.append(0)   # the wall outlet (0 jolts) starts every chain
    print(jol_num)
    jol_num.reverse()   # ascending, so smaller joltages are computed first
    data = np.zeros(jol_num[-1] + 1, dtype=int)
    for jol in jol_num:
        if jol == 0:
            data[jol] = 1  # exactly one way to stand at the outlet
        else:
            # An adapter can follow anything 1-3 jolts below it; joltages
            # absent from the input simply contribute data[...] == 0.
            # (The original hard-coded data[3] = 4, which is only correct
            # when adapters rated 1 and 2 are both present in the input.)
            data[jol] = sum(data[jol - k] for k in (1, 2, 3) if jol - k >= 0)
    print(f'data : {data}')
    print(f'answer : {data[-1]}')
if __name__ == '__main__':
    # Part 1 (and the abandoned brute force p2_wrong) were run from here
    # originally; their invocations are kept below as comments for reference.
    # num_of_thr, num_of_one, _ = p1("input.txt")
    # print(num_of_thr)
    # print(num_of_one)
    # print((num_of_thr) * num_of_one)
    # p2_wrong()
    p2()
| [
"jason9075@gmail.com"
] | jason9075@gmail.com |
8ec248e69b206d1a951f4d862f13b4e8dbc3705d | f3e5b47bb4781415f427af4c34f909ae3b67f411 | /概率/概率/资金流预测/function/概率/未命名文件夹/get_user_p.py | bcca6de5dad8132e3e108822f1ed31729df598b9 | [] | no_license | abnering/alitianchi | 83a45e4bc7d9adb06ad7451231ba241413768ba5 | 3e16a37ea4dd69c97f6d06fa8faa7ce81af23376 | refs/heads/master | 2020-05-18T15:34:30.783552 | 2015-06-28T08:58:29 | 2015-06-28T08:58:29 | 38,192,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import cPickle as pickle
def get_week_count():
    """Return how often each weekday slot 0..6 occurs across a 92-day span.

    Day i of the span lands in slot i % 7, so with 92 = 13 * 7 + 1 days the
    first slot occurs 14 times and the remaining six occur 13 times each.
    """
    full_weeks, extra_days = divmod(92, 7)
    return [full_weeks + 1 if slot < extra_days else full_weeks
            for slot in range(7)]
def get_purchase_p():
    """Convert per-weekday purchase totals into per-weekday daily averages.

    Loads the '678' purchase-count pickle (presumably June-August:
    30 + 31 + 31 = 92 days -- matching get_week_count), divides each user's
    weekday total by how many times that weekday occurs in the span, and
    pickles the resulting per-user 7-slot vectors.

    Python 2 code: uses the `file` builtin and cPickle (see module import).
    """
    week_count = get_week_count()
    usr_purchase_p = {}
    f1 = file("../data/get_678_purchase.pkl",'rb')
    usr_purchase = pickle.load(f1)
    for key in usr_purchase.keys():
        # Keys are iterated once from usr_purchase, so this guard fires on
        # every first (and only) visit; it seeds a 7-slot zero vector.
        if key not in usr_purchase_p.keys():
            usr_purchase_p[key] = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
        for day in range(7):
            usr_purchase_p[key][day] = float(usr_purchase[key][day])/week_count[day]
    f11 = file("../data/get_purchase_p.pkl",'wb')
    pickle.dump(usr_purchase_p,f11)
    f1.close()
    f11.close()
def get_redeem_p():
    """Convert per-weekday redeem totals into per-weekday daily averages.

    Mirror of get_purchase_p for the redeem pickles: divides each user's
    weekday total by the number of occurrences of that weekday in the
    92-day span, then pickles the per-user 7-slot vectors.

    Python 2 code: uses the `file` builtin and cPickle (see module import).
    """
    usr_redeem_p = { }
    week_count = get_week_count()
    f1 = file("../data/get_678_redeem.pkl",'rb')
    usr_redeem = pickle.load(f1)
    for key in usr_redeem.keys():
        # Seeds a 7-slot zero vector the first (and only) time a key is seen.
        if key not in usr_redeem_p.keys():
            usr_redeem_p[key] = [0.0,0.0,0.0,0.0,0.0,0.0,0.0]
        for day in range(7):
            usr_redeem_p[key][day] = float(usr_redeem[key][day])/week_count[day]
    f11 = file("../data/get_redeem_p.pkl","wb")
    pickle.dump(usr_redeem_p,f11)
    f1.close()
    f11.close()
| [
"="
] | = |
717e46f0ffd27a18a0b9b54a3611e47516442f14 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_15746.py | 3243066645dbac93483ab2cce76bcdfcaa156fe8 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # string.format, differing length breaks table
# Python 2 snippet (print statement): render rows as fixed-width columns.
for i in table_data:
    interface,mac,ip = i
    # NOTE(review): the trailing '{s}' placeholder expects a keyword argument
    # named 's'; only positionals are passed, so this .format raises KeyError.
    print '{:<20s}{:<20s}{:<20s}{s}'.format(ip, mac,'ARPA' ,interface)
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
3d17cdf60b5b1c9e6634e1185b48af2d4710f512 | 3c95032b4dfaa243a5dcf98956a233bb0b2c97b2 | /plot/swiss.py | 000063886447d17d0e6ceca70eadf629362c9d06 | [] | no_license | jhui/tf2 | b8c8736958117eca824fa83baddf6b48ebbc6b1d | 0eed446dd6252d17d23ef44140945dd8e25d06d4 | refs/heads/main | 2023-06-07T11:49:23.878736 | 2023-06-01T17:59:52 | 2023-06-01T17:59:52 | 324,392,041 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import matplotlib.pyplot as plt
from sklearn import manifold, datasets
# --- Swiss roll: generate and show the 3-D point cloud ----------------------
sr_points, sr_color = datasets.make_swiss_roll(n_samples=8000, random_state=14)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sr_points[:, 0], sr_points[:, 1], sr_points[:, 2], '.', c=sr_color, s=4, alpha=0.8
)
ax.set_title("Swiss Roll in Ambient Space")
ax.view_init(azim=-66, elev=12)
# NOTE(review): the label text says n_samples=1500, but make_swiss_roll above
# was called with n_samples=8000 -- the annotation is stale.
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
# --- Embed the roll into 2-D with LLE and t-SNE -----------------------------
sr_lle, sr_err = manifold.locally_linear_embedding(
    sr_points, n_neighbors=12, n_components=2
)
sr_tsne = manifold.TSNE(n_components=2, perplexity=40, random_state=0).fit_transform(
    sr_points
)
fig, axs = plt.subplots(figsize=(8, 8), nrows=2)
axs[0].scatter(sr_lle[:, 0], sr_lle[:, 1], c=sr_color, s=0.5)
axs[0].set_title("LLE Embedding of Swiss Roll")
axs[1].scatter(sr_tsne[:, 0], sr_tsne[:, 1], c=sr_color, s=0.5, alpha=0.6)
_ = axs[1].set_title("t-SNE Embedding of Swiss Roll")
plt.show()
# --- Swiss roll with a hole: generate and show ------------------------------
sh_points, sh_color = datasets.make_swiss_roll(
    n_samples=400, hole=True, random_state=10
)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection="3d")
fig.add_axes(ax)
ax.scatter(
    sh_points[:, 0], sh_points[:, 1], sh_points[:, 2], '.', c=sh_color, s=0.5, alpha=0.6
)
ax.set_title("Swiss-Hole in Ambient Space")
ax.view_init(azim=-66, elev=12)
# NOTE(review): stale label again -- this cloud was generated with n_samples=400.
_ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes)
| [
"jonathan@jonathanhui.com"
] | jonathan@jonathanhui.com |
204379dedd5ca575eb34d4a3ecf6f7fb845eb022 | 80cfb5f42bc93433249bb2789be33208bb037679 | /build/lib/platin/language/__init__.py | 6e2e3196480267064d1af89f669431bd5d059f80 | [] | no_license | legibe/platin | b3960975c214b99d699fc3c8d5e94d473d173a8c | 430c4421ac2056c3041c1ac4b7f950c202334fb4 | refs/heads/master | 2020-12-24T21:28:15.740529 | 2016-05-13T00:29:53 | 2016-05-13T00:29:53 | 55,983,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | #--------------------------------------------------------------------------------
# Copyright (c) 2013, MediaSift Ltd
# All rights reserved.
# Distribution of this software is strictly forbidden under the terms of this
# license.
#
# Author: Claude Gibert
#
#--------------------------------------------------------------------------------
import jsonreader
| [
"claudegibert@Bearwood.local"
] | claudegibert@Bearwood.local |
e630acfcebf47e142561828d46fe7d83a788efc0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv1/tests/test_sampler.py | 8c6b401c06506371ea5f98f7873d093d3ee98610 | [
"LicenseRef-scancode-proprietary-license",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 7,905 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from mmdet.core import MaxIoUAssigner
from mmdet.core.bbox.samplers import OHEMSampler, RandomSampler
def test_random_sampler():
    """RandomSampler on a normal assignment: sampled boxes and indices pair up."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    # Four proposals and two ground-truth boxes; coordinates appear to be
    # (x1, y1, x2, y2) -- TODO confirm against mmdet's box convention.
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    # One ignore region, overlapping the last proposal.
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # The sampler must return one box per sampled index, for both pos and neg.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_gt():
    """RandomSampler edge case: no ground-truth boxes at all."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # Zero ground truths: empty (0, 4) box tensor and empty long label tensor.
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0, ).long()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # Even with no gts, boxes and indices must stay in one-to-one pairs.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_pred():
    """RandomSampler edge case: no proposal boxes at all."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    # Zero proposals: empty (0, 4) box tensor.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # Boxes and indices must stay in one-to-one pairs even with no proposals.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def _context_for_ohem():
    """Build a Faster R-CNN detector to serve as OHEMSampler's 'context'.

    Loads the faster_rcnn_ohem_r50_fpn_1x config via the test_forward helper,
    strips the pretrained weights, and builds the detector on CPU.
    """
    try:
        from test_forward import _get_detector_cfg
    except ImportError:
        # Hack: grab testing utils from test_forward to make a context for ohem
        import sys
        from os.path import dirname
        sys.path.insert(0, dirname(__file__))
        from test_forward import _get_detector_cfg
    model, train_cfg, test_cfg = _get_detector_cfg(
        'faster_rcnn_ohem_r50_fpn_1x.py')
    model['pretrained'] = None
    # torchvision roi align supports CPU
    model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
    from mmdet.models import build_detector
    context = build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
    return context
def test_ohem_sampler():
    """OHEMSampler on a normal assignment: sampled boxes and indices pair up."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    # OHEM needs a detector ('context') to score candidate samples.
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    # Five random feature maps of halving resolution (64x64 down to 4x4),
    # passed to the sampler via feats=.
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_gt():
    """OHEMSampler edge case: no ground-truth boxes at all."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    # Zero ground truths and an empty ignore tensor.
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    # Boxes and indices must stay in one-to-one pairs even with no gts.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_pred():
    """OHEMSampler edge case: no proposal boxes at all."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    # Zero proposals: empty (0, 4) box tensor.
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10,
        pos_fraction=0.5,
        context=context,
        neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    # Boxes and indices must stay in one-to-one pairs even with no proposals.
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sample_result():
    """SamplingResult.random must not raise across gt/pred count combinations and rngs."""
    from mmdet.core.bbox.samplers.sampling_result import SamplingResult
    count_cases = [
        (0, 0),
        (0, 3),
        (3, 3),
        (0, 3),
        (7, 7),
        (7, 64),
        (24, 3),
    ]
    for gt_count, pred_count in count_cases:
        SamplingResult.random(num_gts=gt_count, num_preds=pred_count)
    for seed in range(3):
        SamplingResult.random(rng=seed)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
fd7500964a5e092683adba5c3939e8b99221bfb9 | fa51b088ea761b78cf0c85837fabaa0b7035b105 | /compute/client_library/ingredients/instance-templates/create_from_instance.py | 1450cf02f213a99b15e8ff2976bf5fa433f50640 | [
"Apache-2.0"
] | permissive | manavgarg/python-docs-samples | f27307022092bc35358b8ddbd0f73d56787934d1 | 54b9cd6740b4dbc64db4d43a16de13c702b2364b | refs/heads/master | 2023-02-07T21:18:15.997414 | 2023-01-28T18:44:11 | 2023-01-28T18:44:11 | 245,290,674 | 0 | 0 | Apache-2.0 | 2020-03-05T23:44:17 | 2020-03-05T23:44:16 | null | UTF-8 | Python | false | false | 2,677 | py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an ingredient file. It is not meant to be run directly. Check the samples/snippets
# folder for complete code samples that are ready to be used.
# Disabling flake8 for the ingredients file, as it would fail F821 - undefined name check.
# flake8: noqa
from google.cloud import compute_v1
# <INGREDIENT create_template_from_instance>
def create_template_from_instance(
    project_id: str, instance: str, template_name: str
) -> compute_v1.InstanceTemplate:
    """
    Create a new instance template based on an existing instance, overriding
    its boot disk with a Rocky Linux image.

    Args:
        project_id: project ID or project number of the Cloud project you use.
        instance: the instance to base the new template on, in the form
            "projects/{project}/zones/{zone}/instances/{instance_name}".
        template_name: name of the new template to create.

    Returns:
        InstanceTemplate object that represents the new instance template.
    """
    # Describe how the source instance's boot disk should be instantiated.
    boot_disk_config = compute_v1.DiskInstantiationConfig()
    # Must match the device name of a disk attached to the source instance.
    boot_disk_config.device_name = "disk-1"
    # Swap the original boot image for a Rocky Linux family image.
    boot_disk_config.instantiate_from = "CUSTOM_IMAGE"
    boot_disk_config.custom_image = "projects/rocky-linux-cloud/global/images/family/rocky-linux-8"
    # Override the auto_delete setting.
    boot_disk_config.auto_delete = True

    new_template = compute_v1.InstanceTemplate()
    new_template.name = template_name
    new_template.source_instance = instance
    new_template.source_instance_params = compute_v1.SourceInstanceParams()
    new_template.source_instance_params.disk_configs = [boot_disk_config]

    client = compute_v1.InstanceTemplatesClient()
    operation = client.insert(
        project=project_id, instance_template_resource=new_template
    )
    wait_for_extended_operation(operation, "instance template creation")
    return client.get(project=project_id, instance_template=template_name)
# </INGREDIENT>
| [
"71398022+dandhlee@users.noreply.github.com"
] | 71398022+dandhlee@users.noreply.github.com |
0027e82f0d6f818aca29541ec19d064e1b701138 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/resourcehealth/azure-mgmt-resourcehealth/azure/mgmt/resourcehealth/v2015_01_01/aio/__init__.py | 0333edd9e09f5f821f552a2ee2cabccdf0696107 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 570 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._microsoft_resource_health import MicrosoftResourceHealth
__all__ = ['MicrosoftResourceHealth']
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
037d7f026b3234c024d54712032323343ef39fe2 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/1302. Deepest Leaves Sum.py | a2ec36969b746bee15fc4af758331ba0cc235878 | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | '''
Given the root of a binary tree, return the sum of values of its deepest leaves.
Example 1:
Input: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]
Output: 15
Example 2:
Input: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]
Output: 19
Constraints:
The number of nodes in the tree is in the range [1, 104].
1 <= Node.val <= 100
'''
import unittest
from typing import *
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
class Solution:
def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:
q = deque([root])
while q:
size = len(q)
sum_v = 0
for _ in range(size):
node = q.popleft()
sum_v += node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return sum_v
| [
"xiaohuanlin1993@gmail.com"
] | xiaohuanlin1993@gmail.com |
fcbdc53a51fb6af5dbdd65a63ece1bb714a0861f | 50f4d2bb1b1222bcb2eb0122c48a0dd254deddfc | /Solve Algorithm/Dicevolume.py | 539f6cb16e57da789adb0d754f9c9e082760acc6 | [] | no_license | yejinee/Algorithm | 9ae1c40382e9dcd868a28d42fe1cc543b790c7f5 | 81d409c4d0ea76cf152a5f334e53a870bc0656a7 | refs/heads/master | 2023-04-13T01:29:44.579635 | 2023-04-05T06:23:26 | 2023-04-05T06:23:26 | 235,014,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | """
태민이는 주사위를 수집하는 취미를 가지고 있습니다.
주사위의 모양과 색깔은 각기 다르며, 크기 또한 다릅니다.
태민이는 지금까지 모은 N개의 주사위가 너무 난잡하게 보관해놓고 있어서
정리를 결심했습니다. 그래서 우선 N개의 주사위를 크기 순서대로 정리해보려고 마음 먹었습니다.
그렇게 주사위를 순서대로 정렬시켜보니 각 변의 길이가 1부터 N까지 모두 있는 것을 알게되었습니다.
이 사실이 매우 신기했던 태민이는 이 주사위들의 부피의 합은 어떻게 될지 궁금해졌습니다.
태민이가 현재 가지고 있는 모든 주사위의 부피의 합은 얼마일까요? 태민이의 궁금증을 풀어주세요!
"""
# SIGMA 1-N (N^3)
n = int(input())
#for문으로 구해보기 -> Time이 너무 오래 걸림
"""
sum=0
for i in range(1,n+1):
sum=sum+(i*i*i)
print(sum%1000000007)
"""
#SIGMA공식 사용하기
sum=(n*(n+1))/2 %1000000007
answer=(sum*sum) %1000000007
print("%d"%answer) | [
"kimyj9609@gmail.com"
] | kimyj9609@gmail.com |
0821ebaacd10da4c7d9f3837197682eb6f0d5430 | 1c61f90e32431a0bf813aa6ace88f7fa2627ee6f | /leadmanager/leads/serializers.py | 8e9d07756880747a16828975e4a37bf14d7cf81e | [] | no_license | WilliamOtieno/LeadManager_ReactDjango | 4c2b24072192c233ed9ab12521554272491f89bf | 209dc549651cebb7445f040bc38bdde275c67de9 | refs/heads/master | 2023-05-25T18:44:34.445294 | 2021-05-29T22:29:19 | 2021-05-29T22:29:19 | 371,958,224 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | from .models import Lead
from rest_framework import serializers
class LeadSerializer(serializers.ModelSerializer):
class Meta:
model = Lead
fields = "__all__"
| [
"jimmywilliamotieno@gmail.com"
] | jimmywilliamotieno@gmail.com |
2b37db3d071a1c49eb259947fc325cbdc7cc9c67 | 3b7b6648b72910046b6a227db30f71aeee2cba9c | /2020-12-10-deepergooglenet/config/tiny_imagenet_config.py | 0741ab950f8790d2664a0b6bcd3f28c8091fa011 | [] | no_license | ken2190/deep-learning-study | f2abeb1cd302e405a15bbb52188ae44ffb414e2f | f2998be89d0c931176f158ae5f48ca562786e171 | refs/heads/main | 2023-04-02T05:07:08.504212 | 2021-04-11T15:11:22 | 2021-04-11T15:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | import os
def path_join(*arr):
return os.path.sep.join([*arr])
def cd(path):
return os.system("cd {}".format(path))
def start(path):
os.system("start {}".format(path))
PROJECT_DIR = os.path.sep.join(os.path.realpath(__file__).split(os.path.sep)[:-2])
# DATASET_DIR = path_join("d:", "datasets", "tiny-imagenet-200")
# hdf5 conversion is much quicker if dataset lives in our SSD:
DATASET_DIR = path_join("C:", "Users", "user", "Repos", "Python", "DeepLearning",
"deep-learning-study", "datasets", "tiny-imagenet-200")
TRAIN_IMAGES_DIR = path_join(DATASET_DIR, "train")
VAL_IMAGES_DIR = path_join(DATASET_DIR, "val")
VAL_MAPPING_TXT = path_join(DATASET_DIR, "val", "val_annotations.txt")
WORDNET_IDS = path_join(DATASET_DIR, "wnids.txt")
WORD_LABELS = path_join(DATASET_DIR, "wnids.txt")
NUM_CLASSES = 200
NUM_TEST_IMAGES = 50 * NUM_CLASSES
TRAIN_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "train.hdf5")
VAL_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "val.hdf5")
TEST_HDF5_PATH = path_join(PROJECT_DIR, "hdf5", "test.hdf5")
DATASET_MEAN = path_join(PROJECT_DIR, "output", "tiny-image-net-200-mean.json")
MODEL_PATH = path_join(PROJECT_DIR, "output", "checkpoints", "model_epoch_70.hdf5")
CHECKPOINT_DIR = path_join(PROJECT_DIR, "output", "checkpoints")
FIG_PATH = path_join(PROJECT_DIR, "output", "deepergooglenet_tinyimagenet.png")
JSON_PATH = path_join(PROJECT_DIR, "output", "deepergooglenet_tinyimagenet.json")
| [
"machingclee@gmail.com"
] | machingclee@gmail.com |
1266c0ee1674e1b6a510166f34ca4890175e4235 | f487b2f8086fcf97311a61f79b7d01382b7c04b4 | /anomaly detection/cat.py | 8b94702fa88412761c7aa0857ad9122b27d1ccbb | [] | no_license | vinayakumarr/extreme-learning-machine-for-security | 7c6386accc4c663e50ef5844388cd805ec396f50 | c888e9d8cf64f623e810d783f1c49a7ce57ad4be | refs/heads/master | 2021-05-09T21:19:01.985634 | 2018-01-24T06:53:47 | 2018-01-24T06:53:47 | 118,724,306 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from __future__ import print_function
import pandas as pd
from keras.utils.np_utils import to_categorical
import numpy as np
print("Loading")
testlabel = pd.read_csv('data/corrected.csv', header=None)
Y1 = testlabel.iloc[:,0]
y_test1 = np.array(Y1)
y_test= to_categorical(y_test1)
np.savetxt('data/correctedonehot.csv', y_test, fmt='%01d')
| [
"noreply@github.com"
] | vinayakumarr.noreply@github.com |
eeb0149de58ec4a390b835b3bb2a4b3341edca3c | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/eventgrid/azure-mgmt-eventgrid/azure/mgmt/eventgrid/aio/_event_grid_management_client.py | cda928965cc0af906415b9d662acd77e43c82655 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 7,322 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import EventGridManagementClientConfiguration
from .operations import DomainsOperations
from .operations import DomainTopicsOperations
from .operations import EventSubscriptionsOperations
from .operations import SystemTopicEventSubscriptionsOperations
from .operations import Operations
from .operations import TopicsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import SystemTopicsOperations
from .operations import ExtensionTopicsOperations
from .operations import TopicTypesOperations
from .. import models
class EventGridManagementClient(object):
"""Azure EventGrid Management Client.
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.eventgrid.aio.operations.DomainsOperations
:ivar domain_topics: DomainTopicsOperations operations
:vartype domain_topics: azure.mgmt.eventgrid.aio.operations.DomainTopicsOperations
:ivar event_subscriptions: EventSubscriptionsOperations operations
:vartype event_subscriptions: azure.mgmt.eventgrid.aio.operations.EventSubscriptionsOperations
:ivar system_topic_event_subscriptions: SystemTopicEventSubscriptionsOperations operations
:vartype system_topic_event_subscriptions: azure.mgmt.eventgrid.aio.operations.SystemTopicEventSubscriptionsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.eventgrid.aio.operations.Operations
:ivar topics: TopicsOperations operations
:vartype topics: azure.mgmt.eventgrid.aio.operations.TopicsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.eventgrid.aio.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.eventgrid.aio.operations.PrivateLinkResourcesOperations
:ivar system_topics: SystemTopicsOperations operations
:vartype system_topics: azure.mgmt.eventgrid.aio.operations.SystemTopicsOperations
:ivar extension_topics: ExtensionTopicsOperations operations
:vartype extension_topics: azure.mgmt.eventgrid.aio.operations.ExtensionTopicsOperations
:ivar topic_types: TopicTypesOperations operations
:vartype topic_types: azure.mgmt.eventgrid.aio.operations.TopicTypesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials that uniquely identify a Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = EventGridManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.domains = DomainsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.domain_topics = DomainTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.event_subscriptions = EventSubscriptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.system_topic_event_subscriptions = SystemTopicEventSubscriptionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.topics = TopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.system_topics = SystemTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.extension_topics = ExtensionTopicsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.topic_types = TopicTypesOperations(
self._client, self._config, self._serialize, self._deserialize)
async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "EventGridManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | catchsrinivas.noreply@github.com |
c43915e05bf79a467cc796b6ded3f746d250c066 | 2bf75b04b6a80d17a40170fb5e3e6998dc58981b | /l2tdevtools/build_helpers/source.py | ba428d24dc6eb092383430c66a999b946efd81dd | [
"Apache-2.0"
] | permissive | kiddinn/l2tdevtools | 53534a3c3f9d2fb3b6c585611daf0fc5a336d84c | 0e55b449a5fc19dbab68980df47d9073b7f0618c | refs/heads/master | 2020-03-25T19:07:15.219418 | 2018-11-26T05:26:09 | 2018-11-26T05:26:09 | 144,065,721 | 0 | 0 | Apache-2.0 | 2018-08-08T20:45:29 | 2018-08-08T20:45:29 | null | UTF-8 | Python | false | false | 3,309 | py | # -*- coding: utf-8 -*-
"""Helper for building projects from source."""
from __future__ import unicode_literals
import logging
import os
import subprocess
import sys
from l2tdevtools.build_helpers import interface
class SourceBuildHelper(interface.BuildHelper):
"""Helper to build projects from source."""
class ConfigureMakeSourceBuildHelper(SourceBuildHelper):
"""Helper to build projects from source using configure and make."""
def Build(self, source_helper_object):
"""Builds the source.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source of: {0:s}'.format(source_filename))
if self._project_definition.patches:
# TODO: add self._ApplyPatches
pass
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = './configure > {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
command = 'make >> {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
def Clean(self, unused_source_helper_object):
"""Cleans the source.
Args:
source_helper_object (SourceHelper): source helper.
"""
# TODO: implement.
return
class SetupPySourceBuildHelper(SourceBuildHelper):
"""Helper to build projects from source using setup.py."""
def Build(self, source_helper_object):
"""Builds the source.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source of: {0:s}'.format(source_filename))
if self._project_definition.patches:
# TODO: add self._ApplyPatches
pass
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = '{0:s} setup.py build > {1:s} 2>&1'.format(
sys.executable, log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
| [
"joachim.metz@gmail.com"
] | joachim.metz@gmail.com |
a91d27053468eaf6f98de68ee62bcfe05d192e2b | 3cda2dc11e1b7b96641f61a77b3afde4b93ac43f | /nni/nas/evaluator/__init__.py | 5c14415483187674a4095a7419b65bde73c68a95 | [
"MIT"
] | permissive | Eurus-Holmes/nni | 6da51c352e721f0241c7fd26fa70a8d7c99ef537 | b84d25bec15ece54bf1703b1acb15d9f8919f656 | refs/heads/master | 2023-08-23T10:45:54.879054 | 2023-08-07T02:39:54 | 2023-08-07T02:39:54 | 163,079,164 | 3 | 2 | MIT | 2023-08-07T12:35:54 | 2018-12-25T12:04:16 | Python | UTF-8 | Python | false | false | 250 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from nni.common.framework import shortcut_framework
from .evaluator import *
from .functional import FunctionalEvaluator
shortcut_framework(__name__)
del shortcut_framework
| [
"noreply@github.com"
] | Eurus-Holmes.noreply@github.com |
7050101a01ba0b7f9d3b8222b5590e6ce9bb1653 | f2575444e57696b83ce6dcec40ad515b56a1b3a9 | /Python/Introduction/WriteAFunction.py | 308e029c6747ab642c896a925f68087dcd10765e | [] | no_license | abhi10010/Hackerrank-Solutions | 046487d79fc5bf84b4df5ef2117578d29cb19243 | da2a57b8ebfcc330d94d104c1755b8c62a9e3e65 | refs/heads/master | 2021-07-24T09:41:49.995295 | 2020-07-12T09:31:58 | 2020-07-12T09:31:58 | 195,647,097 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | def is_leap(year):
leap = False
if (year%4==0 and year%100!=0):
leap = True
if year%400==0:
leap = True
return leap
| [
"noreply@github.com"
] | abhi10010.noreply@github.com |
0eba26bcfcd9f5e9a4db59f3d9390ad11e96df73 | b92c73ac2fca8a1f16388cd553dafa0f167bda93 | /Unit 1/linear search.py | 26e1396a7e0eb697b0dee26b5592835914ced8f4 | [] | no_license | DamoM73/Digital-Solutions-old | deb8d0fd7c256113fd7fad56b4658896de9f1cba | 750b76d847e1d1c1661c3f1bbf7d56a72666f094 | refs/heads/master | 2023-08-16T05:35:03.641402 | 2021-10-05T02:36:34 | 2021-10-05T02:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | find = 10
found = False
number_list = [3,5,2,9,6,1,8,7]
length = len(number_list)
counter = 0
while found == False and counter < length:
if number_list[counter] == find:
found = True
print("Found at position", counter)
else:
counter = counter + 1
if found == False:
print("Number not in list") | [
"damomurtagh@gmail.com"
] | damomurtagh@gmail.com |
b5e24e6323440a416a551612ebd2d5789e7abce6 | f99a83f3d538a121184de88bff19ce396be6e3d5 | /stayclean-2022-november/checkin.py | 0035f9707815a1c2e971d997cf08e9fb7dca0582 | [
"MIT"
] | permissive | foobarbazblarg/stayclean | c38deddd971b795af58ae389b9e65914dea08d2d | 384a8261b1164797d6726166a6e40f323e5bd6dd | refs/heads/master | 2023-02-21T09:48:57.907540 | 2023-01-02T15:32:35 | 2023-01-02T15:32:35 | 45,186,602 | 1 | 0 | MIT | 2023-02-16T03:49:00 | 2015-10-29T13:59:33 | Python | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/env python3
import sys
from participantCollection import ParticipantCollection
if __name__ == "__main__":
names = sys.argv[1::]
participants = ParticipantCollection()
for name in names:
if participants.hasParticipantNamed(name):
participants.participantNamed(name).hasCheckedIn = True
print(f"just checked in {name}")
else:
print(f"*** WARNING: {name} is not present in participants.txt")
participants.save()
| [
"foobarbazblarg@gmail.com"
] | foobarbazblarg@gmail.com |
2a7f0cc94cfa60d0fbb656abc845e8140d33fd55 | 3f15b2aac6cc0d9d8c85174a85aa2e7130f50fe1 | /memo.py | 8bf98c8737e0e42e3496f87c0e1237c2a8af72ea | [] | no_license | dfoderick/bitshovel-memo | 811bb9cb1ba961bb00a5f7c31ec901e4d9576d04 | 138019988a637a56a3f0bbdc1db9808084902118 | refs/heads/master | 2020-04-14T20:58:16.248551 | 2019-01-07T06:35:15 | 2019-01-07T06:35:15 | 164,112,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | #naive implement memo.cash op_return protocol
#see https://memo.cash/protocol
#example: redis-cli publish memo.send <action> <value>
#redis-cli publish memo.send "Hello from BitShovel"
import sys
import redis
RED = redis.Redis(host="127.0.0.1", port="6379")
BUS = RED.pubsub()
def main():
#listen for anyone who wants to send to memo
memo_send = BUS.subscribe("memo.send")
for event in BUS.listen():
process_event(event)
def process_event(event):
try:
if event["type"] == "message" :
process_message(event["data"])
if event["type"] == "subscribe":
print('subscribed to {}'.format(event["channel"]))
except Exception as ex:
print(ex)
def process_message(data):
remainder = data
#default action is to post the data
prefix = "0x6d02"
command = "post"
if " " in data:
parsed = data.split(' ',1)
command = parsed[0].lower()
if len(parsed) > 1:
remainder = parsed[1]
if command == "setname":
prefix = "0x6d01"
elif command == "post":
prefix = "0x6d02"
elif command == "posttopic":
prefix = "0x6d0c"
else:
#no matches so dont eat the first word
remainder = data
send(command, prefix, remainder)
def send(command, prefix, remainder):
op_stuff = '{0} "{1}"'.format(prefix, remainder).lstrip()
RED.publish("bitshovel.send", op_stuff)
print('Send to BitShovel > {}'.format(op_stuff))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Shutting down...')
BUS.unsubscribe()
sys.exit()
| [
"dfoderick@gmail.com"
] | dfoderick@gmail.com |
40ba102f4f4aa53184eb28b5f4f40e5b7c9db825 | 63efeff58299f3ca66c7be0aa80d636ade379ebf | /2019/july/shape_test.py | fca26e372a82f7855aa73e817427a95a6685dd4b | [] | no_license | gosch/Katas-in-python | 0eb6bafe2d6d42dac64c644c2fd48f90bdcef22b | f89ee2accdde75222fa1e4e0ca8b4f8e27b7b760 | refs/heads/master | 2021-07-24T23:50:26.268217 | 2020-04-14T23:53:15 | 2020-04-14T23:53:15 | 137,545,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # importing pandas module
import pandas as pd
import math
# making data frame
from pandocfilters import Math
data = pd.read_csv("https://cdncontribute.geeksforgeeks.org/wp-content/uploads/nba.csv")
# dataframe.size
size = data.size
# dataframe.shape
shape = data.shape
# dataframe.ndim
df_ndim = data.ndim
# series.ndim
series_ndim = data["Salary"].ndim
# printing size and shape
# print("Size = {}\nShape ={}\nShape[0] x Shape[1] = {}".
# format(size, shape, shape[0] * shape[1]))
df['V']=df['Z7'] -[math.exp(x]) for x in df['VFACT']]
# printing ndim
# print("ndim of dataframe = {}\nndim of series ={}".
# format(df_ndim, series_ndim)) | [
"francisco.gosch@ge.com"
] | francisco.gosch@ge.com |
5734512409b516a061b0050f422b34b7ca2d1f69 | d185832a16690d4f8a84311e5083f95541d9105c | /tracker/model/cvegrouppackage.py | e83aeeb1d96482bef0dfa222dea89e049fa0ff1d | [
"MIT"
] | permissive | dukebarman/arch-security-tracker | 5eaf074215f567e11b128e5c9f58c5f1f6e91edc | 569efa7c6b509c96339baa151c82a7398eb79743 | refs/heads/master | 2020-04-17T13:15:00.724114 | 2019-01-24T10:41:08 | 2019-01-24T10:41:08 | 166,607,334 | 0 | 0 | MIT | 2019-01-24T10:41:09 | 2019-01-20T00:17:37 | Python | UTF-8 | Python | false | false | 656 | py | from tracker import db
class CVEGroupPackage(db.Model):
id = db.Column(db.Integer(), index=True, unique=True, primary_key=True, autoincrement=True)
group_id = db.Column(db.Integer(), db.ForeignKey('cve_group.id', ondelete="CASCADE"), nullable=False)
pkgname = db.Column(db.String(64), nullable=False)
group = db.relationship("CVEGroup", back_populates="packages")
__tablename__ = 'cve_group_package'
__table_args__ = (db.Index('cve_group_package__group_pkgname_idx', group_id, pkgname, unique=True),)
def __repr__(self):
return '<CVEGroupPackage %r from %r referencing %r>' % (self.id, self.group_id, self.pkgname)
| [
"levente@leventepolyak.net"
] | levente@leventepolyak.net |
51ab28d92dbcae0a65a1fae287c6b9e11d8a3168 | f2b172f7c1dcf0ac28fe7465b5844b48facade18 | /12/1208/capitals.py | a134c5e6f06169f662925732217f9bc809346ab1 | [] | no_license | 0gravity000/IntroducingPython | 2fde12485d0597e72a7da801a08d5048a47f2ff5 | 5d3281dbe37ed1a08d71cb6a36841781f9ac0ccf | refs/heads/master | 2023-07-19T02:53:23.081806 | 2021-09-30T01:51:44 | 2021-09-30T01:51:44 | 403,935,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | def process_cities(filename):
with open(filename, 'rt') as file:
for line in file:
line = line.strip()
if 'quit' == line.lower():
return
country, city = line.split(',')
city = city.strip()
country = country.strip()
print(city.title(), country.title(), sep=',')
if __name__ == '__main__':
import sys
process_cities(sys.argv[1])
| [
"0gravity000@gmail.com"
] | 0gravity000@gmail.com |
321c8151d99bfc35e1562fe05644e8a08ecc1a87 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/nyc-permitted-event-information/depositor.py | 476281f4fd7af0a89bd394682d4f9c09cebab620 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/views/tvpp-9vvx/rows.csv?accessType=DOWNLOAD")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-permitted-event-information/data.csv", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/nyc-permitted-event-information/data.csv"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
8da7371fc0333a1219c82545e88fc239fe9137f0 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/wiColorJ/pyr_Tcrop255_p60_j15/Add2Loss/Sob_k21_s001_Bce_s001/pyr_1s/L3/step10_a.py | 526d45d6ec62d423f84189f8d28903e1c483f9a1 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,320 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_1side_L3 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
6_mask_unet/自己命的名字/result_a
6_mask_unet/自己命的名字/result_b
6_mask_unet/自己命的名字/...
'''
use_db_obj = type8_blender_kong_doc3d_in_I_gt_MC
use_loss_obj = [G_bce_s001_sobel_k21_s001_loss_info_builder.set_loss_target("UNet_Mask").copy()] ### z, y, x 順序是看 step07_b_0b_Multi_UNet 來對應的喔
#############################################################
### 為了resul_analyze畫空白的圖,建一個empty的 Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900, it_save_fq=900 * 2, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### 直接按 F5 或打 python step10_b1_exp_obj_load_and_train_and_test.py,後面沒有接東西喔!才不會跑到下面給 step10_b_subprocss.py 用的程式碼~~~
ch032_1side_1.build().run()
# print('no argument')
sys.exit()
### 以下是給 step10_b_subprocess.py 用的,相當於cmd打 python step10_b1_exp_obj_load_and_train_and_test.py 某個exp.build().run()
eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
bcd6a3fac9732cbc1890b685ff96be4072b34044 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2064/60769/243881.py | 6745299c74805287f773ea018a445a74748503e3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | a = input()
dict = {"IV": 4, "IX": 9, "XL": 40, "XC": 90, "CD": 400, "CM": 900,
"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
res = 0
jmp = False
for i in range(len(a)):
if jmp:
jmp = False
continue
if i < len(a) - 1:
if dict[a[i]] >= dict[a[i + 1]]:
res += dict[a[i]]
else:
res += dict[a[i:i + 2]]
i += 1
jmp = True
else:
res += dict[a[i]]
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
bd0a17a144b476d54d6ec4c36018bac20e5578a4 | 08cb6c716f24ad0e5c3fe8fb2c292e81b63fc518 | /python/problem17a.py | 5843761bced645fe35cd1765502fc11ee322c31b | [
"MIT"
] | permissive | amyreese/euler | 170ada3909500cdc1e3394406b57529d9d5fe839 | 0e2a809620cb02367120c0fbfbf9b419edd42c6e | refs/heads/master | 2022-09-01T16:53:13.423566 | 2015-12-30T05:30:04 | 2015-12-30T05:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py |
onetonine = len("onetwothreefourfivesixseveneightnine")
onetoten = onetonine + len("ten")
eleventotwenty = len("eleventwelvethirteenfourteenfifteensixteenseventeeneighteennineteen")
twenties = len("twenty")*10 + onetonine
thirties = len("thirty")*10 + onetonine
forties = len("forty")*10 + onetonine
fifties = len("fifty")*10 + onetonine
sixties = len("sixty")*10 + onetonine
seventies = len("seventy")*10 + onetonine
eighties = len("eighty")*10 + onetonine
nineties = len("ninety")*10 + onetonine
hundred = onetoten + eleventotwenty + twenties + thirties + forties + fifties + sixties + seventies + eighties + nineties
onehundreds = len("onehundredand") * 100 - 3 + hundred
twohundreds = len("twohundredand") * 100 - 3 + hundred
threehundreds = len("threehundredand") * 100 - 3 + hundred
fourhundreds = len("fourhundredand") * 100 - 3 + hundred
fivehundreds = len("fivehundredand") * 100 - 3 + hundred
sixhundreds = len("sixhundredand") * 100 - 3 + hundred
sevenhundreds = len("sevenhundredand") * 100 - 3 + hundred
eighthundreds = len("eighthundredand") * 100 - 3 + hundred
ninehundreds = len("ninehundredand") * 100 - 3 + hundred
thousands = len("onethousand") + ninehundreds + eighthundreds + sevenhundreds +\
sixhundreds + fivehundreds + fourhundreds + threehundreds + twohundreds +\
onehundreds + hundred
print thousands
| [
"john@noswap.com"
] | john@noswap.com |
dec6b3e4df26199d84d883efb11adff47ef6e9cb | df60bc5a9c27b54b95568a9f04102785c7bc12c2 | /samples/resnet-cmle/resnet-train-pipeline.py | b709051693e09c6c968564be030ee8dbd2486eb4 | [
"Apache-2.0"
] | permissive | Anthonymcqueen21/pipelines | 1d9d0ae20ebd43b590b88d3f9cbb398f21488be4 | f0c8432748e5c013451dcac8e1ee3aee0bb415cc | refs/heads/master | 2020-06-14T09:45:10.246821 | 2019-07-03T00:11:13 | 2019-07-03T00:11:13 | 194,972,318 | 1 | 0 | Apache-2.0 | 2019-07-03T03:22:36 | 2019-07-03T03:22:36 | null | UTF-8 | Python | false | false | 5,903 | py | #!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.components as comp
import datetime
import json
import os
dataflow_python_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/dataflow/launch_python/component.yaml')
cloudml_train_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/ml_engine/train/component.yaml')
cloudml_deploy_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/2e52e54166795d20e92d287bde7b800b181eda02/components/gcp/ml_engine/deploy/component.yaml')
def resnet_preprocess_op(project_id: 'GcpProject', output: 'GcsUri', staging_dir: 'GcsUri', train_csv: 'GcsUri[text/csv]',
validation_csv: 'GcsUri[text/csv]', labels, train_size: 'Integer', validation_size: 'Integer',
step_name='preprocess'):
return dataflow_python_op(
python_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/preprocess.py',
project_id=project_id,
requirements_file_path='gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/preprocess/requirements.txt',
staging_dir=staging_dir,
args=json.dumps([
'--train_csv', str(train_csv),
'--validation_csv', str(validation_csv),
'--labels', str(labels),
'--output_dir', str(output),
'--train_size', str(train_size),
'--validation_size', str(validation_size)
])
)
def resnet_train_op(project_id, data_dir, output: 'GcsUri', region: 'GcpRegion', depth: int, train_batch_size: int,
eval_batch_size: int, steps_per_eval: int, train_steps: int, num_train_images: int,
num_eval_images: int, num_label_classes: int, tf_version, step_name='train'):
return cloudml_train_op(
project_id=project_id,
region='us-central1',
python_module='trainer.resnet_main',
package_uris=json.dumps(
['gs://ml-pipeline-playground/samples/ml_engine/resnet-cmle/trainer/trainer-1.0.tar.gz']),
job_dir=output,
args=json.dumps([
'--data_dir', str(data_dir),
'--model_dir', str(output),
'--use_tpu', 'True',
'--resnet_depth', str(depth),
'--train_batch_size', str(train_batch_size),
'--eval_batch_size', str(eval_batch_size),
'--steps_per_eval', str(steps_per_eval),
'--train_steps', str(train_steps),
'--num_train_images', str(num_train_images),
'--num_eval_images', str(num_eval_images),
'--num_label_classes', str(num_label_classes),
'--export_dir', '{}/export'.format(str(output))
]),
runtime_version=tf_version,
training_input=json.dumps({
'scaleTier': 'BASIC_TPU'
})
)
def resnet_deploy_op(model_dir, model, version, project_id: 'GcpProject', region: 'GcpRegion',
tf_version, step_name='deploy'):
# TODO(hongyes): add region to model payload.
return cloudml_deploy_op(
model_uri=model_dir,
project_id=project_id,
model_id=model,
version_id=version,
runtime_version=tf_version,
replace_existing_version='True'
)
@dsl.pipeline(
name='ResNet_Train_Pipeline',
description='Demonstrate the ResNet50 predict.'
)
def resnet_train(
project_id,
output,
region='us-central1',
model='bolts',
version='beta1',
tf_version='1.12',
train_csv='gs://bolts_image_dataset/bolt_images_train.csv',
validation_csv='gs://bolts_image_dataset/bolt_images_validate.csv',
labels='gs://bolts_image_dataset/labels.txt',
depth=50,
train_batch_size=1024,
eval_batch_size=1024,
steps_per_eval=250,
train_steps=10000,
num_train_images=218593,
num_eval_images=54648,
num_label_classes=10):
output_dir = os.path.join(str(output), '{{workflow.name}}')
preprocess_staging = os.path.join(output_dir, 'staging')
preprocess_output = os.path.join(output_dir, 'preprocessed_output')
train_output = os.path.join(output_dir, 'model')
preprocess = resnet_preprocess_op(project_id, preprocess_output, preprocess_staging, train_csv,
validation_csv, labels, train_batch_size, eval_batch_size).apply(gcp.use_gcp_secret())
train = resnet_train_op(project_id, preprocess_output, train_output, region, depth, train_batch_size,
eval_batch_size, steps_per_eval, train_steps, num_train_images, num_eval_images,
num_label_classes, tf_version).apply(gcp.use_gcp_secret())
train.after(preprocess)
export_output = os.path.join(str(train.outputs['job_dir']), 'export')
deploy = resnet_deploy_op(export_output, model, version, project_id, region,
tf_version).apply(gcp.use_gcp_secret())
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(resnet_train, __file__ + '.zip')
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
ce8d7ce0b7077bb3fb89194e7b211332cc888836 | 533931c3d15020c6d1aab46f07a602b8257dada3 | /cs3311/ass3/bio | b8c136726899aaf277beacafbc72fb4bec47bd7a | [] | no_license | JoeZhao527/database-system | 9076b1ce6a978b8dfb461bb1d1da69ab887ff68e | 1bfdd9f536f1d67f487ed187aa77687e15192560 | refs/heads/main | 2023-03-24T13:09:56.117135 | 2021-03-19T01:47:15 | 2021-03-19T01:47:15 | 300,784,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | #!/usr/bin/python3
# COMP3311 20T3 Ass3 ... get Name's biography/filmography
import sys
import psycopg2
#from helpers import whatever, functions, you, need
# define any local helper functions here
# set up some globals
usage = "Usage: bio 'NamePattern' [Year]"
db = None
pattern = None
year = None
input = None
# process command-line args
argc = len(sys.argv)
if argc == 2:
pattern = sys.argv[1]
input = '\''+pattern+'\''
elif argc == 3 and (sys.argv[2]).isdigit():
pattern = sys.argv[1]
year = sys.argv[2]
input = '\''+pattern+'\''+' '+str(year)
else:
print(usage)
exit()
# manipulate database
select_names1 = """
select name, birth_year, death_year, id
from Names n
where n.name ~* %s
order by name, birth_year, n.id;
"""
select_names2 = """
select name, birth_year, death_year, id
from Names n
where n.name ~* %s and birth_year = %s
order by name, birth_year, n.id;
"""
select_movies = """
select m.title, m.start_year, m.id
from Movies m
join Principals p on (m.id = p.movie_id)
where p.name_id = %s
order by m.start_year, m.title
"""
select_actors = """
select n.name, a.played
from Movies m
join Acting_roles a on (m.id = a.movie_id)
join Principals p on (m.id = p.movie_id and a.name_id = p.name_id)
join Names n on (n.id = a.name_id)
where m.id = %s and n.id = %s
order by p.ordering, a.played
"""
select_crews = """
select n.name, c.role
from Movies m
join Crew_roles c on (m.id = c.movie_id)
join Principals p on (m.id = p.movie_id and c.name_id = p.name_id)
join Names n on (n.id = c.name_id)
where m.id = %s and n.id = %s
order by p.ordering, c.role
"""
try:
db = psycopg2.connect("dbname=imdb")
# ... add your code here ...
cur = db.cursor()
num_names = 0
if year == None:
cur.execute(select_names1, [pattern])
num_names = len(list(cur))
cur.execute(select_names1, [pattern])
else:
cur.execute(select_names2, (pattern, year))
num_names = len(list(cur))
cur.execute(select_names2, (pattern, year))
if num_names == 0:
print('No name matching '+input)
elif num_names == 1:
name_id = None
for names in cur.fetchall():
if names[1] == None:
print('Filmography for '+names[0], '(???)')
elif names[2] == None:
print('Filmography for '+names[0], '('+str(names[1])+'-)')
else:
print('Filmography for '+names[0], '('+str(names[1])+'-'+str(names[2])+')')
name_id = names[3]
print('===============')
cur.execute(select_movies, [name_id])
for movies in cur.fetchall():
print(movies[0], '('+str(movies[1])+')')
cur.execute(select_actors, (movies[2], name_id))
for acting in cur.fetchall():
print(' playing', acting[1])
cur.execute(select_crews, (movies[2], name_id))
for crewing in cur.fetchall():
print(' as', (crewing[1].capitalize()).replace('_', ' '))
else:
print('Names matching '+input)
print('===============')
for names in cur.fetchall():
if names[1] == None:
print(names[0], '(???)')
elif names[2] == None:
print(names[0], '('+str(names[1])+'-)')
else:
print(names[0], '('+str(names[1])+'-'+str(names[2])+')')
except psycopg2.Error as err:
print("DB error: ", err)
finally:
if db:
db.close()
| [
"email@example.com"
] | email@example.com | |
63a8b851714d15cf5ac6f0ef5c8c7df8979f7929 | 0bb474290e13814c2498c086780da5096453da05 | /tenka1-2017/D/main.py | 471f525f95101f269499fbde1e4d56de0f16212b | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/env python3
import sys
def solve(N: int, K: int, A: "List[int]", B: "List[int]"):
return
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
K = int(next(tokens)) # type: int
A = [int()] * (N) # type: "List[int]"
B = [int()] * (N) # type: "List[int]"
for i in range(N):
A[i] = int(next(tokens))
B[i] = int(next(tokens))
solve(N, K, A, B)
if __name__ == '__main__':
main()
| [
"deritefully@gmail.com"
] | deritefully@gmail.com |
132e6dd5345800e39acd1da16a86ddfc6215c444 | b47e438f1be149c5b339eb0d7e114d98fb986ad0 | /week-07/profiling/examples/pygame/swarm.py | c2afb0544dd9b3dbbc117f8ac3f424ce0d827e07 | [] | no_license | kstager/Python300-SystemDevelopmentWithPython-Spring-2014 | fb97ac425b09df34c00dc480ed3c263742d47d7f | ed37a50a8ea7c308a081bc87c8bea71520221a5e | refs/heads/master | 2021-01-22T15:22:07.197138 | 2014-07-22T13:13:43 | 2014-07-22T13:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | import math
import pygame
import random
import sys
# from meliae import scanner
# scanner.dump_all_objects("meliae.dump") # you can pass a file-handle if you prefer
NUMBER_OF_SPHERES = 150
size = width, height = 800, 600
pygame.init()
black = 0, 0, 0
screen = pygame.display.set_mode(size)
class Sphere(object):
def __init__(self):
self.ball = pygame.image.load("ball.gif")
self.x = random.random() * width
self.y = random.random() * height
vx = 150*(random.random() - .5)
vy = 150*(random.random() - .5)
self.v = [vx, vy]
def update_v(self, other ):
"""update v with gravitational force of other"""
d = math.sqrt( (self.x - other.x)**2 + (self.y - other.y)**2)
v = ((other.x - self.x), (other.y - self.y))
f = map(lambda x: 200 * x / (d*d), v)
self.v = [self.v[0] + f[0], self.v[1] + f[1]]
def move(self, speed):
self.x = self.x + self.v[0] * speed
self.y = self.y + self.v[1] * speed
def draw(self):
screen.blit(self.ball, (self.x, self.y))
class Sun(Sphere):
def __init__(self):
self.ball = pygame.image.load("sun.gif")
self.x = width / 2.0
self.y = height / 2.0
self.v = [0,0]
if __name__ == "__main__":
sun = Sun()
titlebar = pygame.Rect(0,0,200, 100)
clock = pygame.time.Clock()
spheres = [Sphere() for i in xrange(NUMBER_OF_SPHERES)]
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
screen.fill(black)
dt = clock.tick(40)
fps = clock.get_fps()
speed = 1 / float(dt)
for sphere in spheres:
sphere.update_v(sun)
sphere.move(speed)
sphere.draw()
sun.draw()
pygame.draw.rect(screen, (0,0,0), titlebar)
# screen.blit(label, (10, 10))
pygame.display.flip()
| [
"joseph.sheedy@gmail.com"
] | joseph.sheedy@gmail.com |
1ad6aa2c13b4180254111deb127f4c13e2d27af4 | f91474e528ca517f9e81b9dbb50894f2f958f213 | /party.py | def8619f9f28b52d7caa00ba4d9c66de562763db | [] | no_license | daminiamin/Testing-Balloonicorn-s-After-Party-Unit-Test | 8d537af37806486ebdb19f7b7347c2a75d7ba323 | 03fb2160743fbabf45d9a69aed8bcc4b38ed0dd1 | refs/heads/master | 2020-03-30T22:04:12.660895 | 2018-10-05T00:40:21 | 2018-10-05T00:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | """Flask site for Balloonicorn's Party."""
from flask import Flask, session, render_template, request, flash, redirect
from flask_debugtoolbar import DebugToolbarExtension
from model import Game, connect_to_db
app = Flask(__name__)
app.secret_key = "SECRETSECRETSECRET"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
@app.route("/")
def homepage():
"""Show homepage."""
return render_template("homepage.html")
@app.route("/rsvp", methods=['POST'])
def rsvp():
"""Register for the party."""
name = request.form.get("name")
email = request.form.get("email")
session['RSVP'] = True
flash("Yay!")
return redirect("/")
@app.route("/games")
def games():
if session['RSVP'] == True:
games = Game.query.all()
return render_template("games.html", games=games)
if __name__ == "__main__":
app.debug = True
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
DebugToolbarExtension(app)
connect_to_db(app, "postgresql:///games")
app.run()
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
aef71039dde801227a1aaf2d7d3319adaa324b95 | ab6015247185ad2f7440d57aa1215ff25842a996 | /account/models.py | 4dee82653791f8061b1a8cb306186ce1f6af311b | [] | no_license | Johnson-xie/rurality | 7fc5c7c5b941989f1c50cef944f3f4d94ac39fe7 | cc35c8b0610c097db17ed1de554171737466e7b6 | refs/heads/master | 2023-02-20T15:48:46.233989 | 2021-01-23T11:04:16 | 2021-01-23T11:04:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,934 | py | from django.db import models
from django.core.signing import TimestampSigner
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from base.models import BaseModel
class UserModel(BaseModel):
'''
用户表
'''
model_name = '用户'
model_sign = 'user'
TYP_NORMAL = 10
TYP_LDAP = 20
TYP_CHOICES = (
(TYP_NORMAL, '标准用户'),
(TYP_LDAP, 'LDAP用户'),
)
ST_NORMAL = 10
ST_FORBIDDEN = 20
ST_CHOICES = (
(ST_NORMAL, '正常'),
(ST_FORBIDDEN, '禁用'),
)
username = models.CharField('账户', max_length=128, db_index=True)
password = models.CharField('密码', max_length=256, null=True, default=None)
name = models.CharField('姓名', max_length=128, default='')
email = models.CharField('邮箱', max_length=128, null=True, default='')
phone = models.CharField('联系方式', max_length=64, null=True, default='')
status = models.IntegerField('状态', choices=ST_CHOICES, default=ST_NORMAL)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES, default=TYP_NORMAL)
class Meta:
db_table = 'user'
def to_dict(self):
'''
用户信息,不返回密码
'''
data = super().to_dict()
data.pop('password')
return data
def set_password(self, password):
'''
设置密码
'''
self.password = make_password(password)
self.save()
def check_password(self, password):
'''
校验密码
'''
return check_password(password, self.password)
def gen_token(self):
'''
生成接口认证的token
'''
signer = TimestampSigner()
token = signer.sign(self.id)
return token
class RoleModel(BaseModel):
'''
角色表
'''
model_name = '角色'
model_sign = 'role'
TYP_SYSTEM = 10
TYP_NORMAL = 20
TYP_CHOICES = (
(TYP_SYSTEM, '系统角色'),
(TYP_NORMAL, '普通角色'),
)
name = models.CharField('角色名', max_length=32)
typ = models.IntegerField('类型', choices=TYP_CHOICES, default=TYP_NORMAL)
sign = models.CharField('标识', max_length=32)
class Meta:
db_table = 'role'
class RoleUserModel(BaseModel):
'''
角色用户关系表
'''
model_name = '角色关联用户'
model_sign = 'role_user'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_user'
class DepartmentModel(BaseModel):
'''
部门
'''
model_name = '部门'
model_sign = 'department'
name = models.CharField('名称', max_length=32)
sign = models.CharField('标识', max_length=32)
class Meta:
db_table = 'department'
class DepartmentUserModel(BaseModel):
'''
部门与用户
'''
model_name = '部门关联用户'
model_sign = 'department_user'
TYP_MANAGER = 10
TYP_MEMBER = 20
TYP_CHOICES = (
(TYP_MANAGER, '部门负责人'),
(TYP_MEMBER, '普通成员'),
)
user = models.ForeignKey(UserModel, on_delete=models.CASCADE)
department = models.ForeignKey(DepartmentModel, on_delete=models.CASCADE)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES, default=TYP_MEMBER)
class Meta:
db_table = 'department_user'
class ModModel(BaseModel):
'''
模块表
'''
model_name = '模块'
model_sign = 'mod'
name = models.CharField('模块名', max_length=32)
sign = models.CharField('唯一标识', max_length=32)
rank = models.IntegerField('排序')
class Meta:
db_table = 'mod'
class PermissionModel(BaseModel):
'''
权限
'''
model_name = '权限'
model_sign = 'permission'
TYP_OP = 10
TYP_DATA = 20
TYP_CHOICES = (
(TYP_OP, '操作权限'),
(TYP_DATA, '数据权限'),
)
mod = models.ForeignKey(ModModel, on_delete=models.CASCADE, null=True)
name = models.CharField('权限名', max_length=128)
typ = models.SmallIntegerField('类型', choices=TYP_CHOICES)
sign = models.CharField('唯一标识', max_length=128)
rank = models.IntegerField('排序')
class Meta:
db_table = 'permission'
class RoleModModel(BaseModel):
'''
角色模块
'''
model_name = '角色关联模块'
model_sign = 'role_mod'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
mod = models.ForeignKey(ModModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_mod'
class RolePermissionModel(BaseModel):
'''
角色权限
'''
model_name = '角色关联权限'
model_sign = 'role_permission'
role = models.ForeignKey(RoleModel, on_delete=models.CASCADE)
permission = models.ForeignKey(PermissionModel, on_delete=models.CASCADE)
class Meta:
db_table = 'role_permission'
class LdapConfigModel(BaseModel):
'''
LDAP配置
'''
model_name = 'LDAP服务配置'
model_sign = 'ldap_config'
# 类似这样格式:ldap://ldap.oldb.top:389
host = models.CharField('地址', max_length=128)
# ldap管理员账号DN:类似这样cn=admin,dc=oldb,dc=top
admin_dn = models.CharField('管理员DN', max_length=128)
admin_password = models.CharField('管理员密码', max_length=128)
# 所有用户在此节点下
member_base_dn = models.CharField('用户基础DN', max_length=128)
class Meta:
db_table = 'ldap_config'
@classmethod
def none_to_dict(cls):
'''
不存在时,返回内容
'''
data = {
'host': '',
'admin_dn': '',
'admin_password': '',
'member_base_dn': '',
}
return data
| [
"boxingxing@limikeji.com"
] | boxingxing@limikeji.com |
f8ff31230d296d8d52bafc61867337d61507ab7e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_airfares.py | 1310f1c3cdc33bc1f52a56002ac21d34a724d64c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._airfare import _AIRFARE
#calss header
class _AIRFARES(_AIRFARE, ):
def __init__(self,):
_AIRFARE.__init__(self)
self.name = "AIRFARES"
self.specie = 'nouns'
self.basic = "airfare"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
ed6b7a8c5f2057dacf6492198f9233cc8714b552 | 6aa406767612c3eec15c9dcd26ce49841c1555bb | /ADK_6.2.43/audio/kalimba/kymera/tools/KCSMaker/downloadFile.py | 88d7254055bcb3d572c7b9762266ccc7f436cbba | [] | no_license | hongshui3000/BluetoothEarbud | 0bb483ca3c19f9c4b317a6ef1f4e8a9712313dd1 | b173ec3666c9e02d115d52d301b74fcd2d08cb47 | refs/heads/master | 2021-04-06T12:42:57.286099 | 2018-03-06T01:55:33 | 2018-03-06T01:55:33 | 124,361,091 | 1 | 7 | null | 2018-03-08T08:30:02 | 2018-03-08T08:30:01 | null | UTF-8 | Python | false | false | 2,569 | py | ############################################################################
# CONFIDENTIAL
#
# Copyright (c) 2015 - 2017 Qualcomm Technologies International, Ltd.
#
############################################################################
import types
class downloadFile(list):
"""Container format that extends the built-in list to inclue comment strings
for the output kdc file."""
def append(self, value, comment=""):
"""Extend the built-in list append method to include the comment.
Note the behaviour of this method is different to the built-in method
when handling lists. In this case the append method behaves as extend
does."""
if type(value) == types.ListType:
list.append(self, (value[0], comment))
self.extend(value[1:])
else:
list.append(self, (value, comment))
def extend(self, value, comment=""):
"Extend the built-in list extend method to include the comment."
if len(value) == 0:
return
self.append(value[0], comment)
list.extend( self, map( lambda x: (x, ""), value[1:] ) )
def dumpToTextFile(self, fileT, write_mode="w", index=0):
with open(fileT, write_mode) as f:
for i in range(len(self)):
if self[i][1] != "":
f.write("%06X %04X # %s\n" % (index + i, self[i][0], self[i][1]))
else:
f.write("%06X %04X\n" % (index + i, self[i][0]))
return index + len(self)
def dumpToBinaryFile(self, fileB, write_mode="wb"):
with open(fileB, write_mode) as f:
f.write( "".join( map( lambda (k,s): "%c%c" % (k>>8, k&0xFF), self ) ) )
def dumpToDRAMFile(self, fileD, write_mode="w"):
with open(fileD, write_mode) as f:
word32 = ""
words16bit = map( lambda (k,s): "%04x" % (k), self )
words32bit = []
wordready = False
for word16 in words16bit:
word32 = word32 + word16
if wordready == True:
words32bit.append("0x" + word32)
wordready = False
word32 = ""
else:
wordready = True
size = len(words32bit)
words32bit.insert(0, "@40000000 " + str(size))
f.write( "\n".join( words32bit ) )
f.write("\n")
# return whether we left a 16 bit word behind
return wordready, word16, len(words32bit) | [
"chaw.meng@geortek.com"
] | chaw.meng@geortek.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.