Dataset schema (one row per source file):

| column | type | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1-1 |
| author_id | string | length 0-212 |

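Aside: a minimal sketch, assuming the rows below have been exported to a local Parquet file (the file name and the column subset picked here are hypothetical), of how records with this schema could be inspected:

import pandas as pd

# Hypothetical local export of the rows shown below
df = pd.read_parquet("code_rows.parquet")
# Where each file came from, its license bucket, and its size
print(df[["repo_name", "path", "language", "license_type", "length_bytes"]].head())
# The source text itself lives in the 'content' column
first = df.iloc[0]
print(first["repo_name"], first["path"])
print(first["content"][:200])
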
blob_id: 0121e9cadbc8432b02c2398d2e0c7d49438b65d7 | directory_id: e13c202c254f2787768d06ac55004e855fd95d73 | content_id: 5d54826c954311d61c3c9555cbd95acf2fe327dd
path: /SVM_sample_codes/PythonPrimalVersionsofSVM/simpleSVM.py
repo_name: MaryZolfaghar/HandWrittenDigitPrediction | snapshot_id: 237f42077526bae13118e0224c7c9a029c6c63d7 | revision_id: fac16bbd783c76b47e872f489964093f616c9aa7 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2022-09-10T20:48:39.036126 | revision_date: 2020-05-19T06:10:31 | committer_date: 2020-05-19T06:10:31 | github_id: 262,127,940
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,643 | extension: py
content:
# ECS271 2019S
# Toy example of Linear SVM
# 4/16/2019
# To run this code, please first install cvxpy from https://www.cvxpy.org/
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import random
import time
import cvxpy as cp
# generate toy training data
N1 = 200 # number of positive instances
N2 = 100 # number of negative instances
D = 2 # feature dimension
eps = 1e-8 # select support vectors
np.random.seed(1) # For reproducibility (the data below is drawn with np.random)
r1 = np.sqrt(1.5*np.random.rand(N1,1)) # Radius
t1 = 2*np.pi*np.random.rand(N1,1) # Angle
data1 = np.concatenate((r1*np.cos(t1), r1*np.sin(t1)), axis=1) # Points
r2 = np.sqrt(3*np.random.rand(N2,1)) # Radius
t2 = 2*np.pi*np.random.rand(N2,1) # Angle
data2 = np.concatenate((2.5+r2*np.cos(t2), 1.5+r2*np.sin(t2)), axis=1) # points
## generate toy testing data
Nt1 = 50 # number of positive instances
Nt2 = 25 # number of negative instances
D = 2 # feature dimension
np.random.seed(1) # For reproducibility
r1 = np.sqrt(3.4*np.random.rand(Nt1,1)) # Radius
t1 = 2*np.pi*np.random.rand(Nt1,1) # Angle
testdata1 = np.concatenate((r1*np.cos(t1), r1*np.sin(t1)), axis=1) # Points
r2 = np.sqrt(2.4*np.random.rand(Nt2,1)) # Radius
t2 = 2*np.pi*np.random.rand(Nt2,1) # Angle
testdata2 = np.concatenate((3+r2*np.cos(t2), r2*np.sin(t2)), axis=1) # points
## training linear SVM based on CVX optimizer
X = np.concatenate((data1, data2), axis=0)
y = np.concatenate((np.ones((N1, 1)), - np.ones((N2, 1))), axis=0)
w = cp.Variable((D, 1))
b = cp.Variable()
objective = cp.Minimize(cp.sum(cp.square(w)) * 0.5)
constraints = [cp.multiply(y, (X @ w + b)) >= 1]
prob = cp.Problem(objective, constraints)
prob.solve()
print("status:", prob.status)
print("optimal value", prob.value)
print("optimal var w = {}, b = {}".format(w.value, b.value))
## visualize decision boundary for training data
d = 0.02
x1 = np.arange(np.min(X[:,0]), np.max(X[:,0]), d)
x2 = np.arange(np.min(X[:,1]), np.max(X[:,1]), d)
x1Grid, x2Grid = np.meshgrid(x1, x2)
xGrid = np.stack((x1Grid.flatten('F'), x2Grid.flatten('F')), axis=1)
scores1 = xGrid.dot(w.value) + b.value
scores2 = -xGrid.dot(w.value) - b.value
plt.figure(0)
sup = y*(X.dot(w.value)+b.value)-1
sup_v1 = ((-eps<sup) & (sup<eps)).flatten()
h3 = plt.scatter(X[sup_v1,0], X[sup_v1,1], s=21, marker='o', c='k')
h1 = plt.scatter(data1[:,0], data1[:,1], s=15, marker='.', c='r')
h2 = plt.scatter(data2[:,0], data2[:,1], s=15, marker='.', c='b')
plt.contour(x1Grid, x2Grid, np.reshape(scores1, x1Grid.shape, order='F'), levels=[0], colors='k')  # zero level = decision boundary
plt.axis('equal')
plt.title('Decision boundary and support vectors for training data')
plt.legend((h1, h2, h3),('+1','-1', 'support vecs'))
plt.savefig('simpleSVM_train_decision_1.png')
time.sleep(2)
## visualize decision boundary for test data
Xt = np.concatenate((testdata1, testdata2), axis=0)
yt = np.concatenate((np.ones((Nt1, 1)), - np.ones((Nt2, 1))), axis=0)
xt1 = np.arange(np.min(Xt[:,0]), np.max(Xt[:,0]), d)
xt2 = np.arange(np.min(Xt[:,1]), np.max(Xt[:,1]), d)
xt1Grid, xt2Grid = np.meshgrid(xt1, xt2)
xtGrid = np.stack((xt1Grid.flatten('F'), xt2Grid.flatten('F')), axis=1)
test_scores1 = xtGrid.dot(w.value) + b.value
test_scores2 = -xtGrid.dot(w.value) - b.value
plt.figure(1)
ht1 = plt.scatter(testdata1[:,0], testdata1[:,1], s=15, marker='.', c='r')
ht2 = plt.scatter(testdata2[:,0], testdata2[:,1], s=15, marker='.', c='b')
plt.contour(xt1Grid, xt2Grid, np.reshape(test_scores1, xt1Grid.shape, order='F'), levels=[0], colors='k')  # zero level = decision boundary
plt.axis('equal')
plt.title('Decision boundary and support vectors for test data')
plt.legend((ht1, ht2),('+1','-1'))
plt.savefig('simpleSVM_test_decision_1.png')
plt.show()
authors: ["mazo2428@colorado.edu"] | author_id: mazo2428@colorado.edu

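Aside: a minimal cross-check of the cvxpy solution above, assuming scikit-learn is available; a linear SVC with a very large C approximates the same hard-margin problem and should recover nearly the same w and b. It reuses the X and y arrays built in the script:

from sklearn.svm import SVC  # assumption: scikit-learn installed

clf = SVC(kernel="linear", C=1e6)   # large C approximates the hard margin
clf.fit(X, y.ravel())               # X, y as constructed in simpleSVM.py above
print("sklearn w =", clf.coef_, " b =", clf.intercept_)
# Expect values close to w.value and b.value from the cvxpy problem.
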
blob_id: a5c36185001aaea65acbe7f9d9f0789692100ec5 | directory_id: 94405cc7a8482a0009eb6da0f11519c93dfab640 | content_id: 99809e5cb019e76fffb890c74a213a981d7d71a8
path: /contacts_api/models.py
repo_name: mahuntington/companies_contacts_locations | snapshot_id: ca1604232842481a49d88d194e2beedfdc76f0c1 | revision_id: 848b568b2f136c9ec1641cc6fd5b6183d208594d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-20T00:11:23.508697 | revision_date: 2021-03-11T21:53:10 | committer_date: 2021-03-11T21:53:10 | github_id: 346,848,104
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 309 | extension: py
content:
from django.db import models
from locations_api.models import Location
# Create your models here.
class Contact(models.Model):
name = models.CharField(max_length=32)
age = models.IntegerField()
home = models.ForeignKey(Location, related_name='inhabitants', null=True, on_delete=models.SET_NULL)
authors: ["matt.huntington@gmail.com"] | author_id: matt.huntington@gmail.com

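Aside: a minimal sketch of how the related_name and on_delete choices in the Contact model above behave; the Location fields ("name") and the example values are assumptions, not taken from the record:

# Hypothetical shell session against the models above
office = Location.objects.create(name="HQ")           # assumes Location has a 'name' field
Contact.objects.create(name="Ada", age=36, home=office)
print(office.inhabitants.count())                     # reverse relation via related_name='inhabitants'
office.delete()                                       # SET_NULL + null=True: contacts survive, home becomes NULL
print(Contact.objects.filter(home__isnull=True).count())
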
blob_id: 66658b9afa7e781e257535df44bd3a6053c7a745 | directory_id: 4ae03a335ac92933ff0fb463b42f36e56f2ec4bb | content_id: b0170fbff2638e1f112b1902553c5b6eaabee245
path: /a4.py
repo_name: Aausuman/Advanced-Python | snapshot_id: ddd87d09791ea96844d1feebd480bb77de521cd1 | revision_id: fbc786a77b3b79b293895584386aff9ab11820b1 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2023-05-31T14:17:22.902635 | revision_date: 2021-06-16T23:47:01 | committer_date: 2021-06-16T23:47:01 | github_id: 273,336,992
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,707 | extension: py
content:
#
# Assignment 4
#
# Student Name : Aausuman Deep
# Student Number : 119220605
#
# Assignment Creation Date : February 22, 2020
import docx
import pyexcel
import os.path
def analyze(docfile):
# This function creates an excel file with word frequencies of the desired document file
doc = docx.Document(docfile)
my_dict = {}
# iterating paragraph wise
for paragraph in doc.paragraphs:
# replacing all non alphanumeric characters with a space
for i in range(len(paragraph.text)):
if not paragraph.text[i].isalnum():
paragraph.text = paragraph.text.replace(paragraph.text[i], " ")
paragraph.text = paragraph.text.lower()
words = paragraph.text.split()
# creating a dictionary of words and their counts
for i in range(len(words)):
if words[i] not in my_dict.keys():
my_dict[words[i]] = 1
else:
my_dict[words[i]] += 1
count_words = sum(my_dict.values())
# updating dictionary to have frequency of words (divided by total) instead of counts
for i in my_dict:
my_dict[i] = float(my_dict[i]/count_words)
# deleting all key value pairs with frequency less than 0.001
delete = [key for key in my_dict if my_dict[key] < 0.001]
for key in delete:
del my_dict[key]
row = 1
# writing the dictionary into the worksheet and saving the appropriately named excel file
my_list = [[k, v] for k, v in my_dict.items()]
file = os.path.split(docfile)[1]
filename = file.split(".")[0] + "_word_stats.xlsx"
pyexcel.save_as(array=my_list, dest_file_name=filename, dest_sheet_name='Word Frequency Stats')
return 0
authors: ["aausuman.26@gmail.com"] | author_id: aausuman.26@gmail.com

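Aside: the frequency logic inside analyze() above, reproduced on a plain string with collections.Counter as a quick sanity check; same lowercasing, non-alphanumeric stripping, and 0.001 cutoff, but with a made-up input string instead of a .docx file:

import re
from collections import Counter

text = "To be, or not to be: that is the question."
words = re.sub(r"[^0-9a-zA-Z]+", " ", text).lower().split()
counts = Counter(words)
total = sum(counts.values())
freqs = {w: c / total for w, c in counts.items() if c / total >= 0.001}
print(freqs)
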
blob_id: f8afdf36a4d0d59d004ca654106e35b8e212e845 | directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 | content_id: a236ca994b2bd81870d2324fe37aae78e706518a
path: /xai/brain/wordbase/nouns/_atoll.py
repo_name: cash2one/xai | snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 | revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-01-19T12:33:54.964379 | revision_date: 2017-01-28T02:00:50 | committer_date: 2017-01-28T02:00:50 | github_id: null
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 394 | extension: py
content:
# class header
class _ATOLL():
def __init__(self,):
self.name = "ATOLL"
self.definitions = [u'a ring-shaped island formed of coral (= rock-like natural substance) that surrounds a lagoon (= area of sea water): ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
authors: ["xingwang1991@gmail.com"] | author_id: xingwang1991@gmail.com

blob_id: ebc67444ecc4b8a6a983eac1234ede2d6195eee7 | directory_id: 614aaf54a98cdfb3a687656635eb9aea70452480 | content_id: 55baa5d5cde69af7164188213deea36c80831827
path: /images/icons.py
repo_name: yangyingchao/klip | snapshot_id: 0c9a80eba91dc1097d2671210e4e8e4fcb927ba1 | revision_id: b2c2a06e9241f2d4cb76de61558206e149ae4361 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2021-06-26T10:57:43.198250 | revision_date: 2020-12-07T03:28:41 | committer_date: 2020-12-07T03:28:41 | github_id: 176,853,049
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 24,565 | extension: py
content:
#----------------------------------------------------------------------
# This file was generated by /usr/local/bin/img2py
#
from wx.lib.embeddedimage import PyEmbeddedImage
klip = PyEmbeddedImage(
b'iVBORw0KGgoAAAANSUhEUgAAAgAAAAIACAYAAAD0eNT6AAAAAXNSR0IArs4c6QAAAAlwSFlz'
b'AAAN1wAADdcBQiibeAAAAWJpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEg'
b'eG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAg'
b'PHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1z'
b'eW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAg'
b'ICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iPgogICAg'
b'ICAgICA8ZXhpZjpVc2VyQ29tbWVudD5TY3JlZW5zaG90PC9leGlmOlVzZXJDb21tZW50Pgog'
b'ICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4Kbsze'
b'LgAAPnJJREFUeAHt3QmYVNWZ8PH3VlU3IIjIoqwioqCORhTcJRLNiEpmdGJ03P3iEnXco4Iy'
b'CKgYQJJ8Sb446phkJsskMZm4b7gAgqACQkA22fd9X7qhge7vPdU0VtFV1bXe7fzv81youstZ'
b'fm911VtVp851ampqhAUBBBBAAAEE7BKI2NVdeosAAggggAACRoAEgMcBAggggAACFgqQAFgY'
b'dLqMAAIIIIAACQCPAQQQQAABBCwUIAGwMOh0GQEEEEAAARIAHgMIIIAAAghYKEACYGHQ6TIC'
b'CCCAAAIkADwGEEAAAQQQsFCABMDCoNNlBBBAAAEESAB4DCCAAAIIIGChAAmAhUGnywgggAAC'
b'CJAA8BhAAAEEEEDAQgESAAuDTpcRQAABBBAgAeAxgAACCCCAgIUCJAAWBp0uI4AAAgggQALA'
b'YwABBBBAAAELBUgALAw6XUYAAQQQQIAEgMcAAggggAACFgqQAFgYdLqMAAIIIIAACQCPAQQQ'
b'QAABBCwUIAGwMOh0GQEEEEAAARIAHgMIIIAAAghYKEACYGHQ6TICCCCAAAIkADwGEEAAAQQQ'
b'sFCABMDCoNNlBBBAAAEESAB4DCCAAAIIIGChAAmAhUGnywgggAACCJAA8BhAAAEEEEDAQgES'
b'AAuDTpcRQAABBBAgAeAxgAACCCCAgIUCJAAWBp0uI4AAAgggQALAYwABBBBAAAELBUgALAw6'
b'XUYAAQQQQIAEgMcAAggggAACFgqQAFgYdLqMAAIIIIAACQCPAQQQQAABBCwUIAGwMOh0GQEE'
b'EEAAARIAHgMIIIAAAghYKEACYGHQ6TICCCCAAAIkADwGEEAAAQQQsFAgZmGf6XIRBHr16tVC'
b'i2mva2tdSSSLYEoRCLggsFfrWKvrmqlTp1a4UB9V+FjAqamp8XHzaJrXAvpCf7q24QpdT9bV'
b'vOC3O7A20f9ZEEAguALbtOmrdV1z4P/J+v/rmhgs1/9ZLBAgAbAgyLl0UV/wzadC39T1Sl3N'
b'C/8xurIggIA9AtO1q6+ZVZOBmfZ0276ekgDYF/OUPdYXfvPufpCu1+p6ZMqD2IgAArYJLNEO'
b'v6jrLzQZqLSt82HvLwlA2CPcQP/0hf8IPWSArg/oelgDh7MbAQTsFFip3R6s6281Eai2kyB8'
b'vSYBCF9Ms+qRvvA30gPv0XWgrq2yOomDEEDAdoHZCvCYJgFv2Q4Rhv6TAIQhijn2QV/8z9VT'
b'/qRr5xxP5XAEEEDACHyk6/WaCKyHI7gC/HwruLHLq+X64n+LnjhWV1788xLkJAQQUIGLdZ2i'
b'zyenoRFcAT4BCG7scmq5/qGaZO9ZXR/O6UQORgABBNIL7NJdN+snAa+kP4Q9fhXgEwC/RqaI'
b'7dIXfzPQz3xnx4t/EV0pCgEEpKka/K8+xzyBRfAE+AQgeDHLqcUHXvw/0ZNOyelEDkYAAQRy'
b'E3hJPwn4QW6ncLSXAnwC4KV+ies+8LG/GezHi3+JrSkeAQTkDn3OeQiH4AiQAAQnVvm01Hzn'
b'f1k+J3IOAgggkIfAKE0C+uZxHqd4IMBXAB6gu1Gl/hGa0f7/7UZd1IEAAggkCJhrDJytXwd8'
b'lbCNmz4UIAHwYVAKbZK++Jvf+Zuf+pnJflgQQAABtwXma4UmCdjqdsXUl70AXwFkbxWII/XF'
b'37zom+/9efEPRMRoJAKhFOimvfpZKHsWok6RAIQomAe6cq/+zyQ/4YsrPUIgaAI36RuSU4PW'
b'aJvaSwIQomjrH1sL7Y6Z258FAQQQ8FrAvL6M9LoR1J9egAQgvU0Q9zymjW4ZxIbTZgQQCKXA'
b'ZfrG5Fuh7FkIOsUgwBAE0XRB/8g66n8LdG1s7rMggAACPhGYou0wAwJrfNIemnFAgE8AwvNQ'
b'MNfq5sU/PPGkJwiEReBM7ci/hKUzYeoHCUAIoqnv/su0G9eEoCt0AQEEwilwQzi7FexekQAE'
b'O351re+jN8wFf1gQQAABPwr01TcqfELps8iQAPgsIHk258o8z+M0BBBAwA0Bc9XAf3SjIurI'
b'XoAEIHsrPx/5z35uHG1DAAEEVOAKFPwlwK8A/BWPnFujH6v10pPMKFsWBBBAwM8C67Vx7fTX'
b'ANV+bqRNbeMTgOBHm3f/wY8hPUDABoGjtJPn2NDRoPSRBCAokUrfzpPS72IPAggg4CsBnq98'
b'FA4SAB8FI8+mtMvzPE5DAAEE3Bbg+cpt8Qz1kQBkwAnIrvYBaSfNRAABBHi+8tFjgATAR8HI'
b'sylk1HnCcRoCCLguwPOV6+TpKyQBSG/j+z36A4AjtZFMruH7SNFABBA4IMAnAD56KJAA+CgY'
b'eTSFbDoPNE5BAAHPBHjO8oy+fsUkAPVNgrSldZAaS1sRQMB6AZ6zfPQQIAHwUTDyaArxywON'
b'UxBAwDMBnrM8o69fMcGob8IWBBBAAAEEQi9AAhD6ENNBBBBAAAEE6guQANQ3YQsCCCCAAAKh'
b'FyABCH2I6SACCCCAAAL1BUgA6puwBQEEEEAAgdALkACEPsR0EAEEEEAAgfoCJAD1TdiCAAII'
b'IIBA6AVIAEIfYjqIAAIIIIBAfQESgPombEEAAQQQQCD0AiQAoQ8xHUQAAQQQQKC+AAlAfRO2'
b'IIAAAgggEHqBWOh7SAdLJvD8889L06ZNS1Y+BSOAQPEE7rrrLqmoqChegZQUeAESgMCH0LsO'
b'dO3aVZo1a+ZdA6gZAQSyFohE+MA3ayxLDuQRYUmg6SYCCCCAAAKJAiQAiRrcRgABBBBAwBIB'
b'EgBLAk03EUAAAQQQSBQgAUjU4DYCCCCAAAKWCJAAWBJouokAAggggECiAAlAoga3EUAAAQQQ'
b'sESABMCSQNNNBBBAAAEEEgVIABI1uI0AAggggIAlAiQAlgSabiKAAAIIIJAoQAKQqMFtBBBA'
b'AAEELBEgAbAk0HQTAQQQQACBRAESgEQNbiOAAAIIIGCJAAmAJYGmmwgggAACCCQKkAAkanAb'
b'AQQQQAABSwRIACwJNN1EAAEEEEAgUYAEIFGD2wgggAACCFgiQAJgSaDpJgIIIIAAAokCJACJ'
b'GtxGAAEEEEDAEgESAEsCTTcRQAABBBBIFCABSNTgNgIIIIAAApYIkABYEmi6iQACCCCAQKIA'
b'CUCiBrcRQAABBBCwRIAEwJJA000EEEAAAQQSBUgAEjW4jQACCCCAgCUCJACWBJpuIoAAAggg'
b'kChAApCowW0EEEAAAQQsESABsCTQdBMBBBBAAIFEARKARA1uI4AAAgggYIkACYAlgaabCCCA'
b'AAIIJAqQACRqcBsBBBBAAAFLBEgALAk03UQAAQQQQCBRgAQgUYPbCCCAAAIIWCJAAmBJoOkm'
b'AggggAACiQIkAIka3EYAAQQQQMASARIASwJNN0MgUFMdgk7QBQQQ8ItAzC8NoR0IIJBawNm4'
b'QKJzXxfZsUb2Xf6T1AexFQEEEMhRgAQgRzAOR8AVgb2VEln4QfyF31k/N15lTdtvuFI1lSCA'
b'gB0CJAB2xJleBkTA2TAv/qIfWfCByN6KgLSaZiKAQBAFSACCGDXaHC4BfaGPLHhfonNeF2fj'
b'V+HqG71BAAHfCpAA+DY0NCzsAuajffPdfvzd/r7KsHeX/iGAgM8ESAB8FhCaE3KBql36gj+6'
b'9rt9HdzHggACCHglQALglTz1WiXgrJutL/qv6cC+j0T27c6v746T33mchQACCKQQIAFIgcIm'
b'BIoisGenRBe8JxHz3f7mRfkXGWsi+0/5ruw/7Yb8y+BMBBBA4BABEoBDQLiLQKECztova7/b'
b'X2Te7e8pqLjq4/rIvt79RZq0KKgcTkYAAQQOFSABOFSE+wjkI7Bnh0Tn67t9/Zjf2bwknxLq'
b'nTNr91HS7dtPi0Si9faxAQEEEChUgASgUEHOt1rAWTPjwLv9MSL7q4pm8fkakTfkVBnCi3/R'
b'TCkIAQSSBUgAkj24h0DDAru36bv9d/Xd/hvibFna8PF5HPH8dJG2PRj0lwcdpyCAQJYCJABZ'
b'QnEYAs7q6bXv9heP1Xf7e0sG8uUGkVkbNQEoWQ0UjAACCIiQAPAoQCCTwO6tEv3KvNvXkfxb'
b'l2c6smj7XtXpAc4880y57rrrilYmBSGAAAKHCpAAHCrCfQSkRiKrpscH9EUWfyxSXbp3+6mw'
b'r7mzvxx3/pWpdrENAQQQKJoACUDRKCko6AJO5RaJfPW2vvC/Kc62FZ51p2uP8zQFYUEAAQRK'
b'K0ACUFpfSve9gL7bXzk1/hF/ZMl4fbe/z9sWOxGpOay1t22gdgQQsEKABMCKMNPJQwWcys0S'
b'mWfe7et3+9tXH7rbu/vlzUQ0CWBBAAEESi1AAlBqYcr3kYC+218xpfa7/aWfeP9uP5UM8/2n'
b'UmEbAgiUQIAEoASoFOkvAadik77bf7P2u/0dOsMOCwIIIIAAPwPkMRBSgZpqfbf/ee13+8sm'
b'6rv9/SHtKN1CAAEE8hPgE4D83DjLpwLOrg36bv+t+Dt+Z8dan7aSZiGAAALeC5AAeB8DWlCo'
b'gHm3v/yzr9/t630WBBBAAIHMAiQAmX3Y63eBPdul7K+3iLNznd9bSvsQQAABXwnweyNfhYPG'
b'5Crg7NvDi3+uaByPAAIIqAAJAA8DBBBAAAEELBQgAbAw6HQZAQQQQAABEgAeA4EWqK5mwF+g'
b'A0jjEUDAMwEGAXpGT8WFCGzYsEHeeustmfT+a/LbC7MvqVKn+t9RJbJTL/DXvFykZWP9HszJ'
b'/nyORAABBMIiQAIQlkha0o9NmzbJs88+KxMnThTz7v+ow1J3vErn/Zm3WWTGepGFW0WWbqtd'
b'dx1yZV/z4t+2qci3jhG5tIvISa1Sl8dWBBBAIGwCJABhi2jI+7N27VqZMGFCvV6ay+fO3Sjy'
b'ySqRz3W237mbREwS0NBSrSeu3inyP3Nq1z6dRAaeW/vJQEPnsh8BBBAIsgAJQJCjR9vFvKN/'
b'apLIRH3h31RZOMi4FSLT9VODn10kcmqbwsujBAQQQMCvAgwC9GtkaFdWAiYBeGNhcV786yrc'
b'tkfkh2NFVu2o28L/CCCAQPgESADCF1N6VASBLbtFHhojst98t8CCAAIIhFCABCCEQaVLxRFY'
b'rAMHX19QnLIoBQEEEPCbAGMA/BYR2pO3QHl5uXTv3l26dOkinTt3lmOPPVZatmwphx9+uBx2'
b'2GGydetWMb8iWLFihYwZM0amTZsW/yVBpgp/NVPkO11FyqOZjmIfAgggEDwBEoDgxYwWHxAw'
b'L/g9e/aMr9/4xjfiL/5lZWVpfVq0aBFPCsw5V155pWzcuFGee+45GT16dNpz1leIfKwDA//x'
b'2LSHsAMBBBAIpAAJQCDDZm+jmzRpIldccYWcf/750qtXL2ncWGfyyXNp3bq1DBkyRC666CIZ'
b'OnSoVFam/hnB+JUkAHkScxoCCPhYgDEAPg4OTasvcNxxx8mAAQPkggsuKOjFP7Hk3r17y7Bh'
b'wyQSSf3nMFETAAYDJopxGwEEwiCQ+hkvDD2jDwjkIHDuuefKfffdl/KM7Tp18IItKXexEQEE'
b'EAisAAlAYENHw4stcPXVV8cHEKYqd5n+IoAFAQQQCJMACUCYoklfChIwXwH84Ac/SFnGsu0p'
b'N7MRAQQQCKwACUBgQ0fDSyFw4YUXSseOHesVTQJQj4QNCCAQcAESgIAHkOYXX+Cb3/xmvULN'
b'zIAsCCCAQJgESADCFE36UhQB86uAQ5fKfYdu4T4CCCAQbAESgGDHj9aXQODUU08VM99A4kIC'
b'kKjBbQQQCIMACUAYokgfiipgBgOaqYQTFxKARA1uI4BAGARIAMIQRfpQdIFOnTollbl3f9Jd'
b'7iCAAAKBFyABCHwI6UApBI455phSFEuZCCCAgG8ESAB8Ewoa4ieBNm3a+Kk5tAUBBBAougAJ'
b'QNFJKTAMAubywSwIIIBAmAVIAMIcXfqWt0AhVxnMu1JORAABBFwUIAFwEZuqgiNw6M8Ag9Ny'
b'WooAAghkJ0ACkJ0TR1km0KhRI8t6THcRQMA2ARIA2yJOf7MScBwnq+M4CAEEEAiqAAlAUCNH'
b'uxFAAAEEEChAgASgADxORQABBBBAIKgCJABBjRztRgABBBBAoAABEoAC8DgVAQQQQACBoAqQ'
b'AAQ1crQbAQQQQACBAgRIAArA41QEEEAAAQSCKkACENTI0W4EEEAAAQQKECABKACPUxFAAAEE'
b'EAiqAAlAUCNHuxFAAAEEEChAgASgADxORQABBBBAIKgCJABBjRztRgABBBBAoAABEoAC8DgV'
b'AQQQQACBoAqQAAQ1crQbAQQQQACBAgRIAArA41QEEEAAAQSCKkACENTI0W4EEEAAAQQKECAB'
b'KACPUxFAAAEEEAiqAAlAUCNHuxFAAAEEEChAgASgADxORQABBBBAIKgCJABBjRztRgABBBBA'
b'oAABEoAC8DgVAQQQQACBoAqQAAQ1crQbAQQQQACBAgRIAArA41QEEEAAAQSCKkACENTI0W4E'
b'EEAAAQQKECABKACPUxFAAAEEEAiqAAlAUCNHuxFAAAEEEChAgASgADxORQABBBBAIKgCJABB'
b'jRztRgABBBBAoAABEoAC8DgVAQQQQACBoAqQAAQ1crQbAQQQQACBAgRIAArA41QEEEAAAQSC'
b'KhALasNpNwIIFF8gsuhDcdbOKn7BlpRY0+Ykqe7W15Le0s2gC5AABD2CtB+BIgpEVk6VyNw3'
b'iliiXUVVn9CXBMCukAe6t3wFEOjw0XgEEEAAAQTyEyAByM+NsxBAAAEEEAi0AAlAoMNH4xFA'
b'AAEEEMhPgAQgPzfOQgABBBBAINACDAIMdPhoPAKFCcyaNUs2b958sJCT1qyRDgfvcQMBBMIs'
b'QAIQ5ujSNwQaEPj1r38tn3/++cGjBp0r0uGEg3e5gQACIRYgAQhxcOkaArkK/G2+yPT1Ik3L'
b'atfD9BmiRWNNCpqJdDxcpG1TkYiTa6nhO75in8iSrSJlrY+Vrqf3FmnSUmrKm0lNyy7h6yw9'
b'Cq0ACUBoQ0vHEMhdYO4mEbOmW6L64n9Mc5EzjhY5s51IT/3/SE0Qwr4s3y4yZa3IF7rO029M'
b'Vuj9Gu30TTf1lrvPvjvs3ad/IRUgAQhpYOkWAqUQ2K+veku21a7m0wLzYYBJBK7uLnJhp/B8'
b'OmD6OW2dyNjlIh+vEFm3qxSalImAtwIkAN76UzsCgRYw74Inr6ldj9avBwacJfJNTQSCuszR'
b'Tz/eXiQyeonI1j1B7QXtRiA7ARKA7Jw4CgEEGhAw75J/OFbkn7pqInC2SOOAPLvs1u/z31ks'
b'8vI8kUX6vT4LArYIBORP1JZw0E8Egi/wpr6D3l4lMqqPv78S2FAh8qe5Iq8tqG1v8OXpAQK5'
b'CZAA5ObF0QiESuCCCy6QVq1aSUVFRXzdtWuXbNmyRdatWyf79+/Pu6/me/Pn/y5yz+l5F1Gy'
b'E1fuEPndbJG3NFGpyqGLjuNI69atpV27dnLkkUdKs2bN5JRTTilZOykYgVILkACUWpjyEfCx'
b'wFVXXZWydebF3yQBK1eulKVLl8oXX3wh06dPl507d6Y8PtXGP84RueFk/Rlho1R73d9mvqL4'
b'zxki5hOKajN4IcNiXuyPP/546dGjh3Tv3l26desmnTt3lrIy/X0kCwIhESABCEkg6QYCxRSI'
b'RqPSvn37+HrWWWfJNddcI9XV1TJ//nx577335J133mkwGdij765f0V8K3HpqMVuWe1nbdDDf'
b'r78U+d+vMr/jN+/ozz//fOndu7f07NlTjjjiiNwr4wwEAiRAAhCgYNFUBLwUiEQicuKJJ8bX'
b'u+66K54IvPDCC7J9u/4oPs3y5YY0O1zYvK9a5K/6om/e9e/QMQmpliZNmsiFF14ol156qZxx'
b'xhkSi/GUmMqJbeEU4NEezrjSKwRKKtC4cWO58sor4++WR4wYIRMnTkxZ3+rsvzFIeX6+Gyet'
b'EvnpVJGlOmdBqsUkMubrj4suukhMEsCCgI0CJAA2Rp0+I1AkATOAcNSoUfL000/Lu+++W69U'
b'8/G7m8t6Hdn/48kiY3QCn0MX8wmGecE3X2cweO9QHe7bKEACYGPU6TMCRRZ4/PHH44MGp02b'
b'llTyES4NADQz95mf9P2n/vLAzNOfuJiBe/369ZMbb7wxPqYhcR+3EbBZgATA5ujTdwSKJGC+'
b'O3/wwQfl5ptvTiqxpQvXCViwReSpSfWvYWBe+K+44gq55ZZb4j91TGoYdxBAQEgAeBAggEBR'
b'BMzP5nr16iVTp+qX7weWU9vU3Sr+/2aQ3290dL9Zze26xXzUf8kll8jtt9/OO/46FP5HIIUA'
b'CUAKFDYhgEB+Auedd15SAtCrbX7lNHSWmbL3iQki8/Xdf+JiRvKbTyJMMsKCAAKZBUgAMvuw'
b'FwEEchAws+TVLeb7/x5H1d0rzv9m/h4zwdBz05N/02/mLLj33nulT58+xamIUhCwQIAEwIIg'
b'00UE3BIwU+XWLf30okDl0bp7hf9vRvgP+URkytqvyzLf85txBzfddJOUl5d/vYNbCCDQoAAJ'
b'QINEHIAAAtkKmGsKmMXR9bsnxG8W5R9zbYEndaDf9oSfFZoZCh9++GHp1KlTUeqgEARsEyAB'
b'sC3i9BeBEgps21Y7886lx4kcW4SZdM3Fen72hchf9FK9dUvz5s3loYcekr59+9Zt4n8EEMhD'
b'gAQgDzROQQCB1AILFiyQsojI3T1S789l63KdYfixj5MH+pnv+B955BFp2bJlLkVxLAIIpBAg'
b'AUiBwiYEEMhPwFw18KZ/EGnfLL/z684apx/5m+/7d+2t3WIuzPPoo4/GZ/KrO4b/EUCgMAES'
b'gML8OBsBBA4IbNmyRfatmyd3XJY/iblMrxnh/7tZInVX7D377LNl0KBBTOaTPytnIpBSgAQg'
b'JQsbEUAgV4H33nlTnjyvOv4VQK7nmuO36gC/x/Uj/7pR/o0aNYr/tM9ctIcFAQSKL0ACUHxT'
b'SkTAOoGamhrpOP8PcsLX0wDkZDB/s8jDY0XW7Ko9rWvXrvLUU09Jly5dciqHgxFAIHsBEoDs'
b'rTgSAQTSCCx+ZZhc3C6/a/++v7R2Lv/dBy7i893vflfuv/9+ftefxprNCBRLgASgWJKUg4Ct'
b'Aks+ke7r9VLA5sf/OSx13/f/Vr/vN0uzZs1k4MCBzOZXy8G/CJRcgASg5MRUgEB4BZy1MyTy'
b'/kCJ5PjiX6Gj+/99gsiElbU2Zu7+4cOHS4cOHcKLRc8Q8JkACYDPAkJzEAiKgLNxgUTfelgi'
b'NQc+u8+y4av1m4IHx4gs1gv6mOXyyy+P/8TPDPpjQQAB9wRIANyzpiYEQiPgbF4ksbcfFGdf'
b'7dS/2XZs2jqR/uNqR/ybefzNjH5XXnlltqdzHAIIFFGABKCImBSFgA0Czvo5Uvb2D0X26FR9'
b'OSyvLhAZ+bnIvmqJ/6b/Rz/6kZx66qk5lMChCCBQTAESgGJqUhYCIRdwVk+XsncfFdmb/Tt/'
b'M9jvp1NF/jy3Fufkk0+Of9/fpk2bkGvRPQT8LUAC4O/40DoEfCMQWfihxMY9o2/hEy7J10Dr'
b'dlTp5D7jRT5bXXtgv379pH///mI+/mdBAAFvBUgAvPWndgT8L1BTLdHJL0p0+u9zaqu5mI8Z'
b'7Gf+j0Qics8998h1112XUxkcjAACpRMgASidLSUjEHyBql0S+2ioRJZNzKkvn6+pvZKf+QSg'
b'adOm8Vn9zj333JzK4GAEECitAAlAaX0pHYHACjjbVkjsvQHibFmaUx9enifykyki5rv/jh07'
b'yrPPPivHHntsTmVwMAIIlF6ABKD0xtSAQOAEIismS+zDJ3Sk/46s225G9z87WeSV+bWn9OzZ'
b'U5555hlp3rx51mVwIAIIuCdAAuCeNTUhEAiB6Mw/S/TTX+r1ePUVPctlm44LfHSciPmdv1nM'
b'fP7mN/7RaLR2A/8igIDvBEgAfBcSGoSARwL790ps/EiJfPVOTg0wM/o9pFfyW6UfFpgXfPPC'
b'bxIAFgQQ8LcACYC/40PrEHBFwKnYpN/3PybO+tk51Wfm8jdz+pu5/c1H/eYjf/PRPwsCCPhf'
b'gATA/zGihQiUVMBZP1dio3Ww366NOdXzO80VfjmtdrBf586dZdSoUfFBfzkVwsEIIOCZAAmA'
b'Z/RUjID3ApH5oyX28XCR/fp7vSyXqv0iz3wm8vai2hPOOeec+M/8zOV8WRBAIDgCJADBiRUt'
b'RaB4AmZyn8/+Q6Iz/phTmZt3izyi3/fP3FB72rXXXiv33ntvfKKfnAriYAQQ8FyABMDzENAA'
b'BFwW2LNTf+I3WCIr9G18Dsv8zbWD/dbtkvhUvo8++qh85zvfyaEEDkUAAT8JkAD4KRq0BYES'
b'Czhbl0vs3f7ibFueU01jlokM0ckAK/eJtGjRIn4xn9NOOy2nMjgYAQT8JUAC4K940BoESiYQ'
b'Wf6pvvMfIlK1M6c6fjVT5MW/67QAetbxxx8fn9mvbdu2OZXBwQgg4D8BEgD/xYQWIVB0gejf'
b'/yDRz1/IaXKfPTrY70l91//+0trm9O7dW4YOHSpNmjQpevsoEAEE3BcgAXDfnBoRcE9AR/fH'
b'xg2XyILROdW5vkLkYR3sN3dT7Wk333yz3HnnneI4Tk7lcDACCPhXgATAv7GhZQgUJODs2lB7'
b'MZ8NenWeHJbZOh2AefHfWClSXl4uAwcOlEsuuSSHEjgUAQSCIEACEIQo0UYEchRw1s3SyX0e'
b'FzPDXy7Le0tEnpqkwwT04/9WrVrJyJEj5eSTT86lCI5FAIGACJAABCRQNBOBbAUi896W2IRn'
b'dXIfnZ83y8UM8Ht+ushvvqw94cQTT4y/+Ldp0ybLEjgMAQSCJkACELSI0V4E0gmYyX0m/UKi'
b'X/4l3REpt1foT/sG63z+41bU7r744otl0KBB0qhRo5THsxEBBMIhQAIQjjjSC9sF9uyQ2AeD'
b'JLJySk4Sa3RSnx+OEVmwReID/G677Ta59dZbcyqDgxFAIJgCJADBjButRuCggLNlSe3kPttX'
b'HdyWzY2/rxd5dJzIFp3et3HjxjJ48GDp06dPNqdyDAIIhECABCAEQaQL9gpEln2ik/sMFdmr'
b'v9vLYXljochwnQl4b7XI0UcfHf++v1u3bjmUwKEIIBB0ARKAoEeQ9lsrEJ32W4lO/k/tvxnC'
b'l/2yfY/Iz6bWvvifcsopMmLECGnZsmX2BXAkAgiEQoAEIBRhpBNWCezbLbGxP5LIog/z6nZz'
b'Hds3rLfIe+WXyoDHHo9f2CevgjgJAQQCLUACEOjw0XjbBJyd62on99k4v6Cun9dB5Oye7WR/'
b'WVlB5XAyAggEVyAS3KbTcgTsEnDWzpDY324Vp8AX/zq16Bf/LWYMAQsCCNgpQAJgZ9zpdcAE'
b'InPfkLI37hOnUn+vV7SlRmIfPaWXBj4wAUDRyqUgBBAIggAJQBCiRBvtFajeL9FPfiKxj0eI'
b'VOuMPcVe9NLAZspg2acT/7MggIBVAiQAVoWbzgZKYPc2ib39gERn/a2kzXY2L9ZBhcNLWgeF'
b'I4CA/wRIAPwXE1qEgDibF0nZ326TyKpprmiYXxREZ/7JlbqoBAEE/CFAAuCPONAKBA4KRJZ8'
b'LGWv/kCcHasPbnPjRvSz/xBntV4RiAUBBKwQIAGwIsx0MjACZk7/0QN1lh4PvpPX8QbmegLO'
b'rg2B4aKhCCCQvwAJQP52nIlA8QX0in65zuxXzEaYXxnEE5Dq7C8lXMz6KQsBBNwTIAFwz5qa'
b'EGhYIFoue53yho8r4RHO+tkS++T/lrAGikYAAT8IkAD4IQq0AYEDApX6S78BY6pynN2/+HyR'
b'Oa9JZN5bxS+YEhFAwDcCJAC+CQUNQUCkqqpKxuu8PC/N8F4jNuHH4myY531DaAECCJREgASg'
b'JKwUikBhAiYBGL+ysDIKPnt/Ve0kQbu3FlwUBSCAgP8ESAD8FxNahED8K4AnJogs2+4tRvzi'
b'Qx8O0XGJZnAiCwIIhEmABCBM0aQvoRLYpQPxHx4rUuHxgPzIyikSnfxCqGzpDAIIiJAA8ChA'
b'wMcCS7eJDNYL9tV43Mbo9D9IZMk4j1tB9QggUEwBEoBialIWAiUQGKeDAn89swQF51hkbMww'
b'cbYuy/EsDkcAAb8KkAD4NTK0C4EEgRf/LjLB60GBeysk9t5jOkthRULLuIkAAkEVIAEIauRo'
b't1UC5iuAJ/SrgOVeDwrUTwDMJwEsCCAQfAESgODHkB5YIrCzyieDAnUsQHT67y1Rp5sIhFeA'
b'BCC8saVnARRo3KRJxlYv0UGBQyb6YFDg5BfF/DqABQEEgitAAhDc2NHyEAo0Ki+Xfv36ZezZ'
b'2OUiv/F6UKDOCxD7cLBesnhtxrayEwEE/CtAAuDf2NAySwX69+8vJ510Usbev6gzBU5clfGQ'
b'0u/cvU1i7+uli3XGQBYEEAieAAlA8GJGi0MuUFZWJsOHD5cjjzwybU+rdVTgv+tMgZ4PCtRr'
b'BcQmjErbTnYggIB/BUgA/BsbWmaxwFFHHSXPPPOMRKPRtApmUOAjZqZAvYKgl0tk3tsSmfOq'
b'l02gbgQQyEOABCAPNE5BwA2BHj16yAMPPJCxqsU6KHCoD2YKjH3yM3HWzc7YVnYigIC/BEgA'
b'/BUPWoNAksD3vvc9ufzyy5O2HXpnzHKR//ry0K0u36/eGx8P4FRucbliqkMAgXwFSADyleM8'
b'BFwSMIMCTzzxxIy1vaAzBU7yeFCgs2uDxD4YJFK9P2Nb2YkAAv4QIAHwRxxoBQJpBcr1p4Fm'
b'UGCLFi3SHlM3KHDFjrSHuLLDWT1dop8950pdVIIAAoUJkAAU5sfZCLgicPTRRzc4KHBH3UyB'
b'Hg8KjM78s0QWfuiKC5UggED+AiQA+dtxJgKuCpx++uly//33Z6xz8VYdFKgzBXq9xMYNF2fz'
b'Yq+bQf0IIJBBgAQgAw67EPCbwNVXXy2XXXZZxmaN0Sv2ej4ocF+lxEbrlQOrdmZsKzsRQMA7'
b'ARIA7+ypGYG8BAYMGNDgoMDn/TAocNtKiX30pPbRXMuQBQEE/CZAAuC3iNAeBBoQyHZQ4CCd'
b'KXClx4MCI8smSvSL/2qgR+xGAAEvBEgAvFCnTgQKFDCDAocNG5ZxpsDtBwYFVno9KHDqbySy'
b'/NMCe8zpCCBQbAESgGKLUh4CLgmcccYZct9992WsbZEOCnzS60GB5sqBHw0VZ/vqjG1lJwII'
b'uCtAAuCuN7UhUFSBa665Rvr27ZuxzA91UOBvZ2U8pPQ79+yoHRS4b3fp66IGBBDISoAEICsm'
b'DkLAvwKPPfaYdO/ePWMDn5su8qnHb8CdTQsl9vHIjO1kJwIIuCdAAuCeNTUhUBKBRo0ayYgR'
b'IxqeKXC8yCqvBwUuGC3RL/9aEgcKRQCB3ARIAHLz4mgEfClQNygwEkn/Jx0fFDhOxPNBgZ/+'
b'Qpy1M3zpSKMQsEkg/bOFTQr0FYEQCJhBgffee2/GnizUi/U9NSnjIaXfqRcLir0/SJyKTaWv'
b'ixoQQCCtAAlAWhp2IBA8gWuvvbbBQYEfLBX53Wxv+2Ze/OMzBe7f621DqB0BiwVIACwOPl0P'
b'p4AZFNitW7eMnfvlNJHPvB4UuG62xCaMythOdiKAQOkESABKZ0vJCHgikO2gwIE6U+Bqj6fq'
b'j8x7S6KzGBToyQOFSq0XIAGw/iEAQBgF2rZtK08//bRkHBS4R+ThsSK7vZ4pcNIvJLLqizCG'
b'gT4h4GsBEgBfh4fGIZC/QM+ePeWee+7JWMACvwwK/EAHBe7w+DuJjFLsRCB8AiQA4YspPULg'
b'oMB1110nl1xyycH7qW68v9T7QYGye5vE3h0gsrciVRPZhgACJRAgASgBKkUi4CcBMyjwhBNO'
b'yNgkMyhw8pqMh5R8p7N5kcQ+eEKvHlxd8rqoAAEEREgAeBQgEHKBxo0bx2cKPOKII9L2tLpG'
b'5LHxPhgUqFcNjE76edp2sgMBBIonQAJQPEtKQsC3Au3atZOnnnqqwUGBj/hhUKBOFRyZ/Ypv'
b'LWkYAmERIAEISyTpBwINCJx55pnyb//2bxmPmq+DAp/+NOMhruyMffJTiayY7EpdVIKArQIk'
b'ALZGnn5bKXD99dfLt7/97Yx9H71E5A9zMh5S+p06DiD2wb+Ls3FB6euiBgQsFSABsDTwdNte'
b'gYEDB8rxxx+fEeAX+rN8rwcFStUuib39kDjbV2VsKzsRQCA/ARKA/Nw4C4HACtQNCmzevHna'
b'PphBgY+P935QoFO5WWJvPSjmfxYEECiuAAlAcT0pDYFACLRv377BQYHbdKbAR8aJ7NnvbZfM'
b'JwCxt38o5hMBFgQQKJ4ACUDxLCkJgUAJnHXWWXL33XdnbPN8feP99KSMh7iy09k4X68eqBMF'
b'7a9ypT4qQcAGARIAG6JMHxFII3DDDTc0OCjwPR0U+D9eDwrU9kdWTZPY+wNFqj2+eEEaSzYj'
b'EDQBEoCgRYz2IlBkgWwGBf5cBwVOWVvkivMoLrJsksQ+HMJsgXnYcQoChwqQABwqwn0EUgjE'
b'QvyXkvWgwI9F1vjga/jI4rESG/uMRklHKrIggEDeAiF+WsvbhBMRqCcQ5gTAdDabQYFbzaDA'
b'sd4PCjTtjcx/V2Ljf2xusiCAQJ4CJAB5wnGaXQIxJ/z9NYMC77rrrowd/UoHBT7jg5kCTSMj'
b'c17VJGCk3uKTgIxBYycCaQRIANLAsBmBRIHmjRLvlfC2422mceONN8rFF1+csYPvLBb549yM'
b'h7i2MzLn9dqvA7iCoGvmVBQeARKA8MSSnpRQoGXjEhaeUHRNJJZwz5ubZlBg165dM1b+86ki'
b'U30wKNA0MvLVOxL7aKj+OsDjCQsyirETAf8JkAD4Lya0yAcCNTXJHyu3bOJSoyJlLlWUvpom'
b'TZrELx98+OGHpz1ov/I8poMC1/pgUKBpZGThh3rtgEE6T8DetG1mBwIIJAuQACR7cA+BuMD+'
b'/cnvJts3cwnGJxPddOjQocGZAusGBVYlU7kEVb+ayJKP49cOkKqd9XeyBQEE6gmQANQjYQMC'
b'+kbykASgyxHuqDi7t7lTURa1nH322XLnnXdmPHKeDgoc5pNBgaahkdXTpOy1u8XZtTFju9mJ'
b'AAL69wICAgjUF9i3L3m2ObcSgPgsd3t21G+QR1tuuukm+da3vpWxdjMo8E8+GRRoGupsXiSx'
b'V+8QZ8vSjO1mJwK2C5AA2P4IoP8pBSoqKg5uN78A6JD+6/CDxxXrhrNlSbGKKko5gwYNkuOO'
b'Oy5jWT/TQYFf+GRQoGmos3OdfhJwp04frA1jQQCBlAIkAClZ2Gi7wObNX19+9rQ2+oLiIoh5'
b'B+unxQwKHDlypDRrln4gRHxQ4HiRdT4ZFBj3009SYm8/JNFZf/MTJ21BwDcCJAC+CQUN8ZPA'
b'li1bDjbn9KMP3nTlRmSl/961ZjMocMvu2ssH+2VQYDxY+tPA6Cc/kdiEH/MzQVcevVQSJAES'
b'gCBFi7a6JrBu3bqDdV10zMGbrtyIrPhcX6z893O2c845R+64446MBnM36UyBn2U8xJOdkdmv'
b'SOytB8Sp/PqTHU8aQqUI+EiABMBHwaAp/hFYunRpvDEnthTp6OL3//FK91ZIZMkE/2AktOSW'
b'W26RPn36JGypf/Nt/Qbj5Xn1t3u9xfxCIPbXW8RZPd3rplA/Ar4QIAHwRRhohN8Eli1bFm/S'
b'Vd29aZl5x+rX5YknnpAuXbpkbN5Pp4hM+/pDlIzHurnTqdgkZW/eJ9Hpv9Nqkyd7crMd1IWA'
b'HwRIAPwQBdrgKwHz8f+OHTuklc7+1+84b5pm3q2a1Y9L1oMCP/bZoMA6TL1uQPTzF/QrgYfi'
b'vxao28z/CNgmQAJgW8Tpb4MCM2bMiB9z52ki5dEGDy/ZAWbwml/nt+/YsaM8+eSTEomkfwrZ'
b'rIMCHx0n4qtBgQnRiqycLGUv3yCRuW8kbOUmAvYIpP/rtceAniKQJDBz5kw57SiRf+mWtNn1'
b'O87mJRL97DnX6822wnPPPVduv/32jIfP0UGBw304KPBgo3W8RezjEfppwIPi7Fh9cDM3ELBB'
b'gATAhijTx6wFzEWAZk+ZIMN6u/vb/3QNjM78s0Tmj0632/Pt2QwKfFMHBf7Fh4MCE/Hinwb8'
b'+XqJTv21yL49ibu4jUBoBUgAQhtaOpaPwNwZU+XxUzdIu6b5nF2ac2Jjn45f7a40pRdWquM4'
b'ktWgQJ3aYPr6wuoq+dl6ISaTAJS9fL3+CkMHMLAgEHIBEoCQB5juZS9gRoi3nThYTmmd/Tmu'
b'HKmD1sz17qNf/JcOXK92pcpcKqm7fHCmmQL3abMHjBNZ//UMy7lU4eqxzo41Ehv9uJS9codE'
b'Vkx2tW4qQ8BNARIAN7Wpy7cCkWWfSPQvN0vbqH+uxpeEZUauT3lJv6u+X5xtK5J2+eFOp06d'
b'ZOjQoQ0OCnxknH8HBR7q6KyfrVMJP6jXFLhLzFcELAiETYAEIGwRpT85CThbl0nsg8ESe7e/'
b'RHZ/Pf1vToW4eHBklV7u9uUb9Wdsz4tUbnWx5oarOu+88xoeFLhRZIROdBikxVk7Mz5IMP6L'
b'gTmv6hgB/XkDCwIhEIiFoA90AYHcBPTddGTlFInMe1Mii8f58mP1jB3SaYKj038v0S//ItXd'
b'LpP93S+XmqP/QU9xMp7mxk4zKHDevHkyfvz4tNW9sVDkpFYiV3s0yVLahjWww1ylMTZ+lIgm'
b'X9Vdv127tu+h7LyPaoCO3T4VIAHwaWBoVnEFnF0bdApYnVxn1RfiLP9UzPf9gV90tHpkzmvx'
b'tabZ0VLT+Xypbn+G1LQ/XWqaHOlJ98ygwMGDB8c/CaibTjlVQ36iMwWeoE3soT+3DNyyZ+fX'
b'7k1aSk2XC6W6g7q36yE1h2lmw4JAQAQc87MnlmAK9OrVq4+2fKxXrX///fczXiLWtXbpE7Kz'
b'V69DW2XWHRJ/sd+5QUSvCe9sXiyRzfqW02cfl5faJp4QtDxOalp2FTHJQdPW8VXK9cIGZU2k'
b'Rlcpb1ayZixfvjyeBOzcuTNtHS0bi/zhOyJHHZb2kMDtqDmik9S07iY1LTpLzZG6Nu9Q61ze'
b'VGoaqX203LM+XXLJJZIpHi41bM/UqVM18ix+EOATAD9EgTbkLWBe7Mt+f0Xe54f1RMckP7qK'
b'ftqRbqn6P++ING6RbndB24855hgZMmSI9O/fX9K9yaibKfClvt7OuFhQRw852QzQTDdIs/qE'
b'vrLv4iGHnMFdBLwT4Msr7+ypGYFQC5x//vly2223ZezjbB0UODJggwIzdoidCARIgAQgQMGi'
b'qQgETeD73/++9O6t0ypmWF7Xb2j+96sMB7ALAQRKIkACUBJWCkUAASNgBgWarwI6d+6cEeTH'
b'Oihwht9nCszYA3YiEDwBEoDgxYwWIxAogcMOO0xGjhwpTZs2TdtuM1Ngf519d0MAZgpM2wl2'
b'IBAwARKAgAWM5iYLVFf7b2rc5BZyzwjUDQo0nwikWzZV1l4+eC8hTUfEdgSKKkACUFROCiu1'
b'wOLFi+Xmm2+Wq666Si699FL5rv7PEgyBCy64QG699daMjZ3FoMCMPuxEoJgCJADF1KSskgtU'
b'VlbKwoULZc2aNbJ9+3a9Ng5vF0uOXsQKTALQ0KDA1xaI/G1+ESulKAQQSCnAPAApWdgYFAHz'
b'W/Lr3wpKa71rZ1lZmfzqpV8lN6BR8+T7LtyrmynQ/DzQTBaUbhml194JehLwnX795Oqrr/m6'
b'i43d9/66cm4hUF+ABKC+CVsCJGAGj83fHKAGe9TU8nJHZ6g7waPak6s1gwHNoMDbb79ddu3S'
b'2RtTLGGI6wbRaYJ9Yp6CmE0ICF8B8CBAAAHXBczPAs01AzINCnS9UVSIgGUCJACWBZzuIuAX'
b'ATMWwEwUxIIAAt4IkAB4406tCCCgAmYsgPl1AAsCCLgvQALgvjk1IoDAAYG6QYFmngAWBBBw'
b'V4BBgO56U1uBAm3btpUHHnigwFLsOz0ajfq2082aNZNRo0bJpEmTfNvGfBp20kkn5XMa5yDg'
b'mgAJgGvUVFQMgVatWsm//uu/FqMoyvCRQKdOnYirj+JBU+wQ4CsAO+JMLxFAAAEEEEgSIAFI'
b'4uAOAggggAACdgiQANgRZ3qJAAIIIIBAkgAJQBIHdxBAAAEEELBDgATAjjjTSwQQQAABBJIE'
b'SACSOLiDAAIIIICAHQIkAHbEmV4igAACCCCQJEACkMTBHQQQQAABBOwQIAGwI870EgEEEEAA'
b'gSQBEoAkDu4ggAACCCBghwAJgB1xppcIIIAAAggkCZAAJHFwBwEEEEAAATsESADsiDO9RAAB'
b'BBBAIEmABCCJgzsIIIAAAgjYIUACYEec6SUCCCCAAAJJAiQASRzcQQABBBBAwA4BEgA74kwv'
b'EUAAAQQQSBIgAUji4A4CCCCAAAJ2CJAA2BFneokAAggggECSAAlAEgd3EEAAAQQQsEOABMCO'
b'ONNLBBBAAAEEkgRIAJI4uIMAAggggIAdAiQAdsSZXiKAAAIIIJAkQAKQxMEdBBBAAAEE7BAg'
b'AbAjzvQSAQQQQACBJAESgCQO7iCAAAIIIGCHAAmAHXGmlwgggAACCCQJkAAkcXAHAQQQQAAB'
b'OwRIAOyIM71EAAEEEEAgSYAEIImDOwgggAACCNghQAJgR5zpJQIIIIAAAkkCJABJHNxBAAEE'
b'EEDADgESADviTC8RQAABBBBIEogl3eMOAjkI9OvXTxzHyeEMDkUAAa8EqqqqvKqaen0qQALg'
b'08AEoVl79+4NQjNpIwIIIIBACgG+AkiBwiYEEEAAAQTCLkACEPYI0z8EEEAAAQRSCJAApEBh'
b'EwIIIIAAAmEXIAEIe4TpHwIIIIAAAikESABSoLAJAQQQQACBsAuQAIQ9wvQPAQQQQACBFAIk'
b'AClQ2IQAAggggEDYBUgAwh5h+ocAAggggEAKARKAFChsQgABBBBAIOwCJABhjzD9QwABBBBA'
b'IIUACUAKFDYhgAACCCAQdgESgLBHmP4hgAACCCCQQoAEIAUKmxBAAAEEEAi7AAlA2CNM/xBA'
b'AAEEEEghQAKQAoVNCCCAAAIIhF2ABCDsEaZ/CCCAAAIIpBAgAUiBwiYEEEAAAQTCLkACEPYI'
b'0z8EEEAAAQRSCJAApEBhEwIIIIAAAmEXIAEIe4TpHwIIIIAAAikESABSoLAJAQQQQACBsAuQ'
b'AIQ9wvQPAQQQQACBFAIkAClQ2IQAAggggEDYBUgAwh5h+ocAAggggEAKARKAFChsQgABBBBA'
b'IOwCJABhjzD9QwABBBBAIIUACUAKFDYhgAACCCAQdgESgLBHmP4hgAACCCCQQoAEIAUKmxBA'
b'AAEEEAi7AAlA2CNM/xBAAAEEEEghQAKQAoVNCCCAAAIIhF2ABCDsEaZ/CCCAAAIIpBAgAUiB'
b'wiYEEEAAAQTCLkACEPYI0z8EEEAAAQRSCJAApEAJ0KaqALWVpiKAAAI8Z/noMUAC4KNg5NGU'
b'tXmcwykIIICAVwI8Z3kln6JeEoAUKAHatCZAbaWpCCCAAM9ZPnoMkAD4KBi5NmXq1KmVes7W'
b'XM/jeAQQQMAjgdUe1Uu1KQRIAFKgBGwTGXXAAkZzEbBYgOcrHwWfBMBHwcizKWTUecJxGgII'
b'uC7A85Xr5OkrJAFIbxOUPWTUQYkU7UQAAZ6vfPQYIAHwUTDybMpneZ7HaQgggIDbAjxfuS2e'
b'oT4SgAw4Adn1ekDaSTMRQMBugVk6cHmR3QT+6j0JgL/ikXNr9A9qpZ40NecTOQEBBBBwV+A1'
b'd6ujtoYESAAaEgrGfj4FCEacaCUCNgvwPOWz6JMA+CwgeTaHzDpPOE5DAAFXBFbqp5V8UukK'
b'dfaVkABkb+XbI/XvapY2ju/WfBshGoaA9QK8+/fhQ4AEwIdBybNJz+d5HqchgAACpRSo1sJf'
b'LGUFlJ2fAAlAfm5+POuX2qhlfmwYbUIAAasFfq+fUn5ptYBPO08C4NPA5Nos/QPbo+c8ket5'
b'HI8AAgiUUGC3ls3zUgmBCymaBKAQPf+d+z/apBn+axYtQgABSwX+n745WWFp333fbRIA34co'
b'+wbqH5r5rm1A9mdwJAIIIFAygS1a8vCSlU7BBQuQABRM6K8CNAkYrS16z1+tojUIIGChwJP6'
b'fGSSABafCpAA+DQwBTbrRj1/SYFlcDoCCCCQr8Bf9MX/5/mezHnuCJAAuOPsai36h7dJK/xn'
b'XXe6WjGVIYAAAiLTFeH7QPhfgATA/zHKq4WaBJjJgW7StSavAjgJAQQQyF1gnZ5yhT7/VOR+'
b'Kme4LUAC4La4i/XpH6GZIniwi1VSFQII2CtQpV2/Sp93GPUfkMcACUBAApVvM/WPcZie+1K+'
b'53MeAgggkIWAefH/P/p8MzGLYznEJwIkAD4JRCmboX+UP9Dyf6jr/lLWQ9kIIGClwHrt9cX6'
b'PPMnK3sf4E47NTV8RRzg+OXU9F69evXVE17W9YicTuRgBBBAILWAmXjMfOfPNOSpfXy9lU8A'
b'fB2e4jZO/0jNHAFn6zq/uCVTGgIIWCjwivb5fF78gxt5EoDgxi6vlusf61d6okkC/ltXM3Mg'
b'CwIIIJCLwDY9+FFdv6fPJ7tyOZFj/SXAVwD+ioerrdGvBE7RCkfo2s/ViqkMAQSCKGAG+v2H'
b'rsP0hd/MNcIScAESgIAHsBjN10Tgm1rOSF3PKUZ5lIEAAqESMAPF/qjrIH3hXxqqnlneGRIA'
b'yx8Aid3XRMDMHmimEb5U18MT93EbAQSsE1ipPX5T15f0hd/M7scSMgESgJAFtBjd0USgXMvp'
b'o+sVuv6Trp10ZUEAgfALmFH9r+v6hr7ofxH+7trdQxIAu+OfVe81IeihB/6Drh10bX/gf3Pb'
b'rG10ZTCpIrAgEAAB8z3+Gl1X67rqwFp3e7K+6C/XbSyWCJAAWBJouokAAggggECiAO/cEjW4'
b'jQACCCCAgCUCJACWBJpuIoAAAgggkChAApCowW0EEEAAAQQsESABsCTQdBMBBBBAAIFEARKA'
b'RA1uI4AAAgggYIkACYAlgaabCCCAAAIIJAqQACRqcBsBBBBAAAFLBEgALAk03UQAAQQQQCBR'
b'gAQgUYPbCCCAAAIIWCJAAmBJoOkmAggggAACiQIkAIka3EYAAQQQQMASARIASwJNNxFAAAEE'
b'EEgUIAFI1OA2AggggAAClgiQAFgSaLqJAAIIIIBAogAJQKIGtxFAAAEEELBEgATAkkDTTQQQ'
b'QAABBBIFSAASNbiNAAIIIICAJQIkAJYEmm4igAACCCCQKEACkKjBbQQQQAABBCwRIAGwJNB0'
b'EwEEEEAAgUQBEoBEDW4jgAACCCBgiQAJgCWBppsIIIAAAggkCpAAJGpwGwEEEEAAAUsESAAs'
b'CTTdRAABBBBAIFGABCBRg9sIIIAAAghYIkACYEmg6SYCCCCAAAKJAiQAiRrcRgABBBBAwBIB'
b'EgBLAk03EUAAAQQQSBQgAUjU4DYCCCCAAAKWCJAAWBJouokAAggggECiAAlAoga3EUAAAQQQ'
b'sESABMCSQNNNBBBAAAEEEgVIABI1uI0AAggggIAlAiQAlgSabiKAAAIIIJAoQAKQqMFtBBBA'
b'AAEELBH4/x08dMfMBhpeAAAAAElFTkSuQmCC')
authors: ["yangyingchao@gmail.com"] | author_id: yangyingchao@gmail.com

blob_id: 8498569e5d9f49504c408a0e022c08b078d7d7db | directory_id: 4e30d990963870478ed248567e432795f519e1cc | content_id: acacc17322fd85a929ef50acca3daea197b20b69
path: /ciscoisesdk/models/validators/v3_1_1/jsd_b839d4dee9b958e48ccef056603e253f.py
repo_name: CiscoISE/ciscoisesdk | snapshot_id: 84074a57bf1042a735e3fc6eb7876555150d2b51 | revision_id: f468c54998ec1ad85435ea28988922f0573bfee8 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2023-09-04T23:56:32.232035 | revision_date: 2023-08-25T17:31:49 | committer_date: 2023-08-25T17:31:49 | github_id: 365,359,531
star_events_count: 48 | fork_events_count: 9 | gha_license_id: MIT | gha_event_created_at: 2023-08-25T17:31:51 | gha_created_at: 2021-05-07T21:43:52 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,663 | extension: py
content:
# -*- coding: utf-8 -*-
"""Identity Services Engine getUserGroups data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F(object):
"""getUserGroups request schema definition."""
def __init__(self):
super(JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"OperationAdditionalData": {
"properties": {
"additionalData": {
"items": {
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
authors: ["wastorga@altus.co.cr"] | author_id: wastorga@altus.co.cr

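Aside: a minimal sketch of exercising the generated validator class above; the payload field values are hypothetical, only the schema shape (an OperationAdditionalData object holding an additionalData array of name/value objects) comes from the record:

validator = JSONSchemaValidatorB839D4DeE9B958E48CceF056603E253F()
ok = {
    "OperationAdditionalData": {
        "additionalData": [{"name": "username", "value": "jdoe"}]
    }
}
validator.validate(ok)  # conforms to the schema, returns silently
bad = {"OperationAdditionalData": {"additionalData": "not-a-list"}}
validator.validate(bad)  # wrong type for 'additionalData', raises MalformedRequest
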
blob_id: 283d47d01cb4a37496f04beecb4ad8779a0b077c | directory_id: 1a897f626be0348ab84aee55bb3f3adc5167ac82 | content_id: d32773674364fc59f2a1070c7e8515e5d6b0e16a
path: /src/mapper/CountHomoHetInOneVCF.py
repo_name: polyactis/vervet-web | snapshot_id: 13f2fc1f0e8711045e7e592ef6c5e61065d8b269 | revision_id: a550680f83d4c0c524734ee94bdd540c40f3a537 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-01T18:18:09.094561 | revision_date: 2014-05-15T23:37:54 | committer_date: 2014-05-15T23:37:54 | github_id: 32,554,427
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,866 | extension: py
content:
#!/usr/bin/env python
"""
Examples:
%s
%s -i gatk/Contig799.vcf.gz -l 1000000 -c Contig799 -o /tmp/output
Description:
2011-11-7
count the number of homo-ref/homo-alt/het calls from one vcf
"""
import sys, os, math
__doc__ = __doc__%(sys.argv[0], sys.argv[0])
bit_number = math.log(sys.maxint)/math.log(2)
if bit_number>40: #64bit
sys.path.insert(0, os.path.expanduser('~/lib64/python'))
sys.path.insert(0, os.path.join(os.path.expanduser('~/script64')))
else: #32bit
sys.path.insert(0, os.path.expanduser('~/lib/python'))
sys.path.insert(0, os.path.join(os.path.expanduser('~/script')))
import csv
from pymodule import ProcessOptions, getListOutOfStr, PassingData, utils
from pymodule import VCFFile
from AbstractVCFMapper import AbstractVCFMapper
class CountHomoHetInOneVCF(AbstractVCFMapper):
__doc__ = __doc__
option_default_dict = AbstractVCFMapper.option_default_dict.copy()
def __init__(self, **keywords):
"""
"""
AbstractVCFMapper.__init__(self, **keywords)
def countHomoHetCallsForEachSampleFromVCF(self, inputFname, outputFname, chromosome=None, chrLength=None, minDepth=1):
"""
2011-11-2
given a VCF file, count the number of homo-ref, homo-alt, het calls
"""
sys.stderr.write("Count the number of homozygous-ref/alt & het from %s .\n"%(inputFname))
vcfFile = VCFFile(inputFname=inputFname, minDepth=minDepth)
sampleID2data = {} #key is sampleID, value is a list of 3 numbers. 'NoOfHomoRef', 'NoOfHomoAlt', 'NoOfHet'
no_of_total = 0.
minStart = None
for vcfRecord in vcfFile.parseIter():
chr = vcfRecord.chr
pos = vcfRecord.pos
pos = int(pos)
refBase = vcfRecord.data_row[0].get("GT")[0]
for sample_id, sample_index in vcfFile.sample_id2index.iteritems():
if sample_id=='ref': #ignore the reference
continue
if sample_id not in sampleID2data:
sampleID2data[sample_id] = [0, 0, 0]
if not vcfRecord.data_row[sample_index]: #None for this sample
continue
callForThisSample = vcfRecord.data_row[sample_index].get('GT')
if not callForThisSample or callForThisSample=='NA':
continue
if callForThisSample[0]==refBase and callForThisSample[1]==refBase:
#homozygous reference allele
sampleID2data[sample_id][0]+=1
elif callForThisSample[0]==callForThisSample[1] and callForThisSample[0]!=refBase:
#homozygous alternative allele
sampleID2data[sample_id][1]+=1
elif callForThisSample[0]!=callForThisSample[1]:
sampleID2data[sample_id][2]+=1
import csv
writer = csv.writer(open(outputFname, 'w'), delimiter='\t')
writer.writerow(['#sampleID', 'chromosome', 'length', "NoOfTotal", 'NoOfHomoRef', 'NoOfHomoAlt', "FractionOfHomoAlt", 'NoOfHet', "FractionOfHet"])
sampleIDLs = sampleID2data.keys()
sampleIDLs.sort()
for sampleID in sampleIDLs:
count_data = sampleID2data.get(sampleID)
noOfHomoRef, noOfHomoAlt, noOfHet = count_data[:3]
no_of_calls = float(sum(count_data))
if no_of_calls>0:
fractionOfHomoAlt = noOfHomoAlt/no_of_calls
fractionOfHet = noOfHet/no_of_calls
else:
fractionOfHomoAlt = -1
fractionOfHet = -1
writer.writerow([sampleID, chromosome, chrLength, int(no_of_calls), noOfHomoRef, noOfHomoAlt, \
fractionOfHomoAlt, noOfHet, fractionOfHet])
del writer
sys.stderr.write("Done.\n")
def run(self):
"""
"""
if self.debug:
import pdb
pdb.set_trace()
#outputFname = "%s.homoHetCountPerSample.tsv"%(outputFnamePrefix)
self.countHomoHetCallsForEachSampleFromVCF(self.inputFname, self.outputFname, chromosome=self.chromosome, \
chrLength=self.chrLength, minDepth=self.minDepth)
if __name__ == '__main__':
main_class = CountHomoHetInOneVCF
po = ProcessOptions(sys.argv, main_class.option_default_dict, error_doc=main_class.__doc__)
instance = main_class(**po.long_option2value)
instance.run()
authors: ["crocea@uclaOffice"] | author_id: crocea@uclaOffice

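Aside: the homozygous-ref / homozygous-alt / het classification rule used in countHomoHetCallsForEachSampleFromVCF above, shown on toy genotype strings so it runs without the pymodule/VCFFile dependencies; the sample names and calls are made up:

ref_base = "A"
calls = {"s1": "AA", "s2": "TT", "s3": "AT", "s4": None}
counts = {s: [0, 0, 0] for s in calls}   # [NoOfHomoRef, NoOfHomoAlt, NoOfHet]
for sample, gt in calls.items():
    if not gt:                                    # missing call, skipped like the None rows above
        continue
    if gt[0] == ref_base and gt[1] == ref_base:
        counts[sample][0] += 1                    # homozygous reference
    elif gt[0] == gt[1]:
        counts[sample][1] += 1                    # homozygous alternative
    else:
        counts[sample][2] += 1                    # heterozygous
print(counts)
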
blob_id: 6382e8fc0c96ec5821aa03f0fefeae9614c7e87e | directory_id: c304fdd01358c682acff2603733e0a5377bf8940 | content_id: 5661a073c11184967f5a98b921438bf4445b13fd
path: /solutions/1813-maximum-erasure-value/maximum-erasure-value.py
repo_name: gaelwjl/Leetcode-Solution | snapshot_id: 8432e5610adacc69455a705b83ad433f01c9eaad | revision_id: 933bdb462400f490506285774d277394753ef79b | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2023-03-06T00:25:53.486131 | revision_date: 2021-02-10T11:46:40 | committer_date: 2021-02-10T11:46:40 | github_id: null
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,530 | extension: py
content:
# You are given an array of positive integers nums and want to erase a subarray containing unique elements. The score you get by erasing the subarray is equal to the sum of its elements.
#
# Return the maximum score you can get by erasing exactly one subarray.
#
# An array b is called to be a subarray of a if it forms a contiguous subsequence of a, that is, if it is equal to a[l],a[l+1],...,a[r] for some (l,r).
#
#
# Example 1:
#
#
# Input: nums = [4,2,4,5,6]
# Output: 17
# Explanation: The optimal subarray here is [2,4,5,6].
#
#
# Example 2:
#
#
# Input: nums = [5,2,1,2,5,2,1,2,5]
# Output: 8
# Explanation: The optimal subarray here is [5,2,1] or [1,2,5].
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 10^5
# 1 <= nums[i] <= 10^4
#
#
from collections import defaultdict  # needed for the frequency map below
from typing import List

class Solution:
def maximumUniqueSubarray(self, nums: List[int]) -> int:
cnt = defaultdict(int)
ans = 0
i, j = 0, 0
prefix = [0]
for v in nums:
prefix.append(prefix[-1] + v)
while i < len(nums):
while i < len(nums):
if cnt[nums[i]] >= 1:
break
cnt[nums[i]] += 1
i += 1
if (i == len(nums)):
ans = max(ans, prefix[-1] - prefix[j])
break
ans = max(ans, prefix[i] - prefix[j])
while j < len(nums):
cnt[nums[j]] -= 1
j += 1
if cnt[nums[i]] < 1:
break
return ans
authors: ["47608857+wenjun20@users.noreply.github.com"] | author_id: 47608857+wenjun20@users.noreply.github.com

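Aside: a quick check of the sliding-window solution above against the two examples given in the problem statement, assuming defaultdict and List are imported (e.g. from collections and typing):

sol = Solution()
print(sol.maximumUniqueSubarray([4, 2, 4, 5, 6]))              # expected 17, from [2,4,5,6]
print(sol.maximumUniqueSubarray([5, 2, 1, 2, 5, 2, 1, 2, 5]))  # expected 8, from [5,2,1] or [1,2,5]
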
blob_id: f03f550123a9975d6c7f9efbc70fadc0c24e21bc | directory_id: 921577c9cafd42751cee4a2b1355767ee5c42e94 | content_id: b2aa90a891a92f2612bc20e46541bd7a6dd0cb0b
path: /ER_clean/run_singlePFAM_DCA.py
repo_name: evancresswell/DCA_ER_old | snapshot_id: f3cbb4c4a6662691c47df06879521bfb387477b4 | revision_id: 1a3ca3388177e67d821859aca26fa5073d7e4fbe | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2023-07-25T07:30:35.823564 | revision_date: 2020-12-31T21:36:31 | committer_date: 2020-12-31T21:36:31 | github_id: 233,687,646
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2023-07-06T21:49:30 | gha_created_at: 2020-01-13T20:32:23 | gha_language: Jupyter Notebook
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 9,417 | extension: py
content:
import sys,os
import data_processing as dp
import ecc_tools as tools
import timeit
# import pydca-MF module
from pydca.sequence_backmapper import sequence_backmapper
from pydca.msa_trimmer import msa_trimmer
from pydca.msa_trimmer.msa_trimmer import MSATrimmerException
from pydca.dca_utilities import dca_utilities
from scipy import linalg
from sklearn.preprocessing import OneHotEncoder
from pydca.meanfield_dca import meanfield_dca
import numpy as np
import pickle
from gen_ROC_jobID_df import add_ROC
import matplotlib
#matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
# Import Bio data processing features
import Bio.PDB, warnings
from Bio.PDB import *
pdb_list = Bio.PDB.PDBList()
pdb_parser = Bio.PDB.PDBParser()
from scipy.spatial import distance_matrix
from Bio import BiopythonWarning
warnings.filterwarnings("error")
warnings.simplefilter('ignore', BiopythonWarning)
warnings.simplefilter('ignore', DeprecationWarning)
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', ResourceWarning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
#========================================================================================
# data_path = '/home/eclay/Pfam-A.full'                      # alternate local paths, overridden by the assignments below
# preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/'
data_path = '/data/cresswellclayec/hoangd2_data/Pfam-A.full'
preprocess_path = '/data/cresswellclayec/DCA_ER/biowulf/pfam_ecc/'
#pfam_id = 'PF00025'
pfam_id = sys.argv[1]
cpus_per_job = int(sys.argv[2])
job_id = sys.argv[3]
print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id))
# Read in Reference Protein Structure
pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id))
# convert bytes to str (python 2 to python 3)
pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1])
ipdb = 0
tpdb = int(pdb[ipdb,1]) - 1
print('Ref Sequence # should be : ',tpdb-1)
# Load Multiple Sequence Alignment
s = dp.load_msa(data_path,pfam_id)
# Load Polypeptide Sequence from PDB as reference sequence
print(pdb[ipdb,:])
pdb_id = pdb[ipdb,5]
pdb_chain = pdb[ipdb,6]
pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8])
pdb_range = [pdb_start-1, pdb_end]
#print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1)
#print('download pdb file')
pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb')
#pdb_file = pdb_list.retrieve_pdb_file(pdb_id)
pfam_dict = {}
#---------------------------------------------------------------------------------------------------------------------#
chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain]
ppb = PPBuilder().build_peptides(chain)
# print(pp.get_sequence())
print('peptide build of chain produced %d elements'%(len(ppb)))
matching_seq_dict = {}
poly_seq = list()
for i,pp in enumerate(ppb):
for char in str(pp.get_sequence()):
poly_seq.append(char)
print('PDB Polypeptide Sequence: \n',poly_seq)
#check that poly_seq matches up with given MSA
poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]]
print('PDB Polypeptide Sequence (In Proteins PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range)
if len(poly_seq_range) < 10:
print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE')
poly_seq_range = poly_seq
else:
pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range')
pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/')
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
trimmed_data_outfile = preprocess_path+'MSA_%s_Trimmed.fa'%pfam_id
print('Pre-Processing MSA')
try:
print('\n\nPre-Processing MSA with Range PP Seq\n\n')
trimmer = msa_trimmer.MSATrimmer(
pp_msa_file_range, biomolecule='PROTEIN',
refseq_file=pp_ref_file_range
)
pfam_dict['ref_file'] = pp_ref_file_range
except:
print('\nDidnt work, using full PP seq\nPre-Processing MSA wth PP Seq\n\n')
# create MSATrimmer instance
trimmer = msa_trimmer.MSATrimmer(
pp_msa_file, biomolecule='protein',
refseq_file=pp_ref_file
)
pfam_dict['ref_file'] = pp_ref_file
# Adding the data_processing() curation from tools to erdca.
try:
trimmed_data = trimmer.get_msa_trimmed_by_refseq(remove_all_gaps=True)
print('Trimmed Data: \n',trimmed_data[:10])
print(np.shape(trimmed_data))
except(MSATrimmerException):
ERR = 'PPseq-MSA'
print('Error with MSA trimms\n%s\n'%ERR)
sys.exit()
#write trimmed msa to file in FASTA format
with open(trimmed_data_outfile, 'w') as fh:
for seqid, seq in trimmed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
#----------------------------------------- Run Simulation DCA --------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
print('Initializing MF DCA\n')
import numba
print(numba.__version__)
try:
#create mean-field DCA instance
mfdca_inst = meanfield_dca.MeanFieldDCA(trimmed_data_outfile,'protein',pseudocount = 0.5,seqid = 0.8)
except:
ref_seq = s[tpdb,:]
print('Using PDB defined reference sequence from MSA:\n',ref_seq)
msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path)
pfam_dict['ref_file'] = ref_file
print('Re-trimming MSA with pdb index defined ref_seq')
trimmer = msa_trimmer.MSATrimmer(
msa_file, biomolecule='protein',
refseq_file=ref_file
)
trimmed_data = trimmer.get_msa_trimmed_by_refseq(remove_all_gaps=True)
#write trimmed msa to file in FASTA format
with open(trimmed_data_outfile, 'w') as fh:
for seqid, seq in trimmed_data:
fh.write('>{}\n{}\n'.format(seqid, seq))
#create mean-field DCA instance
mfdca_inst = meanfield_dca.MeanFieldDCA(trimmed_data_outfile,'protein',pseudocount = 0.5,seqid = 0.8)
# Compute average product corrected Frobenius norm of the couplings
print('Running MF DCA')
start_time = timeit.default_timer()
# Compute DCA scores
#sorted_DI_plm = plmdca_inst.compute_sorted_DI()
# compute DCA scores summarized by Frobenius norm and average product corrected
sorted_DI_mf = mfdca_inst.compute_sorted_FN_APC()
run_time = timeit.default_timer() - start_time
print('MF run time:',run_time)
for site_pair, score in sorted_DI_mf[:5]:
print(site_pair, score)
with open('DI/MF/mf_DI_%s.pickle'%(pfam_id), 'wb') as f:
pickle.dump(sorted_DI_mf, f)
f.close()
# Save processed data dictionary and FASTA file
pfam_dict['processed_msa'] = trimmed_data
pfam_dict['msa'] = s
pfam_dict['s_ipdb'] = tpdb
input_data_file = preprocess_path+"%s_DP.pickle"%(pfam_id)
with open(input_data_file,"wb") as f:
pickle.dump(pfam_dict, f)
f.close()
#---------------------------------------------------------------------------------------------------------------------#
plotting = False
if plotting:
# Print Details of protein PDB structure Info for contact visualizeation
print('Using chain ',pdb_chain)
print('PDB ID: ', pdb_id)
from pydca.contact_visualizer import contact_visualizer
visualizer = contact_visualizer.DCAVisualizer('protein', pdb_chain, pdb_id,
refseq_file = pp_ref_file,
sorted_dca_scores = sorted_DI_mf,
linear_dist = 4,
contact_dist = 8.)
contact_map_data = visualizer.plot_contact_map()
#plt.show()
#plt.close()
tp_rate_data = visualizer.plot_true_positive_rates()
#plt.show()
#plt.close()
with open(preprocess_path+'MF_%s_contact_map_data.pickle'%(pfam_id), 'wb') as f:
pickle.dump(contact_map_data, f)
f.close()
with open(preprocess_path+'MF_%s_tp_rate_data.pickle'%(pfam_id), 'wb') as f:
pickle.dump(tp_rate_data, f)
f.close()
authors: ["evancresswell@gmail.com"] | author_id: evancresswell@gmail.com

blob_id: aa5904c918a608c59263802a3bb79e12e6b4ebc6 | directory_id: 1d5262b68f922f4ed5eeea3ec1f0e68deb15275c | content_id: 516f15212249648ef5d0a353ed03dcf9c84489a1
path: /utils/testgender.py
repo_name: jayrambhia/fisherfacerec | snapshot_id: 88baf66f2cc9d5f937f44ae04c7acf63e1116b3f | revision_id: cab1bfbfbfcd018689d6334aa694b71fbb921f29 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2016-09-06T20:12:34.796555 | revision_date: 2013-05-29T06:38:30 | committer_date: 2013-05-29T06:38:30 | github_id: null
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 767 | extension: py
content:
from SimpleCV import *
import time
def identifyGender():
f = FaceRecognizer()
#cam = Camera()
#img = cam.getImage()
img = Image("/home/jay/Visionaries/Eigen/Emma/8.jpg")
cascade = SimpleCV.__path__[0]+"/"+"Features/HaarCascades/face.xml"
feat = img.findHaarFeatures(cascade)
if feat:
crop_image = feat.sortArea()[-1].crop()
feat.sortArea()[-1].draw()
f.load(SimpleCV.__path__[0]+"/"+"Features/FaceRecognizerData/AT_T_Gender_Data.xml")
w, h = f.imageSize
crop_image = crop_image.resize(w, h)
label = f.predict(crop_image)
print label
if label == 0:
img.drawText("Female", fontsize=48)
else:
img.drawText("Male", fontsize=48)
img.show()
time.sleep(4)
identifyGender()
authors: ["jayrambhia777@gmail.com"] | author_id: jayrambhia777@gmail.com

blob_id: ff51c85b16657fa78a9f57a67a970922d89bdbe5 | directory_id: e3d9592ff05f225433e1689ec70253043a360ee2 | content_id: 7114d47ccd95f796d43020ed086675a2326804d1
path: /hackerrank/python/closures_decorators/name_directory.py
repo_name: jreiher2003/code_challenges | snapshot_id: cad28cac57b6e14ffd30d2b7fe00abdba8b3fa47 | revision_id: ac03c868b28e1cfa22d8257366e7a0f8f757ad8c | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2020-04-16T02:25:26.267418 | revision_date: 2016-12-12T01:23:56 | committer_date: 2016-12-12T01:23:56 | github_id: 58,969,218
star_events_count: 1 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 894 | extension: py
content:
from operator import itemgetter
from itertools import groupby
directory = [["Mike Thomson", 20, "M"],["Robert Bustle", 32, "M"],["Andria Bustle", 30, "F"]]
directory.sort(key=itemgetter(1))
for i,name in groupby(directory, itemgetter(1)):
for x in name:
if x[2] == "M":
print "Mr. " + x[0]
if x[2] == "F":
print "Ms. " + x[0]
def peopleformat(func):
def peopletoformat(peoples):
for i in range(len(peoples)):
temp = peoples[i].strip().split()
if temp[-1] == 'M':
flag = 'Mr. '
else:
flag = 'Ms. '
peoples[i] = [flag + ' '.join(temp[:-2]), int(temp[-2])]
return func(peoples)
return peopletoformat
@peopleformat
def peoplesort(peoples):
for x, y in sorted(peoples, key=lambda x: x[1]):
print x
authors: ["jreiher2003@yahoo.com"] | author_id: jreiher2003@yahoo.com

blob_id: 7968ce28d3c46790fb695fb1c71b8c8545582486 | directory_id: bf6172087680cb2be3fad3fa58ebfa9f04952989 | content_id: 324a4605752c8b602925c9258008754595011b16
path: /Week_01/LeetCode-easy-88-combine-list.py
repo_name: gitzhangjianqi/algorithm009-class01 | snapshot_id: 57df88c89f5dfc1bb149301ae4a9ee9fa4f8864e | revision_id: 0d565ed78e277b88973e76c74214a0eb2f2f8e5d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2022-11-25T02:05:26.894980 | revision_date: 2020-07-19T15:00:41 | committer_date: 2020-07-19T15:00:41 | github_id: 266,250,080
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-05-23T02:44:45 | gha_created_at: 2020-05-23T02:44:45 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 693 | extension: py
content:
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
"""
if not l1 :
return l2
if not l2 :
return l1
if l1.val <= l2.val :
l1.next = self.mergeTwoLists(l1.next, l2)
return l1
else :
l2.next = self.mergeTwoLists(l1, l2.next)
return l2
"""
if l1 and l2 :
if l1.val > l2.val :
l1, l2 = l2, l1
l1.next = self.mergeTwoLists(l1.next, l2)
return l1 or l2
authors: ["winzjq@outlook.com"] | author_id: winzjq@outlook.com

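Aside: a tiny driver for mergeTwoLists above. ListNode is normally supplied by LeetCode; when running the solution locally it must be defined before the Solution class (the annotations reference it). The sample lists here are illustrative:

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def build(vals):
    # Build a singly-linked list from a Python list
    head = cur = ListNode(0)
    for v in vals:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

merged = Solution().mergeTwoLists(build([1, 2, 4]), build([1, 3, 4]))
out = []
while merged:
    out.append(merged.val)
    merged = merged.next
print(out)  # [1, 1, 2, 3, 4, 4]
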
blob_id: 1c334a6eb48b37de87ba2054c80b369b1f565dfb | directory_id: dec335c8afe2e3addab8d413ed043b797b8ef4f3 | content_id: 6d5036526fc713e240406aa6e8e6e5efe9cf698e
path: /lotsofproducts.py
repo_name: tobincorporated/TobinCatalog | snapshot_id: ad1978f97432be9b4f7e663d62d9b27d564e1e60 | revision_id: 16b103bf23c471cc1c7da0f214a14fd19ae7513d | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-20T04:21:34.151349 | revision_date: 2017-04-30T22:26:21 | committer_date: 2017-04-30T22:26:21 | github_id: 89,677,855
star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 8,455 | extension: py
content:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Product, User
engine = create_engine('sqlite:///productcatalog.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Create dummy user
User1 = User(name="Zach Tobin", email="tobin.zachary@gmail.com",
picture='https://lh3.googleusercontent.com/-Nrf8Py-fzI8/AAAAAAAAAAI/AAAAAAAAA2E/MzosFqeiD8I/photo.jpg')
session.add(User1)
session.commit()
# Electronics category and its products
category1 = Category(user_id=1, name="Electronics")
session.add(category1)
session.commit()
product2 = Product(user_id=1, name="USB Cables", description="You need more USB cables. Buy them now.",
price="$3.50", category=category1)
session.add(product2)
session.commit()
product1 = Product(user_id=1, name="MotherBoard", description="You gotta build your own computer, man",
price="$200.99", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="LCD Monitor", description="A full 23in so you can see all the things in the pixels",
price="$125.50", category=category1)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Tablet Computer", description="So new, and with so many apps!",
price="$600.00", category=category1)
session.add(product3)
session.commit()
product4 = Product(user_id=1, name="Speakers", description="They bring the noise and the funk",
price="$70.99", category=category1)
session.add(product4)
session.commit()
product5 = Product(user_id=1, name="Optical Drive", description="I like DVDs",
price="$25.99", category=category1)
session.add(product5)
session.commit()
product6 = Product(user_id=1, name="Laptop", description="Coding on the go, right?",
price="$800.99", category=category1)
session.add(product6)
session.commit()
# Products for the Beverages category
category2 = Category(user_id=1, name="Beverages")
session.add(category2)
session.commit()
product1 = Product(user_id=1, name="Root Beer", description="Pretty nice tasting.",
price="$1.99", category=category2)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Cola",
description="Too indecisive for a real flavor? Try this.", price="$1.99", category=category2)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Mojito", description="Zach\'s personal specialty",
price="$12.00", category=category2)
session.add(product3)
session.commit()
product4 = Product(user_id=1, name="Manhattan", description="For the old-fashioned who don\'t want an Old Fashioned.",
price="$12.00", category=category2)
session.add(product4)
session.commit()
product5 = Product(user_id=1, name="Whisky", description="Whisky is pretty nice.",
price="$12.00", category=category2)
session.add(product5)
session.commit()
# Products for the Martial Arts category
category1 = Category(user_id=1, name="Martial Arts")
session.add(category1)
session.commit()
product1 = Product(user_id=1, name="Dogi", description="Look like a professional",
price="$28.99", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Sparring pads", description="It\'s fun to hit other people, now do it without the liability!",
price="$6.99", category=category1)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Katana", description="Slice bad guys and look cool doing it",
price="$399.95", category=category1)
session.add(product3)
session.commit()
# Products for the Tools category
category1 = Category(user_id=1, name="Tools")
session.add(category1)
session.commit()
product1 = Product(user_id=1, name="Hammer", description="It\'s a hammer",
price="$12.99", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Drill", description="More powerful than yours, so buy it.",
price="$45.99", category=category1)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Awl",
description="Poke holes with the best of them",
price="$4.50", category=category1)
session.add(product3)
session.commit()
product4 = Product(user_id=1, name="Utility knife", description="Don\'t cut yourself.",
price="$6.95", category=category1)
session.add(product4)
session.commit()
product5 = Product(user_id=1, name="Bottle Opener", description="For use with beverages",
price="$0.95", category=category1)
session.add(product5)
session.commit()
# Products for the Sewing category
category1 = Category(user_id=1, name="Sewing")
session.add(category1)
session.commit()
product1 = Product(user_id=1, name="Needle", description="For use with thread",
price="$0.95", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Thread", description="For use with needle",
price="$4.95", category=category1)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Fabric", description="For use with needle and thread",
price="$6.95", category=category1)
session.add(product3)
session.commit()
product4 = Product(user_id=1, name="Scissors",
description="Cut fabric with these ultra shears", price="$3.95", category=category1)
session.add(product4)
session.commit()
# Products for the Food category
category1 = Category(user_id=1, name="Food")
session.add(category1)
session.commit()
product9 = Product(user_id=1, name="Pizza",
description="Even if you\'re not in college anymore", price="$8.99", category=category1)
session.add(product9)
session.commit()
product1 = Product(user_id=1, name="Cookies", description="With chocolate chips",
price="$2.99", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Burger", description="With cheese and pickles and secret sauce",
price="$4.95", category=category1)
session.add(product2)
session.commit()
product3 = Product(user_id=1, name="Soup",
description="Eat when sick", price="$1.50", category=category1)
session.add(product3)
session.commit()
product4 = Product(user_id=1, name="Chicken", description="Pluck first",
price="$8.95", category=category1)
session.add(product4)
session.commit()
# Products for the Office Supplies category
category1 = Category(user_id=1, name="Office Supplies")
session.add(category1)
session.commit()
product1 = Product(user_id=1, name="Pencil",
description="Mechanical pencils are great.", price="$5.95", category=category1)
session.add(product1)
session.commit()
product2 = Product(user_id=1, name="Pen", description="Pack of 100 because pens are awful. ",
price="$7.99", category=category1)
session.add(product2)
session.commit()
product1 = Product(user_id=1, name="Printer Paper",
description="Print or scribble on this stuff.",
price="$5.95",
category=category1)
session.add(product1)
session.commit()
product1 = Product(user_id=1, name="Tape",
description="Now you can hold any two things together indefinitely", price="$6.95", category=category1)
session.add(product1)
session.commit()
product1 = Product(user_id=1, name="Stapler",
description="Better than tape", price="$8.25", category=category1)
session.add(product1)
session.commit()
print("added products!")
|
[
"ztobin@pasedfoundation.org"
] |
ztobin@pasedfoundation.org
|
68c9ff4bb99cdab0a7d9a09497cb3c231c2e7c33
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/stack-data/stack_data/version.py
|
aece342d9345098161555c8a830d1da163b62d9d
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
Python
| false
| false
| 22
|
py
|
__version__ = '0.6.2'
|
[
"robot-contrib@yandex-team.com"
] |
robot-contrib@yandex-team.com
|
a2b16bb15a6958e40c055db5c18481270228ac8d
|
2ec3224f7cd4222a339003cba457fa304103ee38
|
/sheepwall_app/migrations/0018_auto_20170320_0721.py
|
e17fed5e9b43b6607cfb2c22ec4970751a3e8cf3
|
[] |
no_license
|
flachi1109/sheepwall_prj
|
459f79baeaea462637880d05e7f51afab6ab8f4d
|
04f7a3c4402da080662c57802a1bd232a6c3e694
|
refs/heads/master
| 2021-01-19T22:47:22.560681
| 2017-03-30T08:16:26
| 2017-03-30T08:16:26
| 83,778,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-20 07:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sheepwall_app', '0017_auto_20170317_0154'),
]
operations = [
migrations.AlterField(
model_name='wifiuser',
name='wechat_head_img',
field=models.CharField(max_length=100, null=True),
),
]
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
ffcd14a30e3ead87c6218b1fc34225dbf111ce7c
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/1932.py
|
70a055e7fe5f3b5671c1e9c2dfeb49c63ce7a92d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
import collections
def flip(s, k, i):
tmp = [t for t in s]
tmp[i:i+k] = [not t for t in tmp[i:i+k]]
return tmp
def check_goal(s):
for i in s:
if not i:
return False
return True
def convstr(v):
a = ''
for i in v:
if i:
a = a + '+'
else:
a = a + '-'
return a
def goodness(v):
return sum([1 if i else 0 for i in v])
def flip_pancakes(s, k):
s = [i == '+' for i in s]
state = [s]
q = collections.deque()
q.append(state)
maxlen = 0
lookupdict = {}
lookupdict[convstr(s)] = 0
while len(q) > 0:
tmp = q.popleft()
if check_goal(tmp[-1]):
return len(tmp) - 1
else:
if len(tmp) > maxlen:
maxlen = len(tmp)
# print "maxlen: %s" % maxlen
# print("processing %s" % convstr(tmp[-1]))
for i in range(len(s) - k + 1):
before = goodness(tmp[-1])
new = flip(tmp[-1], k, i)
after = goodness(new)
if new not in tmp:
if convstr(new) in lookupdict:
if len(tmp) >= lookupdict[convstr(new)]:
continue
lookupdict[convstr(new)] = len(tmp)
# print("adding %s" % convstr(new))
tmptmp = [i for i in tmp]
tmptmp.append(new)
q.append(tmptmp)
if after - before == k:
break
return 'IMPOSSIBLE'
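# Hedged note (added): flip_pancakes() is a breadth-first search over sequences
# of k-wide flips, so the first all-'+' state popped gives the answer, and
# lookupdict prunes states already reached by an equal-or-shorter path.
# Tiny cases traceable by hand:
#   flip_pancakes("-", 1)   # -> 1 (flip the single pancake once)
#   flip_pancakes("-+", 2)  # -> 'IMPOSSIBLE' (flipping both can never give "++")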
if __name__ == '__main__':
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(raw_input()) # read a line with a single integer
for i in xrange(1, t + 1):
        s, k = raw_input().split(" ")  # read the pancake string and the flipper size K
print "Case #{}: {}".format(i, flip_pancakes(s, int(k)))
# check out .format's specification for more formatting options
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
6c296a7b817934c96074972917480cdb5396f60a
|
723238ea8ab1ffee491a845e0dd7def2d7d4aac2
|
/LeetCode/219. Contains Duplicate II.py
|
2758c86c305a8ae649b90f18d5d263d99af09647
|
[
"MIT"
] |
permissive
|
QinganZhao/LXXtCode
|
9657e5acee47bbdc8374b4861d9472bb3e829e23
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
refs/heads/master
| 2021-06-06T21:58:36.922170
| 2020-02-24T16:32:38
| 2020-02-24T16:32:38
| 146,527,448
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
class Solution:
def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:
dicDis = {}
dicInd = {}
for i, num in enumerate(nums):
if num in dicInd:
dicDis[num] = min(dicDis.get(num, float('inf')),
i - dicInd[num])
dicInd[num] = i
            # print(dicDis)  # debugging aid; not needed for the result
if dicDis:
return min(map(lambda x: x[1], dicDis.items())) <= k
return False
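# Hedged examples (added): dicDis keeps, per value, the smallest gap between
# repeated indices, so for instance:
#   Solution().containsNearbyDuplicate([1, 2, 3, 1], 3)  # -> True  (indices 0 and 3)
#   Solution().containsNearbyDuplicate([1, 2, 3, 1], 2)  # -> False (smallest gap is 3)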
|
[
"qingan_zhao@berkeley.edu"
] |
qingan_zhao@berkeley.edu
|
c545c63ad0db68fee85f8936998fc21f674f2861
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20191111/example_handofcats2/09int-literals.py
|
fd4131f10f741a821e2dfcd1753b33badea9aa2c
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 216
|
py
|
import typing as t
import typing_extensions as tx
from handofcats import as_command
@as_command
def run(*, alpha: tx.Literal[1, 2, 3, 4, 5, 6, 7, 8, 9], ps: t.List[int]) -> None:
print([alpha * p for p in ps])
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
c2374dd18b89a808840fe34d7fbed9ce6f93df83
|
70e4aee0047702e332f0c2b1d352d512592aa39b
|
/users/views.py
|
c5c8f2abd710e2f611054a2f64804f7907cbe05a
|
[] |
no_license
|
xSebeq/messaging-app
|
9be2a8cb5be53ad555eb0511817940c4ac8c0ce1
|
6ac9017277d8c2816314b0f87bee6073bb8fb6d2
|
refs/heads/master
| 2023-04-08T14:24:10.110062
| 2021-04-21T19:31:10
| 2021-04-21T19:31:10
| 360,276,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from django.urls import reverse_lazy
from django.views import generic
from . import forms
class SignUp(generic.CreateView):
form_class = forms.CustomUserCreationForm
success_url = reverse_lazy('home')
template_name = 'signup.html'
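# Hedged wiring sketch (assumption, not part of this file): a project urls.py
# would typically expose this view along the lines of
#   from django.urls import path
#   from users.views import SignUp
#   urlpatterns = [path('signup/', SignUp.as_view(), name='signup')]
# and the 'home' URL name referenced by success_url must be defined elsewhere.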
|
[
"sebeq245@interia.eu"
] |
sebeq245@interia.eu
|
2611990ec36bf4e8ef68994e842f35123977aa97
|
db7fff5546d371e15ecee344a510e42c5551f841
|
/lldb_python/douyu/dy_commands.py
|
f5c8e9c5f5352ee7a9d63838dc6581afd889aae1
|
[
"MIT"
] |
permissive
|
MrChens/m_note
|
13f9a5276267c3e3484db7f3b659a797c07e1171
|
b279125cbcb8bfcb32532776660224237e3e4ed5
|
refs/heads/master
| 2020-03-11T18:06:29.811371
| 2019-05-28T02:57:53
| 2019-05-28T02:57:53
| 130,167,354
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
def dyBreak(debugger, command, result, internal_dict):
    # the following commands show how to set breakpoints from an LLDB Python command
    # debugger.HandleCommand('br s -r \'\[BluetoothVC babyDelegate\]$\'')
    # debugger.HandleCommand('br s -n \'[NSData(AESAdditions) AES256EncryptWithKey:iv:]\'')
    pass  # a function body is required; comments alone would be a SyntaxError
|
[
"mr_chens@outlook.com"
] |
mr_chens@outlook.com
|
685cca814fdb67d2a5dbdcb01f5ced170c368dea
|
03e41507aa7ec99611c75ae02a6c592bbf154dee
|
/python/pose_publisher.py
|
a6f0fa6c3fb5d366ef351c2dadfcfa4e778c6156
|
[] |
no_license
|
Kanazawanaoaki/zisyupuro-memo
|
5cfb4acbe7aaea4d15d6a5f8c873896c781cbe3e
|
b64e51aaf30d902853ae38bcef24a17f7e799cfd
|
refs/heads/master
| 2020-09-21T22:22:12.501556
| 2020-01-10T01:53:13
| 2020-01-10T01:53:13
| 224,953,059
| 0
| 0
| null | 2020-01-10T01:53:14
| 2019-11-30T03:16:29
|
Makefile
|
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
#!/usr/bin/env python
import math
import rospy
import sys
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
import cv2
# point_list = []
def send_joint_position():
try:
# while True:
print(1)
rospy.init_node('send_joint_position')
pub = rospy.Publisher('/2d_human_joint',JointTrajectory,queue_size=1)
rospy.sleep(1)
for i in range(50):
joint_trajectory = JointTrajectory()
joint_trajectory.header.stamp = rospy.Time.now()
joint_trajectory.joint_names = ['r-shoulder','r-elbow','l-shoulder','l-elbow','r-hip-joint','r-knee','l-hip-joint','l-knee']
point = JointTrajectoryPoint()
point.positions = [-60+2*i,-20+2*i,60-2*i,20-2*i,60,30,-60,-30]#point_list
joint_trajectory.points.append(point)
pub.publish(joint_trajectory)
rospy.sleep(1)
if cv2.waitKey(0) == 27:
print("finish")
break
except KeyboardInterrupt:
print('!!FINISH!!')
sys.exit(0)
if __name__ == '__main__':
try:
send_joint_position()
except rospy.ROSInterruptException: pass
|
[
"naoaki65k@gmail.com"
] |
naoaki65k@gmail.com
|
0f7d89b1208485b987a21b984253dc228fa4dc18
|
74ed8d533e86d57c7db9eca879a9fb5b979b8eaf
|
/Punctuators/model/nns.py
|
91727b37cd5f4b32ec1e7f6da0a27bea156b30ad
|
[
"MIT"
] |
permissive
|
GyxChen/AmusingPythonCodes
|
97c5a2080d47399080df005a0643eddb56bceb25
|
388e90c78c67b79c23b4a8fc1ebe29f26394a54b
|
refs/heads/master
| 2020-06-23T12:03:56.708448
| 2019-05-15T05:19:57
| 2019-05-15T05:19:57
| 198,617,528
| 0
| 1
| null | 2019-07-24T10:57:29
| 2019-07-24T10:57:29
| null |
UTF-8
|
Python
| false
| false
| 7,694
|
py
|
import tensorflow as tf
from functools import reduce
from operator import mul
# from tensorflow.python.ops.rnn_cell_impl import _linear
# from tensorflow.python.util import nest
def dot_attention(inputs, memory, hidden, keep_prob=1.0, is_train=None, scope="dot_attention"):
with tf.variable_scope(scope):
d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train)
d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
with tf.variable_scope("attention"):
inputs_ = tf.nn.relu(dense(d_inputs, hidden, use_bias=False, scope="inputs"))
memory_ = tf.nn.relu(dense(d_memory, hidden, use_bias=False, scope="memory"))
outputs = tf.matmul(inputs_, tf.transpose(memory_, [0, 2, 1])) / (hidden ** 0.5)
logits = tf.nn.softmax(outputs)
outputs = tf.matmul(logits, memory)
res = tf.concat([inputs, outputs], axis=-1)
with tf.variable_scope("gate"):
dim = res.get_shape().as_list()[-1]
d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))
return res * gate
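# Hedged shape note (added): with inputs of shape [batch, len_q, d_in] and
# memory of shape [batch, len_m, d_mem], the scaled dot-product scores above are
# [batch, len_q, len_m], the attended memory is [batch, len_q, d_mem], and the
# gated result returned is [batch, len_q, d_in + d_mem].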
def multi_conv1d(in_, filter_sizes, heights, padding, is_train=None, keep_prob=None, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for i, (filter_size, height) in enumerate(zip(filter_sizes, heights)):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, is_train=is_train, keep_prob=keep_prob,
scope="conv1d_{}".format(i))
outs.append(out)
concat_out = tf.concat(axis=2, values=outs)
return concat_out
def conv1d(in_, filter_size, height, padding, is_train=None, keep_prob=None, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype=tf.float32)
bias = tf.get_variable("bias", shape=[filter_size], dtype=tf.float32)
strides = [1, 1, 1, 1]
if is_train is not None and keep_prob is not None:
in_ = dropout(in_, keep_prob, is_train)
# [batch, max_len_sent, max_len_word / filter_stride, char output size]
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias
out = tf.reduce_max(tf.nn.relu(xxc), axis=2) # max-pooling, [-1, max_len_sent, char output size]
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, keep_prob=None, is_train=None):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx),
keep_prob=keep_prob, is_train=is_train)
prev = cur
return cur
def highway_layer(inputs, use_bias=True, bias_start=0.0, keep_prob=1.0, is_train=False, scope=None):
with tf.variable_scope(scope or "highway_layer"):
hidden = inputs.get_shape().as_list()[-1]
trans = tf.cond(tf.convert_to_tensor(is_train), lambda: tf.nn.dropout(inputs, keep_prob), lambda: inputs)
trans = tf.layers.dense(trans, units=hidden, use_bias=use_bias, bias_initializer=tf.constant_initializer(
bias_start), activation=None)
trans = tf.nn.relu(trans)
gate = tf.cond(tf.convert_to_tensor(is_train), lambda: tf.nn.dropout(inputs, keep_prob), lambda: inputs)
gate = tf.layers.dense(gate, units=hidden, use_bias=use_bias, bias_initializer=tf.constant_initializer(
bias_start), activation=None)
gate = tf.nn.sigmoid(gate)
outputs = gate * trans + (1 - gate) * inputs
return outputs
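# Hedged note (added): the layer above computes the usual highway mapping
#   y = g * relu(W_t x + b_t) + (1 - g) * x,  with  g = sigmoid(W_g x + b_g),
# so both dense layers must preserve the input's last dimension, which is why
# `hidden` is read from inputs.get_shape() rather than passed in.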
'''def highway_layer(arg, bias, bias_start=0.0, scope=None, keep_prob=None, is_train=None):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', keep_prob=keep_prob, is_train=is_train)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', keep_prob=keep_prob, is_train=is_train)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, keep_prob=None, is_train=None):
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("args must be specified")
if not nest.is_sequence(args):
args = [args]
flat_args = [flatten(arg, 1) for arg in args]
if keep_prob is not None and is_train is not None:
flat_args = [tf.cond(is_train, lambda: tf.nn.dropout(arg, keep_prob), lambda: arg) for arg in flat_args]
with tf.variable_scope(scope or 'linear'):
flat_out = _linear(flat_args, output_size, bias, bias_initializer=tf.constant_initializer(bias_start))
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
return out'''
def dropout(x, keep_prob, is_train, noise_shape=None, seed=None, name=None):
with tf.name_scope(name or "dropout"):
if keep_prob < 1.0:
out = tf.cond(is_train, lambda: tf.nn.dropout(x, keep_prob, noise_shape=noise_shape, seed=seed), lambda: x)
return out
return x
def dense(inputs, hidden, use_bias=True, activation=None, scope="dense"):
with tf.variable_scope(scope):
flat_inputs = flatten(inputs, keep=1)
w = tf.get_variable("weight", [inputs.get_shape().as_list()[-1], hidden], dtype=tf.float32)
res = tf.matmul(flat_inputs, w)
if use_bias:
b = tf.get_variable("bias", [hidden], initializer=tf.constant_initializer(0.))
res = tf.nn.bias_add(res, b)
if activation is not None:
res = activation(res)
res = reconstruct(res, ref=inputs, keep=1)
return res
def flatten(tensor, keep):
fixed_shape = tensor.get_shape().as_list()
start = len(fixed_shape) - keep
left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
flat = tf.reshape(tensor, out_shape)
return flat
def reconstruct(tensor, ref, keep, remove_shape=None):
ref_shape = ref.get_shape().as_list()
tensor_shape = tensor.get_shape().as_list()
ref_stop = len(ref_shape) - keep
tensor_start = len(tensor_shape) - keep
if remove_shape is not None:
tensor_start = tensor_start + remove_shape
pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))]
target_shape = pre_shape + keep_shape
out = tf.reshape(tensor, target_shape)
return out
def viterbi_decode(logits, trans_params, sequence_lengths, scope=None):
with tf.variable_scope(scope or 'viterbi_decode'):
viterbi_sequences = []
# iterate over the sentences due to no batching in viterbi_decode
for logit, sequence_length in zip(logits, sequence_lengths):
logit = logit[:sequence_length] # keep only the valid steps
viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(logit, trans_params)
viterbi_sequences += [viterbi_seq]
return viterbi_sequences
|
[
"isaac.changhau@gmail.com"
] |
isaac.changhau@gmail.com
|
08fc411947815fdeba423b9bee79371b167575f1
|
e114120099ad52f5801bceddac6f4394f0a7cb45
|
/core/admin.py
|
b68166055fd9fe328322c253cd46e80fb0e073f2
|
[] |
no_license
|
MtacDev/BuscadorPeliculas
|
c0160176a512d99bbd00bd92c3dad502dbb2396c
|
06dccd4df8e140764c2363a65fe6e9c96c08e6a5
|
refs/heads/main
| 2022-12-28T19:49:55.581288
| 2020-10-12T17:22:41
| 2020-10-12T17:22:41
| 303,459,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
from django.contrib import admin
from core.models import pelis
# Register your models here.
admin.site.register(pelis)
|
[
"noreply@github.com"
] |
MtacDev.noreply@github.com
|
6477542849ae0829feddbe93e42f5821bbb42737
|
208063a41d5d99f3f3b33321d90c39c9e4e5380f
|
/lib/imutils.py
|
a1436dc47ed3b6adea4db7c54a4bbe1bbe3c2325
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lsst-camera-dh/mutils
|
6b2d87094413eb3c6a7cf9b0e4f0187f8164f279
|
14286b262ce69201c610647bb0e302726288568a
|
refs/heads/master
| 2023-08-05T06:55:40.527502
| 2023-07-28T22:34:42
| 2023-07-28T22:34:42
| 252,312,670
| 0
| 1
|
NOASSERTION
| 2020-09-28T21:14:18
| 2020-04-01T23:54:48
|
Python
|
UTF-8
|
Python
| false
| false
| 50,023
|
py
|
"""
Common image utility functions
"""
import re
import sys
import logging
import datetime
import os.path
from astropy.io import fits
from astropy import stats
from astropy import wcs
from astropy.convolution import convolve, Gaussian1DKernel, interpolate_replace_nans
import numpy as np
import math
from scipy.ndimage import minimum_filter1d, median_filter, percentile_filter
from scipy import sparse
from scipy.sparse.linalg import spsolve
from scipy.interpolate import UnivariateSpline
def create_output_hdulist(hdulisti: fits.HDUList, argv: list) -> fits.HDUList:
"""
Create output HDUList from input HDUList for building new image
that is the result of processing the inputs (eg. not a blank).
The Primary header of the input HDUList is used to create the
Primary header of the output HDUList by appending to the bare
output HDUList.
DATE and an HISTORY header cards added to record what was done
This is generally the first step before subsequent ops to modify
data arrays and changing additional header keys.
"""
logging.debug("creating output hdulist")
# Create the output image, copy and update header comments, history
hdulisto = fits.HDUList(fits.PrimaryHDU(None, hdulisti[0].header))
hdu = hdulisto[0]
hdr = hdu.header
cstr = hdr.comments["DATE"] # copy comment
hdr.rename_keyword("DATE", "DATEORIG", force=True)
hdr.comments["DATEORIG"] = "Previous file date/time"
# FITS date format: 'yyyy-mm-ddTHH:MM:SS[.sss]'
dtstr = datetime.datetime.utcnow().isoformat(timespec="milliseconds")
hdr.insert("DATEORIG", ("DATE", dtstr, cstr))
# add HISTORY lines
hdr.add_history(
"Header written by {} at: {}".format(os.path.basename(argv[0]), dtstr)
)
hdr.add_history("CMD: {} {}".format(os.path.basename(argv[0]), " ".join(argv[1:])))
return hdulisto
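# Hedged usage sketch (added; "raw_image.fits" is a hypothetical file that must
# already carry a DATE card in its primary header):
#   hdul_in = fits.open("raw_image.fits")
#   hdul_out = create_output_hdulist(hdul_in, sys.argv)
# after which hdul_out[0].header carries DATEORIG, a fresh DATE, and HISTORY.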
def init_image_hdu(
hdui: fits.ImageHDU, hdulisto: fits.HDUList, region: tuple = None
) -> fits.ImageHDU:
"""
Append a new image HDU to output image using input HDU as a template.
Copy the header and set the size/region specs in preparation for data
to be added later.
Returns
-------
hduo: fits.ImageHDU That was created during the call.
"""
# create the output hdu from the master, (primary already exists)
if not isinstance(hdui, fits.PrimaryHDU):
hdri = hdui.header.copy()
hdulisto.append(fits.ImageHDU(None, hdri, hdri["EXTNAME"]))
hduo = hdulisto[len(hdulisto) - 1]
hdro = hduo.header
hdro["NAXIS"] = 2
hdro.set("NAXIS1", hdri["NAXIS1"], "size of the n'th axis", after="NAXIS")
hdro.set("NAXIS2", hdri["NAXIS2"], "size of the n'th axis", after="NAXIS1")
hdro["BITPIX"] = -32
# make changes to account for region of interest subimage
if region and region != (None, None):
logging.debug("region = {}".format(region))
naxis2 = (region[0].stop or len(hdui.data[:, 0])) - (region[0].start or 0)
naxis1 = (region[1].stop or len(hdui.data[0, :])) - (region[1].start or 0)
hdro.set("NAXIS1", naxis1, "size of the n'th axis", after="NAXIS")
hdro.set("NAXIS2", naxis2, "size of the n'th axis", after="NAXIS1")
# update any wcses
wcses = wcs.find_all_wcs(hdro, fix=False)
for w in wcses:
wreg = w.slice(region)
wreghdro = wreg.to_header()
for card in wreghdro.cards:
key = card.keyword
value = card.value
comment = card.comment
hdro.set(key, value, comment)
# logging.debug('output header:\n%s\n', hdro.tostring())
return hduo
def parse_region(reg: str) -> tuple:
"""
Return a pair of slices (slice1, slice2) corresponding
    to the region given as input in numpy slice string format.
If the region can't be parsed sys.exit() is called
"""
try:
slices = str_to_slices(reg)
except ValueError as ve:
logging.error("ValueError: %s", ve)
logging.error("Bad region spec: %s", reg)
sys.exit(1)
if len(slices) != 2:
logging.error("Bad region spec: %s", reg)
sys.exit(1)
return slices
def parse_iraf_region(reg: str) -> tuple:
"""
Return a pair of slices (slice1, slice2) corresponding
    to the region given as input in ~IRAF format.
If the region can't be parsed (None, None) is returned
"""
# peel off any outer brackets
reg = re.sub(r"^\[([^\]]*)\]$", r"\1", reg)
#
# reg = [x1:x2,y1:y2] -- standard rectangle)
if re.match(r"([0-9]*):([0-9]+),\s*([0-9]+):([0-9]+)$", reg):
(x1, x2, y1, y2) = re.match(
r"([0-9]+):([0-9]+),\s*([0-9]+):([0-9]+)$", reg
).groups()
retval = (slice(int(y1) - 1, int(y2)), slice(int(x1) - 1, int(x2)))
#
# reg = [x0,y1:y2] -- single column section)
elif re.match(r"([0-9]+),\s*([0-9]+):([0-9]+)$", reg):
(x0, y1, y2) = re.match(r"([0-9]+),\s*([0-9]+):([0-9]+)$", reg).groups()
        retval = (slice(int(y1) - 1, int(y2)), slice(int(x0) - 1, int(x0)))
#
# reg = [*,y1:y2]) -- row selection
elif re.match(r"(\*),\s*([0-9]+):([0-9]+)$", reg):
(x, y1, y2) = re.match(r"(\*),\s*([0-9]+):([0-9]+)$", reg).groups()
retval = (slice(int(y1) - 1, int(y2)), slice(None, None))
#
# reg = [x1:*,y1:y2]) -- row selection w/cols to end
elif re.match(r"([0-9]+):\s*(\*),\s*([0-9]+):([0-9]+)$", reg):
(x1, x2, y1, y2) = re.match(
r"([0-9]+):\s*(\*),\s*([0-9]+):([0-9]+)$", reg
).groups()
retval = (slice(int(y1) - 1, int(y2)), slice(int(x1) - 1, None))
#
# reg = [*:x1,y1:y2]) -- row selection w/cols from beginning
elif re.match(r"(\*):\s*([0-9]+),\s*([0-9]+):([0-9]+)$", reg):
(x1, x2, y1, y2) = re.match(
r"(\*):\s*([0-9]+),\s*([0-9]+):([0-9]+)$", reg
).groups()
retval = (slice(int(y1) - 1, int(y2)), slice(None, int(x2) - 1))
#
# reg = [x0,y0] -- single pixel
elif re.match(r"([0-9]+),\s*([0-9]+)$", reg):
(x0, y0) = re.match(r"([0-9]+),\s*([0-9]+)$", reg).groups()
        retval = (slice(int(y0) - 1, int(y0)), slice(int(x0) - 1, int(x0)))
#
# reg = [x1:x2,y0] -- single row section
elif re.match(r"([0-9]+):([0-9]+),\s*([0-9]+)$", reg):
(x1, x2, y0) = re.match(r"([0-9]+):([0-9]+),\s*([0-9]+)$", reg).groups()
        retval = (slice(int(y0) - 1, int(y0)), slice(int(x1) - 1, int(x2)))
#
# reg = [x1:x2,*] -- column selection
elif re.match(r"([0-9]+):([0-9]+),\s*(\*)$", reg):
(x1, x2, y) = re.match(r"([0-9]+):([0-9]+),\s*(\*)$", reg).groups()
retval = (slice(None, None), slice(int(x1) - 1, int(x2)))
#
# reg = [*,*] # redundant, for completeness)
elif re.match(r"(\*),\s*(\*)$", reg):
(x, y) = re.match(r"(\*),\s*(\*)$", reg).groups()
retval = (slice(None, None), slice(None, None))
#
# no match found, bad spec
else:
logging.error("bad region spec: '%s' no match produced", reg)
retval = (None, None)
#
return retval
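# Hedged examples (added): IRAF regions are 1-based and inclusive while numpy
# slices are 0-based and exclusive, hence the -1 adjustments above, e.g.
#   parse_iraf_region("[1:10,1:20]")  # -> (slice(0, 20), slice(0, 10))
#   parse_iraf_region("[*,3:5]")      # -> (slice(2, 5), slice(None, None))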
def get_requested_hduids(
hdulist: fits.HDUList, hdunames: list, hduindices: list
) -> list:
"""
Return a list of image hduids requested in optlist or all by default.
Check that they exist in hdulist. Requested hduids that
don't exist are skipped. Redundant values are dropped.
"""
logging.debug("get_requested_hduids() called")
hduids = [] # list of candidate hduids
for name in hdunames or []:
for hdu in hdulist:
if re.search(name, hdu.name):
try:
hduid = hdulist.index_of(hdu.name)
if hduid not in hduids:
hduids.append(hduid)
except KeyError as ke:
logging.error("KeyError: %s", ke)
logging.error("HDU[%s] not found, skipping", hdu.name)
for hduid in hduindices or []:
try:
hdu = hdulist[hduid]
if hduid not in hduids:
hduids.append(hduid)
except IndexError:
logging.error("HDU[%d] not found, skipping", hduid)
if not hduindices and not hdunames:
for hdu in hdulist:
hduids.append(hdulist.index(hdu))
if hduids:
return hduids
return None
def get_requested_image_hduids(
hdulist: fits.HDUList, hdunames: list, hduindices: list
) -> list:
"""
Return a list of image hduids requested in hdunames or all by default.
Check that they exist in hdulist and have data. Requested hduids that
don't exist are skipped. Redundant values are dropped.
"""
logging.debug("get_requested_hduids() called")
chduids = [] # list of candidate hduids
for name in hdunames or []:
for hdu in hdulist:
if re.search(name, hdu.name):
try:
hduid = hdulist.index_of(hdu.name)
if hduid not in chduids:
chduids.append(hduid)
except KeyError as ke:
logging.error("KeyError: %s", ke)
logging.error("HDU[%s] not found, skipping", hdu.name)
for hduid in hduindices or []:
try:
hdu = hdulist[hduid]
if hduid not in chduids:
chduids.append(hduid)
except IndexError:
logging.error("HDU[%d] not found, skipping", hduid)
if not hduindices and not hdunames:
for hdu in hdulist:
chduids.append(hdulist.index(hdu))
# Validate the list of candidate HDUs, keep those with pixels
hduids = []
for hduid in chduids:
hdu = hdulist[hduid]
if isinstance(hdu, fits.PrimaryHDU): # check for data
hdr = hdu.header
if hdr.get("NAXIS") == 2:
if hdr.get("NAXIS1") and hdr.get("NAXIS2"):
naxis1 = hdr.get("NAXIS1")
naxis2 = hdr.get("NAXIS2")
if naxis1 * naxis2 > 0:
logging.debug(
"adding %s with index %d to hduid list", hdu.name, hduid
)
hduids.append(hduid)
elif isinstance(hdu, (fits.ImageHDU, fits.CompImageHDU)):
logging.debug("adding %s with index %d to hduid list", hdu.name, hduid)
hduids.append(hduid)
else:
logging.debug(
"%s with index %d is not type (Comp)ImageHDU", hdu.name, hduid
)
if hduids:
return hduids
return None
def get_data_oscan_slices(hdu: fits.FitsHDU) -> tuple:
"""
Get datasec, serial/parallel overscan as slice specifications.
Also double overscan (and later underscan?)
Given an hdu, uses header keys to infer slice specs. If a particular
region cannot be obtained a spec of (None, None) is returned for that
region.
    Returns a tuple of slice definitions (datasec, soscan, poscan, doscan).
The serial overscan is assumed to be at the end of each row if present.
"""
# first get serial and parallel overscan region defs
hdr = hdu.header
try:
dstr = hdr["DATASEC"]
except KeyError as ke:
logging.debug("KeyError: %s required", ke)
        return (None, None, None, None)
logging.debug("EXTNAME=%s DATASEC=%s", hdr.get("EXTNAME"), dstr)
try:
n1 = hdr["NAXIS1"]
except KeyError as ke:
logging.error("KeyError: %s required", ke)
        return (None, None, None, None)
try:
n2 = hdr["NAXIS2"]
except KeyError as ke:
logging.error("KeyError: %s required", ke)
        return (None, None, None, None)
# get DATASEC region
datasec = parse_iraf_region(dstr)
if datasec == (None, None):
        return (None, None, None, None)
(p1, p2) = (datasec[0].start or 0, datasec[0].stop or len(hdu.data[:, 0]))
(s1, s2) = (datasec[1].start or 0, datasec[1].stop or len(hdu.data[0, :]))
if n1 > s2:
soscan = (slice(0, n2), slice(s2, n1))
else: # no serial overscan
soscan = (slice(None), slice(None))
if n2 > p2:
poscan = (slice(p2, n2), slice(0, n1))
else:
poscan = (slice(None), slice(None))
doscan = (poscan[0], soscan[1])
return (datasec, soscan, poscan, doscan)
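# Hedged worked example (hypothetical geometry, added for illustration): with
# NAXIS1=576, NAXIS2=2048 and DATASEC='[11:522,1:2002]' this returns
#   datasec = (slice(0, 2002), slice(10, 522))
#   soscan  = (slice(0, 2048), slice(522, 576))     # columns past DATASEC
#   poscan  = (slice(2002, 2048), slice(0, 576))    # rows past DATASEC
#   doscan  = (slice(2002, 2048), slice(522, 576))  # their overlap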
def str_to_slices(sliceStr: str) -> tuple:
"""
Parse a string containing one or more slice specs separated by commas
Returns a tuple of slice() objects
rewrite of:
https://stackoverflow.com/questions/43089907/
using-a-string-to-define-numpy-array-slice
to make it straightforward albeit not nearly as elegant
"""
# peel off any outer brackets
sliceStr = re.sub(r"^\[([^\]]*)\]$", r"\1", sliceStr)
slices = []
for sspec in sliceStr.split(","):
if ":" not in sspec:
slice_args = [int(sspec), int(sspec) + 1]
slices.append(slice(*tuple(slice_args)))
else:
slice_args = []
for item in sspec.strip().split(":"):
if item:
slice_args.append(int(item))
else:
slice_args.append(None)
slices.append(slice(*tuple(slice_args)))
return tuple(slices)
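# Hedged examples (added): each comma-separated field follows numpy slice
# syntax, e.g.
#   str_to_slices("10:20,5")   # -> (slice(10, 20, None), slice(5, 6, None))
#   str_to_slices("[:100,:]")  # -> (slice(None, 100, None), slice(None, None, None))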
def subtract_bias(stype: str, ptype: str, hdu: fits.ImageHDU, bad_segs: list = None):
"""
Subtract a bias estimate (using overscans) from an hdu.
Operates in-place on the Image.HDU parameter
    Choices are 'none', 'mean', 'median', 'by(row|col)', 'by(row|col)filter',
    'by(row|col)smooth', and (for stype only) 'dbloscan'
Bias estimates are calculated using DATASEC to infer the overscan regions.
bad_segs are determined for bycolfilter choice if None or can be passed in as a
special case (used by xtalk)
The fits.ImageHDU is operated on directly
"""
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
logging.debug("bias stype=%s ptype=%s", stype, ptype)
pcnt = 30.0 # percentile for signal est
max_rn = 7.0
rn_est = min(np.std(hdu.data[poscan[0], soscan[1]]), max_rn)
# serial overscan pass
if stype:
if stype in {"byrow", "byrowsmooth"}:
so_med = np.percentile(hdu.data[soscan][:, 5:], 50, axis=1)
so_c14 = np.max(hdu.data[soscan][:, 1:4], axis=1)
# clean up any crazy rows (eg overflow from hot column, serial saturation)
so_med_med = np.median(so_med)
so_med_bad_ind = np.nonzero(so_c14 - so_med_med > 100 * rn_est)
logging.debug("anomalous soscan rows: %s", so_med_bad_ind)
if np.size(so_med_bad_ind):
so_med[so_med_bad_ind] = np.nan
# optionally smooth the 1-d array to be subtracted
if stype == "byrowsmooth":
logging.debug("smoothing serial overscan with Gaussian1DKernel")
kernel = Gaussian1DKernel(1)
so_med = convolve(so_med, kernel, boundary="extend")
so_med[np.isnan(so_med)] = so_med_med # bad rows use median of others
logging.debug("mean serial overscan subtraction: %d", np.median(so_med))
logging.debug("first 20 rows: \n%s", so_med[0:20])
# convert shape from (n,) to (n, 1)
so_med = so_med.reshape(np.shape(so_med)[0], 1)
hdu.data = hdu.data - so_med
elif stype == "mean":
hdu.data = hdu.data - np.mean(hdu.data[soscan][:, 5:])
elif stype == "median":
hdu.data = hdu.data - np.median(hdu.data[soscan][:, 5:])
# elif stype == "dbloscan":
elif re.match(r"^dbl", stype):
logging.debug(
"dbloscan = np.median(hdu.data[%d:, %d:])",
poscan[0].start,
soscan[1].start,
)
hdu.data = hdu.data - np.median(hdu.data[doscan])
# hdu.data[poscan[0].start :, soscan[1].start :])
# elif stype[0] == "colspec":
# logging.debug(f"hdu.data[:, {str_to_slices(stype[1])[0]}]")
# hdu.data = hdu.data - np.median(hdu.data[:, str_to_slices(stype[1])[0]])
elif re.match(r"^no", stype):
pass
else:
logging.error("stype: %s not valid", stype)
sys.exit(1)
# parallel overscan pass
if ptype:
if ptype in {"bycol", "bycolfilter", "bycolsmooth"}:
if ptype == "bycol":
# bias_row = np.percentile(hdu.data[poscan[0], :], pcnt, axis=0)
ravg, bias_row, rstd = stats.sigma_clipped_stats(
hdu.data[poscan[0], :], axis=0
)
                logging.debug(
                    "bias_row = stats.sigma_clipped_stats(hdu.data[%d:, :], axis=0)",
                    poscan[0].start,
                )
# "bias_row = np.percentile(hdu.data[%d:, :], %.1f, axis=0)",
elif ptype in {"bycolfilter", "bycolsmooth"}:
bias_row = get_bias_filtered_est_row(hdu, bad_segs)
if bias_row is None:
logging.warning(
"%s: saturated: could not perform parallel bias subtraction",
hdu.header.get("EXTNAME"),
)
return
if ptype == "bycolsmooth":
logging.debug("smoothing par overscan with Gaussian1DKernel")
kernel = Gaussian1DKernel(2)
# don't smooth the prescan
bias_row[datasec[1].start :] = convolve(
bias_row[datasec[1].start :], kernel, boundary="extend"
)
# convert shape from (,n) to (1, n)
bias_row = bias_row.reshape(1, np.shape(bias_row)[0])
hdu.data = hdu.data - bias_row.data
logging.debug("bias_row_median = %.2f", np.median(bias_row.data))
elif ptype == "mean":
hdu.data = hdu.data - np.mean(hdu.data[poscan])
elif ptype == "median":
hdu.data = hdu.data - np.median(hdu.data[poscan])
elif re.match(r"^no", ptype):
pass
else:
logging.error("ptype: %s not valid", ptype)
sys.exit(1)
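# Hedged usage sketch (added): a typical in-place call on one amplifier segment,
# using option names from the docstring above ("Segment10" is a hypothetical
# EXTNAME):
#   subtract_bias("byrow", "bycolfilter", hdulist["Segment10"])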
def eper_serial(hdu):
"""
Given datasec and serial overscan as slices, calculate
eper using the first ecols=3 columns of serial overscan
"""
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
ecols = 3 # number of columns used for eper signal
pcnt = 30.0 # percentile for signal est
ncols = datasec[1].stop - datasec[1].start
scols = int(0.10 * ncols)
# signal estimate 1-d array (30% is ~sky)
ravg, sig_est_col, rstd = stats.sigma_clipped_stats(
hdu.data[datasec[0], (datasec[1].stop - scols) : datasec[1].stop], axis=1
)
# sig_est_col = np.percentile(
# hdu.data[datasec[0], (datasec[1].stop - scols) : datasec[1].stop], pcnt, axis=1
# )
# deferred charge estimate (before bias subtraction)
dc_sum_col = np.sum(
hdu.data[datasec[0], soscan[1].start : (soscan[1].start + ecols)], axis=1
)
bias_est_col = np.median(hdu.data[datasec[0], (soscan[1].start + ecols) :], axis=1)
sig_est_col = sig_est_col - bias_est_col
dc_est_col = dc_sum_col - ecols * bias_est_col
dc_avg, dc_med, dc_std = stats.sigma_clipped_stats(dc_est_col)
sig_avg, sig_med, sig_std = stats.sigma_clipped_stats(sig_est_col)
if dc_avg > 0 and sig_avg > 0:
cti_est = dc_avg / sig_avg / ncols
else:
cti_est = -1.0
if cti_est > -0.0001:
eper = 1 - cti_est
return eper
else:
logging.debug("s-cti est was < 0")
return None
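# Hedged numeric illustration (added, made-up values): if a ~1000 ADU signal
# leaves ~0.5 ADU of deferred charge in the first 3 overscan columns after
# ncols = 512 serial transfers, then cti_est ~ 0.5 / 1000 / 512 ~ 1e-6 and the
# returned eper is ~ 0.999999.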
def get_union_of_bad_column_segs(hdulist: fits.HDUList):
    """
    Return the union of bad (hot/saturated) column segments found across all
    image HDUs in hdulist, with nearby segments merged.
    """
shape = None
segs = []
for hdu in hdulist:
# determine type of HDU
if isinstance(hdu, fits.PrimaryHDU): # check for data
if hdu.header.get("NAXIS") != 2:
continue
elif isinstance(hdu, (fits.ImageHDU, fits.CompImageHDU)):
if np.size(hdu.data) == 0:
logging.error("fits.*ImageHDU type must have np.size(data) != 0")
continue
else:
continue
# get pixel data info for hdu if exists
if not shape:
shape = np.shape(hdu.data)
if shape != np.shape(hdu.data):
logging.error(
"fits.*ImageHDU all must have same shape: %s != %s",
np.shape(hdu.data),
shape,
)
return None
new_segs = get_bad_column_segs(hdu)
if new_segs is None:
logging.warning(
"%s: too saturated, could not determine bad columns",
hdu.header.get("EXTNAME"),
)
elif len(new_segs):
logging.debug("before extending segs=%s", segs)
segs.extend(new_segs)
logging.debug("after extending segs=%s", segs)
else:
logging.debug("no bad segments found in %s", hdu.header.get("EXTNAME"))
# merge if within merge_distance
segs.sort()
seg_merge_dist = 8
i = 1
while i < len(segs):
if segs[i - 1][1] + seg_merge_dist > segs[i][0]:
segs[i][0] = segs[i - 1][0] # expand lower edge of upper segment
if segs[i][1] < segs[i - 1][1]:
segs[i][1] = segs[i - 1][1]
del segs[i - 1] # delete the lower segment
else:
i += 1 # move on
logging.debug(f"after merging segs={segs}")
segs.sort()
return segs
def get_disjoint_segments(indices: np.array) -> list:
"""
input indices np.array is expected to be sorted
"""
# get disjoint consecutive segments as [seg0, ...] where segj=[startcol, endcol]
# logging.debug("given indices=%s", indices)
segs = []
if np.size(indices):
seg_start = seg_stop = idx_last = indices[0]
for idx in indices[1:]: # start on second element
if idx == idx_last + 1: # advance the segment
seg_stop = idx_last = idx
else: # append and start a new seg
segs.append([seg_start, seg_stop])
seg_start = seg_stop = idx_last = idx
segs.append([seg_start, seg_stop])
# logging.debug("found segs=%s", segs)
return segs
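# Hedged example (added):
#   get_disjoint_segments(np.array([3, 4, 5, 9, 10, 15]))
#   # -> [[3, 5], [9, 10], [15, 15]]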
def merge_segments(segs: list, merge_distance: int = 8) -> list:
"""merge segments [start, stop], if within merge_distance"""
i = 1
while i < len(segs):
if segs[i - 1][1] + merge_distance > segs[i][0]:
segs[i][0] = segs[i - 1][0] # expand lower edge of upper segment
if segs[i][1] < segs[i - 1][1]:
segs[i][1] = segs[i - 1][1]
del segs[i - 1] # delete the lower segment
else:
i += 1 # move on
logging.debug("after merge: segs=%s", segs)
return segs
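# Hedged example (added): segments whose gap is smaller than merge_distance
# collapse into one,
#   merge_segments([[3, 5], [9, 10], [30, 31]], merge_distance=8)
#   # -> [[3, 10], [30, 31]]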
def get_bad_column_segs(hdu):
"""
    Given hdu, produce a list of ordered pairs [a, b] where columns
a through b inclusive are "bad" as in hot/saturated
The search is based on the parallel overscan.
An effort is made to deal with global saturation until it gets too high.
"""
logging.debug("get_bad_column_segs(): entry")
# define basic regions
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
pstart = poscan[0].start
pstop = poscan[0].stop
# parameters
max_rn = 7.0 # ceiling for read-noise estimate
window_size = 11 # window for forming baseline estimate
sat_col_thresh = 80 # thresh for saturated cols (units are read-noise)
base_delta_thresh = 2.0 # units of rn for return to baseline
base_delta_cnt = 2
pcnt = 20 # percentile for base_row used in comparison
erows = int((pstop - pstart) / 6.0) # skipped before baseline calc
seg_merge_dist = 8
rn_est = min(np.std(hdu.data[poscan[0], soscan[1]]), max_rn)
bias_floor = np.percentile(hdu.data[poscan[0], soscan[1]], 30)
sat_col_thresh = sat_col_thresh * rn_est # thresh for major sat cols
base_delta_thresh = base_delta_thresh * rn_est # thresh for shoulders
#
logging.debug(f"bias_floor={bias_floor}")
logging.debug(f"rn_est={rn_est:.2f}")
logging.debug(f"sat_col_thresh={sat_col_thresh:.2f}")
logging.debug(f"base_delta_thresh={base_delta_thresh:.2f}")
offset = erows
retries = int((pstop - pstart) / offset) - 1 # shift and try again limit
while retries > 0:
# skips first few rows to avoid cti deferred signal -- matters at high sig
test_row = np.percentile(
hdu.data[pstart + offset :, datasec[1]],
(100.0 - pcnt),
axis=0,
)
# tail end of parallel overscan to use for base level
base_row = np.percentile(hdu.data[pstart + offset :, datasec[1]], pcnt, axis=0)
base_row = minimum_filter1d(base_row, window_size, mode="nearest")
# get the high values in cores of hot/sat column groups
bad_ind = np.array(np.nonzero(test_row > (bias_floor + sat_col_thresh))[0])
if np.size(bad_ind) == 0:
return []
# find segments
segs = get_disjoint_segments(bad_ind)
# expand segments until baseline is reached
for seg in segs:
logging.debug("initial segment=[%s, %s]", seg[0], seg[1])
# work the low side
thresh_cnt = 0
while seg[0] > 0 and thresh_cnt < base_delta_cnt:
if (test_row[seg[0] - 1] - base_row[seg[0] - 1]) < base_delta_thresh:
thresh_cnt += 1
seg[0] -= 1
# work the high side
thresh_cnt = 0
while (
seg[1] + 1 < datasec[1].stop - datasec[1].start
and thresh_cnt < base_delta_cnt
):
if (test_row[seg[1] + 1] - base_row[seg[1] + 1]) < base_delta_thresh:
thresh_cnt += 1
seg[1] += 1
logging.debug("expanded segment=[%s, %s]", seg[0], seg[1])
# merge segments that are close (8) to each other
segs = merge_segments(segs, seg_merge_dist)
segsum = sum([seg[1] - seg[0] for seg in segs])
logging.debug("segsum=%d", segsum)
if sum([seg[1] - seg[0] for seg in segs]) > int(np.size(base_row) / 2):
# this is likely saturation of whole hdu and not hot columns
offset += erows
retries -= 1
if retries > 0:
logging.debug("may be saturated: retrying with offset=%d", offset)
else:
return None
else:
break
origin = datasec[1].start
for seg in segs:
seg[0] += origin
seg[1] += origin
logging.debug("final segs=%s", segs)
logging.debug("get_bad_column_segs(): exit")
return segs
def indices_to_segs(ind_arr: np.array):
    """
    Convert an array of column indices into a sorted list of disjoint
    [startcol, endcol] segments, merging segments separated by small gaps.
    """
logging.debug("indices_to_segs() entry")
seg_merge_dist = 8
# get disjoint consecutive segments as seg=[startcol, endcol]
logging.debug("ind_arr=%s", ind_arr)
segs = []
arr = np.sort(ind_arr)
seg_start = seg_stop = idx_last = arr[0]
for idx in arr[1:]: # start on second element
if idx == idx_last + 1: # advance the segment
seg_stop = idx_last = idx
else: # append and start a new seg
segs.append([seg_start, seg_stop])
seg_start = seg_stop = idx_last = idx
segs.append([seg_start, seg_stop])
logging.debug("initial segs=%s", segs)
# merge if within merge_distance
i = 1
while i < len(segs):
if segs[i - 1][1] + seg_merge_dist > segs[i][0]:
segs[i][0] = segs[i - 1][0] # expand lower edge of upper segment
if segs[i][1] < segs[i - 1][1]:
segs[i][1] = segs[i - 1][1]
del segs[i - 1] # delete the lower segment
else:
i += 1 # move on
segs.sort()
logging.debug("after merge: segs=%s", segs)
logging.debug("indices_to_segs() exit")
return segs
def get_bias_filtered_est_row(hdu, bad_segs=None):
"""
Given hdu, produce a suitable parallel bias estimate for bycol subtraction
The filtered row attempts to interpolate across regions with bad/hot columns
"""
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
pcnt = 50.0 # targets p-oscan matching double overscan in final rows
offset = int((poscan[0].stop - poscan[0].start) / 2.0)
bias_est_row = np.percentile(hdu.data[poscan[0].start + offset :, :], pcnt, axis=0)
if not bad_segs:
logging.debug("get_bias_filtered_est_row->get_bad_column_segs()")
bad_segs = get_bad_column_segs(hdu) # sorted list of disjoint segments
logging.debug("bad_segs=%s", bad_segs)
# if bad_segs is None:
# return None
max_length = 0
tot_length = 0
if len(bad_segs):
for seg in bad_segs:
length = seg[1] - seg[0] + 1
tot_length += length
if length > max_length:
max_length = length
if tot_length > 0.5 * np.size(bias_est_row[datasec[1]]):
return None
for seg in bad_segs:
ll = max(datasec[0].start, seg[0] - 10)
ul = min(datasec[0].stop, seg[1] + 11)
lval = np.median(bias_est_row[ll : seg[0]])
rval = np.median(bias_est_row[seg[1] : ul])
segsz = seg[1] - seg[0]
for x in range(seg[0], seg[1]):
bias_est_row[x] = (
lval * (seg[1] - x) / segsz + rval * (x - seg[0]) / segsz
)
# match datasec bias level to double overscan near last rows
bias_match_level = np.percentile(
hdu.data[poscan[0].start + offset :, soscan[1]], pcnt
)
bias_est_level = np.percentile(bias_est_row[soscan[1].start :], pcnt)
bias_est_row -= bias_est_level - bias_match_level
return bias_est_row
def get_bias_filtered_est_row_test(hdu, bad_segs=None):
"""
Given hdu, produce a suitable parallel bias estimate for bycol subtraction
The filtered row attempts to interpolate across regions with bad/hot columns
"""
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
pcnt = 30.0 # targets p-oscan matching double overscan in final rows
offset = int((poscan[0].stop - poscan[0].start) / 2.0)
bias_est_row = np.percentile(hdu.data[poscan[0].start + offset :, :], pcnt, axis=0)
new_est_row = baseline_als_optimized(bias_est_row, 105, 0.1, niter=10)
return new_est_row
def baseline_als_optimized(y, lam, p, niter=10):
L = len(y)
D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(L, L - 2))
D = lam * D.dot(
D.transpose()
) # Precompute this term since it does not depend on `w`
w = np.ones(L)
W = sparse.spdiags(w, 0, L, L)
for i in range(niter):
W.setdiag(w) # Do not create a new matrix, just update diagonal values
Z = W + D
z = spsolve(Z, w * y)
w = p * (y > z) + (1 - p) * (y < z)
return z
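# Hedged note (added): baseline_als_optimized() is the standard asymmetric
# least-squares (ALS) baseline estimator: `lam` sets the smoothness penalty
# (larger -> smoother baseline) and `p` the asymmetry, so points above the
# current baseline get weight p and points below get (1 - p); a handful of
# iterations is normally enough for the weights to settle.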
def get_bad_columns(hdu):
"""
Given hdu, produce an array containing column indices for bad/hot columns
based on the parallel overscan.
An effort is made to deal with saturation until it gets too high.
"""
# define basic regions
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
pstart = poscan[0].start
pstop = poscan[0].stop
# parameters
max_rn = 7.0 # ceiling for read-noise estimate
window_size = 7 # window for forming baseline estimate
sat_col_thresh = 80 # thresh for saturated cols (units are read-noise)
base_delta_thresh = 8 # thresh for detecting hot cols in shoulder regions
nearest_nbr_cnt = 2 # number of nearest neighbors to add to columns
seg_merge_dist = 8 # threshold for merging groups of hot columns
pcnt = 30 # percentile for base_row used in comparison
erows = int((pstop - pstart) / 6.0)
rn_est = min(np.std(hdu.data[poscan[0], soscan[1]]), max_rn)
bias_floor = np.percentile(hdu.data[poscan[0], soscan[1]], 30)
sat_col_thresh = sat_col_thresh * rn_est # thresh for major sat cols
base_delta_thresh = base_delta_thresh * rn_est # thresh for shoulders
#
logging.debug(f"bias_floor={bias_floor}")
logging.debug(f"rn_est={rn_est:.2f}")
logging.debug(f"sat_col_thresh={sat_col_thresh:.2f}")
logging.debug(f"base_delta_thresh={base_delta_thresh:.2f}")
offset = erows
retries = int((pstop - pstart) / offset) - 1
while retries > 0:
# skips first few rows to avoid cti deferred signal -- matters at high sig
test_row = np.percentile(
hdu.data[pstart + offset :, datasec[1]],
(100.0 - pcnt),
axis=0,
)
# tail end of parallel overscan to use for base level
base_row = np.percentile(hdu.data[pstart + offset :, datasec[1]], pcnt, axis=0)
base_row = minimum_filter1d(base_row, window_size, mode="nearest")
# get the high values in cores of hot/sat column groups
bad_ind0 = np.array(np.nonzero(test_row > (bias_floor + sat_col_thresh)))
# get the shoulders and small sat columns
bad_ind1 = np.array(np.nonzero(test_row > (base_row + base_delta_thresh)))
bad_ind = np.union1d(bad_ind0, bad_ind1)
logging.debug(f"np.size(bad_ind0)={np.size(bad_ind0)}")
logging.debug(f"np.size(bad_ind1)={np.size(bad_ind1)}")
logging.debug(f"np.size(bad_ind)={np.size(bad_ind)}")
if np.size(bad_ind) == 0:
return None
elif np.size(bad_ind1) > int(np.size(base_row) / 2):
# this is saturation of whole hdu and not hot columns
if np.size(bad_ind0) == 0:
return None
elif np.size(bad_ind0) < int(np.size(base_row) / 2):
bad_ind = bad_ind0 # ignore bad_ind1
break
else: # skip more rows and try again
offset += erows
retries -= 1
if retries > 0:
logging.debug(f"retrying with offset={offset}")
else:
retries = 0
# puff up the bad indices by including {nearest_nbr_cnt} neighbors
for i in range(0, nearest_nbr_cnt):
bad_ind = np.union1d(np.union1d(bad_ind - 1, bad_ind), bad_ind + 1)
logging.debug(f"bad_ind={bad_ind + datasec[1].start}")
# get disjoint consecutive segments as seg=[startcol, endcol]
segs = []
seg_start = seg_stop = idx_last = bad_ind[0]
for idx in bad_ind[1:]: # start on second element
if idx == idx_last + 1: # advance the segment
seg_stop = idx_last = idx
else: # append and start a new seg
segs.append([seg_start, seg_stop])
            seg_start = seg_stop = idx_last = idx
segs.append([seg_start, seg_stop])
logging.debug(f"segs={segs}")
# merge if within merge_distance
i = 1
while i < len(segs):
if segs[i - 1][1] + seg_merge_dist > segs[i][0]:
segs[i][0] = segs[i - 1][0] # expand lower edge of upper segment
del segs[i - 1] # delete the lower segment
else:
i += 1 # move on
logging.debug(f"segs={segs}")
new_bad_ind = []
segs.sort()
for seg in segs:
for idx in range(seg[0], seg[1]):
new_bad_ind.append(idx)
bad_ind = np.array(new_bad_ind)
if np.size(bad_ind):
# trim the ends
bad_ind = np.intersect1d(np.arange(datasec[1].stop - datasec[1].start), bad_ind)
logging.debug(f"bad_ind={bad_ind + datasec[1].start}")
return bad_ind + datasec[1].start
def eper_parallel(hdu):
"""
Given hdu, calculate eper using parallel overscan
Note once eper <~ 0.998 accuracy is reduced although effort is made
to deal with saturation extending into the parallel overscan
"""
(datasec, soscan, poscan, doscan) = get_data_oscan_slices(hdu)
# need a return None if any of those are missing
erows = 8 # number of rows used to measure deferred charge
nrows = datasec[0].stop - datasec[0].start
srows = int(0.05 * nrows)
pstart = poscan[0].start
pstop = poscan[0].stop
prows = pstop - pstart
if prows < 2 * erows:
logging.warning("parallel overscan too small to estimate cte")
return None
# bias floor and read noise estimate using double overscan region
bias_floor = np.percentile(hdu.data[poscan[0], soscan[1]], 30)
logging.debug("bias_floor = %.2f", bias_floor)
read_noise_est = min(np.std(hdu.data[poscan[0], soscan[1]]), 7.0)
logging.debug("read_noise_est = %.2f", read_noise_est)
good_ind = np.array(np.arange(datasec[1].stop - datasec[1].start))
bad_ind = get_bad_columns(hdu) # sorted array of column indices
if isinstance(bad_ind, np.ndarray) and np.size(bad_ind):
bad_ind -= datasec[1].start # account for offset
good_ind = np.setdiff1d(good_ind, bad_ind)
logging.debug("%d cols had usable signal in eper_parallel", np.size(good_ind))
if np.size(good_ind) < 0.5 * (datasec[1].stop - datasec[1].start):
logging.debug("not enough good columns to determine p-cte")
return None
# signal estimate 1-d array (use last 5% of rows)
sig_est_row = np.median(
hdu.data[datasec[0].stop - srows : datasec[0].stop, datasec[1]], axis=0
)
sig_est0 = np.percentile(sig_est_row, 20) - bias_floor # estimate
logging.debug("sig_est0 = %.2f", sig_est0)
# get column indices to use in determining p-cti
if sig_est0 > int(1 << 14) * read_noise_est: # assuming ~16k dynamic range
logging.debug("using high signal case")
# deferred charge estimate
dc_est_row = np.sum(
hdu.data[pstart : pstop - erows, datasec[1]], axis=0
) - bias_floor * (pstop - erows - pstart)
sig_est_row -= bias_floor
else: # unsaturated case
bias_est_row = np.percentile(hdu.data[pstart + erows :, datasec[1]], 50, axis=0)
# deferred charge estimate
dc_est_row = (
np.sum(hdu.data[pstart : pstart + erows, datasec[1]], axis=0)
- bias_est_row * erows
)
        # remove the per-column bias level from the signal estimate
        sig_est_row -= bias_est_row
dc_est = np.sum(dc_est_row[good_ind])
sig_est = np.sum(sig_est_row[good_ind])
logging.debug("dc_est = %.2f sig_est = %.2f nrows = %d", dc_est, sig_est, nrows)
if sig_est > 0:
cti_est = dc_est / sig_est / nrows
else:
cti_est = -1.0
logging.debug("cti_est = %.6f", cti_est)
if cti_est > -0.0001:
eper = 1 - cti_est
return eper
else:
logging.warning("p-cti est was < 0")
return None
def files_to_hdulists(ifiles: list, mmp: bool = True) -> list:
"""
Given a list of image files return a list of fits.HDUList objects
that are verified as commensurate for processing as a set (combining etc.)
The mmp input flag defaults to True to enable memory mapping being used.
If there are many large files then calling with mmp = False and
processing by sectioning is another choice.
"""
# set up the items used to verify file match each other
#
list_of_hdulists = []
for cnt, ffile in enumerate(ifiles):
try:
hdulist = fits.open(ffile, memmap=mmp)
except IOError as ioerr:
logging.error("IOError: %s", ioerr)
sys.exit(1)
# compare selected parameters per hdu per file
hdu_pars = [] # list of dict()s
for hdu in hdulist:
hdr = hdu.header
hdudict = dict()
# determine type of HDU
if isinstance(hdu, fits.PrimaryHDU): # check for data
hdudict["type"] = "PrimaryHDU"
elif isinstance(hdu, (fits.ImageHDU, fits.CompImageHDU)):
hdudict["type"] = "ImageHDU"
else:
hdudict["type"] = "other"
# get pixel data info for hdu if exists
hdudict["dimension"] = (None, None)
if hdudict["type"] in ("ImageHDU", "PrimaryHDU"):
if hdr.get("NAXIS") == 2:
if hdr.get("NAXIS1") and hdr.get("NAXIS2"):
naxis1 = hdr.get("NAXIS1")
naxis2 = hdr.get("NAXIS2")
if naxis1 * naxis2 > 0:
hdudict["dimension"] = (naxis1, naxis2)
hdu_pars.append(hdudict)
        # end of loop over hdus within file
if cnt == 0: # first file defines the valid parameters
base_pars = hdu_pars
else: # compare hdu_pars to first file
for hpar, bpar in zip(hdu_pars, base_pars):
for key in bpar.keys():
if hpar[key] != bpar[key]:
logging.error(
"file parameter mismatch: %s: %s != %s",
key,
hpar[key],
bpar[key],
)
sys.exit(1)
# end of loop over files
list_of_hdulists.append(hdulist)
return list_of_hdulists
def image_combine_hdu(
iimages: list,
hduid: int,
method: list,
region: tuple,
bimage: fits.HDUList,
sbias: str,
pbias: str,
scaling: tuple,
hduo: fits.ImageHDU,
):
"""
From a list of input images (as hdulists) and the id of one extension
return an ImageHDU.data object containing a pixel-by-pixel "combined value" of
the stacked input images. The processing varies according to the
additional arguments as to median vs. average, bias subtraction etc.
Parameters
----------
iimages: list of astropy.io.fits.HDUList objects
hduid: index specifying a single hdu (present in all iimages) to process
method: [median], [average], [std], [rstd], [sigmaclip, sigmaval], [rank, percentile]
region: (yslice, xslice) specifying ROI to process, full image if None
bimage: fits.HDUList object with (bias) image to subtract
sbias: param for subtract_bias() function (in this module)
pbias: param for subtract_bias() function (in this module)
scaling: (yslice, xslice) specifying ROI to use for scaling
hduo: a basic ImageHDU object that is modified and is the functions result
"""
hdudata_list = []
hdu_scale = []
logging.debug(f"using sbias: {sbias}")
logging.debug(f"using pbias: {pbias}")
if re.match(r"^no", sbias):
sbias = None
if re.match(r"^no", pbias):
pbias = None
for im in iimages:
hdu = im[hduid].copy()
if sbias or pbias:
subtract_bias(sbias, pbias, hdu)
if scaling:
svalue = np.median(hdu.data[scaling[0], scaling[1]])
hdu_scale.append(svalue)
if region:
hdudata_list.append(hdu.data[region[0], region[1]])
if bimage:
bdata = bimage[hduid].data[region[0], region[1]]
else:
hdudata_list.append(hdu.data)
if bimage:
bdata = bimage[hduid].data
if scaling: # pass through data and scale it
hdu_scale_arr = np.asarray(hdu_scale)
# normalize the scale factors
hdu_scale_arr = np.mean(hdu_scale_arr) / hdu_scale_arr
logging.debug(f"applying scale factors: {hdu_scale_arr}")
        # scale each image; reassigning the bare loop variable would not
        # update the arrays stored in hdudata_list
        for idx, hduscale in enumerate(hdu_scale_arr):
            hdudata_list[idx] = hdudata_list[idx] * hduscale
logging.debug(f"using method: {method}")
if re.match(r"^mea", method[0]):
hduo.data = np.mean(np.array(hdudata_list), axis=0)
elif re.match(r"^med", method[0]):
hduo.data = np.median(np.array(hdudata_list), axis=0)
elif re.match(r"^madstd", method[0]):
hduo.data = stats.mad_std(np.array(hdudata_list), axis=0)
elif re.match(r"^std", method[0]):
hduo.data = np.std(np.array(hdudata_list), axis=0)
elif re.match(r"^rstd", method[0]):
logging.debug(
f"calling stats.sigma_clip(np.array(hdudata_list), float({method[1]}), axis=0, masked=False)"
)
hduo.data = np.nanstd(
stats.sigma_clip(
np.array(hdudata_list), float(method[1]), axis=0, masked=False
),
axis=0,
)
elif re.match(r"^sig", method[0]): # this one is ugly
logging.debug(
f"calling stats.sigma_clip(np.array(hdudata_list), float({method[1]}), axis=0, masked=False)"
)
hduo.data = np.nanmean(
stats.sigma_clip(
np.array(hdudata_list), float(method[1]), axis=0, masked=False
),
axis=0,
)
elif re.match(r"^ran", method[0]):
hduo.data = np.percentile(np.array(hdudata_list), method[1], axis=0)
else:
logging.error("image combine method %s not recognized", method[0])
sys.exit(1)
if bimage:
hduo.data = hduo.data - bdata
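# Usage sketch (illustration only, not part of the original module): a minimal
# median combine of extension 1 of already-opened images, full frame, with no
# bias image and bias subtraction disabled. The hdulists argument is assumed to
# come from files_to_hdulists() above.
def _example_image_combine_hdu(hdulists):
    hduo = hdulists[0][1].copy()  # template ImageHDU that receives the result
    image_combine_hdu(
        hdulists,      # iimages
        1,             # hduid
        ["median"],    # method
        None,          # region: process the full image
        None,          # bimage: no bias image to subtract
        "no",          # sbias: a leading "no" disables serial bias subtraction
        "no",          # pbias: a leading "no" disables parallel bias subtraction
        None,          # scaling: no scaling region
        hduo,
    )
    return hduo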
def subtract_background(hdu, datasec, segs):
"""
Used in xtalk measurement where the background should be simple
"""
# convert segments list into array of indices (origin is same as hdu)
bad_ind = []
tot_len = 0
max_len = 0
if segs:
segs.sort()
for seg in segs:
seg_len = seg[1] - seg[0]
tot_len += seg_len
if seg_len > max_len:
max_len = seg_len
bad_ind.extend(list(range(seg[0], seg[1] + 1)))
bad_ind = np.array(bad_ind)
# copy hdu.data to produce a background estimate
bkgarr = hdu.data.copy()
# interpolate across bad column regions (segments)
if np.size(bad_ind):
for rowind in range(np.shape(hdu.data)[0]):
for seg in segs:
ll = max(datasec[0].start, seg[0] - 13)
ul = min(datasec[0].stop, seg[1] + 13)
lval = np.median(bkgarr[rowind, ll : seg[0]])
rval = np.median(bkgarr[rowind, seg[1] : ul])
segsz = seg[1] - seg[0]
for x in range(seg[0], seg[1]):
bkgarr[rowind, x] = (
lval * (seg[1] - x) / segsz + rval * (x - seg[0]) / segsz
)
if rowind % 500 == 0:
logging.debug(
"bkgarr[%d,%d:%d]=%s",
rowind,
seg[0] - 13,
seg[1] + 13,
np.array2string(
bkgarr[rowind, seg[0] - 13 : seg[1] + 13],
precision=2,
separator=",",
),
)
# median filter
hdu.data[datasec] -= percentile_filter(
bkgarr[datasec], 20, size=(10, 50), mode="nearest"
)
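# Usage sketch (illustration only): datasec is a (row-slice, column-slice) pair
# and segs is a list of [start, stop] bad-column segments to interpolate across;
# the values below are hypothetical placeholders, and numpy is assumed to be
# imported as np at module top.
def _example_subtract_background(hdu):
    datasec = (np.s_[0:2000], np.s_[10:522])
    segs = [[100, 104], [300, 302]]
    subtract_background(hdu, datasec, segs)  # modifies hdu.data in place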
def subtract_background_for_xtalk(hdu, mask, datasec):
"""
Used in xtalk measurement where the background should be simple
"""
# copy hdu.data to produce a background estimate
bkgarr = hdu.data.copy()
# interpolate row by row across masked area
d0 = datasec[1].start
d1 = datasec[1].stop
dsize = d1 - d0
# str1 = np.array2string(bkgarr[700, d0:d1], precision=2, separator=",")
# print(f"bkgarr[700, {d0}:{d1}]={str1}")
# str2 = np.array2string(mask[700, d0:d1], precision=2, separator=",")
# print(f"mask[700, {d0}:{d1}]={str2}")
for rowind in range(np.shape(hdu.data)[0]):
row_arr = np.array(bkgarr[rowind, d0:d1])
wghts = np.array(mask[rowind, d0:d1])
if np.all(wghts): # skip row if no masked points
continue
x = np.arange(dsize)
segs = get_disjoint_segments(x[wghts == 0])
segsum = sum([seg[1] - seg[0] for seg in segs])
if segsum > (dsize) / 2.0: # can't subtract background this row
bkgarr[rowind, :] = np.nan
continue
for seg in segs:
s0 = seg[0]
s1 = seg[1]
ll = max(0, s0 - 10)
ul = min(s1 + 10, dsize)
if s0 - 10 < 0 or s1 + 10 > dsize: # invalidate and skip segment
bkgarr[rowind, s0 + d0 : s1 + d0 + 1] = np.nan
continue
# logging.debug("ll = %d", ll)
# logging.debug("s0 = %d", s0)
# logging.debug("row_arr[%d : %d]=%s", ll, s0, row_arr[ll:s0])
lval = np.median(row_arr[ll:s0])
rval = np.median(row_arr[s1 + 1 : ul])
segsz = s1 - s0 + 1
for xval in range(s0, s1 + 1):
row_arr[xval] = lval * (s1 - xval) / segsz + rval * (xval - s0) / segsz
bkgarr[rowind, s0 + d0 : s1 + d0 + 1] = row_arr[s0 : s1 + 1]
nan_cnt = np.count_nonzero(np.isnan(row_arr))
if nan_cnt:
logging.debug(
"2: found %d nans in row %d",
np.count_nonzero(np.isnan(row_arr)),
rowind,
)
if rowind == 40:
logging.debug("segs=%s", segs)
logging.debug("segsum=%d", segsum)
logging.debug("%s", row_arr)
hdu.data -= bkgarr
def auto_biastype(hdulist: fits.HDUList) -> tuple:
"""
function for LSST CCD FITs files to return the CCD type: itl|e2v
raises KeyError if FITS keyword "LSST_NUM" is not present
raises ValueError if LSST_NUM is invalid
"""
key = "LSST_NUM"
try:
lsstnum = hdulist[0].header[key] # raises KeyError
except KeyError:
raise KeyError("Missing LSST_NUM keyword required for LSST Camera Image?")
if re.match(r"E2V", lsstnum):
sbias_str = "byrow"
pbias_str = "bycolfilter"
logging.debug("auto_biastype is E2V")
elif re.match(r"ITL", lsstnum):
sbias_str = "byrow"
pbias_str = "bycolfilter"
logging.debug("auto_biastype is ITL")
else:
        raise ValueError(f"LSST_NUM FITS key value: {lsstnum} is invalid")
return sbias_str, pbias_str
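# Usage sketch (illustration only): picking default bias handling for a file
# from its LSST_NUM keyword; the path is a hypothetical placeholder and fits
# is assumed to be imported at the top of this module.
def _example_auto_biastype(path="some_lsst_image.fits"):
    with fits.open(path) as hdul:
        sbias_str, pbias_str = auto_biastype(hdul)  # may raise KeyError/ValueError
    return sbias_str, pbias_str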
|
[
"stuart.l.marshall@gmail.com"
] |
stuart.l.marshall@gmail.com
|
7bf5acfff6dcd2a5a761d1c79eadbb80457132b7
|
fb7cb229a8f68f9ba3cc23ce51238008841516e8
|
/Sensorslab2/Task1/first_pkg/first_pkg/node1.py
|
14b6406c2d671cf89efc40b794ca907651adc2be
|
[] |
no_license
|
RozanMagdy/ITI-Labs
|
24852442c8cae3f9d0fe44e55e5995853f18a9b5
|
3e3a4b85a415492c6eb539c79be128504fefaf96
|
refs/heads/master
| 2023-06-04T18:07:58.256689
| 2021-06-17T11:43:30
| 2021-06-17T11:43:30
| 359,421,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,172
|
py
|
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from nav_msgs.msg import Odometry
import pandas as pd
from geometry_msgs.msg import Quaternion
from math import sin, cos, pi
import numpy as np
def euler_from_quaternion(quaternion):
x = quaternion.x
y = quaternion.y
z = quaternion.z
w = quaternion.w
sinr_cosp = 2 * (w * x + y * z)
cosr_cosp = 1 - 2 * (x * x + y * y)
roll = np.arctan2(sinr_cosp, cosr_cosp)
sinp = 2 * (w * y - z * x)
pitch = np.arcsin(sinp)
siny_cosp = 2 * (w * z + x * y)
cosy_cosp = 1 - 2 * (y * y + z * z)
yaw = np.arctan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
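# Quick sanity check (added for illustration, not used by the node): a pure
# 90-degree yaw rotation, q = (x=0, y=0, z=sin(pi/4), w=cos(pi/4)), should give
# roll = pitch = 0 and yaw = pi/2.
def _example_euler_from_quaternion():
    q = Quaternion(x=0.0, y=0.0, z=sin(pi / 4), w=cos(pi / 4))
    roll, pitch, yaw = euler_from_quaternion(q)
    return roll, pitch, yaw  # expected: (0.0, 0.0, ~1.5708)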
class my_node(Node):
def __init__(self):
super().__init__("node1")
self.get_logger().info("Node1 is Started")
self.create_subscription(Odometry,"odom",self.timer_call_sub,10)
self.df = pd.read_csv("pose.csv")
self.angX_list= list(self.df[self.df.columns[0]])
self.angY_list= list(self.df[self.df.columns[1]])
self.yaw_list = list(self.df[self.df.columns[2]])
self.index=0
def timer_call_sub(self,odom_msg):
currentX=odom_msg.pose.pose.position.x
currentY=odom_msg.pose.pose.position.y
        roll, pitch, currentYAW = euler_from_quaternion(odom_msg.pose.pose.orientation)
        currentYAW = currentYAW * (180 / pi)  # convert yaw from radians to degrees
self.get_logger().info("current data "+str(currentX)+' '+str(currentY)+' '+str(currentYAW))
expectedX=self.angX_list[self.index]
expectedY=self.angY_list[self.index]
expectedYAW=self.yaw_list[self.index]
        # treat the pose as reached when within tolerance (0.5 m in x/y, 5 deg in yaw)
        if abs(currentX - expectedX) <= 0.5 and abs(currentY - expectedY) <= 0.5 and abs(currentYAW - expectedYAW) <= 5:
            self.index = self.index + 1
            if self.index >= len(self.yaw_list):
                self.get_logger().info("executed all positions; the last one is " + str(currentX) + "," + str(currentY) + "," + str(currentYAW))
                self.index = 0
def main (args=None):
rclpy.init(args=args)
node=my_node()
rclpy.spin(node)
rclpy.shutdown()
if __name__=="__main__":
main()
|
[
"rozanabdelmawla@gmail.com"
] |
rozanabdelmawla@gmail.com
|
bc2e286f954ef39e80a55f023977e2bbd6237920
|
07ae1548a4113ef59bbe805a9530c67487dc1454
|
/day_13.py
|
3e65386d1bfc042f3e7929da11b66930a971356f
|
[] |
no_license
|
mrugacz95/advent-of-code-2018
|
acb74bde04d7e51b2eab1b393ca8161c0808146b
|
bcc6265517a5d1d4adedb6e43b260383b80344a2
|
refs/heads/master
| 2023-04-23T09:12:10.303890
| 2021-05-16T11:05:43
| 2021-05-16T11:05:43
| 338,780,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,067
|
py
|
from collections import defaultdict
from enum import Enum
from typing import Dict, Optional, Tuple
from aocd.models import Puzzle
puzzle = Puzzle(year=2018, day=13)
left_turn = '∖'
raw = puzzle.input_data.replace('\\', left_turn)
def format(*args):
return '\n'.join(args)
one_loop = format('/->-∖',
'| |',
'| |',
'| |',
'∖---/')
three_loops = format('/->-∖ ',
'| | /----∖',
'| /-+--+-∖ |',
'| | | | v |',
'∖-+-/ ∖-+--/',
' ∖------/ ')
crash = format('|',
'v',
'|',
'|',
'|',
'^',
'|', )
eight_shape = format('/--∖ ',
'| | ',
'| | ',
'∖--+---∖',
' | |',
' v |',
' | |',
' ∖---/')
close_loop = format('/-∖ ',
'| | /-----∖',
'∖-+-+---∖ |',
' | v | |',
' ∖-+---/ |',
' ∖---∖ |',
' | |',
' ∖-/')
wreckfest = format('/>-<∖ ',
'| | ',
'| /<+-∖',
'| | | v',
'∖>+</ |',
' | ^',
' ∖<->/')
class SimpleReprDefaultDict(defaultdict):
def __repr__(self):
return repr(dict(self))
class Direction(Enum):
_ignore_ = ['_cart_to_direction', '_direction_to_cart']
UP = 1
DOWN = -1
LEFT = -2
RIGHT = 2
STRAIGHT = 4
def __lt__(self, other: 'Direction'):
return self.value < other.value
_cart_to_direction = {}
_direction_to_cart = {}
def apply(self, y, x):
return {
Direction.UP: (y - 1, x),
Direction.DOWN: (y + 1, x),
Direction.LEFT: (y, x - 1),
Direction.RIGHT: (y, x + 1)
}.get(self)
@staticmethod
def from_cart(cart):
return Direction._cart_to_direction[cart]
def to_cart(self):
return self._direction_to_cart[self]
def opposite(self):
return Direction(-self.value)
def __repr__(self):
return {
Direction.RIGHT: "RIGHT",
Direction.LEFT: "LEFT",
Direction.UP: "UP",
Direction.DOWN: "DOWN",
Direction.STRAIGHT: "STRAIGHT",
}.get(self)
@staticmethod
def to_relative_direction(before: 'Direction', after: 'Direction'):
if before.opposite() == after:
return Direction.STRAIGHT
return {
# right
(Direction.LEFT, Direction.DOWN): Direction.RIGHT,
(Direction.DOWN, Direction.RIGHT): Direction.RIGHT,
(Direction.RIGHT, Direction.UP): Direction.RIGHT,
(Direction.UP, Direction.LEFT): Direction.RIGHT,
# left
(Direction.LEFT, Direction.UP): Direction.LEFT,
(Direction.DOWN, Direction.LEFT): Direction.LEFT,
(Direction.RIGHT, Direction.DOWN): Direction.LEFT,
(Direction.UP, Direction.RIGHT): Direction.LEFT,
}.get((before, after))
@staticmethod
def from_relative_direction(relative, absolute):
if relative == Direction.STRAIGHT:
return absolute
if relative == Direction.LEFT:
return {
Direction.LEFT: Direction.DOWN,
Direction.RIGHT: Direction.UP,
Direction.DOWN: Direction.RIGHT,
Direction.UP: Direction.LEFT,
}.get(absolute)
return {
Direction.LEFT: Direction.UP,
Direction.RIGHT: Direction.DOWN,
Direction.DOWN: Direction.LEFT,
Direction.UP: Direction.RIGHT,
}.get(absolute)
Direction._cart_to_direction = {
'>': Direction.RIGHT,
'<': Direction.LEFT,
'^': Direction.UP,
'v': Direction.DOWN,
}
# noinspection PyProtectedMember
Direction._direction_to_cart = {v: k for k, v in Direction._cart_to_direction.items()}
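# Small illustration (not used by the solution): applying a LEFT turn to a cart
# heading UP gives LEFT, and the inverse mapping recovers the relative turn when
# given the entry side (the opposite of the old heading) and the new heading.
def _example_direction_roundtrip():
    new_heading = Direction.from_relative_direction(Direction.LEFT, Direction.UP)
    assert new_heading == Direction.LEFT
    assert Direction.to_relative_direction(Direction.UP.opposite(), new_heading) == Direction.LEFT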
Position = Tuple[int, int]
Graph = Dict[Position, Dict[Direction, Optional[Position]]]
class Cart:
def __init__(self, x, y, direction):
self.x = x
self.y = y
self.direction = direction
self.last_turn = Direction.RIGHT
def next_turn(self):
return {
Direction.LEFT: Direction.STRAIGHT,
Direction.STRAIGHT: Direction.RIGHT,
Direction.RIGHT: Direction.LEFT,
None: Direction.LEFT
}.get(self.last_turn)
def move(self, graph: Graph):
connections = graph[(self.y, self.x)]
if len(connections) == 4: # intersection
self.last_turn = self.next_turn()
self.direction = Direction.from_relative_direction(self.last_turn, self.direction)
else: # curve or straight
opposite = self.direction.opposite()
self.direction = next(filter(lambda d: d != opposite, connections.keys())) # the other from two ends
if len(graph[self.direction.apply(self.y, self.x)].keys()) < 2: # dead end, shouldn't move
return
self.y, self.x = self.direction.apply(self.y, self.x)
def __repr__(self):
return f'Cart<y: {self.y}, x: {self.x}, dir: {self.direction}, lt: {self.last_turn}>'
@property
def position(self):
return self.y, self.x
def track_below(self):
return {
Direction.DOWN: '|',
Direction.UP: '|',
Direction.LEFT: '-',
Direction.RIGHT: '-'
}.get(self.direction)
def parse_tracks(raw_rails):
rails = raw_rails.split('\n')
carts = []
graph: Graph = SimpleReprDefaultDict(lambda: SimpleReprDefaultDict(lambda: None))
rails_width = max(map(len, rails))
rails_height = len(rails)
for y, line in enumerate(rails):
for x, symbol in enumerate(line):
if symbol in ['v', '^', '<', '>']:
cart = Cart(x, y, Direction.from_cart(symbol))
carts.append(cart)
track = cart.track_below()
else:
track = symbol
directions = {
left_turn: [],
'|': [Direction.UP, Direction.DOWN],
'/': [],
'-': [Direction.LEFT, Direction.RIGHT],
'+': [Direction.LEFT, Direction.RIGHT, Direction.DOWN, Direction.UP],
' ': []
}.get(track)
for direction in directions:
graph[(y, x)][direction] = direction.apply(y, x)
graph[direction.apply(y, x)][direction.opposite()] = (y, x)
return graph, carts
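# Small illustration (not called by solve()): parsing the one_loop sample above
# yields a single cart at position (0, 2) heading RIGHT and a graph keyed by
# (y, x) track positions.
def _example_parse_one_loop():
    graph, carts = parse_tracks(one_loop)
    assert len(carts) == 1
    assert carts[0].position == (0, 2) and carts[0].direction == Direction.RIGHT
    return graph, carts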
def print_rails(carts, graph: Graph):
carts_positions: Dict[Tuple[int, int], Cart] = {cart.position: cart for cart in carts}
tracks = list(graph.keys())
max_x = max(map(lambda x: x[1], tracks))
min_x = min(map(lambda x: x[1], tracks))
max_y = max(map(lambda y: y[0], tracks))
min_y = min(map(lambda y: y[0], tracks))
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
track = graph.get((y, x), None)
if (y, x) in carts_positions:
cart = carts_positions[(y, x)].direction.to_cart()
print(cart, end='')
elif track is not None:
track = tuple(sorted(list(track.keys())))
track = {
tuple(sorted([Direction.UP, Direction.DOWN])): '|',
tuple(sorted([Direction.LEFT, Direction.RIGHT])): '-',
tuple(sorted([Direction.LEFT, Direction.RIGHT, Direction.UP, Direction.DOWN])): '+',
tuple(sorted([Direction.LEFT, Direction.UP])): '/',
tuple(sorted([Direction.LEFT, Direction.DOWN])): '\\',
tuple(sorted([Direction.RIGHT, Direction.UP])): '\\',
tuple(sorted([Direction.RIGHT, Direction.DOWN])): '/',
}.get(track)
print(track, end='')
else:
print(' ', end='')
print()
print()
def part_1(graph, carts):
carts_positions = set(cart.position for cart in carts)
while True:
ordered_carts = sorted(carts, key=lambda c: c.position)
for cart in ordered_carts:
carts_positions.remove(cart.position)
cart.move(graph)
pos_after_move = cart.position
if pos_after_move in carts_positions: # crash
return cart.position
else:
carts_positions.add(pos_after_move)
def part_2(graph, carts):
carts_positions = {cart.position: cart for cart in carts}
carts_left = carts.copy()
while len(carts_left) != 1:
ordered_carts = sorted(carts_left, key=lambda c: c.position)
invalid_carts = set()
for cart in ordered_carts:
if cart in invalid_carts: # already crashed
continue
carts_positions.pop(cart.position)
cart.move(graph)
if cart.position in carts_positions: # crash
invalid_carts.add(carts_positions[cart.position])
invalid_carts.add(cart)
carts_positions.pop(cart.position)
else:
carts_positions[cart.position] = cart
for invalid in invalid_carts:
carts_left.remove(invalid)
return carts_left[0].position
def solve():
graph, carts = parse_tracks(raw)
print_rails(carts, graph)
ans = part_1(graph, carts)
puzzle.answer_a = ','.join(map(str, reversed(ans)))
graph, carts = parse_tracks(raw)
ans = part_2(graph, carts)
puzzle.answer_b = ','.join(map(str, reversed(ans)))
if __name__ == '__main__':
solve()
|
[
"marcin.mrugas@allegro.pl"
] |
marcin.mrugas@allegro.pl
|
4253b79bb5472aaa724f8a275715ff312d9e51ad
|
6bc7062b2f99d0c54fd1bb74c1c312a2e3370e24
|
/crowdfunding/projects/migrations/0019_remove_project_project_category.py
|
51c2a3d6871cecdc37443bf4dad36195d614d54a
|
[] |
no_license
|
marinkoellen/drf-proj
|
f2d1f539efb877df69d285bd2fe6d5e789709933
|
874549d68ab80a774988c83706bb7934e035de42
|
refs/heads/master
| 2022-12-25T16:53:52.187704
| 2020-10-03T03:54:06
| 2020-10-03T03:54:06
| 289,620,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# Generated by Django 3.0.8 on 2020-08-25 13:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0018_remove_category_slug'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='project_category',
),
]
|
[
"ellen.marinko1@gmail.com"
] |
ellen.marinko1@gmail.com
|
5815ce4fe1258e88d6834e0525aef0cb88efe45c
|
e407cd1e873ef1a626a592ac22901a300f5be8f4
|
/.pycharm_helpers/python_stubs/-1840357896/_ast.py
|
781c01e7223effe7a6aacfc06678da20c5e8ad7b
|
[] |
no_license
|
rpesce/oktetoProject
|
65f77cfd2d92e6372f32e6e3dbfb8ce038d1b45d
|
7dbddf3d85b040755b15f4e647894353d4e5a3c5
|
refs/heads/master
| 2023-03-31T12:03:49.419915
| 2020-05-13T19:37:49
| 2020-05-13T19:37:49
| 263,726,526
| 0
| 0
| null | 2021-03-20T03:57:35
| 2020-05-13T19:38:54
|
Python
|
UTF-8
|
Python
| false
| false
| 26,769
|
py
|
# encoding: utf-8
# module _ast
# from (built-in)
# by generator 1.147
# no doc
# no imports
# Variables with simple values
PyCF_ALLOW_TOP_LEVEL_AWAIT = 8192
PyCF_ONLY_AST = 1024
PyCF_TYPE_COMMENTS = 4096
# no functions
# classes
class AST(object):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
_attributes = ()
_fields = ()
__dict__ = None # (!) real value is "mappingproxy({'__getattribute__': <slot wrapper '__getattribute__' of '_ast.AST' objects>, '__setattr__': <slot wrapper '__setattr__' of '_ast.AST' objects>, '__delattr__': <slot wrapper '__delattr__' of '_ast.AST' objects>, '__init__': <slot wrapper '__init__' of '_ast.AST' objects>, '__new__': <built-in method __new__ of type object at 0x7f7006275a20>, '__reduce__': <method '__reduce__' of '_ast.AST' objects>, '__dict__': <attribute '__dict__' of '_ast.AST' objects>, '__doc__': None, '_fields': (), '_attributes': ()})"
class operator(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class Add(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class alias(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = (
'name',
'asname',
)
class boolop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class And(boolop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class stmt(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
'end_lineno',
'end_col_offset',
)
_fields = ()
class AnnAssign(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'annotation',
'value',
'simple',
)
class arg(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
'end_lineno',
'end_col_offset',
)
_fields = (
'arg',
'annotation',
'type_comment',
)
class arguments(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = (
'posonlyargs',
'args',
'vararg',
'kwonlyargs',
'kw_defaults',
'kwarg',
'defaults',
)
class Assert(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'msg',
)
class Assign(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'targets',
'value',
'type_comment',
)
class AsyncFor(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'iter',
'body',
'orelse',
'type_comment',
)
class AsyncFunctionDef(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'name',
'args',
'body',
'decorator_list',
'returns',
'type_comment',
)
class AsyncWith(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'items',
'body',
'type_comment',
)
class expr(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
'end_lineno',
'end_col_offset',
)
_fields = ()
class Attribute(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'attr',
'ctx',
)
class AugAssign(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'op',
'value',
)
class expr_context(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class AugLoad(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class AugStore(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Await(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class BinOp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'left',
'op',
'right',
)
class BitAnd(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BitOr(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BitXor(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class BoolOp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'op',
'values',
)
class Break(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Call(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'func',
'args',
'keywords',
)
class ClassDef(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'name',
'bases',
'keywords',
'body',
'decorator_list',
)
class cmpop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class Compare(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'left',
'ops',
'comparators',
)
class comprehension(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = (
'target',
'iter',
'ifs',
'is_async',
)
class Constant(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
n = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
s = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_fields = (
'value',
'kind',
)
class Continue(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Del(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Delete(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'targets',
)
class Dict(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'keys',
'values',
)
class DictComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'key',
'value',
'generators',
)
class Div(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Eq(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class excepthandler(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = (
'lineno',
'col_offset',
'end_lineno',
'end_col_offset',
)
_fields = ()
class ExceptHandler(excepthandler):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'type',
'name',
'body',
)
class Expr(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class mod(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class Expression(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class slice(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class ExtSlice(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'dims',
)
class FloorDiv(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class For(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'iter',
'body',
'orelse',
'type_comment',
)
class FormattedValue(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'conversion',
'format_spec',
)
class FunctionDef(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'name',
'args',
'body',
'decorator_list',
'returns',
'type_comment',
)
class FunctionType(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'argtypes',
'returns',
)
class GeneratorExp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Global(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
class Gt(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class GtE(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class If(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'body',
'orelse',
)
class IfExp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'body',
'orelse',
)
class Import(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
class ImportFrom(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'module',
'names',
'level',
)
class In(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Index(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class Interactive(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class unaryop(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class Invert(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Is(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class IsNot(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class JoinedStr(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'values',
)
class keyword(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = (
'arg',
'value',
)
class Lambda(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'args',
'body',
)
class List(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elts',
'ctx',
)
class ListComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Load(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class LShift(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Lt(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class LtE(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class MatMult(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Mod(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Module(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
'type_ignores',
)
class Mult(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Name(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'id',
'ctx',
)
class NamedExpr(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'target',
'value',
)
class Nonlocal(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'names',
)
class Not(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class NotEq(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class NotIn(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Or(boolop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Param(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Pass(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Pow(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Raise(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'exc',
'cause',
)
class Return(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class RShift(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Set(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elts',
)
class SetComp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elt',
'generators',
)
class Slice(slice):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'lower',
'upper',
'step',
)
class Starred(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'ctx',
)
class Store(expr_context):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Sub(operator):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class Subscript(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
'slice',
'ctx',
)
class Suite(mod):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
)
class Try(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'body',
'handlers',
'orelse',
'finalbody',
)
class Tuple(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'elts',
'ctx',
)
class type_ignore(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = ()
class TypeIgnore(type_ignore):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'lineno',
'tag',
)
class UAdd(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class UnaryOp(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'op',
'operand',
)
class USub(unaryop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
class While(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'test',
'body',
'orelse',
)
class With(stmt):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'items',
'body',
'type_comment',
)
class withitem(AST):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
_attributes = ()
_fields = (
'context_expr',
'optional_vars',
)
class Yield(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class YieldFrom(expr):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = (
'value',
)
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7f7005ada430>, 'find_spec': <classmethod object at 0x7f7005ada460>, 'find_module': <classmethod object at 0x7f7005ada490>, 'create_module': <classmethod object at 0x7f7005ada4c0>, 'exec_module': <classmethod object at 0x7f7005ada4f0>, 'get_code': <classmethod object at 0x7f7005ada580>, 'get_source': <classmethod object at 0x7f7005ada610>, 'is_package': <classmethod object at 0x7f7005ada6a0>, 'load_module': <classmethod object at 0x7f7005ada6d0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_ast', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
|
[
"robertopescee@hotmail.com"
] |
robertopescee@hotmail.com
|
489ce3ed0b9a4321ab5b111d4250a46d2ec16416
|
39b11ece266ee1d094dda212988e1a45b884212e
|
/assignment2/count_docs_w_a_term.py
|
8218425391014acc98def7f7a86273734f957be2
|
[] |
no_license
|
cedoradog/datasci_course_materials
|
2910e436e833fd9edd9837ca091635585aec4c7a
|
2b64e796cfc6cc47c8dc6be0510da088c3d952e5
|
refs/heads/master
| 2020-12-25T10:08:38.417349
| 2014-07-30T22:05:11
| 2014-07-30T22:05:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Required packages
import sqlite3 as lite
import sys
#Open the database and create a cursor for it
connection = lite.connect('reuters.db')
cursor = connection.cursor()
#Perform the query: PROJECT_docid(SELECT_{term='parliament'}(frequency))
query = cursor.execute('''
SELECT docid
FROM frequency
WHERE term = 'parliament';''')
#Run your query against your local database and determine the number
#of records returned.
answer = len(query.fetchall())
#Print answer
print(answer)
#Not required: Commit the changes to the reuters database
#connection.commit()
#Close connection
connection.close()
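# Note added for illustration: the same count could be obtained for an arbitrary
# term with a parameterized query (run before closing the connection), e.g.:
#   cursor.execute("SELECT COUNT(docid) FROM frequency WHERE term = ?", ("parliament",))
#   print(cursor.fetchone()[0])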
|
[
"cedoradog@unal.edu.co"
] |
cedoradog@unal.edu.co
|
3769cc4e5d62e2acc88d472e5469b2c475fc4ccd
|
01ae34687bed6b71ddad6e74661d0633f96a7010
|
/testPaChong/identityImg.py
|
d6081826fc92c9a450a3f59f5b4c813621503b94
|
[] |
no_license
|
Yinpeng1/PythonTestAndPaChong
|
a36b5437b52a5793b853d7a83b9bd866dafd5bf0
|
53639c7d807b509c66dc53a965735bd7ea84dcaa
|
refs/heads/master
| 2021-06-02T00:40:44.458694
| 2020-01-07T07:09:15
| 2020-01-07T07:09:15
| 134,508,860
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,167
|
py
|
from PIL import Image
import pytesseract
import urllib.request
import requests
import re
from http import cookiejar
from contextlib import closing
import execjs
s = requests.Session()
# jar = requests.cookies.RequestsCookieJar()
cookie = cookiejar.CookieJar()
urlOpener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
def getHtml():
    # papg = urlOpener.open('http://ykjcx.yundasys.com/go.php?wen=3839998850701')  # open the tracking page URL
papg = s.get("http://ykjcx.yundasys.com/go.php?wen=3839999344061")
# print("cookie==========="+s.cookies.values())
    # html = papg.read()  # read the page source with read(); the result is a bytes object
    # html = html.decode('gbk')  # decode the bytes into a string using the given encoding
# return html
return papg.text
# matching
def getimg(html):
    imgre = re.compile(r' *zb1qBpg2\.php')  # compile the regex up front for efficiency
    imglist = re.findall(imgre, repr(html))  # collect every matching substring
    for imgurl in imglist:  # loop over the matched image URLs
# print(imgurl)
imgUrl = imgurl.replace("src=\\'.", "")
newImgUrl = "http://ykjcx.yundasys.com/"+imgUrl
        # download
        # urllib.request.urlretrieve(url=newImgUrl, filename='C:/img/0.jpg')  # download the image to the given local path
# response = urlOpener.open(newImgUrl).read()
response = s.get(newImgUrl)
for t1 in s.cookies.keys():
print("pre11111==========" + t1)
        # open an empty image file for writing; 'wb' means write in binary mode
with open('C:/img/0.jpg', 'wb') as file:
            file.write(response.content)  # write the response bytes into the image file
        print("download complete")  # report progress
# matching (older, unused variant)
# def getimg():
#     urllib.request.urlretrieve(url="http://ykjcx.yundasys.com/zb1qBpg2.php", filename='C:/img/0.jpg')  # download the image to a local file
#     print("downloading image %s" % 55555)  # report progress
prehtml=getHtml()
getimg(prehtml)
# Note: the "eng" language data must match tesseract 3.0 here or this call errors; create a tessdata folder next to the exe and put the language files in it
code = pytesseract.image_to_string(Image.open("C:/img/0.jpg"), lang="eng", config="-psm 7")
result = eval(code.replace(":", ""))
print(result)
data = {
"wen": "3839999344061",
"hh": "23",
"yzm": result
}
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "zip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "max-age=0",
"Connection": "keep-alive",
# "Content-Length": "30",
# "Content-Type": "application/x-www-form-urlencoded",
# "Cookie": "PHPSESSID=h26utvhc4t6mvnhnsv4purvk71; JSESSIONID=1rC5bGTCDzMGSC3L8D9h6pwJHFvPQCh3J92Pnn9yLcVYMFyp2N0G!1051678070",
"Host": "ykjcx.yundasys.com",
"Origin": "http://ykjcx.yundasys.com",
"Referer": "http://ykjcx.yundasys.com/go.php",
# "Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36"
}
value = urllib.parse.urlencode(data).encode('utf-8')
request23 = urllib.request.Request('http://ykjcx.yundasys.com/go_wsd.php')
def getInfoHtml():
    # papg = urlOpener.open(request23, data=value)  # open the tracking page URL
# s.cookies.clear_session_cookies().set("PHPSESSID", "h26utvhc4t6mvnhnsv4purvk71")
for t1 in s.cookies.keys():
print("pre=========="+t1)
# s.cookies.clear(domain="PHPSESSID")
# s.cookies.pop("PHPSESSID")
# s.cookies.set("PHPSESSID", "h26utvhc4t6mvnhnsv4purvk71")
papg = s.post('http://ykjcx.yundasys.com/go_wsd.php', data=data)
for t in s.cookies.items():
print(t)
    # html = papg.read()  # read the page source with read(); the result is a bytes object
    # html = html.decode('utf-8')  # decode the bytes into a string
# return html
return papg.text
def getValue(html):
    reg = re.compile(r'var g_s=.*;')  # compile the regex up front for efficiency
    allValue = re.findall(reg, repr(html))  # collect every matching substring
# keyArr = allValue.split(";")
keyArr = allValue[0]
keyValue = keyArr.split(";")
secretValue= keyValue[0].replace("var g_s=", "")
# print(keyValue[0].replace("var g_s=", ""))
return secretValue
def get_js():
# f = open("D:/WorkSpace/MyWorkSpace/jsdemo/js/des_rsa.js",'r',encoding='UTF-8')
f = open("yunda.js", 'r', encoding='gb2312')
line = f.readline()
htmlstr = ''
while line:
htmlstr = htmlstr + line
line = f.readline()
return htmlstr
keyHtml = getInfoHtml()
print(keyHtml)
result = getValue(keyHtml)
print(result)
jsstr = get_js()
ctx = execjs.compile(jsstr)
t=ctx.call('allExec', str(result))
print(t)
s.cookies.clear()
# not finished yet: the parameters after eval still need to be passed in
|
[
"pyin@mo9.com"
] |
pyin@mo9.com
|
a32a8ba1fe6b56f57205930e21f775786d87fe8e
|
b566639f4141c6f9d1b658a6424c92f234ca4eda
|
/src/fusion/final_result_0530_facenet_bow_del.py
|
be046be03fc0511e79d17536d4c1676c17d86fc7
|
[] |
no_license
|
wangzwhu/INS2018
|
4ae649cf7334622dd863e24ae2ae5a9fcba555c7
|
859e81c86711fa05bc862f8fe9b04018320070bb
|
refs/heads/master
| 2020-03-31T22:01:24.715345
| 2018-10-11T14:15:06
| 2018-10-11T14:15:06
| 152,602,422
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,040
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import scipy.io as scio
import sys
sys.path.append("./facenet-master/src/")
import os
import argparse
import numpy as np
import random
from time import sleep
from sklearn.metrics.pairwise import euclidean_distances
from sklearn import preprocessing
def main():
person_result_file = '/net/dl380g7a/export/ddn11c1/wangz/ins2018/distance/face_distance_0522.npy'
scene_result_file = '/net/dl380g7a/export/ddn11c1/wangz/ins2018/distance/scene_similarity_0523.npy'
result_file_path = '/net/dl380g7a/export/ddn11a2/ledduy/kaori-visualsearch/kaori-ins16/result/tv2018/test2018'
shot_meta_file = '/net/dl380g7a/export/ddn11a2/ledduy/kaori-visualsearch/kaori-ins16/meta/Index.mat'
mat_data = scio.loadmat(shot_meta_file)
shot_index = mat_data['Index']
gallery_num = 471526
del_result = np.zeros(gallery_num, dtype=int)
del_result = del_result + 1
del_result[np.where(shot_index[:, 0] == 0)[0]] = 0
print('delete all shot0_ shots')
del_noface_file = '/net/dl380g7a/export/ddn11c1/wangz/ins2018/del/noface_2016.mat'
del_outdoor_file = '/net/dl380g7a/export/ddn11c1/wangz/ins2018/del/outdoor_2017.mat'
mat_data = scio.loadmat(del_noface_file)
del_noface_list = mat_data['noface']
mat_data = scio.loadmat(del_outdoor_file)
del_outdoor_list = mat_data['outdoor']
# cell_num = len(del_noface_list)
# for i in range(cell_num):
# shot_id = del_noface_list[i][0][0]
# index_a = shot_id.find('shot')
# index_b = shot_id.find('_')
# shot_id_num_1 = shot_id[index_a + 4:index_b]
# shot_id_num_2 = shot_id[index_b + 1:]
# shot_position = list(set(np.where(shot_index[:, 0] == int(shot_id_num_1))[0]).intersection(
# set(np.where(shot_index[:, 1] == int(shot_id_num_2))[0])))[0]
# del_result[shot_position] = 0
# print('delete all no face shots')
cell_num = len(del_outdoor_list)
for i in range(cell_num):
shot_id = del_outdoor_list[i][0][0]
index_a = shot_id.find('shot')
index_b = shot_id.find('_')
shot_id_num_1 = shot_id[index_a + 4:index_b]
shot_id_num_2 = shot_id[index_b + 1:]
shot_position = list(set(np.where(shot_index[:, 0] == int(shot_id_num_1))[0]).intersection(
set(np.where(shot_index[:, 1] == int(shot_id_num_2))[0])))[0]
del_result[shot_position] = 0
print('delete all outdoor shots')
chelsea = 0
darrin = 1
garry = 2
heather = 3
jack = 4
jane = 5
max = 6
minty = 7
mo = 8
zainab = 9
cafe1 = 0
cafe2 = 1
foyer = 2
kitchen1 = 3
kitchen2 = 4
laun = 5
LR1 = 6
LR2 = 7
market = 8
pub = 9
topics = np.zeros((30, 2), dtype=int)
topics[:,:] = [[jane, cafe2], [jane, pub], [jane, market],
[chelsea, cafe2], [chelsea, pub], [chelsea, market],
[minty, cafe2], [minty, pub], [minty, market],
[garry, cafe2], [garry, pub], [garry, laun],
[mo, cafe2], [mo, pub], [mo, laun],
[darrin, cafe2], [darrin, pub], [darrin, laun],
[zainab, cafe2], [zainab, laun], [zainab, market],
[heather, cafe2], [heather, laun], [heather, market],
[jack, pub], [jack, laun], [jack, market],
[max, cafe2], [max, laun], [max, market]]
topic_start_id = 9219
person_distance = np.load(person_result_file)
person_distance[person_distance == 0] = 2
person_similarity = 1 / person_distance
person_similarity = preprocessing.normalize(person_similarity, norm='l2')
scene_similarity = np.load(scene_result_file)
scene_similarity = preprocessing.normalize(scene_similarity, norm='l2')
run_id = '0530_facenet_bow_del'
query_id = 'shot1_1'
for i in range(topics.shape[0]):
topic_id = topic_start_id + i
print(topic_id, topics[i,0], topics[i,1])
# final_similarity = person_similarity[:, topics[i,0]] + scene_similarity[:, topics[i,1]]
final_similarity = np.multiply(person_similarity[:, topics[i,0]], scene_similarity[:, topics[i,1]])
final_similarity = np.multiply(final_similarity, del_result)
final_similarity_result = np.argsort(-final_similarity)
if not os.path.exists(os.path.join(result_file_path, run_id, str(topic_id))):
os.makedirs(os.path.join(result_file_path, run_id, str(topic_id)))
output = open(os.path.join(result_file_path, run_id, str(topic_id), 'TRECVID2013_11.res'), 'w')
for j in range(1000):
shot_id = 'shot' + str(shot_index[final_similarity_result[j], 0]) + '_' + str(shot_index[final_similarity_result[j], 1])
shot_score = str(final_similarity[final_similarity_result[j]])
output.write(shot_id + ' #$# ' + query_id + ' #$# ' + shot_score + '\n')
output.close()
if __name__ == '__main__':
main()
|
[
"wangzwhu@gmail.com"
] |
wangzwhu@gmail.com
|
995edcf81eb918f030ada2066c6d7a83ac83a4e2
|
c34a3ca63f6ce89029aac2cc292496a32427487e
|
/vote/migrations/0003_userballot.py
|
35e4540a35f52e89b1f5c65dd82e9c97ceaad739
|
[] |
no_license
|
Sogang-BallotChain/ballotchain_server_django
|
2f25cf2857f27c32655ebb532b152c5f69b6a56c
|
7dc013d1d804028f9e27ca501547247570ccb206
|
refs/heads/master
| 2020-08-16T00:20:11.509827
| 2019-12-10T12:40:26
| 2019-12-10T12:40:26
| 215,429,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Generated by Django 2.0.13 on 2019-11-06 09:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
('vote', '0002_auto_20191106_1343'),
]
operations = [
migrations.CreateModel(
name='UserBallot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
]
|
[
"omnipede@naver.com"
] |
omnipede@naver.com
|
aabcb5b9a5277878a6859e3d30169b3ac42d6f06
|
26664b82833e4c87df360528f5f91dd86626fd9b
|
/analysis/level_3_showdown.py
|
df833cd440caa680e0faa589a390b4b7e9a6d2c9
|
[] |
no_license
|
eoriont/space-empires
|
7e9a167418b0d05f8a97c4a2e7258a941d50e6e9
|
16461734e25e4dfc7386191c6540bb47e5ac352c
|
refs/heads/master
| 2023-03-24T15:47:21.312122
| 2021-03-23T02:38:23
| 2021-03-23T02:38:23
| 277,672,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,543
|
py
|
import sys
import random
sys.path.append('src')
sys.path.append('tests')
sys.path.append('src/strategies/level_3')
from game import Game
from player import Player
from colby_strategy import ColbySiegeStrategyLevel3 as ColbyStrategyLevel3
from david_strategy import DavidStrategyLevel3
from elijah_strategy import ElijahStrategyLevel3
from george_strategy import GeorgeStrategyLevel3
from numbers_berserker import NumbersBerserkerLevel3
from riley_strategy import RileyStrategyLevel3
print("Playing games...")
def matchup(type1, type2):
print(f"\n {type1.__name__} vs {type2.__name__}")
wins = [0, 0, 0]
games = 100
winlog = False
for i in range(games):
first_player = 0 if i < games//2 else 1
random.seed(i+1)
log = i in []
# log = True
game = Game((7, 7), logging=log, rendering=False, game_level=3, die_size=10)
p1 = Player(type1(first_player), "Player1", (3, 6*first_player), game)
p2 = Player(type2(1-first_player), "Player2", (3, 6 - 6*first_player), game)
if first_player == 0:
game.add_player(p1)
game.add_player(p2)
else:
game.add_player(p2)
game.add_player(p1)
game.start()
if game.run_until_completion(max_turns=100):
if winlog: print(type(game.winner.strat).__name__, i)
wins[[type1, type2].index(type(game.winner.strat))] += 1
else:
if winlog: print("tie", i)
wins[2] += 1
if log:
input()
wins = [w/games for w in wins]
return wins
# I had to change colby's strategy
# print(matchup(ColbyStrategyLevel3, GeorgeStrategyLevel3))
# print(matchup(ColbyStrategyLevel3, RileyStrategyLevel3))
# print(matchup(ColbyStrategyLevel3, ElijahStrategyLevel3))
# print(matchup(ColbyStrategyLevel3, DavidStrategyLevel3))
# print(matchup(GeorgeStrategyLevel3, RileyStrategyLevel3))
# print(matchup(GeorgeStrategyLevel3, ElijahStrategyLevel3))
# print(matchup(GeorgeStrategyLevel3, DavidStrategyLevel3))
# print(matchup(RileyStrategyLevel3, ElijahStrategyLevel3))
# print(matchup(RileyStrategyLevel3, DavidStrategyLevel3))
# print(matchup(DavidStrategyLevel3, ElijahStrategyLevel3))
# print(matchup(NumbersBerserkerLevel3, ColbyStrategyLevel3))
# print(matchup(NumbersBerserkerLevel3, GeorgeStrategyLevel3))
# print(matchup(NumbersBerserkerLevel3, RileyStrategyLevel3))
# print(matchup(NumbersBerserkerLevel3, ElijahStrategyLevel3))
# print(matchup(NumbersBerserkerLevel3, DavidStrategyLevel3))
|
[
"elijahotarr@gmail.com"
] |
elijahotarr@gmail.com
|
f9d0e8ac5c8ef4a8c88a80076161397ef7c478b0
|
8c764d1c82d1ce3a1614eadfc73496641af357d2
|
/ecs_service_scan.py
|
987e6e297b8ab0b03dd097d8382418333749e185
|
[] |
no_license
|
ChenChihChiang/aws-tools
|
6177ef2b1bdd21e0c457177e56bb4e99d0d93478
|
3c344793ad9ba57f5cf23ffd6b8f6f2bcda17f8d
|
refs/heads/main
| 2023-02-01T15:32:21.776468
| 2020-12-19T09:00:56
| 2020-12-19T09:00:56
| 322,805,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
import boto3
import json
import sys
class ecs_service_scan:
def __init__(self):
self.services_list = []
self.tasks_list = []
self.result_dict = {}
def status(self, cluster_name, aws_profile='saml'):
count = 0
ecs_session = boto3.Session(profile_name=aws_profile)
ecs = ecs_session.client('ecs')
# get first 100's service, maxResult maximum is 100
services_response = ecs.list_services(cluster=cluster_name, maxResults=100)
self.services_list.extend(services_response['serviceArns'])
# get all of service
while 'nextToken' in services_response:
services_response = ecs.list_services(cluster=cluster_name, maxResults=100, nextToken=services_response['nextToken'])
self.services_list.extend(services_response['serviceArns'])
# get each task settings of each serivce
for i in range(len(self.services_list)):
count = count + 1
svc_response = ecs.describe_services(
cluster=cluster_name,
services=[str(self.services_list[i])],
)
desiredCount = svc_response['services'][0]['deployments'][0]['desiredCount']
runningCount = svc_response['services'][0]['deployments'][0]['runningCount']
pendingCount = svc_response['services'][0]['deployments'][0]['pendingCount']
if desiredCount != runningCount:
self.tasks_list.append(desiredCount)
self.tasks_list.append(runningCount)
self.tasks_list.append(pendingCount)
self.result_dict[self.services_list[i]] = list(self.tasks_list)
self.tasks_list.clear()
# count how many service be scanned
self.result_dict['ecs_service_scan']=str(count)
return json.dumps(self.result_dict,sort_keys=True, indent=1)
if __name__ == '__main__':
    result = ecs_service_scan()
    # defaults, overridden by optional positional args: <aws_profile> [cluster_name]
    AWS_PROFILE = 'default'
    CLUSTER_NAME = 'ecs-cluster'
    if len(sys.argv) >= 2:
        AWS_PROFILE = sys.argv[1]
    if len(sys.argv) >= 3:
        CLUSTER_NAME = sys.argv[2]
    # scan for services where desiredCount != runningCount
    print(result.status(aws_profile=AWS_PROFILE, cluster_name=CLUSTER_NAME))
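# Expected output shape (illustration only; the ARN and counts are placeholders):
# a JSON object mapping each service whose desiredCount != runningCount to
# [desiredCount, runningCount, pendingCount], plus the number of services scanned:
# {
#  "arn:aws:ecs:region:account:service/example-svc": [2, 1, 1],
#  "ecs_service_scan": "42"
# }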
|
[
"chihchinag@gmail.com"
] |
chihchinag@gmail.com
|
e70b565f3cc34b027474339156c213e3bd286211
|
15ccb1606f17be596f810446a397e246ec76c744
|
/test_selenium_1220_01/__init__.py
|
9c1ed58197a5b951b6649e2e1c9a0b4bb50adc18
|
[] |
no_license
|
z944274972/hogwarts
|
ee31b018c1c534757134d2133156a56aa3ab8c61
|
68ba225c6340764c21640b041248d27247ff67ef
|
refs/heads/master
| 2023-03-06T05:13:30.830482
| 2021-02-19T06:05:19
| 2021-02-19T06:05:19
| 319,946,555
| 0
| 0
| null | 2020-12-14T01:27:09
| 2020-12-09T12:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/12/20 14:20
# @Author : zhangyuxin
# @Email : zhangyuxin.aikebo@bytedance.com
# @File : __init__.py.py
|
[
"944274972@qq.com"
] |
944274972@qq.com
|
736e54d54db1964fc8ed8316108efbcffb86ad1e
|
3c96e1393d3418bcc472c04afcbae638bf246573
|
/bin/file_handling
|
b68c1f65fbe390a8df554b83d1885c40c8026449
|
[] |
no_license
|
AnandMurugan/SysAdminPython3
|
579de5858b97bcf2c62e3540dd215a859e25e66d
|
ded84c18cd62190648f78685fbe46041bf168e70
|
refs/heads/master
| 2020-03-22T12:23:59.003370
| 2018-08-01T15:00:41
| 2018-08-01T15:00:41
| 140,037,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
#!/usr/bin/env python3.7
import sys
def get_filename(reprompt=False):
filename = input("Please enter filename to read from (Filename can not be empty):")
return filename or get_filename(True)
filename = get_filename()
try:
f = open(filename,'rt')
except FileNotFoundError as err:
print(f"Error: {err}")
sys.exit(2)
else:
with f:
linenum = input("Please enter a line number to read: ").strip()
linenum = int(linenum)
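        # line numbers are 1-based: 1 refers to the first line of the file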
lines = f.readlines()
        if linenum < 1 or linenum > len(lines):
print("Error: Line number doesn't exist. File is too short")
sys.exit(1)
else:
print(lines[linenum - 1], end="")
|
[
"m.anandsp@gmail.com"
] |
m.anandsp@gmail.com
|
|
c17e5b94641648eecc6bd1b3922b66018cc92697
|
9763c9a192896f7470481de72f6809fffb059cda
|
/ShopingSite/testApi/urls.py
|
3b3fbd067adca5b687966906743e117df6b42df5
|
[] |
no_license
|
prd-huy-nguyen/huyn
|
f1ed0ba4f36d7b4587bd06508fc749d387bb5734
|
abed2249a21ee69da746f321dccec9dbf4379b70
|
refs/heads/main
| 2023-07-19T22:26:55.207843
| 2021-09-07T04:22:50
| 2021-09-07T04:22:50
| 398,010,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
from django.urls import path
from .views import GetAllCouresAPIView
urlpatterns = [
path('', GetAllCouresAPIView.as_view()),
]
|
[
"huy.nguyen@paradox.ai"
] |
huy.nguyen@paradox.ai
|
dccbc0f60c52ad1ea3e6a5c5b40b8aedb8eb3048
|
eed8b5d07503df029f134facecdf1c08b70ea8fc
|
/salt/_grains/ec2_info.py
|
83c39786cfaa6123ce8aba07710c819a77352301
|
[] |
no_license
|
mooperd/saltstack-tw
|
ff38dcd958882dec0a9a09d79f5afc085e040311
|
dea6d05dabe810598421d649f6a132761ca238e1
|
refs/heads/master
| 2021-04-30T16:50:54.316574
| 2017-01-29T23:55:05
| 2017-01-29T23:55:05
| 80,141,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,055
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Get some grains information that is only available in Amazon AWS
Author: Erik Günther, J C Lawrence <claw@kanga.nu>, Mark McGuire
"""
import logging
import httplib
import socket
import json
# Set up logging
LOG = logging.getLogger(__name__)
def _call_aws(url):
"""
Call AWS via httplib. Require correct path.
Host: 169.254.169.254
"""
conn = httplib.HTTPConnection("169.254.169.254", 80, timeout=1)
conn.request('GET', url)
return conn.getresponse()
def _get_ec2_hostinfo(path=""):
"""
Recursive function that walks the EC2 metadata available to each minion.
:param path: URI fragment to append to /latest/meta-data/
Returns a nested dictionary containing all the EC2 metadata. All keys
are converted from dash case to snake case.
"""
resp = _call_aws("/latest/meta-data/%s" % path)
resp_data = resp.read().strip()
d = {}
for line in resp_data.split("\n"):
if line[-1] != "/":
call_response = _call_aws("/latest/meta-data/%s" % (path + line))
call_response_data = call_response.read()
# avoid setting empty grain
if call_response_data == '':
d[line] = None
elif call_response_data is not None:
line = _dash_to_snake_case(line)
try:
data = json.loads(call_response_data)
if isinstance(data, dict):
data = _snake_caseify_dict(data)
d[line] = data
except ValueError:
d[line] = call_response_data
else:
return line
else:
d[_dash_to_snake_case(line[:-1])] = _get_ec2_hostinfo(path + line)
return d
def _camel_to_snake_case(s):
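    # e.g. "instanceType" -> "instance_type", "availabilityZone" -> "availability_zone"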
return s[0].lower() + "".join((("_" + x.lower()) if x.isupper() else x) for x in s[1:])
def _dash_to_snake_case(s):
return s.replace("-", "_")
def _snake_caseify_dict(d):
nd = {}
for k, v in d.items():
nd[_camel_to_snake_case(k)] = v
return nd
def _get_ec2_additional():
"""
Recursive call in _get_ec2_hostinfo() does not retrieve some of
the hosts information like region, availability zone or
architecture.
"""
response = _call_aws("/latest/dynamic/instance-identity/document")
    # _call_aws returns None for all non '200' responses,
# catching that here would rule out AWS resource
if response.status == 200:
response_data = response.read()
data = json.loads(response_data)
return _snake_caseify_dict(data)
else:
raise httplib.BadStatusLine("Could not read EC2 metadata")
def _get_ec2_user_data():
"""
Recursive call in _get_ec2_hostinfo() does not retrieve user-data.
"""
response = _call_aws("/latest/user-data")
    # _call_aws returns None for all non '200' responses,
# catching that here would rule out AWS resource
if response.status == 200:
response_data = response.read()
try:
return json.loads(response_data)
except ValueError as e:
return response_data
elif response.status == 404:
return ''
else:
raise httplib.BadStatusLine("Could not read EC2 user-data")
def ec2_info():
"""
Collect all ec2 grains into the 'ec2' key.
"""
try:
grains = _get_ec2_additional()
grains.update({'user-data': _get_ec2_user_data()})
grains.update(_get_ec2_hostinfo())
return {'ec2' : grains}
except httplib.BadStatusLine, error:
LOG.debug(error)
return {}
except socket.timeout, serr:
LOG.info("Could not read EC2 data (timeout): %s" % (serr))
return {}
except socket.error, serr:
LOG.info("Could not read EC2 data (error): %s" % (serr))
return {}
except IOError, serr:
LOG.info("Could not read EC2 data (IOError): %s" % (serr))
return {}
if __name__ == "__main__":
print ec2_info()
|
[
"a.holway@dcmn.com"
] |
a.holway@dcmn.com
|
f6a18af47b3f3d7f75c367b5c09dedb414083527
|
0b94dd0019538d329b875e9f59fe93fc5af4afa2
|
/authentication/models.py
|
6e306ce7607f29d7fe6d32e277d94498f06ad11e
|
[] |
no_license
|
iwansyahp/thinkster-django-angular-boilerplate
|
9b37511de2b5a9d125a7358d828a6e4edfb8462c
|
37d2bcc28208f7d371479bd01fb14ebdb000f941
|
refs/heads/release
| 2021-01-11T20:21:02.619372
| 2017-01-20T16:48:24
| 2017-01-20T16:48:24
| 79,097,088
| 0
| 0
| null | 2017-01-16T08:16:14
| 2017-01-16T08:16:14
| null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
'''Models used by this app'''
from django.contrib.auth.models import BaseUserManager #for Managers
#for Models
from django.contrib.auth.models import AbstractBaseUser
from django.db import models
class AccountManager(BaseUserManager):
    ''' Manager for the Account model '''
def create_user(self, email, password=None, **kwargs):
        ''' Create a new user '''
if not email:
raise ValueError('Users must have a valid email address')
if not kwargs.get('username'):
raise ValueError('Users must have a valid username')
account = self.model(
email=self.normalize_email(email),
username=kwargs.get('username')
)
account.set_password(password)
account.save()
return account
def create_superuser(self, email, password, **kwargs):
        ''' This method is called when a superuser/admin is created '''
        # call the create_user function defined above
account = self.create_user(email, password, **kwargs)
        # save this account as an admin
account.is_admin = True
account.save()
return account
# Create your models here.
class Account(AbstractBaseUser):
    '''Model for user accounts'''
email = models.EmailField(unique=True)
username = models.CharField(max_length=40, unique=True)
first_name = models.CharField(max_length=40, blank=True)
last_name = models.CharField(max_length=40, blank=True)
tagline = models.CharField(max_length=140, blank=True)
is_admin = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = AccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
def __unicode__(self):
return self.email
def get_full_name(self):
return ' '.join([self.first_name, self.last_name])
def get_short_name(self):
return self.first_name
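# Hypothetical usage sketch (assumes this app's Account is configured as AUTH_USER_MODEL):
#   user = Account.objects.create_user('user@example.com', 'secret', username='user')
#   admin = Account.objects.create_superuser('admin@example.com', 'secret', username='admin')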
|
[
"iwansyahp@gmail.com"
] |
iwansyahp@gmail.com
|
07c097e2daab7284db1e9f265146389973b99703
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/forwarding_rules_utils.py
|
7d2f2aab880588cfe049e2f7b6af05b97ce04ff3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 9,578
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for forwarding rules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import lister
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.forwarding_rules import flags
from googlecloudsdk.core import properties
def _ValidateGlobalArgs(args):
"""Validate the global forwarding rules args."""
if args.target_instance:
raise calliope_exceptions.ToolException(
'You cannot specify [--target-instance] for a global '
'forwarding rule.')
if args.target_pool:
raise calliope_exceptions.ToolException(
'You cannot specify [--target-pool] for a global '
'forwarding rule.')
if getattr(args, 'backend_service', None):
raise calliope_exceptions.ToolException(
'You cannot specify [--backend-service] for a global '
'forwarding rule.')
if getattr(args, 'load_balancing_scheme', None) == 'INTERNAL':
raise calliope_exceptions.ToolException(
'You cannot specify internal [--load-balancing-scheme] for a global '
'forwarding rule.')
if getattr(args, 'target_vpn_gateway', None):
raise calliope_exceptions.ToolException(
'You cannot specify [--target-vpn-gateway] for a global '
'forwarding rule.')
if getattr(args, 'load_balancing_scheme', None) == 'INTERNAL_SELF_MANAGED':
if not (getattr(args, 'target_http_proxy', None) or
getattr(args, 'target_https_proxy', None)):
raise calliope_exceptions.ToolException(
'You must specify either [--target-http-proxy] or '
'[--target-https-proxy] for an INTERNAL_SELF_MANAGED '
'[--load-balancing-scheme].')
if getattr(args, 'subnet', None):
raise calliope_exceptions.ToolException(
'You cannot specify [--subnet] for an INTERNAL_SELF_MANAGED '
'[--load-balancing-scheme].')
if not getattr(args, 'address', None):
raise calliope_exceptions.ToolException(
'You must specify [--address] for an INTERNAL_SELF_MANAGED '
'[--load-balancing-scheme]')
def GetGlobalTarget(resources, args):
"""Return the forwarding target for a globally scoped request."""
_ValidateGlobalArgs(args)
if args.target_http_proxy:
return flags.TargetHttpProxyArg().ResolveAsResource(
args, resources, default_scope=compute_scope.ScopeEnum.GLOBAL)
if args.target_https_proxy:
return flags.TargetHttpsProxyArg().ResolveAsResource(
args, resources, default_scope=compute_scope.ScopeEnum.GLOBAL)
if args.target_ssl_proxy:
return flags.TARGET_SSL_PROXY_ARG.ResolveAsResource(args, resources)
if getattr(args, 'target_tcp_proxy', None):
return flags.TARGET_TCP_PROXY_ARG.ResolveAsResource(args, resources)
def _ValidateRegionalArgs(args):
"""Validate the regional forwarding rules args.
Args:
args: The arguments given to the create/set-target command.
"""
if getattr(args, 'global', None):
raise calliope_exceptions.ToolException(
'You cannot specify [--global] for a regional '
'forwarding rule.')
# For flexible networking, with STANDARD network tier the regional forwarding
# rule can have global target. The request may not specify network tier
# because it can be set as default project setting, so here let backend do
# validation.
if args.target_instance_zone and not args.target_instance:
raise calliope_exceptions.ToolException(
'You cannot specify [--target-instance-zone] unless you are '
'specifying [--target-instance].')
if getattr(args, 'load_balancing_scheme', None) == 'INTERNAL':
if getattr(args, 'port_range', None):
raise calliope_exceptions.ToolException(
'You cannot specify [--port-range] for a forwarding rule '
'whose [--load-balancing-scheme] is internal, '
'please use [--ports] flag instead.')
schemes_allowing_network_fields = ['INTERNAL', 'INTERNAL_MANAGED']
if (getattr(args, 'subnet', None) or
getattr(args, 'network', None)) and getattr(
args, 'load_balancing_scheme',
None) not in schemes_allowing_network_fields:
raise calliope_exceptions.ToolException(
'You cannot specify [--subnet] or [--network] for non-internal '
'[--load-balancing-scheme] forwarding rule.')
if getattr(args, 'load_balancing_scheme', None) == 'INTERNAL_SELF_MANAGED':
raise calliope_exceptions.ToolException(
'You cannot specify an INTERNAL_SELF_MANAGED [--load-balancing-scheme] '
'for a regional forwarding rule.')
def GetRegionalTarget(client,
resources,
args,
forwarding_rule_ref=None,
include_l7_internal_load_balancing=False):
"""Return the forwarding target for a regionally scoped request."""
_ValidateRegionalArgs(args)
if forwarding_rule_ref:
region_arg = forwarding_rule_ref.region
project_arg = forwarding_rule_ref.project
else:
region_arg = args.region
project_arg = None
if args.target_pool:
if not args.target_pool_region and region_arg:
args.target_pool_region = region_arg
target_ref = flags.TARGET_POOL_ARG.ResolveAsResource(
args,
resources,
scope_lister=compute_flags.GetDefaultScopeLister(client))
target_region = target_ref.region
elif args.target_instance:
target_ref = flags.TARGET_INSTANCE_ARG.ResolveAsResource(
args,
resources,
scope_lister=_GetZonesInRegionLister(
['--target-instance-zone'], region_arg, client, project_arg or
properties.VALUES.core.project.GetOrFail()))
target_region = utils.ZoneNameToRegionName(target_ref.zone)
elif getattr(args, 'target_vpn_gateway', None):
if not args.target_vpn_gateway_region and region_arg:
args.target_vpn_gateway_region = region_arg
target_ref = flags.TARGET_VPN_GATEWAY_ARG.ResolveAsResource(
args, resources)
target_region = target_ref.region
elif getattr(args, 'backend_service', None):
if not args.backend_service_region and region_arg:
args.backend_service_region = region_arg
target_ref = flags.BACKEND_SERVICE_ARG.ResolveAsResource(args, resources)
target_region = target_ref.region
elif args.target_http_proxy:
target_ref = flags.TargetHttpProxyArg(
include_l7_internal_load_balancing=include_l7_internal_load_balancing
).ResolveAsResource(
args, resources, default_scope=compute_scope.ScopeEnum.GLOBAL)
target_region = region_arg
elif args.target_https_proxy:
target_ref = flags.TargetHttpsProxyArg(
include_l7_internal_load_balancing=include_l7_internal_load_balancing
).ResolveAsResource(
args, resources, default_scope=compute_scope.ScopeEnum.GLOBAL)
target_region = region_arg
elif args.target_ssl_proxy:
target_ref = flags.TARGET_SSL_PROXY_ARG.ResolveAsResource(args, resources)
target_region = region_arg
elif args.target_tcp_proxy:
target_ref = flags.TARGET_TCP_PROXY_ARG.ResolveAsResource(args, resources)
target_region = region_arg
return target_ref, target_region
def _GetZonesInRegionLister(flag_names, region, compute_client, project):
"""Lists all the zones in a given region."""
def Lister(*unused_args):
"""Returns a list of the zones for a given region."""
if region:
filter_expr = 'name eq {0}.*'.format(region)
else:
filter_expr = None
errors = []
global_resources = lister.GetGlobalResources(
service=compute_client.apitools_client.zones,
project=project,
filter_expr=filter_expr,
http=compute_client.apitools_client.http,
batch_url=compute_client.batch_url,
errors=errors)
choices = [resource for resource in global_resources]
if errors or not choices:
punctuation = ':' if errors else '.'
utils.RaiseToolException(
errors,
'Unable to fetch a list of zones. Specifying [{0}] may fix this '
'issue{1}'.format(', or '.join(flag_names), punctuation))
return {compute_scope.ScopeEnum.ZONE: choices}
return Lister
def SendGetRequest(client, forwarding_rule_ref):
"""Send forwarding rule get request."""
if forwarding_rule_ref.Collection() == 'compute.globalForwardingRules':
return client.apitools_client.globalForwardingRules.Get(
client.messages.ComputeGlobalForwardingRulesGetRequest(
**forwarding_rule_ref.AsDict()))
else:
return client.apitools_client.forwardingRules.Get(
client.messages.ComputeForwardingRulesGetRequest(
**forwarding_rule_ref.AsDict()))
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
8b793e47681d3f7f3af71a3491179e1bcd1a747b
|
0d24433894b0b2955a351fdf63a10173b948b3fc
|
/teafacto/scripts/simplequestions/fullrank/alleval.py
|
7632df8a65a41b30c1bee533fab43d9cd7a7da24
|
[] |
no_license
|
linxiexiong/teafacto
|
9209bea80bd76d84c18b7f8afb353b61f0fba8b2
|
1c749ee66dc21c2efe6b4d105f227c35ae969815
|
refs/heads/master
| 2021-06-16T15:16:40.064465
| 2017-05-05T18:25:42
| 2017-05-05T18:25:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
import sys, os, re
from textwrap import dedent
from teafacto.util import argprun
from collections import OrderedDict
def main(scriptname="testrunscript.py",
modelfilepattern="testmodelfile{}.txt",
modelfile="none",
numtestcans="5,10,400",
multiprune="0,1",
mode="concat,seq,multi,multic"):
if not os.path.exists("alleval"):
os.makedirs("alleval")
loc = locals()
griddict = OrderedDict({x: loc[x].split(",") for x in "numtestcans multiprune mode".split()})
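    # griddict maps each swept option name to the list of comma-separated values to try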
#print griddict
if modelfile == "none":
for filename in os.listdir("."):
m = re.match("^{}$".format(modelfilepattern.format("(\d{0,4}\.?(\d{0,3}ep)?)")), filename)
if m:
modelname = m.group(1)
print filename, modelname
else:
print modelfile
if modelfile == "none":
for filename in os.listdir("."):
m = re.match("^{}$".format(modelfilepattern.format("(\d{0,4}\.?(\d{0,3}ep)?)")), filename)
if m:
modelname = m.group(1)
runstuff(modelname, griddict, scriptname)
else:
modelname = modelfile
runstuff(modelname, griddict, scriptname)
def runstuff(modelname, griddict, scriptname):
for i in range(reduce(lambda x, y: x * y, map(len, griddict.values()))):
indexes = OrderedDict()
for k, v in griddict.items():
indexes[k] = i % len(v)
i //= len(v)
#print indexes
options = "".join(["-{} {} ".format(x, griddict[x][indexes[x]]) for x in griddict.keys()])
cmd = """python {}
-loadmodel {}
{}"""\
.format(scriptname,
modelname,
options
)
cmd = re.sub("\n", "", cmd)
cmd = re.sub("\s{2,}", " ", cmd)
print cmd
targetname = "alleval/{}.out".format(re.sub("\s", "_", cmd))
os.system("echo {} > {}".format(cmd, targetname))
os.system("{} >> {} 2>&1".format(cmd, targetname))
if __name__ == "__main__":
argprun(main)
|
[
"lukovnik@drogon.iai.uni-bonn.de"
] |
lukovnik@drogon.iai.uni-bonn.de
|
50ea0e511945325609d4de6fbc078bc8ce361055
|
5039bd51408727ca6595faa513bd7a7a72188146
|
/instagram/migrations/0002_auto_20180725_1623.py
|
64388b488c3880b6f53aa45ecb39943246354e3d
|
[
"MIT"
] |
permissive
|
Imma7/Instagram
|
b23508d5e3ff56c645361d77dcb23fa79631e143
|
32627573cbe4147433c9e02755b676b0b8dd7c20
|
refs/heads/master
| 2023-06-11T15:31:36.567535
| 2023-06-02T11:34:02
| 2023-06-02T11:34:02
| 142,843,047
| 5
| 11
| null | 2022-11-22T02:48:36
| 2018-07-30T07:49:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-25 13:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('instagram', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='gallery/')),
('image_name', models.CharField(max_length=30)),
('image_caption', models.CharField(blank=True, max_length=30, null=True)),
('comments', models.TextField(blank=True, max_length=50, null=True)),
('likes', models.IntegerField()),
('pub_date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_photo', models.ImageField(upload_to='profile/')),
('bio', models.TextField(blank=True, max_length=50, null=True)),
('username', models.CharField(max_length=30)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagram.Profile'),
),
]
|
[
"immamugambi@gmail.com"
] |
immamugambi@gmail.com
|
c5615aa1c0d85191890ed0a7425bda23af1eb627
|
d4cbf415ff956127085ad5cbf0046fd0443ceb01
|
/Assignment3_201501090/test_Block.py
|
5f45364277f6e28852f9452ef9e5f7e32fa2a79b
|
[] |
no_license
|
nikhilrayaprolu/pytest_practice
|
827c46b63222e7d198a46ba9bd3195c1d5e8814f
|
42dddae1500d2bebd70be739bb3efefadd49a1f2
|
refs/heads/master
| 2021-01-01T15:49:08.591391
| 2017-07-19T11:57:29
| 2017-07-19T11:57:29
| 97,711,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,901
|
py
|
from Block import Block
blocks=Block()
class TestBlocks():
def test_initial_S_SHAPE_TEMPLATE(self):
assert blocks.S_SHAPE_TEMPLATE == [['.....',
'.....',
'..OO.',
'.OO..',
'.....'],
['.....',
'..O..',
'..OO.',
'...O.',
'.....']]
def test_initial_Z_SHAPE_TEMPLATE(self):
assert blocks.Z_SHAPE_TEMPLATE == [['.....',
'.....',
'.OO..',
'..OO.',
'.....'],
['.....',
'..O..',
'.OO..',
'.O...',
'.....']]
    def test_initial_I_SHAPE_TEMPLATE(self):
assert blocks.I_SHAPE_TEMPLATE == [['..O..',
'..O..',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'OOOO.',
'.....',
'.....']]
    def test_initial_O_SHAPE_TEMPLATE(self):
assert blocks.O_SHAPE_TEMPLATE == [['.....',
'.....',
'.OO..',
'.OO..',
'.....']]
    def test_initial_J_SHAPE_TEMPLATE(self):
assert blocks.J_SHAPE_TEMPLATE == [['.....',
'.O...',
'.OOO.',
'.....',
'.....'],
['.....',
'..OO.',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'...O.',
'.....'],
['.....',
'..O..',
'..O..',
'.OO..',
'.....']]
    def test_initial_L_SHAPE_TEMPLATE(self):
assert blocks.L_SHAPE_TEMPLATE == [['.....',
'...O.',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..O..',
'..OO.',
'.....'],
['.....',
'.....',
'.OOO.',
'.O...',
'.....'],
['.....',
'.OO..',
'..O..',
'..O..',
'.....']]
    def test_initial_T_SHAPE_TEMPLATE(self):
assert blocks.T_SHAPE_TEMPLATE == [['.....',
'..O..',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..OO.',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'..O..',
'.....'],
['.....',
'..O..',
'.OO..',
'..O..',
'.....']]
def test_PIECES(self):
blocks.PIECES = {'S': blocks.S_SHAPE_TEMPLATE,
'Z': blocks.Z_SHAPE_TEMPLATE,
'J': blocks.J_SHAPE_TEMPLATE,
'L': blocks.L_SHAPE_TEMPLATE,
'I': blocks.I_SHAPE_TEMPLATE,
'O': blocks.O_SHAPE_TEMPLATE,
'T': blocks.T_SHAPE_TEMPLATE}
def test_getnewpieceshape(self):
newpiece=blocks.getNewPiece()
print newpiece
assert newpiece['shape'] in blocks.PIECES
def test_getnewrotation(self):
newpiece=blocks.getNewPiece()
print newpiece
assert newpiece['rotation'] in [0,1,2,3]
def test_getnewy(self):
newpiece=blocks.getNewPiece()
print newpiece
assert newpiece['y']==-2
def test_moveLeft(self):
initialx=blocks.fallingPiece['x']
blocks.moveLeft()
finalx=blocks.fallingPiece['x']
assert finalx==initialx-1
def test_moveRight(self):
initialx=blocks.fallingPiece['x']
blocks.moveRight()
finalx=blocks.fallingPiece['x']
assert finalx==initialx+1
def test_Rotate(self):
initialrotation=blocks.fallingPiece['rotation']
blocks.Rotate(2);
finalrotation=blocks.fallingPiece['rotation']
assert finalrotation==(initialrotation+2)%2
def test_Rotateafter20times(self):
initialrotation=blocks.fallingPiece['rotation']
t=20
while t:
blocks.Rotate(1)
t=t-1
finalrotation=blocks.fallingPiece['rotation']
assert finalrotation==(initialrotation+2)%2
|
[
"nikhil684@gmail.com"
] |
nikhil684@gmail.com
|
4417d0458bd16603cd2ee90957dd17795072e5fd
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/dbformariadb/get_virtual_network_rule.py
|
7463391edb58270bf8c428feefc9f27884d993d0
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 5,281
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetVirtualNetworkRuleResult',
'AwaitableGetVirtualNetworkRuleResult',
'get_virtual_network_rule',
]
@pulumi.output_type
class GetVirtualNetworkRuleResult:
"""
A virtual network rule.
"""
def __init__(__self__, id=None, ignore_missing_vnet_service_endpoint=None, name=None, state=None, type=None, virtual_network_subnet_id=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ignore_missing_vnet_service_endpoint and not isinstance(ignore_missing_vnet_service_endpoint, bool):
raise TypeError("Expected argument 'ignore_missing_vnet_service_endpoint' to be a bool")
pulumi.set(__self__, "ignore_missing_vnet_service_endpoint", ignore_missing_vnet_service_endpoint)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_subnet_id and not isinstance(virtual_network_subnet_id, str):
raise TypeError("Expected argument 'virtual_network_subnet_id' to be a str")
pulumi.set(__self__, "virtual_network_subnet_id", virtual_network_subnet_id)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ignoreMissingVnetServiceEndpoint")
def ignore_missing_vnet_service_endpoint(self) -> Optional[bool]:
"""
Create firewall rule before the virtual network has vnet service endpoint enabled.
"""
return pulumi.get(self, "ignore_missing_vnet_service_endpoint")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
Virtual Network Rule State
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkSubnetId")
def virtual_network_subnet_id(self) -> str:
"""
The ARM resource id of the virtual network subnet.
"""
return pulumi.get(self, "virtual_network_subnet_id")
class AwaitableGetVirtualNetworkRuleResult(GetVirtualNetworkRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkRuleResult(
id=self.id,
ignore_missing_vnet_service_endpoint=self.ignore_missing_vnet_service_endpoint,
name=self.name,
state=self.state,
type=self.type,
virtual_network_subnet_id=self.virtual_network_subnet_id)
def get_virtual_network_rule(resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
virtual_network_rule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkRuleResult:
"""
A virtual network rule.
API Version: 2018-06-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_name: The name of the server.
:param str virtual_network_rule_name: The name of the virtual network rule.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
__args__['virtualNetworkRuleName'] = virtual_network_rule_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:dbformariadb:getVirtualNetworkRule', __args__, opts=opts, typ=GetVirtualNetworkRuleResult).value
return AwaitableGetVirtualNetworkRuleResult(
id=__ret__.id,
ignore_missing_vnet_service_endpoint=__ret__.ignore_missing_vnet_service_endpoint,
name=__ret__.name,
state=__ret__.state,
type=__ret__.type,
virtual_network_subnet_id=__ret__.virtual_network_subnet_id)
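# Hypothetical usage sketch (resource names below are illustrative, not from the original file):
#   rule = get_virtual_network_rule(resource_group_name='my-rg',
#                                   server_name='my-mariadb-server',
#                                   virtual_network_rule_name='my-vnet-rule')
#   pulumi.export('subnetId', rule.virtual_network_subnet_id)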
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
a32ac7028c93de8ff3f30e39d34e0e057f062658
|
47c5ba839f6b88b0c7b03ac33ff5f22f6d510649
|
/src/curriculum_learning.py
|
005713f2404431f49ea86bb812a9ab4c69a8189b
|
[] |
no_license
|
danielvachalek/ElephantCallAI
|
4a6f0841842946637692dd13312e4343fcd5b722
|
4ea208411129706fdab9292054cd5591a3204d28
|
refs/heads/master
| 2023-01-09T03:24:32.001038
| 2020-09-11T17:43:41
| 2020-09-11T17:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,082
|
py
|
from tensorboardX import SummaryWriter
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch import optim
import sys
import time
import os
import argparse
import parameters
from data import get_loader, get_loader_fuzzy
from utils import create_save_path, create_dataset_path
from models import * # Note for some reason we need to import the models as well
from loss import get_loss
from train import train_curriculum
### THINGS THAT I WANT TO DO
"""
Where do we want to start writing this code:
- We can still leverage the train_epoch and val_epoch functions in
train.py. Note later if we really want to do the batches or iteration
level than we can re-write this. Overall, the train.py file should
really just be responsible for either training one epoch, training
several epochs, or training several iterations. A separate file
should be responsible for re-sampling the data and then calling
the necessary train functions.
- In this class we should define the outward framework for doing the curriculum
learning, including the curriculum scheduling and curriculum defining
- Methods to write here:
...
Look at some profiling:
- After five epochs keep a histogram of how many get x segments incorrect
- keep track of the variance of number of wrong for each example! Namely,
for each example, see how many wrong were for that example after
5, 10, 15, 20, 25 epochs and then calculate the # wrong variance.
- Also keep track of the variance of "avg" confidence like in focal loss
- Methods to write:
- Profiling method that trains a model for 5 then 10 then 15 then ...
epochs and at each time calls a helper method that runs through the
full training data to compute statistics based on the full training data.
- Full data "scoring" statistics computation. Takes some training model
and runs it over the full dataset to compute per window statistics such
as:
- the number of incorrect chunks
            - the avg prediction confidence for the correct slice class (i.e. 
think the chunk focal loss)
-
"""
parser = argparse.ArgumentParser()
parser.add_argument('--local_files', dest='local_files', action='store_true',
help='Flag specifying to read data from the local elephant_dataset directory.'
'The default is to read from the quatro data directory.')
parser.add_argument('--save_local', dest='save_local', action='store_true',
help='Flag specifying to save model run information to the local models directory.'
'The default is to save to the quatro data directory.')
# Just so numpy does not print ridiculously unreadable stuff
np.set_printoptions(precision=2)
# SHOULD JUST DO FOR NEG SAMPLES!!!
def model_statistics(model, full_dataloaders, threshold=0.5):
"""
Full data "scoring" statistics computation. Takes a model
and runs it over the full datasets to compute per window statistics such
as:
- the number of incorrect chunks
        - the avg prediction confidence for the correct slice class (i.e. 
think the chunk focal loss)
NOTE: Make sure these are not shuffled datasets!
"""
# Used for computing the avg of 1 - correct class pred probabilities
bce = nn.BCEWithLogitsLoss(reduction='none')
total_window_errors = {'train': np.zeros(0), 'valid': np.zeros(0)}
total_window_inv_avg_predictions = {'train': np.zeros(0), 'valid': np.zeros(0)}
for phase in ['train', 'valid']:
dataloader = full_dataloaders[phase]
# Run the model over the data
print ("Num batches:", len(dataloader))
for idx, batch in enumerate(dataloader):
if idx % 1000 == 0:
print("Gone through {} batches".format(idx))
inputs = batch[0].clone().float()
labels = batch[1].clone().float()
inputs = inputs.to(parameters.device)
labels = labels.to(parameters.device)
# ONLY Squeeze the last dim!
logits = model(inputs).squeeze(-1) # Shape - (batch_size, seq_len)
# Now for each chunk we want to see whether it should be flagged as
# a true false positive. For now do "approx" by counting number pos samples
predictions = torch.sigmoid(logits)
# Pre-compute the number of pos. slices in each chunk
# Threshold the predictions - May add guassian blur
binary_preds = torch.where(predictions > threshold, torch.tensor(1.0).to(parameters.device), torch.tensor(0.0).to(parameters.device))
window_errors = torch.sum(binary_preds != labels, axis = 1).cpu().detach().numpy()
total_window_errors[phase] = np.concatenate((total_window_errors[phase], window_errors))
# Get for each chunk the pred prob for the correct class
bce_loss = bce(logits, labels)
pts = torch.exp(-bce_loss)
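            # exp(-BCE) recovers p_t, the probability the model assigns to the true label of each slice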
# Now the difficulty is 1 - pts
# i.e. hard examples have high hardness score as
# the model is not confident for many slices (low pts)
# so (1-low) = high
window_inv_avg_predictions = torch.mean(1 - pts, axis = 1).cpu().detach().numpy()
total_window_inv_avg_predictions[phase] = np.concatenate((total_window_inv_avg_predictions[phase], window_inv_avg_predictions))
#total_window_errors[phase] = np.expand_dims(total_window_errors[phase], axis=0)
#total_window_inv_avg_predictions[phase] = np.expand_dims(total_window_inv_avg_predictions[phase], axis=0)
# Note for ease of concatenation later expand the second dim!
stats = {'window_errors': total_window_errors,
'window_inv_avg_predictions': total_window_inv_avg_predictions}
return stats
def curriculum_profiling(model, train_dataloaders, full_dataloaders, loss_func, optimizer,
scheduler, writer, include_boundaries=False):
"""
Trains a model for 5 then 10 then 15 then ... epochs
and at each time calls a helper method that runs through the
full training data to compute statistics based on the full training data.
"""
# Things to profile
curriculum_file = '../Curriculum_profiling/'
train_window_errors = None
train_inv_avg_predictions = None
test_window_errors = None
test_inv_avg_predictions = None
# Train 5, 10, 15, 20, 25 epochs
for i in range(20):
# In train curriculum, for now do not return model based on best performance
# but simply return the model at the end of that training loop
model_weights = train_curriculum(model, train_dataloaders, loss_func, optimizer,
scheduler, writer, epochs=5, include_boundaries=include_boundaries)
# Technically model will already have the weights we want since we are returning
# the model weights after 5 epochs not the best epoch run; however, maybe later this
# will change
model.load_state_dict(model_weights)
# Profile the model over the full training dataset and test dataset to see
# window difficulties and variations.
model_stats = model_statistics(model, full_dataloaders)
train_window_error_i = np.expand_dims(model_stats['window_errors']['train'], axis=0)
train_inv_avg_prediction_i = np.expand_dims(model_stats['window_inv_avg_predictions']['train'], axis=0)
test_window_error_i = np.expand_dims(model_stats['window_errors']['valid'], axis=0)
test_inv_avg_prediction_i = np.expand_dims(model_stats['window_inv_avg_predictions']['valid'], axis=0)
if i == 0:
train_window_errors = train_window_error_i
train_inv_avg_predictions = train_inv_avg_prediction_i
test_window_errors = test_window_error_i
test_inv_avg_predictions = test_inv_avg_prediction_i
else:
# Concatenate these together so that we can get std info
train_window_errors = np.concatenate((train_window_errors, train_window_error_i))
train_inv_avg_predictions = np.concatenate((train_inv_avg_predictions, train_inv_avg_prediction_i))
test_window_errors = np.concatenate((test_window_errors, test_window_error_i))
test_inv_avg_predictions = np.concatenate((test_inv_avg_predictions, test_inv_avg_prediction_i))
# Save the histograms so that we can open them in jupyter
print ("Saving Histograms for Iteration i:", i)
# Number of incorrect slices distribution
n, bins, _ = plt.hist(train_window_error_i[0], bins=25)
plt.title('Train - Number incorrect slices iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Train_Num_Incorrect_i-" + str((i+1) * 5) + ".png")
# Print out to visually inspect
print ('Train - Number incorrect slices iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print ('Number Incorrect > 15:', np.sum(train_window_error_i[0] > 15))
print ('Number Incorrect > 25:', np.sum(train_window_error_i[0] > 25))
print('------------------------------')
plt.clf()
n, bins, _ = plt.hist(test_window_error_i[0], bins=25)
plt.title('Valid - Number incorrect slices iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Valid_Num_Incorrect_i-" + str((i+1) * 5) + ".png")
print ('Valid - Number incorrect slices iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print ('Number Incorrect > 15:', np.sum(test_window_error_i[0] > 15))
        print ('Number Incorrect > 25:', np.sum(test_window_error_i[0] > 25))
print('------------------------------')
plt.clf()
# 1 - avg. prediction confidence distribution
n, bins, _ = plt.hist(train_inv_avg_prediction_i[0], bins=25)
plt.title('Train - (1 - avg. prediction confidence) iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Train_pred_condfidence_i-" + str((i+1) * 5) + ".png")
print ('Train - (1 - avg. prediction confidence) iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print('------------------------------')
plt.clf()
        n, bins, _ = plt.hist(test_inv_avg_prediction_i[0], bins=25)
plt.title('Valid - (1 - avg. prediction confidence) iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Valid_pred_condfidence_i-" + str((i+1) * 5) + ".png")
print ('Valid - (1 - avg. prediction confidence) iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print('------------------------------')
plt.clf()
# Look at the distribution of variances across the
# trails until now!
if i != 0:
            # Now compute the variance-related statistics
# Let us do this part a bit later!
std_train_window_errors = np.std(train_window_errors, axis=0)
std_train_inv_avg_predictions = np.std(train_inv_avg_predictions, axis=0)
std_test_window_errors = np.std(test_window_errors, axis=0)
std_test_inv_avg_predictions = np.std(test_inv_avg_predictions, axis=0)
n, bins, _ = plt.hist(std_train_window_errors, bins=20)
plt.title('Train - STD incorrect slices after iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Train_std_window_errors_i-" + str((i+1) * 5) + ".png")
print ('Train - STD incorrect slices after iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print('------------------------------')
plt.clf()
n, bins, _ = plt.hist(std_train_inv_avg_predictions, bins=20)
plt.title('Train - STD (1 - avg. prediction confidence) after iteration' + str((i + 1) * 5))
plt.savefig(curriculum_file + "Train_std_pred_condfidence_i-" + str((i+1) * 5) + ".png")
            print ('Train - STD (1 - avg. prediction confidence) after iteration' + str((i + 1) * 5))
print ('Vals:', n)
print ('Bins:', bins)
print('------------------------------')
plt.clf()
# We should also save the actual saved stats to look at later!
np.save(curriculum_file + 'train_window_errors', train_window_errors)
np.save(curriculum_file + 'train_inv_avg_predictions', train_inv_avg_predictions)
np.save(curriculum_file + 'test_window_errors', test_window_errors)
np.save(curriculum_file + 'test_inv_avg_predictions', test_inv_avg_predictions)
print ("Completed")
def main():
args = parser.parse_args()
if args.local_files:
train_data_path = parameters.LOCAL_TRAIN_FILES
test_data_path = parameters.LOCAL_TEST_FILES
full_train_path = parameters.LOCAL_FULL_TRAIN
full_test_path = parameters.LOCAL_FULL_TEST
else:
if parameters.DATASET.lower() == "noab":
train_data_path = parameters.REMOTE_TRAIN_FILES
test_data_path = parameters.REMOTE_TEST_FILES
full_train_path = parameters.REMOTE_FULL_TRAIN
full_test_path = parameters.REMOTE_FULL_TEST
else:
train_data_path = parameters.REMOTE_BAI_TRAIN_FILES
test_data_path = parameters.REMOTE_BAI_TEST_FILES
full_train_path = parameters.REMOTE_FULL_TRAIN_BAI
full_test_path = parameters.REMOTE_FULL_TEST_BAI
train_data_path, include_boundaries = create_dataset_path(train_data_path, neg_samples=parameters.NEG_SAMPLES,
call_repeats=parameters.CALL_REPEATS,
shift_windows=parameters.SHIFT_WINDOWS)
test_data_path, _ = create_dataset_path(test_data_path, neg_samples=parameters.TEST_NEG_SAMPLES,
call_repeats=1)
train_loader = get_loader_fuzzy(train_data_path, parameters.BATCH_SIZE, random_seed=parameters.DATA_LOADER_SEED,
norm=parameters.NORM, scale=parameters.SCALE,
include_boundaries=include_boundaries, shift_windows=parameters.SHIFT_WINDOWS)
test_loader = get_loader_fuzzy(test_data_path, parameters.BATCH_SIZE, random_seed=parameters.DATA_LOADER_SEED,
norm=parameters.NORM, scale=parameters.SCALE, include_boundaries=include_boundaries)
# For now we don't need to save the model
save_path = create_save_path(time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()), args.save_local)
train_dataloaders = {'train':train_loader, 'valid':test_loader}
# Load the full data sets - SET SHUFFLE = False
full_train_loader = get_loader_fuzzy(full_train_path, parameters.BATCH_SIZE, shuffle=False,
norm=parameters.NORM, scale=parameters.SCALE,
include_boundaries=False, shift_windows=False,
is_full_dataset=True)
full_test_loader = get_loader_fuzzy(full_test_path, parameters.BATCH_SIZE, shuffle=False,
norm=parameters.NORM, scale=parameters.SCALE, include_boundaries=False)
full_dataloaders = {'train':full_train_loader, 'valid': full_test_loader}
model = get_model(parameters.MODEL_ID)
model.to(parameters.device)
print(model)
writer = SummaryWriter(save_path)
writer.add_scalar('batch_size', parameters.BATCH_SIZE)
writer.add_scalar('weight_decay', parameters.HYPERPARAMETERS[parameters.MODEL_ID]['l2_reg'])
# Want to use focal loss! Next thing to check on!
loss_func, include_boundaries = get_loss()
# Honestly probably do not need to have hyper-parameters per model, but leave it for now.
optimizer = torch.optim.Adam(model.parameters(), lr=parameters.HYPERPARAMETERS[parameters.MODEL_ID]['lr'],
weight_decay=parameters.HYPERPARAMETERS[parameters.MODEL_ID]['l2_reg'])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, parameters.HYPERPARAMETERS[parameters.MODEL_ID]['lr_decay_step'],
gamma=parameters.HYPERPARAMETERS[parameters.MODEL_ID]['lr_decay'])
start_time = time.time()
curriculum_profiling(model, train_dataloaders, full_dataloaders, loss_func, optimizer, scheduler, writer)
print('Training time: {:10f} minutes'.format((time.time()-start_time)/60))
writer.close()
if __name__ == '__main__':
main()
|
[
"jgs8@stanford.edu"
] |
jgs8@stanford.edu
|
d374d041d9fdbc6ce15c9205a158067a20da5491
|
3a6a456c35721fb48382eea448d0c5e9f0544898
|
/Programing_Basics_with_Python/2 Conditional Statements/5-Число_от_100_до_200.py
|
f9adfb9614f270b8df718c9e340240fa075c1085
|
[
"MIT"
] |
permissive
|
eclipse-ib/Software-University-Entry-Module
|
82a8c8613c30c17d51c0bc6073a9fe80327de9de
|
7d287dc32340ae301872d6e45cbd7d8bf56110a2
|
refs/heads/main
| 2023-01-03T19:52:12.254709
| 2020-10-21T20:51:26
| 2020-10-21T20:51:26
| 306,134,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
n = int(input())
if n < 100:
print(f"Less than 100")
if n > 99 and n < 201:
print(f"Between 100 and 200")
if n > 200:
print(f"Greater than 200")
|
[
"65770519+eclipse-ib@users.noreply.github.com"
] |
65770519+eclipse-ib@users.noreply.github.com
|
06f7b5a194d9185a56bc840e4159c3cd3e67d79b
|
606b8709678f6efd0c73eafb24d4b27fea2caef7
|
/dublicate.py
|
e794b0efc6c47f2896a64598388824a0d9b6e4e4
|
[] |
no_license
|
roshnet/echelon-convertor
|
34ddd8deb7ae529f5b991d8d70be4f677597e003
|
542368c79ccab9d343cc501a07cb6bd87181b64b
|
refs/heads/master
| 2020-04-03T15:28:01.040142
| 2018-10-29T18:16:16
| 2018-10-29T18:16:16
| 155,363,403
| 0
| 0
| null | 2018-10-30T10:06:04
| 2018-10-30T10:06:03
| null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
'''The dublicate module provides two helper functions, dublicate and intialize,
which initialize matrices and deep-copy them for use in the main file.'''
#dublicate uses m(rows),n(columns),matrix(input matrix) and l2(for deep copy) as parameters
def dublicate(m,n,matrix,l2):
for i in range(m):
for j in range(n):
l2[i][j]=matrix[i][j] # picking each element from matrix and placing it in l2
return l2
#intialize uses m,n, any multidimensional array
#intialize any array
def intialize(m,n,array):
for i in range(m):
array.append([])
for j in range(n):
array[i].append(0)
return array
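# Hypothetical usage sketch (not part of the original module):
# if __name__ == '__main__':
#     src = [[1, 2], [3, 4]]
#     copy = dublicate(2, 2, src, intialize(2, 2, []))
#     print(copy)  # [[1, 2], [3, 4]]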
|
[
"prajjwalnijhara@gmail.com"
] |
prajjwalnijhara@gmail.com
|
5d1e6d95a353db6b4b3a15223520f3f31b903cbd
|
3fec1cdced9ab2087401aadaeca51c5439d535c6
|
/venv/Scripts/pip-script.py
|
97e4217f1ea51ca24af88a89fb77544c1d90ec1f
|
[] |
no_license
|
jkuntzer/WarRock-Login-Server
|
607f91286569539542cf619ddcf5088877962ca1
|
b62a415b15def3b7f8bb94419a575b035c261cea
|
refs/heads/master
| 2022-02-15T06:34:39.943718
| 2019-08-21T08:45:32
| 2019-08-21T08:45:32
| 202,322,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
#!D:\Documents\PycharmProjects\Warrock_Login_Server\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"jonas.kuntzer@rwth-aachen.de"
] |
jonas.kuntzer@rwth-aachen.de
|
45fa0eee742f5dc4bbd5cc281ba19a010a868e2d
|
507d6106507c087f8a10ef5f737a203b58ca811f
|
/setup.py
|
56dedb4e4ec5192ad2c9667e85aa5fdb891e4d0b
|
[
"MIT"
] |
permissive
|
grimen/python-config2
|
7c235619df00fe698b8126804ad4980c39f09194
|
30538a06105aaf56a1cf8970bd9bfdf3c2733722
|
refs/heads/master
| 2022-05-03T04:15:22.789423
| 2019-05-17T05:52:52
| 2019-05-17T05:52:52
| 133,857,345
| 22
| 12
|
MIT
| 2019-05-09T05:24:37
| 2018-05-17T19:09:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,542
|
py
|
# =========================================
# IMPORTS
# --------------------------------------
import os
import setuptools
import setupextras
# DISABLED/BUG: this line fails when `pip install config2` but works `pip install .`
# from config2 import __version__
# =========================================
# MAIN
# --------------------------------------
name = 'config2'
version = '0.3.2'
description = 'Python application configuration - highly inspired by `node-config`.'
keywords = [
'config',
'configuration',
'configurations',
'settings',
'env',
'environment',
'environments',
'application',
'node-config',
'python-config',
]
packages = setupextras.get_packages()
data_files = setupextras.get_data_files(['*.*'], os.path.join(name, 'tests', '__fixtures__'))
requirements = setupextras.get_requirements()
readme = setupextras.get_readme()
config = {
'name': name,
'version': version,
'description': (description),
'keywords': keywords,
'author': 'Jonas Grimfelt',
'author_email': 'grimen@gmail.com',
'url': 'https://github.com/grimen/python-{name}'.format(name = name),
'download_url': 'https://github.com/grimen/python-{name}'.format(name = name),
'project_urls': {
'repository': 'https://github.com/grimen/python-{name}'.format(name = name),
'bugs': 'https://github.com/grimen/python-{name}/issues'.format(name = name),
},
'license': 'MIT',
'long_description': readme,
'long_description_content_type': 'text/markdown',
'classifiers': [
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
'packages': packages,
'package_dir': {
name: name,
},
'package_data': {
'': [
'MIT-LICENSE',
'README.md',
],
name: [
'*.*',
],
},
'data_files': data_files,
'include_package_data': True,
'zip_safe': True,
'install_requires': requirements,
'setup_requires': [
'setuptools_git >= 1.2',
],
}
setuptools.setup(**config)
|
[
"grimen@gmail.com"
] |
grimen@gmail.com
|
510e59b01b43f83b3140e29f39d891f0f10426e2
|
865d33f0ea35461d6ec073eb74f34e061d521a25
|
/03_list_less_than_ten.py
|
25667f857bc2631213dad4954ebb6454477790fe
|
[
"Apache-2.0"
] |
permissive
|
ThornOfCamor/Practice
|
51bcbaf5ca6c24b9872c715efb977d5f3da11e22
|
d96ce8068506cb959f2ab4ccc97bf4c0271377e4
|
refs/heads/master
| 2020-04-08T22:48:28.468332
| 2018-11-30T11:32:22
| 2018-11-30T11:32:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
"""
Practice - 3
"""
if __name__ == '__main__':
a = [ 1, 1, 2, 3, 5, 6, 10, 12, 12, 15, 18, 33, 80]
b = []
maxi = int(raw_input("Enter upper limit: "), 10)
for x in a:
if maxi>x:
b.append(x)
print b
|
[
"f2015082@pilani.bits-pilani.ac.in"
] |
f2015082@pilani.bits-pilani.ac.in
|
25d46fadeac81a64181c6940596c0ca832487f44
|
1d2301980e52955d5b8d06b340254bcafe13ff45
|
/devel/lib/python2.7/dist-packages/sensor_simulators/srv/_calibrate.py
|
c64f6956180a9bf1de0d333f810a22c22a305ac1
|
[] |
no_license
|
j-alicia-long/ros-sim-projects
|
3f4f74ef6ded867947aa29372a49bfe557c5bbcd
|
a77eea0510b6c8f80f7670b6f68aefe22dd311b5
|
refs/heads/master
| 2022-04-05T03:03:17.373721
| 2020-03-05T08:43:10
| 2020-03-05T08:43:10
| 241,970,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
/home/robotclass/fastsim/devel/.private/sensor_simulators/lib/python2.7/dist-packages/sensor_simulators/srv/_calibrate.py
|
[
"j.alicia.long@gmail.com"
] |
j.alicia.long@gmail.com
|
7a8e92c57c690cda1d3ed678c5d87ff8775e42cf
|
64112c3a013301f70b5ef079c2d8b3ab92811046
|
/python/exercise 6/code1.py
|
d7d9cb5d9eec56a3e33cb599cee8ab8b830d21fe
|
[] |
no_license
|
dieduk89/project_euler
|
91197243302c15990afd212339ba6c93b3266644
|
4c555c5976b6f910fa6da9238a3c84dbf836bf83
|
refs/heads/master
| 2020-03-08T11:45:41.555545
| 2019-01-10T17:38:35
| 2019-01-10T17:38:35
| 128,107,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 103
|
py
|
print "\nCALCULUS\n"
print sum([x for x in range(101)])**2-sum([x**2 for x in range(101)])
print "\n"
|
[
"dieduk89@gmail.com"
] |
dieduk89@gmail.com
|
9a8f133b1a485d74be5f9277e467fb40fc54d413
|
0f19ec42bbee96db42a6973d99d679fa33d7aba1
|
/Chapter15/Exercise15.03/Test_15_03.py
|
d8d2bb76df668abd75d8989a1f83ce02e15672d9
|
[
"MIT"
] |
permissive
|
a1ip/The-Data-Science-Workshop
|
ab3aa67c24daac18cbccbf9dc96b5436e46e1754
|
5992a4d14f7ea06c8815e395d57a9941bac47254
|
refs/heads/master
| 2022-12-18T08:19:04.550755
| 2020-09-24T04:10:38
| 2020-09-24T04:10:38
| 286,596,069
| 1
| 0
|
MIT
| 2020-09-03T00:40:51
| 2020-08-10T23:05:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 843
|
py
|
import unittest
import import_ipynb
import pandas as pd
import pandas.testing as pd_testing
class Test(unittest.TestCase):
def setUp(self):
import Exercise_15_03_Ensemble_learning_Weighted_Averaging_v1_0
self.exercises = Exercise_15_03_Ensemble_learning_Weighted_Averaging_v1_0
self.filename = 'https://raw.githubusercontent.com/PacktWorkshops/The-Data-Science-Workshop/master/Chapter15/Dataset/crx.data'
self.credData = pd.read_csv(self.filename,sep=",",header = None,na_values = "?")
self.dataShape = self.credData.shape
def test_file_url(self):
self.assertEqual(self.exercises.filename, self.filename)
def test_shape(self):
self.assertEqual(self.exercises.credData.shape, self.dataShape)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
a1ip.noreply@github.com
|
9d4454c5706870d31d1ed495a4f26be822d1a65a
|
67713397548c4964878f203aa631e10a0382f66c
|
/7_listComprehensions/listComprehensions.py
|
cd8393825f018a70c17b410d0afe4f607c47fb55
|
[] |
no_license
|
JonathanC13/python_ref
|
50bc5f5fba74a9ddc04857f6fd6dfa6efa79c292
|
cfd1f5eaa3ca24bd91a0577103f0ce2e178c853e
|
refs/heads/master
| 2020-03-24T15:46:38.631259
| 2019-01-15T14:16:07
| 2019-01-15T14:16:07
| 142,801,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
a = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
new = [elem for elem in a if ((elem % 2) == 0)]
# for elem in a
# if ((elem % 2) == 0)
print (*new, sep=', ')
|
[
"Jonhschan@hotmail.com"
] |
Jonhschan@hotmail.com
|
f175b2b241093b84b7fd98865d414f73de33ca40
|
5ce258654302ad5ded2a57ce922a2fb8fda48e4c
|
/venv/Scripts/pip3.7-script.py
|
54e1519da7ae4b15f588337964785808810ffb1f
|
[] |
no_license
|
emaillalkrishna/AutomationTraining04Apr2019
|
d929dcce3d6ccf60661ab5119d45775263be55cc
|
bc15c2c204a0edb8bd7dee65284b5bde0f5c3b40
|
refs/heads/master
| 2020-05-04T22:14:14.981293
| 2019-04-04T15:27:40
| 2019-04-04T15:27:40
| 179,504,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!"C:\Users\LAL KRISHNA\PycharmProjects\AutomationTraining03Apr2019\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"emaillalkrishna@gmail.com"
] |
emaillalkrishna@gmail.com
|
aed49f3fa11d05349c61e91e585839ac12a283f1
|
1cfcfa686489885843b9a142c8ba980ebd5d5ffd
|
/tensorkit/tensor/linalg.py
|
441198f19a5c68c4d5d5bcb73c381fd7180a75f4
|
[
"MIT"
] |
permissive
|
qyz-thu/gnn_vae
|
9d2d8e984a96d0f22f74362889fdd1c0613df46d
|
278aeb7038216812a94c7f7acd2ca425696f986b
|
refs/heads/master
| 2023-02-05T20:07:24.097968
| 2020-12-18T06:34:20
| 2020-12-18T06:34:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
from ..backend import linalg
from ..backend.linalg import *
__all__ = linalg.__all__
|
[
"haowen.xu@outlook.com"
] |
haowen.xu@outlook.com
|
9027a463289bf93c1317431c88e6d23510a23d5d
|
37194bcee20e66e84360010d98a45adcced57963
|
/Algorithem_my/IM_Motherboard/baekjoon2563/2563.py
|
23114f8524cc99264e3210092d2c038dceaec963
|
[] |
no_license
|
dmdekf/algo
|
edcd1bbd067102a622ff1d55b2c3f6274126414a
|
544a531799295f0f9879778a2d092f23a5afc4ce
|
refs/heads/master
| 2022-09-13T14:53:31.593307
| 2020-06-05T07:06:03
| 2020-06-05T07:06:03
| 237,857,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
import sys
sys.stdin = open('input.txt')
N = int(input())
d = list([0]*100 for _ in range(100))
cnt = 0
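# Each 10x10 sheet marks cells on a 100x100 grid; cnt counts cells that were
# already marked, so the total black area is N*100 - cnt.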
for x in range(N):
r, c = map(int,input().split())
# print(r,c)
for i in range(r, r+10):
for j in range(c, c+10):
if d[i][j] != 0:
cnt +=1
else:
d[i][j] = 1
print(N*100 - cnt)
|
[
"dmdekf@gmail.com"
] |
dmdekf@gmail.com
|
885a2426d989ed59b2b78b30a6439009f939b856
|
6a7bc7db97e924576d675cc0d45a7639650256d6
|
/tests/gmm.py
|
aabb0fc5ff58642e9f658bc9bff0abe3026e907a
|
[] |
no_license
|
nima-vakili/clouds
|
9a2ad8c015ac224baf8a872701c4369a0f5b86ff
|
6c8d746b675d4b59a5beb14a84c8e95ba30201a6
|
refs/heads/master
| 2022-11-12T18:17:00.765708
| 2020-06-24T16:04:02
| 2020-06-24T16:04:02
| 274,707,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,056
|
py
|
import numpy as np
from scipy.stats import multivariate_normal as mvn
from sklearn.cluster import KMeans


class GMM:
""" Gaussian Mixture Model
Parameters
-----------
k: int , number of gaussian distributions
seed: int, will be randomly set if None
max_iter: int, number of iterations to run algorithm, default: 200
Attributes
-----------
centroids: array, k, number_features
cluster_labels: label for each data point
"""
def __init__(self, C, n_runs):
self.C = C # number of Guassians/clusters
self.n_runs = n_runs
def get_params(self):
return (self.mu, self.pi, self.sigma)
def calculate_mean_covariance(self, X, prediction):
"""Calculate means and covariance of different
clusters from k-means prediction
Parameters:
------------
prediction: cluster labels from k-means
X: N*d numpy array data points
Returns:
-------------
intial_means: for E-step of EM algorithm
intial_cov: for E-step of EM algorithm
"""
d = X.shape[1]
labels = np.unique(prediction)
self.initial_means = np.zeros((self.C, d))
self.initial_cov = np.zeros((self.C, d, d))
self.initial_pi = np.zeros(self.C)
counter=0
for label in labels:
ids = np.where(prediction == label) # returns indices
self.initial_pi[counter] = len(ids[0]) / X.shape[0]
self.initial_means[counter,:] = np.mean(X[ids], axis = 0)
de_meaned = X[ids] - self.initial_means[counter,:]
Nk = X[ids].shape[0] # number of data points in current gaussian
self.initial_cov[counter,:, :] = np.dot(self.initial_pi[counter] * de_meaned.T, de_meaned) / Nk
counter+=1
        assert np.isclose(np.sum(self.initial_pi), 1)  # mixture weights must sum to 1 (up to float error)
return (self.initial_means, self.initial_cov, self.initial_pi)
def _initialise_parameters(self, X):
"""Implement k-means to find starting
parameter values.
https://datascience.stackexchange.com/questions/11487/how-do-i-obtain-the-weight-and-variance-of-a-k-means-cluster
Parameters:
------------
X: numpy array of data points
Returns:
----------
tuple containing initial means and covariance
_initial_means: numpy array: (C*d)
_initial_cov: numpy array: (C,d*d)
"""
n_clusters = self.C
kmeans = KMeans(n_clusters= n_clusters, init="k-means++", max_iter=500, algorithm = 'auto')
fitted = kmeans.fit(X)
prediction = kmeans.predict(X)
self._initial_means, self._initial_cov, self._initial_pi = self.calculate_mean_covariance(X, prediction)
return (self._initial_means, self._initial_cov, self._initial_pi)
def _e_step(self, X, pi, mu, sigma):
"""Performs E-step on GMM model
Parameters:
------------
X: (N x d), data points, m: no of features
pi: (C), weights of mixture components
mu: (C x d), mixture component means
sigma: (C x d x d), mixture component covariance matrices
Returns:
----------
gamma: (N x C), probabilities of clusters for objects
"""
N = X.shape[0]
self.gamma = np.zeros((N, self.C))
const_c = np.zeros(self.C)
        # use the parameters passed in for this EM iteration
        # (fit() seeds them with the k-means initialisation before the first call)
        self.pi, self.mu, self.sigma = pi, mu, sigma
for c in range(self.C):
# Posterior Distribution using Bayes Rule
self.gamma[:,c] = self.pi[c] * mvn.pdf(X, self.mu[c,:], self.sigma[c])
# normalize across columns to make a valid probability
gamma_norm = np.sum(self.gamma, axis=1)[:,np.newaxis]
self.gamma /= gamma_norm
return self.gamma
def _m_step(self, X, gamma):
"""Performs M-step of the GMM
We need to update our priors, our means
and our covariance matrix.
Parameters:
-----------
X: (N x d), data
gamma: (N x C), posterior distribution of lower bound
Returns:
---------
pi: (C)
mu: (C x d)
sigma: (C x d x d)
"""
N = X.shape[0] # number of objects
C = self.gamma.shape[1] # number of clusters
d = X.shape[1] # dimension of each object
# responsibilities for each gaussian
self.pi = np.mean(self.gamma, axis = 0)
self.mu = np.dot(self.gamma.T, X) / np.sum(self.gamma, axis = 0)[:,np.newaxis]
for c in range(C):
x = X - self.mu[c, :] # (N x d)
gamma_diag = np.diag(self.gamma[:,c])
x_mu = np.matrix(x)
gamma_diag = np.matrix(gamma_diag)
sigma_c = x.T * gamma_diag * x
self.sigma[c,:,:]=(sigma_c) / np.sum(self.gamma, axis = 0)[:,np.newaxis][c]
return self.pi, self.mu, self.sigma
def _compute_loss_function(self, X, pi, mu, sigma):
"""Computes lower bound loss function
Parameters:
-----------
X: (N x d), data
Returns:
---------
pi: (C)
mu: (C x d)
sigma: (C x d x d)
"""
N = X.shape[0]
C = self.gamma.shape[1]
self.loss = np.zeros((N, C))
for c in range(C):
dist = mvn(self.mu[c], self.sigma[c],allow_singular=True)
self.loss[:,c] = self.gamma[:,c] * (np.log(self.pi[c]+0.00001)+dist.logpdf(X)-np.log(self.gamma[:,c]+0.000001))
self.loss = np.sum(self.loss)
return self.loss
def fit(self, X):
"""Compute the E-step and M-step and
Calculates the lowerbound
Parameters:
-----------
X: (N x d), data
Returns:
----------
instance of GMM
"""
d = X.shape[1]
self.mu, self.sigma, self.pi = self._initialise_parameters(X)
try:
for run in range(self.n_runs):
                self.gamma = self._e_step(X, self.pi, self.mu, self.sigma)
self.pi, self.mu, self.sigma = self._m_step(X, self.gamma)
loss = self._compute_loss_function(X, self.pi, self.mu, self.sigma)
if run % 10 == 0:
print("Iteration: %d Loss: %0.6f" %(run, loss))
except Exception as e:
print(e)
return self
def predict(self, X):
"""Returns predicted labels using Bayes Rule to
Calculate the posterior distribution
Parameters:
-------------
X: ?*d numpy array
Returns:
----------
labels: predicted cluster based on
highest responsibility gamma.
"""
labels = np.zeros((X.shape[0], self.C))
for c in range(self.C):
labels [:,c] = self.pi[c] * mvn.pdf(X, self.mu[c,:], self.sigma[c])
labels = labels .argmax(1)
return labels
def predict_proba(self, X):
"""Returns predicted labels
Parameters:
-------------
X: N*d numpy array
Returns:
----------
labels: predicted cluster based on
highest responsibility gamma.
"""
post_proba = np.zeros((X.shape[0], self.C))
for c in range(self.C):
# Posterior Distribution using Bayes Rule, try and vectorise
post_proba[:,c] = self.pi[c] * mvn.pdf(X, self.mu[c,:], self.sigma[c])
return post_proba
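# A minimal usage sketch (not part of the original file): it assumes numpy, scipy and
# scikit-learn are installed in a version compatible with the code above (note the
# KMeans(algorithm='auto') call), and simply fits the GMM on synthetic blobs and
# predicts cluster labels. make_blobs and the demo names below are illustrative only.
if __name__ == '__main__':
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=300, centers=3, random_state=0)
    gmm = GMM(C=3, n_runs=50)
    gmm.fit(X_demo)
    demo_labels = gmm.predict(X_demo)
    print(demo_labels[:10])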
|
[
"nima.vakili@hotmail.com"
] |
nima.vakili@hotmail.com
|
7c7d1d85edff3a0cc3b0b4ebb69aaf8614d3144a
|
df87c768ab5eda4648b54eb5df7ca7a403c1d2b2
|
/object-detector/dwell_time.py
|
68aa2c6779619be10df4ca774dd5119c76c8dff6
|
[] |
no_license
|
detend/fish-detector
|
447c6f9ee1e7c72a1a3b14ec0ddb7380a6f5c354
|
f8c4621700506531746c69cb8161b563c5348e18
|
refs/heads/master
| 2020-07-31T05:58:32.572460
| 2019-12-24T02:56:04
| 2019-12-24T02:56:04
| 210,508,532
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,595
|
py
|
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
import matplotlib.pyplot as plt
from PIL import Image
import glob
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
import argparse
import cv2
import time
import imutils
import pickle
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')
sys.path.append('../../../models/research/object_detection') # point to the tensorflow dir
# sys.path.append('~/Documents/Tensorflow/models/slim')
from utils import label_map_util
from utils import visualization_utils as vis_util
# Model name
MODEL_NAME = '../trained-inference-graphs/output_inference_graph_v1.pb'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = '../annotations/label_map.pbtxt'
# Load the frozen Tensorflow model into memory
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Loading the label map
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
# Detection for a single image
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[1], image.shape[2])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.int64)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
# Read the original video
def read_vid(from_path):
    return cv2.VideoCapture(from_path)  # use the function argument, not the global
def identify_position(w,h, center):
#NW
if (center[0] < (w*2/5)) and (center[1]<h/2):
return 1
#SW
elif (center[0] < (w*2/5)) and (center[1]>h/2):
return 2
#NE
elif (center[0]>(w*3/5)) and (center[1]<h/2):
return 4
#SE
elif (center[0]>(w*3/5)) and (center[1]>h/2):
return 5
#main
else:
return 3
def identify_transit(previous, current):
maze_part = [1, 2, 4 ,5]
if (previous!=current) and (current in maze_part):
return current
return 0
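# Illustrative sketch (an assumption, not part of the original script): for a 640x480
# frame, a detection centred at (100, 100) falls in the NW region (1) and one at
# (500, 400) falls in the SE region (5); identify_transit() only reports a pass when
# the region changes into one of the four maze corners.
#
#   identify_position(640, 480, (100, 100))  -> 1
#   identify_position(640, 480, (500, 400))  -> 5
#   identify_transit(1, 5)                   -> 5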
# Track the fish in the video
def tracking(vid, name, path_to):
dwell = []
passes = []
previous = 0
timestamp = 0
#positions are 1:NW 2:SW 3:main 4:NE 5:SE
possible_positions = [1,2,3,4,5]
width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Find the fish every 60 frames
count = 60
while (vid.isOpened()):
# Read a single frame
ret, frame = vid.read()
if ret:
# Convert colors to RGB
color_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(color_frame, axis=0)
if count % 60 == 0:
# Actual detection for the single frame
output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
if output_dict['detection_scores'][0] > 0.9:
# Determine the center of bordering box
ymin, xmin, ymax, xmax = output_dict['detection_boxes'][0]
center = (int((xmax + xmin) * width / 2), int((ymax + ymin) * height / 2))
count += 1
timestamp_ = vid.get(cv2.CAP_PROP_POS_MSEC)
delta_time = timestamp_ - timestamp
position = identify_position(width, height, center)
if previous == 0:
dwell.append((position, delta_time))
else:
dwell.append((previous, delta_time))
pass_ = identify_transit(previous, position)
if (pass_ in possible_positions) and (previous!=0):
passes.append((pass_, timestamp_))
print(position, pass_, timestamp_)
timestamp = timestamp_
previous = position
else:
count += 1
else:
count += 1
#timestamp = vid.get(cv2.CAP_PROP_POS_MSEC)
else:
break
vid.release()
#cv2.destroyWindow('frame')
return dwell, passes
def dump_file(path, name, file):
with open(path+name[:-4], 'wb') as pathto:
pickle.dump(file, pathto)
if __name__ == '__main__':
# construct the needed arguments
ap = argparse.ArgumentParser(description="Converting video(s) to images")
ap.add_argument("-v", "--video", help="path to a single video file")
ap.add_argument("-d", "--directory", help="path to a directory including video(s)")
ap.add_argument("-s", "--save", help="path to a directory to save the output images")
ap.add_argument("--start", help="Start point to trim the video")
ap.add_argument("--end", help="End point to trim the video")
args = vars(ap.parse_args())
# handle wrong arguments
if (args.get("video", True)) and (args.get("directory", True)):
raise ValueError("Use either --video or --directory, not both of them.")
elif not args.get("save", True):
raise ValueError("Use --save flag to specify a directory to save the output images")
elif args.get("video", True):
arg_type = "video"
path_from = args["video"]
path_to = args["save"]
elif args.get("directory", True):
arg_type = "directory"
path_from = args["directory"]
path_to = args["save"]
else:
raise ValueError("use --video or --directory flag with a following valid path.")
# place a '/' at the end of the path_to if it doesn't have it
if not path_to[-1] == "/":
path_to += "/"
try:
if arg_type == "video":
name = path_from.split("/")[-1]
vid = read_vid(path_from)
dwell, passes = tracking(vid, name, path_to)
dump_file(path_to, "dwell/"+name, dwell)
dump_file(path_to, "passes/"+name, passes)
elif arg_type == "directory":
videos = glob.glob(path_from + "*")
for video in videos:
name = video.split("/")[-1]
vid = cv2.VideoCapture(video)
dwell, passes = tracking(vid, name, path_to)
dump_file(path_to,"dwell/"+name, dwell)
dump_file(path_to, "passes/"+name, passes)
except Exception as e:
print(e)
|
[
"vahid.pourheidari@gmail.com"
] |
vahid.pourheidari@gmail.com
|
02d068146b046fb2b5a2631b823f235d70a865b2
|
c8373f4e8187c1149ca0eceb5e1ec81d6c895785
|
/modules/native/wakeWord.py
|
049b01f0e729d639a8b3435e286cc6f679d3310b
|
[] |
no_license
|
pietroid/sphynx
|
2866705b4d9eca5fd492ae1e7309ea1429612cae
|
483dec05f8b2885bd7c7766bec083ee208210f8e
|
refs/heads/main
| 2023-07-14T14:06:51.602334
| 2021-09-02T16:44:30
| 2021-09-02T16:44:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
import sys
sys.path.append('../../')
from orchestrator.orchestrator import Orchestrator
class WakeWord:
def __init__(self):
self.timeout = 10
self.wakeWord = 'hey jarvis'
self.orchestrator = Orchestrator()
self.listeningKey = 'LISTENING'
def apply(self,message):
if(message['type'] == 'RAW'):
contentText = message['content']['text']
if(contentText.lower().find(self.wakeWord) >= 0):
self.orchestrator.createMemory(self.listeningKey,None,self.timeout)
newContentText = contentText.split(self.wakeWord)[-1]
if(newContentText.strip() != ''):
outputMessage = message
outputMessage['content']['text'] = newContentText
#TODO: manipulate NLP to remove wakeWord from structure
return outputMessage
def isListening(self):
return len(self.orchestrator.listMemoriesByKey(self.listeningKey)) > 0
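# Hypothetical usage sketch (assumes the project's Orchestrator backend is importable
# and running, which this module requires): feed a raw transcription message through
# the wake-word filter and only act on what follows "hey jarvis".
#
#   ww = WakeWord()
#   msg = {'type': 'RAW', 'content': {'text': 'hey jarvis turn on the lights'}}
#   out = ww.apply(msg)          # -> message whose text becomes ' turn on the lights'
#   ww.isListening()             # -> True while the LISTENING memory is alive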
|
[
"^pietrodomingues@gmail.com^"
] |
^pietrodomingues@gmail.com^
|
b88ce5e638d86d3df2ff53ccb8735ce2973f594c
|
af669dbef653dd69474f4c0836582bf14262c80f
|
/price-test/frame/lib/deploylib/basemodule.py
|
40802035dad8bd234a44eb1fa887fabf13457945
|
[] |
no_license
|
siki320/fishtest
|
7a3f91639d8d4cee624adc1d4d05563611b435e9
|
7c3f024192e1c48214b53bc45105bdf9e746a013
|
refs/heads/master
| 2021-01-19T21:58:36.807126
| 2017-04-19T09:56:37
| 2017-04-19T09:56:37
| 88,729,049
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 27,826
|
py
|
# -*- coding: GB18030 -*-
"""
@author: songyang
@modify: guoan
@modify: maqi
@modify: geshijing
@date: Nov 29, 2011
@summary: unified orchestration of environment deployment
@version: 1.1.0.1
@copyright: Copyright (c) 2011 XX, Inc. All Rights Reserved
"""
import sys
import os.path
import socket
import inspect
import imp
from frame.lib.commonlib.kvconf import Kvconf
from frame.lib.commonlib.dlog import dlog
# add rpyc to the python path
rpyc_path = os.path.join(os.path.dirname(__file__), "../thirdlib")
rpyc_path = os.path.abspath(rpyc_path)
if rpyc_path not in sys.path:
sys.path.append(rpyc_path)
import frame.lib.thirdlib.rpyc
from frame.lib.thirdlib.rpyc.core import consts
from frame.lib.thirdlib.rpyc.core.netref import syncreq
from frame.lib.commonlib.utils import get_abs_dir
from frame.lib.commonlib.timer import Timer2
from frame.lib.commonlib.portalloc import PortAlloc
from frame.lib.deploylib.xdsystem import XDSystem
from frame.lib.deploylib.element import Element
from frame.lib.deploylib.result import Module_Result
from frame.lib.deploylib.xdlog import XDLog
from frame.lib.deploylib.utils import ping,healthy_clients_list
from frame.lib.deploylib.xderror import XDComponentError,XDCommonError
from frame.lib.deploylib.download import HadoopDownload,StdDownload,ScmpfDownload,HudsonDownload,LocalDownload,SvnDownload,DataCenterDownload,HDFSDownload
from copy import deepcopy
class RpycTypeMixIn(object):
DEFAULT_RPC_PORT = 60778
RPYC_CLIENT_NUM = 0
conn = []
@staticmethod
def create_component(klass, host_info, *args, **argws):
localhostname = socket.gethostname()
try:
localip = socket.gethostbyname(localhostname)
except:
print "Cannot get local ip by hostname[%s], set ip=127.0.0.1!"%localhostname
localip = "127.0.0.1"
desthost = host_info["host"].strip()
localuser = os.environ["USER"]
client_path = os.environ.get("client_path",'.XDS_CLIENT')
if (localhostname == desthost or localip == desthost or desthost == "127.0.0.1") and (host_info["user"] == "localuser" or host_info["user"] == localuser):
host_info["host"] = localhostname
host_info["user"] = localuser
host_info["ip"] = localip
host_info['client_path'] = client_path
host_info["is_local"] = 1
return RpycTypeMixIn.create_local_component(klass, host_info, *args, **argws)
else:
host_info["host"] = desthost
host_info["ip"] = socket.gethostbyname(desthost)
host_info["is_local"] = 0
host_info['client_path'] = client_path
return RpycTypeMixIn.create_remote_component(klass, host_info, *args, **argws)
@staticmethod
def create_local_component(klass, host_info, *args, **argws):
instance = RpycTypeMixIn.__new__(klass)
instance.host_info = host_info
return instance
@staticmethod
def create_remote_component(klass, host_info, *args, **argws):
RpycTypeMixIn.DEFAULT_RPC_PORT = host_info["rpyc_port"]
client_path=host_info["client_path"]
if not os.path.isabs(client_path):
client_path = "/home/" + host_info["user"] + "/" + client_path
if host_info["user"] == "root":
RpycTypeMixIn.DEFAULT_RPC_PORT = 60779
client_path = "/root/.XDS_CLIENT"
if ping(host_info["ip"], int(RpycTypeMixIn.DEFAULT_RPC_PORT)) != 0:
raise XDComponentError("can not connect to " + host_info["host"])
RpycTypeMixIn.conn.append(frame.lib.thirdlib.rpyc.classic.connect(host_info["host"], int(RpycTypeMixIn.DEFAULT_RPC_PORT)))
host_info['rpc_connection'] = RpycTypeMixIn.conn[RpycTypeMixIn.RPYC_CLIENT_NUM]
RpycTypeMixIn.conn[RpycTypeMixIn.RPYC_CLIENT_NUM].modules.sys.path.insert(0,client_path)
mod_name = klass.__module__
cls_name = klass.__name__
RpycTypeMixIn.RPYC_CLIENT_NUM += 1
return getattr(RpycTypeMixIn.conn[RpycTypeMixIn.RPYC_CLIENT_NUM-1].modules[mod_name], cls_name).remote_create_component(klass, host_info, *args, **argws)
@staticmethod
def remote_create_component(klass, host_info, *args, **argws):
mod_name = syncreq(klass, consts.HANDLE_GETATTR, '__module__')
cls_name = syncreq(klass, consts.HANDLE_GETATTR, '__name__')
tmpmod = __import__(mod_name, globals(), locals(), [mod_name], -1)
klass = getattr(tmpmod, cls_name)
instance = RpycTypeMixIn.create_local_component(klass, host_info, *args, **argws)
instance.__init__(host=host_info["host"],user=host_info["user"], local_path=host_info["path"], *args, **argws)
return instance
class BaseModule(RpycTypeMixIn):
"""
    @note: base class for all modules, e.g. bs, as
"""
def __new__(cls, host="127.0.0.1", user="localuser", local_path="./", passwd=None, rpyc_port=60778, *args, **argws):
"""
        Called before the instance's __init__: uses the RPC layer to create a remote instance of the component and, based on the host info, connects to the rpyc server on that machine.
"""
host_info = dict()
host_info["host"] = host
host_info["user"] = user
host_info["path"] = get_abs_dir(path=local_path,exist=False)
host_info["passwd"] = passwd
host_info["rpyc_port"] = rpyc_port
return cls.create_component(cls, host_info, *args, **argws)
def __init__(self,host="127.0.0.1", user="localuser", local_path="./",instance_name=None,passwd=None,config_file=None,**args):
"""
        @note: host, user and local_path are already stored in the self.host_info dict before __init__ runs, so they are not assigned again here
"""
        # every class has a "type" attribute; it matches the key used in the incoming dict
        self.type = None
        # how many times to retry when a wget command fails
        self.retry_num = 3
        # number of ports this module needs
        self.port_num = 0
        self.listen_port = None
        self.port_list = []
        # concrete downstream module instances registered for this deployment scenario (instance level)
        self.module_rel_set = []
        # all module types that can appear downstream of this module
        self.all_rel_module = []
        # when several instances of a module are deployed, instance_name tells them apart
        self.instance_name = instance_name
        # log object used by each module to write logs
        self.log = None
        # handler for running linux system commands
        self.sys = None
        # the module's self-description config file
        self.config_file = self.search_abs_path(config_file)
        # the module's self-description; bin/conf/data by default, extensible down to file granularity
        self.element_dict = {}
        # keeps the download order of the elements
        self.element_list = []
        # dict of the module's download sources
        self.src_dict={}
        # raw list of the module's download sources
        self.src_list=[]
        # records deployment details for the dashboard
        self.result_obj = None
        # used for port adaptation
        self.portalloc = None
self.port_segs = []
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
if odict["host_info"].has_key('rpc_connection'):
del odict["host_info"]['rpc_connection']
if odict["portalloc"]:
del odict["portalloc"]
odict['port_segs'] =[]
return odict
def __setstate__(self,state):
self.__dict__.update(state)
if 0 == self.host_info["is_local"]:
instance = RpycTypeMixIn.create_remote_component(self.__class__,self.host_info)
for k,v in self.__dict__.items():
instance.__dict__[k] = v
self = instance
self.init_handlers(dlog)
def search_abs_path(self,element_conf):
#modify by geshijing
        # added lookup of element config files given as relative paths
        # search order: product-line absolute path (product root sits next to frame) -> path relative to modulelib -> absolute path under .XDS_CLIENT; stop at the first hit
client_path_name=self.host_info["client_path"]
if element_conf and( not os.path.isabs(element_conf)) and(not os.path.exists(element_conf)):
            # candidate path relative to frame
path_base_frame = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../',element_conf))
            # candidate path relative to this object's own module file
path_base_obj = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(inspect.getfile(self.__class__))),element_conf))
            # candidate absolute path under .XDS_CLIENT
path_base_abs = os.path.abspath(os.path.join(os.path.expanduser("~/%s"%client_path_name),element_conf))
#modify by hushiling01
            # candidate path next to config_file, provided config_file already has a value
path_config_abs = None
if self.__dict__.has_key("config_file"):
path_config_abs = os.path.join(os.path.dirname(self.config_file),element_conf)
if os.path.exists(path_base_frame):
element_conf = path_base_frame
elif os.path.exists(path_base_obj):
element_conf = path_base_obj
elif os.path.exists(os.path.join(os.path.expanduser("~/%s"%client_path_name),element_conf)):
element_conf = os.path.join(os.path.expanduser("~/%s"%client_path_name),element_conf)
elif os.path.exists(path_config_abs):
element_conf = path_config_abs
else:
raise AssertionError,"Can't find any file for the relative path[%s] in [%s,%s,%s]"%(element_conf,path_base_frame,path_base_obj,path_base_abs)
return element_conf
def init_handlers(self,log):
"""
        @note: initialise the various handlers of a module
        @param log: log object of the central machine, so logs also reach the central machine's screen and log file
        """
        # the log file name is derived from instance_name; if instance_name is unset, only one instance of this module is deployed and the type name is used directly
if self.instance_name == None or self.instance_name == "":
self.instance_name = self.type
basepath = os.path.basename(self.host_info["path"])
host_name = self.host_info["host"]
if self.host_info["is_local"] == 0:
client_path = os.path.join(os.path.expanduser('~'),self.host_info["client_path"])
if not os.path.exists(client_path):
os.system('mkdir -p '+client_path)
os.chdir(client_path)
if not self.log:
self.log = XDLog(log, host_name + basepath + self.instance_name, self.instance_name)
if not os.path.exists("./log"):
os.mkdir("./log")
        # remove the logs each module kept from the previous deployment so only the current run's logs remain; the central machine's log is never deleted
        # the central machine's log keeps the history of all deployments
if os.path.isfile("./log/" + self.instance_name + ".log"):
os.remove("./log/" + self.instance_name + ".log")
self.log.init_logger("./log/" + self.instance_name + ".log")
        # initialise system, used to run linux commands
self.__set_system()
        # initialise download_obj with the appropriate sources
self.__set_download_obj()
        # initialise each element; by default anything written into the config file following the naming rules is treated as registered
        # one module contains many different elements, and each element uses its own download method
self.init_all_elements()
self.__set_result_obj()
return 0
def __set_download_obj(self):
"""
        @note: initialise the different download handlers
"""
self.hadoop_download = HadoopDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.std_download = StdDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.scmpf_download = ScmpfDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.hudson_download = HudsonDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.local_download = LocalDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.svn_download = SvnDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.center_download = DataCenterDownload(self.log, \
self.host_info, self.type, self.retry_num)
self.hdfs_download = HDFSDownload(self.log, \
self.host_info, self.type, self.retry_num)
return 0
def __set_result_obj(self):
"""
@note:init result_obj
"""
self.result_obj = Module_Result(type=self.type,path=self.host_info["path"],\
host=self.host_info["host"],user=self.host_info["user"],\
instance=self.instance_name)
return 0
def __set_system(self):
"""
        @note: set the class's system object to replace popen etc. for running shell commands, and send error logs to the central machine's screen/log as well as the local log
"""
self.sys = XDSystem(self.log)
return 0
    ### set up each module's elements so the module can describe itself
def parse_config_file(self):
"""
        @note: parse the config file
        @return: element_list, kv_config_file, src_list
"""
if self.config_file == None:
return None
kv_config_file = Kvconf(self.config_file)
element_list = []
src_list =[]
for key in kv_config_file.lines:
if key.startswith("#"):
continue
if key.startswith("element_"):
element_list.append(key.replace("element_",""))
if key.startswith("src_"):
src_list.append(key.replace("src_",""))
return element_list, kv_config_file,src_list
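    # Illustrative module-config sketch (an assumption about the kv file format, based
    # only on the parsing above): every "element_*" / "src_*" key maps to a python dict
    # literal that init_all_elements() eval()'s, for example:
    #
    #   src_hadoop    = {'host': 'hadoop-src.example.com', 'path': '/release'}
    #   element_bin   = {'src_type': 'hadoop', 'des_file': 'conf/bin.des'}
    #   element_conf  = {'src_type': 'local',  'des_file': 'conf/conf.des'}
    #
    # The hostnames and file names here are hypothetical placeholders.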
def init_all_elements(self):
"""
        @note: initialise all elements of this module,
        including the download object used by each element.
        Note: the element's download object and the download dict get their initial values here
"""
config_result = self.parse_config_file()
if config_result == None:
return 0
element_list = config_result[0]
src_list = config_result[2]
for src in src_list:
src_dict = eval(config_result[1].getvalue("src_" +src))
self.reg_src(src ,src_dict)
for one_element in element_list:
tmp_element_dict = eval(config_result[1].getvalue("element_"+one_element))
if tmp_element_dict.has_key('des_file'):
tmp_element_dict['des_file'] = self.search_abs_path(tmp_element_dict['des_file'])
self.add_element(one_element,tmp_element_dict)
return 0
def reg_src(self,src_name,src_dict):
'''
        @note: register a download source
        @param src_name: name of the download source
        @param src_dict: dict describing the download source
'''
if not src_name in self.src_list:
self.src_list.append(src_name)
self.src_dict[src_name] = src_dict
def get_src(self,src_name):
'''
        @note: return a copy of a download source dict
        @param src_name: name of the download source
'''
if self.src_dict.has_key(src_name):
            # use deepcopy for now, to guard against callers mutating the dict
return deepcopy(self.src_dict[src_name])
else:
return {}
def add_element(self,element_name,dest_dict):
'''
        @note: add a module element
        @param element_name: name of the element
        @param dest_dict: dict of the element's attributes
'''
if not element_name in self.element_list:
self.element_list.append(element_name)
tmp_element = Element(name = element_name,file_path = self.config_file)
if dest_dict["src_type"].startswith("hadoop"):
tmp_element.downloadobj = self.hadoop_download
elif dest_dict["src_type"].startswith("std"):
tmp_element.downloadobj = self.std_download
elif dest_dict["src_type"].startswith("scmpf"):
tmp_element.downloadobj = self.scmpf_download
elif dest_dict["src_type"].startswith("hudson"):
tmp_element.downloadobj = self.hudson_download
elif dest_dict["src_type"].startswith("local"):
tmp_element.downloadobj = self.local_download
elif dest_dict["src_type"].startswith("svn"):
tmp_element.downloadobj = self.svn_download
elif dest_dict["src_type"].startswith("center"):
tmp_element.downloadobj = self.center_download
elif dest_dict["src_type"].startswith("hdfs"):
tmp_element.downloadobj = self.hdfs_download
else:
self.log.warning("type %s we do not support" %(dest_dict["src_type"]))
raise ValueError, "Unsupported src_type"
tmp_element.src_dict = self.get_src(dest_dict["src_type"])
tmp_element.dst_dict = dest_dict
self.element_dict[element_name] = tmp_element
def del_element(self,element_name_list):
"""
        @note: delete elements so that they will not be downloaded
        @param element_name_list: list of elements to delete
"""
for element_name in element_name_list:
if self.element_dict.has_key(element_name):
del self.element_dict[element_name]
if element_name in self.element_list:
self.element_list.remove(element_name)
return 0
def check_ip_local_port_range(self):
"""
        @note: the client may occupy the server's ports, so this method guards against it:
        1) read /proc/sys/net/ipv4/ip_local_port_range
        2) check whether the second column is greater than 61000 and report an error if so
        3) return the /proc/sys/net/ipv4/ip_local_port_range values of the machine this module runs on
"""
ip_local_port_range = self.sys.xd_system("cat /proc/sys/net/ipv4/ip_local_port_range", output = "true")[1]
max_port_kernel = ip_local_port_range.split('\t')[1][:-1]
kernel_ip_local_port_range = ip_local_port_range.splitlines()
if max_port_kernel.isdigit() == False:
self.log.warning("maybe you can not cat /proc,using default staring port for port adaptive")
begin_port = 61100
else:
begin_port = int(max_port_kernel)+100
#if int(max_port_kernel) > 61000:
#self.log.critical("yifeng is tracing")
#raise XDCommonError,"ip_local_port_range is larger than 61000"
#return kernel_ip_local_port_range
return begin_port,65500
    ### every module must implement the following methods ###
def port_adaptive(self):
'''
        @note: port adaptation using a sentinel algorithm
'''
begin_port,end_port = self.check_ip_local_port_range()
        # re-initialise inside the function to avoid wrong port allocation on repeated calls
if self.__dict__.get("portalloc",None):
for port_seg in self.port_segs:
self.portalloc.freePortSeg(port_seg)
self.port_segs = []
del self.portalloc
self.port_list = []
self.portalloc = PortAlloc(begin_port, end_port, 10)
port_seg = self.portalloc.allocPortSeg()
self.port_segs.append(port_seg)
while(len(self.port_list)<self.port_num):
try:
port = self.portalloc.allocPort(port_seg)
self.port_list.append(port)
except Exception, e:
self.log.warning('the module are using more than 9 ports')
port_seg = self.portalloc.allocPortSeg()
self.port_segs.append(port_seg)
self.log.info('the module are using ports[%s]',str(self.port_list))
return 0
def get_listen_port(self):
"""
        @note: return the module's listen port
"""
return self.listen_port
def set_listen_port(self):
"""
        @note: set the module's listen port; port adaptation calls this to assign ports
"""
return 0
def del_relation(self,module):
"""
        @note: the argument is an already created instance of another module; this method removes a relation,
        so that this module no longer tracks that related module
        @param module: downstream module object
"""
if module in self.module_rel_set:
self.module_rel_set.remove(module)
def add_relation(self,module):
"""
        @note: the argument is an already created instance of another module; this method registers a relation,
        so that this module keeps track of the related module
        @param module: downstream module object
"""
self.module_rel_set.append(module)
def build_relation(self):
"""
        @note: build the registered relations
"""
dict_set = {}
#for dashboard
self.result_obj.set_module_rel_set(self.module_rel_set)
for module_type in self.all_rel_module:
dict_set[module_type] = []
for module_obj in self.module_rel_set:
module_type = getattr(module_obj,"type")
dict_set[module_type].append(module_obj)
#self.debug("set relation %s\nmodule_rel_set:%s" %(str(dict_set), str(self.module_rel_set)) )
for module_type in dict_set:
if len(dict_set[module_type]) == 0:
continue
if hasattr(self,"set_" + module_type + "_relation"):
getattr(self,"set_" + module_type + "_relation")(dict_set[module_type])
else:
getattr(self,"set_relation")(dict_set[module_type])
return 0
def download(self):
"""
        @note: download the module's elements
"""
download_time = Timer2()
download_time.start()
for one_element in self.element_list:
if self.element_dict.has_key(one_element):
self.element_dict[one_element].download()
else:
self.log.warning("element_%s was delete from self.element_dict"%(one_element))
download_time.end()
        # in seconds; collect deployment statistics
self.result_obj.set_download_time(download_time._starttime,download_time._interval)
self.result_obj.element_dict = self.element_dict
return 0
def predownload(self):
"""
        @note: pre-processing (before download)
"""
return 0
def preprocess(self):
"""
        @note: pre-processing (after download)
"""
return 0
def localize(self):
"""
        @note: localisation
"""
return 0
def postprocess(self):
"""
        @note: post-processing (after relations are built)
"""
return 0
def start(self):
"""
        @note: start the module
"""
return 0
def stop(self):
"""
        @note: stop the module
"""
return 0
def restart(self):
"""
        @note: restart the module
"""
return 0
def clean(self):
"""
        @note: clean up the module
"""
return 0
def set_bak_dir(self,bakdir = None):
'''
        @note: set the module's backup path; overriding implementations must return an absolute path
'''
if bakdir == None:
            # default to the module's own bak_dir; fall back to the current directory if it is not set
if self.__dict__.has_key('bak_dir'):
bak_dir = self.bak_dir
else:
bak_dir = './bakup'
else:
bak_dir = bakdir
self.bak_dir = os.path.abspath(bak_dir)
return self.bak_dir
def backup(self,include =[],exclude =[]):
"""
        @note: back up the module; included paths take precedence over excluded paths
        """
        # work out the backup path
bak_dir = self.set_bak_dir()
path_pair = (self.host_info["path"],'')
while path_pair[1] == '':
path_pair = os.path.split(path_pair[0])
self.log.info("Start to back up module: %s to %s",self.host_info["path"],bak_dir)
        # build the include path arguments
includestr = ""
for includelist in include:
includestr += " --include='%s'"%str(os.path.join(path_pair[1],includelist))
excludestr= ''
for blacklist in exclude:
excludestr += " --exclude='%s'"%str(os.path.join(path_pair[1],blacklist))
cmd = "rsync --delete -a %s %s %s %s"%(includestr,excludestr,self.host_info["path"],bak_dir)
self.sys.xd_system(cmd,output = True)
self.log.info("Finished backing up module by cmd: [%s]",cmd)
return 0
def restore(self,isforce = True):
'''
        @note: restore the module from its backup.
        Note: by default every change is discarded, including logs;
        when isforce is False, only the backed-up part is restored
        '''
        # work out the backup path
bak_dir = self.set_bak_dir()
path_pair = (self.host_info["path"],'')
while path_pair[1] == '':
path_pair = os.path.split(path_pair[0])
self.log.info("Start to restore module from: %s to %s",bak_dir,self.host_info["path"])
srcpath = os.path.join(bak_dir,path_pair[1])+'/'
if os.path.exists(srcpath) == False:
            self.log.error("Source path does not exist [%s], you need to backup before restore", srcpath)
return -1
if isforce:
cmd ="rsync --delete -a %s %s"%(os.path.join(bak_dir,path_pair[1])+'/',self.host_info["path"])
else:
cmd ="rsync -a %s %s"%(os.path.join(bak_dir,path_pair[1])+'/',self.host_info["path"])
self.sys.xd_system(cmd,output = True)
self.log.info("Finished restore module by cmd: [%s]"%cmd)
return 0
    # the following methods have special purposes, see their docstrings for details
def retry_func(self,func,retry_num=3):
"""
@author:guoan
        @note: for methods that must eventually succeed, this helper provides retry behaviour
        @param func: method name
        @param retry_num: number of retries
"""
self.log.debug("we will retry %s times"%(retry_num))
ret = 0
if hasattr(self,func):
tmp_func = getattr(self,func)
else:
self.log.warning("method %s is not exit,please make attention"%(func))
return
for i in range(retry_num):
if tmp_func() == 0:
self.log.debug("exec %s successfuly"%(func))
ret = 0
break
else:
self.log.warning("now we retry %s the %s time"%(func,str(i+1)))
continue
return 0
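    # Hypothetical usage sketch: retry this module's download step up to five times,
    # e.g. inside a subclass's deployment flow (the method name is resolved with
    # getattr, so it must exist on the instance):
    #
    #   self.retry_func("download", retry_num=5)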
def load_remote_module(self, rel_path):
"""
@author:liqiuhua
        @note: load the corresponding client lib as a module
        @param rel_path: lib path relative to the client path
"""
client_path = os.path.join(os.path.expanduser('~'),self.host_info["client_path"])
abs_path = client_path+"/"+rel_path
mname,ext = os.path.splitext(os.path.basename(abs_path))
fp,pathname,desc = imp.find_module(mname,[os.path.dirname(abs_path)])
sys.path.append(os.path.dirname(abs_path))
try:
m = imp.load_module(mname,fp,pathname,desc)
finally:
if fp:
fp.close()
return m
def append_sys_path(self, sys_path):
sys.path.append(sys_path)
|
[
"lisiqi_i@didichuxing.com"
] |
lisiqi_i@didichuxing.com
|
83dd025957c4eaa4b5e35036642dff08662c5871
|
95a7450a1fdfc94cd859a32b19a70443599338a1
|
/src/categorizer.py
|
92ebcb6fbe5668e79e4f208c8a5dc5a6f5b78337
|
[
"MIT"
] |
permissive
|
smile-eh/unnamed-slack-nlp-bot
|
a8a6b3844bed521a58702dfe374aaa84255aa5af
|
1d572a4c7efd53cbb812a08a664f7c79de484bf3
|
refs/heads/master
| 2022-08-06T14:44:01.643942
| 2019-09-26T15:18:42
| 2019-09-26T15:18:42
| 208,820,870
| 0
| 0
|
NOASSERTION
| 2021-03-20T01:46:09
| 2019-09-16T14:29:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from sumy.nlp.stemmers import Stemmer
from sumy.nlp.tokenizers import Tokenizer
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.summarizers.lsa import LsaSummarizer
from sumy.utils import get_stop_words
import requests
import six
import json
LANGUAGE = "english"
SENTENCES_COUNT = 5
POST_COUNT = 30
def get_summarized_article(url):
"""
From a url, get a summarization of the page/article
Parameters
----------
url : str
the address of the article/text to summarize
Returns
----------
summarized_text : str
the summarized text of the article at the supplied url
"""
summarized_text = ""
try:
if domain_in_blacklist(url) == False:
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
# or for plain text files
# parser = PlaintextParser.from_file("document.txt", Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = LsaSummarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
text = ""
for sentence in summarizer(parser.document, SENTENCES_COUNT):
text = text + " " + str(sentence)
summarized_text = text
else:
summarized_text = None
except Exception as ke:
print("**********")
print("Exception in Categorizer!")
print("**********")
print("")
print(ke)
summarized_text = None
return summarized_text
def get_category(text):
"""
From a block of text, get the categorization of that text
Parameters
----------
text : str
the article/text to categorize
Returns
----------
category : str
the category of the text contents
"""
if text:
client = language.LanguageServiceClient()
if isinstance(text, six.binary_type):
text = text.decode("utf-8")
document = types.Document(
content=text.encode("utf-8"), type=enums.Document.Type.PLAIN_TEXT
)
categories = client.classify_text(document).categories
if categories:
return categories[0].name
return None
def domain_in_blacklist(url):
"""
determines if the provided url is in a blacklist
Parameters
----------
url : str
the url to check for blacklisting
"""
blacklist = [
"youtube.com",
"bloomberg.com",
"sec.gov",
"dannymoerkerke.com",
"localizingjapan.com",
"ianix.com",
]
for domain in blacklist:
if domain in url:
return True
return False
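# Minimal end-to-end sketch (an assumption, not part of the original module):
# summarising an article and classifying the summary requires network access and
# Google Cloud credentials (GOOGLE_APPLICATION_CREDENTIALS) to be configured.
# The sample URL below is just an illustrative input.
if __name__ == "__main__":
    sample_url = "https://en.wikipedia.org/wiki/Natural_language_processing"
    summary = get_summarized_article(sample_url)
    if summary:
        print(get_category(summary))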
|
[
"matthew.sta.clements@gmail.com"
] |
matthew.sta.clements@gmail.com
|
bff3168ed2c553c037a7ed990979ff7f7357784c
|
6cc57da2d24933dde56a5e6320aa9739f24e2faf
|
/bin/up-container
|
dc16b776e32114e34527e3eb2325b2e00923578b
|
[] |
no_license
|
artpaiva/docker
|
970fb448ef3cf64df3da2e4fa4518226cc06673b
|
796cf6a3a901a1de093da5105e00c1e0428096b0
|
refs/heads/master
| 2020-07-27T05:40:11.901231
| 2019-09-09T16:08:05
| 2019-09-09T16:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
#!/usr/bin/env python
import sys
import os
help_message = """
UP-CONTAINER
Usage:
{} [container-name] [-d]
Options:
-d Detached mode
"""
if 2 > len(sys.argv):
sys.stderr.write(help_message.format(sys.argv[0]))
sys.stderr.flush()
sys.exit(1)
container_name = sys.argv[1]
detached = '-d' if 3 == len(sys.argv) and '-d' == sys.argv[2] else ''
docker_compose_bin_folder = os.path.dirname(os.path.realpath(__file__))
root_folder = os.path.dirname(docker_compose_bin_folder)
docker_compose_file = os.path.join(root_folder, 'docker-compose.yml')
command = 'docker-compose -f {} up {} {}'
command = command.format(
docker_compose_file,
detached,
container_name
)
os.system(command)
|
[
"reisraff@gmail.com"
] |
reisraff@gmail.com
|
|
d787eb865522e027c866b9cfbf68f7d34216b835
|
11ab6a7c2a12f16746ac23db5aac7ff9892c4bff
|
/application/matches/forms.py
|
c395d6295a90bc5d6bf3ddaed4ba4fc192b5dad3
|
[] |
no_license
|
Granigan/BBMRS
|
6612f85dece7d92b3b168b7358f9c13bd18209a8
|
2f2aadd975c079d4ef8c15cbfd590c2b8cd07c68
|
refs/heads/master
| 2020-03-23T22:28:27.179942
| 2018-08-30T16:26:01
| 2018-08-30T16:26:01
| 142,178,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from flask_wtf import FlaskForm
from wtforms import SelectField, RadioField
class MatchForm(FlaskForm):
winner = SelectField('Winner', coerce=int)
loser = SelectField('Loser', coerce=int)
def find_teams(self, teams):
all_teams = []
for team in teams:
all_teams.append((team.id, team.name))
self.winner.choices = all_teams
self.loser.choices = all_teams
class Meta:
csrf = False
|
[
"teromarkustapio@gmail.com"
] |
teromarkustapio@gmail.com
|
ed105e597e49e0a14931fef642d1dc5506b3b365
|
952dc09c3e77016f4991d8b2297de32b3e3b45d8
|
/ops/ccs-ops-misc/synthetic-data/scripts/synthea-manual/generate-characteristics-file.py
|
bb1b925c65e92708a20097e857c53ef1bf39dd0f
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
CMSgov/beneficiary-fhir-data
|
1f3bd5ff9171975bc77e1a4b6971222342bb3bd9
|
0d170907736c5f957b7545ae26b12ba16e0c2550
|
refs/heads/master
| 2023-08-08T12:53:42.348530
| 2023-08-07T15:27:15
| 2023-08-07T15:27:15
| 203,852,942
| 47
| 34
|
NOASSERTION
| 2023-09-14T18:25:26
| 2019-08-22T18:41:16
|
Java
|
UTF-8
|
Python
| false
| false
| 7,685
|
py
|
#
# Script for creating a file that describes which bene ids were generated
# and what claim types were associated with each bene id.
# Will overwrite any characteristics.csv at output location, assuming queries succeed
#
# Args:
# 1: bene id start (inclusive, taken from previous end state properties / synthea properties file)
# 2: bene id end (exclusive, taken from new output end state properties)
# 3: file system location to write the characteristics file
# 4: which environment to check, should be a single value from the list of [test prd-sbx prod]
#
# Example runstring: python3 ./generate-characteristics-file.py -10000008009988 -10000010009985 ~/Documents/Test/ test
#
# Requires psycopg2 and boto3 installed
#
import sys
import psycopg2
import re
import csv
from pathlib import Path
import ssmutil
def generate_characteristics_file(args):
"""
Generates a beneficiary characteristics file for a given
synthea load, and exports it as a csv.
"""
bene_id_start = args[0]
bene_id_end = args[1]
output_path = args[2] if args[2].endswith('/') else args[2] + "/"
env = args[3]
db_string = ""
if "test" == env:
db_string = ssmutil.get_ssm_db_string("test")
elif "prd-sbx" == env:
db_string = ssmutil.get_ssm_db_string("prd-sbx")
elif "prod" == env:
db_string = ssmutil.get_ssm_db_string("prod")
else:
print(f"(Validation Failure) Unknown environment string {env}")
print("Returning with exit code 1")
sys.exit(1)
header = ['Beneficiary Id','MBI Unhashed','Part D Contract Number','Carrier Claims Total','DME Claims Total','HHA Claims Total','Hospice Claims Total','Inpatient Claims Total','Outpatient Claims Total','SNF Claims Total','Part D Events Total']
## get data for csv from db
bene_data = {}
carrier_data = {}
dme_data = {}
hha_data = {}
hospice_data = {}
inpatient_data = {}
outpatient_data = {}
snf_data = {}
pde_data = {}
try:
## bene data, 3 columns: bene id, unhashed mbi, concatenated contract numbers
bene_data = get_bene_data(bene_id_start, bene_id_end, db_string)
carrier_data = get_table_count("carrier_claims", bene_id_start, bene_id_end, db_string)
dme_data = get_table_count("dme_claims", bene_id_start, bene_id_end, db_string)
hha_data = get_table_count("hha_claims", bene_id_start, bene_id_end, db_string)
hospice_data = get_table_count("hospice_claims", bene_id_start, bene_id_end, db_string)
inpatient_data = get_table_count("inpatient_claims", bene_id_start, bene_id_end, db_string)
outpatient_data = get_table_count("outpatient_claims", bene_id_start, bene_id_end, db_string)
snf_data = get_table_count("snf_claims", bene_id_start, bene_id_end, db_string)
pde_data = get_table_count("partd_events", bene_id_start, bene_id_end, db_string)
except BaseException as err:
print(f"Unexpected error while running queries: {err}")
print("Returning with exit code 1")
sys.exit(1)
## synthesize data into final rows
final_data_rows = put_data_into_final_rows(bene_data, carrier_data, dme_data, hha_data, hospice_data, inpatient_data, outpatient_data, snf_data, pde_data)
## Write csv to filesystem + header
filePath = output_path + 'characteristics.csv'
print("Writing final csv...")
try:
with open(filePath, 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(final_data_rows)
num_rows = len(final_data_rows)
print(f"Wrote out {num_rows} to {filePath}")
except IOError as err:
print(f"IOError while opening/writing csv: {err}")
print("Returning with exit code 1")
sys.exit(1)
except BaseException as err:
print(f"Unexpected error while opening/writing csv: {err}")
print("Returning with exit code 1")
sys.exit(1)
print("Returning with exit code 0 (No errors)")
sys.exit(0)
def get_bene_data(bene_id_start, bene_id_end, db_string):
"""
Gets the initial data from the beneficiary table including the beneficiary id,
mbi, and a concatenated list of contract numbers.
"""
query = f"SELECT bene_id, mbi_num, concat_ws(',', ptd_cntrct_jan_id, ptd_cntrct_feb_id,ptd_cntrct_mar_id,ptd_cntrct_apr_id,ptd_cntrct_may_id,ptd_cntrct_jun_id,"\
f" ptd_cntrct_jul_id, ptd_cntrct_aug_id, ptd_cntrct_sept_id, ptd_cntrct_oct_id, ptd_cntrct_nov_id, ptd_cntrct_dec_id) as \"Part D Contract Number\""\
f" FROM public.beneficiaries WHERE bene_id <= {bene_id_start} and bene_id > {bene_id_end} order by bene_id desc"
print(f"Starting query for bene data...");
raw_query_response = _execute_query(db_string, query)
rows = len(raw_query_response)
print(f"Got {rows} results from bene data query.");
return raw_query_response
def get_table_count(table_name, bene_id_start, bene_id_end, db_string):
"""
Gets the table count for each bene in the specified range for the specified
database, and returns a dictionary with the bene id as the key and the
table count as the value.
"""
    query = "SELECT bene_id, count(*)"\
        f" FROM public.{table_name}"\
        f" WHERE bene_id <= {bene_id_start} and bene_id > {bene_id_end}"\
        " GROUP BY bene_id"\
        " ORDER BY bene_id desc;"
print(f"Starting query for {table_name} count...");
raw_query_response = _execute_query(db_string, query)
rows = len(raw_query_response)
print(f"Got {table_name} counts for {rows} benes.");
# put the entries in a dict for faster lookup later
dict_response = {}
for entry in raw_query_response:
dict_response[entry[0]] = entry[1]
return dict_response
def put_data_into_final_rows(bene_data, carrier_data, dme_data, hha_data, hospice_data, inpatient_data, outpatient_data, snf_data, pde_data):
"""
Takes the bene data and table counts and creates a list of rows that will
be used in the final csv characteristics file.
"""
final_rows = []
print("Setting up final data rows...")
for row in bene_data:
bene_id = row[0]
mbi = row[1]
contracts = row[2]
carrier_count = carrier_data[bene_id] if bene_id in carrier_data else 0
dme_count = dme_data[bene_id] if bene_id in dme_data else 0
hha_count = hha_data[bene_id] if bene_id in hha_data else 0
hospice_count = hospice_data[bene_id] if bene_id in hospice_data else 0
inpatient_count = inpatient_data[bene_id] if bene_id in inpatient_data else 0
outpatient_count = outpatient_data[bene_id] if bene_id in outpatient_data else 0
snf_count = snf_data[bene_id] if bene_id in snf_data else 0
pde_count = pde_data[bene_id] if bene_id in pde_data else 0
final_rows.append([bene_id, mbi, contracts, carrier_count, dme_count, hha_count, hospice_count, inpatient_count, outpatient_count, snf_count, pde_count])
return final_rows
def _execute_query(uri: str, query: str):
"""
Execute a PSQL select statement and return its results.
"""
conn = None
finalResults = []
try:
with psycopg2.connect(uri) as conn:
with conn.cursor() as cursor:
cursor.execute(query)
finalResults = cursor.fetchall()
finally:
conn.close()
return finalResults
## Runs the program via run args when this file is run
if __name__ == "__main__":
generate_characteristics_file(sys.argv[1:])
|
[
"noreply@github.com"
] |
CMSgov.noreply@github.com
|
921d06f5ed37c9e185f1ad7d11f2a4a67123c1df
|
af0cd6278280121578b56aaafefa1197c2cf1832
|
/backend/login/migrations/0001_initial.py
|
05441933c9cb5766e75a1f6d6408193e50c8c2c9
|
[] |
no_license
|
moshfiqrony/login-with-react-redux-django-restapi
|
c00888500c9b2fac40769eb7a04ce3e7e517f14c
|
5248e5b17b341cfdd43d234cacbca06b8b51ac3d
|
refs/heads/master
| 2020-04-22T09:16:31.442571
| 2019-02-12T10:56:57
| 2019-02-12T10:56:57
| 170,266,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
# Generated by Django 2.1.7 on 2019-02-12 07:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='loginModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=20)),
('password', models.CharField(max_length=20)),
],
),
]
|
[
"moshfiqrony@gmail.com"
] |
moshfiqrony@gmail.com
|
5192f40ea59ea9e73fd7ec25c1c410ab6db47993
|
25caffbb4e68704d8640c5178111de77153cb2bc
|
/antonkom.py
|
212167896b5a40e81ee000f85fcea03e17857fac
|
[] |
no_license
|
avivel97/forked
|
454947022d30948be630596ab6bb6ea2d1e71cea
|
1576eda874a87acee9c1e68977bc8006282e2d7c
|
refs/heads/main
| 2023-08-23T11:48:23.652216
| 2021-11-06T09:49:30
| 2021-11-06T09:49:30
| 425,203,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28
|
py
|
print("Hello pull-request")
|
[
"antoncomm@edge1.ru-central1.internal"
] |
antoncomm@edge1.ru-central1.internal
|
08e314bca1a7a58b67078db8b41f7a82071fa70a
|
c8bb5a2134ca0071c0c542df1815b26cd5f848e7
|
/src/16 - Gradient/02-Edge_detection_and_gradiant.py
|
4f869adc2100d64b5b81f402454354c32932ee3d
|
[
"MIT"
] |
permissive
|
hritik5102/Awesome-Computer-Vision-Guide
|
02cd1b97b864e0459f5ed2381a59d5c62d5aa39e
|
005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18
|
refs/heads/master
| 2023-02-28T00:57:03.550931
| 2021-01-29T15:33:34
| 2021-01-29T15:33:34
| 336,303,121
| 0
| 0
|
MIT
| 2021-02-05T15:04:30
| 2021-02-05T14:59:28
| null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
# Please refer to this:
'''
We use the functions: cv.Sobel (src, dst, ddepth, dx, dy, ksize = 3, scale = 1, delta = 0, borderType = cv.BORDER_DEFAULT)
Parameters
src input image.
dst output image of the same size and the same number of channels as src.
ddepth output image depth(see cv.combinations); in the case of 8-bit input images it will result in truncated derivatives.
dx order of the derivative x.
dy order of the derivative y.
ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.
scale optional scale factor for the computed derivative values.
delta optional delta value that is added to the results prior to storing them in dst.
borderType pixel extrapolation method(see cv.BorderTypes)
'''
import cv2
import numpy as np
def nothing(x): # callback function which is executed everytime trackbar value changes.
pass
cap = cv2.VideoCapture(0)
cv2.namedWindow('trackbars')
cv2.createTrackbar('lowh','trackbars',0,180,nothing) # 1.tracbar name
cv2.createTrackbar('highh','trackbars',0,180,nothing) # 2 .window name
cv2.createTrackbar('lows','trackbars',0,255,nothing) # 3. default value
cv2.createTrackbar('highs','trackbars',0,255,nothing) # 4 .maximum value
cv2.createTrackbar('lowv','trackbars',0,255,nothing) # 5. callback function
cv2.createTrackbar('highv','trackbars',0,255,nothing)
while True:
_, frame = cap.read()
lowh=cv2.getTrackbarPos('lowh','trackbars')
lows =cv2.getTrackbarPos('lows','trackbars')
lowv = cv2.getTrackbarPos('lowv','trackbars')
highh = cv2.getTrackbarPos('highh','trackbars')
highs = cv2.getTrackbarPos('highs','trackbars')
highv = cv2.getTrackbarPos('highv','trackbars')
hsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
lower_red = np.array([lowh,lows,lowv])
upper_red = np.array([highh,highs,highv])
mask = cv2.inRange(hsv, lower_red , upper_red)
    # cv2.CV_64F is a higher-precision output type, while np.uint8 is a simple 8-bit type.
    # We cannot use the simple type because converting to np.uint8 clips all negative slopes to zero,
    # which means those edges are lost; hence the higher-precision output type is used here.
laplacian = cv2.Laplacian(mask,cv2.CV_64F)
sobelx = cv2.Sobel(hsv,cv2.CV_64F,1,0,ksize= -1)
sobely = cv2.Sobel(hsv,cv2.CV_64F,0,1,ksize= -1)
#First argument is our input image. Second and third arguments are our minVal and maxVal respectively
edge = cv2.Canny(mask,120,150)
##cv2.imshow('original',frame)
cv2.imshow('laplacian',laplacian)
cv2.imshow('sobelx',sobelx)
cv2.imshow('sobely',sobely)
cv2.imshow('edge',edge)
cv2.imshow('mask',mask)
k = cv2.waitKey(4) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
cap.release()
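# Small standalone sketch (not part of the original script) illustrating the point above:
# with an 8-bit output depth the white-to-black transition yields a negative gradient that
# gets clipped to 0, while CV_64F keeps the sign. The tiny step image is illustrative only.
#
#   import cv2, numpy as np
#   step = np.array([[255, 255, 0, 0]] * 3, dtype=np.uint8)
#   cv2.Sobel(step, cv2.CV_8U, 1, 0, ksize=3)    # negative edge lost (clipped to zero)
#   cv2.Sobel(step, cv2.CV_64F, 1, 0, ksize=3)   # negative values preserved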
|
[
"hritik.jaiswal@somaiya.edu"
] |
hritik.jaiswal@somaiya.edu
|
c98b32d8712470c2607c4269f577f43eac9f1c36
|
067078390e2250174f9cf9ce42da7c3770940ef8
|
/src/models/model_checkin.py
|
df6b54b26258b60a82f21191e25280d5f8982079
|
[
"MIT"
] |
permissive
|
AndreasVikke-School/ComputerScience-Final
|
0444e93fb500efe724f59af82b0ebb6f5f239e41
|
52d09a5876bfde661a00736712db6e3d19be877d
|
refs/heads/master
| 2023-02-10T16:38:13.171988
| 2021-01-15T11:21:25
| 2021-01-15T11:21:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
# pylint: disable=E0401,R0903,W0221,R0801,C0116
"""
Model file for pynamodb checkin table.
:license: MIT
"""
import os
from pynamodb.attributes import UnicodeAttribute
from pynamodb.models import Model
from src.modules.get_export import get_export
from src.models.index_checkin import ConsultantDateIndex
class CheckInModel(Model):
'''CheckInModel Model Class'''
class Meta:
'''CheckIn Meta Class'''
if 'CheckInTableName' in os.environ:
table_name = os.environ['CheckInTableName']
else:
table_name = get_export('database-CheckInTableName')
region = 'eu-central-1'
host = 'https://dynamodb.eu-central-1.amazonaws.com'
uuid = UnicodeAttribute(hash_key=True)
consultant_uuid = UnicodeAttribute()
date = UnicodeAttribute()
device_id = UnicodeAttribute(null=True)
completed = UnicodeAttribute(null=False)
predictions = UnicodeAttribute()
consultant_uuid_date_index = ConsultantDateIndex()
user_input = UnicodeAttribute(null=True)
def __iter__(self):
for name, attr in self._get_attributes().items():
yield name, attr.serialize(getattr(self, name))
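# Hypothetical usage sketch (assumes AWS credentials and the DynamoDB table exist, and
# that the attribute values below are placeholders): create a check-in, save it, and use
# the __iter__ above to turn the item into a plain dict.
#
#   checkin = CheckInModel(uuid='abc-123', consultant_uuid='c-1',
#                          date='2021-01-15', completed='true', predictions='{}')
#   checkin.save()
#   as_dict = dict(checkin)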
|
[
"andreasvikke@gmail.com"
] |
andreasvikke@gmail.com
|
8ac907af1c525266d746bcad3de36ff5ceb1e8b7
|
c30dba02c52caf035bf1d21611e6b88fc9b29bd9
|
/JD1-2017-HA.py
|
0e3755cd530cfd1f7f97442f96c6b791eb696ddf
|
[] |
no_license
|
gjn1228/Rating
|
0f85c8983e16996210844331494921dc46451a80
|
99f429914271d374b5f47284f84b804ab35d10ae
|
refs/heads/master
| 2020-04-08T21:43:45.019373
| 2018-11-30T02:19:30
| 2018-11-30T02:19:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
# coding=utf-8 #
# Author GJN #
import xlrd
import pandas as pd
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Date, Integer, create_engine, ForeignKey, Float
# wb = xlrd.open_workbook(r'Y:\Departments\CSLRM\Level D\FB Odds Locals\JD1\JD1__2018.xlsm')
Base = declarative_base()
# engine = create_engine('mysql+mysqlconnector://gjn:pass@172.18.1.158:3306/betradar')
engine = create_engine('mysql+mysqlconnector://root:password@localhost:3306/jd1')
def get_ha(table):
df = pd.read_sql(table, engine)
dfh = df[df['H/A'] == 'H'].iloc[:, [1, 3, 9, 10]].groupby('team').sum()
dfa = df[df['H/A'] == 'A'].iloc[:, [1, 3, 9, 10]].groupby('team').sum()
dfj = dfh - dfa
row = dfj.shape[0] - 1
s = dfj['sup'].sum() / (2 * row)
ha = dfj['sup'].map(lambda x: (x - s) / (row - 1))
return ha
ha2017 = get_ha('2017')
hajd2 = get_ha('2017jd2')
|
[
"gjn19911228@163.com"
] |
gjn19911228@163.com
|
dedf92cadcc2da9796c6cf61e3ccaf653da7f7cb
|
9d4af8989bc4a9354a4a2e36f14b0e7fed6a31d5
|
/pack/semisupervised/recursive_clustering.py
|
8dfa38447ffb4ca09d2de8629dfacd0e1e679b9d
|
[] |
no_license
|
radevlabs/rdlearn
|
cc5c57cf19c27d559dd4960139659860c39f5985
|
a5d058f60d1f23423fb5b55a98c2732b2fdbd787
|
refs/heads/master
| 2022-10-09T00:54:35.526722
| 2020-06-06T05:08:07
| 2020-06-06T05:08:07
| 269,874,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
from ..base import SemiSupervisedBase
from sklearn.metrics.pairwise import euclidean_distances
from ..utils import random_per_target
import numpy as np
import warnings
import datetime
import sys
warnings.filterwarnings('ignore')
class RecursiveClustering(SemiSupervisedBase):
"""
Example
>> rc = RecursiveClustering(fp={'target':[], 'centroid':[], 'n':[], 'dt':[]}, th=0., init='random', verbose=True, max_recursive=2000)
>> rc.fit(x=x, y=y)
>>
References
-
"""
def __init__(self, fp, th=0., init='random', verbose=True, max_recursive=2000):
"""
Init class
        :param fp: final partitions
:param th: threshold
:param init: initial centroid ('random' or callback)
:param verbose: show the process
:param max_recursive: max recursive
"""
self._th = th
self._fp = fp
if init == 'random':
self._init = random_per_target
else:
self._init = init
self._fp['dt'].append(datetime.datetime.now())
self._verbose = verbose
self._max_recursive = max_recursive
sys.setrecursionlimit(self._max_recursive)
def fit(self, x, y):
"""
Learn data
:param x:
:param y:
:return: self
"""
# convert x y to ndarray
x = np.array(x)
y = np.array(y)
# validate the data
x, y = self._validate(x, y)
        # find unique targets; null or None will be treated as unlabeled data
y_unique = np.unique(y)
y_unique = y_unique[y_unique != None]
# make partitions
partitions = [[] for c in range(y_unique.shape[0])]
        # clustering process
labels = self._cluster(n_clusters=y_unique.shape[0], x=x, y=y, init_function=self._init)
        # assign each data point to its corresponding partition
for idx, label in enumerate(labels):
partitions[label].append([idx, y[idx]])
# convert each partition to ndarray
for c in range(y_unique.shape[0]):
partitions[c] = np.array(partitions[c])
# check every partition
for partition in partitions:
# find unique target and n data per target
target = np.unique(partition[:, 1], return_counts=True)
n_per_target = target[1]
target = target[0]
            # find null indices and delete them
unlabel_idx = np.where(target == None)[0]
target = np.delete(target, unlabel_idx)
n_per_target = np.delete(n_per_target, unlabel_idx)
# find max n data index
highest_target_idx = np.argmax(n_per_target)
            # compute the relative percentage
rps = []
for c in range(target.shape[0]):
if c != highest_target_idx:
rps.append(n_per_target[c] / n_per_target[highest_target_idx])
            # get the highest relative percentage
try:
highest_rps = np.max(rps)
except:
highest_rps = '-'
if self._verbose:
                v = f'recursions : {len(self._fp["dt"])}x | '
                v += f'partitions : {len(self._fp["target"])}'
sys.stdout.write(f'\r{v}')
            # recurse if the relative percentage exceeds the threshold
if target.shape[0] > 1 and highest_rps > self._th:
new_x = x[partition[:, 0].astype(np.int)]
new_y = partition[:, 1]
self._recursiveClass(new_x=new_x, new_y=new_y, fp=self._fp, th=self._th, init=self._init, verbose=self._verbose, max_recursive=self._max_recursive)
else:
target = target[highest_target_idx]
centroid = list(x[partition[:, 0].astype(np.int)].mean(axis=0))
self._fp['target'].append(target)
self._fp['centroid'].append(centroid)
self._fp['n'].append(n_per_target[highest_target_idx])
return self
def _recursiveClass(self, new_x, new_y, fp, th, init, verbose, max_recursive):
RecursiveClustering(th=th, fp=fp, verbose=verbose, init=init, max_recursive=max_recursive).fit(new_x, new_y)
def _cluster(self, n_clusters, x, y, init_function):
pass
def _validate(self, x, y):
unique_x, indices, n_x = np.unique(x, axis=0, return_counts=True, return_index=True)
return x[indices], y[indices]
def getFP(self):
return {'target': np.array(self._fp['target']), 'centroid': np.array(self._fp['centroid']),
'n': np.array(self._fp['n']), 'dt': np.array(self._fp['dt'])}
def predict(self, x):
x = np.array(x)
fp = self.getFP()
distances = euclidean_distances(x, fp['centroid'])
y = []
for d in distances:
y.append(fp['target'][np.argmin(d)])
return np.array(y)
|
[
"rafyakbar@smadia.id"
] |
rafyakbar@smadia.id
|
eea6248afe1f802ba886819f67260d408c32ed07
|
2c8308e6741aff6f8c21819761f6cb2fa0beca81
|
/pybo/migrations/0007_auto_20210203_0438.py
|
7663368a7c07ee863145ab53e92f159c7be33407
|
[] |
no_license
|
dhraudwn/pybo
|
dddf467cfd095424cd9a08530ecc407c39d9a4cd
|
90f032183789f3b77507c8ff14bb229ad7d96ee6
|
refs/heads/master
| 2023-07-13T19:16:01.585105
| 2021-08-21T06:16:49
| 2021-08-21T06:16:49
| 335,436,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
# Generated by Django 3.1.3 on 2021-02-02 19:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('pybo', '0006_comment_answer'),
]
operations = [
migrations.AddField(
model_name='question',
name='voter',
field=models.ManyToManyField(related_name='voter_question', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='question',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_question', to=settings.AUTH_USER_MODEL),
),
]
|
[
"dhraudwn@naver.com"
] |
dhraudwn@naver.com
|
6cdc5a243ebb2e5363c87017de3e08abeb63034b
|
ffbf7dc1736f785feed8e60716d0199b22f16114
|
/apps/users/models.py
|
7ac09081d82622848c4c28ce392a3240cdeb424b
|
[] |
no_license
|
andrewkharzin/fliss
|
5c554fa290932ee3bb9879700581191dd1d3c158
|
0cf6bc1449895320e1491dede0e5650b0c9b8866
|
refs/heads/main
| 2023-05-13T06:37:56.097495
| 2021-06-04T16:32:40
| 2021-06-04T16:32:40
| 373,489,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .managers import CustomUserManager
class CustomUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), unique=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
date_joined = models.DateTimeField(default=timezone.now)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = CustomUserManager()
def __str__(self):
return self.email
|
[
"andrewkharzin@gmail.com"
] |
andrewkharzin@gmail.com
|
4d0ff729065ab269ef0be6509c7fa34a2d414a91
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/RSGraviton/RSGravToGG_kMpl02_M_760_TuneCUEP8M1_13TeV_pythia8_cfi.py
|
84ae4c4442902c4becf9bf5797358a8aaccdf8cd
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,193
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(1.095e-3),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ExtraDimensionsG*:all = on',
'ExtraDimensionsG*:kappaMG = 1.08',
'5100039:m0 = 760',
'5100039:onMode = off',
'5100039:onIfAny = 22',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters',
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sheffield@physics.rutgers.edu"
] |
sheffield@physics.rutgers.edu
|
355631178c6ad2f9ef3de2a48bcc1a8a5dc2e9b6
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/networkcloud/azext_networkcloud/aaz/latest/networkcloud/trunkednetwork/_update.py
|
04291de060939c1bee340fb53c79b39a080844ba
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 10,691
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud trunkednetwork update",
)
class Update(AAZCommand):
"""Update tags associated with the provided trunked network.
:example: Update tags for trunked network
az networkcloud trunkednetwork update --resource-group "resourceGroupName" --name "trunkedNetworkName" --tags key1="myvalue1" key2="myvalue2"
"""
_aaz_info = {
"version": "2023-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/trunkednetworks/{}", "2023-07-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.trunked_network_name = AAZStrArg(
options=["-n", "--name", "--trunked-network-name"],
help="The name of the trunked network.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,28}[a-zA-Z0-9])$",
),
)
# define Arg Group "TrunkedNetworkUpdateParameters"
_args_schema = cls._args_schema
_args_schema.tags = AAZDictArg(
options=["--tags"],
arg_group="TrunkedNetworkUpdateParameters",
help="The Azure resource tags that will replace the existing ones.",
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg()
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.TrunkedNetworksUpdate(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class TrunkedNetworksUpdate(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/trunkedNetworks/{trunkedNetworkName}",
**self.url_parameters
)
@property
def method(self):
return "PATCH"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"trunkedNetworkName", self.ctx.args.trunked_network_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"client_flatten": True}}
)
_builder.set_prop("tags", AAZDictType, ".tags")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.extended_location = AAZObjectType(
serialized_name="extendedLocation",
flags={"required": True},
)
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
extended_location = cls._schema_on_200.extended_location
extended_location.name = AAZStrType(
flags={"required": True},
)
extended_location.type = AAZStrType(
flags={"required": True},
)
properties = cls._schema_on_200.properties
properties.associated_resource_ids = AAZListType(
serialized_name="associatedResourceIds",
flags={"read_only": True},
)
properties.cluster_id = AAZStrType(
serialized_name="clusterId",
flags={"read_only": True},
)
properties.detailed_status = AAZStrType(
serialized_name="detailedStatus",
flags={"read_only": True},
)
properties.detailed_status_message = AAZStrType(
serialized_name="detailedStatusMessage",
flags={"read_only": True},
)
properties.hybrid_aks_clusters_associated_ids = AAZListType(
serialized_name="hybridAksClustersAssociatedIds",
flags={"read_only": True},
)
properties.hybrid_aks_plugin_type = AAZStrType(
serialized_name="hybridAksPluginType",
)
properties.interface_name = AAZStrType(
serialized_name="interfaceName",
)
properties.isolation_domain_ids = AAZListType(
serialized_name="isolationDomainIds",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.virtual_machines_associated_ids = AAZListType(
serialized_name="virtualMachinesAssociatedIds",
flags={"read_only": True},
)
properties.vlans = AAZListType(
flags={"required": True},
)
associated_resource_ids = cls._schema_on_200.properties.associated_resource_ids
associated_resource_ids.Element = AAZStrType()
hybrid_aks_clusters_associated_ids = cls._schema_on_200.properties.hybrid_aks_clusters_associated_ids
hybrid_aks_clusters_associated_ids.Element = AAZStrType()
isolation_domain_ids = cls._schema_on_200.properties.isolation_domain_ids
isolation_domain_ids.Element = AAZStrType()
virtual_machines_associated_ids = cls._schema_on_200.properties.virtual_machines_associated_ids
virtual_machines_associated_ids.Element = AAZStrType()
vlans = cls._schema_on_200.properties.vlans
vlans.Element = AAZIntType()
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _UpdateHelper:
"""Helper class for Update"""
__all__ = ["Update"]
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
5c262f366d8d1e1138abe8febc21ad9caf7c08fa
|
30d9471ab2e2d8999b442d5fc9a6d27ce05defe1
|
/storage/migrations/0005_auto_20151102_1223.py
|
d3ccbe0617173929d347864ec871ff80b8a6716e
|
[] |
no_license
|
pevadi/uva-inform-dashboard
|
1c0b181faa9b09eaea12981a08c6700837587190
|
32da730fce2aa152b7b51eda7c6b73be6bb42387
|
refs/heads/master
| 2021-01-12T13:39:36.919950
| 2017-03-08T15:27:29
| 2017-03-08T15:27:29
| 70,059,907
| 0
| 1
| null | 2016-12-18T13:34:32
| 2016-10-05T12:50:55
|
Python
|
UTF-8
|
Python
| false
| false
| 901
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storage', '0004_activity_remotely_stored'),
]
operations = [
migrations.CreateModel(
name='ActivityExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.URLField(max_length=255)),
('value', models.CharField(max_length=255)),
('location', models.CharField(default=b'R', max_length=2, choices=[(b'R', b'Result extension')])),
],
),
migrations.AddField(
model_name='activity',
name='extensions',
field=models.ManyToManyField(to='storage.ActivityExtension'),
),
]
|
[
"sanderlatour@gmail.com"
] |
sanderlatour@gmail.com
|
2870901c222fef6ad318c224a43ab05c4e22f3a7
|
9dcbe78d35b6b3365fbde13e9e831fa236767d3e
|
/node_modules/libpq/build/config.gypi
|
e26ec9ac5f41a5fc644b1a6269b0666875d1c84d
|
[
"MIT"
] |
permissive
|
zfranklyn/juke-react-router
|
141dc63dfec4e155314612195301c95f5ba7ec12
|
cef37143fc30ee57f253f72380ef5cf8a661482f
|
refs/heads/master
| 2021-01-17T18:00:34.959684
| 2016-10-13T21:44:54
| 2016-10-13T21:44:54
| 70,823,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,279
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 48,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "48.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/franklyn/.node-gyp/6.7.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/franklyn/.npm-init.js",
"userconfig": "/Users/franklyn/.npmrc",
"node_version": "6.7.0",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/franklyn/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.10.3 node/v6.7.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/0n/tb35tkk56mz4z2_xhppdtt200000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"zfranklyn@gmail.com"
] |
zfranklyn@gmail.com
|
a3f98a432b3cdbca139dec9dd11fe38d8329e893
|
d838bed08a00114c92b73982a74d96c15166a49e
|
/docs/data/learn/Algebra/input/arithmetic_code/Factor.py
|
cf9e71bdc09473d7329dafa4ce1b677d51a986c5
|
[] |
no_license
|
offbynull/offbynull.github.io
|
4911f53d77f6c59e7a453ee271b1e04e613862bc
|
754a85f43159738b89dd2bde1ad6ba0d75f34b98
|
refs/heads/master
| 2023-07-04T00:39:50.013571
| 2023-06-17T20:27:05
| 2023-06-17T23:27:00
| 308,482,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,814
|
py
|
from __future__ import annotations
from Output import log_indent, log_unindent, log, log_decorator
#MARKDOWN_NAIVE
@log_decorator
def factor_naive(num: int) -> set[int]:
log(f'Factoring {num}...')
log_indent()
factors: set[int] = set()
for factor1 in range(1, num+1):
for factor2 in range(1, num+1):
log(f'Testing if {factor1} and {factor2} are factors...')
if factor1 * factor2 == num:
factors.add(factor1)
factors.add(factor2)
log(f'Yes')
else:
log(f'No')
log_unindent()
log(f'{factors}')
return factors
#MARKDOWN_NAIVE
#MARKDOWN_FAST
@log_decorator
def factor_fast(num: int) -> set[int]:
log(f'Factoring {num}...')
log_indent()
factors: set[int] = set()
for factor1 in range(1, num+1):
log(f'Test if {factor1} is a factor...')
factor2 = num // factor1
remainder = num - (factor1 * factor2)
if remainder == 0:
factors.add(factor1)
factors.add(factor2)
log(f'Yes: ({factor1} and {factor2} are factors)')
else:
log(f'No')
log_unindent()
log(f'{factors}')
return factors
#MARKDOWN_FAST
#MARKDOWN_FASTEST
@log_decorator
def factor_fastest(num: int) -> set[int]:
log(f'Factoring {num}...')
log_indent()
factors: set[int] = set()
for factor1 in range(1, num+1):
log(f'Test if {factor1} is a factor...')
factor2 = num // factor1
remainder = num - (factor1 * factor2)
if remainder == 0:
factors.add(factor1)
factors.add(factor2)
log(f'Yes: ({factor1} and {factor2} are factors)')
else:
log(f'No')
if factor2 <= factor1:
break
log_unindent()
log(f'{factors}')
return factors
#MARKDOWN_FASTEST
#MARKDOWN_PRIMETEST
@log_decorator
def is_prime(num: int) -> bool:
log(f'Test if {num} is prime...')
log_indent()
num_factors = factor_fastest(num)
    # At a minimum, every counting number has the factors 1 and the number itself (2 factors). If
    # there are more factors than that, it's a composite. Otherwise, it's a prime.
log_unindent()
if len(num_factors) == 2:
log(f'{num}\'s factors are {num_factors} -- it is a prime')
return True
else:
log(f'{num}\'s factors are {num_factors} -- it is a composite')
return False
#MARKDOWN_PRIMETEST
#MARKDOWN_FACTORTREE
@log_decorator
def factor_tree(num: int) -> FactorTreeNode:
log(f'Creating factor tree for {num}...')
factors = factor_fastest(num)
    # remove factor pairs that can't be used in the factor tree: (1, num) or (num, 1)
factors = set([f for f in factors if f != 1 and f != num])
ret = FactorTreeNode()
if len(factors) == 0:
ret.value = num
        log(f'Cannot factor: {num} is prime -- resulting tree: {ret}')
else:
factor1 = next(iter(factors))
factor2 = num // factor1
ret.value = num
ret.left = factor_tree(factor1)
ret.right = factor_tree(factor2)
log(f'Factored {num} to {factor1} and {factor2} -- resulting tree: {ret}')
return ret
#MARKDOWN_FACTORTREE
class FactorTreeNode:
value: int
left: FactorTreeNode | None
right: FactorTreeNode | None
def __init__(self):
self.left = None
self.right = None
def get_prime_factors(self, output_list: list[int] = None) -> list[int]:
if output_list is None:
output_list = []
if self.left is None and self.right is None:
if self.value != 1: # REMEMBER: 1 is not a prime number
output_list.append(self.value)
if self.left is not None:
self.left.get_prime_factors(output_list)
if self.right is not None:
self.right.get_prime_factors(output_list)
return output_list
def __str__(self):
ret = str(self.value)
if self.left is not None and self.right is not None:
ret += '('
if self.left is not None:
ret += str(self.left)
ret += ','
if self.right is not None:
ret += str(self.right)
ret += ')'
return ret
#MARKDOWN_LADDER
@log_decorator
def ladder(num: int) -> list[int]:
prime_factors: list[int] = []
log(f'Testing primes (using ladder method) to see which is factor of {num}...')
log_indent()
while not is_prime(num):
prime_to_test = 2
while True:
            log(f'Testing if {num} is divisible by {prime_to_test}...')
new_num = num // prime_to_test
remainder = num - (new_num * prime_to_test)
if remainder == 0:
break
prime_to_test = calculate_next_prime(prime_to_test)
log(f'Found! {prime_to_test} is a prime factor -- {new_num} * {prime_to_test} = {num}')
prime_factors.append(prime_to_test)
num = new_num
log(f'Testing primes to see which is factor of {num}...')
log(f'{num} itself is a prime!')
prime_factors.append(num)
log_unindent()
log(f'Prime factors: {prime_factors}')
return prime_factors
#MARKDOWN_LADDER
def calculate_next_prime(last_prime: int) -> int:
next_possible_prime = last_prime + 1
while True:
if is_prime(next_possible_prime):
return next_possible_prime
else:
next_possible_prime += 1
if __name__ == '__main__':
# factors = factor_naive(int(24))
# factors = factor_fast(int(24))
# factors = factor_fastest(int(24))
# print(f'{factors}')
# print(f'{prime_test(int(49))}')
tree = factor_tree(24)
print(f'{tree}')
# print(f'{ladder(int(24))}')
|
[
"offbynull@gmail.com"
] |
offbynull@gmail.com
|
4fbfaff03a0cb8220c9044fc80f60040a941005c
|
199eab10e95d3ac98505e1633b55d23a48ac7f7d
|
/feaas/runners/__init__.py
|
0ae19706ac78f27bbbf84e3668bc38423a4a2fcd
|
[
"BSD-3-Clause"
] |
permissive
|
tsuru/varnishapi
|
b2fd3fb544bb5953829fcf4ac92c08db10f5f08a
|
d63a8c8c5f9c837855509fc5af59d8213c1c91d6
|
refs/heads/master
| 2021-05-21T11:54:06.779056
| 2015-03-12T14:19:53
| 2015-03-12T14:19:53
| 8,635,749
| 3
| 5
|
BSD-3-Clause
| 2021-02-08T20:17:15
| 2013-03-07T20:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
# Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import time
from feaas import storage
class Base(object):
def __init__(self, manager, interval, *locks):
self.manager = manager
self.storage = manager.storage
self.interval = interval
def init_locker(self, *lock_names):
self.locker = storage.MultiLocker(self.storage)
for lock_name in lock_names:
self.locker.init(lock_name)
def loop(self):
self.running = True
while self.running:
self.run()
time.sleep(self.interval)
def stop(self):
self.running = False
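# Illustrative subclass sketch (not part of the original module): Base.loop()
# calls self.run(), which the base class does not define, so concrete runners
# are expected to implement it. The manager object passed in must expose a
# `storage` attribute, as __init__ above shows; the lock name is hypothetical.
class ExampleRunner(Base):
    def __init__(self, manager, interval=10):
        super(ExampleRunner, self).__init__(manager, interval)
        self.init_locker("example_lock")

    def run(self):
        # one unit of work per interval; a real runner would talk to self.storage
        print("running one iteration")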
|
[
"fss@corp.globo.com"
] |
fss@corp.globo.com
|
e1b76576199454d255edba31a8b8ddf421be22ef
|
f20579a647f3a0308cd03f449d771fabf233c6a3
|
/tests/scratch.py
|
e5d61ce831001edfd9044b714bcb8e4cf7c92d90
|
[] |
no_license
|
jonathanchancey/wunbot
|
b4179bbf7a13627f4d9a531bb7c3374123a7ab49
|
ed417dbe0ab6ba86d8102464f38c1aecdeb101f9
|
refs/heads/master
| 2023-08-17T10:23:41.368160
| 2021-08-06T17:34:01
| 2021-08-06T17:34:01
| 105,359,149
| 0
| 1
| null | 2021-08-06T17:34:02
| 2017-09-30T09:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 4,770
|
py
|
# @bot.event
# async def discord.on_command(left : int, right : int):
# """Adds two numbers together."""
# await bot.say(left + right)
# @bot.command(pass_context=True)
# async def and(ctex):
# if "dance" in ctx.message.content:
# await ctx.send('Your message is {} characters long.'.format(ctx.message.content))
# @bot.group()
# async def get(ctx):
# if ctx.invoked_subcommand is None:
# await ctx.send('Invalid dance command passed...')
# @git.command()
# async def push(ctx, remote: str, branch: str):
# await ctx.send('Pushing to {} {}'.format(remote, branch))
# @bot.event
# async def on_message(message):
# await my_background_task()
# if message.content.startswith('!dayn'):
# await bot.send_message(message.channel, day_of_year)
# elif message.content.startswith('!potd'):
# try:
# await bot.send_file(message.channel, "pictures\\" + filenames[day_of_year])
# except:
# await bot.send_message(message.channel, "File for day "+ str(day_of_year) + " not found")
# elif message.content.startswith('!strike'):
# temp_strike_msg = "Strike given to " + message.content[8:]
# await bot.send_message(message.channel, temp_strike_msg) #, tts=True
# # await bot.send_message(message.channel, message.mentions[0])
# conn = sqlite3.connect(strikeDB)
# c = conn.cursor()
# # Creating a new SQLite table with 1 column
# c.execute('CREATE TABLE {tn} ({nf} {ft},strikes integer)'\
# .format(tn=strikeTable, nf="user", ft="TEXT"))
# conn.commit()
# conn.close()
# temp_list = []
# with open('strikes.csv', newline='') as csvfile:
# spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
# for row in spamreader:
# print(' '.join(row))
# temp_list.extend(spamreader)
# with open('strikes.csv', 'w+', newline='') as csvfile:
# spamwriter = csv.writer(csvfile, delimiter=' ',
# quotechar='|', quoting=csv.QUOTE_MINIMAL)
# for line, row in enumerate(temp_list):
# data = message.mentions[0].get(line, row)
# spamwriter.writerow(data)
# TODO add launching functionality back
# TODO add restart and stop functionality
# TODO add user friendly way to add and remove programs
# TODO add command to display currently running programs
# TODO add python image generation and send images with information about currently running programs
# with open('strikes.csv', 'rb') as infile, open('strikes.csv.new', 'wb') as outfile:
# # with open('strikes.csv','w+',newline='\n') as csvDataFile:
# # csvReader = csv.reader(csvDataFile)
# # csvWriter = csv.writer(csvDataFile)
# writer = csv.writer(outfile)
# print("writer init")
# for row in csv.writer(infile):
# if row[0] == message.mentions[0]:
# print("if")
# # print(message.mentions[0] + " has " + row[1] + " strikes.")
# # writer.writerow([message.mentions[0]], 1)
# else:
# print("else")
# ## newstrike = row[1]+1
# # writer.writerow([message.mentions[0]], 1)
# os.rename('strikes.csv.new','strikes.csv')
# elif message.content.startswith('!launch Sev'): # terrible code
# await bot.send_message(message.channel, "opening " + "Sev")
# with open('files.csv') as csvDataFile:
# csvReader = csv.reader(csvDataFile)
# for row in csvReader:
# if row[0] == "Sev":
# print("launching " + row[1])
# #open_file(row[1])
# #joins the working directory and the filename
# abs_file_path_row = os.path.join(abs_file_path,row[1])
# open_file(abs_file_path_row)
# elif message.content.startswith('!launch sev'):# terrible code, I'm sorry albert einstein
# await bot.send_message(message.channel, "opening " + "Sev")
# with open('files.csv') as csvDataFile:
# csvReader = csv.reader(csvDataFile)
# for row in csvReader:
# if row[0] == "Sev":
# print("launching " + row[1])
# #open_file(row[1])
# #joins the working directory and the filename
# abs_file_path_row = os.path.join(abs_file_path,row[1])
# open_file(abs_file_path_row)
|
[
"jonathan22711@gmail.com"
] |
jonathan22711@gmail.com
|
9fcdf522466f2ce8b3618bd7bc929acfeab4f0d0
|
129f6db8cbc1134b96e09ff0ee3b2e35e5d34705
|
/190905/solvingclub_0906_rhombus.py
|
adcde3b1cf3fd140c22a9940ab4aee2ef18e7762
|
[] |
no_license
|
ghleokim/algorithm
|
0dee76e1896dfe0c7a55248a4c2d6da8f09f371c
|
47f4d9b7e836babfa12d53b645f15d9038589ebe
|
refs/heads/master
| 2021-07-01T21:03:58.891015
| 2020-11-30T03:56:51
| 2020-11-30T03:56:51
| 200,062,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
# When N is odd, draw a rhombus
# 1: 1
# 3:
# 0 1 0
# 1 1 1
# 0 1 0
# 5:
# 0 0 1 0 0
# 0 1 1 1 0
# 1 1 1 1 1
# 0 1 1 1 0
# 0 0 1 0 0
a = [[0,0,1,0,0],[0,1,1,1,0],[1,1,1,1,1]]
N = 5
mid = N // 2
k = 0
offset = 1
for i in range(N):
print(mid, k, offset)
for j in range(mid-k, mid+k+1):
print(' ', j, end=' ')
print()
if k == mid: offset *= -1
k += offset
# for T in range(int(input())):
# N = int(input())
# board = [[*map(int,[*input()])] for i in range(N)]
# mid, k, offset = N//2, 0, 1
# res = 0
# for i in range(N):
# res += sum(board[i][mid-k:mid+k+1])
# if k == mid: offset *= -1
# k += offset
# print('#',end='');print(T+1,res)
#shorten
for T in range(int(input())):m=int(input())//2;print('#',end='');print(T+1,sum([sum([*map(int,[*input()])][abs(i-m):2*m-abs(i-m)+1])for i in range(m*2+1)]))
for T in range(int(input())):m=int(input())//2;print(f'#{T+1} {sum([sum([*map(int,[*input()])][abs(i-m):2*m-abs(i-m)+1])for i in range(m*2+1)])}')
"""
1
5
14054
44250
02032
51204
52212
"""
|
[
"gh.leokim@gmail.com"
] |
gh.leokim@gmail.com
|
fbdb5dd9319218916d8ec89b907ab6a4c24dfde5
|
0232c8fff943b65cb2f33da7e2743ea2db3b5c1e
|
/gym_square/test/.test_human_input.py
|
4c039508221f1987c659d7e50df27438e8fb1c33
|
[
"MIT"
] |
permissive
|
gpldecha/gym-square
|
6112bef27ad2639217300d73dfb22f07b3c532b8
|
10e23d1458f2d8e893514b35e25494ba449f1af3
|
refs/heads/master
| 2021-01-11T21:07:36.256930
| 2018-01-21T13:47:59
| 2018-01-21T13:47:59
| 79,250,471
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
import unittest
import gym
from gym_square.envs.square_env import SquareEnv
from gym_square.envs.square_world.keyboard import Keyboard
from gym_square.envs.square_world.reward import Reward
from time import sleep
import numpy as np
import matplotlib.cm as cmx
class TestLeftRightEnv(unittest.TestCase):
def test_human_square_env(self):
env = SquareEnv()
env.reset()
env.square_world.set_agent_state(55)
cm = cmx.get_cmap('brg')
assert isinstance(env.square_world.reward,Reward)
env.square_world.reward.set_color_map(cm)
keyboard = Keyboard()
env.render()
for i in range(100):
env.render()
action = keyboard.get_action()
observation, reward, done, info = env.step(action)
            print('i: ', i)
            print('act: ', action)
            print('obs: ', observation)
            print('rew: ', reward)
            print('done: ', done)
            print(' ')
if done:
                print('Episode Finished')
break
return True
if __name__ == '__main__':
unittest.main()
|
[
"chambrierg@gmail.com"
] |
chambrierg@gmail.com
|
963eea28b326e12da13f6326ac4ac774fa31e013
|
71796056186081d034eef90d18649a88b2f5821e
|
/app/main/controller/movie.py
|
d9acf42972eb3e0f5b01f8d150d2e95649074d1a
|
[] |
no_license
|
hamza5213/Umot_flask
|
ca434c1182d60bc0e63309807b5f278a6b3850ce
|
9f6774038deb90a930d336955d6aa01f473748d9
|
refs/heads/master
| 2023-04-05T00:14:14.771548
| 2020-06-12T21:05:57
| 2020-06-12T21:05:57
| 271,104,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,177
|
py
|
from flask_restplus import Resource, reqparse
from ..service import logging_service
from ..service import movie_service
from ..util.dtos import get_response, MovieDto
_logger = logging_service.get_logger(__name__)
api = MovieDto.api
_movie_search = MovieDto.movie_search
_response = MovieDto.movie_response
@api.route('/search')
class Search(Resource):
@api.doc('Movie Title')
@api.param('query', 'Movie Title')
@api.marshal_with(_response)
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, help='query cannot be null')
args = parser.parse_args()
query = args['query']
            if query:
data = movie_service.search_movie(query)
return get_response(200, data, 'Success', True)
else:
return get_response(300, [], 'query is null', False)
except Exception as e:
_logger.error(e)
return get_response(300, [], str(e), False)
@api.route('/search/all')
class SearchAll(Resource):
@api.doc('Movie Title')
@api.param('query', 'Movie Title')
@api.marshal_with(_response)
def get(self):
try:
parser = reqparse.RequestParser()
parser.add_argument('query', type=str, help='query cannot be null')
args = parser.parse_args()
query = args['query']
            if query:
data = movie_service.search_all(query)
return get_response(200, data, 'Success', True)
else:
return get_response(300, [], 'query is null', False)
except Exception as e:
_logger.error(e)
return get_response(300, [], str(e), False)
@api.route('/sync_es')
class SyncES(Resource):
@api.doc('Sync DataBase with Elastic Search')
@api.marshal_with(_response)
def get(self):
try:
# movie_service.sync_es()
return get_response(200, [], 'Success', True)
except Exception as e:
_logger.error(e)
return get_response(300, [], str(e), False)
|
[
"35306029+hamza5213@users.noreply.github.com"
] |
35306029+hamza5213@users.noreply.github.com
|
f6de17a88c1056a935f3f6e3c4f9e77152fdda5b
|
d0da45b2cac517d74f9a6e5675fbc0b07972a3ef
|
/projects/migrations/0001_initial.py
|
ed75c4fbd8c8dd7990f8003286ece49f4cc0a8b1
|
[] |
no_license
|
rachelistone/portfolio
|
db989547902add1451ef4babe03e6db4116b1db0
|
3fff533b7387985be0a686bbabf82a7db2798177
|
refs/heads/master
| 2023-08-23T07:46:49.114540
| 2021-10-26T11:32:51
| 2021-10-26T11:32:51
| 317,668,588
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# Generated by Django 3.0.2 on 2020-10-18 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('picture', models.ImageField(upload_to='')),
('description', models.TextField()),
('link', models.URLField(blank=True)),
],
),
]
|
[
"rs0527677843@gmail.com"
] |
rs0527677843@gmail.com
|
cac81237031b341e3db1171aa4b3b3f3737f066f
|
65ca44041d7925372120cdbcb8e08fa7c507c035
|
/spresso/controller/grant/settings.py
|
edb96ec3a66eac278ad5cb192c2a02cf17d0ebfc
|
[
"MIT"
] |
permissive
|
lujung/python-spresso
|
faef8d092f2bde56ed318d52770d8cf38762ad2d
|
32e0229abe9e18bbbe487184645d66ed1ed95a05
|
refs/heads/master
| 2021-01-21T06:30:45.630091
| 2017-03-01T20:01:26
| 2017-03-01T20:01:26
| 83,245,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
import warnings
from spresso.model.settings import Container
class Setting(object):
_available_schemes = ["http", "https"]
endpoints = Container()
scheme = "https"
debug = False
def __setattr__(self, key, value):
if key == "scheme":
if value not in self._available_schemes:
raise ValueError(
"'scheme' must be one of '{}'".format(
self._available_schemes
)
)
if value == "http":
warnings.warn(
"\nThe SPRESSO system is running on HTTP, this setting "
"renders the system insecure!\nThis should only be used in "
"development environments.\nIf you are running a production"
" environment, make sure all traffic is transferred over "
"HTTPS!",
RuntimeWarning
)
super(Setting, self).__setattr__(key, value)
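# Illustrative usage sketch (not part of the original module): switching the
# scheme to "http" emits the RuntimeWarning above, and any scheme outside
# _available_schemes raises ValueError.
if __name__ == '__main__':
    setting = Setting()
    setting.scheme = "http"        # warns that the deployment is insecure
    try:
        setting.scheme = "ftp"
    except ValueError as error:
        print(error)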
|
[
"s4lujung@uni-trier.de"
] |
s4lujung@uni-trier.de
|
e5a064e9f9eaff906c2535f0d47d39d53f67ec5f
|
e054e790e25c17b6d1c9c350c270c2016f7264ff
|
/rolling_dice.py
|
0b6ab0d2c6dc47939362f851b4f414f413f404af
|
[] |
no_license
|
ShakeriaCodes/rolling_dice
|
b4d63c2dc6e3eeb9daae11cb5d6071c5f4437615
|
2903a249353fad7fd00825296fdc5b0f23a82110
|
refs/heads/master
| 2020-06-13T17:49:41.632665
| 2019-07-01T20:35:29
| 2019-07-01T20:35:29
| 194,739,154
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
import random
min = 1
max = 6
roll_again = "yes"
while roll_again == "yes" or roll_again == "y":
print ("Rolling the dices...")
print ("values are...")
print (random.randint(min, max))
print (random.randint(min, max))
roll_again = input("roll the dice again")
|
[
"yogi_keri@icloud.com"
] |
yogi_keri@icloud.com
|
66721be9527fe8bbbd2fc6bf23731c047a3b8eb3
|
8be87da4b33c8ab83099b9689c54395bc0d8b079
|
/analytics/util/historicaldata.py
|
ea866cf6359c58f07edb2fbac6a7d7a804ec916b
|
[] |
no_license
|
ubc-capstone-real-time-energy-display/analytics
|
874dcceb37bdd6b9804be7fca11396f7217a3f8a
|
5068de256805da533907ec05377cebaddcd30f4e
|
refs/heads/master
| 2021-01-10T02:03:49.166550
| 2015-11-26T22:27:53
| 2015-11-26T22:27:53
| 46,546,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
import util.building
import datetime
def gethistoricaldata(bid, startdate, daysago):
start = startdate - datetime.timedelta(days=daysago)
stop = start + datetime.timedelta(days=1)
return util.building.getdata(bid, start, stop)
def getaverage(bid, startdate, maxtimeframes, timeframe):
timeframesago = 0
datasets = []
for i in xrange(maxtimeframes):
timeframesago += 1
data = gethistoricaldata(bid, startdate, timeframesago * timeframe)
# Extract kwh
data = [x[1] for x in data]
if len(data) == 0:
break
else:
datasets.append(data)
# Make sure dataset lengths are the same
lens = [len(x) for x in datasets]
if lens[1:] != lens[:-1]:
print lens
print 'Unequal data set lengths (daylight savings or missing data)'
return
# Create average
return [sum(x) / len(x) for x in zip(*datasets)]
|
[
"alvin.lao.is@gmail.com"
] |
alvin.lao.is@gmail.com
|
158f79253f33e1c20d6ddb1e70a100196feff123
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/account/migrations/0013_auto_20180713_2151.py
|
d0b07037331a843bf3606f0d6a973e5233e3533e
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765
| 2020-04-07T18:57:34
| 2020-04-07T18:57:34
| 353,953,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
# Generated by Django 2.0.6 on 2018-07-13 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('account', '0012_pgtransaction_order_no'),
]
operations = [
migrations.CreateModel(
name='ConsumerRefund',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('refund_amount', models.DecimalField(decimal_places=2, default=None, max_digits=10)),
('refund_state', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Completed')], default=1)),
('consumer_transaction', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='account.ConsumerTransaction')),
],
options={
'db_table': 'consumer_refund',
},
),
migrations.AddField(
model_name='pgtransaction',
name='amount',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True),
),
migrations.AddField(
model_name='consumerrefund',
name='pg_transaction',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='account.PgTransaction'),
),
migrations.AddField(
model_name='consumerrefund',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
),
]
|
[
"prateekmirdha@policybazaar.com"
] |
prateekmirdha@policybazaar.com
|
1663c8dd0f92be78897e3253c0a7ce701349df77
|
a77ad626481519828a14fd5dae1a32f324fb71db
|
/questions/models.py
|
9b6a49fd7bc5ea015d63088657705db96b2d9b2d
|
[] |
no_license
|
acbelter/ask-acbelter
|
d55d08805c4c3e640a37ef34bc8fb1c128fab3b0
|
37c3c99f8193d80fe57bae671407ebc4b1f8d850
|
refs/heads/master
| 2023-01-23T13:55:42.917522
| 2016-02-27T12:32:15
| 2016-02-27T12:32:15
| 45,356,848
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
from django.db import models
# Create your models here.
from django.utils import timezone
from tags.models import Tag
from users.models import Member
class QuestionQuerySet(models.QuerySet):
def new_questions(self):
return self.order_by('creation_date')
def popular_questions(self):
return self.filter(rating__gt=500).order_by('rating').reverse()
def question_by_tag(self, tag_value):
tag = Tag.objects.all().filter(value=tag_value)
return self.filter(tags__in=tag)
def question_by_id(self, question_id):
return self.get(id=question_id)
class AnswerQuerySet(models.QuerySet):
def answers_by_question(self, question):
return self.filter(question=question).order_by('rating').reverse()
def answer_by_id(self, answer_id):
return self.get(id=answer_id)
class Question(models.Model):
title = models.CharField(u'title', max_length=255)
text = models.TextField(u'text')
author = models.ForeignKey(Member)
creation_date = models.DateTimeField(u'creation date', default=timezone.now, blank=True)
tags = models.ManyToManyField(Tag)
rating = models.IntegerField(u'rating', default=0)
answers_count = models.PositiveIntegerField(u'answers count', default=0)
objects = QuestionQuerySet.as_manager()
def __unicode__(self):
return self.title
class Meta:
ordering = ['-creation_date']
class QuestionRating(models.Model):
member = models.ForeignKey(Member)
question = models.ForeignKey(Question)
# rating_delta is 1 or -1
rating_delta = models.SmallIntegerField()
class Answer(models.Model):
text = models.TextField(u'text')
author = models.ForeignKey(Member)
question = models.ForeignKey(Question)
correct_answer = models.BooleanField(u'correct answer', default=False)
creation_date = models.DateTimeField(u'creation date', default=timezone.now, blank=True)
rating = models.IntegerField(u'rating', default=0)
objects = AnswerQuerySet().as_manager()
def __unicode__(self):
return self.text
class AnswerRating(models.Model):
member = models.ForeignKey(Member)
answer = models.ForeignKey(Answer)
# rating_delta is 1 or -1
rating_delta = models.SmallIntegerField()
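# Illustrative queries (not part of the original module): with Django configured
# and migrations applied, the custom QuerySets above are reachable through the
# default managers; the tag value below is hypothetical.
#
#   Question.objects.new_questions()
#   Question.objects.popular_questions()
#   Question.objects.question_by_tag('django')
#   Answer.objects.answers_by_question(some_question)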
|
[
"acbelter@gmail.com"
] |
acbelter@gmail.com
|
24cc3135a2c89005b48088cff821f5d992b4454c
|
188b2f0a0a9dbdf6261feb59442c0fe8d03daa6c
|
/manage.py
|
280e4867324fbff512f9374feb9d6b521ac899bf
|
[] |
no_license
|
andre1201/work
|
966eca5901eb22a1d816b9f5bff0c03690c39b93
|
dbf656c612021cc074ef652b28f3a87e9a6481be
|
refs/heads/master
| 2021-01-10T14:30:34.595668
| 2015-11-10T08:34:16
| 2015-11-10T08:34:16
| 43,693,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restAuth.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"shukin_am@crvtu.local"
] |
shukin_am@crvtu.local
|
c9939ca4c1e00560dafc428259151becb03674da
|
8c95a8c5f153aed18b849cb9e56d3e3cb089a188
|
/gans/data/base_dataset.py
|
e8b5e9ba461b6a007bb5ec32a71152d8deb45057
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
avisekiit/wacv_2019
|
5c86296441f365549c63b6ad3c5700045cb0ce29
|
263f264b3f2bdb0f116ebbb30ec4a805f357b3a6
|
refs/heads/master
| 2020-04-07T15:42:09.535703
| 2019-01-05T12:41:14
| 2019-01-05T12:41:14
| 158,497,302
| 7
| 4
|
MIT
| 2018-12-09T17:16:53
| 2018-11-21T05:48:07
| null |
UTF-8
|
Python
| false
| false
| 3,397
|
py
|
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def initialize(self, opt):
pass
def __len__(self):
return 0
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSize)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'none':
transform_list.append(transforms.Lambda(
lambda img: __adjust(img)))
else:
raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
# just modify the width and height to be a multiple of 4
def __adjust(img):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
if ow % mult == 0 and oh % mult == 0:
return img
w = (ow - 1) // mult
w = (w + 1) * mult
h = (oh - 1) // mult
h = (h + 1) * mult
if ow != w or oh != h:
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), Image.BICUBIC)
def __scale_width(img, target_width):
ow, oh = img.size
# the size needs to be a multiple of this number,
# because going through generator network may change img size
# and eventually cause size mismatch error
mult = 4
assert target_width % mult == 0, "the target width needs to be multiple of %d." % mult
if (ow == target_width and oh % mult == 0):
return img
w = target_width
target_height = int(target_width * oh / ow)
m = (target_height - 1) // mult
h = (m + 1) * mult
if target_height != h:
__print_size_warning(target_width, target_height, w, h)
return img.resize((w, h), Image.BICUBIC)
def __print_size_warning(ow, oh, w, h):
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
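# Illustrative smoke test (not part of the original module): get_transform()
# only reads a handful of option attributes, so a bare Namespace with
# hypothetical values is enough to exercise the pipeline.
if __name__ == '__main__':
    from argparse import Namespace
    opt = Namespace(resize_or_crop='resize_and_crop', loadSize=286,
                    fineSize=256, isTrain=True, no_flip=False)
    transform = get_transform(opt)
    sample = Image.new('RGB', (300, 200))
    print(transform(sample).shape)  # expected: torch.Size([3, 256, 256])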
|
[
"charared@cisco.com"
] |
charared@cisco.com
|
efee7d448eea985b0e0ec8897540d3d67ee9753f
|
fe04f9320876df80b38a5c6dc4fcfedcfe11bc95
|
/ChartterBot.py
|
f880fb824db9e963648c6eed232a173bbe836969
|
[] |
no_license
|
princeadegoke/Heroku-Bot
|
e3b58a09df2a38726dac7828d9a979522bbb76a7
|
ac8430ceac1697779b0ba0036e28385a2aee9e3d
|
refs/heads/master
| 2021-05-05T21:54:21.409957
| 2018-01-06T13:50:38
| 2018-01-06T13:50:38
| 115,973,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# Dependencies
import tweepy
import time
import random
# Twitter API Keys
consumer_key = "md27jI2cdRGQ5QJrC9GrZnjfj"
consumer_secret = "dp2ujQmPbGKDJO1UTx3S3kMdApXWz91XDMaLL1Ti92HygMrJVg"
access_token = "943270787640852485-AMbIDMXo65N5tVrEPs5TJvVlU9c2faJ"
access_token_secret = "lFoISe9o4VujzhvqWosuzWCS1uK2Ax7AeinI5r5mDsYG9"
# Create quotes to tweet (populate this list with quote strings before running)
quote_list = []
# Create function for tweeting
def QuoteItUp():
# Twitter credentials
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# Tweet a random quote
    api.update_status(random.choice(quote_list))
# Print success message
print("Tweeted successfully, sir!")
# Set timer to run every minute
counter = 0
while counter < 15:
    QuoteItUp()
    counter = counter + 1
time.sleep(60)
|
[
"29675051+princeadegoke@users.noreply.github.com"
] |
29675051+princeadegoke@users.noreply.github.com
|
136a83c7414cc8364c3af64bbfbe44998bfd1edc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03951/s825491842.py
|
60c5369e40917f6ab084747ef4ce896e921c692d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
n = int(input())
s = input()
t = input()
for i in range(n):
if s == t:
print(len(s))
exit()
if s[i:] == t[:n-i]:
print(len(s+t[-i:]))
exit()
print(len(s+t))
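# Worked example: n=3, s="abc", t="cde" -> the loop finds s[2:] == t[:1] == "c",
# so the merged string is "abcde" and the program prints 5.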
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ad91ddf3727cb4bc3d789a237462d85cd8db4915
|
eec4d8f4ded660bc493dc9aa539042eea86186eb
|
/app.py
|
a0cfb7d92e08924a99188609864b3695ccc12bef
|
[] |
no_license
|
Elilora/Lung-Cancer-Classification
|
bdcd02afabad44396fd90a23f1e30c43d84a8978
|
c202fbddc8f7c9d0ea2a4ceb288b71b47056fc34
|
refs/heads/main
| 2023-09-05T18:52:34.861679
| 2021-11-22T18:17:29
| 2021-11-22T18:17:29
| 425,361,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
import os
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
import pickle
from skimage.io import imread
import matplotlib.pyplot as plt
from skimage.transform import resize
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve, plot_confusion_matrix, classification_report, confusion_matrix, accuracy_score
st.title("Lung Cancer Detection using Image Classification with Machine Learning")
st.text("Upload a Lung CT Scan for image classification as benign, Malignant or Normal ")
Categories = ['Bengin cases','Malignant cases','Normal cases']
for category in Categories:
class_num = Categories.index(category)
model = pickle.load(open('img_model.p','rb'))
uploaded_file = st.file_uploader("Choose a Lung CT scan", type=["jpg", "png", "jpeg"])
def detection(image, model):
image = np.array(image)
img_resize=resize(image,(150,150,3))
l=[img_resize.flatten()]
df=pd.DataFrame(l) #dataframe
x=df.iloc[:,:] #input data
probability=model.predict(x)
#for ind,val in enumerate(Categories):
#print(f'{val} = {probability[0][ind]*100}%')
#print("The predicted image is : "+Categories[model.predict(l)[0]])
#j= Categories[model.predict(l)[0]]
#print(f'Is the image a {Categories[model.predict(l)[0]]} ?(y/n)')
return probability
if uploaded_file is not None:
image = Image.open(uploaded_file)
st.image(image, caption='Uploaded CT Scan.', use_column_width=True)
st.write("")
st.write("Classifying...")
label = detection(image, model)
if label == 0:
st.write("The CT scan is a benign case")
elif label == 1:
st.write("The CT scan is a Malignant case")
else:
st.write("The CT scan is a Normal case")
|
[
"noreply@github.com"
] |
Elilora.noreply@github.com
|
ec494d05608481ab64454ce37641be6ce36ed299
|
41d2ad2dc0297454855dc71af8216a5282523eac
|
/apps/usermgmt/views.py
|
46c77aa6b024767b2d3591002690cd47db363404
|
[] |
no_license
|
shankarnpatro/StockMind
|
7ed4194abf5aa0392c0a714da8093a4afc3096c8
|
cf91510af8b8adbf48b35cd4728779497515ba9f
|
refs/heads/master
| 2020-05-19T19:39:24.346761
| 2019-10-04T12:55:03
| 2019-10-04T12:55:03
| 185,184,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,882
|
py
|
# Create your views here.
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.http import int_to_base36, base36_to_int
from StockMind import settings
from StockMind.environment_mixin import EnvironmentMixin
from apps.usermgmt.models import User
from apps.usermgmt.tokens import account_activation_token
class EmailVerification(EnvironmentMixin):
def send_email(self, user):
if self.is_production:
curr_user = User.objects.get(id=user.id)
mail_subject = 'Verify your StockMind Account'
message = render_to_string('email_verification_template.html', {
'user': curr_user.first_name,
'domain': settings.CURRENT_DOMAIN,
'uid': int_to_base36(curr_user.id),
'token': account_activation_token.make_token(curr_user),
})
to_email = curr_user.email
print(to_email)
email = EmailMultiAlternatives(mail_subject, message, from_email=settings.DEFAULT_FROM_EMAIL, to=[to_email])
email.attach_alternative(message, "text/html")
email.send()
def activate(self, request, uidb36, token):
try:
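            # Decode the base36 uid from the activation link back into the integer primary key.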
# uid = int(uidb64)
uid = base36_to_int(uidb36)
user = User.objects.get(pk=uid)
except(TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(user, token):
user.email_verified = True
user.save()
# return user.email_verified
return True
# return HttpResponse('Thank you for confirming your Email.')
else:
# return user.email_verified
return False
# return HttpResponse('Invalid activation link.')
|
[
"shankarnarayanpatro@gmail.com"
] |
shankarnarayanpatro@gmail.com
|
016512463dfe14d3e287ec6a927c9826b10091a5
|
c8f8de52f642a1813ddff84ee39a11707f7c9026
|
/CDM/CDM/asgi.py
|
dda2d232960d2a8aebe6ee1efc80f837d9c98475
|
[] |
no_license
|
coffeii/medicalData
|
99eb9ecafea53b24c2add1db92764aa8411a2a0c
|
057f041b8bc1252a95cd48ecf2d9c164110544cf
|
refs/heads/master
| 2023-06-12T02:46:33.527821
| 2021-07-01T12:44:22
| 2021-07-01T12:44:22
| 382,025,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
"""
ASGI config for CDM project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CDM.settings')
application = get_asgi_application()
|
[
"gsu2520@naver.com"
] |
gsu2520@naver.com
|
b7f406f9ccee204d76cfd5b659a16fd8f2baaf81
|
1ff2e1da74ddde3e825fb9b31dec381caf2a3c04
|
/lavalamps.py
|
0535724223e4fccfae0af3cb40787cda589a7e0e
|
[] |
no_license
|
au-ts/bamboo_build_lights
|
d32a83d3405f69933af832b8c6a569e1453cf3c6
|
93836a9b68b400866b89a55cec5cdca72ee89967
|
refs/heads/master
| 2023-04-20T03:13:22.285637
| 2021-03-14T22:56:43
| 2021-03-14T22:56:43
| 369,365,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
#!/usr/bin/env python3
import ftdi1 as ftd
from time import sleep
import sys
# '1' on the controller is wired
# to FTDI bits 0 (off) and 3 (on)
RED_ON = bytes([1<<3])
RED_OFF = bytes([1<<0])
# '2' on the controller is wired to
# FTDI bits 1 (off) and 4 (on)
GREEN_ON = bytes([1<<4])
GREEN_OFF = bytes([1<<1])
# Bits 2, 5, 6 and 7 are not connected.
# To release the button(s) write all
# zeros to the FTDI
NEUTRAL = bytes([0])
# The lavalamps are controlled with a FTDI device wired across the
# ON and OFF buttons on a wireless remote control.
# To actuate the button, it has to be 'pressed' for at least 200ms
# actuate() 'presses' a button by causing the appropriate FTDI bitbanging output
# to pulse for a short time.
def actuate(context, value):
ftd.write_data(context, value)
sleep(0.3)
ftd.write_data(context, NEUTRAL)
def red(context):
actuate(context, RED_ON)
actuate(context, GREEN_OFF)
def green(context):
# turn on Green light
actuate(context, GREEN_ON)
actuate(context, RED_OFF)
def off(context):
actuate(context, RED_OFF)
actuate(context, GREEN_OFF)
context = ftd.new()
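# Open the FTDI device by USB vendor/product ID (0x0403:0x6001, the common FT232 IDs).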
ftd.usb_open(context, 0x0403, 0x6001)
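# Put all eight pins into bitbang mode so the script can drive them directly.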
ftd.set_bitmode(context, 0xff, ftd.BITMODE_BITBANG)
# Everything off --- shouldn't be necessary.
ftd.write_data(context, bytes([0x00]))
if len(sys.argv) == 1:
off(context)
elif sys.argv[1] == 'red':
red(context)
elif sys.argv[1] == 'green':
green(context)
else:
print("Usage: lavalamps [red|green]")
|
[
"Peter.Chubb@data61.csiro.au"
] |
Peter.Chubb@data61.csiro.au
|
9e70805f0e5ade7b299089159d229c0eecdc369b
|
10b1f4d80f4453972918f8de42a5b6212047dac9
|
/submissions/exercise3/barrameda/Main.py
|
b10e07cda687df2ae7108cfd7108e33f90ddc34e
|
[
"MIT"
] |
permissive
|
tjmonsi/cmsc129-2016-repo
|
e6a87cab1c6adb093f9b339271cf3e8d2c48726c
|
5a1833265b17f9079d5a256909296d363db9179b
|
refs/heads/master
| 2021-01-22T09:55:37.484477
| 2017-06-01T16:24:19
| 2017-06-01T16:24:19
| 52,767,672
| 0
| 14
| null | 2017-06-01T16:24:20
| 2016-02-29T05:55:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
import LexicalAnalyzer
import SyntaxAnalyzer
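# Read a sample program, tokenize it with the lexical analyzer,
# then hand the token stream to the parser, which reports any syntax errors.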
code = open('sample1.scb', 'r')
tokens = LexicalAnalyzer.tokenizer(code.read())
print("Syntax Error for Sample 1")
SyntaxAnalyzer.parser(tokens)
print('\n')
#code = open('sample2.scb', 'r')
#tokens = LexicalAnalyzer.tokenizer(code.read())
#print("Syntax Error for Sample 2")
#SyntaxAnalyzer.parser(tokens)
#print('\n')
#code = open('sample3.scb', 'r')
#tokens = LexicalAnalyzer.tokenizer(code.read())
#print("Syntax Error for Sample 3")
#SyntaxAnalyzer.parser(tokens)
#print('\n')
#code = open('sample4.scb', 'r')
#tokens = LexicalAnalyzer.tokenizer(code.read())
#print("Syntax Error for Sample 4")
#SyntaxAnalyzer.parser(tokens)
#print('\n')
#code = open('sample5.scb', 'r')
#tokens = LexicalAnalyzer.tokenizer(code.read())
#print("Syntax Error for Sample 5")
#SyntaxAnalyzer.parser(tokens)
#print('\n')
#code = open('sample6.scb', 'r')
#tokens = LexicalAnalyzer.tokenizer(code.read())
#print("Syntax Error for Sample 6")
#SyntaxAnalyzer.parser(tokens)
#print('\n')
|
[
"barramedasimon321@gmail.com"
] |
barramedasimon321@gmail.com
|
0af840a7f8615b95578cdccbf649c12039fa9780
|
e94363b6dc2d003f19f6c97a1bdc7e47f96aed53
|
/tutorial_bodenseo/adv01_tkinter/1_label.py
|
7fb58cb32a1a7f7a468fe26c0316c81248e29494
|
[] |
no_license
|
danbi2990/python_practice
|
c4f74fbeb9002dbcbc2de65b48cacfb161cf7742
|
15ad87740d3aeb45e45886e2a20aeb64b62df1af
|
refs/heads/master
| 2021-01-11T18:12:21.790000
| 2017-02-07T13:19:26
| 2017-02-07T13:19:26
| 79,514,548
| 0
| 1
| null | 2017-01-20T02:15:02
| 2017-01-20T01:51:12
| null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
import tkinter as tk
counter = 0
def counter_label(label):
def count():
global counter
counter += 1
label.config(text=str(counter))
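        # Re-schedule count() to run again in 1000 ms so the label ticks once per second.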
label.after(1000, count)
count()
root = tk.Tk()
root.title("Counting Seconds")
label = tk.Label(root, fg="green")
label.pack()
counter_label(label)
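# The 'Stop' button destroys the root window, which ends the mainloop and the counter.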
button = tk.Button(root, text='Stop', width=25, command=root.destroy)
button.pack()
root.mainloop()
|
[
"danbi2990@gmail.com"
] |
danbi2990@gmail.com
|
62674439d3a67850c0d3067970c150a2ec3caae0
|
c6daf22223e685b11be0ac75572b431de380e842
|
/test/testTalib.py
|
255c68d10f1d6856d31edf394b620a772bb72fd9
|
[] |
no_license
|
afcarl/stockNeural
|
b750895d539eebebaf8d4f81ec17753f0d32ee9a
|
dc2ed034a3611e830bc05941eacafc258ba8c480
|
refs/heads/master
| 2020-09-04T02:55:38.281218
| 2016-05-28T02:24:41
| 2016-05-28T02:24:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
import talib
import pandas as pd
from talib.abstract import *
import numpy as np
real_data = [135.01, 133.0, 134.0, 131.0, 133.0, 131.0]
float_data = [float(x) for x in real_data]
np_float_data = np.array(float_data)
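# Compute a 3-period simple moving average of the price series with TA-Lib.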
np_out = talib.MA(np_float_data,3)
print(np_out)
# outputMA = talib.MA(close,timeperiod=3)
# # outputMACD = talib.MACD(close,timeperiod=3)
# outputRSI = talib.RSI(close,timeperiod=3)
# print type(close)
# print outputMA
# # print outputMACD
# print outputRSI
# print close
|
[
"1971990184@qq.com"
] |
1971990184@qq.com
|