blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07e0a3f5263b42f5cfbf6a40d74adadb61a31aac
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03836/s870323568.py
|
ad0d20b04f7d66288bd881e8921f6962e16fadd5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
def main():
    """Print a round-trip walk (twice there and back) from (sx, sy) to (tx, ty).

    Reads the four integers from stdin and writes one move string of
    'U'/'D'/'L'/'R' steps to stdout.
    """
    sx, sy, tx, ty = map(int, input().split())
    dx = tx - sx
    dy = ty - sy
    # Translation table that reverses a walk: U<->D, L<->R.
    invert = str.maketrans("UDLR", "DURL")
    pieces = []
    for detour in (False, True):
        if detour:
            # Second round trip: go one cell outside the bounding box so the
            # path never repeats an edge of the first trip.
            leg = "D" + "R" * (dx + 1) + "U" * (dy + 1) + "L"
        else:
            # First round trip: straight along the bounding box.
            leg = "R" * dx + "U" * dy
        # Each leg is followed by its reversal, returning to the start.
        pieces.append(leg + leg.translate(invert))
    print("".join(pieces))


if __name__ == "__main__":
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
92764ee5fa04ada4b84d89a4c40d1645710675ca
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/WW/FullRunII/Full2017_v9/njets/plot.py
|
b869826b875f6efbb25b48b65c4cd12b7f151232
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 5,738
|
py
|
# plot configuration
#
# NOTE(review): `groupPlot`, `plot` and `legend` are never defined in this
# file -- it is exec()'d by the plotting framework, which injects those
# dicts into the namespace (hence the commented-out `groupPlot = {}` /
# `plot = {}` placeholders below).  `xrange` implies a Python 2 runtime --
# confirm before porting.

# Number of differential WW signal bins (WW_B0..WW_B3, ggWW_B0..ggWW_B3).
nbins = 4

# groupPlot = {}
#
# Groups of samples to improve the plots.
# If not defined, normal plots is used
#

groupPlot['top'] = {
    'nameHR' : 'tW and t#bar{t}',
    'isSignal' : 0,
    'color': 400,   # kYellow
    'samples' : ['top']
}

groupPlot['WW'] = {
    'nameHR' : 'WW',
    'isSignal' : 0,
    'color': 851, # kAzure -9
    'samples' : ['WW_B%d'%i for i in xrange(nbins)]+['ggWW_B%d'%i for i in xrange(nbins)]
}

groupPlot['WW_nonfid'] = {
    'nameHR' : 'WW nonfid',
    'isSignal' : 0,
    'color': 853, # kAzure -9
    'samples' : ['WW_nonfid', 'ggWW_nonfid']
}

groupPlot['WWewk'] = {
    'nameHR' : 'WWewk',
    'isSignal' : 0,
    'color': 852, # kAzure -9
    'samples' : ['WWewk']
}

groupPlot['Fake'] = {
    'nameHR' : 'nonprompt',
    'isSignal' : 0,
    'color': 921, # kGray + 1
    'samples' : ['Fake_me', 'Fake_em']
}

groupPlot['DY'] = {
    'nameHR' : "DY",
    'isSignal' : 0,
    'color': 418, # kGreen+2
    'samples' : ['DY']
}

groupPlot['VVV'] = {
    'nameHR' : 'VVV',
    'isSignal' : 0,
    'color': 857, # kAzure -3
    'samples' : ['VVV']
}

groupPlot['WZ'] = {
    'nameHR' : "WZ",
    'isSignal' : 0,
    'color' : 617, # kViolet + 1
    'samples' : ['WZ']
}

groupPlot['ZZ'] = {
    'nameHR' : "ZZ",
    'isSignal' : 0,
    'color' : 618, # kViolet + 1
    'samples' : ['ZZ']
}

groupPlot['Vg'] = {
    'nameHR' : "V#gamma",
    'isSignal' : 0,
    'color' : 811, # kOrange + 10
    'samples' : ['Vg']
}

groupPlot['Higgs'] = {
    'nameHR' : 'Higgs',
    'isSignal' : 0,
    'color': 632, # kRed
    'samples' : ['Higgs' ]
}

#plot = {}

# keys here must match keys in samples.py
#
plot['DY'] = {
    'color': 418, # kGreen+2
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0,
}

plot['Fake_me'] = {
    'color': 921, # kGray + 1
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

plot['Fake_em'] = {
    'color': 921, # kGray + 1
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

plot['top'] = {
    'nameHR' : 'tW and t#bar{t}',
    'color': 400, # kYellow
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0,
}

# One plot entry per differential signal bin, for both qqWW and ggWW.
for i in xrange(nbins):
    plot['WW_B%d'%i] = {
        'color': 851, # kAzure -9
        'isSignal' : 0,
        'isData' : 0,
        'scale' : 1.0 # ele/mu trigger efficiency datadriven
    }
    plot['ggWW_B%d'%i] = {
        'color': 851, # kAzure -9
        'isSignal' : 0,
        'isData' : 0,
        'scale' : 1.0 # ele/mu trigger efficiency datadriven
    }

plot['WW_nonfid'] = {
    'color': 853, # kAzure -9
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0 # ele/mu trigger efficiency datadriven
}

plot['ggWW_nonfid'] = {
    'color': 853, # kAzure -9
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0 # ele/mu trigger efficiency datadriven
}

plot['WWewk'] = {
    'color': 851, # kAzure -9
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0 # ele/mu trigger efficiency datadriven
}

plot['Vg'] = {
    'color': 859, # kAzure -1
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

plot['WZ'] = {
    'color': 858, # kAzure -2
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

plot['ZZ'] = {
    'color': 858, # kAzure -2
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

plot['VVV'] = {
    'color': 857, # kAzure -3
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1.0
}

# Higgs
plot['Higgs'] = {
    'nameHR' : 'Higgs',
    'color': 632, # kRed
    'isSignal' : 0,
    'isData' : 0,
    'scale' : 1 #
}

# data
plot['DATA'] = {
    'nameHR' : 'Data',
    'color': 1 ,
    'isSignal' : 0,
    'isData' : 1 ,
    'isBlind' : 1
}

# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
|
[
"saumya.phor4252@gmail.com"
] |
saumya.phor4252@gmail.com
|
018440f84f40bfcb4de4dbd4916932d807d54935
|
2c7f025568bceb560888d26828aef30e5ae23393
|
/src/home-20170804/migrations/0015_auto_20170601_1925.py
|
aac40a81d41a1db348df5d1c203aa4f5437164e5
|
[] |
no_license
|
GustavoCruz12/educacao
|
6271ebc71830ee1964f8311d3ef21ec8abf58e50
|
d0faa633ed1d588d84c74a3e15ccf5fa4dd9839e
|
refs/heads/master
| 2022-12-08T09:34:42.066372
| 2018-08-03T06:38:49
| 2018-08-03T06:38:49
| 143,387,426
| 0
| 0
| null | 2022-12-08T00:01:52
| 2018-08-03T06:31:03
|
Python
|
UTF-8
|
Python
| false
| false
| 493
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-01 19:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``VideoYoutube.slug`` a unique SlugField (max_length=250)."""

    dependencies = [
        ('home', '0014_auto_20170601_1923'),
    ]

    operations = [
        migrations.AlterField(
            model_name='videoyoutube',
            name='slug',
            field=models.SlugField(max_length=250, unique=True, verbose_name='URL (slug)'),
        ),
    ]
|
[
"gustavocruz201419@gmail.com"
] |
gustavocruz201419@gmail.com
|
7c720b9d543c458945310c10348f41a006a16bdb
|
72900b002fc2c27ea92564da10a87b4c9ab75f63
|
/contracts/tests/test_schedule_metadata.py
|
66fe05df5396217794d2f39d48bc50611e0b4899
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
firefoxxy8/calc
|
f495443d28f7fd71da5e881a6d05d92aad2ba0fd
|
7ceb2fcadd67bc9fbcb64cdaba50dbbdead0e803
|
refs/heads/master
| 2020-03-27T19:19:24.127137
| 2018-08-20T22:23:33
| 2018-08-20T22:23:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
from django.core.management import call_command
from django.test import SimpleTestCase, TestCase
from django.utils.safestring import SafeText
from contracts.models import ScheduleMetadata
def populate_schedule_metadata():
    """Wipe and re-seed all ``ScheduleMetadata`` rows via data migrations.

    Guarantees the canonical seed data is present regardless of what
    earlier test cases did to the table.
    """
    # This is SUPER weird. Previous TransactionTestCase runs (usually
    # the result of a LiveServerTestCase) could have removed the
    # schedule metadata populated by our migration, so we'll forcibly
    # wipe our schedule metadata and re-run the migration just in case.
    ScheduleMetadata.objects.all().delete()
    # Rewind the migration-state marker without touching the schema...
    call_command('migrate', 'contracts', '0023_schedulemetadata', '--fake')
    # ...actually run the data migration that seeds the table...
    call_command('migrate', 'contracts', '0024_populate_schedulemetadata')
    # ...then fake-forward the rest so migration state matches reality.
    call_command('migrate', '--fake')
class InitialScheduleMetadataTests(TestCase):
    """Verify the data migration seeds the expected ScheduleMetadata rows."""

    def test_populate_data_migration_works(self):
        """The 0024 migration creates the full, expected set of schedules."""
        populate_schedule_metadata()
        # Spot-check one seeded row in detail.
        env = ScheduleMetadata.objects.get(sin='899')
        self.assertEqual(env.schedule, 'Environmental')
        self.assertEqual(env.name, 'Legacy Environmental')
        self.assertIn('pollution', env.description)
        # Then check the complete set of schedule names.
        self.assertEqual(list(
            sm.schedule
            for sm in ScheduleMetadata.objects.all().order_by('schedule')
        ), [
            'AIMS',
            'Consolidated',
            'Environmental',
            'FABS',
            'IT Schedule 70',
            'Language Services',
            'Logistics',
            'MOBIS',
            'PES',
        ])
class SimpleTests(SimpleTestCase):
    """DB-free unit tests of ScheduleMetadata's computed properties."""

    def test_full_name_includes_sin_when_present(self):
        sm = ScheduleMetadata(sin='123', name='blarg')
        self.assertEqual(sm.full_name, '123 - blarg')

    def test_full_name_works_when_sin_is_absent(self):
        sm = ScheduleMetadata(name='blarg')
        self.assertEqual(sm.full_name, 'blarg')

    def test_description_html_works(self):
        # Description is markdown; *there* should render as <em> and the
        # result must be marked safe for template interpolation.
        sm = ScheduleMetadata(description='hello *there*')
        self.assertEqual(
            sm.description_html,
            '<p>hello <em>there</em></p>'
        )
        self.assertIsInstance(sm.description_html, SafeText)

    def test_str_works(self):
        sm = ScheduleMetadata(sin='123', name='blarg')
        self.assertEqual(str(sm), '123 - blarg')
|
[
"varmaa@gmail.com"
] |
varmaa@gmail.com
|
9ed3621af43e4f23d160e89f19f6d78356630961
|
3940b4a507789e1fbbaffeb200149aee215f655a
|
/lc/145.BinaryTreePostorderTraversal.py
|
cea3a772e720331356728cbbd39b0eca590101ba
|
[] |
no_license
|
akimi-yano/algorithm-practice
|
15f52022ec79542d218c6f901a54396a62080445
|
1abc28919abb55b93d3879860ac9c1297d493d09
|
refs/heads/master
| 2023-06-11T13:17:56.971791
| 2023-06-10T05:17:56
| 2023-06-10T05:17:56
| 239,395,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,915
|
py
|
# 145. Binary Tree Postorder Traversal
# Medium
# 2416
# 112
# Add to List
# Share
# Given the root of a binary tree, return the postorder traversal of its nodes' values.
# Example 1:
# Input: root = [1,null,2,3]
# Output: [3,2,1]
# Example 2:
# Input: root = []
# Output: []
# Example 3:
# Input: root = [1]
# Output: [1]
# Example 4:
# Input: root = [1,2]
# Output: [2,1]
# Example 5:
# Input: root = [1,null,2]
# Output: [2,1]
# Constraints:
# The number of the nodes in the tree is in the range [0, 100].
# -100 <= Node.val <= 100
# Follow up:
# Recursive solution is trivial, could you do it iteratively?
# This solution works:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def postorderTraversal(self, root: TreeNode) -> List[int]:
        """Return node values in post-order (left, right, node) recursively."""
        values = []

        def visit(node):
            # Appending to the enclosing list needs no `nonlocal`; we only
            # mutate it, never rebind it.
            if not node:
                return
            visit(node.left)
            visit(node.right)
            values.append(node.val)

        visit(root)
        return values
# This solution works - iterative:
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def postorderTraversal(self, root: TreeNode) -> List[int]:
        """Return node values in post-order iteratively.

        Each node is pushed twice: first unvisited (expand children),
        then visited (emit its value).
        """
        result = []
        pending = [(root, False)]
        while pending:
            node, visited = pending.pop()
            if not node:
                continue
            if visited:
                result.append(node.val)
            else:
                # Re-push self as visited, then right before left so that
                # left is popped (and therefore emitted) first.
                pending.extend([(node, True), (node.right, False), (node.left, False)])
        return result
|
[
"akimi.mimi.yano@gmail.com"
] |
akimi.mimi.yano@gmail.com
|
a490e7f1e6b2c65087dfabc08bfafafeb3f0052c
|
5e9576c368e98927e2965bd2fb23bd35d9993d69
|
/featuretools/primitives/standard/aggregation/time_since_last_false.py
|
da04a8d96f2ab98be1971c71fbac3b5a5bab51cb
|
[
"BSD-3-Clause"
] |
permissive
|
alteryx/featuretools
|
c6e319e063e8e84e7684bf232376f95dc5272160
|
c284c2d27a95b81e0bae913ac90df2b02c8f3b37
|
refs/heads/main
| 2023-08-25T12:21:33.945418
| 2023-08-23T16:30:25
| 2023-08-23T16:30:25
| 102,908,804
| 1,783
| 201
|
BSD-3-Clause
| 2023-09-07T18:53:19
| 2017-09-08T22:15:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class TimeSinceLastFalse(AggregationPrimitive):
    """Calculates the time since the last `False` value.

    Description:
        Using a series of Datetimes and a series of Booleans, find the last
        record with a `False` value. Return the seconds elapsed between that record
        and the instance's cutoff time. Return nan if no values are `False`.

    Examples:
        >>> from datetime import datetime
        >>> time_since_last_false = TimeSinceLastFalse()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> booleans = [True, False, True]
        >>> time_since_last_false(times, booleans, time=cutoff_time)
        285.0
    """

    name = "time_since_last_false"
    # Two accepted signatures: (time_index Datetime, Boolean) and
    # (time_index Datetime, BooleanNullable).
    input_types = [
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=Boolean),
        ],
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=BooleanNullable),
        ],
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    # The framework passes the cutoff time to the function as `time=`.
    uses_calc_time = True
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def time_since_last_false(datetime_col, bool_col, time=None):
            # Pair timestamps with flags; dropna discards rows where
            # either value is missing (handles BooleanNullable NAs).
            df = pd.DataFrame(
                {
                    "datetime": datetime_col,
                    "bool": bool_col,
                },
            ).dropna()
            if df.empty:
                return np.nan
            # NOTE(review): despite the name, this holds the *rows* whose
            # flag is False, not a list of indices.
            false_indices = df[~df["bool"]]
            if false_indices.empty:
                return np.nan
            last_false_index = false_indices.index[-1]
            # dropna preserves the original index, so .loc into the raw
            # column retrieves the matching timestamp.
            time_since = time - datetime_col.loc[last_false_index]
            return time_since.total_seconds()

        return time_since_last_false
|
[
"noreply@github.com"
] |
alteryx.noreply@github.com
|
d69efc6966a7a0dd85157fa1130e54581402b0c7
|
2feb8dfebbf10ffd03f02bea643195c942ed6739
|
/list_doublesplits.py
|
e6f81a0f131079bc59c5d382b3d93bc6415a1860
|
[] |
no_license
|
Prads16/PythonForEverybodyCourseraSpecialization
|
f961648eb1c92fe7cc41468c278589b151e59cab
|
997056b3de23ed1d01f6bb1c94c6b850c114368e
|
refs/heads/master
| 2021-04-12T05:02:42.912896
| 2018-03-27T18:53:26
| 2018-03-27T18:53:26
| 125,952,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 807
|
py
|
#Open the file mbox-short.txt and read it line by line. When you find a line that
#starts with 'From ' like the following line:
#From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008
#You will parse the From line using split() and print out the second word in the
#line (i.e. the entire address of the person who sent the message). Then print
#out a count at the end.
#Hint: make sure not to include the lines that start with 'From:'.
# Count and print the sender address of every 'From ' envelope line,
# skipping 'From:' header lines.
fname = input("Enter file name: ")
#if len(fname) < 1 : fname = "mbox-short.txt"
fh = open(fname)
count = 0
for raw in fh:
    text = raw.rstrip()
    # Guard clause: keep only envelope lines ('From' but not 'From:').
    if not text.startswith('From') or text.startswith('From:'):
        continue
    count += 1
    # Second whitespace-separated token is the sender address.
    print(text.split()[1])
print("There were", count, "lines in the file with From as the first word")
|
[
"pradnya.ambre16@gmail.com"
] |
pradnya.ambre16@gmail.com
|
1b3b643a9de2d0688ccc6fd43534603d811be2eb
|
927b50cdaf1c384c8bbf6f13816d0ba465852fd8
|
/machine_learning_models/ctscan_covid_prediction/cnn.py
|
61c6c1a50ac8a0e672cbe76c993cee29544e1ef9
|
[
"MIT"
] |
permissive
|
jhabarsingh/DOCMED
|
f37d336483cffd874b0a7db43677c08a47bd639c
|
8a831886d3dd415020699491687fb73893e674c5
|
refs/heads/main
| 2023-04-26T06:45:10.409633
| 2021-05-19T14:37:53
| 2021-05-19T14:37:53
| 316,683,855
| 3
| 5
|
MIT
| 2021-02-21T13:32:33
| 2020-11-28T07:51:22
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
import tensorflow as tf
import keras
import pandas as pd
import numpy as np
import zipfile
import os
from random import randint
from keras.layers import Dense,Flatten,MaxPool2D,Conv2D,Dropout,GlobalAveragePooling2D
from keras.losses import binary_crossentropy,categorical_crossentropy
from keras.models import Model,Sequential
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img,img_to_array
# Build train/validation DataFrames of (image path, class label) pairs.
classes = ['covid', 'normal', 'others']
train_df = pd.DataFrame(columns=['image','clas'])
val_df = pd.DataFrame(columns=['image','clas'])
for label in classes:
    # NOTE(review): directory is named 'mini_natural_images' even though the
    # labels are CT-scan classes -- presumably the dataset folder name; verify.
    images = f'./mini_natural_images/{label}'
    print(images)
    # All but the last 30 images of each class go to training...
    for image in os.listdir(images)[:-30]:
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.x)
        # and quadratic in a loop; would need pd.concat on upgrade.
        train_df = train_df.append({'image':'./mini_natural_images/'+label+'/'+image,'clas':label},ignore_index=True)
    # ...and the last 30 of each class go to validation.
    for image in os.listdir(images)[-30:]:
        val_df = val_df.append({'image':'./mini_natural_images/'+label+'/'+image,'clas':label},ignore_index=True)
print(train_df)
val_df.head()
train_df.shape,val_df.shape

# Augment training images; validation only gets rescaling.
train_datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    rescale=1/255,
)
val_datagen = ImageDataGenerator(
    rescale=1/255
)
train_generator = train_datagen.flow_from_dataframe(train_df, x_col='image',y_col='clas',classes=classes)
val_generator = val_datagen.flow_from_dataframe(val_df,x_col='image',y_col='clas',classes=classes)

# Transfer learning: frozen InceptionResNetV2 backbone + GAP + 3-way softmax.
from keras.applications.inception_resnet_v2 import InceptionResNetV2
inceptionresnet = InceptionResNetV2(include_top=False, input_shape=(256,256,3),classes=3)
inceptionresnet.trainable = False
last_layer = inceptionresnet.layers[-1].output
x = GlobalAveragePooling2D()(last_layer)
x = Dense(3,activation='softmax')(x)
model = Model(inceptionresnet.inputs,x)
model.summary()
model.compile(loss='categorical_crossentropy',optimizer=keras.optimizers.RMSprop(learning_rate=0.0001),metrics=['acc'])
history = model.fit(train_generator,epochs=3,validation_data=val_generator)
model.save("pickle.h5")

# Plot training curves.
# NOTE(review): loss, val_loss and acc are all drawn in red ('r') on the SAME
# axes as the accuracy curves, and plt.figure() is called after legend() --
# the resulting figure mixes loss and accuracy scales; likely unintended.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()

# Predict one random validation image.
# NOTE(review): randint is inclusive of val_df.shape[0], so .iloc[index] can
# raise IndexError on the upper bound (and index 0 is never sampled); confirm.
index = randint(1,val_df.shape[0])
image = val_df.iloc[index]
img = load_img(image.image,target_size=(256,256))
plt.imshow(img)
img_tensor = img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor /= 255.    # same 1/255 rescaling as the generators
prediction = model.predict(img_tensor)
classes[np.argmax(prediction)]
|
[
"jhabarsinghbhati23@gmail.com"
] |
jhabarsinghbhati23@gmail.com
|
7d7cada6debb9f178ad60a991eb6e7cb110ccb8e
|
dcda5ba16474dd8ff650e04e7f4a9bf700f6a9ff
|
/shop/admin.py
|
35ea34c67b82499abed52b603a899222fbc1937b
|
[] |
no_license
|
007vict/shopbyexample
|
2084d6e53faafb5c7e856cc8b3a5ff43bc3a82e2
|
bc7dcfe5818499731c3cbf956c9c0b95cf3791da
|
refs/heads/master
| 2022-12-21T13:05:08.425653
| 2019-04-10T10:30:41
| 2019-04-10T10:30:41
| 177,291,341
| 0
| 0
| null | 2022-12-08T04:58:00
| 2019-03-23T13:18:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
from django.contrib import admin
from .models import Category, Product
from parler.admin import TranslatableAdmin
@admin.register(Category)
class CategoryAdmin(TranslatableAdmin):
    """Admin for the translatable Category model."""
    list_display = ['name', 'slug']

    def get_prepopulated_fields(self, request, obj=None):
        # NOTE(review): slug is auto-filled from name via this hook --
        # presumably because the static `prepopulated_fields` attribute
        # doesn't work with parler's translated fields; confirm.
        return {'slug': ('name',)}
@admin.register(Product)
class ProductAdmin(TranslatableAdmin):
    """Admin for the translatable Product model."""
    list_display = ['name', 'slug', 'price',
                    'available', 'created', 'updated']
    list_filter = ['available', 'created', 'updated']
    # Price and availability are editable directly in the change-list view.
    list_editable = ['price', 'available']

    def get_prepopulated_fields(self, request, obj=None):
        # Same hook as CategoryAdmin: auto-fill slug from name.
        return {'slug': ('name',)}
|
[
"super_vg@bk.ru"
] |
super_vg@bk.ru
|
616df77085bb95807dd89413c1aa21dd7e7250a7
|
442ccaa620eb22d51378a45941d021b44d7cde98
|
/src/courses/admin.py
|
daed5cd7fc1160232f05616b0ed9cc628995f6df
|
[] |
no_license
|
achiengcindy/supreme-school
|
43884b3c34fcac51d8f6ab4df38ee923f473fec9
|
c3777cb5f63ec41a167f87f0c7ec6a575e88ff0b
|
refs/heads/master
| 2021-01-16T01:57:01.590225
| 2020-03-26T01:40:14
| 2020-03-26T01:40:14
| 242,935,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
from django.contrib import admin
from .models import Subject, Course, Module
# Register your models here.
@admin.register(Subject)
class SubjectAdmin(admin.ModelAdmin):
    """Admin for Subject; slug is auto-filled from the title."""
    list_display = ['title', 'slug']
    prepopulated_fields = {'slug': ('title',)}
class ModuleInline(admin.StackedInline):
    """Stacked inline so a course's modules are editable on the Course page."""
    model = Module
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
    """Admin for Course with inline Module editing."""
    list_display = ['title', 'subject', 'created']
    list_filter = ['created', 'subject']
    search_fields = ['title', 'overview']
    prepopulated_fields = {'slug': ('title',)}
    inlines = [ModuleInline]
|
[
"achiengcindy36@gmail.com"
] |
achiengcindy36@gmail.com
|
259eb83402332534b5d99c0a6e094279776f1915
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.65_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=73/params.py
|
885ca21da69a7e1b42cd2a69b712f6a68b7f435e
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
{'cpus': 4,                    # processor count for the schedulability trial
 'duration': 30,               # run length in seconds
 'final_util': '3.532857',     # actual total utilization of the generated task set
 'max_util': '3.5',            # requested utilization cap
 'periods': 'harmonic-2',
 'release_master': False,      # no dedicated release-master CPU
 'res_distr': '0.65',          # resource distribution parameter
 'res_nmb': '4',               # number of shared resources
 'res_weight': '0.04',
 'scheduler': 'GSN-EDF',
 'trial': 73,                  # trial index within this parameter point
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
3f52dd08607f49f62ba2a9fcf0763299051fc089
|
4c7fc810eb442b386969bf345b4dc6ef3152c783
|
/src/transformers/models/pegasus/configuration_pegasus.py
|
ae5f8f007573b77a2c22f667698e25ef5bd39b3e
|
[
"Apache-2.0"
] |
permissive
|
newcodevelop/transformers
|
fbcef5d703b12febf6e76e84e3f0493769fb9d37
|
e8d1bd7427021d2114ec159b2c90c6b1fcddeae7
|
refs/heads/main
| 2023-03-15T11:45:09.906184
| 2022-08-30T07:26:17
| 2022-08-30T07:26:17
| 254,360,734
| 0
| 1
|
Apache-2.0
| 2020-04-09T12:07:09
| 2020-04-09T12:07:08
| null |
UTF-8
|
Python
| false
| false
| 7,868
|
py
|
# coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PEGASUS model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config.json URL.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate an
    PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the PEGASUS
    [google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        classifier_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for classifier.
        max_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models)
        forced_eos_token_id (`int`, *optional*, defaults to 1):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import PegasusModel, PegasusConfig

    >>> # Initializing a PEGASUS google/pegasus-large style configuration
    >>> configuration = PegasusConfig()

    >>> # Initializing a model from the google/pegasus-large style configuration
    >>> model = PegasusModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names used elsewhere in the library map onto the
    # encoder-specific fields of this config.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        classifier_dropout=0.0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
|
[
"noreply@github.com"
] |
newcodevelop.noreply@github.com
|
c7bf6347a9483578c151ba6fdf82003fc374d7ff
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/L82/82-80_MD_NVT_rerun/set_2.py
|
418a71c6ac0ba7775d409fa90b0c3d5147873a7b
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
import os

# Stage per-lambda production inputs and PBS scripts for a two-step TI run.
# NOTE(review): `dir` shadows the builtin; kept for byte-compatibility.
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/MD_NVT_rerun/ti_one-step/82_80/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_2.in'
temp_pbs = filesdir + 'temp_2.pbs'
# TI lambda windows (Gaussian-quadrature-like schedule); each has a
# same-named subdirectory under `dir`.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # chdir is RELATIVE, so the final os.chdir(dir) each iteration is what
    # makes the next relative chdir work -- do not hoist it out of the loop.
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the template and substitute XXX with this lambda value.
    prodin = workdir + "%6.5f_prod_2.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same copy-and-substitute for the batch script.
    pbs = workdir + "%6.5f_2.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (disabled; enable to queue the jobs immediately)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
4b22873ba2b86ee6f8f353268dab2f9cda56c13c
|
d77c8e7d5ec57940a2e1ee0e9836fb6181b0e051
|
/user.py
|
ff88009a1f188531003aede35af3ba5cdd19b583
|
[] |
no_license
|
ultralegendary/Git-RPG
|
6111ea5bb25ecbd5d86d4a577935e8aa0fc40f15
|
db3caeea635a56303a971a4ee6488de7963a5aa2
|
refs/heads/master
| 2023-03-21T14:15:18.612195
| 2021-03-14T14:06:57
| 2021-03-14T14:06:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import sqlite3
import os

# Informal sketch of the stored record (a bare string statement, not a
# real docstring or schema; see the actual CREATE TABLE in User.__init__).
"""Db structure:
{
user_name = ...
file_path = ...
level = {
lvl_no: ["init","commit",....],
}
"""

# Default database file lives next to this module.
basepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "user.sqlite3")
class User:
    """Persistence layer for the single local player profile.

    Stores exactly one row (id=1) in an SQLite ``Users`` table with
    columns ``path`` (text), ``level`` (int) and ``sublevel`` (int).
    """

    def __init__(self, path=None):
        """Open (and, on first use, initialize) the user database.

        Args:
            path: file path of the SQLite database; defaults to the
                module-level ``basepath`` next to this module.
        """
        if path is None:  # fixed: was `path == None`, which never triggered assignment-by-identity semantics issues but is unidiomatic
            path = basepath
        self.db = sqlite3.connect(path)
        self.cursor = self.db.cursor()
        table_exists = (
            self.db.execute(
                "SELECT name FROM sqlite_master WHERE type='table' AND name='Users';"
            ).fetchone()
            is not None
        )
        if not table_exists:
            self.cursor.execute(
                "create table if not exists Users (id integer primary key autoincrement, path text, level int, sublevel int)"
            )
            # Seed the single profile row: no repo path yet, level 1, sublevel 1.
            self.cursor.execute("insert into Users values (1,NULL,1,1)")
            self.db.commit()

    def update(self, item: dict):
        """Update columns of the profile row (id=1).

        Args:
            item: mapping of column name -> new value.

        Returns:
            The sqlite3 cursor of the executed UPDATE.

        NOTE(review): column names are interpolated into the SQL string
        (placeholders cannot name columns) -- callers must only pass
        trusted, hard-coded keys.
        """
        # A single ", ".join covers both the one-key and many-key cases;
        # the previous separate branches produced identical SQL.
        assignments = ", ".join(f"{col} = ?" for col in item)
        res = self.cursor.execute(
            f"update Users set {assignments} where id=1", tuple(item.values())
        )
        self.db.commit()
        return res

    def get(self, item):
        """Return the value(s) of column(s) ``item`` from the profile row.

        A single column name yields a scalar; a comma-separated column
        list yields a tuple (same trusted-input caveat as ``update``).
        """
        res = self.cursor.execute(f"select {item} from Users where id=1").fetchone()
        return res if len(res) > 1 else res[0]
|
[
"npc203@users.noreply.github.com"
] |
npc203@users.noreply.github.com
|
422b89a625e8ed71bceb6edd8df9b18591547f09
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/codeabbey/_CodeAbbeyPythonSolutions-master/matching_brackets.py
|
6e1fa02b64da38699b6c5780bcf0007efcd986dd
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 745
|
py
|
_______ __
amount_values i..(input
results # list
___ is_matching(word
word __.sub _ [^()[\]{}<>]","",word)
open_brackets ["[","(","{","<"]
close_brackets ["]",")","}",">"]
open_brackets_in_word = # list
___ i __ word:
__(i __ open_brackets
open_brackets_in_word.a..(i)
____(i __ close_brackets
__(l..(open_brackets_in_word) __ 0
r.. 0
__(open_brackets.i.. open_brackets_in_word[-1]) !_ close_brackets.i.. i:
r.. 0
____
open_brackets_in_word.p.. )
__(l..(open_brackets_in_word) > 0
r.. 0
r.. 1
___ i __ r..(amount_values
word i.. )
results.a..(is_matching(word
print(*results)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
6f16a1c2b4dac3fc4a91743e308c6b7a3bc0d011
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/cloud/bigquery/storage/v1/bigquery-storage-v1-py/setup.py
|
f20749fb5fe943b8d9cce3d3b649c3f8f26a9e70
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
import setuptools # type: ignore
# Release version of this distribution.
version = '0.1.0'

# The long description shown on PyPI is the README shipped alongside
# this setup.py, read relative to the file's own location.
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    readme = readme_file.read()

setuptools.setup(
    name='google-cloud-bigquery-storage',
    version=version,
    long_description=readme,
    packages=setuptools.PEP420PackageFinder.find(),
    namespace_packages=('google', 'google.cloud'),
    platforms='Posix; MacOS X; Windows',
    include_package_data=True,
    install_requires=(
        'google-api-core[grpc] >= 1.22.2, < 2.0.0dev',
        'libcst >= 0.2.5',
        'proto-plus >= 1.15.0',
        'packaging >= 14.3', ),
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    zip_safe=False,
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
29fe8a900520b7b684149b1f22f380300671c0f6
|
0fd2b832673946c9ee532686a2a35bf2680f8408
|
/CybORG/CybORG/Shared/Results.py
|
59e18af053e39645c2884ab3d8a515ea1d15a00f
|
[
"MIT"
] |
permissive
|
pvu1984/cage-challenge-2
|
4e57bad7bc30c7df2b90c2fabc8395a5f2a3e65c
|
e76722dcd79a6b7511e185cde34fac1e0b45720e
|
refs/heads/main
| 2023-09-02T15:11:32.072215
| 2021-11-12T02:33:19
| 2021-11-12T02:33:19
| 429,307,660
| 0
| 0
|
MIT
| 2021-11-18T05:27:36
| 2021-11-18T05:27:35
| null |
UTF-8
|
Python
| false
| false
| 2,588
|
py
|
# Copyright DST Group. Licensed under the MIT license.
import pprint
from copy import deepcopy
from CybORG.Shared.Observation import Observation
class Results:
    """Container for everything a single environment step can return:
    observation(s), reward, done flag, action metadata and any error
    information."""

    def __init__(self,
                 observation: dict = None,
                 done: bool = None,
                 reward: float = None,
                 info=None,
                 parameter_mask=None,
                 action_space=None,
                 error: Exception = None,
                 error_msg: str = None,
                 next_observation=None,
                 action=None,
                 action_name: str = None):
        self.observation = observation
        self.next_observation = next_observation
        self.done = done
        self.reward = reward
        self.action = action
        self.info = info
        self.parameter_mask = parameter_mask
        self.action_space = action_space
        self.error = error
        self.error_msg = error_msg
        self.action_name = action_name
        self.selection_masks = None

    def has_error(self):
        """Return True when an error was recorded for this step."""
        return self.error is not None

    def copy(self):
        """Return a deep copy; Observation fields use their own copy()."""
        def _dup(obs):
            # Observations know how to copy themselves; anything else is
            # handled by deepcopy.
            return obs.copy() if isinstance(obs, Observation) else deepcopy(obs)

        return Results(
            observation=_dup(self.observation),
            next_observation=_dup(self.next_observation),
            done=self.done,
            reward=self.reward,
            error=deepcopy(self.error),
            error_msg=deepcopy(self.error_msg),
            action=deepcopy(self.action),
            info=deepcopy(self.info),
            action_space=deepcopy(self.action_space),
        )

    def __str__(self):
        parts = [f"{self.__class__.__name__}:"]
        for name, value in self.__dict__.items():
            if value is None:
                continue
            rendered = pprint.pformat(value) if isinstance(value, dict) else str(value)
            parts.append(f"{name}={rendered}")
        return "\n".join(parts)

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        # Every attribute of self must exist on other with an equal value.
        return all(
            name in other.__dict__ and value == other.__dict__[name]
            for name, value in self.__dict__.items()
        )
|
[
"david@pop-os.localdomain"
] |
david@pop-os.localdomain
|
94d3ea0be5e02f307d069584e4530c7fdc6abeaa
|
79baf4404e51bdc0f33038b3b16bea86ff09e82f
|
/azext_iot/central/providers/export_provider.py
|
d5199774472543a2fd7b5a3ccb8a34bb9aa676be
|
[
"MIT"
] |
permissive
|
Azure/azure-iot-cli-extension
|
80b6cb29e907f7512c7361a85d6bfdea5ae2dd9e
|
bdbe65c3874ff632c2eba25c762e9ea8e9175b5f
|
refs/heads/dev
| 2023-09-04T10:57:16.118442
| 2023-08-28T17:12:05
| 2023-08-28T17:12:05
| 103,456,760
| 95
| 80
|
NOASSERTION
| 2023-09-13T00:02:54
| 2017-09-13T22:04:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,040
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import List, Union
from knack.log import get_logger
from azure.cli.core.azclierror import AzureResponseError, ClientRequestError, ResourceNotFoundError
from azext_iot.central.providers.central_provider import CentralProvider
from azext_iot.constants import CENTRAL_ENDPOINT
from azext_iot.central import services as central_services
from azext_iot.central.models.v2022_06_30_preview import ExportPreview
logger = get_logger(__name__)
class CentralExportProvider(CentralProvider):
    """CRUD provider for IoT Central continuous-data-export definitions.

    Keeps a local cache (``self._exports``) of exports already fetched
    or created, keyed by export id.

    NOTE(review): the ``central_dnx_suffix`` parameter name ('dnx'
    instead of 'dns') is a pre-existing typo, kept so callers passing it
    by keyword keep working.
    """

    def __init__(self, cmd, app_id: str, api_version: str, token=None):
        super().__init__(cmd, app_id, api_version, token=token)
        # Cache of exports keyed by export id.
        self._exports = {}

    def list_exports(
        self, central_dns_suffix=CENTRAL_ENDPOINT
    ) -> List[Union[dict, ExportPreview]]:
        """Return all exports in the application and refresh the cache."""
        exports = central_services.export.list_exports(
            cmd=self._cmd,
            app_id=self._app_id,
            token=self._token,
            central_dns_suffix=central_dns_suffix,
            api_version=self._api_version,
        )
        # add to cache
        for export in exports:
            self._exports.update({export["id"]: export})
        return exports

    def add_export(
        self, export_id, payload, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportPreview]:
        """Create a new export; raises if the id is already cached."""
        if export_id in self._exports:
            raise ClientRequestError("Destination already exists")
        export = central_services.export.add_export(
            self._cmd,
            self._app_id,
            export_id=export_id,
            payload=payload,
            token=self._token,
            api_version=self._api_version,
            central_dns_suffix=central_dnx_suffix,
        )
        if not export:
            raise AzureResponseError("Failed to create export with id: '{}'.".format(export_id))
        # add to cache
        self._exports[export["id"]] = export
        return export

    def update_export(
        self, export_id, payload, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportPreview]:
        """Update an existing export and refresh its cache entry."""
        export = central_services.export.update_export(
            self._cmd,
            self._app_id,
            export_id=export_id,
            payload=payload,
            token=self._token,
            api_version=self._api_version,
            central_dns_suffix=central_dnx_suffix,
        )
        if not export:
            # Bug fix: message previously said "Failed to create" (copy-paste
            # from add_export) although this path performs an update.
            raise AzureResponseError("Failed to update export with id: '{}'.".format(export_id))
        # add to cache
        self._exports[export_id] = export
        return export

    def get_export(
        self, export_id, central_dnx_suffix=CENTRAL_ENDPOINT
    ) -> Union[dict, ExportPreview]:
        """Return the export with the given id, served from cache when possible."""
        # get or add to cache
        export = self._exports.get(export_id)
        if not export:
            export = central_services.export.get_export(
                cmd=self._cmd,
                app_id=self._app_id,
                token=self._token,
                api_version=self._api_version,
                export_id=export_id,
                central_dns_suffix=central_dnx_suffix,
            )
            if not export:
                raise ResourceNotFoundError("No export found with id: '{}'.".format(export_id))
            else:
                self._exports[export_id] = export
        return export

    def delete_export(self, export_id, central_dnx_suffix=CENTRAL_ENDPOINT):
        """Delete the export remotely and drop it from the cache."""
        central_services.export.delete_export(
            cmd=self._cmd,
            app_id=self._app_id,
            token=self._token,
            api_version=self._api_version,
            export_id=export_id,
            central_dns_suffix=central_dnx_suffix,
        )
        self._exports.pop(export_id, None)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
9293d417b66ad07a3eb2ecf542809cd46dfaa542
|
7cb322bfb75500e1627f0d6306789f0c3c59e83f
|
/django_eveonline_connector/signals.py
|
e09bb78e636047511d0260cf337617d72affeb7a
|
[
"MIT"
] |
permissive
|
Demieno/django-eveonline-connector
|
9e0cfc6b091d408407544d4486cc5d7ffcf9c3f4
|
7aa47440b5a4df19545c3499d63e39f202f46c61
|
refs/heads/master
| 2020-12-14T03:45:15.594067
| 2020-01-02T20:57:27
| 2020-01-02T20:57:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
from django.contrib.auth.models import User, Group
from django_eveonline_connector.models import EveScope, EveClient
from django.dispatch import receiver
from django.db.models.signals import post_delete, post_save
from django.db import transaction
from django.core.exceptions import PermissionDenied
import logging
logger = logging.getLogger(__name__)
@receiver(post_save, sender=EveScope)
def scope_save(sender, **kwargs):
    """Re-save the singleton EveClient whenever an EveScope is saved,
    deferred until the surrounding DB transaction commits."""
    def call():
        EveClient.get_instance().save()
    transaction.on_commit(call)
@receiver(post_delete, sender=EveScope)
def scope_delete(sender, **kwargs):
    """Re-save the singleton EveClient whenever an EveScope is deleted,
    deferred until the surrounding DB transaction commits."""
    def call():
        EveClient.get_instance().save()
    transaction.on_commit(call)
|
[
"porowns@gmail.com"
] |
porowns@gmail.com
|
2fb7cc835fea101c3497563093dc6b59a9d34543
|
1afa6c852dfc922d1a26a384d965976f31a87692
|
/Common/ComputationalGeometry/Testing/Python/CSpline.py
|
e7feea105e904f590ff75a762cdd74fb4e846c28
|
[
"BSD-3-Clause"
] |
permissive
|
dgobbi/VTK
|
631d037aacc7258861e70f77c586b01cd4ebff3f
|
17f232ee440025c26bc78a897edef78e9fc78510
|
refs/heads/master
| 2021-01-04T22:27:46.611907
| 2013-03-01T19:44:02
| 2013-03-01T19:44:02
| 938,377
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,097
|
py
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Now create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
math = vtk.vtkMath()
numberOfInputPoints = 30
aSplineX = vtk.vtkCardinalSpline()
aSplineY = vtk.vtkCardinalSpline()
aSplineZ = vtk.vtkCardinalSpline()
# generate random points
inputPoints = vtk.vtkPoints()
i = 0
while i < numberOfInputPoints:
x = math.Random(0,1)
y = math.Random(0,1)
z = math.Random(0,1)
aSplineX.AddPoint(i,x)
aSplineY.AddPoint(i,y)
aSplineZ.AddPoint(i,z)
inputPoints.InsertPoint(i,x,y,z)
i = i + 1
inputData = vtk.vtkPolyData()
inputData.SetPoints(inputPoints)
balls = vtk.vtkSphereSource()
balls.SetRadius(.01)
balls.SetPhiResolution(10)
balls.SetThetaResolution(10)
glyphPoints = vtk.vtkGlyph3D()
glyphPoints.SetInputData(inputData)
glyphPoints.SetSourceConnection(balls.GetOutputPort())
glyphMapper = vtk.vtkPolyDataMapper()
glyphMapper.SetInputConnection(glyphPoints.GetOutputPort())
glyph = vtk.vtkActor()
glyph.SetMapper(glyphMapper)
glyph.GetProperty().SetDiffuseColor(1,0.4,0.4)
glyph.GetProperty().SetSpecular(.3)
glyph.GetProperty().SetSpecularPower(30)
ren1.AddActor(glyph)
# create a polyline
points = vtk.vtkPoints()
profileData = vtk.vtkPolyData()
numberOfOutputPoints = 400
offset = 1.0
def fit (__vtk__temp0=0,__vtk__temp1=0):
    """Resample the three cardinal splines into `points` and mark
    `profileData` modified so the tube re-renders.

    NOTE(review): `expr.expr` is a leftover of the Tcl->Python
    conversion and is not defined in the visible imports; executing this
    would raise NameError -- confirm against the full file.
    """
    global numberOfInputPoints, numberOfOutputPoints, offset
    points.Reset()
    i = 0
    while i < numberOfOutputPoints:
        t = expr.expr(globals(), locals(),["(","numberOfInputPoints","-","offset",")","/","(","numberOfOutputPoints","-","1",")","*","i"])
        points.InsertPoint(i,aSplineX.Evaluate(t),aSplineY.Evaluate(t),aSplineZ.Evaluate(t))
        i = i + 1
    profileData.Modified()
fit()
lines = vtk.vtkCellArray()
lines.InsertNextCell(numberOfOutputPoints)
i = 0
while i < numberOfOutputPoints:
lines.InsertCellPoint(i)
i = i + 1
profileData.SetPoints(points)
profileData.SetLines(lines)
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(8)
profileTubes.SetInputData(profileData)
profileTubes.SetRadius(.005)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetDiffuseColor(1,1,0.6)
profile.GetProperty().SetSpecular(.3)
profile.GetProperty().SetSpecularPower(30)
ren1.AddActor(profile)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.5)
ren1.ResetCameraClippingRange()
renWin.SetSize(400,400)
# render the image
#
iren.Initialize()
def opened (__vtk__temp0=0,__vtk__temp1=0):
    """Switch all three splines to open (non-periodic) mode, refit the
    curve and re-render."""
    global offset
    offset = 1.0
    aSplineX.ClosedOff()
    aSplineY.ClosedOff()
    aSplineZ.ClosedOff()
    fit()
    renWin.Render()
def varyLeft (__vtk__temp0=0,__vtk__temp1=0):
    """Sweep the splines' left end-point value from -1 to 1, refitting
    and re-rendering at each step.

    NOTE(review): uses the undefined `expr.expr` helper (Tcl-conversion
    leftover); would raise NameError if called.
    """
    left = -1
    while left <= 1:
        aSplineX.SetLeftValue(left)
        aSplineY.SetLeftValue(left)
        aSplineZ.SetLeftValue(left)
        fit()
        renWin.Render()
        left = expr.expr(globals(), locals(),["left","+",".05"])
def varyRight (__vtk__temp0=0,__vtk__temp1=0):
    """Sweep the splines' right end-point value from -1 to 1, refitting
    and re-rendering at each step.

    NOTE(review): uses the undefined `expr.expr` helper (Tcl-conversion
    leftover); would raise NameError if called.
    """
    right = -1
    while right <= 1:
        aSplineX.SetRightValue(right)
        aSplineY.SetRightValue(right)
        aSplineZ.SetRightValue(right)
        fit()
        renWin.Render()
        right = expr.expr(globals(), locals(),["right","+",".05"])
def constraint (value,__vtk__temp0=0,__vtk__temp1=0):
    """Apply the same end-point constraint mode `value` to both ends of
    all three splines (does not refit or re-render)."""
    aSplineX.SetLeftConstraint(value)
    aSplineY.SetLeftConstraint(value)
    aSplineZ.SetLeftConstraint(value)
    aSplineX.SetRightConstraint(value)
    aSplineY.SetRightConstraint(value)
    aSplineZ.SetRightConstraint(value)
def closed (__vtk__temp0=0,__vtk__temp1=0):
    """Switch all three splines to closed (periodic) mode, refit the
    curve and re-render."""
    global offset
    offset = 0.0
    aSplineX.ClosedOn()
    aSplineY.ClosedOn()
    aSplineZ.ClosedOn()
    fit()
    renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
[
"nikhil.shetty@kitware.com"
] |
nikhil.shetty@kitware.com
|
25dbc0f67dd0f2b7ad3b38e4f59b589d9570d866
|
c9144edf6236e8214cc54f6c24f5689d11aff1a8
|
/week10/yghoon/keypad.py
|
500187cc35b217402b14fc2ada564727f2730e73
|
[] |
no_license
|
dohvis/kmu-sw-proj
|
10aa60066c1a3a11e9f05f54effb22935c8c4381
|
8ab1996f84d80f322b64d35e71cb1a8f0d90c108
|
refs/heads/master
| 2021-08-23T13:15:46.962343
| 2017-12-05T01:44:56
| 2017-12-05T01:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
from calcFunctions import factorial, decToBin, binToDec, decToRoman
# Numeric pad buttons, listed row by row (top row first).
numPadList = [
    '7', '8', '9',
    '4', '5', '6',
    '1', '2', '3',
    '0', '.', '=',
]
# Operator pad buttons; 'C' presumably clears the input -- confirm in UI code.
operatorList = [
    '*', '/',
    '+', '-',
    '(', ')',
    'C',
]
# Named constants selectable in the UI; keys are Korean labels (speed of
# light (m/s), speed of sound (m/s), mean distance to the sun (km)).
# The keys are runtime strings and must not be altered.
constantDics = {
    'pi' : '3.141592',
    '빛의 이동 속도 (m/s)' : '3E+8',
    '소리의 이동 속도 (m/s)' : '340',
    '태양과의 평균 거리 (km)' : '1.5E+8',
}
# UI labels mapped to the calcFunctions helpers imported above.
functionDics = {
    'factorial (!)' : factorial,
    '-> binary' : decToBin,
    'binary -> dec' : binToDec,
    '-> roman' : decToRoman,
}
|
[
"the_basic_@kookmin.ac.kr"
] |
the_basic_@kookmin.ac.kr
|
a9434d2aa6368d8259029d3de1bd9374cec21a2a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/726.py
|
511cfe53fe71d562c63735db511b437a68cc20d6
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
# Code Jam solver: reads test cases from B-small-attempt2.in and writes
# "Case #i: <answer>" lines to result.txt.  Searches for a circular
# arrangement of colored units (indices 1..6, letters in `st`) where
# adjacent colors must be compatible per table `e`.
f = open('B-small-attempt2.in','r')
fo = open('result.txt','w')
t = int(f.readline())
# 0N, 1R, 2O, 3Y, 4G, 5B, and 6V.
d = [[],[2,6],[1,3],[2,4],[3,5],[4,6],[5,1]]
e = [[],[3,4,5],[4,5,6],[1,5,6],[1,2,6],[1,2,3],[2,3,4]]
st = 'XROYGBV'
for ti in range(t):
    # u[0] = total count; u[1..6] = remaining units of each color.
    u = [int(x) for x in f.readline().split()]
    #print(u)
    # Search queue of [sequence_so_far, remaining_counts].
    b = [[[],u]]
    soln = "IMPOSSIBLE"
    while b:
        h = b[0]
        b = b[1:]
        a = h[1]
        #print(len(b))
        if len(h[0]) == u[0]:
            # Complete arrangement: accept only if it also closes the circle.
            if h[0][0] in e[h[0][-1]]:
                soln = ''.join(st[i] for i in h[0])
                break
        # Prune states where some color can no longer be interleaved with
        # the colors compatible with it.
        broken = False
        for i in range(1,7):
            if a[i]:
                s = 0
                for j in e[i]:
                    s += a[j]
                if s < a[i]-1:
                    broken = True
                    break
        if not broken:
            if h[0]:
                dl = e[h[0][-1]]
            else:
                dl = range(1,7)
            # When the most plentiful compatible color dominates (> 4 left),
            # force it; otherwise branch on every compatible color.
            ji = max((a[j],j) for j in range(1,7) if j in dl)
            if ji[0] > 4:
                j = ji[1]
                a[j] -= 1
                b += [[h[0]+[j],a]]
            else:
                for j in dl:
                    if a[j]:
                        a2 = a[:]
                        a2[j] -= 1
                        b += [[h[0]+[j],a2]]
    rs = "Case #%d: %s\n" % (ti+1, soln)
    #print(rs)
    fo.write( rs )
fo.close()
f.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
a55c909ea302272ae58bf65f53197abc7929f606
|
45a153a8e27b552d82d137bd94f2d5d0aec3c889
|
/GoogleCLoudwithTwilio/google-cloud-sdk/lib/surface/app/gen_repo_info_file.py
|
6b22de0c01c26795d53203fb8e81f01af51444b1
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
Pooshan/App-using-Twilio
|
84bc0be1f091c678528bdf161c9fbb0af617fc0e
|
a6eea3f40ef9f22a7ab47c1f63b90deaa2620049
|
refs/heads/master
| 2022-11-23T23:56:49.754209
| 2016-10-01T18:47:25
| 2016-10-01T18:47:25
| 69,719,320
| 0
| 1
| null | 2022-11-02T19:48:20
| 2016-10-01T04:26:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,572
|
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gen_repo_info_file command."""
import json
import os
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.appengine.tools import context_util
@base.Hidden
class GenRepoInfoFile(base.Command):
  """Determines repository information and generates a file representation.
  The generated file is an opaque blob representing which source revision the
  application was built at, and which Google-hosted repository this revision
  will be pushed to.
  """
  detailed_help = {
      'DESCRIPTION': """\
          This command generates two files, {old_name} and
          {contexts_filename}, containing information on the source revision
          and remote repository associated with the given source directory.
          {contexts_filename} contains information on all remote repositories
          associated with the directory, while {old_name} contains
          information only on one repository. It will refer to the associated
          Cloud Repository if there is one, or the remote Git repository if
          there is no Cloud Repository.
          {old_name} is deprecated in favor of {contexts_filename}.
          It is generated solely for compatibility with existing tools during
          the transition.
      """.format(old_name=context_util.CONTEXT_FILENAME,
                 contexts_filename=context_util.EXT_CONTEXT_FILENAME),
      'EXAMPLES': """\
          To generate repository information files for your app,
          from your source directory run:
            $ {command}
      """,
  }
  @staticmethod
  def Args(parser):
    """Register the command's flags on `parser`."""
    parser.add_argument(
        '--source-directory',
        default='.',
        help='The path to directory containing the source code for the build.')
    # TODO(b/25215149): Remove this option.
    parser.add_argument(
        '--output-file',
        help=(
            '(Deprecated; use --output-directory instead.) '
            'Specifies the full name of the output file to contain a single '
            'source context. The file name must be "{old_name}" in '
            'order to work with cloud diagnostic tools.').format(
                old_name=context_util.CONTEXT_FILENAME))
    parser.add_argument(
        '--output-directory',
        default='',
        help=(
            'The directory in which to create the source context files. '
            'Defaults to the current directory, or the directory containing '
            '--output-file if that option is provided with a file name that '
            'includes a directory path.'))
  def Run(self, args):
    """Compute the source contexts and write both context files.

    Writes the single-context (deprecated) file and the multi-context
    file into the resolved output directory (or directories).
    """
    contexts = context_util.CalculateExtendedSourceContexts(
        args.source_directory)
    # First create the old-style source-context.json file
    if args.output_file:
      log.warn(
          'The --output-file option is deprecated and will soon be removed.')
      output_directory = os.path.dirname(args.output_file)
      output_file = args.output_file
    else:
      output_directory = ''
      output_file = context_util.CONTEXT_FILENAME
    # --output-directory takes over only when --output-file carried no
    # directory component of its own.
    if not output_directory:
      if args.output_directory:
        output_directory = args.output_directory
        output_file = os.path.join(output_directory, output_file)
      else:
        output_directory = '.'
    best_context = context_util.BestSourceContext(contexts,
                                                  args.source_directory)
    files.MakeDir(output_directory)
    with open(output_file, 'w') as f:
      json.dump(best_context, f, indent=2, sort_keys=True)
    # Create the new source-contexts.json file.
    if args.output_directory and args.output_directory != output_directory:
      output_directory = args.output_directory
      files.MakeDir(output_directory)
    with open(os.path.join(output_directory, context_util.EXT_CONTEXT_FILENAME),
              'w') as f:
      json.dump(contexts, f, indent=2, sort_keys=True)
|
[
"pooshan.vyas@gmail.com"
] |
pooshan.vyas@gmail.com
|
38a29097fb677afe38a3f4e24a7ca69dea1596b0
|
2c872fedcdc12c89742d10c2f1c821eed0470726
|
/pbase/day06/jiangyi/day06/day05_exercise/99.py
|
98eb958a8ab699da85a2d9454f6e34a6e82a1522
|
[] |
no_license
|
zuigehulu/AID1811
|
581c3c7a37df9fa928bc632e4891fc9bafe69201
|
10cab0869875290646a9e5d815ff159d0116990e
|
refs/heads/master
| 2020-04-19T16:33:04.174841
| 2019-01-30T07:58:24
| 2019-01-30T07:58:24
| 168,307,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Exercise 1: print the 9x9 multiplication table, one row per second
# factor:
# 1x1=1
# 1x2=2 2x2=4
# 1x3=3 2x3=6 3x3=9
# ...
# 1x9=9 .......... 9x9=81
for second in range(1, 10):
    # Each product is followed by a single space, matching the original
    # `print(..., end=' ')` output exactly (including the trailing space).
    row = "".join("%dx%d=%d " % (first, second, first * second)
                  for first in range(1, second + 1))
    print(row)
|
[
"442315617@qq.com"
] |
442315617@qq.com
|
0eedd09812bb4c2cbf5a57643d7aac8c1c200214
|
f51c6d0cebb27c377ce9830deec4b727b9b2ee90
|
/AI/05_tictactoe/01object_grid.py
|
b3c72f860a52becf43b6c430e74fbbe1f3bc4994
|
[] |
no_license
|
dbbudd/Python-Experiments
|
1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8
|
b6d294bf11a5c92b8578d16aa2f63cc27fc47b07
|
refs/heads/master
| 2020-04-17T02:21:36.693593
| 2019-01-17T00:18:34
| 2019-01-17T00:18:34
| 166,130,283
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
#!/usr/bin/env python
import numpy as np
import itertools
class gameboard(object):
    """3x3 tic-tac-toe board.

    Cell values: 0 = empty, 1 = player X, 2 = player O.
    """

    def __init__(self):
        # player 1 puts a "X", player 2 puts a "O"
        self.g = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        self.grid = np.array(self.g)
        print(self.grid)

    def checkWin(self):
        """Print the game result: 'X Wins', 'O Wins' or 'Keep Playing!'.

        Fixes over the original:
        - Row/column tests used `row.all() == player`, which is True for
          ANY fully non-zero line when player == 1 and never True for
          player == 2 (`True == 2` is False).  Each line is now compared
          element-wise against the player's mark.
        - 'Keep Playing!' was printed on every loop iteration; it is now
          printed once, only when nobody has won.
        - The diagonal O message printed '0 Wins' (zero); unified to 'O'.
        """
        size = len(self.grid)
        for player, label in ((1, "X"), (2, "O")):
            main_diag = (self.grid.diagonal(0) == player).all()
            anti_diag = (np.flipud(self.grid).diagonal(0) == player).all()
            line_win = any(
                (self.grid[i, :] == player).all() or (self.grid[:, i] == player).all()
                for i in range(size)
            )
            if main_diag or anti_diag or line_win:
                print("%s Wins" % label)
                return
        print("Keep Playing!")
board = gameboard()
board.checkWin()
|
[
"dbbudd@gmail.com"
] |
dbbudd@gmail.com
|
e8c235b55e0f91859013f3f878d062126d0be59f
|
1cb2e45c87c1b961d33cfcbed95f765ca3be15b1
|
/0x22-primegame/main_8.py
|
b78832497631e5415d3cb06df9fadfd917647b07
|
[] |
no_license
|
icculp/holbertonschool-interview
|
a93b5e213861c3349733df1042bc7c323e8129ad
|
e49ac9e2f3dc356a9cae177472ac54c2e55edabf
|
refs/heads/main
| 2023-07-26T08:41:17.638936
| 2021-08-26T17:09:29
| 2021-08-26T17:09:29
| 319,488,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#!/usr/bin/python3
"""
Main file for testing
"""
# The module name starts with a digit, so a normal `import` statement is
# not possible; __import__ loads it by string name instead.
isWinner = __import__('0-prime_game').isWinner

# Build nums = [0, 1, 2, ..., 9999]: round i is played with n = i.
nums = [0] * 10000
for i in range(10000):
    nums[i] = i
print("Winner: {}".format(isWinner(10000, nums)))
|
[
"icculp@gmail.com"
] |
icculp@gmail.com
|
8ee49add3d08eb326c2b2eb0441beefb3936465b
|
c5471ada76f276752255b34b910da799dc29d804
|
/2016/day/11.py
|
0214968c95823e0b526e4725bbb4ea03394a88a3
|
[] |
no_license
|
MasterMedo/aoc
|
3c75a3b02ed6e65f9a5be2f393af884c79914209
|
42ccde20f31b99bc9e49248a58b732dca5972c69
|
refs/heads/master
| 2022-12-22T13:22:59.562745
| 2022-12-22T06:29:39
| 2022-12-22T06:29:39
| 112,675,225
| 31
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
import re
from itertools import combinations, chain
from heapq import heappop, heappush
# Advent of Code 2016 day 11: move every generator/microchip pair to the
# top floor (index 3) with a 2-item elevator, never leaving a chip on a
# floor with a foreign generator unless its own generator shields it.
generators, microchips = {}, {}
with open('../input/11.txt') as f:
    for floor, line in enumerate(f):
        for microchip in set(re.findall(r'\w+(?=-comp)', line)):
            microchips[microchip] = floor
        for generator in set(re.findall(r'\w+(?=\ gen)', line)):
            generators[generator] = floor
# A state is the multiset of (chip_floor, generator_floor) pairs --
# element identity is irrelevant -- so sorting the pairs collapses
# symmetric states and shrinks the search space.
pairs = [(microchips[i], generators[i]) for i in microchips]
# pairs.extend([(0, 0)]*2) # uncomment for part 2
pairs = tuple(sorted(pairs))
floor = distance = 0
state = [distance, floor, pairs]
to_visit = []
heappush(to_visit, state)
visited = set([(pairs, floor)])
# Dijkstra-flavoured BFS: heap ordered by (distance, floor, pairs).
while to_visit:
    distance, floor, pairs = heappop(to_visit)
    if all(i == 3 for pair in pairs for i in pair):
        break
    floors = [[], [], [], []]
    for element, (x, y) in enumerate(pairs):
        floors[x].append((element, 0)) # (element_index, type)
        floors[y].append((element, 1))
    # Skip illegal states: a chip (type 0) sharing a floor with some
    # generator (type 1) but without its own generator present.
    if any(i == 0 and (e, 1) not in things and any(j == 1 for _, j in things)
           for things in floors for e, i in things):
        continue
    candidates = floors[floor]
    # Try moving one or two items up or down one floor.
    for floor in {min(floor + 1, 3), max(floor - 1, 0)} - set([floor]):
        for things in chain(map(lambda x: [x], candidates),
                            combinations(candidates, 2)):
            new_pairs = tuple(sorted(tuple(floor if (e, i) in things else old
                                           for i, old in enumerate(pair))
                                     for e, pair in enumerate(pairs)))
            if (new_pairs, floor) not in visited:
                heappush(to_visit, (distance + 1, floor, new_pairs))
                visited.add((new_pairs, floor))
print(distance)
|
[
"mislav.vuletic@gmail.com"
] |
mislav.vuletic@gmail.com
|
d7b9041bc53a89ae60f21bc5562483eb05692a1a
|
7b20e2f86c2bb2145ae9ca5bcd4b9ad1566e79b0
|
/ABC/ABC134/D.py
|
53b03aa78f43b3f56626b735bd9b2b2e47171ff2
|
[] |
no_license
|
pto8913/KyoPro
|
5f5e769960dfec73af5b0f338f32659ff067094b
|
29ebc30a3d45fea273cb9034fba8311673a406dd
|
refs/heads/master
| 2021-06-13T16:43:40.275854
| 2021-03-23T00:02:25
| 2021-03-23T00:02:25
| 174,684,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# ABC134 D: choose which boxes receive a ball so that, for every i, the
# number of balls in boxes whose index is a multiple of i has parity
# A[i-1].
N = int(input())
A = list(map(int,input().split()))
box = [0] * (N+1)
res = []
# Decide boxes from the largest index down: box i is constrained only by
# its proper multiples, which have already been fixed at this point.
for i in range(1, N+1)[::-1]:
    s = 0
    # Sum balls over the proper multiples 2i, 3i, ... of i.
    for j in range(1, N // i):
        s += box[(j+1)*i]
    if s % 2 != A[i-1]:
        box[i] = 1
        res.append(i)
print(len(res))
print(*res)
|
[
"noreply@github.com"
] |
pto8913.noreply@github.com
|
b1967d3e0558202aea64db2d93f90760ba779a18
|
0f2a123478c6fb5941c4180c76fa663833d3c764
|
/service/neighborhood.py
|
dfeeab71407cb8d5ab381e80c1088ed2c3ee5e0f
|
[
"Apache-2.0"
] |
permissive
|
curtislisle/EntityAlignLarge
|
77fc650251724dc5eec9dbde13d34b91dd0d62b9
|
d629ef14b5f6abe3f0c758f8df92139dc9d01f07
|
refs/heads/master
| 2021-01-18T20:32:05.074741
| 2015-07-02T01:36:13
| 2015-07-02T01:36:13
| 35,761,970
| 0
| 1
| null | 2015-07-02T01:36:13
| 2015-05-17T10:32:24
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
import bson.json_util
from bson.objectid import ObjectId
import json
from pymongo import MongoClient
def freeze(rec):
    """Make a Mongo record hashable for set membership: returns
    (str(_id), data.id, JSON dump of the whole record)."""
    return (str(rec["_id"]), rec["data"]["id"], bson.json_util.dumps(rec))
def process(frozen):
    """Flatten a frozen record (see freeze()) into {"key": <oid>, **data}."""
    record = json.loads(frozen[2])
    # data fields are laid over the key, matching the original dict.update()
    # semantics (a "key" field inside data would win).
    return dict({"key": record["_id"]["$oid"]}, **record["data"])
def run(host=None, db=None, coll=None, center=None, radius=None, deleted=json.dumps(False)):
    """Breadth-first expansion of the graph neighborhood around `center`.

    Walks up to `radius` link-hops out from the node with Mongo _id
    `center` in collection `coll` and returns the reachable nodes and
    links as {"nodes": [...], "links": [...]}.  `deleted` is a
    JSON-encoded bool: when false, nodes flagged data.deleted are
    excluded.

    NOTE(review): Python 2 syntax (print statements) -- py2 only.
    """
    # Connect to the Mongo collection
    client = MongoClient(host)
    db = client[db]
    graph = db[coll]
    # Prepare the arguments.
    radius = int(radius)
    deleted = json.loads(deleted)
    frontier = set()
    neighbor_nodes = set()
    neighbor_links = []
    # Find the center node in the database.
    center_node = graph.find_one({"_id": ObjectId(center)})
    print 'found center node:',center_node
    # NOTE(review): precedence makes this `(A and B) or C`; when
    # center_node is None, clause C still evaluates and raises TypeError.
    # Likely intended `A and (B or C)` -- confirm with the callers.
    if center_node is not None and deleted or not center_node["data"].get("deleted"):
        frozen = freeze(center_node)
        neighbor_nodes.add(frozen)
        frontier.add(frozen)
    for i in range(radius):
        new_frontier = set()
        # Compute the next frontier from the current frontier.
        for key, id, _ in frontier:
            # Find all incoming and outgoing links from all nodes in the
            # frontier.
            query = {"$and": [{"type": "link"},
                              {"$or": [{"data.source": id},
                                       {"data.target": id}]}]}
            links = graph.find(query)
            print 'frontier query:',query
            print 'query result length:',links.count()
            # Collect the neighbors of the node, and add them to the new
            # frontier if appropriate.
            for link in links:
                source = link["data"]["source"] == id
                neighbor_id = source and link["data"]["target"] or link["data"]["source"]
                query_clauses = [{"type": "node"},
                                 {"data.id": neighbor_id}]
                if not deleted:
                    query_clauses.append({"$or": [{"data.deleted": {"$exists": False}},
                                                  {"data.deleted": False}]})
                neighbor = graph.find_one({"$and": query_clauses})
                if neighbor is not None:
                    frozen = freeze(neighbor)
                    if frozen not in neighbor_nodes:
                        new_frontier.add(frozen)
                        neighbor_nodes.add(frozen)
                    if source:
                        neighbor_link = {"source": key,
                                         "target": str(neighbor["_id"])}
                    else:
                        neighbor_link = {"source": str(neighbor["_id"]),
                                         "target": key}
                    neighbor_links.append(neighbor_link)
        frontier = new_frontier
    # processed = map(process, neighbor_nodes)
    processed = map(lambda x: json.loads(x[2]), neighbor_nodes)
    return {"nodes": processed,
            "links": neighbor_links}
|
[
"clisle@knowledgevis.com"
] |
clisle@knowledgevis.com
|
092fd67ed334bb3399c9ae96c5673c7487a8ca76
|
31bb411ccdc1581e5a5c9e8acd7fc7c5ffde1067
|
/prosper/datareader/intrinio/auth.py
|
e80a448395a989d2f2f94b1c20e579d053708ca7
|
[
"MIT"
] |
permissive
|
EVEprosper/ProsperDatareader
|
c58b645bab80cbb946cfddd657921a9dadbf403b
|
31f0d77074c21222161774f4d653326925611167
|
refs/heads/master
| 2021-01-01T19:45:39.506553
| 2018-06-13T16:18:03
| 2018-06-13T16:18:03
| 98,676,957
| 0
| 1
|
MIT
| 2019-01-22T06:09:17
| 2017-07-28T18:32:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
"""prosper.datareader.intrinio.auth: handle authentication/validation"""
import requests
from .. import exceptions
from .. import config
BASE_URL = 'https://api.intrinio.com/'
class IntrinioHelper(object):
    """Base helper for requests to the Intrinio REST API.

    Exactly one auth scheme must be supplied: username+password (HTTP
    basic auth) or a public API key (sent as a header).  See
    https://intrinio.com/account for account keys.

    Args:
        username (str): username for direct-auth
        password (str): password for direct-auth
        public_key (str): API key for indirect access

    Raises:
        InvalidAuth: neither -- or both -- auth schemes were provided
    """
    def __init__(self, username='', password='', public_key='',):
        self.__user = username
        self.__password = password
        self.__public_key = public_key
        # __bool__ validates the credentials and also rebinds
        # self.request to the matching implementation.
        if not self:
            raise exceptions.InvalidAuth('Lacking required authentication')

    def request(self, route, params=None, headers=None):
        """Placeholder; __bool__() rebinds this to a concrete strategy."""
        raise NotImplementedError()

    def __bool__(self):
        """Validate the auth pattern and select the request strategy."""
        has_direct = bool(self.__user and self.__password)
        has_key = bool(self.__public_key)
        if has_direct and has_key:
            # Either/or -- supplying both auth types is rejected.
            return False
        if has_direct:
            self.request = self._direct_auth_request
            return True
        if has_key:
            self.request = self._public_key_request
            return True
        return False

    def _direct_auth_request(self, route, params=None, headers=None):
        """GET `route` using HTTP basic auth.

        Args:
            route (str): endpoint path appended to BASE_URL
            params (dict): param args for endpoint request
            headers (dict): headers for endpoint request

        Returns:
            dict: JSON-parsed response from endpoint

        Raises:
            requests.exceptions: connection/HTTP errors
        """
        response = requests.get(
            url=BASE_URL + route,
            params=params,
            headers=headers,
            auth=(self.__user, self.__password),
        )
        response.raise_for_status()
        return response.json()

    def _public_key_request(self, route, params=None, headers=None):
        """GET `route` authenticating via the public-key header.

        Args:
            route (str): endpoint path appended to BASE_URL
            params (dict): param args for endpoint request
            headers (dict): headers for endpoint request

        Returns:
            dict: JSON-parsed response from endpoint

        Raises:
            requests.exceptions: connection/HTTP errors
        """
        merged_headers = dict(headers or {})
        merged_headers['X-Authorization-Public-Key'] = self.__public_key
        response = requests.get(
            url=BASE_URL + route,
            params=params,
            headers=merged_headers,
        )
        response.raise_for_status()
        return response.json()
|
[
"locke.renard@gmail.com"
] |
locke.renard@gmail.com
|
739a10dd59b002a18843a90b852d29a3bd234b9b
|
e41b573b7d2822ba00123e28d1ad6fe39db0631f
|
/portal/editstore/decorators.py
|
b12b9ac81dbf47941368253f1213fefb119f6633
|
[] |
no_license
|
epodreczniki/epodreczniki-portal
|
1e7fc583bd24dc6962fefdc2a2002835a39e122d
|
3f3d033c87a186d43cecd119ffe1172d7720c638
|
refs/heads/master
| 2021-01-16T21:46:12.400414
| 2017-01-31T21:24:35
| 2017-01-31T21:24:35
| 64,475,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,246
|
py
|
# coding=utf-8
from __future__ import absolute_import
from django.shortcuts import render
from store.exceptions import NiceException
import functools
from editstore.objects import drivers, SpaceDriver
from editstore import models
from django.shortcuts import get_object_or_404
from editstore import exceptions
from common.utils import wrap_nice_exceptions
from django.shortcuts import redirect
from common import messages
from surround.django.logging import setupModuleLogger
setupModuleLogger(globals())
wrap_edition_errors = wrap_nice_exceptions
def fetch_from_dict(kwargs, key):
    """Pop *key* out of *kwargs* and return its value (KeyError if absent)."""
    return kwargs.pop(key)
def space_method(must_have_write=None, must_have_read=True):
    """Decorator factory for views operating on a Space.

    Resolves the ``spaceid`` kwarg into a bound ``space_driver`` kwarg and
    enforces the requested read/write permissions before invoking the view.
    """
    def decorate(view_func):
        @functools.wraps(view_func)
        def wrapped(request, **kwargs):
            identifier = fetch_from_dict(kwargs, 'spaceid')
            db_space = get_object_or_404(models.Space, identifier=identifier)
            bound_driver = SpaceDriver.bind_db_object(db_space, user=request.user)
            kwargs['space_driver'] = bound_driver
            # permission checks: read first, then write (each may raise)
            if must_have_read is True:
                bound_driver.raise_for_read_perm()
            if must_have_write is True:
                bound_driver.raise_for_write_perm()
            return view_func(request, **kwargs)
        return wrapped
    return decorate
def driver_method(must_exist=True, must_have_write=None, must_have_read=True, category=None, use_space=True, redirect_missing=False):
    """Decorator factory for views operating on an editstore driver.

    Consumes the ``category`` (unless fixed by the ``category`` arg),
    ``identifier`` and ``version`` kwargs (plus ``spaceid`` when
    ``use_space``), binds a driver, enforces existence/permission
    constraints, and passes the driver to the view as ``driver``.
    """
    def decorator(view):
        @functools.wraps(view)
        def wrapper(request, **kwargs):
            if use_space:
                space = get_object_or_404(models.Space, identifier=fetch_from_dict(kwargs, 'spaceid'))
            else:
                space = None
            driver = drivers.bind(
                fetch_from_dict(kwargs, 'category') if category is None else category,
                fetch_from_dict(kwargs, 'identifier'),
                fetch_from_dict(kwargs, 'version'),
                request.user,
                space=space,
            )
            if must_exist is True:
                try:
                    driver.raise_for_exists()
                except exceptions.DoesNotExist as e:
                    # On GET, optionally bounce missing objects back to the
                    # listing with a flash message instead of a 404/500.
                    if redirect_missing and request.method == 'GET':
                        driver_class = drivers.get(driver.category)
                        messages.info(request, u'%s %s, wersja %s nie znajduje się w edycji online' % (driver_class.nice_name, driver.identifier, driver.version), extra_tags='danger')
                        return redirect('editres.views.listing', driver.spaceid, driver.category)
                    raise
            if use_space:
                driver.raise_for_space()
            if must_exist is False:
                # creation views: the object must NOT already exist
                if driver.exists:
                    raise exceptions.ObjectAlreadyExist('%s %s/%s already exists' % (driver.category, driver.identifier, driver.version))
            if must_have_read is True:
                driver.raise_for_read_perm()
            if must_have_write is True:
                driver.raise_for_write_perm()
            kwargs['driver'] = driver
            return view(request, **kwargs)
        return wrapper
    return decorator
|
[
"kontakt@epodreczniki.pl"
] |
kontakt@epodreczniki.pl
|
0b7c4c174ac6c7f498f46996277dc543c4576816
|
7969cea981b2d2c665b62c4de58c2d9556bfeaad
|
/original/test_new_vctk.py
|
3ba4d093058763ef3f014e46af880a1b0c508a41
|
[] |
no_license
|
xcmyz/Forced-Alignment
|
106dc73072d34bb07e881ee310e2a4a327230214
|
c20dd4c892c39b5d6a0ee6bef673ac523621f15e
|
refs/heads/master
| 2020-05-02T03:52:30.079472
| 2019-04-17T09:40:48
| 2019-04-17T09:40:48
| 177,738,186
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
import os
from nnmnkwii.datasets import vctk
import hparams as hp
# speakers = vctk.available_speakers
# print(speakers)
# print(len(speakers))
# Derive the speaker list from the processed-corpus directory layout:
# VCTK wav folders are named like "p225", so chars [1:4] are the speaker id.
speakers = list()
for file in os.listdir(os.path.join(hp.vctk_processed, "wav48")):
    speakers.append(str(file[1:4]))
# print(speakers)
# print(len(speakers))
# Collect transcriptions and wav paths for the same speaker set, then
# spot-check that index 32306 lines up across both sources.
td = vctk.TranscriptionDataSource(hp.vctk_processed, speakers=speakers)
transcriptions = td.collect_files()
wav_paths = vctk.WavFileDataSource(
    hp.vctk_processed, speakers=speakers).collect_files()
print(transcriptions[32306])
print(wav_paths[32306])
|
[
"noreply@github.com"
] |
xcmyz.noreply@github.com
|
f3abd2898ef25b92b7c5f64799adadc6acde5e1d
|
5783be589f9f6ab590ea097eb9b84fa3786617e4
|
/Trie/contacts_list.py
|
31c37f25db381989e9aad83a48ec1dcf4f978847
|
[
"Apache-2.0"
] |
permissive
|
suyash248/ds_algo
|
5751e46ba4b959f0dd3f6843800f3e21d52100ac
|
1ca9470c33236016cbb88a38b2f19db41535e457
|
refs/heads/master
| 2022-12-10T10:39:16.135888
| 2022-12-06T16:45:25
| 2022-12-06T16:45:25
| 58,738,512
| 8
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,179
|
py
|
# https://www.youtube.com/watch?v=vlYZb68kAY0 - Contacts list
# Insert and search costs O(key_length), however the memory requirements of Trie is O(ALPHABET_SIZE * key_length * N)
# where N is number of keys in Trie.
# https://www.youtube.com/watch?v=AXjmTQ8LEoI
class ContactNode(object):
    """Single trie node: child map, end-of-word flag, and a count of the
    words whose path passes through this node."""
    def __init__(self):
        self.children = {}
        self.endOfWord = False
        # Count of words (leaf nodes) starting from this node. It will help to find out the number of words/count starting with some prefix.
        self.wordsNum = 0

    def __str__(self):
        return "Children: {} | {}".format(self.children.keys(), self.endOfWord)


class Trie(object):
    """Prefix tree over contact names.

    Insert and search cost O(key_length); memory is roughly
    O(ALPHABET_SIZE * key_length * N) for N stored keys.
    """
    def __init__(self):
        self.__root__ = ContactNode()

    # Time complexity: O(length_of_word)
    def insert(self, word):
        """Add ``word``, bumping wordsNum on every node along its path."""
        cur = self.__root__
        for ch in word:
            child = cur.children.get(ch, None)
            if child is None:
                child = ContactNode()
                cur.children[ch] = child
            child.wordsNum += 1
            cur = child
        cur.endOfWord = True

    # Time complexity: O(length_of_word)
    def search(self, word):
        """Return True iff ``word`` was inserted (exact match)."""
        cur = self.__root__
        for ch in word:
            child = cur.children.get(ch, None)
            if child is None:
                return False
            cur = child
        return cur.endOfWord

    def prefix_search_count(self, prefix):
        """Return how many stored words start with ``prefix``."""
        cur = self.__root__
        for ch in prefix:
            child = cur.children.get(ch, None)
            if child is None:
                return 0
            cur = child
        return cur.wordsNum

    # Time complexity: O(length_of_word)
    def __delete__(self, word, cur, index=0):
        if index == len(word):
            if cur.endOfWord:
                cur.endOfWord = False  # Mark `endOfWord` as we're going to delete this.
                # If there are no children, the caller may free this node.
                return len(cur.children) == 0
            return False
        ch = word[index]
        child = cur.children.get(ch, None)
        # No need to check if this word exists or not as we've already checked it before calling this method.
        # BUGFIX: decrement the counter on the node we descend INTO.  The
        # original decremented ``cur`` (the parent), which skewed the root's
        # count and left the word's last node over-counted whenever that
        # node survived (i.e. the deleted word was a prefix of another).
        child.wordsNum -= 1
        shouldRemove = self.__delete__(word, child, index=index + 1)
        if shouldRemove:
            # Delete this node from memory, i.e. remove node corresponding to key(ch) from it's parent's `children` dict
            cur.children.pop(ch)
            # If parent's `children` dict becomes empty then parent is also a candidate for removal, return `True` in that case.
            return len(cur.children) == 0
        return False

    def delete(self, word):
        """Remove ``word`` if present; prunes nodes that become unused."""
        if self.search(word):
            self.__delete__(word, self.__root__)

    def __prefix_search__(self, prefix, joint_node):
        # Depth-first walk printing every complete word under joint_node.
        for ch, child_node in joint_node.children.items():
            prefix = prefix + ch
            if child_node.endOfWord:
                # BUGFIX: this was a Python-2 `print prefix` garbled across
                # two lines, which printed nothing on Python 3.
                print(prefix)
            self.__prefix_search__(prefix, child_node)
            prefix = prefix[:-1]  # Backtracking

    def prefix_search(self, prefix):
        """Print every stored word starting with ``prefix``."""
        cur = self.__root__
        # Traverse till last character in `prefix`
        for ch in prefix:
            child = cur.children.get(ch, None)
            if child is None:
                return None
            cur = child
        self.__prefix_search__(prefix, cur)
if __name__ == '__main__':
    # Interactive menu-driven demo of the contacts Trie.
    trie = Trie()
    choices = {
        1: "Add contact",
        2: "Search contact",
        3: "Prefix search(startswith) count",
        4: "Prefix search(startswith)",
        5: "Delete contact",
        6: "Exit"
    }
    choices = '\n'.join(['{}. {}'.format(k, v) for k, v in choices.items()])
    while True:
        print("\n" + choices + "\n")
        try:
            choice = int(input("Enter your choice - ")) or 0
        except:
            # NOTE(review): bare except silently maps any bad input to 0
            # (falls through to "Invalid choice" below)
            choice = 0
        if choice == 1:
            word = input("Please enter a contact name to be inserted - ")
            trie.insert(word)
        elif choice == 2:
            word = input("Please enter contact name to be searched - ")
            is_present = trie.search(word)
            print("{} is {}present".format(word, "" if is_present else "not "))
        elif choice == 3:
            prefix = input("Please enter a contact name/prefix to be searched - ")
            wordsNum = trie.prefix_search_count(prefix)
            print("There are {} contact(s) starting with prefix {}".format(wordsNum, prefix))
        elif choice == 4:
            prefix = input("Please enter a contact name/prefix to be searched - ")
            print("Contact(s) starting with prefix {} are -".format(prefix))
            trie.prefix_search(prefix)
        elif choice == 5:
            word = input("Please enter a word/sequence to be deleted - ")
            trie.delete(word)
        elif choice == 6:
            print("Thank you!")
            break
        else:
            print("Invalid choice")
            continue
|
[
"suyash.soni248@gmail.com"
] |
suyash.soni248@gmail.com
|
2bb7c7ba3061c50db496fcc55f5566792482e2cd
|
65c8a6a7af2ee8cdf3866d012ea814887bd68a26
|
/ppro360_automation/Ppro360/CoachingAndTriadCoaching_Pages/RapidFireProcessConfirmation.py
|
0a3d8240f845597bb551d6c2ea4dd50383a5257f
|
[] |
no_license
|
1282270620/automation_test
|
9b3c595c3f7a139ded0a638ae4bcf31e0b7f9686
|
3faf86f0d641089eaf27eba906d22157dd2c1f5d
|
refs/heads/master
| 2020-04-01T06:35:33.873989
| 2018-10-21T03:05:17
| 2018-10-21T03:05:17
| 152,954,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
'''
Created on 20171101
@author: lei.tan
'''
from selenium.webdriver.common.by import By
from Tablet_pages import BasePage
class RapidFireProcessConfirmation(BasePage.Action):
    """Page object for the Rapid Fire Process Confirmation form.

    Locators are absolute XPaths captured from the page; the ``*_path``
    attributes are templates taking a 1-based row/column index via ``%``.
    """
    def __init__(self):
        # fixed locator for the call-recording-number input, plus indexed
        # templates for the KPI checkbox column, comment boxes, per-line
        # score inputs and the score "ball" status icons
        self.callRecordingNumber_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div/div[3]/div[2]/div/div/input")
        self.KPIcheckbox_path="//*[@id='container']/div/section/div/form/div[2]/div[1]/div/table/tbody/tr[4]/td[%d]/i"
        self.comments_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/textarea"
        self.comments_title_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/label"
        self.scoreinput_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/div[4]/input"
        self.scoreballstataus_path="//*[@id='container']/div/section/div/form/div[2]/div[%d]/div/div[4]/i"
        self.overallscore_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div[2]/label/div[2]/input")
        self.overallball_loc=(By.XPATH,"//*[@id='container']/div/section/div/form/div[2]/label/div[2]/i")
    def click_KPIcheckbox (self, checkboxorderindex):
        # click the KPI checkbox in the given table column (1-based)
        self.KPIcheckbox_loc=(By.XPATH,self.KPIcheckbox_path %checkboxorderindex)
        self.find_element(*self.KPIcheckbox_loc).click()
    def input_callRecordingNumber (self,text):
        self.find_element(*self.callRecordingNumber_loc).send_keys(text);
    def input_comments(self,lineindex,text):
        # type into the comment textarea on the given line
        self.comments_loc=(By.XPATH,self.comments_path %lineindex)
        self.Input_text(text,*self.comments_loc)
    def get_comments(self,lineindex):
        self.comments_loc=(By.XPATH,self.comments_path %lineindex )
        return self.find_element(*self.comments_loc).get_attribute("value")
    def comments_disabled(self,lineindex):
        # returns the raw "disabled" attribute (truthy string or None)
        self.comments_loc=(By.XPATH,self.comments_path %lineindex )
        flag=self.find_element(*self.comments_loc).get_attribute("disabled")
        return flag
    def get_commentsBoxtitle(self,lineindex):
        self.comments_title_loc=(By.XPATH,self.comments_title_path %lineindex)
        return self.find_element(*self.comments_title_loc).text
    def input_scoreinput(self,lineindex,text):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex)
        self.Input_text(text,*self.scoreinput_loc)
    def get_scoreinput(self,lineindex):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex )
        return self.find_element(*self.scoreinput_loc).get_attribute("value")
    def scoreinput_disabled(self,lineindex):
        self.scoreinput_loc=(By.XPATH,self.scoreinput_path %lineindex )
        flag=self.find_element(*self.scoreinput_loc).get_attribute("disabled")
        return flag
    def get_scoreballstataus(self,lineindex):
        # CSS class of the per-line score ball icon encodes its status
        scoreballstataus_loc=(By.XPATH,self.scoreballstataus_path %lineindex )
        scoreballstataus=self.find_element(*scoreballstataus_loc).get_attribute("class")
        return scoreballstataus
    def get_overallscore(self):
        return self.find_element(*self.overallscore_loc).get_attribute("value")
    def overallscore_disabled(self):
        flag=self.find_element(*self.overallscore_loc).get_attribute("disabled")
        return flag
    def get_overallballstataus(self):
        scoreballstataus=self.find_element(*self.overallball_loc).get_attribute("class")
        return scoreballstataus
|
[
"1282270620@qq.com"
] |
1282270620@qq.com
|
f2ec15ec6b195fffb34cf7280adecd51ca8ee052
|
95d1dd5758076c0a9740d545a6ef2b5e5bb8c120
|
/PY/basic/class_inherit.py
|
98146eaa6d42f48c981e6d630f45405486b34194
|
[] |
no_license
|
icoding2016/study
|
639cb0ad2fe80f43b6c93c4415dc6e8a11390c85
|
11618c34156544f26b3b27886b55c771305b2328
|
refs/heads/master
| 2023-08-31T14:15:42.796754
| 2023-08-31T05:28:38
| 2023-08-31T05:28:38
| 117,061,872
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
#!/usr/bin/python
from __future__ import print_function
class B(object):
    """Base class for demonstrating class-vs-instance attribute lookup."""

    # shared by all instances until shadowed on a particular instance
    class_var = None

    def __init__(self):
        print("Class B init..")
        self.inst_var = 0

    def _dump_state(self, header):
        # helper: print a header line followed by both attribute values
        print(header)
        print("class_var=%s" % self.class_var)
        print("inst_var=%s" % self.inst_var)

    def func(self):
        self._dump_state("B::func()")

    def show(self):
        self._dump_state("B::show()")
class B1(B):
    """Subclass that overrides __init__ WITHOUT calling super(), so
    B.__init__ never runs for B1 instances (deliberate demonstration)."""
    def __init__(self):
        print("Class B1 init..")
        self.inst_var = 1
    def func(self):
        # fully overrides B.func; does not chain to the base implementation
        print("B1::func()")
class B2(B):
    """Subclass that chains to B.__init__ and demonstrates the difference
    between assigning via ``self`` (creates an instance attribute) and
    assigning via the class (mutates the shared class attribute)."""
    def __init__(self):
        super(B2, self).__init__()
        print("base::__init__ called before Class B2 init..")
        self.inst_var = 2 # initiate the instance's inst_var, not changing the base instance's inst_var
    def func(self):
        print("B2::func(), then explicitly call base.func()")
        super(B2, self).func()
    def changeSelfClassVar(self):
        self.class_var = 2 # this add a var to the instance and assign 2, not changing the B::class_var
        print("B2: self.class_var -> %s" % self.class_var)
    def changeClassVar(self):
        B.class_var = 22 # this modifies the 'class var' (static)
        print("B2: class_var -> %s" % B.class_var)
# Demo driver: exercises B/B1/B2 to show how instance vs class attribute
# assignment interacts with inheritance.
if "__main__" in __name__:
    print("-"*20)
    b = B()
    b.func()
    print("-"*20)
    b1 = B1()
    b1.func()
    print("-"*20)
    b2 = B2()
    b2.func()
    print("-"*10)
    b2.changeSelfClassVar()
    b.show() # self.inst_var still None, 'static' B.class_var not changed.
    b2.changeClassVar()
    b.show()
|
[
"icoding2016@gmail.com"
] |
icoding2016@gmail.com
|
f967c5af25bac400dae4bde6a3438947838cd97e
|
e35eb92b5ab6547119585004b9eea3cafe948050
|
/efsw/storage/errors.py
|
84ab6044f4679693c7697a6ed29b48ba498314da
|
[] |
no_license
|
einsfr/mmkit
|
0a084db85b2cf5ba268e692676095d768733f387
|
f12bc2f83254a3123e02abdc105816cc04c438b5
|
refs/heads/master
| 2020-12-31T05:56:19.287611
| 2016-06-10T05:56:58
| 2016-06-10T05:56:58
| 29,473,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 838
|
py
|
# Storage error-message templates, formatted with str.format().
# The first four are Russian-language, user-facing messages; the remaining
# three are developer-facing configuration errors in English.
FILE_DOES_NOT_EXIST_IN_STORAGE = 'В хранилище "{0}" отсутствует файл "{1}".'
STORAGE_DOES_NOT_CONTAIN_FILE = 'Файл "{0}" не принадлежит хранилищу "{1}".'
FILE_ALREADY_EXISTS_IN_STORAGE = 'Файл "{0}" уже существует в хранилище "{1}".'
STORAGE_ROOT_NOT_FOUND = 'Корневая папка хранилищ "{0}" не существует.'
STORAGE_ROOT_REWRITE_FORBIDDEN = 'Storage\'s root directory can\'t be rewritten if application is in production ' \
                                 'environment.'
STORAGE_BASE_DIR_REWRITE_FORBIDDEN = 'Storage\'s base directory can\'t be rewritten if application is in production ' \
                                     'environment.'
STORAGE_BASE_DIR_NOT_FOUND = 'Storage\'s "{0}" base directory "{1}" doesn\'t exist.'
|
[
"einsfr@users.noreply.github.com"
] |
einsfr@users.noreply.github.com
|
283f427e9d9fd76dffc9aa0194a13ceee65c1eed
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/6010/210006010.py
|
9c9a3fead2b5be7e1936e8c84d815fd4b8fdba06
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,639
|
py
|
from bots.botsconfig import *
from records006010 import recorddefs
# Bots EDI grammar for X12 transaction set 210 (Motor Carrier Freight
# Details and Invoice), version 006010.  ID/MIN/MAX/LEVEL come from
# bots.botsconfig; segment record layouts come from records006010.
syntax = {
    'version': '00601',
    'functionalgroup': 'IM',
}
# Segment tree: MIN/MAX are per-parent occurrence bounds; LEVEL nests
# child segments under their parent loop.
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'B3', MIN: 1, MAX: 1},
        {ID: 'C2', MIN: 0, MAX: 1},
        {ID: 'C3', MIN: 0, MAX: 1},
        {ID: 'ITD', MIN: 0, MAX: 1},
        {ID: 'L11', MIN: 0, MAX: 300},
        {ID: 'G62', MIN: 0, MAX: 6},
        {ID: 'R3', MIN: 0, MAX: 12},
        {ID: 'H3', MIN: 0, MAX: 6},
        {ID: 'K1', MIN: 0, MAX: 10},
        {ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 1},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'L11', MIN: 0, MAX: 5},
        ]},
        {ID: 'N7', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'M7', MIN: 0, MAX: 2},
        ]},
        {ID: 'OID', MIN: 0, MAX: 999999, LEVEL: [
            {ID: 'SDQ', MIN: 0, MAX: 10},
        ]},
        {ID: 'S5', MIN: 0, MAX: 999, LEVEL: [
            {ID: 'L11', MIN: 0, MAX: 10},
            {ID: 'G62', MIN: 0, MAX: 10},
            {ID: 'H3', MIN: 0, MAX: 6},
            {ID: 'OID', MIN: 0, MAX: 999999, LEVEL: [
                {ID: 'SDQ', MIN: 0, MAX: 10},
            ]},
            {ID: 'N1', MIN: 0, MAX: 2, LEVEL: [
                {ID: 'N2', MIN: 0, MAX: 1},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'L11', MIN: 0, MAX: 5},
                {ID: 'N7', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'M7', MIN: 0, MAX: 2},
                ]},
            ]},
        ]},
        {ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'L11', MIN: 0, MAX: 20},
            {ID: 'L5', MIN: 0, MAX: 30},
            {ID: 'H1', MIN: 0, MAX: 3},
            {ID: 'H2', MIN: 0, MAX: 2},
            {ID: 'L0', MIN: 0, MAX: 10},
            {ID: 'L1', MIN: 0, MAX: 50},
            {ID: 'L4', MIN: 0, MAX: 10},
            {ID: 'L7', MIN: 0, MAX: 10},
            {ID: 'K1', MIN: 0, MAX: 10},
            {ID: 'OID', MIN: 0, MAX: 999999, LEVEL: [
                {ID: 'SDQ', MIN: 0, MAX: 10},
            ]},
            {ID: 'N1', MIN: 0, MAX: 999999, LEVEL: [
                {ID: 'N2', MIN: 0, MAX: 1},
                {ID: 'N3', MIN: 0, MAX: 2},
                {ID: 'N4', MIN: 0, MAX: 1},
                {ID: 'L11', MIN: 0, MAX: 10},
                {ID: 'CD3', MIN: 0, MAX: 999999, LEVEL: [
                    {ID: 'L11', MIN: 0, MAX: 20},
                    {ID: 'H6', MIN: 0, MAX: 10},
                    {ID: 'L9', MIN: 0, MAX: 50},
                    {ID: 'POD', MIN: 0, MAX: 1},
                    {ID: 'G62', MIN: 0, MAX: 1},
                ]},
                {ID: 'OID', MIN: 0, MAX: 999999, LEVEL: [
                    {ID: 'SDQ', MIN: 0, MAX: 10},
                ]},
            ]},
        ]},
        {ID: 'L3', MIN: 0, MAX: 1},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
|
[
"doug.vanhorn@tagglogistics.com"
] |
doug.vanhorn@tagglogistics.com
|
262e5a8fc1b3277a125ac7ac66fefddc56cae93a
|
a457e3284fa1f32257969a72c69082dd0179eb73
|
/gladweb/config.py
|
ef8979cda314e9b8cbea6d22467ff25691cdb8b3
|
[] |
no_license
|
slow2go/glad-web
|
19377a6f17f19a4ebc46bc9c61afc9f709f628b0
|
13f8674c9602d1288b5de9437cf618e835fcac4e
|
refs/heads/master
| 2021-01-24T08:29:43.615111
| 2017-05-22T14:29:30
| 2017-05-22T14:29:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
# ---
# Default Configuration
# ---
import os
import gladweb.cache
# Project root: the directory containing this package's parent.
base_path = os.path.abspath(os.path.join(os.path.split(__file__)[0], '..'))
# ---
# Flask
# ---
# This key MUST be changed before you make a site public, as it is used
# to sign the secure cookies used for sessions.
# NOTE(review): shipping a literal default secret is a known foot-gun;
# the local_config override at the bottom is the intended way to replace it.
SECRET_KEY = 'ChangeMeOrGetHacked'
# ---
# Glad Web
# ---
# A cache, which will be used to store/retrieve various files.
CACHE = gladweb.cache.FileCache(os.path.join(base_path, 'cache'))
# Path to a folder which will be used to store generation results
TEMP = os.path.join(base_path, 'temp')
# Generate static html files for /generated
# the webserver needs to be configured to serve /generated instead of passing
# requests through to glad-web.
# Note: /generated/icons still needs to be served by glad-web
FREEZE = True
# Allow a sibling local_config.py to override any of the defaults above;
# silently skipped when no such module exists.
try:
    from local_config import *
except ImportError:
    pass
|
[
"admin@dav1d.de"
] |
admin@dav1d.de
|
b182d112f6cb1b8565fb48e838a02291e2d64987
|
2bcc421ee345b00cf805c543b37d18b5d019dc04
|
/adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/azureiot_central_properties.py
|
415f9b7095f77f7c046958466f0ecc7f3a5f28bd
|
[] |
no_license
|
saewoonam/sc-current-source-titano
|
5a1ad46889c1b09c168424901fd71cb4eab5c61b
|
1c136aa8b61268d9ac0b5a682b30ece70ab87663
|
refs/heads/main
| 2023-03-02T22:12:26.685537
| 2021-02-09T03:28:01
| 2021-02-09T03:28:01
| 317,299,900
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,963
|
py
|
import random
import time
import board
import busio
from digitalio import DigitalInOut
import neopixel
from adafruit_esp32spi import adafruit_esp32spi, adafruit_esp32spi_wifimanager
import adafruit_esp32spi.adafruit_esp32spi_socket as socket
from adafruit_ntp import NTP
# Get wifi details and more from a secrets.py file
# Get wifi details and more from a secrets.py file
try:
    from secrets import secrets
except ImportError:
    print("WiFi secrets are kept in secrets.py, please add them there!")
    raise
# ESP32 Setup
# Boards with a built-in ESP32 co-processor expose ESP_* pins; otherwise
# fall back to the common external-AirLift wiring on D11/D12/D13.
try:
    esp32_cs = DigitalInOut(board.ESP_CS)
    esp32_ready = DigitalInOut(board.ESP_BUSY)
    esp32_reset = DigitalInOut(board.ESP_RESET)
except AttributeError:
    esp32_cs = DigitalInOut(board.D13)
    esp32_ready = DigitalInOut(board.D11)
    esp32_reset = DigitalInOut(board.D12)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
"""Use below for Most Boards"""
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)  # Uncomment for Most Boards
"""Uncomment below for ItsyBitsy M4"""
# status_light = dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=0.2)
# Uncomment below for an externally defined RGB LED
# import adafruit_rgbled
# from adafruit_esp32spi import PWMOut
# RED_LED = PWMOut.PWMOut(esp, 26)
# GREEN_LED = PWMOut.PWMOut(esp, 27)
# BLUE_LED = PWMOut.PWMOut(esp, 25)
# status_light = adafruit_rgbled.RGBLED(RED_LED, BLUE_LED, GREEN_LED)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
print("Connecting to WiFi...")
wifi.connect()
print("Connected to WiFi!")
# A valid wall clock is needed before connecting to Azure (TLS / token
# expiry), so block until NTP reports a valid time.
print("Getting the time...")
ntp = NTP(esp)
# Wait for a valid time to be received
while not ntp.valid_time:
    time.sleep(5)
    ntp.set_time()
print("Time:", str(time.time()))
# To use Azure IoT Central, you will need to create an IoT Central app.
# You can either create a free tier app that will live for 7 days without an Azure subscription,
# Or a standard tier app that will last for ever with an Azure subscription.
# The standard tiers are free for up to 2 devices
#
# If you don't have an Azure subscription:
#
# If you are a student, head to https://aka.ms/FreeStudentAzure and sign up, validating with your
# student email address. This will give you $100 of Azure credit and free tiers of a load of
# service, renewable each year you are a student
#
# If you are not a student, head to https://aka.ms/FreeAz and sign up to get $200 of credit for 30
# days, as well as free tiers of a load of services
#
# Create an Azure IoT Central app by following these instructions: https://aka.ms/CreateIoTCentralApp
# Add a device template with telemetry, properties and commands, as well as a view to visualize the
# telemetry and execute commands, and a form to set properties.
#
# Next create a device using the device template, and select Connect to get the device connection details.
# Add the connection details to your secrets.py file, using the following values:
#
# 'id_scope' - the devices ID scope
# 'device_id' - the devices device id
# 'key' - the devices primary key
#
# The adafruit-circuitpython-azureiot library depends on the following libraries:
#
# From the Adafruit CircuitPython Bundle (https://github.com/adafruit/Adafruit_CircuitPython_Bundle):
# * adafruit-circuitpython-minimqtt
# * adafruit-circuitpython-requests
from adafruit_azureiot import IoTCentralDevice
# Create an IoT Hub device client and connect
device = IoTCentralDevice(socket, esp, secrets["id_scope"], secrets["device_id"], secrets["key"])
# Subscribe to property changes
# Properties can be updated either in code, or by adding a form to the view
# in the device template, and setting the value on the dashboard for the device
def property_changed(property_name, property_value, version):
    """Log each cloud-initiated property update (name, value, version)."""
    print("Property", property_name, "updated to", str(property_value), "version", str(version))
# Subscribe to the property changed event
device.on_property_changed = property_changed
print("Connecting to Azure IoT Central...")
# Connect to IoT Central
device.connect()
print("Connected to Azure IoT Central!")
# Starts at 60 so the first property value is sent on the first iteration.
message_counter = 60
while True:
    try:
        # Send property values every minute
        # You can see the values in the devices dashboard
        if message_counter >= 60:
            device.send_property("Desired_Temperature", random.randint(0, 50))
            message_counter = 0
        else:
            message_counter = message_counter + 1
        # Poll every second for messages from the cloud
        device.loop()
    except (ValueError, RuntimeError) as e:
        print("Connection error, reconnecting\n", str(e))
        # If we lose connectivity, reset the wifi and reconnect
        wifi.reset()
        wifi.connect()
        device.reconnect()
        continue
    time.sleep(1)
|
[
"nams@nist.gov"
] |
nams@nist.gov
|
c2ea836a58ec6f9d02d3d631bdecf55d3db16ccf
|
88307f29f2930213819b2a21ac328ee52e5d8d65
|
/tests/benchmark.py
|
52a684fb04421ac6481101c185ec87ab22b3704e
|
[
"BSD-3-Clause"
] |
permissive
|
ChristopherBradley/cogent3
|
7dc6524d66687402d2bd48c07ca68b41133e9f00
|
4b4c0fbc77f50aebd74ecf44a6d1777b2e2c0fbb
|
refs/heads/master
| 2023-02-27T00:58:29.796585
| 2020-11-09T04:13:51
| 2020-11-09T04:13:51
| 219,615,537
| 0
| 0
|
BSD-3-Clause
| 2023-02-21T20:03:32
| 2019-11-04T23:22:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,641
|
py
|
#!/usr/bin/env python
import sys # ,hotshot
from cogent3 import load_aligned_seqs, load_tree
from cogent3.evolve.substitution_model import (
TimeReversibleCodon,
TimeReversibleDinucleotide,
TimeReversibleNucleotide,
)
from cogent3.maths import optimisers
from cogent3.util import parallel
__author__ = "Peter Maxwell and Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Peter Maxwell", "Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
# Shared benchmark fixtures: the BRCA1 alignment and the Murphy tree,
# loaded once at import time and subset per benchmark run.
ALIGNMENT = load_aligned_seqs(filename="data/brca1.fasta")
TREE = load_tree(filename="data/murphy.tree")
def subtree(size):
    """Return the first ``size`` taxon names and the matching subtree."""
    names = ALIGNMENT.names[:size]
    assert len(names) == size
    tree = TREE.get_sub_tree(names)  # .balanced()
    return names, tree
def brca_test(subMod, names, tree, length, par_rules, **kw):
    """Build a likelihood function for ``subMod`` over the first ``length``
    alignment columns restricted to taxa ``names``.

    Returns a (likelihood-function, alignment) pair; extra keyword args
    are forwarded to ``make_likelihood_function``.
    """
    # names = ALIGNMENT.names[:taxa]
    # assert len(names) == taxa
    # NOTE(review): this rebuilds the subtree and shadows the ``tree``
    # parameter -- the passed-in tree is never used; confirm intended.
    tree = TREE.get_sub_tree(names)  # .balanced()
    aln = ALIGNMENT.take_seqs(names).omit_gap_pos()[:length]
    assert len(aln) == length, (len(aln), length)
    # the_tree_analysis = LikelihoodFunction(treeobj = tree, submodelobj = subMod, alignobj = aln)
    par_controller = subMod.make_likelihood_function(tree, **kw)
    for par_rule in par_rules:
        par_controller.set_param_rule(**par_rule)
    # lf = par_controller.make_calculator(aln)
    return (par_controller, aln)
def measure_evals_per_sec(pc, aln):
    """Attach ``aln`` to the controller and time likelihood evaluations
    (CPU time, capped at ~2 seconds)."""
    pc.set_alignment(aln)
    return pc.measure_evals_per_second(time_limit=2.0, wall=False)
def makePC(modelClass, parameterisation, length, taxa, tree, opt_mprobs, **kw):
    """Build a substitution model and likelihood controller for one
    benchmark configuration.

    ``modelClass`` is a class name as a string; ``parameterisation``
    (True/False/None) selects the 'silly' predicate rule, None disables it.
    """
    # NOTE(review): eval() of a name string -- only safe for the hard-coded
    # names in benchmarks().  Also, benchmarks() passes e.g. "Nucleotide"
    # while only TimeReversible* names are imported above; confirm these
    # still resolve in this module's namespace.
    modelClass = eval(modelClass)
    if parameterisation is not None:
        predicates = {"silly": silly_predicate}
        par_rules = [{"par_name": "silly", "is_independent": parameterisation}]
    else:
        predicates = {}
        par_rules = []
    subMod = modelClass(
        equal_motif_probs=True,
        optimise_motif_probs=opt_mprobs,
        predicates=predicates,
        recode_gaps=True,
        mprob_model="conditional",
    )
    (pc, aln) = brca_test(subMod, taxa, tree, length, par_rules, **kw)
    return (pc, aln)
def quiet(f, *args, **kw):
    """Invoke ``f(*args, **kw)`` with stdout redirected to a throwaway
    buffer, discarding anything it prints; the return value (or any
    exception) passes through, and stdout is always restored."""
    import io
    import sys

    saved_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        return f(*args, **kw)
    finally:
        sys.stdout = saved_stdout
def evals_per_sec(*args):
    """Build a likelihood calculator for *args* and report its evaluation
    rate as a whole-number string."""
    pc, aln = makePC(*args)  # quiet(makeLF, *args)
    rate = measure_evals_per_sec(pc, aln)
    return str(int(rate))
class CompareImplementations(object):
    """Callable benchmark that runs the same configuration under two
    implementations (selected via ``switch(0)`` / ``switch(1)``) and
    reports the speed ratio as a signed string, e.g. '+1.5' means the
    second implementation is 1.5x faster."""
    def __init__(self, switch):
        # switch(0/1) toggles which implementation is active
        self.switch = switch

    def __call__(self, *args):
        self.switch(0)
        (pc, aln) = quiet(makePC, *args)
        speed1 = measure_evals_per_sec(pc, aln)
        self.switch(1)
        (pc, aln) = quiet(makePC, *args)
        speed2 = measure_evals_per_sec(pc, aln)
        if speed1 < speed2:
            speed = "+%2.1f" % (speed2 / speed1)
        else:
            speed = "-%2.1f" % (speed1 / speed2)
        # a ratio that rounds to exactly 1.0 is reported as "no difference"
        if speed in ["+1.0", "-1.0"]:
            speed = ""
        return speed
def benchmarks(test):
    """Print a results table of ``test`` for every alphabet x sequence
    length x tree size, under three parameterisation regimes.

    ``test`` is either ``evals_per_sec`` (absolute rate) or a
    ``CompareImplementations`` instance (relative speed string).
    """
    alphabets = ["Nucleotide", "Dinucleotide", "Codon"]
    sequence_lengths = [18, 2004]
    treesizes = [5, 20]
    for (optimise_motifs, parameterisation) in [
        (False, "global"),
        (False, "local"),
        (True, "global"),
    ]:
        print(parameterisation, ["", "opt motifs"][optimise_motifs])
        print(" " * 14, end=" ")
        # column group width: 5 chars per sequence length plus padding
        wcol = 5 * len(sequence_lengths) + 2
        for alphabet in alphabets:
            print(str(alphabet).ljust(wcol), end=" ")
        print()
        print("%-15s" % "", end=" ")  # "length"
        for alphabet in alphabets:
            for sequence_length in sequence_lengths:
                print("%4s" % sequence_length, end=" ")
            print(" ", end=" ")
        print()
        print(
            " " * 12
            + (
                " | ".join(
                    [""]
                    + ["-" * (len(sequence_lengths) * 5) for alphabet in alphabets]
                    + [""]
                )
            )
        )
        for treesize in treesizes:
            print(("%4s taxa | " % treesize), end=" ")
            (taxa, tree) = subtree(treesize)
            for alphabet in alphabets:
                for sequence_length in sequence_lengths:
                    speed = test(
                        alphabet,
                        parameterisation == "local",
                        sequence_length,
                        taxa,
                        tree,
                        optimise_motifs,
                    )
                    print("%4s" % speed, end=" ")
                print("| ", end=" ")
            print()
        print()
        print()
def silly_predicate(a, b):
    """Arbitrary benchmark predicate: True when either motif contains
    more 'A' characters than 'T' characters."""
    def a_heavy(motif):
        return motif.count("A") > motif.count("T")
    return a_heavy(a) or a_heavy(b)
# def asym_predicate((a,b)):
# print a, b, 'a' in a
# return 'a' in a
# mA = Codon()
# mA.setPredicates({'asym': asym_predicate})
def exponentiator_switch(switch):
    """Toggle the substitution-calculation implementation flag used by
    the relative-benchmark mode."""
    import cogent3.evolve.substitution_calculation as subst_calc
    subst_calc.use_new = switch
# Command-line entry: passing 'relative' compares the old/new exponentiator
# implementations; otherwise the absolute evaluations-per-second is reported.
if "relative" in sys.argv:
    test = CompareImplementations(exponentiator_switch)
else:
    test = evals_per_sec
parallel.inefficiency_forgiven = True
# NOTE(review): non-zero MPI ranks run the benchmark silently (output
# suppressed) while rank 0 prints -- confirm this split is intentional.
if parallel.get_rank() > 0:
    # benchmarks(test)
    quiet(benchmarks, test)
else:
    try:
        benchmarks(test)
    except KeyboardInterrupt:
        print(" OK")
|
[
"Gavin.Huttley@anu.edu.au"
] |
Gavin.Huttley@anu.edu.au
|
1ac63a541e9a8dce7c61fe484cbb580d7979038e
|
0737f5a9e19cc14692c8bf99dc349ae856a20b0c
|
/replay_buffer.py
|
8f793775a62d5ab3e9a96878a951fc17eebfd190
|
[] |
no_license
|
takuseno/unreal
|
37fd0c0b7613182f1abb5d55b5d0f8564acf25c2
|
864cfbc1edf56510c69ef3809ae0adc6cb129017
|
refs/heads/master
| 2020-03-22T19:55:25.246229
| 2018-08-07T09:03:52
| 2018-08-07T09:03:52
| 140,560,691
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,188
|
py
|
from collections import deque
from random import sample, randrange, random
import uuid
class ReplayBuffer:
    """Experience buffer for UNREAL-style auxiliary tasks.

    Transitions are keyed by a UUID; insertion order is kept in ``self.ids``.
    Two side indexes support reward prediction (rewarding vs non-rewarding
    states), and ``episode_terminal_ids`` records where episodes ended so
    sampled sequences never cross an episode boundary.
    """

    def __init__(self, capacity=2e3):
        # capacity: maximum number of stored transitions (note: float literal).
        self.capacity = capacity
        self.ids = []                   # insertion-ordered transition ids
        self.transitions = {}           # id -> transition dict
        self.rewarding_states = {}      # id -> {obs_t, reward_tp1}, reward != 0
        self.non_rewarding_states = {}  # id -> {obs_t, reward_tp1}, reward == 0
        self.episode_terminal_ids = []  # ids whose transition ended an episode

    # ((s_t-2, s_t-1, s_t), a_t-1, r_t, a_t, r_t+1, s_t+1, t_t+1)
    def add(self, obs_t, action_tm1, reward_t, action_t, reward_tp1, obs_tp1, terminal):
        """Store one transition. obs_t is a stack of frames; only the newest
        frame is kept in the main transition, the full stack in the RP index."""
        # create unique id
        # NOTE(review): local name shadows the builtin `id`; harmless here.
        id = uuid.uuid4()
        self.ids.append(id)
        # remove oldest transision. The check runs before the new entry is
        # inserted, so the buffer can momentarily hold capacity + 1 entries.
        if len(self.transitions.keys()) > self.capacity:
            self.remove(self.ids[0])
        # for value function replay and others
        transition = dict(
            obs_t=obs_t[-1],
            action_tm1=action_tm1,
            reward_t=reward_t,
            action_t=action_t,
            reward_tp1=reward_tp1,
            obs_tp1=obs_tp1
        )
        self.transitions[id] = transition
        # for reward prediction
        reward_prediction_dict = dict(obs_t=obs_t, reward_tp1=reward_tp1)
        if reward_tp1 == 0.0:
            self.non_rewarding_states[id] = reward_prediction_dict
        else:
            self.rewarding_states[id] = reward_prediction_dict
        # add episode terminal id
        if terminal:
            self.episode_terminal_ids.append(id)

    def remove(self, id):
        """Drop the transition *id* from every index that references it."""
        if id in self.ids:
            self.ids.remove(id)
            self.transitions.pop(id)
            if id in self.episode_terminal_ids:
                self.episode_terminal_ids.remove(id)
            if id in self.rewarding_states:
                self.rewarding_states.pop(id)
            if id in self.non_rewarding_states:
                self.non_rewarding_states.pop(id)

    def sample_rp(self):
        """Sample one state for reward prediction, rewarding states with ~50%
        probability. Returns (obs stack, class: 0 zero / 1 positive / 2 negative).
        NOTE(review): assumes non_rewarding_states is non-empty when the coin
        flip lands there — sample() raises on an empty population.
        """
        prob = random()
        if prob > 0.5 and len(self.rewarding_states.values()) != 0:
            transition = sample(list(self.rewarding_states.values()), 1)[0]
        else:
            transition = sample(list(self.non_rewarding_states.values()), 1)[0]
        reward = transition['reward_tp1']
        if reward == 0.0:
            reward_class = 0
        elif reward > 0.0:
            reward_class = 1
        else:
            reward_class = 2
        return transition['obs_t'], reward_class

    def sample_sequence(self, n):
        """Return up to *n* consecutive transitions drawn from within a single
        (randomly chosen) episode, plus whether the slice ends at a terminal."""
        if len(self.episode_terminal_ids) > 0:
            # get terminal index
            episode_index = randrange(len(self.episode_terminal_ids))
            id = self.episode_terminal_ids[episode_index]
            end_index = self.ids.index(id)
            # get start index (one past the previous episode's terminal)
            if episode_index == 0:
                start_index = 0
            else:
                prev_id = self.episode_terminal_ids[episode_index - 1]
                start_index = self.ids.index(prev_id) + 1
        else:
            # no episode ends yet
            end_index = len(self.ids) - 1
            start_index = 0
        # get trajectory: pick a random n-long window inside the episode
        length = end_index - start_index + 1
        if length > n:
            sample_start_index = randrange(length - n + 1) + start_index
            sample_end_index = sample_start_index + n - 1
        else:
            sample_start_index = start_index
            sample_end_index = end_index
        # NOTE(review): relies on dict preserving insertion order (CPython 3.7+)
        transitions = list(self.transitions.values())
        sampled_transitions = transitions[sample_start_index:sample_end_index+1]
        is_terminal = self.ids[sample_end_index] in self.episode_terminal_ids
        return sampled_transitions, is_terminal

    def sample_vr(self, n):
        """Sample a sequence for value replay: parallel lists of observations,
        previous actions and rewards (length n+1: trailing s_t+1 appended)."""
        transitions, is_terminal = self.sample_sequence(n)
        # format results
        obs_t = []
        actions_tm1 = []
        rewards_t = []
        for transition in transitions:
            obs_t.append(transition['obs_t'])
            actions_tm1.append(transition['action_tm1'])
            rewards_t.append(transition['reward_t'])
        # append the state following the last transition
        obs_t.append(transitions[-1]['obs_tp1'])
        actions_tm1.append(transitions[-1]['action_t'])
        rewards_t.append(transitions[-1]['reward_tp1'])
        return obs_t, actions_tm1, rewards_t, is_terminal
|
[
"takuma.seno@gmail.com"
] |
takuma.seno@gmail.com
|
ec771792c99b81d60de883ad609af2084995cd10
|
a5c4ea16042a8078e360c32636c00e3163ac99a8
|
/Pytorch_Tutorial/08_transfer_learning/custompytorch/utils/helpers.py
|
29f3ab0c8ddc604babb84d5d48f924c624f60e47
|
[] |
no_license
|
lykhahaha/Mine
|
3b74571b116f72ee17721038ca4c58796610cedd
|
1439e7b161a7cd612b0d6fa4403b4c8c61648060
|
refs/heads/master
| 2020-07-15T05:16:13.808047
| 2019-06-01T07:30:01
| 2019-06-01T07:30:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,219
|
py
|
import torch
import time
import copy
import matplotlib.pyplot as plt
import numpy as np
def train_model(model, dataloaders, dataset_sizes, criterion, optimizer, scheduler=None, num_epochs=25):
    """
    Scheduling the learning rate
    Saving the best model

    Arguments:
        model: nn Module (assumed to already live on the chosen device — TODO confirm)
        dataloaders: {'train': torch.utils.data.DataLoader, 'val': torch.utils.data.DataLoader}
        dataset_sizes: {'train': dataset_sizes of train, 'test': dataset_sizes of test}
        criterion: loss callable (outputs, labels) -> scalar tensor
        optimizer: torch optimizer over model parameters
        scheduler: optional LR scheduler, stepped once per epoch
        num_epochs: number of full train+val passes

    Returns the model loaded with the weights of the best validation epoch.
    """
    device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
    # Snapshot initial weights so "best" is well-defined even at epoch 0.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_val_acc = 0.
    for e in range(num_epochs):
        start = time.time()
        # Running loss/accuracy accumulators, later normalized per dataset.
        statistics = {
            'train': {
                'loss': 0.,
                'acc': 0.
            },
            'val': {
                'loss':0.,
                'acc': 0.
            }
        }
        for phase in ['train', 'val']:
            if phase == 'train':
                if scheduler:
                    # NOTE(review): stepped before the optimizer runs this
                    # epoch; PyTorch >= 1.1 expects scheduler.step() after
                    # optimizer steps — confirm the intended schedule.
                    scheduler.step()
                model.train() # set model to training mode
            else:
                model.eval() # set model to evaluate mode
            # loop over dataloader
            for inputs, labels in dataloaders[phase]:
                inputs, labels = inputs.to(device), labels.to(device)
                # Zero out parameter gradients
                optimizer.zero_grad()
                # Forward pass, track history in train phase
                with torch.set_grad_enabled(phase=='train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, dim=1) # torch.max return 2 tensors: first is max value, second is argmax value
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Weight the mean batch loss by batch size before normalizing.
                statistics[phase]['loss'] += loss.item() * inputs.size(0)
                statistics[phase]['acc'] += (preds == labels.data).sum().item()
            statistics[phase] = {key: statistics[phase][key]/dataset_sizes[phase] for key in statistics[phase].keys()}
        time_elapsed = time.time() - start
        print(f"[INFO]Epoch {e+1}/{num_epochs} - {time_elapsed:.2f}s - Loss: {statistics['train']['loss']:.5f}, Accuracy: {statistics['train']['acc']:.5f}, Validation loss: {statistics['val']['loss']:.5f}, Validation accuracy: {statistics['val']['acc']:.5f}")
        # Keep the weights of the best validation epoch seen so far.
        if best_val_acc < statistics['val']['acc']:
            best_val_acc = statistics['val']['acc']
            best_model_wts = copy.deepcopy(model.state_dict())
    # load best weights
    model.load_state_dict(best_model_wts)
    return model
def imshow(inp, title=None):
    """Plot a normalized (C, H, W) tensor as an image, undoing the
    ImageNet mean/std normalization first."""
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    # (C, H, W) -> (H, W, C) for matplotlib, then un-normalize per channel.
    image = inp.permute(1, 2, 0).numpy()
    image = np.clip(channel_std * image + channel_mean, 0, 1)
    plt.imshow(image)
    if title:
        plt.title(title)
def visualize_model(model, dataloaders, class_names, file_names=None, num_images=6):
    """
    Generic function to display predictions for a few images.

    Arguments:
        model: trained nn.Module; evaluated on the 'val' dataloader
        dataloaders: dict providing a 'val' DataLoader
        class_names: index-aligned class labels, e.g. ['ant', 'bee']
        file_names: if given, save the figure to this path instead of showing it
        num_images: number of images to plot in a (num_images//2 x 2) grid
    """
    device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
    model.eval()
    fig = plt.figure()
    image_num = 0
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders['val']):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, dim=1)
            for j in range(inputs.size(0)):
                image_num += 1
                # BUG FIX: terminate on the running image count rather than the
                # in-batch index j — with batches smaller than num_images+1 the
                # old check never fired and subplot() overflowed the grid.
                if image_num > num_images:
                    if file_names:
                        fig.savefig(file_names)
                        plt.close(fig)
                    else:
                        # BUG FIX: was plt.imshow(fig) — imshow expects image
                        # data, not a Figure, and raises TypeError.
                        plt.show()
                    model.train()
                    return
                ax = plt.subplot(num_images//2, 2, image_num)
                ax.axis('off')
                ax.set_title(f'Predicted: {class_names[preds[j]]}')
                imshow(inputs.cpu().data[j])
|
[
"ITITIU15033@student.hcmiu.edu.vn"
] |
ITITIU15033@student.hcmiu.edu.vn
|
65faef840d04bdeb7155de6e7e23e1f6a184626b
|
4bf067cd4fa1cee2891c54b02fafcaca28e8227a
|
/random.py
|
5dffcfd8d8ead06c246861f051676c2e77a62e0b
|
[] |
no_license
|
c1c51/Python
|
e19dd8e0d90ec5015c87cd9f113c79aea7b25111
|
6dfa8ffa94d92e8741217ae09f265680e4e44951
|
refs/heads/master
| 2020-03-09T19:06:33.997743
| 2018-04-10T14:49:01
| 2018-04-10T14:49:01
| 128,949,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
import Queue
import threading
import urllib2

# called by each thread: fetch *url* and push the response body onto the
# shared queue (Python 2 — Queue/urllib2 are the py2 module names).
def get_url(q, url):
    q.put(urllib2.urlopen(url).read())

theurls = ["http://google.com", "http://yahoo.com"]
q = Queue.Queue()
for u in theurls:
    t = threading.Thread(target=get_url, args = (q,u))
    # Daemon threads so the interpreter can exit without joining fetchers.
    t.daemon = True
    t.start()

# NOTE(review): only one result is consumed — this prints whichever fetch
# finishes first ("fastest mirror" pattern); presumably intentional, confirm.
s = q.get()
print (s)
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
e06919dd2a9bbd247c1840da35f544f13f1c92bb
|
36a7c6c092799d9550233be9c735964768f34f09
|
/EVSCapp/permissions.py
|
e6f4ad1effbded5fa92093c5e13393f9be30bfdf
|
[] |
no_license
|
AmirIdris/EVSCProject
|
eea215f8480fdcee54cc2cce0a675621c8c487bb
|
ed994c240924e6c30626b7e8a8020480c8112c4e
|
refs/heads/master
| 2023-07-28T00:12:56.857669
| 2021-09-07T21:10:58
| 2021-09-07T21:10:58
| 393,363,817
| 0
| 1
| null | 2021-09-07T21:10:58
| 2021-08-06T12:01:30
|
CSS
|
UTF-8
|
Python
| false
| false
| 271
|
py
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Object-level permission: anyone may read; only the owner may write."""

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) HTTP methods are always allowed.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.user == request.user
|
[
"you@example.com"
] |
you@example.com
|
5e277ac73b8593875d3614ad8691df57cb8aa2fb
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/py2/toontown/minigame/DistributedTagTreasure.pyc.py
|
48111c1e609a7eb84bfa86c60461bd082ade4002
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# 2013.08.22 22:21:41 Pacific Daylight Time
# Embedded file name: toontown.minigame.DistributedTagTreasure
from toontown.safezone import DistributedTreasure
class DistributedTagTreasure(DistributedTreasure.DistributedTreasure):
    """Client-side treasure (an ice cream prop) used in the Tag minigame."""
    __module__ = __name__

    def __init__(self, cr):
        # cr: the client repository that manages distributed objects.
        DistributedTreasure.DistributedTreasure.__init__(self, cr)
        self.modelPath = 'phase_4/models/props/icecream'
        self.grabSoundPath = 'phase_4/audio/sfx/SZ_DD_treasure.mp3'
        # Hide the treasure when the minigame leaves the stage.
        self.accept('minigameOffstage', self.handleMinigameOffstage)

    def handleEnterSphere(self, collEntry):
        # Only toons who are not currently "it" may grab the treasure.
        if not base.localAvatar.isIt:
            self.d_requestGrab()
        return None

    def handleMinigameOffstage(self):
        # Reparent to the hidden scene graph root so the prop disappears.
        self.nodePath.reparentTo(hidden)
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\minigame\DistributedTagTreasure.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:21:41 Pacific Daylight Time
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
23207e111a4e7e07a5d636cb6326493693f5b3c4
|
532ca0c5361b54970bc435232e2a6d079c49aecd
|
/03_Conditionals and Control Flow/01_Conditionals and Control Flow/04_How the Tables Have Turned.py
|
761dda428d2ddf39b1b6f433d981dd3583040f68
|
[] |
no_license
|
haveano/codeacademy-python_v1
|
dc5484e8df73b9a15ffce835dde625b6454c8302
|
10e6fb2974e1c47f380bb6a33c50b171ecfbf50f
|
refs/heads/master
| 2021-01-11T16:45:57.337493
| 2017-05-30T10:04:08
| 2017-05-30T10:04:08
| 79,660,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
"""
How the Tables Have Turned
Comparisons result in either True or False, which are booleans as we learned before in this exercise.
# Make me true!
bool_one = 3 < 5
Let's switch it up: we'll give the boolean, and you'll write the expression, just like the example above.
Instructions
For each boolean value in the editor, write an expression that evaluates to that value.
Remember, comparators are: ==, !=, >, >=, <, and <=.
Use at least three different ones!
Don't just use True and False! That's cheating!
"""
# Create comparative statements as appropriate on the lines below!
# Make me true!
bool_one = 3 < 5 # We already did this one for you!
# Make me false!
bool_two = 13 != 14-1
# Make me true!
bool_three = 13 !=14-2
# Make me false!
bool_four = 13 >= 14
# Make me true!
bool_five = 13 <= 13
|
[
"noreply@github.com"
] |
haveano.noreply@github.com
|
fa24099fb4c61a922ec7e32ecb388a6cac3cd988
|
f2889a13368b59d8b82f7def1a31a6277b6518b7
|
/309.py
|
da9a1c3ac6e5917c4c8a202bed12b01b6642673d
|
[] |
no_license
|
htl1126/leetcode
|
dacde03de5c9c967e527c4c3b29a4547154e11b3
|
c33559dc5e0bf6879bb3462ab65a9446a66d19f6
|
refs/heads/master
| 2023-09-01T14:57:57.302544
| 2023-08-25T15:50:56
| 2023-08-25T15:50:56
| 29,514,867
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
# ref: https://leetcode.com/discuss/71391/easiest-java-solution-with
# -explanations
# NOTE: Python 2 source (xrange, print statement) — do not run under Python 3.
class Solution(object):
    def maxProfit(self, prices):
        """
        LeetCode 309: best stock profit with a one-day cooldown after selling.

        O(1)-space DP: b* is the best profit while holding a share bought by
        that day; s* is the best profit while not holding. Buying at day i may
        only follow a sale at day i-2 or earlier (hence s2 in the b0 update).

        :type prices: List[int]
        :rtype: int
        """
        if len(prices) < 2:
            return 0
        b0 = -prices[0] # max profit for buying at 0
        b1 = b0 # max profit for buying at 1
        s0, s1, s2 = 0, 0, 0 # max profit for selling/resting at i, i - 1, i - 2
        for i in xrange(1, len(prices)):
            b0 = max(b1, s2 - prices[i])
            s0 = max(s1, b1 + prices[i])
            b1 = b0
            s2 = s1
            s1 = s0
        return s0

if __name__ == '__main__':
    sol = Solution()
    # Expected output: 3 (buy 1, sell 3, cooldown, buy 0, sell 2).
    print sol.maxProfit([1, 2, 3, 0, 2])
|
[
"b93902098@ntu.edu.tw"
] |
b93902098@ntu.edu.tw
|
468fdc36ae7001294a1493c1070b5c443b66e893
|
bc97d423d19756fbf33affd4ed98d4628d8878b3
|
/my_project/itproger/main/urls.py
|
49b1548ce7162c1a6ed6511e5d6aa41220dc9528
|
[] |
no_license
|
David-Hakobyan1/MY_Django
|
40d63232805679bb5416d12a4ebba94fcb097959
|
fdcd61a76d131ca47a203bc291212494c3587637
|
refs/heads/main
| 2023-06-19T15:58:42.315023
| 2021-07-18T09:55:28
| 2021-07-18T09:55:28
| 381,956,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
from django.urls import path

from . import views

# CRUD-style routes for the main app; each view is addressable by name.
urlpatterns = [
    path('', views.index, name='index'),
    path('create', views.create, name='create'),
    path('read', views.read, name='read'),
    path('update', views.update, name='update'),
    path('delete', views.delete, name='delete'),
]
|
[
"my@mail.ru"
] |
my@mail.ru
|
f17fd9f2d917793ba4512aa37f689ece8ed71944
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/gui/scaleform/genconsts/quests_season_awards_types.py
|
556871ca4a30e5a322e6482ad59791a4ee691bce
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144
| 2016-08-04T18:08:34
| 2016-08-04T18:08:34
| 64,955,694
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 496
|
py
|
# 2016.08.04 19:52:02 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/QUESTS_SEASON_AWARDS_TYPES.py
class QUESTS_SEASON_AWARDS_TYPES(object):
VEHICLE = 1
FEMALE_TANKMAN = 2
COMMENDATION_LISTS = 3
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\genconsts\quests_season_awards_types.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:52:02 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
53e57c810578fe9bfb63bc0c3c4a5616ac1b4306
|
70d39e4ee19154a62e8c82467ef75b601e584738
|
/docker/mac-robber.py
|
2503134682dfb5d51eb85125c86ad0210ea31582
|
[] |
no_license
|
babywyrm/sysadmin
|
6f2724be13ae7e5b9372278856a8c072073beffb
|
2a5f3d29c7529bc917d4ff9be03af30ec23948a5
|
refs/heads/master
| 2023-08-16T03:50:38.717442
| 2023-08-16T03:05:55
| 2023-08-16T03:05:55
| 210,228,940
| 10
| 5
| null | 2023-05-01T23:15:31
| 2019-09-22T23:42:50
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 5,827
|
py
|
#!/usr/bin/env python
#
# Author: Jim Clausing
# Date: 2017-09-01
# Version: 1.2.0
#
# Desc: rewrite of the sleithkit mac-robber in Python
# Unlinke the TSK version, this one can actually includes the MD5 & inode number
# though I still return a 0 in the MD5 column for non-regular files, but calculating
# hashes likely will modify atime, so it is turned off by default
#
# Note: in Python 2.7.x, st_ino, st_dev, st_nlink, st_uid, and st_gid are dummy variables
# on Windows systems. This is apparently fixed in current Python 3 versions.
# On *ALL* systems, os.stat does not return btime, so we put 0 there. :-(
#
# A useful way to use this on a live Linux system is with read-only --bind mounts
#
# # mount --bind / /mnt
# # mount -o ro,remount,bind /mnt
# # ./mac-robber.py -5 -x /mnt/tmp -r /mnt -m system-foo:/ /mnt
#
# This gets us hashes, but because the bind mount is read-only doesn't update atimes
#
# Copyright (c) 2017 AT&T Open Source. All rights reserved.
#
import os
import sys
import argparse
import hashlib
from stat import *
__version_info__ = (1,2,3)
__version__ = ".".join(map(str, __version_info__))
def mode_to_string(mode):
    """Render a raw st_mode value as an ls-style 10-character string,
    e.g. 0o100644 -> '-rw-r--r--', including setuid/setgid/sticky bits.

    Octal literals use the 0o form, valid on Python 2.6+ (this script's
    stated minimum) and Python 3.
    """
    lookup = ['---','--x','-w-','-wx','r--','r-x','rw-','rwx']
    if S_ISDIR(mode):
        mode_str = 'd'
    elif S_ISCHR(mode):
        mode_str = 'c'
    elif S_ISBLK(mode):
        mode_str = 'b'
    elif S_ISREG(mode):
        mode_str = '-'
    elif S_ISFIFO(mode):
        mode_str = 'p'
    elif S_ISLNK(mode):
        mode_str = 'l'
    elif S_ISSOCK(mode):
        # BUG FIX: was `elif S_ISSOCK:` — the bare function object is always
        # truthy, so every unmatched type was reported as a socket.
        mode_str = 's'
    else:
        mode_str = '?'
    own_mode = lookup[(mode & 0o700) >> 6]
    if mode & 0o4000:  # setuid
        if mode & 0o100:
            own_mode = own_mode.replace('x','s')
        else:
            # BUG FIX: was own_mode[:1] + 'S', which dropped the write bit.
            own_mode = own_mode[:2] + 'S'
    mode_str = mode_str + own_mode
    grp_mode = lookup[(mode & 0o070) >> 3]
    if mode & 0o2000:  # setgid
        if mode & 0o010:
            grp_mode = grp_mode.replace('x','s')
        else:
            # BUG FIX: same truncation as above.
            grp_mode = grp_mode[:2] + 'S'
    # BUG FIX: the owner triad was appended twice; the group triad was lost.
    mode_str = mode_str + grp_mode
    oth_mode = lookup[(mode & 0o007)]
    if mode & 0o1000:  # sticky bit
        if mode & 0o001:
            oth_mode = oth_mode.replace('x','t')
        else:
            oth_mode = oth_mode[:2] + 'T'
    mode_str = mode_str + oth_mode
    return mode_str
def process_item(dirpath,item):
    """Build one pipe-delimited "body file" record for *item* inside *dirpath*.

    Fields: md5|name|inode|mode|uid|gid|size|atime|mtime|ctime|btime.
    Returns None for excluded or unstat-able entries. Reads options from the
    module-level `args` namespace (exclude list, hashing flag, path prefixes).
    btime is always 0: os.stat exposes no creation time here.
    """
    md5 = hashlib.md5()
    fname = os.path.join(dirpath,item)
    # Honour exclusions on either the full path or its containing directory.
    if args.exclude and (fname in args.exclude or dirpath in args.exclude):
        return
    try:
        # lstat symlinks so the link itself is recorded, not its target.
        if os.path.islink(fname):
            status = os.lstat(fname)
        else:
            status = os.stat(fname)
    except IOError:
        return
    except OSError:
        return
    if args.hashes and S_ISREG(status.st_mode):
        try:
            # Never hash /proc/.../kcore — it is effectively endless.
            if not (fname.find('/proc/') != -1 and fname.endswith('/kcore')) and status.st_size > 0:
                with open(fname, "rb") as f:
                    for block in iter(lambda: f.read(65536), b""):
                        md5.update(block)
                md5str = md5.hexdigest()
            elif status.st_size == 0:
                # Well-known MD5 of the empty string; avoids touching the file.
                md5str = "d41d8cd98f00b204e9800998ecf8427e"
            else:
                md5str = "0"
        except IOError:
            md5str = "0"
    else:
        # Hashing disabled or not a regular file.
        md5str = "0"
    mode = mode_to_string(status.st_mode)
    if os.path.islink(fname) and status.st_size > 0:
        mode = mode + ' -> ' + os.readlink(fname)
    if sys.version_info<(2,7,0):
        mtime = '%20.9f' % (status.st_mtime)
        atime = '%20.9f' % (status.st_atime)
        # BUG FIX: the ctime column previously repeated st_mtime.
        ctime = '%20.9f' % (status.st_ctime)
    else:
        mtime = '{:20.9f}'.format(status.st_mtime)
        atime = '{:20.9f}'.format(status.st_atime)
        # BUG FIX: the ctime column previously repeated st_mtime.
        ctime = '{:20.9f}'.format(status.st_ctime)
    btime = 0
    size = status.st_size
    uid = status.st_uid
    gid = status.st_gid
    inode = status.st_ino
    # Strip the read-only bind-mount prefix, then apply the body-file prefix.
    if args.rmprefix:
        if fname.startswith(args.rmprefix):
            fname = fname[len(args.rmprefix):]
    if args.prefix:
        if fname.find('/') == 0:
            fname = args.prefix + fname
        else:
            fname = args.prefix + '/' + fname
    return md5str+'|'+fname+'|'+str(inode)+'|'+mode+'|'+str(uid)+'|'+str(gid)+'|'+str(size)+'|'+atime+'|'+mtime+'|'+ctime+'|'+str(btime)
# Python 2 script body (print statements below). Require >= 2.6 for the
# argparse/os.walk behaviour relied on here.
if sys.version_info<(2,6,0):
    sys.stderr.write("Not tested on versions earlier than 2.6\n")
    exit(1)
parser = argparse.ArgumentParser(description='collect data on files')
parser.add_argument('directories', metavar='DIR', nargs='+', help='directories to traverse')
parser.add_argument('-m','--prefix', metavar='PREFIX', help='prefix string')
parser.add_argument('-5','--hashes', action='store_true', help='do MD5 calculation (disabled by default)', default=False)
parser.add_argument('-x','--exclude', metavar='EXCLUDE', action='append', help='directory trees or files to exclude, does not handle file extensions or regex', default=[])
parser.add_argument('-r','--rmprefix', metavar='RMPREFIX', help='prefix to remove, useful when using read-only --bind mount to prevent atime updates')
parser.add_argument('-V','--version', action='version', help='print version number',
                    version='%(prog)s v{version}'.format(version= __version__))
args = parser.parse_args()
# Walk each requested tree, emitting one body-file record per entry.
for directory in args.directories:
    for dirpath,dirs,files in os.walk(directory):
        # Prune excluded directory names in-place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in args.exclude]
        for directory in dirs:
            outstr = process_item(dirpath,directory)
            if outstr is not None:
                print outstr
                # Flush so partial output survives an interrupted run.
                sys.stdout.flush()
        for filename in files:
            if filename in args.exclude:
                continue
            outstr = process_item(dirpath,filename)
            if outstr is not None:
                print outstr
                sys.stdout.flush()
#########################################
#########################################
|
[
"noreply@github.com"
] |
babywyrm.noreply@github.com
|
d4323a8fa1e1648c6105fb1c105c9320a7657887
|
90d3b9467dcc6763865cad90a04a247cafcf5862
|
/shopee/child_app/transport/urls.py
|
2623f3b186fcd57a552bb35e8ab754ee8ca7fb7d
|
[] |
no_license
|
vandat9xhn/django_1
|
0fa51515549eab04c27bdfeaf9e43650fe44dc70
|
6669e172d6b5a2a729dd31ea43d6c08f76b6e19c
|
refs/heads/master
| 2023-06-23T19:46:26.558871
| 2021-07-26T15:11:12
| 2021-07-26T15:11:12
| 375,704,827
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.urls import path

from . import views

# Route table for transport resources (list endpoint only).
urlpatterns = [
    path('transport-l/', views.TransportViewL.as_view()),
]
|
[
"vandat9xiloveyou@gmail.com"
] |
vandat9xiloveyou@gmail.com
|
62e9919f8fe5745cb117876ae51196d4b4bc314f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_029/ch178_2020_08_14_14_38_35_094615.py
|
bfc04d96c2b6f66c5cb150cd9294fc1f83977d91
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
def junta_nomes(a, b, c):
    """Concatenate every name in *a* with every surname in *c*; for each
    entry of *b* the same a-with-c block is appended once more (this
    reproduces the original implementation's behaviour exactly — the names
    in *b* themselves are never used in the output)."""
    juntados = []
    for nome in a:
        combinacoes = [nome + sobrenome for sobrenome in c]
        juntados.extend(combinacoes)
        # One extra copy of the block per entry of b, mirroring the original
        # nested loop (which looped over b but still concatenated `nome`).
        for _ in b:
            juntados.extend(combinacoes)
    return juntados
|
[
"you@example.com"
] |
you@example.com
|
25cd32fcfa59a1dd6d38c88daf0e48f1bd5f8283
|
32a81b96a631fa5f7cd3e1da79499b36f1cbbf86
|
/src/artifice/scraper/config/settings.py
|
aa602920a6b6d62fb2e9637ebe0dae41b8846510
|
[] |
no_license
|
minelminel/celery-rabbitmq-example
|
f4e696f24d924395d09934a33fba9ea9ca065603
|
4eccabf46aec855cfa4738a06a992f71232d6364
|
refs/heads/master
| 2020-07-01T11:44:43.656850
| 2019-08-30T04:08:13
| 2019-08-30T04:08:13
| 201,165,312
| 0
| 0
| null | 2019-08-30T04:08:13
| 2019-08-08T02:48:51
|
Python
|
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
try:
    import configparser
except ImportError:
    # Python 2 names the module ConfigParser; init_from raises loudly if a
    # config file is supplied while configparser is unavailable.
    configparser = None
import os
import logging

log = logging.getLogger(__name__)
# Absolute directory containing this settings module (.../scraper/config).
loc = os.path.dirname(os.path.abspath(__file__))
class Settings(object):
    """
    Default configuration for the scraper.

    The settings can be changed by setting up a config file.
    For an example of a config file, see
    `scraper.cfg` in the main-directory.
    """

    def __init__(self):
        """
        Sets the default values for the project
        """
        # BASE_DIR:///artifice/scraper/
        self.BASE_DIR = os.path.dirname(loc)

        # prototypes: private building blocks composed into the public
        # settings below
        self._eth0 = '0.0.0.0'
        self._exposed_port = 8080
        self._db_name = 'site.db'
        self._redis_pword = 'password'
        self._redis_host = 'localhost'
        self._redis_port = 6379
        self._celery_broker_uname = 'michael'
        self._celery_broker_pword = 'michael123'
        self._celery_broker_host = 'localhost'
        self._celery_broker_virtual_host = 'michael_vhost'

        # flask
        self.TESTING = False
        self.URL_PREFIX = ''
        self.FLASK_PORT = self._exposed_port
        self.FLASK_HOST = '0.0.0.0'
        self.FLASK_DEBUG = False
        self.FLASK_USE_RELOADER = False
        self.FLASK_THREADED = True

        # logging
        self.LOG_FILE = 'flask.log'
        self.LOG_LEVEL = 'INFO'
        self.CELERY_LOG_LEVEL = 'ERROR'
        self.CELERY_LOG_FILE = 'celery.log'
        self.STDOUT = True

        # database
        self.DROP_TABLES = True
        self.SQLALCHEMY_TRACK_MODIFICATIONS = False
        self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(
            os.path.join(self.BASE_DIR, self._db_name))

        # redis
        # BUG FIX: the password belongs after the colon ("redis://:pass@host");
        # the old format ("redis://pass:@host") sent it as the username with an
        # empty password, so authentication against Redis failed.
        self.REDIS_URL = 'redis://:{}@{}:{}/0'.format(
            self._redis_pword,
            self._redis_host,
            self._redis_port)
        self.REDIS_HIT_COUNTER = 'HIT_COUNTER'

        # defaults
        self.ARGS_DEFAULT_LIMIT = 10
        self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE']
        self.SUPERVISOR_ENABLED = True
        self.SUPERVISOR_DEBUG = False
        self.SUPERVISOR_POLITE = 1

        # celery (AMQP broker, RPC result backend)
        self.CELERY_WORKERS = 8
        self.CELERY_MODULE = 'background'
        self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format(
            self._celery_broker_uname,
            self._celery_broker_pword,
            self._celery_broker_host,
            self._celery_broker_virtual_host)
        self.CELERY_BACKEND = 'rpc://'
        self.CELERY_INCLUDE = ['artifice.scraper.background.tasks']

        # endpoints exposed by the scraper service itself
        self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port)
        self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port)
        self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)

    def init_from(self, file=None, envvar=None, log_verbose=False):
        """
        Overlay settings from an INI file, given directly or named by the
        environment variable *envvar*. Does nothing when no file is supplied;
        raises under Python 2 where `configparser` is None.
        """
        if envvar:
            file = os.getenv(envvar)
        if log_verbose:
            log.info("Running with config from: " + (str(file)))
        if not file:
            return
        try:
            parser = configparser.RawConfigParser()
            parser.read(file)
            # parse prototypes
            # parse flask
            # parse logging
            # parse database
            # parse redis
            # parse defaults
            # parse celery
            # parse endpoints
        except AttributeError:
            log.info("Cannot use configparser in Python2.7")
            raise
|
[
"ctrlcmdspace@gmail.com"
] |
ctrlcmdspace@gmail.com
|
f355b0f96a8d88e8aad867f986f3cfa6975d11e8
|
e7af5a3e76e674be0a85628067fa494348d45123
|
/Python-for-Finance-Second-Edition-master/Chapter01/c1_04_def_pv_funtion.py
|
0b20696756b3ed519e37d740043058e85838ece9
|
[
"MIT"
] |
permissive
|
SeyedShobeiri/Work
|
8321ead6f11de8297fa18d70a450602f700f26fb
|
f758e758106fbd53236a7fadae42e4ec6a4e8244
|
refs/heads/master
| 2022-07-25T02:33:25.852521
| 2020-05-17T16:11:27
| 2020-05-17T16:11:27
| 264,706,380
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
# -*- coding: utf-8 -*-
"""
Name : c1_04_def_pv_function.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""

def pv_f(pv, r, n):
    """Present value of a single cash flow *pv* received after *n*
    periods, discounted at the per-period rate *r*."""
    discount_factor = (1 + r) ** n
    return pv / discount_factor

# Example: 100 due in 2 periods at 10% per period.
pv = pv_f(100, 0.1, 2)
print(pv)
|
[
"shobeiri@math.uh.edu"
] |
shobeiri@math.uh.edu
|
a84c8d31de4dc825e18cbe3145a69c31faa63f3c
|
6d6bebce1a3d819c28cf583f3c46c8235ffccfd2
|
/WildlifeObservations/observations/migrations/0008_taxonomysubfamily.py
|
91c79afbcb1c9ad4a2816de568ceb34033315e61
|
[
"MIT"
] |
permissive
|
jen-thomas/wildlife-observations
|
7a20366164d467d73c44e8c844cc99fe26716152
|
e6a6b6594e60fe080f253481720d80a38a9f7411
|
refs/heads/main
| 2023-05-25T17:20:15.506403
| 2023-05-16T13:03:05
| 2023-05-16T13:03:05
| 450,234,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
# Generated by Django 3.2.11 on 2022-05-05 08:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the TaxonomySubfamily model — a unique
    # subfamily name linked to a TaxonomyFamily. on_delete=PROTECT means a
    # family cannot be deleted while subfamilies still reference it.

    dependencies = [
        ('observations', '0007_alter_identification_confidence'),
    ]

    operations = [
        migrations.CreateModel(
            name='TaxonomySubfamily',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subfamily', models.CharField(max_length=255, unique=True)),
                ('family', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='observations.taxonomyfamily')),
            ],
            options={
                'verbose_name_plural': 'Taxonomy subfamilies',
            },
        ),
    ]
|
[
"jenny_t152@yahoo.co.uk"
] |
jenny_t152@yahoo.co.uk
|
c7d8f13bd8fb9ab354250cbb8f69282bb7f5d574
|
8362a53892f45a1a7adcca7da5cd6827bd5c55fd
|
/tests/test_database.py
|
89f622e5642f98b20ec639311a2fc9d55c641d1c
|
[
"MIT"
] |
permissive
|
accent-starlette/starlette-core
|
1a414969ae05ba90c96f184a206c0eb97c1d33fc
|
88e94be0cc65e457e32f2586a3c8860c6c08fca9
|
refs/heads/master
| 2022-01-27T07:57:50.570112
| 2022-01-04T16:39:26
| 2022-01-04T16:39:26
| 185,164,201
| 13
| 7
|
MIT
| 2021-12-10T12:03:59
| 2019-05-06T09:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 3,037
|
py
|
import pytest
import sqlalchemy as sa
from starlette.exceptions import HTTPException
from starlette_core.database import Base, Session
class User(Base):
    # Minimal model used as the fixture table for the tests below.
    name = sa.Column(sa.String(50))
def test_database(db):
    """The engine connects and create_all/drop_all round-trip the schema."""
    # connects ok
    db.engine.connect()
    # can create tables
    db.create_all()
    assert "user" in db.engine.table_names()
    # can drop tables
    db.drop_all()
    assert [] == db.engine.table_names()
def test_database__truncate_of_db(db):
    """truncate_all(force=True) empties the tables without dropping them."""
    db.create_all()
    user = User(name="bill")
    user.save()
    assert User.query.count() == 1
    db.truncate_all(force=True)
    assert User.query.count() == 0
def test_session(db):
    """A raw Session can add and commit an object directly."""
    db.create_all()
    # basic session usage
    user = User(name="bill")
    session = Session()
    session.add(user)
    session.commit()
    session.close()
def test_declarative_base__save(db):
    """save() persists the instance and assigns a primary key."""
    db.create_all()
    user = User(name="ted")
    user.save()
    assert User.query.get(user.id) == user
def test_declarative_base__delete(db):
    """delete() removes the row from the database."""
    db.create_all()
    user = User(name="ted")
    user.save()
    user.delete()
    assert User.query.get(user.id) is None
def test_declarative_base__refresh_from_db(db):
    """refresh_from_db() discards unsaved in-memory changes."""
    db.create_all()
    user = User(name="ted")
    user.save()
    user.name = "sam"
    user.refresh_from_db()
    assert user.name == "ted"
def test_declarative_base__can_be_deleted(db):
    """can_be_deleted() honours each foreign-key ondelete strategy."""
    class OrderA(Base):
        # default ondelete: a referencing row blocks deletion
        user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))

    class OrderB(Base):
        # SET NULL: referencing rows never block deletion
        user_id = sa.Column(
            sa.Integer, sa.ForeignKey(User.id, ondelete="SET NULL"), nullable=True
        )

    class OrderC(Base):
        # CASCADE: referencing rows are removed with the user
        user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id, ondelete="CASCADE"))

    class OrderD(Base):
        # RESTRICT: referencing rows block deletion
        user_id = sa.Column(sa.Integer, sa.ForeignKey(User.id, ondelete="RESTRICT"))

    db.create_all()
    user = User(name="ted")
    user.save()
    assert user.can_be_deleted()
    # default
    order = OrderA(user_id=user.id)
    order.save()
    assert not user.can_be_deleted()
    order.delete()
    assert user.can_be_deleted()
    # set null
    order = OrderB(user_id=user.id)
    order.save()
    assert user.can_be_deleted()
    # cascade
    order = OrderC(user_id=user.id)
    order.save()
    assert user.can_be_deleted()
    # restrict
    order = OrderD(user_id=user.id)
    order.save()
    assert not user.can_be_deleted()
    order.delete()
    assert user.can_be_deleted()
def test_declarative_base__repr(db):
    """__tablename__ is derived from the class; repr/str show class and id."""
    db.create_all()
    user = User()
    assert user.__tablename__ == "user"
    assert repr(user) == f"<User, id={user.id}>"
    assert str(user) == f"<User, id={user.id}>"
def test_declarative_base__query(db):
    """get_or_404 returns the row, or raises HTTP 404 when absent."""
    db.create_all()
    user = User(name="ted")
    user.save()
    # get_or_404
    assert User.query.get_or_404(user.id) == user
    # get_or_404 raises http exception when no result found
    with pytest.raises(HTTPException) as e:
        User.query.get_or_404(1000)
    assert e.value.status_code == 404
|
[
"stuart@accentdesign.co.uk"
] |
stuart@accentdesign.co.uk
|
c21d74a662d5db8b34c6793c5b0def3026ab0cfe
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_MY_ORGS/Web-Dev-Collaborative/blog-research/ciriculumn/week.18-/W18D2_lectures/08-posts/app/routes.py
|
91d6b5ace00c8f67a076bc546d9f4e510c7630de
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from flask import render_template, redirect
from app import app
from app.forms.login import LoginForm
@app.route('/')
def index():
    """Landing page."""
    context = {'title': 'Welcome'}
    return render_template('page.html', **context)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form; on a valid POST, redirect home."""
    form = LoginForm()
    if not form.validate_on_submit():
        # GET request, or POST that failed validation: show the form again.
        return render_template('login.html', form=form)
    return redirect('/')
@app.route('/help')
def help():
    """Help page. (The view name shadows the builtin `help`; Flask only
    uses it as the endpoint name, so this is harmless here.)"""
    context = {'title': 'Help'}
    return render_template('page.html', **context)
@app.route('/item/<int:id>')
def item(id):
    """Detail page for item *id*; ids outside 1..99 get a not-found page."""
    # Guard clause: bail out early on out-of-range ids.
    if not (0 < id < 100):
        return '<h1>Sample App</h1><h2>Item Not Found</h2>'
    item = {
        "id": id,
        "name": f"Fancy Item {id}",
        "description": "Coming soon!",
    }
    return render_template('item.html', item=item)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
a0c82a506c91a3a7c0b678adac1283adedd35094
|
6bd047eb1951601a5a7bab564eb2abba92c6c004
|
/prices/api/queries.py
|
0fdac2905d72924ea823efd6ca273a290b653fd8
|
[] |
no_license
|
volgoweb/DDD_sandbox
|
6ab2b43d3fcad8eb2f802bd485e5dbc05eb2e10d
|
700c2848d5341ab267e69326bac2487657450d22
|
refs/heads/master
| 2021-01-01T15:46:13.244679
| 2017-07-11T06:18:36
| 2017-07-11T06:18:36
| 97,695,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from utils.queries import IQuery
class GetProductPricingForOneProduct(IQuery):
    """Query object: fetch pricing for a single product."""

    def __init__(self, product_id: int):
        self.product_id = product_id

    @classmethod
    def get_query_type_name(cls):
        # Dotted name used by the query bus to route this query.
        return 'prices.GetProductPricingForOneProduct'
class GetProductPricingForManyProducts(IQuery):
    """Query object: fetch pricing for many products.

    NOTE(review): despite the name this takes a single ``product_id`` —
    presumably it should accept a collection of ids; confirm with handlers.
    """

    def __init__(self, product_id: int):
        self.product_id = product_id

    @classmethod
    def get_query_type_name(cls):
        # Dotted name used by the query bus to route this query.
        return 'prices.GetProductPricingForManyProducts'
class GetProductPricingForAllProducts(IQuery):
    """Query object requesting pricing for every product (no parameters)."""

    @classmethod
    def get_query_type_name(cls):
        """Return the globally unique name used to route this query."""
        return 'prices.GetProductPricingForAllProducts'
|
[
"volgoweb@bk.ru"
] |
volgoweb@bk.ru
|
54ab05db85f18373b1cd489a5310e729a167c100
|
08e039046e2b3c526b5fd2169e02d5c5bbe253c5
|
/0x04-python-more_data_structures/0-main.py
|
f37cdb42818fedc7a8af51018ae298fef65412fb
|
[] |
no_license
|
VinneyJ/alx-higher_level_programming
|
22a976a22583334aff1f0c4120fb81117905e35b
|
0ea8719ec5f28c76faf06bb5e67c14abb71fa3d0
|
refs/heads/main
| 2023-07-31T15:44:30.390103
| 2021-10-01T21:27:31
| 2021-10-01T21:27:31
| 361,816,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
#!/usr/bin/python3
# Driver script: squares every element of a small matrix and prints both
# the result and the original (module name starts with a digit, hence
# the __import__ call instead of a normal import statement).
square_matrix_simple = __import__('0-square_matrix_simple').square_matrix_simple

matrix = [
    [1, 2, 3],
    [4, 5, 6],
    [7, 8, 9]
]

new_matrix = square_matrix_simple(matrix)
print(new_matrix)
# Printing the input afterwards — presumably to show it was not mutated;
# the implementation lives in 0-square_matrix_simple.
print(matrix)
|
[
"vincentjayden49@gmail.com"
] |
vincentjayden49@gmail.com
|
eb439ef5321ace82cb57ceda8a14ba4f5978f5b8
|
52508ce70294ec29f84c9e551d8b92a7b402913d
|
/anyflow/flow.py
|
647b6d25ed535d940110f411ddcc5ba175e061f2
|
[
"MIT"
] |
permissive
|
Cologler/anyflow-python
|
0f18003a39cb645c24aa5549df4ee39173977fff
|
cde20b0c74faf18cb7dc503072d4c2f99d5681de
|
refs/heads/master
| 2021-07-30T15:12:19.478682
| 2021-07-29T14:08:22
| 2021-07-29T14:08:22
| 241,824,800
| 0
| 0
|
MIT
| 2021-07-29T14:08:22
| 2020-02-20T07:53:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,957
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from typing import Callable, Any, List
from abc import ABC, abstractmethod
from .err import Abort
from .ctx import FlowContext
#Next = Callable[[], Any]
class MiddlewareInvoker:
    """Drives a middleware chain: builds each middleware from its factory
    and calls it with the flow context and a `Next` continuation."""

    __slots__ = ('_ctx', '_factorys')

    def __init__(self, factorys: list, ctx: FlowContext):
        super().__init__()
        self._factorys = factorys
        self._ctx = ctx

    def invoke(self) -> Any:
        """Start the chain at the first middleware; no-op when empty."""
        if not self._factorys:
            return None
        return self.run_middleware(0)

    def run_middleware(self, index) -> Any:
        """Instantiate and call the middleware at `index`."""
        make_middleware = self._factorys[index]
        middleware = make_middleware(self._ctx)
        continuation = Next(self, index + 1)
        return middleware(self._ctx, continuation)

    def has_next(self, next_index: int):
        'return whether has the next middleware.'
        return next_index < len(self._factorys)
class Next:
    """Callable continuation handed to each middleware.

    Calling it runs the rest of the chain at most once (the result is
    memoized), or yields `or_value` when no middleware remains.
    """

    __slots__ = ('_invoker', '_next_index', '_retvals')

    def __init__(self, invoker: MiddlewareInvoker, next_index: int):
        super().__init__()
        self._invoker = invoker
        self._next_index = next_index
        self._retvals = None

    def __call__(self, or_value=None):
        # Memoize in a 1-tuple so that a legitimate `None` result is also
        # cached and the rest of the chain never runs twice.
        if not self._retvals:
            if self._invoker.has_next(self._next_index):
                result = self._invoker.run_middleware(self._next_index)
            else:
                result = or_value
            self._retvals = (result,)
        return self._retvals[0]

    @property
    def is_nop(self):
        """True when no further middleware exists to run."""
        return not self._invoker.has_next(self._next_index)
# A middleware is called with the flow context and a `Next` continuation;
# its return value becomes the result of the chain.
Middleware = Callable[[FlowContext, Next], Any]
# A factory receives the run's context and returns a middleware.
MiddlewareFactory = Callable[[FlowContext], Middleware]
class Flow:
    """Composable middleware pipeline.

    Middlewares (or factories producing them) are registered with
    :meth:`use` / :meth:`use_factory` and executed by :meth:`run`.
    """

    def __init__(self, *, ctx_cls=FlowContext, state: dict=None):
        """
        :param ctx_cls: context class; must be a subclass of FlowContext.
        :param state: base state dict, copied into every run's context.
        :raises TypeError: when `ctx_cls` is not a FlowContext subclass.
        """
        super().__init__()
        if not issubclass(ctx_cls, FlowContext):
            raise TypeError(f'excepted subclass of FlowContext, got {ctx_cls}')
        self._ctx_cls = ctx_cls
        self._factorys = []
        # When True, an `Abort` raised inside the chain is swallowed and
        # run() returns None instead of propagating.
        self.suppress_abort = False
        self._state = dict(state or ()) # make a clone

    def run(self, state: dict=None):
        """Execute the middleware chain once and return its result.

        Entries in `state` override the flow-level base state for this run.
        """
        ctx_state = self._state.copy()
        ctx_state.update(state or ())
        ctx = self._ctx_cls(ctx_state)
        # Copy the factory list so middlewares added during a run do not
        # affect the chain currently executing.
        invoker = MiddlewareInvoker(self._factorys.copy(), ctx)
        try:
            return invoker.invoke()
        except Abort:
            if not self.suppress_abort:
                raise

    def use(self, middleware: Middleware=None):
        '''
        Register a middleware.

        *this method can use as decorator.*

        Bug fix: this now returns the middleware. Previously it returned
        ``None``, so decorating a function with ``@flow.use`` rebound the
        decorated name to ``None``.
        '''
        if middleware is None:
            return lambda m: self.use(m)
        self.use_factory(lambda _: middleware)
        return middleware

    def use_factory(self, middleware_factory: MiddlewareFactory=None):
        '''
        Register a factory that builds a middleware per run context.

        *this method can use as decorator.*

        Returns the factory for the same decorator-friendliness reason
        as :meth:`use`.
        '''
        if middleware_factory is None:
            return lambda mf: self.use_factory(mf)
        self._factorys.append(middleware_factory)
        return middleware_factory
|
[
"skyoflw@gmail.com"
] |
skyoflw@gmail.com
|
bec64a7169611c133f6effb658d194136f903149
|
feff273063b4c89bde3aa190b4e49c83ab1e5855
|
/memphis/view/layout.py
|
6a21cc666494b2107e8bc16e66c706a54a3bb83b
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
mcdonc/memphis
|
7d53b8f77f7bab7c20a258a9ab33d1cc663711a2
|
daef09507eacb32b235faf070a0146ffb5cf035f
|
refs/heads/master
| 2016-09-05T23:04:01.578991
| 2011-10-11T03:37:43
| 2011-10-11T03:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
""" layout implementation """
import sys, logging
from zope import interface
from pyramid.interfaces import IRequest, IRouteRequest
from memphis import config
from memphis.view.base import View
from memphis.view.formatter import format
from memphis.view.interfaces import ILayout
from memphis.view.customize import LayerWrapper
log = logging.getLogger('memphis.view')
def queryLayout(request, context, name=''):
    """ query named layout for context """
    # Walk up the __parent__ chain until some ancestor has a layout
    # adapter registered for (context, request) under `name`.
    node = context
    while node is not None:
        layout = config.registry.queryMultiAdapter(
            (node, request), ILayout, name)
        if layout is not None:
            return layout
        node = getattr(node, '__parent__', None)
    return None
class Layout(View):
    """Base class for layouts: wraps rendered view content in a template
    and recursively delegates to a parent layout when one is configured."""
    interface.implements(ILayout)

    # Registration name of this layout (also exposed as __name__).
    name = ''
    # Page template; when None, render() passes content through unchanged.
    template = None
    # The view being wrapped and the context that view was rendered for.
    view = None
    viewcontext = None

    @property
    def __name__(self):
        return self.name

    def render(self, content, **kwargs):
        # Render `content` through this layout's template, or return it
        # unchanged when no template is configured.
        if self.template is None:
            return content

        kwargs.update({'view': self,
                       'content': content,
                       'context': self.context,
                       'request': self.request,
                       'format': format})
        return self.template(**kwargs)

    def __call__(self, content, layout=None, view=None):
        # Render `content`, then hand the result to the parent layout
        # named by self.layout (resolved via queryLayout), if any.
        if view is not None:
            self.view = view
            self.viewcontext = getattr(view, 'context', self.context)
        if layout is not None:
            # Inherit view/viewcontext from the chaining child layout.
            self.view = layout.view or self.view
            self.viewcontext = layout.viewcontext or self.viewcontext

        # update() comes from the base View — presumably returns extra
        # template kwargs; verify against memphis.view.base.
        result = self.render(content, **(self.update() or {}))
        if self.layout is None:
            return result

        parent = getattr(view, '__parent__', self.context)
        if self.name != self.layout:
            # Parent layout has a different name: resolve it from here up.
            layout = queryLayout(self.request, parent, self.layout)
            if layout is not None:
                return layout(result, layout=self, view=view)
        else:
            # Same name: start the search strictly above the current
            # context to avoid resolving to this very layout again.
            if layout is not None:
                context = layout.context
            else:
                context = self.context

            parent = getattr(context, '__parent__', None)
            if parent is not None:
                layout = queryLayout(self.request, parent, self.layout)
                if layout is not None:
                    return layout(result, view=view)

        # No parent layout found: warn and render the result once more
        # through this layout's own template.
        log.warning("Can't find parent layout: '%s'"%self.layout)
        return self.render(result)
def registerLayout(
    name='', context=None, parent='',
    klass=Layout, template = None, route=None, layer=''):
    """Register a layout class declaratively.

    The actual registration is deferred through memphis.config's action
    machinery (so layers can override it); see registerLayoutImpl.
    """
    if not klass or not issubclass(klass, Layout):
        raise ValueError("klass has to inherit from Layout class")

    # Discriminator makes the registration unique per (name, context,
    # route, layer) so conflicting declarations are detectable.
    discriminator = ('memphis.view:layout', name, context, route, layer)

    info = config.DirectiveInfo()
    info.attach(
        config.Action(
            LayerWrapper(registerLayoutImpl, discriminator),
            (klass, name, context, template, parent, route),
            discriminator = discriminator)
        )
def registerLayoutImpl(klass, name, context, template, parent, route_name):
    """Perform the actual adapter registration for a layout.

    `parent` selects the chained layout: '' means no parent, '.' means
    the unnamed layout, anything else names a specific parent layout.
    """
    if klass in _registered:
        raise ValueError("Class can't be reused for different layouts")

    if not parent:
        layout = None
    elif parent == '.':
        layout = ''
    else:
        layout = parent

    # class attributes
    cdict = {'name': name,
             'layout': layout}
    if template is not None:
        cdict['template'] = template

    if issubclass(klass, Layout) and klass is not Layout:
        # Custom subclass: patch the attributes onto it directly and
        # remember it so it cannot be registered twice.
        layout_class = klass
        _registered.append(klass)
        for attr, value in cdict.items():
            setattr(layout_class, attr, value)
    else:
        # Plain Layout: synthesize a dedicated subclass per registration.
        layout_class = type(str('Layout<%s>'%name), (Layout,), cdict)

    # register layout
    request_iface = IRequest
    if route_name is not None:
        # Route-specific registrations adapt the route's request interface.
        request_iface = config.registry.getUtility(IRouteRequest,name=route_name)

    config.registry.registerAdapter(
        layout_class, (context, request_iface), ILayout, name)
_registered = []
@config.addCleanup
def cleanUp():
    """Empty the module-level registry of layout classes (test support)."""
    del _registered[:]
|
[
"fafhrd91@gmail.com"
] |
fafhrd91@gmail.com
|
d16ef364cf6106a88a1b28a9a96bdae89166f80c
|
778a3e1e70b0b2782d2a35f8818bbe799e6c7396
|
/Seventh_week_exersice/03Sum_prime_non_prime.py
|
def7d6757eba88c49c90e3f84af9077c2e5e6b72
|
[] |
no_license
|
skafev/Python_basics
|
0088203207fe3960b26944e0940acaec40a8caaf
|
8bfc1a8b0dad3bf829fffbd539cebe3688f75974
|
refs/heads/main
| 2023-06-10T11:25:27.468914
| 2021-07-01T15:28:12
| 2021-07-01T15:28:12
| 382,078,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# Reads integers until the literal token "stop" and keeps two running
# sums, using the (simplistic) classification of this exercise: a value
# greater than 3 that is divisible by 2 or 3 counts as "non prime",
# every other non-negative value counts as "prime".  Negative input is
# reported and skipped entirely.
number = input()
not_prime = 0
prime = 0
while number != "stop":
    value = int(number)
    if value < 0:
        print("Number is negative.")
    elif value > 3 and (value % 2 == 0 or value % 3 == 0):
        not_prime += value
    else:
        # Covers 0..3 as well as larger values not divisible by 2 or 3.
        prime += value
    number = input()
print(f"Sum of all prime numbers is: {prime}")
print(f"Sum of all non prime numbers is: {not_prime}")
|
[
"s.kafev@gmail.com"
] |
s.kafev@gmail.com
|
2c8269a6d5c4ddb8c4b445466174f74aecf370f6
|
857b051f99e8a42f94dd5895c7ac735e37867e94
|
/hakkimizda/urls.py
|
56836439053617eeb3e0ba317c7a1333be1e19df
|
[
"MIT"
] |
permissive
|
kopuskopecik/projem
|
a88e4970ef23a4917e590e1a0a19ac7c49c86a73
|
738b0eeb2bf407b4ef54197cce1ce26ea67279c8
|
refs/heads/master
| 2021-06-22T10:04:44.523681
| 2020-12-25T19:56:10
| 2020-12-25T19:56:10
| 172,302,265
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from django.urls import path
from django.conf.urls import url
from .views import *

# URL namespace, e.g. reverse('hakkimizda:hak').
app_name="hakkimizda"

urlpatterns = [
    # "About us" page; the `hakkimizda` view comes from .views (star import).
    path('hakkimizda/', hakkimizda, name="hak"),
]
|
[
"kopuskopecik@gmail.com"
] |
kopuskopecik@gmail.com
|
1d6af3af9fa41162b76ba04790d68e5e149b3219
|
2ca07aecfa6ff25b0baae6dc9a707a284c2d1b6d
|
/trustzone_images/apps/bsp/build/scripts/genuses.py
|
44dde7dfd37d5c023ae14fb6e8d49ccd9fafb72d
|
[] |
no_license
|
zhilangtaosha/msm8996-wp-1-0_test_device
|
ef05af263ba7955263ff91eb81d45b2437bc492e
|
6af9b44abbc4a367a9aaae26707079974c535f08
|
refs/heads/master
| 2023-03-19T02:42:09.581740
| 2021-02-21T01:20:19
| 2021-02-21T01:20:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,954
|
py
|
#===============================================================================
#
# genuses
#
# GENERAL DESCRIPTION
#    Generates USES_FLAGS information from the DATA file generated from build/ms.
#
# Copyright (c) 2009-2010 by Qualcomm Technologies, Incorporated.
# All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#
#-------------------------------------------------------------------------------
#
# $Header: //components/rel/apps.tz/1.0.6/bsp/build/scripts/genuses.py#1 $
# $DateTime: 2016/12/02 01:50:16 $
# $Author: pwbldsvc $
# $Change: 11897059 $
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- ---------------------------------------------------------
# 04/02/10 sk Created
#
#===============================================================================
import os
import subprocess
import string
import sys
import re, string, os
from array import array
from optparse import OptionParser
from datetime import datetime
#===============================================================================
# parse_args
# parse command line arguments
#===============================================================================
def parse_args():
    """Parse command line arguments.

    Returns an (options, args) tuple.  Both --datfile and --outfile are
    mandatory; OptionParser.error() prints usage and exits with status 2
    when either is missing.
    """
    usage = "usage: %prog [options]"
    version = "%prog 1.0"

    parser = OptionParser(usage=usage, version=version)
    parser.add_option("-f", "--datfile", dest="dat_filename",
        help="Read preprocess data from FILE", metavar="FILE")
    parser.add_option("-o", "--outfile", dest="output_filename",
        help="Write output to FILE", metavar="FILE")
    parser.add_option("-v", "--verbose",
        action="store_true", dest="verbose", default=False,
        help="print status messages to stdout")

    (options, args) = parser.parse_args()

    if options.dat_filename is None:
        parser.error("--datfile option must be defined")
        # NOTE(review): parser.error() already exits(2); this is unreachable.
        sys.exit(2)
    if options.output_filename is None:
        parser.error("--outfile option must be defined")
        sys.exit(2)

    return (options, args)
#===============================================================================
# create_file_banner
# creates a string that can be use as a banner for auto generated files
#===============================================================================
def create_file_banner(fname, description="None", start_comment="#",
   end_comment="", start_block="", end_block="", style='none'):
    """Return a banner string for auto-generated files.

    `style` selects a comment convention ('C', 'C++', 'asm', 'make',
    'shell', 'dos'); any other value keeps the explicit *_comment /
    *_block arguments as passed in.
    """
    banner_str = \
'''$SB$SCM============================================================================$ECM
$SCM  Name: $ECM
$SCM    $FILE_NAME $ECM
$SCM
$SCM  Description: $ECM
$SCM    $DESCRIPTION $ECM
$SCM $ECM
$SCM Copyright (c) $YEAR by Qualcomm Technologies, Incorporated.  All Rights Reserved. $ECM
$SCM============================================================================$ECM
$SCM $ECM
$SCM *** AUTO GENERATED FILE - DO NOT EDIT $ECM
$SCM $ECM
$SCM GENERATED: $DATE $ECM
$SCM============================================================================$ECM$EB
'''
    # Known styles override whatever comment markers were passed in.
    style_markers = {
        'C': ("#", "", "/*\n", "\n*/"),
        'C++': ("//", "", "", ""),
        'asm': (";", "", "", ""),
        'make': ("#", "", "", ""),
        'shell': ("#", "", "", ""),
        'dos': ("REM ", "", "", ""),
    }
    if style in style_markers:
        start_comment, end_comment, start_block, end_block = style_markers[style]

    # Substitution order matches the original implementation: comment
    # markers first, then the date/name/description payload.
    substitutions = [
        ('$SCM', start_comment),
        ('$ECM', end_comment),
        ('$SB', start_block),
        ('$EB', end_block),
        ('$YEAR', str(datetime.now().strftime('%Y'))),
        ('$DATE', str(datetime.now().ctime())),
        ('$FILE_NAME', fname),
        ('$DESCRIPTION', description),
    ]
    for token, replacement in substitutions:
        banner_str = banner_str.replace(token, replacement)
    return banner_str
def CleanLine(aLine):
    """Normalize a makefile line: braces instead of parens, no newline,
    and the ':=' / '?=' assignment operators collapsed to '='."""
    replacements = (
        ('(', '{'),
        (')', '}'),
        ('\n', ''),
        (':=', '='),
        ('?=', '='),
    )
    for old, new in replacements:
        aLine = aLine.replace(old, new)
    return aLine
def CleanVarName(aVarname):
    """Turn a makefile variable name into an identifier: dots become
    underscores; 'export'/'define' keywords and all whitespace are dropped."""
    for keyword in ('export', 'define'):
        aVarname = aVarname.replace(keyword, '')
    aVarname = aVarname.replace('.', '_')
    return re.sub(r'\s', '', aVarname)  # get rid of whitespaces
def CleanVarValue(aVarvalue):
    """Strip surrounding whitespace from a makefile variable value."""
    stripped = aVarvalue.strip()
    return stripped
def WriteData (options, file_handle, data, new_line="\n"):
    # Write `data` plus `new_line` to the open output file; echo to
    # stdout when --verbose was given.  (Python 2 print statement.)
    file_handle.write(data + new_line)
    if options.verbose:
        print data
def main():
    """Read the make DATA file and emit a SCons tool script that sets the
    discovered USES_* flags on the construction environment."""
    # get args from cmd line
    (options, args) = parse_args()

    # Only lines whose first four characters start with "USES" matter.
    uses = "USES"
    lines = open(options.dat_filename, 'r').readlines()
    total = ""
    banner = create_file_banner(os.path.split(options.output_filename)[1])
    out_file = open(options.output_filename, 'w')
    WriteData(options, out_file, banner, new_line="")

    # Emit the standard SCons tool interface (exists/generate).
    WriteData(options, out_file, "def exists(env):")
    WriteData(options, out_file, "   return env.Detect('usesflags')")
    WriteData(options, out_file, "")
    WriteData(options, out_file, "def generate(env):")

    VarNameDict = {}
    #count = 0
    for line in lines:
        line = line.lstrip()
        if line.find(uses, 0, 4)>-1:
            line = CleanLine(line)
            tempstr = line.split("=")
            VarName = tempstr[0]
            VarName = CleanVarName(VarName)
            VarValue = tempstr[1]
            VarValue = CleanVarValue(VarValue)
            if VarValue == "yes":
                vUsesFlag = True
            else:
                vUsesFlag = False
            # Only flags explicitly set to "yes" are recorded.
            if vUsesFlag == True:
                VarNameDict[VarName] = True

    # sort keys and write file
    #import pdb; pdb.set_trace()
    # NOTE(review): dict.iterkeys() is Python 2 only.
    uses_flags = sorted(VarNameDict.iterkeys())
    for uflag in uses_flags:
        WriteData(options, out_file, "   env.Replace(%s = True)" % uflag)

    WriteData(options, out_file, "   env.Replace(USES_FLAGS = %s)" % str(uses_flags))
    WriteData(options, out_file, "   return None")
    out_file.close()
#run
main()
|
[
"lonelyjskj@gmail.com"
] |
lonelyjskj@gmail.com
|
c3973679874c6bcb06a9d97d54f6965242f7ef53
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_143/388.py
|
2c19f330a95780da7bb74471727f3b492214bb28
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
import math
def solve(a, b, k):
    """Count pairs (i, j) with 0 <= i < a, 0 <= j < b and (i AND j) < k."""
    return sum(1 for i in range(a) for j in range(b) if (i & j) < k)
name = "B-small-attempt0"
fi = open(name + ".in", "r")
fout = open(name + ".out", "w")
numTestCases = int(fi.readline())
print "#TestCases: ", numTestCases
for i in range(0, numTestCases):
line = fi.readline().strip().split(" ")
a = int(line[0])
b = int(line[1])
k = int(line[2])
fout.write("Case #" + str(i + 1) + ": " + str(solve(a, b, k)) + "\n")
#print "Case #" + str(i + 1) + ": " + str(solve(a, b, k))
fi.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
bdd0760e8844fd6ba461b3318c1347dc4022acd9
|
b090cb9bc30ac595675d8aa253fde95aef2ce5ea
|
/trunk/test/NightlyRun/test405.py
|
4234b88e576ef0f06697b7c02f12c1d1579361dc
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
eyhl/issm
|
5ae1500715c258d7988e2ef344c5c1fd15be55f7
|
1013e74c28ed663ebb8c9d398d9be0964d002667
|
refs/heads/master
| 2022-01-05T14:31:23.235538
| 2019-01-15T13:13:08
| 2019-01-15T13:13:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
#Test Name: SquareSheetShelfStressMHOPenalties
import numpy as np
from model import *
from socket import gethostname
from triangle import *
from setmask import *
from parameterize import *
from setflowequation import *
from solve import *

# Build a 180 km square mesh, mark the shelf region, and apply the
# standard sheet/shelf parameter file, then extrude to 5 layers.
md=triangle(model(),'../Exp/Square.exp',180000.)
md=setmask(md,'../Exp/SquareShelf.exp','')
md=parameterize(md,'../Par/SquareSheetShelf.py')
md.extrude(5,1.)
# SSA on the right half, HO elsewhere, coupled through penalties.
md=setflowequation(md,'SSA','../Exp/SquareHalfRight.exp','fill','HO','coupling','penalties')
md.cluster=generic('name',gethostname(),'np',3)
md=solve(md,'Stressbalance')

#Fields and tolerances to track changes
field_names =['Vx','Vy','Vz','Vel','Pressure']
field_tolerances=[5e-05,5e-05,5e-05,5e-05,1e-05]
field_values=[\
	md.results.StressbalanceSolution.Vx,\
	md.results.StressbalanceSolution.Vy,\
	md.results.StressbalanceSolution.Vz,\
	md.results.StressbalanceSolution.Vel,\
	md.results.StressbalanceSolution.Pressure,\
	]
|
[
"cummings.evan@gmail.com"
] |
cummings.evan@gmail.com
|
147eafbcdb47571b8ec157075995bcb513a53efa
|
f167dffa2f767a0419aa82bf434852069a8baeb8
|
/lib/youtube_dl/extractor/kankan.py
|
a677ff44712794ef54f53a1afe9c55fbacad91e2
|
[
"MIT"
] |
permissive
|
firsttris/plugin.video.sendtokodi
|
d634490b55149adfdcb62c1af1eb77568b8da3f5
|
1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3
|
refs/heads/master
| 2023-08-18T10:10:39.544848
| 2023-08-15T17:06:44
| 2023-08-15T17:06:44
| 84,665,460
| 111
| 31
|
MIT
| 2022-11-11T08:05:21
| 2017-03-11T16:53:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
_md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class KankanIE(InfoExtractor):
    """Extractor for kankan.com video pages (content is only served
    from inside China, hence the skipped test)."""
    _VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'

    _TEST = {
        'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
        'md5': '29aca1e47ae68fc28804aca89f29507e',
        'info_dict': {
            'id': '48863',
            'ext': 'flv',
            'title': 'Ready To Go',
        },
        'skip': 'Only available from China',
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
        # The page embeds either a surls list or a single flv lurl; the
        # last path component (gcid) identifies the CDN resource.
        surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
        gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
        gcid = gcids[-1]

        info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
        video_info_page = self._download_webpage(
            info_url, video_id, 'Downloading video url info')
        ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
        path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
        param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
        param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
        # The CDN expects an md5 token of a fixed salt plus both params;
        # key1 mirrors param2.
        key = _md5('xl_mp43651' + param1 + param2)
        video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
        }
|
[
"noreply@github.com"
] |
firsttris.noreply@github.com
|
93676b1ccd5b8f20a34473032ab5d4db03956a52
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5634947029139456_1/Python/AlonH/2014A1A.py
|
a02bf27c55c7c31dc651a2d0b8c8393d949d0274
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
def match(now,needed):
    """Return True when the two lists differ as multisets.

    NOTE: sorts both arguments in place as a side effect, and assumes
    `needed` is at least as long as `now` (IndexError otherwise).
    """
    now.sort()
    needed.sort()
    for position, value in enumerate(now):
        if value != needed[position]:
            return True
    return False
def count(ar,l):
    """Column-wise digit sums: result[i] = sum of int(s[i]) over strings s."""
    return [sum(int(s[i]) for s in ar) for i in range(l)]
def compare(n,o,l):
    """Per-position difference mask: 1 where n and o differ, as a tuple."""
    return tuple(1 if n[i] != o[i] else 0 for i in range(l))
f = open("A-large.in","r")
o = open("A-large-answers.txt","w")
T = int(f.readline())
for t in range(1,T+1):
inp = [int(a) for a in f.readline().split()]
n = inp[0]
l = inp[1]
lifts = [0]*l
start = [a for a in f.readline().split()]
needed = [a for a in f.readline().split()]
cnow = count(start,l)
cneeded = count(needed,l)
print("case",t,cnow,cneeded,start,needed)
op = set([compare(start[0],n,l) for n in needed])
for i in range(1,n):
op1 = set([compare(start[i],n,l) for n in needed])
op = op&op1
if len(op) == 0:
o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
else:
o.write("Case #"+str(t)+": "+str(min([a.count(1) for a in op]))+"\n")
o.close()
#o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
#o.write("Case #"+str(t)+": "+str(lifts.count(1))+"\n")
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
48d5dad900ecdf584f2ec639d5b62ce8f06d2c2c
|
82074ba616918ede605dec64b038546a7b07bd7d
|
/empowerb/middleware.py
|
85764001431cfbf3cedb9002fb6e1ccf8f38b859
|
[] |
no_license
|
chetankhopade/EmpowerRM
|
b7ab639eafdfa57c054a0cf9da15c3d4b90bbd66
|
8d968592f5e0d160c56f31a4870e79c30240b514
|
refs/heads/main
| 2023-07-05T03:20:13.820049
| 2021-08-26T11:56:28
| 2021-08-26T11:56:28
| 399,354,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
from _contextvars import ContextVar
from django.utils.deprecation import MiddlewareMixin
from empowerb.settings import DATABASES
db_ctx = ContextVar('var')
class WhichDatabaseIsTOUseMIddleware(MiddlewareMixin):
    """
    Middleware to update the context var with the db alias
    """

    @staticmethod
    def process_request(request):
        """Derive the database alias from the first URL path segment and
        publish it via `db_ctx`; unknown aliases map to the 'NoOP' sentinel."""
        try:
            db_name_path = request.path.split('/')[1]
            # A segment may carry a suffix after '_'; only the prefix is
            # the database alias.
            db_name = db_name_path.split('_')[0] if '_' in db_name_path else db_name_path
            # set contextvar with the database name if dbname exist in DATABASES dict
            db_ctx.set(db_name) if db_name in DATABASES.keys() else db_ctx.set('NoOP')
        except Exception as ex:
            print(ex.__str__())
            # Bug fix: ContextVar.reset() requires the Token returned by
            # set(), so reset('NoOP') raised TypeError here.  Fall back to
            # the sentinel via set() instead.
            db_ctx.set('NoOP')
|
[
"noreply@github.com"
] |
chetankhopade.noreply@github.com
|
628a26377a4ac11054ec002268c2916d3883eccf
|
601e6891504cc9da063e3ef9993e7b5f142bbe35
|
/examples/wifiStationsAndHosts.py
|
8e68f22b85c8a2a65ee1968f5506555a2831560b
|
[] |
no_license
|
caiqiqi/mininet-wifi
|
b8a13f83e4fbadea20865faecf6719abf8e68437
|
547cf3c01d85b9bfb38b3e9df3b5c52119b5b5e2
|
refs/heads/master
| 2021-01-20T16:44:34.270734
| 2016-05-16T12:55:56
| 2016-05-16T12:55:56
| 58,878,807
| 0
| 0
| null | 2016-05-15T19:01:01
| 2016-05-15T19:01:01
| null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
#!/usr/bin/python
"""
This example shows how work with wireless and wired media
"""
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import TCLink
def topology():
    "Create a network."
    net = Mininet( controller=Controller, link=TCLink, switch=OVSKernelSwitch )

    print "*** Creating nodes"
    # One access point bridging two wireless stations and two wired hosts,
    # all on the same /24 subnet.
    ap1 = net.addBaseStation( 'ap1', ssid="simplewifi", mode="g", channel="5" )
    sta1 = net.addStation( 'sta1', ip='192.168.0.1/24' )
    sta2 = net.addStation( 'sta2', ip='192.168.0.2/24' )
    h3 = net.addHost( 'h3', ip='192.168.0.3/24' )
    h4 = net.addHost( 'h4', ip='192.168.0.4/24' )
    c0 = net.addController('c0', controller=Controller, ip='127.0.0.1' )

    print "*** Adding Link"
    net.addLink(sta1, ap1)
    net.addLink(sta2, ap1)
    net.addLink(h3, ap1)
    net.addLink(h4, ap1)

    print "*** Starting network"
    net.build()
    c0.start()
    ap1.start( [c0] )

    print "*** Running CLI"
    # Drops into the interactive mininet CLI until the user exits.
    CLI( net )

    print "*** Stopping network"
    net.stop()

if __name__ == '__main__':
    # Enable info-level logging, then build and run the demo topology.
    setLogLevel( 'info' )
    topology()
|
[
"ramonreisfontes@gmail.com"
] |
ramonreisfontes@gmail.com
|
4f9cdc759c20a19b123b187ed82e7d01eb37bd48
|
8827574a663cc9d18194eb355dce7ffb676e6d0b
|
/everest/transit.py
|
8958b3c59d794095e0ea42a5548d12f5aa0ef602
|
[
"MIT"
] |
permissive
|
mirca/everest
|
70a79432f6cd2b604a64fc4c97c7513bbe2a6406
|
b96cc5cd1949b81ddc49fb74b90bf5a95c6fca14
|
refs/heads/master
| 2021-01-13T05:56:00.206244
| 2017-03-17T16:35:49
| 2017-03-17T16:35:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,317
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`transit.py` - Transit models
-------------------------------------
These are routines used to generate a transit model, primarily for
transit injection/recovery tests. These are wrappers around
:py:func:`pysyzygy.Transit`, with the added feature that
the transit :py:obj:`depth` and the transit :py:obj:`duration` can be specified
as input variables (as opposed to the planet-star radius ratio
and the stellar density, which :py:mod:`pysyzygy` expects).
'''
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import matplotlib.pyplot as pl
import pysyzygy as ps
from scipy.optimize import fmin
import logging
log = logging.getLogger(__name__)
class TransitModel(object):
    '''
    Callable transit model normalized so that the flux dip equals -1 at
    the reference transit depth (see :py:meth:`__call__`).
    '''

    def __init__(self, name, sig_RpRs = 0.001, **kwargs):
        '''
        :param str name: identifier for this planet/transit model
        :param float sig_RpRs: assumed 1-sigma uncertainty on Rp/Rs, used
            to approximate the variance on the transit depth
        :param kwargs: forwarded to :py:class:`pysyzygy.Transit`
        '''
        # The planet/transit model ID
        assert type(name) is str, "Arg `name` must be a string."
        self.name = name

        # The transit model
        self._transit = ps.Transit(**kwargs)

        # Compute the depth
        # Reference epoch: first element of `times` when given, else t0.
        times = kwargs.get('times', None)
        if times is not None:
            t0 = times[0]
        else:
            t0 = kwargs.get('t0', 0.)
        self.depth = (1. - self._transit([t0]))[0]

        # Approximate variance on the depth
        self.var_depth = (2 * sig_RpRs) ** 2

        # Save the kwargs
        self.params = kwargs

    def __call__(self, time):
        '''
        Evaluate the model at `time`: 0 out of transit, -1 at the
        reference depth (flux is rescaled by the stored depth).
        '''
        model = (self._transit(time) - 1) / self.depth
        return model
def Get_RpRs(d, **kwargs):
    '''
    Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given
    the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
    '''

    def Depth(RpRs, **kwargs):
        # Transit depth at t0 for a trial radius ratio.
        return 1 - ps.Transit(RpRs = RpRs, **kwargs)([kwargs.get('t0', 0.)])

    def DiffSq(r):
        # Scaled squared residual; the 1e10 factor sharpens the minimum
        # for the simplex optimizer.
        return 1.e10 * (d - Depth(r, **kwargs)) ** 2

    # Start the search at sqrt(d), the no-limb-darkening geometric guess.
    return fmin(DiffSq, [np.sqrt(d)], disp = False)
def Get_rhos(dur, **kwargs):
    '''
    Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given
    the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
    '''

    assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration."

    def Dur(rhos, **kwargs):
        # Numerically measure the transit duration on a 1-day grid of
        # 1000 samples centered on t0.
        t0 = kwargs.get('t0', 0.)
        time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)
        try:
            t = time[np.where(ps.Transit(rhos = rhos, **kwargs)(time) < 1)]
        except:
            # Invalid parameter combination: report zero duration so the
            # optimizer moves away from it.
            return 0.
        return t[-1] - t[0]

    def DiffSq(rhos):
        return (dur - Dur(rhos, **kwargs)) ** 2

    # 0.2 is the initial guess for the simplex search (units follow
    # pysyzygy's rhos convention — presumably solar; confirm upstream).
    return fmin(DiffSq, [0.2], disp = False)
def Transit(time, t0 = 0., dur = 0.1, per = 3.56789, depth = 0.001, **kwargs):
    '''
    A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_ transit model,
    but with the depth and the duration as primary input variables.

    :param numpy.ndarray time: The time array
    :param float t0: The time of first transit in units of :py:obj:`BJD` - 2454833.
    :param float dur: The transit duration in days. Don't go too crazy on this one -- very small \
    or very large values will break the inverter. Default 0.1
    :param float per: The orbital period in days. Default 3.56789
    :param float depth: The fractional transit depth. Default 0.001
    :param dict kwargs: Any additional keyword arguments, passed directly to :py:func:`everest.pysyzygy.Transit`

    :returns tmod: The transit model evaluated at the same times as the :py:obj:`time` array

    '''
    # Invert depth -> RpRs and duration -> rhos, then evaluate the model.
    # Note that rhos can affect RpRs, so we should really do this iteratively,
    # but the effect is pretty negligible!
    RpRs = Get_RpRs(depth, t0 = t0, per = per, **kwargs)
    rhos = Get_rhos(dur, t0 = t0, per = per, **kwargs)
    return ps.Transit(t0 = t0, per = per, RpRs = RpRs, rhos = rhos, **kwargs)(time)
class TransitShape(object):
    '''
    A precomputed, depth-scaled transit profile that can be shifted to
    any transit time via interpolation.
    '''

    def __init__(self, depth = 1, window = 0.5, **kwargs):
        '''
        :param float depth: scale factor applied to the profile depth
        :param float window: width (days) of the precomputed time window
        :param kwargs: forwarded to :py:class:`pysyzygy.Transit`
        '''
        # t0/times are fixed internally; strip them from caller kwargs.
        kwargs.pop('t0', None)
        kwargs.pop('times', None)

        t = np.linspace(-window / 2, window / 2, 5000)
        trn = ps.Transit(t0 = 0., **kwargs)
        transit_model = trn(t)

        # Shift the baseline to zero and rescale so that mid-transit
        # equals -depth.
        transit_model -= 1
        transit_model *= depth / (1 - trn([0.])[0])

        self.x = t
        self.y = transit_model

    def __call__(self, time, t0 = 0.):
        '''
        Interpolate the stored profile onto `time`, centered at `t0`.
        '''
        return np.interp(time, self.x + t0, self.y)
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
14679579a6dbc0f503f5b3d8562401165ce94756
|
91deb97afda334c5366e560325995cf6b5407bee
|
/src/command_modules/azure-cli-billing/azure/cli/command_modules/billing/custom.py
|
21965f255ffa9f5644f22068a40961f8ca75b5a3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
viananth/azure-cli
|
ab117c1b0b676026cbb57567544cd70630efe830
|
4d23492ed03e946cfc11bae23b29acb971fb137d
|
refs/heads/master
| 2021-05-23T05:13:51.414113
| 2017-08-17T16:58:10
| 2017-08-17T16:58:10
| 95,239,804
| 0
| 0
|
NOASSERTION
| 2019-03-19T18:45:16
| 2017-06-23T17:01:34
|
Python
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cli_billing_list_invoices(client, generate_url=False):
    """List all available invoices of the subscription"""
    # Expanding downloadUrl makes the service include a download link.
    expand = 'downloadUrl' if generate_url else None
    return list(client.list(expand=expand))
def cli_billing_get_invoice(client, name=None):
    """Retrieve invoice of specific name of the subscription"""
    # An empty/missing name falls back to the most recent invoice.
    if not name:
        return client.get_latest()
    return client.get(name)
def cli_billing_list_periods(client):
    """List all available billing periods of the subscription"""
    periods = client.list()
    return list(periods)
|
[
"troy.dai@outlook.com"
] |
troy.dai@outlook.com
|
1390b2d3b283c49021827414a5f0ca6601dd27e8
|
1cfafec5935522b386d40ab7bb7246f39da89fcc
|
/temp/20201221_naver_ai_handsonsummit.py
|
ff8784adba43b2a7a15adeb0447977ce5373c919
|
[] |
no_license
|
madfalc0n/my_coding_labs
|
0d9e13e2d1579607d5481c6a78baa70a2c7c374a
|
b38fd988a5e3ebb8d8b66bf5a0b15eb3eaa20578
|
refs/heads/master
| 2021-07-03T17:33:16.801207
| 2021-06-18T06:24:09
| 2021-06-18T06:24:09
| 241,097,976
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
import sys
import requests

# Naver Clova Speech Recognition (CSR) sample: POSTs a raw audio file
# and prints the transcription response.
client_id = "CID"
client_secret = "CSECRET"
lang = "Kor" # language code ( Kor, Jpn, Eng, Chn )
url = "https://naveropenapi.apigw.ntruss.com/recog/v1/stt?lang=" + lang
# NOTE(review): the file handle is never closed; acceptable for a
# one-shot script, but a `with open(...)` block would be cleaner.
data = open('filepath', 'rb')
headers = {
    "X-NCP-APIGW-API-KEY-ID": client_id,
    "X-NCP-APIGW-API-KEY": client_secret,
    "Content-Type": "application/octet-stream"
}
response = requests.post(url, data=data, headers=headers)
rescode = response.status_code
if(rescode == 200):
    print (response.text)
else:
    print("Error : " + response.text)
|
[
"chadool116@naver.com"
] |
chadool116@naver.com
|
67558e1d4c168ae6ffe706cae7b73d5b96991949
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/ec2_write_1/client-vpn-client-certificate-revocation-list_export.py
|
590e1a0b6582eaa6d0a43363fd3ba344c40e4825
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/export-client-vpn-client-certificate-revocation-list.html
if __name__ == '__main__':
"""
import-client-vpn-client-certificate-revocation-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/import-client-vpn-client-certificate-revocation-list.html
"""
parameter_display_string = """
# client-vpn-endpoint-id : The ID of the Client VPN endpoint.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "export-client-vpn-client-certificate-revocation-list", "client-vpn-endpoint-id", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
4c6a4945f123306bcdf31d8b8f17939c2b32cc2f
|
094304d0aa7cb6949c0f471d1c432dc7db5a4c2a
|
/VIRSCAN/vir_scan_db.py
|
1358ecd8d77cb2b4438e1bd9cfcaacc0392ee70c
|
[] |
no_license
|
smallmeet/fangzhen
|
7f8e232b87841b88268d14133479846e48e33ba1
|
d0cbf09eba98c835a4ea013889a8cf0b34263d0d
|
refs/heads/master
| 2020-12-24T22:28:54.972613
| 2016-08-12T09:24:15
| 2016-08-12T09:24:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
from base_db import MysqlClient
from get_conf import GetConf
class VirScanMysqlClient:
    """Thin wrapper around MysqlClient exposing the virus-scan stored procedures."""

    def __init__(self, conf):
        # conf: configuration object consumed by the project-level MysqlClient.
        self.mysql_client = MysqlClient(conf)

    def insert_vir_scan(self, args):
        # Record one virus-scan result row.
        self.mysql_client.insert('PRO_VIR_SCAN_INSERT', args)

    def insert_apk_black_list(self, args):
        # Add an APK to the blacklist.
        self.mysql_client.insert('PRO_APK_BLACK_LIST_INSERT', args)

    def update_apk_black_list(self, args):
        # NOTE(review): routed through insert(); presumably the stored-procedure
        # name selects the UPDATE statement -- confirm against base_db.MysqlClient.
        self.mysql_client.insert('PRO_APK_BLACK_LIST_UPDATE', args)

    def select_vir_scan(self, args):
        # Look up scan results; returns whatever MysqlClient.select yields.
        return self.mysql_client.select('PRO_VIR_SCAN_SELECT', args)

    def select_apk_black_list_info(self, args):
        return self.mysql_client.select('PRO_APK_BLACK_LIST_SELECT', args)

    def fetch_apk_black_list_info(self, args):
        return self.mysql_client.select('PRO_APK_BLACK_LIST_FETCH', args)
if __name__ == '__main__':
    # Smoke-test hook: build a client from an empty config path.
    get_conf = GetConf('')
    mysql_client = VirScanMysqlClient(get_conf)
    # mysql_client.get_app_info()
    # mysql_client.insert_data()
|
[
"luojianfeng2011@163.com"
] |
luojianfeng2011@163.com
|
022be4db452f8ecc1c423f41fa77963d1855a30e
|
9e21ee282d0a567b42a96f938f61d655eb2d5940
|
/chat_room/tests/test_base.py
|
1262db10f3b75ee65a9f2fb0e64f06f887ac4d2a
|
[] |
no_license
|
smolynets/chat-interface
|
e0ac815639dd993f029f331a545c5c5932785569
|
3b66970c241eb1660b60a612aceffde36223eff4
|
refs/heads/master
| 2021-06-12T02:19:47.749561
| 2019-07-13T12:42:21
| 2019-07-13T12:42:21
| 191,516,912
| 0
| 0
| null | 2021-06-10T18:21:22
| 2019-06-12T07:06:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,496
|
py
|
"""
This test is inherited by tests of other apps.
"""
from django.urls import reverse
from rest_framework.test import APIClient, APITestCase
from rest_framework_simplejwt.settings import api_settings
from ..models import User
class APIRestAuthJWTClient(APIClient):
    """
    Test client that authenticates via JWT tokens.

    On successful login the access token is installed as the default
    Authorization header for subsequent requests.
    """

    def login(self, login_name="login", **credentials):
        """
        POST *credentials* to the named login endpoint.

        Returns True and stores the access token on success, False otherwise.
        """
        response = self.post(reverse(login_name), credentials, format="json")
        if response.status_code != 200:
            return False
        auth_header = "{0} {1}".format(
            api_settings.defaults["AUTH_HEADER_TYPES"][0],
            response.data["access"]
        )
        self.credentials(HTTP_AUTHORIZATION=auth_header)
        return True
class APITestBaseClass(APITestCase):
    """
    Base test case for API tests.

    Creates two users and wires in the JWT-aware test client.
    """

    def setUp(self):
        """
        Create two users available to every test.
        """
        self.user = User.objects.create_user(
            username="test_user",
            email="test@emil.com",
            password="password"
        )
        self.user_two = User.objects.create_user(
            username="test2_user",
            email="test@emil2.com",
            password="password"
        )

    # Use the JWT-enabled client for self.client in all tests.
    client_class = APIRestAuthJWTClient
|
[
"smolynets@gmail.com"
] |
smolynets@gmail.com
|
1547ae20bcab955d0bc53826f0c25ebaf5c0ca77
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2645/60768/316508.py
|
65bd1d2918995f2d545307313de4c81f073d0279
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
def min_eating_speed(piles, h):
    """Return the smallest integer speed k at which all *piles* of bananas
    can be eaten within *h* hours, eating at most k bananas from a single
    pile per hour (LeetCode 875, "Koko Eating Bananas").

    When no k <= max(piles) suffices (h < len(piles)), max(piles) is
    returned, matching the original script's fallback answer.

    The original brute-forced every candidate k with an O(h*n) simulation
    and entered an infinite loop whenever int(sum(piles)/h) == 0 (k = 0
    never reduces a pile).  This version binary-searches k and computes
    hours with a ceiling division instead.
    """
    lo, hi = 1, max(piles)
    while lo < hi:
        mid = (lo + hi) // 2
        # -(-p // mid) is ceil(p / mid) without importing math.
        hours = sum(-(-p // mid) for p in piles)
        if hours <= h:
            hi = mid
        else:
            lo = mid + 1
    return lo


if __name__ == '__main__':
    # SECURITY: eval() on raw input is unsafe in general; kept because the
    # judge supplies the pile list as a Python literal (e.g. "[3,6,7,11]").
    piles = eval(input())
    h = int(input())
    print(min_eating_speed(piles, h))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
8a6f357264b2dbc8114efa7cb34e8a02b9be2820
|
63b814265ab49ebc2ed8e62577757991119be83b
|
/data-quality/kalman-filt.py
|
d10a0e87d25b724b1f8289d92c13e7a3168ac9bd
|
[] |
no_license
|
wisecg/mjd-analysis
|
7de4e67c34c19215984f528f31f71a8e584e1e91
|
ca4f00a767f2dfe6d460b44c700e2b59fe0bb296
|
refs/heads/master
| 2020-12-07T21:28:34.376478
| 2017-08-28T15:20:17
| 2017-08-28T15:20:17
| 65,919,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
"""
The idea is to take plots like "gmax" and "gbase" in data-quality.cc
and apply a Kalman filter to them, to look for "extrema"
http://scipy-cookbook.readthedocs.io/items/KalmanFiltering.html
Intro to Kalman filters:
http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
Ben says:
everything is “basically linear,” which in electronics engineering speak means made of gaussians
so you model it with a bunch of kalman filters
and that gives you a statistically robust way to look for discontinuities or other jumps
its how they monitor parameters at like a gas turbine plant or jet engine or shit like that
its called fault detection and is a big component of controls engineering
its like wildly unexciting
but sort of cool math
but, like, say you want to monitor stability of a peak or whatever
you can make a bunch of plots of that peak position and look at them by eye
or you can have a filter that looks at the position vs time and says WOAH WTF BRO if it jumps
kalman filters are markov chain way to do that
and you know we roll markov style up in this bitch
same with rates or whatever
"""
|
[
"wisecg.neontetra@gmail.com"
] |
wisecg.neontetra@gmail.com
|
5509f28877444ba0ac97b513a2106dbc9ddd0995
|
ea0c0b8d67a42086f840149b3dbe1c0e4f58e56f
|
/members_area/migrations/0005_auto_20200129_2122.py
|
12832e8dc63b3301b343c645b65d640d95c3d93b
|
[
"MIT"
] |
permissive
|
AzeezBello/raodoh
|
78b27e0886f8882144a4def160d9c3f53bcc6af9
|
296bd44069bd750557bf49995374601f5052d695
|
refs/heads/master
| 2022-05-03T05:07:21.632642
| 2020-02-26T10:16:08
| 2020-02-26T10:16:08
| 235,878,080
| 0
| 0
|
MIT
| 2022-04-22T23:01:27
| 2020-01-23T20:15:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 481
|
py
|
# Generated by Django 2.2.9 on 2020-01-29 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.9): re-declares Lesson.course as a CASCADE
    # foreign key to members_area.Course.  Do not hand-edit applied migrations.

    dependencies = [
        ('members_area', '0004_auto_20200128_2330'),
    ]

    operations = [
        migrations.AlterField(
            model_name='lesson',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members_area.Course'),
        ),
    ]
|
[
"azeez@scholarx.co"
] |
azeez@scholarx.co
|
656bb960ef1d2fd531df0a667c4d97135b95bcb1
|
dd5ee6d1e88527cd22f1b64443320ba8ef751b59
|
/rlcard3/envs/mocsar.py
|
2b3edd2b9df42da117998cd3dd3b41bf88e15885
|
[
"MIT"
] |
permissive
|
sorata2894/rlcard3
|
42a2587e3ab00f3a33c684fb76efbc334a835359
|
e9bbd36b789e670f96622a3a2ba8327f0d897561
|
refs/heads/master
| 2022-11-05T00:08:10.809055
| 2020-06-11T03:28:41
| 2020-06-11T03:28:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
"""
Mocsár Environment
File name: envs/gmocsar.py
Author: József Varga
Date created: 3/27/2020
"""
from rlcard3 import models
from rlcard3.envs.env import Env
from rlcard3.games.mocsar.game import MocsarGame as Game
from rlcard3.games.mocsar.utils import action_to_string, \
string_to_action, payoff_func, print_state, encode_to_obs
from typing import List
class MocsarEnv(Env):
    """Mocsar environment wrapper for rlcard3.

    (The original docstring said "GinRummy Environment" -- stale copy/paste.)
    """
    # Dimensions of the encoded state numpy array: 3 planes x 9 counts x 14 ranks.
    state_shape: List[int]

    def __init__(self, config):
        self.game = Game()
        self.state_shape = [3, 9, 14]
        super().__init__(config=config)

    def _extract_state(self, state):  # NOTE(review, 200213): stale "don't use state?" note removed -- state IS used below
        """
        Extract useful information from state for RL. Must be implemented in the child class.
        numpy(3,9,14)
        Meaning: x,y,z
        z: 1/0, 1 means, the hand contains y amount of card.
        y: rank of cards in some hand.
        x=0: player's hand
        x=1: others hand
        x=2: target
        x>2: history, not implemented....
        :param state: dict, the raw state
        :return: dict: 'obs':the extracted state, numpy.array, 'legal_actions': list of actions
        """
        obs = encode_to_obs(state=state)
        extracted_state = {'obs': obs,
                           'legal_actions': self._get_legal_actions(),
                           'is_extract': True  # State is extracted
                           }
        return extracted_state

    def get_payoffs(self):
        """
        Get the payoffs of players. Must be implemented in the child class.
        First one scores 1, Last one scores 0. Other ith player scores 0.5 ^^i
        :return: A list of payoffs for each player.
        """
        num_players = self.game.num_players
        # winners holds the finishing order of the players
        # (list indexed by PlayerID instead of OrderId, e.g. [1,3,2,0])
        win_id = [self.game.players.winners.index(i) for i in range(num_players)]
        # win_id[i] is the finishing position of player i, e.g. [3,0,2,1]
        # (player with index 0 finished last = position 3)
        payoffs = [payoff_func(position=win_id[i], num_players=num_players) for i in range(num_players)]
        return payoffs

    def _decode_action(self, action_id):
        """
        Decode Action id to the action in the game.
        :param action_id: The id of the action
        :return: The action that will be passed to the game engine.
        """
        return action_to_string(action=action_id)

    def _get_legal_actions(self):
        """
        Get all legal actions for current state.
        :return: A list of legal actions' id.
        """
        return [string_to_action(action) for action in self.game.get_legal_actions()]

    def _load_model(self):
        """
        Load pretrained/rule model
        :return: A Model object
        """
        return models.load('mocsar-rule-v1', num_players=self.game.get_player_num())

    def print_state(self, player: int):
        """
        Print out the state of a given player
        :param player: Player Id to print
        """
        state = self.game.get_state(player)
        print_state(state)

    def print_result(self, player):
        """
        Print the game result when the game is over
        :param player: Player Id to print
        """
        payoffs = self.get_payoffs()
        for player_ in self.game.players.players:
            print(f"Player {player_.__str__()} : points {payoffs[player_.player_id]}")

    @staticmethod
    def print_action(action: str):
        """
        Print out an action in a nice form
        :param action: Code of the action
        """
        # Some callers pass (action, extra) tuples; unwrap the action code.
        if type(action) is tuple:
            action, _ = action
        print(f"\nAction code:{string_to_action(action)}, action:{action}")
|
[
"31209755+cogitoergoread@users.noreply.github.com"
] |
31209755+cogitoergoread@users.noreply.github.com
|
afa233f76cb5afeb5878c1f8371c6ee8b5e88667
|
5ed795f324b1f94ded479a22f60580d9f41a114b
|
/dashboard/migrations/0007_auto_20190212_1753.py
|
231eeaf5940eeed0afb26eda070c777986ca996d
|
[] |
no_license
|
ashutoshdev/Django-Main-news-wesbite
|
907f52a131e136072a585c903c906adb19457765
|
9a934255465d73ab12e16031fb99ad5847b65b55
|
refs/heads/master
| 2023-08-23T20:27:40.286701
| 2021-10-21T02:03:49
| 2021-10-21T02:03:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
# Generated by Django 2.0 on 2019-02-12 12:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0): creates the DashboardCompany tracking table
    # and adds date/time stamps to RFCompany.  Do not hand-edit applied migrations.

    dependencies = [
        ('dashboard', '0006_rfcompany'),
    ]

    operations = [
        migrations.CreateModel(
            name='DashboardCompany',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('companyname', models.TextField(default='', verbose_name='companyname')),
                ('bannerloads', models.PositiveIntegerField(default=0)),
                ('clicks', models.PositiveIntegerField(default=0)),
                ('date', models.DateField(auto_now_add=True)),
                ('time', models.TimeField(auto_now_add=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.AddField(
            model_name='rfcompany',
            name='date',
            # default is required once for existing rows; dropped afterwards.
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='rfcompany',
            name='time',
            field=models.TimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
[
"looklikeme05@gmail.com"
] |
looklikeme05@gmail.com
|
369e5de6978cd855314fe286b88ec95c0f367146
|
19bdbe1c4aa00ba9799764681f16e09f65d6ea2b
|
/np/lib/smtp.py
|
0065154141cf437ba3588749e4b816c5fc03783f
|
[] |
no_license
|
invisibleroads/networkplanner
|
b4a3c7b3c0c169c3cd6610a6fb77125434dcb1c4
|
7ad8c0f2b4078f6cca681205e1671d060a937c18
|
refs/heads/master
| 2023-08-11T17:33:44.458438
| 2012-05-31T13:41:04
| 2012-05-31T13:41:04
| 961,674
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
'Routines for sending messages'
# Import system modules
import smtplib
import email.message
import email.utils
import socket
def sendMessage(fromByValue, toByValue, subject, body, headerByName=None):
    """Send a plain-text message using SMTP.

    fromByValue  -- dict with 'nickname', 'email', 'smtp', 'username', 'password'
    toByValue    -- dict with 'nickname', 'email'
    subject      -- message subject line
    body         -- message payload (plain text)
    headerByName -- optional mapping of extra headers to add

    Raises SMTPError when delivery fails at the socket level.

    Fixes over the original: Python 2-only `except X, e` / `iteritems()`
    replaced with forms valid on Python 2.6+ and 3; the connection is now
    closed even when login() raises (previously quit() only covered
    sendmail()).
    """
    # Prepare the message
    message = email.message.Message()
    message.add_header('from', email.utils.formataddr((fromByValue['nickname'], fromByValue['email'])))
    message.add_header('to', email.utils.formataddr((toByValue['nickname'], toByValue['email'])))
    message.add_header('subject', subject)
    message.set_payload(body)
    if headerByName:
        for key, value in headerByName.items():
            message.add_header(key, value)
    # Connect to server
    if fromByValue['smtp'] == 'localhost':
        server = smtplib.SMTP('localhost')
    else:
        # Implicit-TLS SMTP on the standard SMTPS port
        server = smtplib.SMTP_SSL(fromByValue['smtp'], 465)
    try:
        if len(fromByValue['username']):
            server.login(fromByValue['username'], fromByValue['password'])
        # Send mail
        try:
            server.sendmail(fromByValue['email'], toByValue['email'], message.as_string())
        except socket.error as error:
            raise SMTPError(error)
    finally:
        server.quit()
class SMTPError(Exception):
    """Raised when SMTP delivery fails due to a socket error."""
    pass
|
[
"support@invisibleroads.com"
] |
support@invisibleroads.com
|
3fd599f2dd2b120dfc1fa457dd87c9056ade3f26
|
00a9295409b78a53ce790f7ab44931939f42c0e0
|
/FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/multipledispatch/utils.py
|
11cea683ed08448b11c2efecaea1b7e234934cc4
|
[
"Apache-2.0"
] |
permissive
|
klei22/Tech-OnBoarding-Class
|
c21f0762d2d640d5e9cb124659cded5c865b32d4
|
960e962322c37be9117e0523641f8b582a2beceb
|
refs/heads/master
| 2022-11-10T13:17:39.128342
| 2022-10-25T08:59:48
| 2022-10-25T08:59:48
| 172,292,871
| 2
| 3
|
Apache-2.0
| 2019-05-19T00:26:32
| 2019-02-24T03:50:35
|
C
|
UTF-8
|
Python
| false
| false
| 3,042
|
py
|
def expand_tuples(L):
    """Expand every tuple entry of *L* into its alternatives.

    Each tuple element contributes one choice per member; the result is
    the list of all combined choices as tuples.

    >>> expand_tuples([1, (2, 3)])
    [(1, 2), (1, 3)]
    >>> expand_tuples([1, 2])
    [(1, 2)]
    """
    acc = [()]
    # Build right-to-left so the combined tuples come out in the same
    # order as the recursive formulation.
    for entry in reversed(L):
        options = entry if isinstance(entry, tuple) else (entry,)
        acc = [(opt,) + tail for tail in acc for opt in options]
    return acc
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
""" Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> from sympy.multipledispatch.utils import _toposort
>>> _toposort({1: (2, 3), 2: (3, )})
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] https://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = {k: set(val) for k, val in incoming_edges.items()}
S = {v for v in edges if v not in incoming_edges}
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
    """Reverse the direction of a dependence dict.

    >>> d = {'a': (1, 2), 'b': (2, 3), 'c': ()}
    >>> reverse_dict(d)  # doctest: +SKIP
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}

    Note: iteration follows the input dict's order, so the tuple order in
    the output depends on it.
    """
    reversed_map = {}
    for src, targets in d.items():
        for tgt in targets:
            reversed_map[tgt] = reversed_map.get(tgt, ()) + (src,)
    return reversed_map
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
    """Group the elements of *seq* by the key function *func*.

    Returns a dict mapping each key to the list of elements (in input
    order) that produced it.

    >>> groupby(len, ['Bob', 'Dan', 'Alice'])  # doctest: +SKIP
    {3: ['Bob', 'Dan'], 5: ['Alice']}
    """
    grouped = {}
    for element in seq:
        grouped.setdefault(func(element), []).append(element)
    return grouped
|
[
"kaunalei@gmail.com"
] |
kaunalei@gmail.com
|
e7286393de85a9ea1daeff4f6a590f0d35dd069b
|
f50f1aa1f8f139d546db3230a1cb1f53043fd9e6
|
/hardware/mobile/ifuse/actions.py
|
4253ce5b51f00a7f0adaacfac0ba3a76d71890d3
|
[] |
no_license
|
pars-linux/corporate2
|
7887961d1552d39bc3b0bef4a60fd3413d9b82bb
|
14d1eacfc824fb8d0bff8173e7ac06b36b88d10d
|
refs/heads/master
| 2020-05-26T15:02:12.005654
| 2017-02-27T03:07:14
| 2017-02-27T03:07:14
| 82,476,084
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Regenerate the autotools build system, then run ./configure with defaults.
    autotools.autoreconf("-vfi")
    autotools.configure()
def build():
    # -j1: force a single make job -- presumably the build is not
    # parallel-safe (TODO confirm upstream).
    autotools.make("-j1")
def install():
    # Stage the install into the package directory, then ship the doc files.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("AUTHORS", "COPYING", "README")
|
[
"ozancaglayan@users.noreply.github.com"
] |
ozancaglayan@users.noreply.github.com
|
2da48d4fe2ab88ad57d4bc2ce4b47d37ade84327
|
00c14f5816c3ef6a9ff5652af89c27c12bcf023c
|
/example/jspm_0_17/jspm_0_17/urls.py
|
9ef86b2113613f0783470d90f157872b78c2522d
|
[
"MIT",
"ISC"
] |
permissive
|
ilyashupta/django-systemjs
|
148fd7de73aeb2cf562a07d3bb392436f3a78010
|
f4d26794c06449d4d3ae2a6f7ab0bc550b35b0c7
|
refs/heads/master
| 2023-04-27T14:41:45.265046
| 2016-09-19T09:15:35
| 2016-09-19T09:15:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
"""jspm_0_17 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
    # Landing page: renders the static base template.
    url(r'^$', TemplateView.as_view(template_name='base.html')),
]
|
[
"sergeimaertens@gmail.com"
] |
sergeimaertens@gmail.com
|
936fe33888460fe111915ebee493e4b636140d10
|
b42957e496e5c9447b858d7382caea83ce9ea431
|
/packtml/__init__.py
|
d3f0e325a23686c69f7eec073c983de5f0695885
|
[
"MIT"
] |
permissive
|
PacktPublishing/Supervised-Machine-Learning-with-Python
|
153b9f5248fd4ca79896a277c7f703cf5899ac07
|
00d6ce2451547a73e6358d85937f8cbf2af762a4
|
refs/heads/master
| 2023-02-02T21:20:35.889344
| 2023-01-30T08:34:13
| 2023-01-30T08:34:13
| 187,639,872
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
# -*- coding: utf-8 -*-
import os
# global namespace:
from packtml import clustering
from packtml import decision_tree
from packtml import metrics
from packtml import neural_net
from packtml import recommendation
from packtml import regression
from packtml import utils
# set the version
# Read __version__ from the VERSION file shipped next to this module
# (single source of truth for the package version).
packtml_location = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(packtml_location, "VERSION")) as vsn:
    __version__ = vsn.read().strip()

# remove from global namespace
# Drop bookkeeping names so they do not leak into the packtml namespace.
del os
del packtml_location
del vsn

__all__ = [
    'clustering',
    'decision_tree',
    'metrics',
    'neural_net',
    'recommendation',
    'regression',
    'utils'
]
|
[
"packt.suwarnar@gmail.com"
] |
packt.suwarnar@gmail.com
|
286d4837a392a3730412cc78c44d91c56603e5b6
|
dd8227454b817ccf2ceb24b3dfd4260d4ded7a72
|
/scripts/item/consume_2434546.py
|
fd4214fd1a7ed1410ab093ba89cb8ce96fcf7213
|
[
"MIT"
] |
permissive
|
Snewmy/swordie
|
0dd3c17808b064c2cb2bd9576b51daf01ae5d686
|
ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17
|
refs/heads/master
| 2023-06-30T21:14:05.225798
| 2021-07-06T14:32:39
| 2021-07-06T14:32:39
| 389,497,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
# Scribbler Damage Skin
# Game item script: `sm` (the script manager) is injected by the engine at
# runtime; it is not defined in this file.
success = sm.addDamageSkin(2434546)
if success:
    sm.chat("The Scribbler Damage Skin has been added to your account's damage skin collection.")
|
[
"vcalheirosdoc@gmail.com"
] |
vcalheirosdoc@gmail.com
|
0fdb86c8d95ec238f669e4cd793c8b90cee446f0
|
b48a1d5733d10c39a112698c2286ae1afb02f36a
|
/announce/management/commands/migrate_mailchimp_users.py
|
7cce1d4f3b7e6e48acb8b65b386b435c2095820c
|
[
"MIT"
] |
permissive
|
p2pu/learning-circles
|
ecb317aaa8620cb076ce45c42d055e89e6586516
|
ae8de4df48aae0844fb50dca5c62c099b3b2b0a3
|
refs/heads/master
| 2023-08-19T19:18:09.198077
| 2023-08-10T09:23:58
| 2023-08-10T09:23:58
| 32,735,768
| 11
| 10
|
MIT
| 2023-08-10T09:30:04
| 2015-03-23T14:05:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from announce.mailchimp import archive_members, list_members, batch_subscribe
from studygroups.models import Profile
import requests
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = 'Synchronize mailchimp audience with users that opted in for communications'

    def handle(self, *args, **options):
        """Subscribe opted-in, confirmed users missing from the Mailchimp audience."""
        # E-mail addresses already present (and still subscribed) in Mailchimp.
        members = [
            member for member in list_members()
            if member.get('status') not in ['unsubscribed', 'cleaned']
        ]
        existing_emails = [member.get('email_address').lower() for member in members]

        # Active users who opted in and confirmed their e-mail address.
        subscribed = User.objects.filter(
            profile__communication_opt_in=True,
            is_active=True,
            profile__email_confirmed_at__isnull=False,
        )
        to_sub = [user for user in subscribed if user.email.lower() not in existing_emails]
        print('{} users will be added to the mailchimp list'.format(len(to_sub)))
        batch_subscribe(to_sub)
|
[
"dirkcuys@gmail.com"
] |
dirkcuys@gmail.com
|
d1660437d7cc1d437db44a397725e49216966700
|
eefc47dcb8377239c34134024be8783a9e3b5f44
|
/bimdata_api_client/models/raw_system.py
|
3d6644047f00fe509b01b9df9dfbe5ddcdf9b50d
|
[] |
no_license
|
Mike-FR/python-api-client
|
4fea5afcd942ebdf6dca174e2d38afaeed71eee4
|
54b2b090cbbf127cf8ac0f17c3492e6d0e1c7f29
|
refs/heads/master
| 2023-06-29T13:07:30.438434
| 2021-07-28T09:08:54
| 2021-07-28T09:08:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,617
|
py
|
# coding: utf-8
"""
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1
Contact: support@bimdata.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from bimdata_api_client.configuration import Configuration
class RawSystem(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared OpenAPI type.
    openapi_types = {
        'uuid': 'str',
        'name': 'str',
        'description': 'str',
        'object_type': 'str'
    }

    # Maps attribute name -> JSON key in the API payload.
    attribute_map = {
        'uuid': 'uuid',
        'name': 'name',
        'description': 'description',
        'object_type': 'object_type'
    }

    def __init__(self, uuid=None, name=None, description=None, object_type=None, local_vars_configuration=None):  # noqa: E501
        """RawSystem - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._uuid = None
        self._name = None
        self._description = None
        self._object_type = None
        self.discriminator = None

        # Assignments go through the property setters below (uuid validates).
        self.uuid = uuid
        self.name = name
        self.description = description
        self.object_type = object_type

    @property
    def uuid(self):
        """Gets the uuid of this RawSystem.  # noqa: E501

        :return: The uuid of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this RawSystem.

        :param uuid: The uuid of this RawSystem.  # noqa: E501
        :type: str
        """
        # uuid is required and must be non-empty when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and uuid is None:  # noqa: E501
            raise ValueError("Invalid value for `uuid`, must not be `None`")  # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                uuid is not None and len(uuid) < 1):
            raise ValueError("Invalid value for `uuid`, length must be greater than or equal to `1`")  # noqa: E501

        self._uuid = uuid

    @property
    def name(self):
        """Gets the name of this RawSystem.  # noqa: E501

        :return: The name of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this RawSystem.

        :param name: The name of this RawSystem.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def description(self):
        """Gets the description of this RawSystem.  # noqa: E501

        :return: The description of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this RawSystem.

        :param description: The description of this RawSystem.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def object_type(self):
        """Gets the object_type of this RawSystem.  # noqa: E501

        :return: The object_type of this RawSystem.  # noqa: E501
        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """Sets the object_type of this RawSystem.

        :param object_type: The object_type of this RawSystem.  # noqa: E501
        :type: str
        """
        self._object_type = object_type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RawSystem):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, RawSystem):
            return True

        return self.to_dict() != other.to_dict()
|
[
"infra@bimdata.io"
] |
infra@bimdata.io
|
92636880ee00dcdaf6082a42f6967c44fa8b6054
|
9bcd8a8de7e34ab52f44070c171e2e12e52e9775
|
/setup.py
|
c7ab57d1d127894b45df406d8c76bdb98355363e
|
[
"BSD-2-Clause"
] |
permissive
|
miracle2k/localtodo
|
c419bf5cd8aa5fd6092420577c6155a3d418cd1d
|
8598a073d9fe466832b6a952a0b1dc20603d0e7d
|
refs/heads/master
| 2022-04-30T13:36:50.211348
| 2022-03-21T18:45:16
| 2022-03-21T18:45:16
| 5,198,753
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup
# Package metadata for localtodo: a single-module tool exposing a
# `localtodo` console entry point (localtodo:run).
setup(
    name='localtodo',
    url='https://github.com/miracle2k/localtodo',
    version='1.0',
    license='BSD',
    author=u'Michael Elsdörfer',
    author_email='michael@elsdoerfer.com',
    description=
        '.gitignore local todo files, but sync them through Dropbox.',
    py_modules=['localtodo'],
    # docopt is pinned; presumably newer releases changed the API -- confirm.
    install_requires=['docopt==0.4.1'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python'
    ],
    entry_points="""[console_scripts]\nlocaltodo = localtodo:run\n""",
)
|
[
"michael@elsdoerfer.com"
] |
michael@elsdoerfer.com
|
805535843094f8434fa9cfb5c22c4c9c99ef2185
|
2c2d2405929b026ac4de77d34538cec623dee5eb
|
/codes/SRN/models/modules/loss.py
|
844e09818490d48d1b9b375b12a65032b32c4075
|
[] |
no_license
|
greitzmann/DASR
|
9d709cf031561897722f1553842af05fca36855e
|
f85b22ada54344fd0d94ba31ae596427cb9b5c5b
|
refs/heads/master
| 2023-01-01T12:26:11.563140
| 2020-10-15T16:03:26
| 2020-10-15T16:03:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,636
|
py
|
import random
import torch
import torch.nn as nn
import sys
from PerceptualSimilarity.models import util as ps
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
    """Adversarial loss supporting the 'vanilla', 'lsgan' and 'wgan-gp' flavours."""

    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val

        kind = self.gan_type
        if kind == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif kind == 'lsgan':
            self.loss = nn.MSELoss()
        elif kind == 'wgan-gp':
            def wgan_loss(pred, target_is_real):
                # WGAN critic objective: maximise score on real, minimise on fake.
                return -pred.mean() if target_is_real else pred.mean()
            self.loss = wgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        """Return the loss target: a label tensor, or the bool itself for wgan-gp."""
        if self.gan_type == 'wgan-gp':
            return target_is_real
        fill = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill)

    def forward(self, input, target_is_real):
        return self.loss(input, self.get_target_label(input, target_is_real))
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class PerceptualLossLPIPS(nn.Module):
def __init__(self):
super(PerceptualLossLPIPS, self).__init__()
self.loss_network = ps.PerceptualLoss(use_gpu=torch.cuda.is_available())
def forward(self, x, y):
return self.loss_network.forward(x, y, normalize=True).mean()
class PerceptualLoss(nn.Module):
def __init__(self, rotations=False, flips=False):
super(PerceptualLoss, self).__init__()
self.loss = PerceptualLossLPIPS()
self.rotations = rotations
self.flips = flips
def forward(self, x, y):
if self.rotations:
k_rot = random.choice([-1, 0, 1])
x = torch.rot90(x, k_rot, [2, 3])
y = torch.rot90(y, k_rot, [2, 3])
if self.flips:
if random.choice([True, False]):
x = torch.flip(x, (2,))
y = torch.flip(y, (2,))
if random.choice([True, False]):
x = torch.flip(x, (3,))
y = torch.flip(y, (3,))
return self.loss(x, y)
def generator_loss(labels, wasserstein=False, weights=None):
if not isinstance(labels, list):
labels = (labels,)
if weights is None:
weights = [1.0 / len(labels)] * len(labels)
loss = 0.0
for label, weight in zip(labels, weights):
if wasserstein:
loss += weight * torch.mean(-label)
else:
loss += weight * torch.mean(-torch.log(label + 1e-8))
return loss
def discriminator_loss(reals, fakes, wasserstein=False, grad_penalties=None, weights=None):
if not isinstance(reals, list):
reals = (reals,)
if not isinstance(fakes, list):
fakes = (fakes,)
if weights is None:
weights = [1.0 / len(fakes)] * len(fakes)
loss = 0.0
if wasserstein:
if not isinstance(grad_penalties, list):
grad_penalties = (grad_penalties,)
for real, fake, weight, grad_penalty in zip(reals, fakes, weights, grad_penalties):
loss += weight * (-real.mean() + fake.mean() + grad_penalty)
else:
for real, fake, weight in zip(reals, fakes, weights):
loss += weight * (-torch.log(real + 1e-8).mean() - torch.log(1 - fake + 1e-8).mean())
return loss
if __name__ == '__main__':
a = PerceptualLossLPIPS()
|
[
"516488199@qq.com"
] |
516488199@qq.com
|
8838064973dcf235bd1744d1dadead87051a80ea
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_widest.py
|
8492170c13955906272fe657ccf1e56cec9420c8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from xai.brain.wordbase.adjectives._wide import _WIDE
#calss header
class _WIDEST(_WIDE, ):
def __init__(self,):
_WIDE.__init__(self)
self.name = "WIDEST"
self.specie = 'adjectives'
self.basic = "wide"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
0ee0fecb3d4df02cb0dc4970912acd258cfee73d
|
549f00e84cf77f08b2d72b3a298174143e23222e
|
/pytest_echo.py
|
025e4f09603e8814d066cc041fb8926ac1650558
|
[
"MIT"
] |
permissive
|
hugovk/pytest-echo
|
9a94c6a246ae1803dd6b391f56c35a7d0472f209
|
939793448e7d7e80a356aafc4dbb58bbedbe7e2c
|
refs/heads/master
| 2020-08-10T05:26:13.397571
| 2018-04-22T17:12:07
| 2018-04-22T17:12:07
| 214,268,306
| 0
| 0
|
MIT
| 2019-10-10T19:27:28
| 2019-10-10T19:27:28
| null |
UTF-8
|
Python
| false
| false
| 5,239
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import fnmatch
import os
from pprint import pformat
import pkg_resources
from pkg_resources import DistributionNotFound
__version__ = '1.6.0'
def get_installed_distributions():
"""
Return a list of installed Distribution objects.
"""
return [d for d in pkg_resources.working_set]
def get_attr(obj, attr, default='NOT FOUND'):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object):
... pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
>>> get_attr([0,1,2], '2')
2
>>> get_attr([0,1,(21, 22)], '2.1')
22
>>> get_attr({'key': 11}, 'key')
11
>>> get_attr({'key': {'key': 11}}, 'key.key')
11
"""
if '.' not in attr:
try:
if hasattr(obj, attr):
return getattr(obj, attr, default)
elif isinstance(obj, (list, tuple, set)):
return obj[int(attr)]
elif isinstance(obj, dict):
return obj[attr]
else:
return default
except Exception as e: # pragma: no cover
return str(e)
else:
L = attr.split('.')
return get_attr(get_attr(obj, L[0], default), '.'.join(L[1:]), default)
def get_module_attribute(path):
"""
Returns a attribute value base on it's full path.
The `attribute` can be either a module attribute (ie. os.path.curdir)
or a object attribute (ie. linecache.cache.__class__)
Warning: Be careful when use thi function as it load any module in the path
and this will execute any module's level code
:param path: full path to the attribute
:return:
>>> print(get_module_attribute('linecache.cache.__class__'))
<... 'dict'>
>>> print(get_module_attribute('os.path.curdir'))
'.'
>>> print(get_module_attribute('wrong'))
('Unable to load %s', 'wrong')
"""
parts = path.split('.')
parent = ""
pkg = None
try:
for i, part in enumerate(parts):
try:
if parent:
module_name = "%s.%s" % (parent, parts[i])
else:
module_name = parts[i]
pkg = __import__(module_name, fromlist=[parent])
parent = module_name
except ImportError:
if hasattr(pkg, part):
return pformat(get_attr(pkg, ".".join(parts[i:])))
raise Exception('Unable to load %s', path)
except Exception as e:
return str(e)
def get_env(var_name):
if '*' in var_name:
targets = [(key, value)
for key, value in os.environ.items()
if fnmatch.fnmatch(key, var_name)]
else:
targets = [(var_name, os.environ.get(var_name, "<not set>"))]
return targets
def get_version(package_name):
if '*' in package_name:
targets = [(i.key, i.version)
for i in get_installed_distributions()
if fnmatch.fnmatch(i.key, package_name)]
else:
targets = [(package_name, _get_version(package_name))]
return targets
def _get_version(package_name):
try:
import pkg_resources
return pkg_resources.require(package_name)[0].version
except (ImportError, AttributeError, TypeError, DistributionNotFound):
pass
try:
pkg = __import__(package_name)
except ImportError:
return '<unable to load package>'
for attr_name in ('get_version', '__version__', 'VERSION', 'version'):
if hasattr(pkg, attr_name):
attr = getattr(pkg, attr_name)
if callable(attr):
return attr()
else:
return attr
def pytest_report_header(config):
ret = []
if config.option.echo_envs:
ret.append("Environment:")
data = []
for k in config.option.echo_envs:
data.extend(get_env(k))
ret.append("\n".join([" %s: %s" % (k, v)
for k, v in sorted(data)]))
if config.option.echo_versions:
ret.append("Package version:")
data = []
for k in config.option.echo_versions:
data.extend(get_version(k))
ret.append("\n".join([" %s: %s" % (k, v)
for k, v in sorted(data)]))
if config.option.echo_attribues:
ret.append("Inspections:")
ret.append("\n".join([" %s: %s" % (k, get_module_attribute(k))
for k in config.option.echo_attribues]))
if ret:
return "\n".join(ret)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--echo-env', action='append', dest="echo_envs",
default=[], help="environment to print")
group.addoption('--echo-version', action='append', dest="echo_versions",
default=[], help="package version to print")
group.addoption('--echo-attr', action='append', dest="echo_attribues",
default=[], help="attribute to print (full path)")
|
[
"s.apostolico@gmail.com"
] |
s.apostolico@gmail.com
|
9c68f21e289ac893f938e83bb2be5f054a2a7561
|
2f6c3e78de825b14cc6d471ba231724d819b7436
|
/tasks/ogle.py
|
2c1ff53316fd09c5f8108a519f3f81fdfee981f0
|
[
"MIT"
] |
permissive
|
astrocatalogs/supernovae
|
3f685d447b56c741081acffc6de0c9818149bb47
|
9585d2ae053f15fa91ab5900b5ae962c6a508037
|
refs/heads/master
| 2023-03-12T12:19:01.300505
| 2023-03-10T16:45:53
| 2023-03-10T16:45:53
| 62,802,442
| 42
| 18
|
MIT
| 2023-03-14T20:39:37
| 2016-07-07T11:42:13
|
Python
|
UTF-8
|
Python
| false
| false
| 6,846
|
py
|
"""Import tasks for OGLE.
"""
import os
import re
from astrocats.catalog.utils import is_number, jd_to_mjd, pbar, uniq_cdl
from bs4 import BeautifulSoup, NavigableString, Tag
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_ogle(catalog):
task_str = catalog.get_current_task_str()
basenames = [
'transients', 'transients/2015', 'transients/2014b', 'transients/2014',
'transients/2013', 'transients/2012'
]
oglenames = []
ogleupdate = [True, False, False, False, False]
for b, bn in enumerate(pbar(basenames, task_str)):
if catalog.args.update and not ogleupdate[b]:
continue
filepath = os.path.join(catalog.get_current_task_repo(), 'OGLE-')
filepath += bn.replace('/', '-') + '-transients.html'
htmltxt = catalog.load_url(
'http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html',
filepath)
if not htmltxt:
continue
soup = BeautifulSoup(htmltxt, 'html5lib')
links = soup.findAll('a')
breaks = soup.findAll('br')
datalinks = []
datafnames = []
for a in links:
if a.has_attr('href'):
if '.dat' in a['href']:
datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn +
'/' + a['href'])
datafnames.append(
bn.replace('/', '-') + '-' + a['href'].replace('/',
'-'))
ec = -1
reference = 'OGLE-IV Transient Detection System'
refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
for bi, br in enumerate(pbar(breaks, task_str)):
sibling = br.nextSibling
if 'Ra,Dec=' in sibling:
line = sibling.replace('\n', '').split('Ra,Dec=')
name = line[0].strip()
ec += 1
if 'NOVA' in name or 'dupl' in name:
continue
if name in oglenames:
continue
oglenames.append(name)
name = catalog.add_entry(name)
mySibling = sibling.nextSibling
atelref = ''
claimedtype = ''
while 'Ra,Dec=' not in mySibling:
if isinstance(mySibling, NavigableString):
if not claimedtype and 'class=' in str(mySibling):
claimedtype = re.sub(r'\([^)]*\)', '',
str(mySibling).split('=')[-1])
claimedtype = claimedtype.replace('SN', '').strip()
if claimedtype == '-':
claimedtype = ''
if isinstance(mySibling, Tag):
atela = mySibling
if (atela and atela.has_attr('href') and
'astronomerstelegram' in atela['href']):
atelref = atela.contents[0].strip()
atelurl = atela['href']
mySibling = mySibling.nextSibling
if mySibling is None:
break
# nextSibling = sibling.nextSibling
# if ((isinstance(nextSibling, Tag) and
# nextSibling.has_attr('alt') and
# nextSibling.contents[0].strip() != 'NED')):
# radec = nextSibling.contents[0].strip().split()
# else:
# radec = line[-1].split()
# ra = radec[0]
# dec = radec[1]
fname = os.path.join(catalog.get_current_task_repo(),
'OGLE/') + datafnames[ec]
csvtxt = catalog.load_url(datalinks[ec], fname)
lcdat = csvtxt.splitlines()
sources = [
catalog.entries[name].add_source(
name=reference, url=refurl)
]
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
sources[0])
if atelref and atelref != 'ATel#----':
sources.append(catalog.entries[name].add_source(
name=atelref, url=atelurl))
sources = uniq_cdl(sources)
if name.startswith('OGLE'):
if name[4] == '-':
if is_number(name[5:9]):
catalog.entries[name].add_quantity(
SUPERNOVA.DISCOVER_DATE, name[5:9], sources)
else:
if is_number(name[4:6]):
catalog.entries[name].add_quantity(
SUPERNOVA.DISCOVER_DATE, '20' + name[4:6],
sources)
# RA and Dec from OGLE pages currently not reliable
# catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
# catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec,
# sources)
if claimedtype and claimedtype != '-':
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
claimedtype, sources)
elif ('SN' not in name and
SUPERNOVA.CLAIMED_TYPE not in catalog.entries[name]):
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
'Candidate', sources)
for row in lcdat:
row = row.split()
mjd = str(jd_to_mjd(Decimal(row[0])))
magnitude = row[1]
if float(magnitude) > 90.0:
continue
e_mag = row[2]
upperlimit = False
if e_mag == '-1' or float(e_mag) > 10.0:
e_mag = ''
upperlimit = True
catalog.entries[name].add_photometry(
time=mjd,
u_time='MJD',
band='I',
magnitude=magnitude,
e_magnitude=e_mag,
system='Vega',
source=sources,
upperlimit=upperlimit)
if catalog.args.update:
catalog.journal_entries()
if catalog.args.travis and bi >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
return
|
[
"guillochon@gmail.com"
] |
guillochon@gmail.com
|
ed5aaf4d9c069dfae5c52ce541ca6227e507404e
|
358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e
|
/fonts/tsvga_et4000_8x16.py
|
ec50d5db6c710d173aebde5e57c209dc19065ccd
|
[
"MIT"
] |
permissive
|
ccccmagicboy/st7735_mpy
|
d2de0046abd81978d5176dace45a40758377af82
|
b15f1bde69fbe6e0eb4931c57e71c136d8e7f024
|
refs/heads/master
| 2022-08-28T23:18:04.353733
| 2020-05-28T04:19:21
| 2020-05-28T04:19:21
| 254,869,035
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,759
|
py
|
"""converted from ..\fonts\TSVGA_ET4000_8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x60\xc6\x86\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc0\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\xe6\x66\x66\x6c\x78\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\x06\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\x6c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x7c\x38\x38\x7c\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x66\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
|
[
"cuiwei_cv@163.com"
] |
cuiwei_cv@163.com
|
f0d990d45a27fde720efb4dff618a7fd5ef391b2
|
8600ea155f279e5a8dfe5a1926038511f6b6a7ea
|
/sale_crm/wizard/__init__.py
|
7c43908361fc6be916f06247bd16776a8e4c1776
|
[] |
no_license
|
MarkNorgate/addons-EAD
|
c2fff89ab16fce3ba19fbe433ee5863705a6f4e5
|
840f28642b5d328e4b86839c413e5164622295a5
|
refs/heads/master
| 2020-04-23T22:11:00.164438
| 2015-07-22T12:24:53
| 2015-07-22T12:24:53
| 39,501,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import makesale
import makecase
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"mark.norgate@affinity-digital.com"
] |
mark.norgate@affinity-digital.com
|
54e305cc17a0c2232390a83ffbdeb8ed103b393e
|
219d7cf7cf00b778ff1a5709406c144fcf2132f3
|
/First Steps in Coding - Lab/07. Projects Creation.py
|
7ca4becdbaa4bc97c0ff32e779c16f999679df79
|
[] |
no_license
|
SilviaKoynova/Softuni-Programming-Basics-Python
|
e8e175419383815c65c4e110fdb2b752d940e887
|
0dfef0850f2cb8471dfee1af89f137be4e887cb8
|
refs/heads/main
| 2023-07-13T00:35:09.389302
| 2021-08-27T07:43:45
| 2021-08-27T07:43:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
name = input()
projects = int(input())
need_hours = projects * 3
print(f"The architect {name} will need {need_hours} hours to complete {projects} project/s.")
|
[
"noreply@github.com"
] |
SilviaKoynova.noreply@github.com
|
736887a4862a68ddb38a06f891def851858936db
|
9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c
|
/baomoicrawl/venv/Lib/site-packages/twisted/test/test_ftp_options.py
|
ef567bbb49bfce24ee4cb271b1a59b1a8730dd7a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler
|
b0fdedee2942a12d9f64dfed93f43802dc5ab340
|
87c8c07433466bbc43a24ea089f75baeb467c356
|
refs/heads/master
| 2022-11-27T21:36:33.917491
| 2020-08-10T23:24:42
| 2020-08-10T23:24:42
| 286,583,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,765
|
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.tap.ftp}.
"""
from twisted.trial.unittest import TestCase
from twisted.cred import credentials, error
from twisted.tap.ftp import Options
from twisted.python import versions
from twisted.python.filepath import FilePath
class FTPOptionsTests(TestCase):
"""
Tests for the command line option parser used for C{twistd ftp}.
"""
usernamePassword = (b'iamuser', b'thisispassword')
def setUp(self):
"""
Create a file with two users.
"""
self.filename = self.mktemp()
f = FilePath(self.filename)
f.setContent(b':'.join(self.usernamePassword))
self.options = Options()
def test_passwordfileDeprecation(self):
"""
The C{--password-file} option will emit a warning stating that
said option is deprecated.
"""
self.callDeprecated(
versions.Version("Twisted", 11, 1, 0),
self.options.opt_password_file, self.filename)
def test_authAdded(self):
"""
The C{--auth} command-line option will add a checker to the list of
checkers
"""
numCheckers = len(self.options['credCheckers'])
self.options.parseOptions(['--auth', 'file:' + self.filename])
self.assertEqual(len(self.options['credCheckers']), numCheckers + 1)
def test_authFailure(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that fails with L{UnauthorizedLogin} when
presented with credentials that are unknown to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
invalid = credentials.UsernamePassword(self.usernamePassword[0], 'fake')
return (checker.requestAvatarId(invalid)
.addCallbacks(
lambda ignore: self.fail("Wrong password should raise error"),
lambda err: err.trap(error.UnauthorizedLogin)))
def test_authSuccess(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that returns the avatar id when presented with credentials
that are known to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
correct = credentials.UsernamePassword(*self.usernamePassword)
return checker.requestAvatarId(correct).addCallback(
lambda username: self.assertEqual(username, correct.username)
)
|
[
"thuy4tbn99@gmail.com"
] |
thuy4tbn99@gmail.com
|
9562bc0b7e2dcc38f7a84b31462b6d5fd5598619
|
3c898b1aec7009110c63504d5a56e31914625d1b
|
/acrylamid/filters/rstx_youtube.py
|
1866872d0d44360221e12addf431c60a545739b3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
MagicSword/acrylamid
|
e294b151ed6305f37fc5a5fdd4f1f0fb999a22f7
|
6f34bc5fb2175af1103aec7a910ef48a6842de03
|
refs/heads/master
| 2021-01-16T21:30:58.564719
| 2012-06-22T16:00:50
| 2012-06-22T16:45:38
| 4,817,948
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
# -*- encoding: utf-8 -*-
#
# Copyright 2012 posativ <info@posativ.org>. All rights reserved.
# License: BSD Style, 2 clauses. see acrylamid/__init__.py
from docutils import nodes
from docutils.parsers.rst import Directive, directives
match = ['youtube', 'yt']
def align(argument):
return directives.choice(argument, ('left', 'center', 'right'))
class YouTube(Directive):
"""reStructuredText directive that creates an embed object to display
a video from Youtube (:options: are optional).
Usage example::
.. youtube:: ZPJlyRv_IGI
:start: 34
:align: center
:height: 1280
:width: 720
:ssl:
"""
required_arguments = 1
optional_arguments = 0
option_spec = {
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'border': directives.length_or_unitless,
'align': align,
'start': int,
'ssl': directives.flag,
}
has_content = False
def run(self):
alignments = {
'left': '0',
'center': '0 auto',
'right': '0 0 0 auto',
}
uri = ('https://' if 'ssl' in self.options else 'http://') \
+ 'www.youtube-nocookie.com/embed/' + self.arguments[0]
self.options['uri'] = uri
self.options['align'] = alignments[self.options.get('align', 'center')]
self.options.setdefault('width', '680px')
self.options.setdefault('height', '382px')
self.options.setdefault('border', 0)
self.options.setdefault('start', 0)
YT_EMBED = """<iframe width="%(width)s" height="%(height)s" src="%(uri)s" \
frameborder="%(border)s" style="display: block; margin: %(align)s;" \
start="%(start)i" class="video" allowfullscreen></iframe>"""
return [nodes.raw('', YT_EMBED % self.options, format='html')]
def makeExtension():
return YouTube
|
[
"info@posativ.org"
] |
info@posativ.org
|
b9cea96bd1fe04ff6d961295ea869a78c3e571e4
|
dfab6798ece135946aebb08f93f162c37dd51791
|
/core/luban/db/models.py
|
e30723a4165928648d1c38b81f47e476985bd1ca
|
[] |
no_license
|
yxqd/luban
|
405f5f7dcf09015d214079fe7e23d644332be069
|
00f699d15c572c8bf160516d582fa37f84ac2023
|
refs/heads/master
| 2020-03-20T23:08:45.153471
| 2012-05-18T14:52:43
| 2012-05-18T14:52:43
| 137,831,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# example base class of model
# from sqlalchemy.ext.declarative import declarative_base
# Base = declarative_base()
# XXX: thinking of use metaclass...
class ModelCollector:
def __new__(cls, name, bases, attributes, **kwds):
# the created class
created = super().__new__(cls, name, bases, attributes, **kwds)
model_registry.register(created)
return created
class ModelRegistry:
def __init__(self):
self.models = {}
return
def register(self, cls):
self.models[cls.__name__] = cls
return
def __getattr__(self, name):
return self.models[name]
model_registry = ModelRegistry()
# method to load all db models in a python sub-package
def loadModels(subpkg):
# the implementation just import all sub modules in the sub-pkg
# recursively
path = subpkg.__path__
import os
import pkgutil
prefix = subpkg.__name__ + '.'
for loader, module_name, is_pkg in pkgutil.walk_packages(path, prefix):
found = loader.find_module(module_name)
if not found:
print ("%s not found" % module_name)
else:
mod = found.load_module(module_name)
continue
return
# End of file
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.