blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
204b28dbe6e3e380bd5198a01b9c562dae452234 | d512a6953008941caa36d47dcddb0dd8e3758d84 | /train.py | 106dcf890e8e14cd700ac5f803821ccdf661d56a | [] | no_license | alibuda/CCKS_QA | 8dd541a51118cf5f9c168b909c14e3360aab351f | 27394f91a499731b85b31c785b50f7418354580d | refs/heads/master | 2020-04-20T05:14:19.780628 | 2018-08-12T09:13:34 | 2018-08-12T09:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | import tensorflow as tf
from read_utils import TextConverter, batch_generator,load_origin_data,val_samples_generator
import os
import argparse # 用于分析输入的超参数
def _str2bool(value):
    """Parse a boolean command-line value.

    argparse's ``type=bool`` is a well-known pitfall: ``bool('False')`` is
    True because any non-empty string is truthy, so ``--use_embedding False``
    would silently enable embeddings.  This parser accepts the usual textual
    spellings instead.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes')


def parseArgs(args):
    """
    Parse hyper-parameters.
    Args:
        args (list<str>): List of arguments.
    Returns:
        argparse.Namespace with one attribute per hyper-parameter below.
    """
    parser = argparse.ArgumentParser()
    test_args = parser.add_argument_group('test超参数')
    test_args.add_argument('--file_name', type=str, default='default', help='name of the model')
    test_args.add_argument('--batch_size', type=int, default=100, help='number of seqs in one batch')
    test_args.add_argument('--num_steps', type=int, default=100, help='length of one seq')
    test_args.add_argument('--hidden_size', type=int, default=128, help='size of hidden state of lstm')
    test_args.add_argument('--num_layers', type=int, default=2, help='number of lstm layers')
    # Boolean flags use _str2bool rather than the raw bool constructor (see above).
    test_args.add_argument('--use_embedding', type=_str2bool, default=False, help='whether to use embedding')
    test_args.add_argument('--embedding_size', type=int, default=128, help='size of embedding')
    test_args.add_argument('--learning_rate', type=float, default=0.001, help='learning_rate')
    test_args.add_argument('--train_keep_prob', type=float, default=0.7, help='dropout rate during training')
    test_args.add_argument('--max_steps', type=int, default=100000, help='max steps to train')
    test_args.add_argument('--save_every_n', type=int, default=100, help='save the model every n steps')
    test_args.add_argument('--log_every_n', type=int, default=20, help='log to the screen every n steps')
    test_args.add_argument('--fc_activation', type=str, default='sigmoid', help='funciton of activated')
    test_args.add_argument('--feats', type=str, default='all', help='features of query')
    test_args.add_argument('--batch_norm', type=_str2bool, default=False, help='standardization')
    test_args.add_argument('--op_method', type=str, default='adam', help='method of optimizer')
    test_args.add_argument('--checkpoint_path', type=str, default='models/thoth3/', help='checkpoint path')
    test_args.add_argument('--lr_decay', type=_str2bool, default=False, help='standardization')
    return parser.parse_args(args)
## thoth question-answering (QA) run configuration.
# The command line is hard-coded as a string, split into argv-style tokens,
# and parsed once at import time; FLAGS holds the effective hyper-parameters
# used by main() below.
args_in = '--file_name n26b200h400F ' \
          '--num_steps 26 ' \
          '--batch_size 200 ' \
          '--learning_rate 0.001 ' \
          '--hidden_size 400 ' \
          '--fc_activation sigmoid ' \
          '--op_method adam ' \
          '--max_steps 200000'.split()
FLAGS = parseArgs(args_in)
def main(_):
    """Train the QA model.

    Builds (or loads) the vocabulary and QA pickle caches, splits the data
    into train/validation sets (90/10), constructs the model, restores the
    latest checkpoint if one exists, and runs training.

    Args:
        _: unused positional argument supplied by tf.app.run().
    """
    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    converter_path = os.path.join(model_path, 'converter.pkl')
    qas_path = os.path.join(model_path, 'QAs.pkl')
    # Rebuild the caches unless BOTH pickle files already exist.
    # BUG FIX: the original condition `exists(a) or exists(b) is False` was
    # parsed as `exists(a) or (exists(b) is False)`, so the caches were
    # wrongly rebuilt from the raw data whenever converter.pkl existed.
    if not (os.path.exists(converter_path) and os.path.exists(qas_path)):
        print('词库文件不存在,创建...')
        QAs, text = load_origin_data('data/task3_train.txt')
        converter = TextConverter(text, 5000)
        converter.save_to_file(converter.vocab, converter_path)
        converter.save_to_file(QAs, qas_path)
    else:
        converter = TextConverter(filename=converter_path)
        QAs = converter.load_obj(filename=qas_path)

    QA_arrs = converter.QAs_to_arrs(QAs, FLAGS.num_steps)

    # 90/10 train/validation split.
    thres = int(len(QA_arrs) * 0.9)
    train_samples = QA_arrs[:thres]
    val_samples = QA_arrs[thres:]

    train_g = batch_generator(train_samples, FLAGS.batch_size)
    val_g = val_samples_generator(val_samples)

    print('use embeding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    from model3 import Model
    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=None)

    # Resume from the latest checkpoint, if any.
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g
                )
if __name__ == '__main__':
    # Hand control to tf.app.run(), which parses flags and invokes main().
    tf.app.run()
"zoulingwei@cyou-inc.com"
] | zoulingwei@cyou-inc.com |
2f08353be95a5c836ae59a52d53cd5a296acde31 | 78980891d3137810bf3a3c1bb229966b7f49f0dd | /data_structure/3/ll.py | f43ad13c667a531c3fbabe2aea2be2b7fd278900 | [] | no_license | miniyk2012/leetcode | 204927d3aefc9746070c1bf13abde517c6c16dc0 | 91ca9cd0df3c88fc7ef3c829dacd4d13f6b71ab1 | refs/heads/master | 2021-06-17T21:50:31.001111 | 2021-03-10T11:36:23 | 2021-03-10T11:36:23 | 185,042,818 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,961 | py | class LinkedList:
def __init__(self):
    # An empty linked list: no head node yet.
    self.head = None
def add(self, v):
    """Append value v at the tail of the list.

    When the list is empty a new head Node is created; otherwise the
    append is delegated to the head node's recursive add().
    """
    if self.head is None:
        self.head = Node(v)
    else:
        self.head.add(v)
def print(self):
    """Print every value in the list (no output for an empty list)."""
    head = self.head
    if head is not None:
        head.print()
def pophead(self):
    """Detach and return the current head node.

    Raises:
        Exception: if the list is empty.
    """
    node = self.head
    if node is None:
        raise Exception('链表为空')
    self.head = node.next
    node.next = None
    return node
def first(self, n):
    """Yield up to the first n values of the list, starting at the head."""
    for value in self.head.first(n):
        yield value
def length(self):
    """Return the number of nodes, counted recursively from the head."""
    if self.head is None:
        return 0
    # Wrap the remainder of the chain in a fresh list and recurse on it.
    rest = LinkedList()
    rest.head = self.head.next
    return 1 + rest.length()
def is_empty(self):
    """Return True when the list contains no nodes."""
    return self.head is None
class Node:
    """A node of a singly linked list: a value plus a pointer to its successor."""

    def __init__(self, v):
        self.v = v        # stored value
        self.next = None  # successor node, or None at the tail

    def print(self):
        """Print this node's value, then recursively the rest of the chain."""
        print(self.v, end=' ')
        if self.next is not None:
            self.next.print()

    def add(self, v):
        """Append v at the end of the chain via a recursive tail walk."""
        if self.next is None:
            self.next = Node(v)
        else:
            return self.next.add(v)

    def first(self, n):
        """Yield up to the first n values starting from this node."""
        yield self.v
        if n > 1 and self.next is not None:
            yield from self.next.first(n - 1)
def run():
    """Demo helper that simply announces itself on stdout."""
    print('run!!!')
def count_recursion(n):
    """Print the integers 1 .. n-1 in increasing order, one per line.

    (Originally implemented recursively; this iterative form produces
    exactly the same output without consuming stack frames.)
    """
    for i in range(1, n):
        print(i)
if __name__ == "__main__":
    # Ad-hoc demo exercising Node, LinkedList and count_recursion.
    n = Node(10)
    print(n.v, n.next)
    ll = LinkedList()  # []
    print('length: ', ll.length())
    ll.add(10)  # [10]
    ll.add(2)  # [10, 2]
    ll.add(-3)  # [10, 2, -3]
    ll.print()  # 10, 2 , -3
    print('length: ', ll.length())
    print()
    count_recursion(4)
    print('yield..')
    for x in ll.first(3):
        print(x)
    ll.pophead()
    ll.print()  # 2 , -3
    print()
    print('length: ', ll.length())
    """
    python 中 deque is a doubly linked list while List is just an array.
    """
"yk_ecust_2007@163.com"
] | yk_ecust_2007@163.com |
f5b0c0334a7c08a30029ae177be681688724e975 | f2a12bc1435111dd4e2afda02834bb3cd53ed8d8 | /vgc/__main__.py | 5f412a68a21c70a6b59104fb498a550e4c5fe13e | [
"MIT"
] | permissive | reedessick/video-game-camp | c83504d63637bc8c2c8f8b4067ec277233b74d4d | 09a324279c5ea9de87080f122fe27e1ef83d5373 | refs/heads/master | 2022-11-06T11:00:32.526460 | 2020-06-19T16:28:12 | 2020-06-19T16:28:12 | 272,338,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | """a basic menu from which users can navigate to different games they have designed.
"""
__author__ = 'Reed Essick (reed.essick@gmail.com)'
#-------------------------------------------------
import sys
import inspect
### non-standard libraries
import vgc
#-------------------------------------------------
def print_available_games(games):
    """Print one ' -- '-prefixed line per available game name."""
    for name in games:
        print(' -- ' + name)
def select_game(games):
    """interact with the command line to select a game

    Returns a (name, game) pair chosen by the user, or exits the process
    when no games exist or the user types "exit".

    NOTE(review): this is Python 2 code — raw_input() and games.items()[0]
    both fail on Python 3 (use input() and next(iter(games.items())) there).
    """
    Ngames = len(games)
    if Ngames == 0: ### no games available
        print('I\'m sorry, but there are no games currently available. Please design a game soon so we can get playing!')
        sys.exit(0)
    elif Ngames==1:
        # Only one choice: return it without prompting.
        print('There is only a single game available!')
        return games.items()[0]
    else:
        print('Please tell me which of the following games you would like to play!')
        print_available_games(games)
        selected = raw_input('')
        # Re-prompt until the user names an available game (or quits).
        while selected not in games: ### make sure the specified game is available
            print('I\'m sorry, but I did not understand. Please specify one of the following, or specify "exit" to quit')
            print_available_games(games)
            selected = raw_input('')
            if selected == 'exit': ### quit
                sys.exit(0)
        return selected, games[selected]
#------------------------
def main():
    """Entry point when the package is run as an executable.

    Discovers the available games, prompts the user to choose one, launches
    the chosen game, then exits. Individual games can also be launched
    directly by running their own modules inside vgc.
    """
    chosen_name, chosen_game = select_game(vgc.KNOWN_GAMES)
    print('---- Launching: %s -----' % chosen_name)
    chosen_game.game.main()
    sys.exit(0)
#-------------------------------------------------
# No __main__ guard: the menu runs as soon as this module is executed
# (this module is the package's __main__.py, run via `python -m vgc`).
main()
| [
"reed.essick@ligo.org"
] | reed.essick@ligo.org |
d89661ef6fd64a848d58e944a0359d58cf2e99c5 | da172d7a739ee31c760bb06a2b979037dda01613 | /ws/executors/wsgi_python.py | f9b7b669c752728cb333ef6fd1e85300accff134 | [] | no_license | jonhillmtl/web-server | b5f87e315364b699275140bf5ad1b8475529f96a | 4b6c123954dfdc07007a46dbf4799c2ba912c768 | refs/heads/master | 2020-03-27T20:31:31.247819 | 2018-09-04T20:12:15 | 2018-09-04T20:12:15 | 147,075,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | import os
import importlib.util
from .base import BaseRequestExecutor, InternalServerError
class WsgiPythonRequestExecutor(BaseRequestExecutor):
    """Executor that loads a wsgi-style Python module from the vhost config
    and delegates request handling to that module's execute() function."""

    def serve(self):
        """Import the configured wsgi module and run it against the request.

        Raises:
            InternalServerError: wrapping any error raised while locating,
                importing, or executing the wsgi module.
        """
        try:
            module_path = os.path.expanduser(self.vhost['wsgi_path'])
            spec = importlib.util.spec_from_file_location("wsgi", module_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return module.execute(self.request)
        except Exception as err:
            # The original handled FileNotFoundError and Exception in two
            # identical branches; a single handler covers both.
            raise InternalServerError(err)
"jon@jonhill.ca"
] | jon@jonhill.ca |
0d275f55f7cdff67f7a818f11670631d05e5e8d4 | dd4d1a61ec680a86d4b569490bf2a898ea0d7557 | /appengine/monorail/features/test/notify_test.py | f5afea90ade1943de15a4c53668d3f91e3be59ee | [
"BSD-3-Clause"
] | permissive | mcgreevy/chromium-infra | f1a68914b47bcbe3cd8a424f43741dd74fedddf4 | 09064105713603f7bf75c772e8354800a1bfa256 | refs/heads/master | 2022-10-29T23:21:46.894543 | 2017-05-16T06:22:50 | 2017-05-16T06:22:50 | 91,423,078 | 1 | 1 | BSD-3-Clause | 2022-10-01T18:48:03 | 2017-05-16T06:23:34 | Python | UTF-8 | Python | false | false | 12,412 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is govered by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Tests for notify.py."""
import json
import os
import unittest
import urllib
import webapp2
import webtest
from google.appengine.api import taskqueue
from google.appengine.ext import testbed
from features import notify
from framework import urls
from proto import tracker_pb2
from services import service_manager
from testing import fake
from testing import testing_helpers
from tracker import tracker_bizobj
def MakeTestIssue(project_id, local_id, owner_id, reporter_id, is_spam=False):
  """Return a tracker_pb2.Issue populated with the given test values.

  issue_id is derived as 1000 * project_id + local_id so ids from different
  test projects never collide.
  """
  issue = tracker_pb2.Issue()
  issue.project_id = project_id
  issue.local_id = local_id
  issue.issue_id = 1000 * project_id + local_id
  issue.owner_id = owner_id
  issue.reporter_id = reporter_id
  issue.is_spam = is_spam
  return issue
class SendNotificationTest(unittest.TestCase):
  """Tests for the helpers that enqueue notification tasks on the taskqueue."""

  def setUp(self):
    """Activate an AppEngine testbed with a taskqueue stub we can inspect."""
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_taskqueue_stub()
    self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    # Point the stub at the repo root so it can find the queue configuration.
    self.taskqueue_stub._root_path = os.path.dirname(
        os.path.dirname(os.path.dirname( __file__ )))

  def tearDown(self):
    self.testbed.deactivate()

  def testPrepareAndSendIssueChangeNotification(self):
    """An issue change enqueues exactly one notify-issue-change task."""
    notify.PrepareAndSendIssueChangeNotification(
        issue_id=78901,
        hostport='testbed-test.appspotmail.com',
        commenter_id=1,
        seq_num=0,
        old_owner_id=2,
        send_email=True)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.NOTIFY_ISSUE_CHANGE_TASK + '.do')
    self.assertEqual(1, len(tasks))

  def testPrepareAndSendIssueBlockingNotification(self):
    """A blocking-change task is enqueued only when blockers actually change."""
    # Empty delta: nothing to notify about, no task enqueued.
    notify.PrepareAndSendIssueBlockingNotification(
        issue_id=78901,
        hostport='testbed-test.appspotmail.com',
        delta_blocker_iids=[],
        commenter_id=1,
        send_email=True)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.NOTIFY_BLOCKING_CHANGE_TASK + '.do')
    self.assertEqual(0, len(tasks))

    # Non-empty delta: exactly one task enqueued.
    notify.PrepareAndSendIssueBlockingNotification(
        issue_id=78901,
        hostport='testbed-test.appspotmail.com',
        delta_blocker_iids=[2],
        commenter_id=1,
        send_email=True)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.NOTIFY_BLOCKING_CHANGE_TASK + '.do')
    self.assertEqual(1, len(tasks))

  def testSendIssueBulkChangeNotification_CommentOnly(self):
    """A bulk change with only a comment carries no amendments in the task."""
    # NOTE(review): users_by_id is an int here and in _Normal below;
    # confirm whether production callers pass a dict instead.
    notify.SendIssueBulkChangeNotification(
        issue_ids=[78901],
        hostport='testbed-test.appspotmail.com',
        old_owner_ids=[2],
        comment_text='comment',
        commenter_id=1,
        amendments=[],
        send_email=True,
        users_by_id=2)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.NOTIFY_BULK_CHANGE_TASK + '.do')
    self.assertEqual(1, len(tasks))
    params = dict(urllib.unquote_plus(item).split('=')
                  for item in tasks[0].payload.split('&'))
    self.assertEqual('comment', params['comment_text'])
    self.assertEqual('', params['amendments'])

  def testSendIssueBulkChangeNotification_Normal(self):
    """Amendments are serialized into the task payload, deduplicated."""
    notify.SendIssueBulkChangeNotification(
        issue_ids=[78901],
        hostport='testbed-test.appspotmail.com',
        old_owner_ids=[2],
        comment_text='comment',
        commenter_id=1,
        amendments=[
            tracker_bizobj.MakeStatusAmendment('New', 'Old'),
            tracker_bizobj.MakeLabelsAmendment(['Added'], ['Removed']),
            tracker_bizobj.MakeStatusAmendment('New', 'Old'),
            ],
        send_email=True,
        users_by_id=2)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.NOTIFY_BULK_CHANGE_TASK + '.do')
    self.assertEqual(1, len(tasks))
    params = dict(urllib.unquote_plus(item).split('=')
                  for item in tasks[0].payload.split('&'))
    self.assertEqual('comment', params['comment_text'])
    # The duplicate Status amendment appears only once in the payload.
    self.assertEqual(
        [' Status: New',
         ' Labels: -Removed Added'],
        params['amendments'].split('\n'))
class NotifyTaskHandleRequestTest(unittest.TestCase):
  """Tests for the task handlers that build and send notification emails.

  NOTE(review): this is Python 2 code (``404L`` long literal below), and
  unlike SendNotificationTest there is no tearDown() deactivating the
  testbed — confirm that is intended.
  """

  def setUp(self):
    """Activate a testbed, build fake services, and seed users/project/issue."""
    self.testbed = testbed.Testbed()
    self.testbed.activate()
    self.testbed.init_taskqueue_stub()
    self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    self.taskqueue_stub._root_path = os.path.dirname(
        os.path.dirname(os.path.dirname( __file__ )))
    self.services = service_manager.Services(
        user=fake.UserService(),
        usergroup=fake.UserGroupService(),
        project=fake.ProjectService(),
        config=fake.ConfigService(),
        issue=fake.IssueService(),
        issue_star=fake.IssueStarService(),
        features=fake.FeaturesService())
    # Users 1 and 3 are project members; user 2 is a non-member.
    self.services.user.TestAddUser('requester@example.com', 1)
    self.services.user.TestAddUser('user@example.com', 2)
    self.services.user.TestAddUser('member@example.com', 3)
    self.services.project.TestAddProject(
        'test-project', owner_ids=[1, 3],
        project_id=12345)
    self.issue1 = MakeTestIssue(
        project_id=12345, local_id=1, owner_id=2, reporter_id=1)
    self.services.issue.TestAddIssue(self.issue1)

  def VerifyParams(self, result, params):
    """Assert that the handler echoed back the request params we sent."""
    self.assertEqual(
        bool(params['send_email']), result['params']['send_email'])
    if 'issue_id' in params:
      self.assertEqual(params['issue_id'], result['params']['issue_id'])
    if 'issue_ids' in params:
      self.assertEqual([int(p) for p in params['issue_ids'].split(',')],
                       result['params']['issue_ids'])

  def testNotifyIssueChangeTask(self):
    """A normal issue change is handled and params are echoed back."""
    task = notify.NotifyIssueChangeTask(
        request=None, response=None, services=self.services)
    params = {'send_email': 1, 'issue_id': 12345001, 'seq': 0,
              'commenter_id': 2}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.VerifyParams(result, params)

  def testNotifyIssueChangeTask_spam(self):
    """No one is notified about changes to an issue flagged as spam."""
    issue = MakeTestIssue(
        project_id=12345, local_id=1, owner_id=1, reporter_id=1,
        is_spam=True)
    self.services.issue.TestAddIssue(issue)
    task = notify.NotifyIssueChangeTask(
        request=None, response=None, services=self.services)
    params = {'send_email': 0, 'issue_id': issue.issue_id, 'seq': 0,
              'commenter_id': 2}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.assertEquals(0, len(result['notified']))

  def testNotifyBlockingChangeTask(self):
    """A blocking change on a normal issue is handled and echoed back."""
    issue2 = MakeTestIssue(
        project_id=12345, local_id=2, owner_id=2, reporter_id=1)
    self.services.issue.TestAddIssue(issue2)
    task = notify.NotifyBlockingChangeTask(
        request=None, response=None, services=self.services)
    params = {
        'send_email': 1, 'issue_id': issue2.issue_id, 'seq': 0,
        'delta_blocker_iids': 2, 'commenter_id': 1}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.VerifyParams(result, params)

  def testNotifyBlockingChangeTask_spam(self):
    """No one is notified about blocking changes on a spam issue."""
    issue2 = MakeTestIssue(
        project_id=12345, local_id=2, owner_id=2, reporter_id=1,
        is_spam=True)
    self.services.issue.TestAddIssue(issue2)
    task = notify.NotifyBlockingChangeTask(
        request=None, response=None, services=self.services)
    params = {
        'send_email': 1, 'issue_id': issue2.issue_id, 'seq': 0,
        'delta_blocker_iids': 2, 'commenter_id': 1}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.assertEquals(0, len(result['notified']))

  def testNotifyBulkChangeTask(self):
    """Bulk changes email each recipient, obfuscating addresses for non-members."""
    issue2 = MakeTestIssue(
        project_id=12345, local_id=2, owner_id=2, reporter_id=1)
    issue2.cc_ids = [3]
    self.services.issue.TestAddIssue(issue2)
    task = notify.NotifyBulkChangeTask(
        request=None, response=None, services=self.services)
    params = {
        'send_email': 1, 'seq': 0,
        'issue_ids': '%d,%d' % (self.issue1.issue_id, issue2.issue_id),
        'old_owner_ids': '1,1', 'commenter_id': 1}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.VerifyParams(result, params)

    tasks = self.taskqueue_stub.get_filtered_tasks(
        url=urls.OUTBOUND_EMAIL_TASK + '.do')
    self.assertEqual(2, len(tasks))
    for task in tasks:
      task_params = json.loads(task.payload)
      # obfuscated email for non-members (u'\u2026' is the ellipsis char)
      if 'user' in task_params['to']:
        self.assertIn(u'\u2026', task_params['from_addr'])
      # Full email for members
      if 'member' in task_params['to']:
        self.assertNotIn(u'\u2026', task_params['from_addr'])

  def testNotifyBulkChangeTask_spam(self):
    """A spam issue in a bulk change is skipped; only the normal one notifies."""
    issue2 = MakeTestIssue(
        project_id=12345, local_id=2, owner_id=2, reporter_id=1,
        is_spam=True)
    self.services.issue.TestAddIssue(issue2)
    task = notify.NotifyBulkChangeTask(
        request=None, response=None, services=self.services)
    params = {
        'send_email': 1,
        'issue_ids': '%d,%d' % (self.issue1.issue_id, issue2.issue_id),
        'seq': 0,
        'old_owner_ids': '1,1', 'commenter_id': 1}
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        params=params,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.assertEquals(1, len(result['notified']))

  def testOutboundEmailTask_Normal(self):
    """We can send an email."""
    params = {
        'from_addr': 'requester@example.com',
        'reply_to': 'user@example.com',
        'to': 'user@example.com',
        'subject': 'Test subject'}
    body = json.dumps(params)
    request = webapp2.Request.blank('/', body=body)
    task = notify.OutboundEmailTask(
        request=request, response=None, services=self.services)
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        payload=body,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.assertEqual(params['from_addr'], result['sender'])
    self.assertEqual(params['subject'], result['subject'])

  def testOutboundEmailTask_MissingTo(self):
    """We skip emails that don't specify the To-line."""
    params = {
        'from_addr': 'requester@example.com',
        'reply_to': 'user@example.com',
        'subject': 'Test subject'}
    body = json.dumps(params)
    request = webapp2.Request.blank('/', body=body)
    task = notify.OutboundEmailTask(
        request=request, response=None, services=self.services)
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        payload=body,
        method='POST',
        services=self.services)
    result = task.HandleRequest(mr)
    self.assertEqual('Skipping because no "to" address found.', result['note'])
    self.assertNotIn('from_addr', result)

  def testOutboundEmailTask_BannedUser(self):
    """We don't send emails to banned users.."""
    params = {
        'from_addr': 'requester@example.com',
        'reply_to': 'user@example.com',
        'to': 'banned@example.com',
        'subject': 'Test subject'}
    body = json.dumps(params)
    request = webapp2.Request.blank('/', body=body)
    task = notify.OutboundEmailTask(
        request=request, response=None, services=self.services)
    mr = testing_helpers.MakeMonorailRequest(
        user_info={'user_id': 1},
        payload=body,
        method='POST',
        services=self.services)
    self.services.user.TestAddUser('banned@example.com', 404L, banned=True)
    result = task.HandleRequest(mr)
    self.assertEqual('Skipping because user is banned.', result['note'])
    self.assertNotIn('from_addr', result)
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
bbb7f19a4b8a1e5ac6d8ac9f63eccd448a578c6a | 6437a3a4a31ab9ad233d6b2d985beb50ed50de23 | /PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/sympy/printing/precedence.py | 80f40e0636cbeabdc7f063514567ed517e9dc830 | [] | no_license | sreyemnayr/jss-lost-mode-app | 03ddc472decde3c17a11294d8ee48b02f83b71e7 | 3ff4ba6fb13f4f3a4a98bfc824eace137f6aabaa | refs/heads/master | 2021-05-02T08:50:10.580091 | 2018-02-08T20:32:29 | 2018-02-08T20:32:29 | 120,813,623 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,069 | py | #\input texinfo
#\input texinfo
# (Removed: several hundred duplicated "#\input texinfo" comment lines —
# an extraction/concatenation artifact with no semantic content.)
"""A module providing information about the necessity of brackets"""
from __future__ import print_function, division
from sympy.core.function import _coeff_isneg
# Default precedence values for some basic types. Higher values bind more
# tightly: a printer parenthesizes a subexpression whose precedence is lower
# than that of its surrounding context ("Atom" never needs parentheses).
PRECEDENCE = {
    "Lambda": 1,
    "Xor": 10,
    "Or": 20,
    "And": 30,
    "Relational": 35,
    "Add": 40,
    "Mul": 50,
    "Pow": 60,
    "Not": 100,
    "Atom": 1000
}
# A dictionary assigning precedence values to certain classes. These values are
# treated like they were inherited, so not every single class has to be named
# here: precedence() below walks the class's MRO and uses the first match.
PRECEDENCE_VALUES = {
    "Equivalent": PRECEDENCE["Xor"],
    "Xor": PRECEDENCE["Xor"],
    "Or": PRECEDENCE["Or"],
    "And": PRECEDENCE["And"],
    "Add": PRECEDENCE["Add"],
    "Pow": PRECEDENCE["Pow"],
    "Relational": PRECEDENCE["Relational"],
    "Sub": PRECEDENCE["Add"],
    "Not": PRECEDENCE["Not"],
    "factorial": PRECEDENCE["Pow"],
    "factorial2": PRECEDENCE["Pow"],
    "NegativeInfinity": PRECEDENCE["Add"],
    "MatAdd": PRECEDENCE["Add"],
    "MatMul": PRECEDENCE["Mul"],
    "HadamardProduct": PRECEDENCE["Mul"]
}
# Sometimes it's not enough to assign a fixed precedence value to a
# class. Then a function can be inserted in this dictionary that takes
# an instance of this class as argument and returns the appropriate
# precedence value.
# Precedence functions
def precedence_Mul(item):
    """A Mul with a negative leading coefficient prints like an Add
    (so it gets parenthesized where a sum would)."""
    return PRECEDENCE["Add"] if _coeff_isneg(item) else PRECEDENCE["Mul"]
def precedence_Rational(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Mul"]
def precedence_Integer(item):
if item.p < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_Float(item):
if item < 0:
return PRECEDENCE["Add"]
return PRECEDENCE["Atom"]
def precedence_PolyElement(item):
if item.is_generator:
return PRECEDENCE["Atom"]
elif item.is_ground:
return precedence(item.coeff(1))
elif item.is_term:
return PRECEDENCE["Mul"]
else:
return PRECEDENCE["Add"]
def precedence_FracElement(item):
if item.denom == 1:
return precedence_PolyElement(item.numer)
else:
return PRECEDENCE["Mul"]
PRECEDENCE_FUNCTIONS = {
"Integer": precedence_Integer,
"Mul": precedence_Mul,
"Rational": precedence_Rational,
"Float": precedence_Float,
"PolyElement": precedence_PolyElement,
"FracElement": precedence_FracElement,
}
def precedence(item):
"""
Returns the precedence of a given object.
"""
if hasattr(item, "precedence"):
return item.precedence
try:
mro = item.__class__.__mro__
except AttributeError:
return PRECEDENCE["Atom"]
for i in mro:
n = i.__name__
if n in PRECEDENCE_FUNCTIONS:
return PRECEDENCE_FUNCTIONS[n](item)
elif n in PRECEDENCE_VALUES:
return PRECEDENCE_VALUES[n]
return PRECEDENCE["Atom"]
| [
"ryanmeyersweb@gmail.com"
] | ryanmeyersweb@gmail.com |
34106f33d7f4aec21edf9b7e288d4621593a29cb | 2f0aa66e14c6595289f6a0de2bdf71e9922052a7 | /nextApi/invitation/urls.py | b71dc47d9815d9986fa4dd3305973670cfc11d12 | [] | no_license | aimethierry/NextApi | 8f83a2b0f499fdf5118eb930baa051584cfd9aa5 | 90884ee6d900ce71116b40276dda0e97bec0b521 | refs/heads/master | 2022-12-11T09:03:54.981284 | 2020-09-19T12:40:36 | 2020-09-19T12:40:36 | 296,866,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
from .views import InvitationView
urlpatterns = [
path(r'createInvitation/', InvitationView.as_view()),
] | [
"aime.thierry97@gmail.com"
] | aime.thierry97@gmail.com |
54e88f8afdc0c2207e7b3a33889e5d54e6ef2ea2 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0816-Ambiguous-Coordinates/soln.py | 6af316ab69487b3c7557024a2af3c249b1bc088c | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 1,079 | py | class Solution:
def ambiguousCoordinates(self, S):
"""
:type S: str
:rtype: List[str]
"""
def valid(x):
if '.' not in x:
return str(int(x)) == x
else:
idx = x.find('.')
int_part, frac_part = x[:idx], x[idx + 1:]
if len(int_part) > 1 and int_part[0] == '0':
return False
if len(frac_part) > 0 and frac_part[-1] == '0':
return False
return True
S = S[1:-1]
n = len(S)
ans = []
for i in range(1, n):
left, right = S[:i], S[i:]
for x in [left] + ['{}.{}'.format(left[:k], left[k:]) for k in range(1, len(left))]:
if not valid(x):
continue
for y in [right] + ['{}.{}'.format(right[:j], right[j:]) for j in range(1, len(right))]:
if valid(y):
ans.append('({}, {})'.format(x, y))
# print(valid("1.23"))
return ans
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
039a0870e48a245cf17235ef7fc5554fe7700500 | 6671be3a542925342379d5f6fc691acfebbe281f | /discounts/src/consumer/sqs.py | 4585bcd66519608533b7fb52fb144fef9dd70dc4 | [
"Apache-2.0"
] | permissive | dalmarcogd/mobstore | e79b479b39474873043345b70f7e972f304c1586 | 0b542b9267771a1f4522990d592028dc30ee246f | refs/heads/main | 2023-04-29T22:27:20.344929 | 2021-05-18T12:00:00 | 2021-05-18T12:00:00 | 365,539,054 | 0 | 0 | Apache-2.0 | 2021-05-17T23:22:58 | 2021-05-08T14:46:34 | Go | UTF-8 | Python | false | false | 1,391 | py | import json
import logging
from typing import Callable, Dict, List
import boto3
from src import settings
# Module-level SQS client, configured entirely from application settings.
# NOTE(review): endpoint_url is set explicitly -- presumably to point at a
# local emulator (e.g. localstack) in development; confirm production config
# leaves it as the real AWS endpoint.
_sqs = boto3.client('sqs', region_name=settings.AWS_REGION,
                    aws_access_key_id=settings.AWS_ACCESS_KEY,
                    aws_secret_access_key=settings.AWS_SECRET_KEY,
                    endpoint_url=settings.AWS_ENDPOINT)
def start_pool(queue: str, handler: Callable):
    """Poll `queue` forever, dispatching each message body to `handler`.

    Messages are long-polled (2s wait) one at a time. A message is deleted
    only after `handler` returns without raising, so a failed message becomes
    visible again after its visibility timeout and can be retried (or routed
    to a dead-letter queue by queue policy).

    :param queue: SQS queue URL to consume from.
    :param handler: callable invoked with the JSON-decoded message body (Dict).
    """
    while True:
        try:
            response = _sqs.receive_message(
                QueueUrl=queue,
                MaxNumberOfMessages=1,
                MessageAttributeNames=[
                    'All'
                ],
                WaitTimeSeconds=2
            )
            # Process each message independently so one bad message does not
            # abort the rest of the batch.
            for message in response.get('Messages', []):
                try:
                    receipt_handle: str = message.get('ReceiptHandle')
                    body: Dict = json.loads(message.get('Body'))
                    handler(body)
                    # Delete only after successful handling.
                    _sqs.delete_message(QueueUrl=queue,
                                        ReceiptHandle=receipt_handle)
                except Exception as e:
                    logging.error(f'[sqs] failed to process message -> {e}')
        except Exception as exc:
            logging.error(f'[sqs] failed to receive from queue -> {exc}')
| [
"dalmarco.gd@gmail.com"
] | dalmarco.gd@gmail.com |
c905595be4974b75bdd595264c0e4286ffc165a2 | 2a3743ced45bd79826dcdc55f304da049f627f1b | /venv/lib/python3.7/site-packages/jedi/third_party/typeshed/third_party/3.5/contextvars.pyi | ab2ae9e5fabf3a9ae486f509156f09fc5fa1e70a | [
"MIT",
"Apache-2.0"
] | permissive | Dimasik007/Deribit_funding_rate_indicator | 12cc8cd7c0be564d6e34d9eae91940c62492ae2a | 3251602ae5249069489834f9afb57b11ff37750e | refs/heads/master | 2023-05-26T10:14:20.395939 | 2019-08-03T11:35:51 | 2019-08-03T11:35:51 | 198,705,946 | 5 | 3 | MIT | 2023-05-22T22:29:24 | 2019-07-24T20:32:19 | Python | UTF-8 | Python | false | false | 1,130 | pyi | from typing import Any, Callable, ClassVar, Generic, Iterator, Mapping, TypeVar, Union
# Generic value type carried by a ContextVar.
_T = TypeVar('_T')
# Stub for contextvars.ContextVar (typeshed, Python 3.5 backport package).
class ContextVar(Generic[_T]):
    def __init__(self, name: str, *, default: _T = ...) -> None: ...
    @property
    def name(self) -> str: ...
    def get(self, default: _T = ...) -> _T: ...
    def set(self, value: _T) -> Token[_T]: ...
    def reset(self, token: Token[_T]) -> None: ...
# Stub for the token returned by ContextVar.set(); passed back to reset().
class Token(Generic[_T]):
    @property
    def var(self) -> ContextVar[_T]: ...
    @property
    def old_value(self) -> Any: ...  # returns either _T or MISSING, but that's hard to express
    MISSING: ClassVar[object]
# Snapshot the current context (stub).
def copy_context() -> Context: ...
# It doesn't make sense to make this generic, because for most Contexts each ContextVar will have
# a different value.
# Stub for contextvars.Context: a mapping from ContextVar to its value.
class Context(Mapping[ContextVar[Any], Any]):
    def __init__(self) -> None: ...
    def run(self, callable: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: ...
    def copy(self) -> Context: ...
    def __getitem__(self, key: ContextVar[Any]) -> Any: ...
    def __iter__(self) -> Iterator[ContextVar[Any]]: ...
    def __len__(self) -> int: ...
| [
"dmitriy00vn@gmail.com"
] | dmitriy00vn@gmail.com |
c40e2fcb4e20ddf289e9e4beb5613e83b69cced8 | c6b9b9f2fbc6c62e7a86b02718954661af3c564f | /configs/flownet2/flownet2cs_8x1_sfine_flyingthings3d_subset_384x768.py | ca4db07d952781b13f83d38c9d6347781425c2bc | [
"Apache-2.0"
] | permissive | open-mmlab/mmflow | a90ff072805ac79cbc0b277baded1e74d25cccf0 | 9fb1d2f1bb3de641ddcba0dd355064b6ed9419f4 | refs/heads/master | 2023-05-22T05:19:48.986601 | 2023-01-10T16:05:18 | 2023-01-10T16:05:18 | 428,493,460 | 808 | 110 | Apache-2.0 | 2023-09-05T13:19:38 | 2021-11-16T02:42:41 | Python | UTF-8 | Python | false | false | 374 | py | _base_ = [
'../_base_/models/flownet2/flownet2cs.py',
'../_base_/datasets/flyingthings3d_subset_384x768.py',
'../_base_/schedules/schedule_s_fine.py', '../_base_/default_runtime.py'
]
# Train on FlyingChairs and finetune on FlyingThings3D subset
load_from = 'https://download.openmmlab.com/mmflow/flownet2/flownet2cs_8x1_slong_flyingchairs_384x448.pth' # noqa
| [
"meowzheng@outlook.com"
] | meowzheng@outlook.com |
4ab5b11e8bc7b4e9791245ba6efa0070a7fe2960 | a28709c421e7f5db9af18476216abe7f41ed64cf | /frameworks/pytorch/examples/3_multi_outputs.py | 9906ee9f5a410c22127a65cb4c7695e681695832 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | microsoft/antares | 8269f93418306fdea14f89032bc861fd7cdf6b24 | 86317b035043daaae4f8bd8bb1bb3b8d1b9f648d | refs/heads/v0.3.x | 2023-08-19T11:18:47.174186 | 2023-08-09T09:02:29 | 2023-08-09T09:02:29 | 274,578,755 | 262 | 37 | NOASSERTION | 2023-09-06T22:19:51 | 2020-06-24T04:58:46 | C++ | UTF-8 | Python | false | false | 906 | py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from antares_core.frameworks.pytorch.custom_op import CustomOp
# Pick GPU when available; the Antares op is tuned for whichever device is used.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dtype = torch.float32
# Common tensor-construction options for both inputs.
kwargs = {'dtype': dtype,
          'device': device,
          'requires_grad': False}
input0 = torch.ones(1024 * 512, **kwargs)
input1 = torch.ones(1024 * 512, **kwargs)
# Build a custom op with three named outputs (sum, exp, and their combination),
# tune it for up to 100 steps (cached), then emit the compiled kernel.
custom_op = CustomOp(ir='output0[N] = input0[N] + input1[N]; output1[N] = input0[N].call(`exp`); output2[N] = input1[N] + output1[N];', extra_outputs=['output0', 'output1', 'output2'], input_orders={'input0': input0, 'input1': input1}, device=device).tune(step=100, use_cache=True, timeout=600).emit()
result = custom_op(input0, input1)
# NOTE(review): three outputs are declared but only two names are printed --
# presumably intentional for brevity; confirm against the example's docs.
print('The result of tensor `%s, %s` is:\n%s' % (custom_op.output_names[0], custom_op.output_names[1], result))
| [
"noreply@github.com"
] | microsoft.noreply@github.com |
1e86280800dcaf3dbbd555ebced79e54636ff5b1 | 0139bdde50d922893e718221a69e1ca4cb89757d | /wssh/examples/cryptography-1.5/tests/hazmat/backends/test_openssl.py | 4ec8d84e448b6d1c1dc5b07e82e87821d8a16584 | [
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | permissive | nuaays/Miscellaneous_Scripts | 79adc5d4a639f1c95d5206447593f89a813d2e06 | 803a3b30e8848bbcbce58eb12f9b25a12060a437 | refs/heads/master | 2021-01-10T05:46:24.227613 | 2017-08-04T02:30:18 | 2017-08-04T02:30:18 | 48,648,489 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,735 | py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import datetime
import itertools
import os
import subprocess
import sys
import textwrap
import pytest
from cryptography import utils, x509
from cryptography.exceptions import InternalError, _Reasons
from cryptography.hazmat.backends.interfaces import RSABackend
from cryptography.hazmat.backends.openssl.backend import (
Backend, backend
)
from cryptography.hazmat.backends.openssl.ec import _sn_to_elliptic_curve
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import dsa, ec, padding
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC, CTR
from ..primitives.fixtures_dsa import DSA_KEY_2048
from ..primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from ..primitives.test_ec import _skip_curve_unsupported
from ...doubles import (
DummyAsymmetricPadding, DummyCipherAlgorithm, DummyHashAlgorithm, DummyMode
)
from ...test_x509 import _load_cert
from ...utils import load_vectors_from_file, raises_unsupported_algorithm
def skip_if_libre_ssl(openssl_version):
    """Skip the current test when the version string identifies LibreSSL,
    whose RAND_bytes cannot be replaced by the osrandom engine."""
    is_libre = u'LibreSSL' in openssl_version
    if not is_libre:
        return
    pytest.skip("LibreSSL hard-codes RAND_bytes to use arc4random.")
class TestLibreSkip(object):
    """Unit tests for the skip_if_libre_ssl() helper itself."""
    def test_skip_no(self):
        # Non-LibreSSL version strings must not trigger a skip.
        assert skip_if_libre_ssl(u"OpenSSL 1.0.2h 3 May 2016") is None
    def test_skip_yes(self):
        # LibreSSL version strings raise pytest's skip exception.
        with pytest.raises(pytest.skip.Exception):
            skip_if_libre_ssl(u"LibreSSL 2.1.6")
class DummyMGF(object):
    """Stand-in MGF object: carries only the attribute the backend inspects,
    so padding with an unknown MGF can be tested."""
    _salt_length = 0
class TestOpenSSL(object):
    """Tests for plumbing specific to the OpenSSL backend: version string,
    cipher registry, error-queue handling and bignum conversion helpers."""
    def test_backend_exists(self):
        assert backend
    def test_openssl_version_text(self):
        """
        This test checks the value of OPENSSL_VERSION_TEXT.
        Unfortunately, this define does not appear to have a
        formal content definition, so for now we'll test to see
        if it starts with OpenSSL or LibreSSL as that appears
        to be true for every OpenSSL-alike.
        """
        assert (
            backend.openssl_version_text().startswith("OpenSSL") or
            backend.openssl_version_text().startswith("LibreSSL")
        )
    def test_supports_cipher(self):
        assert backend.cipher_supported(None, None) is False
    def test_aes_ctr_always_available(self):
        # AES CTR should always be available, even in 1.0.0.
        assert backend.cipher_supported(AES(b"\x00" * 16),
                                        CTR(b"\x00" * 16)) is True
    def test_register_duplicate_cipher_adapter(self):
        with pytest.raises(ValueError):
            backend.register_cipher_adapter(AES, CBC, None)
    @pytest.mark.parametrize("mode", [DummyMode(), None])
    def test_nonexistent_cipher(self, mode):
        # A fresh Backend with an adapter that returns NULL must surface
        # the unsupported-cipher error at encryptor() time.
        b = Backend()
        b.register_cipher_adapter(
            DummyCipherAlgorithm,
            type(mode),
            lambda backend, cipher, mode: backend._ffi.NULL
        )
        cipher = Cipher(
            DummyCipherAlgorithm(), mode, backend=b,
        )
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            cipher.encryptor()
    def test_openssl_assert(self):
        backend.openssl_assert(True)
        with pytest.raises(InternalError):
            backend.openssl_assert(False)
    def test_consume_errors(self):
        # Seed the OpenSSL error queue, then verify _consume_errors drains it.
        for i in range(10):
            backend._lib.ERR_put_error(backend._lib.ERR_LIB_EVP, 0, 0,
                                       b"test_openssl.py", -1)
        assert backend._lib.ERR_peek_error() != 0
        errors = backend._consume_errors()
        assert backend._lib.ERR_peek_error() == 0
        assert len(errors) == 10
    def test_ssl_ciphers_registered(self):
        meth = backend._lib.TLSv1_method()
        ctx = backend._lib.SSL_CTX_new(meth)
        assert ctx != backend._ffi.NULL
        backend._lib.SSL_CTX_free(ctx)
    def test_evp_ciphers_registered(self):
        cipher = backend._lib.EVP_get_cipherbyname(b"aes-256-cbc")
        assert cipher != backend._ffi.NULL
    def test_error_strings_loaded(self):
        # returns a value in a static buffer
        err = backend._lib.ERR_error_string(101183626, backend._ffi.NULL)
        assert backend._ffi.string(err) == (
            b"error:0607F08A:digital envelope routines:EVP_EncryptFinal_ex:"
            b"data not multiple of block length"
        )
    def test_unknown_error_in_cipher_finalize(self):
        # An unrelated error left on the queue must turn into InternalError.
        cipher = Cipher(AES(b"\0" * 16), CBC(b"\0" * 16), backend=backend)
        enc = cipher.encryptor()
        enc.update(b"\0")
        backend._lib.ERR_put_error(0, 0, 1,
                                   b"test_openssl.py", -1)
        with pytest.raises(InternalError):
            enc.finalize()
    def test_large_key_size_on_new_openssl(self):
        parameters = dsa.generate_parameters(2048, backend)
        param_num = parameters.parameter_numbers()
        assert utils.bit_length(param_num.p) == 2048
        parameters = dsa.generate_parameters(3072, backend)
        param_num = parameters.parameter_numbers()
        assert utils.bit_length(param_num.p) == 3072
    def test_int_to_bn(self):
        value = (2 ** 4242) - 4242
        bn = backend._int_to_bn(value)
        assert bn != backend._ffi.NULL
        bn = backend._ffi.gc(bn, backend._lib.BN_free)
        assert bn
        assert backend._bn_to_int(bn) == value
    def test_int_to_bn_inplace(self):
        # Writing into a caller-supplied BIGNUM must return the same pointer.
        value = (2 ** 4242) - 4242
        bn_ptr = backend._lib.BN_new()
        assert bn_ptr != backend._ffi.NULL
        bn_ptr = backend._ffi.gc(bn_ptr, backend._lib.BN_free)
        bn = backend._int_to_bn(value, bn_ptr)
        assert bn == bn_ptr
        assert backend._bn_to_int(bn_ptr) == value
    def test_bn_to_int(self):
        bn = backend._int_to_bn(0)
        assert backend._bn_to_int(bn) == 0
    def test_actual_osrandom_bytes(self, monkeypatch):
        # RAND_bytes must be served by os.urandom via the osrandom engine;
        # LibreSSL cannot swap its RNG, so skip there.
        skip_if_libre_ssl(backend.openssl_version_text())
        sample_data = (b"\x01\x02\x03\x04" * 4)
        length = len(sample_data)
        def notrandom(size):
            assert size == length
            return sample_data
        monkeypatch.setattr(os, "urandom", notrandom)
        buf = backend._ffi.new("char[]", length)
        backend._lib.RAND_bytes(buf, length)
        assert backend._ffi.buffer(buf)[0:length] == sample_data
class TestOpenSSLRandomEngine(object):
    """Tests for activating/deactivating the osrandom RAND engine. The
    default RAND engine is process-global, so setup/teardown restore it."""
    def setup(self):
        # The default RAND engine is global and shared between
        # tests. We make sure that the default engine is osrandom
        # before we start each test and restore the global state to
        # that engine in teardown.
        current_default = backend._lib.ENGINE_get_default_RAND()
        name = backend._lib.ENGINE_get_name(current_default)
        assert name == backend._binding._osrandom_engine_name
    def teardown(self):
        # we need to reset state to being default. backend is a shared global
        # for all these tests.
        backend.activate_osrandom_engine()
        current_default = backend._lib.ENGINE_get_default_RAND()
        name = backend._lib.ENGINE_get_name(current_default)
        assert name == backend._binding._osrandom_engine_name
    @pytest.mark.skipif(sys.executable is None,
                        reason="No Python interpreter available.")
    def test_osrandom_engine_is_default(self, tmpdir):
        # Run a fresh interpreter and have it print its default RAND engine.
        engine_printer = textwrap.dedent(
            """
            import sys
            from cryptography.hazmat.backends.openssl.backend import backend
            e = backend._lib.ENGINE_get_default_RAND()
            name = backend._lib.ENGINE_get_name(e)
            sys.stdout.write(backend._ffi.string(name).decode('ascii'))
            res = backend._lib.ENGINE_free(e)
            assert res == 1
            """
        )
        engine_name = tmpdir.join('engine_name')
        # If we're running tests via ``python setup.py test`` in a clean
        # environment then all of our dependencies are going to be installed
        # into either the current directory or the .eggs directory. However the
        # subprocess won't know to activate these dependencies, so we'll get it
        # to do so by passing our entire sys.path into the subprocess via the
        # PYTHONPATH environment variable.
        env = os.environ.copy()
        env["PYTHONPATH"] = os.pathsep.join(sys.path)
        with engine_name.open('w') as out:
            subprocess.check_call(
                [sys.executable, "-c", engine_printer],
                env=env,
                stdout=out,
                stderr=subprocess.PIPE,
            )
        osrandom_engine_name = backend._ffi.string(
            backend._binding._osrandom_engine_name
        )
        assert engine_name.read().encode('ascii') == osrandom_engine_name
    def test_osrandom_sanity_check(self):
        # This test serves as a check against catastrophic failure.
        buf = backend._ffi.new("char[]", 500)
        res = backend._lib.RAND_bytes(buf, 500)
        assert res == 1
        assert backend._ffi.buffer(buf)[:] != "\x00" * 500
    def test_activate_osrandom_no_default(self):
        # Deactivate, confirm there is no default engine, then re-activate.
        backend.activate_builtin_random()
        e = backend._lib.ENGINE_get_default_RAND()
        assert e == backend._ffi.NULL
        backend.activate_osrandom_engine()
        e = backend._lib.ENGINE_get_default_RAND()
        name = backend._lib.ENGINE_get_name(e)
        assert name == backend._binding._osrandom_engine_name
        res = backend._lib.ENGINE_free(e)
        assert res == 1
    def test_activate_builtin_random(self):
        e = backend._lib.ENGINE_get_default_RAND()
        assert e != backend._ffi.NULL
        name = backend._lib.ENGINE_get_name(e)
        assert name == backend._binding._osrandom_engine_name
        res = backend._lib.ENGINE_free(e)
        assert res == 1
        backend.activate_builtin_random()
        e = backend._lib.ENGINE_get_default_RAND()
        assert e == backend._ffi.NULL
    def test_activate_builtin_random_already_active(self):
        # Calling twice must be a no-op, not an error.
        backend.activate_builtin_random()
        e = backend._lib.ENGINE_get_default_RAND()
        assert e == backend._ffi.NULL
        backend.activate_builtin_random()
        e = backend._lib.ENGINE_get_default_RAND()
        assert e == backend._ffi.NULL
    def test_activate_osrandom_already_default(self):
        # Re-activating while already default must keep the osrandom engine.
        e = backend._lib.ENGINE_get_default_RAND()
        name = backend._lib.ENGINE_get_name(e)
        assert name == backend._binding._osrandom_engine_name
        res = backend._lib.ENGINE_free(e)
        assert res == 1
        backend.activate_osrandom_engine()
        e = backend._lib.ENGINE_get_default_RAND()
        name = backend._lib.ENGINE_get_name(e)
        assert name == backend._binding._osrandom_engine_name
        res = backend._lib.ENGINE_free(e)
        assert res == 1
class TestOpenSSLRSA(object):
    """RSA backend-capability tests: parameter validation, padding support
    matrix (PKCS1v15 / PSS / OAEP), and unsupported hash/MGF combinations."""
    def test_generate_rsa_parameters_supported(self):
        # Public exponent must be odd and > 1; key size must be >= 512 bits.
        assert backend.generate_rsa_parameters_supported(1, 1024) is False
        assert backend.generate_rsa_parameters_supported(4, 1024) is False
        assert backend.generate_rsa_parameters_supported(3, 1024) is True
        assert backend.generate_rsa_parameters_supported(3, 511) is False
    def test_generate_bad_public_exponent(self):
        with pytest.raises(ValueError):
            backend.generate_rsa_private_key(public_exponent=1, key_size=2048)
        with pytest.raises(ValueError):
            backend.generate_rsa_private_key(public_exponent=4, key_size=2048)
    def test_cant_generate_insecure_tiny_key(self):
        with pytest.raises(ValueError):
            backend.generate_rsa_private_key(public_exponent=65537,
                                             key_size=511)
        with pytest.raises(ValueError):
            backend.generate_rsa_private_key(public_exponent=65537,
                                             key_size=256)
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_non_sha1_pss_mgf1_hash_algorithm_on_old_openssl(self):
        # Pre-1.0.1 OpenSSL only supports SHA1 as the PSS MGF1 hash.
        private_key = RSA_KEY_512.private_key(backend)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
            private_key.signer(
                padding.PSS(
                    mgf=padding.MGF1(
                        algorithm=hashes.SHA256(),
                    ),
                    salt_length=padding.PSS.MAX_LENGTH
                ),
                hashes.SHA1()
            )
        public_key = private_key.public_key()
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
            public_key.verifier(
                b"sig",
                padding.PSS(
                    mgf=padding.MGF1(
                        algorithm=hashes.SHA256(),
                    ),
                    salt_length=padding.PSS.MAX_LENGTH
                ),
                hashes.SHA1()
            )
    def test_rsa_padding_unsupported_pss_mgf1_hash(self):
        assert backend.rsa_padding_supported(
            padding.PSS(mgf=padding.MGF1(DummyHashAlgorithm()), salt_length=0)
        ) is False
    def test_rsa_padding_unsupported(self):
        assert backend.rsa_padding_supported(DummyAsymmetricPadding()) is False
    def test_rsa_padding_supported_pkcs1v15(self):
        assert backend.rsa_padding_supported(padding.PKCS1v15()) is True
    def test_rsa_padding_supported_pss(self):
        assert backend.rsa_padding_supported(
            padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
        ) is True
    def test_rsa_padding_supported_oaep(self):
        assert backend.rsa_padding_supported(
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA1()),
                algorithm=hashes.SHA1(),
                label=None
            ),
        ) is True
    @pytest.mark.skipif(
        backend._lib.Cryptography_HAS_RSA_OAEP_MD == 0,
        reason="Requires OpenSSL with rsa_oaep_md (1.0.2+)"
    )
    def test_rsa_padding_supported_oaep_sha2_combinations(self):
        # With rsa_oaep_md every SHA1/SHA2 MGF1-vs-OAEP pairing is supported.
        hashalgs = [
            hashes.SHA1(),
            hashes.SHA224(),
            hashes.SHA256(),
            hashes.SHA384(),
            hashes.SHA512(),
        ]
        for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
            assert backend.rsa_padding_supported(
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=mgf1alg),
                    algorithm=oaepalg,
                    label=None
                ),
            ) is True
    def test_rsa_padding_unsupported_oaep_ripemd160_sha1(self):
        assert backend.rsa_padding_supported(
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
                algorithm=hashes.SHA1(),
                label=None
            ),
        ) is False
    def test_rsa_padding_unsupported_oaep_sha1_ripemd160(self):
        assert backend.rsa_padding_supported(
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA1()),
                algorithm=hashes.RIPEMD160(),
                label=None
            ),
        ) is False
    def test_rsa_padding_unsupported_mgf(self):
        assert backend.rsa_padding_supported(
            padding.OAEP(
                mgf=DummyMGF(),
                algorithm=hashes.SHA1(),
                label=None
            ),
        ) is False
        assert backend.rsa_padding_supported(
            padding.PSS(mgf=DummyMGF(), salt_length=0)
        ) is False
    @pytest.mark.skipif(
        backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
        reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
    )
    def test_unsupported_mgf1_hash_algorithm_decrypt(self):
        private_key = RSA_KEY_512.private_key(backend)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            private_key.decrypt(
                b"0" * 64,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA256()),
                    algorithm=hashes.SHA1(),
                    label=None
                )
            )
    @pytest.mark.skipif(
        backend._lib.Cryptography_HAS_RSA_OAEP_MD == 1,
        reason="Requires OpenSSL without rsa_oaep_md (< 1.0.2)"
    )
    def test_unsupported_oaep_hash_algorithm_decrypt(self):
        private_key = RSA_KEY_512.private_key(backend)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            private_key.decrypt(
                b"0" * 64,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA1()),
                    algorithm=hashes.SHA256(),
                    label=None
                )
            )
    def test_unsupported_mgf1_hash_algorithm_ripemd160_decrypt(self):
        private_key = RSA_KEY_512.private_key(backend)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            private_key.decrypt(
                b"0" * 64,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.RIPEMD160()),
                    algorithm=hashes.RIPEMD160(),
                    label=None
                )
            )
    def test_unsupported_mgf1_hash_algorithm_whirlpool_decrypt(self):
        private_key = RSA_KEY_512.private_key(backend)
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
            private_key.decrypt(
                b"0" * 64,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.Whirlpool()),
                    algorithm=hashes.Whirlpool(),
                    label=None
                )
            )
    def test_unsupported_oaep_label_decrypt(self):
        # OAEP labels are not supported by this backend at all.
        private_key = RSA_KEY_512.private_key(backend)
        with pytest.raises(ValueError):
            private_key.decrypt(
                b"0" * 64,
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA1()),
                    algorithm=hashes.SHA1(),
                    label=b"label"
                )
            )
@pytest.mark.skipif(
    backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101,
    reason="Requires an OpenSSL version >= 1.0.1"
)
class TestOpenSSLCMAC(object):
    """CMAC construction must reject cipher algorithms it does not know."""
    def test_unsupported_cipher(self):
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
            backend.create_cmac_ctx(DummyCipherAlgorithm())
class TestOpenSSLCreateX509CSR(object):
    """CSR creation with DSA/EC keys is unimplemented on OpenSSL < 1.0.1."""
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_unsupported_dsa_keys(self):
        private_key = DSA_KEY_2048.private_key(backend)
        with pytest.raises(NotImplementedError):
            backend.create_x509_csr(object(), private_key, hashes.SHA1())
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_unsupported_ec_keys(self):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        private_key = ec.generate_private_key(ec.SECP256R1(), backend)
        with pytest.raises(NotImplementedError):
            backend.create_x509_csr(object(), private_key, hashes.SHA1())
class TestOpenSSLSignX509Certificate(object):
    """Certificate signing: builder-type validation and key types that are
    unimplemented on OpenSSL < 1.0.1."""
    def test_requires_certificate_builder(self):
        # A non-builder first argument must be rejected with TypeError.
        private_key = RSA_KEY_2048.private_key(backend)
        with pytest.raises(TypeError):
            backend.create_x509_certificate(
                object(), private_key, DummyHashAlgorithm()
            )
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_sign_with_dsa_private_key_is_unsupported(self):
        private_key = DSA_KEY_2048.private_key(backend)
        builder = x509.CertificateBuilder()
        builder = builder.subject_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).issuer_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).serial_number(
            1
        ).public_key(
            private_key.public_key()
        ).not_valid_before(
            datetime.datetime(2002, 1, 1, 12, 1)
        ).not_valid_after(
            datetime.datetime(2032, 1, 1, 12, 1)
        )
        with pytest.raises(NotImplementedError):
            builder.sign(private_key, hashes.SHA512(), backend)
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_sign_with_ec_private_key_is_unsupported(self):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        private_key = ec.generate_private_key(ec.SECP256R1(), backend)
        builder = x509.CertificateBuilder()
        builder = builder.subject_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).issuer_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).serial_number(
            1
        ).public_key(
            private_key.public_key()
        ).not_valid_before(
            datetime.datetime(2002, 1, 1, 12, 1)
        ).not_valid_after(
            datetime.datetime(2032, 1, 1, 12, 1)
        )
        with pytest.raises(NotImplementedError):
            builder.sign(private_key, hashes.SHA512(), backend)
class TestOpenSSLSignX509CertificateRevocationList(object):
    """CRL signing: builder-type validation and key types that are
    unimplemented on OpenSSL < 1.0.1."""
    def test_invalid_builder(self):
        private_key = RSA_KEY_2048.private_key(backend)
        with pytest.raises(TypeError):
            backend.create_x509_crl(object(), private_key, hashes.SHA256())
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_sign_with_dsa_private_key_is_unsupported(self):
        private_key = DSA_KEY_2048.private_key(backend)
        builder = x509.CertificateRevocationListBuilder()
        builder = builder.issuer_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).last_update(
            datetime.datetime(2002, 1, 1, 12, 1)
        ).next_update(
            datetime.datetime(2032, 1, 1, 12, 1)
        )
        with pytest.raises(NotImplementedError):
            builder.sign(private_key, hashes.SHA1(), backend)
    @pytest.mark.skipif(
        backend._lib.CRYPTOGRAPHY_OPENSSL_101_OR_GREATER,
        reason="Requires an older OpenSSL. Must be < 1.0.1"
    )
    def test_sign_with_ec_private_key_is_unsupported(self):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        private_key = ec.generate_private_key(ec.SECP256R1(), backend)
        builder = x509.CertificateRevocationListBuilder()
        builder = builder.issuer_name(
            x509.Name([x509.NameAttribute(x509.NameOID.COUNTRY_NAME, u'US')])
        ).last_update(
            datetime.datetime(2002, 1, 1, 12, 1)
        ).next_update(
            datetime.datetime(2032, 1, 1, 12, 1)
        )
        with pytest.raises(NotImplementedError):
            builder.sign(private_key, hashes.SHA512(), backend)
class TestOpenSSLCreateRevokedCertificate(object):
    """Revoked-certificate creation must reject non-builder arguments."""
    def test_invalid_builder(self):
        with pytest.raises(TypeError):
            backend.create_x509_revoked_certificate(object())
class TestOpenSSLSerializationWithOpenSSL(object):
    """Key-serialization internals: the PEM password callback, unsupported
    EVP key types, and the password-length limit."""
    def test_pem_password_cb_buffer_too_small(self):
        # A password longer than the supplied buffer must report failure (0)
        # and record a ValueError on the userdata object.
        ffi_cb, userdata = backend._pem_password_cb(b"aa")
        handle = backend._ffi.new_handle(userdata)
        buf = backend._ffi.new('char *')
        assert ffi_cb(buf, 1, False, handle) == 0
        assert userdata.called == 1
        assert isinstance(userdata.exception, ValueError)
    def test_pem_password_cb(self):
        # Happy path: callback copies the password and returns its length.
        password = b'abcdefg'
        buf_size = len(password) + 1
        ffi_cb, userdata = backend._pem_password_cb(password)
        handle = backend._ffi.new_handle(userdata)
        buf = backend._ffi.new('char[]', buf_size)
        assert ffi_cb(buf, buf_size, False, handle) == len(password)
        assert userdata.called == 1
        assert backend._ffi.string(buf, len(password)) == password
    def test_unsupported_evp_pkey_type(self):
        # A freshly created (typeless) EVP_PKEY cannot be converted either way.
        key = backend._create_evp_pkey_gc()
        with raises_unsupported_algorithm(None):
            backend._evp_pkey_to_private_key(key)
        with raises_unsupported_algorithm(None):
            backend._evp_pkey_to_public_key(key)
    def test_very_long_pem_serialization_password(self):
        password = "x" * 1024
        with pytest.raises(ValueError):
            load_vectors_from_file(
                os.path.join(
                    "asymmetric", "Traditional_OpenSSL_Serialization",
                    "key1.pem"
                ),
                lambda pemfile: (
                    backend.load_pem_private_key(
                        pemfile.read().encode(), password
                    )
                )
            )
class DummyLibrary(object):
    """Fake `_lib` object advertising no EC support, for monkeypatching."""
    Cryptography_HAS_EC = 0
class TestOpenSSLEllipticCurve(object):
    """EC capability checks when the underlying library lacks EC support."""
    def test_elliptic_curve_supported(self, monkeypatch):
        monkeypatch.setattr(backend, "_lib", DummyLibrary())
        assert backend.elliptic_curve_supported(None) is False
    def test_elliptic_curve_signature_algorithm_supported(self, monkeypatch):
        monkeypatch.setattr(backend, "_lib", DummyLibrary())
        assert backend.elliptic_curve_signature_algorithm_supported(
            None, None
        ) is False
    def test_sn_to_elliptic_curve_not_supported(self):
        # An unknown short name must raise the unsupported-curve error.
        with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_ELLIPTIC_CURVE):
            _sn_to_elliptic_curve(backend, b"fake")
    def test_elliptic_curve_exchange_algorithm_supported(self, monkeypatch):
        monkeypatch.setattr(backend, "_lib", DummyLibrary())
        assert not backend.elliptic_curve_exchange_algorithm_supported(
            ec.ECDH(), ec.SECP256R1()
        )
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPEMSerialization(object):
    """Private-key PEM serialization limits."""
    def test_password_length_limit(self):
        # Encryption passwords longer than 1023 bytes are rejected.
        password = b"x" * 1024
        key = RSA_KEY_2048.private_key(backend)
        with pytest.raises(ValueError):
            key.private_bytes(
                serialization.Encoding.PEM,
                serialization.PrivateFormat.PKCS8,
                serialization.BestAvailableEncryption(password)
            )
class TestGOSTCertificate(object):
    """Parsing a GOST certificate whose subject uses an ASN.1 NumericString,
    which only OpenSSL >= 1.1.0 (non-LibreSSL) can decode."""
    def test_numeric_string_x509_name_entry(self):
        cert = _load_cert(
            os.path.join("x509", "e-trust.ru.der"),
            x509.load_der_x509_certificate,
            backend
        )
        if (
            not backend._lib.CRYPTOGRAPHY_OPENSSL_110_OR_GREATER or
            backend._lib.CRYPTOGRAPHY_IS_LIBRESSL
        ):
            with pytest.raises(ValueError) as exc:
                cert.subject
            # We assert on the message in this case because if the certificate
            # fails to load it will also raise a ValueError and this test could
            # erroneously pass.
            assert str(exc.value) == "Unsupported ASN1 string type. Type: 18"
        else:
            assert cert.subject.get_attributes_for_oid(
                x509.ObjectIdentifier("1.2.643.3.131.1.1")
            )[0].value == "007710474375"
"nuaays@gmail.com"
] | nuaays@gmail.com |
67927289ffd53208d56009d2b3654fc46cf8c258 | ac0a4336abfa8f36079203b2ba2e104a59f3ed8b | /Multithreading/thread1.py | 584a040d5525dfe8380e865d962c501eab646baa | [] | no_license | Michal-lis/python_playground | ea422df3c992c01bfe6df5621768df386583eed9 | ec24b7456a0ee872acbcbfa54daa6634dfcfb7be | refs/heads/master | 2022-11-05T20:50:14.809449 | 2019-01-11T13:25:09 | 2019-01-11T13:25:09 | 87,660,323 | 0 | 1 | null | 2022-10-17T11:27:43 | 2017-04-08T19:53:26 | Tcl | UTF-8 | Python | false | false | 827 | py | import threading
import time
ki = range(300)
def calculate_5(li):
pow5 = []
for e in li:
for e in li:
for e in li:
pow5.append(pow(e, 5))
return pow5
def calculate_4(li):
pow4 = []
for e in li:
for e in li:
for e in li:
pow4.append(pow(e, 4))
return pow4
thread1 = threading.Thread(target=calculate_5, args=(ki,))
thread2 = threading.Thread(target=calculate_4, args=(ki,))
tt_init_5 = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
tt_end_5 = time.time()
tt5 = tt_end_5 - tt_init_5
t_init_5 = time.time()
a5 = calculate_5(ki)
t_end_5 = time.time()
t5 = t_end_5 - t_init_5
t_init_4 = time.time()
a4 = calculate_4(ki)
t_end_4 = time.time()
t4 = t_end_4 - t_init_4
print(t4)
print(t5)
print(tt5)
| [
"michallis95@vp.pl"
] | michallis95@vp.pl |
d4a90e6d064d0f890b8d6bd5f03b1b914263bf27 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/edabit/_Edabit-Solutions-master/Limit a Number's Value/solution.py | ce3b9f895d444972b8796e83f0b74883d8c75a31 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 385 | py | ___ limit_number(num, range_low, range_high
__ num < range_low:
r.. range_low
____ num > range_high:
r.. range_high
____
r.. num
___ test
print("test has started")
__ limit_number(5, 1, 10) ! 5:
print("error1")
__ limit_number(-3, 1, 10) ! 1:
print("error2")
__ limit_number(14, 1, 10) ! 10:
print("error3")
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
57106cf1d9c1905a6e255803e406bf9aa13528ea | 64076dd830b5740cf25f54fbf64c6784427801a2 | /security.py | 07a1b66e228677dbf3567bca4c517b48ceb23062 | [] | no_license | Shiv2157k/flask_store | ee9113fa813365429dccb486cb14af6d307f9c0e | 9fe0d74f9c83b00fa769a6bcb6557dca1dfd8d13 | refs/heads/master | 2022-11-26T01:56:28.865821 | 2020-08-02T22:28:05 | 2020-08-02T22:28:05 | 284,491,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from werkzeug.security import safe_str_cmp
from models.user import UserModel
def authenticate(username, password):
user = UserModel.find_by_username(username)
if user and safe_str_cmp(user.password, password):
return user
def identity(payload):
user_id = payload["identity"]
return UserModel.find_by_id(user_id)
| [
"shiv2157.k@gmail.com"
] | shiv2157.k@gmail.com |
2446169dfd9c23bd9ff066bc9404816a83fec2c0 | f82349a5d9cb285ced7c52db1ce95c65f5fd0cf0 | /mars/tensor/expressions/arithmetic/cos.py | ce9407c3b368b6138702495e84f7a15278b473a1 | [
"MIT",
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | pingrunhuang/mars | 8d2602356b6f4d9eb7c6dfe4b2c4536b4bdfc229 | ae920c374e9844d7426d0cc09c0d97059dc5341c | refs/heads/master | 2020-04-17T03:42:11.147774 | 2019-01-18T06:49:29 | 2019-01-18T06:49:29 | 166,196,676 | 0 | 0 | Apache-2.0 | 2019-01-17T09:17:25 | 2019-01-17T09:17:25 | null | UTF-8 | Python | false | false | 2,961 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .... import operands
from ..utils import infer_dtype
from .core import TensorUnaryOp
class TensorCos(operands.Cos, TensorUnaryOp):
def __init__(self, casting='same_kind', err=None, dtype=None, sparse=False, **kw):
err = err if err is not None else np.geterr()
super(TensorCos, self).__init__(_casting=casting, _err=err,
_dtype=dtype, _sparse=sparse, **kw)
@classmethod
def _is_sparse(cls, x):
return False
@infer_dtype(np.cos)
def cos(x, out=None, where=None, **kwargs):
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input tensor in radians.
out : Tensor, None, or tuple of Tensor and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
**kwargs
Returns
-------
y : Tensor
The corresponding cosine values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> import mars.tensor as mt
>>> mt.cos(mt.array([0, mt.pi/2, mt.pi])).execute()
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = mt.empty(1)
>>> out2 = mt.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> mt.cos(mt.zeros((3,3)),mt.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
"""
op = TensorCos(**kwargs)
return op(x, out=out, where=where)
| [
"xuye.qin@alibaba-inc.com"
] | xuye.qin@alibaba-inc.com |
a05127b405e68038f07aab774aed90fe51dd6642 | 9b2eb0d6b673ac4945f9698c31840b847f790a58 | /pkg/test/test_fast_stats_builds_api.py | 77236c6f0afa96f5aa495b7784829af15e744dd3 | [
"Apache-2.0"
] | permissive | Apteco/apteco-api | 6d21c9f16e58357da9ce64bac52f1d2403b36b7c | e8cf50a9cb01b044897025c74d88c37ad1612d31 | refs/heads/master | 2023-07-10T23:25:59.000038 | 2023-07-07T14:52:29 | 2023-07-07T14:52:29 | 225,371,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | # coding: utf-8
"""
Apteco API
An API to allow access to Apteco Marketing Suite resources # noqa: E501
The version of the OpenAPI document: v2
Contact: support@apteco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import apteco_api
from apteco_api.api.fast_stats_builds_api import FastStatsBuildsApi # noqa: E501
from apteco_api.rest import ApiException
class TestFastStatsBuildsApi(unittest.TestCase):
"""FastStatsBuildsApi unit test stubs"""
def setUp(self):
self.api = apteco_api.api.fast_stats_builds_api.FastStatsBuildsApi() # noqa: E501
def tearDown(self):
pass
def test_fast_stats_builds_cancel_fast_stats_build_job(self):
"""Test case for fast_stats_builds_cancel_fast_stats_build_job
EXPERIMENTAL: Requires OrbitAdmin: Cancel a running data purchasing job # noqa: E501
"""
pass
def test_fast_stats_builds_create_fast_stats_build_job(self):
"""Test case for fast_stats_builds_create_fast_stats_build_job
EXPERIMENTAL: Requires OrbitAdmin: Create a new job to builds a FastStats system from the given definition # noqa: E501
"""
pass
def test_fast_stats_builds_fast_stats_build_sync(self):
"""Test case for fast_stats_builds_fast_stats_build_sync
EXPERIMENTAL: Requires OrbitAdmin: Builds a FastStats system from the given definition # noqa: E501
"""
pass
def test_fast_stats_builds_get_fast_stats_build_job(self):
"""Test case for fast_stats_builds_get_fast_stats_build_job
EXPERIMENTAL: Requires OrbitAdmin: Get the status of a running FastStats build job # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"tim.morris@apteco.com"
] | tim.morris@apteco.com |
c570a16420f515bd89d420f8231058e8acb26b1d | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_298/ch22_2020_09_09_12_15_01_157465.py | 8803cbcb0895a39a98c1635aaa7138b86ea1db8d | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | a= input("Quantos cigarros voce fuma por dia? ")
b= input("Ha quantos anos? ")
a= int(a)
b= int(b)
def vida(a,b):
fumado= a*b*365
perdido= (fumado)/144
return perdido
print(vida(a,b)) | [
"you@example.com"
] | you@example.com |
104b1a43657e6f3c40b770581127e9bbd0589f0c | c14e31bdfed47fc9aaafd3b1100451551acff3c6 | /source/accounts/forms.py | f038f43ec5d8eb1f212fdaa4b74836b1d7619c7c | [] | no_license | UuljanAitnazarova/reviews_project | 79f6e423f111afa7a057fdf15e086559530a281c | 74b64559bfaf1f9fcd3c78009fa4264c64bb9571 | refs/heads/master | 2023-04-23T04:38:59.266181 | 2021-05-01T12:56:15 | 2021-05-01T12:56:15 | 363,329,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
from django import forms
from django.core.exceptions import ValidationError
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
fields = ['username', 'email', 'first_name', 'last_name', 'password1', 'password2']
def clean(self):
super(MyUserCreationForm, self).clean()
if not self.cleaned_data.get('email'):
raise ValidationError('Enter your email address')
class UserUpdateForm(forms.ModelForm):
class Meta:
model = get_user_model()
fields = ('email', 'first_name', 'last_name')
class PasswordChangeForm(forms.ModelForm):
password = forms.CharField(label="New password", strip=False, widget=forms.PasswordInput)
password_confirm = forms.CharField(label="Confirm password", widget=forms.PasswordInput, strip=False)
old_password = forms.CharField(label="Old password", strip=False, widget=forms.PasswordInput)
def clean_password_confirm(self):
password = self.cleaned_data.get("password")
password_confirm = self.cleaned_data.get("password_confirm")
if password and password_confirm and password != password_confirm:
raise forms.ValidationError('Passwords do not match!')
return password_confirm
def clean_old_password(self):
old_password = self.cleaned_data.get('old_password')
if not self.instance.check_password(old_password):
raise forms.ValidationError('Old password is incorrect!')
return old_password
def save(self, commit=True):
user = self.instance
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class Meta:
model = get_user_model()
fields = ['password', 'password_confirm', 'old_password'] | [
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
6ea43da81e9e349639a232afa4e830990a130077 | 8a4f6d02ea5f83c78543849d9e997458c7d615c9 | /cleanup/transfer.py | 430bbb97b0b4d07686165e55ba110e4f44538591 | [] | no_license | jsl12/Picture-Cleanup | f5f2e3889b2894051e25f6347049fa4ea752235e | 82c62dab9fe9a59ff6ec2905049d4667ad9d91d9 | refs/heads/master | 2020-12-09T02:03:42.979162 | 2020-04-08T15:04:05 | 2020-04-08T15:04:05 | 233,159,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import ftplib
import logging
from pathlib import Path
from . import log
LOGGER = logging.getLogger(__name__)
def pull_from_phone(
host,
port,
local_path,
phone_path=None,
ext='jpg',
user='android',
passwd='android'):
ftp = ftplib.FTP()
ftp.connect(host, port)
try:
LOGGER.debug(f'Connected to {host}:{port}')
ftp.login(user, passwd)
LOGGER.debug(f'Logged in with: {user}, {passwd}')
for file in ftp_files(ftp, phone_path):
if file.suffix == f'.{ext}':
res = local_path / file.name
if res.exists():
LOGGER.info(f'file already exists: "{res}"')
continue
else:
if not res.parents[0].exists():
res.parents[0].mkdir()
LOGGER.debug(f'Created dir: "{res.parents[0]}"')
with res.open('wb') as res_file:
ftp.retrbinary(f'RETR {file}', res_file.write)
LOGGER.info(f'ftp success: "{file}", "{res}"')
except Exception as e:
LOGGER.exception(repr(e))
finally:
ftp.quit()
def ftp_files(ftp, path):
for f in ftp.mlsd(path, facts=['type']):
if f[1]['type'] == 'dir':
yield from ftp_files(ftp, f'{path}\\{f[0]}')
else:
yield Path(path) / f[0]
def transferred_files(ftplog):
for line in log.filter(log.line_gen(ftplog), 'ftp success'):
yield (log.get_paths(line))
def skipped_files(ftplog):
for line in log.filter(log.line_gen(ftplog), 'file already exists'):
yield (log.get_paths(line))
| [
"32917998+jsl12@users.noreply.github.com"
] | 32917998+jsl12@users.noreply.github.com |
f1a412eef19c821e3e3888ee78749efea6439ccd | f9294e64495dbe6d959380c1f1c313a9fc2ee8c4 | /from_u_20207/wsgi.py | 5cc0a495f5d9677c53f9ed5e37c419015474609e | [] | no_license | crowdbotics-apps/from-u-20207 | 38304624e5d2106502c8d459ad4e05af8ccc0ec8 | 951986384c17568b27505fb5aa4943b4c312749e | refs/heads/master | 2022-12-14T22:55:34.251845 | 2020-09-11T15:32:28 | 2020-09-11T15:32:28 | 294,733,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for from_u_20207 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'from_u_20207.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
c29268709c89ea3b972d76ba5d5b1827978ad7dc | 45dd427ec7450d2fac6fe2454f54a130b509b634 | /homework_3/preparation1.py | 2cd921e0e4dbfc35edb2a6c54fe16ec5985257e4 | [] | no_license | weka511/smac | 702fe183e3e73889ec663bc1d75bcac07ebb94b5 | 0b257092ff68058fda1d152d5ea8050feeab6fe2 | refs/heads/master | 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import pylab
def show_conf(L, sigma, title, fname):
pylab.axes()
for [x, y] in L:
for ix in range(-1, 2):
for iy in range(-1, 2):
cir = pylab.Circle((x + ix, y + iy), radius=sigma, fc='r')
pylab.gca().add_patch(cir)
pylab.axis('scaled')
pylab.title(title)
pylab.axis([0.0, 1.0, 0.0, 1.0])
pylab.savefig(fname)
pylab.show()
pylab.close()
L = [[0.9, 0.9]]
sigma = 0.4
show_conf(L, sigma, 'test graph', 'one_disk.png') | [
"simon@greenweaves.nz"
] | simon@greenweaves.nz |
a34c9c352f846d610a576e96d440f5c41e31f197 | 341bd2d71b6b6e3af734f16989aeb450e3e73624 | /PCA9536_WDBZ/PCA9536_WDBZ.py | adb05997edaa4ecbe077a630777db9ae1e205939 | [] | no_license | ControlEverythingCommunity/CE_PYTHON_LIB | 5c170f7e3763ab3b160a5fc33f2bb96d4798c7e2 | 736b29434a451a384c2f52490c849239c3190951 | refs/heads/master | 2021-01-12T00:39:25.374689 | 2017-08-30T21:54:47 | 2017-08-30T21:54:47 | 78,751,564 | 7 | 7 | null | 2017-12-15T11:08:48 | 2017-01-12T14:05:11 | Python | UTF-8 | Python | false | false | 6,014 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# PCA9536_WDBZ
# This code is designed to work with the PCA9536_WDBZ_I2CS I2C Mini Module available from ControlEverything.com.
# https://shop.controleverything.com/products/water-detect-sensor-with-buzzer
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C address of the device
PCA9536_WDBZ_DEFAULT_ADDRESS = 0x41
# PCA9536_WDBZ Register Map
PCA9536_WDBZ_REG_INPUT = 0x00 # Input Port Register
PCA9536_WDBZ_REG_OUTPUT = 0x01 # Output Port Register
PCA9536_WDBZ_REG_POLARITY = 0x02 # Polarity Inversion Register
PCA9536_WDBZ_REG_CONFIG = 0x03 # Configuration Register
# PCA9536_WDBZ Output Port Register Configuration
PCA9536_WDBZ_OUTPUT_PIN0 = 0x01 # Reflects outgoing logic levels of Pin-0
PCA9536_WDBZ_OUTPUT_PIN1 = 0x02 # Reflects outgoing logic levels of Pin-1
PCA9536_WDBZ_OUTPUT_PIN2 = 0x04 # Reflects outgoing logic levels of Pin-2
PCA9536_WDBZ_OUTPUT_PIN3 = 0x08 # Reflects outgoing logic levels of Pin-3
# PCA9536_WDBZ Polarity Inversion Register Configuration
PCA9536_WDBZ_POLARITY_PIN0 = 0x01 # Input Port register data inverted of Pin-0
PCA9536_WDBZ_POLARITY_PIN1 = 0x02 # Input Port register data inverted of Pin-1
PCA9536_WDBZ_POLARITY_PIN2 = 0x04 # Input Port register data inverted of Pin-2
PCA9536_WDBZ_POLARITY_PIN3 = 0x08 # Input Port register data inverted of Pin-3
PCA9536_WDBZ_POLARITY_PINX = 0x00 # Input Port register data retained of Pin-X
# PCA9536_WDBZ Configuration Register
PCA9536_WDBZ_CONFIG_PIN0 = 0x01 # Corresponding port Pin-0 configured as Input
PCA9536_WDBZ_CONFIG_PIN1 = 0x02 # Corresponding port Pin-1 configured as Input
PCA9536_WDBZ_CONFIG_PIN2 = 0x04 # Corresponding port Pin-2 configured as Input
PCA9536_WDBZ_CONFIG_PIN3 = 0x08 # Corresponding port Pin-3 configured as Input
PCA9536_WDBZ_CONFIG_PINX = 0x00 # Corresponding port Pin-X configured as Output
class PCA9536_WDBZ():
def select_io(self):
"""Select the Input/Output for the use
0 : Input
1 : Output"""
self.io = int(input("Select Input/Output (0:I, 1:O) = "))
while self.io > 1 :
self.io = int(input("Select Input/Output (0:I, 1:O) = "))
def select_pin(self):
"""Select the Pin for the use
0 : Pin-0
1 : Pin-1
2 : Pin-2
3 : Pin-3"""
self.pin = int(input("Enter the Pin No.(0-3) = "))
while self.pin > 3 :
self.pin = int(input("Enter the Pin No.(0-3) = "))
def input_output_config(self):
"""Select the Configuration Register data from the given provided value"""
if self.io == 0 :
if self.pin == 0 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN0)
elif self.pin == 1 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN1)
elif self.pin == 2 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN2)
elif self.pin == 3 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN3)
elif self.io == 1 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)
def polarity_config(self):
"""Select the Polarity Inversion Register Configuration data from the given provided value"""
if self.pin == 0 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN0)
elif self.pin == 1 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN1)
elif self.pin == 2 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN2)
elif self.pin == 3 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_POLARITY, PCA9536_WDBZ_POLARITY_PIN3)
def relay_buzzer_config(self):
"""Select the Polarity Inversion Register Configuration data from the given provided value"""
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)
"""Select the Output Port Register Configuration data from the given provided value"""
if self.pin == 0 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN0)
elif self.pin == 1 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN1)
elif self.pin == 2 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN2)
def read_data(self):
"""Read data back from PCA9536_WDBZ_REG_INPUT(0x00)/PCA9536_WDBZ_REG_OUTPUT(0x01), 1 byte"""
data = bus.read_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT)
# Convert the data to 4-bits
data = (data & 0x0F)
if (data & (2 ** self.pin)) == 0 :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PINX)
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN3)
print "I/O Pin 3 State is HIGH"
print "Buzzer is ON"
print "I/O Pin %d State is LOW" %self.pin
print "Water Detected"
else :
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_CONFIG, PCA9536_WDBZ_CONFIG_PIN3)
bus.write_byte_data(PCA9536_WDBZ_DEFAULT_ADDRESS, PCA9536_WDBZ_REG_OUTPUT, PCA9536_WDBZ_OUTPUT_PIN3)
print "I/O Pin 3 State is LOW"
print "Buzzer is OFF"
print "I/O Pin %d State is HIGH" %self.pin
print "No Water Present"
| [
"apple@Yaddis-iMac.local"
] | apple@Yaddis-iMac.local |
695020da160da49d3ff96237eb4b04bf19b2c942 | fb78fd824e904705fb1ee09db8b3c20cc3902805 | /django-blog-api/posts/views.py | ce41539ee2fd83cb2392599bd4c682a68868049b | [] | no_license | Roderich25/mac | 8469833821ac49c539a744db29db5a41d755ad55 | 4f7fe281c88f0199b85d0ac99ce41ffb643d6e82 | refs/heads/master | 2023-01-12T05:55:12.753209 | 2021-11-26T01:16:24 | 2021-11-26T01:16:24 | 207,029,750 | 0 | 0 | null | 2023-01-07T11:49:23 | 2019-09-07T21:51:53 | Jupyter Notebook | UTF-8 | Python | false | false | 1,231 | py | from django.contrib.auth import get_user_model
from rest_framework import viewsets # generics
from .permissions import IsAuthorOrReadOnly
from .models import Post
from .serializers import PostSerializer, UserSerializer
class PostViewSet(viewsets.ModelViewSet):
permission_classes = (IsAuthorOrReadOnly,)
queryset = Post.objects.all()
serializer_class = PostSerializer
class UserViewSet(viewsets.ModelViewSet):
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
# class PostList(generics.ListCreateAPIView):
# # permission_classes = (permissions.IsAuthenticated,)
# queryset = Post.objects.all()
# serializer_class = PostSerializer
#
#
# class PostDetail(generics.RetrieveUpdateDestroyAPIView):
# # permission_classes = (permissions.IsAuthenticated,)
# permission_classes = (IsAuthorOrReadOnly,)
# queryset = Post.objects.all()
# serializer_class = PostSerializer
#
#
# class UserList(generics.ListCreateAPIView):
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer
#
#
# class UserDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = get_user_model().objects.all()
# serializer_class = UserSerializer
| [
"rodrigoavilasolis@gmail.com"
] | rodrigoavilasolis@gmail.com |
ab0875ad6d3c9469a5818f5710f7de6bd10097db | 5117fdfc5ce8399f72d12d519d1c1b5ea4a64c63 | /examples/hh/ilql_hh.py | 57500a06f128d6bc82a7bee9df5a855b5e5ca234 | [
"MIT"
] | permissive | neevaco/trlx | cd1ccd0ad308742eaa1dcaedc64a3103637ff839 | 46231059f5a3fa256a25ec2a16f36b24b2b34055 | refs/heads/main | 2023-05-25T01:33:48.486866 | 2023-03-13T22:04:24 | 2023-03-13T22:04:24 | 615,039,599 | 0 | 0 | null | 2023-03-16T20:36:57 | 2023-03-16T20:36:56 | null | UTF-8 | Python | false | false | 3,530 | py | import json
import os
import sys
from datasets import load_dataset
from ppo_hh import create_reward_fn
import trlx
from trlx.data.default_configs import (
ILQLConfig,
ModelConfig,
OptimizerConfig,
SchedulerConfig,
TokenizerConfig,
TrainConfig,
TRLConfig,
)
default_config = TRLConfig(
train=TrainConfig(
seq_length=1024,
batch_size=4,
epochs=100,
total_steps=20000,
checkpoint_interval=10000,
eval_interval=1000,
pipeline="PromptPipeline",
trainer="AccelerateILQLTrainer",
checkpoint_dir="checkpoints/ilql_hh",
),
model=ModelConfig(model_path="EleutherAI/gpt-j-6B", num_layers_unfrozen=-1),
tokenizer=TokenizerConfig(tokenizer_path="EleutherAI/gpt-j-6B", truncation_side="left"),
optimizer=OptimizerConfig(name="adamw", kwargs=dict(lr=1e-6, betas=(0.9, 0.95), eps=1.0e-8, weight_decay=1.0e-6)),
scheduler=SchedulerConfig(name="cosine_annealing", kwargs=dict(T_max=1000000000, eta_min=1e-6)),
method=ILQLConfig(
name="ilqlconfig",
tau=0.6,
gamma=0.99,
cql_scale=0.1,
awac_scale=1,
alpha=0.0001,
beta=0,
steps_for_target_q_sync=1,
two_qs=True,
gen_kwargs=dict(max_new_tokens=128, top_k=20, beta=[1, 4], temperature=1.0),
),
)
config_name = os.environ.get("CONFIG_NAME")
if config_name == "125M":
default_config.train.batch_size = 16
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_125M"
default_config.model.model_path = "EleutherAI/pythia-125m-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "1B":
default_config.train.batch_size = 8
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_1B"
default_config.model.model_path = "EleutherAI/pythia-1.4b-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "6B":
default_config.train.batch_size = 4
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_6B"
default_config.model.model_path = "EleutherAI/pythia-6.9b-deduped"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
elif config_name == "20B":
default_config.train.batch_size = 1
default_config.train.total_steps = 3000
default_config.train.checkpoint_dir = "checkpoints/ilql_hh_20B"
default_config.model.model_path = "EleutherAI/gpt-neox-20b"
default_config.tokenizer.tokenizer_path = "EleutherAI/gpt-neox-20b"
def preprocess(sample):
sample["prompt_output"] = [
[sample["prompt"], sample["chosen"]],
[sample["prompt"], sample["rejected"]],
]
sample["reward"] = [1, -1]
return sample
def main(hparams={}):
config = TRLConfig.update(default_config, hparams)
dataset = load_dataset("Dahoas/full-hh-rlhf").map(preprocess)
prompts_outputs = sum(dataset["train"]["prompt_output"], [])
rewards = sum(dataset["train"]["reward"], [])
eval_prompts = [prompt_output[0][0] for prompt_output in dataset["test"]["prompt_output"]][:280]
reward_fn = create_reward_fn()
trlx.train(
samples=prompts_outputs,
rewards=rewards,
config=config,
eval_prompts=eval_prompts,
metric_fn=lambda **kwargs: {"reward": reward_fn(**kwargs)},
stop_sequences=["Human:", "human:", "Assistant:", "assistant:"],
)
if __name__ == "__main__":
hparams = {} if len(sys.argv) == 1 else json.loads(sys.argv[1])
main(hparams)
| [
"noreply@github.com"
] | neevaco.noreply@github.com |
b7742363da58a3b4a69edc0d133ad46617c3deaf | e31bbc636eb495eed5843d1a4b7f66d3525eecc6 | /Examp/Python Advanced Exam - 27 June 2020/2.Snake.py | 9d6a0ffe5c0adb9f9aea2b696c8ca43593217b01 | [] | no_license | PullBack993/Python-Advanced | c6a24b68d2517589027d4af8dee80fe9f28490a2 | 3a9362b09555649eef797220dac1bba7a39df06e | refs/heads/main | 2023-06-12T08:34:16.379021 | 2021-07-08T14:08:03 | 2021-07-08T14:08:03 | 358,933,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,196 | py | FOOD = '*'
SNAKE = 'S'
BURROWS = 'B'
MOVE_SYMBOL = '.'
MAX_FOOD = 9
def get_input(size):
board = []
for _ in range(size):
board.append([el for el in input()])
return board
def find_snake(board, size):
for row_i in range(size):
for col_i in range(size):
if board[row_i][col_i] == SNAKE:
return row_i, col_i
def find_burrows(board):
for row_i in range(len(board)):
for col_i in range(len(board)):
if board[row_i][col_i] == BURROWS:
return row_i, col_i
def check_index(mat, r, c):
size_matrix = len(mat)
if 0 <= r < size_matrix and 0 <= c < size_matrix:
return True
return False
size = int(input())
board = get_input(size)
snake_row, snake_col = find_snake(board, size)
food = 0
game_over = False
while not game_over and MAX_FOOD >= food:
move_command = input()
old_row_position = snake_row
old_col_position = snake_col
if move_command == "up":
snake_row -= 1
elif move_command == "down":
snake_row += 1
elif move_command == "left":
snake_col -= 1
elif move_command == "right":
snake_col += 1
position = check_index(board, snake_row, snake_col)
if position:
new_row = snake_row
new_col = snake_col
new_position = board[new_row][new_col]
if new_position == FOOD:
food += 1
board[old_row_position][old_col_position] = MOVE_SYMBOL
board[new_row][new_col] = SNAKE
elif new_position == BURROWS:
board[old_row_position][old_col_position] = MOVE_SYMBOL
board[new_row][new_col] = MOVE_SYMBOL
row, col = find_burrows(board)
snake_row, snake_col = row, col
board[row][col] = SNAKE
else:
board[old_row_position][old_col_position] = MOVE_SYMBOL
board[new_row][new_col] = SNAKE
else:
board[old_row_position][old_col_position] = MOVE_SYMBOL
game_over = True
if game_over:
print("Game over!")
else:
print("You won! You fed the snake.")
print(f"Food eaten: {food}")
for el in board:
print("".join(i for i in el))
| [
"turgay.durhanov.ismailov@gmail.com"
] | turgay.durhanov.ismailov@gmail.com |
af89e467e5d5db861f9dfd5c9e22a755b28896d6 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/coghq/DistributedCrate.py | f4b3afdd53272a72b3d62a17efb5645db1b1ad11 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 11,934 | py | #Embedded file name: toontown.coghq.DistributedCrate
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.toonbase.ToontownGlobals import *
from CrateGlobals import *
from toontown.toonbase.ToonPythonUtil import fitSrcAngle2Dest
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.coghq import MovingPlatform
from direct.task.Task import Task
from toontown.coghq import DistributedCrushableEntity
class DistributedCrate(DistributedCrushableEntity.DistributedCrushableEntity):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCrate')
UP_KEY = base.MOVE_UP
DOWN_KEY = base.MOVE_DOWN
LEFT_KEY = base.MOVE_LEFT
RIGHT_KEY = base.MOVE_RIGHT
ModelPaths = ('phase_9/models/cogHQ/woodCrateB', 'phase_10/models/cashbotHQ/CBWoodCrate')
def __init__(self, cr):
DistributedCrushableEntity.DistributedCrushableEntity.__init__(self, cr)
self.initNodePath()
self.modelType = 0
self.crate = None
self.gridSize = 3.0
self.tContact = 0
self.tStick = 0.01
self.moveTrack = None
self.avMoveTrack = None
self.avPushTrack = None
self.crate = None
self.crushTrack = None
self.isLocalToon = 0
self.stuckToCrate = 0
self.upPressed = 0
self.isPushing = 0
self.creakSound = loader.loadSfx('phase_9/audio/sfx/CHQ_FACT_crate_effort.ogg')
self.pushSound = loader.loadSfx('phase_9/audio/sfx/CHQ_FACT_crate_sliding.ogg')
    def disable(self):
        """Stop all activity and release per-session resources.

        Pauses and drops any running intervals, destroys the crate
        geometry, removes pending tasks, and unhooks input/collision
        listeners before deferring to the base class.
        """
        self.ignoreAll()
        # Pause and drop any intervals that may still be running.
        if self.moveTrack:
            self.moveTrack.pause()
            del self.moveTrack
        if self.avMoveTrack:
            self.avMoveTrack.pause()
            del self.avMoveTrack
        if self.avPushTrack:
            self.avPushTrack.pause()
            del self.avPushTrack
        if self.crate:
            self.crate.destroy()
            del self.crate
        if self.crushTrack:
            self.crushTrack.pause()
            del self.crushTrack
        taskMgr.remove(self.taskName('crushTask'))
        # Only pushable crates registered input/collision hooks.
        if self.pushable:
            self.__listenForCollisions(0)
            self.ignore(base.MOVE_UP)
            self.ignore(base.MOVE_UP + '-up')
        DistributedCrushableEntity.DistributedCrushableEntity.disable(self)
def delete(self):
DistributedCrushableEntity.DistributedCrushableEntity.delete(self)
del self.creakSound
del self.pushSound
    def generateInit(self):
        # Nothing crate-specific to do before required fields arrive;
        # defer entirely to the base distributed entity.
        DistributedCrushableEntity.DistributedCrushableEntity.generateInit(self)
    def generate(self):
        # No crate-specific generate work; defer to the base entity.
        DistributedCrushableEntity.DistributedCrushableEntity.generate(self)
    def announceGenerate(self):
        """All required fields have arrived: build geometry and hook up input."""
        self.notify.debug('announceGenerate')
        DistributedCrushableEntity.DistributedCrushableEntity.announceGenerate(self)
        self.loadModel()
        self.modCrateCollisions()
        if self.pushable:
            # Only pushable crates respond to wall contacts and the push key.
            self.__listenForCollisions(1)
            self.accept(base.MOVE_UP, self.__upKeyPressed)
def modCrateCollisions(self):
cNode = self.find('**/wall')
cNode.setName(self.uniqueName('crateCollision'))
cNode.setZ(-0.8)
colNode = self.find('**/collision')
floor = colNode.find('**/MovingPlatform*')
floor2 = floor.copyTo(colNode)
floor2.setZ(-0.8)
def __upKeyPressed(self):
self.ignore(base.MOVE_UP)
self.accept(base.MOVE_UP + '-up', self.__upKeyReleased)
self.upPressed = 1
def __upKeyReleased(self):
self.ignore(base.MOVE_UP + '-up')
self.accept(base.MOVE_UP, self.__upKeyPressed)
self.upPressed = 0
if self.stuckToCrate:
self.__resetStick()
    def loadModel(self):
        # Build the crate as a MovingPlatform wrapping the loaded model so
        # toons can stand on it; parent it under this entity's node.
        crateModel = loader.loadModel(DistributedCrate.ModelPaths[self.modelType])
        self.crate = MovingPlatform.MovingPlatform()
        self.crate.setupCopyModel(self.getParentToken(), crateModel, 'floor')
        self.setScale(1.0)
        self.crate.setScale(self.scale)
        self.crate.reparentTo(self)
        self.crate.flattenLight()
def setScale(self, scale):
if self.crate:
self.crate.setScale(scale)
def __listenForCollisions(self, on):
if on:
self.accept(self.uniqueName('entercrateCollision'), self.handleCollision)
else:
self.ignore(self.uniqueName('entercrateCollision'))
    def setPosition(self, x, y, z):
        # Distributed-field setter: mirror straight into the node position.
        self.setPos(x, y, z)
    def handleCollision(self, collEntry = None):
        # A toon touched the crate.  Only react while the push key is held,
        # and only when the toon is squarely in front of the touched face.
        if not self.upPressed:
            return
        crateNormal = Vec3(collEntry.getSurfaceNormal(self))
        relativeVec = base.localAvatar.getRelativeVector(self, crateNormal)
        relativeVec.normalize()
        worldVec = render.getRelativeVector(self, crateNormal)
        worldVec.normalize()
        # Unit vector from the crate to the toon, in world space.
        offsetVec = Vec3(base.localAvatar.getPos(render) - self.getPos(render))
        offsetVec.normalize()
        # 2-D (x, y) dot product: how well the toon's offset lines up with
        # the touched face's outward normal.
        offsetDot = offsetVec[0] * worldVec[0] + offsetVec[1] * worldVec[1]
        self.notify.debug('offsetDot = %s, world = %s, rel = %s' % (offsetDot, worldVec, offsetVec))
        # Facing the face (relative y), standing in front of it (offsetDot)
        # and roughly level with it (z): begin the stick/push attempt.
        if relativeVec.getY() < -0.7 and offsetDot > 0.9 and offsetVec.getZ() < 0.05:
            self.getCrateSide(crateNormal)
            self.tContact = globalClock.getFrameTime()
            self.__listenForCollisions(0)
            self.__listenForCancelEvents(1)
            self.__startStickTask(crateNormal, base.localAvatar.getPos(render))
    def setReject(self):
        # Server refused our push request; abandon the stick attempt.
        self.notify.debug('setReject')
        self.sentRequest = 0
        if self.stuckToCrate:
            self.__resetStick()
    def __startStickTask(self, crateNormal, toonPos):
        # (Re)start the per-frame task that pins the toon in place until
        # contact has lasted long enough to count as a push attempt.
        self.__killStickTask()
        self.stuckToCrate = 1
        sTask = Task(self.__stickTask)
        # Stash the contact data on the task object for __stickTask.
        sTask.crateNormal = crateNormal
        sTask.toonPos = toonPos
        taskMgr.add(sTask, self.taskName('stickTask'))
    def __killStickTask(self):
        # Remove any pending stick task for this crate.
        taskMgr.remove(self.taskName('stickTask'))
def __stickTask(self, task):
tElapsed = globalClock.getFrameTime() - self.tContact
if tElapsed > self.tStick:
lToon = base.localAvatar
self.isLocalToon = 1
crateNormal = task.crateNormal
crateWidth = 2.75 * self.scale
offset = crateWidth + 1.5 + TorsoToOffset[lToon.style.torso]
newPos = crateNormal * offset
if self.avPushTrack:
self.avPushTrack.pause()
place = base.cr.playGame.getPlace()
newHpr = CrateHprs[self.crateSide]
h = lToon.getH(self)
h = fitSrcAngle2Dest(h, newHpr[0])
startHpr = Vec3(h, 0, 0)
self.avPushTrack = Sequence(LerpPosHprInterval(lToon, 0.25, newPos, newHpr, startHpr=startHpr, other=self, blendType='easeInOut'), Func(place.fsm.request, 'push'), Func(self.__sendPushRequest, task.crateNormal), SoundInterval(self.creakSound, node=self))
self.avPushTrack.start()
return Task.done
else:
pos = task.toonPos
base.localAvatar.setPos(task.toonPos)
return Task.cont
def getCrateSide(self, crateNormal):
for i in xrange(len(CrateNormals)):
dotP = CrateNormals[i].dot(crateNormal)
if dotP > 0.9:
self.crateSide = i
def __sendPushRequest(self, crateNormal):
self.notify.debug('__sendPushRequest')
if self.crateSide != None:
self.sentRequest = 1
self.sendUpdate('requestPush', [self.crateSide])
else:
self.notify.debug("didn't send request")
def __listenForCancelEvents(self, on):
self.notify.debug('%s, __listenForCancelEvents(%s)' % (self.doId, on))
if on:
self.accept(base.MOVE_DOWN, self.__resetStick)
self.accept(base.MOVE_LEFT, self.__resetStick)
self.accept(base.MOVE_RIGHT, self.__resetStick)
else:
self.ignore(base.MOVE_DOWN)
self.ignore(base.MOVE_LEFT)
self.ignore(base.MOVE_RIGHT)
    def setMoveTo(self, avId, x0, y0, z0, x1, y1, z1):
        # Server broadcast: slide the crate from (x0,y0,z0) to (x1,y1,z1),
        # pushed by avatar avId.
        self.notify.debug('setMoveTo')
        self.__moveCrateTo(Vec3(x0, y0, z0), Vec3(x1, y1, z1))
        isLocal = base.localAvatar.doId == avId
        # Also move the avatar along, unless it is the local toon and it
        # has already let go ('and' binds tighter than 'or' here).
        if isLocal and self.stuckToCrate or not isLocal:
            self.__moveAvTo(avId, Vec3(x0, y0, z0), Vec3(x1, y1, z1))
    def __moveCrateTo(self, startPos, endPos):
        # Lerp the crate between the two positions over T_PUSH seconds,
        # with creak and sliding sounds in parallel.
        if self.moveTrack:
            self.moveTrack.finish()
            self.moveTrack = None
        self.moveTrack = Parallel(Sequence(LerpPosInterval(self, T_PUSH, endPos, startPos=startPos, fluid=1)), SoundInterval(self.creakSound, node=self), SoundInterval(self.pushSound, node=self, duration=T_PUSH, volume=0.2))
        self.moveTrack.start()
def __moveAvTo(self, avId, startPos, endPos):
if self.avMoveTrack:
self.avMoveTrack.finish()
self.avMoveTrack = None
av = base.cr.doId2do.get(avId)
if av:
avMoveTrack = Sequence()
moveDir = endPos - startPos
crateNormal = startPos - endPos
crateNormal.normalize()
crateWidth = 2.75 * self.scale
offset = crateWidth + 1.5 + TorsoToOffset[av.style.torso]
toonOffset = crateNormal * offset
avMoveTrack.append(Sequence(LerpPosInterval(av, T_PUSH, toonOffset, startPos=toonOffset, other=self)))
self.avMoveTrack = avMoveTrack
self.avMoveTrack.start()
    def __resetStick(self):
        # Abort the current stick/push attempt and return the local toon
        # to normal walking control.
        self.notify.debug('__resetStick')
        self.__killStickTask()
        self.__listenForCancelEvents(0)
        self.__listenForCollisions(1)
        # Tell the server we are done pushing.
        self.sendUpdate('setDone')
        if self.avPushTrack:
            self.avPushTrack.pause()
            del self.avPushTrack
            self.avPushTrack = None
        if self.avMoveTrack:
            self.avMoveTrack.pause()
            del self.avMoveTrack
            self.avMoveTrack = None
        base.cr.playGame.getPlace().fsm.request('walk')
        self.crateSide = None
        self.crateNormal = None
        self.isLocalToon = 0
        self.stuckToCrate = 0
    def playCrushMovie(self, crusherId, axis):
        # (Re)start the per-frame squash task for crusher `crusherId` at an
        # elevated priority.
        self.notify.debug('playCrushMovie')
        taskMgr.remove(self.taskName('crushTask'))
        taskMgr.add(self.crushTask, self.taskName('crushTask'), extraArgs=(crusherId, axis), priority=25)
    def crushTask(self, crusherId, axis):
        # Per-frame squash animation while a crusher descends on the crate:
        # flatten in Z toward minScale while bulging in X/Y.
        crusher = self.level.entities.get(crusherId, None)
        if crusher:
            crusherHeight = crusher.model.getPos(self)[2]
            maxHeight = self.pos[2] + self.scale
            minHeight = crusher.getPos(self)[2]
            minScale = minHeight / maxHeight
            self.notify.debug('cHeight= %s' % crusherHeight)
            if crusherHeight < maxHeight and crusherHeight >= minHeight:
                if crusherHeight == minHeight:
                    # Fully crushed: hold the squashed pose, restore the
                    # normal scale two seconds later, and end the task.
                    self.setScale(Vec3(1.2, 1.2, minScale))
                    taskMgr.doMethodLater(2, self.setScale, 'resetScale', extraArgs=(1,))
                    return Task.done
                k = crusherHeight / maxHeight
                # NOTE(review): with k < 1 here, 1 / k > 1, so min(1 / k, 0.2)
                # always yields 0.2 -- max() may have been intended; confirm.
                sx = min(1 / k, 0.2)
                self.setScale(Vec3(1 + sx, 1 + sx, k))
        return Task.cont
    def originalTry(self, axis):
        # Fixed interval-based crush/fade sequence; the name suggests an
        # earlier implementation superseded by crushTask -- verify callers.
        tSquash = 0.4
        if self.crushTrack:
            self.crushTrack.finish()
            del self.crushTrack
            self.crushTrack = None
        self.crushTrack = Sequence(LerpScaleInterval(self, tSquash, VBase3(1.2, 1.2, 0.25), blendType='easeInOut'), LerpColorScaleInterval(self, 2.0, VBase4(1, 1, 1, 0), blendType='easeInOut'), Wait(2.0), LerpScaleInterval(self, 0.1, VBase3(1, 1, 1), blendType='easeInOut'), LerpColorScaleInterval(self, 0.1, VBase4(1, 1, 1, 0), blendType='easeInOut'))
        self.crushTrack.start()
| [
"linktlh@gmail.com"
] | linktlh@gmail.com |
207afe79b2117970d554133c01f85e074c46ba24 | 944401a6292baa2d23b9738898e0b0cb199d0795 | /lib/python2.7/idlelib/idle_test/htest.py | 4f622b2f7a14d0e86e5c661ff62c2199ee03f22f | [
"Python-2.0"
] | permissive | sunnyweilai/Finding-Theme-Color-Palettes | cc84c93ce58abdd1802431c41bd59181d7a4f75b | 4c38b112f5c40b43d6ec126e415b609c7fdc1f39 | refs/heads/master | 2022-12-21T09:41:31.187411 | 2019-04-30T14:50:17 | 2019-04-30T14:50:17 | 184,273,925 | 1 | 0 | null | 2022-12-07T03:46:55 | 2019-04-30T14:09:52 | Python | UTF-8 | Python | false | false | 13,788 | py | '''Run human tests of Idle's window, dialog, and popup widgets.
run(*tests)
Create a master Tk window. Within that, run each callable in tests
after finding the matching test spec in this file. If tests is empty,
run an htest for each spec dict in this file after finding the matching
callable in the module named in the spec. Close the window to skip or
end the test.
In a tested module, let X be a global name bound to a callable (class
or function) whose .__name__ attribute is also X (the usual situation).
The first parameter of X must be 'parent'. When called, the parent
argument will be the root window. X must create a child Toplevel
window (or subclass thereof). The Toplevel may be a test widget or
dialog, in which case the callable is the corresonding class. Or the
Toplevel may contain the widget to be tested or set up a context in
which a test widget is invoked. In this latter case, the callable is a
wrapper function that sets up the Toplevel and other objects. Wrapper
function names, such as _editor_window, should start with '_'.
End the module with
if __name__ == '__main__':
<unittest, if there is one>
from idlelib.idle_test.htest import run
run(X)
To have wrapper functions and test invocation code ignored by coveragepy
reports, put '# htest #' on the def statement header line.
def _wrapper(parent): # htest #
Also make sure that the 'if __name__' line matches the above. Then
make sure that .coveragerc includes the following.
[report]
exclude_lines =
.*# htest #
if __name__ == .__main__.:
(The "." instead of "'" is intentional and necessary.)
To run any X, this file must contain a matching instance of the
following template, with X.__name__ prepended to '_spec'.
When all tests are run, the prefix is used to get X.
_spec = {
'file': '',
'kwds': {'title': ''},
'msg': ""
}
file (no .py): run() imports file.py.
kwds: augmented with {'parent':root} and passed to X as **kwds.
title: an example kwd; some widgets need this, delete if not.
msg: master window hints about testing the widget.
Modules and classes not being tested at the moment:
PyShell.PyShellEditorWindow
Debugger.Debugger
AutoCompleteWindow.AutoCompleteWindow
OutputWindow.OutputWindow (indirectly being tested with grep test)
'''
from importlib import import_module
from idlelib.macosxSupport import _initializeTkVariantTests
import Tkinter as tk
AboutDialog_spec = {
'file': 'aboutDialog',
'kwds': {'title': 'aboutDialog test',
'_htest': True,
},
'msg': "Test every button. Ensure Python, TK and IDLE versions "
"are correctly displayed.\n [Close] to exit.",
}
_calltip_window_spec = {
'file': 'CallTipWindow',
'kwds': {},
'msg': "Typing '(' should display a calltip.\n"
"Typing ') should hide the calltip.\n"
}
_class_browser_spec = {
'file': 'ClassBrowser',
'kwds': {},
'msg': "Inspect names of module, class(with superclass if "
"applicable), methods and functions.\nToggle nested items.\n"
"Double clicking on items prints a traceback for an exception "
"that is ignored."
}
_color_delegator_spec = {
'file': 'ColorDelegator',
'kwds': {},
'msg': "The text is sample Python code.\n"
"Ensure components like comments, keywords, builtins,\n"
"string, definitions, and break are correctly colored.\n"
"The default color scheme is in idlelib/config-highlight.def"
}
ConfigDialog_spec = {
'file': 'configDialog',
'kwds': {'title': 'ConfigDialogTest',
'_htest': True,},
'msg': "IDLE preferences dialog.\n"
"In the 'Fonts/Tabs' tab, changing font face, should update the "
"font face of the text in the area below it.\nIn the "
"'Highlighting' tab, try different color schemes. Clicking "
"items in the sample program should update the choices above it."
"\nIn the 'Keys', 'General' and 'Extensions' tabs, test settings"
"of interest."
"\n[Ok] to close the dialog.[Apply] to apply the settings and "
"and [Cancel] to revert all changes.\nRe-run the test to ensure "
"changes made have persisted."
}
# TODO Improve message
_dyn_option_menu_spec = {
'file': 'dynOptionMenuWidget',
'kwds': {},
'msg': "Select one of the many options in the 'old option set'.\n"
"Click the button to change the option set.\n"
"Select one of the many options in the 'new option set'."
}
# TODO edit wrapper
_editor_window_spec = {
'file': 'EditorWindow',
'kwds': {},
'msg': "Test editor functions of interest.\n"
"Best to close editor first."
}
GetCfgSectionNameDialog_spec = {
'file': 'configSectionNameDialog',
'kwds': {'title':'Get Name',
'message':'Enter something',
'used_names': {'abc'},
'_htest': True},
'msg': "After the text entered with [Ok] is stripped, <nothing>, "
"'abc', or more that 30 chars are errors.\n"
"Close 'Get Name' with a valid entry (printed to Shell), "
"[Cancel], or [X]",
}
GetHelpSourceDialog_spec = {
'file': 'configHelpSourceEdit',
'kwds': {'title': 'Get helpsource',
'_htest': True},
'msg': "Enter menu item name and help file path\n "
"<nothing> and more than 30 chars are invalid menu item names.\n"
"<nothing>, file does not exist are invalid path items.\n"
"Test for incomplete web address for help file path.\n"
"A valid entry will be printed to shell with [0k].\n"
"[Cancel] will print None to shell",
}
# Update once issue21519 is resolved.
GetKeysDialog_spec = {
'file': 'keybindingDialog',
'kwds': {'title': 'Test keybindings',
'action': 'find-again',
'currentKeySequences': [''] ,
'_htest': True,
},
'msg': "Test for different key modifier sequences.\n"
"<nothing> is invalid.\n"
"No modifier key is invalid.\n"
"Shift key with [a-z],[0-9], function key, move key, tab, space"
"is invalid.\nNo validitity checking if advanced key binding "
"entry is used."
}
_grep_dialog_spec = {
'file': 'GrepDialog',
'kwds': {},
'msg': "Click the 'Show GrepDialog' button.\n"
"Test the various 'Find-in-files' functions.\n"
"The #1lab_results should be displayed in a new '*Output*' window.\n"
"'Right-click'->'Goto file/line' anywhere in the search #1lab_results "
"should open that file \nin a new EditorWindow."
}
_io_binding_spec = {
'file': 'IOBinding',
'kwds': {},
'msg': "Test the following bindings.\n"
"<Control-o> to open file from dialog.\n"
"Edit the file.\n"
"<Control-p> to print the file.\n"
"<Control-s> to save the file.\n"
"<Alt-s> to save-as another file.\n"
"<Control-c> to save-copy-as another file.\n"
"Check that changes were saved by opening the file elsewhere."
}
_multi_call_spec = {
'file': 'MultiCall',
'kwds': {},
'msg': "The following actions should trigger a print to console or IDLE"
" Shell.\nEntering and leaving the text area, key entry, "
"<Control-Key>,\n<Alt-Key-a>, <Control-Key-a>, "
"<Alt-Control-Key-a>, \n<Control-Button-1>, <Alt-Button-1> and "
"focusing out of the window\nare sequences to be tested."
}
_multistatus_bar_spec = {
'file': 'MultiStatusBar',
'kwds': {},
'msg': "Ensure presence of multi-status bar below text area.\n"
"Click 'Update Status' to change the multi-status text"
}
_object_browser_spec = {
'file': 'ObjectBrowser',
'kwds': {},
'msg': "Double click on items upto the lowest level.\n"
"Attributes of the objects and related information "
"will be displayed side-by-side at each level."
}
_path_browser_spec = {
'file': 'PathBrowser',
'kwds': {},
'msg': "Test for correct display of all paths in sys.path.\n"
"Toggle nested items upto the lowest level.\n"
"Double clicking on an item prints a traceback\n"
"for an exception that is ignored."
}
_percolator_spec = {
'file': 'Percolator',
'kwds': {},
'msg': "There are two tracers which can be toggled using a checkbox.\n"
"Toggling a tracer 'on' by checking it should print tracer"
"output to the console or to the IDLE shell.\n"
"If both the tracers are 'on', the output from the tracer which "
"was switched 'on' later, should be printed first\n"
"Test for actions like text entry, and removal."
}
_replace_dialog_spec = {
'file': 'ReplaceDialog',
'kwds': {},
'msg': "Click the 'Replace' button.\n"
"Test various replace options in the 'Replace dialog'.\n"
"Click [Close] or [X] to close the 'Replace Dialog'."
}
_search_dialog_spec = {
'file': 'SearchDialog',
'kwds': {},
'msg': "Click the 'Search' button.\n"
"Test various search options in the 'Search dialog'.\n"
"Click [Close] or [X] to close the 'Search Dialog'."
}
_scrolled_list_spec = {
'file': 'ScrolledList',
'kwds': {},
'msg': "You should see a scrollable list of items\n"
"Selecting (clicking) or double clicking an item "
"prints the name to the console or Idle shell.\n"
"Right clicking an item will display a popup."
}
show_idlehelp_spec = {
'file': 'help',
'kwds': {},
'msg': "If the help text displays, this works.\n"
"Text is selectable. Window is scrollable."
}
_stack_viewer_spec = {
'file': 'StackViewer',
'kwds': {},
'msg': "A stacktrace for a NameError exception.\n"
"Expand 'idlelib ...' and '<locals>'.\n"
"Check that exc_value, exc_tb, and exc_type are correct.\n"
}
_tabbed_pages_spec = {
'file': 'tabbedpages',
'kwds': {},
'msg': "Toggle between the two tabs 'foo' and 'bar'\n"
"Add a tab by entering a suitable name for it.\n"
"Remove an existing tab by entering its name.\n"
"Remove all existing tabs.\n"
"<nothing> is an invalid add page and remove page name.\n"
}
TextViewer_spec = {
'file': 'textView',
'kwds': {'title': 'Test textView',
'text':'The quick brown fox jumps over the lazy dog.\n'*35,
'_htest': True},
'msg': "Test for read-only property of text.\n"
"Text is selectable. Window is scrollable.",
}
_tooltip_spec = {
'file': 'ToolTip',
'kwds': {},
'msg': "Place mouse cursor over both the buttons\n"
"A tooltip should appear with some text."
}
_tree_widget_spec = {
'file': 'TreeWidget',
'kwds': {},
'msg': "The canvas is scrollable.\n"
"Click on folders upto to the lowest level."
}
_undo_delegator_spec = {
'file': 'UndoDelegator',
'kwds': {},
'msg': "Click [Undo] to undo any action.\n"
"Click [Redo] to redo any action.\n"
"Click [Dump] to dump the current state "
"by printing to the console or the IDLE shell.\n"
}
_widget_redirector_spec = {
'file': 'WidgetRedirector',
'kwds': {},
'msg': "Every text insert should be printed to the console."
"or the IDLE shell."
}
def run(*tests):
    """Create the master Tk window and queue each requested htest.

    With arguments, each callable is paired with its *_spec from this
    module; with no arguments, every *_spec is paired with its callable
    imported from the idlelib module named in the spec.
    """
    root = tk.Tk()
    root.title('IDLE htest')
    root.resizable(0, 0)
    _initializeTkVariantTests(root)

    # a scrollable Label like constant width text widget.
    frameLabel = tk.Frame(root, padx=10)
    frameLabel.pack()
    text = tk.Text(frameLabel, wrap='word')
    text.configure(bg=root.cget('bg'), relief='flat', height=4, width=70)
    scrollbar = tk.Scrollbar(frameLabel, command=text.yview)
    text.config(yscrollcommand=scrollbar.set)
    scrollbar.pack(side='right', fill='y', expand=False)
    text.pack(side='left', fill='both', expand=True)

    test_list = [] # List of tuples of the form (spec, callable widget)
    if tests:
        for test in tests:
            test_spec = globals()[test.__name__ + '_spec']
            test_spec['name'] = test.__name__
            test_list.append((test_spec, test))
    else:
        for k, d in globals().items():
            if k.endswith('_spec'):
                test_name = k[:-5]
                test_spec = d
                test_spec['name'] = test_name
                mod = import_module('idlelib.' + test_spec['file'])
                test = getattr(mod, test_name)
                test_list.append((test_spec, test))

    # One-element lists act as mutable cells shared with the closures below.
    test_name = [tk.StringVar('')]
    callable_object = [None]
    test_kwds = [None]

    def next_test():
        # Fix: renamed from 'next' to avoid shadowing the builtin.
        # Pop the next spec, display its instructions, and arm the button.
        if len(test_list) == 1:
            next_button.pack_forget()
        test_spec, callable_object[0] = test_list.pop()
        test_kwds[0] = test_spec['kwds']
        test_kwds[0]['parent'] = root
        test_name[0].set('Test ' + test_spec['name'])

        text.configure(state='normal') # enable text editing
        text.delete('1.0','end')
        text.insert("1.0",test_spec['msg'])
        text.configure(state='disabled') # preserve read-only property

    def run_test():
        # Instantiate the current test widget; print its result if any.
        widget = callable_object[0](**test_kwds[0])
        try:
            print(widget.result)
        except AttributeError:
            pass

    button = tk.Button(root, textvariable=test_name[0], command=run_test)
    button.pack()
    next_button = tk.Button(root, text="Next", command=next_test)
    next_button.pack()
    next_test()
    root.mainloop()
if __name__ == '__main__':
run()
| [
"wnn2260@gmail.com"
] | wnn2260@gmail.com |
1712f8a239c3cea9372aa7f0697719b9add81465 | 97cbcd454be80f0b6f986b0a81e84570596a9368 | /tests/basics/Functions32.py | a30fc83b58bdb7db17b6699db653ac8fd8964539 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | keitheis/Nuitka | aff7bf348e12d772543018e6b464cbfa7eaf2d30 | 1e4f31e12cbd36ce2f6a785c61e0111639c258a9 | refs/heads/master | 2021-01-17T21:59:05.601349 | 2014-04-21T10:28:13 | 2014-04-21T10:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,183 | py | # Copyright 2014, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def kwonlysimple(*, a):
    # Minimal keyword-only signature: 'a' may only be passed by name.
    return a

print( "Most simple case", kwonlysimple( a = 3 ) )
def kwonlysimpledefaulted(*, a = 5):
    # Keyword-only argument with a default; callable with no arguments.
    return a

print( "Default simple case", kwonlysimpledefaulted() )
# Helper factories for the keyword-only test below: each prints when it
# is called, making visible *when* default values and annotations are
# evaluated (once, at function definition time).
def default1():
    print( "Called", default1 )
    return 1

def default2():
    print( "Called", default2 )
    return 2

def default3():
    print( "Called", default3 )
    return 3

def default4():
    print( "Called", default4 )
    return 4

def annotation1():
    print ( "Called", annotation1 )
    return "a1"

def annotation2():
    print ( "Called", annotation2 )
    return "a2"

def annotation3():
    print ( "Called", annotation3 )
    return "a3"

def annotation4():
    print ( "Called", annotation4 )
    return "a4"

def annotation5():
    print ( "Called", annotation5 )
    return "a5"

def annotation6():
    print ( "Called", annotation6 )
    return "a6"

def annotation7():
    print ( "Called", annotation7 )
    return "a7"

def annotation8():
    print ( "Called", annotation8 )
    return "a8"

def annotation9():
    print ( "Called", annotation9 )
    return "a9"
def kwonlyfunc(x: annotation1(), y: annotation2() = default1(), z: annotation3() = default2(), *, a: annotation4(), b: annotation5() = default3(), c: annotation6() = default4(), d: annotation7(), **kw: annotation8()) -> annotation9():
    # Exercises every parameter kind with both annotations and defaults.
    print( x, y, z, a, b, c, d )

# Inspect, then replace, the function's metadata in place.
print( kwonlyfunc.__kwdefaults__ )
print( "Keyword only function" )
kwonlyfunc( 7, a = 8, d = 12 )
print( "Annotations come out as", sorted( kwonlyfunc.__annotations__ ) )
kwonlyfunc.__annotations__ = {}
print( "After updating to None it is", kwonlyfunc.__annotations__ )
kwonlyfunc.__annotations__ = { "k" : 9 }
print( "After updating to None it is", kwonlyfunc.__annotations__ )
def kwonlystarfunc(*, a, b, **d):
    # Keyword-only arguments plus a **kwargs catch-all.
    return a, b, d

print( "kwonlystarfunc", kwonlystarfunc( a = 8, b = 12, k = 9, j = 7 ) )
def deeplyNestedNonLocalWrite():
    # 'nonlocal x' inside g binds two levels up (f defines no x), so the
    # write updates this function's local; the call returns (3, 3).
    x = 0
    y = 0

    def f():
        def g():
            nonlocal x
            x = 3
            return x
        return g()
    return f(), x

print( "Deeply nested non local writing function", deeplyNestedNonLocalWrite() )
def deletingClosureVariables():
    # The first g() deletes the enclosing cell's x; the second g() then
    # raises for the already-unbound nonlocal, and that exception object
    # is returned.
    try:
        x = 1

        def g():
            nonlocal x
            del x
        g()
        g()
    except Exception as e:
        return e

print( "Using deleted non-local vaiables", deletingClosureVariables() )
| [
"kay.hayen@gmail.com"
] | kay.hayen@gmail.com |
f6eb37893e2c8398620ed9ef42a8aeb7319b6a93 | 108fc2873b5c07e4ad9515adc16bc8e9fdf7d021 | /smorest_sfs/utils/imports.py | 5e66ce484a3a2a64d10150b3e01da6c0b90d5117 | [
"Apache-2.0"
] | permissive | ssfdust/yt-media | 4ac5eba6a25830268f42b951e8307bb57e7baeeb | 36c3d1977df5851d8df54846f0bc84be2b86e962 | refs/heads/master | 2021-08-08T09:40:31.241228 | 2020-05-11T03:11:20 | 2020-05-11T03:11:20 | 175,938,603 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | # Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkgutil
def import_submodules(context: dict, root_module: str, path: list) -> None:
    """Import every submodule under *root_module* into *context*.

    Walks *path* (the package's ``__path__`` list -- the previous
    ``path: str`` annotation was wrong: ``pkgutil.walk_packages`` takes a
    list of directories), imports each submodule, and copies its public
    names (``__all__`` if defined, otherwise every name not starting with
    ``_``) into *context*.  The modules themselves are added last, so a
    module name always wins over a plain attribute of the same name.

    Adapted from
    https://github.com/getsentry/zeus/blob/97528038a0abfd6f0e300d8d3f276e1b0818c328/zeus/utils/imports.py#L23

    >>> import_submodules(locals(), __name__, __path__)
    """
    modules = {}
    for _, module_name, _ in pkgutil.walk_packages(path, root_module + "."):
        # loader.find_module(...).load_module(...) caused runtime errors
        # with model conflicts; __import__ with a fromlist returns the
        # submodule itself rather than the top-level package.
        module = __import__(module_name, globals(), locals(), ["__name__"])
        keys = getattr(module, "__all__", None)
        if keys is None:
            keys = [k for k in vars(module) if not k.startswith("_")]
        for k in keys:
            context[k] = getattr(module, k, None)
        modules[module_name] = module
    # Maintain existing module namespace import with priority.
    context.update(modules)
| [
"ssfdust@gmail.com"
] | ssfdust@gmail.com |
169a505b73cec2a76c9acc2e96ffc6cdcd3aaeaa | bc0fc1d06ee2822b696494d3a73eeb6c1af0360a | /androidfs/acestream.engine/data/plugins/viasat_embed.py | 5f69a54cd37d0dae965f2594274dc2cc77b7545d | [] | no_license | elipatov/acestream.engine | cf41472b6435400022f19eb4b48c2a17a3afacbd | 65849f78a0dc7749d0478c387e69bb14865bdaf3 | refs/heads/main | 2023-02-24T22:28:00.045198 | 2021-01-22T18:04:12 | 2021-01-22T18:04:12 | 329,993,224 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | #-plugin-sig:Tgg2N/mlOCMuMUWR77aIuGUWncB0O1Mc6rLUOmnvO3hbpruyNpgRfDiH5IScd0JNZvzRHw3chwFWMgPzQskdvfDq8u01ZyGbSY5+Z5jK/bO6xZGV4kQumyH4jv59aQiqEjtHk8u7n7878oi1qpqMY1OEDTn6gK7fNE//2XroR9PfGcNTwhpvfoh6pEB2Yzww5I+8wh35cqtcS/oeIB98bXt3X2XOUb88OF8Oepd63G1OM3Lixc/MdVI37N+Kg8BoyBenl3PSpZwB9w7QJV7rRYWsBpnPmeXjLdrHWjzSDfyCK9U5KW39LhjynZltpD/wBV98tALzALrGY1d5VZAawg==
import re
from ACEStream.PluginsContainer.livestreamer.plugin import Plugin
from ACEStream.PluginsContainer.livestreamer.plugin.api import http
_url_re = re.compile("http(s)?://(www\.)?tv(3|6|8|10)\.se")
_embed_re = re.compile('<iframe class="iframe-player" src="([^"]+)">')
class ViasatEmbed(Plugin):
    """Resolves the embedded player on the Viasat tv3/tv6/tv8/tv10.se sites."""

    @classmethod
    def can_handle_url(cls, url):
        # Fix: the classmethod's first parameter was misnamed 'self'.
        # Returns a truthy match object when `url` is a supported site.
        return _url_re.match(url)

    def _get_streams(self):
        # Fetch the page, locate the embedded iframe player, and delegate
        # stream resolution to it; returns None when no embed is found.
        res = http.get(self.url)
        match = _embed_re.search(res.text)
        if match:
            embed_url = match.group(1)
            return self.session.streams(embed_url)
__plugin__ = ViasatEmbed
| [
"evgeni.lipatov@idt.net"
] | evgeni.lipatov@idt.net |
3cc3d36f75e036fa7a5e0def03e659cc73015e62 | 51363872687318ac54e815b51d16d44d214974a2 | /build/turtlebot_msgs/catkin_generated/pkg.develspace.context.pc.py | 278898f50424d4529ca17bcfc9c3b65ea13b5ba3 | [] | no_license | pirmou/catkin_ws | 2acee80a43f17841326d1b917931866d561648c3 | abaac27209016a944bd3520d84e4dc3aab1abf2e | refs/heads/main | 2023-02-17T04:44:03.927127 | 2021-01-10T17:40:02 | 2021-01-10T17:40:02 | 328,440,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/pierremoussa/catkin_ws/devel/include".split(';') if "/home/pierremoussa/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;sensor_msgs;std_srvs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_msgs"
PROJECT_SPACE_DIR = "/home/pierremoussa/catkin_ws/devel"
PROJECT_VERSION = "2.2.1"
| [
"pierre.moussa18@me.com"
] | pierre.moussa18@me.com |
96c92caa5ed1befcc1f92e68dcd7f0678b33848b | 28e997a25e62b9c76fd4d3fd1e87436dc54b3178 | /2_bioinformatics_stronghold/rosalind_RNAS.py | b9ef53081808f87d60e58a271a0f329028991977 | [] | no_license | kangli-bionic/Rosalind | 0bba97d86db51b91af480766155816ec9e1f05e6 | 6cae380f31498a991381c7c6f1d479d302615571 | refs/heads/master | 2021-01-02T09:13:49.665498 | 2016-08-16T20:54:48 | 2016-08-16T20:54:48 | 99,171,765 | 1 | 0 | null | 2017-08-03T00:07:24 | 2017-08-03T00:07:24 | null | UTF-8 | Python | false | false | 1,289 | py | #!/usr/bin/python
'''
Rosalind: Bioinformatics Stronghold
Problem: Wobble Bonding and RNA Secondary Structures
URL: http://rosalind.info/problems/rnas/
Given: An RNA string s (of length at most 200 bp).
Return: The total number of distinct valid matchings of basepair edges in the
bonding graph of s. Assume that wobble base pairing is allowed.
'''
def pair(seq):
    # Count the distinct noncrossing basepair matchings of `seq`, wobble
    # (U-G) pairs allowed.  Relies on module globals: `match` (allowed
    # partners per base) and `prev` (memo of substring -> count).
    # Fewer than 4 bases cannot contain a pair (the range(4, ...) below
    # enforces the minimum separation), so exactly one (empty) matching.
    if len(seq) < 4:
        return 1
    # No need to recalculate a sequence if we've already done so.
    if seq in prev:
        return prev[seq]
    # Otherwise, do the calculation and add it to the dictionary.
    else:
        # Either base 0 is unpaired, or it pairs with a base i >= 4,
        # splitting the rest into independent inside/outside substrings.
        prev[seq] = pair(seq[1:])
        for i in range(4, len(seq)):
            if seq[i] in match[seq[0]]:
                prev[seq] += pair(seq[1:i]) * pair(seq[i+1:])
        return prev[seq]
if __name__ == '__main__':
    # Read sequence (newlines stripped so the whole file is one string).
    with open('problem_datasets/rosalind_rnas.txt', 'r') as infile:
        seq = infile.read().replace('\n', '')
    # The possible basepair matchings including wobble base pairing.
    match = {'A':'U', 'U':'AG', 'C':'G', 'G':'CU'}
    # Memo table used by pair(): substring -> number of valid matchings.
    prev = {}
    # Print answer.
    print(pair(seq))
| [
"kapoozy@gmail.com"
] | kapoozy@gmail.com |
0f3e81225d78acd167476a5d00413cdf17c6da29 | 3655215852ee2fb1864dbfa1ce924290a2c4f4b9 | /Assignment 8/str.py | 86812ddfbfc152dc6de737a38e116eb6ebc80f38 | [] | no_license | shubhamkanade/Niyander-Python | 8b318df2b8ae33b943dcf83eb01c00577914ca59 | 2b6a4780707e26852aa950d7c3e54be1f3b2080b | refs/heads/main | 2023-06-16T03:44:07.833619 | 2021-07-06T03:09:16 | 2021-07-06T03:09:16 | 383,021,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | list1 = "saurabh"
# Idiomatic: iterate the string directly rather than indexing by position.
for ch in list1:
    print(ch)
| [
"shubhamkanade98@gmail.com"
] | shubhamkanade98@gmail.com |
49630b6c37551aa5f2437053e72260c7fd273926 | 3816cc7dd655e7ede0c4ad9e9ccd7e541bdc88c2 | /chapter_exercise.py | b62fa23aee0e3d380811259de0ccc1d2daa27cc7 | [] | no_license | GBelzoni/ThinkBayes | 5b32c8fac528f48ceebcd1d7394591b25582633b | a06ee2efd3687d6d129792bf5f1f7e5ac99e22f4 | refs/heads/master | 2021-01-23T21:39:12.740134 | 2014-06-08T11:38:22 | 2014-06-08T11:38:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,808 | py | # -*- coding: utf-8 -*-
import numpy
import matplotlib
from matplotlib import pylab, mlab, pyplot
np = numpy
plt = pyplot
from IPython.display import display
from IPython.core.pylabtools import figsize, getfigs
from pylab import *
from numpy import *
from os import chdir
chdir('C:\Users\PHCostello\Documents\UbuntuHome\Projects\ThinkBayes')
from thinkbayes import *
#Test out plotting
# Smoke test for the pylab star-import: one full cycle of sin(x).
x=linspace(0,2*pi,num=500)
y = sin(x)
plot(x,y)
###########################################################################
#COOKIES!!!
###########################################################################
#Suppose there are two bowls of cookies. Bowl 1 contains 30 vanilla cookies
#and 10 chocolate cookies. Bowl 2 contains 20 of each.
#Now suppose you choose one of the bowls at random and,
#without looking, select a cookie at random.
#The cookie is vanilla.
#
#What is the probability that it came from Bowl 1?
###########################################################################
#M&M
###########################################################################
#In 1995, they introduced blue M&M’s.
#Before then, the color mix in a bag of plain M&M’s
#was 30% Brown, 20% Yellow, 20% Red, 10% Green, 10% Orange, 10% Tan.
#
#Afterward it was 24% Blue , 20% Green, 16% Orange, 14% Yellow, 13% Red, 13% Brown.
#
#Suppose a friend of mine has two bags of M&M’s,
#and he tells me that one is from 1994 and one from 1996.
#He won’t tell me which is which, but he gives me one M&M from each bag.
#One is yellow and one is green.
#
#What is the probability that the yellow one came from the 1994 bag?
# Hyp A = Bag1 from 1994 and Bag2 from 1996
# Hyp B = Bag2 from 1994 and Bag1 from 1996
#Pr( A | given y and g from different bags) = Pr(y|first)P(g|second)P(A)/sum(...)
# = 0.2*0.2*0.5/( 0.2*0.2*0.5 + 0.14*0.1*0.5) = 2/(2+0.7) = 2/2.7
###########################################################################
###########################################################################
#Chapter 2
###########################################################################
###########################################################################
#Basic probability mass function
# A fair six-sided die: each outcome gets probability 1/6.
pmf = Pmf()
for x in [1,2,3,4,5,6]:
    pmf.Set(x,1/6.)

pmf.Prob(2)
#Setting arbitrary value then normalising.
# Unnormalised weights of 1 each; Normalize() rescales them to sum to 1.
pmf = Pmf()
for x in [1,2,3,4,5,6]:
    pmf.Set(x,1.)

print pmf.Prob(2)
pmf.Normalize()
print pmf.Prob(2)
# Build a distribution from observed frequencies instead of set values.
pmf = Pmf()
for x in ['a','a','a','a','b','b','c']:
    pmf.Incr(x,1.)#Use Incr to add to freq in dist

pmf.Normalize()
pmf.Prob('a')
##Cookie problem
pmf = Pmf()
#Set priors
pmf.Set('Bowl 1' , 0.5)
pmf.Set('Bowl 2' , 0.5)
#Update with Likelyhood
pmf.Mult('Bowl 1', 0.75)
pmf.Mult('Bowl 2', 0.5)
#Normalise
pmf.Normalize()
print pmf.Prob('Bowl 1')
#Generalise
class Cookie(Pmf):
    """Cookie problem posterior over bowls (Think Bayes, chapter 2)."""
    def __init__(self, hypos):
        """Start with a uniform prior over *hypos*."""
        Pmf.__init__(self)
        for hypo in hypos:
            self.Set(hypo,1)
        self.Normalize()
    def Update(self, data):
        """Multiply each hypothesis by the likelihood of *data*, then renormalize."""
        for hypo in self.Values():
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
        self.Normalize()
    # Class-level lookup table: flavor mix per bowl.
    mixes = {
        'Bowl 1':dict(vanilla=0.75, chocolate=0.25),
        'Bowl 2':dict(vanilla=0.5, chocolate=0.5),
    }
    def Likelihood(self, data, hypo):
        """P(drawing flavor *data* | bowl *hypo*), read from the mix table."""
        mix = self.mixes[hypo]
        like = mix[data]
        return like
hypos = ['Bowl 1', 'Bowl 2']
pmf = Cookie(hypos)
pmf.Update('vanilla')
for hypo, prob in pmf.Items():
print hypo, prob
#####M&M problems
mix94 = dict(brown=30,
yellow=20,
red=20,
green=10,
orange=10,
tan=10)
mix96 = dict(blue=24,
green=20,
orange=16,
yellow=14,
red=13,
brown=13)
hypoA = {'bag1' : mix94, 'bag2':mix96}
hypoB = {'bag1' : mix96, 'bag2':mix94}
hypothesis = {'A' : hypoA, 'B': hypoB}
class MandM(Suite):
    """M&M problem: hypothesis 'A' = bag1 is the 1994 bag, 'B' = bag1 is the 1996 bag."""
    def Likelihood(self,data,hypo):
        """P(*data* | *hypo*) where *data* is a (bag, color) pair.

        Counts come from the module-level `hypothesis` table; relative counts
        suffice because Update renormalizes.
        """
        bag, color = data
        like = hypothesis[hypo][bag][color]
        return like
suite = MandM('AB')
suite.Print()
suite.Update(('bag1', 'yellow'))
suite.Print()
suite.Update(('bag2', 'green'))
suite.Print()
##Exercises Chapter 2
#Generalise
class Cookie(Pmf):
    """Generalized cookie problem with configurable bowl contents.

    Fixes over the previous draft:
    - the default dicts used bare names (``vanilla:30``) which raised
      NameError at definition time; keys are now strings;
    - mutable dicts are no longer used as default argument values;
    - ``mixes`` referenced ``self`` at class scope (invalid); it is now
      built per-instance in ``__init__``.
    """

    def __init__(self, hypos, Bowl1=None, Bowl2=None):
        """Uniform prior over *hypos*; Bowl1/Bowl2 map flavor -> count.

        Raw counts are fine: Update renormalizes, so only ratios matter.
        """
        Pmf.__init__(self)
        for hypo in hypos:
            self.Set(hypo, 1)
        self.Normalize()
        self.Bowl1 = Bowl1 if Bowl1 is not None else {'vanilla': 30, 'chocolate': 10}
        self.Bowl2 = Bowl2 if Bowl2 is not None else {'vanilla': 20, 'chocolate': 20}
        # Per-instance mix table (``self`` is not available at class scope).
        self.mixes = {
            'Bowl 1': self.Bowl1,
            'Bowl 2': self.Bowl2,
        }

    def Update(self, data):
        """Multiply in the likelihood of *data* under each hypothesis and renormalize."""
        for hypo in self.Values():
            self.Mult(hypo, self.Likelihood(data, hypo))
        self.Normalize()

    def Likelihood(self, data, hypo):
        """Return the (unnormalized) probability of drawing flavor *data* from bowl *hypo*."""
        return self.mixes[hypo][data]
#Exercise Chapter 2 kinda hard
#hypos = ['Bowl 1', 'Bowl 2']
#pmf = Cookie(hypos)
#
#pmf.Update('vanilla')
#
#for hypo, prob in pmf.Items():
# print hypo, prob
###########################################################################
###########################################################################
#Chapter 3
###########################################################################
###########################################################################
class Dice(Suite):
    """Which die (identified by its number of sides) produced the observed rolls?"""

    def Likelihood(self, data, hypo):
        """P(roll == *data* | a fair *hypo*-sided die): 0 if impossible, else uniform."""
        return 0 if hypo < data else 1.0 / hypo
suite = Dice([4,6,8,12,20])
suite.Update(6)
suite.Print()
for roll in [6,8,7,7,5,4]:
suite.Update(roll)
suite.Print()
## A company has a has N trains and you see train numbered 60. What is the most
#probable number of trains?
class Train(Suite):
    """Locomotive problem: posterior over the fleet size N (Think Bayes, ch. 3)."""
    def __init__(self, hypos, alpha=1.0):
        """Power-law prior: P(N) proportional to N**(-alpha) for each N in *hypos*."""
        # NOTE(review): calls Pmf.__init__ directly, bypassing Suite.__init__ --
        # presumably intentional so the prior is set here rather than uniformly.
        Pmf.__init__(self)
        for hypo in hypos:
            self.Set(hypo, hypo**(-alpha))
        self.Normalize()
    def Likelihood(self, data, hypo):
        """P(seeing train number *data* | fleet of size *hypo*): uniform over 1..hypo."""
        if hypo < data:
            return 0
        else:
            return 1./hypo
hypos = range(1,1001)
suite = Train(hypos)
suite.Update(60)
suite.Mean()
xy = array([it for it in suite.Items()])
plot(xy[:,0],xy[:,1])
xy[0:5,:]
show()
#what about prior?
for data in [60,30,90]:
suite.Update(data)
suite.Mean()
interval = Percentile(suite, 5.), Percentile(suite,95)
print interval
cdf = suite.MakeCdf()
interval = cdf.Percentile(5.), cdf.Percentile(95)
interval
###############################################################################
#Chapter 3 exercises
###############################################################################
#Exercise 1 To write a likelihood function for the locomotive problem, we had to answer this question: “If the railroad has N locomotives, what is the probability that we see number 60?”
#The answer depends on what sampling process we use when we observe the locomotive. In this chapter, I resolved the ambiguity by specifying that there is only one train-operating company (or only one that we care about).
#But suppose instead that there are many companies with different numbers of trains. And suppose that you are equally likely to see any train operated by any company. In that case, the likelihood function is different because you are more likely to see a train operated by a large company.
#
#As an exercise, implement the likelihood function for this variation of the locomotive problem, and compare the results.
#Subset the total number of train into k groups
#You P(60|N, group_i size = sz_i, sum sz_i=N)
###########################################################################
###########################################################################
#Chapter 4
###########################################################################
###########################################################################
#A statistical statement appeared in “The Guardian" on Friday January 4, 2002:
#When spun on edge 250 times, a Belgian one-euro coin came up heads 140 times and tails 110.
#‘It looks very suspicious to me,’ said Barry Blight, a statistics lecturer at the London School of Economics.
#‘If the coin were unbiased, the chance of getting a result as extreme as that would be less than 7%.’
#But do these data give evidence that the coin is biased rather than fair?
hypos = range(0,101)
class Coin(Suite):
    """Posterior over coin bias; hypotheses are heads-percentages 0..100."""

    def Likelihood(self, data, hypo):
        """P(observing *data* ('H' or 'T') | heads probability hypo/100)."""
        p_heads = hypo / 100.
        if data == "H":
            return p_heads
        return 1 - p_heads
suite = Coin(hypos)
dataset = 'H' * 140 + 'T' * 110
for data in dataset:
suite.Update(data)
suite.Print()
x = suite.Values()
y =suite.d.values()
plot(x,y)
show()
suite.MaximumLikelihood()
print 'Mean', suite.Mean()
print 'Median', Percentile(suite, 50)
print 'CI', thinkbayes.CredibleInterval(suite, 90)
##### Beta distribution ###############
#Definition in thinkbayes.pdf
#TODO check conjugate prioness of beta dist
beta = Beta()
beta.Update( (140,110))
print beta.Mean()
x = linspace(0,1,num=100)
y = [ beta.EvalPdf(val) for val in x]
plot(x,y)
show()
#To make into pdf would need to scale as vals around 1e-75
#and making PmfDict can't handle little numbers due to floating point rounding
yscaled = [yv*1e80 for yv in y]
yscaled = array(yscaled)
yscaled = yscaled/sum(yscaled)
betaPmf = MakePmfFromDict(dict(zip(x, yscaled)))
x = linspace(0,1,num=100)
y = [ betaPmf.Prob(val) for val in x]
plot(x,y)
show()
###Exercises
#Exercise 1
#Suppose that instead of observing coin tosses directly,
#you measure the outcome using an instrument that is not always correct.
#Specifically, suppose there is a probability y that an actual heads is reported as tails,
#or actual tails reported as heads.
#Let a be the probability of misreporting the value
#Pr(p=x|y=H) = Pr(H|p=x)Pr(p=x)
#Pr(H|p=x) = x*a + (1-x)*(1-a) = prob observing getting head and reporting correctl + pr T and reporting incorrrectly
#Pr(T|p=x) = (1-x)*a + x*(1-a)
class CoinLatent( Suite ):
    """Coin-bias posterior with a noisy observation channel.

    *a* is the probability that a flip is reported correctly; with
    probability 1 - a, heads is reported as tails and vice versa.
    """
    def __init__(self, hypos, a=1.0):
        Suite.__init__(self, hypos)
        self.a = a
    def set_a(self, a):
        """Set the correct-report probability (bug fix: 'self' was missing)."""
        self.a = a
    def Likelihood(self, data, hypo):
        """P(reported *data* | true heads probability hypo/100, report accuracy a)."""
        prH = hypo/100.
        prT = 1 - hypo/100.
        a = self.a
        if data=="H":
            # reported heads = (true heads, reported right) + (true tails, reported wrong)
            return prH*a + prT*(1-a)
        else:
            return prT*a + prH*(1-a)
hypos = range(1,101)
suite = CoinLatent(hypos,a=1-0.56)
dataset = 'H' * 140 + 'T' * 110
for data in dataset:
suite.Update(data)
#suite.Print()
x = suite.Values()
y =suite.d.values()
plot(x,y)
show()
aas = [1.0,0.9,0.7,0.6,0.58,0.56,0.53,0.5,0.3,0.1,0.0]
def genSuite(a):
    """Build a CoinLatent posterior for misreport parameter *a* from the 140H/110T data."""
    hypos = range(1,101)
    suite = CoinLatent(hypos,a)
    dataset = 'H' * 140 + 'T' * 110
    for data in dataset:
        suite.Update(data)
    return suite
suite.MakeCdf().Percentile(50)
#Conclusion: we get 140H, 110T so approx 0.56 heads. If prob of reporting wrong is 0.56
# then we the we would think that most likely hyp near 100% heads, as then it would only be
# wrong reporting that is giving us the error. The distibution is increasing for a=0.56 with max
# max likelyhood of 0.3 at prior = 1.0
# When reporting error is a=0.5, then 50/50 error means we have no info about the underlying dist
# and all priors are equally likely, so flat posterior
# When a<0.5 then error rate flips role of heas and tails and we get the same behaviour inverted, ie
# for a = 1- 0.56 then we have decreasing distribution with max at prior =0.0.
# So in end we have posterior P(p=x| data , a=alpha). I guess we could also put a prior on alpha and integrate it
# out as well
#CoinLatentIntAlpha
####Integrating out noise parameter in coin tossing problem - WRITEUP!!!
#hypo x's, alpha's
#CoinLatent, posteria given alpha, so Pr(x|data,alpha) = LikelyHood given alpha
#likely hood P(x|data, alpha)Pr(alpha)
###
#So generate pmf
errors = linspace(0.5,1,num=50)
suites = [ genSuite(er) for er in errors]
x=56
sum([st.Prob(x) for st in suites])
pmf_coin = Pmf()
pmf_alpham = [sum([st.Prob(x) for st in suites]) for x in hypos]
hypos
for pr in zip(hypos,pmf_alpham):
pmf_coin.Set(pr[0],pr[1])
pmf_coin.Normalize()
x = pmf_coin.Values()
y =pmf_coin.d.values()
plot(x,y)
show()
suite = CoinLatentAlphInt(hypos)
dataset = 'H' * 140 + 'T' * 110
for data in dataset:
suite.Update(data)
| [
"patrickhcostello@gmail.com"
] | patrickhcostello@gmail.com |
8329046888d401d5fffd22ffe9b07d4646213ac0 | 0567517ff7c0366b58e52d7fa96b651e97af5d82 | /apps/smartpipe/migrations/0014_project_geometry.py | 7afb86374c9d472435fcd7586edc5c61f5c73554 | [] | no_license | m6ttl/smartpipes | fdb9976b11d6c520953c240872d2574b1a69ec55 | 2d5846143dbf7b44c36491dd1787c36ebbe4fe0d | refs/heads/master | 2022-12-09T10:46:38.594820 | 2020-03-09T13:01:07 | 2020-03-09T13:01:07 | 246,028,233 | 0 | 0 | null | 2022-12-08T03:46:02 | 2020-03-09T12:20:32 | HTML | UTF-8 | Python | false | false | 490 | py | # Generated by Django 2.0.1 on 2020-02-16 15:47
import DjangoUeditor.models
from django.db import migrations
class Migration(migrations.Migration):
    """Add the optional rich-text 'geometry' field to smartpipe.Project."""
    dependencies = [
        ('smartpipe', '0013_pipedetail_tem'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='geometry',
            field=DjangoUeditor.models.UEditorField(blank=True, default='', null=True, verbose_name='地理数据'),
        ),
    ]
| [
"steve_wei@163.net"
] | steve_wei@163.net |
69e0a610209790bf516dcdab0f40d9cfdbf81ce1 | de915073d42855cafad97adca348a46b2de92626 | /test_settings.py | 78b2b91a07ed1d32b6f0899527bb6cb81f060ef5 | [
"MIT"
] | permissive | koonalb/django-knockout-modeler | f4b1418594701d270e1a5e1a45a5dcdec9a88f73 | 29e5e939acba8fb3bd7fcad7726eb1115bd1e420 | refs/heads/master | 2021-01-17T21:43:28.496690 | 2015-11-10T23:45:15 | 2015-11-10T23:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = "secret_key_for_testing"
INSTALLED_APPS = ['knockout_modeler']
| [
"rich@anomos.info"
] | rich@anomos.info |
80a3e5a20b477376b8ff06266e6480a8e92aef97 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14807.py | 1c4944b19c8681435a7d2fcc608a1ce5faee96ac | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | # How can I make Python with Colorama print coloured text after compiling it to an .exe?
-w
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
4b3776fb2af490e0f28b4baca4d019143be9046c | e3b5e20bcb560a3c37c09f728b9340b1715c1818 | /venv/lib/python3.7/site-packages/plotly/validators/splom/dimension/axis/__init__.py | a6db85fff6ce1e67482945d3cdb1d6860e3f789b | [
"MIT"
] | permissive | 180Studios/LoginApp | 63bc50b1f91e7221c7581627ab166eeb01758f5c | 66ff684a81b23d8f45eef2c56be19a2afd95ab29 | refs/heads/master | 2022-12-24T00:33:08.481826 | 2020-02-03T05:14:41 | 2020-02-03T05:14:41 | 144,414,562 | 0 | 1 | MIT | 2022-12-08T01:38:26 | 2018-08-11T19:57:44 | Python | UTF-8 | Python | false | false | 72 | py | from ._type import TypeValidator
from ._matches import MatchesValidator
| [
"kylenahas@gmail.com"
] | kylenahas@gmail.com |
55ccd49a7f8ec347c90757fd4cb841d531674772 | 05fda9758bb71133b85ce6a58ce9dbc8fdc18fc7 | /resources/eth/history.py | 9d047cedb23e4c491af577a0ccc572b8f5791404 | [
"MIT"
] | permissive | yanrising/bitez | 82e3572d689989e37f5d8d3ab06bd764b036e64f | c0d9b052cbc8eb1c9884c287e34705b0a2f73bb1 | refs/heads/master | 2023-01-04T14:12:18.651916 | 2019-10-17T14:31:25 | 2019-10-17T14:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | # tx count
import requests
from config import INFURA_API_KEY, CRYPTO_NETWORK
# block param: loop 'latest', 'pending'
def eth_tx_count(address, block):
    """Return the transaction count (nonce) of *address* via Infura JSON-RPC.

    :param address: hex account address
    :param block: block tag such as 'latest' or 'pending'
    :raises ValueError: if CRYPTO_NETWORK is not a supported network
        (previously this fell through and raised NameError on the
        undefined ``net`` variable).
    """
    if CRYPTO_NETWORK == 'mainnet':
        net = 'https://mainnet.infura.io/v3/'+INFURA_API_KEY
    elif CRYPTO_NETWORK == 'testnet':
        net = 'https://ropsten.infura.io/v3/'+INFURA_API_KEY
    else:
        raise ValueError('unsupported CRYPTO_NETWORK: %r' % (CRYPTO_NETWORK,))
    hist = requests.post(net, json={"jsonrpc":"2.0","method":"eth_getTransactionCount","params": [address, block],"id":1})
    txs = hist.json()
    # result is a hex quantity string like '0x1a'; base 0 honors the 0x prefix
    return int(txs['result'], 0)
| [
"merwanedr@gmail.com"
] | merwanedr@gmail.com |
dbfbc4a50c99a3dd737fc5753fcef35b7ebc2477 | 723ea3f47a45fe756c4a77809eb2a4d6b98bc733 | /crackfun/1. Two Sum.py | cadb1db2bf7fd90a71675399516afeef26c591c5 | [] | no_license | JoyiS/Leetcode | a625e7191bcb80d246328121669a37ac81e30343 | 5510ef424135783f6dc40d3f5e85c4c42677c211 | refs/heads/master | 2021-10-21T05:41:00.706086 | 2019-03-03T06:29:14 | 2019-03-03T06:29:14 | 110,296,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | '''
1/22/2018 Understand the hash function usage here
'''
class Solution:
    """Two Sum: find the indices of the two numbers that add up to the target."""
    # Returns a tuple (index1, index2) with index1 < index2.
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: tuple of (int, int)
        """
        # One pass: for each value seen, remember which index would need it
        # as a complement; a hit means the pair is complete.
        complements = {}
        for idx, num in enumerate(nums):
            if num in complements:
                return (complements[num], idx)
            else:
                complements[target - num] = idx
return (0,0) | [
"california.sjy@gmail.com"
] | california.sjy@gmail.com |
3c735398dd09e42e3d9e33e053474404254a5988 | 6f06a519bc5a89464d95702fa27fa663ad4eb8f8 | /stateChangeTest_Full.py | bf9ac9366ed460aba7182baba230030da5b92572 | [] | no_license | chrismaurer/chrism | 64e71397bfffcd76aa5a12cc1394fad2c8939d76 | b0a30d5cbe4602a32ad494e2550219d633edf2db | refs/heads/master | 2021-06-04T07:53:15.212812 | 2021-01-19T13:23:22 | 2021-01-19T13:23:22 | 112,128,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | import time
import pyscreenshot
from pyrate.builder import Builder
from pyrate.ttapi.predicates import CallbackType
from pyrate.ttapi.manager import Manager
from pyrate.ttapi.order import TTAPIOrder
from ttapi import aenums, cppclient
from pyrate.exceptions import TimeoutError
from captain.controlled import controlled_name_type, ControlledName
from captain.lib.controlled_types import Tif
priceSession = Manager().getPriceSession()
orderSession = Manager().getOrderFillSession()
allCustDefaults = Manager().getCustomers()
ordSrv = Manager().getOrderServer()
priceSrv = Manager().getPriceServer()
products = priceSession.getProducts(prodName='HSI', prodType=aenums.TT_PROD_FUTURE)
product = products[0]
contracts = priceSession.getContracts(product)
contract = contracts[3]
custDefaults = allCustDefaults[0]
run_now = True
prev_trading_status = None
curr_trading_status = None
pricey = None
while run_now is True:
try:
if not priceSession.feed_down:
for enum, price in priceSession.getPrices(contract).items():
if "SETTL" in str(enum):
pricey = price.value
elif "LAST_TRD_PRC" in str(enum):
pricey = price.value
elif "SRS_STATUS" in str(enum):
curr_trading_status = price.value
if curr_trading_status == prev_trading_status:
pass
else:
orderSession.deleteMyOrders()
if "FUTURE" not in str(product.prod_type) and pricey is None:
pricey = 10
if pricey is None:
pricey = 30000
else:
pricey = pricey
order_qty = 100
for side in [aenums.TT_BUY, aenums.TT_SELL]:
orderParams = dict(order_qty=order_qty, buy_sell=side, order_action=aenums.TT_ORDER_ACTION_ADD, limit_prc=pricey, order_type=aenums.TT_LIMIT_ORDER, tif="GTD", srs=contract, exchange_clearing_account=custDefaults.exchange_clearing_account, free_text=custDefaults.free_text, acct_type=cppclient.AEnum_Account.TT_ACCT_AGENT_1)
newOrder = TTAPIOrder()
newOrder.setFields(**orderParams)
myOrder = orderSession.sendAndWait(newOrder)
if "BUY" in str(side):
newOrder2 = TTAPIOrder()
newOrder2.setFields(**orderParams)
newOrder2.buy_sell = aenums.TT_SELL
newOrder2.order_qty = 1
orderSession.sendAndWait(newOrder2)
time.sleep(3)
pyscreenshot.grab_to_file(r"C:\tt\screenshot_" + str(curr_trading_status) + "_" + "-".join([str(time.localtime()[3]), str(time.localtime()[4]), str(time.localtime()[5])]) + ".png")
prev_trading_status = curr_trading_status
time.sleep(15)
except TimeoutError:
pass
| [
"chris.maurer@tradingtechnologies.com"
] | chris.maurer@tradingtechnologies.com |
7902f1f901a4002347a8a5287c8699239944a7d0 | 0b6966a75a4c62393a38a73df5a779228639c42c | /active_selection/softmax_entropy.py | 024feddbae9822212fb02b22c24cb73a610c2eaf | [] | no_license | Shuai-Xie/DEAL | 7cbec778bcc83b633a1c3319d9b00c8b0f98aa00 | 06ff3ba29196e276376a9cf8d868d54fd2db2680 | refs/heads/master | 2023-08-29T20:03:44.867280 | 2021-11-12T07:50:28 | 2021-11-12T07:50:28 | 300,126,893 | 25 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,612 | py | import torch
from datasets.base_dataset import BaseDataset
from datasets.transforms import get_transform
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
from utils.misc import *
class SoftmaxEntropySelector:
    """Active-learning selector: picks the unlabeled images whose softmax
    predictions have the highest mean per-pixel entropy (most uncertain)."""
    def __init__(self, dataset, img_size):
        self.dataset = dataset      # dataset identifier/config (stored; not read in this class)
        self.img_size = img_size    # base size handed to the 'test' transform
        self.softmax = torch.nn.Softmax2d()  # per-pixel softmax over the channel dim
    @torch.no_grad()
    def select_next_batch(self, model, active_trainset, select_num):
        """Score a subset of the unlabeled pool with *model* and move the
        *select_num* highest-entropy images into the labeled set.

        Mutates *active_trainset* in place via add_by_select_remain_paths.
        Requires CUDA (inputs are moved with ``.cuda()``).
        """
        model.eval()
        # get a subset from the whole unlabelset
        subset_img_paths, subset_target_paths, remset_img_paths, remset_target_paths = get_subset_paths(
            active_trainset.unlabel_img_paths, active_trainset.unlabel_target_paths,
        )
        print('subset_img_paths', len(subset_img_paths))
        print('remset_img_paths', len(remset_img_paths))
        unlabelset = BaseDataset(subset_img_paths, subset_target_paths)
        unlabelset.transform = get_transform('test', base_size=self.img_size)
        dataloader = DataLoader(unlabelset,
                                batch_size=8, shuffle=False,
                                pin_memory=True, num_workers=4)
        scores = []
        tbar = tqdm(dataloader, desc='\r')
        tbar.set_description(f'cal_entropy_score')
        for sample in tbar:
            img = sample['img'].cuda()
            probs = self.softmax(model(img))  # B,C,H,W
            probs = probs.detach().cpu().numpy()
            scores += self.cal_entropy_score(probs)
        select_idxs = get_topk_idxs(scores, select_num)
        # split the scored subset into selected and remaining samples
        select_img_paths, select_target_paths, remain_img_paths, remain_target_paths = get_select_remain_paths(
            subset_img_paths, subset_target_paths, select_idxs
        )
        # add the un-scored remainder back to the unlabeled pool
        remain_img_paths += remset_img_paths
        remain_target_paths += remset_target_paths
        print('select_img_paths', len(select_img_paths))
        print('remain_img_paths', len(remain_img_paths))
        # update the labeled (DL) and unlabeled (DU) pools
        active_trainset.add_by_select_remain_paths(select_img_paths, select_target_paths,
                                                   remain_img_paths, remain_target_paths)
    @staticmethod
    def cal_entropy_score(probs):  # probs: B,C,H,W; higher entropy -> harder sample
        """Return one score per image: the mean per-pixel entropy of *probs*."""
        batch_scores = []
        for i in range(len(probs)):  # prob img
            # axis=0 sums over the channel dimension; 1e-12 guards log(0)
            entropy = np.mean(-np.nansum(np.multiply(probs[i], np.log(probs[i] + 1e-12)), axis=0))
            batch_scores.append(entropy)
        return batch_scores
| [
"shuaixie@zju.edu.cn"
] | shuaixie@zju.edu.cn |
64f6d5d175541c818f8df3fab0c13c4a310d2b8c | b521802cca8e4ee4ff5a5ffe59175a34f2f6d763 | /maya/maya-utils/Scripts/Animation/2019-2-15 Tim Cam_Route_Manager/.history/Cam_Con/Cam_Con/Cam_Con_20190119103655.py | 82dc55f54efa3014614d149f07da48a4e3fa3c34 | [] | no_license | all-in-one-of/I-Do-library | 2edf68b29558728ce53fe17168694ad0353a076e | 8972ebdcf1430ccc207028d8482210092acf02ce | refs/heads/master | 2021-01-04T06:58:57.871216 | 2019-12-16T04:52:20 | 2019-12-16T04:52:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,917 | py | # -*- coding:utf-8 -*-
# Require Header
import os
import json
from functools import partial
# Sys Header
import sys
import traceback
import subprocess
# Maya Header
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as omui
import plugin.Qt as Qt
from Qt.QtCore import *
from Qt.QtGui import *
from Qt.QtWidgets import *
def loadUiType(uiFile):
    """Compile a Qt Designer .ui file and return (form_class, base_class).

    Dispatches on the active Qt binding: PyQt ships loadUiType directly;
    the PySide bindings compile the .ui XML to Python source and exec it.
    NOTE: Python 2 only (``exec ... in ...`` statement, ``cStringIO``).
    """
    import plugin.Qt as Qt
    if Qt.__binding__.startswith('PyQt'):
        from Qt import _uic as uic
        return uic.loadUiType(uiFile)
    elif Qt.__binding__ == 'PySide':
        import pysideuic as uic
    else:
        import pyside2uic as uic
    import xml.etree.ElementTree as xml
    from cStringIO import StringIO
    # The widget class (base) and form class names are declared in the .ui XML.
    parsed = xml.parse(uiFile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uiFile, 'r') as f:
        o = StringIO()
        frame = {}
        uic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec pyc in frame
        # Fetch the base_class and form class based on their type
        # in the xml from designer
        form_class = frame['Ui_%s'%form_class]
        base_class = eval('%s'%widget_class)
    return form_class, base_class
from Qt.QtCompat import wrapInstance
DIR = os.path.dirname(__file__)
UI_PATH = os.path.join(DIR,"ui","Cam_Con.ui")
GUI_STATE_PATH = os.path.join(DIR, "json" ,'GUI_STATE.json')
form_class , base_class = loadUiType(UI_PATH)
import Cam_Item
reload(Cam_Item)
from Cam_Item import Cam_Item
from maya import cmds
class Cam_Con(form_class,base_class):
    """Maya camera-route manager window (Qt UI loaded from Cam_Con.ui).

    NOTE: Python 2 code (``print x`` statements).
    NOTE(review): the 'dock' parameter is unused and self.DOCK is never set
    in __init__ -- Save_Json_Fun relies on it being assigned elsewhere.
    """
    def __init__(self,dock="dock"):
        super(Cam_Con,self).__init__()
        self.setupUi(self)
        # wire the "get constraint" button to its handler
        self.Get_Constraint_BTN.clicked.connect(self.Get_Constraint_Fn)
    def Get_Constraint_Fn(self):
        """Inspect the constraint attached to the current selection, then save GUI state."""
        selection = cmds.ls(sl=1)[0]
        constraintNode = cmds.listConnections(selection,type="constraint")[0]
        print constraintNode
        AttrList = cmds.listAttr( constraintNode,r=True, s=True )
        # NOTE(review): this reassignment is never used afterwards -- looks like dead code
        constraintNode = cmds.listConnections(constraintNode,type="constraint")[0]
        print AttrList
        self.Save_Json_Fun()
    def Save_Json_Fun(self,path=GUI_STATE_PATH):
        """Persist GUI state (currently only self.DOCK) as JSON at *path*."""
        GUI_STATE = {}
        GUI_STATE['DOCK'] = self.DOCK
        try:
            with open(path,'w') as f:
                json.dump(GUI_STATE,f,indent=4)
        except:
            if path != "":
                QMessageBox.warning(self, u"Warning", u"保存失败")
    def Load_Json_Fun(self,path=GUI_STATE_PATH,load=False):
        """Return True if a GUI-state file exists (and parses) at *path*, else False.

        NOTE(review): the loaded JSON is read into a local and discarded --
        state restoration appears unfinished.
        """
        if os.path.exists(path):
            GUI_STATE = {}
            with open(path,'r') as f:
                GUI_STATE = json.load(f)
            return True
        else:
            if load==True:
                QMessageBox.warning(self, u"Warning", u"加载失败\n检查路径是否正确")
            return False
| [
"2595715768@qq.com"
] | 2595715768@qq.com |
bf8e2477790da52a16f688d27eb6a02702cb161f | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Graphs/Topological Sort/CourseSchedule.py | f05e3d1eba4cfe067b0698fe737dc72053b0f877 | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 1,985 | py | """
LeetCode Problem: 207. Course Schedule
Link: https://leetcode.com/problems/course-schedule/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(V+E)
Space Complexity: O(V)
"""
# Kahn's Topological Sort Algorithm
from collections import defaultdict, deque
from typing import List
class Solution(object):
    """Course Schedule (LeetCode 207) solved with Kahn's topological sort.

    Time O(V + E); space O(V + E) for the adjacency list plus O(V) for the
    in-degree array and queue.
    """

    def __init__(self):
        # adjacency list: course u -> courses that have u as a prerequisite
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        """Record a directed edge u -> v in the prerequisite graph."""
        self.graph[u].append(v)

    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
        """Return True iff all courses can be finished (the graph is acyclic)."""
        # build the DAG
        for u, v in prerequisites:
            self.addEdge(u, v)

        # count incoming edges for every vertex
        in_degree = [0] * numCourses
        for u in self.graph:
            for v in self.graph[u]:
                in_degree[v] += 1

        # seed with vertices that have no prerequisites; deque gives O(1)
        # popleft (list.pop(0) was O(n) per dequeue)
        queue = deque(i for i in range(numCourses) if in_degree[i] == 0)

        visited = 0  # number of vertices emitted by the topological sort
        while queue:
            u = queue.popleft()
            visited += 1
            for v in self.graph[u]:
                in_degree[v] -= 1
                if in_degree[v] == 0:
                    queue.append(v)

        # if a cycle exists, its vertices never reach in-degree 0
        return visited == numCourses
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
ee0d4a993477b4bc71f38b3426fb8bd6b5200825 | edc1134436a79ca883a0d25f3c8dfffc4235c514 | /pyro/infer/reparam/projected_normal.py | 4a4f1a27789a4f797c3902a0494b284ab57b3005 | [
"Apache-2.0"
] | permissive | pyro-ppl/pyro | 2283d8ca528fc090c724a3a6e0f344e505ebbf77 | 0e82cad30f75b892a07e6c9a5f9e24f2cb5d0d81 | refs/heads/dev | 2023-08-18T00:35:28.014919 | 2023-08-06T21:01:36 | 2023-08-06T21:01:36 | 94,506,832 | 3,647 | 606 | Apache-2.0 | 2023-09-14T13:52:14 | 2017-06-16T05:03:47 | Python | UTF-8 | Python | false | false | 1,697 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions as dist
from pyro.ops.tensor_utils import safe_normalize
from .reparam import Reparam
class ProjectedNormalReparam(Reparam):
    """
    Reparametrizer for :class:`~pyro.distributions.ProjectedNormal` latent
    variables.

    This reparameterization works only for latent variables, not likelihoods.
    """

    def apply(self, msg):
        """Replace a ProjectedNormal sample site with an auxiliary Normal site
        whose draw is shifted by the concentration and normalized onto the
        sphere; returns the message dict for the resulting Delta site."""
        name = msg["name"]
        fn = msg["fn"]
        value = msg["value"]
        is_observed = msg["is_observed"]
        if is_observed:
            raise NotImplementedError(
                "ProjectedNormalReparam does not support observe statements"
            )

        fn, event_dim = self._unwrap(fn)
        assert isinstance(fn, dist.ProjectedNormal)

        # Differentiably invert transform.
        value_normal = None
        if value is not None:
            # We use an arbitrary injection, which works only for initialization.
            value_normal = value - fn.concentration

        # Draw parameter-free noise.
        new_fn = dist.Normal(torch.zeros_like(fn.concentration), 1).to_event(1)
        x = pyro.sample(
            "{}_normal".format(name),
            self._wrap(new_fn, event_dim),
            obs=value_normal,
            infer={"is_observed": is_observed},
        )

        # Differentiably transform.
        if value is None:
            value = safe_normalize(x + fn.concentration)

        # Simulate a pyro.deterministic() site.
        new_fn = dist.Delta(value, event_dim=event_dim).mask(False)
        return {"fn": new_fn, "value": value, "is_observed": True}
| [
"noreply@github.com"
] | pyro-ppl.noreply@github.com |
321daaf3a06bfa4f6dc23b6ebd71a79346bc88c6 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/datalake/azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/create_trusted_id_provider_with_account_parameters_py3.py | 66fc5888e5af2ea2e35259a9e9c04b265632b181 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,472 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CreateTrustedIdProviderWithAccountParameters(Model):
    """The parameters used to create a new trusted identity provider while
    creating a new Data Lake Store account.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The unique name of the trusted identity provider to
     create.
    :type name: str
    :param id_provider: Required. The URL of this trusted identity provider.
    :type id_provider: str
    """

    # msrest validation rules: both fields are mandatory.
    _validation = {
        'name': {'required': True},
        'id_provider': {'required': True},
    }

    # Wire-format mapping: id_provider nests under 'properties' in the JSON payload.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id_provider': {'key': 'properties.idProvider', 'type': 'str'},
    }

    def __init__(self, *, name: str, id_provider: str, **kwargs) -> None:
        super(CreateTrustedIdProviderWithAccountParameters, self).__init__(**kwargs)
        self.name = name
        self.id_provider = id_provider
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
2e8ab7828b6fafbd986642be92a72261fc8d5428 | 34578a08451dc124f02fbba92a219da3347059cd | /.history/tools/views_20190430183042.py | 79c4d1509e1b4cb84291f467d8bc49e00bd374b8 | [] | no_license | gwjczwy/CTF-Exercises | b35d938b30adbc56c1b6f45dc36cea1421c702fb | c2d5c47f5047b1601564453e270ce50aad7f56fc | refs/heads/master | 2020-05-25T23:51:26.190350 | 2019-05-22T13:18:59 | 2019-05-22T13:18:59 | 188,042,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,688 | py | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from json import dumps
from .models import Url,Money
import time
#########################
#主页
@login_required
def index(requests):
    """Render the tools home page for the logged-in user."""
    data={'toolname':'index','user':requests.user}
    return render(requests,'tools/index.html',data)
#########################
#短链接
@login_required
def surl(requests):# short-URL tool: index page
    """Render the short-URL creation page."""
    data={}
    data['toolName']="surl"
    data['parameter']="index"
    return render(requests, 'tools/index.html', data)
def surls(requests,parameter):# redirect for the short-link slug *parameter*
    """Resolve a short-link slug to its stored URL and redirect via JS.

    NOTE(review): the stored URL is interpolated into the response
    unescaped (open redirect / script injection) -- this is a CTF exercise,
    so presumably intentional.
    """
    # NOTE(review): 'data' is built but never used (no render in this view).
    data={}
    data['toolName']="surl"
    data['parameter']="link"
    print('短链接参数',parameter)
    try:
        req=Url.objects.get(sUrl=parameter)
        print('获取对象成功')
    except:  # bare except also swallows unexpected errors, not just DoesNotExist
        return HttpResponse('你来错地方了,悟空')
    req=req.fullUrl
    return HttpResponse('<script>window.location.href="'+req+'";</script>')
@csrf_exempt
@login_required
def createSUrl(requests):
    """Create a unique 5-char short URL for the POSTed 'fullUrl'; return JSON.

    Fixes:
    - the slug was re-randomized *after* the uniqueness loop, which could
      store a duplicate sUrl; the stray regeneration is removed;
    - a missing 'fullUrl' key now yields the fail payload instead of a
      KeyError (500).
    """
    if not (requests.method == 'POST' and requests.POST.get('fullUrl')):
        req={'message':'fail'}
        return HttpResponse(dumps(req),content_type="application/json")
    fullUrl=requests.POST['fullUrl']
    while True:
        randUrl=randStr(5)# random 5-char slug
        try:
            Url.objects.get(sUrl=randUrl)# collision -- try again
            print('再!来!一!次!')
        except:
            break
    Url(sUrl=randUrl,fullUrl=fullUrl).save()
    req={'message':'success','url':randUrl}
    return HttpResponse(dumps(req),content_type="application/json")
def randStr(l):
    """Return a random alphanumeric string of length *l*.

    Uses the non-cryptographic ``random`` module; fine for short-URL slugs,
    but use ``secrets`` instead for anything security-sensitive.
    (Unused ``import string`` removed; manual append loop replaced with
    ``random.choices``.)
    """
    import random
    seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return ''.join(random.choices(seed, k=l))
#########################
#商店
@login_required
def shop(requests):
    """Render the shop page with the current user's wallet balances."""
    data={}
    data['toolName']="shop"
    money = Money.objects.get(user=requests.user)
    data['money']=money
    return render(requests, 'tools/index.html', data)
#商店兑换
@csrf_exempt
@login_required
def shopExchange(requests):
    """Exchange currency between monero and bitcoin for the logged-in user.

    POST params: 'rule' in {'m2b', 'b2m'}; 'num' a non-negative integer.
    Returns a JSON status payload.
    NOTE: the sleep(5) between crediting and debiting deliberately opens a
    race-condition (TOCTOU) window -- this is the intended CTF vulnerability
    (see the original inline comment 造成条件竞争).
    """
    if not (requests.method == 'POST' and 'rule' in requests.POST and 'num' in requests.POST):
        print('非法请求')
        req={'message':'fail','reason':'非法请求'}
        return HttpResponse(dumps(req),content_type="application/json")
    rule=requests.POST['rule']
    num=requests.POST['num']
    if not rule in ['m2b','b2m']:# validate the exchange rule
        print('rule参数不合法')
        req={'message':'fail','reason':'rule参数不合法'}
        return HttpResponse(dumps(req),content_type="application/json")
    if num.isdigit():# validate the amount (isdigit also rejects '-1', so num >= 0 here)
        num=int(num)
        if num<0:
            req={'message':'fail','reason':'非法参数'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'非法参数'}
        return HttpResponse(dumps(req),content_type="application/json")
    # fetch the user's wallet
    money = Money.objects.get(user=requests.user)
    if rule=='m2b':
        if money.monero>=num:
            money.bitcoin+=num
            money.save()
            time.sleep(5) # wait window -- creates the race condition
            money.monero-=num
            money.save()
        else:
            req={'message':'fail','reason':'monero 不足'}
            return HttpResponse(dumps(req),content_type="application/json")
    elif rule=='b2m':
        if money.bitcoin>=num:
            money.monero+=num
            money.save()
            time.sleep(5)
            money.bitcoin-=num
            money.save()
        else:
            req={'message':'fail','reason':'bitcoin 不足'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'未知错误'}
        return HttpResponse(dumps(req),content_type="application/json")
    req={'message':'success','monero':money.monero,'bitcoin':money.bitcoin}
    return HttpResponse(dumps(req),content_type="application/json")
#########################
# Logs
@login_required
def logs(requests):
    """Render the log-writing tool page."""
    data={}
    data['toolName']="logs"
    return render(requests, 'tools/index.html', data)
# Append content to a log file
@csrf_exempt
@login_required
def addLog(requests):
    """Append ``content`` to the file at ``path`` for a 100-bitcoin fee.

    WARNING: ``path`` comes straight from the request with no sanitisation,
    making this an arbitrary-file-write primitive -- presumably intentional
    in this challenge app; do not reuse as-is.
    """
    if not (requests.method == 'POST' and 'path' in requests.POST and 'content' in requests.POST):
        req={'message':'fail','reason':'非法请求'}
        return HttpResponse(dumps(req),content_type="application/json")
    path=requests.POST['path']
    content=requests.POST['content']
    # Fetch the user's wallet row.
    money = Money.objects.get(user=requests.user)
    if money.bitcoin >=100:
        try:
            # 'at' = append, text mode; creates the file if it does not exist.
            with open(path,'at') as file:
                file.write(content)
            money.bitcoin-=100
            money.save()
            req={'message':'success','reason':'操作成功'}
            return HttpResponse(dumps(req),content_type="application/json")
        except:
            req={'message':'fail','reason':'写入文件错误'}
            return HttpResponse(dumps(req),content_type="application/json")
    else:
        req={'message':'fail','reason':'货币不足'}
        return HttpResponse(dumps(req),content_type="application/json")
# Download the source code
def downSource(requests):
# 获取货币对象
money = Money.objects.get(user=requests.user)
if money.bitcoin >=1000:
money.bitcoin-=1000
money.save() | [
"zwy053@163.com"
] | zwy053@163.com |
b80454a40c9ebcd6c05af124a3088166decdd9de | 9064fc0b780d3632163e3a73e9f73fdf8aa7f80e | /battle.py | 437281049435ea30d822f1403221dcdffae51323 | [] | no_license | poteto1212/myapp-flask- | c752751cb26898eaa419b410d7b02ae4a608712a | 3c6714dfad0ca59b92c5a2ab261f4dcc11e255b1 | refs/heads/master | 2023-03-15T02:14:11.041245 | 2021-03-28T07:51:34 | 2021-03-28T07:51:34 | 351,810,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | from flask import Flask, request, render_template
app = Flask(__name__)  # the WSGI application object

# Selectable party members shown by the template (hero, warrior, mage, ninja).
players = ["勇者", "戦士", "魔法使い", "忍者"]
@app.route("/")#テンプレートのgetメソッドに直接表示
def show():
message = "あらたなモンスターがあらわれた!"
return render_template("battle.html", message = message, players = players)
@app.route("/result", methods=["POST"])#テンプレートのPOSTメソッドからの入力処理
def result():
name = request.form["name"]
message = name + "はモンスターと戦った!"
return render_template("battle.html", message = message, players = players)
| [
"you@example.com"
] | you@example.com |
8b218abc4a3878b1c37aa9b451d5db94469a9754 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_vials.py | e2e2135144acb3eec3b153e8a65f424466bd7147 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._vial import _VIAL
#calss header
class _VIALS(_VIAL, ):
    """Plural noun entry ("vials"); behaviour comes entirely from ``_VIAL``."""

    def __init__(self,):
        # Initialise the base (singular) entry, then set this form's metadata.
        _VIAL.__init__(self)
        self.name, self.basic = "VIALS", "vial"
        self.specie = 'nouns'   # part-of-speech bucket
        self.jsondata = {}      # no extra lexical data for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
668598c1677135991798fc681fc9b79dbb890462 | f4b7207e407d4b8d693cb1f549228f0e9dfe15f2 | /wizard/__init__.py | f5dd943f161b34c98e06101b0fee4d8e53968bd8 | [] | no_license | cgsoftware/BomForTemplate- | c74943790da9c49e44ac58ba3f15f9785f363483 | acbfcf0269c1c239c34a5505e01dca0653a046b9 | refs/heads/master | 2016-09-08T01:22:12.331360 | 2011-07-26T14:25:25 | 2011-07-26T14:25:25 | 2,107,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 Domsense SRL (<http://www.domsense.com>).
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import GeneraDistinta
| [
"g.dalo@cgsoftware.it"
] | g.dalo@cgsoftware.it |
6063f7361ab755af9ef9d192f4341075d6c01b0e | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Cheese Boys/cheeseboys/character/navpoint.py | 2303769f6de56879ddc36d3ff5acaca3a6e1bd2d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e6854cdd531b75169a9912aa95b5fb937517d9124a0c484cac9b640d801e35fa
size 4235
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
112d51dc7aeee6b51999754cea1d6531ecd117dc | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_personifies.py | cec8300a13cfbafaf3a5631fe6f9b682c22c0460 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py |
from xai.brain.wordbase.verbs._personify import _PERSONIFY
#calss header
class _PERSONIFIES(_PERSONIFY, ):
    """Inflected verb entry ("personifies"); behaviour comes from ``_PERSONIFY``."""

    def __init__(self,):
        # Initialise the base (infinitive) entry, then set this form's metadata.
        _PERSONIFY.__init__(self)
        self.name, self.basic = "PERSONIFIES", "personify"
        self.specie = 'verbs'   # part-of-speech bucket
        self.jsondata = {}      # no extra lexical data for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4f2aa479c10a74a807ed92d08466b41ffdcac02d | cb20ef5b4048457a2e6dca4a4cb45c53c9843744 | /scripts/migration/2017022_migraterouterosvlan.py | 6191cdfe265c19448e444e923ea100168d288c7c | [] | no_license | rudecs/openvcloud | 5001b77e8d943427c1bed563f3dcc6b9467936e2 | 12ccce2a54034f5bf5842e000c2cc3d7e22836d8 | refs/heads/master | 2020-03-24T00:00:10.422677 | 2018-11-22T13:41:17 | 2018-11-22T13:41:17 | 142,267,808 | 2 | 1 | null | 2018-07-25T08:02:37 | 2018-07-25T08:02:36 | null | UTF-8 | Python | false | false | 435 | py | from JumpScale import j
# One-off data migration: copy each cloudspace's external-network VLAN id
# onto the matching virtual firewall record.
vcl = j.clients.osis.getNamespace('vfw')
ccl = j.clients.osis.getNamespace('cloudbroker')
# NOTE(review): the [1:] slices skip the first search() result -- presumably a
# count/header entry returned by OSIS search; confirm before reusing this pattern.
for vfw in vcl.virtualfirewall.search({})[1:]:
    # Cloudspace on the same grid whose networkId matches this firewall, if any.
    space = next(iter(ccl.cloudspace.search({'gid': vfw['gid'], 'networkId': vfw['id']})[1:]), None)
    if space:
        externalnetwork = ccl.externalnetwork.get(space['externalnetworkId'])
        vfw['vlan'] = externalnetwork.vlan
        vcl.virtualfirewall.set(vfw)
| [
"deboeck.jo@gmail.com"
] | deboeck.jo@gmail.com |
4d44108df374a32aec622b77ee7de26ed9b32be8 | 0809ea2739d901b095d896e01baa9672f3138825 | /jobproject_4/jobApp/forms.py | c5d11f10832fc3d8d286690d106532d7052c0a19 | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django import forms
from jobApp.models import Hyderabad_jobs,Mumbai_jobs,Pune_jobs,Bangalore_jobs
class hydjobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Hyderabad_jobs model."""
    class Meta:
        model=Hyderabad_jobs
        fields='__all__'
class punejobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Pune_jobs model."""
    class Meta:
        model=Pune_jobs
        fields='__all__'
class mumbaijobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Mumbai_jobs model."""
    class Meta:
        model=Mumbai_jobs
        fields='__all__'
class bangalorejobsForm(forms.ModelForm):
    """ModelForm exposing every field of the Bangalore_jobs model."""
    class Meta:
        model=Bangalore_jobs
        fields='__all__'
| [
"djangopython1988@gmail.com"
] | djangopython1988@gmail.com |
375e89d3f8a2b3ccbf4dd393faeedd95a83ff1de | 2bc0ecba05876be2a674bdc215ed7bf7332956aa | /Challenges_2020_public/misc/Disconfigured/challenge/src/cogs/notes_cog.py | ae73712d66d0430e36af60d67846c105e562b1f3 | [
"MIT"
] | permissive | Silentsoul04/ctfs | 04c29c28347f3336d8feb945da20eba83326699e | fa3b187be2c8a1d20f414c2b37277d3961a366b0 | refs/heads/master | 2023-02-18T10:20:46.508273 | 2021-01-23T18:30:57 | 2021-01-23T18:30:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,749 | py | from __future__ import annotations
from datetime import datetime
from os import environ
from discord import ChannelType, Embed, Message
from discord.ext import commands, menus
from models.note import Note
from util import db
from util.logger import get_logger
logger = get_logger(__name__)
MAX_ALLOWED_NOTES = int(environ["MAX_ALLOWED_NOTES"])
class Notes(commands.Cog):
    """Commands and listeners for adding, retrieving and clearing user notes."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_message(self, message: Message):
        """Runs every time a message is sent.

        DM messages from users who previously invoked the ``note`` command
        are captured here and stored as the note body.

        Args:
            message (discord.Message): The message that triggered the invocation
        """
        if message.author.bot:
            return

        logger.debug(
            "%s#%s: %s", message.author.name, message.author.discriminator, message.clean_content)

        # Only DMs can carry note contents.
        if message.channel.type == ChannelType.private:
            waiting_user = db.get_creating_note_user(message.author)
            if not waiting_user:
                # This author never started a note with the ``note`` command.
                return

            note = message.clean_content
            if not note:
                await message.channel.send("Your note must contain text.")
                return
            if len(note) >= 100:
                # Fixed: the code enforces < 100 characters but the old
                # message claimed the limit was 200.
                await message.channel.send("Notes must be less than 100 characters in length")
                return

            # Guild note: attach the note to the guild the command came from.
            if waiting_user.note_type == "guild_user":
                cleared_notes = db.add_guild_note(note, waiting_user)
                if cleared_notes:
                    await message.channel.send(
                        f"Saved your note for {waiting_user.guild_name} and cleared "
                        f"the rest. You are only allowed up to {MAX_ALLOWED_NOTES} notes.")
                else:
                    await message.channel.send(
                        f"Saved your note for {waiting_user.guild_name}!")
            # Personal note: ignore the message that contained the command itself.
            elif waiting_user.note_type == "dm_user" and message.id != waiting_user.command_message_id:
                cleared_notes = db.add_self_note(note, waiting_user)
                if cleared_notes:
                    await message.channel.send(
                        "Saved your personal note and cleared the rest. You are "
                        f"only allowed up to {MAX_ALLOWED_NOTES} notes.")
                else:
                    await message.channel.send("Saved your personal note!")

    @commands.command()
    async def note(self, ctx: commands.Context):
        """ Start the creation of a new note. Be careful what you note - server admins can see all 👀

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        if ctx.channel.type == ChannelType.private:
            db.add_creating_self_note_user(ctx.author, ctx.message.id)
            await dm.send("Please enter your personal note to save.\n"
                          "The format of a note is ```<title> | <contents>```")
        else:
            db.add_creating_note_user(ctx.author, ctx.guild)
            # NOTE(review): deleting the invoking guild message was present but
            # commented out in the original (so commands stay visible); the dead
            # code was removed -- restore deletion here if that changes.
            # Fixed: "savein" -> "save in" (user-facing typo).
            await dm.send(f"Please enter your note to save in {ctx.guild.name}.\n"
                          "The format of a note is ```<title> | <contents>```")

    @commands.command()
    async def notes(self, ctx: commands.Context):
        """ Retrieve previously saved notes

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        if ctx.channel.type == ChannelType.private:
            db.delete_from_waiting(user_id=ctx.author.id)
            notes = db.get_self_notes(ctx.author)
            if not notes:
                await dm.send("You have no DM notes saved!")
                return
        else:
            # NOTE(review): message deletion deliberately disabled (see ``note``).
            db.delete_from_waiting(user_id=ctx.author.id)
            notes = db.get_member_notes(ctx.author, ctx.guild)
            if not notes:
                await dm.send(f"You have no notes saved in {ctx.guild.name}!")
                return

        # Paginate the notes in the user's DM channel.
        pages = menus.MenuPages(source=NotesSource(
            notes), clear_reactions_after=True, timeout=15)
        await pages.start(ctx, channel=dm)

    @commands.command()
    async def clear(self, ctx: commands.Context):
        """ Clear a user's notes

        Args:
            ctx (commands.Context): The invoking context
        """
        dm = ctx.author.dm_channel or await ctx.author.create_dm()
        if ctx.channel.type == ChannelType.private:
            db.clear_dm_notes(ctx.author.id)
            await dm.send("Cleared your DM notes!")
        else:
            db.clear_guild_notes(ctx.author.id, ctx.guild.id)
            # NOTE(review): message deletion deliberately disabled (see ``note``).
            await dm.send(f"Cleared your notes for {ctx.guild.name}!")
class NotesSource(menus.ListPageSource):
    """A ``menus.ListPageSource`` that renders stored notes into embeds,
    ten notes per page."""

    def __init__(self, notes: [Note]):
        super().__init__(notes, per_page=10)

    async def format_page(self, menu, notes: [Note]) -> Embed:
        """Build the embed for the current page of notes.

        Args:
            menu (menus.MenuPages): The menu instance driving the pagination
            notes ([Note]): The notes to display on this page

        Returns:
            discord.Embed: The paginating embed that will be sent to the user
        """
        # Continue the running note numbering across pages.
        offset = menu.current_page * self.per_page

        fields = []
        for i, note in enumerate(notes, start=offset):
            value = ""
            if not note.content:
                value = "no content"
            else:
                value = note.content
            fields.append({
                'name': f'{i+1}. {note.title}',
                'value': value+"."
            })

        embed = Embed.from_dict({
            'title': 'Your Notes',
            'type': 'rich',
            'fields': fields,
            'color': 0x89c6f6
        })
        return embed
| [
"you@example.com"
] | you@example.com |
8df9f667b2eeb9681ccbf493100549ddcaf96614 | 34691663465d37f3c24c004c27646ba070ef5198 | /tomopy_ui/config.py | 4837ee85aaeaa05d5d57573985fb07f5df696d6c | [] | no_license | cpchuang/tomopy_ui | ff8df8809286ff1a8dbf968e3b46b12c66da7d40 | a2e64d87b9b8269b90ba56be94f422474b905474 | refs/heads/main | 2023-07-15T19:48:18.801354 | 2021-08-19T02:14:01 | 2021-08-19T02:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,587 | py | import os
import sys
from ij import IJ
from os.path import expanduser
home = expanduser("~")
CONFIG_FILE_NAME = os.path.join(home, "tomopy_ui.txt")
class DatasetParameters:
    """Per-dataset acquisition parameters backing the GUI.

    ``fields`` is the GUI field container; parameter values are kept as
    strings because they mirror text widgets.
    """

    def __init__(self, fields):
        self.fields = fields
        self.set()

    def set(self):
        """Reset every dataset parameter to its default value."""
        # Most values default to blank or zero strings.
        self.fname = ""
        self.energy = ""
        self.propagation_distance = ""
        self.pixel_size = ""
        self.width = "0"
        self.center = "0"
        self.originalRoiX = "0"
        # Concrete defaults for detector height and scan type.
        self.height = "2048"
        self.scanType = "Standard"
class RecoParameters:
    """Tomographic reconstruction parameters.

    Bridges three representations of the same parameter set: a plain-text
    config file (``pfname``), the Swing GUI widgets held in ``fields``, and
    the attributes on this object.  Values read back from the file or the
    GUI are strings unless noted otherwise.
    """

    def __init__(self, fields):
        self.fields = fields
        self.set()

    def set(self):
        """Reset every reconstruction parameter to its default value."""
        self.pfname = CONFIG_FILE_NAME
        self.fname = ""
        self.algorithm = 0
        self.filter_index = 0
        self.stripe_method = 0
        self.center = 0
        self.slice = 0
        self.nsino_x_chunk = 16
        self.center_search_width = 5
        self.energy = 0
        self.propagation_distance = 60
        self.pixel_size = 1
        self.alpha = 0.2
        self.queue = 'local'
        self.nnodes = 4

    def readParametersFromFile(self):
        """Load parameters from the whitespace-separated config file.

        Each line is ``<Key> <value>``; blank lines, unknown keys and keys
        without a value are ignored.  Values are stored as strings, except
        ``filter_index`` which is converted to int.
        """
        print("Read parameters from ", self.pfname)
        # ``with`` guarantees the handle is closed even if parsing fails
        # (the original left it open on error).
        with open(self.pfname, "r") as FILE:
            for line in FILE:
                linelist = line.split()
                # Require a value token too; a key-only line used to raise
                # IndexError (e.g. "FileName" written with an empty fname).
                if len(linelist) > 1:
                    if linelist[0] == "FileName":
                        self.fname = linelist[1]
                    elif linelist[0] == "Algorithm":
                        self.algorithm = linelist[1]
                    elif linelist[0] == "Filter":
                        self.filter_index = int(linelist[1])
                    elif linelist[0] == "RemoveStripeMethod":
                        self.stripe_method = linelist[1]
                    elif linelist[0] == "Center":
                        self.center = linelist[1]
                    elif linelist[0] == "Slice":
                        self.slice = linelist[1]
                    elif linelist[0] == "NsinoPerChunk":
                        self.nsino_x_chunk = linelist[1]
                    elif linelist[0] == "SearchWidth":
                        self.center_search_width = linelist[1]
                    elif linelist[0] == "Energy":
                        self.energy = linelist[1]
                    elif linelist[0] == "PropagationDistance":
                        self.propagation_distance = linelist[1]
                    elif linelist[0] == "PixelSize":
                        self.pixel_size = linelist[1]
                    elif linelist[0] == "Alpha":
                        self.alpha = linelist[1]
                    elif linelist[0] == "Queue":
                        self.queue = linelist[1]
                    elif linelist[0] == "Nnodes":
                        self.nnodes = linelist[1]

    def readParametersFromGUI(self, originalRoiX):
        """Pull the current widget values out of the GUI into attributes.

        ``originalRoiX`` is accepted for interface compatibility but is not
        used here (kept so existing callers keep working).
        """
        self.fname = self.fields.selectedDatasetField.getText()
        self.algorithm = self.fields.algorithmChooser.getSelectedIndex()
        self.energy = self.fields.energyField.getText()
        self.propagation_distance = self.fields.propagation_distanceField.getText()
        self.pixel_size = self.fields.pixel_sizeField.getText()
        self.alpha = self.fields.alphaField.getText()
        self.filter_index = self.fields.filterChooser.getSelectedIndex()
        self.filterUsed = self.fields.filterList[self.filter_index]
        # Map the chooser index to the reconstruction filter name.
        if self.filter_index == 0:
            self.filterOption = "none"
        elif self.filter_index == 1:
            self.filterOption = "shepp"
        elif self.filter_index == 2:
            self.filterOption = "hann"
        elif self.filter_index == 3:
            # Fixed: was the misspelling "hammimg", which is not a valid
            # tomopy filter name.
            self.filterOption = "hamming"
        elif self.filter_index == 4:
            self.filterOption = "ramlak"
        elif self.filter_index == 5:
            self.filterOption = "parzen"
        elif self.filter_index == 6:
            self.filterOption = "cosine"
        elif self.filter_index == 7:
            self.filterOption = "butterworth"
        self.center = self.fields.centerField.getText()
        self.stripe_method = self.fields.stripe_methodChooser.getSelectedIndex()
        self.slice = self.fields.sliceField.getText()
        self.center_search_width = self.fields.centerSearchField.getText()
        self.nsino_x_chunk = self.fields.nsino_x_chunkField.getText()
        # Compute queue selection (radio buttons).
        if self.fields.localButton.isSelected():
            self.queue = "local"
            print("local cluster is selected")
        elif self.fields.lcrcButton.isSelected():
            self.queue = "LCRC"
            print("LCRC cluster is selected")
        elif self.fields.alcfButton.isSelected():
            self.queue = "ALCF"
            print("ALCF cluster is selected")
        else:
            print("This queue option is not implemented yet")
            sys.exit()
        # Chooser is 0-based; node counts start at 1.
        self.nnodes = self.fields.nnodeChooser.getSelectedIndex() + 1
        # Clamp the node count to each queue's maximum, reflecting it in the GUI.
        if self.queue == "ALCF":
            if self.nnodes > 8:
                self.nnodes = 8
                self.fields.nnodeChooser.setSelectedIndex(7)
        else:
            if self.nnodes > 4:
                self.nnodes = 4
                self.fields.nnodeChooser.setSelectedIndex(3)

    def writeParametersToFile(self, section='recon'):
        """Persist the reconstruction parameters to ``self.pfname``.

        Only ``section == 'recon'`` is implemented; ``'dataset'`` is a
        placeholder.  I/O errors are reported but deliberately not raised
        (best-effort persistence, as before).
        """
        print("Write to local file")
        try:
            # ``with`` closes the handle on every path (the original leaked
            # it for the 'dataset' section and on write errors).
            with open(self.pfname, "w+") as FILE:
                if section == 'recon':
                    FILE.write("FileName " + self.fname + '\n')
                    FILE.write("Algorithm " + str(self.algorithm) + "\n")
                    FILE.write("Filter " + str(self.filter_index) + "\n")
                    FILE.write("RemoveStripeMethod " + str(self.stripe_method) + "\n")
                    FILE.write("Center " + str(self.center) + "\n")
                    FILE.write("Slice " + str(self.slice) + "\n")
                    FILE.write("NsinoPerChunk " + str(self.nsino_x_chunk) + "\n")
                    FILE.write("SearchWidth " + str(self.center_search_width) + "\n")
                    FILE.write("Energy " + str(self.energy) + "\n")
                    FILE.write("PropagationDistance " + str(self.propagation_distance) + "\n")
                    FILE.write("PixelSize " + str(self.pixel_size) + "\n")
                    FILE.write("Alpha " + str(self.alpha) + "\n")
                    FILE.write("Queue " + str(self.queue) + "\n")
                    FILE.write("Nnodes " + str(self.nnodes) + "\n")
                    FILE.write("\n")
                elif section == 'dataset':
                    pass
        except IOError as err:
            # Keep the best-effort contract but surface the failure instead
            # of swallowing it silently.
            print("Could not write parameter file %s: %s" % (self.pfname, err))

    def writeParametersToGUI(self):
        """Push the current attribute values into the GUI widgets."""
        self.fields.selectedDatasetField.setText(self.fname)
        self.fields.algorithmChooser.setSelectedIndex(int(self.algorithm))
        self.fields.energyField.setText(self.energy)
        self.fields.propagation_distanceField.setText(self.propagation_distance)
        self.fields.pixel_sizeField.setText(str(self.pixel_size))
        self.fields.alphaField.setText(self.alpha)
        self.fields.filterChooser.setSelectedIndex(self.filter_index)
        self.fields.centerField.setText(str(self.center))
        self.fields.stripe_methodChooser.setSelectedIndex(int(self.stripe_method))
        self.fields.sliceField.setText(str(self.slice))
        self.fields.centerSearchField.setText(self.center_search_width)
        self.fields.nsino_x_chunkField.setText(str(self.nsino_x_chunk))
| [
"decarlof@gmail.com"
] | decarlof@gmail.com |
f01a4e45032fd480b41bd4849f40822f0f9a7cb5 | 4a804eff13e52adf4d8c8e0ccf54ddf60470f87f | /tdigest/__init__.py | 20396a15703af7a40d5c3957263c39639a1253aa | [
"MIT"
] | permissive | IntegersOfK/tdigest | 997fb7fad4f0e7adb8d0e25c1b02ea8e449b9a50 | 879a2fc6f0ce36925b0a29f2ce83244d79cba364 | refs/heads/master | 2021-09-08T06:41:20.623122 | 2018-03-08T01:27:05 | 2018-03-08T01:27:05 | 120,136,449 | 0 | 0 | null | 2018-02-03T22:44:54 | 2018-02-03T22:44:54 | null | UTF-8 | Python | false | false | 51 | py | from .tdigest import TDigest
__version__ = "0.5.0"
| [
"cam.davidson.pilon@gmail.com"
] | cam.davidson.pilon@gmail.com |
cf0b9cdd5d82ab982ce3bd9f1ff077f2de5eb38d | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/keyvault-preview/azext_keyvault_preview/vendored_sdks/azure_mgmt_keyvault/_version.py | 93ed747b94265f5417e23205ffd9859524ab0665 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 345 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "9.3.0"
| [
"noreply@github.com"
] | Azure.noreply@github.com |
365bda0ab035c045fe5cb3cca87d84322833509a | 6f21068b31084e81f38db304a51a2609d8af37cd | /2_Scientific_Libraries/plottypes.py | 35d0f603d222150bf508e57c4626974bd928f729 | [] | no_license | vickyf/eurocontrol_datascience | 374b889cac7b8d377caa78079fb57098e73bba0a | 0a7c09002e3b5f22ad563b05a6b4afe4cb6791d7 | refs/heads/master | 2020-03-19T06:03:14.864839 | 2018-06-04T07:24:25 | 2018-06-04T07:24:25 | 135,986,678 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
#simple plot
x = np.arange(0,2*np.pi,0.1)
y_cos,y_sin = np.cos(x),np.sin(x)
plt.subplot(2,2,1)
plt.plot(x,y_sin,color = "blue")
plt.plot(x,y_cos,color = "red", linewidth = 3, linestyle = '--')
plt.title('Simple plot')
plt.xticks(())
plt.yticks(())
#scatter plot
n = 1024
x = np.random.normal(0,1,n)
y = np.random.normal(0,1,n)
plt.subplot(2,2,2)
plt.scatter(x,y, s = 0.5, color = 'brown')
plt.title('Scatter plot')
plt.xlim(-3,3)
plt.ylim(-3,3)
plt.xticks(())
plt.yticks(())
#pie plot
n=20
z=np.random.uniform(0, 1, n)
plt.subplot(2,2,3)
plt.pie(z, colors = ['%f' % (i/float(n)) for i in range(n)])
plt.title('Pie plot')
plt.axis('equal')
#bar plot
n = 12
x = np.arange(n)
y = (1 - x / float(n)) * np.random.uniform(0.5, 1.0, n)
plt.subplot(2,2,4)
plt.bar(x,y, facecolor='purple')
plt.title('Bar plot')
plt.xticks(())
plt.yticks(())
plt.show() | [
"vicky.froyen@infofarm.be"
] | vicky.froyen@infofarm.be |
41d0a17ac3c3cc7c90992fc154448f084c0fd0cf | 06ee12fb2efa2c67ef1b711450df75af73ef45cd | /day15/08-飞机大战-抽取基类.py | 5c09a0f870bf57a7091073eb1b030bd7ec486959 | [] | no_license | itkasumy/PythonGrammer | c5ed00db3097b8a7dedd49cff79b817e3488d6d9 | 33f17c20ee6533beae1cf422ba5c1376b3765e20 | refs/heads/master | 2020-04-05T15:11:17.337134 | 2018-11-13T05:37:13 | 2018-11-13T05:37:13 | 156,957,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | import pygame
from pygame.locals import *
import time
import random
class Base(object):
    """Common base for on-screen sprites (planes and bullets)."""
    def __init__(self, screen, x, y, imgPath):
        # Sprite position (top-left corner).
        self.x = x
        self.y = y
        # Sprite image, loaded from disk once.
        self.imagePath = imgPath
        self.image = pygame.image.load(self.imagePath)
        # Display surface to draw onto.
        self.screen = screen
    def display(self):
        # Blit the sprite at its current position.
        self.screen.blit(self.image, (self.x, self.y))
class BasePlane(Base):
    """Base class for planes: owns a bullet list and horizontal movement."""
    def __init__(self, screen, x, y, imgPath, moveStep, rightLimit):
        super(BasePlane, self).__init__(screen, x, y, imgPath)
        self.moveStep = moveStep
        # Live bullets fired by this plane.
        self.bullets = []
        self.rightLimit = rightLimit
    def display(self):
        # Draw the plane itself.
        super(BasePlane, self).display()
        # Draw/advance each bullet; collect the off-screen ones separately
        # because a list must not be mutated while being iterated.
        tmp = []
        for bullet in self.bullets:
            bullet.display()
            bullet.move()
            if bullet.judge():
                tmp.append(bullet)
        for bullet in tmp:
            self.bullets.remove(bullet)
    def moveLeft(self):
        # Step left, clamped to the left screen edge.
        self.x -= self.moveStep
        if self.x <= 0:
            self.x = 0
    def moveRight(self):
        # Step right, clamped to this plane's right limit.
        self.x += self.moveStep
        if self.x >= self.rightLimit:
            self.x = self.rightLimit
class BaseBullet(Base):
    """Base class for projectiles; travels upwards by default."""
    def __init__(self, screen, x, y, imgPath):
        super(BaseBullet, self).__init__(screen, x, y, imgPath)
    def move(self):
        # Default direction: upwards (decreasing y), 5 px per frame.
        self.y -= 5
    def __del__(self):
        # Debug trace when a bullet object is garbage-collected
        # (message text means "bullet destroyed...").
        print('子弹销毁了...')
    def judge(self):
        # True once the bullet has left the top edge and can be discarded.
        return self.y <= 0
class HeroPlane(BasePlane):
    """The player's plane: fixed start position, fires on demand."""
    def __init__(self, screen):
        # Start at (190, 520), move 15 px per step, right limit 380.
        super(HeroPlane, self).__init__(screen, 190, 520, './feiji/hero.gif', 15, 380)
    def shoot(self):
        # Fire one bullet from the plane's current position.
        bullet = Bullet(self.x, self.y, self.screen)
        self.bullets.append(bullet)
class EnemyPlane(BasePlane):
    """An enemy plane that patrols horizontally and fires at random."""
    def __init__(self, screen):
        # Start at the top-left, move 3 px per step, right limit 430.
        super(EnemyPlane, self).__init__(screen, 0, 0, './feiji/enemy-1.gif', 3, 430)
        # Movement direction flag: True = moving right, False = moving left.
        self.oritation = True
    def move(self):
        """Bounce between the screen edges, one step per frame."""
        if self.x <= 0:
            self.oritation = True
        # Use the configured right limit rather than repeating the 430
        # literal, keeping move() consistent with the value set in __init__.
        if self.x >= self.rightLimit:
            self.oritation = False
        if self.oritation:
            self.moveRight()
        else:
            self.moveLeft()
    def shoot(self):
        """Fire a bullet with ~5% probability per frame."""
        num = random.randint(1, 100)
        if num < 6:
            bullet = EnemyBullet(self.x, self.y, self.screen)
            self.bullets.append(bullet)
class Bullet(BaseBullet):
    """A bullet fired by the player's plane.

    Movement (5 px upward per frame) and the off-screen test are inherited
    from BaseBullet unchanged; the original overrode ``move`` and ``judge``
    with verbatim copies of the base implementations, so those redundant
    overrides were removed.
    """
    def __init__(self, planeX, planeY, screen):
        # Offset so the bullet appears at the plane's nose.
        super(Bullet, self).__init__(screen, planeX + 40, planeY - 22, './feiji/bullet.png')
class EnemyBullet(BaseBullet):
    """A bullet fired by an enemy plane; travels downwards."""
    def __init__(self, planeX, planeY, screen):
        # Offset so the bullet appears under the enemy plane.
        super(EnemyBullet, self).__init__(screen, planeX + 21, planeY + 40, './feiji/bullet1.png')
    def move(self):
        # Enemy bullets travel downwards, 10 px per frame.
        self.y += 10
    def judge(self):
        # True once past the bottom edge (the window height is 700).
        return self.y >= 700
def main():
    """Main game loop: draw the scene, handle input, advance the sprites."""
    screen = pygame.display.set_mode((480, 700), 0, 32)
    # Load the background image.
    bg = pygame.image.load('./feiji/background.png')
    # Create the player's plane.
    hero = HeroPlane(screen)
    # Create the enemy plane.
    enemy = EnemyPlane(screen)
    while True:
        # Draw the background.
        screen.blit(bg, (0, 0))
        # Draw the player's plane (plus bullets), then advance the enemy.
        hero.display()
        enemy.display()
        enemy.move()
        enemy.shoot()
        # Poll input events (key presses, window close, ...).
        for event in pygame.event.get():
            # Window close button pressed?
            if event.type == QUIT:
                print('exit')
                exit()
            # A key was pressed.
            elif event.type == KEYDOWN:
                # 'a' or left arrow: move left.
                if event.key == K_a or event.key == K_LEFT:
                    print('left')
                    hero.moveLeft()
                # 'd' or right arrow: move right.
                elif event.key == K_d or event.key == K_RIGHT:
                    print('right')
                    hero.moveRight()
                # Space bar: fire.
                elif event.key == K_SPACE:
                    print('space')
                    hero.shoot()
        # Refresh the display and pace the loop (~100 frames per second).
        pygame.display.update()
        time.sleep(1 / 100)
# Entry point: run the game only when executed as a script.
if __name__ == '__main__':
    print('程序开始')
    main()
    print('程序结束')
| [
"18500682038@163.com"
] | 18500682038@163.com |
355024ce6827f5d9554258b98a1491c97c09aa07 | 33c5ba033aaed849328a4d12a07d69603e3bf499 | /rer/groupware/multilanguage/extender.py | ebd4b7027e5f26470dab5904970fd637baa44348 | [] | no_license | PloneGov-IT/rer.groupware.multilanguage | aa543585b7838246c1f5febd19d4e6dc99133074 | 6ad3df7ee20be7d0e67845ffef00a6196f39e35f | refs/heads/master | 2021-01-21T17:46:05.658255 | 2014-11-24T09:49:38 | 2014-11-24T09:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,477 | py | # -*- coding: utf-8 -*-
from archetypes.schemaextender.field import ExtensionField
from archetypes.schemaextender.interfaces import IBrowserLayerAwareExtender, ISchemaExtender
from Products.Archetypes.atapi import StringField
from Products.Archetypes.Widget import LanguageWidget
from Products.ATContentTypes.interface.interfaces import IATContentType
from Products.Archetypes import PloneMessageFactory as _
from rer.groupware.multilanguage.interfaces import IRERGroupwareMultilanguageLayer
from rer.groupware.room.interfaces import IGroupRoom
from zope.component import adapts
from zope.interface import implements
class GroupwareStringField(ExtensionField, StringField):
    """A schemaextender-aware StringField, used to (re)define the
    ``language`` field on extended content types."""
class GroupwareLanguageExtender(object):
    """
    Re-define language field and use a custom default method for all Content types
    """
    # Extend every Archetypes content type...
    adapts(IATContentType)
    implements(ISchemaExtender, IBrowserLayerAwareExtender)
    # ...but only while this add-on's browser layer is active.
    layer = IRERGroupwareMultilanguageLayer
    # Replacement 'language' field: kept in the 'categorization' schemata and
    # defaulted via the 'gpwDefaultLanguage' method looked up on the context.
    fields = [GroupwareStringField(
        'language',
        accessor="Language",
        schemata="categorization",
        default_method='gpwDefaultLanguage',
        vocabulary_factory='plone.app.vocabularies.SupportedContentLanguages',
        widget=LanguageWidget(
            label=_(u'label_language', default=u'Language'),
            format="select",
        ),
    ),
    ]
    def __init__(self, context):
        self.context = context
    def getFields(self):
        # Called by archetypes.schemaextender to collect the extra fields.
        return self.fields
class GroupwareRoomLanguageExtender(object):
    """
    For groupware rooms, move language field in default schemata
    """
    # Same field as GroupwareLanguageExtender, but scoped to group rooms...
    adapts(IGroupRoom)
    implements(ISchemaExtender, IBrowserLayerAwareExtender)
    # ...and only while this add-on's browser layer is active.
    layer = IRERGroupwareMultilanguageLayer
    # Identical 'language' field except it lives in the 'default' schemata,
    # making it visible on the room's main edit form.
    fields = [GroupwareStringField(
        'language',
        accessor="Language",
        schemata="default",
        default_method='gpwDefaultLanguage',
        vocabulary_factory='plone.app.vocabularies.SupportedContentLanguages',
        widget=LanguageWidget(
            label=_(u'label_language', default=u'Language'),
            format="select",
        ),
    ),
    ]
    def __init__(self, context):
        self.context = context
    def getFields(self):
        # Called by archetypes.schemaextender to collect the extra fields.
        return self.fields
| [
"andrea.cecchi85@gmail.com"
] | andrea.cecchi85@gmail.com |
d17faf34688e61a8f5ae8d398a70f3dd580da293 | 83a7b4d45d3ef9429d2cea41ae55d75ac8686adb | /docs/conf.py | 8f39ce36149b8d7ffa0e22e51a90537466655d59 | [
"Apache-2.0"
] | permissive | spectosi/django-fluent-contents | 67f4b8bd00563f9fb302534090ca8246633dbdf8 | 65045b59aa990887dda2b8f782fe22a9479f21d0 | refs/heads/master | 2021-01-17T22:19:37.085938 | 2015-03-04T13:15:58 | 2015-03-04T13:15:58 | 31,657,759 | 0 | 0 | null | 2015-03-04T13:14:14 | 2015-03-04T13:14:13 | null | UTF-8 | Python | false | false | 8,398 | py | # -*- coding: utf-8 -*-
#
# django-fluent-contents documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 21 15:06:42 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('_ext'))
sys.path.insert(0, os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangodummy.settings'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
'sphinx.ext.intersphinx',
'djangoext.docstrings',
'djangoext.roles',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-fluent-contents'
copyright = u'2011-2013, Diederik van der Boor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0c2'
# The full version, including alpha/beta/rc tags.
release = '1.0c2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fluent-contentsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-fluent-contents.tex', u'django-fluent-contents Documentation',
u'Diederik van der Boor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-fluent-contents', u'django-fluent-contents Documentation',
[u'Diederik van der Boor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-fluent-contents', u'django-fluent-contents Documentation',
u'Diederik van der Boor', 'django-fluent-contents', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'https://docs.djangoproject.com/en/dev': 'https://docs.djangoproject.com/en/dev/_objects',
}
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
d93cf0794aaf261eee7c9533501b596587fb860a | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-green/aliyunsdkgreen/request/v20180509/DeleteSimilarityImageRequest.py | af9a11a5bf184b26700c4e8d21344d8584e5751d | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DeleteSimilarityImageRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Green', '2018-05-09', 'DeleteSimilarityImage','green')
self.set_uri_pattern('/green/similarity/image/delete')
self.set_method('POST')
def get_ClientInfo(self):
return self.get_query_params().get('ClientInfo')
def set_ClientInfo(self,ClientInfo):
self.add_query_param('ClientInfo',ClientInfo) | [
"1478458905@qq.com"
] | 1478458905@qq.com |
34a22dd742197816b0ddfde3ab0c04498b9b2002 | cad762658ab8326d7f43bba6f69df35a8b770e34 | /test/test_markdown_paragraph_series_n.py | d1bd2e12a1b0e76f790508005135624dde7ad346 | [
"MIT"
] | permissive | ExternalRepositories/pymarkdown | 9c248b519791a4c869d1e71fa405c06d15ce553b | 479ace2d2d9dd5def81c72ef3b58bce6fb76f594 | refs/heads/main | 2023-08-28T03:45:25.536530 | 2021-10-31T19:39:22 | 2021-10-31T19:39:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,216 | py | """
https://github.github.com/gfm/#paragraph
"""
import pytest
from .utils import act_and_assert
# pylint: disable=too-many-lines
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_t():
"""
Test case: Block Quote with text, newline, block quote, text
"""
# Arrange
source_markdown = """> uvw
> xyz"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[para(1,3):\n]",
"[text(1,3):uvw\nxyz::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw
xyz</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i2_t():
"""
Test case: Block Quote with text, newline, indent of 2, text
"""
# Arrange
source_markdown = """> uvw
xyz"""
expected_tokens = [
"[block-quote(1,1)::> \n]",
"[para(1,3):\n ]",
"[text(1,3):uvw\nxyz::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw
xyz</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_t():
"""
Test case: unordered list, text, newline, indent of 2, block Quote, text, newline,
indent of 2, block quote, text
"""
# Arrange
source_markdown = """* abc
> uvw
> xyz
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > ]",
"[para(2,5):\n]",
"[text(2,5):uvw\nxyz::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw
xyz</p>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i4_t():
"""
Test case: unordered list, text, newline, indent of 2, block Quote, text, newline,
indent of 4, text
"""
# Arrange
source_markdown = """* abc
> uvw
xyz
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n ]",
"[para(2,5):\n ]",
"[text(2,5):uvw\nxyz::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw
xyz</p>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_ha():
"""
Test case: Block Quote with text, newline, block quote, atx heading
"""
# Arrange
source_markdown = """> uvw
> # head"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::False]",
"[atx(2,3):1:0:]",
"[text(2,5):head: ]",
"[end-atx::]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<h1>head</h1>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i2_ha():
"""
Test case: Block Quote with text, newline, proper indent for lazy, atx heading
"""
# Arrange
source_markdown = """> uvw
# head"""
expected_tokens = [
"[block-quote(1,1)::> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[atx(2,3):1:0: ]",
"[text(2,5):head: ]",
"[end-atx::]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
</blockquote>
<h1>head</h1>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_ha():
"""
Test case: Unordered list, text, newline, Indent of 2, Block Quote with text,
newline, ident of 2, block quote, atx heading
"""
# Arrange
source_markdown = """* abc
> uvw
> # head
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::False]",
"[atx(3,5):1:0:]",
"[text(3,7):head: ]",
"[end-atx::]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
<h1>head</h1>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_i2_bq_t_nl_i4_ha():
"""
Test case: Unordered list, text, newline, Indent of 2, Block Quote with text,
newline, ident of 4, atx heading
"""
# Arrange
source_markdown = """* abc
> uvw
# head
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[atx(3,5):1:0: ]",
"[text(3,7):head: ]",
"[end-atx::]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
</blockquote>
<h1>head</h1>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_tb():
"""
Test case: Block quote, text, newline, block quote, thematic break
"""
# Arrange
source_markdown = """> uvw
> ---"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[setext(2,3):-:3::(1,3)]",
"[text(1,3):uvw:]",
"[end-setext::]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<h2>uvw</h2>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i2_tb():
"""
Test case: Block quote, text, newline, indent of 2, thematic break
"""
# Arrange
source_markdown = """> uvw
---"""
expected_tokens = [
"[block-quote(1,1)::> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[tbreak(2,3):-: :---]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
</blockquote>
<hr />"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_tb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 2, block quote, thematic break
"""
# Arrange
source_markdown = """* abc
> uvw
> ---
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > ]",
"[setext(3,5):-:3::(2,5)]",
"[text(2,5):uvw:]",
"[end-setext::]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<h2>uvw</h2>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i4_tb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 4, thematic break
"""
# Arrange
source_markdown = """* abc
> uvw
---
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[tbreak(3,5):-: :---]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
</blockquote>
<hr />
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_hb():
"""
Test case: Block quote, text, newline, block quote, html block
"""
# Arrange
source_markdown = """> uvw
> <!-- comment -->"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::False]",
"[html-block(2,3)]",
"[text(2,3):<!-- comment -->:]",
"[end-html-block:::False]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<!-- comment -->
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i2_hb():
"""
Test case: Block quote, text, newline, indent of 2, html block
"""
# Arrange
source_markdown = """> uvw
<!-- comment -->"""
expected_tokens = [
"[block-quote(1,1)::> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[html-block(2,1)]",
"[text(2,3):<!-- comment -->: ]",
"[end-html-block:::False]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
</blockquote>
<!-- comment -->"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_hb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 2, block quote, html block
"""
# Arrange
source_markdown = """* abc
> uvw
> <!-- comment -->
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::False]",
"[html-block(3,5)]",
"[text(3,5):<!-- comment -->:]",
"[end-html-block:::False]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
<!-- comment -->
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i4_hb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 4, html block
"""
# Arrange
source_markdown = """* abc
> uvw
<!-- comment -->
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[html-block(3,3)]",
"[text(3,5):<!-- comment -->: ]",
"[end-html-block:::False]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
</blockquote>
<!-- comment -->
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_fb():
"""
Test case: Block quote, text, newline, block quote, fenced block
"""
# Arrange
source_markdown = """> uvw
> ```
> def
> ```"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n> \n> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::False]",
"[fcode-block(2,3):`:3::::::]",
"[text(3,3):def:]",
"[end-fcode-block::3:False]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<pre><code>def
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_fb_nl_with_bq():
"""
Test case: Block quote, text, newline, block quote, fenced block with newlines
prefaced by block quotes
"""
# Arrange
source_markdown = """> uvw
> ```
>
> def
>
> ```"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n>\n> \n>\n> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::False]",
"[fcode-block(2,3):`:3::::::]",
"[text(3,3):\ndef\n:]",
"[end-fcode-block::3:False]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<pre><code>
def
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_fb_nl_without_bq():
"""
Test case: Block quote, text, newline, block quote, fenced block with newlines
not prefaced by block quotes
"""
# Arrange
source_markdown = """> uvw
> ```
> def
> ```"""
expected_tokens = [
"[block-quote(1,1)::> \n> \n]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::False]",
"[fcode-block(2,3):`:3::::::]",
"[end-fcode-block:::True]",
"[end-block-quote:::True]",
"[BLANK(3,1):]",
"[block-quote(4,1)::> \n]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(5,1):]",
"[block-quote(6,1)::> ]",
"[fcode-block(6,3):`:3::::::]",
"[end-fcode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<pre><code></code></pre>
</blockquote>
<blockquote>
<p>def</p>
</blockquote>
<blockquote>
<pre><code></code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i2_fb():
"""
Test case: Block quote, text, newline, indent of 2, fenced block
"""
# Arrange
source_markdown = """> uvw
```
def
```"""
expected_tokens = [
"[block-quote(1,1)::> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[fcode-block(2,3):`:3::::: :]",
"[text(3,3):def:\a \a\x03\a]",
"[end-fcode-block: :3:False]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
</blockquote>
<pre><code>def
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_fb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 2, block quote, fenced block
"""
# Arrange
source_markdown = """* abc
> uvw
> ```
> def
> ```
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > \n > \n > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::False]",
"[fcode-block(3,5):`:3::::::]",
"[text(4,5):def:]",
"[end-fcode-block::3:False]",
"[end-block-quote:::True]",
"[li(6,1):2::]",
"[para(6,3):]",
"[text(6,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
<pre><code>def
</code></pre>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i4_fb():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 4, fenced block
"""
# Arrange
source_markdown = """* abc
> uvw
```
def
```
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: \n \n ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[fcode-block(3,5):`:3::::: :]",
"[text(4,3):def:\a \a\x03\a]",
"[end-fcode-block: :3:False]",
"[li(6,1):2::]",
"[para(6,3):]",
"[text(6,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
</blockquote>
<pre><code>def
</code></pre>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_ib():
"""
Test case: Block quote, text, newline, block quote, indented block
"""
# Arrange
source_markdown = """> uvw
> def"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[para(1,3):\n ]",
"[text(1,3):uvw\ndef::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw
def</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_i4_t_nl_bq_i4_t():
"""
Test case: Block quote indent of 4 text newline block quote indent of 4 text
"""
# Arrange
source_markdown = """> foo
> bar"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[icode-block(1,7): :\n ]",
"[text(1,7):foo\nbar:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>foo
bar
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_i4_t_nl_bq_i3_t():
"""
Test case: Block quote indent of 4 text newline block quote indent of 3 text
"""
# Arrange
source_markdown = """> foo
> bar"""
expected_tokens = [
"[block-quote(1,1)::> \n> ]",
"[icode-block(1,7): :]",
"[text(1,7):foo:]",
"[end-icode-block:::False]",
"[para(2,6): ]",
"[text(2,6):bar:]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>foo
</code></pre>
<p>bar</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_i4_t_nl_nl_bq_i4_t():
"""
Test case: Block quote indent of 4 text newline newline block quote indent of 4 text
"""
# Arrange
source_markdown = """> foo
> bar"""
expected_tokens = [
"[block-quote(1,1)::> \n]",
"[icode-block(1,7): :]",
"[text(1,7):foo:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[BLANK(2,1):]",
"[block-quote(3,1)::> ]",
"[icode-block(3,7): :]",
"[text(3,7):bar:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>foo
</code></pre>
</blockquote>
<blockquote>
<pre><code>bar
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_i4_t_nl_bq_nl_bq_i4_t():
"""
Test case: Block quote indent of 4 text newline block quote newline block quote indent of 4 text
"""
# Arrange
source_markdown = """> foo
> bar"""
expected_tokens = [
"[block-quote(1,1)::> \n]",
"[icode-block(1,7): :]",
"[text(1,7):foo:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[BLANK(2,1):]",
"[block-quote(3,1)::> ]",
"[icode-block(3,7): :]",
"[text(3,7):bar:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<pre><code>foo
</code></pre>
</blockquote>
<blockquote>
<pre><code>bar
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_bq_nl_bq_ib():
"""
Test case: Block quote, text, newline, block quote, newline block quote, indented block
"""
# Arrange
source_markdown = """> uvw
>
> def"""
expected_tokens = [
"[block-quote(1,1)::> \n>\n> ]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[BLANK(2,2):]",
"[icode-block(3,7): :]",
"[text(3,7):def:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
<pre><code>def
</code></pre>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_i6_ib():
"""
Test case: Block quote, text, newline, indent of 6, indented block
"""
# Arrange
source_markdown = """> uvw
def"""
expected_tokens = [
"[block-quote(1,1)::> \n]",
"[para(1,3):\n ]",
"[text(1,3):uvw\ndef::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
]
expected_gfm = """<blockquote>
<p>uvw
def</p>
</blockquote>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_bq_t_nl_nl_nl_i6_ib():
"""
Test case: Block quote, text, newline, newline, indent of 6, indented block
"""
# Arrange
source_markdown = """> uvw
def"""
expected_tokens = [
"[block-quote(1,1)::> \n]",
"[para(1,3):]",
"[text(1,3):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(2,1):]",
"[icode-block(3,5): :]",
"[text(3,5):def: ]",
"[end-icode-block:::True]",
]
expected_gfm = """<blockquote>
<p>uvw</p>
</blockquote>
<pre><code> def
</code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i2_bq_ib():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 2, block quote, indented block
"""
# Arrange
source_markdown = """* abc
> uvw
> def
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n > ]",
"[para(2,5):\n ]",
"[text(2,5):uvw\ndef::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw
def</p>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_bq_nl_i2_bq_ib():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
block quote, newline, indent of 2, block quote, indented block
"""
# Arrange
source_markdown = """* abc
> uvw
>
> def
* def"""
expected_tokens = [
"[ulist(1,1):*::2:]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n >\n > ]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[BLANK(3,4):]",
"[icode-block(4,9): :]",
"[text(4,9):def:]",
"[end-icode-block:::True]",
"[end-block-quote:::True]",
"[li(5,1):2::]",
"[para(5,3):]",
"[text(5,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw</p>
<pre><code>def
</code></pre>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_i6_ib():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
indent of 6, indented block
"""
# Arrange
source_markdown = """* abc
> uvw
def
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n ]",
"[para(2,5):\n ]",
"[text(2,5):uvw\ndef::\n]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[li(4,1):2::]",
"[para(4,3):]",
"[text(4,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>abc
<blockquote>
<p>uvw
def</p>
</blockquote>
</li>
<li>def</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
@pytest.mark.gfm
def test_paragraph_series_n_ul_t_nl_i2_bq_t_nl_nl_i6_ib():
"""
Test case: Unordered list, text, newline, ident of 2, block quote, text, newline,
newline, indent of 6, indented block
"""
# Arrange
source_markdown = """* abc
> uvw
def
* def"""
expected_tokens = [
"[ulist(1,1):*::2:: ]",
"[para(1,3):]",
"[text(1,3):abc:]",
"[end-para:::True]",
"[block-quote(2,3): : > \n]",
"[para(2,5):]",
"[text(2,5):uvw:]",
"[end-para:::True]",
"[end-block-quote:::True]",
"[BLANK(3,1):]",
"[icode-block(4,7): :]",
"[text(4,7):def:]",
"[end-icode-block:::True]",
"[li(5,1):2::]",
"[para(5,3):]",
"[text(5,3):def:]",
"[end-para:::True]",
"[end-ulist:::True]",
]
expected_gfm = """<ul>
<li>
<p>abc</p>
<blockquote>
<p>uvw</p>
</blockquote>
<pre><code>def
</code></pre>
</li>
<li>
<p>def</p>
</li>
</ul>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
# setext?
| [
"jack.de.winter@outlook.com"
] | jack.de.winter@outlook.com |
c309b24c5233096607053ef27b246b64c8bf58cc | 1ceb35da7b1106a4da4e8a3a5620d23a326a68e4 | /corticalmapping/scripts/post_recording/analysis_pipeline_zstack/old/caiman/010_motion_correction_zstack_caiman_multichannel.py | fae38c75308d66e6f6d96c8dd03f31f6a74cf3e7 | [] | no_license | zhuangjun1981/corticalmapping | c3870a3f31ed064d77f209a08e71f44c375676a3 | 0ddd261b3993f5ce5608adfbd98a588afc56d20c | refs/heads/master | 2022-11-14T03:24:53.443659 | 2020-07-13T23:48:50 | 2020-07-13T23:48:50 | 84,975,797 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | import sys
sys.path.extend([r"E:\data\github_packages\CaImAn"])
import caiman as cm
import numpy as np
import os
from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise
import tifffile as tf
import h5py
import warnings
from multiprocessing import Pool
base_folder = r"\\allen\programs\braintv\workgroups\nc-ophys\Jun\raw_data_rabies_project" \
r"\180605-M391355-2p\zstack\zstack_zoom2"
reference_chn = 'green'
n_processes = 5
def correct_single_movie(folder_path):
#=======================================setup parameters==============================================
# number of iterations for rigid motion correction
niter_rig = 5
# maximum allowed rigid shift in pixels (view the movie to get a sense of motion)
max_shifts = (30, 30)
# for parallelization split the movies in num_splits chuncks across time
# if none all the splits are processed and the movie is saved
splits_rig = 56
# intervals at which patches are laid out for motion correction
# num_splits_to_process_rig = None
# create a new patch every x pixels for pw-rigid correction
strides = (48, 48)
# overlap between pathes (size of patch strides+overlaps)
overlaps = (24, 24)
# for parallelization split the movies in num_splits chuncks across time
splits_els = 56
# num_splits_to_process_els = [28, None]
# upsample factor to avoid smearing when merging patches
upsample_factor_grid = 4
# maximum deviation allowed for patch with respect to rigid shifts
max_deviation_rigid = 3
# if True, apply shifts fast way (but smoothing results) by using opencv
shifts_opencv = True
# if True, make the SAVED movie and template mostly nonnegative by removing min_mov from movie
nonneg_movie = False
# =======================================setup parameters==============================================
offset_mov = 0.
file_path = [f for f in os.listdir(folder_path) if f[-4:] == '.tif']
if len(file_path) == 0:
raise LookupError('no tif file found in folder: {}'.format(folder_path))
elif len(file_path) > 1:
raise LookupError('more than one tif files found in folder: {}'.format(folder_path))
else:
file_path = os.path.join(folder_path, file_path[0])
# create a motion correction object# creat
mc = MotionCorrect(file_path, offset_mov,
dview=None, max_shifts=max_shifts, niter_rig=niter_rig,
splits_rig=splits_rig, strides=strides, overlaps=overlaps,
splits_els=splits_els, upsample_factor_grid=upsample_factor_grid,
max_deviation_rigid=max_deviation_rigid,
shifts_opencv=shifts_opencv, nonneg_movie=nonneg_movie)
mc.motion_correct_rigid(save_movie=True)
# load motion corrected movie
m_rig = cm.load(mc.fname_tot_rig)
m_rig = m_rig.astype(np.int16)
save_name = os.path.splitext(file_path)[0] + '_corrected.tif'
tf.imsave(os.path.join(folder_path, save_name), m_rig)
tf.imsave(os.path.join(folder_path, 'corrected_mean_projection.tif'),
np.mean(m_rig, axis=0).astype(np.float32))
tf.imsave(os.path.join(folder_path, 'corrected_max_projection.tif'),
np.max(m_rig, axis=0).astype(np.float32))
offset_f = h5py.File(os.path.join(folder_path, 'correction_offsets.hdf5'))
offsets = mc.shifts_rig
offsets = np.array([np.array(o) for o in offsets]).astype(np.float32)
offset_dset = offset_f.create_dataset(name='file_0000', data=offsets)
offset_dset.attrs['format'] = 'height, width'
offset_dset.attrs['path'] = file_path
os.remove(mc.fname_tot_rig[0])
if __name__ == '__main__':
data_folder = os.path.join(base_folder, reference_chn)
chunk_p = Pool(n_processes)
folder_list = [f for f in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, f))]
folder_list.sort()
print('\n'.join(folder_list))
folder_list = [os.path.join(data_folder, f) for f in folder_list]
chunk_p.map(correct_single_movie, folder_list) | [
"junz@alleninstitute.org"
] | junz@alleninstitute.org |
790e2066d1faca87986538cc4d7037df448d580e | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_VSCODE-extensions/vscode-jupyter/pythonFiles/vscode_datascience_helpers/daemon/__main__.py | 666f245b2a23462a33bfce132b6e6d3bdedd2a9d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 3,982 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import importlib
import json
import os
import logging
import logging.config
import sys
log = logging.getLogger(__name__)
LOG_FORMAT = (
"%(asctime)s UTC - %(levelname)s - (PID: %(process)d) - %(name)s - %(message)s"
)
queue_handler = None
def add_arguments(parser):
parser.description = "Daemon"
parser.add_argument(
"--daemon-module",
default="vscode_datascience_helpers.daemon.daemon_python",
help="Daemon Module",
)
log_group = parser.add_mutually_exclusive_group()
log_group.add_argument(
"--log-config", help="Path to a JSON file containing Python logging config."
)
log_group.add_argument(
"--log-file",
help="Redirect logs to the given file instead of writing to stderr."
"Has no effect if used with --log-config.",
)
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Increase verbosity of log output, overrides log config file",
)
class TemporaryQueueHandler(logging.Handler):
"""Logger used to temporarily store everything into a queue.
Later the messages are pushed back to the RPC client as a notification.
Once the RPC channel is up, we'll stop queuing messages and sending id directly.
"""
def __init__(self):
logging.Handler.__init__(self)
self.queue = []
self.server = None
def set_server(self, server):
# Send everything that has beeen queued until now.
self.server = server
for msg in self.queue:
self.server._endpoint.notify("log", msg)
self.queue = []
def emit(self, record):
data = {
"level": record.levelname,
"msg": self.format(record),
"pid": os.getpid(),
}
# If we don't have the server, then queue it and send it later.
if self.server is None:
self.queue.append(data)
else:
self.server._endpoint.notify("log", data)
def _configure_logger(verbose=0, log_config=None, log_file=None):
root_logger = logging.root
global queue_handler
if log_config:
with open(log_config, "r") as f:
logging.config.dictConfig(json.load(f))
else:
formatter = logging.Formatter(LOG_FORMAT)
if log_file:
log_handler = logging.handlers.RotatingFileHandler(
log_file,
mode="a",
maxBytes=50 * 1024 * 1024,
backupCount=10,
encoding=None,
delay=0,
)
log_handler.setFormatter(formatter)
root_logger.addHandler(log_handler)
else:
queue_handler = TemporaryQueueHandler()
root_logger.addHandler(queue_handler)
if verbose == 0:
level = logging.WARNING
elif verbose == 1:
level = logging.INFO
elif verbose >= 2:
level = logging.DEBUG
root_logger.setLevel(level)
def main():
"""Starts the daemon.
The daemon_module allows authors of modules to provide a custom daemon implementation.
E.g. we have a base implementation for standard python functionality,
and a custom daemon implementation for DS work (related to jupyter).
"""
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
_configure_logger(args.verbose, args.log_config, args.log_file)
log.info("Starting daemon from %s.PythonDaemon", args.daemon_module)
try:
daemon_module = importlib.import_module(args.daemon_module)
daemon_cls = daemon_module.PythonDaemon
daemon_cls.start_daemon(queue_handler)
except Exception:
import traceback
log.error(traceback.format_exc())
raise Exception("Failed to start daemon")
if __name__ == "__main__":
main()
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
497196af0825121b296b2753bc579672864906c4 | 0258ce084f66f5c4080b686f7fd388ef8094ac75 | /Flask Programs/SQLite/crud.py | 83aaabb26dbec997c4d7e6a2081a619863c3a509 | [] | no_license | Jaydeep-07/Flask | 524be9056c9e5cc6e7733d5780f8ce1e110cc9b4 | 172b5fd30785d3876aeb5509eb685887f1692d52 | refs/heads/master | 2020-12-26T11:15:05.092803 | 2020-05-09T10:19:13 | 2020-05-09T10:19:13 | 237,491,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,918 | py | from flask import *
import sqlite3
app = Flask(__name__,template_folder='Template')
@app.route("/")
def index():
return render_template("index.html");
@app.route("/add")
def add():
return render_template("add.html")
@app.route("/savedetails",methods = ["POST","GET"])
def saveDetails():
msg = "msg"
if request.method == "POST":
try:
name = request.form["name"]
email = request.form["email"]
address = request.form["address"]
with sqlite3.connect("employee.db") as con:
cur = con.cursor()
cur.execute("INSERT into Employees (name, email, address) values (?,?,?)",(name,email,address))
con.commit()
msg = "Employee successfully Added"
except:
con.rollback()
msg = "We can not add the employee to the list"
finally:
return render_template("success.html",msg = msg)
con.close()
@app.route("/view")
def view():
con = sqlite3.connect("employee.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from Employees")
rows = cur.fetchall()
return render_template("view.html",rows = rows)
@app.route("/delete")
def delete():
return render_template("delete.html")
@app.route("/deleterecord",methods = ["POST"])
def deleterecord():
id = request.form["id"]
with sqlite3.connect("employee.db") as con:
try:
cur = con.cursor()
cur.execute("delete from Employees where id = ?",id)
msg = "record successfully deleted"
except:
msg = "can't be deleted"
finally:
return render_template("delete_record.html",msg = msg)
if __name__ == "__main__":
app.run(debug = True) | [
"jaydeepvpatil225@gmail.com"
] | jaydeepvpatil225@gmail.com |
4a20713389c308d697f51ef7fcbc1917e11778a8 | dae2da1b80124ba6923cb1674033208af01e031f | /problems/202101/R-boj-7453-합이 0인 네 정수.py | eedffde840ca2557e26440da7144a901cb79aa2b | [] | no_license | MaxKim-J/Algo | ad09fa19da3e764ba7af18d015bbaa186643f7fb | 34771c45361db4aade4c364179e13708c5004b3a | refs/heads/master | 2023-01-08T00:49:11.479545 | 2022-12-26T15:45:35 | 2022-12-26T15:48:52 | 237,583,281 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | # 부분수열 합이랑 똑같은 아이디어
#! 자료형을 임의로 나눠 시간을 줄이는 간단하고도 명쾌한 방법!!!!!!!!
#! n^4에는 당연히 못풀고 O(2n^2) 쯤으로 시간을 줄이는 방법이다. 이렇게 백트랙킹도 가능한듯
# 입력이 최대 4 * 4000이니 sys를 사용하자
import sys
N = int(sys.stdin.readline())
answer = 0
A = B = C = D = []
for _ in range(N):
a1, b1, c1, d1 = map(int, input().split(" "))
A.append(a1)
B.append(b1)
C.append(c1)
D.append(d1)
AB = dict()
# 전에 풀었던 문제처럼, 합을 완성할때 값을 도출시키면 시간이 더 준다
# 곱경우 구하기
for i in range(N):
for j in range(N):
temp = A[i] + B[j]
if temp in AB:
AB[temp] += 1
else:
AB[temp] = 1
for i in range(N):
for j in range(N):
temp = -(C[i] + D[j])
if temp in AB:
answer += AB[temp]
print(answer)
| [
"hwaseen@gmail.com"
] | hwaseen@gmail.com |
6dd1608accf61ff244bcb11ab0befc4eceb2c646 | 9d7d69178c6f1f1db6ed6767e0af32bfe836549c | /new_workspace/Gumtree_Workspace/Magnet/Yick/P9363/100 Alignment/20210130/Overnight/2021_time_55_5K_0Oe_bottom_40min.py | 2f815255bbf7912196d0d54ccc32594d35ba372e | [] | no_license | Gumtree/Quokka_scripts | 217958288b59adbdaf00a9a13ece42f169003889 | c9687d963552023d7408a8530005a99aabea1697 | refs/heads/master | 2023-08-30T20:47:32.142903 | 2023-08-18T03:38:09 | 2023-08-18T03:38:09 | 8,191,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,432 | py | histmem preset 60
histmem mode time
#Time scan
#-----------------------------------------------------------------
#System reset (15 minutes)
hset /sample/tc1/control/tolerance1 1
drive ma1_setpoint 0
drive tc1_driveable 90
wait 10
drive ma1_setpoint 0
wait 10
hset /sample/tc1/control/tolerance1 0.1
drive tc1_driveable 55.8
#-----------------------------------------------------------------
drive ma1_setpoint 0
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 10 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 20 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 30 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 40 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 50 minutes
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
newfile HISTOGRAM_XY
histmem start block
save
histmem stop
# 60 minutes | [
"quokka@DAV5-QUOKKA.nbi.ansto.gov.au"
] | quokka@DAV5-QUOKKA.nbi.ansto.gov.au |
728444c91e5b1c215f157d91c477de72e296153f | 7cf119239091001cbe687f73018dc6a58b5b1333 | /datashufflepy-zeus/src/branch_scripts2/NEWS/ZX_CJXW_ZYCJ/ZX_CJXW_ZYCJ_CJW_HGSY.py | fa6658d4b23ae9065f2f10549ae13c7b765d6153 | [
"Apache-2.0"
] | permissive | ILKKAI/dataETL | 0f5b80c3482994f735f092a1e01fa1009bac4109 | 32f7ec3aaaf32b5074536a615cb9cd5c28bd499c | refs/heads/master | 2022-04-04T19:27:05.747852 | 2020-02-28T11:17:48 | 2020-02-28T11:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # -*- coding: utf-8 -*-
from database._mongodb import MongoClient
def data_shuffle(data):
return data
if __name__ == '__main__':
main_mongo = MongoClient(entity_code="ZX_CJXW_ZYCJ_CJW_HGSY", mongo_collection="ZX_CJXW_ZYCJ")
data_list = main_mongo.main()
for data in data_list:
re_data = data_shuffle(data)
print(re_data)
| [
"499413642@qq.com"
] | 499413642@qq.com |
f2c6e6187a2ca1c6010314be23e1c55da99dce8a | d8f1c299d1b1c3619272bb2f81197e170c88887a | /postgresqleu/confreg/migrations/0008_volunteers.py | 8861f16b53e93b766bb5df47fc697c623fa35b10 | [] | no_license | mhagander/pgeu-website | 1d324898a8ba47af80488d1f803fd8684157bc80 | c88cd42b39a84d16a056f2b668fd1949c893c4e0 | refs/heads/master | 2021-01-24T06:06:19.545244 | 2018-12-04T19:53:33 | 2018-12-04T19:55:40 | 619,420 | 0 | 2 | null | 2019-11-02T08:00:17 | 2010-04-20T11:51:40 | Python | UTF-8 | Python | false | false | 2,592 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
import django.contrib.postgres.fields.ranges
class Migration(migrations.Migration):
dependencies = [
('confreg', '0007_new_specialtype'),
]
operations = [
migrations.CreateModel(
name='VolunteerAssignment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('vol_confirmed', models.BooleanField(default=False, verbose_name=b'Confirmed by volunteer')),
('org_confirmed', models.BooleanField(default=False, verbose_name=b'Confirmed by organizers')),
],
),
migrations.CreateModel(
name='VolunteerSlot',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('timerange', django.contrib.postgres.fields.ranges.DateTimeRangeField()),
('title', models.CharField(max_length=50)),
('min_staff', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
('max_staff', models.IntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
],
),
migrations.AddField(
model_name='conference',
name='volunteers',
field=models.ManyToManyField(help_text=b'Users who volunteer', related_name='volunteers_set', to='confreg.ConferenceRegistration', blank=True),
),
migrations.AddField(
model_name='conferenceregistration',
name='regtoken',
field=models.TextField(unique=True, null=True, blank=True),
),
migrations.AddField(
model_name='volunteerslot',
name='conference',
field=models.ForeignKey(to='confreg.Conference', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='volunteerassignment',
name='reg',
field=models.ForeignKey(to='confreg.ConferenceRegistration', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='volunteerassignment',
name='slot',
field=models.ForeignKey(to='confreg.VolunteerSlot', on_delete=models.CASCADE),
),
migrations.RunSQL(
"CREATE INDEX confreg_volunteerslot_timerange_idx ON confreg_volunteerslot USING gist(timerange)",
),
]
| [
"magnus@hagander.net"
] | magnus@hagander.net |
edae2068ba8fc33d1442dfb5bfa8df67c369737f | 18a2e479f4edef528fa7803723822f9f5974e5f8 | /17_adding_fips_to_counties.py | 706e66e8deef1a90e70c521945b4d6217b762bcd | [] | no_license | wpower12/RedditCountyBias | ee25cb870b807466ed53225471e9ac6f5eec1cd0 | 59f0b6642f20547ac129b47496ef3ca0ac135a39 | refs/heads/master | 2023-04-04T22:24:24.258295 | 2021-04-15T17:50:18 | 2021-04-15T17:50:18 | 329,438,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import pandas as pd
import praw
from psaw import PushshiftAPI
import time
import datetime as dt
import pymysql as sql
import progressbar
import requests
import math
COUNTY_FN = "./data/counties_fips.csv"
conn = sql.connect(host='localhost',
user='bill',
password='password',
database='reddit_data')
df = pd.read_csv(COUNTY_FN)
UPDATE_COUNTY_SQL = """UPDATE county SET fips='{}' WHERE county_id={};"""
for row in df.iterrows():
state = row[1]['state']
county = row[1]['county_name']
county_id = row[1]['county_id']
fips_val = row[1]['fips']
if not math.isnan(fips_val):
# need to make sure this gets 0' padded at the front.
fips_val = "{:0>5}".format(int(row[1]['fips']))
else:
fips_val = -1
with conn.cursor() as cursor:
cursor.execute(UPDATE_COUNTY_SQL.format(fips_val, county_id))
conn.commit() | [
"willpowe@gmail.com"
] | willpowe@gmail.com |
34569ddf4dde722008a3ee453027d40c2911e838 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/portal/azext_portal/vendored_sdks/portal/aio/operations_async/_operation_operations_async.py | aebe6bdf31d0b930019b5627d296786e99d0c031 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 4,017 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationOperations:
"""OperationOperations async operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~portal.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> "models.ResourceProviderOperationList":
"""The Microsoft Portal operations API.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceProviderOperationList or the result of cls(response)
:rtype: ~portal.models.ResourceProviderOperationList
:raises: ~portal.models.ErrorResponseException:
"""
cls: ClsType["models.ResourceProviderOperationList"] = kwargs.pop('cls', None )
error_map = kwargs.pop('error_map', {})
api_version = "2019-01-01-preview"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
else:
url = next_link
# Construct parameters
query_parameters: Dict[str, Any] = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters: Dict[str, Any] = {}
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceProviderOperationList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise models.ErrorResponseException.from_response(response, self._deserialize)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Portal/operations'}
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e7e1a396fe3a3d6b353cf863995d1b47d0be9c89 | cc86a7c9f27b45002c9d0d5388b6457b6470dc2c | /modern_business/settings.py | 0404b4aa16bb9461d3c290680556037305bad97c | [] | no_license | innotexak/DjangoCMS | 5bf18df6aa1212cf0c048c83e1faa203b93a7a36 | 6545f928fb9afb09a004b9c5935158e244d1086b | refs/heads/main | 2023-02-26T09:59:50.974034 | 2021-02-04T14:53:11 | 2021-02-04T14:53:11 | 331,692,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,452 | py | import os # isort:skip
gettext = lambda s: s
DATA_DIR = os.path.dirname(os.path.dirname(__file__))
"""
Django settings for modern_business project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f!*h4ap^i6o-pdm+qj9yqb@(56g_)epg6rl_5%-2x_45s^0f@f'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
ROOT_URLCONF = 'modern_business.urls'
WSGI_APPLICATION = 'modern_business.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'Africa/Lagos'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
STATIC_ROOT = os.path.join(DATA_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'modern_business', 'static'),
)
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'modern_business', 'templates'),],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.csrf',
'django.template.context_processors.tz',
'sekizai.context_processors.sekizai',
'django.template.context_processors.static',
'cms.context_processors.cms_settings'
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
MIDDLEWARE = [
'cms.middleware.utils.ApphookReloadMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware'
]
INSTALLED_APPS = [
'djangocms_admin_style',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django.contrib.messages',
'cms',
'menus',
'sekizai',
'treebeard',
'djangocms_text_ckeditor',
'filer',
'easy_thumbnails',
'djangocms_bootstrap4',
'djangocms_bootstrap4.contrib.bootstrap4_alerts',
'djangocms_bootstrap4.contrib.bootstrap4_badge',
'djangocms_bootstrap4.contrib.bootstrap4_card',
'djangocms_bootstrap4.contrib.bootstrap4_carousel',
'djangocms_bootstrap4.contrib.bootstrap4_collapse',
'djangocms_bootstrap4.contrib.bootstrap4_content',
'djangocms_bootstrap4.contrib.bootstrap4_grid',
'djangocms_bootstrap4.contrib.bootstrap4_jumbotron',
'djangocms_bootstrap4.contrib.bootstrap4_link',
'djangocms_bootstrap4.contrib.bootstrap4_listgroup',
'djangocms_bootstrap4.contrib.bootstrap4_media',
'djangocms_bootstrap4.contrib.bootstrap4_picture',
'djangocms_bootstrap4.contrib.bootstrap4_tabs',
'djangocms_bootstrap4.contrib.bootstrap4_utilities',
'djangocms_file',
'djangocms_icon',
'djangocms_link',
'djangocms_picture',
'djangocms_style',
'djangocms_googlemap',
'djangocms_video',
'modern_business',
'polls_cms_integration',
'polls'
]
LANGUAGES = (
## Customize this
('en', gettext('en')),
)
CMS_LANGUAGES = {
## Customize this
1: [
{
'code': 'en',
'name': gettext('en'),
'redirect_on_fallback': True,
'public': True,
'hide_untranslated': False,
},
],
'default': {
'redirect_on_fallback': True,
'public': True,
'hide_untranslated': False,
},
}
CMS_TEMPLATES = (
## Customize this
('fullwidth.html', 'Fullwidth'),
('home.html', 'Home'),
)
X_FRAME_OPTIONS = 'SAMEORIGIN'
CMS_PERMISSION = True
CMS_PLACEHOLDER_CONF = {}
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'django.db.backends.sqlite3',
'HOST': 'localhost',
'NAME': 'project.db',
'PASSWORD': '',
'PORT': '',
'USER': ''
}
}
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
| [
"akuhinnocent2016@gmail.com"
] | akuhinnocent2016@gmail.com |
56288e8ec12376f3caaa2bf6670c4052cd21c2ce | b483c598fa375e9af02348960f210b9f482bd655 | /pythonbrasil/exercicios/repeticao/ER resp 39.py | 3530297090dcd38af2039607fba79850fa812d48 | [
"MIT"
] | permissive | brunofonsousa/python | 6f766d08bf193180ea9a4903cb93ffd167db588d | 8f2f26c77015c0baaa76174e004406b4115272c7 | refs/heads/master | 2022-09-30T14:58:01.080749 | 2020-06-08T09:55:35 | 2020-06-08T09:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | '''
Faça um programa que leia dez conjuntos de dois valores, o primeiro representando
o número do aluno e o segundo representando a sua altura em centímetros. Encontre
o aluno mais alto e o mais baixo. Mostre o número do aluno mais alto e o número do
aluno mais baixo, junto com suas alturas.
'''
quant_alunos = 10
cod_maisAlto = 0
cod_maisBaixo = 0
mais_alto = 0
mais_baixo = 5
for i in range(quant_alunos):
print('')
cod_aluno = int(input('Digite o número do aluno: '))
altura = float(input('Altura: '))
if altura > mais_alto:
cod_maisAlto = cod_aluno
mais_alto = altura
if altura < mais_baixo:
cod_maisBaixo = cod_aluno
mais_baixo = altura
print('')
print('ALUNOS: ')
print('O aluno mais alto foi o nº %i com a altura de %0.2f'%(cod_maisAlto,mais_alto))
print('O aluno mais baixo foi o nº %i com a altura de %0.2f'%(cod_maisBaixo,mais_baixo))
| [
"brunofonsousa@gmail.com"
] | brunofonsousa@gmail.com |
17d4fe129c3e9b0803fddf4b72bac10810ddf323 | 02fb101eac8e5f9cc5d5c5d807601787c339b7ec | /laser_geom_tool/laser_borders.py | b12a549be5e346429c491f424028c458a8104f60 | [
"BSD-2-Clause"
] | permissive | awesomebytes/etherdream_tools | ad8c7f0e09318ab47ea9d1aaa8fec40f9e49f119 | 7ba3ba4828f43f323d76064ec0432b25b85f6e58 | refs/heads/master | 2021-01-01T20:12:17.858584 | 2015-10-12T21:25:05 | 2015-10-12T21:25:05 | 24,540,681 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,416 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'laser_borders.ui'
#
# Created: Wed Dec 24 03:09:48 2014
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString.fromUtf8 is not available in this PyQt build; fall back to
    # the identity function so the generated code still runs.
    _fromUtf8 = lambda s: s
class Ui_laser_main(object):
    """pyuic4-generated UI for the 'Laser geometry setter' main window.

    Per the file header this code is regenerated from laser_borders.ui;
    avoid hand edits beyond comments.
    """

    def setupUi(self, laser_main):
        """Create all widgets of *laser_main* and wire their signals."""
        laser_main.setObjectName(_fromUtf8("laser_main"))
        laser_main.resize(800, 600)
        self.centralwidget = QtGui.QWidget(laser_main)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.button_send = QtGui.QPushButton(self.centralwidget)
        self.button_send.setGeometry(QtCore.QRect(310, 420, 115, 31))
        self.button_send.setObjectName(_fromUtf8("button_send"))
        # Corner-coordinate spin boxes: range [-1, 1], 3 decimals,
        # step 0.001.  tl/tr/bl/br = top-left/top-right/bottom-left/
        # bottom-right; _x/_y are the two coordinates of each corner.
        self.tl_x = QtGui.QDoubleSpinBox(self.centralwidget)
        self.tl_x.setGeometry(QtCore.QRect(120, 160, 77, 31))
        self.tl_x.setDecimals(3)
        self.tl_x.setMinimum(-1.0)
        self.tl_x.setMaximum(1.0)
        self.tl_x.setSingleStep(0.001)
        self.tl_x.setProperty("value", -1.0)
        self.tl_x.setObjectName(_fromUtf8("tl_x"))
        self.tl_y = QtGui.QDoubleSpinBox(self.centralwidget)
        self.tl_y.setGeometry(QtCore.QRect(240, 160, 77, 31))
        self.tl_y.setDecimals(3)
        self.tl_y.setMinimum(-1.0)
        self.tl_y.setMaximum(1.0)
        self.tl_y.setSingleStep(0.001)
        self.tl_y.setProperty("value", 1.0)
        self.tl_y.setObjectName(_fromUtf8("tl_y"))
        self.bl_x = QtGui.QDoubleSpinBox(self.centralwidget)
        self.bl_x.setGeometry(QtCore.QRect(120, 270, 77, 31))
        self.bl_x.setDecimals(3)
        self.bl_x.setMinimum(-1.0)
        self.bl_x.setMaximum(1.0)
        self.bl_x.setSingleStep(0.001)
        self.bl_x.setProperty("value", -1.0)
        self.bl_x.setObjectName(_fromUtf8("bl_x"))
        self.bl_y = QtGui.QDoubleSpinBox(self.centralwidget)
        self.bl_y.setGeometry(QtCore.QRect(240, 270, 77, 31))
        self.bl_y.setDecimals(3)
        self.bl_y.setMinimum(-1.0)
        self.bl_y.setMaximum(1.0)
        self.bl_y.setSingleStep(0.001)
        self.bl_y.setProperty("value", -1.0)
        self.bl_y.setObjectName(_fromUtf8("bl_y"))
        self.tr_x = QtGui.QDoubleSpinBox(self.centralwidget)
        self.tr_x.setGeometry(QtCore.QRect(430, 160, 77, 31))
        self.tr_x.setDecimals(3)
        self.tr_x.setMinimum(-1.0)
        self.tr_x.setMaximum(1.0)
        self.tr_x.setSingleStep(0.001)
        self.tr_x.setProperty("value", 1.0)
        self.tr_x.setObjectName(_fromUtf8("tr_x"))
        self.tr_y = QtGui.QDoubleSpinBox(self.centralwidget)
        self.tr_y.setGeometry(QtCore.QRect(560, 160, 77, 31))
        self.tr_y.setDecimals(3)
        self.tr_y.setMinimum(-1.0)
        self.tr_y.setMaximum(1.0)
        self.tr_y.setSingleStep(0.001)
        self.tr_y.setProperty("value", 1.0)
        self.tr_y.setObjectName(_fromUtf8("tr_y"))
        self.br_x = QtGui.QDoubleSpinBox(self.centralwidget)
        self.br_x.setGeometry(QtCore.QRect(430, 270, 77, 31))
        self.br_x.setDecimals(3)
        self.br_x.setMinimum(-1.0)
        self.br_x.setMaximum(1.0)
        self.br_x.setSingleStep(0.001)
        self.br_x.setProperty("value", 1.0)
        self.br_x.setObjectName(_fromUtf8("br_x"))
        self.br_y = QtGui.QDoubleSpinBox(self.centralwidget)
        self.br_y.setGeometry(QtCore.QRect(560, 270, 77, 31))
        self.br_y.setDecimals(3)
        self.br_y.setMinimum(-1.0)
        self.br_y.setMaximum(1.0)
        self.br_y.setSingleStep(0.001)
        self.br_y.setProperty("value", -1.0)
        self.br_y.setObjectName(_fromUtf8("br_y"))
        # Static labels naming each corner and its x/y columns.
        self.text_tl = QtGui.QLabel(self.centralwidget)
        self.text_tl.setGeometry(QtCore.QRect(180, 90, 80, 21))
        self.text_tl.setObjectName(_fromUtf8("text_tl"))
        self.text_tr = QtGui.QLabel(self.centralwidget)
        self.text_tr.setGeometry(QtCore.QRect(500, 90, 80, 21))
        self.text_tr.setObjectName(_fromUtf8("text_tr"))
        self.text_bl = QtGui.QLabel(self.centralwidget)
        self.text_bl.setGeometry(QtCore.QRect(159, 220, 111, 21))
        self.text_bl.setObjectName(_fromUtf8("text_bl"))
        self.text_br = QtGui.QLabel(self.centralwidget)
        self.text_br.setGeometry(QtCore.QRect(479, 220, 111, 21))
        self.text_br.setObjectName(_fromUtf8("text_br"))
        self.label = QtGui.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(150, 130, 80, 21))
        self.label.setObjectName(_fromUtf8("label"))
        self.label_2 = QtGui.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(270, 130, 80, 21))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3 = QtGui.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(580, 130, 80, 21))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.label_4 = QtGui.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(460, 130, 80, 21))
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.label_5 = QtGui.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(270, 240, 80, 21))
        self.label_5.setObjectName(_fromUtf8("label_5"))
        self.label_6 = QtGui.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(150, 240, 80, 21))
        self.label_6.setObjectName(_fromUtf8("label_6"))
        self.label_7 = QtGui.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(580, 240, 80, 21))
        self.label_7.setObjectName(_fromUtf8("label_7"))
        self.label_8 = QtGui.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(460, 240, 80, 21))
        self.label_8.setObjectName(_fromUtf8("label_8"))
        # DAC connection controls: target IP field and search button.
        self.edit_ip = QtGui.QLineEdit(self.centralwidget)
        self.edit_ip.setGeometry(QtCore.QRect(370, 20, 241, 31))
        self.edit_ip.setObjectName(_fromUtf8("edit_ip"))
        self.button_searchdac = QtGui.QPushButton(self.centralwidget)
        self.button_searchdac.setGeometry(QtCore.QRect(200, 20, 115, 31))
        self.button_searchdac.setObjectName(_fromUtf8("button_searchdac"))
        self.label_9 = QtGui.QLabel(self.centralwidget)
        self.label_9.setGeometry(QtCore.QRect(60, 320, 501, 31))
        self.label_9.setObjectName(_fromUtf8("label_9"))
        self.label_10 = QtGui.QLabel(self.centralwidget)
        self.label_10.setGeometry(QtCore.QRect(110, 350, 491, 31))
        self.label_10.setObjectName(_fromUtf8("label_10"))
        laser_main.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(laser_main)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 29))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        laser_main.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(laser_main)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        laser_main.setStatusBar(self.statusbar)
        self.retranslateUi(laser_main)
        # Any change to a corner spin box triggers button_send.click, i.e.
        # coordinates are re-sent immediately on edit.
        QtCore.QObject.connect(self.tl_x, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.tl_y, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.tr_x, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.tr_y, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.bl_x, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.bl_y, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.br_x, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QObject.connect(self.br_y, QtCore.SIGNAL(_fromUtf8("valueChanged(double)")), self.button_send.click)
        QtCore.QMetaObject.connectSlotsByName(laser_main)

    def retranslateUi(self, laser_main):
        """Assign all user-visible strings (translation hook)."""
        laser_main.setWindowTitle(QtGui.QApplication.translate("laser_main", "Laser geometry setter", None, QtGui.QApplication.UnicodeUTF8))
        self.button_send.setText(QtGui.QApplication.translate("laser_main", "Send Coords", None, QtGui.QApplication.UnicodeUTF8))
        self.text_tl.setText(QtGui.QApplication.translate("laser_main", "Top Left", None, QtGui.QApplication.UnicodeUTF8))
        self.text_tr.setText(QtGui.QApplication.translate("laser_main", "Top Right", None, QtGui.QApplication.UnicodeUTF8))
        self.text_bl.setText(QtGui.QApplication.translate("laser_main", "Bottom left", None, QtGui.QApplication.UnicodeUTF8))
        self.text_br.setText(QtGui.QApplication.translate("laser_main", "Bottom right", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("laser_main", "x", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("laser_main", "y", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("laser_main", "y", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("laser_main", "x", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("laser_main", "y", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate("laser_main", "x", None, QtGui.QApplication.UnicodeUTF8))
        self.label_7.setText(QtGui.QApplication.translate("laser_main", "y", None, QtGui.QApplication.UnicodeUTF8))
        self.label_8.setText(QtGui.QApplication.translate("laser_main", "x", None, QtGui.QApplication.UnicodeUTF8))
        self.edit_ip.setText(QtGui.QApplication.translate("laser_main", "192.168.0.10", None, QtGui.QApplication.UnicodeUTF8))
        self.button_searchdac.setText(QtGui.QApplication.translate("laser_main", "Search DAC", None, QtGui.QApplication.UnicodeUTF8))
        self.label_9.setText(QtGui.QApplication.translate("laser_main", "Note: Messages will be sent when modifying the Spin Boxes", None, QtGui.QApplication.UnicodeUTF8))
        self.label_10.setText(QtGui.QApplication.translate("laser_main", "and when hitting \"Send Coords\" to the IP in the EditBox.", None, QtGui.QApplication.UnicodeUTF8))
| [
"sammypfeiffer@gmail.com"
] | sammypfeiffer@gmail.com |
fef1cd77d6d0cf5499dee830c7afdd90b66cf0ef | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/PyBox/pybox2d-android/examples/test_OneSidedPlatform.py | 9ba5bf332a3936891263a8e35d4532d9f54a5d87 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:64a4150f4e7993b64261e93365583e48ee17c68fcd142d236d7ba99977f09b72
size 2957
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
4f7492ea4905b4c55ade3b3b0e09f7e5896f785b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/is.py | 601bed438f1b9697adf57cb74ad6b18e1aad1ab1 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import sys
def printFunction(lineRemaining):
	# Print the tokens enclosed between a pair of standalone '"' tokens,
	# joined with single spaces.  A pair of quotes with nothing between
	# them prints an empty line; anything else prints nothing.
	# (Python 2 print-statement syntax.)
	if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
		if len(lineRemaining) > 2:
			#data to print
			lineRemaining = lineRemaining[1:-1]
			print ' '.join(lineRemaining)
		else:
			print
def main(fileName):
	# Interpret *fileName* line by line: each line must start with the
	# keyword 'IS'; the remaining tokens are handed to printFunction.
	with open(fileName) as f:
		for line in f:
			data = line.split()
			if data[0] == 'IS':
				printFunction(data[1:])
			else:
				# First line not starting with 'IS' aborts the program.
				print 'ERROR'
				return
if __name__ == '__main__':
	# Usage: script.py <source-file>
	main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
9e0ef62bd92f99ba6f48e52c9835f10b659ace75 | d2c4151eff768af64946ababc2e41c13d8973cd3 | /ABC171/a.py | 8da69726a2404106ebe552dc9841fe44d4fce822 | [] | no_license | Intel-out-side/AtCoder | 2de19b71981247135432aed2d6d9c2a16c3ab7f0 | 0c419d2df15fff02032432cb1b1323612484e16e | refs/heads/master | 2022-06-23T04:21:12.886072 | 2022-06-13T14:39:07 | 2022-06-13T14:39:07 | 235,240,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | letter = input()
if "a" <= letter <= "z":
print("a")
elif "A" <= letter <= "Z":
print("A")
| [
"so.eng.eng.1rou@gmail.com"
] | so.eng.eng.1rou@gmail.com |
c8fc07a03a64e1410fc4b6bf2d4fec67f8a9c97d | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3919.py | 1c304f9ed5704b5147c1697d18850afa52e338ef | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | Input = open( "C:\Users\josip kotarac\Desktop\A-small-attempt1.in", "r" )
Output = open( "C:\Users\josip kotarac\Desktop\output.txt", "w" )
array = []
chosenRow=0
maxRows = 4
possibleCards=[]
foundCard=0
cardFoundNumber=0
caseNum =0
setDone = False
for line in Input.readlines():
line = line.split()
array.append( line )
for index in range(len(array)):
if (len(array[index])==1):
chosenRow=int(array[index][0])
indexInRow = 0
foundCard = 0
cardFoundNumber = 0
elif (len(array[index])==4):
if (indexInRow==chosenRow-1 and not possibleCards):
possibleCards=array[index]
elif (indexInRow==chosenRow-1 and possibleCards):
caseNum+=1
for card in array[index]:
for oldCard in possibleCards:
if (card == oldCard):
cardFoundNumber+=1
foundCard = card
if (cardFoundNumber>1):
Output.write( "Case #"+ str(caseNum) +": Bad magician!\n")
elif (cardFoundNumber==1):
Output.write( "Case #"+ str(caseNum) +": "+foundCard+"\n")
elif (cardFoundNumber==0):
Output.write( "Case #"+ str(caseNum) +": Volunteer cheated!\n")
possibleCards=[]
indexInRow+=1
Output.close()
Input.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2e2e18df159c6ad3d657c307d0904d91462f0f63 | deaf14c242dc5e534e5ce7691e206f9b5305fdb1 | /blog/models.py | 02a89abaf5581be70f4424179254aa806ba755d8 | [] | no_license | Ulorewien/synergee | ac51462cb270efc4d0ef9bf22b1a946c46b6cc1d | 22b285393ff28e8cab8b228c12087bde22280fe7 | refs/heads/main | 2023-08-12T19:15:25.171103 | 2021-10-04T15:47:32 | 2021-10-04T15:47:32 | 332,399,492 | 0 | 1 | null | 2021-01-24T08:21:59 | 2021-01-24T08:21:58 | null | UTF-8 | Python | false | false | 559 | py | from django.db import models
from group.models import Member,Interest
class Post(models.Model):
    """
    Blog post written by a Member and filed under an Interest category.
    """
    # Deleting the Member or Interest cascades to their posts (CASCADE).
    author = models.ForeignKey(Member,on_delete=models.CASCADE)
    category = models.ForeignKey(Interest,on_delete=models.CASCADE)
    title = models.CharField(max_length=128)
    content = models.TextField()
    # Stamped once when the row is first saved (auto_now_add).
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Newest posts first by default.
        ordering = ('-created_at',)
    def __str__(self):
        return f"Posted By {self.author.first_name}"
| [
"shubhpathak07@gmail.com"
] | shubhpathak07@gmail.com |
95b84bdb31fb8d15a6125e393c6a3c410d477038 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_stationers.py | 8ecfa5c4d17eaa1e85497e432cc4e12608092d92 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _STATIONERS():
def __init__(self,):
self.name = "STATIONERS"
self.definitions = stationer
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['stationer']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
dd03466d913bf653258193e0979efba7236aa71e | b28b8dc227d9e6b015eeb19db9cb97ae067a3301 | /DivideTwoIntegers.py | f01458a232870b5efe7608a4457339f49dccd822 | [] | no_license | nbrahman/LeetCode | d5dd267e1b64d6d5ac6c7a312f286faa043a3444 | 3f94952eba038ca07ecd57f5dc51889daf7b663a | refs/heads/master | 2021-01-22T10:13:13.769444 | 2017-02-14T22:00:29 | 2017-02-14T22:00:29 | 81,993,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | '''
Divide two integers without using multiplication, division and mod operator.
If it is overflow, return MAX_INT.
'''
class Solution(object):
    # Kept for backward compatibility with the original interface; the
    # rewritten algorithm stores the last remainder in intRemainder.
    intRemainder = 0
    intDivisor = 0

    def divide(self, dividend, divisor):
        """
        Integer-divide dividend by divisor, truncating toward zero,
        without using multiplication, division or the mod operator.

        The original implementation recursed with ``self.intDivisor``
        (always 0), which looped forever; this version repeatedly
        subtracts the largest left-shifted multiple of the divisor,
        O(log^2 n) overall, and clamps the single 32-bit overflow case
        (INT_MIN / -1) to INT_MAX as the problem requires.

        :type dividend: int
        :type divisor: int
        :rtype: int
        :raises ZeroDivisionError: if divisor is 0
        """
        if divisor == 0:
            raise ZeroDivisionError('integer division by zero')
        INT_MAX = 2147483647
        INT_MIN = -2147483648
        # Only overflow case in 32-bit signed arithmetic.
        if dividend == INT_MIN and divisor == -1:
            return INT_MAX
        negative = (dividend < 0) != (divisor < 0)
        remaining, base = abs(dividend), abs(divisor)
        quotient = 0
        while remaining >= base:
            # Find the largest base << k that still fits in `remaining`.
            shifted, multiple = base, 1
            while (shifted << 1) <= remaining:
                shifted <<= 1
                multiple <<= 1
            remaining -= shifted
            quotient += multiple
        Solution.intRemainder = remaining
        return -quotient if negative else quotient
if __name__ == '__main__':
    # Interactive smoke test: read two integers and print their quotient.
    num1 = input ("enter the numerator: ")
    num2 = input ("enter the denominator: ")
    result = Solution().divide(int(num1), int(num2))
    print (result)
| [
"nikhil.brahmankar@gmail.com"
] | nikhil.brahmankar@gmail.com |
36be193c3b32fb7370569d601d089401790b81ab | 8a936bd3e28c9ec116244df37d3ba5aedd48c9cc | /dashboard/internal/pages/register.py | 93615c354eb570ed066b045a84732334169e0466 | [
"Apache-2.0"
] | permissive | AssassinDev422/PHP_Minera | a66bd23610cbcfd43545e5b6a689c2c1b1248814 | f507dbcc4b4609990f14995754d54f42dcaaa618 | refs/heads/master | 2020-03-15T13:16:30.708491 | 2018-05-04T16:20:05 | 2018-05-04T16:20:05 | 132,162,749 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import os
import sys
import json
import requests
import smtplib
def main():
	# Register a new customer account from the command line.
	# argv: 1=session id, 2=email, 3=username, 4=password, 5=phone,
	#       6=hash power.  No argument validation is performed.
	sid = sys.argv[1]
	email = sys.argv[2]
	username = sys.argv[3]
	password = sys.argv[4]
	phone_number = sys.argv[5]
	hashpower = sys.argv[6]
	# SECURITY NOTE(review): the password is sent as plain form data over
	# http:// (no TLS); the endpoint should be switched to https.
	reg_user = {'referrals':0,'btcavailible':0,'username':username,'email':email,'phone':phone_number,'password':password,'btchashpower':hashpower,'btcaddress':'.','ltcaddress':'.','dashaddress':'.','accounttype':'customer'}
	r = requests.post('http://api.msunicloud.com:2404/users/', data = reg_user, cookies={'sid':sid})
	t = r.json()
	# Extract the created user's id from the JSON response (currently unused).
	_id = t['id']
if __name__ == '__main__':
	main()
| [
"saas.exp7@gmail.com"
] | saas.exp7@gmail.com |
8383a2063f9f853bc4844b765c99f5e3d10d5d33 | 6e5b8ef12d56a11aa0b68fd5a3700a4be0541acd | /.history/nets/yolov5_20210819163229.py | 028344d8484e367faf34fcceef742e37f85afac1 | [] | no_license | Arcofcosmos/MyYolov5_Pytorch | d8fc7f8398249aeb996fa4b07c3ecdc6fedd2308 | 07bcb7e3b1dd32ec25171d4aa860d462f5a01078 | refs/heads/main | 2023-07-15T16:07:52.624808 | 2021-08-21T07:13:42 | 2021-08-21T07:13:42 | 398,490,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,494 | py | '''
Author: TuZhou
Version: 1.0
Date: 2021-08-18 15:24:52
LastEditTime: 2021-08-19 16:32:29
LastEditors: TuZhou
Description:
FilePath: \my_yolov5\nets\yolov5.py
'''
import torch
import torch.nn as nn
from pathlib import Path
import yaml
import math
from CSPDarknet53 import preprocess_backbone, CSP2, CBL
#------------------------------#
# Compute an output channel count
#------------------------------#
def make_divisible(x, divisor):
    """Round ``x`` up to the nearest whole multiple of ``divisor``."""
    multiples = math.ceil(x / divisor)
    return multiples * divisor
#------------------------------#
# Parse the model yaml file and
# extract the head-section info
#------------------------------#
def process_yaml(yaml_path = './nets/yolov5s.yaml'):
    """Read a yolov5 model yaml and return ``(nc, blocks, filters)``.

    ``nc`` is the class count, ``blocks`` the depth-scaled repeat counts
    of the C3 layers, and ``filters`` the width-scaled per-layer filter
    specs (Concat layers are skipped).
    """
    with open(Path(yaml_path), 'r') as fh:
        cfg = yaml.safe_load(fh)
    # Global depth / width multipliers and the class count.
    depth_gain = cfg['depth_multiple']
    width_gain = cfg['width_multiple']
    nc = cfg['nc']
    blocks = []
    filters = []
    for layer in cfg['head']:
        kind = layer[2]
        if kind == 'C3':
            blocks.append(layer[1])
        if kind != 'Concat':
            filters.append(layer[3])
    # Scale repeat counts by the depth multiple (minimum one repeat).
    blocks = [max(round(count * depth_gain), 1) if count > 1 else count
              for count in blocks]
    # Scale numeric channel counts by the width multiple, rounded up to a
    # multiple of 8; string entries are left untouched.  Mutates in place.
    for spec in filters:
        if not isinstance(spec[0], str):
            spec[0] = make_divisible(spec[0] * width_gain, 8)
    return nc, blocks, filters
class YoloBody(nn.Module):
    """yolov5 head/neck on top of a CSPDarknet53 backbone; emits three
    detection feature maps (one per scale)."""

    def __init__(self, cfg='yolov5s.yaml', image_channels=3, nc=None, anchors=None): # model, input channels, number of classes
        super().__init__()
        #---------------------------------------------------#
        #   Build the CSPdarknet53 backbone.
        #   For a 608x608 input image it yields three
        #   effective feature layers with shapes:
        #   76,76,128
        #   38,38,256
        #   19,19,512
        #---------------------------------------------------#
        self.backbone = preprocess_backbone(None)
        self.in_channel = 512
        # nc / repeat counts / filter specs come from the yaml config.
        self.nc, self.number_blocks, self.filters_info = process_yaml()
        # 3 anchors per scale, each predicting (nc + 5) values.
        self.result_channels = 3 * (self.nc + 5)
        #---------------------------------------------------#
        #   First head feature-fusion stage
        #---------------------------------------------------#
        #filter: 256x512x1x1, stride = 1
        self.conv1 = CBL(self.in_channel, self.filters_info[0][0], self.filters_info[0][1], self.filters_info[0][2])
        self.upsample1 = nn.Upsample(scale_factor=self.filters_info[1][1], mode=self.filters_info[1][2])
        # torch.cat happens in forward(); the first CSP2 sits after the
        # concat, so its input channel count is doubled.
        self.block1 = CSP2(self.filters_info[0][0]*2, self.filters_info[2][0], self.number_blocks[0])
        self.conv2 = CBL(self.filters_info[2][0], self.filters_info[3][0], self.filters_info[3][1], self.filters_info[3][2])
        self.upsample2 = nn.Upsample(scale_factor=self.filters_info[4][1], mode=self.filters_info[4][2])
        # concatenated with the backbone's largest feature map in forward()
        self.block2 = CSP2(self.filters_info[3][0]*2, self.filters_info[5][0], self.number_blocks[1])
        self.single_conv1 = nn.Conv2d(self.filters_info[5][0], self.result_channels, 1, 1)
        #---------------------------------------------------#
        #   Second head feature-fusion stage
        #---------------------------------------------------#
        # NOTE(review): block3/block4 pass an extra positional argument to
        # CSP2 compared with block1/block2 -- confirm CSP2's signature.
        self.conv3 = CBL(self.filters_info[5][0], self.filters_info[6][0], self.filters_info[6][1], self.filters_info[6][2])
        self.block3 = CSP2(self.filters_info[6][0]*2, self.filters_info[7][0], self.filters_info[7][1], self.number_blocks[2])
        self.single_conv2 = nn.Conv2d(self.filters_info[7][0], self.result_channels, 1, 1)
        #---------------------------------------------------#
        #   Third head feature-fusion stage
        #---------------------------------------------------#
        self.conv4 = CBL(self.filters_info[7][0], self.filters_info[8][0], self.filters_info[8][1], self.filters_info[8][2])
        self.block4 = CSP2(self.filters_info[8][0]*2, self.filters_info[9][0], self.filters_info[9][1], self.number_blocks[3])
        self.single_conv3 = nn.Conv2d(self.filters_info[9][0], self.result_channels, 1, 1)

    def forward(self, x):
        # backbone: feature maps ordered large to small from x2 down to x0
        x2, x1, x0 = self.backbone(x)
        # first fusion towards the 76x76 feature map
        y0 = self.conv1(x0) # saved for later fusion
        x0 = torch.cat([self.upsample1(y0), x1], 1)
        y1 = self.conv2(self.block1(x0)) # saved for later fusion
        out0 = self.block2(torch.cat([self.upsample2(y1), x2], 1))
        # second fusion: 38x38 feature map
        out1 = self.block3(torch.cat([self.conv3(out0), y1], 1))
        # third fusion: 19x19 feature map
        out2 = self.block4(torch.cat([self.conv4(out1), y0], 1))
        # head 1x1 convs map each scale to the final output channel count
        out0 = self.single_conv1(out0)
        out1 = self.single_conv2(out1)
        out2 = self.single_conv3(out2)
        # outputs ordered smallest to largest spatial size, 19x19 to 76x76
        return out2, out1, out0
if __name__ == "__main__":
    # Smoke test: build the model and push one dummy 608x608 RGB batch
    # through it, printing the smallest-scale output.
    model = YoloBody()
    x = torch.rand(1, 3, 608, 608)
    #torch.unsqueeze(x, )
    out1, out2, out3 = model(x)
    print(out1)
    #print(out1.shape)
| [
"tz2062750487@163.com"
] | tz2062750487@163.com |
dc74e72fedacff9e9cbca0a0d2f5af2dcede9eb3 | d1205c39ad5febd5f73526cb6eda5fd413e998a3 | /ambra_sdk/storage/request.py | e1ac6c7927dbe26c84ab730fc3b9daedbefa09a7 | [
"Apache-2.0"
] | permissive | ppolk-nocimed/sdk-python | 77364972c5559a43787f81373327b86204f9ec39 | 165a8049bbc38a201ef27f60a8ba1b980c1c9a64 | refs/heads/master | 2022-11-22T23:48:22.444967 | 2020-07-17T16:27:05 | 2020-07-17T16:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | from enum import Enum
from io import BufferedReader
from typing import TYPE_CHECKING, Any, Dict, Mapping, NamedTuple, Optional
from requests import Response
from ambra_sdk.exceptions.storage import AmbraResponseException
from ambra_sdk.storage.response import check_response
if TYPE_CHECKING:
from ambra_sdk.storage.storage import Storage # NOQA:WPS433
class StorageMethod(Enum):
    """HTTP verbs used when talking to the storage API."""
    get = 'GET'
    post = 'POST'
    delete = 'DELETE'
class PreparedRequest(NamedTuple):
    """An immutable, fully-specified storage request, ready to execute."""
    # This is some sort of private field.
    # Users should not have direct access to this field,
    # but we cannot use a _name for a NamedTuple attribute.
    storage_: 'Storage' # NOQA WPS1120
    url: str
    method: StorageMethod
    # Mapping is used (rather than Dict) for its covariant value type.
    errors_mapping: Optional[Mapping[int, AmbraResponseException]] = None
    params: Optional[Dict[str, Any]] = None # NOQA:WPS110
    files: Optional[Dict[str, BufferedReader]] = None
    headers: Optional[Dict[str, str]] = None
    data: Optional[Any] = None # NOQA:WPS110
    stream: Optional[bool] = None
    def execute(self) -> Response:
        """Execute the prepared request.

        On sid problems a new sid is acquired and the
        request is retried (via storage_.retry_with_new_sid).

        :return: response object
        """
        response: Response = self.storage_.retry_with_new_sid(
            self.execute_once,
        )
        return response # NOQA:WPS331
    def execute_once(self) -> Response:
        """Execute the prepared request exactly once (no sid retry).

        :return: response object
        :raises RuntimeError: Unknown request method
        """
        # Build kwargs only from the fields that were actually set.
        request_kwargs: Dict[str, Any] = {}
        if self.params is not None:
            request_kwargs['params'] = self.params
        if self.data is not None:
            request_kwargs['data'] = self.data
        if self.headers is not None:
            request_kwargs['headers'] = self.headers
        if self.files is not None:
            request_kwargs['files'] = self.files
        if self.stream is not None:
            request_kwargs['stream'] = self.stream
        # Dispatch on the request method.
        if self.method == StorageMethod.get:
            response = self.storage_.get(self.url, **request_kwargs)
        elif self.method == StorageMethod.post:
            response = self.storage_.post(self.url, **request_kwargs)
        elif self.method == StorageMethod.delete:
            response = self.storage_.delete(self.url, **request_kwargs)
        else:
            raise RuntimeError(
                'Unknown storage request method: {method}'.format(
                    method=self.method,
                ),
            )
        # Map HTTP error codes to library exceptions before returning.
        return check_response(
            response,
            self.url,
            errors_mapping=self.errors_mapping,
        )
| [
"akapustin@ambrahealth.com"
] | akapustin@ambrahealth.com |
580ecfc67a9c4367efef20f46c52084ed5d78cec | b4afe69978e813aa0457c1e8156f11efd0e686df | /docker/Dockerfile.fluentd | 0ca694fe509592428fb946223459336d90d25986 | [
"Apache-2.0"
] | permissive | rachit491/voltha | e467d828620a8c03533bddf8f82dc225575b271e | 01ca080bff5f58f7d4d154704dd411711cf6455b | refs/heads/master | 2021-07-25T11:04:06.911364 | 2017-11-03T13:02:11 | 2017-11-06T20:07:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | fluentd | #!/usr/bin/env python
#
# Copyright 2016 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Dockerfile to create the Fluentd forwarders used inside cord-voltha
FROM fluent/fluentd
COPY fluentd_config/fluent.conf /fluentd/etc/
COPY fluentd_config/fluent-agg.conf /fluentd/etc/
| [
"knursimu@ciena.com"
] | knursimu@ciena.com |
27fc91b8f668e22d5b6a7abe81676a9310fb4c0c | 0fa96aa3b1ee5cf752e20bad98ef02785c369225 | /matresdev/matresdev/simiter/sim_pstudy/sim_pstudy.py | b60bc161a1fcbf40db75abeb464385e1dd9b61fc | [] | no_license | simvisage/simvisage | 7a56ce0d13211513a86e3631db1b55dc62e85986 | acb2a8eb8b6caa57c1e9e15f724a2b43843c7b10 | refs/heads/master | 2020-04-04T07:32:47.598636 | 2018-12-10T15:10:43 | 2018-12-10T15:10:43 | 3,583,342 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,420 | py | #-------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
# ve_study
# Created on Feb 1, 2010 by: rch
""" Application window. """
from math import exp, e, sqrt, log, pi
import os
import pickle
import string
from matplotlib.figure import Figure
from numpy import array, linspace, frompyfunc, zeros, column_stack, \
log as ln, append, logspace, hstack, sign, trapz, mgrid, c_, \
zeros, arange, ix_
from pyface.api import ImageResource
from pyface.api import confirm, error, information, warning, YES, NO, CANCEL
from scipy.optimize import brentq, newton
from scipy.special import erf, gamma
from scipy.stats import norm, weibull_min, uniform
from traits.api import \
HasTraits, Float, Property, cached_property, \
Instance, List, on_trait_change, Int, Tuple, Bool, \
DelegatesTo, Event, Str, Button, Dict, Array, Any, \
implements, File
from traitsui.api import \
View, Item, Tabbed, VGroup, HGroup, ModelView, HSplit, VSplit, \
CheckListEditor, EnumEditor, TableEditor, TabularEditor, Handler
from traitsui.file_dialog \
import open_file, save_file, FileInfo, TextInfo
from traitsui.menu import Action, CloseAction, HelpAction, Menu, \
MenuBar, NoButtons, Separator, ToolBar
from traitsui.menu import OKButton
from traitsui.tabular_adapter \
import TabularAdapter
from util.traits.editors.mpl_figure_editor import MPLFigureEditor
from i_sim_array import ISimArray
from i_sim_model import ISimModel
import os.path as path
from sim_array import SimArray
from sim_array_view import SimArrayView
from sim_factor import \
SimFactor, SimFloatFactor, SimIntFactor, SimEnumFactor
from sim_output import SimOut
from sim_todo import ToDo
#os.environ['ETS_TOOLKIT'] = 'qt4'
#
TITLESTRING = 'simvisage.sim_array'
class SimPStudyController(Handler):
    """traitsui Handler driving new/open/save/exit actions of a SimPStudy
    window, including the dirty-state save prompts and the title bar."""

    def init(self, ui_info):
        '''Set the title string
        '''
        self._set_title_string(ui_info, ignore_dirty=True)
    #-------------------------------------------------------------------------
    # Public Controller interface
    #-------------------------------------------------------------------------
    def new_study(self, ui_info):
        # Offer to save a modified study, then reset to an empty one.
        if ui_info.object.dirty:
            # discard / save dialog
            answer = confirm(ui_info.ui.control, 'Study modified. Save it?',
                             title='New study',
                             cancel=False,
                             default=YES)
            if answer == YES:
                self.save_study(ui_info)
        ui_info.object.file_path = ''
        ui_info.object.new()
        self._set_title_string(ui_info)
    def exit_study(self, ui_info):
        # Returns True when the window may close, False on user cancel.
        if ui_info.object.dirty:
            # discard / save dialog
            answer = confirm(ui_info.ui.control, 'Save study before exiting?',
                             title='Close study',
                             cancel=True,
                             default=YES)
            if answer == YES:
                self.save_study(ui_info)
                self._on_close(ui_info)
                return True
            elif answer == NO:
                self._on_close(ui_info)
                return True
            else:
                return False
        else:
            self._on_close(ui_info)
            return True
    # def close(self, ui_info, is_ok ):
    # is_ok = self.exit_study( ui_info )
    # print 'IS OK', is_ok
    # super( SimPStudyController, self ).close( ui_info, is_ok )
    # self._on_close( ui_info )
    def open_study(self, ui_info):
        # Offer to save a modified study, then load a *.pst file.
        if ui_info.object.dirty:
            # discard / save dialog
            answer = confirm(ui_info.ui.control, 'Study modified. Save it?',
                             title='Open study',
                             cancel=True,
                             default=YES)
            if answer == YES:
                self.save_study(ui_info)
            elif answer == CANCEL:
                return
        file_name = open_file(filter=['*.pst'],
                              extensions=[FileInfo(), TextInfo()])
        if file_name != '':
            ui_info.object.load(file_name)
            ui_info.object.file_path = file_name
            self._set_title_string(ui_info)
    def save_study(self, ui_info):
        # Save to the current path, asking for one if none is set yet.
        if ui_info.object.file_path == '':
            file_name = save_file(filter=['*.pst'],
                                  extensions=[FileInfo(), TextInfo()])
            if file_name == '':
                return
        else:
            file_name = ui_info.object.file_path
        ui_info.object.save(file_name)
    def save_study_as(self, ui_info):
        # Always ask for a target file and remember it as the new path.
        file_name = save_file(filter=['*.pst'],
                              extensions=[FileInfo(), TextInfo()])
        if file_name != '':
            ui_info.object.save(file_name)
            ui_info.object.file_path = file_name
            self._set_title_string(ui_info)
    def new_view(self, ui_info):
        # Open an additional view on the study's simulation array.
        new_view = SimArrayView(model=ui_info.object.sim_array)
        new_view.configure_traits() # kind = 'livemodal' )
    def exit_pstudy(self, ui_info):
        '''Close all views and check if everything was saved'''
        # Currently shows a placeholder ToDo dialog only.
        todo = ToDo()
        todo.configure_traits(kind='modal')
    def clear_cache(self, ui_info):
        ui_info.object.clear_cache()
    def about_pstudy(self, ui_info):
        # Placeholder "about" dialog.
        todo = ToDo()
        todo.configure_traits(kind='modal')
    def _set_title_string(self, ui_info, ignore_dirty=False):
        # Compose "<app>: <file> (modified)" and push it to the window.
        if ui_info.object.dirty and not ignore_dirty:
            modified = '(modified)'
        else:
            modified = ''
        if ui_info.object.file_path == '':
            filename = '<unnamed>'
        else:
            filename = ui_info.object.file_path
        # NOTE(review): `filename` is computed but never used -- the title
        # always shows '<unnamed>' even for a saved study; likely a bug.
        title_string = '%s: %s %s' % (TITLESTRING, '<unnamed>', modified)
        ui_info.ui.title = title_string
class SimPStudy(HasTraits):
    """The main application window of the parametric study.

    Wraps a ``SimArray`` instance, tracks a dirty flag for unsaved changes
    and provides pickle-based persistency (new/load/save).
    """

    def __init__(self, **kw):
        super(SimPStudy, self).__init__(**kw)
        # The initialization should not be considered dirty
        # therefore set the flag to indicate unsaved study to false
        #
        self.dirty = False

    # The parametric-study array edited by this window.
    sim_array = Instance(SimArray)

    def _sim_array_default(self):
        return SimArray()

    # Convenience property forwarding the model to the array.
    # NOTE(review): only a setter is defined; reading ``sim_model`` (as
    # _get_file_base_name does) relies on a Traits-generated getter --
    # confirm this works with the installed Traits version.
    sim_model = Property()

    def _set_sim_model(self, value):
        self.sim_array.sim_model = value

    def __getitem__(self, factor_slices):
        '''Direct access to the sim_array.
        '''
        return self.sim_array[factor_slices]

    # ---------------------------------------------------------------
    # PERSISTENCY
    # ---------------------------------------------------------------
    file_base_name = Property()

    def _get_file_base_name(self):
        return self.sim_model.__class__.__name__

    file_path = Str('')

    dirty = False

    @on_trait_change('sim_array.changed')
    def _set_dirty(self):
        self.dirty = True

    def new(self):
        """Replace the array with a fresh one that keeps the current model."""
        sim_model = self.sim_array.sim_model
        self.sim_array = SimArray(sim_model=sim_model)
        self.dirty = False

    def load(self, file_name):
        """Unpickle the simulation array from ``file_name``.

        Bug fix: pickle streams are binary -- the file is now opened in
        'rb' mode (text mode only worked on Python 2 with protocol 0) and
        a context manager guarantees the handle is closed on error.
        """
        with open(file_name, 'rb') as f:
            self.sim_array = pickle.load(f)
        self.dirty = False

    def save(self, file_name):
        """Pickle the simulation array to ``file_name`` (binary mode, see load)."""
        with open(file_name, 'wb') as f:
            pickle.dump(self.sim_array, f)
        self.dirty = False

    def clear_cache(self):
        """Drop all cached simulation results held by the array."""
        self.sim_array.clear_cache()

    # Toolbar / menu actions are dispatched by name to the handler
    # (SimPStudyController) methods.
    toolbar = ToolBar(
        Action(name="New Study",
               tooltip='Create a new study',
               image=ImageResource('New-32'),
               action="new_study"),
        Action(name="Open Study",
               tooltip='Open a study',
               image=ImageResource('fileopen-32'),
               action="open_study"),
        Action(name="Save Study",
               tooltip='Save study',
               image=ImageResource('save'),
               action="save_study"),
        Action(name="New View",
               tooltip='Create new view',
               image=ImageResource('new_view'),
               action="new_view"),
        Action(name="Clear Cache",
               tooltip='Reset cache',
               image=ImageResource('reset'),
               action="clear_cache"),
        image_size=(22, 22),
        show_tool_names=False,
        show_divider=True,
        name='study_toolbar')

    menubar = MenuBar(Menu(Action(name="&New",
                                  action="new_study"),
                           Action(name="&Open",
                                  action="open_study"),
                           Action(name="&Save",
                                  action="save_study"),
                           Action(name="Save &As",
                                  action="save_study_as"),
                           Action(name="&Exit",
                                  action="exit_study"),
                           name="&File"),
                      Menu(Action(name="&New View",
                                  action="new_view"),
                           name="&View"),
                      Menu(Action(name="&Clear Cache",
                                  action="clear_cache"),
                           name="&Data"),
                      Menu(Action(name="About PStudy",
                                  action="about_pstudy"),
                           HelpAction,
                           name="Help")
                      )

    view = View(
        Item('sim_array@', show_label=False),
        id='simvisage.simiter.pstudy',
        dock='tab',
        menubar=menubar,
        toolbar=toolbar,
        resizable=True,
        width=0.8,
        height=0.8,
        title='SimVisage: Parametric Study',
        handler=SimPStudyController,
    )
def run():
    """Launch the parametric-study GUI with the default SimModel."""
    # Imported lazily so that importing this module does not require
    # the concrete model.
    from sim_model import SimModel
    pstudy_app = SimPStudy(sim_model=SimModel())
    pstudy_app.configure_traits(kind='live')


if __name__ == '__main__':
    run()
| [
"rostislav.chudoba@rwth-aachen.de"
] | rostislav.chudoba@rwth-aachen.de |
47768f668c9203a6d7ed852b25fa14292f1fc879 | 64b6015e35bd45df2f25ba04bf68a3dc6905e841 | /User_App/migrations/0001_initial.py | 67d0ba5a7c289a319c796789b71d03a7b6e840c0 | [] | no_license | masudurHimel/Django_Reboot | 10904c14f001c90ef971340fcccfb5007846fc59 | 2de54484397eecd14f2a7425636beb17fd5cc12d | refs/heads/master | 2022-07-02T22:47:04.133111 | 2020-05-06T20:46:29 | 2020-05-06T20:46:29 | 257,648,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | # Generated by Django 3.0.3 on 2020-05-05 19:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the UserProfileInfo table.

    UserProfileInfo links one-to-one to the configured auth user model and
    adds an optional portfolio URL and profile picture.
    """

    initial = True

    dependencies = [
        # Depend on whatever AUTH_USER_MODEL points at (swappable dependency).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfileInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('portfolio', models.URLField(blank=True)),
                ('profile_pic', models.ImageField(blank=True, upload_to='profile_pics')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"masudurhimel@gmail.com"
] | masudurhimel@gmail.com |
3c2c3c043bbe9bcb82f9b7622ae402f18bd8c648 | cc5a3fa80d2ae90afc2626e4a82b9a927726dfa0 | /huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/list_sku_inventories_response.py | 124ef51c81c9c4847ae336042351acec75c390e9 | [
"Apache-2.0"
] | permissive | Logan118/huaweicloud-sdk-python-v3 | eca15e9b08bdccef7122e40735d444ddc958efa8 | bb230c03bd00225b9f5780a56adce596e9456420 | refs/heads/master | 2023-07-17T14:57:50.799564 | 2021-08-25T10:40:43 | 2021-08-25T10:40:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
# Auto-generated Huawei Cloud SDK model -- regenerate rather than hand-edit.
class ListSkuInventoriesResponse(SdkResponse):
    """Response model for the ListSkuInventories API call.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    sensitive_list = []

    openapi_types = {
        'sku_inventories': 'list[SkuInventory]'
    }

    attribute_map = {
        'sku_inventories': 'sku_inventories'
    }

    def __init__(self, sku_inventories=None):
        """ListSkuInventoriesResponse - a model defined in huaweicloud sdk"""
        super(ListSkuInventoriesResponse, self).__init__()

        self._sku_inventories = None
        self.discriminator = None

        if sku_inventories is not None:
            self.sku_inventories = sku_inventories

    @property
    def sku_inventories(self):
        """Gets the sku_inventories of this ListSkuInventoriesResponse.

        Details of the SKU inventory query result (see Table 2 of the API reference).

        :return: The sku_inventories of this ListSkuInventoriesResponse.
        :rtype: list[SkuInventory]
        """
        return self._sku_inventories

    @sku_inventories.setter
    def sku_inventories(self, sku_inventories):
        """Sets the sku_inventories of this ListSkuInventoriesResponse.

        Details of the SKU inventory query result (see Table 2 of the API reference).

        :param sku_inventories: The sku_inventories of this ListSkuInventoriesResponse.
        :type: list[SkuInventory]
        """
        self._sku_inventories = sku_inventories

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked in the dict representation.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force UTF-8 default encoding for the dump.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListSkuInventoriesResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
b73808b7ece173bea9a10ffa904af73e32d72221 | bf15a97a377bc49495a8c278cd247387a08361fd | /intersight/models/vnic_fc_adapter_policy_ref.py | e5f956c79f737fa320ee12693e463e1fa5431998 | [
"Apache-2.0"
] | permissive | movinalot/intersight-python | ffcb434e5fdf3f6e857dd967c794a64b2d2e05de | cdc3b082d75eac93b74029ab610e16d3008fdd8c | refs/heads/master | 2020-12-18T15:46:06.780834 | 2019-10-29T00:39:49 | 2019-10-29T00:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-961
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
# Auto-generated Swagger model for the Intersight REST API -- regenerate
# with swagger-codegen rather than hand-editing.
class VnicFcAdapterPolicyRef(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """


    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'moid': 'str',
        'object_type': 'str',
        'selector': 'str'
    }

    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType',
        'selector': 'Selector'
    }

    def __init__(self, moid=None, object_type=None, selector=None):
        """
        VnicFcAdapterPolicyRef - a model defined in Swagger
        """

        self._moid = None
        self._object_type = None
        self._selector = None

        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type
        if selector is not None:
            self.selector = selector

    @property
    def moid(self):
        """
        Gets the moid of this VnicFcAdapterPolicyRef.
        The Moid of the referenced REST resource.

        :return: The moid of this VnicFcAdapterPolicyRef.
        :rtype: str
        """
        return self._moid

    @moid.setter
    def moid(self, moid):
        """
        Sets the moid of this VnicFcAdapterPolicyRef.
        The Moid of the referenced REST resource.

        :param moid: The moid of this VnicFcAdapterPolicyRef.
        :type: str
        """

        self._moid = moid

    @property
    def object_type(self):
        """
        Gets the object_type of this VnicFcAdapterPolicyRef.
        The Object Type of the referenced REST resource.

        :return: The object_type of this VnicFcAdapterPolicyRef.
        :rtype: str
        """
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """
        Sets the object_type of this VnicFcAdapterPolicyRef.
        The Object Type of the referenced REST resource.

        :param object_type: The object_type of this VnicFcAdapterPolicyRef.
        :type: str
        """

        self._object_type = object_type

    @property
    def selector(self):
        """
        Gets the selector of this VnicFcAdapterPolicyRef.
        An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.

        :return: The selector of this VnicFcAdapterPolicyRef.
        :rtype: str
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """
        Sets the selector of this VnicFcAdapterPolicyRef.
        An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.

        :param selector: The selector of this VnicFcAdapterPolicyRef.
        :type: str
        """

        self._selector = selector

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, VnicFcAdapterPolicyRef):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"ategaw@cisco.com"
] | ategaw@cisco.com |
34867fe2ca39a80cbe21f6a9aad4334333975065 | 7e01c039f2427d434a4ef44a1b9dc0ea21db65ba | /venv/lib/python3.8/site-packages/django/db/utils.py | f6d9622ec96308029141d2b3657c2b40829f6be5 | [] | no_license | dmfranz/Spike-exercise | 09f8051163d2a63dfbc3f75da2de0a1bbbbb122d | 83971e95a72d504f629778fece2cdfb953e5d08b | refs/heads/main | 2023-08-23T04:18:43.934471 | 2021-10-11T04:54:28 | 2021-10-11T04:54:28 | 413,568,735 | 0 | 1 | null | 2021-10-11T04:36:22 | 2021-10-04T20:10:01 | Python | UTF-8 | Python | false | false | 10,150 | py | import pkgutil
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# For backwards compatibility with Django < 3.2
from django.utils.connection import ConnectionDoesNotExist # NOQA: F401
from django.utils.connection import BaseConnectionHandler
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
# PEP 249-style database exception hierarchy.  Backend-specific exceptions
# are re-raised as these common types by DatabaseErrorWrapper below.
class Error(Exception):
    pass


class InterfaceError(Error):
    pass


class DatabaseError(Error):
    pass


class DataError(DatabaseError):
    pass


class OperationalError(DatabaseError):
    pass


class IntegrityError(DatabaseError):
    pass


class InternalError(DatabaseError):
    pass


class ProgrammingError(DatabaseError):
    pass


class NotSupportedError(DatabaseError):
    pass
class DatabaseErrorWrapper:
    """
    Context manager and decorator that reraises backend-specific database
    exceptions using Django's common wrappers.
    """

    def __init__(self, wrapper):
        """
        wrapper is a database wrapper.

        It must have a Database attribute defining PEP-249 exceptions.
        """
        self.wrapper = wrapper

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            return
        # Subclasses are listed before their bases (DatabaseError, Error)
        # so the most specific Django wrapper wins.
        for dj_exc_type in (
            DataError,
            OperationalError,
            IntegrityError,
            InternalError,
            ProgrammingError,
            NotSupportedError,
            DatabaseError,
            InterfaceError,
            Error,
        ):
            # The backend exposes same-named exception classes on .Database.
            db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
            if issubclass(exc_type, db_exc_type):
                dj_exc_value = dj_exc_type(*exc_value.args)
                # Only set the 'errors_occurred' flag for errors that may make
                # the connection unusable.
                if dj_exc_type not in (DataError, IntegrityError):
                    self.wrapper.errors_occurred = True
                raise dj_exc_value.with_traceback(traceback) from exc_value

    def __call__(self, func):
        # Note that we are intentionally not using @wraps here for performance
        # reasons. Refs #21109.
        def inner(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return inner
def load_backend(backend_name):
    """
    Return a database backend's "base" module given a fully qualified database
    backend name, or raise an error if it doesn't exist.
    """
    # This backend was renamed in Django 1.9.
    if backend_name == 'django.db.backends.postgresql_psycopg2':
        backend_name = 'django.db.backends.postgresql'
    try:
        return import_module('%s.base' % backend_name)
    except ImportError as e_user:
        # The database backend wasn't found. Display a helpful error message
        # listing all built-in database backends.
        import django.db.backends
        builtin_backends = [
            name for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__)
            if ispkg and name not in {'base', 'dummy'}
        ]
        if backend_name not in ['django.db.backends.%s' % b for b in builtin_backends]:
            backend_reprs = map(repr, sorted(builtin_backends))
            raise ImproperlyConfigured(
                "%r isn't an available database backend or couldn't be "
                "imported. Check the above exception. To use one of the "
                "built-in backends, use 'django.db.backends.XXX', where XXX "
                "is one of:\n"
                "    %s" % (backend_name, ", ".join(backend_reprs))
            ) from e_user
        else:
            # If there's some other error, this must be an error in Django
            raise
class ConnectionHandler(BaseConnectionHandler):
    """Lazily create and hold one database connection per settings alias."""

    settings_name = 'DATABASES'
    # Connections needs to still be an actual thread local, as it's truly
    # thread-critical. Database backends should use @async_unsafe to protect
    # their code from async contexts, but this will give those contexts
    # separate connections in case it's needed as well. There's no cleanup
    # after async contexts, though, so we don't allow that if we can help it.
    thread_critical = True

    def configure_settings(self, databases):
        """Validate settings.DATABASES, falling back to the dummy backend."""
        databases = super().configure_settings(databases)
        if databases == {}:
            databases[DEFAULT_DB_ALIAS] = {'ENGINE': 'django.db.backends.dummy'}
        elif DEFAULT_DB_ALIAS not in databases:
            raise ImproperlyConfigured(
                f"You must define a '{DEFAULT_DB_ALIAS}' database."
            )
        elif databases[DEFAULT_DB_ALIAS] == {}:
            databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
        return databases

    @property
    def databases(self):
        return self.settings

    def ensure_defaults(self, alias):
        """
        Put the defaults into the settings dictionary for a given connection
        where no settings is provided.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise self.exception_class(f"The connection '{alias}' doesn't exist.")

        conn.setdefault('ATOMIC_REQUESTS', False)
        conn.setdefault('AUTOCOMMIT', True)
        conn.setdefault('ENGINE', 'django.db.backends.dummy')
        if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
            conn['ENGINE'] = 'django.db.backends.dummy'
        conn.setdefault('CONN_MAX_AGE', 0)
        conn.setdefault('OPTIONS', {})
        conn.setdefault('TIME_ZONE', None)
        for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
            conn.setdefault(setting, '')

    def prepare_test_settings(self, alias):
        """
        Make sure the test settings are available in the 'TEST' sub-dictionary.
        """
        try:
            conn = self.databases[alias]
        except KeyError:
            raise self.exception_class(f"The connection '{alias}' doesn't exist.")

        test_settings = conn.setdefault('TEST', {})
        default_test_settings = [
            ('CHARSET', None),
            ('COLLATION', None),
            ('MIGRATE', True),
            ('MIRROR', None),
            ('NAME', None),
        ]
        for key, value in default_test_settings:
            test_settings.setdefault(key, value)

    def create_connection(self, alias):
        """Instantiate the backend's DatabaseWrapper for the given alias."""
        self.ensure_defaults(alias)
        self.prepare_test_settings(alias)
        db = self.databases[alias]
        backend = load_backend(db['ENGINE'])
        return backend.DatabaseWrapper(db, alias)

    def close_all(self):
        """Close every connection actually opened on this thread."""
        for alias in self:
            try:
                connection = getattr(self._connections, alias)
            except AttributeError:
                # Never connected under this alias on this thread.
                continue
            connection.close()
class ConnectionRouter:
    """Chain of user-supplied routers deciding which database to use."""

    def __init__(self, routers=None):
        """
        If routers is not specified, default to settings.DATABASE_ROUTERS.
        """
        self._routers = routers

    @cached_property
    def routers(self):
        if self._routers is None:
            self._routers = settings.DATABASE_ROUTERS
        routers = []
        for r in self._routers:
            if isinstance(r, str):
                # Dotted path: import the router class and instantiate it.
                router = import_string(r)()
            else:
                router = r
            routers.append(router)
        return routers

    def _router_func(action):
        # Factory for db_for_read/db_for_write: ask each router in turn,
        # fall back to the hinted instance's db, then the default alias.
        def _route_db(self, model, **hints):
            chosen_db = None
            for router in self.routers:
                try:
                    method = getattr(router, action)
                except AttributeError:
                    # If the router doesn't have a method, skip to the next one.
                    pass
                else:
                    chosen_db = method(model, **hints)
                    if chosen_db:
                        return chosen_db
            instance = hints.get('instance')
            if instance is not None and instance._state.db:
                return instance._state.db
            return DEFAULT_DB_ALIAS
        return _route_db

    db_for_read = _router_func('db_for_read')
    db_for_write = _router_func('db_for_write')

    def allow_relation(self, obj1, obj2, **hints):
        for router in self.routers:
            try:
                method = router.allow_relation
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                pass
            else:
                allow = method(obj1, obj2, **hints)
                if allow is not None:
                    return allow
        # Default: only allow relations within the same database.
        return obj1._state.db == obj2._state.db

    def allow_migrate(self, db, app_label, **hints):
        for router in self.routers:
            try:
                method = router.allow_migrate
            except AttributeError:
                # If the router doesn't have a method, skip to the next one.
                continue

            allow = method(db, app_label, **hints)

            if allow is not None:
                return allow
        # No router objected: allow the migration.
        return True

    def allow_migrate_model(self, db, model):
        return self.allow_migrate(
            db,
            model._meta.app_label,
            model_name=model._meta.model_name,
            model=model,
        )

    def get_migratable_models(self, app_config, db, include_auto_created=False):
        """Return app models allowed to be migrated on provided db."""
        models = app_config.get_models(include_auto_created=include_auto_created)
        return [model for model in models if self.allow_migrate_model(db, model)]
| [
"marmara@wisc.edu"
] | marmara@wisc.edu |
163b3454ba091eda53c8e7b350d2baac391e73ea | e8dd7dfabac2031d42608920de0aeadc1be8e998 | /thesis/Chapter4/Python/time_distance_plots.py | 0326f20bcb84ee5b7568f7808f713602ed90a9e4 | [
"MIT",
"CC-BY-4.0",
"BSD-2-Clause"
] | permissive | Cadair/Thesis | e7189dfdf74edf3ea2565ee0ec3bdc0777ad99b2 | 792ab1e8cf37af7b9ee52de3566faa928e580500 | refs/heads/master | 2022-05-03T18:29:43.366175 | 2017-01-06T13:37:54 | 2017-01-06T13:37:54 | 21,343,957 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | # coding: utf-8
from __future__ import print_function
import os
from functools import partial
import numpy as np
from scipy.interpolate import interp1d
def get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac):
    """Build the data directory for one simulation run.

    The subdirectory name encodes the driver parameters, with ``exp_fac``
    appended only when it is not None.
    """
    if exp_fac is None:
        subdir = '%s/%s_%s_%s/' % (driver, period, post_amp, tube_r)
    else:
        subdir = '%s/%s_%s_%s_%s/' % (driver, period, post_amp, tube_r, exp_fac)
    return os.path.join(base_path, subdir)
def get_xy(base_path, driver, period, post_amp, tube_r, exp_fac):
    """Return (times, heights, sample points) for one simulation run.

    The z-index of the first time-step's sample points is mapped onto
    physical height in Mm by linear interpolation of heightMM.npy.
    """
    data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
    height_Mm = np.load(os.path.join(base_path, "heightMM.npy"))
    all_times = np.load(os.path.join(data_dir, ("LineVar_%s_%s_%s_times.npy"%(driver, period, post_amp))))[:,0]
    # Reverse the middle (point) axis so heights run bottom-to-top.
    all_spoints = np.load(os.path.join(data_dir, "LineVar_%s_%s_%s_points.npy"%(driver,period,post_amp)))[:,::-1,:]
    # NOTE(review): linspace(0, 128, 128) steps by ~1.008, not exactly 1;
    # presumably meant to match 128 grid indices -- confirm against the data.
    f = interp1d(np.linspace(0,128,128), height_Mm)
    y = f(all_spoints[0,:,2])
    return all_times, y, all_spoints
def get_data(base_path, driver, period, post_amp, tube_r, exp_fac):
    """Load the parallel/perpendicular/azimuthal velocity arrays and beta.

    Returns ([vpar, vperp, vphi], beta_line), each transposed so time runs
    along the second axis.
    """
    data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
    path_join = partial(os.path.join, data_dir)
    all_svphi = np.load(path_join("LineVar_%s_%s_%s_vphi.npy"%(driver,period,post_amp))).T
    all_svperp = np.load(path_join("LineVar_%s_%s_%s_vperp.npy"%(driver,period,post_amp))).T
    all_svpar = np.load(path_join("LineVar_%s_%s_%s_vpar.npy"%(driver,period,post_amp))).T
    beta_line = np.load(path_join("LineFlux_%s_%s_%s_beta.npy"%(driver,period,post_amp))).T
    if post_amp in ['A02k', 'A10']:
        # These amplitude runs are stored in different units; the 1e3
        # factor rescales them to match the others -- confirm the units.
        data = [all_svpar*1e3, all_svperp*1e3, all_svphi*1e3]
    else:
        data = [all_svpar, all_svperp, all_svphi]
    return data, beta_line
def get_speeds(base_path, driver, period, post_amp, tube_r, exp_fac):
    """Load the sound-speed (cs) and Alfven-speed (va) profiles for a run."""
    data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
    path_join = partial(os.path.join, data_dir)
    cs_line = np.load(path_join("LineFlux_%s_%s_%s_cs.npy"%(driver,period,post_amp))).T
    va_line = np.load(path_join("LineFlux_%s_%s_%s_va.npy"%(driver,period,post_amp))).T
    return cs_line, va_line
def get_flux(base_path, driver, period, post_amp, tube_r, exp_fac):
    """Load wave-energy flux components and express them as fractions of |F|.

    Returns ([Fpar%, Fperp%, Fphi%], beta_line, [ParAvg, PerpAvg, PhiAvg]),
    where the averages are taken over the finite (non-masked) entries.
    """
    data_dir = get_filepath(base_path, driver, period, post_amp, tube_r, exp_fac)
    path_join = partial(os.path.join, data_dir)
    # NOTE(review): unlike get_filepath(), any falsy exp_fac (0, '') is
    # treated the same as None when building the identifier.
    if exp_fac:
        identifier = "%s_%s_%s_%s_%s"%(driver, period, post_amp, tube_r, exp_fac)
    else:
        identifier = "%s_%s_%s_%s"%(driver, period, post_amp, tube_r)
    Fpar_line = np.load(path_join("LineVar_{}_Fwpar.npy".format(identifier))).T
    Fperp_line = np.load(path_join("LineVar_{}_Fwperp.npy".format(identifier))).T
    Fphi_line = np.load(path_join("LineVar_{}_Fwphi.npy".format(identifier))).T
    Ftotal = np.sqrt(Fpar_line**2 + Fperp_line**2 + Fphi_line**2)
    Fpar_percent = (Fpar_line / Ftotal)
    Fperp_percent = (Fperp_line / Ftotal)
    Fphi_percent = (Fphi_line / Ftotal)
    #Filter out the noisy flux values before the wave starts propagating.
    filter_ftotal = (np.abs(Ftotal) <= 1e-5)
    Fpar_percent[filter_ftotal.nonzero()] = np.nan
    Fperp_percent[filter_ftotal.nonzero()] = np.nan
    Fphi_percent[filter_ftotal.nonzero()] = np.nan
    # Averages over the valid (finite) fraction values only.
    ParAvg = np.mean(Fpar_percent[np.isfinite(Fpar_percent)])
    PerpAvg = np.mean(Fperp_percent[np.isfinite(Fperp_percent)])
    PhiAvg = np.mean(Fphi_percent[np.isfinite(Fphi_percent)])
    beta_line = np.load(path_join("LineFlux_%s_%s_%s_beta.npy"%(driver,period,post_amp))).T
    return [Fpar_percent, Fperp_percent, Fphi_percent], beta_line, [ParAvg, PerpAvg, PhiAvg]
def overplot_speeds(axes, y, va_line, cs_line):
    """Overplot travel-time curves for the characteristic MHD speeds.

    For each height step the crossing time is accumulated for the Alfven
    (V_A), sound (C_s), fast (V_f) and slow (V_s) speeds; the resulting
    t(y) curves are drawn on the first three axes in black line styles.
    """
    delta_x = np.zeros(y.shape)
    delta_x[1:] = y[1:] - y[:-1]  # height increments; first entry stays 0
    # The 1e6 factor converts the Mm increments to metres -- assumes the
    # speed arrays are in m/s, TODO confirm.
    delta_t_va = delta_x*1e6 / va_line[:,0]
    delta_t_cs = delta_x*1e6 / cs_line[:,0]
    delta_t_vf = delta_x*1e6 / np.sqrt(cs_line[:,0]**2 + va_line[:,0]**2)
    # Slow (tube) speed: (cs^-2 + va^-2)^(-1/2).
    delta_t_vs = delta_x*1e6 / np.sqrt(cs_line[:,0]**-2 + va_line[:,0]**-2)**-1
    ti = 60  # time offset added to every curve -- presumably the driver start time, confirm
    t_va = np.cumsum(delta_t_va) + ti
    t_cs = np.cumsum(delta_t_cs) + ti
    t_vf = np.cumsum(delta_t_vf) + ti
    t_vs = np.cumsum(delta_t_vs) + ti
    for i in range(0,3):
        axes[i].plot(t_va, y, label=r"$V_A$", linewidth=2, linestyle=':', color='k')#b
        axes[i].plot(t_cs, y, label=r"$C_s$", linewidth=2, linestyle='--', color='k')#g
        axes[i].plot(t_vf, y, label=r"$V_f$", linewidth=2, linestyle='-.', color='k')#r
        axes[i].plot(t_vs, y, label=r"$V_s$", linewidth=2, linestyle='-', color='k')#c
| [
"stuart@cadair.com"
] | stuart@cadair.com |
4f8ac3edfd7e3d403c3648bd85382171104548c4 | 1325ecde27307dce9fe6edce88c0249d5c49ae60 | /day11/part2.py | 6fcbdfc51aa26a08e16ea53ce361bbca0406ec05 | [
"Apache-2.0"
] | permissive | jonasmue/adventofcode20 | 24208e7b9f2fb161d3fabb2bfd9b00f11b67840d | 437eb9ff82045ce825b68f1dcb3b79265723bba4 | refs/heads/main | 2023-02-04T17:43:40.082534 | 2020-12-26T08:51:54 | 2020-12-26T08:51:54 | 317,487,937 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from common import SeatingSystem
if __name__ == "__main__":
    # O(s*n^2) time and O(n) space
    # with s: number of steps until convergence, n: number of points in grid
    # Time complexity could be optimized by saving and reusing neighbors of each point
    # NOTE(review): presumably 5 is the occupied-seat tolerance and False
    # selects part-2 (line-of-sight) neighbours -- confirm in common.SeatingSystem.
    seating_system = SeatingSystem(5, False)
    seating_system.run()
    print(seating_system.occupied)
| [
"jonas.mueller@compitencies.com"
] | jonas.mueller@compitencies.com |
d70f750865ab7e20438deebfe95e9082fb4bc964 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1106+083/sdB_pg_1106+083_lc.py | ecb0d839b701604589194e291b9d3c266426ea9b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from gPhoton.gAperture import gAperture
def main():
    """Run gAperture NUV photometry for sdB_pg_1106+083 and write the light-curve CSV."""
    # Fixed sky position (RA=167.1825, Dec=8.030292) with stepsz=30,
    # aperture radius ~0.00556 deg and a 0.00597-0.01039 deg background annulus.
    gAperture(band="NUV", skypos=[167.1825,8.030292], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1106+083/sdB_pg_1106+083_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
129d670a19e33414131c1441c3be5449062746fd | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2009/py01/day05/stack2.py | d95af629d461f0dd46de18c3352ef00fc877d385 | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | stack = [] # 使用列表模拟栈
def push_it():
    "Push: read a line from the user and append it to the module-level stack."
    data = input("数据: ").strip()
    if data:  # data is non-empty
        stack.append(data)
    else:
        # Whitespace-only input is rejected.
        print("未获取到数据。")
def pop_it():
    "Pop: remove the top element from the stack and print it."
    if stack:  # stack is non-empty
        print("从栈中弹出: \033[31;1m%s\033[0m" % stack.pop())
    else:
        print("\033[31;1m栈已经是空的\033[0m")
def view_it():
    "Show the current stack contents."
    print('\033[32;1m%s\033[0m' % stack)
def show_menu():
    "Display the menu and drive the program's main loop."
    # Store the handler functions in a dict keyed by menu choice.
    funcs = {'0': push_it, '1': pop_it, '2': view_it}
    prompt = """(0) 压栈
(1) 出栈
(2) 查询
(3) 退出
请选择(0/1/2/3): """
    while 1:
        choice = input(prompt).strip()  # strip surrounding whitespace from the input
        if choice not in ['0', '1', '2', '3']:
            print("无效的输入,请重试。")
            continue
        if choice == '3':
            print('Bye-bye')
            break
        funcs[choice]()  # look up the function in the dict and call it

if __name__ == '__main__':
    show_menu()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
b5989f1e513e71181fa457b7defad96e57e6e4a9 | bf2d010229aece071359662f4fef44e48ba57951 | /fitness-combined | 60c7f5034e05a8c045965320b191f404c78aa9d4 | [] | no_license | Osrip/CriticalEvolution | b97398f74e2fc5b54c9ab92765b08ce3bf97257e | f77cae8acc626cb4c6d64d5a44fdf00310309c2e | refs/heads/master | 2021-06-24T03:44:03.283017 | 2021-04-03T13:09:42 | 2021-04-03T13:09:42 | 215,332,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,350 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
import pickle
from os import makedirs, path
import os
'''
loadfiles = ['beta_experiment/beta-0-1/sim-20180512-105719',
'beta_experiment/beta-1/sim-20180511-163319',
'beta_experiment/beta-10/sim-20180512-105824']
'''
# ---------------------------------------------------------------------------
# Script configuration: which simulations to load and how to style the plot.
# ---------------------------------------------------------------------------
loadfiles = ['sim-20191114-000009_server']
# os.chdir('D:\Masterarbeit_ausgelagert')

energy_model = True   # plot I.energy instead of I.fitness
numAgents = 150       # organisms per generation
autoLoad = True       # reuse cached .npz results when available
saveFigBool = True
fixGen2000 = False    # correct the double-counted generation 2000

# loadfiles = ['beta_experiment/beta-0-1/sim-20180512-105719',
#              'beta_experiment/beta-0-1/sim-20180512-105725',
#              'beta_experiment/beta-1/sim-20180511-163319',
#              'beta_experiment/beta-1/sim-20180511-163335',
#              'beta_experiment/beta-1/sim-20180511-163347',
#              'beta_experiment/beta-1/sim-20180511-163357',
#              'beta_experiment/beta-10/sim-20180512-105824',
#              'beta_experiment/beta-10/sim-20180512-105819']
# IC = [0, 0, 1, 1, 1, 1, 2, 2]

new_order = [2, 0, 1]
# Bug fix: the third label read r'$\_i = 10$' (missing \beta), which is
# broken LaTeX under usetex and inconsistent with its siblings.
labels = [r'$\beta_i = 0.1$', r'$\beta_i = 1$', r'$\beta_i = 10$']
iter_list = np.arange(0, 2000, 1)

cmap = plt.get_cmap('seismic')
norm = colors.Normalize(vmin=0, vmax=len(loadfiles))  # age/color mapping
# norm = [[194, 48, 32, 255],
#         [146, 49, 182, 255],
#         [44, 112, 147, 255]]
# norm = np.divide(norm, 255)

a = 0.15  # alpha
def upper_tri_masking(A):
    """Return the elements of square matrix ``A`` strictly above the diagonal."""
    n = A.shape[0]
    idx = np.arange(n)
    # mask[i, j] is True exactly where i < j (strict upper triangle).
    return A[idx[:, None] < idx]
def fitness(loadfile, iter_list, numAgents, autoLoad, saveFigBool):
    """Return the (generation x agent) food/energy matrix for one simulation.

    Results are cached as an .npz under the run's figs/fitness folder; when
    ``autoLoad`` is True and the cache exists it is loaded instead of
    re-reading the per-generation pickles.  Depending on the module-level
    ``energy_model`` flag either I.energy or I.fitness is collected per
    organism.  ``saveFigBool`` is currently unused here.
    """
    folder = 'save/' + loadfile
    folder2 = folder + '/figs/fitness/'
    # Cache name encodes start, step (iter_list[1]-iter_list[0]) and end;
    # assumes iter_list has at least two equally spaced entries.
    fname2 = folder2 + 'fitness-' + \
        str(iter_list[0]) + '-' + str(iter_list[1] - iter_list[0]) + '-' + str(iter_list[-1]) + \
        '.npz'
    if path.isfile(fname2) and autoLoad:
        txt = 'Loading: ' + fname2
        print(txt)
        data = np.load(fname2)
        FOOD = data['FOOD']
    else:
        FOOD = np.zeros((len(iter_list), numAgents))
        for ii, iter in enumerate(iter_list):  # NOTE: 'iter' shadows the builtin
            filename = 'save/' + loadfile + '/isings/gen[' + str(iter) + ']-isings.pickle'
            startstr = 'Loading simulation:' + filename
            print(startstr)
            try:
                isings = pickle.load(open(filename, 'rb'))
            except Exception:
                # NOTE(review): on failure the previous generation's
                # 'isings' is reused below, so the prior datapoint is
                # recorded twice; a failure on the very first generation
                # would raise NameError.
                print("Error while loading %s. Skipped file" % filename)
                #Leads to the previous datapoint being drawn twice!!
            food = []
            for i, I in enumerate(isings):
                if energy_model:
                    food.append(I.energy)
                else:
                    food.append(I.fitness)
            # food = np.divide(food, 6)
            FOOD[ii, :] = food
        if not path.exists(folder2):
            makedirs(folder2)
        np.savez(fname2, FOOD=FOOD)
    return FOOD
# One (generations x agents) food matrix per simulation run.
FOODS = []
for loadfile in loadfiles:
    f = fitness(loadfile, iter_list, numAgents, autoLoad, saveFigBool)
    # FIX THE DOUBLE COUNTING PROBLEM
    # NOTE(review): with iter_list = arange(0, 2000) the matrix has exactly
    # 2000 rows, so `f.shape[0] > 2000` is never true and this correction is
    # dead even when fixGen2000 is enabled — confirm the intended condition.
    if f.shape[0] > 2000 and fixGen2000:
        print('Fixing Double Counting at Gen 2000')
        f[2000, :] = f[2000, :] - f[1999, :]
    FOODS.append(f)
# FIX THE DOUBLE COUNTING OF THE FITNESS
# Global matplotlib styling: LaTeX text rendering (requires a local LaTeX
# installation) with a serif font, plus one large combined figure.
plt.rc('text', usetex=True)
font = {'family': 'serif', 'size': 28, 'serif': ['computer modern roman']}
plt.rc('font', **font)
plt.rc('legend', **{'fontsize': 20})
fig, ax = plt.subplots(1, 1, figsize=(19, 10))
# Shared axis labels placed via figure text rather than ax.set_*label.
fig.text(0.51, 0.035, r'$Generation$', ha='center', fontsize=20)
# fig.text(0.07, 0.5, r'$Avg. Food Consumed$', va='center', rotation='vertical', fontsize=20)
fig.text(0.07, 0.5, r'$Food Consumed$', va='center', rotation='vertical', fontsize=20)
title = 'Food consumed per organism'
fig.suptitle(title)
# Plot one mean curve (+/- one std-dev band) per simulation run, colored via
# the new_order permutation so runs keep consistent colors across figures.
for i, FOOD in enumerate(FOODS):
    # for i in range(0, numAgents):
    #     ax.scatter(iter_list, FOOD[:, i], color=[0, 0, 0], alpha=0.2, s=30)
    c = cmap(norm(new_order[i]))
    # c = norm[i]
    # c = norm[IC[i]]
    # Mean food consumed across the population at each generation.
    muF = np.mean(FOOD, axis=1)
    ax.plot(iter_list, muF, color=c, label=labels[new_order[i]])
    # for numOrg in range(FOOD.shape[1]):
    #     ax.scatter(iter_list, FOOD[:, numOrg],
    #                alpha=0.01, s=8, color=c, label=labels[new_order[i]])
    # maxF = np.max(FOOD, axis=1)
    # minF = np.min(FOOD, axis=1)
    # ax.fill_between(iter_list, maxF, minF,
    #                 color=np.divide(c, 2), alpha=a)
    # Shade the +/- one standard deviation band around the mean.
    sigmaF = FOOD.std(axis=1)
    ax.fill_between(iter_list, muF + sigmaF, muF - sigmaF,
                    color=c, alpha=a
                    )
# Hand-built legend: colored dot markers on invisible lines, ordered to show
# beta = 10, 1, 0.1 from top to bottom.
# NOTE(review): these hard-coded labels assume norm(1) -> beta=10,
# norm(0) -> beta=1, norm(2) -> beta=0.1 under new_order — verify against the
# `labels`/`new_order` mapping above if loadfiles changes.
custom_legend = [Line2D([0], [0], marker='o', color='w',
                        markerfacecolor=cmap(norm(1)), markersize=15),
                 Line2D([0], [0], marker='o', color='w',
                        markerfacecolor=cmap(norm(0)), markersize=15),
                 Line2D([0], [0], marker='o', color='w',
                        markerfacecolor=cmap(norm(2)), markersize=15),]
# custom_legend = [Circle((0, 0), 0.001,
#                        facecolor=cmap(norm(1))),
#                  Circle((0, 0), 1,
#                        facecolor=cmap(norm(0))),
#                  Circle((0, 0), 1,
#                        facecolor=cmap(norm(2)))]
ax.legend(custom_legend, [r'$\beta = 10$', r'$\beta = 1$', r'$\beta = 0.1$'], loc='upper left')
# plt.legend(loc=2)
# yticks = np.arange(0, 150, 20)
# ax.set_yticks(yticks)
# xticks = [0.1, 0.5, 1, 2, 4, 10, 50, 100, 200, 500, 1000, 2000]
# ax.set_xscale("log", nonposx='clip')
# ax.set_xticks(xticks)
# ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
# Save the combined figure and display it.
# NOTE(review): `loadfile` here is the loop variable leaked from the FOODS
# loop, i.e. the *last* entry of loadfiles — the combined figure is written
# only under that run's folder. Confirm this is intentional.
folder = 'save/' + loadfile
savefolder = folder + '/figs/fitness_combined/'
savefilename = savefolder + 'fitness_gen_' + str(iter_list[0]) + '-' + str(iter_list[-1]) + '.png'
if not path.exists(savefolder):
    makedirs(savefolder)
if saveFigBool:
    plt.savefig(savefilename, bbox_inches='tight', dpi=150)
    # plt.close()
    savemsg = 'Saving ' + savefilename
    print(savemsg)
# if saveFigBool:
#     savefolder = folder + '/figs/fitness/'
#     savefilename = savefolder + 'fitness_gen_' + str(iter_list[0]) + '-' + str(iter_list[-1]) + '.png'
#     plt.savefig(bbox_inches = 'tight', dpi = 300)
plt.show()
| [
"jan.prosi@hotmail.com"
] | jan.prosi@hotmail.com | |
93323b86ef220a5a72384a8c2174cc533f7e5ac5 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/project/settings_20210405152615.py | 26c367c5f973efc1f18418c5f5bb4e41da83fb2b | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,682 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from an environment variable before any production deployment.
SECRET_KEY = 'nyg(3!o7_eqr1fk-hb(xfnvj3)ay^zvhiz6o_d029p9$0cr6h^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable only while DEBUG is True; list served hostnames for
# production.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # Django contrib apps.
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps.
    'student',
    'index',
    'authentication',
    'exams',
]
# Default Django middleware stack (order matters: security first,
# sessions before auth, etc.).
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Root URL configuration module.
ROOT_URLCONF = 'project.urls'
# Template engine: project-level templates/ directory plus per-app
# templates (APP_DIRS=True).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): credentials are hard-coded for local development — load them
# from environment variables before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'project',
        'USER': 'postgres',
        'PASSWORD': '12345',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
# Django's default four validators, unchanged.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
# Django defaults: US English, UTC, translation/localization enabled.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# Project-level static sources picked up in addition to per-app static/ dirs.
STATICFILES_DIRS = [
    BASE_DIR / 'static'
]
# Destination directory for `collectstatic` in deployment.
STATIC_ROOT = BASE_DIR / 'assets'
# User-uploaded media files.
MEDIA_URL = '/media/'
# Bug fix: this assignment previously re-bound STATIC_ROOT (silently
# overwriting the 'assets' path above) while MEDIA_ROOT — which MEDIA_URL
# requires to serve uploads — was never defined. It was clearly meant to be
# MEDIA_ROOT.
MEDIA_ROOT = BASE_DIR / 'media'
# Map Django message levels to Bootstrap alert CSS classes so templates can
# use {{ message.tags }} directly as a class name.
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
    messages.DEBUG: 'alert-info',
    messages.INFO: 'alert-info',
    messages.SUCCESS: 'alert-success',
    messages.WARNING: 'alert-warning',
    messages.ERROR: 'alert-danger',
}
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.