blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6cdd4ac57903c57644fd5839156cb699231f3e1
|
82c4b842f806502991d932bf06b3b925b371049a
|
/iterative_sorting/test_iterative.py
|
95bee262e58d74f39c46251aa5c01b2de10b96e1
|
[] |
no_license
|
MadisonViaLux/PythonNotes
|
9bfdea380d9bfdf9853040aa5550ed6c13f9a927
|
20d83f8b419e1975dde9cf901cadcd175ea8f27b
|
refs/heads/master
| 2023-06-09T06:50:04.794492
| 2020-08-06T19:34:15
| 2020-08-06T19:34:15
| 281,484,506
| 0
| 0
| null | 2020-08-06T19:34:17
| 2020-07-21T19:20:29
|
Python
|
UTF-8
|
Python
| false
| false
| 991
|
py
|
import unittest
import random
from iterative_sorting import *
class IterativeSortingTest(unittest.TestCase):
    """Unit tests for the iterative selection_sort and bubble_sort functions."""

    def test_selection_sort(self):
        # Cases: shuffled permutation, empty list, already-sorted list,
        # and a random 50-element sample from range(200).
        shuffled = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
        empty = []
        presorted = [0, 1, 2, 3, 4, 5]
        randomized = random.sample(range(200), 50)
        self.assertEqual(selection_sort(shuffled), list(range(10)))
        self.assertEqual(selection_sort(empty), [])
        self.assertEqual(selection_sort(presorted), [0, 1, 2, 3, 4, 5])
        self.assertEqual(selection_sort(randomized), sorted(randomized))

    def test_bubble_sort(self):
        # Same four cases as above, exercised against bubble_sort.
        shuffled = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]
        empty = []
        presorted = [0, 1, 2, 3, 4, 5]
        randomized = random.sample(range(200), 50)
        self.assertEqual(bubble_sort(shuffled), list(range(10)))
        self.assertEqual(bubble_sort(empty), [])
        self.assertEqual(bubble_sort(presorted), [0, 1, 2, 3, 4, 5])
        self.assertEqual(bubble_sort(randomized), sorted(randomized))


if __name__ == '__main__':
    unittest.main()
|
[
"madison.s.mckown@gmail.com"
] |
madison.s.mckown@gmail.com
|
5c581662b4dd5c362aae88507f12843821c76bc9
|
c1ca214c27832c461775737e86c9c150d64d0c84
|
/mapreduce/hooks.py
|
20a5c757f0762e472112b6e1a1d50c9b93ec3c1c
|
[] |
no_license
|
agualis/AOS2012
|
348a882a59e518c18d6ccc0af84fdf27e9d04880
|
99af591167514de64bfa2212c0659bd1a941787c
|
refs/heads/master
| 2021-03-12T22:14:23.915585
| 2012-06-23T13:51:30
| 2012-06-23T13:51:30
| 3,416,875
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,803
|
py
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API allowing control over some mapreduce implementation details."""
__all__ = ["Hooks"]
class Hooks(object):
    """Extension point for customizing parts of mapreduce execution.

    control.start_map accepts an optional "hooks" argument that may be a
    subclass of this class. Each hook either enqueues the supplied task
    itself or raises NotImplementedError so the default strategy is used.
    """

    def enqueue_worker_task(self, task, queue_name):
        """Enqueue the task that runs the mapper.

        Args:
            task: A taskqueue.Task that must be queued for the mapreduce
                mappers to run.
            queue_name: Name of the queue to run the task on, e.g. "default".

        Raises:
            NotImplementedError: signals that the default worker queueing
                strategy should be used.
        """
        raise NotImplementedError()

    def enqueue_kickoff_task(self, task, queue_name):
        """Enqueue the task that starts the mapreduce.

        Args:
            task: A taskqueue.Task that must be queued for the mapreduce
                to start.
            queue_name: Name of the queue to run the task on, e.g. "default".

        Raises:
            NotImplementedError: signals that the default mapreduce start
                strategy should be used.
        """
        raise NotImplementedError()

    def enqueue_done_task(self, task, queue_name):
        """Enqueue the task fired when the mapreduce completes.

        Args:
            task: A taskqueue.Task that must be queued for the client to be
                notified when the mapreduce is complete.
            queue_name: Name of the queue to run the task on, e.g. "default".

        Raises:
            NotImplementedError: signals that the default mapreduce
                notification strategy should be used.
        """
        raise NotImplementedError()

    def enqueue_controller_task(self, task, queue_name):
        """Enqueue the task that monitors the mapreduce process.

        Args:
            task: A taskqueue.Task that must be queued for updates to the
                mapreduce process to be properly tracked.
            queue_name: Name of the queue to run the task on, e.g. "default".

        Raises:
            NotImplementedError: signals that the default mapreduce tracking
                strategy should be used.
        """
        raise NotImplementedError()
|
[
"agualis@gmail.com"
] |
agualis@gmail.com
|
9f2e29e63a60fddec858f933a2b29d20252cd2ff
|
dd766645d0706f8cd2430288f8dbb8c9026f284c
|
/weather/forms.py
|
ff5b7f2543bbefb23bbbbf9ca81b2d09394e387b
|
[] |
no_license
|
krishnadevkar/Weather-App-Using-Django-and-Python
|
b465bbef182076277c748cb935bbaaf57df2c70b
|
5342cc78d567096d5269ecfec3bf08f91ec6ecf7
|
refs/heads/main
| 2022-12-26T16:01:57.742725
| 2020-10-13T13:19:52
| 2020-10-13T13:19:52
| 303,709,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
from django.forms import ModelForm, TextInput
from .models import City
class CityForm(ModelForm):
    """ModelForm for creating a City record; exposes only the name field."""

    class Meta:
        model = City
        # Only the city name is user-editable.
        fields = ['name']
        # Render the name field as a text input with a CSS class and placeholder.
        widgets = {'name': TextInput(attrs={'class':'input','placeholder':'City Name'})}
|
[
"72792579+krishnadevkar@users.noreply.github.com"
] |
72792579+krishnadevkar@users.noreply.github.com
|
22b5a918e455a0575dea2511baf43c39ae5f6df4
|
409b77fa3d52e396f45a2126178732cbccfa32ec
|
/budget/expenses/views.py
|
eabe7e4ee2059a08d5f3178ed7757f10db4b4384
|
[] |
no_license
|
Venkatessankarthitk/montlybudget
|
2bd652d7e5f9e0383d73d1c0155dcf6f5845aa2b
|
107719c348e382d85a4a94e3f4acab3e5f851659
|
refs/heads/master
| 2020-03-23T09:37:20.341798
| 2018-07-23T06:15:29
| 2018-07-23T06:15:29
| 141,399,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
import json
import datetime
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import HttpResponse
from expenses.models import purchasing_items, expenses_details
from django.core import serializers
from django.views.generic.base import TemplateView
from django.http import JsonResponse
from django.shortcuts import redirect
from django.core.serializers.json import DjangoJSONEncoder
from rest_framework.decorators import api_view
from rest_framework.decorators import list_route, detail_route
class ExpensesPage(TemplateView):
    """Static expenses page; data is fetched separately via the JSON views below."""
    template_name = "expenses.html"

#def index(request):
    #return HttpResponse("Hello, world. You're in budget expenses page index.")
#    return render(request, 'expenses.html')
def purchase_item(request):
    """Return every purchasing_items row serialized as a JSON array."""
    rows = list(purchasing_items.objects.values())
    payload = json.dumps(rows)
    return HttpResponse(payload, content_type='application/json')
@list_route(methods=["GET","POST" ])
def abcd(request):
    """Create an expenses_details row from raw query-string parameters.

    Expects purchase_items, price and date keys in the query string; on
    success redirects back to the expenses page, otherwise returns an error
    message.
    """
    try:
        # NOTE(review): parses the raw QUERY_STRING by splitting on "&" and "="
        # instead of using request.GET, so values are NOT URL-decoded and a
        # missing "=" raises -- confirm callers always send simple values.
        parchesed_details = dict(item.split("=") for item in (request.META['QUERY_STRING']).split("&"))
        parchesed_data = expenses_details( parchased_product= parchesed_details['purchase_items'] ,
            parchased_price= parchesed_details['price'], parchased_date= parchesed_details['date'] )
        parchesed_data.save()
        # return HttpResponse("Sucessfully Added the purchased details")
        # NOTE(review): hard-coded dev-server URL; a named-URL redirect would be safer.
        return redirect('http://127.0.0.1:8000/expenses/')
    except Exception:
        # Broad catch: any parse/save failure yields a generic error response.
        return HttpResponse("Error in Adding the purchased details")
def expenses(request):
    """Serialize all expenses_details rows to JSON.

    Uses DjangoJSONEncoder so date/decimal fields serialize cleanly.
    """
    rows = expenses_details.objects.all().values()
    payload = json.dumps(list(rows), cls=DjangoJSONEncoder)
    return HttpResponse(payload)
class dashboard(TemplateView):
    """Static dashboard page rendered from a template."""
    template_name = "dashboard.html"
|
[
"noreply@github.com"
] |
noreply@github.com
|
11a6fcb57a8d8be2f2d2ef039795233e246976d1
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03346/s839224624.py
|
607b47a18949a8482e6d9a712cdca3e6c434dd8e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import bisect
import heapq
import itertools
import math
import operator
import os
import re
import string
import sys
from collections import Counter, deque, defaultdict
from copy import deepcopy
from decimal import Decimal
from fractions import gcd
from functools import lru_cache, reduce
from operator import itemgetter, mul, add, xor
import numpy as np
# During local development, read input from a file instead of stdin.
if os.getenv("LOCAL"):
    sys.stdin = open("_in.txt", "r")

# Deep recursion allowed for contest solutions.
sys.setrecursionlimit(2147483647)
# Common contest constants: float/int infinity and the usual prime modulus.
INF = float("inf")
IINF = 10 ** 18
MOD = 10 ** 9 + 7
# Read a permutation P of length N, one value per line.
N = int(sys.stdin.readline())
P = [int(sys.stdin.readline()) for _ in range(N)]

# dp[i]: how many consecutive values (..., i-1, i) already appear in
# increasing order of position up to this point (translated from the
# original Japanese comment).
dp = np.zeros(N + 1, dtype=int)
for i in range(N):
    p = P[i]
    dp[p] = dp[p - 1] + 1
# The longest such chain can stay put; every other element needs one move.
print(N - dp.max())
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a1ea1cd0c4454fea650614ef561225696796a60d
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-organizations/huaweicloudsdkorganizations/v1/model/tag_resource_req_body.py
|
f43bc9df67f8aa35a1f1ec41372a38258bee7053
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,188
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TagResourceReqBody:
    """Request-body model for tagging a resource (generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Names of attributes whose values must be masked in to_dict().
    sensitive_list = []

    openapi_types = {
        'tags': 'list[TagDto]'
    }

    attribute_map = {
        'tags': 'tags'
    }

    def __init__(self, tags=None):
        """TagResourceReqBody

        The model defined in huaweicloud sdk

        :param tags: List of tags to add to the specified resource.
        :type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
        """
        self._tags = None
        self.discriminator = None
        self.tags = tags

    @property
    def tags(self):
        """Gets the tags of this TagResourceReqBody.

        List of tags to add to the specified resource.

        :return: The tags of this TagResourceReqBody.
        :rtype: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this TagResourceReqBody.

        List of tags to add to the specified resource.

        :param tags: The tags of this TagResourceReqBody.
        :type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
        """
        self._tags = tags

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are model objects.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values that are model objects.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    # Mask sensitive fields instead of exposing their values.
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, TagResourceReqBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
1ba0240b2c151da511245d602901fc5f66aeb582
|
e91a030ed90ef06fb861729a173d6f77ecf5a3e8
|
/prahaApp/places/views.py
|
3eda96b75ce36ca323c8915323e2f5b06a88ee8f
|
[] |
no_license
|
snjope/prahaApp
|
0517657fcc08739dd01a504f7f8c75dde44fbca4
|
aef953c0f8379898cdbb93bd0088a8cc6298cdbc
|
refs/heads/master
| 2021-05-01T20:26:48.547811
| 2017-01-17T17:17:13
| 2017-01-17T17:17:13
| 79,244,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request):
    """Placeholder landing view for the places app."""
    message = "Hei kusipäät. Täällä voi lisätä mestoja."
    return HttpResponse(message)
|
[
"joonas.peuralinna@aalto.fi"
] |
joonas.peuralinna@aalto.fi
|
373c763944c135c4d62fd1b09bab3072d1e6cd84
|
e79888cd68177e7ec5125270cdc52f888e211e78
|
/hwichan/chapter06/knock52.py
|
d96472faf4e538c6afb9d7c120f1e680ac6703fb
|
[] |
no_license
|
cafenoctua/100knock2019
|
ec259bee27936bdacfe0097d42f23cc7500f0a07
|
88717a78c4290101a021fbe8b4f054f76c9d3fa6
|
refs/heads/master
| 2022-06-22T04:42:03.939373
| 2019-09-03T11:05:19
| 2019-09-03T11:05:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,343
|
py
|
import re
import snowballstemmer
def sentence_extraction(path='nlp.txt'):
    """Yield sentences from the text file at *path*, one at a time.

    A sentence boundary is a terminator character (. ; : ? or !) followed by
    a whitespace character and an uppercase letter. A line (or line tail)
    with no further boundary is yielded whole as the final sentence.

    :param path: input file to read (defaults to 'nlp.txt' for backward
        compatibility with the original script).
    :raises FileNotFoundError: if *path* does not exist.
    """
    # BUG FIX: the original character class was [\.|;|:|\?|!], which also
    # treats the literal '|' as a sentence terminator ('|' is not an
    # alternation operator inside a character class).
    pattern = re.compile(r'''
        (
            ^.*?            # shortest (non-greedy) prefix
            [.;:?!]         # sentence terminator
        )
        \s                  # single whitespace separator
        (
            [A-Z].*         # next sentence starts with an uppercase letter
        )
        ''', re.MULTILINE + re.VERBOSE + re.DOTALL)
    with open(path, 'r') as f:
        for line in f:
            line = line.strip('\n')
            while True:
                if line == '':
                    break
                s = pattern.match(line)
                if s:
                    yield s.group(1)
                    # Keep scanning the remainder of the line.
                    line = s.group(2)
                else:
                    # No boundary found: the rest is one final sentence.
                    yield line
                    break
def main():
    """Print each word of the first ten extracted sentences beside its stem."""
    # Load the English stemming module.
    stemmer = snowballstemmer.stemmer('english')
    for index, sentence in enumerate(sentence_extraction()):
        if index == 10:
            break
        tokens = sentence.strip('\n').split(' ')
        for token in tokens:
            # Stem each token with stemmer.stemWord and show both forms.
            print('{}\t{}'.format(token, stemmer.stemWord(token)))
        print('\n')


if __name__ == '__main__':
    main()
|
[
"2mmhwichan@gmail.com"
] |
2mmhwichan@gmail.com
|
3b7a1f1b87e2d782ff133914ab7fc7467d0a30b3
|
fec00348ab62d9539955f91f707291f8b09ecbd8
|
/chainer_/chainercv2/models/irevnet.py
|
0a86f6a43e3e4bd058b0b08765e3a0d0aaa6b1b9
|
[
"MIT"
] |
permissive
|
raijinspecial/imgclsmob
|
192cae6cd5021f8ff951371d641e5f222cfe3068
|
c5d3ab207a6304f1343e4394f0467bdc7403a72a
|
refs/heads/master
| 2020-04-08T03:27:05.031453
| 2019-03-08T04:28:13
| 2019-03-08T04:28:13
| 158,976,223
| 0
| 0
|
MIT
| 2019-03-08T04:28:14
| 2018-11-24T21:47:08
|
Python
|
UTF-8
|
Python
| false
| false
| 15,483
|
py
|
"""
i-RevNet, implemented in Chainer.
Original paper: 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
"""
__all__ = ['IRevNet', 'irevnet301']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv3x3, pre_conv3x3_block, DualPathSequential, SimpleSequential
class IRevDualPathSequential(DualPathSequential):
"""
An invertible sequential container for blocks with dual inputs/outputs.
Blocks will be executed in the order they are added.
Parameters:
----------
return_two : bool, default True
Whether to return two output after execution.
first_ordinals : int, default 0
Number of the first blocks with single input/output.
last_ordinals : int, default 0
Number of the final blocks with single input/output.
dual_path_scheme : function
Scheme of dual path response for a block.
dual_path_scheme_ordinal : function
Scheme of dual path response for an ordinal block.
last_noninvertible : int, default 0
Number of the final blocks skipped during inverse.
"""
def __init__(self,
return_two=True,
first_ordinals=0,
last_ordinals=0,
dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2)),
last_noninvertible=0):
super(IRevDualPathSequential, self).__init__(
return_two=return_two,
first_ordinals=first_ordinals,
last_ordinals=last_ordinals,
dual_path_scheme=dual_path_scheme,
dual_path_scheme_ordinal=dual_path_scheme_ordinal)
self.last_noninvertible = last_noninvertible
def inverse(self, x1, x2=None):
length = len(self.layer_names)
for i, block_name in enumerate(reversed(self.layer_names)):
block = self[block_name]
if i < self.last_noninvertible:
pass
elif (i < self.last_ordinals) or (i >= length - self.first_ordinals):
x1, x2 = self.dual_path_scheme_ordinal(block.inverse, x1, x2)
else:
x1, x2 = self.dual_path_scheme(block.inverse, x1, x2)
if self.return_two:
return x1, x2
else:
return x1
class IRevDownscale(Chain):
"""
i-RevNet specific downscale (so-called psi-block).
Parameters:
----------
scale : int
Scale (downscale) value.
"""
def __init__(self, scale):
super(IRevDownscale, self).__init__()
self.scale = scale
def __call__(self, x):
batch, x_channels, x_height, x_width = x.shape
y_channels = x_channels * self.scale * self.scale
assert (x_height % self.scale == 0)
y_height = x_height // self.scale
y = F.transpose(x, axes=(0, 2, 3, 1))
d2_split_seq = F.split_axis(y, indices_or_sections=(y.shape[2] // self.scale), axis=2)
d2_split_seq = [t.reshape(batch, y_height, y_channels) for t in d2_split_seq]
y = F.stack(d2_split_seq, axis=1)
y = F.transpose(y, axes=(0, 3, 2, 1))
return y
def inverse(self, y):
scale_sqr = self.scale * self.scale
batch, y_channels, y_height, y_width = y.shape
assert (y_channels % scale_sqr == 0)
x_channels = y_channels // scale_sqr
x_height = y_height * self.scale
x_width = y_width * self.scale
x = F.transpose(y, axes=(0, 2, 3, 1))
x = x.reshape(batch, y_height, y_width, scale_sqr, x_channels)
d3_split_seq = F.split_axis(x, indices_or_sections=(x.shape[3] // self.scale), axis=3)
d3_split_seq = [t.reshape(batch, y_height, x_width, x_channels) for t in d3_split_seq]
x = F.stack(d3_split_seq, axis=0)
x = F.transpose(F.swapaxes(x, axis1=0, axis2=1), axes=(0, 2, 1, 3, 4)).reshape(
batch, x_height, x_width, x_channels)
x = F.transpose(x, axes=(0, 3, 1, 2))
return x
class IRevInjectivePad(Chain):
"""
i-RevNet channel zero padding block.
Parameters:
----------
padding : int
Size of the padding.
"""
def __init__(self, padding):
super(IRevInjectivePad, self).__init__()
self.padding = padding
def __call__(self, x):
return F.pad(x, pad_width=((0, 0), (0, self.padding), (0, 0), (0, 0)), mode="constant", constant_values=0)
def inverse(self, x):
return x[:, :x.size(1) - self.padding, :, :]
class IRevSplitBlock(Chain):
"""
iRevNet split block.
"""
def __init__(self):
super(IRevSplitBlock, self).__init__()
def __call__(self, x, _):
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
def inverse(self, x1, x2):
x = F.concat((x1, x2), axis=1)
return x, None
class IRevMergeBlock(Chain):
"""
iRevNet merge block.
"""
def __init__(self):
super(IRevMergeBlock, self).__init__()
def __call__(self, x1, x2):
x = F.concat((x1, x2), axis=1)
return x, x
def inverse(self, x, _):
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
class IRevBottleneck(Chain):
"""
iRevNet bottleneck block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the branch convolution layers.
preactivate : bool
Whether use pre-activation for the first convolution block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
preactivate):
super(IRevBottleneck, self).__init__()
mid_channels = out_channels // 4
with self.init_scope():
if preactivate:
self.conv1 = pre_conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
stride=stride)
else:
self.conv1 = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
stride=stride)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels)
self.conv3 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels)
def __call__(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class IRevUnit(Chain):
"""
iRevNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the branch convolution layers.
preactivate : bool
Whether use pre-activation for the first convolution block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
preactivate):
super(IRevUnit, self).__init__()
if not preactivate:
in_channels = in_channels // 2
padding = 2 * (out_channels - in_channels)
self.do_padding = (padding != 0) and (stride == 1)
self.do_downscale = (stride != 1)
with self.init_scope():
if self.do_padding:
self.pad = IRevInjectivePad(padding)
self.bottleneck = IRevBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
preactivate=preactivate)
if self.do_downscale:
self.psi = IRevDownscale(stride)
def __call__(self, x1, x2):
if self.do_padding:
x = F.concat((x1, x2), axis=1)
x = self.pad(x)
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
fx2 = self.bottleneck(x2)
if self.do_downscale:
x1 = self.psi(x1)
x2 = self.psi(x2)
y1 = fx2 + x1
return x2, y1
def inverse(self, x2, y1):
if self.do_downscale:
x2 = self.psi.inverse(x2)
fx2 = - self.bottleneck(x2)
x1 = fx2 + y1
if self.do_downscale:
x1 = self.psi.inverse(x1)
if self.do_padding:
x = F.concat((x1, x2), axis=1)
x = self.pad.inverse(x)
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
class IRevPostActivation(Chain):
"""
iRevNet specific post-activation block.
Parameters:
----------
in_channels : int
Number of input channels.
"""
def __init__(self,
in_channels):
super(IRevPostActivation, self).__init__()
with self.init_scope():
self.bn = L.BatchNormalization(
size=in_channels,
eps=1e-5)
self.activ = F.relu
def __call__(self, x):
x = self.bn(x)
x = self.activ(x)
return x
class IRevNet(Chain):
"""
i-RevNet model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
in_channels=3,
in_size=(224, 224),
classes=1000):
super(IRevNet, self).__init__()
assert (in_channels > 0)
self.in_size = in_size
self.classes = classes
with self.init_scope():
self.features = IRevDualPathSequential(
first_ordinals=1,
last_ordinals=2,
last_noninvertible=2)
with self.features.init_scope():
setattr(self.features, "init_block", IRevDownscale(scale=2))
in_channels = init_block_channels
setattr(self.features, "init_split", IRevSplitBlock())
for i, channels_per_stage in enumerate(channels):
stage = IRevDualPathSequential()
with stage.init_scope():
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) else 1
preactivate = not ((i == 0) and (j == 0))
setattr(stage, "unit{}".format(j + 1), IRevUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
preactivate=preactivate))
in_channels = out_channels
setattr(self.features, "stage{}".format(i + 1), stage)
in_channels = final_block_channels
setattr(self.features, "final_merge", IRevMergeBlock())
setattr(self.features, "final_postactiv", IRevPostActivation(in_channels=in_channels))
setattr(self.features, "final_pool", partial(
F.average_pooling_2d,
ksize=7,
stride=1))
self.output = SimpleSequential()
with self.output.init_scope():
setattr(self.output, "flatten", partial(
F.reshape,
shape=(-1, in_channels)))
setattr(self.output, "fc", L.Linear(
in_size=in_channels,
out_size=classes))
def __call__(self, x, return_out_bij=False):
x, out_bij = self.features(x)
x = self.output(x)
if return_out_bij:
return x, out_bij
else:
return x
def inverse(self, out_bij):
x, _ = self.features.inverse(out_bij)
return x
def get_irevnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join('~', '.chainer', 'models'),
**kwargs):
"""
Create i-RevNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if blocks == 301:
layers = [6, 16, 72, 6]
else:
raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 1 == blocks)
channels_per_layers = [24, 96, 384, 1536]
init_block_channels = 12
final_block_channels = 3072
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = IRevNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def irevnet301(**kwargs):
"""
i-RevNet-301 model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_irevnet(blocks=301, model_name="irevnet301", **kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
pretrained = False
models = [
irevnet301,
]
for model in models:
net = model(pretrained=pretrained)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != irevnet301 or weight_count == 125120356)
x = np.random.rand(2, 3, 224, 224).astype(np.float32)
y = net(x)
assert (y.shape == (2, 1000))
y, out_bij = net(x, return_out_bij=True)
x_ = net.inverse(out_bij)
assert (x_.shape == (2, 3, 224, 224))
assert (np.max(np.abs(x - x_.array)) < 1e-3)
if __name__ == "__main__":
_test()
|
[
"osemery@gmail.com"
] |
osemery@gmail.com
|
cbdb2c29d50802280c7f7b520ddb6e0fc8def24b
|
04b07645b8f9bf9e7cddf831f97c91fa241f4fca
|
/exploits/OpenRedirect03.py
|
a187db31912d6dac1aa834798ad488b412058341
|
[] |
no_license
|
kowshik-sundararajan/Assignment-3
|
2248db4c4e93d78c07e9049da02aff1096d19530
|
b1c8014315d0e89abf535be024046cbddd186ff6
|
refs/heads/master
| 2020-03-15T06:21:59.476645
| 2018-04-24T14:17:21
| 2018-04-24T14:17:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import os
import time
# Path to the locally installed chromedriver binary.
chromedriver = "/usr/lib/chromium-browser/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
browser = webdriver.Chrome(chromedriver)
# Open-redirect proof of concept: the "target" query parameter sends the
# browser to an attacker-chosen external URL (here status.github.com).
browser.get("http://ec2-13-250-106-60.ap-southeast-1.compute.amazonaws.com:8082/escape?target=https%3A%2F%2Fstatus.github.com/messages")
|
[
"anusha.anandan@gmail.com"
] |
anusha.anandan@gmail.com
|
fd4c566533abd145d2dda3e620a145553cd6a382
|
721ba9724a60997b4b4761b3a3c8931ae3a949a9
|
/ipfs_video_index/ipfs_indexer/__main__.py
|
a165cc293f084a1974b318c9e3a74a4b2af9a33e
|
[] |
no_license
|
bneijt/ipfs-video-index
|
f058416e45ff2f8e9877e81fd88200281bf0bafb
|
555c5d25fe95456dd4e5aa9ca0b04aa8736633e2
|
refs/heads/main
| 2023-07-26T02:24:40.108876
| 2021-08-21T20:22:54
| 2021-08-21T20:22:54
| 395,742,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
import argparse
import time
from dqp.queue import Project
from loguru import logger
from ipfs_video_index.ipfs_indexer.procs import pipeline
def main():
    """Run the indexing pipeline over a project folder, optionally forever."""
    arg_parser = argparse.ArgumentParser(description="Process some integers.")
    arg_parser.add_argument(
        "--loop", action="store_true", help="Loop processing the project continuously"
    )
    arg_parser.add_argument(
        "project_path",
        metavar="PROJECT_PATH",
        type=str,
        help="Location of the project storage folder",
    )
    options = arg_parser.parse_args()

    while True:
        logger.info(f"Executing pipeline on {options.project_path}")
        with Project(options.project_path) as project:
            # Log-and-continue: a pipeline failure must not kill the loop.
            with logger.catch(reraise=False):
                pipeline(project)
        if not options.loop:
            break
        # Throttle continuous mode to one pass per minute.
        time.sleep(60)


if __name__ == "__main__":
    main()
|
[
"bram@neijt.nl"
] |
bram@neijt.nl
|
901fa07849d22ed8b30cf47e067a33598d238cf6
|
916480ae24345193efa95df013f637e0a115653b
|
/web/transiq/api/management/commands/save_blackbuck_data.py
|
ef02a5eccb3223c25f6cb6c0b3b3b085eb722b2e
|
[
"Apache-2.0"
] |
permissive
|
manibhushan05/tms
|
50e289c670e1615a067c61a051c498cdc54958df
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
refs/heads/master
| 2022-12-11T07:59:30.297259
| 2021-09-08T03:24:59
| 2021-09-08T03:24:59
| 210,017,184
| 0
| 0
|
Apache-2.0
| 2022-12-08T02:35:01
| 2019-09-21T16:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 303
|
py
|
from django.core.management.base import BaseCommand
from api.blackbuck import fetch_blackbuck_data
class Command(BaseCommand):
    """Django management command that fetches and saves Blackbuck data."""
    args = 'Arguments not needed'
    help = 'Django admin command to save blackbuck data'

    def handle(self, *args, **options):
        # clean=True presumably removes stale rows before saving -- TODO confirm
        # against fetch_blackbuck_data's implementation.
        fetch_blackbuck_data(clean=True)
|
[
"mani@myhost.local"
] |
mani@myhost.local
|
f2852368a7d8c04e5cdcb07120a99934cf4f1b4c
|
430fcb1b9103f4d26986a00f7ce60d507bcd13c9
|
/sdsparser/errors.py
|
f553bae797ce9435deb213663d6b064b1f2b0b83
|
[
"MIT"
] |
permissive
|
astepe/sds_parser
|
49f7c91a79a1970017616bcd7046343881464c97
|
129935f7876f8ddeec7771689d4bbc50059b13ca
|
refs/heads/master
| 2023-03-31T22:45:29.375447
| 2023-03-16T18:17:58
| 2023-03-16T18:17:58
| 160,744,163
| 13
| 5
|
MIT
| 2022-11-22T11:36:25
| 2018-12-06T23:13:24
|
Python
|
UTF-8
|
Python
| false
| false
| 561
|
py
|
class FileMatchNotFound(Exception):
    """Raised when no .txt file matching *file_name* exists in *target_directory*."""

    def __init__(self, file_name, target_directory):
        # super().__init__ is the modern idiom for initializing the base Exception.
        super().__init__(f'No matching .txt file found for {file_name} in {target_directory}')
class TextDirectoryDoesNotExist(Exception):
    """Raised when the expected txt directory for *target_directory* is missing."""

    def __init__(self, target_directory):
        # super().__init__ is the modern idiom for initializing the base Exception.
        super().__init__(f'No matching txt directory found for {target_directory}')
class ManufacturerNotSupported(Exception):
    """Raised when *manufacturer_name* has no supported SDS parser."""

    def __init__(self, manufacturer_name):
        # super().__init__ is the modern idiom for initializing the base Exception.
        super().__init__(f'{manufacturer_name} is not a currently supported manufacturer')
|
[
"arisstepe@gmail.com"
] |
arisstepe@gmail.com
|
e966083dfa346bd6ad2fe9466ac0dd52e26279d8
|
84f67171337704d4fc10542a2705af892ebce1b2
|
/blog/views.py
|
18cf7c69ef3b66d183b6f50cc65d644096285c35
|
[] |
no_license
|
ZahedAli97/my-first-blog
|
64acef6aebf8de8e7e239e12143dcb0a5f699ea5
|
e5c05bac1125c79ad4b6d56a53be0bfeb1b8cc9d
|
refs/heads/master
| 2020-04-25T01:41:29.329528
| 2019-03-05T16:26:26
| 2019-03-05T16:26:26
| 172,415,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Post
from .forms import PostForm
from django.shortcuts import redirect
# Create your views here.
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()
).order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
if request.method == "POST":
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
|
[
"zahedmiralirh@gmail.com"
] |
zahedmiralirh@gmail.com
|
f453a36bcee504e1fc87eb4a16f5709e38556740
|
415a8a4315e6331b2a157de8a1429fe0562729f8
|
/python/TryCatch.py
|
75b958f68079d3ef596723ff8107b476c5de2643
|
[] |
no_license
|
alfaceor/programming-examples
|
784690dd1104e4adbdf958e4163b3b462f635881
|
abea970a54cfab0eacc5280ae62383495e9e6eeb
|
refs/heads/master
| 2022-05-04T23:14:30.503114
| 2022-04-29T10:11:45
| 2022-04-29T10:11:45
| 36,015,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
#!/usr/bin/python
import numpy as np
for i in range(4):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "Oops!"
pass
for i in range(5):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "NO PASS!"
break
print "Fuck U!!!"
|
[
"alfaceor"
] |
alfaceor
|
ca8bebbb101f6f0a7ea337d1d0365016720fb6b6
|
cf09d6430e37b5460d7208d6cae6d3af0fa15925
|
/jsonbot/jsb/lib/config.py
|
87dc5bc6d9858b7446409e090713d466ea5d113e
|
[
"MIT"
] |
permissive
|
Lujeni/old-projects
|
2bbf0ff89852a3e4a9677475a615d2ee4b07d635
|
657304c8b017a98935de9728fc695abe8be7cc4f
|
refs/heads/master
| 2021-03-12T23:08:34.054777
| 2014-10-16T23:10:15
| 2014-10-16T23:10:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,266
|
py
|
# jsb/lib/config.py
#
#
""" config module. config is stored as item = JSON pairs. """
## jsb imports
from jsb.utils.trace import whichmodule, calledfrom
from jsb.utils.lazydict import LazyDict
from jsb.utils.exception import handle_exception
from jsb.utils.name import stripname
from datadir import getdatadir
from errors import CantSaveConfig, NoSuchFile
from jsb.utils.locking import lockdec
## simplejson imports
from jsb.imports import getjson
json = getjson()
## basic imports
import sys
import os
import types
import thread
import logging
import uuid
import thread
import getpass
import copy
import time
## locks
savelock = thread.allocate_lock()
savelocked = lockdec(savelock)
## defines
cpy = copy.deepcopy
## classes
class Config(LazyDict):
"""
config class is a dict containing json strings. is writable to file
and human editable.
"""
def __init__(self, filename, verbose=False, input={}, ddir=None, nolog=False, *args, **kw):
assert filename
LazyDict.__init__(self, input, *args, **kw)
self.origname = filename
self.origdir = ddir or getdatadir()
self.setcfile(ddir, filename)
self.jsondb = None
if not self._comments: self._comments = {}
try:
import waveapi
self.isdb = True
self.isgae = True
except ImportError:
self.isgae = False
self.isdb = False
dodb = False
try:
logging.info("fromfile - %s from %s" % (self.origname, whichmodule(2)))
self.fromfile(self.cfile)
except IOError, ex: handle_exception() ; dodb = True
if dodb or (self.isgae and not "mainconfig" in filename):
try:
from persist import Persist
self.jsondb = Persist(self.cfile)
if self.jsondb: self.merge(self.jsondb.data)
logging.warn("fromdb - %s" % self.cfile)
except ImportError:
logging.warn("can't read config from %s - %s" % (self.cfile, str(ex)))
self.init()
if self.owner: logging.info("owner is %s" % self.owner)
if not self.has_key("uuid"): self.setuuid()
if not self.has_key("cfile"): self.cfile = self.setcfile(self.origdir, self.origname)
assert self.cfile
def setcfile(self, ddir, filename):
self.filename = filename or 'mainconfig'
self.datadir = ddir or getdatadir()
self.dir = self.datadir + os.sep + 'config'
self.cfile = self.dir + os.sep + filename
def setuuid(self, save=True):
logging.debug("setting uuid")
self.uuid = str(uuid.uuid4())
if save: self.save()
def __deepcopy__(self, a):
""" accessor function. """
cfg = Config(self.filename, input=self, nolog=True)
return cfg
def __getitem__(self, item):
""" accessor function. """
if not self.has_key(item): return None
else: return LazyDict.__getitem__(self, item)
def merge(self, cfg):
""" merge in another cfg. """
self.update(cfg)
def set(self, item, value):
""" set item to value. """
LazyDict.__setitem__(self, item, value)
def fromdb(self):
""" read config from database. """
from jsb.lib.persist import Persist
tmp = Persist(self.cfile)
logging.debug("fromdb - %s - %s" % (self.cfile, tmp.data.tojson()))
self.update(tmp.data)
def todb(self):
""" save config to database. """
cp = dict(self)
del cp['jsondb']
if not self.jsondb:
from jsb.lib.persist import Persist
self.jsondb = Persist(self.cfile)
self.jsondb.data = cp
self.jsondb.save()
def fromfile(self, filename=None):
""" read config object from filename. """
curline = ""
fname = filename or self.cfile
if not fname: raise Exception(" %s - %s" % (self.cfile, self.dump()))
if not os.path.exists(fname): logging.warn("config file %s doesn't exist yet" % fname) ; return False
comment = ""
for line in open(fname, 'r'):
curline = line
curline = curline.strip()
if curline == "": continue
if curline.startswith('#'): comment = curline; continue
if True:
try:
key, value = curline.split('=', 1)
kkey = key.strip()
self[kkey] = json.loads(unicode(value.strip()))
if comment: self._comments[kkey] = comment
comment = ""
except ValueError: logging.error("skipping line - unable to parse: %s" % line)
#self.cfile = fname
return
def tofile(self, filename=None, stdout=False):
""" save config object to file. """
if not filename: filename = self.cfile
if not filename: raise Exception("no cfile found - %s" % whichmodule(3))
if self.isgae: logging.warn("can't save config file %s on GAE" % filename) ; return
logging.warn("saving %s" % filename)
if filename.startswith(os.sep): d = [os.sep,]
else: d = []
for p in filename.split(os.sep)[:-1]:
if not p: continue
d.append(p)
ddir = os.sep.join(d)
if not os.path.isdir(ddir):
logging.debug("persist - creating %s dir" % ddir)
try: os.mkdir(ddir)
except OSError, ex:
logging.error("persist - not saving - failed to make %s - %s" % (ddir, str(ex)))
return
written = []
curitem = None
later = []
try:
if stdout: configtmp = sys.stdout
else: configtmp = open(filename + '.tmp', 'w')
configtmp.write('# ===========================================================\n#\n')
configtmp.write("# JSONBOT CONFIGURATION FILE - %s\n" % filename)
configtmp.write("#\n")
configtmp.write('# last changed on %s\n#\n' % time.ctime(time.time()))
configtmp.write("# This file contains configration data for the JSONBOT.\n")
configtmp.write('# Variables are defined by "name = json value" pairs.\n')
configtmp.write('# Make sure to use " in strings.\n#\n')
configtmp.write('# The bot can edit this file!.\n#\n')
configtmp.write('# ===========================================================\n\n')
teller = 0
keywords = self.keys()
keywords.sort()
for keyword in keywords:
value = self[keyword]
if keyword in written: continue
if keyword in ['isgae', 'origdir', 'origname', 'issaved', 'blacklist', 'whitelist', 'followlist', 'uuid', 'whitelist', 'datadir', 'name', 'createdfrom', 'cfile', 'filename', 'dir', 'isdb']: later.append(keyword) ; continue
if keyword == 'jsondb': continue
if keyword == 'optionslist': continue
if keyword == 'gatekeeper': continue
if keyword == "_comments": continue
if self._comments and self._comments.has_key(keyword):
configtmp.write(self._comments[keyword] + u"\n")
curitem = keyword
try: configtmp.write('%s = %s\n' % (keyword, json.dumps(value)))
except TypeError: logging.error("%s - can't serialize %s" % (filename, keyword)) ; continue
teller += 1
#configtmp.write("\n")
configtmp.write('\n\n# ============================================================\n#\n')
configtmp.write("# bot generated stuff.\n#\n")
configtmp.write('# ============================================================\n\n')
for keyword in later:
if self._comments and self._comments.has_key(keyword):
configtmp.write(self._comments[keyword] + u"\n")
curitem = keyword
value = self[keyword]
try: configtmp.write(keyword + " = " + json.dumps(value) + "\n")
except TypeError: logging.error("%s - can't serialize %s" % (filename, keyword)) ; continue
teller += 1
#configtmp.write("\n")
if not "mainconfig" in filename and self._comments:
try:
configtmp.write('\n\n# ============================================================\n#\n')
configtmp.write("# possible other config variables.\n#\n")
configtmp.write('# ============================================================\n\n')
items = self._comments.keys()
keys = self.keys()
do = []
for var in items:
if var not in keys: do.append(var)
do.sort()
for var in do:
configtmp.write(u"# %s -=- %s\n" % (var, self._comments[var]))
configtmp.write("\n\n")
except Exception, ex: handle_exception()
else: configtmp.write("\n\n# jsonbot can run multiple bots at once. see %s/config/fleet for their configurations.\n\n" % self.origdir)
if not stdout:
configtmp.close()
os.rename(filename + '.tmp', filename)
return teller
except Exception, ex:
handle_exception()
logging.error("ERROR WRITING %s CONFIG FILE: %s .. %s" % (self.cfile, str(ex), curitem))
@savelocked
def save(self):
""" save the config. """
logging.info("save called from %s" % calledfrom(sys._getframe(2)))
self.issaved = True
if self.isdb: self.todb()
else: self.tofile(self.cfile)
def load_config(self, verbose=False):
""" load the config file. """
if self.isdb: self.fromdb()
else: self.fromfile(self.filename)
self.init()
if verbose: logging.debug('%s' % self.dump())
def init(self):
""" initialize the config object. """
if not self._comments: self._comments = {}
if self.filename == 'mainconfig':
self._comments["whitelist"] = "# - whitelist used to allow ips .. bot maintains this"
self.setdefault("whitelist", [])
self._comments["blacklist"] = "# - blacklist used to deny ips .. bot maintains this"
self.setdefault("blacklist", [])
self.setdefault('owner', [])
self._comments["loglist"] = "# - loglist .. maintained by the bot."
self.setdefault('loglist', [])
self._comments["loglevel"] = "# - loglevel of all bots"
self.setdefault('loglevel', "warn")
self._comments["loadlist"] = "# - loadlist .. not used yet."
self.setdefault('loadlist', [])
self._comments["quitmsg"] = "# - message to send on quit"
self.setdefault('quitmsg', "http://jsonbot.googlecode.com")
self._comments["dotchars"] = "# - characters to used as seperator."
self.setdefault('dotchars', ", ")
self._comments["floodallow"] = "# - whether the bot is allowed to flood."
self.setdefault('floodallow', 1)
self._comments["auto_register"] = "# - enable automatic registration of new users."
self.setdefault('auto_register', 0)
self._comments["guestasuser"] = "# - enable this to give new users the USER permission besides GUEST."
self.setdefault('guestasuser', 0)
self._comments["globalcc"] = "# - global control character"
self.setdefault('globalcc', "")
self._comments["app_id"] = "# - application id used by appengine."
self.setdefault('app_id', "jsonbot")
self._comments["appname"] = "# - application name as used by the bot."
self.setdefault('appname', "JSONBOT")
self._comments["domain"] = "# - domain .. used for WAVE."
self.setdefault('domain', "")
self._comments["color"] = "# - color used in the webconsole."
self.setdefault('color', "")
self._comments["colors"] = "# - enable colors in logging."
self.setdefault('colors', "")
self._comments["memcached"] = "# - enable memcached."
self.setdefault('memcached', 0)
self._comments["allowrc"] = "# - allow execution of rc files."
self.setdefault('allowrc', 0)
self._comments["allowremoterc"] = "# - allow execution of remote rc files."
self.setdefault('allowremoterc', 0)
self._comments['dbenable'] = "# - enable database support"
self.setdefault('dbenable', 0)
self._comments['dbtype'] = "# - type of database .. sqlite or mysql at this time."
self.setdefault('dbtype', 'sqlite')
self._comments['dbname'] = "# - database name"
self.setdefault('dbname', "main.db")
self._comments['dbhost'] = "# - database hostname"
self.setdefault('dbhost', "localhost")
self._comments['dbuser'] = "# - database user"
self.setdefault('dbuser', "bart")
self._comments['dbpasswd'] = "# - database password"
self.setdefault('dbpasswd', "mekker2")
self._comments['ticksleep'] = "# - nr of seconds to sleep before creating a TICK event."
self.setdefault('ticksleep', 1)
self._comments['bindhost'] = "# - host to bind to"
self.setdefault("bindhost", "")
self._comments['defaultcc'] = "# - host to bind to"
self.setdefault("defaultcc", ".")
self['createdfrom'] = whichmodule()
if 'xmpp' in self.cfile: self.setdefault('fulljids', 1)
if 'fleet' in self.cfile:
self.setdefault('disable', 1)
self.setdefault("owner", [])
self.setdefault("user", "")
self.setdefault("host", "")
self.setdefault("server", "")
self.setdefault("ssl", 0)
self.setdefault("ipv6", 0)
self.setdefault("channels", [])
self.setdefault("port", "")
self.setdefault("password", "")
self._comments['datadir'] = "# - directory to store bot data in."
self._comments["owner"] = "# - owner of the bot."
self._comments["uuid"] = "# - bot generated uuid for this config file."
self._comments["user"] = "# - user used to login on xmpp networks."
self._comments["host"] = "# - host part of the user, derived from user var."
self._comments["server"] = "# - server to connect to (only when different from users host)."
self._comments["password"] = "# - password to use in authing the bot."
self._comments["port"] = "# - port to connect to (IRC)."
self._comments["ssl"] = "# - whether to enable ssl (set to 1 to enable)."
self._comments["ipv6"] = "# - whether to enable ssl (set to 1 to enable)."
self._comments["name"] = "# - the name of the bot."
self._comments["disable"] = "# - set this to 0 to enable the bot."
self._comments["followlist"] = "# - who to follow on the bot .. bot maintains this list."
self._comments["networkname"] = "# - networkname .. not used right now."
self._comments["type"] = "# - the bot's type."
self._comments["nick"] = "# - the bot's nick."
self._comments["channels"] = "# - channels to join."
self._comments["cfile"] = "# - filename of this config file. edit this when you move this file."
self._comments["createdfrom"] = "# - function that created this config file. bot generated"
self._comments["dir"] = "# - directory in which this config file lives."
self._comments["isdb"] = "# - whether this config file lives in the database and not on file."
self._comments["filename"] = "# - filename of this config file."
self._comments["username"] = "# - username of the bot."
self._comments["fulljids"] = "# - use fulljids of bot users (used in non anonymous conferences."
self._comments["servermodes"] = "# - string of modes to send to the server after connect."
self._comments["realname"] = "# - name used in the ident of the bot."
self._comments["onconnect"] = "# - string to send to server after connect."
self._comments["onconnectmode"] = "# - MODE string to send to server after connect."
self._comments["realname"] = "# - mode string to send to the server after connect."
self._comments["issaved"] = "# - whether this config file has been saved. "
self._comments["origdir"] = "# - original datadir for this configfile. "
self._comments["origname"] = "# - displayable name of the config file name. "
return self
def reload(self):
""" reload the config file. """
self.load_config()
return self
def ownercheck(userhost):
""" check whether userhost is a owner. """
if not userhost: return False
if userhost in cfg['owner']: return True
return False
mainconfig = None
def getmainconfig(ddir=None):
global mainconfig
if not mainconfig: mainconfig = Config("mainconfig", ddir=ddir)
if not mainconfig.has_key("issaved"): mainconfig.save()
return mainconfig
irctemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - port to connect to (IRC).
port = 6667
# - server to connect to (on jabber only when different that host.
server = "localhost"
# - the bot's type.
type = "irc"
# - username of the bot.
username = "jsonbot"
# - ssl enabled or not
ssl = 0
# - ipv6 enabled or not
ipv6 = 0
# - name use in ident of the bot
realname = "jsonbot"
# - string of modes send to the server on connect
servermodes = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
xmpptemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - channels to join
channels = []
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - use fulljids of bot users (used in non anonymous conferences.
fulljids = 1
# password used to auth on the server.
password = ""
# - server to connect to (on jabber only when different that users host.
server = ""
# - the bot's type.
type = "sxmpp"
# - user used to login on xmpp networks.
user = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
sleektemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - channels to join
channels = []
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - use fulljids of bot users (used in non anonymous conferences.
fulljids = 1
# password used to auth on the server.
password = ""
# - server to connect to (on jabber only when different that users host.
server = ""
# - the bot's type.
type = "sleek"
# - user used to login on xmpp networks.
user = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
def makedefaultconfig(type, ddir=None):
filename = 'config'
datadir = ddir or getdatadir()
dir = datadir + os.sep + 'config'
ttype = "default-%s" % type
cfile = dir + os.sep + "fleet" + os.sep + ttype + os.sep + filename
logging.warn("creating default config for type %s in %s" % (type, cfile))
splitted = cfile.split(os.sep)
mdir = ""
for i in splitted[:-1]:
mdir += "%s%s" % (i, os.sep)
if not os.path.isdir(mdir): os.mkdir(mdir)
logging.debug("filename is %s" % cfile)
f = open(cfile, "w")
if type == "irc": f.write(irctemplate) ; f.close()
elif type == "sxmpp": f.write(xmpptemplate) ; f.close()
elif type == "sleek": f.write(sleektemplate) ; f.close()
else: raise Exception("no such bot type: %s" % type)
|
[
"julien@thebault.co"
] |
julien@thebault.co
|
e7d358ae2ca583464f854ea25d0791bc92dfc215
|
4181ae5cb29ea85457070aff76c6b7fcd0708b3c
|
/chapter_08/series_exponential_log.py
|
08f3b154f691b5e29cb7d134b860fddc4781546f
|
[] |
no_license
|
bgbutler/TimeSeriesBook
|
f2ff2ac820d9becee274ac88da42077d10dd7cae
|
c44ff4389d2586fb5ca91fd57db8ae3c3b1e425b
|
refs/heads/master
| 2020-06-08T04:20:38.914960
| 2019-06-21T20:41:49
| 2019-06-21T20:41:49
| 193,156,798
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
# log transform a contrived exponential time series
from matplotlib import pyplot
from math import exp
from numpy import log
series = [exp(i) for i in range(1,100)]
transform = log(series)
pyplot.figure(1)
# line plot
pyplot.subplot(211)
pyplot.plot(transform)
# histogram
pyplot.subplot(212)
pyplot.hist(transform)
pyplot.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
9ac072a2202de51e2724dde17e37fd47ab045be3
|
c3243f9b925324fcc036b11125233f4b634e19ee
|
/config.py
|
41e7de63a4f6029163ee6234398affa2b0c646d7
|
[] |
no_license
|
jacksonschwarz/socnet-project
|
b237dadde5e72d8b9f852d83061fa1e3ea6b2876
|
09196c66c57dc984766198ecc48e49adb8d268a1
|
refs/heads/master
| 2020-06-26T02:33:02.320915
| 2019-07-29T17:39:16
| 2019-07-29T17:39:16
| 199,499,257
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
class keys():
consumer_key="8IOO5WAeQExvzpHBhLFV2V1eK"
consumer_secret="rjnxXnu2mfUdDY1bf5RdeyDNgJzka0WsXuLcKuCCmOjIRBJ4mS"
access_token="399061958-kr8PAZVPkzeH2AHxkzK7l3HVrPUXfnbLjYgCdhs9"
access_token_secret="t1cHMQvCU9wfifevxKnbhTGqKD3qGLqWwWnDOjiqmFZFX"
|
[
"jackson.schwarz0@gmail.com"
] |
jackson.schwarz0@gmail.com
|
5d0ee6b0dc39b9f92bdb2eef54ed35b8d54a32c9
|
696e35ccdf167c3f6b1a7f5458406d3bb81987c9
|
/content/test/gpu/gpu_tests/gpu_integration_test.py
|
9f991d50206a63aaaaa763e1e5b49a5a4108a461
|
[
"BSD-3-Clause"
] |
permissive
|
mgh3326/iridium-browser
|
064e91a5e37f4e8501ea971483bd1c76297261c3
|
e7de6a434d2659f02e94917be364a904a442d2d0
|
refs/heads/master
| 2023-03-30T16:18:27.391772
| 2019-04-24T02:14:32
| 2019-04-24T02:14:32
| 183,128,065
| 0
| 0
|
BSD-3-Clause
| 2019-11-30T06:06:02
| 2019-04-24T02:04:51
| null |
UTF-8
|
Python
| false
| false
| 10,996
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.testing import serially_executed_browser_test_case
from telemetry.util import screenshot
from gpu_tests import exception_formatter
from gpu_tests import gpu_test_expectations
class GpuIntegrationTest(
serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
_cached_expectations = None
_also_run_disabled_tests = False
# Several of the tests in this directory need to be able to relaunch
# the browser on demand with a new set of command line arguments
# than were originally specified. To enable this, the necessary
# static state is hoisted here.
# We store a deep copy of the original browser finder options in
# order to be able to restart the browser multiple times, with a
# different set of command line arguments each time.
_original_finder_options = None
# We keep track of the set of command line arguments used to launch
# the browser most recently in order to figure out whether we need
# to relaunch it, if a new pixel test requires a different set of
# arguments.
_last_launched_browser_args = set()
@classmethod
def SetUpProcess(cls):
super(GpuIntegrationTest, cls).SetUpProcess()
cls._original_finder_options = cls._finder_options.Copy()
@classmethod
def AddCommandlineArgs(cls, parser):
"""Adds command line arguments understood by the test harness.
Subclasses overriding this method must invoke the superclass's
version!"""
parser.add_option(
'--also-run-disabled-tests',
dest='also_run_disabled_tests',
action='store_true', default=False,
help='Run disabled tests, ignoring Skip and Fail expectations')
@classmethod
def CustomizeBrowserArgs(cls, browser_args):
"""Customizes the browser's command line arguments.
NOTE that redefining this method in subclasses will NOT do what
you expect! Do not attempt to redefine this method!
"""
if not browser_args:
browser_args = []
cls._finder_options = cls._original_finder_options.Copy()
browser_options = cls._finder_options.browser_options
# A non-sandboxed, 15-seconds-delayed gpu process is currently running in
# the browser to collect gpu info. A command line switch is added here to
# skip this gpu process for all gpu integration tests to prevent any
# interference with the test results.
browser_args.append(
'--disable-gpu-process-for-dx12-vulkan-info-collection')
# Append the new arguments.
browser_options.AppendExtraBrowserArgs(browser_args)
cls._last_launched_browser_args = set(browser_args)
cls.SetBrowserOptions(cls._finder_options)
@classmethod
def RestartBrowserIfNecessaryWithArgs(cls, browser_args, force_restart=False):
if not browser_args:
browser_args = []
elif '--disable-gpu' in browser_args:
# Some platforms require GPU process, so browser fails to launch with
# --disable-gpu mode, therefore, even test expectations fail to evaluate.
browser_args = list(browser_args)
os_name = cls.browser.platform.GetOSName()
if os_name == 'android' or os_name == 'chromeos':
browser_args.remove('--disable-gpu')
if force_restart or set(browser_args) != cls._last_launched_browser_args:
logging.info('Restarting browser with arguments: ' + str(browser_args))
cls.StopBrowser()
cls.CustomizeBrowserArgs(browser_args)
cls.StartBrowser()
@classmethod
def RestartBrowserWithArgs(cls, browser_args):
cls.RestartBrowserIfNecessaryWithArgs(browser_args, force_restart=True)
# The following is the rest of the framework for the GPU integration tests.
@classmethod
def GenerateTestCases__RunGpuTest(cls, options):
cls._also_run_disabled_tests = options.also_run_disabled_tests
for test_name, url, args in cls.GenerateGpuTests(options):
yield test_name, (url, test_name, args)
@classmethod
def StartBrowser(cls):
# We still need to retry the browser's launch even though
# desktop_browser_finder does so too, because it wasn't possible
# to push the fetch of the first tab into the lower retry loop
# without breaking Telemetry's unit tests, and that hook is used
# to implement the gpu_integration_test_unittests.
for x in range(0, 3):
try:
super(GpuIntegrationTest, cls).StartBrowser()
cls.tab = cls.browser.tabs[0]
return
except Exception:
logging.warning('Browser start failed (attempt %d of 3)', (x + 1))
# If we are on the last try and there is an exception take a screenshot
# to try and capture more about the browser failure and raise
if x == 2:
url = screenshot.TryCaptureScreenShotAndUploadToCloudStorage(
cls.platform)
if url is not None:
logging.info("GpuIntegrationTest screenshot of browser failure " +
"located at " + url)
else:
logging.warning("GpuIntegrationTest unable to take screenshot")
raise
# Otherwise, stop the browser to make sure it's in an
# acceptable state to try restarting it.
if cls.browser:
cls.StopBrowser()
@classmethod
def _RestartBrowser(cls, reason):
logging.warning('Restarting browser due to '+ reason)
cls.StopBrowser()
cls.SetBrowserOptions(cls._finder_options)
cls.StartBrowser()
def _RunGpuTest(self, url, test_name, *args):
expectations = self.__class__.GetExpectations()
expectation = expectations.GetExpectationForTest(
self.browser, url, test_name)
if self.__class__._also_run_disabled_tests:
# Ignore test expectations if the user has requested it.
expectation = 'pass'
if expectation == 'skip':
# skipTest in Python's unittest harness raises an exception, so
# aborts the control flow here.
self.skipTest('SKIPPING TEST due to test expectations')
try:
# TODO(nednguyen): For some reason the arguments are getting wrapped
# in another tuple sometimes (like in the WebGL extension tests).
# Perhaps only if multiple arguments are yielded in the test
# generator?
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
self.RunActualGpuTest(url, *args)
except Exception:
if expectation == 'pass':
# This is not an expected exception or test failure, so print
# the detail to the console.
exception_formatter.PrintFormattedException()
# Symbolize any crash dump (like from the GPU process) that
# might have happened but wasn't detected above. Note we don't
# do this for either 'fail' or 'flaky' expectations because
# there are still quite a few flaky failures in the WebGL test
# expectations, and since minidump symbolization is slow
# (upwards of one minute on a fast laptop), symbolizing all the
# stacks could slow down the tests' running time unacceptably.
self.browser.LogSymbolizedUnsymbolizedMinidumps(logging.ERROR)
# This failure might have been caused by a browser or renderer
# crash, so restart the browser to make sure any state doesn't
# propagate to the next test iteration.
self._RestartBrowser('unexpected test failure')
raise
elif expectation == 'fail':
msg = 'Expected exception while running %s' % test_name
exception_formatter.PrintFormattedException(msg=msg)
# Even though this is a known failure, the browser might still
# be in a bad state; for example, certain kinds of timeouts
# will affect the next test. Restart the browser to prevent
# these kinds of failures propagating to the next test.
self._RestartBrowser('expected test failure')
return
if expectation != 'flaky':
logging.warning(
'Unknown expectation %s while handling exception for %s',
expectation, test_name)
raise
# Flaky tests are handled here.
num_retries = expectations.GetFlakyRetriesForTest(
self.browser, url, test_name)
if not num_retries:
# Re-raise the exception.
raise
# Re-run the test up to |num_retries| times.
for ii in xrange(0, num_retries):
print 'FLAKY TEST FAILURE, retrying: ' + test_name
try:
# For robustness, shut down the browser and restart it
# between flaky test failures, to make sure any state
# doesn't propagate to the next iteration.
self._RestartBrowser('flaky test failure')
self.RunActualGpuTest(url, *args)
break
except Exception:
# Squelch any exceptions from any but the last retry.
if ii == num_retries - 1:
# Restart the browser after the last failure to make sure
# any state doesn't propagate to the next iteration.
self._RestartBrowser('excessive flaky test failures')
raise
else:
if expectation == 'fail':
logging.warning(
'%s was expected to fail, but passed.\n', test_name)
@classmethod
def GenerateGpuTests(cls, options):
"""Subclasses must implement this to yield (test_name, url, args)
tuples of tests to run."""
raise NotImplementedError
def RunActualGpuTest(self, file_path, *args):
"""Subclasses must override this to run the actual test at the given
URL. file_path is a path on the local file system that may need to
be resolved via UrlOfStaticFilePath.
"""
raise NotImplementedError
@classmethod
def GetExpectations(cls):
if not cls._cached_expectations:
cls._cached_expectations = cls._CreateExpectations()
if not isinstance(cls._cached_expectations,
gpu_test_expectations.GpuTestExpectations):
raise Exception(
'gpu_integration_test requires use of GpuTestExpectations')
return cls._cached_expectations
@classmethod
def _CreateExpectations(cls):
# Subclasses **must** override this in order to provide their test
# expectations to the harness.
#
# Do not call this directly. Call GetExpectations where necessary.
raise NotImplementedError
@classmethod
def _EnsureTabIsAvailable(cls):
try:
cls.tab = cls.browser.tabs[0]
except Exception:
# restart the browser to make sure a failure in a test doesn't
# propagate to the next test iteration.
logging.exception("Failure during browser startup")
cls._RestartBrowser('failure in setup')
raise
def setUp(self):
self._EnsureTabIsAvailable()
def LoadAllTestsInModule(module):
  """Collect every browser test defined in *module*.

  Thin delegation to serially_executed_browser_test_case so that other
  files do not need to import that module themselves.
  """
  return serially_executed_browser_test_case.LoadAllTestsInModule(module)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
afe52020807529d3d426f1f80748977c241334c4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_137/548.py
|
6499377835513cc99a4e2110302f4abf9f61a149
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
# Load precomputed Minesweeper layouts keyed by (rows, cols, mines), then
# answer each test case in the input file by looking up the stored grid.
# Fix: both files were previously opened without ever being closed; the
# `with` blocks close them deterministically.
mines = {}
with open('minesweeper.txt') as data:
    while True:
        header = data.readline().split()
        if len(header) == 0:  # EOF (readline() returns '') ends the table
            break
        R = int(header[0])
        C = int(header[1])
        M = int(header[2])
        first_row = data.readline().strip()
        if first_row == 'Impossible':
            mines[(R, C, M)] = 'Impossible'
        else:
            # The grid spans R lines; the first was already consumed above.
            grid_lines = [first_row + '\n']
            for _ in range(1, R):
                grid_lines.append(data.readline())
            mines[(R, C, M)] = ''.join(grid_lines)

with open('C-small-attempt0.in') as test_data:
    num_tests = int(test_data.readline().strip())
    for test in range(num_tests):
        tokens = test_data.readline().split()
        R = int(tokens[0])
        C = int(tokens[1])
        M = int(tokens[2])
        print('Case #{0}:'.format(test + 1))
        print(mines[(R, C, M)])
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
541814fce90ea9c146f892f9ecf241d6d2ec4278
|
45fbf26618ef3b1fd93bca267032754b8f96603e
|
/src/Server/API/python/offer_passageid.py
|
5398b9441dfeef6019d18f29576c35edb3e74f7c
|
[] |
no_license
|
EricDeng1001/LS
|
d76f53edec08fd35b0856ba1c03f2dd83074275c
|
da4209bffdd9aaffa846c662c594f0065c51341f
|
refs/heads/master
| 2021-09-15T02:12:26.388130
| 2018-05-24T06:27:05
| 2018-05-24T06:27:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
# coding=utf-8
from __future__ import division
import json
import MySQLdb
import time
import datetime
import urllib
import json
from urllib import urlencode
from urllib import quote
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def offer_passageid(user_name):
conn = MySQLdb.Connect(host = '127.0.0.1',user = 'root', passwd = '123456', db = 'gyc_f_e', port = 3306,charset='utf8')
cur = conn.cursor()
sql = 'select recommend_art_id from user WHERE user_name = "%s"' % (user_name)
cur.execute(sql)
passage_id = cur.fetchall()
passageid = passage_id[0][0]
article_id_all = {}
article_id_list = []
article_id_dict = {}
article_id_dict['artid'] = str(passageid)
article_id_list.append(article_id_dict)
article_id_all['article_id'] = article_id_list
jsondata = json.dumps(article_id_all,ensure_ascii = False)
jsondata_sub = jsondata[1:len(jsondata)-1]
print jsondata_sub
#return jsondata_sub
def offer_passageidl():
    """Command-line entry point: print the recommendation for sys.argv[1]."""
    result = offer_passageid(sys.argv[1])
    sys.exit(result)
# Script entry point: expects the user name as the first CLI argument.
if __name__ == '__main__':
    offer_passageidl()
|
[
"gantinus@gmail.com"
] |
gantinus@gmail.com
|
0e224f6a0ff6149cf70f6a426a50cdc40b769be9
|
8d1ceed7720e374691829d78007ea146a9030e4f
|
/arkestra_clinical_studies/lister.py
|
5905346c3c39dba5232bfa745f6c1a2ba387225d
|
[
"BSD-2-Clause"
] |
permissive
|
gonff/arkestra-clinical-studies
|
25ef186207781bbc979f7f12bdef194802d9c71c
|
d75540e006a5d8b1ccb6d05a8253eba9c9fb0a79
|
refs/heads/master
| 2021-01-18T05:10:23.067652
| 2014-05-21T11:19:03
| 2014-05-21T11:19:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,978
|
py
|
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from arkestra_utilities.generic_lister import (
ArkestraGenericLister, ArkestraGenericList, ArkestraGenericFilterList,
ArkestraGenericFilterSet
)
from arkestra_utilities.settings import MULTIPLE_ENTITY_MODE
from .models import Study
# we're going to have a list of Studies that we can search, filter and paginate
# the ArkestraGenericFilterSet provides us with some of that machinery
class StudiesFilterSet(ArkestraGenericFilterSet):
    """FilterSet exposing date/status/type filters for Study lists."""
    # the fields we want to be able to filter on
    fields = ["date", "status", "studytype"]
class StudiesListMixin(object):
    """Shared entity-scoping behaviour for the Study list classes below."""

    def set_items_for_entity(self):
        """Restrict self.items to Studies related to self.entity's subtree.

        No-op unless MULTIPLE_ENTITY_MODE is on and an entity is set.
        """
        # if we're not in MULTIPLE_ENTITY_MODE, just leave self.items alone
        if MULTIPLE_ENTITY_MODE and self.entity:
            # we want to include any item that has any relationship with any
            # of the descendants of the entity we're looking at

            # get a list of all those entities
            entities = self.entity.get_descendants(
                include_self=True
            ).values_list('id', flat=True)

            # get the Studies that have a relationship with any item in that
            # list; distinct() collapses duplicates created by multiple
            # matching relations
            self.items = self.items.filter(
                Q(hosted_by__in=entities) | Q(publish_to__in=entities) |
                Q(funding_body__in=entities) | Q(sponsor__in=entities) |
                Q(clinical_centre__in=entities)
            ).distinct()
# the class that produces the list of items, based on ArkestraGenericFilterList
class StudiesList(StudiesListMixin, ArkestraGenericFilterList):
    """Searchable, filterable, entity-scoped list of Studies."""

    # it must have a filter_set class
    filter_set = StudiesFilterSet

    # the model we're listing
    model = Study

    # the text search fields - each one is a dictionary
    search_fields = [
        {
            # the field as its name appears in the URL: ?text=
            "field_name": "text",
            # a label for the field
            "field_label": "Search title/summary",
            # the placeholder text in the search widget
            "placeholder": "Search",
            # the model fields we want to search through
            "search_keys": [
                "title__icontains",
                "summary__icontains",
            ],
        },
    ]

    # we want to override the generic list item template
    item_template = "clinical_studies/study_list_item.html"

    # we need our own build() method to override the generic one
    def build(self):
        """Assemble self.items and the rendered filter for this request."""
        # get the listable (by default, published and shown in lists) items
        self.items = self.model.objects.listable_objects()

        # we'll limit the items according to the appropriate entity - the
        # method that does this is defined in the StudiesListMixin
        self.set_items_for_entity()

        # and limit by search terms
        self.filter_on_search_terms()

        # and set up the filter for rendering
        self.itemfilter = self.filter_set(self.items, self.request.GET)
# the Lister class is the one that determines which lists to display, along
# with the surrounding furniture - in the case of Studies, it's just one List,
# but we could have more
class StudiesLister(ArkestraGenericLister):
    """Lister rendering the filterable Studies list (single list only)."""
    # a list of available List classes
    listkinds = [("studies", StudiesList)]
    # the List classes we want to use
    display = "studies"
class StudiesMenuList(StudiesListMixin, ArkestraGenericList):
    """Unfiltered, entity-scoped Study list used for menus."""
    model = Study
    # NOTE(review): the heading says "News" inside a Studies lister; this
    # looks copied from a news lister -- confirm the intended heading.
    heading_text = _(u"News")

    def build(self):
        # get the listable (by default, published and shown in lists) items
        self.items = self.model.objects.listable_objects()

        # we'll limit the items according to the appropriate entity - the
        # method that does this is defined in the StudiesListMixin
        self.set_items_for_entity()
class StudiesMenuLister(ArkestraGenericLister):
    """Lister used to populate navigation menus with Studies."""
    listkinds = [("studies", StudiesMenuList)]
    display = "studies"
|
[
"daniele@vurt.org"
] |
daniele@vurt.org
|
5a308f6b7f9ceacdf803dead7dbd5a2dfc85628e
|
9aa1885bfd666b5d3719c29334c9769bbe88d3e0
|
/bin/cache-purge-consumer.py
|
d1bd99303ae097493edde7eadcd860165b207716
|
[] |
permissive
|
praekelt/django-ultracache
|
9c240cfad4660afdb7e679192ca0f4b05bab1831
|
476eb8a4935043f4fc6901ed3541ececed1664bf
|
refs/heads/develop
| 2022-01-27T18:20:00.062349
| 2020-05-29T09:58:01
| 2020-05-29T09:58:01
| 38,880,711
| 32
| 4
|
BSD-3-Clause
| 2022-01-06T22:24:32
| 2015-07-10T13:02:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,973
|
py
|
"""Subscribe to RabbitMQ and listen for purge instructions continuously. Manage
this script through eg. supervisor."""
import json
import traceback
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
from time import sleep
import pika
import requests
import yaml
class Consumer:
    """RabbitMQ consumer that turns "purgatory" messages into HTTP PURGEs.

    Each message names a path (and optionally headers); the consumer
    issues a PURGE request against the configured proxy for that path.
    """

    channel = None
    connection = None

    def __init__(self):
        # Purges run on a thread pool so a slow proxy does not stall
        # message acknowledgement.
        self.pool = ThreadPool()
        parser = OptionParser()
        parser.add_option("-c", "--config", dest="config",
            help="Configuration file", metavar="FILE")
        (options, args) = parser.parse_args()
        config_file = options.config
        self.config = {}
        if config_file:
            # Fix: the original used yaml.load(open(...)) which both leaked
            # the file handle and allowed arbitrary-object YAML tags;
            # safe_load + `with` addresses both.
            with open(config_file) as fp:
                self.config = yaml.safe_load(fp) or {}

    def log(self, msg):
        """Append *msg* to the configured logfile ("stdout" prints instead)."""
        name = self.config.get("logfile", None)
        if not name:
            return
        if name == "stdout":
            print(msg)
            return
        with open(name, "a") as fp:
            fp.write(msg + "\n")

    def connect(self):
        """(Re)establish the RabbitMQ connection and set up consumption.

        Declares the "purgatory" fanout exchange and binds an exclusive,
        auto-named queue to it with a prefetch of one.
        """
        parameters = pika.URLParameters(
            self.config.get(
                "rabbit-url",
                "amqp://guest:guest@127.0.0.1:5672/%2F"
            )
        )
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()
        self.channel.exchange_declare(
            exchange="purgatory", exchange_type="fanout"
        )
        queue = self.channel.queue_declare(exclusive=True)
        queue_name = queue.method.queue
        self.channel.queue_bind(exchange="purgatory", queue=queue_name)
        self.channel.basic_qos(prefetch_count=1)
        self.channel.basic_consume(
            self.on_message, queue=queue_name, no_ack=False, exclusive=True
        )

    def on_message(self, channel, method_frame, header_frame, body):
        """Hand the message body to the pool and ack immediately."""
        self.pool.apply_async(self.handle_message, (body,))
        channel.basic_ack(delivery_tag=method_frame.delivery_tag)

    def handle_message(self, body):
        """Issue a PURGE for the path described by *body*.

        *body* is either a bare path or a JSON object with "path" and
        "headers" keys. Errors are logged, never raised.
        """
        if not body:
            return
        try:
            di = json.loads(body)
        except ValueError:
            path = body
            headers = {}
        else:
            path = di["path"]
            headers = di["headers"]
        self.log("Purging %s with headers %s" % (path, str(headers)))
        host = self.config.get("host", None)
        # The two branches of the original differed only in headers; the
        # URL is the same either way.
        url = "http://" + self.config.get("proxy-address", "127.0.0.1") + path
        if host:
            # Explicit Host header; message-provided headers take priority.
            final_headers = {"Host": host}
            final_headers.update(headers)
        else:
            final_headers = headers
        try:
            requests.request(
                "PURGE", url, headers=final_headers, timeout=10
            )
        except Exception:
            msg = traceback.format_exc()
            self.log("Error purging %s: %s" % (path, msg))

    def consume(self):
        """Consume until interrupted, reconnecting on dropped connections."""
        loop = True
        while loop:
            try:
                if self.channel is None:
                    raise pika.exceptions.ConnectionClosed()
                self.channel.start_consuming()
            except KeyboardInterrupt:
                loop = False
                self.channel.stop_consuming()
            except pika.exceptions.ConnectionClosed:
                try:
                    self.connect()
                except pika.exceptions.ConnectionClosed:
                    sleep(1)
        self.connection.close()
self.connection.close()
# Script body: start consuming immediately; consume() blocks until Ctrl-C.
consumer = Consumer()
consumer.consume()
|
[
"hedleyroos@gmail.com"
] |
hedleyroos@gmail.com
|
4b270fa9d701f65ef4e79353a53e22d43df8424f
|
ad9782856ec2f860fccbefa5e75a896691b8e1cc
|
/MonteCarlo/test/opt6s3l/crab_step2_VBF_HToBB_OT613_200_IT4025_opt6s3l.py
|
8031794a20b9cb961ae352984ee3b6e5b3a772d7
|
[] |
no_license
|
OSU-CMS/VFPix
|
7fe092fc5a973b4f9edc29dbfdf44907664683e5
|
4c9fd903219742a4eba1321dc4181da125616e4c
|
refs/heads/master
| 2020-04-09T05:52:05.644653
| 2019-01-09T13:44:22
| 2019-01-09T13:44:22
| 30,070,948
| 0
| 0
| null | 2018-11-30T13:15:54
| 2015-01-30T12:26:20
|
Python
|
UTF-8
|
Python
| false
| false
| 944
|
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB

# CRAB3 job configuration for step2 (DIGI/L1/L1TrackTrigger/DIGI2RAW/HLT)
# of VBF H->bb samples at 14 TeV with 200 pileup, tracker geometry
# OT613_200_IT4025 opt6s3l.
config = config()

config.General.requestName = 'VBF_HToBB_14TeV_step2_923_PU200_OT613_200_IT4025_opt6s3l'
config.General.workArea = 'crab'
config.General.transferOutputs = True
config.General.transferLogs = True

# The cmsDriver configuration executed for each job.
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'step2_DIGI_L1_L1TrackTrigger_DIGI2RAW_HLT_PU200_OT613_200_IT4025_opt6s3l.py'
config.JobType.maxMemoryMB = 4000

# Input is a privately produced (phys03) GEN-SIM dataset; one file per job.
config.Data.inputDataset = '/VBF_HToBB_14TeV_923_OT613_200_IT4025_opt6s3l/jalimena-LheGenSim_RAWSIMoutput-efeae19cc3c320703c0b5144577e0f10/USER'
config.Data.outputDatasetTag = 'step2_PU200'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/group/lpcfpix'
config.Data.publication = True
# ignoreLocality + whitelist: run at FNAL regardless of input data location.
config.Data.ignoreLocality = True
config.Site.whitelist = ["T1_US_FNAL"]

config.Site.storageSite = 'T3_US_FNALLPC'
|
[
"juliette.alimena@cern.ch"
] |
juliette.alimena@cern.ch
|
bf743c6551b46c870314090cfe6cba4a96f5797a
|
d912186b9806f67a1ee744bf8848a1f9fd11bcee
|
/Keras_Framework_TensorRT/flower_photos/test.py
|
9cd4433375b8c2db35ad6ee2ad6fbe6c0eb616d1
|
[] |
no_license
|
HangJie720/TensorRT_Tensorflow_Keras
|
89297aa12f58b6dfcc2ef29cf9f6a9bc44a49b0c
|
8032a9347929098c8d38ee40e35c538274d7363a
|
refs/heads/master
| 2020-03-19T03:17:30.590481
| 2018-06-01T12:26:57
| 2018-06-01T12:26:57
| 135,714,075
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,753
|
py
|
from tensorrt.lite import Engine
from PIL import Image
import numpy as np
import os
import functools
import time
import cv2
import matplotlib.pyplot as plt
PLAN_single = '/tmp/keras/flower_photos/engine/keras_vgg19_b1_fp32.engine' # engine filename for batch size 1
PLAN_half = '/tmp/keras/flower_photos/engine/keras_vgg19_b1_fp16.engine'
IMAGE_DIR = '/tmp/keras/flower_photos/val/roses'
BATCH_SIZE = 1
def analyze(output_data):
    """Map raw softmax output to (top-class labels, top-class probabilities).

    output_data is reshaped to (num_images, num_labels); for each image the
    best label and its probability are returned.
    """
    LABELS = ["daisy", "dandelion", "roses", "sunflowers", "tulips"]
    scores = output_data.reshape(-1, len(LABELS))
    best = np.argmax(scores, axis=1)
    top_classes = [LABELS[idx] for idx in best]
    top_classes_prob = scores[np.arange(scores.shape[0]), best]
    return top_classes, top_classes_prob
def image_to_np_CHW(image):
    """Resize a PIL *image* to 224x224 and return it as a float32 CHW array."""
    resized = image.resize((224, 224), Image.ANTIALIAS)
    hwc = np.asarray(resized)
    # Network expects channels-first layout.
    return hwc.transpose([2, 0, 1]).astype(np.float32)
def load_and_preprocess_images():
    """Load every file in IMAGE_DIR as a CHW image, grouped into batches.

    Returns an array shaped (num_batches, BATCH_SIZE, C, H, W); trailing
    images that do not fill a complete batch are dropped.
    """
    file_names = [
        f for f in os.listdir(IMAGE_DIR)
        if os.path.isfile(os.path.join(IMAGE_DIR, f))
    ]
    stacked = np.stack([
        image_to_np_CHW(Image.open(os.path.join(IMAGE_DIR, f)))
        for f in file_names
    ])
    num_batches = len(stacked) // BATCH_SIZE
    usable = stacked[0:num_batches * BATCH_SIZE]
    return usable.reshape(
        num_batches,
        BATCH_SIZE,
        stacked.shape[1],
        stacked.shape[2],
        stacked.shape[3],
    )
def timeit(func):
    """Decorator that prints *func*'s wall-clock runtime in milliseconds."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed_ms = int((time.time() - started) * 1000)
        print('function [{}] finished in {} ms'.format(
            func.__name__, elapsed_ms))
        return result
    return wrapper
def load_TRT_engine(plan):
    """Build a TensorRT Engine from *plan*, attaching analyze() as the
    postprocessor for the softmax output layer."""
    return Engine(PLAN=plan, postprocessors={"dense_2/Softmax": analyze})
# Build the FP32 engine and preprocess the validation images up front.
engine_single = load_TRT_engine(PLAN_single)
# engine_half = load_TRT_engine(PLAN_half)
images_trt = load_and_preprocess_images()
@timeit
def infer_all_images_trt(engine):
    """Run *engine* over every preprocessed batch and collect the outputs."""
    return [engine.infer(batch) for batch in images_trt]
# DO inference with TRT
results_trt_single = infer_all_images_trt(engine_single)
# results_trt_half = infer_all_images_trt(engine_half)

# Count how many validation images were classified as "roses" (the
# IMAGE_DIR used here contains only roses). NOTE: the print statements
# below are Python 2 syntax; this script targets Python 2.
correct = 0.0
for i in range(len(results_trt_single)):
    # plt.imshow(images_trt[i, 0, 0], cmap='gray')
    # plt.show()
    print results_trt_single[i][0][0][0]
    # print results_trt_half[i][0][0][0]
    if results_trt_single[i][0][0][0][0]=='roses':
        correct += 1

print ("Inference: {:.2f}% Correct".format((correct / len(results_trt_single)) * 100))
|
[
"hangjie@hangjiedeMacBook-Pro.local"
] |
hangjie@hangjiedeMacBook-Pro.local
|
7ef82813296454bbe47997f3438cf9311b31267a
|
c1488f29f165d65005464a8e0583ec9edf1735b7
|
/test script/deleteNodes.py
|
baef77798df99485f605c10f044475e3a07c167c
|
[] |
no_license
|
KensonN/ground-dash
|
a5fad69f3e77367448b9b0f35fa51549de0a4df3
|
b7ace79d6d83dacf5331d6c4d33a3bb10ad4c2bb
|
refs/heads/master
| 2023-01-06T13:38:20.733628
| 2020-10-31T23:19:14
| 2020-10-31T23:19:14
| 286,333,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 578
|
py
|
#import serial
import pyrebase
import random
from time import time, sleep
from datetime import datetime

# Interactive utility: repeatedly prompt for a trial number and delete the
# matching "Trial N" node from the Firebase realtime database.
# NOTE(review): this API key is committed in source control; rotate it and
# load credentials from the environment or a config file instead.
config = { #firebase authentification
    "apiKey": "AIzaSyDEfkwr7Zl5WucXFluMxB8VIlngUnp7aDM",
    "authDomain": "smv-daq.firebaseapp.com",
    "databaseURL": "https://smv-daq.firebaseio.com",
    "projectId": "smv-daq",
    "storageBucket": "bucket.appspot.com"
}

firebase = pyrebase.initialize_app(config)
db = firebase.database()

# Loops forever by design; exit with Ctrl-C.
while (True):
    trialNum = input("Trial to remove: ")
    db.child("Trial " + trialNum).remove()
    print("Deleted trial " + trialNum + "!")
|
[
"kenson.nguyen3@gmail.com"
] |
kenson.nguyen3@gmail.com
|
057557a86accb533d99a88d08a903f4a4422eb9b
|
6d659cdae13a89c2d2e84b750ba7628e871df846
|
/control.py
|
088e6317a167e5b140b58e239be9f1bec1e09e5a
|
[] |
no_license
|
FosDos/leechybird
|
8bcd545aa5e6151b7ff62347462f691548ac4302
|
ef83761ab1c7113db31540ead35704c4c440cd6f
|
refs/heads/master
| 2022-11-24T23:47:30.519667
| 2020-07-29T17:09:00
| 2020-07-29T17:09:00
| 116,117,040
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# Author: Foster C. Williams
# Email: fosterclarksonwilliams@gmail.com
#github: github.com/fosdos
import time
import datetime
class twitter_timer(object):
    """Track when monitoring of a Twitter user began.

    time_check() reports the user's screen name once 24 hours have
    elapsed since construction, and False before that.
    """

    start_time = None
    user_screen_name = None

    def __init__(self, user_screen_name):
        self.start_time = datetime.datetime.now()
        self.user_screen_name = str(user_screen_name)

    def __str__(self):
        return "Timing {0}, started on: {1}".format(
            self.user_screen_name, self.start_time)

    def time_check(self):
        """Return the screen name after 24h have passed, else False."""
        elapsed = (datetime.datetime.now() - self.start_time).total_seconds()
        if elapsed > 86400:  # 24 hours in seconds
            return self.user_screen_name
        return False

    def time_test(self):
        return str(self.user_screen_name)
|
[
"fosterclarksonwilliams@gmail.com"
] |
fosterclarksonwilliams@gmail.com
|
5b3970c02a50e16d0ceabd306e7e4a70ec1fff8e
|
1e38de9c5761d7917db3338c3d49477ee001618d
|
/airbridge/builder.py
|
d5b1361f2aedbc336b53b6063a2390faff77614f
|
[
"Apache-2.0"
] |
permissive
|
yonglehou/airbridge
|
ef7b61de7e0abcea5eb6470c9dcb9d825f615fea
|
55e97f61dbff11cc9380010885febd8a1c12c4d6
|
refs/heads/master
| 2021-01-15T18:30:47.504796
| 2015-07-09T02:27:33
| 2015-07-09T02:27:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
# -*- config:utf-8 -*-
"""
Copyright 2014 Airbridge
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask
from werkzeug.utils import import_string
from airbridge.common.database import set_db
def create_app(module):
    """Build the Flask app for *module*, wiring config, DB and blueprint.

    The blueprint is resolved by dotted path, so `module` must name a
    package under airbridge with a views submodule exposing a blueprint
    of the same name.
    """
    app = Flask('airbridge')
    app.config.from_pyfile('config.cfg')
    set_db(app)
    blueprint_path = 'airbridge.{0}.views.{0}'.format(module)
    app.register_blueprint(import_string(blueprint_path))
    return app
|
[
"lucasboscaini@gmail.com"
] |
lucasboscaini@gmail.com
|
7337e654a102bc58ed6a403e7e5a2baafed31204
|
4da30c339d64989d0b92587ea059d9bb462ceec0
|
/backend/reframery/migrations/0011_auto_20210222_1447.py
|
9b11ee18ad98b1d1643fb4853babb83976c517ab
|
[] |
no_license
|
Danielwu9226/community-currency-reframery
|
876973f296268a6d6c4449b5e590dcde53161721
|
b863c84d2b2bab1122f114bf0cfcd6faf2cdc7ef
|
refs/heads/master
| 2023-04-07T18:00:29.501122
| 2021-04-01T07:05:30
| 2021-04-01T07:05:30
| 300,325,079
| 1
| 0
| null | 2021-02-22T02:12:19
| 2020-10-01T15:18:38
|
Python
|
UTF-8
|
Python
| false
| false
| 862
|
py
|
# Generated by Django 3.1.5 on 2021-02-22 14:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Wallet model and changes CustomUser.validate_code's default."""

    dependencies = [
        ('reframery', '0010_auto_20201229_2325'),
    ]

    operations = [
        migrations.CreateModel(
            name='Wallet',
            fields=[
                # One wallet per user; the user FK doubles as the primary key.
                ('customUser', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='reframery.customuser')),
                ('address', models.CharField(max_length=255)),
                # NOTE(review): this stores a private key as a plain
                # CharField; confirm it is encrypted at a higher layer.
                ('private_key', models.CharField(max_length=255)),
            ],
        ),
        migrations.AlterField(
            model_name='customuser',
            name='validate_code',
            field=models.CharField(default='kvvajs2cb3imbj#!4ycl', max_length=255),
        ),
    ]
|
[
"imtiaza@mcmaster.ca"
] |
imtiaza@mcmaster.ca
|
0508fff314281a130e0521271cb6a91d6bc8ca58
|
3b3741229af1dee887495bab1bc7af1363969f9c
|
/axioms/migrations/0001_initial.py
|
2b7bdf3654ea3460de507402ca7d4fb61e7ee073
|
[] |
no_license
|
chadpalmer/django_api_sample
|
3ba26aebc840eb75966cc6b99fd80e0fe082647f
|
06b4fac412f6a5871214c9b92e216b58091483b2
|
refs/heads/master
| 2021-09-23T10:34:42.504173
| 2020-01-23T22:32:34
| 2020-01-23T22:32:34
| 232,022,693
| 0
| 0
| null | 2021-09-22T18:20:44
| 2020-01-06T04:17:35
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
# Generated by Django 3.0.2 on 2020-01-04 23:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Axiom model."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Axiom',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('category', models.CharField(blank=True, default='', max_length=200)),
                ('text', models.TextField()),
                # NOTE(review): related_name='snippets' looks copied from
                # the DRF tutorial; confirm it is intended for Axiom.
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippets', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['pk'],
            },
        ),
    ]
|
[
"chadpalmer@Chads-MacBook-Pro.local"
] |
chadpalmer@Chads-MacBook-Pro.local
|
aa4e6b7443d288d9d0895517c5a524596bd8b3e4
|
f6b5d818d58506d6de7429528fab9c1ab059fb15
|
/HW5/Melnychuk/hw5_1.py
|
d47acd47e39d0e882a0aba9587525b393a1d72ee
|
[] |
no_license
|
kolyasalubov/Lv-585.2.PythonCore
|
8381931bbeab17eee74142d2fbbe45fb025a1c5e
|
a18ee1180262a641c19806c4d9dd59e537096e14
|
refs/heads/main
| 2023-04-10T10:05:44.114639
| 2021-04-29T13:46:40
| 2021-04-29T13:46:40
| 349,162,354
| 1
| 3
| null | 2021-04-29T13:46:41
| 2021-03-18T17:30:57
|
Python
|
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Partition 1..9 into multiples of 2, remaining multiples of 3, and the rest.
numbers_div_2 = [n for n in range(1, 10) if n % 2 == 0]
numbers_div_3 = [n for n in range(1, 10) if n % 2 != 0 and n % 3 == 0]
other_numbers = [n for n in range(1, 10) if n % 2 != 0 and n % 3 != 0]

print(f"Numbers that are divisible by 2: {numbers_div_2}")
print(f"Numbers that are divisible by 3: {numbers_div_3}")
print(f"Numbers that are not divisable by 2 and 3: {other_numbers}")
|
[
"melny4uk.d@gmail.com"
] |
melny4uk.d@gmail.com
|
8bd0766d2f6563738282d6a6113ab378a44ded93
|
d6d42869ea400e434618b4289cb254f32bcc0ca3
|
/autorecord/core/models.py
|
fd9032f6989a0cea97e01712fcd8113c63271629
|
[] |
no_license
|
Diverso-NVR/autorecord
|
227538bd4e20ba4e9d19179f984dd0112b9d5d0e
|
7caa5ba9f69204f933c37072ce7366a206cd33a2
|
refs/heads/master
| 2023-04-23T16:56:29.449049
| 2021-05-12T18:49:00
| 2021-05-12T18:49:00
| 235,286,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
class Room:
    """Attribute-style wrapper around a room description dict.

    Every key of the dict becomes an attribute. The "sources" key goes
    through the property setter below so each entry is wrapped in a
    Source object.
    """

    def __init__(self, room_dict):
        # Fix: the original used self.__dict__.update(**room_dict), which
        # bypasses the `sources` data descriptor -- a room dict containing
        # a "sources" key left self._sources unset, so reading
        # room.sources raised AttributeError. setattr runs the setter.
        for key, value in room_dict.items():
            setattr(self, key, value)

    @property
    def sources(self):
        return self._sources

    @sources.setter
    def sources(self, sources_list):
        self._sources = [Source(d) for d in sources_list]


class Source:
    """Attribute-style view of a source description dict."""

    def __init__(self, source_dict):
        self.__dict__.update(**source_dict)
|
[
"dakudryavcev@gmail.com"
] |
dakudryavcev@gmail.com
|
5b5318e9339850b6265dc415340e362ff7e63894
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/diameter-of-binary-tree.py
|
2f8d44152f1d1d7bc911f4df55398ee39e93ccf0
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678
| 2022-05-18T09:18:32
| 2022-05-18T09:18:32
| 188,701,704
| 240
| 110
| null | 2020-05-08T13:04:36
| 2019-05-26T15:41:03
|
C++
|
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Time: O(n)
# Space: O(h)
class Solution(object):
    """Computes the diameter (longest path, counted in edges) of a binary tree."""

    def diameterOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        _, best = self.depth(root, 0)
        return best

    def depth(self, root, diameter):
        """Return (height of *root*, best diameter seen so far)."""
        if root is None:
            return 0, diameter
        left_h, diameter = self.depth(root.left, diameter)
        right_h, diameter = self.depth(root.right, diameter)
        # A path through this node spans left_h + right_h edges.
        return max(left_h, right_h) + 1, max(diameter, left_h + right_h)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3b15efcd4c58e73f9d4c0135da5f36a883347fa3
|
d170efa06e6e682c71961fe1213298e5a68193c3
|
/python/python/rotate/test_rotate.py
|
fc39d304949523ec047f2d1eddf13bc3a777fc50
|
[
"MIT"
] |
permissive
|
iggy18/data-structures-and-algorithms
|
45b9ebf3c0820968bda62c0ebd90a9cfd65b3902
|
700ef727ca7656724120a1873af4bd4bce5962f4
|
refs/heads/main
| 2023-02-27T04:45:12.535801
| 2021-02-08T22:41:28
| 2021-02-08T22:41:28
| 300,975,693
| 0
| 0
|
MIT
| 2021-02-12T18:39:18
| 2020-10-03T20:42:08
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
from rotate import rotate
def test_rotate():
    # Smoke check: the rotate symbol imported from the module exists
    # (and is truthy, i.e. a real function object).
    assert rotate
def test_rotate_works_properly():
    """Rotating a 3x3 matrix of constant rows yields constant columns."""
    grid = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
    expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
    assert rotate(grid) == expected
|
[
"seth.mcfeeters@gmail.com"
] |
seth.mcfeeters@gmail.com
|
e71ad573a5ef2e637afd4b895e4543e24ac20f06
|
eb69c111ac9f0f77d1cf9636f66b9309cb295178
|
/遗漏的知识点/python中的数据结构/testTwoList.py
|
f136ba0e192e64660b4d3d40e649525e97601313
|
[] |
no_license
|
zyp19/leetcode1
|
f7984ff4f74c399d2f7c1f059e193505341993ba
|
6f0338e6c11b497a1ab21cf00997b8eeb56703fb
|
refs/heads/main
| 2023-06-17T08:11:51.092801
| 2021-05-13T03:58:57
| 2021-07-11T09:24:15
| 384,907,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
# 1. Ways to traverse a two-dimensional list
"""
First approach: index-based, similar to 2-D matrix indexing in MATLAB.

A two-level loop walks the 2-D list; range/len bound each axis.
Drawback: unlike MATLAB matrices, Python rows need not have equal
length, and indexing by row 0's length crashes on such ragged lists.
"""
list2d = [[1, 2, 3], [4, 5, 6]]
total = 0  # renamed from `sum`, which shadowed the builtin
for i in range(len(list2d)):
    # Fix: bound j by the current row's length, not row 0's; the original
    # used len(list2d[0]) which raises IndexError on ragged lists below.
    for j in range(len(list2d[i])):
        total += list2d[i][j]
print(total)

"""
Second approach: index-based again, but now on a ragged list.
With the per-row bound above this works; the original repeated
range(len(list2d[0])) here and crashed on row [4, 5].
"""
list2d = [[1, 2, 3], [4, 5]]
total = 0
for i in range(len(list2d)):
    for j in range(len(list2d[i])):
        total += list2d[i][j]
print(total)

"""
Third approach: iterate rows and values directly ("handles").
Tip for beginners: prefer this direct iteration style; it handles
ragged lists naturally.
"""
list2d = [[1, 2, 3], [4, 5]]
total = 0
for row in list2d:
    for value in row:
        total += value
print(total)
|
[
"zuoyiping@163.com"
] |
zuoyiping@163.com
|
75f027c569ffff1f16c7bb74496f8f376e04a6b1
|
9a2593b422605d6254553b3f003d136fd0104514
|
/exercise-submission/day2/Dalyo_OURBA.py
|
be74e506d194fab813f986ec639bd2f9618bcb3a
|
[] |
no_license
|
sokharsamb/AIMS2019-Dakar-BigDataCourse
|
c577a327e78b536c678dd7789422011270c75d6b
|
0733272c568e6dabac02f8815465411be5999f15
|
refs/heads/master
| 2021-11-24T23:58:22.303402
| 2021-11-04T18:46:50
| 2021-11-04T18:46:50
| 196,364,855
| 2
| 1
| null | 2019-07-11T09:37:25
| 2019-07-11T09:37:25
| null |
UTF-8
|
Python
| false
| false
| 9,095
|
py
|
"""
GENERAL INSTRUCTIONS
WARNING: For Python beginners:
the instructions here will only make sense after you have gone through and
completed the training materials.
1. WHICH PART TO CHANGE?: Uncomment every line with [YOUR CODE HERE] and replace it with your code.
Please don't change anything else other than these lines.
2. USE OF JUPYTER NOTEBOOK: For those who would like to use Jupyter Notebook. You can copy and paste
each function in the notebook environment, test your code their. However,
remember to paste back your code in a .py file and ensure that its running
okay.
3. IDENTATION: Please make sure that you check your identation
4. Returning things frm function: All the functions below have to return a value.
Please dont forget to use the return statement to return a value.
5. HINTS: please read my comments for hints and instructions where applicable
6. DEFINING YOUR OWN FUNCTIONS: where I ask you to define your own function
please make sure that you name the function exactly as I said.
"""
# import Python libraries if required
import seaborn as sns
from matplotlib import pyplot as plt
from textblob import TextBlob
import pandas as pd
import requests
from bs4 import BeautifulSoup
# define a function, please call it: report_basic_data_properties
# the function should take as input a CSV file, call the input
# parameter "csv_file"
# and return the following properties about the data
# 1. Number of rows in the data
# 2. List of column names
# Note that you can return two values in a function
# using a tuple
def get_name_of_town_with_highest_elevation(csv_file=None, elev_col="elev_metres"):
    """
    Given the following data file: day2-python-for-data-science/data/townships_with_dist_and_elev.csv
    return the town with highest elevation.
    Note that column name with elevation values is already provided as a default parameter.
    :param csv_file: CSV file with elevation data
    :param elev_col: Column with elevation values
    :return: town name with highest elevation value
    """
    # Exercise skeleton: the steps below are instructions for the student.
    # As written the body is empty, so the function returns None.

    # read data into pandas dataframe
    # [YOUR CODE HERE]

    # get the maximum value for elev_metres column
    # [YOUR CODE HERE]

    # inspect the object type which you get above
    # if its a series object use the function
    # "values" on it like so: pd_series.values
    # in order to get a string
    # [YOUR CODE HERE]

    # return the answer
    # [YOUR CODE HERE]
def plot_a_numeric_attribute(csv_file=None, col_to_plot=None, output_plot=None):
    """
    Given a CSV file, read the data using pandas, plot a given column
    and finally save the plot as "png" file.
    DATA FOR TESTING: day2-python-for-data-science/data/townships_with_dist_and_elev.csv
    COLUMN NAME FOR TESTING: 'elev_meters' column
    :param csv_file: File to get data from
    :param col_to_plot: Column name to plot
    :param output_plot: Save output plot to file
    :return:
    """
    # Exercise skeleton: body intentionally left for the student.

    # read data into pandas dataframe
    # [YOUR CODE HERE]

    # use seaborn to plot distribution of data
    # ax = sns.distplot(ADD YOUR CODE HERE)

    # save plot as png file
    # ax.get_figure().savefig(ADD YOUR CODE HERE)
def translate_to_french_for_dunstan(sentence=None):
    """
    Given a sentence, translate each word in the sentence
    Example: sentence = 'I love you', returns {"I": "je", "love": "amour", "you": "vous"}
    use textblob package (https://textblob.readthedocs.io/en/dev/) and NLTK package
    for this task
    :param sentence: Sentence to translate
    :return: a dictionary where key is english word and value is translated french word
    """
    # Exercise skeleton: only en_fr is initialized; the translation loop
    # below is left for the student.

    # first tokenize the words: split the sentence
    # into words using the NLTK function word_tokenize()
    # words = [YOUR CODE HERE]

    # initiate a dictionary object to put in english and French words
    en_fr = {}

    # Now do the translation
    # for w in words:
    #     en_blob = TextBlob(w)
    #
    #     # use the function translate(from_lang="en", to='fr')
    #     # on the en_blob object defined above
    #     fr_blob = [YOUR CODE HERE]
    #
    #     # use function raw on the blob above to get the word as a string
    #     [YOUR CODE HERE]
    #
    #     # put the translated word in the
    #     # dictionary object en_fr with english
    #     # as key and corresponding french translation as value
    #     [YOUR CODE HERE]

    # return the dictionary object
    # NOTE(review): the return statement itself is still missing, so the
    # function returns None even once en_fr is filled in.
def get_table_rows_from_webpage(url=None):
    """Fetch *url* and return all of its HTML table rows.

    :param url: The website to get the table from
    :return: list of <tr> BeautifulSoup elements
    """
    page_html = requests.get(url).text
    soup = BeautifulSoup(page_html, 'lxml')
    return soup.find_all('tr')
def clean_table_rows(tb_rows=None):
    """Convert raw <tr> elements into lists of cleaned cell strings.

    :param tb_rows: iterable of BeautifulSoup <tr> elements
    :return: one list of stripped cell values per input row
    """
    # Declare list to hold all cleaned rows
    cleaned_rows = []
    for row in tb_rows:
        # Extract cell using table cell HTML tag
        cells = row.find_all('td')
        # Extract text only
        str_cells = str(cells)
        clean_text = BeautifulSoup(str_cells, "lxml").get_text()
        # Remove white spaces-a little convoluted but it works
        clean_text2 = " ".join(clean_text.split())
        # Remove brackets at beginning and end
        clean_text3 = clean_text2[1:-1]
        # Split clean_text3 using comma delimiter
        split_str = clean_text3.split(',')
        # Remove white spaces again
        split_str2 = [i.strip() for i in split_str]
        # Fix: the original assigned (cleaned_rows = split_str2) instead of
        # appending, so only the final row survived the loop.
        cleaned_rows.append(split_str2)
    # return cleaned rows
    return cleaned_rows
def convert_website_table_to_csv(output_csv_file=None):
    """
    Scrape the men's overall results table off the TCS NYC Marathon website
    and save it into a CSV file.

    Bug fixes: ``clean_table_rows`` was called with the undefined name
    ``tb_rowsb`` (NameError), and the final save used ``df.to_csv(out_file)``
    where both ``df`` and ``out_file`` are undefined — the dataframe is named
    ``df_men`` and the destination is the ``output_csv_file`` parameter.

    :param output_csv_file: path of the CSV file to write
    :return: None (writes the CSV as a side effect)
    """
    # URL to get data from
    URL = 'https://www.tcsnycmarathon.org/about-the-race/results/overall-men'

    # extract table rows using the function "get_table_rows_from_webpage"
    # defined above
    tb_rows = get_table_rows_from_webpage(URL)

    # clean up table rows using "clean_table_rows" function (fix: was tb_rowsb)
    clean_tb_rows = clean_table_rows(tb_rows)

    # Column names: the first element of the list contains the column names
    colnames = clean_tb_rows[0]

    # Create dataframe from the remaining data rows
    df_men = pd.DataFrame(data=clean_tb_rows[1:], columns=colnames)

    # save the dataframe to CSV file (fix: was df.to_csv(out_file, ...))
    df_men.to_csv(output_csv_file, index=False)
def get_weather(api_key=None, city_id=None):
    """
    Fetch the weather forecast for one city from the OpenWeatherMap API.

    NOTE(review): this is an unfinished course-exercise template — the body
    is entirely commented-out scaffolding, so the function currently does
    nothing and implicitly returns None.

    :param api_key: OpenWeatherMap API key
    :param city_id: numeric OpenWeatherMap city identifier
    :return: None (intended: the parsed forecast JSON, once implemented)
    """
    # add your API key
    # url = "http://api.openweathermap.org/data/2.5/forecast?id={}&APPID={}".format(YOUR CODE HERE)
    # use requests to retrieve data from the API
    # [YOUR CODE HERE]
    # retrieve JSON from the response object
    # [YOUR CODE HERE]
    # return the JSON object
    # [YOUR CODE HERE]
def compile_weather_forecast(city_name=None, output_csv_file=None):
    """
    Get weather forecasts for Dakar. Please get only TEMPERATURE and HUMIDITY.

    NOTE(review): unfinished course-exercise template — the whole body is
    commented-out scaffolding, so the function currently does nothing and
    implicitly returns None.

    Useful Info:
        city_details_file: day2-python-for-data-science/data/city.list.json
    :param city_name: city to look up in the city-details JSON file
    :param output_csv_file: CSV file the forecast dataframe should be saved to
    :return: None (intended: writes the forecast CSV, once implemented)
    """
    # # copy and paste your API key below
    # API_KEY = [YOUR CODE HERE]
    #
    # # JSON file with city details
    # jfile = [YOUR CODE HERE]
    #
    # # load city details file
    # with open(jfile) as f:
    #     data = json.load(f)
    #
    # # inspect the data object above
    # # use for loop and if statement to find city id
    # city_code = None
    # [YOUR CODE HERE]
    #
    # # now get the weather forecast using the
    # # "get_weather" function defined above
    # weather_json = [YOUR CODE HERE]
    #
    # # using method for accessing a dictionary
    # # put weather items in a list
    # weather_items = [YOUR CODE HERE]
    #
    # # save into a dataframe
    # data = []  # will hold our data
    #
    # for i in weather_items:
    #     # get forecast time
    #     ts = [YOUR CODE HERE]
    #
    #     # get temperature, rain and humidity
    #     temp = [YOUR CODE HERE]
    #     hum = [YOUR CODE HERE]
    #
    #     # for rains and clouds, use get() method to
    #     # retrieve required values
    #     rains = [YOUR CODE HERE]
    #
    #     clouds = [YOUR CODE HERE]
    #
    #     data_item = {'forecastTime': [YOUR CODE HERE], 'tempF': [YOUR CODE HERE],
    #                  'humidity': [YOUR CODE HERE], "rain": [YOUR CODE HERE],
    #                  'cloudsPercent': [YOUR CODE HERE]}
    #
    #     # append to list of create earlier on
    #     [YOUR CODE HERE]
    #
    # # create dataframe
    # [YOUR CODE HERE]
    #
    # # save dataframe with option index set to False
    # [YOUR CODE HERE]
|
[
"dmatekenya@gmail.com"
] |
dmatekenya@gmail.com
|
367b2fcba0f5c8e7622ab91e4e2e199f84f863e7
|
fb9ee00025b9e4b19bd7c9d009094d088f481125
|
/google-code-jam/Google Code Jam 2011 - Qualification Round/ProblemA.Bot_Trust-file.py
|
705fb0901075a0742bab075a8446e713c1fee8ee
|
[] |
no_license
|
wonjohnchoi/competitions
|
e382e3e544915b57e48bbb826120ef40b99b9843
|
4ba5be75a5b36b56de68d4a4ab8912d89eb73a0e
|
refs/heads/master
| 2021-01-16T00:28:33.446271
| 2015-09-12T16:53:25
| 2015-09-12T16:53:25
| 2,786,296
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,468
|
py
|
class Robot:
    """Mutable state for one robot in the button-pressing simulation."""

    def __init__(self):
        # Lifecycle: 'waiting' (needs a command), 'moving', 'pushing',
        # or 'idle' (no commands left).
        self.status = 'waiting'
        # Robots start in front of button 1, with no destination button (-1)
        # and no command assigned yet (command index -1).
        self.pos = 1
        self.dest = -1
        self.idx = -1
def find(c, commands, f):
    """Return the index of the first command at or after position *f* whose
    robot colour equals *c*, or -1 when that robot has no command left."""
    return next(
        (idx for idx in range(f, len(commands)) if commands[idx][0] == c),
        -1)
def solve():
    """Google Code Jam 2011 Qualification A ("Bot Trust") solver.

    Simulates the Orange (colour 0) and Blue (colour 1) robots walking
    between numbered buttons and pressing them in the required global
    order, counting one second per tick; writes the elapsed seconds per
    test case to A.out.

    NOTE(review): input/output paths are hard-coded absolute Windows paths.
    """
    fout = open('C:\\Users\\Wonjohn Choi\\Python\\Google Code Jam\\A.out','w')
    fin = open('C:\\Users\\Wonjohn Choi\\Python\\Google Code Jam\\A.in','r')
    t = int(str(next(fin)).strip())
    for nCase in range(t):
        # Parse "N R1 P1 R2 P2 ...": drop the leading count, then encode
        # each (robot, button) pair as (0, pos) for 'O' and (1, pos) otherwise.
        line = list(str(next(fin)).strip().split(' '))[1:]
        commands = []
        for i in range(0,len(line),2):
            if str(line[i])=='O':
                commands.append((0,int(line[i+1])))
            else:
                commands.append((1,int(line[i+1])))
        cnt = 0
        #print(find('O',commands))
        #print(find('B',commands))
        robots = [Robot(), Robot()] #O, B
        #done = False
        # Tick one second at a time until both robots run out of commands.
        while not (robots[0].status == 'idle' and robots[1].status == 'idle'):
            cnt+=1
            # Give every waiting robot its next command (or mark it idle).
            for i in (0,1):
                r = robots[i]
                if r.status == 'waiting':
                    # NOTE(review): deliberately reuses `i` as the command
                    # index from here on; `r` was already bound above.
                    i = find(i, commands, r.idx+1)
                    if i==-1:
                        r.status = 'idle'
                        r.idx = len(commands)
                    else:
                        r.dest = commands[i][1]
                        r.idx = i
                        if r.pos==r.dest:
                            r.status = 'pushing'
                        else:
                            r.status = 'moving'
            if robots[0].status == 'pushing' and robots[1].status == 'pushing':
                # Both are ready to push: only the robot whose command comes
                # first in the global order may press this second.
                if robots[0].idx>robots[1].idx:
                    robots[1].status = 'waiting'
                    #print('Robot 0: sleeping.')
                    #print('Robot 1: pushed at %d'%(robots[1].pos))
                else:
                    robots[0].status = 'waiting'
                    #print('Robot 1: sleeping.')
                    #print('Robot 0: pushed at %d'%(robots[0].pos))
            elif robots[0].status == 'idle' and robots[1].status == 'idle':
                # Both became idle during assignment: this tick did no work,
                # so it must not be counted.
                cnt-=1
                #print('DONE')
            else:
                for i in [0,1]:
                    if robots[i].status == 'pushing' and robots[i].idx<robots[1-i].idx:
                        # Its command is next in the global order: press now.
                        robots[i].status = 'waiting'
                        #print('Robot %d: pushed at %d'%(i, robots[i].pos))
                    elif robots[i].status == 'moving':
                        # Step one button towards the destination.
                        #print('Robot %d: moved from %d'%(i, robots[i].pos),end='')
                        if robots[i].dest>robots[i].pos:
                            robots[i].pos+=1
                        else:
                            robots[i].pos-=1
                        #print(' to %d.'%(robots[i].pos))
                        if robots[i].pos==robots[i].dest:
                            robots[i].status = 'pushing'
                    else:
                        # Waiting for the other robot's turn to pass.
                        #print('Robot %d: sleeping'%(i))
                        pass
        fout.write('Case #%d: %d'%(nCase+1, cnt))
        fout.write('\n')
solve()
|
[
"wonjohn.choi@gmail.com"
] |
wonjohn.choi@gmail.com
|
a28f99427e7b585a4de577169e2d4afd3ab4e90e
|
618522a8ffed585e27701b9acb1a1171e3c5c924
|
/salience_sum/module/encoder.py
|
845e220e59a1dd02f3abb3eeec33d31e13a09aba
|
[] |
no_license
|
blodstone/Salience_Sum
|
9795c2a1c03c86218a8c4560ba65f7d1ff5f65e8
|
ce2e9e316a68c18bd523ba9e3d1e3ea286bbf068
|
refs/heads/master
| 2020-08-29T11:49:40.695618
| 2020-01-21T16:17:18
| 2020-01-21T16:17:18
| 218,023,295
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,495
|
py
|
import torch
from allennlp.modules import Seq2SeqEncoder
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from torch.nn import LSTM, Linear, Sequential, ReLU
from typing import Dict, Tuple
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
@Seq2SeqEncoder.register('salience_encoder')
class Encoder(Seq2SeqEncoder):
    """
    A standard LSTM encoder that supports bidirectional. If bidirectional is True, we split
    the hidden layer and then concatenate the two directions in the resulting encoder states.
    Everything is on first batch basis.
    """

    def __init__(self, input_size,
                 hidden_size,
                 num_layers,
                 bidirectional,
                 stateful: bool = False) -> None:
        # input_size: dimensionality of the embedded tokens fed to the LSTM
        # hidden_size: size of the LSTM hidden state (per direction)
        # num_layers: number of stacked LSTM layers
        # bidirectional: run the LSTM in both directions when True
        # stateful: forwarded to the allennlp Seq2SeqEncoder base class
        super().__init__(stateful)
        self.hidden_size = hidden_size
        self.bidirectional = bidirectional
        self.num_layers = num_layers
        self.input_size = input_size
        self._rnn = LSTM(input_size=self.input_size,
                         hidden_size=self.hidden_size,
                         num_layers=self.num_layers,
                         bidirectional=self.bidirectional,
                         batch_first=True)
        # Projects concatenated forward/backward states (2*H) back down to H.
        self._reduce = Linear(self.hidden_size * 2, self.hidden_size)

    def forward(self, embedded_src: torch.Tensor, source_mask: torch.Tensor) \
            -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        # Recover the true sequence lengths from the padding mask, then pack
        # so the LSTM skips padded positions.
        source_lengths = get_lengths_from_binary_sequence_mask(source_mask)
        packed_src = pack_padded_sequence(embedded_src, source_lengths,
                                          batch_first=True, enforce_sorted=False)
        # states = (B x L X 2*H)
        packed_states, final = self._rnn(packed_src)
        states, _ = pad_packed_sequence(packed_states, batch_first=True)
        batch_size = states.size(0)
        # final_states and context = (B x 2*num_layer x H)
        final_state, context = final
        # Reducing the dual hidden size to one hidden size
        # NOTE(review): view() assumes the (num_layers*2, B, H) hidden tensors
        # can be reshaped directly to (B, num_layers, 2*H) without a
        # transpose — confirm the intended layer/direction pairing.
        if self.bidirectional:
            final_state = self._reduce(final_state.view(batch_size, self.num_layers, -1))
            context = self._reduce(context.view(batch_size, self.num_layers, -1))
        return states, (final_state, context)

    def get_input_dim(self) -> int:
        # Dimensionality expected of the input embeddings.
        return self.input_size

    def get_output_dim(self) -> int:
        # Per-timestep output size after reduction.
        return self.hidden_size

    def is_bidirectional(self) -> bool:
        return self.bidirectional
|
[
"hardy.oei@gmail.com"
] |
hardy.oei@gmail.com
|
e7a93d4b82e4218fcdd25cd67933e675a3c21dfd
|
f363b0405df50ec6fa2292a928c2b448cc049831
|
/main/chapter2/itemcf.py
|
aa6ca73765dbdcbefda72de6bec11d36ae9e3867
|
[] |
no_license
|
CJuanvip/Practice_Recommender_System
|
ccb711a03cadd1db6f00dc67e6122c334057ce9a
|
8f5d1c06deed6714fcfb61da06dee73f98dbff54
|
refs/heads/master
| 2023-01-19T19:33:44.234923
| 2020-11-29T20:46:53
| 2020-11-29T20:46:53
| 316,509,626
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,148
|
py
|
#!/usr/bin/python3
# coding=utf-8
'''
Created on 2018年6月15日
@author: qcymkxyc
'''
from main.chapter2 import UserCF
from collections import defaultdict
import math
from operator import itemgetter
import sys
from main.util.utils import load_file, save_file
class ItemCF(UserCF):
    """Item-based collaborative filtering recommender."""

    def __init__(self):
        # NOTE(review): intentionally does not call UserCF.__init__; all
        # state is created in train() — confirm UserCF needs no init state.
        pass

    def train(self, origin_data, sim_matrix_path="store/item_sim.pkl"):
        """Train the model.

        @param origin_data: raw interaction data
        @param sim_matrix_path: path used to cache the similarity matrix
        """
        self.origin_data = origin_data
        # Build the training set from the raw data (reuses UserCF's helper).
        UserCF._init_train(self, origin_data)
        print("开始训练模型", file=sys.stderr)
        try:
            # First try to load a previously cached similarity matrix.
            print("开始载入用户协同矩阵....", file=sys.stderr)
            self.item_sim_matrix = load_file(sim_matrix_path)
            print("载入协同过滤矩阵完成", file=sys.stderr)
        except BaseException:
            # Cache miss/failure: recompute the matrix and save it.
            print("载入用户协同过滤矩阵失败,重新计算协同过滤矩阵", file=sys.stderr)
            # Compute the item-item similarity matrix.
            self.item_sim_matrix = self._item_similarity()
            print("开始保存协同过滤矩阵", file=sys.stderr)
            save_file(sim_matrix_path, self.item_sim_matrix)
            print("保存协同过滤矩阵完成", file=sys.stderr)

    def _item_similarity(self):
        """Compute the item-item similarity matrix.

        @return: nested dict {item_i: {item_j: similarity}} where similarity
            is the co-purchase count normalized by sqrt of both popularities
        """
        item_sim_matrix = dict()  # item-item co-occurrence / similarity
        N = defaultdict(int)  # popularity (number of buyers) of each item

        # Count, for each ordered item pair, how many users have both.
        for _, items in self.train.items():
            for i in items:
                item_sim_matrix.setdefault(i, dict())
                # Accumulate item popularity.
                N[i] += 1
                for j in items:
                    if i == j:
                        continue
                    item_sim_matrix[i].setdefault(j, 0)
                    item_sim_matrix[i][j] += 1

        # Normalize co-occurrence counts into cosine-style similarities.
        for i, related_items in item_sim_matrix.items():
            for j, related_count in related_items.items():
                item_sim_matrix[i][j] = related_count / math.sqrt(N[i] * N[j])

        return item_sim_matrix

    def recommend(self, user, N, K):
        """Recommend items for one user.

        @param user: user to recommend for
        @param N: number of items to recommend
        @param K: number of most-similar items to inspect per owned item
        @return: dict {item: accumulated similarity score}, top N by score
        """
        recommends = dict()

        items = self.train[user]
        for item in items:
            # Walk the K items most similar to each item the user already
            # has, skipping items the user owns.
            for i, sim in sorted(self.item_sim_matrix.get(item, {}).items(), key=itemgetter(1), reverse=True)[: K]:
                if i in items:
                    continue
                recommends.setdefault(i, 0.)
                recommends[i] += sim

        return dict(sorted(recommends.items(), key=itemgetter(1), reverse=True)[: N])

    def recommend_users(self, users, N, K):
        # Batch recommendation is delegated to the UserCF implementation.
        return UserCF.recommend_users(self, users, N, K)
|
[
"e0253722@u.nus.edu"
] |
e0253722@u.nus.edu
|
96fd3506464c392a8fd723e5f4d4aeaf7d0ba1cc
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/cctbx/libtbx_refresh.py
|
b1206d46a3f922f73066f670596a7b7a0ef8f24f
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
from __future__ import absolute_import, division, print_function
import os
from libtbx.utils import warn_if_unexpected_md5_hexdigest
# NOTE(review): this is a cctbx "libtbx_refresh.py" hook.  libtbx exec()s it
# at configuration time with `self` bound to the refresh environment object,
# which is why a bare `self` appears at module scope.
if self.env.is_ready_for_build():
  message_template = ' Generating C++ files in:\n "%s"'
  # eltbx
  from cctbx.source_generators.eltbx import generate_henke_cpp
  from cctbx.source_generators.eltbx import generate_sasaki_cpp
  target_dir = self.env.under_build("cctbx/eltbx")
  print(message_template % target_dir)
  # Generate the Henke/Sasaki anomalous-scattering table sources if their
  # reference-data directories are present in the source tree.
  for label,generator_module in [("Henke", generate_henke_cpp),
                                 ("Sasaki", generate_sasaki_cpp)]:
    if os.path.isdir(generator_module.reference_tables_directory):
      if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
      generator_module.run(target_dir=target_dir)
    else:
      print("*"*79)
      print("Warning: directory with %s tables is missing:" % label)
      print(" ", repr(generator_module.reference_tables_directory))
      print("*"*79)
  # flex_fwd.h
  from cctbx.source_generators import flex_fwd_h
  target_dir = self.env.under_build("include/cctbx/boost_python")
  print(message_template % target_dir)
  if not os.path.isdir(target_dir):
    os.makedirs(target_dir)
  flex_fwd_h.run(target_dir)
  # reference_table.cpp : checking that it is up-to-date
  # Warn when an asu-table generator source changed without the pinned md5
  # being updated (i.e. generated C++ may be stale).
  for f,sig in [
      ("reference_table.py", "b4d948c292357b90c8b4d5716d607bb9"),
      ("short_cuts.py", "18e5b9d93962d70711497de1d6dbebbb"),
      ("proto/generate_cpp_asu_table.py", "0f19e51b469650aa23e81483051eeb10")]:
    fn = "sgtbx/direct_space_asu/" + f
    warn_if_unexpected_md5_hexdigest(
      path=self.env.under_dist( module_name="cctbx", path=fn),
      expected_md5_hexdigests=[ sig ],
      hints=[
        " Files to review:",
        " "+fn,
        " cctbx/libtbx_refresh.py"])
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
2a9bf81297de2e77ef26a208ce9dd789aafb71d2
|
40248f9e5ed813fbb966df515ece9193cebf889d
|
/exapi/request_creators/hitbtc/market_data/interface.py
|
a1dc6f761c714843d5db467ba83515a00f5001a5
|
[
"MIT"
] |
permissive
|
astsu-dev/exapi1
|
29bc22e0949e835d6ea6887e9c52288584a095eb
|
1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12
|
refs/heads/main
| 2023-05-08T20:08:18.435247
| 2021-06-02T11:25:11
| 2021-06-02T11:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,161
|
py
|
"""Has interface for hitbtc market data request creator."""
from typing import Optional, Protocol
from exapi.request_creators.request import Request
from exapi.typedefs.hitbtc import (CandlesPeriod, Currencies, Currency,
IntervalValue, SortBy, SortDirection,
Symbol, Symbols)
class IHitbtcMarketDataRequestCreator(Protocol):
    """Has methods for creating requests for hitbtc market data endpoints."""

    def create_get_currencies_request(self, currencies: Optional[Currencies] = None) -> Request:
        """Creates request for /public/currency endpoint.

        Requires no API key Access Rights.

        Args:
            currencies (Optional[Currencies], optional): specified currencies.
                If not passed, then will create for all currencies.

        Returns:
            Request
        """

    def create_get_certain_currency_request(self, currency: Currency) -> Request:
        """Creates request for /public/currency/`currency` endpoint.

        Requires no API key Access Rights.

        Args:
            currency (Currency)

        Returns:
            Request
        """

    def create_get_symbols_request(self, symbols: Optional[Symbols] = None) -> Request:
        """Creates request for /public/symbol endpoint.

        Args:
            symbols (Optional[Symbols], optional): list of symbols.
                If not passed, then will create for all symbols.

        Returns:
            Request
        """

    def create_get_certain_symbol_request(self, symbol: Symbol) -> Request:
        """Creates request for /public/symbol/`symbol` endpoint.

        Requires no API key Access Rights.

        Args:
            symbol (Symbol): certain symbol

        Returns:
            Request
        """

    def create_get_tickers_request(self, symbols: Optional[Symbols] = None) -> Request:
        """Creates request for /public/ticker endpoint.

        Requires no API key Access Rights.

        Args:
            symbols (Optional[Symbols], optional): list of symbols.
                If not passed, then will create for all symbols.

        Returns:
            Request
        """

    def create_get_certain_ticker_request(self, symbol: Symbol) -> Request:
        """Creates request for /public/ticker/`symbol` endpoint.

        Requires no API key Access Rights.

        Args:
            symbol (Symbol): certain symbol

        Returns:
            Request
        """

    def create_get_trades_request(self, symbols: Optional[Symbols] = None,
                                  sort: Optional[SortDirection] = None,
                                  from_: Optional[IntervalValue] = None,
                                  till: Optional[IntervalValue] = None,
                                  limit: Optional[int] = None,
                                  offset: Optional[int] = None
                                  ) -> Request:
        """Creates request for /public/trades endpoint.

        Requires no API key Access Rights.

        Args:
            symbols (Optional[Symbols], optional): list of symbols.
                If not passed, then will create for all symbols.
            sort (Optional[SortDirection], optional): sort direction.
                Accepted values: ASC, DESC. Default value: DESC.
            from_ (Optional[IntervalValue], optional): Interval initial value.
                If sorting by timestamp is used, then Datetime,
                otherwise int of index value.
            till (Optional[IntervalValue], optional): Interval end value.
                If sorting by timestamp is used, then Datetime,
                otherwise int of index value.
            limit (Optional[int], optional): Default value: 100. Max value: 1000.
            offset (Optional[int], optional): Default value: 0. Max value: 100000.

        Returns:
            Request
        """

    def create_get_certain_trades_request(self, symbol: Symbol,
                                          sort: Optional[SortDirection] = None,
                                          by: Optional[SortBy] = None,
                                          from_: Optional[IntervalValue] = None,
                                          till: Optional[IntervalValue] = None,
                                          limit: Optional[int] = None,
                                          offset: Optional[int] = None
                                          ) -> Request:
        """Creates request for /public/trades/`symbol` endpoint.

        Requires no API key Access Rights.

        Args:
            symbol (Symbol): certain symbol.
            sort (Optional[SortDirection], optional): sort direction.
                Accepted values: ASC, DESC. Default value: DESC.
            by (Optional[SortBy], optional): Defines sort type.
                Accepted values: id, timestamp. Default value: timestamp.
            from_ (Optional[IntervalValue], optional): Interval initial value.
                If sorting by timestamp is used, then Datetime,
                otherwise int of index value.
            till (Optional[IntervalValue], optional): Interval end value.
                If sorting by timestamp is used, then Datetime,
                otherwise int of index value.
            limit (Optional[int], optional): Default value: 100. Max value: 1000.
            offset (Optional[int], optional): Default value: 0. Max value: 100000.

        Returns:
            Request
        """

    def create_get_orderbooks_request(self, symbols: Optional[Symbols] = None,
                                      limit: Optional[int] = None
                                      ) -> Request:
        """Creates request for /public/orderbook endpoint.

        Requires no API key Access Rights.

        Args:
            symbols (Optional[Symbols], optional): list of symbols.
                If not passed, then will create for all symbols.
            limit (Optional[int], optional): limit of order book levels.
                Default value: 100. Set 0 to view full list of levels.

        Returns:
            Request
        """

    def create_get_certain_orderbook_request(self, symbol: Symbol,
                                             limit: Optional[int] = None,
                                             volume: Optional[int] = None
                                             ) -> Request:
        """Creates request for /public/orderbook/`symbol` endpoint.

        Requires no API key Access Rights.

        Please note that if the volume is specified,
        the limit will be ignored, askAveragePrice and bidAveragePrice
        are returned in response.

        Args:
            symbol (Symbol): certain symbol.
            limit (Optional[int], optional): Limit of Order Book levels.
                Default value: 100. Set 0 to view full list of levels.
            volume (Optional[int], optional): Desired volume for market depth search.

        Returns:
            Request
        """

    def create_get_candles_request(self, symbols: Optional[Symbols] = None,
                                   period: Optional[CandlesPeriod] = None,
                                   sort: Optional[SortDirection] = None,
                                   from_: Optional[IntervalValue] = None,
                                   till: Optional[IntervalValue] = None,
                                   limit: Optional[int] = None,
                                   offset: Optional[int] = None
                                   ) -> Request:
        """Creates request for /public/candles endpoint.

        Requires no API key Access Rights.

        Args:
            symbols (Optional[Symbols], optional): list of symbols.
                If not passed, then will create for all symbols.
            period (Optional[CandlesPeriod], optional): accepted values: M1 (one minute),
                M3, M5, M15, M30, H1 (one hour), H4,
                D1 (one day), D7, 1M (one month).
                Default value: M30
            sort (Optional[SortDirection], optional): sort direction.
                Accepted values: ASC, DESC. Default value: DESC.
            from_ (Optional[IntervalValue], optional): interval initial value.
            till (Optional[IntervalValue], optional): interval end value.
            limit (Optional[int], optional): limit of candles. Default value: 100. Max value: 1000.
            offset (Optional[int], optional): Default value: 0. Max value: 100000.

        Returns:
            Request
        """

    def create_get_certain_candles_request(self, symbol: Symbol,
                                           period: Optional[CandlesPeriod] = None,
                                           sort: Optional[SortDirection] = None,
                                           from_: Optional[IntervalValue] = None,
                                           till: Optional[IntervalValue] = None,
                                           limit: Optional[int] = None,
                                           offset: Optional[int] = None
                                           ) -> Request:
        """Creates request for /public/candles/`symbol` endpoint.

        Requires no API key Access Rights.

        Args:
            symbol (Symbol): certain symbol.
            period (Optional[CandlesPeriod], optional): accepted values: M1 (one minute),
                M3, M5, M15, M30, H1 (one hour), H4,
                D1 (one day), D7, 1M (one month).
                Default value: M30
            sort (Optional[SortDirection], optional): sort direction.
                Accepted values: ASC, DESC. Default value: DESC.
            from_ (Optional[IntervalValue], optional): interval initial value.
            till (Optional[IntervalValue], optional): interval end value.
            limit (Optional[int], optional): limit of candles. Default value: 100. Max value: 1000.
            offset (Optional[int], optional): Default value: 0. Max value: 100000.

        Returns:
            Request
        """
|
[
"you@example.com"
] |
you@example.com
|
7fda0ad362d063c3c2f53573555b544d534b230e
|
e9dd23f575a9d3e3eb72fd20b155ad8b93a1a6fd
|
/SHCHGui/CalendarUI.py
|
4e91d8c91b4031aec5d04cf98fa15d282fe0caf5
|
[] |
no_license
|
xiangyuw123/SHCH_BDF_Tools
|
ca3677a70e772a1bedcfee8703a40dc6d6e194ec
|
d244075b2a90a4236716b1dea67238e2843a9ff2
|
refs/heads/master
| 2022-12-04T13:56:12.923137
| 2020-08-28T08:11:25
| 2020-08-28T08:11:25
| 290,937,341
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,973
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 8 14:13:05 2020
@author: 123
"""
# -*- coding: utf-8 -*-
import calendar
import tkinter as tk
import tkinter.font as tkFont
from tkinter import ttk
datetime = calendar.datetime.datetime
timedelta = calendar.datetime.timedelta
class calendarWidget:
    """A pop-up date-picker dialog built from tkinter/ttk widgets."""

    def __init__(s, point = None, position = None):
        # point: anchor point (x, y) used to position the window
        # position: where the window sits relative to the point:
        #   'ur'-upper right, 'ul'-upper left, 'll'-lower left, 'lr'-lower right
        #s.master = tk.Tk()
        s.master = tk.Toplevel()
        s.master.withdraw()
        fwday = calendar.SUNDAY
        year = datetime.now().year
        month = datetime.now().month
        locale = None
        sel_bg = '#ecffc4'
        sel_fg = '#05640e'

        s._date = datetime(year, month, 1)
        s._selection = None # no date selected yet

        s.G_Frame = ttk.Frame(s.master)

        s._cal = s.__get_calendar(locale, fwday)

        s.__setup_styles()       # create custom styles
        s.__place_widgets()      # pack/grid the widgets
        s.__config_calendar()    # adjust calendar columns and install tags
        # Configure the canvas and the bindings used for selecting a date.
        s.__setup_selection(sel_bg, sel_fg)

        # Store item ids, used for inserting rows later.
        s._items = [s._calendar.insert('', 'end', values='') for _ in range(6)]

        # Insert the dates into the currently empty calendar.
        s._update()

        s.G_Frame.pack(expand = 1, fill = 'both')
        s.master.overrideredirect(1)
        s.master.update_idletasks()
        width, height = s.master.winfo_reqwidth(), s.master.winfo_reqheight()
        if point and position:
            if position == 'ur': x, y = point[0], point[1] - height
            elif position == 'lr': x, y = point[0], point[1]
            elif position == 'ul': x, y = point[0] - width, point[1] - height
            elif position == 'll': x, y = point[0] - width, point[1]
        else: x, y = (s.master.winfo_screenwidth() - width)/2, (s.master.winfo_screenheight() - height)/2
        s.master.geometry('%dx%d+%d+%d' % (width, height, x, y)) # position the window
        s.master.after(300, s._main_judge)
        s.master.deiconify()
        s.master.focus_set()
        s.master.wait_window() # wait_window (not mainloop) must be used to block here; mainloop could cause many errors in the host program

    def __get_calendar(s, locale, fwday):
        # Instantiate the appropriate calendar class.
        if locale is None:
            return calendar.TextCalendar(fwday)
        else:
            return calendar.LocaleTextCalendar(fwday, locale)

    def __setitem__(s, item, value):
        if item in ('year', 'month'):
            raise AttributeError("attribute '%s' is not writeable" % item)
        elif item == 'selectbackground':
            s._canvas['background'] = value
        elif item == 'selectforeground':
            s._canvas.itemconfigure(s._canvas.text, item=value)
        else:
            # NOTE(review): passing `s` as the first argument here looks
            # suspicious (G_Frame is already bound) — confirm intent.
            s.G_Frame.__setitem__(s, item, value)

    def __getitem__(s, item):
        if item in ('year', 'month'):
            return getattr(s._date, item)
        elif item == 'selectbackground':
            return s._canvas['background']
        elif item == 'selectforeground':
            return s._canvas.itemcget(s._canvas.text, 'fill')
        else:
            r = ttk.tclobjs_to_py({item: ttk.Frame.__getitem__(s, item)})
            return r[item]

    def __setup_styles(s):
        # Custom ttk styles for the month-navigation arrow buttons.
        style = ttk.Style(s.master)
        arrow_layout = lambda dir: (
            [('Button.focus', {'children': [('Button.%sarrow' % dir, None)]})]
        )
        style.layout('L.TButton', arrow_layout('left'))
        style.layout('R.TButton', arrow_layout('right'))

    def __place_widgets(s):
        # Header frame and its widgets.
        Input_judgment_num = s.master.register(s.Input_judgment) # the validator callback must be registered with Tk
        hframe = ttk.Frame(s.G_Frame)
        gframe = ttk.Frame(s.G_Frame)
        bframe = ttk.Frame(s.G_Frame)
        hframe.pack(in_=s.G_Frame, side='top', pady=5, anchor='center')
        gframe.pack(in_=s.G_Frame, fill=tk.X, pady=5)
        bframe.pack(in_=s.G_Frame, side='bottom', pady=5)

        lbtn = ttk.Button(hframe, style='L.TButton', command=s._prev_month)
        lbtn.grid(in_=hframe, column=0, row=0, padx=12)
        rbtn = ttk.Button(hframe, style='R.TButton', command=s._next_month)
        rbtn.grid(in_=hframe, column=5, row=0, padx=12)

        # Year combobox: last 11 years, digits-only keyboard input.
        s.CB_year = ttk.Combobox(hframe, width = 5, values = [str(year) for year in range(datetime.now().year, datetime.now().year-11,-1)], validate = 'key', validatecommand = (Input_judgment_num, '%P'))
        s.CB_year.current(0)
        s.CB_year.grid(in_=hframe, column=1, row=0)
        s.CB_year.bind('<KeyPress>', lambda event:s._update(event, True))
        s.CB_year.bind("<<ComboboxSelected>>", s._update)
        tk.Label(hframe, text = '年', justify = 'left').grid(in_=hframe, column=2, row=0, padx=(0,5))

        # Month combobox: 01..12, selection only.
        s.CB_month = ttk.Combobox(hframe, width = 3, values = ['%02d' % month for month in range(1,13)], state = 'readonly')
        s.CB_month.current(datetime.now().month - 1)
        s.CB_month.grid(in_=hframe, column=3, row=0)
        s.CB_month.bind("<<ComboboxSelected>>", s._update)
        tk.Label(hframe, text = '月', justify = 'left').grid(in_=hframe, column=4, row=0)

        # The calendar grid itself.
        s._calendar = ttk.Treeview(gframe, show='', selectmode='none', height=7)
        s._calendar.pack(expand=1, fill='both', side='bottom', padx=5)

        # OK / Cancel buttons.
        ttk.Button(bframe, text = "确 定", width = 6, command = lambda: s._exit(True)).grid(row = 0, column = 0, sticky = 'ns', padx = 20)
        ttk.Button(bframe, text = "取 消", width = 6, command = s._exit).grid(row = 0, column = 1, sticky = 'ne', padx = 20)

        # Thin dark frames along the four edges draw the window border.
        tk.Frame(s.G_Frame, bg = '#565656').place(x = 0, y = 0, relx = 0, rely = 0, relwidth = 1, relheigh = 2/200)
        tk.Frame(s.G_Frame, bg = '#565656').place(x = 0, y = 0, relx = 0, rely = 198/200, relwidth = 1, relheigh = 2/200)
        tk.Frame(s.G_Frame, bg = '#565656').place(x = 0, y = 0, relx = 0, rely = 0, relwidth = 2/200, relheigh = 1)
        tk.Frame(s.G_Frame, bg = '#565656').place(x = 0, y = 0, relx = 198/200, rely = 0, relwidth = 2/200, relheigh = 1)

    def __config_calendar(s):
        # cols = s._cal.formatweekheader(3).split()
        cols = ['日','一','二','三','四','五','六']
        s._calendar['columns'] = cols
        s._calendar.tag_configure('header', background='grey90')
        s._calendar.insert('', 'end', values=cols, tag='header')
        # Size every column to the widest weekday label.
        font = tkFont.Font()
        maxwidth = max(font.measure(col) for col in cols)
        for col in cols:
            s._calendar.column(col, width=maxwidth, minwidth=maxwidth,
                anchor='center')

    def __setup_selection(s, sel_bg, sel_fg):
        def __canvas_forget(evt):
            canvas.place_forget()
            s._selection = None

        s._font = tkFont.Font()
        s._canvas = canvas = tk.Canvas(s._calendar, background=sel_bg, borderwidth=0, highlightthickness=0)
        canvas.text = canvas.create_text(0, 0, fill=sel_fg, anchor='w')

        canvas.bind('<Button-1>', __canvas_forget)
        s._calendar.bind('<Configure>', __canvas_forget)
        s._calendar.bind('<Button-1>', s._pressed)

    def _build_calendar(s):
        year, month = s._date.year, s._date.month

        # update header text (Month, YEAR)
        header = s._cal.formatmonthname(year, month, 0)

        # Update the dates shown by the calendar.
        cal = s._cal.monthdayscalendar(year, month)
        for indx, item in enumerate(s._items):
            week = cal[indx] if indx < len(cal) else []
            fmt_week = [('%02d' % day) if day else '' for day in week]
            s._calendar.item(item, values=fmt_week)

    def _show_select(s, text, bbox):
        """Configure the canvas for a new selection."""
        x, y, width, height = bbox

        textw = s._font.measure(text)

        canvas = s._canvas
        canvas.configure(width = width, height = height)
        canvas.coords(canvas.text, (width - textw)/2, height / 2 - 1)
        canvas.itemconfigure(canvas.text, text=text)
        canvas.place(in_=s._calendar, x=x, y=y)

    def _pressed(s, evt = None, item = None, column = None, widget = None):
        """Clicked somewhere in the calendar."""
        if not item:
            x, y, widget = evt.x, evt.y, evt.widget
            item = widget.identify_row(y)
            column = widget.identify_column(x)

        if not column or not item in s._items:
            # Clicked in the weekday header row, or outside the columns.
            return

        item_values = widget.item(item)['values']
        if not len(item_values): # the row for this month is empty.
            return

        text = item_values[int(column[1]) - 1]
        if not text: # date cell is empty
            return

        bbox = widget.bbox(item, column)
        if not bbox: # calendar is not visible yet
            s.master.after(20, lambda : s._pressed(item = item, column = column, widget = widget))
            return

        # Update, then show the selection.
        text = '%02d' % text
        s._selection = (text, item, column)
        s._show_select(text, bbox)

    def _prev_month(s):
        """Update the calendar to show the previous month."""
        s._canvas.place_forget()
        s._selection = None

        s._date = s._date - timedelta(days=1)
        s._date = datetime(s._date.year, s._date.month, 1)
        s.CB_year.set(s._date.year)
        s.CB_month.set(s._date.month)
        s._update()

    def _next_month(s):
        """Update the calendar to show the next month."""
        s._canvas.place_forget()
        s._selection = None

        year, month = s._date.year, s._date.month
        s._date = s._date + timedelta(
            days=calendar.monthrange(year, month)[1] + 1)
        s._date = datetime(s._date.year, s._date.month, 1)
        s.CB_year.set(s._date.year)
        s.CB_month.set(s._date.month)
        s._update()

    def _update(s, event = None, key = None):
        """Refresh the calendar display from the year/month comboboxes."""
        if key and event.keysym != 'Return': return
        year = int(s.CB_year.get())
        month = int(s.CB_month.get())
        if year == 0 or year > 9999: return
        s._canvas.place_forget()
        s._date = datetime(year, month, 1)
        s._build_calendar() # rebuild the calendar grid

        if year == datetime.now().year and month == datetime.now().month:
            day = datetime.now().day
            # Pre-select today's cell when the current month is shown.
            for _item, day_list in enumerate(s._cal.monthdayscalendar(year, month)):
                if day in day_list:
                    item = 'I00' + str(_item + 2)
                    column = '#' + str(day_list.index(day)+1)
                    s.master.after(100, lambda :s._pressed(item = item, column = column, widget = s._calendar))

    def _exit(s, confirm = False):
        """Close the window; discard the selection unless confirmed."""
        if not confirm: s._selection = None
        s.master.destroy()

    def _main_judge(s):
        """Poll whether this window still has focus; close when focus is lost."""
        try:
            #when s.master is a Tk window:
            #if not s.master.focus_displayof(): s._exit()
            #else: s.master.after(10, s._main_judge)
            #when s.master is a Toplevel window:
            if s.master.focus_displayof() == None or 'toplevel' not in str(s.master.focus_displayof()): s._exit()
            else: s.master.after(10, s._main_judge)
        except:
            s.master.after(10, s._main_judge)
            #s.master.tk_focusFollowsMouse() # make focus follow the mouse

    def selection(s):
        """Return the selected date as a 'YYYY-MM-DD' string, or None."""
        if not s._selection: return None

        year, month = s._date.year, s._date.month
        return str(datetime(year, month, int(s._selection[0])))[:10]

    def Input_judgment(s, content):
        """Validate year-combobox keyboard input: digits only (or empty)."""
        # Without the == "" case the last digit could never be deleted.
        if content.isdigit() or content == "":
            return True
        else:
            return False
if __name__ == '__main__':
    # Demo: a small window with an entry and a button that pops the picker.
    root = tk.Tk()
    width, height = root.winfo_reqwidth() + 50, 50 # window size
    x, y = (root.winfo_screenwidth() - width )/2, (root.winfo_screenheight() - height)/2
    print(x,y)
    root.geometry('%dx%d+%d+%d' % (width, height, x, y )) # center the window on screen

    date_str = tk.StringVar()
    date = ttk.Entry(root, textvariable = date_str)
    date.place(x = 0, y = 0, relx = 5/20, rely = 1/6, relwidth = 14/20, relheigh = 2/3)

    # calendarWidget((x, y), 'ur').selection() returns the chosen date;
    # (x, y) is the anchor point for the pop-up.
    date_str_gain = lambda: [
        date_str.set(date)
        for date in [calendarWidget((x, y), 'ur').selection()]
        if date]

    tk.Button(root, text = '日期:', command = date_str_gain).place(x = 0, y = 0, relx = 1/20, rely = 1/6, relwidth = 4/20, relheigh = 2/3)

    root.mainloop()
|
[
"xiangyuw@umich.edu"
] |
xiangyuw@umich.edu
|
264b62e7d0d2651cf9ec655cdfb6fafd32babdd4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_201/2701.py
|
4dfd25441bfd296e14ceefcf2861b262d56462e9
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
import math
def get_stall(arg):
    """Locate the widest gap between consecutive occupied stalls.

    Sorts `arg` in place, then returns [gap, midpoint, left_endpoint] for
    the FIRST maximal gap, or [] when `arg` has fewer than two elements.
    """
    arg.sort()
    best = []
    for left, right in zip(arg, arg[1:]):
        gap = abs(left - right)
        # Keep the first strictly-widest gap seen so far.
        if not best or best[0] < gap:
            best = [gap, left + gap // 2, left]
    return best
t = int(input()) # read a line with a single integer (number of test cases)
for x in range(1, t + 1):
    n, k = [int(s) for s in input().split(" ")] # read a list of integers, 2 in this case
    if n == k:
        # Every stall gets filled; the last person has no empty neighbors.
        print("Case #{}: {} {}".format(x, 0, 0))
    else:
        # Sentinel "occupied" positions at both fences (0 and n+1).
        ls = [0, n+1]
        blank_list = []  # NOTE(review): unused
        for i in range(k):
            # get_stall returns [gap, midpoint, left] for the widest gap;
            # person i sits at that midpoint.
            mee = get_stall(ls)
            # print(mee)
            ls.append(mee[1])
            ls.sort()
            # print("***", ls)
            stall = ls.index(mee[1])
            val1 = ls[stall-1]
            val2 = ls[stall+1]
            # Free stalls to the left (z) / right (y) of the chosen stall.
            z = mee[1]-val1 - 1
            y = val2 - mee[1] - 1
            # y = max(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
            # z = min(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
            # print("Case #{}: {} {}".format(x, max(abs(mee[1]-mee[0])-1, y), max(abs(mee[2]-mee[1]), abs(z))-1))
        # Report the neighbor counts for the LAST person seated.
        print("Case #{}: {} {}".format(x, max(y, z), min(y, z)))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ad33747c00bc3429bacdc1bf31667c00daab67fc
|
5f09c2581c28751589871068d1faa9297859d2f3
|
/insert_banco.py
|
737b50ad7286cc87824d9969603d863b81f055e2
|
[] |
no_license
|
fandrefh/curso-python-e-django-senac
|
f68b4b4ce7071ac78034afdaf63251ed0422fa56
|
8a418a7d9acd12c3ca8820c5589d5d02476d3d0c
|
refs/heads/master
| 2021-01-20T20:28:53.311346
| 2016-08-27T20:48:44
| 2016-08-27T20:48:44
| 65,097,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
import sqlite3

# Rows to seed into the cad_clientes table: (nome, idade).
CLIENTES = [
    ('Regis', 35),
    ('Aloisio', 87),
    ('Bruna', 21),
    ('Matheus', 19),
]

conn = sqlite3.connect("clientes.db")
try:
    cursor = conn.cursor()
    # One parameterized executemany instead of four string-built statements:
    # less duplication and injection-safe if the data ever stops being literal.
    cursor.executemany(
        "INSERT INTO cad_clientes (nome, idade) VALUES (?, ?);",
        CLIENTES,
    )
    conn.commit()
    print('Dados inseridos com sucesso.')
finally:
    # Close the connection even if an insert fails (the original leaked it).
    conn.close()
|
[
"fandrefh@gmail.com"
] |
fandrefh@gmail.com
|
fd9a779ee31d4fad10c81bbf51d610bf3c2ed530
|
0b96015603d709b37dbd522ebae0a3b367e8e82f
|
/Tags/heads/80.png.py
|
a945a0b029e4b2ef8c456fb24ee9cb955f148741
|
[] |
no_license
|
AWilcke/ClariFight
|
ccbbfec08192b029ce13ed11fc6967550ede74ce
|
d1689025c5087c70f2c33ad80d8417b45b4ebd01
|
refs/heads/master
| 2021-01-11T11:17:27.446503
| 2016-03-10T19:32:00
| 2016-03-10T19:32:00
| 53,609,870
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
{"profile": "0.974092483521", "hair": "0.91241133213", "hairstyle": "0.996788263321", "fashion": "0.997870385647", "jewelry": "0.93635571003", "curly": "0.955452501774", "people": "0.993453621864", "dress": "0.908811211586", "individuality": "0.916719317436", "one": "0.99091988802", "glamour": "0.992386758327", "girl": "0.937388062477", "adult": "0.986885905266", "blond": "0.979349136353", "stylish": "0.980356574059", "portrait": "0.992387771606", "model": "0.979283392429", "clothing": "0.942631602287", "side view": "0.987667798996", "women": "0.98726606369"}
|
[
"arthur.wilcke@gmail.com"
] |
arthur.wilcke@gmail.com
|
76aae7edbb8a6a7cd3df5d256f19454db1a5fb83
|
368a74e5c1a748242e945b3bc03244d9f63ad376
|
/cosmos_flask_server-master/새태그KStars/domain/KFilePath.py
|
2b2c82b46ab7b8138142ea9135f209b7f66de18f
|
[] |
no_license
|
lee82762/cosmos_flask_server-master
|
e8c78274d18f7c4cc243c964a279498bef7dda88
|
37a5bb9dcc056d8bd6e55209b9e326a7465229ee
|
refs/heads/master
| 2023-04-22T21:58:59.497161
| 2021-05-06T10:00:14
| 2021-05-06T10:00:14
| 364,494,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
class KFilePath:
    """Holds the project and audio file paths for a KStars session."""

    def __init__(self):
        # Delegate to initData so the empty-state definition lives in one place
        # (the original duplicated the two assignments verbatim).
        self.initData()

    def initData(self):
        """Reset both paths to the empty string."""
        self.projectFilePath = ""
        self.audioFilePath = ""
|
[
"cjdrn0208@naver.com"
] |
cjdrn0208@naver.com
|
8f1d9b9c7f0d83582637adb178f4567c71ee05ee
|
9ebbe3fedeb1ca31603bc555fcce87b883781052
|
/7. Data Structures/graph.py
|
902ca0ced6ce9128ea76b1f33c26cb501cb32790
|
[] |
no_license
|
TMJUSTNOW/Algorithm-Implementations
|
5db118756de998460a91e09217b9b84ac235bae9
|
552a98c86840ce5876ad516677bb0dafcc7f257e
|
refs/heads/master
| 2021-06-02T06:49:04.105530
| 2016-09-30T05:22:45
| 2016-09-30T05:22:45
| 105,765,850
| 0
| 1
| null | 2017-10-04T12:30:14
| 2017-10-04T12:30:14
| null |
UTF-8
|
Python
| false
| false
| 13,126
|
py
|
# TODO: Convert Dijkstra processed dictionary to array
# A set of data structures to represent graphs
from queue import Queue
from heap import MinHeap
import sys
class Node(object):
    """A graph vertex identified solely by its (stringified) name."""

    def __init__(self, name):
        # Normalize to str so Node(1) and Node("1") compare equal.
        self.name = str(name)

    def getName(self):
        """Return the node's name."""
        return self.name

    def __str__(self):
        return self.name

    __repr__ = __str__  # repr and str render identically

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Delegate to the name's hash so equal nodes hash identically,
        # letting Node serve as a dict key / set member.
        return self.name.__hash__()
class Edge(object):
    """A directed edge from a source node to a destination node."""

    def __init__(self, src, dest):
        # src and dest are expected to be Node instances.
        self.src, self.dest = src, dest

    def getSource(self):
        """Return the edge's source endpoint."""
        return self.src

    def getDestination(self):
        """Return the edge's destination endpoint."""
        return self.dest

    def __str__(self):
        return '{0}->{1}'.format(self.src, self.dest)
class WeightedEdge(Edge):
    """An Edge that additionally carries a numeric weight (cost/distance)."""

    def __init__(self, src, dest, weight):
        # src and dest should be Node instances; weight is coerced to float.
        super().__init__(src, dest)
        self.weight = float(weight)

    def getWeight(self):
        """Return this edge's weight as a float."""
        return self.weight

    def __str__(self):
        arrow = '{0}->{1}'.format(self.src, self.dest)
        return '{0} ({1})'.format(arrow, self.weight)
class PathFinder(object):
    """Extracts shortest paths from an edge trace left by a shortest-path run.

    `paths` is a list of edges in order of discovery; walking it backwards
    from a destination recovers the geodesic to that destination.
    """

    def __init__(self, paths):
        self.paths = paths

    def path_to(self, dest):
        """Yield, in order, the edges of the shortest path to `dest`."""
        target = Node(dest)
        geodesic = []
        # Scan the trace backwards, chaining each edge whose destination
        # matches the current target back toward the source.
        for edge in reversed(self.paths):
            if edge.getDestination() == target:
                geodesic.append(edge)
                target = edge.getSource()
        yield from reversed(geodesic)
class Digraph(object):
    '''
    A directed graph: a set of Node objects plus an adjacency dict mapping
    each node to the list of nodes it points at.
    '''
    def __init__(self):
        # A Python Set is basically a list that doesn't allow duplicates
        # Entries into a set must be hashable
        # Because it is backed by a hashtable, lookups are O(1) as opposed to the O(n) of a list
        # See http://docs.python.org/2/library/stdtypes.html#set-types-set-frozenset
        self.nodes = set([])
        self.edges = {} # Python dictionary (hashtable); each key represents a node and the key's values represent adjacent nodes
    def addNode(self, node):
        # Register `node`; raises ValueError if it is already present.
        if node in self.nodes:
            # Even though self.nodes is a Set, this makes sure a duplicate
            # entry is not added for the same node in the self.edges list
            raise ValueError('Duplicate node')
        else:
            self.nodes.add(node)
            self.edges[node] = []
    def removeNode(self, node):
        # Delete `node` and every edge that references it (either direction).
        if node not in self.nodes:
            raise ValueError('Node not in graph')
        self.nodes.remove(node) # remove from set of nodes
        self.edges.pop(node) # remove as a key from the edges hashtable
        for src in self.edges: # remove as a value from the edges hashtable, O(n+m); can we do better?
            self.edges[src] = [n for n in self.edges[src] if n != node]
    def addEdge(self, edge):
        # Record one directed edge; both endpoints must already be in the graph.
        src = edge.getSource()
        dest = edge.getDestination()
        if not(src in self.nodes and dest in self.nodes):
            raise ValueError('Node not in graph')
        self.edges[src].append(dest)
    def addUndirectedEdge(self, edge):
        # Insert the edge in both directions.
        self.addEdge(edge)
        rev = Edge(edge.getDestination(), edge.getSource())
        self.addEdge(rev)
    def removeEdge(self, edge):
        # Remove one directed edge; raises ValueError if absent.
        src = edge.getSource()
        dest = edge.getDestination()
        if not(src in self.edges and dest in self.edges[src]):
            raise ValueError('Edge not in graph')
        self.edges[src].remove(dest)
    def removeUndirectedEdge(self, edge):
        # Remove both orientations of an undirected edge.
        self.removeEdge(edge)
        rev = Edge(edge.getDestination(), edge.getSource())
        self.removeEdge(rev)
    def childrenOf(self, node):
        # Nodes reachable from `node` by exactly one directed edge.
        return self.edges[node]
    def hasNode(self, node):
        return node in self.nodes
    def countNodes(self):
        return len(self.nodes)
    def bfs(self, s): # FINISH IMPLEMENTING THIS
        # NOTE(review): unfinished -- dequeues but never enqueues neighbors,
        # so calling this loops forever. Do not use as-is.
        explored = set([s])
        q = Queue([s])
        while q:
            v = q.dequeue()
    def reverse_edges(self, overwrite = True):
        # Build the transpose graph's adjacency dict. Either overwrite
        # self.edges in place (default) or return the new dict untouched.
        rev = {}
        for k in self.edges:
            for v in self.edges[k]:
                if v in rev:
                    rev[v].append(k)
                else:
                    rev[v] = [k]
        if overwrite:
            self.edges = rev
        else:
            return rev
    def compute_SCCs(self):
        '''
        Compute strongly connected components via Kosaraju's 2-pass algorithm.
        Returns a dict mapping each SCC's "leader" node to the list of nodes
        in that component.

        NOTE: Kosaraju's 2-pass algorithm requires that nodes be labeled
        1 to n, where n is the number of nodes in the graph.
        '''
        g_rev = self.reverse_edges(overwrite = False) # reversed edges
        scc = {} # for mapping leader(scc) labels (2nd pass)
        order = {} # for mapping finishing times (1st pass)
        explored = set() # keeps track of explored nodes
        t = 0 # keeps track of finishing times (1st pass)
        s = None # keeps track of leader node (2nd pass)
        # This recursive version of dfs appears to work correctly, however, on larger
        # graphs it causes a maximum recursion depth error because Python does
        # not support tail recursion. For this reason, I implemented an iterative
        # version using a stack.
        #
        # def dfs(g_edges, v):
        #     explored.add(v)
        #     scc[s].append(v)
        #     if v in g_edges:
        #         for i in g_edges[v]:
        #             if i not in explored:
        #                 dfs(g_edges, i)
        #     nonlocal t
        #     t += 1
        #     order[t] = v
        def dfs(g_edges, v):
            '''
            Iterative version of depth-first search customized for Kosaraju's
            2-pass algorithm.
            Input: A dictionary representation of the graph's adjacency list
            and a starting vertex.
            Output: No output (mutates the enclosing scc/order/explored state).
            '''
            stack = [v]
            while stack:
                current = stack.pop()
                if current not in explored:
                    explored.add(current)
                    scc[s].append(current)
                # Finished when the node has no out-edges left to explore:
                # record its finishing time. Otherwise re-push it plus ONE
                # unexplored neighbor (simulates the recursion).
                if current not in g_edges or \
                        all(i in explored for i in g_edges[current]):
                    nonlocal t
                    t += 1
                    order[t] = current
                else:
                    stack.append(current)
                    for e in g_edges[current]:
                        if e not in explored:
                            stack.append(e)
                            break
        def dfs_loop(edges, preprocessing = False):
            '''
            A procedure for searching over all components of a graph using
            depth-first search, customized for Kosaraju's 2-pass algorithm.
            Input: A dictionary representation of the graph's adjacency list
            and a boolean indicating whether the procedure is being called for
            the first or second pass of Kosaraju's algorithm.
            Output: No output.
            '''
            nonlocal s
            # 1st pass walks nodes n..1 by label; 2nd pass walks them in
            # decreasing finishing-time order computed by the 1st pass.
            for i in range(self.countNodes(), 0, -1):
                if preprocessing:
                    v = Node(i)
                else:
                    v = order[i]
                if v not in explored:
                    s = v
                    scc[s] = []
                    dfs(edges, v)
        dfs_loop(g_rev, preprocessing = True) # 1st pass
        scc = {} # reset scc(leader) mapping for 2nd pass
        explored = set() # reset explored nodes for 2nd pass
        dfs_loop(self.edges) # 2nd pass
        return scc
    def __str__(self):
        res = ''
        for k in self.edges:
            for d in self.edges[Node(k)]: # Modified from str to Node
                res = '{0}{1}->{2}\n'.format(res, k, d)
        return res[:-1]
class WeightedDigraph(Digraph):
    '''
    A subclass of Digraph; supports context-specific weighted edges
    '''
    def __init__(self):
        Digraph.__init__(self)
        # Maps (src, dest) node pairs to the edge's float weight.
        self.weights = {}
    def addEdge(self, edge): #edge should be a WeightedEdge
        src = edge.getSource()
        dest = edge.getDestination()
        weight = edge.getWeight()
        if not(src in self.nodes and dest in self.nodes):
            raise ValueError('Node not in graph')
        self.edges[src].append(dest)
        # NOTE(review): a parallel edge overwrites the previous weight here,
        # yet removeEdge below treats the entry as a container -- confirm.
        self.weights[(src, dest)] = weight
    def removeEdge(self, edge): #edge should be a WeightedEdge
        src = edge.getSource()
        dest = edge.getDestination()
        weight = edge.getWeight()
        if not(src in self.edges and dest in self.edges[src]):
            raise ValueError('Edge not in graph')
        self.edges[src].remove(dest)
        # NOTE(review): self.weights[(src, dest)] is a float (set in addEdge),
        # so len() on it raises TypeError -- this branch looks like a bug.
        if len(self.weights[(src, dest)]) > 1:
            self.weights[(src, dest)].pop()
        else:
            self.weights.pop((src, dest))
    def removeNode(self, node):
        # Delete `node`, its adjacency list, and every weight entry that
        # references it in either direction.
        if node not in self.nodes:
            raise ValueError('Node not in graph')
        self.nodes.remove(node) # remove from set of nodes
        children = self.edges.pop(node) # remove as a key from the edges hashtable and store its former children
        for v in children:
            self.weights.pop((node, v))
        for src in self.edges: # remove as a value from the edges hashtable, O(n+m); can we do better?
            self.weights.pop((src, node), None)
            self.edges[src] = [n for n in self.edges[src] if n != node]
    def getWeight(self, src, dest):
        # Raises KeyError if no (src, dest) edge exists.
        return self.weights[(src, dest)]
    def shortest_paths(self, v):
        '''
        Computes the shortest path distances from a source vertex to all other
        vertices using Dijkstra's algorithm.
        Returns (processed, PathFinder): `processed` maps nodes to geodesic
        distance from `v`; the PathFinder reconstructs the actual paths.
        '''
        processed = {} # mapping of processed vertices to geodesic distance
        candidates = {} # mapping of candidate vertices to their Dijkstra scores; exists for convenience of O(1) lookups
        trace = [] # stores edges in order of processing; used to extract shortest paths
        def dijkstra_score(src, dest):
            # Greedy score: settled distance to src plus the edge weight.
            return processed[src] + self.getWeight(src, dest)
        # Initialize Dijkstra scores
        for n in self.nodes:
            if n == v:
                processed[n] = 0
                for dest in self.edges[n]:
                    score = dijkstra_score(n, dest)
                    if dest not in candidates or score < candidates[dest]:
                        candidates[dest] = score
            else:
                if n not in candidates:
                    candidates[n] = float('inf')
        # heapify node/score tuples, provide comparison key
        unprocessed = MinHeap(list(candidates.items()), lambda x:x[1])
        # compute shortest paths
        while not unprocessed.is_empty():
            n,s = unprocessed.extract_min()
            processed[n] = s
            candidates.pop(n)
            if len(trace) == 0:
                trace.append(Edge(v, n)) # Investigate KeyError when using WeightedEdge
            else:
                src = trace[-1].getDestination()
                trace.append(Edge(src, n)) # Investigate KeyError when using WeightedEdge
            for dest in self.edges[n]:
                if dest in candidates:
                    # Decrease-key: drop the stale heap entry, then reinsert
                    # the better of the old and new scores.
                    unprocessed.delete((dest, candidates[dest]))
                    score = dijkstra_score(n, dest)
                    best = min(candidates[dest], score)
                    candidates[dest] = best
                    unprocessed.insert((dest, best))
        return (processed, PathFinder(trace))
    def __str__(self):
        result = ''
        for src in self.edges:
            for dest in self.edges[src]:
                w = self.weights[(src, dest)]
                result = '{0}{1}->{2} ({3})\n'.format(result, src, dest, w)
        return result[:-1]
class Graph(Digraph):
    """An undirected graph: every edge is mirrored in both directions."""

    def __init__(self):
        super().__init__()

    def addEdge(self, edge):
        # Store both orientations so the edge is traversable either way.
        super().addUndirectedEdge(edge)

    def removeEdge(self, edge):
        super().removeUndirectedEdge(edge)

    def removeNode(self, node):
        """Delete `node` and every edge incident to it."""
        if node not in self.nodes:
            raise ValueError('Node not in graph')
        self.nodes.remove(node)
        # Popping the adjacency list also tells us exactly which neighbors
        # hold a back-reference that must be scrubbed.
        neighbors = self.edges.pop(node)
        for nbr in neighbors:
            self.edges[nbr] = [m for m in self.edges[nbr] if m != node]
|
[
"mdsalerno@gmail.com"
] |
mdsalerno@gmail.com
|
2ceaf54794a3cce04452020476266a759c4a6b1f
|
0a86abffabe7ab55071b845245aa66bfeca0ad60
|
/tf_lib/__init__.py
|
28eed73f3d6307efe7580438326d12c14281fefe
|
[] |
no_license
|
lttsh/CVAE
|
93018a9e80567f18bf0e971484fe769f6630c6bd
|
55044ff9953e69e39c8481d0cfcb630fbb450e0d
|
refs/heads/master
| 2022-02-24T02:13:13.659972
| 2019-08-22T01:15:30
| 2019-08-22T01:15:30
| 202,025,368
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
import tf_lib.datasets
import tf_lib.loaders
import tf_lib.trainer
import tf_lib.utils
|
[
"lshao2@slb.com"
] |
lshao2@slb.com
|
bb1cdd39e5a42df923644cd0545ed8af6c329188
|
5de1aecd976ce222efc4d86b1efd65185eaa4f46
|
/TD3.py
|
6120bcc1b72e8028e779a22c1947d47f0db3df1a
|
[] |
no_license
|
THBUer-yw/DOIL
|
cb093d164ecc79a4f15045c9b3d25b521dfc40e2
|
d5cc75a356d67e65e1e09c9ca02db15e23313eb3
|
refs/heads/master
| 2023-05-14T14:22:09.156903
| 2021-05-31T08:22:16
| 2021-05-31T08:22:16
| 324,306,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,146
|
py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
    """Deterministic policy network: maps states to bounded actions."""

    def __init__(self, state_dim, action_dim, max_action, num_hidden_layers):
        super(Actor, self).__init__()
        self.num_hidden_layers = num_hidden_layers
        self.input_layer = nn.Linear(state_dim, 256)
        self.hidden_layers = nn.ModuleList(
            nn.Linear(256, 256) for _ in range(num_hidden_layers)
        )
        self.output_layer = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        h = F.relu(self.input_layer(state))
        for layer in self.hidden_layers:
            h = F.relu(layer(h))
        # tanh squashes into [-1, 1]; scale to the environment's action bound.
        return self.max_action * torch.tanh(self.output_layer(h))
class Critic(nn.Module):
    """Twin Q-networks; TD3 takes the min of the two to curb overestimation."""

    def __init__(self, state_dim, action_dim, num_hidden_layers):
        super(Critic, self).__init__()
        self.num_hidden_layers = num_hidden_layers
        in_dim = state_dim + action_dim
        # Q1 architecture
        self.input_layer1 = nn.Linear(in_dim, 256)
        self.hidden_layers1 = nn.ModuleList(
            nn.Linear(256, 256) for _ in range(num_hidden_layers)
        )
        self.output_layer1 = nn.Linear(256, 1)
        # Q2 architecture
        self.input_layer2 = nn.Linear(in_dim, 256)
        self.hidden_layers2 = nn.ModuleList(
            nn.Linear(256, 256) for _ in range(num_hidden_layers)
        )
        self.output_layer2 = nn.Linear(256, 1)

    def _q(self, sa, input_layer, hidden_layers, output_layer):
        # Shared forward pass for either Q head.
        h = F.relu(input_layer(sa))
        for layer in hidden_layers:
            h = F.relu(layer(h))
        return output_layer(h)

    def forward(self, state, action):
        """Return both Q estimates for the (state, action) batch."""
        sa = torch.cat([state, action], 1)
        q1 = self._q(sa, self.input_layer1, self.hidden_layers1, self.output_layer1)
        q2 = self._q(sa, self.input_layer2, self.hidden_layers2, self.output_layer2)
        return q1, q2

    def Q1(self, state, action):
        """Return only the first Q head (used for the actor loss)."""
        sa = torch.cat([state, action], 1)
        return self._q(sa, self.input_layer1, self.hidden_layers1, self.output_layer1)
# class Actor(nn.Module):
# def __init__(self, state_dim, action_dim, max_action, num_hidden_layers):
# super(Actor, self).__init__()
#
# self.l1 = nn.Linear(state_dim, 256)
# self.l2 = nn.Linear(256, 256)
# self.l3 = nn.Linear(256, action_dim)
#
# self.max_action = max_action
#
#
# def forward(self, state):
# a = F.relu(self.l1(state))
# a = F.relu(self.l2(a))
# return self.max_action * torch.tanh(self.l3(a))
#
#
# class Critic(nn.Module):
# def __init__(self, state_dim, action_dim, num_hidden_layers):
# super(Critic, self).__init__()
#
# # Q1 architecture
# self.l1 = nn.Linear(state_dim + action_dim, 256)
# self.l2 = nn.Linear(256, 256)
# self.l3 = nn.Linear(256, 1)
#
# # Q2 architecture
# self.l4 = nn.Linear(state_dim + action_dim, 256)
# self.l5 = nn.Linear(256, 256)
# self.l6 = nn.Linear(256, 1)
#
#
# def forward(self, state, action):
# sa = torch.cat([state, action], 1)
#
# q1 = F.relu(self.l1(sa))
# q1 = F.relu(self.l2(q1))
# q1 = self.l3(q1)
#
# q2 = F.relu(self.l4(sa))
# q2 = F.relu(self.l5(q2))
# q2 = self.l6(q2)
# return q1, q2
#
#
# def Q1(self, state, action):
# sa = torch.cat([state, action], 1)
#
# q1 = F.relu(self.l1(sa))
# q1 = F.relu(self.l2(q1))
# q1 = self.l3(q1)
# return q1
class TD3(object):
    """Twin Delayed Deep Deterministic Policy Gradient agent.

    Implements TD3 (Fujimoto et al., 2018, https://arxiv.org/abs/1802.09477):
    twin critics, target-policy smoothing, and delayed actor updates. The
    `train` step can optionally replace environment rewards with GAIL
    discriminator rewards.
    """
    def __init__(self, args, state_dim, action_dim, max_action, use_cuda, num_hidden_layers, discount=0.99, tau=0.005, policy_noise=0.2, noise_clip=0.5, policy_freq=2):
        self.device = torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
        self.args = args
        self.actor = Actor(state_dim, action_dim, max_action, num_hidden_layers).to(self.device)
        self.actor_target = copy.deepcopy(self.actor)
        self.critic = Critic(state_dim, action_dim, num_hidden_layers).to(self.device)
        self.critic_target = copy.deepcopy(self.critic)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        self.max_action = max_action
        # discount: gamma; tau: Polyak averaging rate for the target nets.
        self.discount = discount
        self.tau = tau
        # Target-policy smoothing: Gaussian noise std and its clamp range.
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        # Actor/target updates run once every `policy_freq` critic updates.
        self.policy_freq = policy_freq
        self.total_it = 0
    def select_action(self, state):
        """Return the deterministic greedy action for `state` as a flat numpy array."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
        return self.actor(state).cpu().data.numpy().flatten()
    def train(self, replay_buffer, writer, steps, gail=None):
        """Run one TD3 update from a replay-buffer minibatch.

        When `gail` is given, its discriminator replaces the stored rewards
        and the mean imitation reward is logged to `writer` at `steps`.
        """
        self.total_it += 1
        # Sample replay buffer
        state, action, next_state, reward, not_done = replay_buffer.sample(self.args.batch_size)
        if gail:
            reward = gail.predict_reward(state, action, self.args.discount, not_done, self.args.reward_type)
            writer.add_scalar("discriminator/gail_reward", np.mean(np.array(reward.to("cpu")), axis=0), steps)
        with torch.no_grad():
            # Select action according to policy and add clipped noise
            noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
            # Compute the target Q value (min of the twin targets curbs overestimation)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q
        # Get current Q estimates
        current_Q1, current_Q2 = self.critic(state, action)
        # Compute critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Delayed policy updates
        if self.total_it % self.policy_freq == 0:
            # Compute actor loss (maximize Q1 under the current policy)
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Update the frozen target models (Polyak averaging)
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
    def save(self, filename):
        """Serialize actor/critic weights and optimizer states to `filename`-prefixed files."""
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
    def load(self, filename, device):
        """Restore state saved by `save`; rebuild target nets from the loaded copies."""
        self.critic.load_state_dict(torch.load(filename + "_critic", map_location=device))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer", map_location=device))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor", map_location=device))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer", map_location=device))
        self.actor_target = copy.deepcopy(self.actor)
|
[
"13261059591@163.com"
] |
13261059591@163.com
|
5777394da90b2decc1962dac71b63847e4a0847c
|
bef13bdcfdcccb7c77944a1591c1d333efbf6c08
|
/HW1/String/Alphabet Rangoli.py
|
47d72ed7056d848ed7fe7474bd397cf2137b346b
|
[
"MIT"
] |
permissive
|
danial1371/ADM-HW1
|
a18cb0f5d14c557b9d7050c20134f803bb2d2911
|
30c8b7879b14d29dddf6e20192f7e0fea2466cf9
|
refs/heads/main
| 2023-01-01T02:05:54.115347
| 2020-10-25T22:15:52
| 2020-10-25T22:15:52
| 306,909,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
import string
def print_rangoli(n):
    """Print a size-n alphabet rangoli (HackerRank pattern) to stdout."""
    width = 4 * n - 3
    letters = string.ascii_lowercase
    rows = []
    for i in range(n):
        right = "-".join(letters[i:n])
        # Mirror the right half around its first character, then pad with '-'.
        rows.append((right[::-1] + right[1:]).center(width, "-"))
    # rows[:0:-1] is the top half: every row but the widest one, reversed.
    print("\n".join(rows[:0:-1] + rows))
if __name__ == '__main__':
    n = int(input())  # rangoli size, read from stdin
    print_rangoli(n)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2cdd67804b10dbb8d7efd074a76bbc88d419696
|
4dba84f7fad05cb5b83bad50833612489de3ac08
|
/arrow_pattern.py
|
f12cf46ab2902af62523a210263ea564dda63da9
|
[] |
no_license
|
abhi55555/Hello-world
|
806deb5c235093b991c9e61555c9323e20deba9a
|
a27841742b133996fdab8b5c503ca0efeaef4638
|
refs/heads/master
| 2020-04-23T17:19:06.738510
| 2019-05-31T12:19:16
| 2019-05-31T12:19:16
| 171,327,492
| 0
| 0
| null | 2019-02-20T20:25:31
| 2019-02-18T17:33:59
|
Python
|
UTF-8
|
Python
| false
| false
| 462
|
py
|
n = 20      # overall figure size parameter
t = n // 2  # half-size, used for widths and centering
# Head: centered rows of '*' with odd widths 1, 3, ..., t (or t-1).
for i in range(1, t + 1, 2):
    print(' ' * ((t - i) // 2) + '*' * i)
# Shaft: rows containing a single centered '@'.
for i in range(n // 4 - 1):
    print(' ' * ((t - 1) // 2) + '@')
# Shaft rows with a growing run of '*' offset to the right.
for i in range(n // 4 + 1):
    print(' ' * ((t - 1) // 2) + '@' + ' ' * (t - 1) + '*' * i)
# One horizontal row: '@' repeated n//2 times, ending in '*'s.
print(' ' * ((t - 1) // 2) + '@', end='')
for i in range(n // 2 - 1):
    print('@', end='')
print('*' * (t // 2 + 1))
# Tail: shrinking runs of '*' indented past the shaft.
for i in range(t // 2, 0, -1):
    print(' ' * ((t - 1) // 2) + ' ' * (n // 2) + '*' * i)
|
[
"33001714+abhi55555@users.noreply.github.com"
] |
33001714+abhi55555@users.noreply.github.com
|
2b149a759936616df7de6403e9ccee147214a654
|
7025bda64ac1e9d472bc8bfe40981c517e3c1e46
|
/Password_Cracker.py
|
1f3692bad9bbaa3e4634a4c1944032e626315523
|
[] |
no_license
|
SzymoRatajczak/PasswordCracker
|
aeb642b845def52eb8a14ff77c7e66446651dfbd
|
c4cf45967aa97229bc4d9d689fe41bcea692ecbb
|
refs/heads/master
| 2020-09-16T03:08:16.484551
| 2019-11-23T18:03:34
| 2019-11-23T18:03:34
| 223,631,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
import crypt
def crack(pas):
    """Dictionary-attack a crypt(3)-hashed password.

    `pas` is the hashed password; its first two characters are the salt.
    Prints the plaintext and stops on the first match. The failure message
    is printed once, only after the whole wordlist is exhausted (the
    original printed it once per non-matching word and never stopped).
    """
    salt = pas[:2]
    # NOTE(review): 'usr/share/wordlist.txt' is a relative path -- probably
    # meant '/usr/share/wordlist.txt'; confirm before relying on it.
    # `with` guarantees the wordlist file is closed (original leaked it).
    with open('usr/share/wordlist.txt', 'r') as wordlist:
        for word in wordlist:
            word = word.strip('\n')
            enc = crypt.crypt(word, salt)
            if enc == pas:
                print('Password was cracked:' + word)
                return
    print('Password cannot be cracked')
def main():
    """Read 'user: hash' lines from password.txt and attack each hash."""
    # `with` guarantees the file is closed (the original never closed it).
    with open('password.txt', 'r') as f:
        for line in f:
            line = line.strip('\n')
            if ':' in line:
                # Field 0 is the username (unused); field 1 is the hash.
                pas = line.split(':')[1].strip(' ')
                crack(pas)

if __name__=='__main__':
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
74bc60d0e44d130fdca4152fa6528656aeb5e24c
|
d3bf647ba8c99c3ff13c0cf66e03d5c2f52e26a5
|
/dsa/temp.py
|
ee23c59fb9463e778a491b8047847ae0c65a155f
|
[] |
no_license
|
jyale/blackbox
|
8d9ce11ca97087f7e03e39b2c85aea2e2df4d0f1
|
975c853a69154e58f037cf041d6c2d513f148796
|
refs/heads/master
| 2020-03-28T01:05:00.332105
| 2013-06-20T20:14:37
| 2013-06-20T20:14:37
| 10,828,404
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,783
|
py
|
from Crypto.Random import random
from Crypto.PublicKey import DSA
from Crypto.Hash import SHA
################################
# LINKABLE RING SIGNATURE CODE
# Python 2 experiment script: 2-member linkable ring signatures built on
# DSA domain parameters. All state below is module-global on purpose.
################################
# Borrow DSA domain parameters (p, q, g) from a freshly generated key.
key = DSA.generate(1024)
p = key.p
q = key.q
g = key.g
# generate private keys
x1 = random.StrongRandom().randint(1,q-1)
x2 = random.StrongRandom().randint(1,q-1)
# get public keys
y1 = pow(g,x1,p)
y2 = pow(g,x2,p)
tuple1 = (y1,g,p,q,x1)
tuple2 = (y2,g,p,q,x2)
# get the 2 DSA keys
key1 = DSA.construct(tuple1)
key2 = DSA.construct(tuple2)
# define the hash functions
def H1(message):
    """Hash `message` with SHA-1 and reduce into Z_q (the scalar domain)."""
    # hash the message
    digest = SHA.new(message).hexdigest()
    # convert to integer
    x = int(digest, 16)
    # take it mod q
    return x % q
def H2(message):
    """Hash `message` with SHA-1 and reduce into Z_p (the group domain)."""
    # hash the message
    digest = SHA.new(message).hexdigest()
    # convert to integer
    x = int(digest, 16)
    # take it mod p
    return x % p
print
y1 = key1.y
y2 = key2.y
x1 = key1.x
x2 = key2.x
L = [y1,y2]
h = H2(str(y1) + str(y2))
# NOTE(review): h is immediately overwritten with g, discarding the hashed
# group element above -- linkability tags then use base g. Confirm intent.
h = g
######################################
# SIGNATURE GENERATION
# First signature: signer x1 (index 0), message 'hello'.
######################################
m = 'hello'
# Linkability tag: h^x identifies the same signer across signatures.
tag = pow(h,x1,p)
# get a random u
u = random.StrongRandom().randint(1,q-1)
c2 = H1(str(y1) + str(y2) + str(tag) + m + str(pow(g,u,p)) + str(pow(h,u,p)))
# step 3
s2 = random.StrongRandom().randint(1,q-1)
c1 = H1(str(y1) + str(y2) + str(tag) + m + str((pow(g,s2,p) * pow(y2,c2,p))%p) + str((pow(h,s2,p) * pow(tag,c2,p))%p))
# step 4: close the ring with the real secret key x1.
s1 = (u - (x1 * c1)) % q
sig = [c1,[s1,s2],tag]
firstsig = sig
thirdsig = sig
##########################################
# Second signature: signer x2 (index 1), message 'weak'.
# x2 = x2 + 1
L = [y1,y2]
n = len(L)
m = 'weak'
x = x2
c = range(n)  # Python 2: range() returns a mutable list (indexable below)
# index of private key in list of public keys
# pi = 1
# get a string of all the public keys
keystring = ''
for i in range(n):
    keystring += str(L[i])
tag = pow(h,x,p)
keytagm = keystring + str(tag) + m
# get a random u
u = random.StrongRandom().randint(1,q-1)
c[0] = H1(keytagm + str(pow(g,u,p)) + str(pow(h,u,p)))
# step 3
s1 = random.StrongRandom().randint(1,q-1)
c[1] = H1(keytagm + str((pow(g,s1,p) * pow(L[0],c[0],p))%p) + str((pow(h,s1,p) * pow(tag,c[0],p))%p))
# step 4
s2 = (u - (x * c[1])) % q
sig = [c[0],[s1,s2],tag]
secondsig = sig
#########################################
# SIGNATURE VERIFICATION FUNCTION
#########################################
def verify(sig, L, m):
    """Verify a linkable ring signature over public-key list L and message m.

    Recomputes the challenge chain and prints c[0], the recomputed c[n],
    and whether they match (True means the signature verifies).
    """
    # get variables from signature
    n = len(L)
    c = range(n + 1)  # Python 2: range() returns a mutable list
    c[0] = sig[0]
    s = sig[1]
    tag = sig[2]
    # lists to store calculation results in (z' and z'' in LRS paper)
    zp = range(n)
    zpp = range(n)
    # get a string of all the public keys
    keystring = ''
    for i in range(n):
        keystring += str(L[i])
    # Walk the ring: each z'/z'' pair feeds the next challenge value.
    for i in range(n):
        zp[i] = (pow(g,s[i],p) * pow(L[i],c[i],p)) % p
        zpp[i] = (pow(h,s[i],p) * pow(tag,c[i],p)) % p
        c[i+1] = H1(keystring + str(tag) + m + str(zp[i]) + str(zpp[i]))
    result = c[n]
    print c[0]
    print result
    print (c[0] == c[n])
    print
# Third signature (inline form): signer x1, message 'hello'.
m = 'hello'
x = x1
tag = pow(h,x,p)
# get a random u
u = random.StrongRandom().randint(1,q-1)
c2 = H1(str(y1) + str(y2) + str(tag) + m + str(pow(g,u,p)) + str(pow(h,u,p)))
# step 3
s2 = random.StrongRandom().randint(1,q-1)
c1 = H1(str(y1) + str(y2) + str(tag) + m + str((pow(g,s2,p) * pow(y2,c2,p))%p) + str((pow(h,s2,p) * pow(tag,c2,p))%p))
# step 4
s1 = (u - (x * c1)) % q
sig = [c1,[s1,s2],tag]
thirdsig = sig
####################################
# TEST SIG GEN FUNCTION
# Same generation again, but using list-indexed c/s (precursor to sign()).
####################################
m = 'hello'
x = x1
L = [y1,y2]
n = len(L)
c = range(n)  # Python 2: range() returns a mutable list
s = range(n)
# NOTE(review): keystring uses `tag` from the PREVIOUS block before tag is
# reassigned on the next line -- confirm this ordering is intentional.
keystring = str(y1) + str(y2) + str(tag) + m;
tag = pow(h,x,p)
# get a random u
u = random.StrongRandom().randint(1,q-1)
c[1] = H1(keystring + str(pow(g,u,p)) + str(pow(h,u,p)))
# step 3
s[1] = random.StrongRandom().randint(1,q-1)
c[0] = H1(keystring + str((pow(g,s[1],p) * pow(L[1],c[1],p))%p) + str((pow(h,s[1],p) * pow(tag,c[1],p))%p))
# step 4
s[0] = (u - (x * c[0])) % q
sig = [c[0],[s[0],s[1]],tag]
thirdsig = sig
# m = 'hello'
# L = [y1,y2]
# x = x1
# pi = 0
def sign(m,L,x,pi):
    """Produce a linkable ring signature for message `m`.

    `L` is the list of public keys, `x` the signer's private key, and `pi`
    the signer's index in L. NOTE(review): relies on module globals
    (c, s, n, g, h, p, q) and the index arithmetic only closes the ring
    for n == 2 -- confirm before generalizing.
    """
    tag = pow(h,x,p)
    # get a random u
    keystring = str(L[0]) + str(L[1]) + str(tag) + m;
    u = random.StrongRandom().randint(1,q-1)
    c[(pi+1)%n] = H1(keystring + str(pow(g,u,p)) + str(pow(h,u,p)))
    i = (pi+1)%n
    # step 3
    s[i] = random.StrongRandom().randint(1,q-1)
    c[(i+1)%n] = H1(keystring + str((pow(g,s[i],p) * pow(L[i],c[i],p))%p) + str((pow(h,s[i],p) * pow(tag,c[i],p))%p))
    # step 4
    s[pi] = (u - (x * c[pi])) % q
    sig = [c[0],[s[0],s[1]],tag]
    thirdsig = sig
    return sig
# Smoke tests: verify each stored signature against both key orderings.
m = 'hello'
print 'verify funtion.....'
print
L = [y2,y1]
verify(firstsig,L,'hello')
verify(secondsig,L,'weak')
L = [y1,y2]
verify(firstsig,L,'hello')
verify(secondsig,L,'weak')
print 'third sig'
verify(thirdsig,L,m)
print
print 'weak'
verify(sign(m,L,x2,1),L,m)
|
[
"jyale@---.com"
] |
jyale@---.com
|
5b683b99c44e491339df51fa623bbbc58b8347c9
|
553b5806d7fd69258e699d1eddefa005e5e9f3bd
|
/Algorithms/Tuples.py
|
435009038fcc8d62c5bf4ff9e244f581c42b9be2
|
[] |
no_license
|
ShaftesburySchoolDorset/PythonIntroAlevel
|
7c2c8d9c7c0091e8721fbcad158b994f19d16811
|
47aaf0d6905495046e1ef9f0e0503ea6de234338
|
refs/heads/master
| 2021-01-24T08:48:58.140811
| 2017-05-02T10:48:41
| 2017-05-02T10:48:41
| 69,012,638
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
#!/usr/local/bin/python3
ta = 1, 2, 3, 4        # tuple literal: the parentheses are optional
tb = (1, 2, 3, 4)
print(ta)
print(tb)
print(ta[0])           # indexing works like lists
#ta[0] = 12            # would raise TypeError: tuples are immutable
print(ta + tb)         # '+' concatenates into a new 8-element tuple
def squares(n=20):
    """Return a tuple of the first `n` perfect squares (1, 4, ..., n*n).

    `n` defaults to 20, preserving the original fixed behavior; the
    build-a-list-then-convert loop is replaced by a generator expression.
    """
    return tuple(i ** 2 for i in range(1, n + 1))
print(squares())
|
[
"gcomplin@gmail.com"
] |
gcomplin@gmail.com
|
e3e6ba2f90be69ce473c4e202b7654e7116b46be
|
97fbecfb57cb605114f50fccdbdf79ec2420d8ff
|
/osf/migrations/0080_add_abstractprovider.py
|
2c2bab38453a9b4ecfdbf84fc5b3437b05e4dfbf
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
pattisdr/osf.io
|
ffa259465304beb4212c42f26a8811743b3cdf24
|
2a8bdf358bb67f0ad5a9e7bb7ac9a329b98e8825
|
refs/heads/develop
| 2020-12-26T01:13:56.958648
| 2019-06-11T13:39:00
| 2019-06-11T13:39:00
| 35,900,874
| 1
| 1
|
Apache-2.0
| 2019-10-30T13:49:15
| 2015-05-19T18:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 9,581
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-15 19:48
from __future__ import unicode_literals
import dirtyfields.dirtyfields
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import osf.models.base
import osf.utils.datetime_aware_jsonfield
import osf.utils.fields
class Migration(migrations.Migration):
dependencies = [
('osf', '0079_merge_20180207_1545'),
]
operations = [
migrations.CreateModel(
name='AbstractProvider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('reviews_workflow', models.CharField(blank=True, choices=[(None, 'None'), ('post-moderation', 'Post-Moderation'), ('pre-moderation', 'Pre-Moderation')], max_length=15, null=True)),
('reviews_comments_private', models.NullBooleanField()),
('reviews_comments_anonymous', models.NullBooleanField()),
('type', models.CharField(choices=[('osf.preprintprovider', 'preprint provider')], db_index=True, max_length=255)),
('name', models.CharField(max_length=128)),
('advisory_board', models.TextField(blank=True, default=b'')),
('description', models.TextField(blank=True, default=b'')),
('domain', models.URLField(blank=True, default=b'')),
('domain_redirect_enabled', models.BooleanField(default=False)),
('external_url', models.URLField(blank=True, null=True)),
('email_contact', models.CharField(blank=True, max_length=200, null=True)),
('email_support', models.CharField(blank=True, max_length=200, null=True)),
('social_twitter', models.CharField(blank=True, max_length=200, null=True)),
('social_facebook', models.CharField(blank=True, max_length=200, null=True)),
('social_instagram', models.CharField(blank=True, max_length=200, null=True)),
('footer_links', models.TextField(blank=True, default=b'')),
('facebook_app_id', models.BigIntegerField(blank=True, null=True)),
('example', models.CharField(blank=True, max_length=20, null=True)),
('allow_submissions', models.BooleanField(default=True)),
('share_publish_type', models.CharField(choices=[(b'Preprint', b'Preprint'), (b'Thesis', b'Thesis')], default=b'Preprint', help_text=b'This SHARE type will be used when pushing publications to SHARE', max_length=32, null=True)),
('share_source', models.CharField(blank=True, max_length=200, null=True)),
('share_title', models.TextField(blank=True, default=b'', null=True)),
('additional_providers', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, default=list, null=True, size=None)),
('access_token', osf.utils.fields.EncryptedTextField(blank=True, null=True)),
('preprint_word', models.CharField(choices=[(b'preprint', b'Preprint'), (b'paper', b'Paper'), (b'thesis', b'Thesis'), (b'none', b'None')], default=b'preprint', max_length=10, null=True)),
('subjects_acceptable', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=list, encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder, null=True)),
('default_license', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='default_license', to='osf.NodeLicense')),
('licenses_acceptable', models.ManyToManyField(blank=True, related_name='licenses_acceptable', to='osf.NodeLicense')),
],
options={
'abstract': False,
},
bases=(dirtyfields.dirtyfields.DirtyFieldsMixin, models.Model),
),
migrations.RunSQL(
[
"""
INSERT INTO osf_abstractprovider (id, created, modified, _id,
reviews_workflow, reviews_comments_private, reviews_comments_anonymous, name, advisory_board, description,
domain, domain_redirect_enabled, external_url, email_contact, email_support, social_twitter, social_facebook, social_instagram,
footer_links, facebook_app_id, example, allow_submissions, share_publish_type, share_source, share_title, additional_providers,
access_token, preprint_word, subjects_acceptable, default_license_id, type)
SELECT id, created, modified, _id,
reviews_workflow, reviews_comments_private, reviews_comments_anonymous, name, advisory_board, description,
domain, domain_redirect_enabled, external_url, email_contact, email_support, social_twitter, social_facebook, social_instagram,
footer_links, facebook_app_id, example, allow_submissions, share_publish_type, share_source, share_title, additional_providers,
access_token, preprint_word, subjects_acceptable, default_license_id, 'osf.preprintprovider' as type
FROM osf_preprintprovider;
INSERT INTO osf_abstractprovider_licenses_acceptable (id, abstractprovider_id, nodelicense_id)
SELECT id, preprintprovider_id, nodelicense_id
FROM osf_preprintprovider_licenses_acceptable
"""
], [
"""
INSERT INTO osf_preprintprovider_licenses_acceptable (id, preprintprovider_id, nodelicense_id)
SELECT id, abstractprovider_id, nodelicense_id
FROM osf_abstractprovider_licenses_acceptable
"""
]
),
migrations.AlterField(
model_name='subject',
name='provider',
field=models.ForeignKey(on_delete=models.deletion.CASCADE, related_name='subjects', to='osf.AbstractProvider'),
),
migrations.RunSQL(
migrations.RunSQL.noop,
[
"""
INSERT INTO osf_preprintprovider (id, created, modified, _id,
reviews_workflow, reviews_comments_private, reviews_comments_anonymous, name, advisory_board, description,
domain, domain_redirect_enabled, external_url, email_contact, email_support, social_twitter, social_facebook, social_instagram,
footer_links, facebook_app_id, example, allow_submissions, share_publish_type, share_source, share_title, additional_providers,
access_token, preprint_word, subjects_acceptable, default_license_id)
SELECT id, created, modified, _id,
reviews_workflow, reviews_comments_private, reviews_comments_anonymous, name, advisory_board, description,
domain, domain_redirect_enabled, external_url, email_contact, email_support, social_twitter, social_facebook, social_instagram,
footer_links, facebook_app_id, example, allow_submissions, share_publish_type, share_source, share_title, additional_providers,
access_token, preprint_word, subjects_acceptable, default_license_id
FROM osf_abstractprovider
"""
]
),
migrations.RemoveField(
model_name='preprintprovider',
name='default_license',
),
migrations.RemoveField(
model_name='preprintprovider',
name='licenses_acceptable',
),
migrations.DeleteModel(
name='PreprintProvider',
),
migrations.CreateModel(
name='PreprintProvider',
fields=[
],
options={
'indexes': [],
'proxy': True,
'permissions': (('view_submissions', 'Can view all submissions to this provider'), ('add_moderator', 'Can add other users as moderators for this provider'), ('view_actions', 'Can view actions on submissions to this provider'), ('add_reviewer', 'Can add other users as reviewers for this provider'), ('review_assigned_submissions', 'Can submit reviews for submissions to this provider which have been assigned to this user'), ('assign_reviewer', 'Can assign reviewers to review specific submissions to this provider'), ('set_up_moderation', 'Can set up moderation for this provider'), ('view_assigned_submissions', 'Can view submissions to this provider which have been assigned to this user'), ('edit_reviews_settings', 'Can edit reviews settings for this provider'), ('accept_submissions', 'Can accept submissions to this provider'), ('reject_submissions', 'Can reject submissions to this provider'), ('edit_review_comments', 'Can edit comments on actions for this provider'), ('view_preprintprovider', 'Can view preprint provider details')),
},
bases=('osf.abstractprovider',),
),
]
|
[
"maf7sm@virginia.edu"
] |
maf7sm@virginia.edu
|
9517ed06bde725ef78093612d14e7ddf41d86c4a
|
681e8ba7417edafb02e7a8708748c02e0a5c5fad
|
/dev_builds/oto2016_11_23/OTO_vue_souris.py
|
5944ef77bb34af2adfc4c4ccb159b47130c6ad78
|
[] |
no_license
|
Akiro78960/Intelli-Car
|
3987f3bdcc36d3a7d920baaaa013512b6224c87c
|
78a1125c4eca77f290ed6ec82bbffa1db1480749
|
refs/heads/master
| 2021-01-19T14:15:56.128755
| 2016-12-07T14:38:51
| 2016-12-07T14:38:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
import bge
#import time
import bgui
#lg=bge.logic
#rd=bge.render
#rd.showMouse(1)
def mainSouris():
obj=bge.logic.getCurrentController()
if obj.sensors["Mouse"].positive and obj.sensors["Mouse1"].positive:
pass
#print(obj.sensors)
#print("OK",obj.sensors["Mouse"].hitPosition,obj.sensors["Mouse"].hitObject)
|
[
"evan.brosky@gmail.com"
] |
evan.brosky@gmail.com
|
c160dbc7cf0935e7f921ffa2272c88e9e446b3ea
|
2489e59412d6a2d128d41d396a0e583ac8282c61
|
/InsertionSort.py
|
d8bb3def437483ff5a5e832f8c7a115893a6e2b1
|
[] |
no_license
|
crb8v2/3130algs_proj2
|
bd27084095b097d0008ed1e085fa30505729ca72
|
8b63004dc228ae5d4f326a3fd83b8cd34d81281e
|
refs/heads/master
| 2020-04-01T19:08:18.256907
| 2018-10-24T00:19:43
| 2018-10-24T00:19:43
| 153,537,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
# http://interactivepython.org/courselib/static/pythonds/SortSearch/TheInsertionSort.html
from random import *
from timeit import default_timer as timer
import random
def insertionSort(alist):
for index in range(1,len(alist)):
currentvalue = alist[index]
position = index
while position>0 and alist[position-1]>currentvalue:
alist[position]=alist[position-1]
position = position-1
alist[position]=currentvalue
list_hundred_sorted = list(range(1, 100))
list_hundred_random = random.sample(range(1, 100), 99)
list_hundred_semiSort = list(range(1,100))
# randomize every 10th pos of array
for i in list_hundred_semiSort:
if i % 10 != 0 and i != 0:
continue
list_hundred_semiSort[i] = randint(1,100)
list_thousand_sorted = list(range(1, 1000))
list_thousand_random = random.sample(range(1, 1000), 999)
list_thousand_semiSort = list(range(1,1000))
# randomize every 10th pos of array
for i in list_thousand_semiSort:
if i % 10 != 0 and i != 0:
continue
list_thousand_semiSort[i] = randint(1,1000)
list_tenthous_sorted = list(range(1, 10000))
list_tenthous_random = random.sample(range(1, 10000), 9999)
list_tenthous_semiSort = list(range(1,10000))
# randomize every 10th pos of array
for i in list_tenthous_semiSort:
if i % 10 != 0 and i != 0:
continue
list_tenthous_semiSort[i] = randint(1,10000)
# start = timer()
# insertionSort()
# end = timer()
#
# print ("\n time: \n",(end - start))
|
[
"connor@admin.com"
] |
connor@admin.com
|
5ae45e5f638e154d0d9da7e58147b5cd60fd3360
|
05a1ea9da06020fd737e6e4bc1a370f253c47cf2
|
/input/kinetics/families/Intra_R_Add_ExoTetCyclic/groups.py
|
cf6b8d5e594afb263a0eb949f220c3062362d10d
|
[] |
no_license
|
alaraen/RMG-database
|
f16e8375b850f9e25885726fc2da4914e68963ca
|
cd9e2d27d9fe8a43e44de41ba482ee6dfeb12cf0
|
refs/heads/master
| 2021-01-18T08:55:56.859791
| 2014-06-17T02:33:27
| 2014-06-18T02:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77,137
|
py
|
#!/usr/bin/env python
# encoding: utf-8
name = "Intra_R_Add_ExoTetcyclic/groups"
shortDesc = u""
longDesc = u"""
"""
template(reactants=["R1_rad_R2_R3"], products=["R1_R2_Cycle", "R3_rad"], ownReverse=False)
reverse = "Ring_Open_Rad_Addition"
recipe(actions=[
['BREAK_BOND', '*2', 'S', '*3'],
['FORM_BOND', '*1', 'S', '*2'],
['LOSE_RADICAL', '*1', '1'],
['GAIN_RADICAL', '*3', '1'],
])
entry(
index = 1,
label = "R1_rad_R2_R3",
group = "OR{R4, R5, R6, R7}",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 2,
label = "multiplebond_intra",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 {C,O} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 3,
label = "radadd_intra",
group =
"""
1 *1 R!H 1
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 4,
label = "R4",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 {C,O} 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 5,
label = "R4_S",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 {C,O} 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 7,
label = "R4_S_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 C 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 8,
label = "R4_S_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 O 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 9,
label = "R4_D",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 {C,O} 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 11,
label = "R4_D_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 C 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 12,
label = "R4_D_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 O 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 13,
label = "R4_T",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 {C,O} 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 15,
label = "R4_T_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 C 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 16,
label = "R4_T_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 O 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 17,
label = "R4_B",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 {C,O} 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 19,
label = "R4_B_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 C 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 20,
label = "R4_B_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *2 {C,O} 0 {2,S} {4,S}
4 *3 O 0 {3,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 21,
label = "R5",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H {0,1,2S,2T} {1,{S,D,T,B}} {3,{S,D,T,B}}
3 *5 R!H {0,1,2S,2T} {2,{S,D,T,B}} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 22,
label = "R5_SS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 24,
label = "R5_SS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 25,
label = "R5_SS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 26,
label = "R5_SD",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cd 0 {1,S} {3,D}
3 *5 Cd 0 {2,D} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 28,
label = "R5_SD_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cd 0 {1,S} {3,D}
3 *5 Cd 0 {2,D} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 29,
label = "R5_SD_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cd 0 {1,S} {3,D}
3 *5 Cd 0 {2,D} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 30,
label = "R5_DS",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 32,
label = "R5_DS_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 33,
label = "R5_DS_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 34,
label = "R5_ST",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Ct 0 {1,S} {3,T}
3 *5 Ct 0 {2,T} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 36,
label = "R5_ST_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Ct 0 {1,S} {3,T}
3 *5 Ct 0 {2,T} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 37,
label = "R5_ST_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Ct 0 {1,S} {3,T}
3 *5 Ct 0 {2,T} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 38,
label = "R5_TS",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 40,
label = "R5_TS_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 41,
label = "R5_TS_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 42,
label = "R5_SB",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *5 Cb 0 {2,B} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 44,
label = "R5_SB_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *5 Cb 0 {2,B} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 45,
label = "R5_SB_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *5 Cb 0 {2,B} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 46,
label = "R5_BS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 {C,O} 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 48,
label = "R5_BS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 C 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 49,
label = "R5_BS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *5 R!H 0 {2,S} {4,S}
4 *2 {C,O} 0 {3,S} {5,S}
5 *3 O 0 {4,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 50,
label = "R6",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H {0,1,2S,2T} {1,{S,D,T,B}} {3,{S,D,T,B}}
3 *6 R!H {0,1,2S,2T} {2,{S,D,T,B}} {4,{S,D,T,B}}
4 *5 R!H {0,1,2S,2T} {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 51,
label = "R6_RSR",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *6 R!H 0 {2,S} {4,{S,D,T,B}}
4 *5 R!H 0 {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 52,
label = "R6_SSR",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,{S,D,T,B}}
4 *5 R!H 0 {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 53,
label = "R6_SSS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 55,
label = "R6_SSS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 56,
label = "R6_SSS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 57,
label = "R6_SSM",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 59,
label = "R6_SSM_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 60,
label = "R6_SSM_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 61,
label = "R6_DSR",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,{S,D,T,B}}
4 *5 R!H 0 {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 62,
label = "R6_DSS",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 64,
label = "R6_DSS_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 65,
label = "R6_DSS_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 66,
label = "R6_DSM",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 68,
label = "R6_DSM_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 69,
label = "R6_DSM_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 70,
label = "R6_TSR",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,{S,D,T,B}}
4 *5 R!H 0 {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 71,
label = "R6_TSS",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 73,
label = "R6_TSS_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 74,
label = "R6_TSS_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 75,
label = "R6_TSM",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 77,
label = "R6_TSM_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 78,
label = "R6_TSM_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 79,
label = "R6_BSR",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,{S,D,T,B}}
4 *5 R!H 0 {3,{S,D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 80,
label = "R6_BSS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 82,
label = "R6_BSS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 83,
label = "R6_BSS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 84,
label = "R6_BSM",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 86,
label = "R6_BSM_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 87,
label = "R6_BSM_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *5 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 88,
label = "R6_SMS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 90,
label = "R6_SMS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 91,
label = "R6_SMS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 92,
label = "R6_SBB",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *5 Cb 0 {3,B} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 94,
label = "R6_SBB_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *5 Cb 0 {3,B} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 95,
label = "R6_SBB_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *5 Cb 0 {3,B} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 96,
label = "R6_BBS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 {C,O} 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 98,
label = "R6_BBS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 C 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 99,
label = "R6_BBS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *5 R!H 0 {3,S} {5,S}
5 *2 {C,O} 0 {4,S} {6,S}
6 *3 O 0 {5,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 100,
label = "R7",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H {0,1,2S,2T} {1,{S,D,T,B}} {3,{S,D,T,B}}
3 *6 R!H {0,1,2S,2T} {2,{S,D,T,B}} {4,{S,D,T,B}}
4 *7 R!H {0,1,2S,2T} {3,{S,D,T,B}} {5,{S,D,T,B}}
5 *5 R!H {0,1,2S,2T} {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 101,
label = "R7_RSSR",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 102,
label = "R7_SSSR",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 103,
label = "R7_SSSS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 105,
label = "R7_SSSS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 106,
label = "R7_SSSS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 107,
label = "R7_SSSM",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 109,
label = "R7_SSSM_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 110,
label = "R7_SSSM_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 111,
label = "R7_DSSR",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 112,
label = "R7_DSSS",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 114,
label = "R7_DSSS_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 115,
label = "R7_DSSS_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 116,
label = "R7_DSSM",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 118,
label = "R7_DSSM_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 119,
label = "R7_DSSM_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 120,
label = "R7_TSSR",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 121,
label = "R7_TSSS",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 123,
label = "R7_TSSS_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 124,
label = "R7_TSSS_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 125,
label = "R7_TSSM",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 127,
label = "R7_TSSM_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 128,
label = "R7_TSSM_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 129,
label = "R7_BSSR",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 130,
label = "R7_BSSS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 132,
label = "R7_BSSS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 133,
label = "R7_BSSS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 134,
label = "R7_BSSM",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 136,
label = "R7_BSSM_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 137,
label = "R7_BSSM_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 R!H 0 {2,S} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 138,
label = "R7_RSMS",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 139,
label = "R7_SSMS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 141,
label = "R7_SSMS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 142,
label = "R7_SSMS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 143,
label = "R7_DSMS",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 145,
label = "R7_DSMS_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 146,
label = "R7_DSMS_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 147,
label = "R7_TSMS",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 149,
label = "R7_TSMS_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 150,
label = "R7_TSMS_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 151,
label = "R7_BSMS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 153,
label = "R7_BSMS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 154,
label = "R7_BSMS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 {Cd,Ct,Cb} 0 {2,S} {4,{D,T,B}}
4 *7 {Cd,Ct,Cb} 0 {3,{D,T,B}} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 155,
label = "R7_SMSR",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 156,
label = "R7_SMSS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 158,
label = "R7_SMSS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 159,
label = "R7_SMSS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 160,
label = "R7_SMSM",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 162,
label = "R7_SMSM_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 163,
label = "R7_SMSM_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 {Cd,Ct,Cb} 0 {1,S} {3,{D,T,B}}
3 *6 {Cd,Ct,Cb} 0 {2,{D,T,B}} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 164,
label = "R7_BBSR",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 R!H 0 {3,S} {5,{S,D,T,B}}
5 *5 R!H 0 {4,{S,D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 165,
label = "R7_BBSS",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 167,
label = "R7_BBSS_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 168,
label = "R7_BBSS_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 R!H 0 {3,S} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 169,
label = "R7_BBSM",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 171,
label = "R7_BBSM_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 172,
label = "R7_BBSM_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cbf 0 {1,B} {3,B}
3 *6 Cb 0 {2,B} {4,S}
4 *7 {Cd,Ct,Cb} 0 {3,S} {5,{D,T,B}}
5 *5 {Cd,Ct,Cb} 0 {4,{D,T,B}} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 173,
label = "R7_RSBB",
group =
"""
1 *1 R!H 1 {2,{S,D,T,B}}
2 *4 R!H 0 {1,{S,D,T,B}} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 174,
label = "R7_SSBB",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 176,
label = "R7_SSBB_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 177,
label = "R7_SSBB_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 R!H 0 {1,S} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 178,
label = "R7_DSBB",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 180,
label = "R7_DSBB_Cs",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 181,
label = "R7_DSBB_O",
group =
"""
1 *1 Cd 1 {2,D}
2 *4 Cd 0 {1,D} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 182,
label = "R7_TSBB",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 184,
label = "R7_TSBB_Cs",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 185,
label = "R7_TSBB_O",
group =
"""
1 *1 Ct 1 {2,T}
2 *4 Ct 0 {1,T} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 186,
label = "R7_BSBB",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 188,
label = "R7_BSBB_Cs",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 189,
label = "R7_BSBB_O",
group =
"""
1 *1 Cb 1 {2,B}
2 *4 Cb 0 {1,B} {3,S}
3 *6 Cb 0 {2,S} {4,B}
4 *7 Cbf 0 {3,B} {5,B}
5 *5 Cb 0 {4,B} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 190,
label = "R7_SBBS",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *7 Cb 0 {3,B} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 {C,O} 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 192,
label = "R7_SBBS_Cs",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *7 Cb 0 {3,B} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 C 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 193,
label = "R7_SBBS_O",
group =
"""
1 *1 R!H 1 {2,S}
2 *4 Cb 0 {1,S} {3,B}
3 *6 Cbf 0 {2,B} {4,B}
4 *7 Cb 0 {3,B} {5,S}
5 *5 R!H 0 {4,S} {6,S}
6 *2 {C,O} 0 {5,S} {7,S}
7 *3 O 0 {6,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 194,
label = "doublebond_intra",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 195,
label = "doublebond_intra_2H",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 H 0 {2,S}
4 H 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 196,
label = "doublebond_intra_2H_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 H 0 {2,S}
5 H 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 197,
label = "doublebond_intra_2H_secNd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 H 0 {2,S}
5 H 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 198,
label = "doublebond_intra_2H_secDe",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 H 0 {2,S}
5 H 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 199,
label = "doublebond_intra_HNd",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 H 0 {2,S}
4 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 200,
label = "doublebond_intra_HNd_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 H 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 201,
label = "doublebond_intra_HNd_secNd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 H 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 202,
label = "doublebond_intra_HNd_secDe",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 H 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 203,
label = "doublebond_intra_HDe",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 H 0 {2,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 204,
label = "doublebond_intra_HDe_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 H 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 205,
label = "doublebond_intra_HCd_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 H 0 {2,S}
5 Cd 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 206,
label = "doublebond_intra_HCt_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 H 0 {2,S}
5 Ct 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 207,
label = "doublebond_intra_HDe_secNd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 H 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 208,
label = "doublebond_intra_HDe_secDe",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 H 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 209,
label = "doublebond_intra_NdNd",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 {Cs,O} 0 {2,S}
4 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 210,
label = "doublebond_intra_NdNd_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 211,
label = "doublebond_intra_NdNd_secNd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 212,
label = "doublebond_intra_NdNd_secDe",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cs,O} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 213,
label = "doublebond_intra_NdDe",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 {Cs,O} 0 {2,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
# NOTE(review): index 212 duplicates doublebond_intra_NdNd_secDe's index above.
# The label sequence suggests this should be 214, but 214 is currently held by
# doublebond_intra_NdDe_secDe below — the two must be renumbered together, so
# this is only flagged here rather than changed.
entry(
    index = 212,
    label = "doublebond_intra_NdDe_pri",
    group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = 215,
label = "doublebond_intra_NdCd_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 {Cs,O} 0 {2,S}
5 Cd 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 216,
label = "doublebond_intra_NdCt_pri",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 {Cs,O} 0 {2,S}
5 Ct 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 217,
label = "doublebond_intra_NdDe_secNd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
# NOTE(review): index 214 is out of numeric sequence here (neighbours are 217
# and 215). The expected value from the label ordering is 218, but 218 is
# currently held by doublebond_intra_DeDe_secDe below — renumber both records
# in one pass; flagged only, not changed, to avoid creating a fresh duplicate.
entry(
    index = 214,
    label = "doublebond_intra_NdDe_secDe",
    group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 {Cs,O} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    # Fix: index was 215, duplicating doublebond_intra_NdCd_pri. Entry indices
    # are the database record keys and must be unique; 219 is unused and makes
    # the DeDe family a contiguous 219-222 run ahead of carbonylbond_intra (223).
    index = 219,
    label = "doublebond_intra_DeDe",
    group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 C 0 {1,S} {3,S} {4,S}
3 {Cd,Ct,Cb,CO} 0 {2,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    # Fix: index was 216, duplicating doublebond_intra_NdCt_pri. Renumbered to
    # the unused 220 so every entry keeps a unique database key.
    index = 220,
    label = "doublebond_intra_DeDe_pri",
    group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 H 0 {1,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    # Fix: index was 217, duplicating doublebond_intra_NdDe_secNd. Renumbered to
    # the unused 221 so every entry keeps a unique database key.
    index = 221,
    label = "doublebond_intra_DeDe_secNd",
    group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cs,O} 0 {1,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
    # Fix: index was 218, which left the DeDe family out of sequence once its
    # siblings occupy 219-221. 222 is unused and keeps the family contiguous
    # directly before carbonylbond_intra (223).
    index = 222,
    label = "doublebond_intra_DeDe_secDe",
    group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 C 0 {1,S} {4,S} {5,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
4 {Cd,Ct,Cb,CO} 0 {2,S}
5 {Cd,Ct,Cb,CO} 0 {2,S}
""",
    kinetics = None,
    reference = None,
    referenceType = "",
    shortDesc = u"""""",
    longDesc =
u"""
""",
)
entry(
index = 223,
label = "carbonylbond_intra",
group =
"""
1 *2 {C,O} 0 {2,S}
2 *3 O 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 224,
label = "carbonylbond_intra_H",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 O 0 {1,S}
3 H 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 225,
label = "carbonylbond_intra_Nd",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 O 0 {1,S}
3 {Cs,O} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 226,
label = "carbonylbond_intra_De",
group =
"""
1 *2 {C,O} 0 {2,S} {3,S}
2 *3 O 0 {1,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 227,
label = "radadd_intra_cs",
group =
"""
1 *1 Cs 1
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 228,
label = "radadd_intra_cs2H",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 H 0 {1,S}
3 H 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 229,
label = "radadd_intra_csHNd",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 H 0 {1,S}
3 {Cs,O} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 230,
label = "radadd_intra_csHDe",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 H 0 {1,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 235,
label = "radadd_intra_csHCd",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 H 0 {1,S}
3 Cd 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 236,
label = "radadd_intra_csHCt",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 H 0 {1,S}
3 Ct 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 237,
label = "radadd_intra_csNdNd",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 {Cs,O} 0 {1,S}
3 {Cs,O} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 232,
label = "radadd_intra_csNdDe",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 {Cs,O} 0 {1,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 239,
label = "radadd_intra_csNdCd",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 {Cs,O} 0 {1,S}
3 Cd 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 240,
label = "radadd_intra_csNdCt",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 {Cs,O} 0 {1,S}
3 Ct 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 241,
label = "radadd_intra_csDeDe",
group =
"""
1 *1 Cs 1 {2,S} {3,S}
2 {Cd,Ct,Cb,CO} 0 {1,S}
3 {Cd,Ct,Cb,CO} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 234,
label = "radadd_intra_O",
group =
"""
1 *1 O 1
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 235,
label = "radadd_intra_Cb",
group =
"""
1 *1 Cb 1
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 236,
label = "radadd_intra_cdsingle",
group =
"""
1 *1 Cd 1 {2,S}
2 R 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 237,
label = "radadd_intra_cdsingleH",
group =
"""
1 *1 Cd 1 {2,S}
2 H 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 238,
label = "radadd_intra_cdsingleNd",
group =
"""
1 *1 Cd 1 {2,S}
2 {Cs,O} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 239,
label = "radadd_intra_cdsingleDe",
group =
"""
1 *1 Cd 1 {2,S}
2 {Cd,Ct,Cb,CO} 0 {1,S}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 240,
label = "radadd_intra_cddouble",
group =
"""
1 *1 Cd 1 {2,D}
2 Cd 0 {1,D}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 241,
label = "radadd_intra_CO",
group =
"""
1 *1 CO 1 {2,D}
2 O 0 {1,D}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
entry(
index = 242,
label = "radadd_intra_Ct",
group =
"""
1 *1 Ct 1 {2,T}
2 Ct 0 {1,T}
""",
kinetics = None,
reference = None,
referenceType = "",
shortDesc = u"""""",
longDesc =
u"""
""",
)
tree(
"""
L1: R1_rad_R2_R3
L2: R4
L3: R4_S
L4: R4_S_Cs
L4: R4_S_O
L3: R4_D
L4: R4_D_Cs
L4: R4_D_O
L3: R4_T
L4: R4_T_Cs
L4: R4_T_O
L3: R4_B
L4: R4_B_Cs
L4: R4_B_O
L2: R5
L3: R5_SS
L4: R5_SS_Cs
L4: R5_SS_O
L3: R5_SD
L4: R5_SD_Cs
L4: R5_SD_O
L3: R5_DS
L4: R5_DS_Cs
L4: R5_DS_O
L3: R5_ST
L4: R5_ST_Cs
L4: R5_ST_O
L3: R5_TS
L4: R5_TS_Cs
L4: R5_TS_O
L3: R5_SB
L4: R5_SB_Cs
L4: R5_SB_O
L3: R5_BS
L4: R5_BS_Cs
L4: R5_BS_O
L2: R6
L3: R6_RSR
L4: R6_SSR
L5: R6_SSS
L6: R6_SSS_Cs
L6: R6_SSS_O
L5: R6_SSM
L6: R6_SSM_Cs
L6: R6_SSM_O
L4: R6_DSR
L5: R6_DSS
L6: R6_DSS_Cs
L6: R6_DSS_O
L5: R6_DSM
L6: R6_DSM_Cs
L6: R6_DSM_O
L4: R6_TSR
L5: R6_TSS
L6: R6_TSS_Cs
L6: R6_TSS_O
L5: R6_TSM
L6: R6_TSM_Cs
L6: R6_TSM_O
L4: R6_BSR
L5: R6_BSS
L6: R6_BSS_Cs
L6: R6_BSS_O
L5: R6_BSM
L6: R6_BSM_Cs
L6: R6_BSM_O
L3: R6_SMS
L4: R6_SMS_Cs
L4: R6_SMS_O
L3: R6_SBB
L4: R6_SBB_Cs
L4: R6_SBB_O
L3: R6_BBS
L4: R6_BBS_Cs
L4: R6_BBS_O
L2: R7
L3: R7_RSSR
L4: R7_SSSR
L5: R7_SSSS
L6: R7_SSSS_Cs
L6: R7_SSSS_O
L5: R7_SSSM
L6: R7_SSSM_Cs
L6: R7_SSSM_O
L4: R7_DSSR
L5: R7_DSSS
L6: R7_DSSS_Cs
L6: R7_DSSS_O
L5: R7_DSSM
L6: R7_DSSM_Cs
L6: R7_DSSM_O
L4: R7_TSSR
L5: R7_TSSS
L6: R7_TSSS_Cs
L6: R7_TSSS_O
L5: R7_TSSM
L6: R7_TSSM_Cs
L6: R7_TSSM_O
L4: R7_BSSR
L5: R7_BSSS
L6: R7_BSSS_Cs
L6: R7_BSSS_O
L5: R7_BSSM
L6: R7_BSSM_Cs
L6: R7_BSSM_O
L3: R7_RSMS
L4: R7_SSMS
L5: R7_SSMS_Cs
L5: R7_SSMS_O
L4: R7_DSMS
L5: R7_DSMS_Cs
L5: R7_DSMS_O
L4: R7_TSMS
L5: R7_TSMS_Cs
L5: R7_TSMS_O
L4: R7_BSMS
L5: R7_BSMS_Cs
L5: R7_BSMS_O
L3: R7_SMSR
L4: R7_SMSS
L5: R7_SMSS_Cs
L5: R7_SMSS_O
L4: R7_SMSM
L5: R7_SMSM_Cs
L5: R7_SMSM_O
L3: R7_BBSR
L4: R7_BBSS
L5: R7_BBSS_Cs
L5: R7_BBSS_O
L4: R7_BBSM
L5: R7_BBSM_Cs
L5: R7_BBSM_O
L3: R7_RSBB
L4: R7_SSBB
L5: R7_SSBB_Cs
L5: R7_SSBB_O
L4: R7_DSBB
L5: R7_DSBB_Cs
L5: R7_DSBB_O
L4: R7_TSBB
L5: R7_TSBB_Cs
L5: R7_TSBB_O
L4: R7_BSBB
L5: R7_BSBB_Cs
L5: R7_BSBB_O
L3: R7_SBBS
L4: R7_SBBS_Cs
L4: R7_SBBS_O
L1: multiplebond_intra
L2: doublebond_intra
L3: doublebond_intra_2H
L4: doublebond_intra_2H_pri
L4: doublebond_intra_2H_secNd
L4: doublebond_intra_2H_secDe
L3: doublebond_intra_HNd
L4: doublebond_intra_HNd_pri
L4: doublebond_intra_HNd_secNd
L4: doublebond_intra_HNd_secDe
L3: doublebond_intra_HDe
L4: doublebond_intra_HDe_pri
L5: doublebond_intra_HCd_pri
L5: doublebond_intra_HCt_pri
L4: doublebond_intra_HDe_secNd
L4: doublebond_intra_HDe_secDe
L3: doublebond_intra_NdNd
L4: doublebond_intra_NdNd_pri
L4: doublebond_intra_NdNd_secNd
L4: doublebond_intra_NdNd_secDe
L3: doublebond_intra_NdDe
L4: doublebond_intra_NdDe_pri
L5: doublebond_intra_NdCd_pri
L5: doublebond_intra_NdCt_pri
L4: doublebond_intra_NdDe_secNd
L4: doublebond_intra_NdDe_secDe
L3: doublebond_intra_DeDe
L4: doublebond_intra_DeDe_pri
L4: doublebond_intra_DeDe_secNd
L4: doublebond_intra_DeDe_secDe
L2: carbonylbond_intra
L3: carbonylbond_intra_H
L3: carbonylbond_intra_Nd
L3: carbonylbond_intra_De
L1: radadd_intra
L2: radadd_intra_cs
L3: radadd_intra_cs2H
L3: radadd_intra_csHNd
L3: radadd_intra_csHDe
L4: radadd_intra_csHCd
L4: radadd_intra_csHCt
L3: radadd_intra_csNdNd
L3: radadd_intra_csNdDe
L4: radadd_intra_csNdCd
L4: radadd_intra_csNdCt
L3: radadd_intra_csDeDe
L2: radadd_intra_O
L2: radadd_intra_Cb
L2: radadd_intra_cdsingle
L3: radadd_intra_cdsingleH
L3: radadd_intra_cdsingleNd
L3: radadd_intra_cdsingleDe
L2: radadd_intra_cddouble
L2: radadd_intra_CO
L2: radadd_intra_Ct
"""
)
forbidden(
label = "bond21",
group =
"""
1 *2 R!H 0 {2,{S,D}}
2 *1 R!H 1 {1,{S,D}}
""",
shortDesc = u"""""",
longDesc =
u"""
""",
)
#forbidden(
# label = "cdd2",
# group =
#"""
#1 *2 {C,O}dd 0
#""",
# shortDesc = u"""""",
# longDesc =
#u"""
#
#""",
#)
#
|
[
"r.west@neu.edu"
] |
r.west@neu.edu
|
47ec0b8daf1be246726bb38689c9967a2047b1d3
|
76050b0002dac757866a9fb95dc199918da665bb
|
/acme/utils/iterator_utils_test.py
|
ebe21f3a602dbf5b91ce2fc5ab468a73080be58f
|
[
"Apache-2.0"
] |
permissive
|
RaoulDrake/acme
|
2829f41688db68d694da2461d301fd6f9f27edff
|
97c50eaa62c039d8f4b9efa3e80c4d80e6f40c4c
|
refs/heads/master
| 2022-12-29T01:16:44.806891
| 2022-12-21T14:09:38
| 2022-12-21T14:10:06
| 300,250,466
| 0
| 0
|
Apache-2.0
| 2020-10-01T11:13:03
| 2020-10-01T11:13:02
| null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterator_utils."""
from acme.utils import iterator_utils
import numpy as np
from absl.testing import absltest
class IteratorUtilsTest(absltest.TestCase):
def test_iterator_zipping(self):
def get_iters():
x = iter(range(0, 10))
y = iter(range(20, 30))
return [x, y]
zipped = zip(*get_iters())
unzipped = iterator_utils.unzip_iterators(zipped, num_sub_iterators=2)
expected_x, expected_y = get_iters()
np.testing.assert_equal(list(unzipped[0]), list(expected_x))
np.testing.assert_equal(list(unzipped[1]), list(expected_y))
if __name__ == '__main__':
absltest.main()
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
230d4677c465eea7cfbd8f9c140668c97d6c6b4a
|
dd5a81524d533a7030c164931d6fb03ba721657a
|
/Hack assembler/assembler.py
|
1ac2b1c4398e8f39acd666f2958315afebdec293
|
[] |
no_license
|
RakibRyan/nand2tetris-1
|
5e159757e8b88a9c26ca16d609ec64c3b0dc6a24
|
02904220dadb41a450e7d60d5cdd8191ff76ed90
|
refs/heads/master
| 2022-02-09T03:27:29.607433
| 2019-07-13T17:07:57
| 2019-07-13T17:07:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
#!/usr/bin/python3
"""
Hack machine language assembler
https://www.nand2tetris.org
@author: shubham1172
"""
import sys
import argparse
from parser import Parser
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('path')
args = arg_parser.parse_args()
file, data = None, None
try:
file = open(args.path)
if args.path[-4:] != '.asm':
raise NameError()
data = file.readlines()
file.close()
except FileNotFoundError:
print("file not found at the requested path")
sys.exit(-1)
except NameError:
print("file must have an .asm extension")
sys.exit(-1)
parser = Parser(data)
out = parser.parse()
try:
file = open(args.path[:-4] + '.hack', 'w')
file.writelines(out)
file.close()
except FileExistsError:
print('error writing file - file already exists')
sys.exit(-1)
|
[
"shubhamsharma1172@gmail.com"
] |
shubhamsharma1172@gmail.com
|
46b615398d8bcde373699b18aa5c12cde8c97045
|
aa84cae5ab53d7f426c86cd0cf0df3e288fd1e8d
|
/hsm/migrations/0014_datetest.py
|
2f7867b8edab5ed9d703beb57407c46e96e85051
|
[] |
no_license
|
ali-man/AppReception
|
6603dfec85cb1ff792c69cf2bb060f671a237c17
|
734669fcda4f0643c5f30d6da7d59a7a87c7de9a
|
refs/heads/master
| 2020-05-04T21:33:50.194166
| 2019-06-12T14:35:42
| 2019-06-12T14:35:42
| 179,481,042
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# Generated by Django 2.2 on 2019-04-16 11:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hsm', '0013_auto_20190414_2002'),
]
operations = [
migrations.CreateModel(
name='DateTest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, verbose_name='Тестовое название')),
('date_arrival', models.DateTimeField(verbose_name='Дата заезда')),
('date_departure', models.DateTimeField(verbose_name='Дата выезда')),
],
options={
'verbose_name': 'Тестовый',
'verbose_name_plural': 'Тестовые',
},
),
]
|
[
"aliman.fsd@gmail.com"
] |
aliman.fsd@gmail.com
|
71f8e48ac7187f8012e3da7093cb4d2d812e2023
|
00fbdd922ffa68ac67bcb99a6dd8450065769cc1
|
/ba2-104-7.py
|
9295606efc705ba727e1b2c2a5ec35168f5a4a20
|
[] |
no_license
|
lukewang7/bioinfo
|
f083077f2d1ca24b7ccf5b62ea645c67ddfbfa30
|
cf5081f435e98bfe355b09ef8ae21efd6493be40
|
refs/heads/master
| 2021-01-20T09:00:37.684765
| 2015-06-21T00:02:58
| 2015-06-21T00:02:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,174
|
py
|
import sys
from collections import Counter
minval = 57
maxval = 200
cyclos = {}
scores = {}
spectrum = Counter()
elements = []
leaderboard = [()]
leader_peptide = [()]
leader_score = 0
max_weight = None
N = 0
M = 0
#runs a convolution of spectrum to get M most frequent elements
def get_M(M, spectrum):
allweights = Counter()
l = len(spectrum)
for i in xrange(1,l):
for j in xrange(i):
diff = spectrum[i]-spectrum[j]
if diff>=minval and diff<=maxval:
allweights[diff]+=1
elements = []
weight_counts = set(allweights.values())
while len(elements)<M and len(weight_counts)>0:
themax = max(weight_counts)
elements+=([k for k in allweights if allweights[k]==themax])
print elements
print len(elements)
weight_counts.remove(themax)
return elements
#calculates cyclospectrum for the given peptide
def verify_peptide(peptide):
len_peptide = len(peptide)
all_weights = Counter([0, sum(peptide)])
peptide += peptide
for i in xrange(len_peptide):
for j in xrange(len_peptide-1):
nextone = sum(peptide[i:i+j+1])
all_weights[nextone]+=1
return all_weights
def get_cyclospectrum(peptide):
if not peptide in cyclos:
cyclos[peptide] = verify_peptide(peptide)
return cyclos[peptide]
def score(pep):
#print "score",pep
ri = len(pep)
for i in range(ri):
cp = pep[i:]+pep[:i]
if cp in scores:
return scores[cp]
score = 0
cs = get_cyclospectrum(pep)
for p in spectrum.keys():
score+=min(spectrum[p],cs[p])
scores[pep] = score
return score
def expand():
next_leaderboard = []
next_scores = set()
global leader_score
global N
global leaderboard
global leader_peptide
global max_weight
print "expand",len(leaderboard), max_weight, max([sum(l) for l in leaderboard])
for l in leaderboard:
cur_weight = sum(l)
iw = [w for w in elements if w<=(max_weight-cur_weight)]
for w in iw:
theweight = cur_weight+w
# print theweight, max_weight
if theweight <= max_weight:
curmem = l+(w,)
curscore = score(curmem)
next_leaderboard.append(curmem)
next_scores.add(curscore)
# print next_scores
if theweight == max_weight:
if curscore>leader_score:
leader_peptide = [curmem]
leader_score = curscore
elif curscore == leader_score:
leader_peptide.append(curmem)
print "leader_peptides", leader_peptide, leader_score
# print "expanded",next_leaderboard
scores = set()
leaderboard = []
while len(next_scores)>0 and len(leaderboard)<N:
# for i in range(min(N, len(next_scores))):
# print "i",i
themax = max(next_scores)
if (themax>0):
scores.add(themax)
next_scores.remove(themax)
# print scores
leaderboard +=[l for l in next_leaderboard if score(l) == themax]
print leaderboard
fname = sys.argv[1]
f = open(fname)
M = int(f.readline().strip())
N = int(f.readline().strip())
i_spectrum = [int(p.strip()) for p in f.readline().strip().split()]
i_spectrum.sort()
max_weight = i_spectrum[-1]
spectrum = Counter(i_spectrum)
print spectrum
print M
print N
print max_weight
elements = get_M(M, i_spectrum)
print elements
while len(leaderboard)>0:
expand()
print leader_peptide
for p in leader_peptide:
print "-".join([str(r) for r in p])
|
[
"anachesa@gmail.com"
] |
anachesa@gmail.com
|
60f85b79ba416594b640d3e783cafbb958ce74b0
|
cfd6d3c89e684cd233efa61d07e79316d243b66a
|
/Code/Fake Blog/This.py
|
dfae5241ec1b4d07126309006df12af16ea53d1f
|
[] |
no_license
|
Jabungu/python
|
d8fc6ab8f671df5f2f947edb9564ca87bac4ea1a
|
fed591a29c44606f5274c5adc8f226d115dd3136
|
refs/heads/master
| 2023-03-22T13:56:56.137768
| 2021-03-08T20:23:58
| 2021-03-08T20:23:58
| 297,374,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13
|
py
|
print('try')
|
[
"joboderoh15@gmail.com"
] |
joboderoh15@gmail.com
|
bdb18e3f32676784b2de62e0014c030128a9383d
|
ea31a366cd897b2aa1d286a26dcd9c8e10e72c25
|
/airflow/plugins/kaggle_elt/kaggle_dbt_source.py
|
d81bece33f752898d83f2fd8519f5f6eb8cbbd14
|
[] |
no_license
|
Beetelbrox/accident-information-challenge
|
d533468c628e1a6453ef0ceffe257480434d2239
|
4e6594013eedf8a73f0edc4e4cc7420a19bff829
|
refs/heads/main
| 2023-06-18T12:37:50.184600
| 2021-07-16T22:02:18
| 2021-07-16T22:02:18
| 385,966,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,428
|
py
|
import os
import yaml
from typing import Dict, Any
class KaggleDbtSourceTableColumn:
"""A class representig a dbt source column, enriched with the kaggle metadata"""
def __init__(self, dbt_yaml: Dict[str, Any]):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
self.name = dbt_yaml['name']
self.data_type = dbt_yaml['data_type']
self.kaggle_column_name = dbt_yaml['meta']['kaggle_column_name']
class KaggleDbtSourceTable:
"""A class representig a dbt source table, enriched with the kaggle metadata"""
def __init__(self, dbt_yaml: Dict[str, Any], schema: str):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
self.name = dbt_yaml['name']
self.schema = schema
self.kaggle_file_name = dbt_yaml['meta']['kaggle_file_name']
self.columns = {c['name']: KaggleDbtSourceTableColumn(c) for c in dbt_yaml.get('columns', [])}
@property
def qualified_name(self) -> str:
"""Returns the qualified name for the source table"""
return f'{self.schema}.{self.name}'
def get_kaggle_to_dbt_mapping(self) -> Dict[str, str]:
"""Gets the mapping from the original names in the kaggle dataset to the sanitized names"""
return {c.kaggle_column_name: c.name for c in self.columns.values()}
class KaggleDbtSource:
"""A class representig a dbt source, enriched with the kaggle metadata. Can contain one or more tables"""
def __init__(self, dbt_yaml: Dict[str, Any]):
"""Constructs all necessary attributes for the object from a parsed dby .yml file"""
yaml_dbt_source = dbt_yaml['sources'][0]
self.name = yaml_dbt_source['name']
self.schema = yaml_dbt_source['schema']
# Pull the kaggle dataset name & parse it
kaggle_dataset_owner, kaggle_dataset_name = yaml_dbt_source['meta']['kaggle_dataset'].split('/')
self.kaggle_owner = kaggle_dataset_owner
self.kaggle_name = kaggle_dataset_name
# Configs for the CSV
self.delimiter = yaml_dbt_source['meta'].get('delimiter', '|')
self.null_value = yaml_dbt_source['meta'].get('null_value', 'NA')
self.encoding = yaml_dbt_source['meta'].get('encoding', 'utf-8')
# Build the tables. Pass the schema for convenience
self.tables = {t['name']: KaggleDbtSourceTable(t, self.schema) for t in yaml_dbt_source['tables']}
@property
def kaggle_full_name(self) -> str:
"""Build the full Kaggle dataset name"""
return f'{self.kaggle_owner}/{self.kaggle_name}'
def get_table(self, table_name: str) -> KaggleDbtSourceTable:
"""Returns a given table"""
return self.tables[table_name]
def read_kaggle_dbt_source_configs(dbt_project_path: str, dbt_project_name: str) -> KaggleDbtSource:
"""Reads and parses all dbt source configuration files (with the right naming) in a dbt project"""
dbt_source_cfgs = {}
dbt_models_path = f'{dbt_project_path}/{dbt_project_name}/models'
for ds in os.listdir(dbt_models_path):
with open(f'{dbt_models_path}/{ds}/sources/src_{ds}.yml', 'r') as ifile:
try:
dataset_cfg = KaggleDbtSource(yaml.safe_load(ifile))
except yaml.YAMLError as e:
print(e)
dbt_source_cfgs[dataset_cfg.name] = dataset_cfg
return dbt_source_cfgs
|
[
"9376816+Beetelbrox@users.noreply.github.com"
] |
9376816+Beetelbrox@users.noreply.github.com
|
e0484f2e58aab4de9e567907b0778dc57f18cc34
|
574d7955a32116e2fa315b5f75f124863ca70614
|
/blog/admin.py
|
ee30581a5a79496780dd1cb38aa3d14fd815c3c0
|
[] |
no_license
|
harunurkst/django_course_04
|
b15cb8e52a821b1157e1ac4dbe56b89fdebce848
|
5d93290cbee0f47795b6c9ecef8d33d8afe859d1
|
refs/heads/master
| 2022-11-22T20:48:36.196279
| 2020-07-26T17:20:37
| 2020-07-26T17:20:37
| 278,904,995
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
from django.contrib import admin
from .models import Post, Author, Category, Comment
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
admin.site.register(Author)
admin.site.register(Category)
admin.site.register(Comment)
|
[
"harun1393@gmail.com"
] |
harun1393@gmail.com
|
5c3b3f6888d640338b4b8142484c0632a9ddf488
|
710eb4463c6e7c49d3d64c1409557a885f0585bd
|
/Easy/1103_分糖果||/main.py
|
aab9b69889f701c4683b74aa74fe0c3f25a94b24
|
[] |
no_license
|
mintdouble/LeetCode
|
8324d2eac2592542e84551dc740d17efc9859764
|
29b1cf8ce6fd883818c3f092dbcbfd435e54b8f3
|
refs/heads/master
| 2020-08-27T08:00:41.545474
| 2019-11-22T08:55:43
| 2019-11-22T08:55:43
| 217,292,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# 思路:找到每个人每轮获得糖果的通项公式,根据通项公式得到求和公式,就可以根据分发轮数一次性得到该人最终获得的糖果总数
import math
class Solution:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
max_people = int(math.sqrt(2*candies+0.25)-0.5)
remain_candies = candies - (max_people*(max_people+1)) // 2
epoch = max_people // num_people
extra_epoch = max_people % num_people
result = [0] * num_people
for i in range(num_people):
if i < extra_epoch:
result[i] = (epoch+1)*(2*(i+1)+num_people*epoch)//2
else:
result[i] = epoch*(2*(i+1)+num_people*(epoch-1))//2
result[extra_epoch] += remain_candies
return result
|
[
"noreply@github.com"
] |
noreply@github.com
|
33479b018326b141432225a386b4a3eaf5c3298a
|
e9ed5c06e172b939461c2b6b594adc8d4601bbda
|
/LPythonB/shorten_url/controller.py
|
f6a77c89df4271b1e3220d16eb204d0475fbd507
|
[] |
no_license
|
Qiong/ycyc
|
e65bd4a882a0ba10dcc6b95648fb3fc19f110239
|
df3805aa6ac1b13e8707b8b49f8171d18eed8202
|
refs/heads/master
| 2016-09-06T14:21:21.066282
| 2015-06-06T21:44:29
| 2015-06-06T21:44:29
| 19,927,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
# Redirect function is used to forward user to full url if he came
# from shortened
# Request is used to encapsulate HTTP request. It will contain request
# methods, request arguments and other related information
from flask import redirect, render_template, request, Flask
from werkzeug.exceptions import BadRequest, NotFound
import models
from flask import Flask
# Initialize Flask application
app = Flask(__name__, template_folder='views')
@app.route("/")
def index():
"""Renders main page."""
return render_template('main_page.html')
@app.route("/shorten/")
def shorten():
"""Returns short_url of requested full_url."""
# Validate user input
full_url = request.args.get('url')
if not full_url:
raise BadRequest()
# Model returns object with short_url property
url_model = models.Url.shorten(full_url)
url_model.short_url
# Pass data to view and call its render method
short_url = request.host + '/' + url_model.short_url
return render_template('success.html', short_url=short_url)
@app.route('/<path:path>')
def redirect_to_full(path=''):
"""Gets short url and redirects user to corresponding full url if found."""
# Model returns object with full_url property
url_model = models.Url.get_by_short_url(path)
# Validate model return
if not url_model:
raise NotFound()
return redirect(url_model.full_url)
if __name__ == "__main__":
app.run(debug=True)
|
[
"qiongfei.seas@gmail.com"
] |
qiongfei.seas@gmail.com
|
eb1c93b1147d8ab10aef657ea6ff65381154bc2d
|
4530ee48b594bb7ecd6832fd8de08115d94c2cfc
|
/pytest.py
|
c2b6a689de5f696743364e5f9790cdc573f0d8ae
|
[] |
no_license
|
SeananTQ/bbq-server
|
186e6a8126269a2ef750849b73c8742262235fbf
|
90ab7bb5d1b6f654e09d981640f56fe76da169f2
|
refs/heads/master
| 2020-03-10T02:06:05.332300
| 2018-04-29T17:10:11
| 2018-04-29T17:10:11
| 129,127,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
import json
class Group:
def __init__(self, aa, bb):
self.aa = aa
self.bb = bb
temp = Group("aaaa", "bbbb")
print(temp)
|
[
"38140149+SeananTQ@users.noreply.github.com"
] |
38140149+SeananTQ@users.noreply.github.com
|
c4fdac06da50ef0fa06741cf8c8cfa9c834776fb
|
bf6cf5e55349e414ccca0fc674f721f589e06e8f
|
/src/forum/migrations/0005_auto_20170610_2010.py
|
c9a5282f5b3c56db1765f20b66b71acfa3144eaf
|
[
"MIT"
] |
permissive
|
shashankmohabia/gymkhana-master
|
cc7e3db4847edc2f91fc752da2fc35ad055f237d
|
a0d399d781797a2f63fb81a1ae287714213d068b
|
refs/heads/master
| 2021-05-03T10:29:29.596959
| 2018-02-06T23:27:16
| 2018-02-06T23:27:16
| 120,535,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-10 14:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('forum', '0004_auto_20170610_2006'),
]
operations = [
migrations.AlterField(
model_name='answer',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='forum.Topic'),
),
]
|
[
"shashankmohabia27@gmail.com"
] |
shashankmohabia27@gmail.com
|
36e916c8090b3c04f5c332984140f1c1e3aa7642
|
98c590e21c004225dd6095a1ae925b7da6bdb627
|
/meal_prep_app/migrations/0035_auto_20210615_1658.py
|
a0656b825dd8dbffe2d873b9f42df34d2d073562
|
[] |
no_license
|
xwosic/meal_prep_project
|
10abdc2f1b140b10eda71e023085b1c851854036
|
72f53a02992694e3a5d0235b278e9224ed5cf4cd
|
refs/heads/master
| 2023-06-15T21:08:37.965948
| 2021-07-14T11:57:59
| 2021-07-14T11:57:59
| 385,912,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Generated by Django 3.1.7 on 2021-06-15 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meal_prep_app', '0034_remove_shoppinglist_amounts'),
]
operations = [
migrations.AlterField(
model_name='shoppinglist',
name='title',
field=models.CharField(default='', max_length=50),
),
]
|
[
"mcisow@gmail.com"
] |
mcisow@gmail.com
|
a7952a9685c8a7d0c16b7e21cba095dabb0d61bd
|
23da3316b2ad2cbd0b3b8091458c262e6173ce24
|
/cod/test_collection_4_dict.py
|
70e6f3b438802d8818a6d3241e7ff86f32023742
|
[] |
no_license
|
babelcodes/python
|
6d3dd9100aed98a37e77129794b3af11c8f89e55
|
86b306c0416e371a3a8f37a3a5d3fec42ce448df
|
refs/heads/master
| 2020-04-13T12:49:10.383718
| 2019-01-02T08:15:54
| 2019-01-02T08:15:54
| 163,212,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
import unittest
class TestDict(unittest.TestCase):
"""A dictionary (or hashmap) is a class for key / value representation"""
def setUp(self):
self.occurances = dict(a=5, b=6, c=8)
def test_create(self):
self.assertEqual(self.occurances, dict(a=5, b=6, c=8))
def test_create_with_function(self):
self.assertEqual({ i:i*i for i in range(1, 6) }, {1:1, 2:4, 3:9, 4:16, 5:25})
def test_type(self):
self.assertEqual(type(self.occurances), dict)
self.assertTrue(type(self.occurances) is dict)
def test_get(self):
self.assertEqual(self.occurances['c'], 8)
with self.assertRaises(KeyError): # KeyError: 'd'
self.occurances['d']
# safe access
self.assertEqual(self.occurances.get('c'), 8)
self.assertEqual(self.occurances.get('e'), None)
self.assertEqual(self.occurances.get('e', 'some default value'), 'some default value')
def test_set(self):
self.occurances['d'] = 15
self.occurances['c'] = 1
self.assertEqual(self.occurances, dict(a=5, b=6, c=1, d=15))
def test_delete(self):
del self.occurances['a']
self.assertEqual(self.occurances, dict(b=6, c=8))
def test_keys(self):
self.assertEqual(self.occurances.keys(), ['a', 'c', 'b'])
def test_values(self):
self.assertEqual(self.occurances.values(), [5, 8, 6])
def test_items(self):
"""Returns key/value pairs as tuples"""
self.assertEqual(self.occurances.items(), [('a', 5), ('c', 8), ('b', 6)])
result = ''
for (key, value) in self.occurances.items():
result += key + "/" + str(value) + " "
self.assertEqual(result, 'a/5 c/8 b/6 ')
|
[
"jacques@2ia.net"
] |
jacques@2ia.net
|
e7f7098869934c8e98d694a44382c6cb60479ac5
|
717e0190612c20b9f5d26fec77e293c9b53f0e17
|
/numpywren/binops.py
|
cc74dd808794dfddd673758ef5304f701452777c
|
[
"Apache-2.0"
] |
permissive
|
cloudbutton/lithops-array
|
8336e16cf9b80e8745ba9c63256294d2d7206a1c
|
5e74b881c7db95eccdccf986f1e3b0dc44603889
|
refs/heads/main
| 2023-04-23T08:15:42.450676
| 2021-04-06T13:40:15
| 2021-04-06T13:40:15
| 344,418,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,962
|
py
|
import numpy as np
from .matrix import BigMatrix
from .matrix_utils import chunk, generate_key_name_binop
from . import matrix_utils
import concurrent.futures as fs
import os
import lithops
import time
from . import lambdapack as lp
from . import job_runner
def _gemm_remote_0(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
print('block_pairs: ', block_pairs)
print('reduce_idxs: ', reduce_idxs)
for bp in block_pairs:
bidx_0, bidx_1 = bp
XY_block = None
X.dtype = dtype
Y.dtype = dtype
for r in reduce_idxs:
block1 = X.get_block(bidx_0, r)
block2 = Y.get_block(r, bidx_1)
if XY_block is None:
XY_block = block1.dot(block2)
else:
XY_block += block1.dot(block2)
XY.put_block(XY_block, bidx_0, bidx_1)
def _gemm_remote_1(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
os.system("sudo mount -o remount,size=50g /dev/shm")
X.dtype = dtype
Y.dtype = dtype
for bp in block_pairs:
bidx_0, bidx_1 = bp
block0 = matrix_utils.get_row(X, bidx_0, mmap_loc="/dev/shm/block_0")
block1 = matrix_utils.get_col(Y, bidx_1, mmap_loc="/dev/shm/block_1")
XY_block = block0.dot(block1)
XY.put_block(XY_block, bidx_0, bidx_1)
def _gemm_remote_2(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
os.system("sudo mount -o remount,size=50g /dev/shm")
X.dtype = dtype
X.dtype = dtype
Y.dtype = dtype
block_chunk_size = kwargs.get("block_chunk_size")
for bp in block_pairs:
bidx_0, bidx_1 = bp
result = gemm_with_prefetch(X, Y, bidx_0, bidx_1, block_chunk_size=block_chunk_size)
XY.put_block(result, bidx_0, bidx_1)
_gemms = [_gemm_remote_0, _gemm_remote_1, _gemm_remote_2]
def gemm_with_prefetch(X, Y, bidx0, bidx1, block_chunk_size=16):
# prefetch first 16 columns
parity = 0
executor = fs.ProcessPoolExecutor(32)
block_chunk_size = min(block_chunk_size, len(X._block_idxs(1)))
chunked_blocks = list(matrix_utils.chunk(X._block_idxs(1), block_chunk_size))
assert(chunked_blocks[0] == list(range(block_chunk_size)))
futures0 = matrix_utils.get_matrix_blocks_full_async(X, "/dev/shm/block0_{0}".format(parity), [bidx0], list(range(block_chunk_size)), big_axis=1, executor=executor)
futures1 = matrix_utils.get_matrix_blocks_full_async(Y, "/dev/shm/block1_{0}".format(parity), list(range(block_chunk_size)), [bidx1], big_axis=0, executor=executor)
assert X._block_idxs(1) == Y._block_idxs(0)
chunked_blocks = chunked_blocks[1:]
start_x, end_x = X._blocks(0)[bidx0]
start_y, end_y = Y._blocks(1)[bidx1]
result = np.zeros((end_x - start_x, end_y - start_y), dtype=X.dtype)
for blocks in chunked_blocks:
t = time.time()
fs.wait(futures0)
fs.wait(futures1)
e = time.time()
print("Block Download took effectively {0}".format(e - t))
results = [f.result() for f in futures0]
b1 = matrix_utils.load_mmap(*results[0])
results = [f.result() for f in futures1]
b2 = matrix_utils.load_mmap(*results[0])
parity = (parity + 1) % 2
futures0 = matrix_utils.get_matrix_blocks_full_async(X, "/dev/shm/block0_{0}".format(parity), [bidx0], blocks, big_axis=1, executor=executor)
futures1 = matrix_utils.get_matrix_blocks_full_async(Y, "/dev/shm/block1_{0}".format(parity), blocks, [bidx1], big_axis=0, executor=executor)
t = time.time()
result += b1.dot(b2)
e = time.time()
print("Block Matmul took effectively {0}".format(e - t))
t = time.time()
fs.wait(futures0)
fs.wait(futures1)
e = time.time()
print("Block Download took effectively {0}".format(e - t))
results = [f.result() for f in futures0]
b1 = matrix_utils.load_mmap(*results[0])
results = [f.result() for f in futures1]
b2 = matrix_utils.load_mmap(*results[0])
t = time.time()
result += b1.dot(b2)
e = time.time()
print("Block Matmul took effectively {0}".format(e - t))
return result
def gemm(fexec, X, Y, out_bucket=None, tasks_per_job=1, local=False,
         dtype=np.float64, overwrite=True, gemm_impl=0, gemm_chunk_size=16):
    '''
    Compute XY and return the product as a new BigMatrix.

    @param fexec - lithops FunctionExecutor used to run the block jobs
    @param X - lhs matrix
    @param Y - rhs matrix
    @param out_bucket - bucket the result is written to (defaults to X.bucket)
    @param tasks_per_job - number of output blocks computed per task
    @param local - run the tasks in-process instead of via lithops
    @param dtype - element type of the output matrix
    @param overwrite - when False, skip output blocks that already exist
    @param gemm_impl - index into _gemms selecting the block-multiply kernel
    @param gemm_chunk_size - prefetch chunk size forwarded to the kernel
    '''
    reduce_idxs = Y._block_idxs(axis=0)
    if out_bucket is None:
        out_bucket = X.bucket
    root_key = generate_key_name_binop(X, Y, "gemm")
    # inner block dimensions must agree for block-wise multiplication
    if (Y.shard_sizes[0] != X.shard_sizes[1]):
        raise Exception("X dim 1 shard size must match Y dim 0 shard size")
    XY = BigMatrix(root_key, shape=(X.shape[0], Y.shape[1]),
                   bucket=out_bucket, shard_sizes=[X.shard_sizes[0], Y.shard_sizes[1]],
                   dtype=dtype, write_header=True, storage=X.storage)
    num_out_blocks = len(XY.blocks)
    if (tasks_per_job > num_out_blocks):
        tasks_per_job = 1
    num_jobs = int(num_out_blocks/float(tasks_per_job))
    print("Out Shape", XY.shape)
    print("Total number of output blocks", len(XY.block_idxs))
    print("Total number of output blocks that exist", len(XY.blocks_exist))
    # choose which output blocks to (re)compute
    if (overwrite):
        block_idxs_to_map = list(set(XY.block_idxs))
    else:
        block_idxs_to_map = list(set(XY.block_idxs_not_exist))
    print("block_idxs_to_map: ", block_idxs_to_map)
    print("Number of output blocks to generate ", len(block_idxs_to_map))
    print("Tasks per job: ", tasks_per_job)
    print("Num Jobs: ", num_jobs)
    print('GEMM impl: ', gemm_impl, _gemms[gemm_impl])
    chunked_blocks = list(chunk(block_idxs_to_map, tasks_per_job))
    chunked_blocks = [(cb, ) for cb in chunked_blocks]
    # Josep: Storage class is not pickable, so delete it before invoke Lithops
    saved_stroage = X.storage  # NOTE(review): local is misspelled ("stroage")
    XY.storage = Y.storage = X.storage = None
    def lithops_run(block_pairs, storage):
        # executes on the worker: re-attach the worker-side storage handle
        # before running the selected gemm kernel
        XY.storage = storage
        X.storage = storage
        Y.storage = storage
        return _gemms[gemm_impl](block_pairs, XY, X, Y, reduce_idxs=reduce_idxs,
                                 dtype=dtype, block_chunk_size=gemm_chunk_size)
    if (local):
        # NOTE(review): lithops_run takes (block_pairs, storage) but map()
        # here supplies only block_pairs, so the local path raises TypeError
        # as written — confirm what storage object should be passed locally.
        list(map(lithops_run, chunked_blocks))
        return XY
    else:
        fexec.map(lithops_run, chunked_blocks, include_modules=['numpywren'])
        fexec.wait()
    # NOTE(review): XY.storage is left as None on the driver here; only X
    # and Y get their storage handle restored.
    Y.storage = X.storage = saved_stroage
    return XY
def gemv(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Matrix-vector multiply (hard; not yet implemented)."""
    raise NotImplementedError
def syrk(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Symmetric rank-k update (hard; not yet implemented)."""
    raise NotImplementedError
def posv(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Positive-definite solve (very hard; not yet implemented)."""
    raise NotImplementedError
def add(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise addition (easy; not yet implemented)."""
    raise NotImplementedError
def sub(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise subtraction (easy; not yet implemented)."""
    raise NotImplementedError
def mul(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise multiplication (easy; not yet implemented)."""
    raise NotImplementedError
def div(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise division (easy; not yet implemented)."""
    raise NotImplementedError
def logical_and(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise logical AND (not yet implemented)."""
    raise NotImplementedError
def logical_or(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise logical OR (not yet implemented)."""
    raise NotImplementedError
def xor(pwex, X, Y, out_bucket=None, tasks_per_job=1):
    """Elementwise logical XOR (not yet implemented)."""
    raise NotImplementedError
def elemwise_binop_func(pwex, X, Y, f, out_bucket=None, tasks_per_job=1, local=False):
    """Generic elementwise binary operation applying *f* (not yet implemented)."""
    raise NotImplementedError
def trisolve(pwex, A, B, out_bucket=None, tasks_per_job=1, lower=False):
    """Solve the triangular system A X = B via a LambdaPack program.

    :param pwex: execution context; only its ``config`` is used
    :param A: triangular BigMatrix coefficient matrix
    :param B: BigMatrix right-hand side
    :param out_bucket: bucket for the result (defaults to A.bucket)
    :param tasks_per_job: unused here; kept for API symmetry with gemm
    :param lower: solve with the lower-triangular part of A when True
    :returns: BigMatrix X holding the solution
    :raises Exception: when the lambdapack program does not finish SUCCESSfully
    """
    if out_bucket is None:
        out_bucket = A.bucket
    # NOTE(review): root_key appears unused below — _trisolve derives its own
    # keys; kept for parity with the other binops.
    root_key = generate_key_name_binop(A, B, "trisolve")
    instructions, X, scratch = lp._trisolve(A, B, out_bucket=out_bucket, lower=lower)
    config = pwex.config
    fexec = lithops.FunctionExecutor()
    program = lp.LambdaPackProgram(instructions, executor=fexec, pywren_config=config)
    print(program)
    program.start()
    job_runner.lambdapack_run(program)
    program.wait()
    if program.program_status() != lp.PS.SUCCESS:
        program.unwind()
        raise Exception("Lambdapack Exception : {0}".format(program.program_status()))
    program.free()
    # delete all intermediate information
    for M in scratch:
        M.free()
    return X
|
[
"josep.sampe@gmail.com"
] |
josep.sampe@gmail.com
|
3157e2a88e41bb79673664c7282c66fc1660f782
|
54838734dd8a1a9b7ac329118081163b4b3a6892
|
/src/utils/static_params.py
|
707afb540e433ebda242a9ca7ac08fcc3843a404
|
[] |
no_license
|
pyliaorachel/SeqGAN-paraphrase-generation
|
b8e0e6a84e16571d02490739ab2e835dcd464fc3
|
8babe2b16dd1a495f7be2c07d40d57a79501eedc
|
refs/heads/master
| 2020-03-25T03:57:11.079929
| 2019-07-28T20:44:02
| 2019-07-28T20:44:02
| 143,369,844
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
import torch
# --- Run-mode switches ---
DEBUG = False  # Run for fewer iterations/epochs/steps
LIGHT_VER = False  # Use the light (smaller) version of the dataset
NO_SAVE = False  # When True, skip saving the model
# --- Special vocabulary tokens ---
START_TOKEN = '<S>'
END_TOKEN = '<E>'
PAD_TOKEN = '<P>'
# True when torch reports a usable CUDA device
CUDA = torch.cuda.is_available()
# --- Dataset split sizes ---
TRAIN_SIZE = 53000
TEST_SIZE = 3000
VALID_SET_SIZE_RATIO = 0.1  # fraction of training data held out for validation
# Quora duplicate-question-pairs TSV used as the corpus
dataset_path = './dataset/quora_duplicate_questions.tsv'
[
"pyliao@stanford.edu"
] |
pyliao@stanford.edu
|
64ec23c48be93c3136600c02443e4ac2a6a55ecc
|
d69ed5cfab7bdcc4125f9f4f4d3531ce00a94fce
|
/codes/rosenbrockfunction.py
|
fae2567587c1825fc91f6af8dd7d2f46fd814373
|
[
"BSD-2-Clause"
] |
permissive
|
hanzhuowei/deeplearningliterature
|
1f84b081377f74f07bae0dfa09aaa22a0e7b610c
|
e9f58b7edf1e76850c91e2a868752f40f5fae37b
|
refs/heads/master
| 2021-01-01T20:48:02.124811
| 2015-04-08T13:03:35
| 2015-04-08T13:03:35
| 25,296,804
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 6 14:37:17 2015
@author: d1143
"""
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import numpy as np
# 3D surface plot of the Rosenbrock function
#   f(x, y) = (1 - x)^2 + 100 * (y - x^2)^2
# (global minimum f = 0 at (1, 1)); log-normalized colors make the shallow
# curved valley visible.
fig = plt.figure()
ax = Axes3D(fig, azim = -128, elev = 43)
s = .05  # grid step size
X = np.arange(-2, 2.+s, s)
Y = np.arange(-1, 3.+s, s)
X, Y = np.meshgrid(X, Y)
Z = (1.-X)**2 + 100.*(Y-X*X)**2
ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = cm.jet)
plt.xlabel("x")
plt.ylabel("y")
plt.savefig("Rosenbrock function.svg")
plt.show()
|
[
"d1143@pc226.(none)"
] |
d1143@pc226.(none)
|
ac7c12faf572165f8d8a4344ba7869bb8ca61cc8
|
6c7322cdbe9189bf51b4a7c7394ea5b181df2328
|
/dev/IA/gauss_noise.py
|
bd1fce83801b9e7a55a802466e2b3be0411388af
|
[] |
no_license
|
StenbergSimon/scanomatic
|
4683f7f2c1330cc9cc998d1f83bed73afbe05e83
|
db5dd2e8501d9db8fb0fd8fbf5c9ddd652ae8fdf
|
refs/heads/master
| 2020-06-11T13:35:52.036072
| 2016-10-03T09:19:40
| 2016-10-03T09:19:40
| 75,655,065
| 0
| 0
| null | 2016-12-05T18:38:03
| 2016-12-05T18:38:03
| null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import numpy as np
class SigmaError(Exception):
    """Raised when a non-positive sigma is passed to gauss_noise."""
def gauss_noise(im, sigma):
    """Add zero-mean Gaussian noise of std *sigma* to image *im*.

    Noise is generated with the Box-Muller transform from two uniform
    samples per pixel, then the result is clamped to [0, im.max()] so the
    output stays in the original intensity range.

    :param im: numpy array of pixel intensities
    :param sigma: noise standard deviation, must be > 0
    :returns: new array ``im + noise``, clamped to [0, im.max()]
    :raises SigmaError: if sigma <= 0
    """
    if sigma <= 0:
        # FIX: removed an unreachable `return None` that followed this raise
        raise SigmaError(
            "Sigma ({0}) is not larger than 0".format(sigma))
    v = np.zeros(im.shape)
    G = im.max()  # upper clamp bound: keep output within original range
    # Box-Muller: two independent normals z1, z2 from uniforms theta, r
    # NOTE(review): np.random.random can return 0.0, making log(r) = -inf;
    # confirm whether that edge case matters for the caller.
    theta = np.random.random(im.size)
    r = np.random.random(im.size)
    alpha = 2 * np.pi * theta
    beta = np.sqrt(-2 * np.log(r))
    z1 = sigma * np.cos(alpha) * beta
    z2 = sigma * np.sin(alpha) * beta
    # interleave the two normal streams across the flattened noise image
    flat_v = v.ravel()
    flat_v += z1
    flat_v[1:] += z2[:-1]
    f = im + v
    f[f < 0] = 0
    f[f > G] = G
    return f
|
[
"martin.zackrisson@gmail.com"
] |
martin.zackrisson@gmail.com
|
96a43ae78f3ba03b1c8502952406d7f35f3535b6
|
a45f64b5bbbb4ba3321ad171288afd62e969e630
|
/Contact-Management -System/Contact-Management -System/Contact-System.py
|
5275042ce4b58a80896fc927ce5d3e3a8dc2e36d
|
[] |
no_license
|
shivam-mahato/test
|
8c6374817646075e7f0a3fe66124dc539c3213aa
|
f15dd498fba2dfb0e69e976d37c06ccb1fd4451c
|
refs/heads/master
| 2023-05-05T06:36:14.238717
| 2021-06-01T04:42:39
| 2021-06-01T04:42:39
| 372,701,677
| 0
| 0
| null | 2021-06-01T04:42:39
| 2021-06-01T04:35:00
|
Python
|
UTF-8
|
Python
| false
| false
| 16,486
|
py
|
# This
from tkinter import *
import sqlite3
import tkinter
import tkinter.ttk as ttk
import tkinter.messagebox as tkMessageBox
# Main application window: fixed 700x400 at the top-left corner.
root = Tk()
root.title("Contact System")
root.geometry("700x400+0+0")
root.resizable(0, 0)
p1 = PhotoImage(file='images/icon.PNG')
root.iconphoto(True, p1)
root.config(bg="dark gray")
# VARIABLES
# Shared tkinter StringVars backing the contact form; the same variables are
# used by both the "add" and "update" editor windows.
f_name = StringVar()
m_name = StringVar()
l_name = StringVar()
age = StringVar()
home_address = StringVar()
phone_number = StringVar()
gender = StringVar()
religion = StringVar()
nationality = StringVar()
# METHODS
def Exit():
    """Ask for confirmation, then close the application window."""
    if tkinter.messagebox.askyesno("Contact Management System", "Do you want to exit the system"):
        root.destroy()
def Reset():
    """Clear every entry field of the contact form."""
    for field in (f_name, m_name, l_name, gender, age,
                  home_address, phone_number, religion, nationality):
        field.set("")
def Database():
    """Create the contacts table if missing and load all rows into the tree."""
    connection = sqlite3.connect("contactdb.db")
    cur = connection.cursor()
    cur.execute(
        "CREATE TABLE IF NOT EXISTS `contactable` (id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, first_name TEXT, middle_name TEXT, last_name TEXT, gender TEXT, age TEXT, home_address TEXT, phone_number TEXT, religion TEXT, nationality TEXT)")
    cur.execute("SELECT * FROM `contactable` ORDER BY `last_name` ASC")
    for row in cur.fetchall():
        tree.insert('', 'end', values=(row))
    cur.close()
    connection.close()
def Submit():
    """Insert a new contact from the form and refresh the table.

    Shows a warning if any field is empty; otherwise inserts the row with a
    parameterized query, reloads the tree sorted by last name, and clears
    the form via Reset().
    """
    fields = (f_name, m_name, l_name, gender, age, home_address,
              phone_number, religion, nationality)
    if any(v.get() == "" for v in fields):
        tkMessageBox.showwarning('', 'Please Complete The Required Field', icon="warning")
    else:
        tree.delete(*tree.get_children())
        conn = sqlite3.connect("contactdb.db")
        cursor = conn.cursor()
        # CHANGED: phone_number stored as str (was int) to match Update(),
        # the TEXT column type, and to avoid crashing on '+'/'-' in numbers.
        # NOTE: age must still be numeric; int() raises ValueError otherwise.
        cursor.execute(
            "INSERT INTO `contactable` (first_name, middle_name, last_name, gender, age, home_address, phone_number, religion, nationality) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (str(f_name.get()), str(m_name.get()), str(l_name.get()), str(gender.get()), int(age.get()),
             str(home_address.get()), str(phone_number.get()), str(religion.get()), str(nationality.get())))
        conn.commit()
        cursor.execute("SELECT * FROM `contactable` ORDER BY `last_name` ASC")
        for data in cursor.fetchall():
            tree.insert('', 'end', values=(data))
        cursor.close()
        conn.close()
        Reset()  # clear the form for the next entry (replaces nine .set("") calls)
def Update():
    """Rewrite the record whose id was captured by UpdateContactWindow().

    Only gender is validated (the other fields arrive pre-filled from the
    selected row).  Refreshes the table and clears the form afterwards.
    """
    if gender.get() == "":
        tkMessageBox.showwarning('', 'Please Complete The Required Field', icon="warning")
    else:
        tree.delete(*tree.get_children())
        conn = sqlite3.connect("contactdb.db")
        cursor = conn.cursor()
        # `id` is the module-level record id set by UpdateContactWindow.
        cursor.execute(
            "UPDATE `contactable` SET `first_name` = ?, `middle_name` = ? , `last_name` = ?, `gender` =?, `age` = ?, `home_address` = ?, `phone_number` = ?, `religion` = ?, `nationality` = ? WHERE `id` = ?",
            (str(f_name.get()), str(m_name.get()), str(l_name.get()), str(gender.get()), int(age.get()),
             str(home_address.get()), str(phone_number.get()), str(religion.get()), str(nationality.get()),
             int(id)))
        conn.commit()
        cursor.execute("SELECT * FROM `contactable` ORDER BY `last_name` ASC")
        for data in cursor.fetchall():
            tree.insert('', 'end', values=(data))
        cursor.close()
        conn.close()
        Reset()  # clear the form (replaces nine duplicated .set("") calls)
def Delete():
    """Delete the selected table row from the database after confirmation."""
    if not tree.selection():
        tkMessageBox.showwarning('', 'Please Select in the Table First!', icon="warning")
    else:
        result = tkMessageBox.askquestion('', 'Are You Sure You Want To Delete This Record?', icon="warning")
        if result == 'yes':
            curItem = tree.focus()
            contents = (tree.item(curItem))
            selecteditem = contents['values']
            tree.delete(curItem)
            conn = sqlite3.connect("contactdb.db")
            cursor = conn.cursor()
            # FIX: parameterized query instead of %-string formatting — safer
            # and consistent with Submit()/Update().
            cursor.execute("DELETE FROM `contactable` WHERE `id` = ?", (selecteditem[0],))
            conn.commit()
            cursor.close()
            conn.close()
def UpdateContactWindow(event):
    """Open the contact editor pre-filled with the double-clicked table row.

    Bound to <Double-Button-1> on the tree view.  Saves the selected row's
    database id in the global ``id`` so Update() knows which record to
    rewrite, fills the shared form variables from the row, and builds the
    edit form whose Update button calls Update().
    """
    global id, UpdateWindow
    curItem = tree.focus()
    contents = (tree.item(curItem))
    selecteditem = contents['values']
    # Row layout: (id, first, middle, last, gender, age, home address,
    # phone, religion, nationality).  Every field is overwritten below, so
    # the old clear-then-set sequence was dropped.
    id = selecteditem[0]
    f_name.set(selecteditem[1])
    m_name.set(selecteditem[2])
    l_name.set(selecteditem[3])
    # BUGFIX: gender was never pre-filled, forcing the user to re-select a
    # gender radio button on every update (Update() rejects empty gender).
    gender.set(selecteditem[4])
    age.set(selecteditem[5])
    home_address.set(selecteditem[6])
    phone_number.set(selecteditem[7])
    religion.set(selecteditem[8])
    nationality.set(selecteditem[9])
    UpdateWindow = Toplevel()
    UpdateWindow.title("Contact Details")
    UpdateWindow.geometry("500x520+0+0")
    UpdateWindow.config(bg="dark gray")
    UpdateWindow.resizable(0, 0)
    # Keep at most one editor window open at a time.
    if 'NewWindow' in globals():
        NewWindow.destroy()
    # FRAMES
    FormTitle = Frame(UpdateWindow)
    FormTitle.pack(side=TOP)
    ContactForm = Frame(UpdateWindow)
    ContactForm.pack(side=TOP, pady=10)
    RadioGroup = Frame(ContactForm)
    Male = Radiobutton(RadioGroup, text="Male", variable=gender, value="Male", font=('arial', 14)).pack(side=LEFT)
    Female = Radiobutton(RadioGroup, text="Female", variable=gender, value="Female", font=('arial', 14)).pack(side=LEFT)
    Others = Radiobutton(RadioGroup, text="Others", variable=gender, value="Others", font=('arial', 14)).pack(side=LEFT)
    # LABELS
    lbl_title = Label(FormTitle, text="Updating Contacts", bd=12, relief=GROOVE, fg="White", bg="blue",
                      font=("Calibri", 14, "bold"), pady=3)
    lbl_title.pack(fill=X)
    lbl_FirstName = Label(ContactForm, text="First Name", font=('arial', 14), bd=5)
    lbl_FirstName.grid(row=0, sticky=W)
    lbl_MiddleName = Label(ContactForm, text="Middle Name", font=('arial', 14), bd=5)
    lbl_MiddleName.grid(row=1, sticky=W)
    lbl_LastName = Label(ContactForm, text="Last Name", font=('arial', 14), bd=5)
    lbl_LastName.grid(row=2, sticky=W)
    lbl_Gender = Label(ContactForm, text="Gender", font=('arial', 14), bd=5)
    lbl_Gender.grid(row=3, sticky=W)
    lbl_Age = Label(ContactForm, text="Age", font=('arial', 14), bd=5)
    lbl_Age.grid(row=4, sticky=W)
    lbl_HomeAddress = Label(ContactForm, text=" Home Address", font=('arial', 14), bd=5)
    lbl_HomeAddress.grid(row=5, sticky=W)
    lbl_PhoneNumber = Label(ContactForm, text="Phone Number", font=('arial', 14), bd=5)
    lbl_PhoneNumber.grid(row=6, sticky=W)
    lbl_Religion = Label(ContactForm, text="Religion", font=('arial', 14), bd=5)
    lbl_Religion.grid(row=7, sticky=W)
    lbl_Nationality = Label(ContactForm, text="Nationality", font=('arial', 14), bd=5)
    lbl_Nationality.grid(row=8, sticky=W)
    # TEXT ENTRY
    FirstName = Entry(ContactForm, textvariable=f_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    FirstName.grid(row=0, column=1)
    MiddleName = Entry(ContactForm, textvariable=m_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    MiddleName.grid(row=1, column=1)
    LastName = Entry(ContactForm, textvariable=l_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    LastName.grid(row=2, column=1)
    RadioGroup.grid(row=3, column=1)
    Age = Entry(ContactForm, textvariable=age, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    Age.grid(row=4, column=1)
    HomeAddress = Entry(ContactForm, textvariable=home_address, font=('arial', 14, 'bold'), bd=10, width=20,
                        justify='left')
    HomeAddress.grid(row=5, column=1)
    PhoneNumber = Entry(ContactForm, textvariable=phone_number, font=('arial', 14, 'bold'), bd=10, width=20,
                        justify='left')
    PhoneNumber.grid(row=6, column=1)
    Religion = Entry(ContactForm, textvariable=religion, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    Religion.grid(row=7, column=1)
    Nationality = Entry(ContactForm, textvariable=nationality, font=('arial', 14, 'bold'), bd=10, width=20,
                        justify='left')
    Nationality.grid(row=8, column=1)
    # ==================BUTTONS==============================
    ButtonUpdatContact = Button(ContactForm, text='Update', bd=10, font=('arial', 12, 'bold'), relief="ridge", fg="white",
                                bg="blue", command=Update)
    ButtonUpdatContact.grid(row=9, columnspan=2, pady=10)
def AddNewContact():
    """Open the 'add new contact' window with a blank form.

    Clears the shared form StringVars, closes any open update window, and
    builds the entry form whose Save button calls Submit().
    """
    global NewWindow
    # start from an empty form
    f_name.set("")
    m_name.set("")
    l_name.set("")
    gender.set("")
    age.set("")
    home_address.set("")
    phone_number.set("")
    religion.set("")
    nationality.set("")
    NewWindow = Toplevel()
    NewWindow.title("Contact Details")
    NewWindow.resizable(0, 0)
    NewWindow.geometry("500x520+0+0")
    NewWindow.config(bg="dark gray")
    # keep at most one editor window open at a time
    if 'UpdateWindow' in globals():
        UpdateWindow.destroy()
    # ===================FRAMES==============================
    FormTitle = Frame(NewWindow)
    FormTitle.pack(side=TOP)
    ContactForm = Frame(NewWindow)
    ContactForm.pack(side=TOP, pady=10)
    RadioGroup = Frame(ContactForm)
    Male = Radiobutton(RadioGroup, text="Male", variable=gender, value="Male", font=('arial', 14)).pack(side=LEFT)
    Female = Radiobutton(RadioGroup, text="Female", variable=gender, value="Female", font=('arial', 14)).pack(side=LEFT)
    Others = Radiobutton(RadioGroup, text="Others", variable=gender, value="Others", font=('arial', 14)).pack(side=LEFT)
    # ===================LABELS==============================
    lbl_title = Label(FormTitle, text="Adding New Contacts", bd=12, relief=GROOVE, fg="White", bg="blue",
                      font=("Calibri", 14, "bold"), pady=3)
    lbl_title.pack(fill=X)
    lbl_FirstName = Label(ContactForm, text="First Name", font=('arial', 14), bd=5)
    lbl_FirstName.grid(row=0, sticky=W)
    lbl_MiddleName = Label(ContactForm, text="Middle Name", font=('arial', 14), bd=5)
    lbl_MiddleName.grid(row=1, sticky=W)
    lbl_LastName = Label(ContactForm, text="Last Name", font=('arial', 14), bd=5)
    lbl_LastName.grid(row=2, sticky=W)
    lbl_Gender = Label(ContactForm, text="Gender", font=('arial', 14), bd=5)
    lbl_Gender.grid(row=3, sticky=W)
    lbl_Age = Label(ContactForm, text="Age", font=('arial', 14), bd=5)
    lbl_Age.grid(row=4, sticky=W)
    lbl_HomeAddress = Label(ContactForm, text="Home Address", font=('arial', 14), bd=5)
    lbl_HomeAddress.grid(row=5, sticky=W)
    lbl_PhoneNumber = Label(ContactForm, text="Phone Number", font=('arial', 14), bd=5)
    lbl_PhoneNumber.grid(row=6, sticky=W)
    lbl_Religion = Label(ContactForm, text="Religion", font=('arial', 14), bd=5)
    lbl_Religion.grid(row=7, sticky=W)
    lbl_Nationality = Label(ContactForm, text="Nationality", font=('arial', 14), bd=5)
    lbl_Nationality.grid(row=8, sticky=W)
    # ===================ENTRY===============================
    FirstName = Entry(ContactForm, textvariable=f_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    FirstName.grid(row=0, column=1)
    MiddleName = Entry(ContactForm, textvariable=m_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    MiddleName.grid(row=1, column=1)
    LastName = Entry(ContactForm, textvariable=l_name, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    LastName.grid(row=2, column=1)
    RadioGroup.grid(row=3, column=1)
    Age = Entry(ContactForm, textvariable=age, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    Age.grid(row=4, column=1)
    HomeAddress = Entry(ContactForm, textvariable=home_address, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    HomeAddress.grid(row=5, column=1)
    PhoneNumber = Entry(ContactForm, textvariable=phone_number, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    PhoneNumber.grid(row=6, column=1)
    Religion = Entry(ContactForm, textvariable=religion, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    Religion.grid(row=7, column=1)
    Nationality = Entry(ContactForm, textvariable=nationality, font=('arial', 14, 'bold'), bd=10, width=20, justify='left')
    Nationality.grid(row=8, column=1)
    # ==================BUTTONS==============================
    ButtonAddContact = Button(ContactForm, text='Save', bd=10, font=('arial', 12, 'bold'), relief="ridge", fg="white",
                              bg="blue", command=Submit)
    ButtonAddContact.grid(row=9, columnspan=2, pady=10)
# ============================FRAMES======================================
# ============================FRAMES======================================
Top = Frame(root, width=500, bd=1, relief=SOLID)  # title banner container
Top.pack(side=TOP)
Mid = Frame(root, width=500, bg="dark gray")
Mid.pack(side=BOTTOM)
f1 = Frame(root, width=6, height=8, bd=8, bg="dark gray")
f1.pack(side=BOTTOM)
flb = Frame(f1, width=6, height=8, bd=8, bg="blue")  # button strip
flb.pack(side=BOTTOM)
MidLeft = Frame(Mid, width=100)
MidLeft.pack(side=LEFT, pady=10)
MidLeftPadding = Frame(Mid, width=370, bg="dark gray")
MidLeftPadding.pack(side=LEFT)
MidRight = Frame(Mid, width=100)
MidRight.pack(side=RIGHT, pady=10)
TableMargin = Frame(root, width=500)  # holds the Treeview and its scrollbars
TableMargin.pack(side=TOP)
# LABELS
lbl_title = Label(Top, text="Contact Management System", bd=12, relief=GROOVE, fg="White", bg="blue",
                  font=("Calibri", 36, "bold"), pady=3)
lbl_title.pack(fill=X)
# BUTTONS
ButtonAdd = Button(flb, text='Add New Contact', bd=8, font=('arial', 12, 'bold'), relief="groove", fg="black",
                   bg="dark gray", command=AddNewContact).grid(row=0, column=0, ipadx=20, padx=30)
ButtonDelete = Button(flb, text='Delete', bd=8, font=('arial', 12, 'bold'), relief="groove", command=Delete,
                      fg="black", bg="dark gray").grid(row=0, column=1, ipadx=20)
ButtonExit = Button(flb, text='Exit System', bd=8, font=('arial', 12, 'bold'), relief="groove", command=Exit,
                    fg="black", bg="dark gray").grid(row=0, column=2, ipadx=20, padx=30)
# TABLES
# Contacts table with both scrollbars; double-clicking a row opens the
# update window (see binding below).
scrollbarx = Scrollbar(TableMargin, orient=HORIZONTAL)
scrollbary = Scrollbar(TableMargin, orient=VERTICAL)
tree = ttk.Treeview(TableMargin, columns=("Id", "First Name", "Middle Name", "Last Name", "Gender", "Age", "Home Address", "Phone Number", "Religion", "Nationality"),
                    height=400, selectmode="extended", yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)
scrollbary.config(command=tree.yview)
scrollbary.pack(side=RIGHT, fill=Y)
scrollbarx.config(command=tree.xview)
scrollbarx.pack(side=BOTTOM, fill=X)
tree.heading('Id', text="Id", anchor=W)
tree.heading('First Name', text="First Name", anchor=W)
tree.heading('Middle Name', text="Middle Name", anchor=W)
tree.heading('Last Name', text="Last Name", anchor=W)
tree.heading('Gender', text="Gender", anchor=W)
tree.heading('Age', text="Age", anchor=W)
tree.heading('Home Address', text="Home Address", anchor=W)
tree.heading('Phone Number', text="phone Number", anchor=W)
tree.heading('Religion', text="Religion", anchor=W)
tree.heading('Nationality', text="Nationality", anchor=W)
# hide the tree column (#0) and the Id column (#1)
tree.column('#0', stretch=NO, minwidth=0, width=0)
tree.column('#1', stretch=NO, minwidth=0, width=0)
tree.column('#2', stretch=NO, minwidth=0, width=80)
tree.column('#3', stretch=NO, minwidth=0, width=120)
tree.column('#4', stretch=NO, minwidth=0, width=90)
tree.column('#5', stretch=NO, minwidth=0, width=80)
tree.column('#6', stretch=NO, minwidth=0, width=30)
tree.column('#7', stretch=NO, minwidth=0, width=120)
tree.column('#8', stretch=NO, minwidth=0, width=120)
tree.column('#9', stretch=NO, minwidth=0, width=120)
tree.pack()
tree.bind('<Double-Button-1>', UpdateContactWindow)  # open the edit window
# ============================INITIALIZATION==============================
if __name__ == '__main__':
    Database()  # ensure the table exists and populate the tree
    root.mainloop()
|
[
"shivam-mahato"
] |
shivam-mahato
|
130a442d803e3f5aac86c565cd413cffa54efff7
|
56220461c0cf341a938be63b0f836626d4d86cab
|
/apivue/views.py
|
e9363d24e0539dd85522fd06cf2c4ba772af9827
|
[
"MIT"
] |
permissive
|
granith/productdevelopment
|
68a79399cb1775b6f5339dbd8f696b6510cd0601
|
3b750d186092b95ea7f8c36aa19f6a9606727372
|
refs/heads/master
| 2020-03-29T02:27:23.037595
| 2018-09-25T15:04:02
| 2018-09-25T15:04:02
| 149,437,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
from django.contrib.auth.models import User
from rest_framework import viewsets
from apivue.serializers import UserSerializer
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.

    As a ModelViewSet it exposes the standard list/retrieve/create/
    update/destroy actions over the queryset below.
    """
    # All Django auth users, rendered through UserSerializer.
    queryset = User.objects.all()
    serializer_class = UserSerializer
|
[
"ghoda@designvox.com"
] |
ghoda@designvox.com
|
a7a5392cbe81404976148b44c2ac223acab5bdc2
|
c8be480ca49549048defae246309edd4771f929e
|
/tests/test_lzf.py
|
33c3860c4955e5ad8865dc3a8c851edf0cb37915
|
[
"MIT"
] |
permissive
|
darcyg/rdbtools3
|
da79cc74218eaa22bd2884bc4f1450a86f57f7a8
|
7c92768299c35f18a479930335011e1b47f408ff
|
refs/heads/master
| 2021-01-16T22:19:32.078545
| 2014-02-13T15:12:02
| 2014-02-13T15:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
import unittest
from io import BytesIO
from rdbtools3.lzf import unpack_lzf
from rdbtools3.exceptions import RDBValueError
class TestLZF(unittest.TestCase):
    """Unit tests for rdbtools3.lzf.unpack_lzf (LZF decompression).

    Signature under test: unpack_lzf(stream, compressed_len,
    uncompressed_len) -> bytes.
    """
    def test_simple(self):
        # control byte 0x04 = literal run of the next 5 bytes
        data = BytesIO(b'\x04ABCDE')
        ret = unpack_lzf(data, 6, 5)
        self.assertEqual(b'ABCDE', ret)
    def test_empty(self):
        # zero-length input decodes to the empty byte string
        data = BytesIO(b'\x00')
        ret = unpack_lzf(data, 0, 0)
        self.assertEqual(b'', ret)
    def test_bad_lenghts(self):
        # NOTE(review): method name misspells "lengths"; left as-is since
        # unittest discovers it by the test_ prefix regardless.
        # Mismatched compressed/uncompressed lengths must raise.
        with self.assertRaises(RDBValueError):
            ret = unpack_lzf(BytesIO(b'\x00'), 1, 1)
        with self.assertRaises(RDBValueError):
            ret = unpack_lzf(BytesIO(b'\x00'), 0, 1)
        with self.assertRaises(ValueError):
            unpack_lzf(BytesIO(b'\x01'), 1, 1)
    def test_simple_backref(self):
        data = (b'\x01AB'  # simple
                b'\x60\x01')  # backref: len 3+2; back 2-1-1
        ret = unpack_lzf(BytesIO(data), 5, 7)
        self.assertEqual(b'ABABABA', ret)
    def test_longer_backref(self):
        data = (b'\x01AB'
                b'\xE0\x01\x00')  # backref: len 7+1+2; back 2-0-1
        ret = unpack_lzf(BytesIO(data), 6, 12)
        self.assertEqual(b'ABBBBBBBBBBB', ret)
if __name__ == "__main__":
    # run this module's tests directly with the unittest runner
    unittest.main()
|
[
"alexey.popravka@horsedevel.com"
] |
alexey.popravka@horsedevel.com
|
8d73fc202896546a4b0c2a8aeb2e10eea7ad3ffb
|
b26f593233e5ce81ec79e50a9fa016ab3142f5b0
|
/data_prep.py
|
c4847ab1b1d52898db06f6dfb43f72f76b38e633
|
[] |
no_license
|
UoA-CS760/autocomplete-predictor
|
62128c8701c8c1d77664f1b52e4a32f21df05e02
|
3efd4499ce3fd80b5e2b0b3cb24e1bc099878c6a
|
refs/heads/master
| 2023-02-10T16:01:54.996187
| 2021-01-10T02:13:07
| 2021-01-10T02:13:07
| 289,842,950
| 3
| 0
| null | 2020-10-24T02:47:43
| 2020-08-24T06:09:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
import json
from generate_vocab import UNK, PAD
import torch
###################################################
# See train.py for example usage of these classes #
###################################################
class Tokeniser(object):
    """Token <-> id mapping backed by a JSON vocabulary file.

    A single ``self.vocab`` dict stores both directions (token -> id and
    id -> token) plus the special UNK/PAD entries.
    """

    def __init__(self, vocab_filepath="vocab.json", vocab_size=100000, max_seq_len=1000):
        """Load the vocabulary, reserving the top two ids for UNK and PAD.

        :param vocab_filepath: JSON file containing a list of tokens
        :param vocab_size: maximum vocabulary size (including UNK and PAD)
        :param max_seq_len: sequences are right-padded out to this length
        """
        # FIX: close the vocab file deterministically (was json.load(open(...)),
        # which leaked the file handle).
        with open(vocab_filepath) as vocab_file:
            vocab = json.load(vocab_file)
        vocab_size = min(vocab_size-2, len(vocab))
        self.vocab = {vocab[i]: i for i in range(vocab_size)}
        self.vocab.update({i: vocab[i] for i in range(vocab_size)})
        # NOTE(review): UNK takes id vocab_size-1, overwriting the id->token
        # entry of the last regular word added above — confirm this matches
        # generate_vocab's intended layout.
        self.vocab.update({vocab_size-1: UNK, vocab_size: PAD})
        self.vocab.update({UNK: vocab_size-1, PAD: vocab_size})
        self.max_seq_len = max_seq_len
        self.unk, self.pad = self.vocab[UNK], self.vocab[PAD]

    def encode_value(self, val):
        """Return the id for *val*, or the UNK id if out of vocabulary."""
        return self.vocab.get(val, self.unk)

    def encode_seq(self, sequence):
        """Encode *sequence* and right-pad it with PAD ids to max_seq_len."""
        return [self.vocab.get(s, self.unk) for s in sequence] + \
            [self.pad for _ in range(self.max_seq_len-len(sequence))]

    def decode_seq(self, seq):
        """Map a sequence of ids back to tokens (ids must be in the vocab)."""
        return [self.vocab[s] for s in seq]

    def encode_batch(self, X, Y, tensor=True):
        """Encode input sequences X and single-token targets Y.

        Returns torch.long tensors when *tensor* is True, else plain lists.
        """
        encoded_X = [self.encode_seq(x) for x in X]
        encoded_Y = [self.encode_value(y) for y in Y]
        if tensor:
            return torch.tensor(encoded_X, dtype=torch.long), torch.tensor(encoded_Y, dtype=torch.long)
        return encoded_X, encoded_Y
return encoded_X, encoded_Y
class Dataset(object):
    """Sequential batch loader over a JSON-lines dataset of [ast, index] pairs."""

    def __init__(self, f_pth="./data/toy-data.txt"):
        self.loadData(f_pth)
        # cursor into self.X/self.y for sequential batching
        self.batchIndex = 0

    def loadData(self, f_pth):
        """Parse one JSON document per line; targets are each ast's last node."""
        # Each element of X is [ast, index], where index marks the start of
        # the unseen nodes (see separate_dps in utils.py for details).
        with open(f_pth, 'r') as src:
            self.X = [json.loads(row) for row in src]
        self.y = [sample[0][-1] for sample in self.X]

    def getBatch(self, batch_size):
        """Return the next (inputs, targets) slice and advance the cursor."""
        start = self.batchIndex
        stop = start + batch_size
        inputs = [sample[0][:-1] for sample in self.X[start:stop]]
        targets = self.y[start:stop]
        self.batchIndex = stop
        return inputs, targets
|
[
"36907139+OptimusPrinceps@users.noreply.github.com"
] |
36907139+OptimusPrinceps@users.noreply.github.com
|
bdb0d8ac61f3466c665fc80a84a453800a648b70
|
631a48a47b969a211364baae450f9dfc4af7a5f3
|
/CVRP_Skenario_4.py
|
bd3c79bf8c70b21653a0a9b67d871e99687c3376
|
[] |
no_license
|
Rierii/VRPManTrans
|
52fdedc07a79f4df8afb8a3cddf8fa9294e8a089
|
304c10a2cb02b3dff2807c558beae9c92e5bf1b7
|
refs/heads/main
| 2023-01-23T03:34:59.204126
| 2020-12-11T11:35:23
| 2020-12-11T11:35:23
| 318,479,781
| 0
| 0
| null | 2020-12-11T11:35:24
| 2020-12-04T10:21:36
|
Python
|
UTF-8
|
Python
| false
| false
| 4,866
|
py
|
"""SKENARIO 4"""
# Kapasitas 120, Jarak 35
from __future__ import print_function
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
def create_data_model():
    """Build the CVRP instance: 11-node distance matrix, demands, fleet, depot."""
    # NOTE: the matrix is not perfectly symmetric (e.g. [1][2]=849 vs
    # [2][1]=844), exactly as in the original data.
    distance_matrix = [
        [0, 707, 510, 224, 412, 1342, 707, 566, 800, 224, 100],
        [707, 0, 849, 806, 1118, 1304, 1131, 583, 141, 854, 700],
        [510, 844, 0, 728, 640, 860, 283, 316, 860, 361, 608],
        [224, 806, 728, 0, 424, 1565, 900, 781, 922, 400, 141],
        [412, 1118, 640, 424, 0, 1487, 671, 854, 1204, 316, 447],
        [1342, 1304, 860, 1556, 1487, 0, 860, 825, 1217, 1221, 1432],
        [707, 1131, 283, 900, 671, 860, 0, 583, 1140, 500, 806],
        [566, 583, 316, 781, 854, 825, 583, 0, 566, 539, 640],
        [800, 141, 860, 922, 1204, 1217, 1140, 566, 0, 922, 806],
        [224, 854, 361, 400, 316, 1221, 500, 539, 922, 0, 316],
        [100, 700, 608, 141, 447, 1432, 806, 640, 806, 316, 0],
    ]
    return {
        'distance_matrix': distance_matrix,
        'demands': [0, 39, 48, 27, 35, 30, 26, 44, 53, 32, 42],
        'vehicle_capacities': [120, 120, 120, 120],
        'num_vehicles': 4,
        'depot': 0,
    }
def print_solution(data, manager, routing, solution):
    """Print each vehicle's route and distance, then the longest route."""
    longest = 0
    for vehicle_id in range(data['num_vehicles']):
        node = routing.Start(vehicle_id)
        total = 0
        segments = ['Route for vehicle {}:\n'.format(vehicle_id)]
        while not routing.IsEnd(node):
            segments.append(' {} -> '.format(manager.IndexToNode(node)))
            prev = node
            node = solution.Value(routing.NextVar(node))
            total += routing.GetArcCostForVehicle(prev, node, vehicle_id)
        segments.append('{}\n'.format(manager.IndexToNode(node)))
        segments.append('Distance of the route: {}m\n'.format(total))
        print(''.join(segments))
        longest = max(total, longest)
    print('Maximum of the route distances: {}m'.format(longest))
def main():
    """Solve the CVRP problem.

    11 nodes, 4 vehicles of capacity 120, max route distance 3500, with a
    global-span penalty to balance route lengths.
    """
    # Instantiate the data problem.
    data = create_data_model()
    # Create the routing index manager.
    manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
                                           data['num_vehicles'], data['depot'])
    # Create Routing Model.
    routing = pywrapcp.RoutingModel(manager)
    # Create and register a transit callback.
    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return data['distance_matrix'][from_node][to_node]
    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    # Add Capacity constraint.
    def demand_callback(from_index):
        """Returns the demand of the node."""
        # Convert from routing variable Index to demands NodeIndex.
        from_node = manager.IndexToNode(from_index)
        return data['demands'][from_node]
    demand_callback_index = routing.RegisterUnaryTransitCallback(
        demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_callback_index,
        0,  # null capacity slack
        data['vehicle_capacities'],  # vehicle maximum capacities
        True,  # start cumul to zero
        'Capacity')
    # Add Distance constraint.
    dimension_name = 'Distance'
    routing.AddDimension(
        transit_callback_index,
        0,  # no slack
        3500,  # vehicle maximum travel distance
        True,  # start cumul to zero
        dimension_name)
    distance_dimension = routing.GetDimensionOrDie(dimension_name)
    # Heavy penalty on the longest route to balance route lengths.
    distance_dimension.SetGlobalSpanCostCoefficient(100)
    # Setting first solution heuristic with time limit if solution not found
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.time_limit.seconds = 5
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    # Solve the problem.
    solution = routing.SolveWithParameters(search_parameters)
    # Print solution on console.
    if solution:
        print_solution(data, manager, routing, solution)
if __name__ == '__main__':
main()
|
[
"audreyrizki@gmail.com"
] |
audreyrizki@gmail.com
|
74dda792922704aaaf898ddd4475c618ca61ec5c
|
28cc34a7b9e77c3044768ff1ee6266b39f521932
|
/cookiespool/db.py
|
b9f07e0dd81a97acff6a397b8699819319470f4e
|
[] |
no_license
|
Foxgeek36/CookiesPool
|
1d99267e78b6721709aa45635fdc1838efa57abb
|
9088a29cc76368a570013211b431983b461239cd
|
refs/heads/master
| 2020-07-09T16:50:47.339321
| 2019-08-25T06:30:17
| 2019-08-25T06:30:17
| 204,026,291
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
# coding=utf-8
import random
import redis
from cookiespool.config import *
'''
[redis存储模块]
'''
class RedisClient(object):
    def __init__(self, type, website, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):
        """
        Initialize the Redis connection.
        :param type: kind of record stored, e.g. 'accounts' (see __main__ usage)
        :param website: site the records belong to, e.g. 'weibo'
        :param host: Redis host
        :param port: Redis port
        :param password: Redis password
        """
        # decode_responses=True makes hget/hvals/hkeys return str, not bytes.
        self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)
        self.type = type
        self.website = website
    def name(self):
        """
        Get the name of the Redis hash used by this client.
        :return: hash name, formatted as "<type>:<website>"
        """
        return "{type}:{website}".format(type=self.type, website=self.website)
    def set(self, username, value):
        """
        Set a key-value pair in the hash.
        :param username: user name (hash field)
        :param value: password or cookies
        :return: result of the HSET command
        """
        return self.db.hset(self.name(), username, value)
    def get(self, username):
        """
        Get the value stored for a user.
        :param username: user name
        :return: stored value, or None if the field is absent
        """
        return self.db.hget(self.name(), username)
    def delete(self, username):
        """
        Delete the entry for a user.
        :param username: user name
        :return: number of fields removed
        """
        return self.db.hdel(self.name(), username)
    def count(self):
        """
        Get the number of entries in the hash.
        :return: entry count
        """
        return self.db.hlen(self.name())
    def random(self):
        """
        Get a random value, used to pick random cookies.
        NOTE: raises IndexError when the hash is empty (random.choice on []).
        :return: a random stored value
        """
        return random.choice(self.db.hvals(self.name()))
    def usernames(self):
        """
        Get all account names.
        :return: list of all user names (hash fields)
        """
        return self.db.hkeys(self.name())
    def all(self):
        """
        Get all key-value pairs.
        :return: dict mapping user name to password or cookies
        """
        return self.db.hgetall(self.name())
if __name__ == '__main__':
conn = RedisClient('accounts', 'weibo')
result = conn.set('kylin', '1234') # 账号密码
print(result)
|
[
"1002301246@qq.com"
] |
1002301246@qq.com
|
a6dcca2939618af8c91bc0e748a3952d91efa375
|
d1d5818a3c85a92529d01e39e72cb46700a2ae66
|
/weather/weather/urls.py
|
f700317356c93cfd4cadb50d6444b763c4f9b1ae
|
[] |
no_license
|
spenrob/weather
|
c5fca0924c78f9a4cfa71fd389f91f844755520f
|
553cf39062f4a9366869637c950f84e3ddb89d36
|
refs/heads/master
| 2021-01-23T21:01:43.927335
| 2017-05-08T20:38:04
| 2017-05-08T20:38:04
| 90,670,013
| 0
| 0
| null | 2017-05-08T20:38:05
| 2017-05-08T20:33:31
| null |
UTF-8
|
Python
| false
| false
| 882
|
py
|
"""weather URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^weather/', include('weatherapp.urls')),
url(r'^arrests/', include('weatherapp.urls_arrests')),
url(r'^admin/', admin.site.urls)
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
338cee3b8aec81de8bf5fd76d3dca9945f54b52e
|
796dd05d84c56d0873797b29be58247b1be782c6
|
/spotseeker_server/migrations/0002_auto_20181029_2244.py
|
31696e5f9941b7a353b9fb758f10d5ac5badbae7
|
[
"Apache-2.0"
] |
permissive
|
uw-it-aca/spotseeker_server
|
039e65824887a8303f12a3f07e730ca061bab2d5
|
5f21a58b4084d798b1c4f87721b63bad01ac4f36
|
refs/heads/main
| 2023-08-16T08:53:38.124987
| 2023-04-14T21:41:34
| 2023-04-14T21:41:34
| 11,489,866
| 6
| 7
|
Apache-2.0
| 2023-08-15T22:45:30
| 2013-07-17T23:18:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Pin four boolean columns to an explicit BooleanField(default=False)."""
    dependencies = [
        ('spotseeker_server', '0001_initial'),
    ]
    operations = [
        # Review visibility flags on SpaceReview.
        migrations.AlterField(
            model_name='spacereview',
            name='is_deleted',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='spacereview',
            name='is_published',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        # Trust/authorization flags on TrustedOAuthClient.
        migrations.AlterField(
            model_name='trustedoauthclient',
            name='bypasses_user_authorization',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='trustedoauthclient',
            name='is_trusted',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
[
"cstimmel@uw.edu"
] |
cstimmel@uw.edu
|
51e877e96f8a2a6aafafcea65468e4ae5512e5a6
|
71ae741529cab7c174997d0b0ff3f1968ff9e809
|
/InsAndSel_Time.py
|
b9b0394083612b591e3af881e50529604a1c1c3e
|
[] |
no_license
|
Sdancy/Database_stress_testing_for_mysql
|
6200676b23059b21151702eec6cdcc81e562ee47
|
cd08ca13d7f0dbe4dadf8a56e8e1d3a21ccbc7a3
|
refs/heads/master
| 2023-01-25T01:08:52.479964
| 2020-12-07T13:38:31
| 2020-12-07T13:38:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
import pymysql
import time
import random
import string
connect = pymysql.connect(
host='127.0.0.1',
user='root',
port=3306,
passwd='000606',
db='short_video_platform',
use_unicode=True
)
def SelectTime():
    """Time a SELECT on the users table and write the elapsed seconds to a file.

    Uses the module-level `connect` connection and closes it afterwards,
    so this can only be run once per process (mirrors the original design).
    """
    begintime = time.time()
    cur = connect.cursor()
    sql = "SELECT ID,NAME FROM users " \
          "WHERE FANS<200"
    try:
        cur.execute(sql)
        print("查询结果为:{}".format(cur.fetchall()))
    except Exception:
        # A named exception class instead of a bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit.
        connect.rollback()
        print("查询失败")
    finally:
        # Close the cursor before its connection; the original closed the
        # connection first and then the (already orphaned) cursor.
        cur.close()
        connect.close()
    endtime = time.time()
    # `with` guarantees the result file is closed even if write() raises.
    with open("One_Seltime.txt", "w") as file:
        file.write("查询时间为:{}".format(endtime - begintime))
def InsertTime():
    """Insert one video row with a random existing author and time the insert.

    Uses the module-level `connect` connection and closes it afterwards,
    so this can only be run once per process (mirrors the original design).
    """
    cur = connect.cursor()
    sql = "SELECT NAME FROM users"
    cur.execute(sql)
    name = cur.fetchall()  # all NAME values from the users table
    name_data = [mid[0] for mid in name]
    # Pick a random existing author and a random 10-char ASCII intro.
    v_author = random.choice(name_data)
    v_intro = ''.join(random.sample(string.ascii_letters + ' ', 10))
    print("要插入的数据如下:")
    print("作者:{}, 简介:{}".format(v_author, v_intro))
    begintime = time.time()
    # Parameterized statement: values are bound, never string-formatted in.
    sql = "INSERT INTO videos(AUTHOR,INTRO) VALUES (%s,%s)"
    args = (v_author, v_intro)
    try:
        cur.execute(sql, args)
        connect.commit()
        print("插入成功")
    except Exception:
        # Named exception instead of a bare `except:`.
        connect.rollback()
        print("插入失败")
    finally:
        # Cursor first, then its connection (original order was reversed).
        cur.close()
        connect.close()
    endtime = time.time()
    with open("One_IneTime.txt", "w") as file:
        file.write("插入时间为:{}".format(endtime - begintime))
if __name__=="__main__":
# InsertTime()
SelectTime()
|
[
"785565314@qq.com"
] |
785565314@qq.com
|
0b06190e016241e069caff14b930d190e7d5f83f
|
00d1856dbceb6cef7f92d5ad7d3b2363a62446ca
|
/djexample/images/forms.py
|
dce42d13b42c6f5dec509f69a49c66092513e4b3
|
[] |
no_license
|
lafabo/django_by_example
|
0b05d2b62117f70681c5fc5108b4072c097bc119
|
3cf569f3e6ead9c6b0199d150adf528bd0b2a7c5
|
refs/heads/master
| 2020-12-29T17:54:12.894125
| 2016-06-04T10:35:22
| 2016-06-04T10:35:22
| 58,313,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
from django import forms
from .models import Image
from urllib import request
from django.core.files.base import ContentFile
from django.utils.text import slugify
class ImageCreateForm(forms.ModelForm):
    """Form for bookmarking a remote image by URL (url is set via JS, hence hidden)."""
    class Meta:
        model = Image
        fields = ('title', 'url', 'description')
        widgets = {
            'url': forms.HiddenInput
        }

    def clean_url(self):
        """Validate that the URL ends in a supported image extension.

        Raises forms.ValidationError both for unsupported extensions and for
        URLs containing no '.' at all — the original indexed rsplit()[1]
        unconditionally and crashed with IndexError on extension-less URLs.
        """
        url = self.cleaned_data['url']
        valid_extensions = ['jpg', 'jpeg']
        parts = url.rsplit('.', 1)
        # rsplit returns a single element when the URL has no '.'.
        extension = parts[1].lower() if len(parts) == 2 else ''
        if extension not in valid_extensions:
            raise forms.ValidationError('The given url does not match valid image extensions')
        return url

    def save(self, force_insert=False, force_update=False, commit=True):
        """Download the remote image and attach it to the model before saving.

        NOTE(review): urlopen fetches an arbitrary user-supplied URL with no
        timeout (SSRF/hang surface) — confirm this is acceptable upstream.
        """
        image = super(ImageCreateForm, self).save(commit=False)
        image_url = self.cleaned_data['url']
        # Stored filename: slugified title + the URL's (validated) extension.
        image_name = '%s.%s' % (slugify(image.title), image_url.rsplit('.', 1)[1].lower())
        # download image from url
        response = request.urlopen(image_url)
        image.image.save(image_name, ContentFile(response.read()), save=False)
        if commit:
            image.save()
        return image
|
[
"lazyfatboy@ya.ru"
] |
lazyfatboy@ya.ru
|
21ea6f623d63c27d406333f8b1c81b7bf7634102
|
d247d0eae00007902a737840b1acb3347fe83c41
|
/age.py
|
c04a2321936446426e66a2ea03af2d1732303ed9
|
[] |
no_license
|
vincentvmarshburn/example-repo-2
|
746b3c269ef6fc8d860f4b9c4850effdc1d6ee32
|
999d4eb4306f70e6f2c84cc089787a697c0a1a53
|
refs/heads/master
| 2022-09-03T16:23:07.363026
| 2020-05-29T18:30:52
| 2020-05-29T18:30:52
| 267,906,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
__author__ = 'noomatik'
from datetime import datetime

# Year the script is run in; reference point for the age estimate.
YEAR = datetime.now().year

user_name = input("What is your name? ")
year_of_birth = input("What is your year of birth? ")

# Rough estimate: ignores whether the birthday has passed this year.
print("Hello %s! You are about %s years old." % (user_name, str(YEAR - int(year_of_birth))))
|
[
"vmarshbu@mdc.edu"
] |
vmarshbu@mdc.edu
|
bb17b14f9cc0eaaeb740793ec62035edb8637a1f
|
71f00ed87cd980bb2f92c08b085c5abe40a317fb
|
/Data/GoogleCloud/google-cloud-sdk/lib/surface/privateca/subordinates/activate.py
|
f9f73fb40b0bb8a564338a2a28bed7e1e5cf84c6
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
factoryofthesun/Rao-NLP
|
2bd8269a8eed1cb352c14c8fde88e3111ccca088
|
87f9723f5ee51bd21310d58c3425a2a7271ec3c5
|
refs/heads/master
| 2023-04-18T08:54:08.370155
| 2020-06-09T23:24:07
| 2020-06-09T23:24:07
| 248,070,291
| 0
| 1
| null | 2021-04-30T21:13:04
| 2020-03-17T20:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 3,547
|
py
|
# Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activate a pending Certificate Authority."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.privateca import operations
from googlecloudsdk.command_lib.privateca import pem_utils
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.core.util import files
class Activate(base.SilentCommand):
r"""Activate a subordinate certificate authority in a pending state.
## EXAMPLES
To activate a subordinate CA named 'server-tls-1' in the location 'us' using
a PEM certificate
chain in 'chain.crt':
$ {command} server-tls-1 \
--location us \
--pem-chain ./chain.crt
"""
@staticmethod
def Args(parser):
resource_args.AddCertificateAuthorityPositionalResourceArg(
parser, 'to activate')
base.Argument(
'--pem-chain',
required=True,
help='A file containing a list of PEM-encoded certificates, starting '
'with the current CA certificate and ending with the root CA '
'certificate.').AddToParser(parser)
def _ParsePemChainFromFile(self, pem_chain_file):
"""Parses a pem chain from a file, splitting the leaf cert and chain.
Args:
pem_chain_file: file containing the pem_chain.
Raises:
exceptions.InvalidArgumentException if not enough certificates are
included.
Returns:
A tuple with (leaf_cert, rest_of_chain)
"""
try:
pem_chain_input = files.ReadFileContents(pem_chain_file)
except (files.Error, OSError, IOError):
raise exceptions.BadFileException(
"Could not read provided PEM chain file '{}'.".format(pem_chain_file))
certs = pem_utils.ValidateAndParsePemChain(pem_chain_input)
if len(certs) < 2:
raise exceptions.InvalidArgumentException(
'pem-chain',
'The pem_chain must include at least two certificates - the subordinate CA certificate and an issuer certificate.'
)
return certs[0], certs[1:]
def Run(self, args):
client = privateca_base.GetClientInstance()
messages = privateca_base.GetMessagesModule()
ca_ref = args.CONCEPTS.certificate_authority.Parse()
pem_cert, pem_chain = self._ParsePemChainFromFile(args.pem_chain)
operation = client.projects_locations_certificateAuthorities.Activate(
messages
.PrivatecaProjectsLocationsCertificateAuthoritiesActivateRequest(
name=ca_ref.RelativeName(),
activateCertificateAuthorityRequest=messages
.ActivateCertificateAuthorityRequest(
pemCaCertificate=pem_cert, pemCaCertificateChain=pem_chain)))
operations.Await(operation, 'Activating Certificate Authority.')
|
[
"guanzhi97@gmail.com"
] |
guanzhi97@gmail.com
|
d7f8652cf41ebb2091d7a5b13cabad387a52e6f3
|
fbf005bcb1193a7701682b690b660337ac8055d8
|
/client.py
|
ad979610ed308a92fe549857c7478b1b20c40820
|
[] |
no_license
|
ZubnayaFeya/messenger
|
ede708f2a98fb6ca559334094345146662f7e93c
|
095f2a4744cd9ab022587841fdc0e8c786eaabb3
|
refs/heads/master
| 2021-09-10T16:31:30.437000
| 2018-03-29T10:18:36
| 2018-03-29T10:18:36
| 125,897,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
import socket
import json
import argparse
#from time import ctime
from type_msg import *
import jim
class CClient():
    """Minimal JIM chat client: connect, build/send a presence message, read a reply."""

    def __init__(self):
        # Open a TCP connection to the local server at startup.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect(('localhost', 7777))

    def prepare_data(self):
        """Build the presence message announcing this client."""
        return f_presence()

    def send_data(self, data):
        """Encode *data* with the JIM protocol and send it over the socket."""
        encoded = jim.f_encode(data)
        self.sock.send(encoded)

    def recv_data(self):
        """Read one response (up to 1 KiB) and close the socket."""
        raw = self.sock.recv(1024)
        self.sock.close()
        return raw

    def prepare_resalt(self, result):
        """Decode a raw JIM response into a Python object."""
        return jim.f_decode(result)
cli = CClient()
prep_d = cli.prepare_data()
cli.send_data(prep_d)
res = cli.recv_data()
print(cli.prepare_resalt(res))
'''
message = f_presence()
jmessage = json.dumps(message)
bjmessage = jmessage.encode('utf-8')
s = socket.socket(family = socket.AF_INET, type = socket.SOCK_STREAM, proto = 0)
s.connect(('localhost', 7777))
s.send(bjmessage)
while 1:
result = s.recv(1024)
result = result.decode('utf-8')
result = json.loads(result)
print("Полученый ответ: {}".format(result))
s.close()
'''
|
[
"danilal@mail.ru"
] |
danilal@mail.ru
|
5e2be6a66278427ebc2e1b6afcf0da997df8679c
|
2a435bb6ecc2b3d2df0bca62aa990312b313ff8d
|
/pointer/urls.py
|
c6f3cd054e4c7e31dc6a8cea3c5bf4f5e6dd8f5f
|
[] |
no_license
|
singlasahil221/My-CGPA
|
331809e2343989556c6b208fb3414f9039551afd
|
c651956de4973be1057f3f15dd04a4334a40ff66
|
refs/heads/master
| 2022-12-12T14:55:36.755727
| 2018-02-16T05:40:27
| 2018-02-16T05:40:27
| 120,011,982
| 0
| 1
| null | 2022-05-25T00:25:35
| 2018-02-02T17:42:20
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
from django.conf.urls import url
from django.contrib import admin
from calc import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home),
]
|
[
"singlasahil221@gmail.com"
] |
singlasahil221@gmail.com
|
da8bf2b9c61541005f63641740d9613f6612a029
|
77afbf0a99d958fe497567c5fb03074e66cfa3d5
|
/batch/weather.py
|
680003d60fadbfbc20de78db5106524d3b41f93c
|
[] |
no_license
|
tmkokumura/smarthouse
|
c402a2e0eca727734707f6849c54983da168e781
|
b3b3b6d7d1cbed50fc7a3dc7becbbbd99e6108d3
|
refs/heads/master
| 2020-03-30T07:32:35.331574
| 2018-10-31T12:40:49
| 2018-10-31T12:40:49
| 150,947,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import time
import json
import requests
import logging
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from util import db_utils
log_fmt = '%(asctime)s %(levelname)s %(name)s :%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_fmt)
def get_insert_params(data_dict):
    """Build the parameter tuple for the weather INSERT statement.

    :param data_dict: decoded OpenWeatherMap current-weather response
    :return: (city, dt, temp, pressure, humidity, wind_speed, wind_deg,
              description, sub_description); wind_deg is None when absent.
    """
    main = data_dict['main']
    wind = data_dict['wind']
    weather = data_dict['weather'][0]
    return (
        data_dict['name'],                        # city
        datetime.fromtimestamp(data_dict['dt']),  # JST
        main['temp'],                             # Celsius
        main['pressure'],                         # hPa
        main['humidity'],                         # %
        wind['speed'],                            # m/s
        wind.get('deg', None),                    # degree, optional
        weather['main'],
        weather['description'],
    )
def get_select_params(data_dict):
    """Return the (city, observation time) key used to deduplicate rows."""
    return (data_dict['name'], datetime.fromtimestamp(data_dict['dt']))
def request():
    """Fetch Tokyo's current weather from the OpenWeatherMap API.

    :return: (HTTP status code, decoded JSON body as a dict)
    """
    url = 'http://api.openweathermap.org/data/2.5/weather'
    # NOTE(review): API key is hard-coded in source — consider moving this
    # credential to configuration or an environment variable.
    url_params = {'q': 'Tokyo', 'units': 'metric', 'APPID': 'e7fbfe9a2c96e5ff6f3924c7056a441e'}
    res = requests.get(url, params=url_params)
    return res.status_code, json.loads(res.text)
def insert_weather(res_dict_):
    """Persist one API response as a row in the `weather` table."""
    sql = 'INSERT INTO weather (city, dt, temp, pressure, humidity, wind_speed, wind_deg, \
    description, sub_description) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)'
    sql_params = get_insert_params(res_dict_)
    logging.debug(sql_params)
    db_utils.insert(sql, params=sql_params)
def select_weather(res_dict_):
    """Count existing rows for this (city, dt) pair; used to skip duplicates."""
    sql = 'SELECT COUNT(1) FROM weather WHERE city = ? AND dt = ?;'
    sql_params = get_select_params(res_dict_)
    logging.debug(sql_params)
    # select() returns rows of tuples; [0][0] extracts the COUNT value.
    return db_utils.select(sql, params=sql_params)[0][0]
if __name__ == '__main__':
logging.info('Start [weather.py]')
while True:
logging.info('execute api')
status_code, res_dict = request()
logging.info('raw response body: {}'.format(res_dict))
if status_code == 200:
logging.info('execute select')
count = select_weather(res_dict)
logging.info('count: {}'.format(count))
if count == 0:
logging.info('execute insert')
insert_weather(res_dict)
else:
logging.info('skip executing insert')
time.sleep(3600)
logging.info('End [weather.py]')
|
[
"tmkokumura@gmail.com"
] |
tmkokumura@gmail.com
|
e131f4ceced2b0d9f4007477785d44916f46a583
|
50272366e3d57aeb0cfc91a69dbd080b234647bd
|
/Users/views.py
|
7946a226eaf36c74bd1a8c9562b8712a89a42dba
|
[] |
no_license
|
Saumyaa27/Disease-prediction-and-patient-management-webapp
|
fb0fa5f9f77f968092c1df06d5abfcbaccadcff9
|
ec08ece69a6d085a801316a9ec220a472aef02fb
|
refs/heads/master
| 2023-01-03T07:48:28.244851
| 2020-10-27T12:23:31
| 2020-10-27T12:23:31
| 307,931,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,981
|
py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from django.contrib.auth import authenticate, login, logout
from django.db import IntegrityError
from django.contrib.auth.decorators import login_required
from .models import User,Patient,Doctor,Reports,Treatment
from .forms import FileForm , send_to_doc_Form,Register_Doc,Register_Patient, LoginUserForm, RegisterUserForm, Forgot_email_form,Forgot_Password_Form, Prescription
from .utils import send_email
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from .token import account_activation_token
from .decorators import patient_required, doctor_required
from django.views.decorators.http import require_http_methods
# Create your views here.
def Change_Password(request):
if request.method == "POST":
form = Forgot_Password_Form(request.POST)
if not form.is_valid() or form.data.get('password1') != form.data.get('password2'):
return render(request, "Users/forgot.html",{
"message" : "Change Passsword",
"form" : form,
"name" : "Change Password",
"error" : "Passwords Should Match"
})
else:
request.user.set_password(form.data.get('password1'))
request.user.save()
# return HttpResponseRedirect(reverse("login"))
return render(request, "Users/confirmation.html",{
"message" : "Password Changed Succesfully. Now you can login your account."
})
form = Forgot_Password_Form()
return render(request, "Users/forgot.html",{
"message" : "Change Passsword",
"form" : form,
"name" : "Change Password",
})
@login_required
def Edit_profile(request):
    """Show and update the logged-in user's profile (patient or doctor form)."""
    message = None
    if request.method == "POST":
        # NOTE(review): form.save() is called without checking form.is_valid();
        # invalid POST data will raise here rather than re-render with errors.
        if request.user.is_patient:
            form = Register_Patient(data=request.POST,instance=request.user.Patient)
            form.save()
        else:
            form = Register_Doc(data=request.POST,instance=request.user.Doctor)
            form.save()
        message = "Profile Updated Succesfully"
    # Always rebuild an unbound form from the (possibly just-saved) instance.
    if request.user.is_patient:
        form = Register_Patient(instance=request.user.Patient)
    else:
        form = Register_Doc(instance=request.user.Doctor)
    return render(request,"Users/Edit.html",{
        "form" : form,
        "message" : message
    })
@login_required
@doctor_required
def view_active_treatments(request):
    """List this doctor's treatments that are currently active."""
    doctor_treatments = Treatment.objects.filter(Doctor=request.user.Doctor)
    active = [treatment for treatment in doctor_treatments if treatment.is_active]
    return render(request, 'Users/ActiveTreat.html', {
        'Treatments': active,
    })
@login_required
@doctor_required
def view_new_treatments(request):
    """List this doctor's newly requested (not yet accepted/rejected) treatments."""
    doctor_treatments = Treatment.objects.filter(Doctor=request.user.Doctor)
    new = [treatment for treatment in doctor_treatments if treatment.is_new]
    return render(request, 'Users/NewTreat.html', {
        'Treatments': new
    })
@login_required
def Treats(request,nums):
Treat = Treatment.objects.get(pk=nums)
if request.user.is_doctor:
reports = request.user.Doctor.Reports.all()
if Treat.Doctor != request.user.Doctor or Treat.is_completed or Treat.is_new:
return HttpResponseRedirect(reverse("index"))
form = Prescription(instance=Treat)
return render(request, 'Users/Treatment.html',{
'Treatment' : Treat,
'files' : reports,
'presc' : form
})
else:
reports = Reports.objects.filter(Patient=request.user.Patient)
if Treat.Patient != request.user.Patient or Treat.is_new:
return HttpResponseRedirect(reverse("index"))
return render(request, 'Users/Treatment.html',{
'Treatment' : Treat,
'files' : reports
})
@login_required()
def delete_Treat(request,nums):
    """Delete one of the logged-in patient's treatment requests, then redirect."""
    # NOTE(review): reachable via GET and lacks @patient_required — a doctor
    # hitting this URL raises on `request.user.Patient`; confirm this should
    # be POST-only and patient-only like the sibling views.
    t = Treatment.objects.get(pk=nums)
    print(nums)
    if t.Patient != request.user.Patient:
        # Not the owner: bounce back without deleting.
        return HttpResponseRedirect(reverse("View_Treatment"))
    t.delete()
    return HttpResponseRedirect(reverse("View_Treatment"))
@login_required()
def Complete_Treat(request, nums):
    """Mark a treatment as completed (doctor action), then redirect.

    Only the treatment's own doctor may complete it; other POSTs and all
    non-POST requests just redirect back to the active-treatments page.
    """
    if request.method == "POST":
        t = Treatment.objects.get(pk=nums)
        # NOTE(review): `request.user.Doctor` raises for non-doctor users;
        # a @doctor_required guard like the list views use may be intended.
        if t.Doctor == request.user.Doctor:
            t.is_completed = True
            t.is_active = False
            t.save()
    # BUG FIX: the original only returned inside the POST branch, so a GET
    # request made the view return None and Django raised a 500.
    return HttpResponseRedirect(reverse("ActiveTreat"))
@login_required()
def not_new(request, nums):
    """Accept or reject a new treatment request (doctor action), then redirect.

    On POST by the treatment's own doctor the request stops being 'new';
    it becomes active only when the "Accept" button was pressed.
    """
    if request.method == "POST":
        t = Treatment.objects.get(pk=nums)
        if t.Doctor == request.user.Doctor:
            t.is_new = False
            if "Accept" in request.POST:
                t.is_active = True
            t.save()
    # BUG FIX: the original only returned inside the POST branch, so a GET
    # request made the view return None and Django raised a 500.
    return HttpResponseRedirect(reverse("NewTreat"))
@login_required()
@patient_required
def View_Treatment(request):
    """Show the logged-in patient's treatments grouped by state.

    States are mutually exclusive in this order: active, new, completed;
    anything else is shown as rejected.
    """
    buckets = {'active': [], 'new': [], 'completed': [], 'rejected': []}
    for treatment in Treatment.objects.filter(Patient=request.user.Patient):
        if treatment.is_active:
            buckets['active'].append(treatment)
        elif treatment.is_new:
            buckets['new'].append(treatment)
        elif treatment.is_completed:
            buckets['completed'].append(treatment)
        else:
            buckets['rejected'].append(treatment)
    return render(request, 'Users/Treat.html', {
        'active': buckets['active'],
        'new': buckets['new'],
        'rejected': buckets['rejected'],
        "completed": buckets['completed'],
    })
@login_required()
@patient_required
@require_http_methods(["POST"])
def send(request,nums):
if request.method == "POST":
files = Reports.objects.get(pk=nums)
if files.Patient != request.user.Patient:
return HttpResponseRedirect(reverse("index"))
docs = request.POST.getlist(f'file_{nums}')
for id in docs:
if all(int(id) != doc.id for doc in files.Doctors.all()):
d = Doctor.objects.get(pk=id)
files.Doctors.add(d)
for doc in files.Doctors.all():
if str(doc.id) not in docs:
d = Doctor.objects.get(pk=doc.id)
files.Doctors.remove(d)
return HttpResponseRedirect(reverse("reports"))
@login_required()
@patient_required
def showfile(request):
# lastfile = request.user.Patient.Reports
form = FileForm(request.POST or None, request.FILES or None)
if form.is_valid():
form.save(request.user) #replace by patient
lastfile= Reports.objects.filter(Patient=request.user.Patient)
send_form = send_to_doc_Form(request.user.Patient)
# treat = Treatment.objects.filter(Patient=request.user.Patient)
# send_form.fields['Doctors'].queryset = (doc.Doctor for doc in treat )
context = None
if lastfile:
context= {
'form': form,
'lastfile' : lastfile,
'Send' : send_form
}
if not context:
context = {
'form': form,
'Send' : send_form
}
return render(request, 'Users/files.html', context)
def rform(request, num):
    """Render a bare registration form: patient when num == 1, doctor otherwise."""
    form = Register_Patient() if num == 1 else Register_Doc()
    return render(request, 'Users/form.html', {"form": form})
def index(request):
    """Render the public landing page."""
    return render(request, "Users/index.html",)
def email_forgot(request):
if request.method == "POST":
form = Forgot_email_form(request.POST)
email = form.data.get("email")
print(email)
u = User.objects.filter(email=email).first()
print("here",u)
if u is not None :
current_site = get_current_site(request)
send_email(current_site,u,mess="reset your Password",link="Forgot",subj = "Reset Password")
logout(request)
return render(request, "Users/confirmation.html",{
"message" : "Change you password by email sent ",
"u" : u,
})
else:
return render(request, "Users/forgot.html",{
"message" : "Forgot Password",
"form" : form,
"name" : "Send Email",
"error" : "Email Doesnot Exists"
})
form = Forgot_email_form()
return render(request, "Users/forgot.html",{
"message" : "Forgot Password",
"form" : form,
"name" : "Send Email"
})
def Forgot(request, uidb64, token):
    """Password-reset landing view reached from the emailed link.

    Decodes the user id from the link, verifies the one-time token, and then
    either renders the change-password form (GET) or applies the new
    password (POST, with a match check).
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        # BUG FIX: the original caught `user.DoesNotExist`, but `user` is
        # unbound when the decode itself raises, turning a bad link into a
        # NameError instead of the intended 'invalid link' page. The
        # exception class lives on the model class, User.
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        if request.method == "POST":
            form = Forgot_Password_Form(request.POST)
            if not form.is_valid() or form.data.get('password1') != form.data.get('password2'):
                return render(request, "Users/forgot.html",{
                    "message" : "Change Passsword",
                    "form" : form,
                    "name" : "Change Password",
                    "error" : "Password Should Match"
                })
            else:
                user.set_password(form.data.get('password1'))
                user.save()
                return HttpResponseRedirect(reverse("login"))
        else:
            form = Forgot_Password_Form()
            return render(request, "Users/forgot.html",{
                "message" : "Change Passsword",
                "form" : form,
                "name" : "Change Password",
            })
    else:
        return render(request, "Users/confirmation.html",{
            "message" : "Link is invalid!"
        })
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
log = LoginUserForm(request.POST)
email = log.data.get("email")
password = log.data.get("password")
user = authenticate(request, email=email, password=password)
# Check if authentication successful
if user is not None:
if not user.is_active:
return HttpResponse(f'Please confirm your email address to complete the registration')
login(request, user)
link = request.POST["next"]
if link != "None":
return HttpResponseRedirect(link)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "Users/login.html", {
"message": "Invalid username and/or password.",
"next" : request.POST["next"],
"login" : log
})
else:
log = LoginUserForm()
if "next" in request.GET:
url = request.GET["next"]
else:
url = None
return render(request, "Users/login.html",{
"next" : url,
"login" : log,
})
def logout_view(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    return HttpResponseRedirect(reverse("index"))
def reg(request):
    """Render the patient registration page (credentials + profile forms)."""
    context = {
        "register": RegisterUserForm(),
        "form": Register_Patient(),
    }
    return render(request, "Users/registerDoctor.html", context)
def register(request):
if request.method == "POST":
reg = RegisterUserForm(request.POST)
email = reg.data.get("email")
form = Register_Patient(request.POST)
# Ensure password matches confirmation
password = reg.data.get("password1")
confirmation = reg.data.get("password2")
if not reg.is_valid() or password != confirmation:
return render(request, "Users/registerDoctor.html", {
"message": "Passwords must match.",
"form" : form,
"register" : reg
})
if not form.is_valid():
return render(request, "Users/registerDoctor.html",{
"form" : form,
"register" : reg,
})
# Attempt to create new user
try:
user = User.objects.create_user(email, password,is_active = True,is_patient = True) ### change is active to false
user.save()
p = form.save(commit=False)
p.user = user
p.save()
current_site = get_current_site(request)
send_email(current_site,user,p.Name)
return render(request, "Users/confirmation.html",{
"message" : "Confirm your email",
"u" : user,
})
except IntegrityError:
return render(request, "Users/registerDoctor.html", {
"message": "Username already taken.",
"form" : form,
"register" : reg
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return HttpResponseRedirect(reverse("index"))
def register_Doctor(request):
if request.method == "POST":
form = Register_Doc(request.POST)
reg = RegisterUserForm(request.POST)
if not reg.is_valid():
return render(request,"Users/registerDoctor.html",{
"form" : form,
"d" : True,
"register" : reg
})
email = reg.data.get("email")
# Ensure password matches confirmation
password = reg.data.get("password1")
confirmation = reg.data.get("password2")
if password != confirmation:
return render(request, "Users/registerDoctor.html", {
"message": "Passwords must match.",
"form" : form,
"d" : True,
"register" : reg
})
if not form.is_valid():
return render(request,"Users/registerDoctor.html",{
"form" : form,
"d" : True,
"register" : reg
})
# Attempt to create new user
try:
user = User.objects.create_user(email, password,is_active = True,is_doctor = True) ### change is active to false
user.save()
d = form.save(commit=False)
d.user = user
d.save()
current_site = get_current_site(request)
send_email(current_site,user,d.Name)
return render(request, "Users/confirmation.html",{
"message" : "Confirm your email",
"u" : user,
})
except IntegrityError:
return render(request, "Users/registerDoctor.html", {
"message": "Username already taken.",
"form" : form,
"d" : True,
"register" : reg
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
return HttpResponseRedirect(reverse("index"))
def activate(request, uidb64, token):
    """Activate a user account from an email-confirmation link.

    Decodes the base64 user id, verifies the activation token, and on
    success marks the user active and logs them in.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        # Fixed: this previously read `user.DoesNotExist`, but `user` is not
        # bound yet when the decode/lookup raises, so the except clause itself
        # would fail with NameError.  The exception class lives on the model.
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user)
        # return redirect('home')
        return render(request, "Users/confirmation.html", {
            "message": "Thank you for your email confirmation. Now you can login your account."
        })
    else:
        return render(request, "Users/confirmation.html", {
            "message": "Activation link is invalid!"
        })
|
[
"mahajanparth19@gmail.com"
] |
mahajanparth19@gmail.com
|
49888ff53e9d215f112580dc86b077b03882bb30
|
68bde65c828e73d33f987ddd7d8c5d2712e54d33
|
/python-access-web-data/week-6/hidden.py
|
66feb56430f42b737efdcba2814ef8ad583fa32f
|
[] |
no_license
|
champi-dev/p-for-everyone
|
5194b39e266ef052ab2af8d73b7ce3f536814b92
|
1a38b6fb012ddcd186d5271463137719b97431d8
|
refs/heads/master
| 2020-12-06T08:21:05.960795
| 2020-02-08T19:57:47
| 2020-02-08T19:57:47
| 232,405,836
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
{
"consumer_key": "h7Lu...Ng",
"consumer_secret": "dNKenAC3New...mmn7Q",
"token_key": "10185562-eibxCp9n2...P4GEQQOSGI",
"token_secret": "H0ycCFemmC4wyf1...qoIpBo"
}
|
[
"dsarmiento@lean-tech.io"
] |
dsarmiento@lean-tech.io
|
565df863bafd4d071e2abce34fb7d1f3ab74c0b6
|
b2b2ce788e8400090a238422d03cb39e806966f7
|
/leadmanager/leadmanager/settings.py
|
aacb3fd6f3fadbf88f7c1a02d1dce9cfa15d8501
|
[] |
no_license
|
shubham1811/hireScript
|
b638f2793b0d8004d0f18d47918d82e740ac26f4
|
018f361b8d883c9333cf2a95203ece77ee9f9c23
|
refs/heads/master
| 2020-04-27T05:48:51.835073
| 2019-03-06T07:38:55
| 2019-03-06T07:38:55
| 174,091,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
"""
Django settings for leadmanager project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd=*a67l%fc@!l%tjr=rvwy4@ba^$&tjq1sjbmw@pdvb$z=r#j!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'leads',
'rest_framework',
'frontend'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'leadmanager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'leadmanager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"shubham.h.kumar123@gmail.com"
] |
shubham.h.kumar123@gmail.com
|
279f22bf4622f6f3548b87e1904bd5ff49efb3c1
|
3e7bc64d26f60db3755abed956598ff6279dfc90
|
/Utilities/slidesPlot3D_test.py
|
64e7d3e30f452f305bbcec31b55e3e84dee6b1cd
|
[] |
no_license
|
medford-group/surrogate_functionals
|
88fcf1be3fc4e5b1f2a4a8c5fd736b2627f47973
|
a4382a4cf0ed4318bcf885f51331aabc9bcc6afc
|
refs/heads/master
| 2020-05-16T18:51:29.578192
| 2017-05-13T03:06:56
| 2017-05-13T03:06:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,912
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 27 16:19:26 2017
@author: ray
"""
"""
slice3.py - plot 3D data on a uniform tensor-product grid as a set of
three adjustable xy, yz, and xz plots
Copyright (c) 2013 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on Wed Dec 4 11:24:14 MST 2013
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib import gridspec
def meshgrid3(x, y, z):
    """Create a three-dimensional tensor-product meshgrid.

    Returns arrays ``xx, yy, zz`` of shape ``(len(x), len(y), len(z))`` with
    ``xx[i, j, k] == x[i]``, ``yy[i, j, k] == y[j]``, ``zz[i, j, k] == z[k]``
    — i.e. matrix ('ij') indexing.  Replaces the original hand-rolled
    tile/reshape/swapaxes construction with the equivalent library call.
    """
    xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
    return xx, yy, zz
class DiscreteSlider(Slider):
    """A matplotlib slider widget with discrete steps.
    Created by Joe Kington and submitted to StackOverflow on Dec 1 2012
    http://stackoverflow.com/questions/13656387/can-i-make-matplotlib-sliders-more-discrete
    """

    def __init__(self, *args, **kwargs):
        """Identical to Slider.__init__, except for the "increment" kwarg.
        "increment" specifies the step size that the slider will be discritized
        to."""
        self.inc = kwargs.pop('increment', 1)
        Slider.__init__(self, *args, **kwargs)

    def set_val(self, val):
        """Snap the slider polygon to *val*, redraw, and notify observers."""
        xy = self.poly.xy
        xy[2] = val, 1
        xy[3] = val, 0
        self.poly.xy = xy
        # Suppress slider label
        self.valtext.set_text('')
        if self.drawon:
            self.ax.figure.canvas.draw()
        self.val = val
        if not self.eventson:
            return
        # Fixed for Python 3: dict.iteritems() no longer exists; items()
        # iterates the same pairs (and also works under Python 2).
        for cid, func in self.observers.items():
            func(val)
class slice3(object):
    """Interactive viewer for a 3-D RGB volume.

    Shows three orthogonal cross sections (yz, xz, xy) of ``u`` plus a PCA
    scatter plot, with one discrete slider per axis to pick the slice index.
    Crosshair lines on each section track the other two sliders.
    """
    def __init__(self,xx,yy,zz,u,PCA_x,PCA_y,PCA_u):
        '''
        u: PCA analysis color (R,G,B) volume — assumes shape
           (nx, ny, nz, 3) so a single-axis slice yields an RGB image;
           TODO confirm with callers.
        PCA_x, PCA_y: scatter-plot coordinates; PCA_u: per-point colors.
        '''
        # Recover the 1-D coordinate vectors from the meshgrid arrays.
        self.x = xx[:,0,0]
        self.y = yy[0,:,0]
        self.z = zz[0,0,:]
        self.data = u
        # 2x3 grid: scatter on top row, the three slice views on the bottom.
        gs = gridspec.GridSpec(2, 3)
        # ax0 = plt.subplot(gs[0])
        # ax1 = plt.subplot(gs[1])
        self.fig = plt.figure()#1,(20,7))
        self.ax0 = self.fig.add_subplot(gs[1])
        self.ax1 = self.fig.add_subplot(gs[3])
        self.ax2 = self.fig.add_subplot(gs[4])
        self.ax3 = self.fig.add_subplot(gs[5])
        # self.ax0 = self.fig.add_subplot(1,1,1)
        # self.ax1 = self.fig.add_subplot(2,3,1)#,aspect='equal')
        # self.ax2 = self.fig.add_subplot(2,3,2)#,aspect='equal')
        # self.ax3 = self.fig.add_subplot(2,3,3)#,aspect='equal')
        # Crosshair lines: each view shows where the other two slices sit.
        self.xplot_zline = self.ax1.axvline(color='m',linestyle='--',lw=2)
        self.xplot_zline.set_xdata(self.z[0])
        self.xplot_yline = self.ax1.axhline(color='m',linestyle='--',lw=2)
        self.xplot_yline.set_ydata(self.y[0])
        self.yplot_xline = self.ax2.axhline(color='m',linestyle='--',lw=2)
        self.yplot_xline.set_ydata(self.x[0])
        self.yplot_zline = self.ax2.axvline(color='m',linestyle='--',lw=2)
        self.yplot_zline.set_xdata(self.z[0])
        self.zplot_xline = self.ax3.axvline(color='m',linestyle='--',lw=2)
        self.zplot_xline.set_xdata(self.x[0])
        self.zplot_yline = self.ax3.axhline(color='m',linestyle='--',lw=2)
        self.zplot_yline.set_ydata(self.y[0])
        self.PCAscatter = self.ax0.scatter(PCA_x, PCA_y, color = PCA_u)#, alpha=0.5)
        # Initial slices at index 0 of each axis.
        self.xslice = self.ax1.imshow(u[0,:,:,:],extent=(self.z[0],self.z[-1],self.y[0],self.y[-1]))
        self.yslice = self.ax2.imshow(u[:,0,:,:],extent=(self.z[0],self.z[-1],self.x[0],self.x[-1]))
        self.zslice = self.ax3.imshow(u[:,:,0,:],extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]))
        # Create and initialize x-slider
        self.sliderax1 = self.fig.add_axes([0.125,0.08,0.225,0.03])
        self.sliderx = DiscreteSlider(self.sliderax1,'',0,len(self.x)-1,increment=1,valinit=0)
        self.sliderx.on_changed(self.update_x)
        self.sliderx.set_val(0)
        # Create and initialize y-slider
        self.sliderax2 = self.fig.add_axes([0.4,0.08,0.225,0.03])
        self.slidery = DiscreteSlider(self.sliderax2,'',0,len(self.y)-1,increment=1,valinit=0)
        self.slidery.on_changed(self.update_y)
        self.slidery.set_val(0)
        # Create and initialize z-slider
        self.sliderax3 = self.fig.add_axes([0.675,0.08,0.225,0.03])
        self.sliderz = DiscreteSlider(self.sliderax3,'',0,len(self.z)-1,increment=1,valinit=0)
        self.sliderz.on_changed(self.update_z)
        self.sliderz.set_val(0)
        # Keep the data aspect ratios of the three views consistent.
        z0,z1 = self.ax1.get_xlim()
        x0,x1 = self.ax2.get_ylim()
        y0,y1 = self.ax1.get_ylim()
        self.ax1.set_aspect((z1-z0)/(y1-y0))
        self.ax2.set_aspect((z1-z0)/(x1-x0))
        self.ax3.set_aspect((x1-x0)/(y1-y0))
    def xlabel(self,*args,**kwargs):
        # x appears on the y-axis of the ax2 view and x-axis of ax3.
        self.ax2.set_ylabel(*args,**kwargs)
        self.ax3.set_xlabel(*args,**kwargs)
    def ylabel(self,*args,**kwargs):
        self.ax1.set_ylabel(*args,**kwargs)
        self.ax3.set_ylabel(*args,**kwargs)
    def zlabel(self,*args,**kwargs):
        self.ax1.set_xlabel(*args,**kwargs)
        self.ax2.set_xlabel(*args,**kwargs)
    def update_x(self,value):
        # Slider callback: show slice x=value and move the crosshairs.
        self.xslice.set_data(self.data[value,:,:])
        self.yplot_xline.set_ydata(self.x[value])
        self.zplot_xline.set_xdata(self.x[value])
    def update_y(self,value):
        self.yslice.set_data(self.data[:,value,:])
        self.xplot_yline.set_ydata(self.y[value])
        self.zplot_yline.set_ydata(self.y[value])
    def update_z(self,value):
        self.zslice.set_data(self.data[:,:,value])
        self.xplot_zline.set_xdata(self.z[value])
        self.yplot_zline.set_xdata(self.z[value])
    def show(self):
        plt.show()
#if __name__ == '__main__':
#
# # Number of x-grid points
# nx = 100
#
# # Number of
# ny = 100
# nz = 200
#
# x = np.linspace(-4,4,nx)
# y = np.linspace(-4,4,ny)
# z = np.linspace(0,8,nz)
#
# xx,yy,zz = meshgrid3(x,y,z)
#
## result =
# # Display three cross sections of a Gaussian Beam/Paraxial wave
# u = np.real(np.exp(-(2*xx**2+yy**2)/(.2+2j*zz))/np.sqrt(.2+2j*zz))
# v = np.real(np.exp(-(3*xx**2+yy**2)/(.3+2j*zz))/np.sqrt(.5+2j*zz))
# w = np.real(np.exp(-(4*xx**2+yy**2)/(.3+2j*zz))/np.sqrt(.6+2j*zz))
# result = np.ones_like(u).tolist()
# for index,x in np.ndenumerate(u):
# result[index[0]][index[1]][index[2]] = (u[index[0]][index[1]][index[2]], v[index[0]][index[1]][index[2]], w[index[0]][index[1]][index[2]])
## print u.shape
## print u
# print np.asarray(result).shape
# s3 = slice3(xx,yy,zz,np.asarray(result))
# s3.xlabel('x',fontsize=18)
# s3.ylabel('y',fontsize=18)
# s3.zlabel('z',fontsize=18)
#
#
# s3.show()
|
[
"xlei38@gatech.edu"
] |
xlei38@gatech.edu
|
b5d120899fc051df048d8bcf9537e1aa5cfe2f87
|
aa69f3a171e44eb245dd18739c2691b35c31eee7
|
/sort.py
|
a8a478a78633497ed9153bd8cca4b59ebb904245
|
[] |
no_license
|
marjoriehoegen/sort_log
|
094ed8b3bc7cfa54ee4bf67b9f6d235be70f899b
|
29238c90089113b60e05da90a199c7544057d50f
|
refs/heads/master
| 2020-03-29T20:58:16.170094
| 2018-09-26T00:00:30
| 2018-09-26T00:00:30
| 150,341,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
import os
import glob
from heapq import merge
from itertools import count, islice
from contextlib import ExitStack

# External merge sort: break the big log into sorted 20k-line chunk files,
# then k-way merge the chunks into one fully sorted output file.
chunk_paths = []

with open('huge_dummy.log') as source:
    for chunk_no in count(1):
        # Pull the next 20k lines (fewer at EOF) and sort them in memory.
        chunk = sorted(islice(source, 20000))
        if not chunk:
            # End of input reached.
            break
        # Write this sorted chunk to its own temporary file.
        path = 'filename_{}.chk'.format(chunk_no)
        chunk_paths.append(path)
        with open(path, 'w') as chunk_file:
            chunk_file.writelines(chunk)

# Merge all sorted chunk files; ExitStack keeps every reader open at once.
with ExitStack() as stack, open('output.txt', 'w') as merged:
    readers = [stack.enter_context(open(path)) for path in chunk_paths]
    merged.writelines(merge(*readers))

# Remove the temporary chunk files.
for f in glob.glob("filename_*.chk"):
    os.remove(f)
|
[
"marjoriehoegen@gmail.com"
] |
marjoriehoegen@gmail.com
|
cb1794051e60b1efd05fe551d9a82d1b0d4a48a6
|
3a90b0336d257d78108438196716d5a74569fa71
|
/api/meeting_room.py
|
1b38a4be32475d8a6fb3150dac3ab18ca62a8b39
|
[] |
no_license
|
jiagejiayou/wwork_api_interface_test
|
bff0bcb264f54d0f044322363653b6b51a288e83
|
412f0d3bc83fb732c03de9d4c411c9f0760b2034
|
refs/heads/master
| 2023-01-11T21:51:35.896528
| 2020-11-11T03:58:53
| 2020-11-11T03:58:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
# @Author : TongTong
from api.base_api import BaseApi
from api.wework import Wework
from common.config import cf
class MeetingRoom(BaseApi):
    """
    Meeting-room API class.

    test_token: an access token (NOTE(review): fetched through a live
        network call at class-definition time; kept here only for ad-hoc
        testing)
    api_path: relative path of the yml API definition file
    """
    # For testing only; it does not have to live on the class.
    test_token = Wework().get_token(cf.get_key("wwork", "meeting_room_secret"))
    # Shortens the path passed to send_api_data so each call need not repeat it.
    api_path = "data/meeting_room/meeting_room_api.yml"
    # Add a meeting room.
    def add_meeting_room(self, token, name, capacity, city, building, floor, equipment):
        # p_data: values substituted into the request Template for this call.
        p_data = {"ip": self.ip, "token": token, "name": name, "capacity": capacity, "city": city, "building": building,
                  "floor": floor, "equipment": equipment}
        # Obtain the response; send_api_data wraps several layers of plumbing.
        res = self.send_api_data(self.api_path, p_data, "add")
        return res
    # Edit an existing meeting room.
    def edit_meeting_room(self, token, meetingroom_id, name, capacity, city, building, floor, equipment):
        p_data = {"ip": self.ip, "token": token, "meetingroom_id": meetingroom_id, "name": name, "capacity": capacity,
                  "city": city, "building": building, "floor": floor, "equipment": equipment}
        res = self.send_api_data(self.api_path, p_data, "edit")
        return res
    # Delete a meeting room.
    def delete_meeting_room(self, token, meetingroom_id):
        p_data = {"ip": self.ip, "token": token, "meetingroom_id": meetingroom_id}
        res = self.send_api_data(self.api_path, p_data, "delete")
        return res
    # Query meeting rooms matching the given filters.
    def get_meeting_room(self, token, city, building, floor, equipment):
        p_data = {"ip": self.ip, "token": token, "city": city, "building": building, "floor": floor,
                  "equipment": equipment}
        res = self.send_api_data(self.api_path, p_data, "get")
        return res
if __name__ == "__main__":
    # Ad-hoc manual smoke test; only runs when the module is executed directly.
    a = MeetingRoom()
    print(a.get_meeting_room(a.test_token, None, None, None, None))
    # print(a.test_token)
    # print(a.add_meeting_room(a.test_token,"a",20,"c","d","e",[1,2]))
    # print(a.edit_meeting_room(a.test_token,1,None,None,None,None,None,None))
    # print(a.delete_meeting_room(a.test_token,1))
    # print(a.add_meeting_room(a.test_token,"ab",11,None,None,None,None))
    # print(a.load_yaml("data/meeting_room/1meeting_room_api.yml")["add"])
|
[
"376230095@qq.com"
] |
376230095@qq.com
|
a61fccbe20617a3299561785e06bcc602feb0a7e
|
6e6d19c33c258c00a4e7dcf3a73f0008478d5d4a
|
/year.py
|
d27ef171c35d70fd621a0608f0efc44821dc8cde
|
[] |
no_license
|
barrven/python-expenseTracker
|
e037a99ad08902c91a6aaae9b8257c0a137239f9
|
344accc063a61e6b96aca401d8197f27a21a53d2
|
refs/heads/master
| 2022-08-20T05:50:17.641106
| 2022-08-14T00:00:29
| 2022-08-14T00:00:29
| 262,703,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,540
|
py
|
##########################################
# Barrington Venables #
# 101189284 #
# comp2152 assignment - Expense manager #
##########################################
class Year:
    """One calendar year of expense data, backed by *database*.

    Caches the year's Month objects in ``self.months`` and refreshes the
    cache whenever a month is added through this object.
    """
    def __init__(self, yearNum, database):
        self.yearNum = yearNum
        self.database = database
        self.months = database.getMonths(self.yearNum) # list of month objects
    def getCatAvg(self, category):
        """Average of *category* across all months on record.

        NOTE(review): divides by the month count, so this raises
        ZeroDivisionError when no months exist.  The local name ``sum``
        also shadows the builtin within this method.
        """
        count = 0
        sum = 0
        for month in self.months:
            count += 1
            sum += month.getCategory(category)
        return sum / count
    def addMonth(self, number, rent, groceries, utilities, transit, shopping, entertainment):
        """Persist a month with the six fixed categories; True on success."""
        success = self.database.addMonthToDb( # addMonthToDb returns a boolean
            self.yearNum,
            number,
            rent,
            groceries,
            utilities,
            transit,
            shopping,
            entertainment
        )
        if success:
            # Refresh the cache so it reflects the database contents.
            self.months = self.database.getMonths(self.yearNum)
            return True
        return False
    def addMonth_flex(self, month_num, categories_list):
        """Persist a month from a flexible category list; True on success."""
        success = self.database.addMonthToDb_flex(self.yearNum, month_num, categories_list)
        if success:
            self.months = self.database.getMonths(self.yearNum)
            return True
        return False
    def getMonthByNumber(self, monthNum):
        """Return the cached Month whose number is *monthNum*, or None."""
        for month in self.months:
            if month.number == monthNum:
                return month
        return None
    def getMonthReport(self, monthNum):
        """Build a plain-text expense report for one month, or a
        'No month data found' message when the month is absent."""
        m = self.getMonthByNumber(monthNum)
        if m is None:
            return 'No month data found'
        # setup the items needed to compile the report
        s = ''
        catNames = ['rent', 'groceries', 'utilities', 'transit', 'shopping', 'entertainment']
        catTitles = ['Rent: ', 'Groceries: ', 'Utilities: ', 'Transit: ', 'Shopping: ', 'Entertainment: ']
        # report header
        s += '\n# ' + m.getCategory('monthName') + ' Expense Report #\n'
        s += pad('|Category ', 16) + pad('|Amount ', 15) + pad('|Year Avg ', 15) + pad('|% of total ', 5) + '\n'
        # get the data for each category: |title|amt|yrAvg|percentTtl|
        for i in range(len(catNames)):
            current = catNames[i]
            s += pad(catTitles[i], 16, '.') # number arg is column width
            s += pad('$' + customFormat(m.getCategory(current)), 15, '.') # expense amount
            s += pad('$' + customFormat(self.getCatAvg(current)), 15, '.') # average
            s += pad(customFormat(m.getCategoryPercent(current)), 5)
            s += '\n'
        total = m.getCategory('totalExpenses')
        avgTotalForYear = self.getCatAvg('totalExpenses')
        s += 'Total Expenses: $' + customFormat(total) + '\n'
        # Compare this month's total against the year's average total.
        if total < avgTotalForYear:
            s += 'This month is below average for the year ($' + customFormat(avgTotalForYear) + ')'
        elif total > avgTotalForYear:
            s += 'This month is above average for the year ($' + customFormat(avgTotalForYear) + ')'
        else:
            s += 'This month is average for the year ($' + customFormat(avgTotalForYear) + ')'
        return s
    def getMonthData(self, monthNum):
        """Return report rows as [[title, amount, year avg, percent], ...];
        an empty [[]] when the month is absent."""
        m = self.getMonthByNumber(monthNum)
        if m is None:
            return [[]]
        catNames = ['rent', 'groceries', 'utilities', 'transit', 'shopping', 'entertainment']
        catTitles = ['Rent: ', 'Groceries: ', 'Utilities: ', 'Transit: ', 'Shopping: ', 'Entertainment: ']
        data = []
        for i in range(len(catNames)):
            current = catNames[i]
            data_row = [
                catTitles[i], # number arg is column width
                '$' + customFormat(m.getCategory(current)), # expense amount
                '$' + customFormat(self.getCatAvg(current)), # average
                customFormat(m.getCategoryPercent(current))
            ]
            data.append(data_row)
        return data
    def getEmptyMonths(self):
        """Tuple of month numbers (1-12) that have no data yet."""
        month_nums = []
        for month in self.months:
            month_nums.append(month.number)
        empty_months = []
        for i in range(1, 13):
            if i in month_nums:
                continue
            else:
                empty_months.append(i)
        return tuple(empty_months)
    # returns a list of strings or list of integers
    # based on types in the list passed to it.
    def switchMonthStringsAndNums(self, stringOrNums):
        """Element-wise: map month names to numbers and numbers to names."""
        strings = {
            'January' : 1,
            'February' : 2,
            'March' : 3,
            'April' : 4,
            'May' : 5,
            'June': 6,
            'July' : 7,
            'August' : 8,
            'September' : 9,
            'October' : 10,
            'November' : 11,
            'December' : 12
        }
        nums = {
            '1' : 'January',
            '2' : 'February',
            '3' : 'March',
            '4' : 'April',
            '5' : 'May',
            '6' : 'June',
            '7' : 'July',
            '8' : 'August',
            '9' : 'September',
            '10' : 'October',
            '11' : 'November',
            '12' : 'December'
        }
        output = []
        # check if the list has strings or nums
        # build list of corresponding strings or nums
        for item in stringOrNums:
            if type(item) is int:
                output.append(nums[str(item)])
            else:
                output.append(strings[item])
        return output
    # returns an integer
    def switchMonthStringToInt(self, monthString):
        """Month name -> month number (1-12); KeyError on unknown names."""
        strings = {
            'January' : 1,
            'February' : 2,
            'March' : 3,
            'April' : 4,
            'May' : 5,
            'June': 6,
            'July' : 7,
            'August' : 8,
            'September' : 9,
            'October' : 10,
            'November' : 11,
            'December' : 12
        }
        return strings[monthString]
    def getMonthNamesList(self):
        """Names of the months currently on record."""
        monthNames = []
        for month in self.months:
            monthNames.append(month.monthName)
        return monthNames
    def getTotalMonthExpenses(self, monthNum):
        """Total expenses for *monthNum*.

        NOTE(review): raises AttributeError when the month is missing,
        because getMonthByNumber returns None in that case.
        """
        total = self.getMonthByNumber(monthNum).getCategory('totalExpenses')
        return total
# Static Functions #
def customFormat(amt):
    """Format *amt* as a money string with thousands separators, e.g. 1,234.50."""
    value = float(amt)
    return f"{value:0,.2f}"
def pad(string, width, padChar=' '):
    """Left-justify *string* to *width* columns, filling with *padChar*."""
    padded = string.ljust(width, padChar)
    return padded
|
[
"barrven@users.noreply.github.com"
] |
barrven@users.noreply.github.com
|
bb623e9fb3625ae7527176a57f20d61868e658e2
|
b464533745d09720752cb05dd9afc5670e1133fe
|
/Puzzle 15.py
|
a804284d939e33ae862476f5a73723bd76b175ee
|
[] |
no_license
|
Jgusbc/Board-Games
|
99d2a3486c9450557b04403b779268a28bc04e01
|
e362c3ec71facd9c77a09a03cd5c6d896906c162
|
refs/heads/main
| 2022-12-30T23:29:13.010234
| 2020-10-23T03:37:15
| 2020-10-23T03:37:15
| 300,472,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,767
|
py
|
"""
Autores del código
José Gustavo Buenaventura Carreón
César Armando Lara Liceaga
"""
import random
import math
import os
#Crear matriz aletoria.
def matriz_aleatoria():
    """Return a random 4x4 board holding the tiles 1-15 plus one empty cell ''."""
    fichas = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, '']
    random.shuffle(fichas)
    # Slice the shuffled flat list into four rows of four cells each.
    return [fichas[fila * 4:fila * 4 + 4] for fila in range(4)]
#Poner una matriz predeterminada.
def matriz_escogida():
    """Read a user-chosen 4x4 board from stdin.

    Keeps prompting until exactly 16 distinct valid tokens ('0'-'15') are
    entered; '0' becomes the empty cell ''.  Returns the 4x4 board.
    """
    nums=[]
    # Re-prompt until 16 distinct, valid tokens survive the filter below.
    while len(nums)!=16:
        nums = input('Teclea los números separados por espacios (y el vació con un 0): ')
        nums = nums.split()
        dummy_nums = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','0']
        temp1 = []
        for i in nums:
            if (i not in temp1 and i in dummy_nums):
                temp1.append(i)
            else:
                # Duplicate or out-of-range token; it is dropped, which makes
                # the outer while loop ask again.
                print('Hay un número repetido o no disponible')
        nums = temp1
    # Convert tokens in place: '0' -> empty cell, others -> int.
    # NOTE(review): relies on all tokens being unique so nums.index(i)
    # finds the position currently being visited.
    for i in nums:
        if i == '0':
            nums[nums.index(i)] = ""
        else:
            nums[nums.index(i)] = int(i)
    assert len(nums) == 16
    # Reshape the flat list into a 4x4 board.
    matriz = [[],[],[],[]]
    temp = 0
    for i in range(4):
        for j in range(4):
            matriz[i].append(nums[temp])
            temp+= 1
    return matriz
#Enseñar el tablero.
def print_tablero(matriz):
    """Print the board: each cell left-justified to 5 columns, one row per
    line, with a blank line between rows."""
    for fila in matriz:
        linea = ' '.join(str(celda).ljust(5) for celda in fila)
        print(linea + ' ')
        print()
# Candidate winning layouts the player can choose from.
respuestaNormal = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,""]]
respuestaInversa = [["",15,14,13],[12,11,10,9],[8,7,6,5],[4,3,2,1]]
respuestaEspiral = [[1,2,3,4],[12,13,14,5],[11,15,"",6],[10,9,8,7]]
respuestaEspiralInversa = [[4,3,2,1],[5,14,13,12],[6,15,"",11],[7,8,9,10]]
respuestaVerticalNormal = [[1,5,9,13],[2,6,10,14],[3,7,11,15],[4,8,12,""]]
respuestaVerticalInversa = [["",12,8,4],[15,11,7,3],[14,10,6,2],[13,9,5,1]]
# Mode codes accepted for the layouts above.
respuestas=['n','i','e','ei','vn','vi']
#Encontrar la casilla de un número dado.
def buscar_casillas(casilla, tablero):
    """Return the (row, col) position of *casilla* on *tablero*, or None.

    Uses enumerate instead of the previous list.index() lookups, which
    returned the coordinates of the first duplicate when a row or value
    appeared more than once.
    """
    for fila_idx, fila in enumerate(tablero):
        for col_idx, valor in enumerate(fila):
            if valor == casilla:
                return (fila_idx, col_idx)
    return None
#Distancia entre 2 casillas.
def distance(p0, p1):
    """Euclidean distance between two (row, col) positions."""
    return math.hypot(p0[0] - p1[0], p0[1] - p1[1])
#Realizar el movimiento.
def movimiento(ficha, tablero):
    """Swap *ficha* with the empty cell when they are orthogonally adjacent.

    Returns the board after a valid move; prints a message and returns
    None when the move is not possible.
    """
    origen = buscar_casillas(ficha, tablero)
    hueco = buscar_casillas('', tablero)
    if distance(origen, hueco) == 1:
        fo, co = origen
        fh, ch = hueco
        tablero[fo][co], tablero[fh][ch] = tablero[fh][ch], tablero[fo][co]
        return tablero
    print('No es un movimiento posible')
#main
def juego():
    """Run the interactive 15-puzzle game loop.

    Asks for a board (random or user-supplied) and a target layout, then
    reads tile numbers until the board matches the target or the player
    enters 0 to quit.
    """
    count = 0
    modo = ''
    tipo = ''
    print('En caso de querer interrumpir el juego, ingrese 0.')
    # NOTE: the condition below is a tautology (always True); the loop
    # exits via break once a valid choice is entered.
    while modo != "a" or modo != "p":
        modo = input('¿Quieres un tablero aleatorio(a) o uno predeterminado(p)? ')
        if modo == "a":
            matriz = matriz_aleatoria()
            break
        elif modo == "p":
            matriz = matriz_escogida()
            break
    # Keep asking until a recognised mode code is entered.
    while tipo not in respuestas:
        tipo = input('¿Cual tipo de juego quiere jugar? \nNormal (n) \nInverso (i) \nEspiral (e) \nEspiral Inverso (ei)\nVertical (vn)\nVertical inverso(vi)\n')
    print_tablero(matriz)
    if tipo == 'n':
        solucion = respuestaNormal
    elif tipo == 'i':
        solucion = respuestaInversa
    elif tipo == 'e':
        solucion = respuestaEspiral
    elif tipo == 'ei':
        solucion = respuestaEspiralInversa
    elif tipo == 'vn':
        # Fixed: the accepted code is 'vn' (see `respuestas`); the previous
        # test for 'v' could never match, leaving `solucion` unbound and
        # crashing the loop below.  The menu text was updated to match.
        solucion = respuestaVerticalNormal
    elif tipo == 'vi':
        solucion = respuestaVerticalInversa
    while matriz != solucion:
        ficha = int(input('Seleccione la tecla que quiere mover: '))
        if ficha == 0:
            print('Gracias por jugar')
            break
        else:
            os.system('cls')  # clear the console (Windows-specific command)
            movimiento(ficha, matriz)
            print_tablero(matriz)
            count += 1
    if matriz == solucion:
        print_tablero(matriz)
        print("Felicidades lo has resuelto en " + str(count) + " movimientos.")
juego()
"""
Caso de prueba
tablero=[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,'',15]]
intercambiar 15 con ''
función movimiento(ficha, tablero)
se encuentra la ficha con la función buscar_casillas
se intercambia
tablero=[[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,'']]
tablero==soluciónote
se termina el juego
Función movimientos
Dos casos
Fichas adyacentes
Fichas no adyacentes
Todas las fichas adyacentes están a una distancia 1
Si la ficha no está a distancia 1, no es posible moverla
Solo intercambiar fichas si distancia=1
para evitar que el usuario de inputs no deseados, se usaron while loops hasta que el usuario de el input correcto
"""
|
[
"noreply@github.com"
] |
noreply@github.com
|
2c4021079c1f87e0901a6b63792636763cca4222
|
71fbc701cf090716b213b4025e0d96e73f452aa1
|
/thonny/plugins/micropython/bare_metal_backend.py
|
e2c05ea6a3bf2167e2930a0585828115101b1047
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
bosige/thonny
|
678f3ab05185dbead41b0a04c2db025834008fc9
|
78317670d1ec8e8cd39f4bb2e5c6a2927fedd7b3
|
refs/heads/master
| 2022-12-11T12:25:49.484079
| 2020-09-16T12:48:55
| 2020-09-16T12:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42,084
|
py
|
import binascii
import datetime
import logging
import os
import queue
import re
import sys
import time
from _ast import Not
from textwrap import dedent, indent
from typing import BinaryIO, Callable, Optional, Tuple, Union
from thonny.backend import UploadDownloadMixin
from thonny.common import (
BackendEvent,
InlineResponse,
ToplevelResponse,
UserError,
execute_system_command,
serialize_message,
)
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
from thonny.plugins.micropython.backend import (
WAIT_OR_CRASH_TIMEOUT,
MicroPythonBackend,
ManagementError,
ReadOnlyFilesystemError,
_report_internal_error,
ends_overlap,
unix_dirname_basename,
Y2000_EPOCH_OFFSET,
)
from thonny.common import ConnectionFailedException
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because reader thread reads continuously
# and writer (SerialConnection) has it's own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands
RAW_MODE_CMD = b"\x01"
NORMAL_MODE_CMD = b"\x02"
INTERRUPT_CMD = b"\x03"
SOFT_REBOOT_CMD = b"\x04"
# Output tokens
VALUE_REPR_START = b"<repr>"
VALUE_REPR_END = b"</repr>"
STX = b"\x02"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
# https://forum.micropython.org/viewtopic.php?f=12&t=7652&hilit=w600#p43640
W600_FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\r\n>"
FIRST_RAW_PROMPT_SUFFIX = b"\r\n>"
RAW_PROMPT = b">"
FALLBACK_BUILTIN_MODULES = [
"cmath",
"gc",
"math",
"sys",
"array",
# "binascii", # don't include it, as it may give false signal for reader/writer
"collections",
"errno",
"hashlib",
"heapq",
"io",
"json",
"os",
"re",
"select",
"socket",
"ssl",
"struct",
"time",
"zlib",
"_thread",
"btree",
"framebuf",
"machine",
"micropython",
"network",
"bluetooth",
"cryptolib",
"ctypes",
"pyb",
"esp",
"esp32",
]
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
    """Debug tracing hook — intentionally a no-op.

    Re-enable by removing the early return (the print below is kept for
    that purpose).
    """
    return
    # print(msg, file=sys.stderr)
class BareMetalMicroPythonBackend(MicroPythonBackend, UploadDownloadMixin):
    def __init__(self, connection, clean, args):
        """Record the device connection and state flags, then run the
        base-class initialisation (after our attributes exist)."""
        self._connection = connection
        self._startup_time = time.time()
        # Presumably tracks whether the "press Ctrl+C" hint was already
        # shown to the user — TODO confirm where it is consumed.
        self._interrupt_suggestion_given = False
        self._raw_prompt_ensured = False
        MicroPythonBackend.__init__(self, clean, args)
def _get_custom_helpers(self):
return dedent(
"""
@classmethod
def getcwd(cls):
if hasattr(cls, "getcwd"):
return cls.os.getcwd()
else:
# micro:bit
return ""
@classmethod
def chdir(cls, x):
return cls.os.chdir(x)
@classmethod
def rmdir(cls, x):
return cls.os.rmdir(x)
"""
)
    def _process_until_initial_prompt(self, clean):
        """Drive the device to its first raw prompt, forwarding any output
        produced on the way to the frontend."""
        if clean:
            # Interrupt whatever is running and force a raw prompt.
            self._interrupt_to_raw_prompt()
        else:
            # Order first raw prompt to be output when the code is done.
            # If the device is already at raw prompt then it gets repeated as first raw prompt.
            # If it is at normal prompt then outputs first raw prompt
            self._connection.write(RAW_MODE_CMD)
            self._forward_output_until_active_prompt(self._send_output)
    def _fetch_welcome_text(self) -> str:
        """Switch the device to normal REPL mode and return its banner text."""
        self._connection.write(NORMAL_MODE_CMD)
        self._raw_prompt_ensured = False
        welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
        if os.name != "nt":
            # On POSIX hosts, normalize the device's CRLF line endings.
            welcome_text = welcome_text.replace(b"\r\n", b"\n")
        return self._decode(welcome_text)
def _fetch_builtin_modules(self):
script = "help('modules')"
out, err = self._execute(script, capture_output=True)
if err or not out:
self._send_error_message(
"Could not query builtin modules. Code completion may not work properly."
)
return FALLBACK_BUILTIN_MODULES
modules_str_lines = out.strip().splitlines()
last_line = modules_str_lines[-1].strip()
if last_line.count(" ") > 0 and " " not in last_line and "\t" not in last_line:
# probably something like "plus any modules on the filesystem"
# (can be in different languages)
modules_str_lines = modules_str_lines[:-1]
modules_str = (
" ".join(modules_str_lines)
.replace("/__init__", "")
.replace("__main__", "")
.replace("/", ".")
)
return modules_str.split()
def _resolve_unknown_epoch(self) -> int:
if self._connected_to_circuitpython() or self._connected_to_pycom():
return 1970
else:
return 2000
    def _sync_time(self):
        """Sets the time on the pyboard to match the time on the host.

        Builds a device-specific snippet (CircuitPython vs generic
        MicroPython), wraps it so device-side errors come back as strings,
        runs it on the board, and prints a warning on failure.
        """
        # RTC works on UTC
        now = datetime.datetime.now(tz=datetime.timezone.utc).timetuple()
        if self._connected_to_microbit():
            # micro:bit has no RTC to set.
            return
        elif self._connected_to_circuitpython():
            specific_script = dedent(
                """
                from rtc import RTC as __thonny_RTC
                __thonny_RTC().datetime = {ts}
                del __thonny_RTC
                """
            ).format(ts=tuple(now))
        else:
            # Generic MicroPython: RTC().datetime(...) takes the weekday at
            # index 3; fall back to RTC().init(...) where datetime() fails.
            specific_script = dedent(
                """
                from machine import RTC as __thonny_RTC
                try:
                    __thonny_RTC().datetime({datetime_ts})
                except:
                    __thonny_RTC().init({init_ts})
                del __thonny_RTC
                """
            ).format(
                datetime_ts=(
                    now.tm_year,
                    now.tm_mon,
                    now.tm_mday,
                    now.tm_wday + 1,
                    now.tm_hour,
                    now.tm_min,
                    now.tm_sec,
                    0,
                ),
                init_ts=tuple(now)[:6] + (0, 0),
            )
        # Wrap the snippet so failures are reported as a string, not a crash.
        # NOTE(review): the indent unit below looks like it should be four
        # spaces to nest under the try: — verify against upstream (possible
        # whitespace loss in this copy).
        script = (
            dedent(
                """
                try:
                %s
                    __thonny_helper.print_mgmt_value(True)
                except Exception as e:
                    __thonny_helper.print_mgmt_value(str(e))
                """
            )
            % indent(specific_script, " ")
        )
        val = self._evaluate(script)
        if isinstance(val, str):
            print("WARNING: Could not sync device's clock: " + val)
    def _get_utc_timetuple_from_device(self) -> Union[tuple, str]:
        """Read the device RTC as a (y, m, d, h, min, s) tuple in UTC.

        Returns an explanatory string instead of a tuple when the device has
        no RTC or the query fails on the device.
        """
        if self._connected_to_microbit():
            return "This device does not have a real-time clock"
        elif self._connected_to_circuitpython():
            specific_script = dedent(
                """
                from rtc import RTC as __thonny_RTC
                __thonny_helper.print_mgmt_value(tuple(__thonny_RTC().datetime)[:6])
                del __thonny_RTC
            """
            )
        else:
            specific_script = dedent(
                """
                from machine import RTC as __thonny_RTC
                try:
                    # now() on some devices also gives weekday, so prefer datetime
                    __thonny_temp = tuple(__thonny_RTC().datetime())
                    # remove weekday from index 3
                    __thonny_helper.print_mgmt_value(__thonny_temp[0:3] + __thonny_temp[4:7])
                    del __thonny_temp
                except:
                    __thonny_helper.print_mgmt_value(tuple(__thonny_RTC().now())[:6])
                del __thonny_RTC
            """
            )

        # Wrap so device-side exceptions come back as strings
        script = (
            dedent(
                """
            try:
            %s
            except Exception as e:
                __thonny_helper.print_mgmt_value(str(e))
            """
            )
            % indent(specific_script, "    ")
        )

        val = self._evaluate(script)
        return val
    def _get_actual_time_tuple_on_device(self):
        """Return the device's current local time as a timetuple-like tuple.

        Prefers time.localtime; falls back to rtc.RTC().datetime on
        CircuitPython boards lacking it. On failure returns the exception
        message as a string.
        """
        script = dedent(
            """
            try:
                try:
                    from time import localtime as __thonny_localtime
                    __thonny_helper.print_mgmt_value(tuple(__thonny_localtime()))
                    del __thonny_localtime
                except:
                    # some CP boards
                    from rtc import RTC as __thonny_RTC
                    __thonny_helper.print_mgmt_value(tuple(__thonny_RTC().datetime))
                    del __thonny_RTC
            except Exception as e:
                __thonny_helper.print_mgmt_value(str(e))
            """
        )

        return self._evaluate(script)
    def _update_cwd(self):
        """Refresh the cached working directory; micro:bit has no directories."""
        if self._connected_to_microbit():
            self._cwd = ""
        else:
            super()._update_cwd()
def _interrupt_to_raw_prompt(self):
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
discarded_bytes = b""
for delay in [0.05, 0.5, 0.1, 1.0, 3.0, 5.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
if delay >= 1:
self._show_error(
"Could not enter REPL. Trying again with %d second waiting time..." % delay
)
self._connection.reset_output_buffer() # cancels previous writes
self._connection.write(INTERRUPT_CMD)
self._connection.write(RAW_MODE_CMD)
time.sleep(delay)
discarded_bytes += self._connection.read_all()
if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(
W600_FIRST_RAW_PROMPT
):
self._soft_reboot_after_interrupting_to_raw_prompt()
self._raw_prompt_ensured = True
break
else:
max_tail_length = 500
if len(discarded_bytes) > max_tail_length:
discarded_bytes_str = (
"[skipping %d bytes] ..." % (len(discarded_bytes) - max_tail_length)
) + repr(discarded_bytes[:-max_tail_length])
else:
discarded_bytes_str = repr(discarded_bytes)
self._show_error(
"Could not enter REPL. Giving up. Read bytes:\n"
+ discarded_bytes_str
+ "\n\nYour options:\n\n"
+ " - check connection properties;\n"
+ " - make sure the device has suitable firmware;\n"
+ " - make sure the device is not in bootloader mode;\n"
+ " - reset the device and try again;\n"
+ " - try other serial clients (Putty, TeraTerm, screen, ...);\n"
+ " - ask for help in Thonny's forum or issue tracker."
)
sys.exit()
    def _soft_reboot_after_interrupting_to_raw_prompt(self):
        """Soft-reboot while already at the raw prompt, staying in raw mode."""
        self._connection.write(SOFT_REBOOT_CMD)

        # CP runs code.py after soft-reboot even in raw repl, so I'll send some Ctrl-C to intervene
        # # (they don't do anything in raw repl)
        self._connection.write(INTERRUPT_CMD)
        self._connection.write(INTERRUPT_CMD)

        output = self._connection.soft_read_until(FIRST_RAW_PROMPT, timeout=2)
        if not output.endswith(FIRST_RAW_PROMPT):
            self._show_error("Could not soft-reboot after reaching raw prompt. Got %s" % output)
    def _soft_reboot(self):
        """Soft-reboot from normal REPL, forward boot output, return to raw prompt."""
        # Need to go to normal mode. MP doesn't run user code in raw mode
        # (CP does, but it doesn't hurt to do it there as well)
        self._connection.write(NORMAL_MODE_CMD)
        self._raw_prompt_ensured = False
        self._connection.read_until(NORMAL_PROMPT)

        self._connection.write(SOFT_REBOOT_CMD)
        # stream the boot/main.py output to the front-end as it arrives
        self._forward_output_until_active_prompt(self._send_output)
        self._ensure_raw_prompt()
        self.send_message(ToplevelResponse(cwd=self._cwd))
def _transform_output(self, data, stream_name):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
    def _write(self, data):
        # Forward raw bytes to the underlying (serial/WebREPL) connection.
        self._connection.write(data)
    def _submit_input(self, cdata: str) -> None:
        """Send user stdin to the device and consume the terminal echo."""
        # TODO: what if there is a previous unused data waiting
        assert self._connection.outgoing_is_empty()

        assert cdata.endswith("\n")
        if not cdata.endswith("\r\n"):
            # submission is done with CRLF
            cdata = cdata[:-1] + "\r\n"

        bdata = cdata.encode(ENCODING)

        with self._interrupt_lock:
            self._write(bdata)
            # Try to consume the echo
            try:
                echo = self._connection.read(len(bdata))
            except queue.Empty:
                # leave it.
                logging.warning("Timeout when reading input echo")
                return

        if echo != bdata:
            # because of autoreload? timing problems? interruption?
            # Leave it.
            logging.warning("Unexpected echo. Expected %r, got %r" % (bdata, echo))
            self._connection.unread(echo)
    def _submit_code(self, script):
        """Send *script* to the raw REPL and verify the device acknowledged it."""
        assert script  # otherwise EOT produces soft reboot

        # assuming we are already in a prompt
        self._forward_unexpected_output()
        self._ensure_raw_prompt()

        # send command
        with self._interrupt_lock:
            self._connection.write(script.encode(ENCODING) + EOT)
            debug("Wrote " + script + "\n--------\n")

        # fetch command confirmation
        confirmation = self._connection.soft_read(2, timeout=WAIT_OR_CRASH_TIMEOUT)

        if confirmation != OK:
            # collect whatever arrived instead, for the error report
            data = confirmation + self._connection.read_all()
            data += self._connection.read(1, timeout=1, timeout_is_soft=True)
            data += self._connection.read_all()
            self._report_internal_error(
                "Could not read command confirmation. Got " + repr(data) + "\n\nSCRIPT:\n" + script
            )
        else:
            debug("GOTOK")
    def _ensure_raw_prompt(self):
        """Switch the REPL to raw mode (idempotent) and restore helper state.

        Raises TimeoutError when the raw prompt does not appear in time.
        """
        if self._raw_prompt_ensured:
            return

        debug("Ensuring raw prompt")
        self._connection.write(RAW_MODE_CMD)

        prompt = (
            self._connection.read_until(
                FIRST_RAW_PROMPT_SUFFIX, timeout=WAIT_OR_CRASH_TIMEOUT, timeout_is_soft=True
            )
            + self._connection.read_all()
        )

        if not prompt.endswith(FIRST_RAW_PROMPT_SUFFIX):
            # forward whatever came instead, so the user sees it
            self._send_output(prompt, "stdout")
            raise TimeoutError("Could not ensure raw prompt")

        self._raw_prompt_ensured = True

        debug("Restoring helpers")
        # entering raw mode may follow a reset -- re-upload helper code
        self._prepare_helpers()
        self._update_cwd()
    def _execute_with_consumer(self, script, output_consumer: Callable[[str, str], None]):
        """Expected output after submitting the command and reading the confirmation is following:

            stdout
            EOT
            stderr
            EOT
            RAW_PROMPT
        """
        self._submit_code(script)

        terminator = self._forward_output_until_eot_or_active_propmt(output_consumer, "stdout")
        if terminator != EOT:
            # an unexpected prompt
            return

        terminator = self._forward_output_until_eot_or_active_propmt(output_consumer, "stderr")
        if terminator != EOT:
            # an unexpected prompt
            return

        data = self._connection.read(1) + self._connection.read_all()
        if data == RAW_PROMPT:
            # happy path
            self._raw_prompt_ensured = True
            return
        else:
            # something else followed the second EOT -- recover by forwarding
            # until an active prompt is seen again
            self._connection.unread(data)
            self._forward_output_until_active_prompt(output_consumer, "stdout")
    def _forward_output_until_active_prompt(
        self, output_consumer: Callable[[str, str], None], stream_name="stdout"
    ):
        """Used for finding initial prompt or forwarding problematic output
        in case of parse errors"""
        while True:
            terminator = self._forward_output_until_eot_or_active_propmt(
                output_consumer, stream_name
            )

            if terminator in (NORMAL_PROMPT, RAW_PROMPT, FIRST_RAW_PROMPT):
                self._raw_prompt_ensured = terminator in (RAW_PROMPT, FIRST_RAW_PROMPT)
                return terminator
            else:
                # stray EOT -- pass it through and keep scanning
                output_consumer(self._decode(terminator), "stdout")
    def _forward_output_until_eot_or_active_propmt(self, output_consumer, stream_name="stdout"):
        """Meant for incrementally forwarding stdout from user statements,
        scripts and soft-reboots. Also used for forwarding side-effect output from
        expression evaluations and for capturing help("modules") output.
        In these cases it is expected to arrive to an EOT.

        Also used for initial prompt searching or for recovering from a protocol error.
        In this case it must work until active normal prompt or first raw prompt.

        The code may have been submitted in any of the REPL modes or
        automatically via (soft-)reset.

        NB! The processing may end in normal mode even if the command was started
        in raw mode (eg. when user presses reset during processing in some devices)!

        The processing may also end in FIRST_RAW_REPL, when it was started in
        normal REPL and Ctrl+A was issued during processing (ie. before Ctrl+C in
        this example):

            6
            7
            8
            9
            10
            Traceback (most recent call last):
              File "main.py", line 5, in <module>
            KeyboardInterrupt:
            MicroPython v1.11-624-g210d05328 on 2019-12-09; ESP32 module with ESP32
            Type "help()" for more information.
            >>>
            raw REPL; CTRL-B to exit
            >

        (Preceding output does not contain EOT)
        Note that this Ctrl+A may have been issued even before Thonny connected to
        the device.

        Note that interrupt does not affect the structure of the output -- it is
        presented just like any other exception.

        The method returns EOT, RAW_PROMPT or NORMAL_PROMPT, depending on which terminator
        ended the processing.

        The terminating EOT may be either the first EOT from normal raw-REPL
        output or the starting EOT from Thonny expression (or, in principle, even
        the second raw-REPL EOT or terminating Thonny expression EOT)
        -- the caller will do the interpretation.

        Because of the special role of EOT and NORMAL_PROMPT, we assume user code
        will not output these. If it does, processing may break.
        It may succeed if the prompt is followed by something (quickly enough)
        -- that's why we look for *active* prompt, ie. prompt without following text.

        TODO: Experiment with this!

        Output produced by background threads (eg. in WiPy ESP32) cause even more difficulties,
        because it becomes impossible to say whether we are at prompt and output
        is from another thread or the main thread is still running.
        For now I'm ignoring these problems and assume all output comes from the main thread.
        """
        INCREMENTAL_OUTPUT_BLOCK_CLOSERS = re.compile(
            b"|".join(map(re.escape, [FIRST_RAW_PROMPT, NORMAL_PROMPT, LF, EOT]))
        )

        pending = b""
        while True:
            # There may be an input submission waiting
            # and we can't progress without resolving it first
            self._check_for_side_commands()

            # Prefer whole lines, but allow also incremental output to single line
            # Note that here I'm not looking for non-first raw prompt, because this
            # is always preceded by EOT.
            new_data = self._connection.soft_read_until(
                INCREMENTAL_OUTPUT_BLOCK_CLOSERS, timeout=0.05
            )
            if not new_data:
                # In case we are still waiting for the first bits after connecting ...
                # TODO: this suggestion should be implemented in Shell
                if (
                    self._connection.num_bytes_received == 0
                    and not self._interrupt_suggestion_given
                    and time.time() - self._startup_time > 1.5
                ):
                    self._show_error(
                        "\n"
                        + "Device is busy or does not respond. Your options:\n\n"
                        + " - wait until it completes current work;\n"
                        + " - use Ctrl+C to interrupt current work;\n"
                        + " - use Stop/Restart to interrupt more and enter REPL.\n"
                    )
                    self._interrupt_suggestion_given = True

                if not pending:
                    # nothing to parse
                    continue

            pending += new_data

            if pending.endswith(EOT):
                # everything before the EOT is regular output
                output_consumer(self._decode(pending[: -len(EOT)]), stream_name)
                return EOT

            elif pending.endswith(LF) and not pending.endswith(FIRST_RAW_PROMPT[:-1]):
                # a complete line that cannot be the start of a raw prompt
                output_consumer(self._decode(pending), stream_name)
                pending = b""

            elif pending.endswith(NORMAL_PROMPT) or pending.endswith(FIRST_RAW_PROMPT):
                # This looks like prompt.
                # Make sure it is not followed by anything.
                # Note that in this context the prompt usually means something is wrong
                # (EOT would have been the happy path), so no need to hurry.
                # The only case where this path is happy path is just after connecting.
                follow_up = self._connection.soft_read(1, timeout=0.5)
                if follow_up:
                    # Nope, the prompt is not active.
                    # (Actually it may be that a background thread has produced this follow up,
                    # but this would be too hard to consider.)
                    # Don't output yet, because the follow up may turn into another prompt
                    # and they can be captured all together.
                    self._connection.unread(follow_up)

                    # read prompt must remain in pending
                else:
                    # let's hope it is an active prompt
                    if pending.endswith(NORMAL_PROMPT):
                        terminator = NORMAL_PROMPT
                    else:
                        terminator = FIRST_RAW_PROMPT

                    # Strip all trailing prompts
                    out = pending
                    while True:
                        if out.endswith(NORMAL_PROMPT):
                            out = out[: -len(NORMAL_PROMPT)]
                        elif out.endswith(FIRST_RAW_PROMPT):
                            out = out[: -len(FIRST_RAW_PROMPT)]
                        else:
                            break
                    output_consumer(self._decode(out), stream_name)

                    return terminator

            elif ends_overlap(pending, NORMAL_PROMPT) or ends_overlap(pending, FIRST_RAW_PROMPT):
                # Maybe we have a prefix of the prompt and the rest is still coming?
                # (it's OK to wait a bit, as the user output usually ends with a newline, ie not
                # with a prompt prefix)
                follow_up = self._connection.soft_read(1, timeout=0.3)
                if not follow_up:
                    # most likely not a Python prompt, let's forget about it
                    output_consumer(self._decode(pending), stream_name)
                    pending = b""
                else:
                    # Let's try the possible prefix again in the next iteration
                    # (I'm unreading otherwise the read_until won't see the whole prompt
                    # and needs to wait for the timeout)
                    if ends_overlap(pending, NORMAL_PROMPT):
                        n = ends_overlap(pending, NORMAL_PROMPT)
                    else:
                        n = ends_overlap(pending, FIRST_RAW_PROMPT)
                    try_again = pending[-n:]
                    pending = pending[:-n]
                    self._connection.unread(try_again + follow_up)

            else:
                # No EOT or prompt in sight.
                # Output and keep working.
                output_consumer(self._decode(pending), stream_name)
                pending = b""
    def _forward_unexpected_output(self, stream_name="stdout"):
        "Invoked between commands"
        data = self._connection.read_all()
        if data:
            self._raw_prompt_ensured = data.endswith(FIRST_RAW_PROMPT)
            met_prompt = False
            while data.endswith(NORMAL_PROMPT) or data.endswith(FIRST_RAW_PROMPT):
                # looks like the device was reset
                met_prompt = True

                if data.endswith(NORMAL_PROMPT):
                    terminator = NORMAL_PROMPT
                else:
                    terminator = FIRST_RAW_PROMPT

                # hide the prompt from the output ...
                data = data[: -len(terminator)]

            self._send_output(data.decode(ENCODING, "replace"), stream_name)
            if met_prompt:
                # ... and recreate Thonny prompt
                self.send_message(ToplevelResponse())
    def _cmd_execute_system_command(self, cmd):
        """Run a system command on the host (not on the device)."""
        # Can't use stdin, because a thread is draining it
        execute_system_command(cmd, cwd=self._local_cwd, disconnect_stdin=True)
    def _cmd_get_fs_info(self, cmd):
        """Collect total/used/free byte counts for the device filesystem.

        Uses os.statvfs when the device has it; otherwise (e.g. micro:bit)
        sums individual file sizes and reports an explanatory comment instead.
        """
        result = self._evaluate(
            dedent(
                """
                try:
                    from os import statvfs as __thonny_statvfs
                    __thonny_stat = __thonny_statvfs(%r)
                    __thonny_total = __thonny_stat[2] * __thonny_stat[0]
                    __thonny_free = __thonny_stat[3] * __thonny_stat[0]
                    __thonny_used = __thonny_total - __thonny_free
                    __thonny_sizes = None
                    del __thonny_statvfs
                    del __thonny_stat
                except ImportError:
                    __thonny_sizes = [__thonny_helper.os.size(name) for name in __thonny_helper.listdir()]
                    __thonny_used = None
                    __thonny_total = None
                    __thonny_free = None

                __thonny_helper.print_mgmt_value({
                    "total" : __thonny_total,
                    "used" : __thonny_used,
                    "free": __thonny_free,
                    "sizes": __thonny_sizes
                })

                del __thonny_total
                del __thonny_free
                del __thonny_used
                del __thonny_sizes
                """
            )
            % cmd.path
        )

        if result["sizes"] is not None:
            # statvfs was unavailable -- report what we can infer from file sizes
            if self._connected_to_microbit():
                comment = "Assuming around 30 kB of storage space for user files."
            else:
                comment = "Don't know the size of storage space on this device."

            files_total_size = sum(result["sizes"])

            # TODO: compute number of used blocks
            if files_total_size > 0:
                comment += "\n\n" + "At least %s of it is used by %d file(s)." % (
                    sizeof_fmt(files_total_size),
                    len(result["sizes"]),
                )

            result["comment"] = comment
            del result["sizes"]

        return result
    def _cmd_upload(self, cmd):
        """Handle upload command; ensure clock sync first (see _check_sync_time)."""
        self._check_sync_time()
        return super(BareMetalMicroPythonBackend, self)._cmd_upload(cmd)
    def _cmd_write_file(self, cmd):
        """Handle write-file command; ensure clock sync first (see _check_sync_time)."""
        self._check_sync_time()
        return super(BareMetalMicroPythonBackend, self)._cmd_write_file(cmd)
def _delete_sorted_paths(self, paths):
if not self._supports_directories():
# micro:bit
self._execute_without_output(
dedent(
"""
for __thonny_path in %r:
__thonny_helper.os.remove(__thonny_path)
del __thonny_path
"""
)
% paths
)
else:
try:
super()._delete_sorted_paths(paths)
except Exception as e:
if "read-only" in str(e).lower():
self._delete_via_mount(paths)
self._sync_all_filesystems()
def _internal_path_to_mounted_path(self, path):
mount_path = self._get_fs_mount()
if mount_path is None:
return None
flash_prefix = self._get_flash_prefix()
if not path.startswith(flash_prefix):
return None
path_suffix = path[len(flash_prefix) :]
return os.path.join(mount_path, os.path.normpath(path_suffix))
    def _get_stat_mode_for_upload(self, path: str) -> Optional[int]:
        """Stat mode of a device path, as used by the generic upload logic."""
        return self._get_stat_mode(path)
    def _mkdir_for_upload(self, path: str) -> None:
        """Create a device directory during upload; delegates to _mkdir."""
        self._mkdir(path)
    def _read_file(
        self, source_path: str, target_fp: BinaryIO, callback: Callable[[int, int], None]
    ) -> None:
        """Stream a device file into *target_fp*, reporting progress via *callback*.

        Binary-looking files are hex-encoded on the device (when it has
        binascii) so the content survives the textual REPL protocol.
        """
        # TODO: Is it better to read from mount when possible? Is the mount up to date when the file
        # is written via serial? Does the MP API give up to date bytes when the file is written via mount?
        hex_mode = self._should_hexlify(source_path)

        self._execute_without_output("__thonny_fp = open(%r, 'rb')" % source_path)
        if hex_mode:
            self._execute_without_output("from binascii import hexlify as __temp_hexlify")

        block_size = 1024
        file_size = self._get_file_size(source_path)
        num_bytes_read = 0
        while True:
            callback(num_bytes_read, file_size)
            if hex_mode:
                block = binascii.unhexlify(
                    self._evaluate("__temp_hexlify(__thonny_fp.read(%s))" % block_size)
                )
            else:
                block = self._evaluate("__thonny_fp.read(%s)" % block_size)

            if block:
                target_fp.write(block)
                num_bytes_read += len(block)

            if len(block) < block_size:
                # short (or empty) read means end of file
                break

        # release device-side resources and names
        self._execute_without_output(
            dedent(
                """
                __thonny_fp.close()
                del __thonny_fp
                try:
                    del __temp_hexlify
                except:
                    pass
                """
            )
        )
    def _write_file(
        self,
        source_fp: BinaryIO,
        target_path: str,
        file_size: int,
        callback: Callable[[int, int], None],
    ) -> None:
        """Write a file to the device, preferring the serial protocol.

        Falls back to writing through the host-side mount when the device
        filesystem is read-only over serial.
        """
        try:
            self._write_file_via_serial(source_fp, target_path, file_size, callback)
        except ReadOnlyFilesystemError:
            self._write_file_via_mount(source_fp, target_path, file_size, callback)

        # self._sync_all_filesystems()
def _write_file_via_mount(
self,
source: BinaryIO,
target_path: str,
file_size: int,
callback: Callable[[int, int], None],
) -> None:
mounted_target_path = self._internal_path_to_mounted_path(target_path)
with open(mounted_target_path, "wb") as f:
bytes_written = 0
block_size = 4 * 1024
while True:
callback(bytes_written, file_size)
block = source.read(block_size)
if block:
bytes_written += f.write(block)
f.flush()
os.fsync(f)
if len(block) < block_size:
break
assert bytes_written == file_size
return bytes_written
def _write_file_via_serial(
self,
source_fp: BinaryIO,
target_path: str,
file_size: int,
callback: Callable[[int, int], None],
) -> None:
out, err = self._execute(
dedent(
"""
try:
__thonny_path = '{path}'
__thonny_written = 0
__thonny_fp = open(__thonny_path, 'wb')
except Exception as e:
print(str(e))
"""
).format(path=target_path),
capture_output=True,
)
if "readonly" in (out + err).replace("-", "").lower():
raise ReadOnlyFilesystemError()
elif out + err:
raise RuntimeError(
"Could not open file %s for writing, output:\n%s" % (target_path, out + err)
)
# Define function to allow shorter write commands
hex_mode = self._should_hexlify(target_path)
if hex_mode:
self._execute_without_output(
dedent(
"""
from binascii import unhexlify as __thonny_unhex
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(__thonny_unhex(x))
__thonny_fp.flush()
"""
)
)
else:
self._execute_without_output(
dedent(
"""
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(x)
"""
)
)
bytes_sent = 0
block_size = 512
while True:
callback(bytes_sent, file_size)
block = source_fp.read(block_size)
if block:
if hex_mode:
script = "__W(%r)" % binascii.hexlify(block)
else:
script = "__W(%r)" % block
out, err = self._execute(script, capture_output=True)
if out or err:
self._show_error(
"\nCould not write next block after having written %d bytes to %s"
% (bytes_sent, target_path)
)
if bytes_sent > 0:
self._show_error(
"Make sure your device's filesystem has enough free space. "
+ "(When overwriting a file, the old content may occupy space "
"until the end of the operation.)\n"
)
raise ManagementError(script, out, err)
bytes_sent += len(block)
if len(block) < block_size:
break
bytes_received = self._evaluate("__thonny_written")
if bytes_received != bytes_sent:
raise UserError("Expected %d written bytes but wrote %d" % (bytes_sent, bytes_received))
# clean up
self._execute_without_output(
dedent(
"""
try:
del __W
del __thonny_written
del __thonny_path
__thonny_fp.close()
del __thonny_fp
del __thonny_result
del __thonny_unhex
except:
pass
"""
)
)
return bytes_sent
    def _sync_all_filesystems(self):
        """Ask the device to flush filesystem buffers (no-op without os.sync)."""
        self._execute_without_output(
            dedent(
                """
                try:
                    from os import sync as __thonny_sync
                    __thonny_sync()
                    del __thonny_sync
                except ImportError:
                    pass
                """
            )
        )
def _makedirs(self, path):
if path == "/":
return
try:
super()._makedirs(path)
except Exception as e:
if "read-only" in str(e).lower():
self._makedirs_via_mount(path)
self._sync_all_filesystems()
def _makedirs_via_mount(self, path):
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None, "Couldn't find mounted path for " + path
os.makedirs(mounted_path, exist_ok=True)
def _delete_via_mount(self, paths):
for path in paths:
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None
import shutil
shutil.rmtree(mounted_path)
    def _get_fs_mount_label(self):
        """Return the volume label of the device's filesystem, or None."""
        # This method is most likely required with CircuitPython,
        # so try its approach first
        # https://learn.adafruit.com/welcome-to-circuitpython/the-circuitpy-drive
        result = self._evaluate(
            dedent(
                """
                try:
                    from storage import getmount as __thonny_getmount
                    try:
                        __thonny_result = __thonny_getmount("/").label
                    finally:
                        del __thonny_getmount
                except ImportError:
                    __thonny_result = None
                except OSError:
                    __thonny_result = None

                __thonny_helper.print_mgmt_value(__thonny_result)

                del __thonny_result
                """
            )
        )

        if result is not None:
            return result

        if self._welcome_text is None:
            return None

        """
        # following is not reliable and probably not needed
        markers_by_name = {"PYBFLASH": {"pyb"}, "CIRCUITPY": {"circuitpython"}}
        for name in markers_by_name:
            for marker in markers_by_name[name]:
                if marker.lower() in self._welcome_text.lower():
                    return name
        """
        return None
def _get_flash_prefix(self):
if not self._supports_directories():
return ""
elif (
"LoBo" in self._welcome_text
or "WiPy with ESP32" in self._welcome_text
or "PYBLITE" in self._welcome_text
or "PYBv" in self._welcome_text
or "PYBOARD" in self._welcome_text.upper()
):
return "/flash/"
else:
return "/"
def _get_fs_mount(self):
label = self._get_fs_mount_label()
if label is None:
return None
else:
candidates = find_volumes_by_name(
self._get_fs_mount_label(),
# querying A can be very slow
skip_letters="A",
)
if len(candidates) == 0:
raise RuntimeError("Could not find volume " + self._get_fs_mount_label())
elif len(candidates) > 1:
raise RuntimeError("Found several possible mount points: %s" % candidates)
else:
return candidates[0]
def _should_hexlify(self, path):
if "binascii" not in self._builtin_modules:
return False
for ext in (".py", ".txt", ".csv"):
if path.lower().endswith(ext):
return False
return True
    def _is_connected(self):
        # Connection counts as alive while it has recorded no error.
        # NOTE(review): peeks at the connection's private _error attribute.
        return self._connection._error is None
    def _get_epoch_offset(self) -> int:
        """Offset between device epoch and Unix epoch, in seconds."""
        # https://docs.micropython.org/en/latest/library/utime.html
        # NB! Some boards (eg Pycom) may use Posix epoch!
        try:
            return super()._get_epoch_offset()
        except NotImplementedError:
            # fall back to the conventional 2000-01-01 MicroPython epoch
            return Y2000_EPOCH_OFFSET
def _get_sep(self):
if self._supports_directories():
return "/"
else:
return ""
    def _decode(self, data: bytes) -> str:
        """Decode device output, replacing undecodable bytes instead of raising."""
        return data.decode(ENCODING, errors="replace")
if __name__ == "__main__":
    # Entry point: Thonny's front-end launches this module with a
    # literal-evaluable argument dict in sys.argv[1].
    THONNY_USER_DIR = os.environ["THONNY_USER_DIR"]

    # log to a file instead of the (protocol-carrying) stdio streams
    logger = logging.getLogger("thonny.micropython.backend")
    logger.propagate = False
    logFormatter = logging.Formatter("%(levelname)s: %(message)s")
    file_handler = logging.FileHandler(
        os.path.join(THONNY_USER_DIR, "micropython-backend.log"), encoding="UTF-8", mode="w"
    )
    file_handler.setFormatter(logFormatter)
    file_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)

    import ast
    import sys

    args = ast.literal_eval(sys.argv[1])

    try:
        if args["port"] is None:
            # remain busy
            while True:
                time.sleep(1000)
        elif args["port"] == "webrepl":
            from thonny.plugins.micropython.webrepl_connection import WebReplConnection

            connection = WebReplConnection(args["url"], args["password"], args["min_write_delay"])
        else:
            from thonny.plugins.micropython.serial_connection import (
                DifficultSerialConnection,
                SerialConnection,
            )

            connection = SerialConnection(args["port"], BAUDRATE)
            # connection = DifficultSerialConnection(args["port"], BAUDRATE)

        backend = BareMetalMicroPythonBackend(connection, clean=args["clean"], args=args)
    except ConnectionFailedException as e:
        # report connection failure through the normal message protocol
        text = "\n" + str(e) + "\n"
        msg = BackendEvent(event_type="ProgramOutput", stream_name="stderr", data=text)
        sys.stdout.write(serialize_message(msg) + "\n")
        sys.stdout.flush()
|
[
"aivar.annamaa@gmail.com"
] |
aivar.annamaa@gmail.com
|
7692972bf7b351e4ac875fb9d2b00890072ac697
|
7d84f1af14f8c30eeaa07d5f36542d93e03a4097
|
/module4/多线程/14异步调用与回调机制.py
|
62bf1fc058efd1f3f47ebbc3d4fe94922319cc11
|
[] |
no_license
|
crazy-heng/study
|
5ab50a73c4ce51593e493e8ecee2e591d2200a9e
|
034f7a5d63f278a4ac4678ed18ee63413a102757
|
refs/heads/master
| 2020-04-01T01:26:07.564735
| 2018-09-17T16:01:47
| 2018-09-17T16:01:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 1 同步调用,提交后原地拿到执行结果,再执行下一代码,导致串行执行
# from concurrent.futures import ThreadPoolExecutor
# import time
# import random
#
#
# def la(name):
# print('%s is laing' % name)
# time.sleep(random.randint(3, 5))
# res = random.randint(7, 13) * '#'
# return {'name': name, 'res': res}
#
#
# def weigh(shit):
# name = shit['name']
# size = len(shit['res'])
# print('%s la %s kg' % (name, size))
#
#
# if __name__ == '__main__':
# pool = ThreadPoolExecutor(13)
# shit1 = pool.submit(la, 'alex').result()
# weigh(shit1)
# shit2 = pool.submit(la, 'peiqi').result()
# weigh(shit2)
# shit3 = pool.submit(la, 'yoyo').result()
# weigh(shit3)
# 2 异步调用,提交完后,不等待任务执行完成
from concurrent.futures import ThreadPoolExecutor
import time
import random
def la(name):
    """Simulate a slow worker: announce itself, sleep 3-5 s, then return
    a dict with the worker's name and a random-length '#' payload."""
    print('%s is laing' % name)
    time.sleep(random.randint(3, 5))
    payload = '#' * random.randint(7, 13)
    return {'name': name, 'res': payload}
def weigh(shit):
    """Completion callback: receives the finished Future, unpacks its result
    and prints the payload size."""
    outcome = shit.result()  # add_done_callback passes the Future, not the value
    name = outcome['name']
    size = len(outcome['res'])
    print('%s la %s kg' % (name, size))
if __name__ == '__main__':
    pool = ThreadPoolExecutor(13)
    # add_done_callback returns None, so shit1/2/3 hold None -- the point is
    # that weigh runs asynchronously when each task finishes, instead of the
    # submitter blocking on .result() as in the commented-out sync version.
    shit1 = pool.submit(la, 'alex').add_done_callback(weigh)
    shit2 = pool.submit(la, 'peiqi').add_done_callback(weigh)
    shit3 = pool.submit(la, 'yoyo').add_done_callback(weigh)
|
[
"fanhk@126.com"
] |
fanhk@126.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.