blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7866615db4088601b520ceb0509c4eb8ed8a28e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_120/713.py | 24cfd7c29cd4cf82e619addf3c3a2ef74f627af3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import math
# Code Jam solution (Python 2: raw_input/xrange/print statement).
# For each case, reads r and t and prints the largest integer n with
# paint cost 2*n**2 + (2*r - 1)*n <= t.
T = int(raw_input())  # number of test cases
for i in xrange(T):
	r, t = raw_input().split()
	r = int(r)
	t = int(t)
	# Positive root of the quadratic 2*n**2 + (2*r - 1)*n - t = 0.
	n = ((1.0 - 2*r) + math.sqrt( (2*r - 1.0)**2 + 8*t ))/4.0
	n = int(n)
	# Floating-point rounding may overestimate n; recompute the exact cost
	# and step down by one if the candidate does not actually fit.
	total = (2*r + 1) * n + n*(n-1)*2
	if total <= t:
		print 'Case #%s: %s' % (i+1, n)
	else:
		print 'Case #%s: %s' % (i+1, n-1)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
f77ef9ebe4b1bb015901d003bfa80073a0be69f0 | 5c1531b47fb4dc4d7e5998d44f7200bf1786b12b | /__UNSORTED/139_word_break/word_break.py | 2de3f3f3df7205cae92ad800b5382c18f49e4c3a | [] | no_license | Web-Dev-Collaborative/Leetcode-JS-PY-MD | d1f560051aad1896a80eccdd4b4fbb389e7033e3 | 675b94fa5da8d40f0ea79efe6d3ef1393221425f | refs/heads/master | 2023-09-01T22:30:32.313793 | 2021-10-26T02:17:03 | 2021-10-26T02:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | class Solution:
# @param s, a string
# @param wordDict, a set<string>
# @return a boolean
def wordBreak(self, s, wordDict):
self.tree = {}
self.memo = {}
for word in wordDict:
self.buildNode(word, self.tree)
return self.traverse(s)
    def traverse(self, s):
        """Return True if the suffix s can be split into dictionary words.

        Walks the trie in self.tree along s; whenever a word-end marker
        (-1) is reached, recurses on the remaining suffix.  Results are
        memoized per suffix string in self.memo.
        """
        if s in self.memo:
            return self.memo[s]
        if not s:
            # empty suffix: the whole string was consumed successfully
            return True
        ret = False
        root = self.tree
        for i in range(len(s) + 1):
            if -1 in root:
                # a dictionary word ends at position i; try the rest
                if self.traverse(s[i:]):
                    ret = True
                    break
            if i < len(s):
                c = s[i]
                if c in root:
                    root = root[c]
                else:
                    # no trie edge for this character: stop extending
                    break
        self.memo[s] = ret
        return ret
def buildNode(self, word, tree):
if not word:
tree[-1] = True
else:
c = word[0]
if c not in tree:
tree[c] = {}
self.buildNode(word[1:], tree[c])
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
03dd76a38abe323de5bb3342d57232631262aac6 | d7e4be3752a3b659d198893ebc4347c77f56a3b8 | /flaskr/repair/forms.py | 0e67749642c7c5607e0314eb0d443010e35c3527 | [] | no_license | maria-j-k/library_v2 | c76aa0b77f3103aab43e5339d5e468e28c9e485c | fdc292d152aec1ea596733b79126caecca8b93ac | refs/heads/main | 2023-03-19T20:13:06.104296 | 2021-03-11T12:16:37 | 2021-03-11T12:16:37 | 322,952,448 | 0 | 0 | null | 2021-01-19T13:13:38 | 2020-12-19T22:52:37 | Python | UTF-8 | Python | false | false | 4,328 | py | from flask_wtf import FlaskForm
from wtforms_sqlalchemy.fields import QuerySelectField
from wtforms import BooleanField, FieldList, FormField, HiddenField, IntegerField, TextAreaField, SelectField, StringField, SubmitField
from wtforms.widgets import HiddenInput
from wtforms.validators import DataRequired, AnyOf, Optional
from flaskr.models import Publisher, Room
class SearchForm(FlaskForm):
    """Minimal search form: a single free-text name field."""
    name = StringField('Name')
class PublisherForm(FlaskForm):
    """Edit/repair form for a Publisher record."""
    name = StringField('Name')
    # hidden id of the record being repaired; optional for new entries
    name_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    # NOTE(review): label 'Sumbit' looks like a typo for 'Submit' (PersonForm
    # below spells it correctly); repeated on most forms in this file.
    submit = SubmitField('Sumbit')
def all_publishers():
    """Query factory for QuerySelectField: every Publisher row."""
    return Publisher.query.all()
class SerieForm(FlaskForm):
    """Edit/repair form for a Serie record, linked to a Publisher."""
    name = StringField('Name')
    name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
    publisher = QuerySelectField(query_factory=all_publishers, allow_blank=False)
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
class CityForm(FlaskForm):
    """Edit/repair form for a City (publication place) record."""
    name = StringField('Name')
    name_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
class CollectionForm(FlaskForm):
    """Edit/repair form for a Collection record (no hidden id field)."""
    name = StringField('Name')
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
class RoomForm(FlaskForm):
    """Edit/repair form for a Room record."""
    name = StringField('Name')
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
def all_rooms():
    """Query factory for QuerySelectField: every Room row."""
    return Room.query.all()
class ShelfForm(FlaskForm):
    """Edit/repair form for a Shelf record, located inside a Room."""
    name = StringField('Name')
    room = QuerySelectField(query_factory=all_rooms, allow_blank=False)
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
class BookForm(FlaskForm):
    """Edit/repair form for a Book record and its related lookup entities
    (publisher, serie, city); the *_id hidden fields carry existing row ids."""
    title = StringField('Title')
    isbn = StringField('ISBN')
    # authors = StringField('Authors')
    # translation = StringField('Translation')
    # redaction = StringField('Redaction')
    # introduction = StringField('Introduction')
    publisher = StringField('Publisher')
    publisher_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    serie = StringField('Serie')
    serie_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    city = StringField('Publication place')
    city_id = IntegerField('Id', widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    pub_year = StringField('Publication year')
    origin_language = StringField('Origin language')
    fiction = SelectField('Fiction', choices=[
        ('', '---'),
        (1, 'fiction'),
        (0, 'non-fiction')
    ],
        # Bug fix: this was `coerce=bool`, but submitted form values arrive
        # as strings, so bool('0') evaluated to True and 'non-fiction' could
        # never be selected.  Compare the raw value explicitly instead
        # ('' still coerces to False, as before).
        coerce=lambda v: v in (True, 1, '1'))
    literary_form = SelectField('Literary form', choices=[
        ('', '---'),
        ('PO', 'Poetry'),
        ('PR', 'Prose'),
        ('DR', 'Drama')
    ])
    genre = StringField('Genre')
    precision = TextAreaField('Precision')
    nukat = TextAreaField('NUKAT themes')
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Sumbit')
class PersonForm(FlaskForm):
    """Edit/repair form for a Person record."""
    name = StringField('Name')
    name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    submit = SubmitField('Submit')
class Person2Form(FlaskForm):
    """Per-person subform embedded in CreatorForm's FieldList."""
    name = StringField('Name')
    # person_id = IntegerField(widget=HiddenInput(), validators=[Optional(strip_whitespace=True)])
    name_id = HiddenField(validators=[Optional(strip_whitespace=True)])
    # role is restricted to 'A', 'T', 'R' or 'I' (CreatorForm defaults to
    # 'A'; presumably author/translator/redactor/introduction -- verify)
    role = HiddenField(validators=[AnyOf(values=['A', 'T', 'R', 'I'])])
    incorrect = BooleanField('Incorrect')
    approuved = BooleanField('Approuved')
    class Meta:
        # embedded subform: the enclosing form carries the CSRF token
        csrf = False
class CreatorForm(FlaskForm):
    """Form holding up to three Person2Form entries (default role 'A')."""
    creators = FieldList(FormField(Person2Form, default={'role': 'A'}), max_entries=3)
    submit = SubmitField('Sumbit')
| [
"maria77julia@gmail.com"
] | maria77julia@gmail.com |
baa23174ec0a53364cec169399118e44e0da551e | 5c668379197a236d3a961dbba32aba606d661111 | /chapter13/brother1.py | 5ddb4a359161e1d5221298537cb7ab4a76b44356 | [] | no_license | RobbiNespu/wargames.unix-heaven.org | a7cf1afbc7f5983a85638c0a63dfd5764fd74b4e | 6ad914d020f696bf6148bf33d66de72aaf001589 | refs/heads/master | 2021-04-05T23:27:17.027998 | 2015-06-26T14:38:28 | 2015-06-26T14:38:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/env python
# code taken from the examples from the Python documentation on SocketServer
# daemon1 returns base64'd message, which is xxd'ed
import SocketServer
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """
    def handle(self):
        # Ignores any client input and sends a fixed payload: an xxd-style
        # hex dump of base64-encoded text (see module header comment).
        data = """
0000000: 5157 6773 4947 4675 6233 526f 5a58 4967 QWgsIGFub3RoZXIg
0000010: 6333 5279 5957 356e 5a58 4967 6347 467a c3RyYW5nZXIgcGFz
0000020: 6332 6c75 5a79 4269 6553 454b 436c 6c76 c2luZyBieSEKCllv
0000030: 6453 4268 636d 5567 6247 3976 6132 6c75 dSBhcmUgbG9va2lu
0000040: 5a79 426d 6233 4967 6447 686c 0a49 484e ZyBmb3IgdGhl.IHN
0000050: 6c59 334a 6c64 4342 725a 586b 7349 4746 lY3JldCBrZXksIGF
0000060: 795a 5734 6e64 4342 3562 3355 6763 3352 yZW4ndCB5b3Ugc3R
0000070: 7959 5735 6e5a 5849 2f43 6770 4a4a 3230 yYW5nZXI/CgpJJ20
0000080: 6763 3239 7963 6e6b 7349 474a 3164 4342 gc29ycnksIGJ1dCB
0000090: 4a49 474e 6862 6d35 760a 6443 426f 5a57 JIGNhbm5v.dCBoZW
00000a0: 7877 4948 6c76 6453 3475 4c67 6f4b 5432 xwIHlvdS4uLgoKT2
00000b0: 3573 6553 4276 626d 5567 6232 5967 5957 5seSBvbmUgb2YgYW
00000c0: 7873 4948 567a 4948 526f 636d 566c 4947 xsIHVzIHRocmVl IG
00000d0: 6876 6247 527a 4948 526f 5a53 4272 5a58 hvbGRzIHRoZSBrZX
00000e0: 6b75 4c69 344b 0a43 6c6c 7664 5342 755a kuLi4K.CllvdSBuZ
00000f0: 5756 6b49 4852 7649 475a 7062 6d51 6762 WVkIHRvIGZpbmQgb
0000100: 586b 6759 6e4a 7664 4768 6c63 6977 6761 XkgYnJvdGhlciwga
0000110: 4755 6764 326c 7362 4342 6f5a 5778 7749 GUgd2lsbCBoZWxwI
0000120: 486c 7664 5345 4b43 6b35 7664 7942 4a49 HlvdSEKCk5vdyBJI
0000130: 4735 6c0a 5a57 5167 6447 3867 636d 567a G5l.ZWQgdG8gcmVz
0000140: 6443 7767 5a32 3976 5a47 4a35 5a53 427a dCwgZ29vZGJ5ZSBz
0000150: 6448 4a68 626d 646c 6369 454b 4367 3d3d dHJhbmdlciEKCg==
0000160: 0a                                       .
        """
        self.request.sendall(data)
# Python 2 script: SocketServer was renamed socketserver in Python 3.
if __name__ == "__main__":
    HOST, PORT = "localhost", 31123
    # Create the server, binding to localhost on port 31123
    server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    server.serve_forever()
| [
"dnaeon@gmail.com"
] | dnaeon@gmail.com |
bab2a8a108c7ebf93bc6405b7b31741a934d2815 | 54147931d6e9eecb797c4d3665337a6c2b27160c | /chapter2_string_text/2_2.py | c5d96d4ab2ab814774508068c66f69d48702b497 | [] | no_license | miniyk2012/python_cookbook | 1fefe857f22d1145e7335c15c45e8b12356d7a49 | c01daee2d5349ea018a0a8768be7b011df70b566 | refs/heads/master | 2020-06-10T19:53:45.749970 | 2017-01-16T15:23:51 | 2017-01-16T15:23:51 | 75,891,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # -*- coding:utf-8 -*-
import logging
_logger = logging.getLogger(__name__)
# Demonstrates str.startswith / str.endswith, including the tuple form
# for matching several prefixes/suffixes at once.
url = 'http://www.python.org'
print(url.startswith('http:'))
import os
# suffix checks over a directory listing (of the parent directory)
filenames = os.listdir('../')
print(filenames)
print([name for name in filenames if name.endswith(('algorithm', '.md'))])
print(any(name.endswith('.idea') for name in filenames))
# startswith/endswith require a tuple (not a list) for multiple prefixes
choices = ['http:', 'ftp:']
url = 'http://www.python.org'
print(url.startswith(tuple(choices)))
# a regex alternation works too, but the tuple form is simpler
import re
url = 'http://www.python.org'
print(bool(re.match('http:|https:|ftp:', url)))
print(url.startswith(('http:', 'https:', 'ftp:')))
| [
"yangk@ersoft.cn"
] | yangk@ersoft.cn |
0b14592d0ba3665b7175831dfac9b89160af3f0e | 414239752b2bfc4cb3a947474f2662af7588b6eb | / protobufeditor/Tests/MarathonTests/TestCases/ProtoSearch/ProtoSearch6.py | 53ffee71fcdf81ff114b1db15c02e1c24c9934ad | [] | no_license | dtracers/protobufeditor | 8c7f9671c3b3a7d1cd3094321d030f6e6afcc7e8 | b65d06bce93165eebf9798c533e2447a5992d384 | refs/heads/master | 2020-12-24T19:28:22.486207 | 2016-04-19T23:41:57 | 2016-04-19T23:41:57 | 56,340,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | useFixture(default)
def test():
    """Marathon GUI regression test for the Protocol Buffer Editor.

    Opens a sample binary, runs 'Proto Search' and checks that the
    Ams_Location proto files are found and the expected combo-box values
    are pre-selected.  Relies on names supplied by the Marathon runtime
    (window, select, click, assert_p, close).
    """
    from Modules import commonBits
    java_recorded_version = '1.6.0_22'
    if window('Protocol Buffer Editor'):
        select('FileChooser', commonBits.sampleDir() + 'Ams_LocDownload_20041228.bin')
        click('Proto Search')
        assert_p('Table1', 'Content', r'[[' + commonBits.stdCopybookDir() + 'Ams_Location.proto, Ams_Location.proto, Locations], [' + commonBits.stdCopybookDir() + 'Ams_Location.protocomp, Ams_Location.proto, Locations]]')
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        assert_p('FileChooser1', 'Text', commonBits.stdCopybookDir() + 'Ams_Location.protocomp')
        assert_p('ComboBox2', 'Text', 'Ams_Location.proto')
        assert_p('ComboBox3', 'Content', '[[Locations]]')
        assert_p('ComboBox3', 'Text', 'Locations')
        assert_p('ComboBox1', 'Text', 'Compiled Proto')
        assert_p('ComboBox', 'Text', 'Delimited Messages')
    close()
| [
"bm_tas@yahoo.com.au"
] | bm_tas@yahoo.com.au |
a804d5d5ce3df83393ae6bff5898fb1f6cc6e43b | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/plone.autoform-1.7.5-py2.7.egg/plone/autoform/form.py | 5f38b7215aa23d0fecdc261c29372fd0c2f0f2dc | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,316 | py | # -*- coding: utf-8 -*-
from plone.autoform.base import AutoFields
from plone.autoform.interfaces import IAutoExtensibleForm
from plone.autoform.interfaces import IAutoObjectSubForm
from plone.z3cform.fieldsets.extensible import ExtensibleForm
from zope.interface import implementer
_marker = object()
@implementer(IAutoExtensibleForm)
class AutoExtensibleForm(AutoFields, ExtensibleForm):
    """Mixin class for z3c.form forms that support fields extracted from
    a schema
    """
    # flag read by the rendering machinery elsewhere (not used in this file)
    showEmptyGroups = False
    @property
    def schema(self):
        # Subclasses must supply the schema the form fields are built from.
        raise NotImplementedError(
            'The class deriving from AutoExtensibleForm must have a '
            '\'schema\' property'
        )
    @property
    def additionalSchemata(self):
        """Default to there being no additional schemata
        """
        return ()
    def updateFields(self):
        # Derive fields from the schema hints before the standard
        # extensible-form field update runs.
        self.updateFieldsFromSchemata()
        super(AutoExtensibleForm, self).updateFields()
@implementer(IAutoObjectSubForm)
class AutoObjectSubForm(AutoFields):
    """A Mixin class for z3c.form.object.ObjectSubForm forms that supports
    fields being updated from hints in a schema.
    """
    @property
    def schema(self):
        # The subform's schema comes from the parent widget's field.
        return self.__parent__.field.schema
    def setupFields(self):
        # Apply the schema's form hints when the subform builds its fields.
        self.updateFieldsFromSchemata()
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
95135697228d557cb3d8a41bb0ecbf01bf2709f0 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/util/compat.py | a24a52eea9710e98bd56025457e6fda5449a5197 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 4,224 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for Python 2 vs. 3 compatibility.
## Conversion routines
In addition to the functions below, `as_str` converts an object to a `str`.
## Types
The compatibility module also provides the following types:
* `bytes_or_text_types`
* `complex_types`
* `integral_types`
* `real_types`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers as _numbers
import numpy as _np
import six as _six
from tensorflow.python.util.tf_export import tf_export
def as_bytes(bytes_or_text, encoding='utf-8'):
  """Convert `bytes_or_text` to `bytes`, encoding text with `encoding`.

  Args:
    bytes_or_text: A `bytes`, `str`, or `unicode` object.
    encoding: Charset name used to encode unicode input.

  Returns:
    A `bytes` object.

  Raises:
    TypeError: If `bytes_or_text` is neither binary nor unicode.
  """
  if isinstance(bytes_or_text, bytes):
    return bytes_or_text
  if isinstance(bytes_or_text, _six.text_type):
    return bytes_or_text.encode(encoding)
  raise TypeError('Expected binary or unicode string, got %r' %
                  (bytes_or_text,))
def as_text(bytes_or_text, encoding='utf-8'):
  """Convert `bytes_or_text` to a unicode string, decoding bytes if needed.

  Args:
    bytes_or_text: A `bytes`, `str`, or `unicode` object.
    encoding: Charset name used to decode binary input.

  Returns:
    A `unicode` (Python 2) or `str` (Python 3) object.

  Raises:
    TypeError: If `bytes_or_text` is neither binary nor unicode.
  """
  if isinstance(bytes_or_text, bytes):
    return bytes_or_text.decode(encoding)
  if isinstance(bytes_or_text, _six.text_type):
    return bytes_or_text
  raise TypeError('Expected binary or unicode string, got %r' % bytes_or_text)
# Convert an object to a `str` in both Python 2 and 3.
# `str` is binary on Python 2 and text on Python 3, so `as_str` aliases the
# matching converter, and the public `compat.as_str` export follows it.
if _six.PY2:
  as_str = as_bytes
  tf_export('compat.as_bytes', 'compat.as_str')(as_bytes)
  tf_export('compat.as_text')(as_text)
else:
  as_str = as_text
  tf_export('compat.as_bytes')(as_bytes)
  tf_export('compat.as_text', 'compat.as_str')(as_text)
@tf_export('compat.as_str_any')
def as_str_any(value):
  """Converts to `str` as `str(value)`, but use `as_str` for `bytes`.

  Args:
    value: A object that can be converted to `str`.

  Returns:
    A `str` object.
  """
  return as_str(value) if isinstance(value, bytes) else str(value)
@tf_export('compat.path_to_str')
def path_to_str(path):
  """Returns the file system path representation of a `PathLike` object,
  else returns the argument unchanged.

  Args:
    path: An object that can be converted to path representation.

  Returns:
    A `str` object.
  """
  if hasattr(path, '__fspath__'):
    return as_str_any(path.__fspath__())
  return path
# Tuples suitable for isinstance() checks of numeric kinds, also exported
# as tf.compat constants.
# Numpy 1.8 scalars don't inherit from numbers.Integral in Python 3, so we
# need to check them specifically. The same goes from Real and Complex.
integral_types = (_numbers.Integral, _np.integer)
tf_export('compat.integral_types').export_constant(__name__, 'integral_types')
real_types = (_numbers.Real, _np.integer, _np.floating)
tf_export('compat.real_types').export_constant(__name__, 'real_types')
complex_types = (_numbers.Complex, _np.number)
tf_export('compat.complex_types').export_constant(__name__, 'complex_types')
# Either bytes or text.
bytes_or_text_types = (bytes, _six.text_type)
tf_export('compat.bytes_or_text_types').export_constant(__name__,
                                                        'bytes_or_text_types')
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
5bace3fe8c8b3966f2c6c49d67a01a79ff42c1a1 | 4bc6028ed8ba403b69adfd6f5cbd139baece0f4d | /basic/hello_world.py | 2bc192cf9dc891b27de6eabd3ffdaa95d2fe90f4 | [] | no_license | xrw560/learn-pyspark | 0ef9ed427ff887ceed1c5e5773bf97ed25ecae04 | 618d16dafd73165e714111670119d9cdecc0bf1f | refs/heads/master | 2020-03-07T00:12:36.885000 | 2019-01-04T09:51:32 | 2019-01-04T09:51:32 | 127,152,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/usr/bin/python
# -*- encoding:utf-8 -*-
if __name__ == '__main__':
    """
    Hello World 程序
    """
    # "Hello World 程序" = "Hello World program".  Python 2 syntax (print
    # statement); the bare triple-quoted string above is a no-op literal.
    print "hello world!!!!!!!!!!!!!!!"
| [
"ncutits@163.com"
] | ncutits@163.com |
c1a1eebde481557cb5e8673730e08fa7eff20636 | d191a04a3ded41175ea84ae88ebddb4f262b7fb1 | /Dynamic_program/33_palindromic_substring.py | d7e049406dc272a2923b0bce1e168fc05b3c3c98 | [] | no_license | YLyeliang/now_leet_code_practice | ae4aea945bae72ec08b11e57a8f8a3e81e704a54 | 204d770e095aec43800a9771fe88dd553463d2f7 | refs/heads/master | 2022-06-13T20:22:51.266813 | 2022-05-24T05:29:32 | 2022-05-24T05:29:32 | 205,753,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | # Given a string, your task is to count how many palindromic substrings in this string.
#
# The substrings with different start indexes or end indexes are counted as different substrings even they consist of same characters.
#
# Example 1:
#
# Input: "abc"
# Output: 3
# Explanation: Three palindromic strings: "a", "b", "c".
#
#
# Example 2:
#
# Input: "aaa"
# Output: 6
# Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
#
#
# Note:
#
# The input string length won't exceed 1000.
# Problem: given a string s, count its palindromic substrings.  Identical
# substrings with different start/end indexes count separately.
# Analysis: DP.  For an index span i, i+1, ..., j-1, j: if s[i] == s[j] and
# s[i+1:j-1] is a palindrome, then the whole span is a palindrome.
# When the span is shorter than 3 characters, checking s[i] == s[j] suffices.
# So scan s with a 2-D dp table where dp[i][j] records whether the span
# i..j is a palindrome, adding 1 to the result whenever it is.
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count palindromic substrings of s via interval DP.

        is_pal[start][end] is True iff s[start:end + 1] is a palindrome;
        spans are filled from the rightmost start so the inner span
        is_pal[start + 1][end - 1] is always computed first.
        """
        n = len(s)
        is_pal = [[False] * n for _ in range(n)]
        count = 0
        for start in range(n - 1, -1, -1):
            for end in range(start, n):
                if s[start] == s[end] and (end - start < 2 or is_pal[start + 1][end - 1]):
                    is_pal[start][end] = True
                    count += 1
        return count
| [
"k87974@163.com"
] | k87974@163.com |
e1a70e8fa8ccf663700a94f2d16d3b20110080f5 | 03d07de94fc22d1583c45ca84c711a06df8a40ff | /lc/graph/lc_207_course-schedule.py | f5045d8a606e37fc0a05f3d15c5182ff10bcd859 | [] | no_license | gaopenghigh/algorithm | 94e04293c69a2ad6903495e1cf6e1b75556535bb | f5d78c98c7201c56f9d4c3a9c0c76e9447a17985 | refs/heads/master | 2022-03-11T18:46:38.712923 | 2022-02-20T14:20:54 | 2022-02-20T14:20:54 | 54,484,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | # 207. 课程表
# You must take numCourses courses this term, labelled 0 to numCourses - 1.
# Some courses have prerequisites, given as the array prerequisites where
# prerequisites[i] = [ai, bi] means course bi must be taken before course ai.
# For example, the pair [0, 1] means that to take course 0 you must first
# finish course 1.
# Return true if it is possible to finish all courses, false otherwise.
# Abstractly: decide whether a directed graph contains a cycle.
# Traverse with DFS.
# Simply recording which vertices were ever visited is not enough: meeting a
# previously visited vertex does not by itself imply a cycle.
# For example:
# A -> B -> C <--+
# |______________|
# Going from B to C we find C already visited (because of the direct A -> C
# edge), yet there is no cycle.
# So we must also track the vertices on the *current* DFS path -- those still
# on the recursion stack, i.e. in the "being searched" state.
#
# Represent the graph with an adjacency list.
# Use two sets: one for vertices "being searched", one for vertices that have
# "not been visited yet".
class Solution:
    """LeetCode 207, Course Schedule: every course can be finished iff the
    prerequisite digraph is acyclic (white/grey/black DFS cycle detection)."""

    def __init__(self) -> None:
        self.graph = []  # adjacency list: vertex -> list of successors
        self.n = 0       # number of vertices

    def _build_graph(self, n, edges):
        """Build the adjacency list for n vertices from [src, dst] pairs."""
        self.n = n
        self.graph = [[] for _ in range(n)]
        for first, second in edges:
            self.graph[first].append(second)

    def has_cycle_dfs(self, u, unsearched, searching):
        """DFS from u; True iff a vertex on the current path is revisited.

        `unsearched` holds unvisited (white) vertices, `searching` holds
        vertices on the recursion stack (grey); vertices in neither set are
        fully explored (black) and can be skipped safely.
        """
        if u in searching:
            return True
        unsearched.remove(u)
        searching.add(u)
        for nxt in self.graph[u]:
            if nxt in searching:
                return True
            if nxt in unsearched and self.has_cycle_dfs(nxt, unsearched, searching):
                return True
        searching.remove(u)
        return False

    def has_cycle(self):
        """Return True iff the graph contains a directed cycle."""
        unsearched = set(range(self.n))
        searching = set()
        # The graph may be disconnected, so start a DFS from every vertex
        # that is still unvisited.
        return any(
            i in unsearched and self.has_cycle_dfs(i, unsearched, searching)
            for i in range(self.n)
        )

    def canFinish(self, numCourses: int, prerequisites: list[list[int]]) -> bool:
        """Return True when every course can be completed."""
        self._build_graph(numCourses, prerequisites)
        return not self.has_cycle()
if __name__ == '__main__':
    s = Solution()
    # the self-loop [5, 5] makes this schedule infeasible, so it prints False
    print(s.canFinish(20, [[0,10],[3,18],[5,5],[6,11],[11,14],[13,1],[15,1],[17,4]]))
"jh.gao@ucloud.cn"
] | jh.gao@ucloud.cn |
c55a5ab14f9f3d22bf01db0767cfa494e6709ed6 | 8cb0ace888d6dfe0b781e21906cab598cfb3783d | /apps/wop/wop/widgets/level_widget.py | c5349c5c427f2b346893af999b365c1f790900e3 | [] | no_license | DerThorsten/appdev | fefe1929af15f33d7848f03664b345df0f2aeaaf | 5c24328b8ba08ad57aa6b77296d68ece00b4154b | refs/heads/master | 2021-01-17T05:10:48.033350 | 2016-08-06T21:01:49 | 2016-08-06T21:01:49 | 33,121,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,725 | py | from kivy.logger import Logger
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import *
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
import numpy
import wop.level
Builder.load_string("""
<LevelWidget>:
size_hint: (1, 1)
levelCanvasWidget: levelCanvasWidget
zoom_outButton: zoom_outButton
zoom_inButton: zoom_inButton
createAndSelectWidget: createAndSelectWidget
orientation: "vertical"
LevelCanvasWidget:
text: "foo"
size_hint: (0.1,0.9)
id: levelCanvasWidget
BoxLayout:
size_hint: (1, 0.1)
orientation: "horizontal"
spacing: self.width/10.0
CreateAndSelectWidget:
id: createAndSelectWidget
size_hint: (1, 1)
BoxLayout:
size_hint: (0.25, 1)
orientation: "horizontal"
spacing: self.width/10.0
BoxLayout:
size_hint: (0.5, 1)
orientation: 'vertical'
spacing: self.height/10.0
Button:
id: zoom_outButton
text: "-"
color: (0.3,1,0.3,1)
font_size: 50
font_name: "CBlocks"
background_color: (0,0,0,0)
on_release: root.zoom_out()
Button:
id: zoom_inButton
text: "+"
color: (1,0.3,0.3,1)
font_size: 50
font_name: "CBlocks"
background_color: (0,0,0,0)
on_release: root.zoom_in()
Button:
size_hint: (1, 1)
id: menuButton
color: (0.2,0.2,0.6,1)
font_size: 30
font_name: "CBlocks"
text: "menu"
background_color: (0,0,0,0)
on_press: root.screen_manager.current = 'main_menu_screen'
""")
class LevelWidget(BoxLayout):
    """Top-level game screen: hosts the level canvas, the create/select
    toolbar and the zoom/menu buttons (layout comes from the kv rules
    registered at module import)."""
    levelCanvasWidget = ObjectProperty(None)
    createAndSelectWidget = ObjectProperty(None)
    screen_manager = ObjectProperty(None)
    def __init__(self,*arg,**kwarg):
        super(LevelWidget,self).__init__(*arg, **kwarg)
        self.level = None  # active level instance, attached via set_level()
    def on_global_pause(self):
        # forward an app-wide pause to the running level, if any
        if self.level is not None:
            self.level.on_global_pause()
    def on_global_resume(self):
        if self.level is not None:
            self.level.on_global_resume()
    def on_pre_leave(self):
        # leaving the screen tears the current level down
        self.levelCanvasWidget.on_pre_leave()
        self._kill_level()
    def on_leave(self):
        self.levelCanvasWidget.on_leave()
    def on_pre_enter(self):
        # entering the screen initialises the level attached via set_level()
        self.levelCanvasWidget.on_pre_enter()
        self._init_level()
    def on_enter(self):
        self.levelCanvasWidget.on_enter()
        #self._init_level()
    def zoom_in(self):
        s = self.get_scale()
        self.set_scale(s*1.25)
    def zoom_out(self):
        # never zoom out below scale 1.0
        s = self.get_scale()
        ns = s/1.25
        if ns > 1.0:
            self.set_scale(ns)
    def set_scale(self, scale):
        self.levelCanvasWidget.set_scale(scale)
    def get_scale(self):
        # NOTE(review): leftover Python 2 debug print -- consider removing
        print self.pos,self.size
        return self.levelCanvasWidget.get_scale()
    def get_offset(self):
        return self.levelCanvasWidget.get_offset()
    def set_offset(self, offset):
        return self.levelCanvasWidget.set_offset(offset)
    def render(self):
        self.levelCanvasWidget.render()
    def add_render_item(self, renderItem, z):
        # delegate to the canvas widget; z is the draw-order argument
        self.levelCanvasWidget.add_render_item(renderItem,z)
    def set_level(self, level):
        # a level may only be attached while no other level is active
        assert self.level is None
        self.level = level
    def _init_level(self):
        # load level
        #self.level = wop.level.SimpleLevel(gameRender=self.levelCanvasWidget)
        assert self.level is not None
        self.level.initPhysics()
        # pass the level to the levelCanvasWidget
        self.levelCanvasWidget.set_level(self.level)
        wmManager = self.createAndSelectWidget.wmManager
        #
        self.level.set_wm_manager(wmManager)
        wmManager.setLevel(level = self.level)
        # start the level (start physic simulation)
        # will schedule level.updateCaller
        self.level.level_widget = self
        self.level.start_level()
    def _kill_level(self):
        self.level.stop_level()
        self.level = None
    def level_finished(self):
        # presumably invoked by the level (it holds a level_widget
        # back-reference); switches back to the main menu -- verify caller
        self.screen_manager.current = 'main_menu_screen'
| [
"thorsten.beier@iwr.uni-heidelberg.de"
] | thorsten.beier@iwr.uni-heidelberg.de |
d45809a86a61969897ea0f9adf6b683a1732e968 | 79839ab9d00afabdaeb9e4ea0ffdb50db8fdde4e | /LudoGame/controller.py | 4b1be36912f187cbdb11bff8a34564d2e27f4406 | [
"Apache-2.0"
] | permissive | surajsinghbisht054/LudoGame | 85968d57d6432e9d56edab1b71f9eb967ef5d5be | 20352646230bc541208e93dfbf0818e42eb0c6b3 | refs/heads/master | 2021-01-11T13:54:12.932791 | 2018-01-09T10:18:32 | 2018-01-09T10:18:32 | 94,885,322 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 6,260 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Written By:
# S.S.B
# surajsinghbisht054@gmail.com
# bitforestinfo.blogspot.com
#
#
#
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
# Import Module
try:
	import Tkinter
except ImportError:
	# Python 3 renamed the module; keep using the Python 2 name locally.
	# (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
	import tkinter as Tkinter
import random
from views import Board
from models import TRACK, OVALS, TEAM, F_TRACK
# Frame Of Dice Function
class Dice(Tkinter.Frame):
	"""Dice panel: a label showing the current roll and a button (labelled
	with the team name) that rolls for the team whose turn it is."""
	def __init__(self,root, s):
		Tkinter.Frame.__init__(self,root)
		self.string = s  # shared IntVar holding the dice value
		self.string.set(6)
		self.create_widget()
	#
	def round(self):
		# roll 1-6 and disable the button until the turn is handed over
		# NOTE(review): canonical Tk state is "disabled"; verify "disable"
		self.string.set(random.randint(1,6))
		self.button.config(state="disable")
		return
	def create_widget(self):
		# value label on top, roll button (labelled with the team) below
		store = Tkinter.Label(self, textvariable=self.string,width=20)
		store.pack(fill='both')
		self.button = Tkinter.Button(self, text="Team A", command=self.round)
		self.button.pack(fill='both')
		return
# Frame Of ScoreBoard
class ScoreBoard(Tkinter.LabelFrame):
	"""Score panel: one coloured row per team plus its numeric score label
	(team_a / team_b), which Engine updates when a coin comes home."""
	def __init__(self, *args, **kwargs):
		Tkinter.LabelFrame.__init__(self, *args, **kwargs)
		self['padx']=20
		self['pady']=20
		self.create_label()
	# Creating Label
	def create_label(self):
		Tkinter.Label(self, text="Team A", bg="RoyalBlue1").grid(row=1, column=1)
		Tkinter.Label(self, text="Team B", bg="yellow2").grid(row=2, column=1)
		self.team_a=Tkinter.Label(self, text="0")
		self.team_a.grid(row=1, column=2)
		self.team_b=Tkinter.Label(self, text="0")
		self.team_b.grid(row=2, column=2)
		return
# Creating Main Engine
class Engine:
def __init__(self, canvas):
self.canvas = canvas
#self.ovals=[]
self.create_ovals()
self.turn = "A"
self.number = Tkinter.IntVar()
self.add_dice()
self.score_board()
# Add Dice Frame
def add_dice(self):
self.dice=Dice(self.canvas.master, self.number)
self.dice.pack(side='left')
return
#Add Score Board
def score_board(self):
self.score=ScoreBoard(self.canvas.master, text="Score")
self.score.pack(side='right')
return
# Creating Ovals
def create_ovals(self):
self.oval_identity=[]
for a,b,c,d in OVALS:
for i in b:
s=self.canvas.create_oval(*self.getcoordinates(i), fill=c, tag="C{}".format(i), activewidth=3)
self.oval_identity.append("C{}".format(i))
self.canvas.tag_bind(s, "<Button-1>", self.oval_triggers)
return
# Oval Binding Handler
def oval_triggers(self, event):
tag = self.selected_oval(event)
if tag and (self.number.get()!=0):
# Team A
if self.turn =="A":
if tag in TEAM[0]:
# TEAM A PLAYERS
self.team_a_moves(tag)
# Team B
else:
if tag in TEAM[1]:
# TEAM B PLAYERS
self.team_b_moves(tag)
return
# Uplifting Ovals
def uplifting(self, team):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
tag=str("C"+s)
if (d==team) and tag:
# uplift here
self.canvas.lift(tag)
return
# Team A Moves
def team_a_moves(self, tag):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
if str("C"+s)==tag:
step=self.number.get()
# Open
if (step==1 or step==6) and not self.gettrackbox(tag):
self.change_place(tag,a[0])
print "Change Place to Start"
else:
print "Check"
# In Track
t = self.gettrackbox(tag)
if t:
present_address = a.index(t)
print t, a[-2]
if t==a[-2]:
self.score.team_a.config(text=str(int(self.score.team_a.cget("text"))+1))
self.canvas.delete(tag)
try:
self.change_place(tag,a[present_address+step])
#self.check_turns()
except:
pass
t = self.gettrackbox(tag)
if t==a[-2]:
print "One Coin Clear"
# One Coin Clear
self.canvas.delete(tag)
else:
self.check_turns()
return
return
# Team B Moves
def team_b_moves(self, tag):
for a,b,c,d in OVALS:
# a = Track
# b = Station
# c = Color
# d = Team
for s in b:
if str("C"+s)==tag:
step=self.number.get()
# Open
if (step==1 or step==6) and not self.gettrackbox(tag):
self.change_place(tag,a[0])
print "Change Place to Start"
else:
print "Check"
# In Track
t = self.gettrackbox(tag)
if t:
present_address = a.index(t)
print t, a[-2]
if t==a[-2]:
self.score.team_b.config(text=str(int(self.score.team_a.cget("text"))+1))
self.canvas.delete(tag)
try:
self.change_place(tag,a[present_address+step])
#self.check_turns()
except:
pass
t = self.gettrackbox(tag)
if t==a[-2]:
print "One Coin Clear"
# One Coin Clear
self.canvas.delete(tag)
else:
self.check_turns()
return
else:
print "not selected"
return
# Shape Movement Handler
def change_place(self, tag, track):
a,b,c,d=self.getcoordinates(tag)
e,f,g,h=self.getcoordinates(track)
self.canvas.move(tag, g-c, h-d)
self.check_turns()
return
# Get Square Shape Tag on Which Coin Shape Is Lying
def gettrackbox(self, tag):
for i in TRACK:
if self.getcoordinates(i)==self.getcoordinates(tag):
return i
for l in F_TRACK:
for i in l:
if self.getcoordinates(i)==self.getcoordinates(tag):
return i
return
# Selected Oval Tag Return
def selected_oval(self, event=None):
x , y = event.x, event.y
for i in self.oval_identity:
x1,y1,x2,y2 = self.getcoordinates(i)
if (x1<=x) and (x<=x2) and (y1<=y) and (y<=y2):
return i
# Team Turn handlers
def check_turns(self):
self.dice.button.config(state="normal")
self.number.set(0)
if self.turn == "A":
self.turn = "B"
self.dice.button.config(text="Team B")
self.uplifting("B")
return
else:
self.turn = "A"
self.dice.button.config(text="Team A")
self.uplifting("A")
return
# Get Tag Coordinates In Canvas
    def getcoordinates(self, tags):
        """Return the canvas coordinates for *tags* (delegates to Canvas.coords)."""
        return self.canvas.coords(tags)
# Main Trigger
# script entry point: build the Tk window, the board and the engine
if __name__=="__main__":
    root=Tkinter.Tk()
    d=Board(root)  # Board is presumably defined earlier in this file -- not visible here
    d.pack()
    e=Engine(d)  # Engine likewise appears to drive the game -- confirm upstream
    root.mainloop()
| [
"surajsinghbisht054@gmail.com"
] | surajsinghbisht054@gmail.com |
95f27ad073deba4419d29e314fd828507e60ea62 | 0cb46d3c14ed82627ca7ffb4de2a80e9c3013c50 | /model/base.py | 21accadb14e32119bdef0da1932da4ef01752672 | [] | no_license | five3/zyw | 40533cd6eb1d17f98c055893d9cdfba30fa69696 | 7fcd4d7a4a877c907d59df153e42360fc616f9a5 | refs/heads/master | 2020-12-24T08:55:11.207216 | 2017-08-18T13:12:03 | 2017-08-18T13:12:03 | 38,229,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#coding=utf-8
from model.db.database import *
class base:
    """Minimal ActiveRecord-style base class.

    The subclass name doubles as the table name, and every operation is
    delegated to a fresh ``database`` handle.
    """
    def getTable(self):
        # convention: class name == table name
        return type(self).__name__
    def getDb(self):
        # new database handle per call
        return database()
    def insert(self, data):
        return self.getDb().insert(self.getTable(), data)
    def delete(self, condition):
        return self.getDb().delete(self.getTable(), condition)
    def getList(self, colums, condition, orders='', limits=''):
        return self.getDb().getList(self.getTable(), colums, condition, orders, limits)
    def getOne(self, colums, condition, orders='', limits=''):
        return self.getDb().getOne(self.getTable(), colums, condition, orders, limits)
    def update(self, data, condition):
        return self.getDb().update(self.getTable(), data, condition)
    def execute(self, sql):
        return self.getDb().execute(sql)
    def executeInsert(self, sql):
        return self.getDb().executeInsert(sql)
    def fetchOne(self, sql):
        return self.getDb().fetchOne(sql)
    def fetchAll(self, sql):
        return self.getDb().fetchAll(sql)
| [
"five3@163.com"
] | five3@163.com |
1afd17c2c1370a54520917228db7e4bd9cc00e5c | 1a114943c92a5db40034470ff31a79bcf8ddfc37 | /stdlib_exam/strop-example-1.py | d9e343e3aa7690e690e6a82f444da61173972665 | [] | no_license | renwl/mylinux | 1924918599efd6766c266231d66b2a7ed6f6cdd1 | 0602fc6d2b0d254a8503e57310f848fc3e1a73b4 | refs/heads/master | 2020-07-10T22:12:03.259349 | 2017-01-02T12:32:04 | 2017-01-02T12:32:04 | 66,467,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import strop
import sys
# assuming we have an executable named ".../executable", add a
# directory named ".../executable-extra" to the path
# NOTE: str.lower replaces strop.lower -- the strop module was deprecated in
# Python 2 and removed in Python 3; string methods are the documented
# replacement, and behaviour here is identical.
if sys.executable.lower()[-4:] == ".exe":
    extra = sys.executable[:-4] # windows: strip the ".exe" suffix
else:
    extra = sys.executable
sys.path.insert(0, extra + "-extra")
import mymodule
| [
"wenliang.ren@quanray.com"
] | wenliang.ren@quanray.com |
4513fb0f5b0c0dacd59247201f0afd9c4f9b4c5b | 05e0429c617209530b212987f169640e6b75c8f0 | /Chapter 8/powerballLottery.py | 7fdd170731091cdea9226f01a1bb878352f84a87 | [] | no_license | nishantchaudhary12/Starting-with-Python | ac33baf01e3cf869cc1cf7f97991ecda4ee893bd | 9031fa64b19698c060d134cb0416812db01f1f7b | refs/heads/master | 2020-04-24T00:06:15.177647 | 2019-05-28T23:12:16 | 2019-05-28T23:12:16 | 171,555,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | #powerball lottery
def records(file):
    """Parse one drawing per line ("n1 n2 n3 n4 n5 pb") from *file*.

    Returns a tuple: (white-ball frequency dict, powerball frequency dict,
    recency-ordered list of number strings -- front = most overdue).
    """
    number_dict = dict()
    pow_num_dict = dict()
    overdue_num = list()
    line = file.readline().rstrip('\n')
    while line != '':
        tokens = line.split(' ')
        # recency tracking: move every drawn number (string form) to the end
        for token in tokens:
            if token in overdue_num:
                overdue_num.remove(token)
            overdue_num.append(token)
        # the first six fields become ints: five white balls + the powerball
        for i in range(6):
            tokens[i] = int(tokens[i])
        for i in range(5):
            number_dict[tokens[i]] = number_dict.get(tokens[i], 0) + 1
        power_num = tokens[-1]
        pow_num_dict[power_num] = pow_num_dict.get(power_num, 0) + 1
        line = file.readline().rstrip('\n')
    return number_dict, pow_num_dict, overdue_num
def sort_dict(number_dict):
    """Return the dict's (number, count) pairs sorted by ascending count."""
    return sorted(number_dict.items(), key=lambda item: item[1])
def most_common(new_sorted_list):
    """Print the ten most common numbers with their counts, highest first."""
    print('Most Common Numbers with frequencies: ')
    # walk the ascending-sorted list from the back; the bound is -11 (the
    # original's -10 printed only nine entries, inconsistent with
    # least_common's ten)
    for i in range(-1, -11, -1):
        print(new_sorted_list[i])
def least_common(new_sorted_list):
    """Print the ten least common numbers with their counts, lowest first."""
    print('\n')
    print('Least Common Numbers with frequencies: ')
    for idx in range(10):
        print(new_sorted_list[idx])
def overdue(overdue_num_list):
    """Print the ten most overdue numbers (front of the recency list)."""
    print('\n')
    print('Most overdue numbers(ordered from most to least):')
    for idx in range(10):
        print(overdue_num_list[idx])
def frequency(number_dict, pow_num_dict):
    """Print the draw count of every white ball (1-69) and powerball (1-26)."""
    print('\n')
    print('Number frequencies:')
    for i in range(1, 70):
        # .get avoids a KeyError for a number that was never drawn
        print(i, '=', number_dict.get(i, 0))
    print('Powerball numbers:')
    for i in range(1, 27):
        print(i, '=', pow_num_dict.get(i, 0))
def main():
    """Entry point: load the Powerball drawing history and print every report."""
    # context manager guarantees the file is closed even if parsing raises
    # (the original's explicit close() was skipped on an exception)
    with open('pbnumbers.txt', 'r') as file:
        number_dict, pow_num_dict, overdue_num_list = records(file)
    new_sorted_list = sort_dict(number_dict)
    most_common(new_sorted_list)
    least_common(new_sorted_list)
    overdue(overdue_num_list)
    frequency(number_dict, pow_num_dict)
main() | [
"chaudharynishant025@gmail.com"
] | chaudharynishant025@gmail.com |
a62a9c5b492057a1f8eaf18d3667012243bed748 | ad080bd1612b980490ef2d1b61647cbc6beddf5d | /my_game/administrator/asteroid_generation.py | bface5faa562653e2b5a47965d18e18a78871d13 | [] | no_license | rokealva83/my_game | 8f915076986144234950aa4443e8bc51ad019664 | 76ecc1dbf60c7f93621ddca66d62d5fea2826d0e | refs/heads/master | 2020-12-24T17:54:59.491881 | 2016-05-10T20:06:53 | 2016-05-10T20:06:53 | 29,264,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,035 | py | # -*- coding: utf-8 -*-
import random
from django.shortcuts import render
from my_game.models import System, AsteroidField
# функция генерации астероидных полей
def asteroid_generation(request):
    """Admin view: generate randomly placed asteroid fields.

    On POST with an ``add_button`` key, creates the requested number of
    ``AsteroidField`` rows with randomized position, size, resource/mineral
    split and artifact count, then renders the generation page.
    """
    if request.method == "POST" and request.POST.get('add_button') is not None:
        count = int(request.POST.get('asteroid', None))
        for _ in range(count):
            # field coordinates: bounded by the known systems, +/- 10 margin
            x_min = System.objects.filter().order_by('x').first().x - 10
            x_max = System.objects.filter().order_by('x').last().x + 10
            y_min = System.objects.filter().order_by('y').first().y - 10
            y_max = System.objects.filter().order_by('y').last().y + 10
            x = round(random.uniform(x_min, x_max), 3)
            y = round(random.uniform(y_min, y_max), 3)
            z = round(random.uniform(-30, 30), 3)
            # field size: rare huge fields, mostly medium and small ones
            k = random.random()
            if 0.07 > k:
                size = random.randint(3000000, 5000000)
            elif 0.07 <= k <= 0.2:
                size = random.randint(1000000, 3000000)
            elif 0.2 < k < 0.8:
                size = random.randint(500000, 1000000)
            else:
                size = random.randint(100000, 500000)
            # artifact count in the field, weighted toward few or none
            k = random.random()
            if 0.02 > k:
                artifact = 5
            elif 0.02 <= k <= 0.1:
                artifact = 4
            elif 0.1 < k <= 0.2:
                artifact = 3
            elif 0.2 < k <= 0.4:
                artifact = 2
            elif 0.4 < k <= 0.7:
                artifact = 1
            else:
                artifact = 0
            # ore enrichment level, split into resource and mineral shares
            k = random.random()
            if 0.05 > k:
                ore = round(random.uniform(0.8, 0.95), 3)
            elif 0.05 <= k <= 0.35:
                ore = round(random.uniform(0.799, 0.8), 3)
            else:
                ore = round(random.uniform(0.6, 0.799), 3)
            mineral_koef = round(random.uniform(0.07, 0.2), 3) * ore
            resource_koef = ore - mineral_koef
            koef_res_1 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
            koef_res_2 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
            koef_res_3 = round(round(random.uniform(0.2, 0.3), 3) * resource_koef, 3)
            koef_res_4 = round(resource_koef - (koef_res_1 + koef_res_2 + koef_res_3), 3)
            koef_min_1 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
            koef_min_2 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
            koef_min_3 = round(round(random.uniform(0.2, 0.3), 3) * mineral_koef, 3)
            koef_min_4 = round(mineral_koef - (koef_min_1 + koef_min_2 + koef_min_3), 3)
            # if a field already exists at these coordinates, reuse its size
            # and composition so the new row matches (as before, a duplicate
            # row is still created -- behaviour preserved)
            existing = AsteroidField.objects.filter(x=x, y=y, z=z).first()
            if existing:
                size = existing.size
                koef_res_1 = existing.koef_res_1
                koef_res_2 = existing.koef_res_2
                koef_res_3 = existing.koef_res_3
                koef_res_4 = existing.koef_res_4
                koef_min_1 = existing.koef_min_1
                koef_min_2 = existing.koef_min_2
                koef_min_3 = existing.koef_min_3
                koef_min_4 = existing.koef_min_4
            # single construction replaces the two identical copy-pasted
            # branches of the original code
            AsteroidField(
                x=x,
                y=y,
                z=z,
                size=size,
                koef_res_1=koef_res_1,
                koef_res_2=koef_res_2,
                koef_res_3=koef_res_3,
                koef_res_4=koef_res_4,
                koef_min_1=koef_min_1,
                koef_min_2=koef_min_2,
                koef_min_3=koef_min_3,
                koef_min_4=koef_min_4,
                artifact=artifact
            ).save()
    message = 'Поля сгенерированы'
    output = {'message': message}
    return render(request, "admin/generation.html", output)
| [
"tolik20002@bigmir.net"
] | tolik20002@bigmir.net |
6df00727a21728a05316441890050263c914ffd5 | f47626fd3b236dd42f335952a3f3edf55f7e6075 | /region-app/app/tools/constants.py | 1b3a6661296e13c1420a27b4a2ce3b33edcd6574 | [] | no_license | reritom/region-app-example | da541afee5faf04bac65cceda15b6d2c265b7e79 | d0f4c2507c62755f8bbdc1400fb2fc538646ee76 | refs/heads/master | 2022-12-02T20:04:32.028450 | 2020-08-15T08:19:46 | 2020-08-15T08:19:46 | 287,619,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | # Constants related to the application should be located here
# Currently we only have one constant, used by the RegionController; keeping
# application constants in this module gives them a single home as we scale.
COUNTRY_FRANCE = "France"
| [
"reikudjinn@gmail.com"
] | reikudjinn@gmail.com |
74a66ecda7c167ce393132bd9d092d8ca7413ac3 | 1cb97b0fe8b275efd540716cb6e742fc44e927bf | /setup.py | 3b851f54849494044ef8725faa00a353b62ae451 | [
"MIT"
] | permissive | khushjammu/rljax | 31e4d0f9c6aa57a0a07a35f7f8854cc78360ae5a | f2d5e81240d99187fcb625d2caa630c3c7deecfc | refs/heads/master | 2023-06-27T17:15:43.437065 | 2021-07-30T16:55:47 | 2021-07-30T16:55:47 | 391,125,669 | 0 | 0 | MIT | 2021-07-30T16:18:23 | 2021-07-30T16:18:22 | null | UTF-8 | Python | false | false | 2,890 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import Command, find_packages, setup
# Package metadata.
NAME = "rljax"
DESCRIPTION = "A collection of RL algorithms written in JAX."
URL = "https://github.com/ku2482/rljax"
EMAIL = "watanabe.toshiki@outlook.jp"
AUTHOR = "Toshiki Watanabe"
REQUIRES_PYTHON = ">=3.6.0"
VERSION = "0.0.4"

here = os.path.abspath(os.path.dirname(__file__))

# Runtime dependencies come from requirements.txt. The context manager closes
# the file handle (the original open(...).read() left it to the GC).
with open(os.path.join(here, "requirements.txt")) as f:
    REQUIRED = f.read().splitlines()
EXTRAS = {}

# Long description: README.md when present, otherwise the short description.
try:
    with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
        long_description = "\n" + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION

# Resolve the package version: the pinned VERSION above, or the package's
# __version__.py when VERSION is left empty.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, "__version__.py")) as f:
        exec(f.read(), about)
else:
    about["__version__"] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""

    # shown by `python setup.py --help-commands`
    description = "Build and publish the package."
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        # required by the Command interface; nothing to initialize
        pass

    def finalize_options(self):
        # required by the Command interface; nothing to validate
        pass

    def run(self):
        # Build, upload to PyPI, then tag and push the release -- each step
        # is delegated to a shell command.
        try:
            self.status("Removing previous builds…")
            # stale artifacts in dist/ would otherwise be re-uploaded below
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # dist/ does not exist yet -- nothing to clean
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload dist/*")
        self.status("Pushing git tags…")
        os.system("git tag v{0}".format(about["__version__"]))
        os.system("git push --tags")
        sys.exit()
# Packaging configuration handed to setuptools.
setup(
    name=NAME,
    version=about["__version__"],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # ship only the rljax package itself, excluding tests/examples
    packages=[package for package in find_packages() if package.startswith("rljax")],
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    # registers `python setup.py upload` (see UploadCommand above)
    cmdclass={
        "upload": UploadCommand,
    },
)
| [
"kuboy2482@gmail.com"
] | kuboy2482@gmail.com |
ccc0c4a0baf39caf1e00f362fe8192eb9b77a0ee | 0635da394505415471efd89d542f225cae3e668b | /networkapi/api_environment_vip/facade.py | 38fb69529f25b6633b6b2dfd9aa6273ac730bec2 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | enterstudio/GloboNetworkAPI | 5e2fbe7ef3f4a70aaa0ae474a0c5493e5568cb24 | ea8eebc0337636f9250e628cc392514934db8edd | refs/heads/master | 2023-07-25T20:36:40.717818 | 2017-01-27T20:22:40 | 2017-01-27T20:22:40 | 82,249,922 | 0 | 0 | Apache-2.0 | 2023-09-04T21:56:15 | 2017-02-17T02:48:54 | Python | UTF-8 | Python | false | false | 2,827 | py | # -*- coding: utf-8 -*-
import logging
from networkapi.ambiente.models import EnvironmentVip
from networkapi.requisicaovips.models import OptionVip
from networkapi.requisicaovips.models import OptionVipEnvironmentVip
log = logging.getLogger(__name__)
def get_option_vip_by_environment_vip_ids(environment_vip_ids):
    """Return, for each environment vip id, its ordered
    OptionVipEnvironmentVip queryset.

    :param environment_vip_ids: ids list of environment vip
    :example: [<environment_vip_id>,...]
    """
    return [
        OptionVipEnvironmentVip.objects.filter(
            environment=env_id
        ).order_by(
            'option__tipo_opcao',
            'option__nome_opcao_txt'
        )
        for env_id in environment_vip_ids
    ]
def get_option_vip_by_environment_vip_type(search_list):
    """Return option vip querysets filtered by environment vip id and
    option vip type.

    :param search_list: list of dicts shaped
        {environment_vip_id: <id>, type_option: <type>}
    """
    return [
        OptionVip.objects.filter(
            optionvipenvironmentvip__environment__id=entry['environment_vip_id'],
            tipo_opcao=entry['type_option'])
        for entry in search_list
    ]
def get_type_option_vip_by_environment_vip_ids(environment_vip_ids):
    """Return, for each environment vip id, the distinct option-vip type
    names associated with it.

    :param environment_vip_ids: ids list of environment vip
    """
    result = list()
    for env_id in environment_vip_ids:
        rows = OptionVip.objects.filter(
            optionvipenvironmentvip__environment__id=env_id
        ).values('tipo_opcao').distinct()
        result.append([row['tipo_opcao'] for row in rows])
    return result
def get_environmentvip_by_ids(environment_vip_ids):
    """Resolve every id via get_environmentvip_by_id (which raises for a
    missing row) and return the matching EnvironmentVip queryset."""
    resolved_ids = [
        get_environmentvip_by_id(env_id).id for env_id in environment_vip_ids
    ]
    return EnvironmentVip.objects.filter(id__in=resolved_ids)
def get_environmentvip_by_id(environment_vip_id):
    """Fetch a single EnvironmentVip by primary key (delegates to get_by_pk)."""
    return EnvironmentVip.get_by_pk(environment_vip_id)
def update_environment_vip(environment_vip):
    """Persist the 'conf' value from *environment_vip* (a dict carrying 'id'
    and 'conf') onto the matching EnvironmentVip row; return the row."""
    record = get_environmentvip_by_id(environment_vip.get('id'))
    record.conf = environment_vip.get('conf')
    record.save()
    return record
| [
"ederson.brilhante@corp.globo.com"
] | ederson.brilhante@corp.globo.com |
60417ad7a9157ae72a147b6ef908a2afe7e87952 | b523a7d337ce60e1e1ca779be396eeeaab786b7d | /Python/leetcode_075_sort_colors_2nd.py | 41d2d71cf4a12c7dd6fea12fced690b2be118682 | [] | no_license | bakker4444/Algorithms | 3a65f83fde6a22a82646f6ee463a487f889291d0 | 453e92109494c962c36280cd0d32fb28aa771615 | refs/heads/master | 2021-05-25T11:56:18.424622 | 2019-05-08T22:07:15 | 2019-05-08T22:07:15 | 127,337,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | ## 75. Sort Colors
#
# Given an array with n objects colored red, white or blue, sort them in-place so that objects of the same color are adjacent, with the colors in the order red, white and blue.
# Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.
# Note: You are not suppose to use the library's sort function for this problem.
#
# Example:
# Input: [2,0,2,1,1,0]
# Output: [0,0,1,1,2,2]
#
# Follow up:
# - A rather straight forward solution is a two-pass algorithm using counting sort. First, iterate the array counting number of 0's, 1's, and 2's, then overwrite array with total number of 0's, then 1's and followed by 2's.
# - Could you come up with a one-pass algorithm using only constant space?
##
### counting solution
## time complexity : O(2n)
## space complexity : O(1)
class Solution1(object):
    """Two-pass counting sort: tally each colour, then overwrite in order."""
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        tally = [0, 0, 0]
        for colour in nums:
            tally[colour] += 1
        write = 0
        for colour in (0, 1, 2):
            for _ in range(tally[colour]):
                nums[write] = colour
                write += 1
        return nums
### swap solution
## time complexity : O(n)
## space complexity : O(1)
class Solution2(object):
    """One-pass Dutch-national-flag partition with three pointers."""
    def sortColors(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            value = nums[mid]
            if value == 0:
                # swap the 0 into the growing left region
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif value == 2:
                # swap the 2 into the shrinking right region; re-examine mid
                nums[high], nums[mid] = nums[mid], nums[high]
                high -= 1
            else:
                mid += 1
        return nums
import unittest
class Test(unittest.TestCase):
    """Checks both implementations against the same fixtures."""
    def test_sortColors(self):
        cases = [
            ([2, 0, 2, 1, 1, 0], [0, 0, 1, 1, 2, 2]),
            ([2, 0, 1], [0, 1, 2]),
            ([1, 0, 2], [0, 1, 2]),
            ([1, 0], [0, 1]),
            ([1, 0, 1], [0, 1, 1]),
            ([1, 2, 1], [1, 1, 2]),
        ]
        counting = Solution1()
        dutch = Solution2()
        for given, expected in cases:
            # NOTE: sortColors mutates `given` in place, so the second call
            # receives the already-sorted list -- exactly as the original
            # loop over test_input did.
            self.assertEqual(counting.sortColors(given), expected)
            self.assertEqual(dutch.sortColors(given), expected)
unittest.main()
| [
"bakker4444@gmail.com"
] | bakker4444@gmail.com |
635ff42ad41b2d8a6c0bda55e9fabefb122925cf | ba59629d53d9500ff620387d8bcbadd639796719 | /code_examples/pytorch/mnist/test_mnist.py | d860a89a1604f92dba036e6cf333c20f03eb83fd | [
"MIT"
] | permissive | Joejiong/examples-2 | 7d423e9bd4e68b29613903158a50d391ffc72ef8 | 553b90b57d2ed8c996c74cbe5d48bb2b7dba5a88 | refs/heads/master | 2023-03-08T02:54:32.583139 | 2021-01-22T18:29:13 | 2021-01-22T18:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import inspect
import os
import subprocess
import unittest
import torch
from mnist_poptorch import accuracy
def run_poptorch_mnist(**kwargs):
    """Run mnist_poptorch.py from this file's directory; return its stdout.

    *kwargs* is accepted for interface compatibility but unused.
    """
    script_dir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
    cmd = ["python3", 'mnist_poptorch.py']
    raw = subprocess.check_output(cmd, cwd=script_dir)
    return raw.decode("utf-8")
class TestPoptorchMNIST(unittest.TestCase):
    """Tests for the PopTorch MNIST example."""

    def test_accuracy_calculation(self):
        # expected 75: presumably 3 of the 4 prediction rows argmax to their
        # label (row 2's largest value, 0.6, is at index 0, not 2)
        pred = torch.tensor([[0.9, 0.05, 0.05],
                             [0.1, 0.5, 0.4],
                             [0.6, 0.01, 0.49],
                             [0.09, 0.11, 0.8]])
        label = torch.tensor([0, 1, 2, 2])
        acc = accuracy(pred, label)
        self.assertEqual(acc, 75)

    def test_test_final_training_accuracy(self):
        # end-to-end: run the training script and parse the final accuracy
        # out of a line of the form "Accuracy on test set: NN.NN%"
        out = run_poptorch_mnist()
        final_acc = 0.0
        for line in out.split('\n'):
            if line.find('Accuracy on test set:') != -1:
                # strip the trailing "%" before converting
                final_acc = float(line.split(": ")[-1].strip()[:-1])
                break
        self.assertGreater(final_acc, 90)
        self.assertLess(final_acc, 99.9)
| [
"philb@graphcore.ai"
] | philb@graphcore.ai |
ecd1837e2da77b2be6d2f8094e2424221ab60afb | 7a7a818f482b4781e15948bb64ea6ae79a631175 | /deephyper/benchmark/nas/candleP1B3/data_utils.py | 3e360a5f557773f482433a7ca850a8b4ea64eb0c | [
"BSD-2-Clause"
] | permissive | BethanyL/deephyper | 85157a856b8a35a180d5b56e2b00321ea384ebcc | 42cbb846f2815223c6843e92e234c3b0a765aeb6 | refs/heads/master | 2020-04-11T00:36:49.519366 | 2019-01-13T16:35:11 | 2019-01-13T16:35:11 | 161,391,202 | 1 | 1 | null | 2018-12-11T20:41:41 | 2018-12-11T20:41:41 | null | UTF-8 | Python | false | false | 4,665 | py | from __future__ import absolute_import
from __future__ import print_function
import tarfile
import os
import sys
import shutil
import hashlib
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
from deephyper.benchmark.candleP1B3Nas.generic_utils import Progbar
# Under Python 2, 'urlretrieve' relies on FancyURLopener from legacy
# urllib module, known to have issues with proxy management
if sys.version_info[0] == 2:
    # Python 2 fallback: re-implement urlretrieve on top of urlopen, because
    # the legacy FancyURLopener-based version mishandles proxies (see above).
    def urlretrieve(url, filename, reporthook=None, data=None):
        def chunk_read(response, chunk_size=8192, reporthook=None):
            # NOTE(review): assumes the server always sends Content-Length;
            # a missing header would make .get() return None and crash here.
            total_size = response.info().get('Content-Length').strip()
            total_size = int(total_size)
            count = 0
            while 1:
                chunk = response.read(chunk_size)
                count += 1
                if not chunk:
                    # final callback reports completion, then stop
                    reporthook(count, total_size, total_size)
                    break
                if reporthook:
                    reporthook(count, chunk_size, total_size)
                yield chunk

        response = urlopen(url, data)
        with open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
else:
    from six.moves.urllib.request import urlretrieve
def get_file(fname, origin, untar=False,
             md5_hash=None, cache_subdir='common'):
    '''Downloads a file from a URL if it not already in the cache.

    Passing the MD5 hash will verify the file after download as well as if it is already present in the cache.

    # Arguments
        fname: name of the file
        origin: original URL of the file
        untar: boolean, whether the file should be decompressed
        md5_hash: MD5 hash of the file for verification
        cache_subdir: directory being used as the cache

    # Returns
        Path to the downloaded file
    '''
    # cache root: <this file's directory>/../Data/<cache_subdir>
    file_path = os.path.dirname(os.path.realpath(__file__))
    datadir_base = os.path.expanduser(os.path.join(file_path, '..', 'Data'))
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    # for tarballs the cached artifact is "<fname>.tar.gz"; the extracted
    # path (without the suffix) is what gets returned
    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)

    download = False
    if os.path.exists(fpath):
        # file found; verify integrity if a hash was provided
        if md5_hash is not None:
            if not validate_file(fpath, md5_hash):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated.')
                download = True
    else:
        download = True

    if download:
        print('Downloading data from', origin)
        # module-level progbar shared with the nested progress callback
        global progbar
        progbar = None

        def dl_progress(count, block_size, total_size):
            global progbar
            if progbar is None:
                progbar = Progbar(total_size)
            else:
                progbar.update(count * block_size)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
        except (Exception, KeyboardInterrupt) as e:
            # remove partial downloads so a retry starts clean
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        progbar = None
        print()

    if untar:
        # extract only once; clean up a partial extraction on failure
        if not os.path.exists(untar_fpath):
            print('Untarring file...')
            tfile = tarfile.open(fpath, 'r:gz')
            try:
                tfile.extractall(path=datadir)
            except (Exception, KeyboardInterrupt) as e:
                if os.path.exists(untar_fpath):
                    if os.path.isfile(untar_fpath):
                        os.remove(untar_fpath)
                    else:
                        shutil.rmtree(untar_fpath)
                raise
            tfile.close()
        return untar_fpath

    print()
    return fpath
def validate_file(fpath, md5_hash):
    '''Validates a file against a MD5 hash

    # Arguments
        fpath: path to the file being validated
        md5_hash: the MD5 hash being validated against

    # Returns
        Whether the file is valid
    '''
    hasher = hashlib.md5()
    with open(fpath, 'rb') as f:
        # hash in fixed-size chunks instead of reading the whole file into
        # memory -- the cached artifacts can be large tarballs
        for chunk in iter(lambda: f.read(65536), b''):
            hasher.update(chunk)
    # hexdigest() is already a str; compare directly and return the bool
    return hasher.hexdigest() == str(md5_hash)
| [
"romainegele@gmail.com"
] | romainegele@gmail.com |
6376c69e303c4688ca205d4b3661e35929db601a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Yfm3h3nT3apARd4gC_13.py | fdf5b9abdb03a54e6ec2254ddc78f755dd346d28 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | """
Create a function that takes a list consisting of dice rolls from 1-6. Return
the sum of your rolls with the following conditions:
1. If a 1 is rolled, that is bad luck. The next roll counts as 0.
2. If a 6 is rolled, that is good luck. The next roll is multiplied by 2.
3. The list length will always be 3 or higher.
### Examples
rolls([1, 2, 3]) ➞ 4
# The second roll, 2, counts as 0 as a result of rolling 1.
rolls([2, 6, 2, 5]) ➞ 17
# The 2 following the 6 was multiplied by 2.
rolls([6, 1, 1]) ➞ 8
# The first roll makes the second roll worth 2, but the
# second roll was still 1 so the third roll doesn't count.
### Notes
Even if a 6 is rolled after a 1, 6 isn't summed but the 6's "effect" still
takes place.
"""
def rolls(lst):
    """Sum a list of dice rolls with luck rules: the roll after a 1 counts
    as 0, and the roll after a 6 counts double."""
    total = lst[0]
    for prev, cur in zip(lst, lst[1:]):
        if prev == 1:
            continue  # bad luck: this roll counts as 0
        total += cur * 2 if prev == 6 else cur
    return total
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e7e4db5403fa269998ca5914755f189c589de9c1 | 3c847175b995991414bda789eabda8c9b150af4a | /raspberry_pi_unit/opencv_ball_tracking.py | 1eb8096cb52c8b781bf8e6e7eb78f2df29b566d3 | [] | no_license | DamoM73/10-Digital-Technologies | 4ed0149b1c94eecf31e4f6060d79219ad8690832 | 5bf20cacf2b323bee8fcf1ee2260808e86d8f7c2 | refs/heads/master | 2023-05-01T07:02:29.146224 | 2021-05-20T03:17:42 | 2021-05-20T03:17:42 | 294,894,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | # import necessary packages
from collections import deque
from imutils.video import VideoStream
import numpy as np
import argparse
import cv2
import imutils
import time
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# lower/upper bounds of the "red" ball in HSV colour space, and a bounded
# deque of tracked centre points (so the drawn trail has finite length)
redLower = (0, 100, 80)
redUpper = (10, 255, 255)
pts = deque(maxlen=args["buffer"])

# grab a reference to the webcam (no --video) or to the video file
if not args.get("video", False):
    # BUG FIX: the VideoStream keyword is `src`, not `scr` -- the typo
    # raised a TypeError before any frame was read
    vs = VideoStream(src=0).start()
else:
    vs = cv2.VideoCapture(args["video"])

# allow the camera or video file to warm up
time.sleep(2.0)

# keep looping over frames
while True:
    # grab the current frame
    frame = vs.read()

    # VideoCapture.read() returns (grabbed, frame); VideoStream returns frame
    frame = frame[1] if args.get("video", False) else frame

    # a None frame from a video file means we reached its end
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV colour space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # mask for "red", then erode/dilate to remove small blobs
    mask = cv2.inRange(hsv, redLower, redUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the ball centre
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # largest contour -> minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # BUG FIX: `in(...)` was a typo for `int(...)` (a SyntaxError)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only draw if the detection is reasonably large
        if radius > 10:
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            # BUG FIX: `centre` was never defined -- the variable is `center`
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue (None when nothing was detected this frame)
    pts.appendleft(center)

    # draw the fading trail between consecutive tracked points
    for i in range(1, len(pts)):
        # skip segments with a missing endpoint
        if pts[i - 1] is None or pts[i] is None:
            continue
        # thinner lines for older points
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame and poll for the quit key
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break

# stop the camera stream, or release the video file handle
if not args.get("video", False):
    vs.stop()
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
"damomurtagh@gmail.com"
] | damomurtagh@gmail.com |
6c15f08b036a6a77f7b91f708de56490fb8b681c | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc070/C/answers/504659_wi.py | d7d7a14e475b780741e10f7ddca3d9fa61389074 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | import sys
stdin = sys.stdin
ni = lambda: int(ns())  # read one line as an int
na = lambda: list(map(int, stdin.readline().split()))  # read a line of ints (unused below)
ns = lambda: stdin.readline()  # read one raw line
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b > 0:
        a, b = b, a % b
    return a
# read N values and fold them into their least common multiple:
# lcm(a, b) = a // gcd(a, b) * b  (divide first to limit intermediate growth)
n = ni()
g = 0
for i in range(n):
    v = ni()
    if g == 0:
        g = v  # seed the accumulator with the first value
    else:
        g = g//gcd(g,v)*v
print(g)
"kojinho10@gmail.com"
] | kojinho10@gmail.com |
509671c52ef3aab37220d37a55e97f13b9680c63 | 41f7085fffd12bb53222fdba00d033a43b9d7081 | /users/apps.py | b776938c9d4d9c5d420b841f16abfb29baea989c | [] | no_license | arifgafizov/online_store | b852e1bd32149268bbed9159f1037561a3d7e9a0 | 25c32f0ae65469e904509772d414a79a743ae31b | refs/heads/master | 2023-08-01T06:28:47.302377 | 2021-09-18T18:30:28 | 2021-09-18T18:30:28 | 345,300,892 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """AppConfig for the ``users`` application."""
    name = 'users'
    def ready(self):
        # Imported for its side effect: registers the post_save signal
        # receiver once the app registry has finished loading.
        from .signals import post_save_signup
| [
"agafizov@gmail.com"
] | agafizov@gmail.com |
10c4c8bcbc5ddcf5716aa35f9122b26965c9d62e | 2d80791a21a049243dd2bf7dd95a46c4d4b2510b | /domains/algorithms/warmup/TimeConversion.py | a7612996332e029de98c463eec1bd8e7fe9209aa | [] | no_license | jayrav13/jr-hackerrank | 909346d101fdf08a54ff75ec2ee39c90e661b251 | b7e0130fdd1c2eb4436871be3255200eac8ca3d9 | refs/heads/master | 2021-01-15T15:36:16.772814 | 2016-11-21T18:59:15 | 2016-11-21T18:59:15 | 48,657,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | #!/bin/python
import sys
time = raw_input().strip().split(':')
if time[2][-2:] == 'PM' and int(time[0]) < 12:
time[0] = str(int(time[0]) + 12)
if time[2][-2:] == 'AM':
if time[0] == '12':
time[0] = '00'
if int(time[0]) == 24:
time[0] = "00"
time[2] = time[2][:-2]
print ":".join(time)
| [
"jayrav13@gmail.com"
] | jayrav13@gmail.com |
0742aaed6d2a00c0265fa9c84921f8017affaa93 | 6630694f401f6f475dd81bb01ff9368db844ccff | /configs/_base_/models/mobilevit/mobilevit_s.py | f6a4e05d2c8f1fc4f7b6a6b5953ff52cdfc7a2c6 | [
"Apache-2.0"
] | permissive | open-mmlab/mmpretrain | 98a4d6b3bb747efc3d50decebf84fc3ffa41076a | d2ccc44a2c8e5d49bb26187aff42f2abc90aee28 | refs/heads/main | 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 | Apache-2.0 | 2023-09-08T08:01:40 | 2020-07-09T16:25:04 | Python | UTF-8 | Python | false | false | 339 | py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='MobileViT', arch='small'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
dd98d05f369e5f312af1cbd5ef5826092fa4e837 | 24a9f32ae09cb545caf9984cedfad3ff89c0aad0 | /supportsystem/admin.py | c6edd541c0ab347389ef7d8782acc547463ce7be | [] | no_license | Jordonguy/TechCPRSupportSystem | 8bf81708ee3873795a76ad9ff5f79422c9a64d82 | e035fc0cd7502a726d8946f17e4d025ce3a83988 | refs/heads/master | 2020-04-27T13:43:39.318886 | 2019-10-24T23:48:15 | 2019-10-24T23:48:15 | 174,381,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.contrib import admin
# Register your models here.
from .models import Role, Company, Post, Comment, ExtendedUserProfile
admin.site.register(ExtendedUserProfile)
admin.site.register(Role)
admin.site.register(Company)
admin.site.register(Post)
admin.site.register(Comment)
| [
"you@example.com"
] | you@example.com |
fa75cf84f7c515ac5987b1a3387c4d6f759455b9 | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/03 Lists Basics/More exercises/03_Josephus_Permutation.py | be8fe4e63778f129a03973856fc846ee279e0ba4 | [] | no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | elements = input().split(" ")
number = int(input())
new_elements_list = []
counter = 0
while len(elements) != 0:
for i in range(len(elements)):
counter += 1
if counter % number == 0:
new_elements_list.append(elements[i])
elements[i] = None
elements = [i for i in elements if i]
print("[" + ",".join(new_elements_list) + "]") | [
"noreply@github.com"
] | DilyanTsenkov.noreply@github.com |
2885153df53235cefe140f7a4cd0be1e421959a6 | 5cd740c36bff792dec540f02ee95336b12808f36 | /account/views.py | eef1484898abccb1e4ecaa69fd7b5cbc5853f694 | [] | no_license | Pyxic/swipe | b4362a9e17d23b4b9f7d9cfcb3a63900119eb9e8 | 584062dfd5c8a5328c22bfcd2e194bb2f94a078c | refs/heads/master | 2023-09-03T21:38:34.577685 | 2021-11-17T12:23:06 | 2021-11-17T12:23:06 | 422,495,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,428 | py | from datetime import date
from django.conf import settings
from django.shortcuts import render, get_object_or_404
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, permissions
from rest_framework.permissions import AllowAny
from account.models import User, UserFilter
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from account.models import Role
from account.permissions import IsAdmin, IsOwner, IsAdminOrOwner, IsDeveloper
from account.serializers import RoleListSerializer, UserListSerializer, ClientUpdateSerializer, \
NotaryDetailSerializer, ClientSerializer, UserFilterSerializer, UserRoleSerializer
@method_decorator(name='get', decorator=swagger_auto_schema(tags=['admin']))
class RoleListView(generics.ListAPIView):
"""Вывод списка ролей"""
queryset = Role.objects.all()
serializer_class = RoleListSerializer
class UserRoleView(generics.RetrieveAPIView):
"""Вывод роли пользователя"""
queryset = User.objects.all()
serializer_class = UserRoleSerializer
permission_classes = [permissions.IsAuthenticated]
class AdminViewSet(viewsets.ReadOnlyModelViewSet):
view_tags = ['admin']
serializer_class = UserListSerializer
permission_classes = [IsAdmin]
queryset = User.objects.filter(is_superuser=True)
class ClientViewSet(viewsets.ModelViewSet):
view_tags = ['client']
def get_queryset(self):
return User.objects.filter(role__name='клиент')
def get_serializer_class(self):
if self.action == 'list':
return UserListSerializer
if self.action in ['retrieve', 'update', 'partial_update', 'destroy']:
return ClientSerializer
def get_permissions(self):
if self.action == 'list':
permission_classes = [AllowAny]
if self.action in ['update', 'partial_update', 'retrieve']:
permission_classes = [IsAdminOrOwner]
return [permission() for permission in permission_classes]
class NotaryViewSet(viewsets.ViewSet):
permission_classes = [IsAdmin]
@swagger_auto_schema(tags=['notary'])
def list(self, request):
"""Вывод списка нотариусов"""
queryset = User.objects.filter(role__name='нотариус')
serializer = UserListSerializer(queryset, many=True)
return Response(serializer.data)
@swagger_auto_schema(tags=['notary'])
def retrieve(self, request, pk=None):
"""Вывод полной информации о нотариусе"""
queryset = User.objects.filter(role__name='нотариус')
notary = get_object_or_404(queryset, pk=pk)
serializer = NotaryDetailSerializer(notary)
return Response(serializer.data)
@method_decorator(name='put', decorator=swagger_auto_schema(tags=['notary']))
@method_decorator(name='patch', decorator=swagger_auto_schema(tags=['notary']))
class NotaryUpdateView(generics.UpdateAPIView):
"""Редактирование нотариуса"""
permission_classes = [IsAdmin]
queryset = User.objects.filter(role__name='нотариус')
serializer_class = NotaryDetailSerializer
@method_decorator(name='delete', decorator=swagger_auto_schema(tags=['notary']))
class NotaryDestroyView(generics.DestroyAPIView):
"""Удаление нотариуса"""
permission_classes = [IsAdmin]
queryset = User.objects.filter(role__name='нотариус')
serializer_class = NotaryDetailSerializer
class DeveloperViewSet(viewsets.ModelViewSet):
view_tags = ['developer']
def get_permissions(self):
if self.action == 'list':
permission_classes = [IsAdmin]
if self.action in ['update', 'partial_update', 'retrieve', 'destroy']:
permission_classes = [IsAdminOrOwner]
return [permission() for permission in permission_classes]
def get_queryset(self):
return User.objects.filter(role__name='застройщик')
def get_serializer_class(self):
if self.action == 'list':
return UserListSerializer
if self.action in ['retrieve', 'update', 'partial_update', 'destroy']:
return ClientSerializer
class ClientUpdateSubscriptionView(generics.GenericAPIView):
permission_classes = (permissions.IsAuthenticated, IsOwner)
serializer_class = ClientUpdateSerializer
queryset = User.objects.filter(role__name='клиент')
view_tags = ['client']
def patch(self, request, *args, **kwargs):
user = self.get_object()
serializer = self.serializer_class(instance=user)
user.end_date = date.today().replace(month=1 if date.today().month // 12 == 1 else date.today().month + 1)
user.subscribed = True
user.save()
return Response({'pk': user.pk, 'subscribed': user.subscribed,
'end_date': user.end_date.strftime('%Y-%m-%d')})
class ChangeBanStatus(APIView):
permission_classes = (permissions.IsAuthenticated, IsAdmin)
view_tags = ['admin']
def patch(self, request, pk):
user = get_object_or_404(User, pk=pk)
user.banned = not user.banned
user.save()
return Response({'pk': user.pk,
'ban': user.banned}, status=status.HTTP_200_OK)
class UserFilterViewSet(viewsets.ModelViewSet):
"""
User can save 'Announcement' filters and get them from db
"""
permission_classes = (IsOwner,)
serializer_class = UserFilterSerializer
queryset = UserFilter.objects.all().order_by('-id')
view_tags = ['user']
def get_queryset(self):
return self.queryset.filter(user=self.request.user)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
def create(self, request, *args, **kwargs):
"""
If user is subscribed - he doesnt have any restrictions
:param request:
:param args:
:param kwargs:
:return: Response
"""
if request.user.subscribed:
return super().create(request, *args, **kwargs)
return Response({'Error': 'Your subscribe are finished. Please, extend your subscribe'},
status=status.HTTP_400_BAD_REQUEST)
| [
"mishakalita3@gmail.com"
] | mishakalita3@gmail.com |
478ebf23b298e716de7e6cb64c4e04d287263c94 | 9994709e43d108ca49af5f5961f56a6492c84eb1 | /src/RegistrationWithConfirmation/settings.py | c1340257f4d192671f4dacebc9156788531fa0de | [] | no_license | achiengcindy/RegistrationWithConfirmation | f115bc8d5dc0ac6f3e832d4d159bc24c41e80915 | a47cca9bd519894684a8b2dbcb1a909c23fed40a | refs/heads/master | 2020-04-11T11:34:57.184708 | 2018-12-14T08:14:42 | 2018-12-14T08:14:42 | 161,752,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | """
Django settings for RegistrationWithConfirmation project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')=6=e8t!183j_69bzxhq(-n4w3g_51vow8o)30cu_@c9ys4ko@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
EMAIL_HOST=''
EMAIL_HOST_USER=''
EMAIL_HOST_PASSWORD=''
EMAIL_PORT=587
EMAIL_USE_TLS=True
DEFAULT_FROM_EMAIL='your email'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'RegistrationWithConfirmation.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'RegistrationWithConfirmation.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
| [
"achiengcindy36@gmail.com"
] | achiengcindy36@gmail.com |
89070d14e64f9f1511324e3516874a9b389fcbeb | a87810b7abad09b642399e6b83ceabea17a6d9f2 | /lock.py | c6b4ce6650ff9655be2b4a325fb0ebb652ac12e1 | [] | no_license | Sendhub/sh_util | 452fb85386b669657c9c8cadffdfb3520c0ab7e4 | 739c14cdcfeebac2fab9a2e5f76f98ecd524d7bd | refs/heads/master | 2023-07-20T06:43:46.212443 | 2019-06-24T23:24:26 | 2019-06-24T23:24:26 | 10,512,240 | 0 | 0 | null | 2021-06-29T13:45:29 | 2013-06-05T20:35:28 | Python | UTF-8 | Python | false | false | 615 | py | __author__ = 'brock'
import settings
_redis = settings.REDIS
def acquireLock(lockId, timeout=60):
# make sure these redis locks always have a valid timeout
assert timeout > 0
acquired = _redis.setnx(lockId, "true")
if acquired:
_redis.expire(lockId, timeout)
else:
# if there is no timeout set and we couldn't acquire the lock
# then make sure that we set a timeout on the lock so we
# cant have a deadlock
if not _redis.ttl(lockId):
_redis.expire(lockId, timeout)
return acquired
def releaseLock(lockId):
_redis.delete(lockId)
| [
"outtatime@gmail.com"
] | outtatime@gmail.com |
17e2629a616b6de3e7c6b1f78079b754c52ef6ea | 862588320887ec451870fb35856d4315bd2b9685 | /simple_operations/generate_backrub_ensemble.py | 8aae652d80bb8410c64e9cb270f782ef186394d3 | [] | no_license | xingjiepan/pyrosetta_scripts | 6522aa8fef43b89adac6fba1bc6072f0df2425d2 | 640ea455319d55a0cb167c50f2722778dbdde1f1 | refs/heads/master | 2021-03-16T05:35:52.560752 | 2020-05-12T22:58:54 | 2020-05-12T22:58:54 | 111,063,960 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | #!/usr/bin/env python3
'''Generate a backbrub ensemble for a given protein.
Usage:
./generate_backrub_ensemble.py input_pdb
'''
import sys
import pyrosetta
from pyrosetta import rosetta
if __name__ == '__main__':
pyrosetta.init()
input_pdb = sys.argv[1]
pose = rosetta.core.pose.Pose()
rosetta.core.import_pose.pose_from_file(pose, input_pdb)
br_mover = rosetta.protocols.backrub.BackrubMover()
pose.dump_pdb('before_br.pdb')
# Dump 20 structures
for i in range(20):
tmp_pose = pose.clone()
br_mover.apply(tmp_pose)
tmp_pose.dump_pdb('after_br_{0}.pdb'.format(i))
| [
"xingjiepan@gmail.com"
] | xingjiepan@gmail.com |
7e6cd1bfda829bc0090b0db86435f83cc639abda | d1aa6e7d5631d7806531660febbd1f856eaeece7 | /python/paddle/distributed/ps/utils/ps_factory.py | ddf5c1e3ec0315397d52c93cfb4eb2b01c3ccb4e | [
"Apache-2.0"
] | permissive | gongweibao/Paddle | 510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77 | 60f9c60cd8196c66c391d79c35d341e9072f8838 | refs/heads/develop | 2023-03-13T17:43:35.675875 | 2022-09-20T08:46:15 | 2022-09-20T08:46:15 | 82,279,237 | 3 | 2 | Apache-2.0 | 2021-05-26T06:17:43 | 2017-02-17T09:16:16 | Python | UTF-8 | Python | false | false | 1,880 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from .ps_program_builder import *
from .public import *
__all__ = [
'PsProgramBuilder', 'GeoPsProgramBuilder', 'CpuSyncPsProgramBuilder',
'CpuAsyncPsProgramBuilder', 'GpuPsProgramBuilder',
'HeterAsyncPsProgramBuilder', 'FlPsProgramBuilder', 'NuPsProgramBuilder'
]
class PsProgramBuilderFactory(object):
def __init__(self):
pass
def _create_ps_program_builder(self, pass_ctx):
attrs = pass_ctx._attrs
if attrs['ps_mode'] == DistributedMode.GEO:
if len(attrs['local_sparse']) != 0:
return globals()['NuPsProgramBuilder'](pass_ctx)
else:
return globals()['GeoPsProgramBuilder'](pass_ctx)
elif attrs['use_ps_gpu']:
return globals()['GpuPsProgramBuilder'](pass_ctx)
elif attrs['is_heter_ps_mode'] and not attrs['is_fl_ps_mode']:
return globals()['HeterAsyncPsProgramBuilder'](pass_ctx)
elif 'is_fl_ps_mode' in attrs and attrs['is_fl_ps_mode']:
return globals()['FlPsProgramBuilder'](pass_ctx)
elif attrs['ps_mode'] == DistributedMode.SYNC:
return globals()['CpuSyncPsProgramBuilder'](pass_ctx)
else:
return globals()['CpuAsyncPsProgramBuilder'](pass_ctx)
| [
"noreply@github.com"
] | gongweibao.noreply@github.com |
654ea14d4f78784922eedf0686a4f756d0dd078a | cbe264842df4eae3569b28ed4aae9489014ed23c | /books/PythonCleanCode/ch7_generator/generators_coroutines_1.py | 65fbfdd9702000177e66d098a1a4ad1ae29048ae | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | """Clean Code in Python - Chapter 7: Using Generators
> Methods of the Generators Interface.
"""
import time
from log import logger
class DBHandler:
"""Simulate reading from the database by pages."""
def __init__(self, db):
self.db = db
self.is_closed = False
def read_n_records(self, limit):
return [(i, f"row {i}") for i in range(limit)]
def close(self):
logger.debug("closing connection to database %r", self.db)
self.is_closed = True
def stream_db_records(db_handler):
"""Example of .close()
>>> streamer = stream_db_records(DBHandler("testdb")) # doctest: +ELLIPSIS
>>> len(next(streamer))
10
>>> len(next(streamer))
10
"""
try:
while True:
yield db_handler.read_n_records(10)
time.sleep(.1)
except GeneratorExit:
db_handler.close()
class CustomException(Exception):
"""An exception of the domain model."""
def stream_data(db_handler):
"""Test the ``.throw()`` method.
>>> streamer = stream_data(DBHandler("testdb"))
>>> len(next(streamer))
10
"""
while True:
try:
yield db_handler.read_n_records(10)
except CustomException as e:
logger.info("controlled error %r, continuing", e)
except Exception as e:
logger.info("unhandled error %r, stopping", e)
db_handler.close()
break
| [
"imdff0803@gmail.com"
] | imdff0803@gmail.com |
eac8016636cfd014537c66a16982d00c21173836 | ade45967ee95ba61217658b479604bb97e86770e | /isint.py | b13dbef9269d8217ec1be98c8f2ae0469f7a8094 | [] | no_license | parkseohui/git | fb0c3b41e4efd8b7a5220864c935fff7a32523db | 238580378df8772bc47045843db52baac49e658c | refs/heads/master | 2020-04-17T10:45:07.983727 | 2019-02-27T14:27:13 | 2019-02-27T14:27:13 | 166,512,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | #문자열을 제거한뒤 숫자만반환
'''난이도:(쉬움) 현우는 축구를보다가 우리나라선수들의몸값을 알고싶었다
그래서 검색을해서 메모장에 적는데 키보드가 조그만하고 안좋은지라
자꾸 숫자가아닌 문자를 같이입력해버린다
ex: xxx : 1627000000 > xxx : 1w627r00o00p00 만 (특수문자제외)
현우는 왜인지모르지만 뜻대로안되는것에
너무화가나서 자신이수량을입력하면 문자열만 딱빼서 숫자만 반환하는 코드를 만들고싶어한다
화가난 현우를위해 코드를 만들어보자!
'''
print(''.join(i for i in input('') if i.isdigit()))
| [
"skfhddl003@gmail.com"
] | skfhddl003@gmail.com |
4de909fdf690d158215c7a5f55c16f8c14efc0df | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/3-OO-Python/5-encapsulaton_20200415212038.py | 120d237cce96f90aac71a89b9a9b9197e28bc4e1 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | class PlayerCharacter:
def __init__(self, name, age):
self.name = name
self.age = age
def run (self):
print('run')
def speak(self):
print(f'my name is {') | [
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
753ab077a5840cea3bc5b736b02cdb6ba8ab9c6a | 8fe833d3751486cf03130bfdfafffeaf60c01ff8 | /hwilib/devices/trezorlib/btc.py | f9c56cf0a74ad3b604cc460f66e6170a3fc5193f | [
"MIT"
] | permissive | fmr-llc/HWI | 50726924292c92e857c1ad13458af92a2ca23037 | 4c13daed3b62635320e2411c1dd6a8ee307012e5 | refs/heads/master | 2020-12-11T14:18:51.610966 | 2020-01-13T14:57:30 | 2020-01-13T14:57:40 | 233,870,798 | 1 | 1 | MIT | 2020-01-14T15:20:33 | 2020-01-14T15:20:32 | null | UTF-8 | Python | false | false | 5,700 | py | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
from . import messages
from .tools import CallException, expect, normalize_nfc, session
@expect(messages.PublicKey)
def get_public_node(
client,
n,
ecdsa_curve_name=None,
show_display=False,
coin_name=None,
script_type=messages.InputScriptType.SPENDADDRESS,
):
return client.call(
messages.GetPublicKey(
address_n=n,
ecdsa_curve_name=ecdsa_curve_name,
show_display=show_display,
coin_name=coin_name,
script_type=script_type,
)
)
@expect(messages.Address, field="address")
def get_address(
client,
coin_name,
n,
show_display=False,
multisig=None,
script_type=messages.InputScriptType.SPENDADDRESS,
):
return client.call(
messages.GetAddress(
address_n=n,
coin_name=coin_name,
show_display=show_display,
multisig=multisig,
script_type=script_type,
)
)
@expect(messages.MessageSignature)
def sign_message(
client, coin_name, n, message, script_type=messages.InputScriptType.SPENDADDRESS
):
message = normalize_nfc(message)
return client.call(
messages.SignMessage(
coin_name=coin_name, address_n=n, message=message, script_type=script_type
)
)
@session
def sign_tx(client, coin_name, inputs, outputs, details=None, prev_txes=None):
# set up a transactions dict
txes = {None: messages.TransactionType(inputs=inputs, outputs=outputs)}
# preload all relevant transactions ahead of time
for inp in inputs:
if inp.script_type not in (
messages.InputScriptType.SPENDP2SHWITNESS,
messages.InputScriptType.SPENDWITNESS,
messages.InputScriptType.EXTERNAL,
):
try:
prev_tx = prev_txes[inp.prev_hash]
except Exception as e:
raise ValueError("Could not retrieve prev_tx") from e
if not isinstance(prev_tx, messages.TransactionType):
raise ValueError("Invalid value for prev_tx") from None
txes[inp.prev_hash] = prev_tx
if details is None:
signtx = messages.SignTx()
else:
signtx = details
signtx.coin_name = coin_name
signtx.inputs_count = len(inputs)
signtx.outputs_count = len(outputs)
res = client.call(signtx)
# Prepare structure for signatures
signatures = [None] * len(inputs)
serialized_tx = b""
def copy_tx_meta(tx):
tx_copy = messages.TransactionType()
tx_copy.CopyFrom(tx)
# clear fields
tx_copy.inputs_cnt = len(tx.inputs)
tx_copy.inputs = []
tx_copy.outputs_cnt = len(tx.bin_outputs or tx.outputs)
tx_copy.outputs = []
tx_copy.bin_outputs = []
tx_copy.extra_data_len = len(tx.extra_data or b"")
tx_copy.extra_data = None
return tx_copy
R = messages.RequestType
while isinstance(res, messages.TxRequest):
# If there's some part of signed transaction, let's add it
if res.serialized:
if res.serialized.serialized_tx:
serialized_tx += res.serialized.serialized_tx
if res.serialized.signature_index is not None:
idx = res.serialized.signature_index
sig = res.serialized.signature
if signatures[idx] is not None:
raise ValueError("Signature for index %d already filled" % idx)
signatures[idx] = sig
if res.request_type == R.TXFINISHED:
break
# Device asked for one more information, let's process it.
current_tx = txes[res.details.tx_hash]
if res.request_type == R.TXMETA:
msg = copy_tx_meta(current_tx)
res = client.call(messages.TxAck(tx=msg))
elif res.request_type == R.TXINPUT:
msg = messages.TransactionType()
msg.inputs = [current_tx.inputs[res.details.request_index]]
res = client.call(messages.TxAck(tx=msg))
elif res.request_type == R.TXOUTPUT:
msg = messages.TransactionType()
if res.details.tx_hash:
msg.bin_outputs = [current_tx.bin_outputs[res.details.request_index]]
else:
msg.outputs = [current_tx.outputs[res.details.request_index]]
res = client.call(messages.TxAck(tx=msg))
elif res.request_type == R.TXEXTRADATA:
o, l = res.details.extra_data_offset, res.details.extra_data_len
msg = messages.TransactionType()
msg.extra_data = current_tx.extra_data[o : o + l]
res = client.call(messages.TxAck(tx=msg))
if isinstance(res, messages.Failure):
raise CallException("Signing failed")
if not isinstance(res, messages.TxRequest):
raise CallException("Unexpected message")
if None in signatures:
raise RuntimeError("Some signatures are missing!")
return signatures, serialized_tx
| [
"achow101-github@achow101.com"
] | achow101-github@achow101.com |
a2a8f7739a9aee7ce46c3440d4a2914bb62cb20f | 1c4a19c0d1953280f79193f30ad8c4759e3aff58 | /ansys/dpf/core/operators/math/cos_fc.py | 877e5d8eb4f4e277d7a79f318f9792673e6c3de6 | [
"MIT"
] | permissive | hoangxuyenle/DPF-Core | d02c843b678560f12715ea90dc8c9764b3bffc99 | a404dd290c7b3ee75463b2487cafb8bf48468691 | refs/heads/master | 2023-06-15T15:27:02.597938 | 2021-06-22T15:19:04 | 2021-06-22T15:19:04 | 381,611,135 | 0 | 0 | MIT | 2021-06-30T07:18:30 | 2021-06-30T07:18:30 | null | UTF-8 | Python | false | false | 4,828 | py | """
cos_fc
======
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "math" category
"""
class cos_fc(Operator):
"""Computes element-wise cos(field[i]).
available inputs:
- fields_container (FieldsContainer)
available outputs:
- fields_container (FieldsContainer)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.cos_fc()
>>> # Make input connections
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.cos_fc(fields_container=my_fields_container)
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()"""
def __init__(self, fields_container=None, config=None, server=None):
super().__init__(name="cos_fc", config = config, server = server)
self._inputs = InputsCosFc(self)
self._outputs = OutputsCosFc(self)
if fields_container !=None:
self.inputs.fields_container.connect(fields_container)
@staticmethod
def _spec():
spec = Specification(description="""Computes element-wise cos(field[i]).""",
map_input_pin_spec={
0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""field or fields container with only one field is expected""")},
map_output_pin_spec={
0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")})
return spec
@staticmethod
def default_config():
return Operator.default_config(name = "cos_fc")
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsCosFc
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsCosFc
"""
return super().outputs
#internal name: cos_fc
#scripting name: cos_fc
class InputsCosFc(_Inputs):
"""Intermediate class used to connect user inputs to cos_fc operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.cos_fc()
>>> my_fields_container = dpf.FieldsContainer()
>>> op.inputs.fields_container.connect(my_fields_container)
"""
def __init__(self, op: Operator):
super().__init__(cos_fc._spec().inputs, op)
self._fields_container = Input(cos_fc._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to connect fields_container input to the operator
- pindoc: field or fields container with only one field is expected
Parameters
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.cos_fc()
>>> op.inputs.fields_container.connect(my_fields_container)
>>> #or
>>> op.inputs.fields_container(my_fields_container)
"""
return self._fields_container
class OutputsCosFc(_Outputs):
"""Intermediate class used to get outputs from cos_fc operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.cos_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(cos_fc._spec().outputs, op)
self._fields_container = Output(cos_fc._spec().output_pin(0), 0, op)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.cos_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
return self._fields_container
| [
"lea.paradis@ansys.com"
] | lea.paradis@ansys.com |
acda52f070fb154262908ba31dbace2d0a3c92c6 | 93b8a4be20a0a6b56bc7709b3ab4690135257ebe | /BrowserSession.py | 51cf187550b12d8774319d4032fed37519c2acfc | [] | no_license | kordless/crawlapart | 1c551b300b91d7da245a76dc0e13cde63d7bea00 | da308154be03a08bd752e37a3c6088a356f48208 | refs/heads/master | 2021-04-22T23:16:32.699117 | 2020-03-26T20:49:33 | 2020-03-26T20:49:33 | 249,879,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | #!/usr/bin/env python3
import webdriver
import json
import base64
import sys
import requests
import traceback
import logging
import time
# TODO Setup Interaction with DB rather than with flags and config files
# ideally we just want something like -> LookUpWord -> Provider -> Google
# ideally we just want something like -> go_to_url -> Cached/NotCached -> nytimes
# Explorer Mode
# Use Whois, Dig, nc, etc...
# Think of this like the Selenium but a true browser
class BrowserSession:
def __init__(self, url=None, persistent=False, debug=False):
# currently the configuration is going to be config.json
# soon it will be from MongoDB/LocalBox
self.debug = debug
#Navigation
self.url = url
self.stayopen = False
#BrowserSpecific
self.headless = False
self.fullscreen = True
#CrawlSpecific
self.local_db = None # stick to local socket
self.local_index = None # somelocalIndex Store
self.save_text = False
def setup_session(self):
self.config = json.loads(open('config.json', 'r').read())
if self.headless:
self.config['capabilities']['alwaysMatch']['moz:firefoxOptions']['args'].insert(1,'--headless')
self.config['capabilities']['alwaysMatch']['moz:firefoxOptions']['args'].insert(1,'--height=1080')
self.config['capabilities']['alwaysMatch']['moz:firefoxOptions']['args'].insert(1,'--width=1920')
print(self.config['capabilities'])
self.session = webdriver.Session(self.config['webdriverip'], self.config['webdriverport'], capabilities=self.config['capabilities'])
return
def go_to_url(self,url=None,fullscreen=True):
if url is None:
url = self.url
self.session.url = url
if fullscreen:
self.fullscreen=True
self.session.window.fullscreen()
if self.debug:
print("WebDriver to sessionID -------> {}".format(self.session.session_id))
return
def save_screenshot(self,filename=None):
if filename is None:
filename = "Screenshots/ss_{:.0f}.png".format(time.time())
print("Full Filename to use:\n\n")
print(filename + "\n\n")
try:
if self.fullscreen:
r = requests.get(url="http://localhost:4444/session/" + self.session.session_id + "/moz/screenshot/full")
print(r.status_code)
else:
r = requests.get(url="http://localhost:4444/session/" + self.session.session_id + "/screenshot")
if r.status_code == 200:
try:
with open(filename, 'wb') as screenshot:
screenshot.write(base64.b64decode(r.json()['value']))
except IOError as err:
print("I/O error: {0}".format(err))
elif r.status_code == 404:
print("Something is wrong with the session? maybe it's closed????")
print(r.json())
except Exception:
traceback.print_exc()
pass
def main_test():
new_session = BrowserSession()
new_session.headless = True
new_session.setup_session()
#new_session.go_to_url('https://google.com/search?q=MLK',fullscreen=True)
new_session.go_to_url('https://news.ycombinator.com',fullscreen=True)
print("waiting two seconds for page to load")
time.sleep(2)
new_session.save_screenshot()
if __name__ == '__main__':
main_test()
| [
"kordless@gmail.com"
] | kordless@gmail.com |
cfd3122f8016f9ea2f13eeb32f84937b90620619 | 743e3ab9bc94ada35913d03b77bf159c1ec75930 | /ZIJIE复习/11.py | 305043a736f576cc494d81217b9306b9ce2da630 | [] | no_license | Stella2019/leetcode- | 04d3978ba97ab321a5cefc061beefbf3c76cf795 | 60c8a20af961a57f73feb4ccd883dfc73370d994 | refs/heads/main | 2023-03-31T04:08:20.450791 | 2021-04-01T05:38:00 | 2021-04-01T05:38:00 | 353,585,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # 给定一个按照升序排列的整数数组 nums,和一个目标值 target。找出给定目标值在数组中的开始位置和结束位置。##给定一个按照升序排列的整数数组 nums,和一个目标值 target。找出给定目标值在数组中的开始位置和结束位置。
"""
def binarySearch(nums, target);
return binarySeachHelper(nums, target, 0, len(nums) -1 )
def binarySearchHelper(nums, target, left, right):
if left > right:
return -1
middle = (left + right)//2
potentialMatch = nums[middle]
if target == potentialMatch:
return middle
elif target < potentialMatch:
return binarySearchHelper(nums, target, left, middle - 1)
else:
return binarySearchHelper(nums, target, middle + 1, right)
"""
l, r = 0, len(nums) - 1
ans1 = -1
while l <= r:
mid = l + (r - l) // 2
if nums[mid] == target:
ans1 = mid
r = mid - 1
elif nums[mid] < target:
l = mid + 1
else:
r = mid - 1
if ans1 == -1:
print(-1, -1)
else:
ans2 = -1
l, r = ans1, len(nums) - 1
while l <= r:
mid = l + (r - l) // 2
if nums[mid] == target:
ans2 = mid
l = mid + 1
elif nums[mid] < target:
l = mid + 1
else:
r = mid - 1
print(ans1, ans2)
无序数组求中位数
Class
MedianFinder:
def _init_(self):
self.store = []
def addNum(self, num):
if not self.store:
self.store.append(num)
else:
bisect.insort_left(self.store, num)
def findMedian(self):
n = len(self.store)
if n & 1 == 1:
return self.store[n // 2]
def quicksort(num):
| [
"noreply@github.com"
] | Stella2019.noreply@github.com |
2fd671f0c486f88eddcb6ce484cf4129ba4f765f | 2d4af29250dca8c72b74e190e74d92f1467120a0 | /TaobaoSdk/Request/MallEaiOrderRefundGoodReturnRefuseRequest.py | 4efc69ca0a81e8aeb3c00f91dc7995a2141a3370 | [] | no_license | maimiaolmc/TaobaoOpenPythonSDK | 2c671be93c40cf487c0d7d644479ba7e1043004c | d349aa8ed6229ce6d76a09f279a0896a0f8075b3 | refs/heads/master | 2020-04-06T03:52:46.585927 | 2014-06-09T08:58:27 | 2014-06-09T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,457 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 卖家拒绝退货
# @author wuliang@maimiaotech.com
# @date 2013-09-22 16:52:42
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">卖家拒绝退货</SPAN>
# <UL>
# </UL>
class MallEaiOrderRefundGoodReturnRefuseRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">获取API名称</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "tmall.eai.order.refund.good.return.refuse"
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">时间戳,如果不设置,发送请求时将使用当时的时间</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">退款单编号</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Number</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.refund_id = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">售中:onsale 售后:aftersale</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.refund_phase = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">退款版本号</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Number</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.refund_version = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">拒绝退款原因留言</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.refuse_message = None
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">拒绝退款举证上传</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">byte[]</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.refuse_proof = None
| [
"chenke@maimiaotech.com"
] | chenke@maimiaotech.com |
cf54b3ebe7c2041a3817e65718bd1ffbe5bd3061 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03372/s980518099.py | 89fcc36b313a8f6192a1d4f6805dda59e1da0858 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | def main():
from collections import namedtuple
import sys
input = sys.stdin.readline
Sushi = namedtuple('Sushi', 'x cal')
n, c = map(int, input().split())
a = []
for _ in range(n):
x, v = map(int, input().split())
a.append(Sushi(x=x, cal=v))
# x昇順ソート済
clock = [0] * (n + 1) # 注目する寿司以前で離脱する最大摂取カロリー
clock_to_0 = [0] * (n + 1) # 時計回り->初期位置の最大摂取カロリー
ma = 0 # 注目する寿司以前で離脱する最大摂取カロリー
ma0 = 0 # 時計回り->初期位置の最大摂取カロリー
curr = 0 # 現在のカロリー(移動によるカロリー消費を無視)
for i, s in enumerate(a, start=1):
curr += s.cal
ma = max(ma, curr - s.x)
ma0 = max(ma0, curr - s.x * 2)
clock[i] = ma
clock_to_0[i] = ma0
anti = [0] * (n + 1) # 注目する寿司以前で離脱する最大摂取カロリー
anti_to_0 = [0] * (n + 1) # 反時計回り->初期位置の最大摂取カロリー
ma = 0 # 注目する寿司以前で離脱する最大摂取カロリー
ma0 = 0 # 時計回り->初期位置の最大摂取カロリー
curr = 0 # 現在のカロリー(移動によるカロリー消費を無視)
for i, s in zip(range(n, -1, -1), reversed(a)):
curr += s.cal
ma = max(ma, curr - (c - s.x))
ma0 = max(ma0, curr - (c - s.x) * 2)
anti[i] = ma
anti_to_0[i] = ma0
ans = 0
for exit_pos in range(1, n + 1):
ans = max(
ans,
clock_to_0[exit_pos - 1] + anti[exit_pos],
anti_to_0[(exit_pos + 1) % (n + 1)] + clock[exit_pos]
)
print(ans)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e7bd3908c39dac626bf71a69c7afdbf3231e9f2a | ba80ca143ba35fd481730786a27ebdb1f88ce835 | /algorithm/backjoon/35.DFS/1890.py | aca1cfcee092c960fd8624f4d28c8de0c983167f | [] | no_license | uiandwe/TIL | c541020b65adc53578aeb1c3ba4c6770b3b2e8b3 | 186544469374dd0279099c6c6aa7555ee23e42fe | refs/heads/master | 2022-02-15T08:33:07.270573 | 2022-01-01T15:22:54 | 2022-01-01T15:22:54 | 63,420,931 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | map = [
[2, 3, 3, 1],
[1, 2, 1, 3],
[1, 2, 3, 1],
[3, 1, 1, 0]
]
min_step = float('inf')
position_x = [0, 1]
position_y = [1, 0]
def dfs(x, y, step):
global min_step
if x == 3 and y == 3:
if step < min_step:
min_step = step
return
c = map[x][y]
for i in range(2):
dx = x + ((c*position_x[i]))
dy = y + ((c*position_y[i]))
if dx >= 0 and dx < 4 and dy >= 0 and dy < 4:
dfs(dx, dy, step+1)
def solution():
dfs(0, 0, 0)
print(min_step)
solution()
| [
"noreply@github.com"
] | uiandwe.noreply@github.com |
c12ed3555353b036fbee254e5955d663e05c5577 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/components/tcp/const.py | 3a42736c753ef8a6036d4568a0522b0ac67010f0 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 328 | py | """Constants for TCP platform."""
from __future__ import annotations
from typing import Final
CONF_BUFFER_SIZE: Final = "buffer_size"
CONF_VALUE_ON: Final = "value_on"
DEFAULT_BUFFER_SIZE: Final = 1024
DEFAULT_NAME: Final = "TCP Sensor"
DEFAULT_TIMEOUT: Final = 10
DEFAULT_SSL: Final = False
DEFAULT_VERIFY_SSL: Final = True
| [
"noreply@github.com"
] | home-assistant.noreply@github.com |
0571fd0e83d3c8cedd7d3130d18e0298230f8218 | 2c4763aa544344a3a615f9a65d1ded7d0f59ae50 | /playground/test_fail/wscript | 95c282a2af9a13a02b6175cb035208d83d474246 | [] | no_license | afeldman/waf | 572bf95d6b11571bbb2941ba0fe463402b1e39f3 | 4c489b38fe1520ec1bc0fa7e1521f7129c20f8b6 | refs/heads/master | 2021-05-09T18:18:16.598191 | 2019-03-05T06:33:42 | 2019-03-05T06:33:42 | 58,713,085 | 0 | 0 | null | 2016-05-13T07:34:33 | 2016-05-13T07:34:33 | null | UTF-8 | Python | false | false | 1,163 | #! /usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2011 (ita)
"""
Map a compilation failure to a success status. People playing with C++ templates
might need this.
"""
top = '.'
out = 'build'
def options(opt):
opt.load('compiler_cxx')
def configure(conf):
conf.load('compiler_cxx')
def build(bld):
bld.objects(source='success.cpp', target='ok')
bld.objects(source='fail.cpp', target='fail', features='fail')
##################################################################
# the feature 'fail' is defined below
from waflib.Tools.cxx import cxx
# our task class
class cxxfail(cxx):
def run(self):
ret = super(cxxfail, self).run()
self.outputs[0].write('just a simulation')
return not ret
# @extension would apply this to all through TaskGen.mappings
def one_more_mapping(self, node):
return self.create_compiled_task('cxxfail', node)
from waflib.TaskGen import feature, before
@before('process_source')
@feature('fail')
def remap_failure_to_success(self):
# override
self.mappings = dict(self.mappings)
# then change the extension processing
self.mappings['.cpp'] = one_more_mapping
| [
"anton.feldmann@outlook.de"
] | anton.feldmann@outlook.de | |
792f0e26fb0531faa14be43065ca915945d46398 | f771e83756436594a145bd7b80e5e5d8bca53268 | /test_app/migrations/twitter/0002_auto_20180530_0935.py | 7f522ac7dda3d0cc45602484943d733bdcf6df26 | [
"MIT"
] | permissive | bnzk/djangocms-baseplugins | b76ed75460fbeacb62366935824d2bcfac52b25e | 98e390482aa4facc35efe2412ff1603d85e2c8ba | refs/heads/develop | 2023-06-17T23:55:41.574828 | 2023-06-09T09:22:01 | 2023-06-09T09:22:01 | 68,296,521 | 2 | 0 | MIT | 2023-04-17T09:18:11 | 2016-09-15T13:32:05 | Python | UTF-8 | Python | false | false | 2,319 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-30 09:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitter', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='tweetembed',
name='anchor_de',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='anchor_en',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='anchor_fr',
field=models.SlugField(blank=True, default='', null=True, verbose_name='Anchor'),
),
migrations.AddField(
model_name='tweetembed',
name='title_de',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='title_en',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='title_fr',
field=models.CharField(blank=True, default='', max_length=256, null=True, verbose_name='Title'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_de',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_en',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
migrations.AddField(
model_name='tweetembed',
name='tweet_url_fr',
field=models.URLField(help_text='Example: https://twitter.com/MdDoomFest/status/795834590481018880', null=True, verbose_name='Tweet URL'),
),
]
| [
"bnzk@bnzk.ch"
] | bnzk@bnzk.ch |
f847203df4ede5b0fbb299394fd26184af8cdc8a | 88ed6ed99589f7fb8e49aeb6c15bf0d51fe14a01 | /049_group-anagrams.py | be39e43dcb820a52855a524f2c1fe18e6cb730a2 | [] | no_license | ryeLearnMore/LeetCode | 3e97becb06ca2cf4ec15c43f77447b6ac2a061c6 | 04ec1eb720474a87a2995938743f05e7ad5e66e3 | refs/heads/master | 2020-04-07T19:02:43.171691 | 2019-06-23T15:09:19 | 2019-06-23T15:09:19 | 158,634,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#@author: rye
#@time: 2019/3/13
'''
总结:
思路正解差不多,但是过不了,原因在于如果strs里有重复的字符串就无法处理。
还没想好怎么解决。
'''
class Solution(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
arr = []
have = []
for i in range(len(strs)):
temp = []
if strs[i] not in have:
temp.append(strs[i])
for j in range(i + 1, len(strs)):
if sorted(strs[i]) == sorted(strs[j]):
if strs[j] not in have:
temp.append(strs[j])
have.append(strs[j])
if temp != []:
arr.append(temp)
return arr
# 大佬的做法
'''
用字典的方式,感觉确实开拓了思路。
'''
class Solution1(object):
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
mapx = {}
for i in strs:
tmp = ''.join(sorted(list(i)))
if tmp in mapx:
mapx[tmp].append(i) # 注意这步,自己写的时候可能想不到还可以这样添加。即:一个key对应多个value,并用list表示
else:
mapx[tmp] = [i]
return mapx.values()
if __name__ == '__main__':
strs1 = ["eat", "tea", "tan", "ate", "nat", "bat"]
strs2 = ["","",""]
print(Solution1().groupAnagrams(strs1)) | [
"noreply@github.com"
] | ryeLearnMore.noreply@github.com |
5aa4056b5a3b3b7562859e1199fa04a338390c39 | 5730110af5e4f0abe538ed7825ddd62c79bc3704 | /pacu/pacu/core/model/analysis_fixture.py | 392c070eabf6f44a81418eea1b412337f5febeca | [] | no_license | jzeitoun/pacu-v2 | bdbb81def96a2d87171ca20b89c878b2f66975e7 | 0ccb254a658263b4fe8c80ea623f860cb7dc1428 | refs/heads/master | 2021-06-03T18:50:50.890399 | 2020-04-27T16:31:59 | 2020-04-27T16:31:59 | 110,889,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # one = dict(
# type=u'i3d',
# title=u'First transplantation',
# user=u'Sunil',
# desc=u'The brain has the remarkable capacity to rewire its connections and thereby reorganize its function. In the juvenile brain, the plasticity of neuronal connections mediates the fine-tuning of a wide range of behaviors, from visual perception to language acquisition to social recognition.',
# host=u'Scanbox',
# src=u'JZ_003'
# )
# two = dict(
# type=u'i3d',
# title=u'The dummy session',
# user=u'HT',
# desc=u'What mechanism regulates the plasticity of connections in the young brain? How might we manipulate neural circuits to reactivate this plasticity?',
# host=u'Scanbox',
# src=u'JZ_006'
# )
def get(Model=None):
if not Model:
from .analysis import AnalysisV1 as Model
return dict(
# one = Model(**one),
# two = Model(**two),
)
def dump(session):
session.add_all(get().values())
session.commit()
| [
"jzeitoun@uci.edu"
] | jzeitoun@uci.edu |
c32eb96335d89570632c54e7cfe7bbea03eb18aa | b22205aa21ac51c7b14dfaab556eea1f8902a922 | /bin/foamOutputDirs.py | e6882006845de0e8a6ce636f6f9e54ca763c15b1 | [] | no_license | ewquon/pylib | a1c6a64a0127c5078e19f190ec252ccd00b5035e | c34afb2a13fc0075f95a43bac99219b25b3984a2 | refs/heads/master | 2023-07-12T11:32:31.671093 | 2023-06-21T15:59:15 | 2023-06-21T15:59:15 | 41,262,844 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python
import sys
import os
dirs = [ dname for dname in os.listdir('.') if os.path.isdir(dname) ]
dirlist = []
numlist = []
for d in dirs:
try:
step = float(d)
numlist.append(step)
dirlist.append(d)
except ValueError: pass
# sort list of floats
indices = [i[0] for i in sorted(enumerate(numlist), key=lambda x:x[1])]
if len(sys.argv) > 1:
sep = sys.argv[1]
else: sep = ' '
#print(' '.join(dirlist))
#print(' '.join([dirlist[i] for i in indices]))
#print(' '.join([dirlist[i] for i in indices]).strip())
print(sep.join([dirlist[i] for i in indices]).strip())
| [
"eliot.quon@nrel.gov"
] | eliot.quon@nrel.gov |
609a9ef66b72016bf583dc87d491d71c0fe4395e | 18a0e8f672359f8f0e0e1b8a356e87627399be87 | /testproject/settings.py | 62082bdfc9780bf034cf882de53e64e6c9ab93ca | [
"BSD-2-Clause"
] | permissive | ptim/formulation | 69ee3cf24981ded2552ef47f1c8ba999820e038d | 2351cc85cd189c7029a35801a8f95e7450b175d3 | refs/heads/master | 2021-01-18T05:47:46.960270 | 2014-07-23T14:04:59 | 2014-07-23T14:04:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # test project django settings
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'name': ':memory:',
},
}
ROOT_URLCONF = 'testproject.urls'
SECRET_KEY = 's3cr3t'
INSTALLED_APPS = (
'formulation',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'testproject', 'templates'),
)
try:
from django.test.runner import DiscoverRunner
except:
TEST_RUNNER = 'discover_runner.DiscoverRunner'
| [
"curtis@tinbrain.net"
] | curtis@tinbrain.net |
3938f608cebd2d4198512b2979b0290982d04b86 | 6dfb7fe44b6c5bfb7feb5a101656e3d3402a621f | /simp_py_examples/course/SM1801/t012.py | a660e43c7dba6f39eeb873127d7ed0732ab05063 | [
"MIT"
] | permissive | kcfkwok2003/Simp_py | 11d6813fac83ab6309eb8efc22fcd8edde5b19b8 | f75e66da01b45dc8688dda602f8b33d4258f0c31 | refs/heads/master | 2021-05-11T00:36:36.872754 | 2018-12-19T01:41:15 | 2018-12-19T01:41:15 | 118,306,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # t012.py
from machine import Pin
led=Pin(26,Pin.OUT)
while True:
led.value(0) # 0V
time.sleep(1)
led.value(1) # 3.3V
time.sleep(1)
| [
"kcfkwok@gmail.com"
] | kcfkwok@gmail.com |
627a2e28b59cabee99cdbfa37daee24e496a77f5 | 561a032be5f4f37f40e49ed70740d167e3a12d56 | /django_movie/movies/migrations/0002_auto_20210820_0202.py | cfcce499af7dc2452be06bb99ba4325e264a6a22 | [] | no_license | Mazev/django_movie | 6f3e0cfbd4e46431f03bd900a86cae4dca9f27f3 | af5a194d5fb5a08a944358ba2226a2e1db2e137b | refs/heads/main | 2023-07-12T21:11:07.410313 | 2021-08-21T16:38:58 | 2021-08-21T16:38:58 | 397,938,816 | 0 | 0 | null | 2021-08-21T16:38:58 | 2021-08-19T12:40:24 | JavaScript | UTF-8 | Python | false | false | 1,270 | py | # Generated by Django 3.2.6 on 2021-08-19 23:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('movies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='fees_in_world',
field=models.PositiveIntegerField(default=0, help_text='бюджета е в долари', verbose_name='Приходи по цял свят'),
),
migrations.CreateModel(
name='Reviews',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.CharField(max_length=100, verbose_name='Име')),
('text', models.TextField(max_length=5000, verbose_name='Съобщение')),
('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.movie', verbose_name='филм')),
],
options={
'verbose_name': 'Коментар',
'verbose_name_plural': 'Коментари',
},
),
]
| [
"77510376+Mazev@users.noreply.github.com"
] | 77510376+Mazev@users.noreply.github.com |
be33e0ee19c20030f9a4e0d0e74e372a872cd5e7 | 784a030b7afb119b5b7024339117a33549db4d74 | /taravel/locations/migrations/0002_auto_20160409_0001.py | 526d2deaffc258d532fb5a657c28a7d49a7b052a | [
"MIT"
] | permissive | ad-m/taravel | bb136d789cf3a22ffe3744fe3cc273edd5c74640 | 4697ee51eec48ed8bb57d7b4a00f352f47e40ba0 | refs/heads/master | 2020-12-29T02:32:04.320280 | 2017-04-09T18:05:28 | 2017-04-09T18:05:28 | 55,809,856 | 0 | 0 | null | 2016-07-21T14:54:38 | 2016-04-08T21:10:20 | JavaScript | UTF-8 | Python | false | false | 473 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-09 00:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='country',
name='name',
field=models.CharField(max_length=255, verbose_name='Name of country'),
),
]
| [
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
2d7fb17440dde49931c5efae141b324e970191e8 | eab1756b01717e81537133400f36aea4d7a0876f | /yuxin_numpy/variable_fetch_bug_report.py | 5e9afe5df43ac413eecfdf31779316e614e6e14c | [] | no_license | bearpelican/cluster | d677fe392ac1196b77e3f8fb79e530ec8371080f | 2e316cf1def0b72b47f79a864ed3aa778c297b95 | refs/heads/master | 2020-03-21T06:52:57.514901 | 2018-08-10T10:20:26 | 2018-08-10T22:33:05 | 138,246,892 | 3 | 1 | null | 2018-06-22T02:51:07 | 2018-06-22T02:51:07 | null | UTF-8 | Python | false | false | 5,162 | py | # Run D2H and H2D benchmark with synthetic workload with feed-fetch step
import tensorflow as tf
import argparse
import numpy as np
import time
import ray
import os
import portpicker
import subprocess
import sys
import tensorflow as tf
import threading
import time
import pickle
from collections import OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument("--dim", default=25*1000*1000, type=int,
help="The number of parameters.")
parser.add_argument("--align", default='none', type=str,
help="none/cpu/gpu/ray")
parser.add_argument("--target", default='cpu', type=str,
help="where target tensor lives (cpu or gpu)")
args = parser.parse_args()
global_timeit_dict = OrderedDict()
class timeit:
"""Decorator to measure length of time spent in the block in millis and log
it to TensorBoard."""
def __init__(self, tag=""):
self.tag = tag
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
interval_ms = 1000*(self.end - self.start)
global_timeit_dict.setdefault(self.tag, []).append(interval_ms)
# print("%20s %10.2f"%(self.tag, interval_ms))
def summarize_time(tag, time_list_ms):
# delete first large interval if exists
# if time_list_ms and time_list_ms[0]>3600*10:
del time_list_ms[0]
if len(time_list_ms)>0:
min = np.min(time_list_ms)
mean = np.mean(time_list_ms)
median = np.median(time_list_ms)
data_size_gb = args.dim*4/1e9
time_sec = min/1000
bw = data_size_gb/time_sec
formatted = ["%.2f"%(d,) for d in time_list_ms[:10]]
print("%-20s: %.1f GB/sec, min: %.2f, median: %.2f, mean: %.2f"%(tag, bw, min, median, mean))
else:
print("Times: <empty>")
timeline_counter = 0
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True)
def sessrun(*args, **kwargs):
"""Runs fetches, dumps timeline files in current directory."""
global timeline_counter
run_metadata = tf.RunMetadata()
log_fn = "%s"%(timeline_counter,)
sess = tf.get_default_session()
root = os.getcwd()+"/data"
os.system('mkdir -p '+root)
from tensorflow.python.client import timeline
kwargs['options'] = run_options
kwargs['run_metadata'] = run_metadata
results = sess.run(*args, **kwargs)
tl = timeline.Timeline(step_stats=run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format(show_memory=True,
show_dataflow=False)
open(root+"/timeline_%s.json"%(log_fn,), "w").write(ctf)
open(root+"/stepstats_%s.pbtxt"%(log_fn,), "w").write(str(
run_metadata.step_stats))
timeline_counter+=1
return results
def fetch_cpu_variable():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable'):
sess.run(params)
def fetch_cpu_variable_add():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
params = params+0.1
params_first = params[0]
params_sum = tf.reduce_sum(params)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable_add'):
# sess.run(params)
result = sess.run(params)
def fetch_cpu_variable_concat():
data = np.ones((args.dim,), dtype=np.float32)
with tf.device('/cpu:0'):
params = tf.Variable(initial_value=data)
params = tf.concat([params, tf.fill([1],1.0)], axis=0)
sess.run(tf.global_variables_initializer())
for i in range(20):
with timeit('fetch_cpu_variable_concat'):
sess.run(params)
def main():
global grad_cached_const
import gc
gc.disable()
params0 = np.ones((args.dim,), dtype=np.float32)/(np.sqrt(args.dim))
if args.align == 'none':
pass
elif args.align == 'cpu':
params0 = align_numpy_cpu(params0)
elif args.align == 'gpu':
params0 = align_numpy_gpu(params0)
loss, params, grad_cached, grad_assign_op = create_net('net1', params0)
sess.run(tf.global_variables_initializer())
lr = 0.01
for i in range(10):
loss0 = loss.eval()
print(loss0)
with timeit('step'):
pass
# sess.run(grad_assign_op)
with timeit('fetch'):
# grad0 = sess.run(grad_cached)
grad0 = sess.run(grad_cached_const)
# takes 75ms, 33ms is on allocation, 16ms on multiplication
with timeit('add'):
params0-=grad0*lr
with timeit('feed'):
# params.load(params0)
sess.run(params.initializer, feed_dict={params.initial_value:params0})
for key, times in global_timeit_dict.items():
summarize_time(key, times)
assert abs(loss0-0.69513524)<0.01
print('test passed')
if __name__ == '__main__':
import gc
gc.disable()
sess = tf.InteractiveSession()
fetch_cpu_variable()
fetch_cpu_variable_add()
fetch_cpu_variable_concat()
for key, times in global_timeit_dict.items():
summarize_time(key, times)
| [
"yaroslavvb@gmail.com"
] | yaroslavvb@gmail.com |
28a011661f24c1229e4e973964de433c47835416 | fd0328f6a5f78cfa80d61094517fa0f32943bb9e | /superlists/urls.py | 902f87670539d653d465801868e9194a9774418b | [] | no_license | liangsongyou/superlists | 3eee6ae492e89a13a54aec55f4b94c78c1fa049a | fd1704a14d18fe9fa7dc1074a172d9b0708ba1f3 | refs/heads/master | 2020-03-16T21:50:41.382819 | 2018-05-31T07:44:14 | 2018-05-31T07:44:14 | 111,548,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
# from django.contrib import admin
from lists import views as list_views
from lists import urls as list_urls
from accounts import urls as account_urls
urlpatterns = [
# url(r'^admin/', admin.site.urls),
url(r'^$', list_views.home_page, name='home'),
url(r'^lists/', include(list_urls)),
url(r'^accounts/', include(account_urls)),
]
| [
"yuebei58@gmail.com"
] | yuebei58@gmail.com |
b92f292c218ba6dc7a54c573b10dc237a4ac6bff | 7a0070b15636653f404c2b2b85d300e949db1fb2 | /muglaSepetiApp/migrationsex/0033_auto_20200921_0209.py | 8d9ec33af59cb6b3643d7afcc29b60c2de0cea8c | [] | no_license | furkankykc/MuglaSepeti | 8d0f29faf8a868b159ca0d158cdb2e312784c626 | 58a650e68fd283baeaa0ae6716c8ea316b996c16 | refs/heads/master | 2023-01-08T22:15:00.878505 | 2020-11-09T21:18:13 | 2020-11-09T21:18:13 | 287,108,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # Generated by Django 3.0.8 on 2020-09-20 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('muglaSepetiApp', '0032_auto_20200921_0208'),
]
operations = [
migrations.AlterField(
model_name='config',
name='aboutus_title',
field=models.CharField(blank=True, max_length=50, null=True),
),
]
| [
"furkanfbr@gmail.com"
] | furkanfbr@gmail.com |
b26759a3ad30279cdb6e5e3d1504992e161eee56 | 9cbab916088192af67a19aaee25fe7d6e5d27a31 | /file/create.py | 255728651113829e6fdd6bb10971222ec7e3638c | [] | no_license | ddayzzz/Pythonlearning | 806c75304d7d954f2c935031d4d7516be7ce7300 | 54e92aa5282da97b6d4bd2355a668a16c272ee68 | refs/heads/master | 2020-12-30T12:44:49.465356 | 2017-05-25T15:12:53 | 2017-05-25T15:12:53 | 91,356,527 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 361 | py | #create py
#coding=gb2312
import os,shelve,pprint
# Persist a small demo data structure in a shelve database, then generate a
# tiny reader script (create1.py) that pretty-prints it back.
selffile='createfile'
selfptr=shelve.open(selffile)
# NOTE(review): the dict below contains gb2312-encoded text; do not re-encode.
dic=[{'name':'Ãû×Ö'},{'USSR':'former soviet'},{'China':{'1':'china','2':'Chinese'}}]
selfptr['dic']=dic
# Append the reader script; 'a' means repeated runs keep appending copies.
crepy='create1.py'
fptr=open(crepy,'a')
# NOTE(review): neither selfptr nor fptr is closed before the script exits.
fptr.write('#coding=gb2312\nimport pprint,shelve\nprint(pprint.pformat(shelve.open(\'createfile\')[\'dic\']))')
"wangshu214@live.cn"
] | wangshu214@live.cn |
9124499c826b46e8fc759077b08027aae9b2d2d4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02709/s723829299.py | 01a4d49022c1c656def13e678e9cfeee8f882cc1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | n=int(input())
a=list(map(int,input().split()))
a=list(zip(range(n),a))#[index,a]の配列
a.sort(key=lambda x:x[1])#aの昇順で並べる
dp=[[0 for i in range(n+1)]for j in range(n+1)] #dp[x][y]:=左にx、右にy人並べたときの最大値
ans=0
for k in range(n):# k人目まで終了、k+1人目に対して
i,ai=a.pop()#aの大きいものから取り出す
dp[k+1][0]=dp[k][0]+ai*(i-k)
dp[0][k+1]=dp[0][k]+ai*(n-k-1-i)
for l in range(k):#右にl+1人並べたとき
dp[k-l][l+1]=max(dp[k-l-1][l+1]+ai*(i-k+l+1),dp[k-l][l]+ai*(n-l-1-i))
for k in range(n+1):
ans=max(ans,dp[k][n-k])
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4c8ab6bc0205d4424c71cea52b683546ac62f73b | c4313edda0f14795490080af1ba400b826611be8 | /lib/Crypto.lin.x64/Crypto/SelfTest/Hash/test_SHA3_224.py | 69128d9f74da2ba9b5b697ea56e7343ea0daab7a | [
"MIT"
] | permissive | tosher/Mediawiker | 821a4eab9f812e820bab3a8f4d3f3d542d3aeafa | 89c25d4fa6c6224edbaf5f06794a03594bcccad0 | refs/heads/master | 2023-07-06T02:45:05.924541 | 2023-07-01T18:32:09 | 2023-07-01T18:32:09 | 6,582,157 | 104 | 22 | NOASSERTION | 2023-09-14T18:00:30 | 2012-11-07T16:18:25 | Python | UTF-8 | Python | false | false | 2,874 | py | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_SHA3_224.py: Self-test for the SHA-3/224 hash function
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.SHA3_224"""
import unittest
from binascii import hexlify
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from io import StringIO
from Crypto.Hash import SHA3_224 as SHA3
from Crypto.Util.py3compat import b
class APITest(unittest.TestCase):
    """API-level behavior checks for the SHA3-224 hash wrapper."""

    def test_update_after_digest(self):
        """update() after digest() must fail unless update_after_digest=True."""
        msg=b("rrrrttt")

        # Normally, update() cannot be done after digest()
        h = SHA3.new(data=msg[:4])
        dig1 = h.digest()
        self.assertRaises(TypeError, h.update, msg[4:])

        dig2 = SHA3.new(data=msg).digest()

        # With the proper flag, it is allowed
        h = SHA3.new(data=msg[:4], update_after_digest=True)
        # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
        # use the canonical assertEqual.
        self.assertEqual(h.digest(), dig1)

        # ... and the subsequent digest applies to the entire message
        # up to that point
        h.update(msg[4:])
        self.assertEqual(h.digest(), dig2)
def get_tests(config={}):
    """Build the SHA3-224 test suite: KAT vector tests plus APITest.

    `config` is accepted for interface compatibility with the other self-test
    modules; it is not used here.
    """
    from .common import make_hash_tests
    tests = []

    # Known-answer test vectors shipped with the library.
    test_vectors = load_tests(("Crypto", "SelfTest", "Hash", "test_vectors", "SHA3"),
                                "ShortMsgKAT_SHA3-224.txt",
                                "KAT SHA-3 224",
                                { "len" : lambda x: int(x) } )
    test_data = []
    for tv in test_vectors:
        # A declared length of 0 means the empty message.
        if tv.len == 0:
            tv.msg = b("")
        test_data.append((hexlify(tv.md), tv.msg, tv.desc))


    tests += make_hash_tests(SHA3, "SHA3_224", test_data,
                             digest_size=SHA3.digest_size,
                             oid="2.16.840.1.101.3.4.2.7")
    tests += list_test_cases(APITest)
    return tests
if __name__ == '__main__':
    # Allow running this self-test module directly as a script.
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| [
"to.tosher@gmail.com"
] | to.tosher@gmail.com |
19c10d17f3d2fb83cd470c70b8a4a7eaa4e2d4c5 | 5b5145ce47a6e14f342f21ba3752ab8823d8043a | /panoptes_aggregation/tests/reducer_tests/test_shape_reducer_fan.py | 1e05d18634e45d2e3d228b12da528f8c96a1801f | [
"Apache-2.0"
] | permissive | isabella232/aggregation-for-caesar | 335f40c801e2cd18e807b6f10d8228e9c659df97 | 9ce7616b60ab32b13791868ace1637801ea937e9 | refs/heads/master | 2023-03-20T08:21:44.889957 | 2020-12-14T11:54:55 | 2020-12-14T14:17:28 | 322,817,491 | 0 | 0 | Apache-2.0 | 2021-02-23T23:28:33 | 2020-12-19T10:06:10 | null | UTF-8 | Python | false | false | 6,281 | py | from panoptes_aggregation.reducers.shape_reducer_dbscan import process_data as process_data_dbscan, shape_reducer_dbscan
from panoptes_aggregation.reducers.shape_reducer_hdbscan import process_data as process_data_hdbscan, shape_reducer_hdbscan
from .base_test_class import ReducerTest
import copy
extracted_data = [
{
'frame0': {
'T0_tool0_x': [0.0, 100.0],
'T0_tool0_y': [0.0, 100.0],
'T0_tool0_radius': [50.0, 10.0],
'T0_tool0_spread': [60.0, 20.0],
'T0_tool0_rotation': [1.0, 359.0]
},
'frame1': {
'T0_tool1_x': [50.0],
'T0_tool1_y': [50.0],
'T0_tool1_radius': [50.0],
'T0_tool1_spread': [50.0],
'T0_tool1_rotation': [50.0]
}
},
{
'frame0': {
'T0_tool0_x': [0.0, 100.0],
'T0_tool0_y': [0.0, 100.0],
'T0_tool0_radius': [50.0, 10.0],
'T0_tool0_spread': [60.0, 20.0],
'T0_tool0_rotation': [359.0, 1.0],
'T0_tool1_x': [0.0, 100.0],
'T0_tool1_y': [100.0, 0.0],
'T0_tool1_radius': [10.0, 50.0],
'T0_tool1_spread': [50.0, 10.0],
'T0_tool1_rotation': [1.0, 359.0]
}
},
{
'frame1': {
'T0_tool1_x': [50.0],
'T0_tool1_y': [50.0],
'T0_tool1_radius': [50.0],
'T0_tool1_spread': [50.0],
'T0_tool1_rotation': [50.0]
}
},
{
'frame0': {
'T0_tool1_x': [0.0, 100.0],
'T0_tool1_y': [100.0, 0.0],
'T0_tool1_radius': [10.0, 50.0],
'T0_tool1_spread': [50.0, 10.0],
'T0_tool1_rotation': [359.0, 1.0]
},
'frame1': {
'T0_tool0_x': [20.0],
'T0_tool0_y': [20.0],
'T0_tool0_radius': [20.0],
'T0_tool0_spread': [20.0],
'T0_tool0_rotation': [20.0]
}
},
{}
]
kwargs_extra_data = {
'user_id': [
1,
2,
3,
4,
5
]
}
processed_data = {
'shape': 'fan',
'symmetric': False,
'frame0': {
'T0_tool0': [
(0.0, 0.0, 50.0, 60.0, 1.0),
(100.0, 100.0, 10.0, 20.0, 359.0),
(0.0, 0.0, 50.0, 60.0, 359.0),
(100.0, 100.0, 10.0, 20.0, 1.0)
],
'T0_tool1': [
(0.0, 100.0, 10.0, 50.0, 1.0),
(100.0, 0.0, 50.0, 10.0, 359.0),
(0.0, 100.0, 10.0, 50.0, 359.0),
(100.0, 0.0, 50.0, 10.0, 1.0)
]
},
'frame1': {
'T0_tool0': [
(20.0, 20.0, 20.0, 20.0, 20.0)
],
'T0_tool1': [
(50.0, 50.0, 50.0, 50.0, 50.0),
(50.0, 50.0, 50.0, 50.0, 50.0)
]
}
}
reduced_data = {
'frame0': {
'T0_tool0_fan_x': [0.0, 100.0, 0.0, 100.0],
'T0_tool0_fan_y': [0.0, 100.0, 0.0, 100.0],
'T0_tool0_fan_radius': [50.0, 10.0, 50.0, 10.0],
'T0_tool0_fan_spread': [60.0, 20.0, 60.0, 20.0],
'T0_tool0_fan_rotation': [1.0, 359.0, 359.0, 1.0],
'T0_tool0_cluster_labels': [0, 1, 0, 1],
'T0_tool0_clusters_count': [2, 2],
'T0_tool0_clusters_x': [0.0, 100.0],
'T0_tool0_clusters_y': [0.0, 100.0],
'T0_tool0_clusters_radius': [50.0, 10.0],
'T0_tool0_clusters_spread': [60.0, 20.0],
'T0_tool0_clusters_rotation': [0.0, 0.0],
'T0_tool1_fan_x': [0.0, 100.0, 0.0, 100.0],
'T0_tool1_fan_y': [100.0, 0.0, 100.0, 0.0],
'T0_tool1_fan_radius': [10.0, 50.0, 10.0, 50.0],
'T0_tool1_fan_spread': [50.0, 10.0, 50.0, 10.0],
'T0_tool1_fan_rotation': [1.0, 359.0, 359.0, 1.0],
'T0_tool1_cluster_labels': [0, 1, 0, 1],
'T0_tool1_clusters_count': [2, 2],
'T0_tool1_clusters_x': [0.0, 100.0],
'T0_tool1_clusters_y': [100.0, 0.0],
'T0_tool1_clusters_radius': [10.0, 50.0],
'T0_tool1_clusters_spread': [50.0, 10.0],
'T0_tool1_clusters_rotation': [0.0, 0.0]
},
'frame1': {
'T0_tool0_fan_x': [20.0],
'T0_tool0_fan_y': [20.0],
'T0_tool0_fan_radius': [20.0],
'T0_tool0_fan_spread': [20.0],
'T0_tool0_fan_rotation': [20.0],
'T0_tool0_cluster_labels': [-1],
'T0_tool1_fan_x': [50.0, 50.0],
'T0_tool1_fan_y': [50.0, 50.0],
'T0_tool1_fan_radius': [50.0, 50.0],
'T0_tool1_fan_spread': [50.0, 50.0],
'T0_tool1_fan_rotation': [50.0, 50.0],
'T0_tool1_cluster_labels': [0, 0],
'T0_tool1_clusters_count': [2],
'T0_tool1_clusters_x': [50.0],
'T0_tool1_clusters_y': [50.0],
'T0_tool1_clusters_radius': [50.0],
'T0_tool1_clusters_spread': [50.0],
'T0_tool1_clusters_rotation': [50.0]
}
}
# DBSCAN variant: clusters the fan extracts above with eps=5, min_samples=2.
TestShapeReducerFan = ReducerTest(
    shape_reducer_dbscan,
    process_data_dbscan,
    extracted_data,
    processed_data,
    reduced_data,
    'Test shape fan reducer with DBSCAN',
    network_kwargs=kwargs_extra_data,
    pkwargs={'shape': 'fan'},
    kwargs={
        'eps': 5,
        'min_samples': 2
    },
    test_name='TestShapeReducerFan'
)

# The HDBSCAN reducer additionally reports per-point membership probabilities
# and per-cluster persistence, so extend the expected DBSCAN output with those.
reduced_data_hdbscan = copy.deepcopy(reduced_data)
reduced_data_hdbscan['frame0']['T0_tool0_cluster_probabilities'] = [1.0, 1.0, 1.0, 1.0]
reduced_data_hdbscan['frame0']['T0_tool0_clusters_persistance'] = [0.9868693567140278, 0.9868693567140278]
reduced_data_hdbscan['frame0']['T0_tool1_cluster_probabilities'] = [1.0, 1.0, 1.0, 1.0]
reduced_data_hdbscan['frame0']['T0_tool1_clusters_persistance'] = [0.9868693567140278, 0.9868693567140278]
reduced_data_hdbscan['frame1']['T0_tool0_cluster_probabilities'] = [0.0]
reduced_data_hdbscan['frame1']['T0_tool1_cluster_probabilities'] = [1.0, 1.0]
reduced_data_hdbscan['frame1']['T0_tool1_clusters_persistance'] = [1.0]

# HDBSCAN variant of the same fan-shape reducer test.
TestShapeReducerFanHdbscan = ReducerTest(
    shape_reducer_hdbscan,
    process_data_hdbscan,
    extracted_data,
    processed_data,
    reduced_data_hdbscan,
    'Test shape fan reducer with HDBSCAN',
    network_kwargs=kwargs_extra_data,
    pkwargs={'shape': 'fan'},
    kwargs={
        'min_cluster_size': 2,
        'min_samples': 1,
        'allow_single_cluster': True
    },
    test_name='TestShapeReducerFanHdbscan'
)
| [
"coleman.krawczyk@gmail.com"
] | coleman.krawczyk@gmail.com |
d3c4d7c5e6c299881eefffeaddcf06d19bd78463 | 4ccaf5252f5936414638f254ca5932ad922cd582 | /ex034 - Aumentos multiplos.py | 66bc546db7f4f138303f01e794b214d8ad0051a7 | [] | no_license | carlosaugus1o/Python-Exercicios | b0a525436d7cf24e3fc9ccfd046278ad383eb01c | 6216430dac9d5fc6fe3b75ae9625063d4971e419 | refs/heads/main | 2023-07-03T22:59:31.913494 | 2021-07-28T03:05:50 | 2021-07-28T03:05:50 | 390,193,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | salário = float(input('Informe o salário do funcionário: R$ '))
if salário < 1250:
aumento = salário * 1.15
else:
aumento = salário * 1.1
print('O novo salário do funcionário será de {:.2f}'.format(aumento))
| [
"noreply@github.com"
] | carlosaugus1o.noreply@github.com |
6f9bb16e2b0de1dc1aaabf69938acdcbe66f7819 | b7d0f003cfb0ec6fa25f99d9f7b544dc38ae6aa8 | /concept/greedy/fractionalKnapsack.py | a04a8c99034d44bba0bd5bf14a0d3aa537b69b91 | [] | no_license | Kimyechan/dataStructureAndArgorithm | 43c2cfa0d12a5c729f687d786ef6dde23bf193a7 | c9f8f614621aee9e236ffef20e5e563b37bab0b3 | refs/heads/master | 2021-07-09T03:31:31.405725 | 2021-03-09T13:18:55 | 2021-03-09T13:18:55 | 231,402,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | data_list = [(10, 10), (15, 12), (20, 10), (25, 8), (30, 5)]
def getMaxValue(dataList, capacity):
dataList = sorted(dataList, key=lambda x: x[1] / x[0], reverse=True)
detail = list()
value = 0
for data in dataList:
if capacity - data[0] >= 0:
value += data[1]
capacity -= data[0]
detail.append(data)
else:
value += data[1] * (capacity / data[0])
detail.append(data)
break
return value, detail
print(getMaxValue(data_list, 30)) | [
"vlvkcjswo7@naver.com"
] | vlvkcjswo7@naver.com |
eba37ec8db396d0c781197f2c79e8b1305138bf5 | 537e30c108e7a575cec1b7a9332439e1abac811d | /venv/bin/list_instances | c49900bc82f96c01a5302e64f08b84b988637fd1 | [] | no_license | kangqiwang/extractOntology | d5401bb27603e94264d84ec41fc9e994ece92278 | 89b3d7bc1cf08ea8856cb9221448e988d4d2f992 | refs/heads/master | 2020-08-13T01:44:19.399402 | 2019-11-18T18:52:28 | 2019-11-18T18:52:28 | 214,881,034 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,136 | #!/home/kang/Project/myGit/extractOntology/venv/bin/python
import sys
from operator import attrgetter
from optparse import OptionParser
import boto
from boto.ec2 import regions
# Column registry: for each header name, how to extract the value from a
# boto instance ('get') and the printed column width ('length').
HEADERS = {
    'ID': {'get': attrgetter('id'), 'length': 15},
    'Zone': {'get': attrgetter('placement'), 'length': 15},
    'Groups': {'get': attrgetter('groups'), 'length': 30},
    'Hostname': {'get': attrgetter('public_dns_name'), 'length': 50},
    'PrivateHostname': {'get': attrgetter('private_dns_name'), 'length': 50},
    'State': {'get': attrgetter('state'), 'length': 15},
    'Image': {'get': attrgetter('image_id'), 'length': 15},
    'Type': {'get': attrgetter('instance_type'), 'length': 15},
    'IP': {'get': attrgetter('ip_address'), 'length': 16},
    'PrivateIP': {'get': attrgetter('private_ip_address'), 'length': 16},
    'Key': {'get': attrgetter('key_name'), 'length': 25},
    'T:': {'length': 30},
}


def get_column(name, instance=None):
    """Resolve one output column value for *instance*.

    Columns of the form 'T:<tag>' read the instance's tag dict (returning ''
    when the tag is absent); every other name goes through HEADERS.
    """
    prefix, sep, tag = name.partition(':')
    if sep and prefix == 'T':
        return instance.tags.get(tag, '')
    return HEADERS[name]['get'](instance)
def main():
    """List EC2 instances in one region, formatted per the -H header spec."""
    parser = OptionParser()
    parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
    parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
    parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False)
    parser.add_option("-f", "--filter", help="Filter option sent to DescribeInstances API call, format is key1=value1,key2=value2,...", default=None)
    (options, args) = parser.parse_args()

    # Connect the region
    for r in regions():
        if r.name == options.region:
            region = r
            break
    else:
        # for/else: only runs when no region matched the requested name.
        print("Region %s not found." % options.region)
        sys.exit(1)
    ec2 = boto.connect_ec2(region=region)

    # Read headers
    if options.headers:
        headers = tuple(options.headers.split(','))
    else:
        headers = ("ID", 'Zone', "Groups", "Hostname")

    # Create format string
    format_string = ""
    for h in headers:
        if h.startswith('T:'):
            # All tag columns share the single width registered under 'T:'.
            format_string += "%%-%ds" % HEADERS['T:']['length']
        else:
            format_string += "%%-%ds" % HEADERS[h]['length']

    # Parse filters (if any)
    if options.filter:
        filters = dict([entry.split('=') for entry in options.filter.split(',')])
    else:
        filters = {}

    # List and print
    if not options.tab:
        print(format_string % headers)
        print("-" * len(format_string % headers))
    for r in ec2.get_all_reservations(filters=filters):
        groups = [g.name for g in r.groups]
        for i in r.instances:
            # Flatten the reservation's security-group names onto the
            # instance so get_column's 'Groups' attrgetter can read them.
            i.groups = ','.join(groups)
            if options.tab:
                print("\t".join(tuple(get_column(h, i) for h in headers)))
            else:
                print(format_string % tuple(get_column(h, i) for h in headers))
main()
| [
"kangqiwang@outlook.com"
] | kangqiwang@outlook.com | |
e8d97fc8a462b5ec994919115a9da2beb1399e1e | f8967772b9c67b3917285e5c14ff0c26440c75ad | /p_code/for.py | f4319f51509e38e57cfff177e4c91bdcb0e10193 | [] | no_license | mdAshrafuddin/python | 9302b5285f5f83ee4e03c001b2fa1234a54affbf | ae84e5c677a649c9916af38e60d91491b498a36f | refs/heads/main | 2023-02-17T13:56:19.855635 | 2021-01-13T16:45:17 | 2021-01-13T16:45:17 | 329,363,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | # # for target_list in expression_list: suite
# # [else:suite]
# # n = [1, 3, 5, 6]
# for i in range(10):
# print(i)
# if i == 5:
# break
# # # string, tuple or list
# # persion = ["AShraf", 'tanjil', 'tamim', 'asj']
# # for p in persion:
# # print(p, len(p))
# phone_book = {
# "Mom": "123-44-55-665",
# "Dad": "233-33-55"
# }
# print(phone_book['Mom'])
# # users = {
# # "Ashraf": "inactive",
# # "Tanjil": "active",
# # "tamim" : "active"
# # }
# # for user, status in users.copy().items():
# # if status == 'inactive':
# # del users[user]
# # print("After deleting users")
# # for user, status in users.items():
# # print(user, status)
# # users = {
# # "John": "inactive",
# # "Helen": "active",
# # "James": "active", # and so on...
# # }
# # for user, status in users.copy().items():
# # if status == 'inactive':
# # del users[user]
# # print('After deleting users')
# # for user, status in users.copy().items():
# # print(user, status)
# users = {
# "John": "inactive",
# "Helen": "active",
# "James": "active", # and so on...
# }
# # active_users = {}
# # for user, status in users.items():
# # if status == 'active':
# # active_users[user] = status
# # for user, status in active_users.items():
# # print(user, status)
# for user, status in users.copy().items():
# if status == 'inactive':
# del users[user]
# for user, status in users.copy().items():
# print(user, status)
# print(sum(range(1, 10, 5)))
# Demo of for-loops over the common container types.
# Fix: the original named this variable `list`, shadowing the built-in type.
people = ['Ashraf', 'Tanjil', 'Tamim Chowdhury']
for name in people:
    print(name, len(name))

a = ['Mary', 'had', 'a', 'little', 'lamb']
# enumerate() pairs each element with its index.
for i, v in enumerate(a):
    print(i, v)

dic = {'name':'Ashrav', 'age':20}
# dict.items() yields (key, value) pairs.
for k, v in dic.items():
    print(k,v)
| [
"mdtanjil01753511918@gmail.com"
] | mdtanjil01753511918@gmail.com |
5f43704fc9571b6699a66ef7258a37a6851a5d9f | d4adf8e72bfc767bb6ad32e81a2d24a0309d90b7 | /Clients/Python/DBSAPI/UserExamples/dbsUpdateLumiSection.py | e7cdc9a58acf08ed3e61d61816c2246ec9745685 | [] | no_license | dmwm/DBSAPI | 5c55dd10161a1bb1fb6f646ca92dd3c4f263a256 | 3117ac30672a1932cef4606fbf7693ce4952b79d | refs/heads/master | 2021-01-21T19:28:22.329112 | 2013-10-30T12:07:54 | 2013-10-30T12:07:54 | 13,984,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | #!/usr/bin/env python
#
# Revision: 1.3 $"
# Id: DBSXMLParser.java,v 1.3 2006/10/26 18:26:04 afaq Exp $"
#
#
import sys
from DBSAPI.dbsApi import DbsApi
from DBSAPI.dbsException import *
from DBSAPI.dbsApiException import *
from DBSAPI.dbsLumiSection import DbsLumiSection
from DBSAPI.dbsOptions import DbsOptionParser
# Build the DBS API client from the command-line options.
optManager = DbsOptionParser()
(opts,args) = optManager.getOpt()
api = DbsApi(opts.__dict__)

# Example lumi section to update; identifiers are hard-coded for this demo.
lumi = DbsLumiSection (
     LumiSectionNumber=99,
     StartEventNumber=333,
     EndEventNumber=777,
     LumiStartTime=8888,
     LumiEndTime=999999,
     RunNumber=1,
     )

print "updating the lumi section"

try:
    api.updateLumiSection (lumi)
    print "Result: %s" % lumi

except DbsApiException, ex:
    # Surface the server-side error details instead of a bare traceback.
    print "Caught API Exception %s: %s " % (ex.getClassName(), ex.getErrorMessage() )
    if ex.getErrorCode() not in (None, ""):
        print "DBS Exception Error Code: ", ex.getErrorCode()

print "Done"
| [
"giulio.eulisse@cern.ch"
] | giulio.eulisse@cern.ch |
0f71e7d1375a7e6b6ec28cf3c3c93b812af90331 | 33af6185b48bd76f97f0a74390a3a812ee216c78 | /angr-doc/examples/mma_howtouse/solve.py | 590cbe00ab75741931603ffef1f242a6c027b2ee | [
"BSD-2-Clause"
] | permissive | Ruide/angr-dev | dab0cabd907fce47ac698f890c3f3a8b80ab7e2a | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | refs/heads/master | 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 | BSD-2-Clause | 2022-10-16T04:48:10 | 2017-09-22T01:35:12 | C | UTF-8 | Python | false | false | 1,329 | py | #!/usr/bin/env python
#
# This binary, from the MMA CTF, was a simple reversing challenge. THe biggest
# challenge was actually *running* this library in Windows. Luckily, with angr,
# we can avoid having to do this!
#
# The approach here is to use angr as a concrete execution engine to call the
# `howtouse` function 45 times, as the array of function pointers in that
# function has 45 entries. The result turned out to be the flag.
#
import angr
import claripy
def main():
    """Concretely call `howtouse` 45 times and concatenate the characters.

    The function-pointer array inside `howtouse` has 45 entries; evaluating
    the function at each index yields one character of the flag.
    """
    # Base addresses are weird when loading binaries directly, so pin it.
    project = angr.Project('howtouse.dll', load_options={'main_opts': {'custom_base_addr': 0x10000000}})

    # A "Callable" is angr's FFI-equivalent: it lets us invoke the binary's
    # `howtouse` function from Python.
    howtouse = project.factory.callable(0x10001130)

    # The result is a concrete char, so no symbolic state or solver needed.
    flag_chars = []
    for index in xrange(45):
        concrete = claripy.backends.concrete.convert(howtouse(index))
        flag_chars.append(chr(concrete.value))
    return ''.join(flag_chars)
def test():
    # Regression check: the concatenated characters form the known CTF flag.
    assert main() == 'MMA{fc7d90ca001fc8712497d88d9ee7efa9e9b32ed8}'
if __name__ == '__main__':
    # Python 2 script entry point: prints the recovered flag.
    print main()
| [
"rd.cheung.bupt.sms@gmail.com"
] | rd.cheung.bupt.sms@gmail.com |
fdf6a8987be76bdec8c219e115f22fc45208bdc9 | c41bbe6374d896c6bb36fe6c94a83f474388214e | /train.py | 079bc9699d85b1dd9a766040f43a84bc48c4fd4a | [] | no_license | DableUTeeF/sift_rep | dce8f0d15d80a7d41b7f97480eb13ef744806888 | 922109478bd33e7e3de74e352a99023ef479bcbf | refs/heads/master | 2020-04-24T16:28:24.124287 | 2019-02-22T17:38:40 | 2019-02-22T17:38:40 | 172,108,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | import natthaphon
import models
import datagen
from torch import nn, optim
import json
import os
def lrstep(epoch):
    """Piecewise-constant learning-rate schedule for LambdaLR.

    Returns 0.05 for epochs 0-149, 0.005 for 150-224, 0.0005 afterwards.
    """
    # Bug fix: the original used `150 < epoch < 225`, which excluded epoch
    # 150 itself, so epoch 150 fell through to 0.0005 and the rate jumped
    # back up to 0.005 at epoch 151.
    if epoch < 150:
        a = 0.05
    elif epoch < 225:
        a = 0.005
    else:
        a = 0.0005
    print(f'Epoch: {epoch+1} - returning learning rate {a}')
    return a
if __name__ == '__main__':
    model = natthaphon.Model(models.ResCift((3, 3, 3)))
    # NOTE(review): named `rprop` but actually plain SGD with momentum.
    rprop = optim.SGD(model.model.parameters(), lr=0.01, momentum=0.9)
    model.compile(optimizer=rprop,
                  loss=nn.MSELoss(),
                  device='cuda'
                  )
    # Pick local test data when available, otherwise the MSCOCO server paths.
    if os.path.isdir('/home/palm/Pictures'):
        train_datagen = datagen.SiftGenerator('/home/palm/Pictures/phuket')
        val_datagen = datagen.SiftGenerator('/home/palm/Pictures/phuket')
    else:
        train_datagen = datagen.SiftGenerator('/root/palm/DATA/mscoco/images/train2017')
        val_datagen = datagen.SiftGenerator('/root/palm/DATA/mscoco/images/val2017')
    trainloader = natthaphon.Loader(train_datagen, shuffle=True, num_workers=4)
    testloader = natthaphon.Loader(val_datagen, shuffle=False, num_workers=4)
    # Step the LR per-epoch via the lrstep schedule above.
    schedule = natthaphon.LambdaLR(rprop, lrstep)
    history = model.fit_generator(trainloader, 300, validation_data=testloader, schedule=schedule)
    with open('logs/ResCift333-1.json', 'w') as wr:
        json.dump(history, wr)
| [
"palm22180@gmail.com"
] | palm22180@gmail.com |
129fa6a2abc715f1b035273b3ccc5bae8d183bf9 | fe3bc38d2a9f80a6b258e2c61dbe4557323a1d71 | /corehq/apps/smsbillables/management/commands/add_moz_zero_charge.py | 3d49a8b4d97eb67199c33df40d022eb1d8355668 | [] | no_license | ekush/commcare-hq | 077eb3f525ffb7d1acca0848b9c7678baf776832 | 97a1f55f24f79224724b2ecdc7d5cea87d42f65b | refs/heads/master | 2021-01-17T22:25:09.734898 | 2015-08-25T23:07:49 | 2015-08-25T23:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | from decimal import Decimal
import logging
from couchdbkit import ResourceNotFound
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.backend.http_api import HttpBackend
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def add_moz_zero_charge(orm):
    """Create zero-amount MZN gateway fees for the Mozambique SISLOG backend.

    `orm` is the migration's frozen ORM (dict-style model lookup) when run
    from a migration, or None when run from the management command — then the
    live model classes are used instead.
    """
    mzn, _ = (orm['accounting.Currency'] if orm else Currency).objects.get_or_create(code='MZN')

    sms_gateway_fee_class = orm['smsbillables.SmsGatewayFee'] if orm else SmsGatewayFee
    sms_gateway_fee_criteria_class = orm['smsbillables.SmsGatewayFeeCriteria'] if orm else SmsGatewayFeeCriteria

    # Incoming SISLOG messages are free (zero fee, no country/prefix filter).
    SmsGatewayFee.create_new(
        'SISLOG',
        INCOMING,
        Decimal('0'),
        country_code=None,
        prefix='',
        currency=mzn,
        fee_class=sms_gateway_fee_class,
        criteria_class=sms_gateway_fee_criteria_class,
    )

    # The outgoing fee is tied to one specific HttpBackend couch document.
    backend_id = '7ddf3301c093b793c6020ebf755adb6f'
    try:
        backend = HttpBackend.get(backend_id)
        SmsGatewayFee.create_new(
            backend.get_api_id(),
            OUTGOING,
            Decimal('0'),
            backend_instance=backend._id,
            country_code=None,
            prefix='',
            currency=mzn,
            fee_class=sms_gateway_fee_class,
            criteria_class=sms_gateway_fee_criteria_class,
        )
        logger.info("Updated Moz gateway default fees.")
    except ResourceNotFound:
        # Best-effort: the backend only exists on some environments.
        logger.error("[SMS-BILLING] Could not find HttpBackend %s - did not create outgoing Moz gateway default fees."
                     " Please look into if this is on production, otherwise ignore." % backend_id)
class Command(LabelCommand):
    """Management command that bootstraps the MOZ gateway's default SMS fees."""
    args = ""
    label = ""
    help = "bootstrap MOZ global SMS backend gateway default fees"

    def handle(self, *args, **options):
        # Not running inside a migration, so there is no frozen ORM to pass.
        add_moz_zero_charge(None)
| [
"npellegrino@dimagi.com"
] | npellegrino@dimagi.com |
36012a75fd7fc8f9abd7cf667e21753bedc9c361 | c8362b6beb84577a89b90fa729beec35c094cf96 | /generate_json.py | 9e7aece6f258377d56e8c121de372110e0547509 | [] | no_license | akx/twitter-swagger-api-defs | 651f4117341fb5476e586a93940b5f663ec9e4f5 | 0fbb55527a990df35ebe659d7adcfbcab4ab123a | refs/heads/master | 2016-09-05T22:19:51.440050 | 2013-06-04T16:31:23 | 2013-06-04T16:31:23 | 10,478,163 | 1 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | import os
import re
import urlparse
from lxml.html import HTMLParser
from lxml import etree
url_param_re = re.compile(r":([a-z_]+)", re.I)
def inner_text(el):
    """Return the whitespace-normalized text content of an element.

    Accepts a single element or a (possibly empty) cssselect result list;
    returns None for an empty/falsy input.
    """
    if not el:
        return None
    node = el[0] if isinstance(el, list) else el
    raw = etree.tostring(node, method="text", encoding="UTF-8")
    return raw.decode("UTF-8").strip().replace("\n", " ")
def parse_tree(tree):
    """Convert one parsed API-doc page into a Swagger 1.1 API dict.

    Returns None when the page does not document a GET/POST endpoint.
    """
    title = inner_text(tree.cssselect("#title"))
    if not ("GET" in title or "POST" in title):
        return None
    is_post = title.startswith("POST")
    # The documented URL uses a literal 'format' placeholder; templatize it.
    endpoint = inner_text(tree.cssselect(".field-doc-resource-url div")).replace("format", "{format}")
    description = inner_text(tree.cssselect(".doc-updated+div>p"))

    # Rewrite ':name' path variables to '{name}' and remember them so the
    # matching parameters can be marked paramType='path' below.
    url_params = set()
    def fix_url_param(m):
        var = m.group(1)
        url_params.add(var)
        return "{%s}" % var
    endpoint = url_param_re.sub(fix_url_param, endpoint)

    parameters = []
    for param in tree.cssselect("div.parameter"):
        p_name_raw = inner_text(param.cssselect(".param"))
        # The name cell is "name required|optional"; a bare name means required.
        try:
            p_name, required = p_name_raw.rsplit(" ", 1)
        except ValueError:
            p_name = p_name_raw
            required = "required"
        p_desc = inner_text(param.cssselect("p"))
        parameters.append({
            "name": p_name,
            "description": p_desc,
            "required": (required == "required"),
            "dataType": "string", # Can't assume anything else,
            "paramType": ("path" if p_name in url_params else ("form" if is_post else "query")),
        })

    return {
        "path": urlparse.urlparse(endpoint).path,
        "description": "",
        "operations": [{
            "httpMethod": "POST" if is_post else "GET",
            "nickname": title.lower().replace("/", "_").replace(" ", "_"),
            "responseClass": "complex",
            "parameters": parameters,
            "summary": description,
        }]
    }
def parse_file(fn):
    """Parse a saved API documentation page from a file on disk."""
    html_parser = HTMLParser()
    root = etree.parse(fn, parser=html_parser).getroot()
    return parse_tree(root)
def parse_from_string(s):
    """Parse a saved API documentation page held in memory."""
    html_parser = HTMLParser()
    root = etree.fromstring(s, parser=html_parser)
    return parse_tree(root)
def parse_from_zip():
    """Parse every documentation page stored in apidocs.zip.

    Returns a list with one API definition dict (or None) per non-empty page.
    """
    import zipfile
    apis = []
    # Fix: close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile("apidocs.zip") as zf:
        for fileinfo in zf.infolist():
            if fileinfo.file_size > 0:
                apis.append(parse_from_string(zf.read(fileinfo)))
    return apis
def main():
from json import dumps
apis = dict((api["path"], api) for api in parse_from_zip() if api).values()
print "%d unique API definitions parsed." % len(apis)
spec = {
"apiVersion": "1.1",
"swaggerVersion": "1.1",
"basePath": "https://api.twitter.com",
"description": u"Twitter",
"apis": sorted(apis, key=lambda api:api["path"]),
}
file("twitter_api.json", "wb").write(dumps(spec, indent=4))
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"akx@iki.fi"
] | akx@iki.fi |
5393132b63d05209be58fd2eecf5fdb158d8cfab | 8cf9c32fcad16c4109809447a530b435d290aa25 | /desktop/libs/notebook/src/notebook/connectors/altus.py | c5b7c4646ffa65ca93d8617617fa8b39dfbde67d | [
"Apache-2.0"
] | permissive | veritascl/hue | 38618e923d43bc167be6dd15c9d4b084758655d3 | ceb267982049638d306aff975bf0c9572db1560d | refs/heads/master | 2020-03-26T13:57:57.673750 | 2018-08-15T16:27:06 | 2018-08-15T16:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,082 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from datetime import datetime, timedelta
from django.urls import reverse
from django.utils.translation import ugettext as _
from metadata.conf import ALTUS
from navoptapi.api_lib import ApiLib
from desktop.lib.exceptions_renderable import PopupException
LOG = logging.getLogger(__name__)
DATE_FORMAT = "%Y-%m-%d"
def _exec(service, command, parameters=None):
  """Call an Altus API `command` on `service` and return the parsed JSON.

  The hostname is chosen per service from the ALTUS configuration; any
  failure is re-raised as a PopupException for the UI.
  """
  if parameters is None:
    parameters = {}

  # Each Altus service can be hosted on its own endpoint.
  if service == 'analyticdb':
    hostname = ALTUS.HOSTNAME_ANALYTICDB.get()
  elif service == 'dataeng':
    hostname = ALTUS.HOSTNAME_DATAENG.get()
  elif service == 'wa':
    hostname = ALTUS.HOSTNAME_WA.get()
  else:
    hostname = ALTUS.HOSTNAME.get()

  try:
    # The configured secret stores newlines escaped as '\n'; un-escape them.
    api = ApiLib(service, hostname, ALTUS.AUTH_KEY_ID.get(), ALTUS.AUTH_KEY_SECRET.get().replace('\\n', '\n'))
    LOG.debug('%s : %s' % (command, parameters))
    resp = api.call_api(command, parameters)
    LOG.info(resp)
    json_resp = resp.json()
    LOG.debug(json_resp )
    return json_resp
  except Exception, e:
    raise PopupException(e, title=_('Error accessing'))
# Placeholder for the Altus IAM service wrapper; would cover calls such as:
class IAMApi(): pass
# altus iam list-user-assigned-roles --user=crn:altus:ia
class SdxApi():
  """Wrapper for the Altus SDX service."""

  def __init__(self, user):
    pass

  def list_namespaces(self):
    """Return the list of SDX namespace descriptions.

    Each entry is a dict of the form:
      {u'status': u'CREATED',
       u'namespaceName': u'cca-5150-ns',
       u'creationDate': u'2018-06-03T23:24:46.125000+00:00',
       u'crn': u'crn:altus:sdx:us-west-1:...:namespace:cca-5150-ns/...'}
    """
    response = _exec('sdx', 'listNamespaces')
    return response['namespaces']
class DataEngApi():
  """Wrapper for the Altus Data Engineering service.

  Every method delegates to the module-level `_exec` helper and returns the
  parsed JSON response.
  """

  def __init__(self, user):
    pass

  def list_jobs(self, submitter_crns=None, page_size=None, starting_token=None, job_statuses=None, job_ids=None, job_types=None, creation_date_before=None,
        creation_date_after=None, cluster_crn=None, order=None):
    """List jobs matching the given optional filters.

    Defaults `creation_date_after` to seven days ago when not provided.
    """
    args = {}

    if creation_date_after is None:
      creation_date_after = (datetime.today() - timedelta(days=7)).strftime(DATE_FORMAT)

    if submitter_crns:
      args['submitterCrns'] = submitter_crns
    if page_size is not None:
      args['pageSize'] = str(page_size)
    if starting_token:
      args['startingToken'] = starting_token
    if job_statuses:
      args['jobStatuses'] = job_statuses
    if job_ids:
      args['jobIds'] = job_ids
    if job_types:
      args['jobTypes'] = job_types
    if creation_date_before:
      args['creationDateBefore'] = creation_date_before
    if creation_date_after:
      args['creationDateAfter'] = creation_date_after
    if cluster_crn:
      args['clusterCrn'] = cluster_crn
    if order:
      args['order'] = order

    return _exec('dataeng', 'listJobs', args)

  def describe_job(self, job_id):
    # Bug fix: previously passed the *set* {'jobId', job_id} instead of a
    # parameter dict, so the job id was never sent to the API.
    return _exec('dataeng', 'describeJob', {'jobId': job_id})

  def submit_hive_job(self, cluster_name, script, params=None, job_xml=None):
    """Submit one Hive script job to `cluster_name`."""
    job = {'script': script}

    if params:
      job['params'] = params
    if job_xml:
      job['jobXml'] = job_xml

    return self.submit_jobs(cluster_name, [{'hiveJob': job}])

  def submit_spark_job(self, cluster_name, jars=None, main_class=None, arguments=None, spark_arguments=None, properties_file=None):
    """Submit one Spark job to `cluster_name`.

    NOTE(review): `spark_arguments` and `properties_file` are accepted but
    currently not forwarded to the API.
    """
    job = {
      "jars": jars if jars else [],
      "applicationArguments": arguments if arguments else [],
      #"sparkArguments": "string",
      #"propertiesFile": "string"
    }
    if main_class:
      job["mainClass"] = main_class

    # Bug fix: a stray trailing comma wrapped the response in a 1-tuple,
    # inconsistent with submit_hive_job which returns the response directly.
    return self.submit_jobs(cluster_name, [{'sparkJob': job, 'name': None, 'failureAction': 'NONE'}])

  def submit_yarn_job(self):
    # NOTE(review): no job payload is built here; presumably unimplemented.
    return _exec('dataeng', 'submitJobs')

  def submit_jobs(self, cluster_name, jobs):
    return _exec('dataeng', 'submitJobs', {'clusterName': cluster_name, 'jobs': jobs})

  def terminate_job(self, job_id):
    return _exec('dataeng', 'terminateJob', {'jobId': job_id})

  def list_clusters(self, names=None, page_size=None, starting_token=None):
    """List Data Engineering clusters, optionally filtered by name."""
    args = {}

    if names:
      args['clusterNames'] = names
    if page_size is not None:
      args['pageSize'] = str(page_size)
    if starting_token:
      args['startingToken'] = starting_token

    return _exec('dataeng', 'listClusters', args)

  def create_cluster(self):
    return _exec('dataeng', 'createCluster')

  def delete_cluster(self):
    return _exec('dataeng', 'deleteCluster')

  def describe_clusters(self):
    return _exec('dataeng', 'describeCluster')
class AnalyticDbApi():
    """Wrapper around the Altus ``analyticdb`` service commands."""

    def __init__(self, user):
        # Kept for interface parity with the other API wrappers.
        pass

    def create_cluster(self, cloud_provider, cluster_name, cdh_version, public_key,
                       instance_type, environment_name, workers_group_size=3,
                       namespace_name=None):
        """Create an AWS Analytic DB cluster and return the CLI response.

        ``cloud_provider`` (e.g. AWS, Azure) is currently not forwarded:
        only the AWS variant of the command is issued.
        """
        # CLI flags intentionally not exposed here:
        # [--cloudera-manager-username <value>]
        # [--cloudera-manager-password <value>]
        payload = {
            'clusterName': cluster_name,
            'cdhVersion': cdh_version,
            'publicKey': public_key,
            'instanceType': instance_type,
            'environmentName': environment_name,
            'workersGroupSize': workers_group_size,
        }
        if namespace_name:
            payload['namespaceName'] = namespace_name
        return _exec('analyticdb', 'createAWSCluster', payload)

    def list_clusters(self):
        """Return the list of Analytic DB clusters, e.g.::

            [{"status": "CREATED",
              "clusterName": "spot",
              "workersGroupSize": 4,
              "environmentType": "AWS",
              "instanceType": "r4.4xlarge",
              "cdhVersion": "CDH514",
              ...},
             ...]
        """
        return _exec('analyticdb', 'listClusters')

    def submit_hue_query(self, cluster_crn, payload):
        """Submit a Hue query against the cluster identified by ``cluster_crn``."""
        args = {'clusterCrn': cluster_crn, 'payload': payload}
        return _exec('analyticdb', 'submitHueQuery', args)
| [
"romain@cloudera.com"
] | romain@cloudera.com |
6b48c0a42b044f51c67f1fc12bca48691e1a126d | f594c17d04a882d80d0cc2bbcb54163fbeca0aa8 | /geotagging/fixes/gis/admin/options.py | f98e796036a2cbe5a280338ce47b51b5a5287cea | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | AlphaRomeo13/django-geotagging | 9f3752c16260d4eab3592975ac9930ebe87f9244 | c846bdb82556a64e16ba08730c85921c66ca01eb | refs/heads/master | 2021-01-12T09:22:11.384120 | 2016-12-11T18:23:59 | 2016-12-11T18:23:59 | 76,152,012 | 1 | 0 | null | 2016-12-11T03:30:44 | 2016-12-11T03:30:44 | null | UTF-8 | Python | false | false | 6,161 | py | from django.contrib.admin.options import BaseModelAdmin, InlineModelAdmin, \
StackedInline, TabularInline
from geotagging.fixes.gis.admin.widgets import OpenLayersWidgetFixed as OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
from django.contrib.contenttypes.generic import GenericInlineModelAdmin, \
GenericStackedInline, GenericTabularInline
class GeoBaseModelAdmin(BaseModelAdmin):
    """
    The administration options class for Geographic models. Map settings
    may be overloaded from their defaults to create custom maps.
    """
    # The default map settings that may be overloaded -- still subject
    # to API changes.
    default_lon = 0            # initial map center longitude
    default_lat = 0            # initial map center latitude
    default_zoom = 4           # initial zoom level
    display_wkt = False        # show the raw WKT text box in the widget
    display_srid = False
    extra_js = []              # extra JavaScript URLs to load
    num_zoom = 18
    max_zoom = False
    min_zoom = False
    units = False
    max_resolution = False
    max_extent = False
    modifiable = True          # whether the geometry may be edited
    mouse_position = True
    scale_text = True
    layerswitcher = True
    scrollable = True
    map_width = 600            # widget size in pixels
    map_height = 400
    map_srid = 4326            # WGS84 unless GDAL switches this to 900913 below
    map_template = 'gis/admin/openlayers.html'
    openlayers_url = 'http://openlayers.org/api/2.8/OpenLayers.js'
    wms_url = 'http://labs.metacarta.com/wms/vmap0'
    wms_layer = 'basic'
    wms_name = 'OpenLayers WMS'
    debug = False
    widget = OpenLayersWidget

    # inject Open Street map if GDAL works
    # (class-body import: evaluated once at class definition time)
    from django.contrib.gis import gdal
    if gdal.HAS_GDAL:
        map_template = 'gis/admin/osm.html'
        extra_js = ['http://openstreetmap.org/openlayers/OpenStreetMap.js']
        num_zoom = 20
        map_srid = 900913       # Google/OSM spherical Mercator projection
        max_extent = '-20037508,-20037508,20037508,20037508'
        max_resolution = 156543.0339
        units = 'm'

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Overloaded from ModelAdmin so that an OpenLayersWidget is used
        for viewing/editing GeometryFields.
        """
        if isinstance(db_field, models.GeometryField):
            # Setting the widget with the newly defined widget.
            kwargs['widget'] = self.get_map_widget(db_field)
            return db_field.formfield(**kwargs)
        else:
            return super(GeoBaseModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)

    def get_map_widget(self, db_field):
        """
        Returns a subclass of the OpenLayersWidget (or whatever was specified
        in the `widget` attribute) using the settings from the attributes set
        in this class.
        """
        # MULTI*/GEOMETRYCOLLECTION fields get collection-editing controls.
        is_collection = db_field._geom in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
        if is_collection:
            if db_field._geom == 'GEOMETRYCOLLECTION': collection_type = 'Any'
            else: collection_type = OGRGeomType(db_field._geom.replace('MULTI', ''))
        else:
            collection_type = 'None'

        # Build a widget subclass capturing this admin's map settings.
        class OLMap(self.widget):
            template = self.map_template
            geom_type = db_field._geom
            params = {'default_lon' : self.default_lon,
                      'default_lat' : self.default_lat,
                      'default_zoom' : self.default_zoom,
                      'display_wkt' : self.debug or self.display_wkt,
                      'geom_type' : OGRGeomType(db_field._geom),
                      'field_name' : db_field.name,
                      'is_collection' : is_collection,
                      'scrollable' : self.scrollable,
                      'layerswitcher' : self.layerswitcher,
                      'collection_type' : collection_type,
                      'is_linestring' : db_field._geom in ('LINESTRING', 'MULTILINESTRING'),
                      'is_polygon' : db_field._geom in ('POLYGON', 'MULTIPOLYGON'),
                      'is_point' : db_field._geom in ('POINT', 'MULTIPOINT'),
                      'num_zoom' : self.num_zoom,
                      'max_zoom' : self.max_zoom,
                      'min_zoom' : self.min_zoom,
                      'units' : self.units, #likely shoud get from object
                      'max_resolution' : self.max_resolution,
                      'max_extent' : self.max_extent,
                      'modifiable' : self.modifiable,
                      'mouse_position' : self.mouse_position,
                      'scale_text' : self.scale_text,
                      'map_width' : self.map_width,
                      'map_height' : self.map_height,
                      'srid' : self.map_srid,
                      'display_srid' : self.display_srid,
                      'wms_url' : self.wms_url,
                      'wms_layer' : self.wms_layer,
                      'wms_name' : self.wms_name,
                      'debug' : self.debug,
                      }
        return OLMap
# Using the Beta OSM in the admin requires the following:
# (1) The Google Maps Mercator projection needs to be added
# to your `spatial_ref_sys` table. You'll need at least GDAL 1.5:
# >>> from django.contrib.gis.gdal import SpatialReference
# >>> from django.contrib.gis.utils import add_postgis_srs
# >>> add_postgis_srs(SpatialReference(900913)) # Adding the Google Projection
#inlines
class GeoInlineModelAdmin(InlineModelAdmin, GeoBaseModelAdmin):
    """Inline admin variant that injects the OpenLayers JavaScript."""

    def _media(self):
        """Return the inherited admin media plus the OpenLayers scripts."""
        combined = super(GeoInlineModelAdmin, self)._media()
        for scripts in ([self.openlayers_url], self.extra_js):
            combined.add_js(scripts)
        return combined

    media = property(_media)
class GeoStackedInline(StackedInline, GeoInlineModelAdmin):
    # Stacked inline with geographic widget support; no overrides needed.
    pass
class GeoTabularInline(TabularInline, GeoInlineModelAdmin):
    # Tabular rows are narrower, so shrink the map widget.
    map_width = 300
    map_height = 200
#generic inlines
class GeoGenericInlineModelAdmin(GenericInlineModelAdmin, GeoInlineModelAdmin):
    # Generic-relation (contenttypes) inline with geographic widget support.
    pass
class GeoGenericStackedInline(GenericStackedInline, GeoGenericInlineModelAdmin):
    # Stacked layout for generic-relation geographic inlines.
    pass
# NOTE(review): "Tablular" is a typo but renaming would break importers.
class GeoGenericTablularInline(GenericTabularInline, GeoGenericInlineModelAdmin):
    # Tabular rows are narrower, so shrink the map widget.
    map_width = 300
    map_height = 200
| [
"pete@lincolnloop.com"
] | pete@lincolnloop.com |
047ee7629aacfb019abe87491bacab106a583b03 | efb7180c05964aee07756dbd4f9982f81559d7e3 | /TradeBot/tradebotapp/admin.py | d151aa6b4efce4984dc6473207db707c1514746f | [] | no_license | ShunnoSaiful/Trade-Bot | 920ba75225d921f54530fc9f0d10a8eb9eabdaaf | d07489dea5fcf1d1d51a918a3127f620682107f2 | refs/heads/master | 2022-11-24T08:22:00.946773 | 2019-10-29T05:20:08 | 2019-10-29T05:20:08 | 218,207,062 | 0 | 0 | null | 2022-11-22T04:18:04 | 2019-10-29T04:54:41 | JavaScript | UTF-8 | Python | false | false | 372 | py | from django.contrib import admin
# Register your models here.
from .models import Question, Answer, Plan, Description, Download, Section, FeatureCategory
# Make the trade-bot models manageable through the Django admin site.
for _model in (Question, Answer, Plan, Description, Download,
               FeatureCategory, Section):
    admin.site.register(_model)
| [
"sunnosaiful@gmail.com"
] | sunnosaiful@gmail.com |
f8fbeb582f0bc1475af2524ec4a330871d14c9f0 | 98efe1aee73bd9fbec640132e6fb2e54ff444904 | /loldib/getratings/models/NA/na_maokai/__init__.py | d2c95d6f119c057f7496b2ef8fedde90603c8eda | [
"Apache-2.0"
] | permissive | koliupy/loldib | be4a1702c26546d6ae1b4a14943a416f73171718 | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | refs/heads/master | 2021-07-04T03:34:43.615423 | 2017-09-21T15:44:10 | 2017-09-21T15:44:10 | 104,359,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from .na_maokai_top import *
from .na_maokai_jng import *
from .na_maokai_mid import *
from .na_maokai_bot import *
from .na_maokai_sup import *
| [
"noreply@github.com"
] | koliupy.noreply@github.com |
c2c263958c80fabd4c1e349cb2fe676374cc2a1d | 36fc492ad0d36e9d4a4452007d57c733a84ccaac | /python_programming_drive/mixed/list/List funtion.py | 4b832abee986c92c6691bdb575dd49dc49ed18bb | [] | no_license | mahmudgithub/Core-python | 864568994490f857ba89e2c66fbf10a65a4aea98 | c37cb4218fe1e216a4e3e80544cae262582cf4b5 | refs/heads/master | 2023-04-29T02:14:47.106443 | 2021-05-14T12:36:26 | 2021-05-14T12:36:26 | 346,885,590 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | # creat a simple list method
# Demonstrations of Python list creation and common list methods.
# FIX: the working variable was renamed from ``list`` to ``items`` so the
# ``list`` builtin is no longer shadowed; printed output is unchanged.

# Create a simple list.
items = [2, 3, 4, 5, 6, 7, 8, 9]
print(items)
mh = [2, 3, 4, 5, 6, 7, 8, 9]
print(mh)

# A list nested inside a list (sub-lists to arbitrary depth).
items = [2, [3, [4, 4, [5, 5, 5], 4], 3], 3, 4, 5, 6, 7, 8, 9]
print(items)

# count(): how many times a value occurs in the list.
items = [2, 3, 4, 5, 6, 7, 85, 5, 5, 5, 9]
x = items.count(5)
print("similar cherecter in list is:", x)

# index(): the position of an element.
items = [2, 3, 4, 5, 6, 7, 8, 9, "mahmud"]
x = items.index("mahmud")  # "mahmud" sits at index 8
print(x)

# sort(): ascending, in place.
items = [2, 3, 4, 5, 6, 7, 3, 8, 3, 9]
items.sort()
print(items)
items = ['a', 'd', 'h', 'm', 'a', 'a']
items.sort()
print(items)
items = ["mah", "ham", "mah", "azwy"]  # strings compare character by character
items.sort()
print(items)

# remove(): delete the first occurrence of a value.
items = [1, 2, 3, 4, 555, 6, "mahmud"]
items.remove(items[6])  # look the value up by index 6 first
print(items)
items = [1, 2, 3, 4, 555, 6, "mahmud"]
items.remove(555)  # remove directly by value
print(items)

# pop(): delete (and return) the last item.
items = [1, 2, 3, 4, 555, 6, "mahmud"]
items.pop()
print(items)

# insert(): add an element at a given index.
items = [1, 2, 3, 4, 555, 6, "mahmud"]
items.insert(7, 9)  # index 7, value 9
print(items)

# 2D list (matrix) traversal, one row per output line.
mh = [[1, 2, 3, 4, 5],
      [4, 5, 6, 7, 8],
      [1, 1, 1, 1, 1],
      [0, 0, 0, 0, 0]]
for row in mh:
    for value in row:
        print(value, end=' ')  # end=' ' keeps the row on one line
    print()

# Same matrix without end=' ': one value per output line.
mh = [[1, 2, 3, 4, 5],
      [4, 5, 6, 7, 8],
      [1, 1, 1, 1, 1],
      [0, 0, 0, 0, 0]]
for row in mh:
    for value in row:
        print(value)
    print()
| [
"mahmudhossain838@gmail.com"
] | mahmudhossain838@gmail.com |
d4b595bf7439bdd8d1befa931a7f24802be5c9ee | 4e02eefa71196aac8d62a61e3d698b1d1257a523 | /豆瓣电影词云/爬取豆瓣影评生成词云.py | 378f32ac017fa8828f836d40976b8a828b7d8472 | [] | no_license | onism7/spider | e7723f9cc8727184b0edf468c8821b57a80af501 | 5a0fe16f367876ab5f63aa7737a9e0a0efdb3b09 | refs/heads/爬虫学习 | 2023-04-04T23:59:02.385924 | 2020-07-05T15:10:08 | 2020-07-05T15:10:08 | 268,724,369 | 1 | 0 | null | 2021-03-30T12:10:27 | 2020-06-02T06:54:24 | null | UTF-8 | Python | false | false | 2,951 | py | import jieba.analyse
import re
from urllib import request
from bs4 import BeautifulSoup
from wordcloud import WordCloud
import matplotlib.pyplot as plt
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
# Fetch and parse the Douban "now playing" page for Hangzhou.
def getNowPlayingMovie_list():
    """Return a list of {"id": ..., "name": ...} dicts, one per movie."""
    res = request.Request("https://movie.douban.com/nowplaying/hangzhou/", headers=headers)
    resp = request.urlopen(res)
    html_data = resp.read().decode("utf-8")
    # Parse the page.
    soup = BeautifulSoup(html_data, "html.parser")
    nowplaying = soup.find_all("div", id="nowplaying")
    nowplaying_movie_list = nowplaying[0].find_all("li", class_="list-item")
    movie_list = list()
    for item in nowplaying_movie_list:
        movie_dict = {}  # store each movie's id and name as a dict
        movie_dict["id"] = item["data-subject"]
        for tag_img_item in item.find_all("img"):
            movie_dict["name"] = tag_img_item["alt"]
        movie_list.append(movie_dict)
    return movie_list
# Fetch one page (20 entries) of short comments for a movie.
def getCommentById(movieId, page_num):
    """Return a list of comment strings for ``movieId``'s page ``page_num``.

    Returns False when ``page_num`` is not positive (note the inconsistent
    return type on that path).
    """
    if page_num > 0:
        start = (page_num - 1) * 20
    else:
        return False
    sub_url = "https://movie.douban.com/subject/" + movieId + "/comments?start=" + str(start) + "&limit=20"
    sub_res = request.Request(sub_url, headers=headers)
    sub_res_ = request.urlopen(sub_res)
    comment_data = sub_res_.read().decode("utf-8")
    soup = BeautifulSoup(comment_data, "html.parser")
    comment_div_list = soup.find_all("div", class_="comment")
    eachCommentList = list()
    for item in comment_div_list:
        # Keep only comments whose first <p><span> holds actual text.
        if item.find_all("p")[0].find("span").string is not None:
            eachCommentList.append(item.find_all("p")[0].find("span").string)
    return eachCommentList
if __name__ == '__main__':
    commentList = list()
    movie_list = getNowPlayingMovie_list()
    for i in range(10):  # first 10 comment pages
        num = i + 1
        # NOTE(review): movie_list[2] hard-codes the third movie on the
        # "now playing" page -- confirm that is intentional.
        commentList_temp = getCommentById(movie_list[2]["id"], num)
        commentList.append(commentList_temp)
    # Convert the collected list data to one string.
    # NOTE(review): commentList is a list of lists, so str() keeps the
    # bracket/quote characters; the regex below strips them together
    # with all other non-CJK characters.
    comments = ""
    for k in range(len(commentList)):
        comments = comments + (str(commentList[k])).strip()
    # Keep only CJK characters (drops punctuation, ascii, digits).
    pattern = re.compile(r"[\u4e00-\u9fa5]+")
    filterdata = re.findall(pattern, comments)
    cleaned_comments = "".join(filterdata)
    # Chinese word segmentation + TextRank keyword extraction via jieba.
    results = jieba.analyse.textrank(cleaned_comments, topK=50, withWeight=True)
    keyword = dict()
    for i in results:
        keyword[i[0]] = i[1]
    print("删除停用词前:", keyword)
    # Render the keyword weights as a word cloud.
    wordcloud = WordCloud(font_path="simhei.ttf", background_color="white", max_font_size=80)
    word_frequence = keyword
    myword = wordcloud.fit_words(word_frequence)
    plt.imshow(myword)
    plt.axis("off")
    plt.show()
| [
"1125699801@qq.com"
] | 1125699801@qq.com |
6296f20f61cae4491766f0d1526cc1ef1f53687d | 26be744685a62eb921e4d27f5c98cd4dd795a5b8 | /start_gui_for_nodes.py | 31dd718d98096eecc4a2f0c238c00731633603ff | [] | no_license | AlexDobrushskiy/python_layer | 51c7b736eb8997ede2d24899020b565ca11c4153 | 5ed47f2eacf920cfb285d0763a9170abfe5f5e95 | refs/heads/master | 2023-01-11T19:57:39.854724 | 2019-03-06T11:29:03 | 2019-03-06T11:29:03 | 173,755,304 | 0 | 0 | null | 2023-01-01T05:00:06 | 2019-03-04T14:00:08 | Python | UTF-8 | Python | false | false | 1,234 | py | # -*- coding: utf-8 -*-
import multiprocessing
import logging
import sys
from core_modules.blackbox_modules.keys import id_keypair_generation_func
from core_modules.masternode_discovery import discover_nodes
from client_prototype.cefpython.cefpython import start_cefpython
def initlogging():
    """Create and return a DEBUG-level logger that writes to the console."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    fmt = logging.Formatter(' %(asctime)s - ' + __name__ + ' - %(levelname)s - %(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(fmt)
    log.addHandler(handler)
    return log
if __name__ == "__main__":
basedir = sys.argv[1]
# discover nodes
logger = initlogging()
privkey, pubkey = id_keypair_generation_func()
# load tabs for masternodes
browsers = []
for settings in discover_nodes(basedir):
url = "http://%s:%s@%s:%s" % (settings["rpcuser"], settings["rpcpassword"], settings["ip"], settings["pyhttpadmin"])
p = multiprocessing.Process(target=start_cefpython, args=(settings["nodename"], url))
p.start()
browsers.append(p)
input("Press ENTER to stop browsers")
for browser in browsers:
browser.terminate()
| [
"a.dobrushskiy@gmail.com"
] | a.dobrushskiy@gmail.com |
983aa700f8495ac6e419b73ea7de4a40ea515472 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/925.py | 469e68f78efb9387f7b68d65ff2ee906763fbb7b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | raw_alphabet = "yhesocvxduiglbkrztnwjpfmaq"
# NOTE: this is a Python 2 script (xrange, dict.has_key, print statement).
alphabet = {}
# Build the substitution table: plain letter i -> raw_alphabet[i].
for i in xrange(len(raw_alphabet)):
    alphabet[chr(ord('a') + i)] = raw_alphabet[i]

def decrypt(text):
    # Map each character through the table; characters without a mapping
    # are copied through unchanged and newlines are dropped.
    output = ""
    for c in text:
        if alphabet.has_key(c):
            output += alphabet[c]
        elif c != "\n":
            output += c
    return output

# Read the case count from the first line, then decrypt each case line.
fin = open("tongues.txt", "r")
fin = fin.readlines()
inputs = int(fin[0])
fin.pop(0)
for i in xrange(inputs):
    output = decrypt(fin[i])
    print ("Case #%d: " % (i+1)) + output
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
cf68c743a79485c7d70e3f054c624e33b93dcc81 | c67627680973129e8f3cefa8778c12366a11621d | /test/unit/test_cli_utils.py | 66c58c5a02653b51c83d59eb27445f65f8f79dd7 | [
"Apache-2.0"
] | permissive | eprtvea/curator | f3fb0d5a02f3ed33c23da5153f8e192609b75f62 | f6d25bca20d66437d367956c812988c6d88431c4 | refs/heads/master | 2020-12-01T01:08:35.279466 | 2016-02-11T00:34:22 | 2016-02-11T00:34:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,565 | py | from datetime import datetime, timedelta
from unittest import TestCase
from mock import Mock
import sys
import click
from click import testing as clicktest
import logging
logger = logging.getLogger(__name__)
import curator
# ---- Shared fixture data for the CLI utility tests below. ----

# Index and alias fixtures.
named_indices = [ "index1", "index2" ]
named_alias = 'alias_name'
alias_retval = { "pre_aliased_index": { "aliases" : { named_alias : { }}}}
aliases_retval = {
    "index1": { "aliases" : { named_alias : { } } },
    "index2": { "aliases" : { named_alias : { } } },
    }

# Generic exception used to simulate API failures.
fake_fail = Exception('Simulated Failure')

# Snapshot repository fixtures (single repo and a pair).
repo_name = 'repo_name'
test_repo = {repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/repo_name'}}}
test_repos = {'TESTING': {'type': 'fs', 'settings': {'compress': 'true', 'location': '/tmp/repos/TESTING'}},
              repo_name: {'type': 'fs', 'settings': {'compress': 'true', 'location': '/rmp/repos/repo_name'}}}

# Snapshot listing fixtures: one snapshot, then two.
snap_name = 'snap_name'
snapshot = { 'snapshots': [
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:00.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': snap_name, 'end_time': '2015-01-01T00:00:01.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    }]}
snapshots = { 'snapshots': [
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:00.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': snap_name, 'end_time': '2015-01-01T00:00:01.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    },
    {
        'duration_in_millis': 60000, 'start_time': '2015-01-01T00:00:02.000Z',
        'shards': {'successful': 4, 'failed': 0, 'total': 4},
        'end_time_in_millis': 0, 'state': 'SUCCESS',
        'snapshot': 'snapshot2', 'end_time': '2015-01-01T00:00:03.000Z',
        'indices': named_indices,
        'failures': [], 'start_time_in_millis': 0
    }]}

# Snapshot request bodies: all indices, then a named pair.
snap_body_all = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices" : "_all"
}
snap_body = {
    "ignore_unavailable": False,
    "include_global_state": True,
    "partial": False,
    "indices" : "index1,index2"
}
class TestExitMsg(TestCase):
    """curator.exit_msg: success exits 0, failure exits 1."""

    def test_exit_msg_positive(self):
        with self.assertRaises(SystemExit) as caught:
            curator.exit_msg(True)
        self.assertEqual(caught.exception.code, 0)

    def test_exit_msg_negative(self):
        with self.assertRaises(SystemExit) as caught:
            curator.exit_msg(False)
        self.assertEqual(caught.exception.code, 1)
class TestCheckVersion(TestCase):
    """curator.check_version: accept supported versions, exit(1) otherwise."""

    def _client_reporting(self, number):
        """Build a mock client whose info() reports the given version."""
        client = Mock()
        client.info.return_value = {'version': {'number': number} }
        return client

    def test_check_version_positive(self):
        self.assertIsNone(curator.check_version(self._client_reporting('1.1.1')))

    def test_check_version_less_than(self):
        with self.assertRaises(SystemExit) as caught:
            curator.check_version(self._client_reporting('0.90.3'))
        self.assertEqual(caught.exception.code, 1)

    def test_check_version_greater_than(self):
        with self.assertRaises(SystemExit) as caught:
            curator.check_version(self._client_reporting('3.0.1'))
        self.assertEqual(caught.exception.code, 1)
class TestCheckMaster(TestCase):
    """Tests for curator.check_master (master_only gating)."""

    def test_check_master_positive(self):
        # The single local node id ("foo") matches the elected master,
        # so check_master returns None and execution continues.
        client = Mock()
        client.nodes.info.return_value = {
            'nodes': { "foo" : "bar"}
        }
        client.cluster.state.return_value = {
            "master_node" : "foo"
        }
        self.assertIsNone(curator.check_master(client, master_only=True))

    def test_check_master_negative(self):
        # The local node ("bad") is not the master ("foo"): curator
        # exits cleanly with status 0 instead of running on a non-master.
        client = Mock()
        client.nodes.info.return_value = {
            'nodes': { "bad" : "mojo"}
        }
        client.cluster.state.return_value = {
            "master_node" : "foo"
        }
        with self.assertRaises(SystemExit) as cm:
            curator.check_master(client, master_only=True)
        self.assertEqual(cm.exception.code, 0)
class TestInList(TestCase):
    """curator.in_list keeps only values present in the source list."""

    def test_in_list_positive(self):
        wanted = ['a', 'b']
        source = ['a', 'b', 'c', 'd']
        self.assertEqual(wanted, curator.in_list(wanted, source))

    def test_in_list_negative(self):
        wanted = ['a', 'b', 'q']
        source = ['a', 'b', 'c', 'd']
        self.assertEqual(['a', 'b'], curator.in_list(wanted, source))
class TestGetClient(TestCase):
    """Tests for curator.get_client connection/certificate handling."""

    def test_certificate_logic(self):
        # NOTE(review): `client` is unused below -- looks like leftover
        # setup; confirm before removing.
        client = Mock()
        kwargs = { 'use_ssl' : True, 'certificate' : 'mycert.pem' }
        with self.assertRaises(SystemExit) as cm:
            curator.get_client(**kwargs)
        # NOTE(review): sys.stdout.getvalue() only exists when stdout has
        # been replaced by a StringIO (e.g. nose's buffered capture) --
        # confirm the test runner provides that.
        self.assertEqual(sys.stdout.getvalue(),'ERROR: Connection failure.\n')
        self.assertEqual(cm.exception.code, 1)
| [
"aaron@mildensteins.com"
] | aaron@mildensteins.com |
d4179f61ac0365aa5d0843892018d942a29ed243 | e972dc486e62152981177f85b5f9cff919ac0867 | /sams/tmp/category_db.py | 744d848a0aa3b864e1139359886c0c5aed4d9f96 | [] | no_license | yeongsunpark/cute | d81b9b03f747f65bed742b10b2f9a59f69efea96 | d69f918f9a1f1d6db70bc62272fc0ce582d7bf50 | refs/heads/master | 2020-03-27T12:43:41.728918 | 2019-04-29T04:41:47 | 2019-04-29T04:41:47 | 146,564,948 | 0 | 2 | null | 2018-11-06T07:45:59 | 2018-08-29T07:52:20 | Python | UTF-8 | Python | false | false | 5,664 | py | import os, sys
import pymysql
import logging
import ys.cute.sams.ys_logger as ys_logger
import concurrent.futures
sys.path.append(os.path.abspath('..'))
# Module-level logger: INFO level, emitting through the project's handler.
logger = logging.getLogger('root')
logger.setLevel("INFO")
logger.addHandler(ys_logger.MyHandler())
logger.info("Finish setting logger")
class SquadDb():
    """Selects random 2018 news rows from the cmdb MySQL database, maps
    each fine-grained category to a coarse label, and writes the result
    as tab-separated rows to c_change.txt."""

    def __init__(self):
        # Hard-coded connection settings for the cmdb database.
        self.db_cnf_dict = {"host": '10.122.64.71', "usr": "root", "pwd": "root",
                            "db": "cmdb", "encoding": "utf8"}
        self.con = None   # pymysql connection, set by connect_db()
        self.cur = None   # pymysql cursor, set by connect_db()
        self.connect_db()
        # Output file for the re-categorized rows.
        self.f2 = open("c_change.txt", "w", newline="\n")
        # self.f3 = open("/home/msl/ys/cute/sams/tmp/180824_17년11월1일이후기사랜덤3만개(다씀)_id만.txt","r")
        self.q_id_1 = ""
        self.q_id_2 = ""
        self.question_1 = ""
        self.question_2 = ""
        self.result_list = []
        # When True, change() selects only 10 rows instead of 20000.
        self.test = False

    def easy_mysql(self, cfg_dict, encoding='utf8', autocommit=False):
        # Open the pymysql connection and cursor from a config dict.
        self.con = pymysql.connect(host=cfg_dict['host'], user=cfg_dict['usr'],
                                   passwd=cfg_dict['pwd'], db=cfg_dict['db'], charset=encoding)
        self.cur = self.con.cursor()
        if autocommit is True:
            self.con.autocommit(True)

    def connect_db(self):
        try:  # try to connect to project db
            cfg_dict = dict(host=self.db_cnf_dict['host'], usr=self.db_cnf_dict['usr'],
                            pwd=self.db_cnf_dict['pwd'], db=self.db_cnf_dict['db'])
            self.easy_mysql(cfg_dict, encoding=self.db_cnf_dict['encoding'],
                            autocommit=True)  # turn-on autocummit, be careful!
            self.cur.execute("SET NAMES utf8")
        except Exception as e:
            # NOTE(review): connection failures are silently swallowed;
            # later queries then fail on a None cursor -- confirm intent.
            pass

    def change(self):
        """Select random articles (REG_DATE > 2018, excluding the 연예
        category), map categories to coarse labels, and write rows whose
        content length is between 300 and 3000 characters."""
        logger.info("Start Selection")
        if self.test is True:
            select_sql = 'select REG_DATE, TITLE, CONTENT, CATEGORY, ID from NEWS '\
                'where REG_DATE >20180000000000 AND CATEGORY != "연예" '\
                'order by rand() limit 10'
        else:
            select_sql = 'select REG_DATE, TITLE, CONTENT, CATEGORY, ID from NEWS '\
                'where REG_DATE >20180000000000 AND CATEGORY != "연예" '\
                'order by rand() limit 20000'
        self.cur.execute(select_sql)
        logger.info("Selected")
        select_data = self.cur.fetchall()
        logger.info(len(select_data))
        # Fine-grained -> coarse category lookup tables.
        economy = ["금융기타", "대출", "증권", "취업"]
        science = ["IT기타", "게임", "과학기타", "날씨", "모바일", "비만성장", "생활가전", "성형외과", "소프트웨어", "수송기기", "영상음향가전", "의료기타", "자동차",
                   "제약", "피부과", "하드웨어", "항공"]
        society = ["결혼", "교육", "사회기타", "생활용품", "육아", "종교"]
        sports = ["경마", "골프", "동계스포츠", "레저스포츠", "스포츠기타", "야구", "축구"]
        normal = ["국방", "기호식품", "복권", "부동산", "쇼핑", "숙박", "식품기타", "애완", "여행기타", "연금보험", "인테리어", "재해", "정치", "탈모", "패션",
                  "화장품", "공연", "영화", "예술"]
        for sd in select_data:
            cate = sd[3]
            if cate in economy:
                c = "경제"
            elif cate in science:
                c = "과학"
            elif cate in society:
                c = "사회"
            elif cate in sports:
                c = "스포츠"
            elif cate in normal:
                c = "일반"
            elif cate == "":
                c = "null"
            else:
                c = "error"
            # print (sd)
            # Keep only articles of moderate length (300..3000 chars).
            if 300 <= len(sd[2]) <= 3000:
                self.f2.write("\t".join([sd[0], sd[1], sd[2], c, sd[4]]))
                self.f2.write("\n")
        self.f2.close()
        # self.f3.close()
        logger.info("Finish")

    def count_data(self):
        # Log the total number of rows in the NEWS table.
        logger.info("count start")
        try:
            count_sql = 'select count(*) from NEWS'
            self.cur.execute(count_sql)
            select_count_row = self.cur.fetchall()
            logger.info(select_count_row)
            self.con.commit()
        except:
            # NOTE(review): bare except hides the real error and the log
            # message looks like a leftover -- consider narrowing.
            logger.info("cannnot user_information")
def split():
    """Partition the rows of c_change.txt into per-category text files.

    Each input line is expected to hold five tab-separated fields
    (reg_date, title, content, category, id).  Lines whose category is
    one of the known Korean labels go to the matching output file
    (science/society/sports/general/economy .txt); unknown categories go
    to etc.txt; malformed lines (not exactly five fields) are dropped.

    Fixes over the previous version: removed the unused ``c`` dict and
    the unused re-split of each line in the writer loop, and both input
    and output files are now context-managed so they are always closed.
    """
    # Korean category label -> output file stem.
    label_to_name = {
        "과학": "science",
        "사회": "society",
        "스포츠": "sports",
        "일반": "general",
        "경제": "economy",
    }
    output_names = ["science", "society", "sports", "general", "economy", "etc"]
    buckets = {name: [] for name in output_names}
    with open("c_change.txt", "r") as infile:
        for line in infile:
            fields = line.split("\t")
            if len(fields) != 5:
                continue  # malformed row: drop it entirely
            name = label_to_name.get(fields[3], "etc")
            buckets[name].append(line)
    # Every output file is (re)written, even when its bucket is empty.
    for name in output_names:
        with open("{}.txt".format(name), "w") as outfile:
            outfile.writelines(buckets[name])
if __name__ == "__main__":
# j = SquadDb()
# j.connect_db()
# j.change()
split() | [
"ylunar@naver.com"
] | ylunar@naver.com |
54f3b6909a21adf8a488b8156ea3dd3eff4e9bce | f2428051b3f7d77dc4cb2d61ee18cc31fe5eaa67 | /tiddlywebplugins/docs.py | 2afab067e1c13c5fcb7bb688dd7bb7e5300c03c6 | [
"BSD-3-Clause"
] | permissive | tiddlyweb/tiddlywebplugins.docs | b306ab081957c81c0f172fd2a11752f3f3252842 | af5df2369c794fea6f44ff8823f9ab0958909a80 | refs/heads/master | 2021-01-18T23:48:12.217948 | 2011-11-10T17:18:35 | 2011-11-10T17:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,898 | py | """
Autogenerate TiddlyWeb API docs via a serialization.
"""
from tiddlyweb.serializer import (Serializer, NoSerializationError,
TiddlerFormatError, BagFormatError, RecipeFormatError)
from tiddlyweb.serializations import SerializationInterface
from tiddlywebplugins.templates import get_template
import urllib
__version__ = '0.5'

# URL extension name -> MIME type it selects.
EXTENSION_TYPES = {
    'x-doc': 'text/x-tiddlyweb-docs'
}

# MIME type -> [serializer module, Content-Type header value].
SERIALIZERS = {
    'text/x-tiddlyweb-docs': ['tiddlywebplugins.docs',
        'text/html; charset=UTF-8'],
}


def init(config):
    """Register the docs extension and serializer on a web-server config.

    Does nothing when 'selector' is absent (i.e. not running as a
    web server, e.g. during twanager commands).
    """
    if 'selector' not in config:
        return
    config['extension_types'].update(EXTENSION_TYPES)
    config['serializers'].update(SERIALIZERS)
class Serialization(SerializationInterface):
    """Serialization that renders documentation for the other registered
    serializations instead of the entity itself.

    NOTE: Python 2 code (dict.iteritems, urllib.quote, str.decode).
    """

    def __init__(self, environ=None):
        SerializationInterface.__init__(self, environ)
        self.extensions = {}      # mime type -> extension name
        self.serializations = []  # (extension, serialization object) pairs
        self._build_serializers()

    # XXX surely I can dry this up?
    def recipe_as(self, recipe):
        # The entity argument is ignored; only doc info is rendered.
        return self._all_info('recipe_as', 'as_recipe')

    def bag_as(self, bag):
        return self._all_info('bag_as', 'as_bag')

    def tiddler_as(self, tiddler):
        return self._all_info('tiddler_as', 'as_tiddler')

    def list_recipes(self, recipes):
        return self._all_info('list_recipes')

    def list_bags(self, bags):
        return self._all_info('list_bags')

    def list_tiddlers(self, tiddlers):
        return self._all_info('list_tiddlers')

    def _build_serializers(self):
        # Collect every configured serialization except this module and
        # the 'default' alias, pairing each with its URL extension.
        for extension, mime in (self.environ['tiddlyweb.config']
                ['extension_types'].iteritems()):
            self.extensions[mime] = extension
        for mime, outputter in (self.environ['tiddlyweb.config']
                ['serializers'].iteritems()):
            module, _ = outputter
            if module == __name__ or mime == 'default':
                continue
            try:
                self.serializations.append((self.extensions[mime],
                    Serializer(module, self.environ).serialization))
            except KeyError:
                # we got a mime type for which there is not an
                # extension so let's skip it
                pass

    def _matches(self, method):
        # Serializations that implement the given method name.
        matches = []
        for serialization in self.serializations:
            if hasattr(serialization[1], method):
                matches.append(serialization)
        return matches

    def _all_info(self, out_method, in_method=None):
        # Gather method routing info plus per-serialization docs and
        # render them through the tiddlywebdocs template.
        method_info = self._method_info()
        out_serialization_info = self._serialization_info(out_method)
        if in_method and 'PUT' in method_info['method']:
            in_serialization_info = self._serialization_info(in_method)
        else:
            in_serialization_info = {}
        # Disable HTMLPresenter if it is in the stack.
        if 'tiddlyweb.title' in self.environ:
            del self.environ['tiddlyweb.title']
        template = get_template(self.environ, 'tiddlywebdocs.html')
        return template.generate({'outserialization': out_serialization_info,
            'inserialization': in_serialization_info,
            'method': method_info})

    def _serialization_info(self, method):
        # Probe each candidate serialization by calling the method with
        # dummy arguments, keeping those that don't refuse outright, and
        # record their docstring and URL extension.
        serializers = self._matches(method)
        info = {}
        for serializer in serializers:
            try:
                try:
                    getattr(serializer[1], method)([])
                except TypeError:
                    # Entity methods take two args; retry with two dummies.
                    getattr(serializer[1], method)('', '')
            except NoSerializationError:
                continue
            except (AttributeError, TiddlerFormatError, BagFormatError,
                    RecipeFormatError):
                pass  # wow!
            info[serializer[1].__module__] = {
                'doc': getattr(serializer[1], method).__doc__,
                'ext': serializer[0]}
        return info

    def _method_info(self):
        # Describe the current request path and, via the selector, which
        # handler serves each allowed HTTP method.
        methods = self.environ.get('selector.methods', [])
        path = self.environ.get('SCRIPT_NAME', 'Unavailable')
        matched_path = self.environ.get('selector.matches', [path])[0]
        selector = self.environ['tiddlyweb.config'].get('selector', None)
        if '.x-doc' in path:
            # Strip the docs extension to get the underlying resource path.
            cleanpath = path.rsplit('.x-doc')[0]
        else:
            cleanpath = path
        query_string = self.environ.get('QUERY_STRING', '')
        if query_string:
            query_string = '?%s' % query_string
        info = {'path': path.decode('utf-8'),
            'cleanpath': urllib.quote(cleanpath),
            'method': {},
            'query': query_string}
        if selector:
            for method in sorted(methods):
                handler = selector.select(matched_path, method)[0]
                info['method'][method] = ('%s:%s' % (handler.__module__,
                    handler.__name__), '%s' % handler.__doc__)
        return info
| [
"chris.dent@gmail.com"
] | chris.dent@gmail.com |
44b136ab3b9b77bb0ad55b20ac6da575b4601836 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_1725_Number_Of_Rectangles_That_Can_Form_The_Largest_Square.py | ae7a0dd9651d89effd3109888fcae48736d25659 | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from collections import Counter
class Solution(object):
def countGoodRectangles(self, rectangles):
"""
:type rectangles: List[List[int]]
:rtype: int
"""
return max(Counter(map(min, rectangles)).items())[1]
| [
"hemingwei2017@gmail.com"
] | hemingwei2017@gmail.com |
8ca7c327e6dcb11955cc3b53b864c85fe94c2207 | f250ee8189a91b9cc12d57665dfb09a34c343d38 | /setup.py | 16512f4d5054ee5e87cb774f96dd7d384736f418 | [] | no_license | duckworthd/optim | 4abb64b74c8df32175580b70d450963dbd099865 | 510e8fb81342fb145e140194dad0957724d124f7 | refs/heads/master | 2021-01-25T08:38:39.295589 | 2014-07-21T05:22:18 | 2014-07-21T05:22:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | from setuptools import setup, find_packages
import os
def version(name):
fname = os.path.join(name, '_version.py')
environ = {}
execfile(fname, environ)
return environ['__version__']
if __name__ == '__main__':
NAME = 'optim'
setup(
name = NAME,
version = version(NAME),
author = 'Daniel Duckworth',
author_email = 'duckworthd@gmail.com',
description = 'Reference implementations of optimization algorithms',
license = 'BSD',
keywords = 'optimization',
url = 'http://github.com/duckworthd/optim',
packages = find_packages(),
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
install_requires = [
'matplotlib',
'numpy',
],
tests_require = [
'nose',
]
)
| [
"duckworthd@gmail.com"
] | duckworthd@gmail.com |
99216479731815d1597e49465789a7c49782de16 | 345fdc5971db81240722901cbd1fef619b271676 | /chapter8/snippets/views.py | 4690d46532cbffce84863775e9a821a8b80a8d38 | [] | no_license | hisakin/practical-django | 093c449f42fe428320f5dce80db5b6708619e45d | a25e00d6283e1eb7a79728bbb99141f13c0f4692 | refs/heads/main | 2023-07-30T15:37:26.492059 | 2021-09-16T14:49:57 | 2021-09-16T14:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from snippets.forms import SnippetForm
from snippets.models import Snippet
def top(request):
snippets = Snippet.objects.all()
context = {"snippets": snippets}
return render(request, "snippets/top.html", context)
@login_required
def snippet_new(request):
if request.method == 'POST':
form = SnippetForm(request.POST)
if form.is_valid():
snippet = form.save(commit=False)
snippet.created_by = request.user
snippet.save()
return redirect(snippet_detail, snippet_id=snippet.pk)
else:
form = SnippetForm()
return render(request, "snippets/snippet_new.html", {'form': form})
@login_required
def snippet_edit(request, snippet_id):
snippet = get_object_or_404(Snippet, pk=snippet_id)
if snippet.created_by_id != request.user.id:
return HttpResponseForbidden("このスニペットの編集は許可されていません。")
if request.method == "POST":
form = SnippetForm(request.POST, instance=snippet)
if form.is_valid():
form.save()
return redirect('snippet_detail', snippet_id=snippet_id)
else:
form = SnippetForm(instance=snippet)
return render(request, 'snippets/snippet_edit.html', {'form': form})
def snippet_detail(request, snippet_id):
snippet = get_object_or_404(Snippet, pk=snippet_id)
return render(request, 'snippets/snippet_detail.html',
{'snippet': snippet})
| [
"contact@c-bata.link"
] | contact@c-bata.link |
9819935bdeb231eee38a6eb8298167a33dd66791 | a9b24a31f27afc42736d923b7ba4df300e13a8cf | /qidian_book/start.spec | 06a4f3fef319e84fc683ec0a381d53cd51f3024f | [] | no_license | 520wsl/python-scrapy-test | 8dcb216a91c8f03266ae29d3b9590b124088eb67 | 2f93e161c849aabfe9efb90e719906c9ae5bee1c | refs/heads/master | 2022-12-13T00:47:44.177505 | 2020-05-23T05:05:57 | 2020-05-23T05:05:57 | 184,033,077 | 2 | 0 | null | 2022-12-08T05:25:05 | 2019-04-29T08:40:13 | HTML | UTF-8 | Python | false | false | 823 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['start.py'],
pathex=['E:\\GIT\\python-scrapy-test\\qidian_book'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='start',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
| [
"395548460@qq.com"
] | 395548460@qq.com |
3bf2cae52d2734325db33a0646b8d648195c5ee6 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc054/A/4891080.py | da5c0c65d36312ef415307fd75b5c38f13c0880a | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | A, B = map(int, input().split())
if (A == B):
print('Draw')
elif (A == 1):
print('Alice')
elif (B == 1):
print('Bob')
elif (A > B):
print('Alice')
else:
print('Bob') | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
b1fa11cb1e9e3e99ea81b0ac5ea8466267d71a9a | f4ff27b8a5ab314659925eaf4be83151a1846bb5 | /cachetools_ext/fs.py | 744ff797e789a18e0a734d98e990346eb2afa9bd | [
"MIT"
] | permissive | thanakijwanavit/cachetools_ext | 0e9934ad9d264f3eb157d20973ed1c2751501e57 | 4355393d660c047ef3a286411f17795905476c91 | refs/heads/master | 2023-02-09T04:42:43.904587 | 2021-01-06T01:45:57 | 2021-01-06T01:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | import datetime
import os
import pickle
import shutil
from collections.abc import MutableMapping
from pathlib import Path
from typing import Any, Optional, Union
class FSLRUCache(MutableMapping):
"""Filesystem LRU cache with optional TTL"""
def __init__(
self,
maxsize: int,
path: Optional[Union[Path, str]] = None,
ttl: Optional[Union[int, float]] = None,
clear_on_start=False,
):
if not ((path is None) or isinstance(path, (str, Path))):
raise TypeError("path must be str or None")
if not ((ttl is None) or isinstance(ttl, (int, float))):
raise TypeError("ttl must be int, float or None")
if not isinstance(maxsize, int):
raise TypeError("maxsize must be int or None")
# Absolute path to the cache
path = Path(path).absolute() if path else Path(".") / "cache"
# Create the directory if not exists
path.mkdir(parents=True, exist_ok=True)
self.path = path
self.ttl: Optional[int] = ttl
self.maxsize = maxsize
self.clear_on_start = clear_on_start
if clear_on_start:
# Clear the cache
shutil.rmtree(self.path)
path.mkdir(parents=True, exist_ok=True)
# Delete any existing expired entries
self.__delete_expired_entries()
def key_to_path(self, key) -> Path:
return self.path / f"{key}.pkl"
def path_to_key(self, path) -> str:
return path.name.strip(".pkl")
def __getitem__(self, key):
self.__delete_expired_entries()
value_path = self.key_to_path(key)
try:
value = pickle.loads(value_path.read_bytes())
return value
except Exception:
pass
return self.__missing__(key)
def __missing__(self, key):
raise KeyError(key)
def __setitem__(self, key: Any, value: Any) -> None:
self.__delete_expired_entries()
value_size = 1
current_size = len(self)
if value_size > self.maxsize:
raise ValueError("value too large")
while current_size + value_size > self.maxsize:
self.popitem()
current_size = len(self)
value_path = self.key_to_path(key)
value_path.write_bytes(pickle.dumps(value))
def __delitem__(self, key):
value_path = self.key_to_path(key)
try:
value_path.unlink()
except Exception:
pass
def __contains__(self, key) -> bool:
self.__delete_expired_entries()
value_path = self.key_to_path(key)
if value_path.is_file():
return True
return False
def __len__(self):
self.__delete_expired_entries()
return len([x for x in self.path.glob("*")])
def __iter__(self):
self.__delete_expired_entries()
for x in self.path.glob("*"):
yield self.path_to_key(x)
def items(self):
self.__delete_expired_entries()
for key in self.__iter__():
try:
value = self[key]
yield key, value
except KeyError:
continue
def keys(self):
for key in self:
yield key
def values(self):
for _, value in self.items():
yield value
def popitem(self):
"""Remove and return the `(key, value)` pair least recently used."""
file_to_ts = {path: os.stat(path).st_atime_ns for path in self.path.glob("*")}
ordered_file_to_ts = sorted(file_to_ts.items(), key=lambda x: x[1])
for path, ts in ordered_file_to_ts:
try:
key = self.path_to_key(path)
return (key, self.pop(key))
except KeyError:
pass
raise KeyError("Cache is empty")
def __delete_expired_entries(self):
"""Delete entries with an expired ttl"""
if self.ttl is None:
return
now = datetime.datetime.now().timestamp()
for path in self.path.glob("*"):
try:
created_ts = os.stat(path).st_ctime
except FileNotFoundError:
continue
print(now, created_ts)
if now - created_ts > self.ttl:
try:
path.unlink()
except FileNotFoundError:
continue
| [
"github@oliverrice.com"
] | github@oliverrice.com |
10a042b54434cd65b7b1f5ac8d959ac31181fc38 | a3e626f9893982c549d1f8d98237e9601c2ddfef | /importXPZcurve.py | 9ca5f8b16b7eab9faf2fd57411761780b9f48b31 | [] | no_license | richstoner/connectivity-blend | e515ef19e57179cdd30c98aa235f3feb586095c0 | dae27b67e6de58a33354b200b34cf045d37fa035 | refs/heads/master | 2020-04-25T22:20:33.640843 | 2013-09-24T18:22:36 | 2013-09-24T18:22:36 | 8,784,866 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,333 | py | print("Hello")
from mathutils import Vector
import bpy
import glob
import csv
import math
import struct
import os
import urllib.request
w=1
def clearAllCurves():
# gather list of items of interest.
candidate_list = [item.name for item in bpy.data.objects if item.type == "CURVE"]
# select them only.
for object_name in candidate_list:
bpy.data.objects[object_name].select = True
# remove all selected.
bpy.ops.object.delete()
# remove the meshes, they have no users anymore.
for item in bpy.data.meshes:
bpy.data.meshes.remove(item)
print('Cleared curves')
def clearAllMeshes():
# gather list of items of interest.
candidate_list = [item.name for item in bpy.data.objects if item.type == "MESH"]
# select them only.
for object_name in candidate_list:
bpy.data.objects[object_name].select = True
# remove all selected.
bpy.ops.object.delete()
# remove the meshes, they have no users anymore.
for item in bpy.data.meshes:
bpy.data.meshes.remove(item)
print('Cleared meshes')
def makeMaterial(name, diffuse, specular, alpha):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_color = specular
mat.specular_shader = 'COOKTORR'
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
return mat
def setMaterial(ob, mat):
me = ob.data
me.materials.append(mat)
def printSummary():
for object in bpy.data.objects:
# print(object.type)
if object.type == 'CAMERA':
print('Camera location: ' + str(object.location))
if object.type == 'LAMP':
print('Sun location: ' + str(object.location))
def setSun():
rx = 0
ry = 180
rz = 180
pi = 3.14159265
sun = bpy.data.objects['Sun']
sun.location = [math.floor(133/2), math.floor(81/2), 0]
sun.rotation_mode = 'XYZ'
sun.rotation_euler[0] = rx*(pi/180.0)
sun.rotation_euler[1] = ry*(pi/180.0)
sun.rotation_euler[2] = rz*(pi/180.0)
sun.data.distance = 500
sun.data.falloff_type = 'CONSTANT'
def moveSag():
cam = bpy.data.objects['Camera']
cam.location = [math.floor(133/2), math.floor(81/2), -150]
rx = 0
ry = 180
rz = 180
pi = 3.14159265
cam.rotation_mode = 'XYZ'
cam.rotation_euler[0] = rx*(pi/180.0)
cam.rotation_euler[1] = ry*(pi/180.0)
cam.rotation_euler[2] = rz*(pi/180.0)
#cam.data.type = 'PERSP'
cam.data.type = 'ORTHO'
cam.data.ortho_scale = 250
def moveCoronal():
cam = bpy.data.objects['Camera']
cam.location = [-150, math.floor(81/2), math.floor(115/2)]
rx = 0
ry = 90
rz = 180
pi = 3.14159265
cam.rotation_mode = 'XYZ'
cam.rotation_euler[0] = rx*(pi/180.0)
cam.rotation_euler[1] = ry*(pi/180.0)
cam.rotation_euler[2] = rz*(pi/180.0)
#cam.data.type = 'PERSP'
cam.data.type = 'ORTHO'
cam.data.ortho_scale = 250
def moveAxial():
cam = bpy.data.objects['Camera']
cam.location = [math.floor(133/2), -115, math.floor(115/2)]
rx = 90
ry = 0
rz = 0
pi = 3.14159265
cam.rotation_mode = 'XYZ'
cam.rotation_euler[0] = rx*(pi/180.0)
cam.rotation_euler[1] = ry*(pi/180.0)
cam.rotation_euler[2] = rz*(pi/180.0)
#cam.data.type = 'PERSP'
cam.data.type = 'ORTHO'
cam.data.ortho_scale = 250
def addRootGroup(list_of_roots):
group_indexes = range(50,100)
print("there are %d roots" % len(list_of_roots))
#for rootgroup in list_of_roots[group_indexes]:
for group_in in group_indexes:
if group_in > len(list_of_roots):
break
rootgroup = list_of_roots[group_in].split('\\')[1]
urlstring = 'http://localhost:8888/series/%s' % rootgroup
url = urllib.request.urlopen(urlstring)
mybytes = url.read()
colorstring = mybytes.decode("utf8")
url.close()
print(colorstring)
csplit = colorstring.split(',')
r = float(csplit[0])
g = float(csplit[1])
b = float(csplit[2])
mtlname = rootgroup + '.mtl'
red = makeMaterial(mtlname, (r,g,b), (1,1,1), 1)
i = 0
print(rootgroup)
group_list = glob.glob(binary_location + '/' + rootgroup + '*')
for mes in group_list:
if i % 100 == 0: print(i)
vec_list = []
import os
f = open(mes,'rb')
filesize = os.fstat(f.fileno()).st_size
for k in range(0,int(filesize/12)):
vals = struct.unpack('fff', f.read(12))
vec = Vector(vals)
vec_list.append(vec)
def MakePolyLine(objname, curvename, cList):
curvedata = bpy.data.curves.new(name=curvename, type='CURVE')
curvedata.dimensions = '3D'
curvedata.bevel_depth = 0.025
objectdata = bpy.data.objects.new(objname, curvedata)
objectdata.location = (0,0,0) #object origin
bpy.context.scene.objects.link(objectdata)
polyline = curvedata.splines.new('POLY')
polyline.points.add(len(cList)-1)
for num in range(len(cList)):
x, y, z = cList[num]
polyline.points[num].co = (x, y, z, w)
MakePolyLine("%s-%04d" % (rootgroup,i), "%s-%04d" % (rootgroup,i), vec_list)
ob = bpy.data.objects.get("%s-%04d" % (rootgroup,i))
ob.select = True
i+=1
bpy.context.scene.objects.active = bpy.data.objects[("%s-0000" % (rootgroup))]
bpy.ops.object.join()
bpy.data.curves[("%s-0000" % (rootgroup))].bevel_depth = 0.025
setMaterial(bpy.context.active_object, red)
bpy.ops.object.select_all( action='DESELECT' )
add_cube = bpy.ops.mesh.primitive_cube_add
bpy.ops.object.select_all( action='DESELECT' )
binary_location = '/Users/Administrator/connectivity-blend/bindata'
raw_location = '/Users/Administrator/connectivity-blend/rawdata'
list_of_binmesh = glob.glob(binary_location + '/*')
list_of_roots = []
for mes in list_of_binmesh:
ms = mes.split('/')[-1].split('.')[0].split('-')[0]
if ms not in list_of_roots:
list_of_roots.append(ms)
print(list_of_roots[0])
#clearAllMeshes()
#clearAllCurves()
addRootGroup(list_of_roots)
layerList = [False]*20
layerList[0] = True
import math
shouldAddCube = 0
if shouldAddCube:
add_cube(location=(0, 0, 0,), layers=layerList)
ob = bpy.data.objects['Cube']
print(ob.location)
space = [133, 81, 115]
ob.scale = [133/2, 81/2, 115/2]
ob.location = [math.floor(133/2), math.floor(81/2), math.floor(115/2)]
printSummary()
#moveCoronal()
moveSag()
#moveAxial()
setSun()
| [
"stonerri@gmail.com"
] | stonerri@gmail.com |
59afa20535e5200ed4863696e830c84019f868a0 | 67f86bb3d09cbc86cac698b3f0abaf01457a966a | /master/nameko-master/nameko-master/test/standalone/test_event_dispatcher.py | 52657eb8d8a0c8264c82b40e327629c03d339992 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | tied/DevArtifacts | efba1ccea5f0d832d4227c9fe1a040cb93b9ad4f | 931aabb8cbf27656151c54856eb2ea7d1153203a | refs/heads/master | 2020-06-06T01:48:32.149972 | 2018-12-08T15:26:16 | 2018-12-08T15:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,942 | py | import pytest
from amqp.exceptions import NotFound
from mock import Mock, patch
from six.moves import queue
from nameko.amqp import UndeliverableMessage
from nameko.events import event_handler
from nameko.standalone.events import event_dispatcher, get_event_exchange
from nameko.testing.services import entrypoint_waiter
handler_called = Mock()
class Service(object):
name = 'destservice'
@event_handler('srcservice', 'testevent')
def handler(self, msg):
handler_called(msg)
def test_dispatch(container_factory, rabbit_config):
config = rabbit_config
container = container_factory(Service, config)
container.start()
msg = "msg"
dispatch = event_dispatcher(config)
with entrypoint_waiter(container, 'handler', timeout=1):
dispatch('srcservice', 'testevent', msg)
handler_called.assert_called_once_with(msg)
class TestMandatoryDelivery(object):
""" Test and demonstrate mandatory delivery.
Dispatching an event should raise an exception when mandatory delivery
is requested and there is no destination queue, as long as publish-confirms
are enabled.
"""
@pytest.fixture(autouse=True)
def event_exchange(self, container_factory, rabbit_config):
# use a service-based dispatcher to declare an event exchange
container = container_factory(Service, rabbit_config)
container.start()
def test_default(self, rabbit_config):
# events are not mandatory by default;
# no error when routing to a non-existent handler
dispatch = event_dispatcher(rabbit_config)
dispatch("srcservice", "bogus", "payload")
def test_mandatory_delivery(self, rabbit_config):
# requesting mandatory delivery will result in an exception
# if there is no bound queue to receive the message
dispatch = event_dispatcher(rabbit_config, mandatory=True)
with pytest.raises(UndeliverableMessage):
dispatch("srcservice", "bogus", "payload")
def test_mandatory_delivery_no_exchange(self, rabbit_config):
# requesting mandatory delivery will result in an exception
# if the exchange does not exist
dispatch = event_dispatcher(rabbit_config, mandatory=True)
with pytest.raises(NotFound):
dispatch("bogus", "bogus", "payload")
@patch('nameko.amqp.publish.warnings')
def test_confirms_disabled(self, warnings, rabbit_config):
# no exception will be raised if confirms are disabled,
# even when mandatory delivery is requested,
# but there will be a warning raised
dispatch = event_dispatcher(
rabbit_config, mandatory=True, use_confirms=False
)
dispatch("srcservice", "bogus", "payload")
assert warnings.warn.called
class TestConfigurability(object):
"""
Test and demonstrate configuration options for the standalone dispatcher
"""
@pytest.yield_fixture
def get_producer(self):
with patch('nameko.amqp.publish.get_producer') as get_producer:
yield get_producer
@pytest.fixture
def producer(self, get_producer):
producer = get_producer().__enter__.return_value
# make sure we don't raise UndeliverableMessage if mandatory is True
producer.channel.returned_messages.get_nowait.side_effect = queue.Empty
return producer
@pytest.mark.parametrize("parameter", [
# delivery options
'delivery_mode', 'mandatory', 'priority', 'expiration',
# message options
'serializer', 'compression',
# retry policy
'retry', 'retry_policy',
# other arbitrary publish kwargs
'correlation_id', 'user_id', 'bogus_param'
])
def test_regular_parameters(
self, parameter, mock_container, producer
):
""" Verify that most parameters can be specified at instantiation time.
"""
config = {'AMQP_URI': 'memory://localhost'}
value = Mock()
dispatch = event_dispatcher(config, **{parameter: value})
dispatch("service-name", "event-type", "event-data")
assert producer.publish.call_args[1][parameter] == value
def test_restricted_parameters(
self, mock_container, producer
):
""" Verify that providing routing parameters at instantiation
time has no effect.
"""
config = {'AMQP_URI': 'memory://localhost'}
exchange = Mock()
routing_key = Mock()
dispatch = event_dispatcher(
config, exchange=exchange, routing_key=routing_key
)
service_name = "service-name"
event_exchange = get_event_exchange(service_name)
event_type = "event-type"
dispatch(service_name, event_type, "event-data")
assert producer.publish.call_args[1]['exchange'] == event_exchange
assert producer.publish.call_args[1]['routing_key'] == event_type
| [
"alexander.rogalsky@yandex.ru"
] | alexander.rogalsky@yandex.ru |
949312734036c00781d898b942bfafcd063a4d23 | a3fba5e8ecc502ff262b737d05f5b719e1cd4148 | /SlackWorkflows.py | 87db380e13a643f85a2702bae0537fe51abd266a | [] | no_license | cthacker-udel/Python-Slack-API | 6eccfbd97d564c4d8d4325fba22fab4db721a146 | 1bee3d77c4bf3179a348e83d760284bab3c13d24 | refs/heads/master | 2023-06-16T15:23:46.224602 | 2021-07-15T06:13:51 | 2021-07-15T06:13:51 | 378,105,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from SlackClient import SlackClient
class SlackWorkflows(SlackClient):
def __init__(self):
self.workflow_step_execute_id = None
self.outputs = None
self.inputs = None
self.step_image_url = None
self.step_name = None
def generate_queries(self):
body = {}
if self.workflow_step_execute_id != None:
body['workflow_step_execute_id'] = self.workflow_step_execute_id
if self.outputs != None:
body['outputs'] = self.outputs
if self.inputs != None:
body['inputs'] = self.inputs
if self.step_image_url != None:
body['step_image_url'] = self.step_image_url
if self.step_name != None:
body['step_name'] = self.step_name
return body
def clear_queries(self):
self.workflow_step_execute_id = None
self.outputs = None
self.inputs = None
self.step_image_url = None
self.step_name = None | [
"cthacker@udel.edu"
] | cthacker@udel.edu |
eb8e8ca7dac3dc3988a7a6219e8a6e4e15e4a9af | 0cba5529e387ba0f077b4e8ddeb96f914004f5df | /setup-gpu.py | 3bc6c47fe699a7d4a30bdddecd315bf0b0a3e42e | [
"MIT"
] | permissive | AsyrafAzlan/Malaya | dc78398ee6880578f40c5646a48882a5913217ae | 3d5166173cf74881f7a56fffaaf391813c55d4f1 | refs/heads/master | 2021-05-21T22:47:41.863857 | 2020-04-03T15:00:21 | 2020-04-03T15:00:21 | 252,841,526 | 1 | 0 | MIT | 2020-04-03T21:04:44 | 2020-04-03T21:04:44 | null | UTF-8 | Python | false | false | 1,367 | py | import setuptools
__packagename__ = 'malaya-gpu'
setuptools.setup(
name = __packagename__,
packages = setuptools.find_packages(),
version = '3.4',
python_requires = '>=3.6.*',
description = 'Natural-Language-Toolkit for bahasa Malaysia, powered by Deep Learning Tensorflow. GPU Version',
author = 'huseinzol05',
author_email = 'husein.zol05@gmail.com',
url = 'https://github.com/huseinzol05/Malaya',
download_url = 'https://github.com/huseinzol05/Malaya/archive/master.zip',
keywords = ['nlp', 'bm'],
install_requires = [
'dateparser',
'sklearn',
'scikit-learn',
'requests',
'unidecode',
'tensorflow-gpu>=1.15.2',
'numpy',
'scipy',
'PySastrawi',
'ftfy',
'networkx',
'sentencepiece',
'bert-tensorflow',
'tqdm',
'herpetologist',
'youtokentome',
],
license = 'MIT',
classifiers = [
'Programming Language :: Python :: 3.6',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Text Processing',
],
package_data = {
'malaya': [
'_utils/web/*.html',
'_utils/web/static/*.js',
'_utils/web/static/*.css',
]
},
)
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
a34fa47c41a776883e5322039c8a4ea490ae734f | f2604a924b5cc6638dba6e246a6aea38d335f3b1 | /gym_splendor_code/envs/mechanics/splendor_observation_space.py | fab06fe86b21933bdca0b151313882be48691a6b | [
"MIT"
] | permissive | StanczakDominik/gym-splendor | 379507c066dc8756f5514c3760000bed6bf28020 | b7d6b0356d96ad1c528371f52412b81687a0ecc6 | refs/heads/master | 2020-08-28T00:38:06.076814 | 2019-10-25T13:09:17 | 2019-10-25T13:09:17 | 217,536,093 | 0 | 0 | MIT | 2019-10-25T13:09:40 | 2019-10-25T13:09:39 | null | UTF-8 | Python | false | false | 2,426 | py | from gym.spaces import Space
class SplendorObservationSpace(Space):
"""This class contains all information we want to share with the agents playing Splendor. The difference between
SplendorObservationSpace and State is that State contains all information about the state of game (including list
of cards that are not yet revealed and class SplendorObservationSpace contains only some part of it that is
accessible by the player. By modifying this class we can change what agent knows about the state of the game."""
def __init__(self):
super().__init__()
def state_to_observation(self, state):
cards_on_board_names = {card.name for card in state.board.cards_on_board}
gems_on_board = state.board.gems_on_board.__copy__()
active_player_id = state.active_player_id
players_hands = [{'cards_possessed_names': {card.name for card in players_hand.cards_possessed},
'cards_reserved_names' : {card.name for card in players_hand.cards_reserved},
'gems_possessed_names' : players_hand.gems_possessed.__copy__()} for players_hand in state.list_of_players_hands]
return {'cards_on_board_names' : cards_on_board_names, 'gems_on_board' : gems_on_board,
'active_player_id' : active_player_id, 'players_hands' : players_hands}
def __repr__(self):
return 'Observation space in Splendor. It contains all information accessible to one player (so for example in \n' \
'a default setting in does not contain the list of hidden cards. One observation has the following structure: \n' \
'It is a dictionary with keys: \n' \
'1) cards_on_board_names - a set of names of card lying on the board \n' \
'2) gems_on_board - a collection of gems on board \n ' \
'3) active_player_id - a number that indicates which player is active in the current state \n' \
'4) players_hands - a list of dictionaries refering to consective players hands. Each dictionary in this \n' \
'list contains the following keys:' \
'a) cards_possessed_names - set of names of cards possesed by the players hand \n'\
'b) cards_reserved_names - set of names of cards reserved by the players hand \n' \
'c) gems_possessed - collection of gems possessed by the players hand' | [
"tomeko314@gmail.com"
] | tomeko314@gmail.com |
ac4dad4eb9fe62c08acc70ad3022a95b11ea530d | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /aptamers_mlpd/simulation/utils.py | addb9740cbb0a633a07ab031f48e0ace1eddf5c8 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 3,197 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for aptamer simulations.
"""
import numpy
# numpy.random.RandomState uses uint32 seeds
RANDOM_SEED_MAX = 2 ** 32
def random_seed_stream(random_seed=None):
"""Yield an infinite stream of numbers for seeding random number generators.
This method is not proven to be cryptographically secure, and only explores a
small portion of the state space for NumPy's random number generator. Still,
it's a useful shortcut for writing decoupled functions that rely on random
state. See this thread for extensive discussion of its merits and the
alternatives:
https://mail.scipy.org/pipermail/numpy-discussion/2016-May/075487.html
Example:
>>> seed_gen = random_seed_stream(42)
>>> next(seed_gen)
1608637542
Args:
random_seed: optional integer used to seed this stream of random seeds.
Yields:
Integer seeds suitable for use in numpy.random.RandomState. Each seed is
independent and psuedo-randomly generated from the `random_seed` argument.
"""
rs = numpy.random.RandomState(random_seed)
seed = rs.randint(RANDOM_SEED_MAX)
while True:
yield seed
# Incrementing is better than generating new seeds with a call to randint,
# because with random seeds collisions are likely after only around 2 ** 16
# samples due to the birthday paradox.
seed = (seed + 1) % RANDOM_SEED_MAX
def target_occupancy(target_affinity,
serum_affinity,
target_concentration,
serum_concentration):
"""Calculate target site occupancy in the presence of serum.
Assumes that the amount of target and serum are very large (compared to the
amount of aptamers), such that their concentration can be treated as fixed.
TODO(mdimon): Validate this assumption.
All argument should be provided with the same units.
Args:
target_affinity: number or ndarray-like giving affinity for the target site.
serum_affinity: number or ndarray-like giving serum affinity.
target_concentration: number or ndarray-like giving target concentration.
serum_concentration: number or ndarray-like giving serum concentration.
Returns:
Number or ndarray-like giving the fraction of bound target sites.
"""
# see Equation (7) from:
# https://en.wikipedia.org/wiki/Competitive_inhibition#Derivation
numerator = serum_affinity * target_concentration
denominator = (target_affinity * serum_affinity
+ serum_affinity * target_concentration
+ target_affinity * serum_concentration)
return numerator / denominator
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
a7d3deb5e20d6442018c8bb519bec6820cd71cf0 | e3eead40e93fdf5186269536edefab4f08e9a5a2 | /LeetCode/393-utf8_validation.py | 9d69670d9f142e05f0ee5a6645188ff7b8527595 | [] | no_license | davll/practical-algorithms | bbc930b42363cae00ce39e8a686854c19131d334 | 0e35e4cc87bd41144b8e34302aafe776fec1b356 | refs/heads/master | 2021-08-22T13:12:34.555074 | 2020-03-28T08:56:13 | 2020-03-28T08:56:13 | 147,224,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | def validate_utf8(data):
n = len(data)
i = 0
while i < n:
nb = _nsucc_bits(data[i])
if nb < 0:
return False
i += 1
for _ in range(nb):
if i < n:
if not _follow_check(data[i]):
return False
i += 1
else:
return False
return True
_HEADER_MASK = [
int('10000000', base=2),
int('11100000', base=2),
int('11110000', base=2),
int('11111000', base=2)
]
_HEADER_VALUE = list(map(lambda x: (x << 1) & 0xFF, _HEADER_MASK))
_FOLLOW_MASK = int('11000000', base=2)
_FOLLOW_VALUE = int('10000000', base=2)
#print(', '.join(map(bin, _HEADER_MASK)))
#print(', '.join(map(bin, _HEADER_VALUE)))
def _nsucc_bits(x):
for i, (m, v) in enumerate(zip(_HEADER_MASK, _HEADER_VALUE)):
if (x & m) == v:
return i
return -1
def _follow_check(x):
return (x & _FOLLOW_MASK) == _FOLLOW_VALUE
class Solution:
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
return validate_utf8(data)
| [
"davll.xc@gmail.com"
] | davll.xc@gmail.com |
e88aae5ad5a2fd54f63aeec3e9e2ec2f17efeae8 | de33d709be6667a1972322fcd514edca80cfa6a0 | /snipps/check_mode.py | b20227c7ee97409be989525f4c0593c4f51b97ad | [
"MIT"
] | permissive | akshaynagpal/number_recognition | 8ecbc6affc970a9e9ffeb70cc290db9a4ed43489 | 363606205ccfe4a43320c2452c0ae0dd4e026ec2 | refs/heads/master | 2020-05-09T16:19:08.562084 | 2015-10-03T10:01:14 | 2015-10-03T10:01:14 | 30,975,567 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | import PIL
from PIL import Image
image_name = raw_input("enter name of image to open!")
imgfile=Image.open(image_name)
print imgfile.mode
raw_input()
| [
"akshay2626@gmail.com"
] | akshay2626@gmail.com |
819d61a9de591bf744f7ebad7563c9fd8559d4dc | e8dba002d8916a468e559a52f254c0d92532d6b2 | /homeassistant/components/airnow/config_flow.py | 67bce66e1673ab24e42ba7ed70d4ad657b614040 | [
"Apache-2.0"
] | permissive | thomasgermain/home-assistant | 32b0f4d888220f4ce49dc85e506d0db39445c6c0 | 9673b93842ddcecc7e6a6d65e6d4f5b8a1089c43 | refs/heads/vaillant | 2023-08-21T23:50:24.679456 | 2020-05-20T21:01:18 | 2023-08-03T07:11:35 | 197,781,893 | 8 | 4 | Apache-2.0 | 2023-02-10T06:56:47 | 2019-07-19T13:57:53 | Python | UTF-8 | Python | false | false | 3,692 | py | """Config flow for AirNow integration."""
import logging
from pyairnow import WebServiceAPI
from pyairnow.errors import AirNowError, EmptyResponseError, InvalidKeyError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    ``data`` has the keys from DATA_SCHEMA with values provided by the
    user (API key, latitude, longitude, radius).  Probes the AirNow web
    service with the supplied coordinates.

    Raises:
        InvalidAuth: the API key was rejected by the service.
        InvalidLocation: the service answered but returned no
            observations for the given coordinates.
        CannotConnect: any other AirNow communication failure.
    """
    session = async_get_clientsession(hass)
    client = WebServiceAPI(data[CONF_API_KEY], session=session)

    lat = data[CONF_LATITUDE]
    lng = data[CONF_LONGITUDE]
    distance = data[CONF_RADIUS]

    # Check that the provided latitude/longitude provide a response.
    # Fix: catch the specific pyairnow errors before the AirNowError
    # base class — with the base class listed first, the
    # EmptyResponseError handler could never run (except clauses are
    # tested in order, so specific subclasses must come first).
    try:
        test_data = await client.observations.latLong(lat, lng, distance=distance)
    except InvalidKeyError as exc:
        raise InvalidAuth from exc
    except EmptyResponseError as exc:
        raise InvalidLocation from exc
    except AirNowError as exc:
        raise CannotConnect from exc

    # A successful but empty payload still means the location is bad.
    if not test_data:
        raise InvalidLocation

    # Validation Succeeded
    return True
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Config flow handler for the AirNow integration."""

    VERSION = 1

    async def async_step_user(self, user_input=None):
        """Collect and validate the API key and location from the user."""
        errors = {}

        if user_input is not None:
            # One config entry per coordinate pair: abort if this
            # latitude/longitude combination is already configured.
            await self.async_set_unique_id(
                f"{user_input[CONF_LATITUDE]}-{user_input[CONF_LONGITUDE]}"
            )
            self._abort_if_unique_id_configured()

            try:
                await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except InvalidLocation:
                errors["base"] = "invalid_location"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"

            if not errors:
                # Validation passed — create the entry.
                title = (
                    f"AirNow Sensor at {user_input[CONF_LATITUDE]},"
                    f" {user_input[CONF_LONGITUDE]}"
                )
                return self.async_create_entry(title=title, data=user_input)

        # First visit, or validation failed: (re)display the form,
        # defaulting the coordinates to the Home Assistant instance's.
        schema = vol.Schema(
            {
                vol.Required(CONF_API_KEY): str,
                vol.Optional(
                    CONF_LATITUDE, default=self.hass.config.latitude
                ): cv.latitude,
                vol.Optional(
                    CONF_LONGITUDE, default=self.hass.config.longitude
                ): cv.longitude,
                vol.Optional(CONF_RADIUS, default=150): int,
            }
        )
        return self.async_show_form(
            step_id="user", data_schema=schema, errors=errors
        )
# Raised by validate_input when the AirNow service cannot be reached.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
# Raised by validate_input when the AirNow API key is rejected.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
# Raised by validate_input when the coordinates yield no observations.
class InvalidLocation(exceptions.HomeAssistantError):
    """Error to indicate the location is invalid."""
| [
"noreply@github.com"
] | thomasgermain.noreply@github.com |
c6526829039453f2cd279cccdd18b6d5e6844b8a | 1ebe5a07e7f6260c2c2ceb6ca00dcf2a0341e544 | /op_impl/built-in/ai_core/tbe/impl/dynamic/slice.py | d61346ea6f4566518203eebc550f23fe6dc7f588 | [] | no_license | gekowa/ascend-opp | f5e09905336d85f9974d555d03d37a75cb8185c1 | 5c28a2faf9d2a117ea6f0923efe35fcd53904dd2 | refs/heads/master | 2023-04-09T12:14:40.337104 | 2021-04-19T23:00:59 | 2021-04-19T23:00:59 | 359,620,865 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,539 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
strided slice
"""
from __future__ import absolute_import
import te.lang.dynamic
from topi.cce import util
from impl import common_util
from te.utils.op_utils import *
from .strided_slice import StridedSlice
# pylint: disable=locally-disabled,too-many-arguments,
# pylint: unused-argument,too-many-locals
@te.op.register_operator("Slice")
@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT,
                 REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)
def slice(x, offsets, size, y, kernel_name="slice"):
    """
    algorithm: slice
    calculating: extract a slice of shape ``size`` from the input
    tensor, starting at the location given by ``offsets``.

    Parameters
    ----------
    x: dict
        contains shape and dtype information of input tensor
    offsets: dict
        represents the index of the first value to select
    size: dict
        represents the shape of output tensor
    y: dict
        contains shape and dtype information of output tensor
    kernel_name: str
        cce kernel name, default value is "slice".

    Returns
    -------
    tik instance
    """
    # Dynamic slice reuses the StridedSlice kernel; offsets/size are
    # resolved at runtime via the tiling data, so the static begin/end/
    # stride parameters are passed as placeholders here.
    ss_op = StridedSlice(x, None, 0, 0, 0, 0, 0, kernel_name)
    ss_op.strided_slice()

    tik_inst = ss_op.tik_instance
    tik_inst.BuildCCE(
        kernel_name=ss_op.kernel_name,
        inputs=(ss_op.input_gm,
                ss_op.begin_gm,
                ss_op.end_gm),
        outputs=(ss_op.output_gm,),
        flowtable=[ss_op.tiling_param.tiling_gm],
        config={"out_of_bound_sync_check": True},
        enable_l2=False,
    )
    # Expose the AI-core count to the framework for runtime tiling.
    te.op.add_compile_info("vars", {"block_dim": ss_op.aicore_num})
    return tik_inst
| [
"gekowa@gmail.com"
] | gekowa@gmail.com |
51e2085c83cb053c92ea24c9e86320bb8b126d03 | 8e3a3c845ca3320483b233e8a0db4081aa3b8664 | /clases/migrations/0005_auto_20160623_0039.py | bee89b25480c363feacc339deffa53fc94c63a41 | [] | no_license | sofide/loiprocesos | 7d56398395e6f3302f4d9ec3627ed1b4c24bc17a | 4047fa02d0cfbcf744c80d59e3402215f8b294d3 | refs/heads/master | 2021-07-08T03:26:55.171459 | 2020-08-04T03:23:10 | 2020-08-04T03:23:10 | 61,167,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 793 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-23 03:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a Grupo foreign key to the
    # existing Exposicion and Pregunta models.

    dependencies = [
        ('grupos', '0001_initial'),
        ('clases', '0004_auto_20160623_0015'),
    ]

    operations = [
        # null=True lets pre-existing rows migrate without a default.
        migrations.AddField(
            model_name='exposicion',
            name='grupo',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grupos.Grupo'),
        ),
        migrations.AddField(
            model_name='pregunta',
            name='grupo',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grupos.Grupo'),
        ),
    ]
| [
"sofi.denner@gmail.com"
] | sofi.denner@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.