blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea693066e5c2cfa3a129e92b9162b3156c200ed6
|
60598454222bc1e6d352993f9c4cd164cd6cc9cd
|
/core/migrations/0014_auto_20200723_1127.py
|
f07013abc7d877cba2d16a2195b83a8886e01144
|
[] |
no_license
|
nicksonlangat/mychurch
|
12be8911ce1497d7c6a595d06275f21ecf58b185
|
e503828cab165c9edcde89b3ef6d7c06b5eb7fdb
|
refs/heads/master
| 2023-08-10T15:36:06.208376
| 2020-07-23T09:52:19
| 2020-07-23T09:52:19
| 281,030,716
| 0
| 1
| null | 2021-09-22T19:35:09
| 2020-07-20T06:15:58
|
Python
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
# Generated by Django 3.0.8 on 2020-07-23 08:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Service.seat_capacity with a many-to-many 'seats' relation."""

    dependencies = [
        ('core', '0013_attendance_status'),
    ]

    operations = [
        # Drop the old scalar capacity field before introducing the relation.
        migrations.RemoveField(
            model_name='service',
            name='seat_capacity',
        ),
        # Each service now references individual Seat rows instead of a count.
        migrations.AddField(
            model_name='service',
            name='seats',
            field=models.ManyToManyField(to='core.Seat'),
        ),
    ]
|
[
"nicksonlangat95@gmail.com"
] |
nicksonlangat95@gmail.com
|
78f3b9f5927206d15c77dd073f490b9202ab0fc2
|
cac93d697f9b3a75f059d725dee0251a8a81bf61
|
/robot/install/lib/python2.7/dist-packages/ur_dashboard_msgs/msg/_SetModeGoal.py
|
7628590a2f33e2c657df2d3e8743b53b989e0882
|
[
"BSD-3-Clause"
] |
permissive
|
satvu/TeachBot
|
c1394f2833649fdd72aa5b32719fef4c04bc4f70
|
5888aea544fea952afa36c097a597c5d575c8d6d
|
refs/heads/master
| 2020-07-25T12:21:34.240127
| 2020-03-09T20:51:54
| 2020-03-09T20:51:54
| 208,287,475
| 0
| 0
|
BSD-3-Clause
| 2019-09-13T15:00:35
| 2019-09-13T15:00:35
| null |
UTF-8
|
Python
| false
| false
| 5,203
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from ur_dashboard_msgs/SetModeGoal.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetModeGoal(genpy.Message):
  """Autogenerated ROS message class for ur_dashboard_msgs/SetModeGoal.

  Fields (see ``_full_text`` for the source .msg definition):
  ``target_robot_mode`` (int8), ``stop_program`` (bool), ``play_program`` (bool).
  """
  _md5sum = "6832df07338535cc06b3835f89ba9555"
  _type = "ur_dashboard_msgs/SetModeGoal"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# This action is for setting the robot into a desired mode (e.g. RUNNING) and safety mode into a
# non-critical state (e.g. NORMAL or REDUCED), for example after a safety incident happened.
# goal
int8 target_robot_mode
# Stop program execution before restoring the target mode. Can be used together with 'play_program'.
bool stop_program
# Play the currently loaded program after target mode is reached.#
# NOTE: Requesting mode RUNNING in combination with this will make the robot continue the motion it
# was doing before. This might probably lead into the same problem (protective stop, EM-Stop due to
# faulty motion, etc.) If you want to be safe, set the 'stop_program' flag below and manually play
# the program after robot state is returned to normal.
# This flag will only be used when requesting mode RUNNING
bool play_program
"""
  __slots__ = ['target_robot_mode','stop_program','play_program']
  _slot_types = ['int8','bool','bool']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       target_robot_mode,stop_program,play_program
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(SetModeGoal, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.target_robot_mode is None:
        self.target_robot_mode = 0
      if self.stop_program is None:
        self.stop_program = False
      if self.play_program is None:
        self.play_program = False
    else:
      # No explicit values were given; initialize every field to its default.
      self.target_robot_mode = 0
      self.stop_program = False
      self.play_program = False

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Wire format '<b2B': one int8 followed by two uint8s (the bool fields).
      buff.write(_get_struct_b2B().pack(_x.target_robot_mode, _x.stop_program, _x.play_program))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      _x = self
      start = end
      end += 3
      (_x.target_robot_mode, _x.stop_program, _x.play_program,) = _get_struct_b2B().unpack(str[start:end])
      # Bools travel on the wire as uint8; normalize back to Python bool.
      self.stop_program = bool(self.stop_program)
      self.play_program = bool(self.play_program)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_b2B().pack(_x.target_robot_mode, _x.stop_program, _x.play_program))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      _x = self
      start = end
      end += 3
      (_x.target_robot_mode, _x.stop_program, _x.play_program,) = _get_struct_b2B().unpack(str[start:end])
      # Bools travel on the wire as uint8; normalize back to Python bool.
      self.stop_program = bool(self.stop_program)
      self.play_program = bool(self.play_program)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Shared genpy Struct instance used for uint32 length prefixes.
_struct_I = genpy.struct_I
def _get_struct_I():
  """Return the cached genpy.struct_I instance."""
  global _struct_I
  return _struct_I
_struct_b2B = None
def _get_struct_b2B():
global _struct_b2B
if _struct_b2B is None:
_struct_b2B = struct.Struct("<b2B")
return _struct_b2B
|
[
"sarahvu@mit.edu"
] |
sarahvu@mit.edu
|
e77b9bf7ab6d5437d6b040caef3e6915f04fffca
|
a71582e89e84a4fae2595f034d06af6d8ad2d43a
|
/tensorflow/python/data/experimental/kernel_tests/optimization/make_numa_aware_test.py
|
d79ae4387c868d4821ac65787ba0bc04d47cc7d3
|
[
"Apache-2.0"
] |
permissive
|
tfboyd/tensorflow
|
5328b1cabb3e24cb9534480fe6a8d18c4beeffb8
|
865004e8aa9ba630864ecab18381354827efe217
|
refs/heads/master
| 2021-07-06T09:41:36.700837
| 2019-04-01T20:21:03
| 2019-04-01T20:26:09
| 91,494,603
| 3
| 0
|
Apache-2.0
| 2018-07-17T22:45:10
| 2017-05-16T19:06:01
|
C++
|
UTF-8
|
Python
| false
| false
| 1,813
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MakeNumaAware` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MakeNumaAwareTest(test_base.DatasetTestBase):
  """Checks that `experimental_numa_aware` swaps in the NumaMapAndBatch op."""

  def testMakeNumaAware(self):
    # assert_next fails the test unless the optimized pipeline actually
    # contains the "NumaMapAndBatch" transformation next.
    dataset = dataset_ops.Dataset.range(10).apply(
        optimization.assert_next(["NumaMapAndBatch"])).apply(
            batching.map_and_batch(lambda x: x * x, 10))
    options = dataset_ops.Options()
    options.experimental_numa_aware = True
    # Disable default optimizations so only the NUMA rewrite is exercised.
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset, expected_output=[[x * x for x in range(10)]])


if __name__ == "__main__":
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
0430b585c6f5da83bef5507cb158267ac18d89c4
|
63b1a78452cb4204e501e023bd9f3c8a364b723c
|
/test_nbdev/_nbdev.py
|
358f0ff246827f6c9ce7115b0bbb8ec347081e0d
|
[
"Apache-2.0"
] |
permissive
|
teddyxiong53/test_nbdev
|
03e22ef361a1768bc14f83cf617b8ab5fd172663
|
11d4ca82eedb45f4a3f687bd3e3d06336ebcbe9c
|
refs/heads/master
| 2023-07-17T21:19:41.518320
| 2021-09-12T01:52:50
| 2021-09-12T01:52:50
| 405,512,137
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

# Public API of this generated index module.
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook that defines it.
index = {"say_hello": "00_core.ipynb"}

# Python modules generated from the notebooks.
modules = ["core.py"]

doc_url = "https://teddyxiong53.github.io/test_nbdev/"
git_url = "https://github.com/teddyxiong53/test_nbdev/tree/master/"


def custom_doc_links(name):
    """Return a custom documentation link for `name` (none are defined)."""
    return None
|
[
"1073167306@qq.com"
] |
1073167306@qq.com
|
241cafabc1786d18738a3dbb2c5762712ff8cf93
|
98ca37f5dd2751efaa060cca19e0b83f871d7765
|
/sdk/translation/azure-ai-translation-document/tests/test_all_document_statuses.py
|
57e1a2f437a4cfaf8be9034e89d309b760822451
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
jayhebe/azure-sdk-for-python
|
5ea99732ebb9929d3f6f77c08cc640d5915970b1
|
f4455f85d9fe747fa4de2fdc691b975c07bfeea5
|
refs/heads/main
| 2023-06-24T01:22:06.602194
| 2021-07-28T02:12:25
| 2021-07-28T02:12:25
| 390,290,984
| 1
| 0
|
MIT
| 2021-07-28T09:23:46
| 2021-07-28T09:23:46
| null |
UTF-8
|
Python
| false
| false
| 8,190
|
py
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
import functools
from testcase import DocumentTranslationTest
from preparer import DocumentTranslationPreparer, DocumentTranslationClientPreparer as _DocumentTranslationClientPreparer
from azure.ai.translation.document import DocumentTranslationClient
import pytest
DocumentTranslationClientPreparer = functools.partial(_DocumentTranslationClientPreparer, DocumentTranslationClient)
class TestAllDocumentStatuses(DocumentTranslationTest):
    """Tests for DocumentTranslationClient.list_all_document_statuses:
    plain listing, pagination, skip, status/id filtering, ordering by
    creation time, and a combination of all filters at once.
    """

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses(self, client):
        docs_count = 5
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # list docs statuses
        doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
        self.assertEqual(len(doc_statuses), docs_count)
        for document in doc_statuses:
            self._validate_doc_status(document, target_language)

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_with_pagination(self, client):
        docs_count = 10
        results_per_page = 2
        no_of_pages = docs_count // results_per_page
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # check doc statuses
        doc_statuses_pages = list(client.list_all_document_statuses(translation_id=poller.id, results_per_page=results_per_page).by_page())
        self.assertEqual(len(doc_statuses_pages), no_of_pages)
        # iterate by page
        for page in doc_statuses_pages:
            page_items = list(page)
            self.assertLessEqual(len(page_items), results_per_page)
            for document in page_items:
                self._validate_doc_status(document, target_language)

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_with_skip(self, client):
        docs_count = 10
        skip = 2
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # check doc statuses: skipping N docs should shorten the listing by N
        doc_statuses = list(client.list_all_document_statuses(translation_id=poller.id, skip=skip))
        self.assertEqual(len(doc_statuses), docs_count - skip)
        # iterate over docs
        for document in doc_statuses:
            self._validate_doc_status(document, target_language)

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_filter_by_status(self, client):
        docs_count = 10
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # list operations: after waiting, all docs should be Succeeded, so the
        # NotStarted and Failed filters must return empty result sets.
        statuses = ["NotStarted"]
        doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
        assert(len(doc_statuses) == 0)
        statuses = ["Succeeded"]
        doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
        assert(len(doc_statuses) == docs_count)
        statuses = ["Failed"]
        doc_statuses = list(client.list_all_document_statuses(poller.id, statuses=statuses))
        assert(len(doc_statuses) == 0)

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_filter_by_ids(self, client):
        docs_count = 5
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # filter ids: take the first half of the real document ids
        doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
        self.assertEqual(len(doc_statuses), docs_count)
        ids = [doc.id for doc in doc_statuses]
        ids = ids[:docs_count//2]
        # do the testing
        doc_statuses = list(client.list_all_document_statuses(poller.id, document_ids=ids))
        self.assertEqual(len(doc_statuses), len(ids))
        for document in doc_statuses:
            self._validate_doc_status(document, target_language, ids=ids)

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_order_by_creation_time_asc(self, client):
        docs_count = 5
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # check doc statuses
        doc_statuses = list(client.list_all_document_statuses(poller.id, order_by=["created_on asc"])) # convert from generic iterator to list
        self.assertEqual(len(doc_statuses), docs_count)
        # verify each doc was created no earlier than the previous one
        curr = datetime.min
        for document in doc_statuses:
            assert(document.created_on.replace(tzinfo=None) >= curr.replace(tzinfo=None))
            curr = document.created_on

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_order_by_creation_time_desc(self, client):
        docs_count = 5
        target_language = "es"
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # check doc statuses
        doc_statuses = list(client.list_all_document_statuses(poller.id, order_by=["created_on desc"])) # convert from generic iterator to list
        self.assertEqual(len(doc_statuses), docs_count)
        # verify each doc was created no later than the previous one
        curr = datetime.max
        for document in doc_statuses:
            assert(document.created_on.replace(tzinfo=None) <= curr.replace(tzinfo=None))
            curr = document.created_on

    @DocumentTranslationPreparer()
    @DocumentTranslationClientPreparer()
    def test_list_document_statuses_mixed_filters(self, client):
        docs_count = 10
        target_language = "es"
        skip = 1
        results_per_page = 2
        statuses = ["Succeeded"]
        # submit and validate operation
        poller = self._begin_and_validate_translation_with_multiple_docs(client, docs_count, language_code=target_language, wait=True)
        # get ids
        doc_statuses = list(client.list_all_document_statuses(poller.id)) # convert from generic iterator to list
        self.assertEqual(len(doc_statuses), docs_count)
        ids = [doc.id for doc in doc_statuses]
        ids = ids[:docs_count//2]
        filtered_docs = client.list_all_document_statuses(
            poller.id,
            # filters
            document_ids=ids,
            statuses=statuses,
            # ordering
            order_by=["created_on asc"],
            # paging
            skip=skip,
            results_per_page=results_per_page
        ).by_page()
        self.assertIsNotNone(filtered_docs)
        # check statuses
        counter = 0
        curr_time = datetime.min
        for page in filtered_docs:
            page_docs = list(page)
            self.assertLessEqual(len(page_docs), results_per_page) # assert paging
            for doc in page_docs:
                counter += 1
                # assert ordering
                assert(doc.created_on.replace(tzinfo=None) >= curr_time.replace(tzinfo=None))
                curr_time = doc.created_on
                # assert filters
                self.assertIn(doc.status, statuses)
                self.assertIn(doc.id, ids)
        assert(counter == len(ids) - skip)
|
[
"noreply@github.com"
] |
jayhebe.noreply@github.com
|
f06015cdef49de3c91fb4a6212eece1e0d38b437
|
c0bc042e73825a89949c1df1daefc41796903ae1
|
/youtube/urls.py
|
aac35fa9fa3fba7ce66c1b936fd40c853849efae
|
[] |
no_license
|
Pennylele/pennyfang_portfolio
|
389aa93f392701ef5fa4f1a129d98c0ddd373dbc
|
b29706123b860d2378d89f0daa462b33e4609a68
|
refs/heads/master
| 2023-09-05T11:33:29.847845
| 2021-11-24T19:32:15
| 2021-11-24T19:32:15
| 325,154,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from .views import VideoListView, SearchResultsView, FilterByViews, VideoDetailView, Sync
from django.urls import path
# URL namespace for reversing these routes, e.g. reverse('youtube:home').
app_name='youtube'

urlpatterns = [
    path('', VideoListView.as_view(), name='home'),
    path('search/', SearchResultsView.as_view(), name='search_results'),
    path('sort-by-views/', FilterByViews.as_view(), name='sort_views'),
    path('video-detail/<slug>/', VideoDetailView.as_view(), name='video_detail'),
    # NOTE(review): Sync is registered without .as_view() — presumably a
    # function-based view, unlike the others; confirm in views.py.
    path('sync/', Sync, name='sync'),
]
|
[
"fangle0121@gmail.com"
] |
fangle0121@gmail.com
|
5bd4b629e2c1439c220548e9247835c48992f28e
|
fac96b4c97150e02f1405f7430c89b115e4c27f7
|
/ch08/ex8-20.printing_models.py
|
bfef927128761d040af4e85f8ced90253a583a32
|
[] |
no_license
|
gustavonvp/PYTHON-CRASH-COURSE
|
37478990ff3c3c368da505eb9e5a35dee5d1960b
|
8033e2eb84cf6d85fd4ff42ae0550f38dcd23f62
|
refs/heads/master
| 2023-04-03T00:42:20.333183
| 2017-10-24T05:47:01
| 2017-10-24T05:47:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
# Start with some designs that need to be printed.
unprinted_designs = ['iphone case', 'robot pendant', 'dodecahedron']
completed_models = []

# Simulate printing each design, until none are left, moving each finished
# design from the pending queue into completed_models.
while unprinted_designs:
    design = unprinted_designs.pop()
    # Simulate creating a 3D print from the design.
    print("Printing model: " + design)
    completed_models.append(design)

# Display all completed models.
print("\nThe following models have been printed:")
for model in completed_models:
    print(model)
|
[
"freebz@hananet.net"
] |
freebz@hananet.net
|
0a7e4ac3a6aa381a2be9b21e6ff39af814db7972
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/joblib/parallel.py
|
dff07a7420ad4d2662baa74a296be91ffc236c13
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:10afb49d59c3778a8fd053bbec2d63b85f3b24f63a308df37f309126a62f3571
size 46534
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
5d8c2c21c425289bf070970045fc338486eb2e08
|
66f383fec502102bfec58ed8cb9c43a71e599c55
|
/apps/accounts/events.py
|
f34b3398d34130125064588eeea3a67c4b10f9ab
|
[
"MIT"
] |
permissive
|
hacktoolkit/django-htk
|
0a984a28f7fbc7eed8e2b1975d210792ddbee829
|
935c4913e33d959f8c29583825f72b238f85b380
|
refs/heads/master
| 2023-08-08T11:52:54.298160
| 2023-07-21T19:08:37
| 2023-07-21T19:08:37
| 15,924,904
| 210
| 65
|
MIT
| 2023-09-08T23:59:28
| 2014-01-15T04:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# Python Standard Library Imports
# Third Party (PyPI) Imports
import rollbar
# HTK Imports
from htk.utils import htk_setting
from htk.utils.notifications import slack_notify
def failed_recaptcha_on_login(user, request=None):
    """Report a failed login reCAPTCHA to Rollbar and, when enabled, Slack.

    `user` is the account that attempted the login; `request` is forwarded
    to Rollbar so the report carries request context.
    """
    message = 'Failed reCAPTCHA. Suspicious login detected.'
    rollbar.report_message(
        message,
        request=request,
        extra_data={
            'user': {
                'id': user.id,
                'username': user.username,
                'email': user.email,
            },
        }
    )
    if not htk_setting('HTK_SLACK_NOTIFICATIONS_ENABLED'):
        return
    slack_notify(
        '%s User: %s <%s>' % (message, user.username, user.email),
        level='warning'
    )
def failed_recaptcha_on_account_register(request=None):
    """Report a failed registration reCAPTCHA to Rollbar and, when enabled, Slack."""
    message = 'Failed reCAPTCHA. Suspicious account registration detected.'
    rollbar.report_message(message, request=request)
    if not htk_setting('HTK_SLACK_NOTIFICATIONS_ENABLED'):
        return
    slack_notify(message, level='warning')
|
[
"hello@jontsai.com"
] |
hello@jontsai.com
|
cb662f3f1ec68283494e7dd6ab627411ce475ddd
|
4bd818bc9bd83ed39c9d48b0e4e4821a2b8c45d9
|
/src/etherollapp/etheroll/customtoolbar.py
|
d31f906fb38e24b9465769aeaa3b7894757e17a9
|
[
"MIT"
] |
permissive
|
AndreMiras/EtherollApp
|
8ef158e9e5886922bb56a42d836daa392e5d5f2e
|
2ccc30fad736a6fee0cba8b99c521bee6ad13087
|
refs/heads/develop
| 2021-09-11T14:41:04.753290
| 2021-09-01T07:28:44
| 2021-09-01T07:28:44
| 125,426,260
| 59
| 29
|
MIT
| 2021-06-08T20:29:42
| 2018-03-15T21:07:28
|
Python
|
UTF-8
|
Python
| false
| false
| 846
|
py
|
from kivy.app import App
from kivy.clock import Clock
from kivymd.toolbar import Toolbar
from etherollapp.etheroll.ui_utils import load_kv_from_py
load_kv_from_py(__file__)
class CustomToolbar(Toolbar):
    """Toolbar with helper method for loading default/back buttons."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Defer button setup until the widget tree (app.root) exists.
        Clock.schedule_once(self.load_default_buttons)

    def load_default_buttons(self, dt=None):
        """Install the default menu (left) and overflow dots (right) buttons."""
        app = App.get_running_app()
        toggle = lambda x: app.root.navigation.toggle_nav_drawer()
        self.left_action_items = [['menu', toggle]]
        self.right_action_items = [['dots-vertical', toggle]]

    def load_back_button(self, function):
        """Replace the left action items with a back arrow invoking `function`."""
        self.left_action_items = [['arrow-left', lambda x: function()]]
|
[
"andre.miras@gmail.com"
] |
andre.miras@gmail.com
|
b0fe4b6f20442f6ba76f885dbb49a445c8df729a
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/d59101416366fa8d50bd2d8218e772b2c6a8bd7f-<remove_from_device>-fix.py
|
58894645341f61f607af0fa3f9de1bd569631b46
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
def remove_from_device(self):
    """Delete this firewall rule from the device via the iControl REST API.

    The rule's URI is built under either the parent policy or the parent
    rule list, depending on which is set on ``self.want``. Returns True on
    an HTTP 200 response.
    """
    name = self.want.name
    if self.want.parent_policy:
        # Rule lives under a firewall policy. '/' in the rule name is
        # replaced with '_' — presumably to keep the URI segment valid;
        # confirm against the creation-side naming.
        uri = 'https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}'.format(self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_policy), name.replace('/', '_'))
    else:
        # Otherwise the rule lives under a rule list.
        uri = 'https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}'.format(self.client.provider['server'], self.client.provider['server_port'], transform_name(self.want.partition, self.want.parent_rule_list), name.replace('/', '_'))
    resp = self.client.api.delete(uri)
    if (resp.status == 200):
        return True
    # NOTE(review): non-200 responses fall through and return None rather
    # than raising or returning False — confirm callers treat None as failure.
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
ac528187a330a1170469d7253b64bf4680d05ce3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_evaporating.py
|
c26673a9d07507032d353945f4ce1d7da9ede1af
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from xai.brain.wordbase.verbs._evaporate import _EVAPORATE
# class header
class _EVAPORATING(_EVAPORATE):
    """Word entry for the verb form 'evaporating', derived from _EVAPORATE."""

    def __init__(self,):
        _EVAPORATE.__init__(self)
        self.name = "EVAPORATING"
        self.basic = "evaporate"
        self.specie = 'verbs'
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
04a50dc0e33be89b7e0b3dcf0a41fb02d629f963
|
b0cdbad299f6174bfdb0fba173dbcf3889b82209
|
/Object Oriented Programming/oops/exercise_4.py
|
57620e40b0f7418b80594400b89402e36f343253
|
[] |
no_license
|
deesaw/PythonD-06
|
a33e676f1e0cfc13b4ea645c8b60547b198239ac
|
3c6f065d7be2e3e10cafb6cef79d6cae9d55a7fa
|
refs/heads/master
| 2023-03-18T08:24:42.030935
| 2021-03-02T14:15:09
| 2021-03-02T14:15:09
| 343,797,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
class Dog(object):
    """A dog that can speak and guard."""

    def speak(self):
        """Print the dog's bark."""
        print("bhou..bhou")

    def guard(self):
        """Print the dog's guarding message."""
        print("I am guarding your home")
class Cat(object):
    """A cat that can speak and hunt."""

    def speak(self):
        """Print the cat's meow."""
        print("meau..meau")

    def hunt(self):
        """Print the cat's hunting message."""
        print("I am hunting mice")
class Dd(Dog):
    """Specialized dog: overrides guard() but keeps access to Dog's version."""

    def hobby(self):
        print("Biting")

    def guard(self):
        print("Guarding house")

    def oldguard(self):
        # Delegate to the base-class (Dog) implementation.
        super().guard()
# Exercise Dd: overridden guard(), inherited speak(), its own hobby(),
# and the original Dog.guard() via oldguard().
ginger=Dd()
ginger.guard()
ginger.speak()
ginger.hobby()
ginger.oldguard()
print("*******************************************************")
class Doat(Cat, Dog):
    """Cat/Dog hybrid; Cat comes first in the MRO, so unoverridden
    lookups resolve to Cat before Dog."""

    def hobby(self):
        print("programming in python")

    def speak(self):
        print("bhou..meau")

    def oldspeak(self):
        # The next class after Doat in the MRO is Cat, so this is Cat.speak().
        super().speak()
# Exercise Doat: its own speak()/hobby(), guard() from Dog, hunt() from Cat,
# and Cat.speak() via oldspeak() (multiple-inheritance MRO demo).
ginger1=Doat()
ginger1.speak()
ginger1.guard()
ginger1.hunt()
ginger1.hobby()
ginger1.oldspeak()
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
29a1a2c46aa99f941385f809339cfe85914cf4d6
|
9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c
|
/baomoicrawl/venv/Lib/site-packages/scrapy/utils/sitemap.py
|
c9f5b4ef42d0d29efc71b43d2c9e9ba8ded9a1a6
|
[] |
no_license
|
thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler
|
b0fdedee2942a12d9f64dfed93f43802dc5ab340
|
87c8c07433466bbc43a24ea089f75baeb467c356
|
refs/heads/master
| 2022-11-27T21:36:33.917491
| 2020-08-10T23:24:42
| 2020-08-10T23:24:42
| 286,583,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
"""
Module for processing Sitemaps.
Note: The main purpose of this module is to provide support for the
SitemapSpider, its API is subject to change without notice.
"""
from urllib.parse import urljoin
import lxml.etree
class Sitemap:
    """Class to parse Sitemap (type=urlset) and Sitemap Index
    (type=sitemapindex) files"""

    def __init__(self, xmltext):
        # recover=True tolerates slightly malformed XML; resolve_entities=False
        # prevents entity expansion on untrusted sitemap input.
        xmlp = lxml.etree.XMLParser(recover=True, remove_comments=True, resolve_entities=False)
        self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
        rt = self._root.tag
        # Strip the '{namespace}' prefix, keeping only the local tag name.
        self.type = rt.split('}', 1)[1] if '}' in rt else rt

    def __iter__(self):
        """Yield one dict per <url>/<sitemap> entry that has a 'loc' child."""
        # Iterate elements directly instead of the deprecated getchildren()
        # (removed from the ElementTree API; deprecated in lxml).
        for elem in self._root:
            d = {}
            for el in elem:
                tag = el.tag
                name = tag.split('}', 1)[1] if '}' in tag else tag
                if name == 'link':
                    # xhtml:link alternates are collected under 'alternate'.
                    if 'href' in el.attrib:
                        d.setdefault('alternate', []).append(el.get('href'))
                else:
                    d[name] = el.text.strip() if el.text else ''
            if 'loc' in d:
                yield d
def sitemap_urls_from_robots(robots_text, base_url=None):
    """Return an iterator over all sitemap urls contained in the given
    robots.txt file
    """
    for line in robots_text.splitlines():
        if not line.lstrip().lower().startswith('sitemap:'):
            continue
        # Everything after the first ':' is the (possibly relative) URL.
        _, _, url = line.partition(':')
        yield urljoin(base_url, url.strip())
|
[
"thuy4tbn99@gmail.com"
] |
thuy4tbn99@gmail.com
|
653e892c9b0e8d7676e7419a4cd8223861cf33d8
|
4912cbd47c19c58d142e6833911d70f5ea037357
|
/question_bank/reverse-string/reverse-string.py
|
c47284c0fefc2d681bb7e99d7485ae06dcaf5e97
|
[
"Apache-2.0"
] |
permissive
|
yatengLG/leetcode-python
|
a09a17cd9e60cafd9ff8ca9c068f5b70719c436f
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
refs/heads/master
| 2023-07-13T16:10:01.920716
| 2021-09-06T02:51:46
| 2021-09-06T02:51:46
| 286,969,109
| 13
| 6
| null | 2021-02-16T10:19:44
| 2020-08-12T09:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 643
|
py
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:36 ms, 在所有 Python3 提交中击败了98.98% 的用户
内存消耗:14.4 MB, 在所有 Python3 提交中击败了19.40% 的用户
解题思路:
双指针。分别指向列表首尾,然后交换指针指向元素
"""
class Solution:
    def reverseString(self, s: list[str]) -> None:
        """
        Do not return anything, modify s in-place instead.
        """
        # Original annotation was `List[str]` with no `from typing import List`,
        # which raises NameError when the class body executes; the builtin
        # generic `list[str]` needs no import.
        n = len(s)
        # Two pointers at the ends of the list, swapping inward until they meet.
        l, r = 0, n - 1
        while r > l:
            s[l], s[r] = s[r], s[l]  # swap the outer pair
            l += 1  # move both pointers inward
            r -= 1
|
[
"767624851@qq.com"
] |
767624851@qq.com
|
d28f93833de104995b112c67f309aeca3665e1a5
|
8e75f2ba056e5bd75647f1e3f9773e1015c0dd0e
|
/628_maximum_product_of_three_numbers.py
|
9df4c2022f676107d77fa0ca3142287a66e826c7
|
[] |
no_license
|
eazow/leetcode
|
96cbcba143ce04c6e83c5c985e19320f48c60b0d
|
c1c5ee72b8fe608b278ca20a58bc240fdc62b599
|
refs/heads/master
| 2022-12-10T00:06:06.676066
| 2022-11-29T09:02:04
| 2022-11-29T09:02:04
| 46,109,860
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
class Solution(object):
    def maximumProduct(self, nums):
        """Return the largest product of any three numbers in nums.

        :type nums: List[int]
        :rtype: int

        After sorting, the answer is either the three largest values or the
        two smallest (possibly both negative) times the largest.
        """
        ordered = sorted(nums)
        top_three = ordered[-3] * ordered[-2] * ordered[-1]
        two_low_one_high = ordered[0] * ordered[1] * ordered[-1]
        return max(top_three, two_low_one_high)
# Smoke tests: all-positive case, and the case where two negatives pair
# with the largest positive.
assert Solution().maximumProduct([1,2,3,4]) == 24
assert Solution().maximumProduct([-4,-3,-2,-1,60]) == 720
|
[
"eazow@163.com"
] |
eazow@163.com
|
9848cbcc79703b08c9c0e9ee9bbbd69fb4c86624
|
b7add0d1b1effc50b27d3316fa5889a5227e5b19
|
/Atlasbuggy/atlasbuggy/files/videofile.py
|
205f824be3d6e10d3c1e8934bf447a376387c7fd
|
[] |
no_license
|
Woz4tetra/Atlas
|
efb83a7c7b2698bf8b36b023f7aa573cc38284f6
|
c7380868a9efef9d1594ed7aa87187f03a7e4612
|
refs/heads/master
| 2020-04-04T06:25:50.657631
| 2017-04-05T01:53:15
| 2017-04-05T01:53:15
| 50,269,756
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,588
|
py
|
import cv2
import time
from atlasbuggy.files.atlasbuggyfile import AtlasWriteFile, AtlasReadFile
from atlasbuggy.files.logfile import default_log_dir_name, default_log_file_name
class VideoPlayer:
def __init__(self, video_name, video_dir, window_name, capture, width=None, height=None, frame_skip=0,
loop_video=False, start_frame=0, slider_callback=None):
video_name, video_dir = AtlasReadFile.format_path_as_time(video_name, video_dir, default_log_dir_name,
default_log_file_name)
self.read_file = AtlasReadFile(video_name, video_dir, False, ("avi", "mov"), "videos")
self.window_name = window_name
self.frame = None
self.current_frame_num = 0
self.current_time = 0.0
self.capture = capture
self.cv_capture = cv2.VideoCapture(self.full_path)
cv2.namedWindow(self.window_name)
self.fps = self.cv_capture.get(cv2.CAP_PROP_FPS)
self.num_frames = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_COUNT))
if self.num_frames <= 0:
raise FileNotFoundError("Video failed to load!")
self.length_sec = self.num_frames / self.fps
self.length_msec = int(self.length_sec * 1000)
self.slider_pos = 0
self.slider_ticks = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_WIDTH) // 3)
if self.slider_ticks > self.num_frames:
self.slider_ticks = self.num_frames
self.track_bar_name = "frame:"
cv2.createTrackbar(self.track_bar_name, self.window_name, 0, self.slider_ticks,
self.on_slider)
self.slider_callback = slider_callback
self.width = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cv_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.resize_frame = False
if width is None:
self.resize_width = self.width
else:
self.resize_width = width
self.resize_frame = True
if height is None:
self.resize_height = self.height
else:
self.resize_height = height
self.resize_frame = True
self.frame_skip = frame_skip
self.loop_video = loop_video
if start_frame > 0:
self.set_frame(start_frame)
self.sync_up_error = 0.01
def video_len(self):
return self.num_frames
def current_pos(self):
return int(self.cv_capture.get(cv2.CAP_PROP_POS_FRAMES))
def on_slider(self, slider_index):
slider_pos = int(slider_index * self.video_len() / self.slider_ticks)
if abs(slider_pos - self.current_pos()) > 1:
self.set_frame(slider_pos)
self.capture.show_frame(self.get_frame())
self.current_frame_num = self.current_pos()
self.slider_pos = slider_index
if self.slider_callback is not None:
self.slider_callback()
def set_frame(self, position):
if position >= self.video_len():
position = self.video_len()
if position >= 0:
self.cv_capture.set(cv2.CAP_PROP_POS_FRAMES, int(position))
def get_frame(self, current_time=None, advance_frame=True):
if current_time is not None:
self.current_time = self.current_pos() * self.length_sec / self.num_frames
if abs(current_time - self.current_time) > self.sync_up_error:
goal_frame = int(current_time * self.num_frames / self.length_sec)
self.set_frame(goal_frame)
return self.get_frame()
if self.frame_skip > 0:
self.set_frame(self.current_pos() + self.frame_skip)
success, self.frame = self.cv_capture.read()
if not advance_frame:
self.set_frame(self.current_pos() - 1)
if not success or self.frame is None:
if self.loop_video:
self.set_frame(0)
while success is False or self.frame is None:
success, self.frame = self.cv_capture.read()
else:
self.close()
return None
if self.resize_frame:
self.frame = cv2.resize(self.frame,
(self.resize_width, self.resize_height),
interpolation=cv2.INTER_NEAREST)
if self.current_pos() != self.current_frame_num:
self.current_frame_num = self.current_pos()
self.slider_pos = int(self.current_frame_num * self.slider_ticks / self.video_len())
cv2.setTrackbarPos(self.track_bar_name, self.window_name, self.slider_pos)
return self.frame
def close(self):
cv2.destroyWindow(self.window_name)
class VideoRecorder(AtlasWriteFile):
def __init__(self, video_name, video_dir, width, height, enable_recording, capture, cam_number, cv_capture):
super(VideoRecorder, self).__init__(video_name, video_dir, False, "avi", "videos")
if cv_capture is not None:
self.cv_capture = cv_capture
elif cam_number is not None:
self.cv_capture = cv2.VideoCapture(cam_number)
else:
raise ValueError("Capture number or capture instance not supplied!")
print("Sampling for FPS...", end="")
time0 = time.time()
samples = 15
for frame_num in range(samples):
success, self.frame = self.cv_capture.read()
if not success:
raise FileNotFoundError("Failed to retrieve from camera")
capture.show_frame(self.frame)
fps = samples / (time.time() - time0)
print("done: ", fps)
self.enable_recording = enable_recording
self.width = width
self.height = height
if width is not None:
self.recorder_width = width
self.width = width
else:
self.recorder_width = self.frame.shape[1]
self.width = self.frame.shape[1]
if height is not None:
self.recorder_height = height
self.height = height
else:
self.recorder_height = self.frame.shape[0]
self.height = self.frame.shape[0]
self.resize_frame = self.frame.shape[0:2] != (self.height, self.width)
if self.enable_recording:
codec = 'MJPG'
fourcc = cv2.VideoWriter_fourcc(*codec)
self.video = cv2.VideoWriter()
self.video.open(self.full_path, fourcc, fps, (self.recorder_width, self.recorder_height), True)
self._is_open = True
print("Writing video to:", self.full_path)
else:
self.video = None
def write(self, frame):
if frame.shape[0:2] != (self.recorder_height, self.recorder_width):
frame = cv2.resize(frame, (self.recorder_height, self.recorder_width))
if len(frame.shape) == 2:
self.video.write(cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR))
else:
self.video.write(frame)
def get_frame(self):
success, self.frame = self.cv_capture.read()
if self.resize_frame and self.frame.shape[0:2] != (self.height, self.width):
self.frame = cv2.resize(self.frame, (self.width, self.height))
if self.enable_recording:
self.write(self.frame)
return self.frame
def close(self):
if self._is_open:
self.video.release()
self._is_open = False
print("Wrote video to:", self.full_path)
|
[
"woz4tetra@gmail.com"
] |
woz4tetra@gmail.com
|
08b3051adaf303a2d19d7736a97fbe771d06b6ae
|
80760d4c8a6b2c45b4b529bdd98d33c9c5509438
|
/Practice/atcoder/ABC/130/src/d2.py
|
82230a1e0cc7c786640ababca689faaaffcd8866
|
[] |
no_license
|
prrn-pg/Shojin
|
f1f46f8df932df0be90082b475ec02b52ddd882e
|
3a20f1122d8bf7d95d9ecd205a62fc36168953d2
|
refs/heads/master
| 2022-12-30T22:26:41.020473
| 2020-10-17T13:53:52
| 2020-10-17T13:53:52
| 93,830,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
# 尺取の練習と聞いて
n, k = map(int, input().split())
arr = list(map(int, input().split()))
ans = 0
r = 0
tmp_sum = 0
for l in range(n):
# 条件を満たすまで右端を伸ばす
while r < n and tmp_sum < k:
tmp_sum += arr[r]
r += 1
# whileを抜けた時は条件を満たしている
if r == n:
while tmp_sum >= k and l <= n:
tmp_sum -= arr[l]
ans += 1
l += 1
break
else:
ans += n - r + 1
tmp_sum -= arr[l]
print(ans)
|
[
"h4l@yahoo.ne.jp"
] |
h4l@yahoo.ne.jp
|
c112d5ce3c5633ee755fd9211c360485d7c5e38f
|
82a9077bcb5a90d88e0a8be7f8627af4f0844434
|
/google-cloud-sdk/lib/tests/unit/surface/apigee/operations_describe_test.py
|
e7433df43a3aae0f564d4c7c695e60939f77816c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
piotradamczyk5/gcloud_cli
|
1ae2553595e569fad6ce84af62b91a7ee5489017
|
384ece11040caadcd64d51da74e0b8491dd22ca3
|
refs/heads/master
| 2023-01-01T23:00:27.858583
| 2020-10-21T04:21:23
| 2020-10-21T04:21:23
| 290,238,061
| 0
| 0
| null | 2020-10-19T16:43:36
| 2020-08-25T14:31:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,920
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that exercise the 'gcloud apigee operations describe' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from tests.lib.surface.apigee import base
class OperationsDescribeTest(base.ApigeeSurfaceTest):
def testSimpleDescribe(self):
canned_response = {
"metadata": {
"@type":
"type.googleapis.com/google.cloud.apigee.v1.OperationMetadata",
"operationType":
"INSERT",
"state":
"IN_PROGRESS",
"targetResourceName":
"organizations/cwajh-test-project"
},
"name":
"organizations/test-org/operations/20b4ba00-0806-0000-997a-522a4adf027f"
}
self.AddHTTPResponse(
"https://apigee.googleapis.com/v1/organizations/test-org/operations/20b4ba00-0806-0000-997a-522a4adf027f",
status=200,
body=json.dumps(canned_response))
self.RunApigee("operations describe 20b4ba00-0806-0000-997a-522a4adf027f "
"--organization=test-org --format=json")
canned_response["uuid"] = "20b4ba00-0806-0000-997a-522a4adf027f"
canned_response["organization"] = "test-org"
self.AssertJsonOutputMatches(canned_response)
|
[
"actions@github.com"
] |
actions@github.com
|
7ac3108667a2fc73e496511aca4aa994b5413c18
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/bps_cs22950-10/sdB_bps_cs22950-10_coadd.py
|
e98ff4ffa0393f2b27e60d204c16b04f0ab364d4
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[304.840292,-15.674492], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_bps_cs22950-10/sdB_bps_cs22950-10_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_bps_cs22950-10/sdB_bps_cs22950-10_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
9d96d467699e2db6bbc9dacc1e91761aab92a6dc
|
2d921bb03eade0763ddb3a9cc5cb637730ecbde1
|
/python/plot/PlotStyle.py
|
12d2ecb4732ab7424ed515bca8ebc8ce4145f6a4
|
[] |
no_license
|
rmanzoni/WTau3Mu
|
10c57971b80f9769578284abd69009008901eea7
|
5ad336df976d5a1b39e4b516641661921b06ba20
|
refs/heads/92X
| 2021-01-18T15:10:41.887147
| 2019-05-09T12:48:00
| 2019-05-09T12:48:00
| 84,342,825
| 0
| 7
| null | 2018-07-19T09:08:19
| 2017-03-08T16:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,537
|
py
|
from ROOT import gROOT, gStyle, TFile, TH1F, TH2F, TCanvas, TLegend, TF1, TGraph, TVectorF, TGraphErrors, TObjArray, THStack, TStyle, TGaxis, kWhite
""" Initialises default ROOT plot style.
In order to support old instances of the PlotStyle that
depended on a given ntuple, it is setup as a class.
"""
class PlotStyle(object):
""" Main class for creating ROOT objects from PyRootObjects.
"""
@staticmethod
def initStyle():
gROOT.SetStyle("Plain")
# For the canvas:
gStyle.SetCanvasBorderMode(0)
gStyle.SetCanvasColor(kWhite)
gStyle.SetCanvasDefH(700) #Height of canvas
gStyle.SetCanvasDefW(700) #Width of canvas
gStyle.SetCanvasDefX(0) #Position on screen
gStyle.SetCanvasDefY(0)
# For the line:
gStyle.SetLineWidth(2)
# For the Pad:
gStyle.SetPadBorderMode(0)
# gStyle.SetPadBorderSize(Width_t size = 1)
gStyle.SetPadColor(kWhite)
gStyle.SetPadGridX(True)
gStyle.SetPadGridY(True)
gStyle.SetGridColor(0)
gStyle.SetGridStyle(3)
gStyle.SetGridWidth(1)
# For the frame:
gStyle.SetFrameBorderMode(0)
gStyle.SetFrameBorderSize(1)
gStyle.SetFrameFillColor(0)
gStyle.SetFrameFillStyle(0)
gStyle.SetFrameLineColor(1)
gStyle.SetFrameLineStyle(1)
gStyle.SetFrameLineWidth(1)
# For the histo:
# gStyle.SetHistFillColor(1)
# gStyle.SetHistFillStyle(0)
gStyle.SetHistLineColor(1)
gStyle.SetHistLineStyle(0)
gStyle.SetHistLineWidth(2)
# gStyle.SetLegoInnerR(Float_t rad = 0.5)
# gStyle.SetNumberContours(Int_t number = 20)
gStyle.SetEndErrorSize(2)
#gStyle.SetErrorMarker(20)
gStyle.SetErrorX(0.)
gStyle.SetMarkerStyle(8)
gStyle.SetMarkerSize(1)
#For the fit/function:
gStyle.SetOptFit(0)
gStyle.SetFitFormat("5.4g")
gStyle.SetFuncColor(2)
gStyle.SetFuncStyle(1)
gStyle.SetFuncWidth(1)
#For the date:
gStyle.SetOptDate(0)
# gStyle.SetDateX(Float_t x = 0.01)
# gStyle.SetDateY(Float_t y = 0.01)
# For the statistics box:
gStyle.SetOptFile(0)
gStyle.SetOptStat(0) # To display the mean and RMS: SetOptStat("mr")
gStyle.SetStatColor(kWhite)
gStyle.SetStatFont(42)
gStyle.SetStatFontSize(0.025)
gStyle.SetStatTextColor(1)
gStyle.SetStatFormat("6.4g")
gStyle.SetStatBorderSize(1)
gStyle.SetStatH(0.1)
gStyle.SetStatW(0.15)
# gStyle.SetStatStyle(Style_t style = 1001)
# gStyle.SetStatX(Float_t x = 0)
# gStyle.SetStatY(Float_t y = 0)
# Margins:
gStyle.SetPadTopMargin(0.11)
gStyle.SetPadBottomMargin(0.13)
gStyle.SetPadLeftMargin(0.17)
gStyle.SetPadRightMargin(0.07)
# For the Global title:
gStyle.SetOptTitle(0)
gStyle.SetTitleFont(42)
gStyle.SetTitleColor(1)
gStyle.SetTitleTextColor(1)
gStyle.SetTitleFillColor(10)
gStyle.SetTitleFontSize(0.04)
# gStyle.SetTitleH(0) # Set the height of the title box
# gStyle.SetTitleW(0) # Set the width of the title box
#gStyle.SetTitleX(0.35) # Set the position of the title box
#gStyle.SetTitleY(0.986) # Set the position of the title box
# gStyle.SetTitleStyle(Style_t style = 1001)
#gStyle.SetTitleBorderSize(0)
# For the axis titles:
gStyle.SetTitleColor(1, "XYZ")
gStyle.SetTitleFont(42, "XYZ")
gStyle.SetTitleSize(0.05, "XYZ")
# gStyle.SetTitleXSize(Float_t size = 0.02) # Another way to set the size?
# gStyle.SetTitleYSize(Float_t size = 0.02)
gStyle.SetTitleXOffset(1.)
gStyle.SetTitleYOffset(1.3)
#gStyle.SetTitleOffset(1.1, "Y") # Another way to set the Offset
# For the axis labels:
gStyle.SetLabelColor(1, "XYZ")
gStyle.SetLabelFont(42, "XYZ")
gStyle.SetLabelOffset(0.007, "XYZ")
gStyle.SetLabelSize(0.035, "XYZ")
# For the axis:
gStyle.SetAxisColor(1, "XYZ")
gStyle.SetStripDecimals(True)
gStyle.SetTickLength(0.03, "XYZ")
gStyle.SetNdivisions(510, "XYZ")
gStyle.SetPadTickX(1) # To get tick marks on the opposite side of the frame
gStyle.SetPadTickY(1)
# Change for log plots:
gStyle.SetOptLogx(0)
gStyle.SetOptLogy(0)
gStyle.SetOptLogz(0)
gStyle.SetPalette(1) #(1,0)
# another top group addition
gStyle.SetHatchesSpacing(1.0)
# Postscript options:
gStyle.SetPaperSize(20., 20.)
#gStyle.SetPaperSize(TStyle.kA4)
#gStyle.SetPaperSize(27., 29.7)
#TGaxis.SetMaxDigits(3)
# gStyle.SetLineScalePS(Float_t scale = 3)
# gStyle.SetLineStyleString(Int_t i, const char* text)
# gStyle.SetHeaderPS(const char* header)
# gStyle.SetTitlePS(const char* pstitle)
#gStyle.SetColorModelPS(1)
# gStyle.SetBarOffset(Float_t baroff = 0.5)
# gStyle.SetBarWidth(Float_t barwidth = 0.5)
# gStyle.SetPaintTextFormat(const char* format = "g")
# gStyle.SetPalette(Int_t ncolors = 0, Int_t* colors = 0)
# gStyle.SetTimeOffset(Double_t toffset)
# gStyle.SetHistMinimumZero(kTRUE)
#gStyle.cd()
print "TDR Style initialized"
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
635bd4085a4fdd6fef954d62dc513a0220d56cfd
|
04b494a2286e7d0ec3bbe8d25c15d575486a0f91
|
/_exercises/exercise113/exercise113.py
|
b555ca05f49c798a2c6370f04eec96053da588c3
|
[] |
no_license
|
ViniciusGranado/_studies_Python
|
ea6adc35edccfbd81a67a613e8cd468fd8485856
|
af645fa777a408a8ff1b8ed89911971f5b537ac7
|
refs/heads/master
| 2023-02-01T19:57:04.117047
| 2020-12-19T00:56:10
| 2020-12-19T00:56:10
| 258,855,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
def read_int(msg):
while True:
try:
number = int(input(msg))
except (ValueError, TypeError):
print('[ERRO] Digite um número inteiro válido.')
print()
except KeyboardInterrupt:
print('[ERRO] Entrada de dados interrompida.')
print('Considerando valor 0')
return 0
else:
return number
def read_float(msg):
while True:
try:
number = float(input(msg).replace(',', '.'))
except (ValueError, TypeError):
print('[ERRO] Digite um número real válido.')
print()
except KeyboardInterrupt:
print('[ERRO] Entrada de dados interrompida.')
print('Considerando valor 0')
return 0
else:
if number.is_integer():
return int(number)
return number
int_number = read_int('Digite um valor inteiro: ')
float_number = read_float('Digite um valor real: ')
print(f'Você digitou {int_number} e {float_number}')
|
[
"vinicius.r.granado@gmail.com"
] |
vinicius.r.granado@gmail.com
|
de89526204340fed105a0efb1a4cfd7137b26f44
|
b08d42933ac06045905d7c005ca9c114ed3aecc0
|
/src/coefSubset/evaluate/ranks/tenth/rank_2i26_I.py
|
4c80ed792391aea378782cf42541231cd379011d
|
[] |
no_license
|
TanemuraKiyoto/PPI-native-detection-via-LR
|
d148d53f5eb60a4dda5318b371a3048e3f662725
|
897e7188b0da94e87126a4acc0c9a6ff44a64574
|
refs/heads/master
| 2022-12-05T11:59:01.014309
| 2020-08-10T00:41:17
| 2020-08-10T00:41:17
| 225,272,083
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,204
|
py
|
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2i26.csv'
identifier = 'I'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
# subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]]. Also some input files have 'class' column. This is erroneous and is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
# Standardize inut DF using the standard scaler used for training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
|
[
"tanemur1@msu.edu"
] |
tanemur1@msu.edu
|
a65a90b47466d6936a96eccf140c06ee21b57225
|
ddf1267a1a7cb01e70e3b12ad4a7bfaf291edb3e
|
/src/user/migrations/0023_action_read_date.py
|
d87e85c86569ba17531d435462e2b08d515436fd
|
[
"MIT"
] |
permissive
|
Garinmckayl/researchhub-backend
|
46a17513c2c9928e51db4b2ce5a5b62df453f066
|
cd135076d9a3b49a08456f7ca3bb18ff35a78b95
|
refs/heads/master
| 2023-06-17T04:37:23.041787
| 2021-05-18T01:26:46
| 2021-05-18T01:26:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
# Generated by Django 2.2.9 on 2020-01-10 20:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0022_action'),
]
operations = [
migrations.AddField(
model_name='action',
name='read_date',
field=models.DateTimeField(default=None, null=True),
),
]
|
[
"lightning.lu7@gmail.com"
] |
lightning.lu7@gmail.com
|
03600cc8214045434b642323a45c09a881382679
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/replay_config.py
|
7aef30971cd0c4b996594cb00d2313e431ebf28b
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606
| 2014-06-11T23:55:11
| 2014-06-11T23:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_syn_mem_corruption', label='c1', address='127.0.0.1', cwd='pox')],
topology_class=MeshTopology,
topology_params="num_switches=4",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_5_/mcs.trace.notimeouts",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='InvariantChecker.check_liveness',
bug_signature="c1")
|
[
"b-github.com@wundsam.net"
] |
b-github.com@wundsam.net
|
1abd82cd32e985e35728a631c81c33ef0fe62b70
|
481ce69bd3611715fef0be99c655c95d67f16d5f
|
/riopy/tests/test_symops.py
|
a44f6bd76716d46e50bf17f299cbedb403e45b81
|
[
"BSD-3-Clause"
] |
permissive
|
fsimkovic/riopy
|
0ffed18c72573e824affa97d5c17ca462c5f2031
|
5dc4083d1c0919d94ceeac802d3fb40748b947f3
|
refs/heads/master
| 2021-03-24T10:14:25.904758
| 2018-08-30T13:16:02
| 2018-08-30T13:16:02
| 117,836,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
import unittest
from riopy.symops import SymmetryOperator
class SymmetryOperatorTest(unittest.TestCase):
def test___init___1(self):
symops = SymmetryOperator.ops("P1")
self.assertTrue(len(symops) == 1)
self.assertTupleEqual((0.0, 0.0, 0.0), symops[0].t().as_double())
self.assertTupleEqual((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0), symops[0].r().as_double())
if __name__ == "__main__":
unittest.main(verbosity=2)
|
[
"felixsimkovic@me.com"
] |
felixsimkovic@me.com
|
59e1363d026e1cf5c641f40106aba606a342065e
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_337/ch169_2020_06_21_16_48_03_433219.py
|
d50363959fd13d06ed505512e563e82d36dc80ab
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
login = input('Login?')
lista = []
while login != 'fim':
if not login in lista:
lista.append(login)
else:
i = 1
k = True
while k:
login2 = login+str(i)
if not login2 in lista:
lista.append(login2)
k = False
i+=1
login = input('Login?')
for nome in lista:
print(nome)
|
[
"you@example.com"
] |
you@example.com
|
c71e156f811307de345da807ee15cfe276b92a55
|
f23c1741a63acd9d431077c4b2068e4072a72d51
|
/permutation.py
|
a92b11d13ab718087d9f9ce651ba2472f6a711a6
|
[] |
no_license
|
Martin9527/LeetCodeTest
|
b188c997ab01a38201bd5ba792cdc104ca79d1d4
|
5f860c8fd2d7d7ff94eca6065d643cc4ea204abf
|
refs/heads/master
| 2020-05-23T11:21:54.543063
| 2019-12-08T10:37:42
| 2019-12-08T10:37:42
| 186,735,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,301
|
py
|
class Solution(object):
def permute(self,nums):
size = len(nums)
if not size :
return []
result = []
curAns = []
usedNums = set()
self.backTrack(nums,size,curAns,usedNums,result)
return result
def backTrack(self,nums,size,curAns,usedNums,result):
if size == len(curAns):
import copy
ans = copy.deepcopy(curAns)
result.append(ans)
return
for j in range(size):
if nums[j] not in usedNums:
usedNums.add(nums[j])
curAns.append(nums[j])
self.backTrack(nums,size,curAns,usedNums,result)
usedNums.remove(nums[j])
curAns.pop()
def permuteUnique(self,nums):
size = len(nums)
if size < 1:
return []
res = []
usedNums = set()
def backTrack(nums,begin,curAns,usedNums):
if len(curAns) == size:
res.append(curAns[:])
return
hashMap = set()
for j in xrange(size):
if nums[j] in hashMap:
continue
else:
hashMap.add(nums[j])
if nums[j] not in usedNums:
usedNums.add(nums[j])
curAns.append(nums[j])
self.backTrack(nums,size,curAns,usedNums)
usedNums.remove(nums[j])
curAns.pop()
nums.sort()
backTrack(nums,0,[],usedNums)
print 'length: ',len(res)
return res
if __name__ == '__main__':
s = Solution()
nums = [1,1,2]
ans = s.permute(nums)
print 'AA: ',len(ans),ans
|
[
"="
] |
=
|
96aac0b4b4bb06d1a1361336110a66ef306f8784
|
cbda89443b351bb2047180dad4e300c13dc3df7f
|
/Crystals/Morpurgo_sp_outer/Jobs/TIPS_Pc/TIPS_Pc_cation_neut_inner0_outer2/TIPS_Pc_cation_neut_inner0_outer2.py
|
a0c28b5d437cb4a23e82114742f6ee0128900f05
|
[] |
no_license
|
sheridanfew/pythonpolarisation
|
080f52979f98d26360a46412a10c8e3f51ee4549
|
178e2684e9a239a8e60af5f7b1eb414ac5f31e92
|
refs/heads/master
| 2021-07-10T01:07:40.978790
| 2021-03-11T16:56:37
| 2021-03-11T16:56:37
| 96,101,351
| 0
| 0
| null | 2017-07-03T13:37:06
| 2017-07-03T10:54:52
| null |
UTF-8
|
Python
| false
| false
| 6,693
|
py
|
import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='TIPS_Pc_cation_neut_inner0_outer2'
#For crystals here, all cubic and centred at centre
insize=0
#number of TVs in each dir central mol is from edge of inner region
outsize=2
mols_cen=['TIPS_Pc_cation_aniso_cifstruct_chelpg.xyz']
mols_sur=['TIPS_Pc_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_TIPS_Pc_neut.xyz']
#From cif:
'''
TIPS
data_k01029
_cell_length_a 7.5650(15)
_cell_length_b 7.7500(15)
_cell_length_c 16.835(3)
_cell_angle_alpha 89.15(3)
_cell_angle_beta 78.42(3)
_cell_angle_gamma 83.63(3)
_cell_volume 960.9(3)
'''
#Get translation vectors:
a=7.565015/0.5291772109217
b=7.750015/0.5291772109217
c=16.8353/0.5291772109217
alpha=89.153*(pi/180)
beta=78.423*(pi/180)
gamma=83.633*(pi/180)
cif_unit_cell_volume=960.9/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(prot_neut_cry()._mols),1):
prot_neut_cry().calc_reorg(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,dips=d,oldUqd=Uqd)
print 'Reorg: ', prot_neut_cry()._reorgs[molincell][a][b][c]
f = open('reorg_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs()
print 'Job Completed Successfully.'
|
[
"sheridan.few@gmail.com"
] |
sheridan.few@gmail.com
|
e5af3a05af1c55f4de514b9b82f99141101c9200
|
8aa0d1d407bb1c66d01261f7e2c4e9832e856a2d
|
/experiments/experiments_gdsc/hyperparameter/plots/plot_nmtf_gibbs_hyperparameter.py
|
dd3218e5fb59f547aca48d1125e82075eea0af28
|
[] |
no_license
|
garedaba/BNMTF_ARD
|
59e3ec1dbfd2a9ab9f4ec61368ec06e3783c3ee4
|
0a89e4b4971ff66c25010bd53ee2622aeaf69ae9
|
refs/heads/master
| 2022-01-16T06:57:12.581285
| 2018-06-10T10:22:12
| 2018-06-10T10:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,261
|
py
|
"""
Plot the performances of NMTF Gibbs for different hyperparameter values, for
three different sparsity levels.
"""
import matplotlib.pyplot as plt
import numpy
''' Plot settings. '''
MSE_min, MSE_max = 600, 1400
values_lambda = [0.0001, 0.001, 0.01, 0.1, 1., 10., 100.]
fractions_unknown = [0.2, 0.5, 0.8]
folder_plots = "./"
folder_results = "./../results/"
plot_file = folder_plots+"nmtf_gibbs_hyperparameter.png"
''' Load in the performances. '''
performances = eval(open(folder_results+'nmtf_gibbs.txt','r').read())
average_performances = {
fraction: [
numpy.mean(performances[fraction][lamb])
for lamb in values_lambda
]
for fraction in fractions_unknown
}
''' Plot the performances - one line per fraction. '''
fig = plt.figure(figsize=(2.5,1.9))
fig.subplots_adjust(left=0.17, right=0.98, bottom=0.17, top=0.98)
plt.xlabel('lambdaF, lambdaS, lambdaG', fontsize=8, labelpad=1)
plt.xscale("log")
plt.xticks(fontsize=6)
plt.ylabel('MSE', fontsize=8, labelpad=1)
plt.yticks(range(0,MSE_max+1,200),fontsize=6)
plt.ylim(MSE_min, MSE_max)
for fraction in fractions_unknown:
x = values_lambda
y = average_performances[fraction]
plt.plot(x, y, label='Fraction %s' % fraction)
plt.savefig(plot_file, dpi=600)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
285b5d35eb6f94c89715ad4fe68307437cf9ffc0
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/302/92006/submittedfiles/testes.py
|
8d4dc26344d08e3707ea45e11e79240ce3625d53
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,405
|
py
|
# Build a small list, look up its last element, and report its length.
lista1 = [1, 3, 4]
lista1[-1]  # last element; the value is discarded, exactly as in the original
print(len(lista1))
'''a = [8.0 , 5.0 , 10.0 , 5.0]
print(a)
print(len(a))
a.append(0.0)
print(len(a))
for i in range(len(a)-1, 0 , -1):
if i ==1:
a[1] = 2.0
else:
a[i] = a[i-1]
print(a)
print(len(a))
'''
'''
a = []
for i in range(1,5,1):
a.append(float(input('Digite o elemento: ')))
print(a)
print(sum(a))
print(len(a))
del a[1]
print(' a é igual: ', a)
print(len(a))
'''
'''
a = []
for i in range(1,11,1):
a.append(float(input('Digite o elemento: ')))
print(a)
for i in range(9, -1, -1):
print(a[i])
'''
'''
while(True):
n = int(input('DIgite o número de notas: '))
if n > 0:
break
notas = []
for i in range(0,n,1):
notas.append(float(input('Digite a nota%d: ' %(i+1))))
media = 0
for i in range(0,n,1):
media += notas[i]/n
print(notas)
print(media)
'''
'''
from minha_bib import primo
n = int(input('Digite n: '))
if primo(n):
print('Primo')
else:
print('Não é primo ')
'''
#exercício 15
'''
n = int(input('Digite o valor de n: '))
if n > 9999999 and n <=99999999:
soma = 0
while(n!=0):
resto = n%10
n = (n-resto)//10
soma = soma + resto
print(soma)
else:
print('Não Sei')
'''
#exercício 16
'''
while(True):
t1 = int(input('Digite o número de tomadas da T1: '))
t2 = int(input('Digite o número de tomadas da T2: '))
t3 = int(input('Digite o número de tomadas da T3: '))
t4 = int(input('Digite o número de tomadas da T4: '))
if t1 > 0 and t2 > 0 and t3 > 0 and t4 > 0:
n = t1 + (t2-1) + (t3-1) + (t4-1)
print(n)
break
else:
print("O NÚMERO DE TOMADAS TEM QUE SER MAIOR QUE 0, DIGITE NOVAMENTE\n")
'''
#Exercício 17
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
c = int(input('Digite o terceiro número: '))
d = int(input('Digite o quarto número: '))
if a > b and b < c and c > d:
print('S')
elif a < b and b > c and c > d:
print('S')
elif c > b and c > d and a < b:
print('S')
elif d > c and c > b and b > a:
print('S')
elif a > b and b == c and c == d:
print('S')
elif a > b and b < c and c == d:
print('S')
elif b > a and b > c and c == d:
print('S')
elif c > b and c > d and a == b:
print('S')
elif d > c and b == c and b == a:
print('S')
elif d > c and c < b and a == b:
print('S')
else:
print('N')
'''
#Exercício 20
'''
a = int(input('Digite o primeiro número: '))
b = int(input('Digite o segundo número: '))
for i in range(1000000,0,-1):
if a%i == 0 and b%i == 0:
print(i)
break
'''
#Exercício 21
'''
n = int(input('Digite n: '))
a = int(input('Digite a: '))
b = int(input('Digite b: '))
i = 2
while i <= n+1:
if i%a!=0 and i%b!=0:
n = n+1
if i%a == 0 or i%b == 0:
print(i)
i = i +1
'''
#Exercício 22
'''
while(True):
p = int(input(' Digite p: '))
q = int(input(' Digite q: '))
if q >= p :
break
if str(p) in str(q):
print('S')
else:
print('N')
'''
#Fatorial
'''
while(True):
while(True):
n = int(input('Digite um numero positivo: '))
if n >=0:
break
f = 1
for i in range(2,n+1,1):
f = f*i
print('%d!=%d' %(n,f))
opt = input('deseja continuar? [S ou N]')
if opt == 'N':
print('\n\nATE BREVE!')
break
'''
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
7e74abaeb0078b3ee92242a7cc866c13d76bc37f
|
81982a278946fab96d74e3f711c937647faec036
|
/Trabalhos/a1.py
|
32584fb6bf8a53c7a44f632933f6fc2cdb41d8aa
|
[] |
no_license
|
juanengml/Prog1UTFPR
|
3f1b71888a0883a4e12922a0c09cce622ca27458
|
aca289ffece71b4ca4339fa8779a1d2a9076aecc
|
refs/heads/master
| 2021-06-23T09:58:37.167188
| 2019-06-14T01:21:51
| 2019-06-14T01:21:51
| 145,451,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
# Build two 3x3 matrices and print the result of multiplying them.
# (Original comment, in Portuguese, asked to read two 3x3 matrices and
# show their product; the values are hard-coded here.)
import numpy as np

# NOTE: np.matrix is deprecated in modern NumPy, but it is kept because
# switching to np.array would change the printed representation.
a = np.matrix('1 2 3 ; 4 5 6; 7 8 9')
b = np.matrix('1 2 3 ; 4 5 6; 7 8 9')

# print() works on both Python 2 and 3; the original used the
# Python-2-only statement form `print np.dot(a,b)`, which is a syntax
# error under Python 3.
print(np.dot(a, b))
|
[
"juanengml@gmail.com"
] |
juanengml@gmail.com
|
e4d38da92d86aa517c776e552be806858ea7e31e
|
948d84d2e3fc04e353a11384d8570308174242f5
|
/17-Numpy/numpy-indexing.py
|
11653d3652d5b8b607738f0216cf7655bc401292
|
[] |
no_license
|
omerfarukcelenk/PythonMaster
|
a0084a800b8a41cd2ad538a7ca3687c26dc679ec
|
0db8f8b0ea2e1c2d810c542068cfcf1a3615f581
|
refs/heads/main
| 2023-04-16T17:42:05.501904
| 2021-04-26T21:19:27
| 2021-04-26T21:19:27
| 361,896,109
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
# Tutorial: NumPy indexing and slicing on 1-D and 2-D arrays.
# Each line overwrites `result`; uncomment the print() below to inspect
# whichever expression you are studying.
import numpy as np
numbers = np.array([0,5,10,15,20,25,50,75])
result = numbers[5]      # single element: 25
result = numbers[-1]     # last element: 75
result = numbers[0:3]    # slice [start:stop) -> first three elements
result = numbers[:3]     # implicit start
result = numbers[3:]     # implicit stop
result = numbers[::]     # full slice
result = numbers[::-1]   # reversed
numbers2 = np.array([[0,5,10],[15,20,25],[50,75,85]])
result = numbers2[0]     # first row
result = numbers2[2]     # last row
result = numbers2[0,2]   # row 0, column 2
result = numbers2[2,1]   # row 2, column 1
result = numbers2[:,2]   # whole column 2
result = numbers2[:,0]   # whole column 0
result = numbers2[:,0:2] # first two columns
result = numbers2[-1,:]  # last row
result = numbers2[:2,:2] # top-left 2x2 sub-array
# print(result)
arr1 = np.arange(0,10)
# arr2 = arr1  # would alias (reference) arr1, not copy it
arr2 = arr1.copy()  # independent copy: mutating arr2 leaves arr1 intact
arr2[0] = 20
print(arr1)
print(arr2)
|
[
"omerfar0133@gmail.com"
] |
omerfar0133@gmail.com
|
68d8399c5199cd6c1ac9a2c275edb439b8a5ab47
|
c66955c6fc178955c2024e0318ec7a91a8386c2d
|
/programQuick/chapterEleven/mapIt.py
|
f66811ddbe725a952063e3f0d855d57f0bd18aa5
|
[] |
no_license
|
duheng18/python-study
|
a98642d6ee1b0043837c3e7c5b91bf1e28dfa588
|
13c0571ac5d1690bb9e615340482bdb2134ecf0e
|
refs/heads/master
| 2022-11-30T17:36:57.060130
| 2019-11-18T07:31:40
| 2019-11-18T07:31:40
| 147,268,053
| 1
| 0
| null | 2022-11-22T03:36:51
| 2018-09-04T00:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 353
|
py
|
# Open Google Maps in the default browser for an address taken from the
# command line (if arguments were given) or from the clipboard.
import webbrowser, sys, pyperclip
if len(sys.argv) > 1:
    # Get address from command line.
    address = ' '.join(sys.argv[1:])
else:
    # Get address from clipboard.
    # NOTE(review): this copy() overwrites the clipboard with a demo
    # address *before* pasting, so the user's own clipboard content is
    # never used -- presumably a leftover from testing; confirm.
    pyperclip.copy('mapit 870 Valencia St, San Francisco, CA 94110')
    address = pyperclip.paste()
print(address)
webbrowser.open('https://www.google.com/maps/place/'+address)
|
[
"emaildh@163.com"
] |
emaildh@163.com
|
a41fbaec0c7870b206597745a26e289cb91943e7
|
4c9c2940ef3a07e2756fcceddf01acd384ebde01
|
/Python/[5 kyu] emirps.py
|
4550d94ea211e128c3446713211ba9db63e83b25
|
[
"MIT"
] |
permissive
|
KonstantinosAng/CodeWars
|
7d3501a605f7ffecb7f0b761b5ffe414e2f1983a
|
157818ece648454e882c171a71b4c81245ab0214
|
refs/heads/master
| 2023-04-11T09:44:27.480064
| 2023-03-26T21:37:07
| 2023-03-26T21:37:07
| 245,296,762
| 6
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
# see https://www.codewars.com/kata/55a29405bc7d2efaff00007c/train/python
from TestFunction import Test
def is_prime(num):
    """Return True if ``num`` is a prime number.

    Fixes two defects in the original:
    - 2 was reported composite (the ``num % 2 == 0`` shortcut caught it);
    - values below 2 (0, 1, negatives) were reported prime because the
      trial-division loop never executed and the for/else returned True.
    The original's trailing ``return False`` after the for/else was
    unreachable and has been removed.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(num).
    for i in range(3, int(num ** 0.5) + 1, 2):
        if num % i == 0:
            return False
    return True
def is_emrip(num):
    """Return True if ``num`` is an emirp: its digit reversal is a
    *different* number that is prime (palindromic values are excluded)."""
    reversed_num = int(str(num)[::-1])
    if reversed_num == num:
        return False
    return is_prime(reversed_num)
def primes(n):
    """Return every odd prime below ``n``, in ascending order.

    2 is deliberately excluded: the kata only needs candidates whose
    reversal can also be prime, and the range starts at 3.
    """
    return list(filter(is_prime, range(3, n, 2)))
def find_emirp(n):
    """Return ``[count, largest, sum]`` of all emirps below ``n``.

    An emirp is a prime whose digit reversal is a different prime.
    The original always sieved primes up to 10**6 regardless of ``n``,
    which was wasteful for small ``n`` and silently truncated results
    for ``n`` > 10**6; sieving only up to ``n`` fixes both while
    producing identical results for every n <= 10**6 (len/max/sum are
    order-independent, so dropping the intermediate set is safe).
    """
    emirps = [p for p in primes(n) if is_emrip(p)]
    return [len(emirps), max(emirps) if emirps else 0, sum(emirps)]
# Kata test cases. `Test` comes from the local TestFunction helper and
# mirrors the Codewars test.assert_equals API.
test = Test(None)
test.assert_equals(find_emirp(10), [0, 0, 0])
test.assert_equals(find_emirp(50), [4, 37, 98])
test.assert_equals(find_emirp(100), [8, 97, 418])
test.assert_equals(find_emirp(200), [15, 199, 1489])
test.assert_equals(find_emirp(500), [20, 389, 3232])
test.assert_equals(find_emirp(750), [25, 743, 6857])
# Large inputs double as a (rough) performance check.
test.assert_equals(find_emirp(915505), [9278, 915283, 3303565930])
test.assert_equals(find_emirp(530492), [6700, 399941, 1317845448])
|
[
"kwstantinos.agelopoulos@outlook.com"
] |
kwstantinos.agelopoulos@outlook.com
|
da327466a9c5966169ed0e73790c57d204126c2b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_74/244.py
|
1aad3f70f9fbf37a7323274a79680d37008e458c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,237
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import os
def parse_sequence(fileDescriptor):
    # Returns a sequence to complete
    """Parse one test-case line from the already-open input file.

    The line has the form "<n> <color> <pos> <color> <pos> ...", where
    each color is 'O' or 'B' and each pos is an integer button position.
    Returns (n, full_sequence, O_only, B_only); every entry in the three
    lists is a (position, color) tuple, in input order.
    """
    f = fileDescriptor
    sequence = f.readline()
    sequence = sequence.split(' ')
    n = int(sequence[0])  # number of buttons announced on the line
    sequence_a = []       # buttons assigned to robot 'O'
    sequence_b = []       # buttons assigned to robot 'B'
    sequence_r = []       # full press order, both robots interleaved
    # Tokens alternate color, position; track which half we expect next.
    have_color = False
    last_color = 'O'
    for i in xrange(1,len(sequence)):
        if not have_color and (sequence[i] == 'O' or sequence[i] == 'B'):
            have_color = True
            last_color = sequence[i]
        elif have_color and (sequence[i] != 'O' and sequence[i] != 'B'):
            t = (int(sequence[i]), last_color)
            if t[1] == 'O':
                sequence_a.append(t)
            else:
                sequence_b.append(t)
            sequence_r.append(t)
            have_color = False
        else:
            # Tokens out of order -- bail out of the whole program.
            print "Badformed Input"
            exit()
    return n, sequence_r, sequence_a, sequence_b
def min_time(n, sequence, seqO, seqB):
    """Simulate both robots and return the minimum total time.

    Appears to be Google Code Jam 2011 "Bot Trust": robots O and B start
    at position 1, walk one position per second, and the buttons must be
    pressed in the order given by ``sequence``.  While one robot walks
    to and presses its button, the other advances toward its own next
    target for the same number of seconds.

    ``n`` (the button count) is unused here; it is kept so the function
    accepts the tuple returned by parse_sequence directly.
    ``seqO``/``seqB`` are consumed in place (pop(0)) as presses happen.
    """
    posO = 1
    posB = 1
    cTime = 0
    for step in sequence:
        if step[1] == 'O':
            # O walks to its button and presses it; that takes toComplete s.
            toComplete = timeToComplete(posO, step[0])
            cTime += toComplete
            posO = step[0]
            seqO.pop(0)
            if seqB:
                # Is not empty
                # B uses the same time to walk toward its next target.
                posB = newPosition(posB, seqB[0][0], toComplete)
        else:
            # Symmetric case: B presses while O walks.
            toComplete = timeToComplete(posB, step[0])
            cTime += toComplete
            posB = step[0]
            seqB.pop(0)
            if seqO:
                # Is not empty
                posO = newPosition(posO, seqO[0][0], toComplete)
    return cTime
def timeToComplete(currPos, destPos):
    """Seconds to walk from currPos to destPos (one step each) plus one
    second, presumably for the button press -- confirm against problem
    statement."""
    return abs(currPos - destPos) + 1
def newPosition(currPos, destPos, time):
    """Position reached after walking from currPos toward destPos for
    ``time`` seconds at one step per second, never overshooting destPos."""
    # min(timeToComplete(...) - 1, time) in the original is just the
    # distance capped by the available time.
    advance = min(abs(currPos - destPos), time)
    if destPos >= currPos:
        return currPos + advance
    return currPos - advance
def solve(fileName):
    """Read the Code Jam input file and print 'Case #i: t' per test case."""
    try:
        f = open(fileName, "r")
    except:
        # NOTE(review): bare except silently exits on *any* failure
        # (even KeyboardInterrupt); narrowing to IOError and printing a
        # message would be safer. The file is also never closed --
        # acceptable for a one-shot contest script.
        exit()
    test_cases = int(f.readline())  # first line: number of cases
    for i in xrange(test_cases):
        args = parse_sequence(f)
        result = min_time(*args)
        print "Case #%d: %d" %(i+1, result)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
58bbcb0b913a6f6d65e9cc3f765cf80b1e6d8d8d
|
f4b60f5e49baf60976987946c20a8ebca4880602
|
/lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fabric/rssnmppol.py
|
4738ca78412d8a6382b312d0f46b6ee434811e5c
|
[] |
no_license
|
cqbomb/qytang_aci
|
12e508d54d9f774b537c33563762e694783d6ba8
|
a7fab9d6cda7fadcc995672e55c0ef7e7187696e
|
refs/heads/master
| 2022-12-21T13:30:05.240231
| 2018-12-04T01:46:53
| 2018-12-04T01:46:53
| 159,911,666
| 0
| 0
| null | 2022-12-07T23:53:02
| 2018-12-01T05:17:50
|
Python
|
UTF-8
|
Python
| false
| false
| 8,086
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsSnmpPol(Mo):
    """
    A source relation to the SNMP policy.
    """
    # NOTE: autogenerated ACI model class ("written by zen warriors, do
    # not modify!") -- regenerate from the model rather than hand-edit.
    # Class-level meta: identity, cardinality and access masks.
    meta = NamedSourceRelationMeta("cobra.model.fabric.RsSnmpPol", "cobra.model.snmp.Pol")
    meta.targetNameProps["name"] = "tnSnmpPolName"
    meta.cardinality = SourceRelationMeta.N_TO_ONE
    meta.moClassName = "fabricRsSnmpPol"
    meta.rnFormat = "rssnmpPol"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "SNMP Policy"
    meta.writeAccessMask = 0x8e700000001
    meta.readAccessMask = 0x8e700000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = False
    meta.isContextRoot = False
    # Containment hierarchy: allowed children / parents / base classes.
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.parentClasses.add("cobra.model.fabric.PodPGrp")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.reln.To")
    meta.superClasses.add("cobra.model.pol.NToRef")
    meta.rnPrefixes = [
        ('rssnmpPol', False),
        ]
    # Property metadata; one PropMeta block per managed-object property.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "monPolDn", "monPolDn", 13999, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)
    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)
    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 11558, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4571
    prop.defaultValueStr = "snmpPol"
    prop._addConstant("snmpPol", None, 4571)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
    prop.label = "Target-context"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tContextDn", prop)
    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)
    prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
    prop.label = "Target-rn"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("tRn", prop)
    prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "name"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)
    # tnSnmpPolName is the only user-configurable property (the target
    # policy's name, see meta.targetNameProps above).
    prop = PropMeta("str", "tnSnmpPolName", "tnSnmpPolName", 11557, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("tnSnmpPolName", prop)
    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)
    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        """Create the relation under ``parentMoOrDn``; the RN has no
        naming properties, hence the empty namingVals list."""
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"collinsctk@qytang.com"
] |
collinsctk@qytang.com
|
4a7f9b779862e39bed7fde83a238b96e4b69f2f1
|
fe4c3905ec0e2d8fa5100454c49a863bda3d05ab
|
/Code/Mantid/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectResolution.py
|
3fe3e42c49c3011afbab8d24a9adf8e2cf6fcb2b
|
[] |
no_license
|
mkoennecke/mantid
|
11f16fe573056d70c119c4d6fb6984b7008cb8e6
|
c0a8e5d97cde6cc28abb8c7b1b5c056986a81fec
|
refs/heads/master
| 2021-01-18T11:51:28.997458
| 2015-02-13T10:48:51
| 2015-02-13T10:48:51
| 11,472,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,587
|
py
|
from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
from mantid import config, logger
class IndirectResolution(DataProcessorAlgorithm):
    """Workflow algorithm that builds an instrument resolution workspace
    from one or more indirect-geometry calibration runs: reduce, scale,
    subtract a flat background, rebin, and optionally smooth."""

    def category(self):
        # Mantid algorithm-browser categories.
        return 'Workflow\\Inelastic;PythonAlgorithms;Inelastic'

    def summary(self):
        return 'Creates a resolution workspace'

    def PyInit(self):
        """Declare the algorithm's input/output properties."""
        self.declareProperty(StringArrayProperty(name='InputFiles'),
                             doc='Comma seperated list if input files')
        self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
                             optional=PropertyMode.Optional,
                             direction=Direction.Output),
                             doc='Output resolution workspace (if left blank a name will be gernerated automatically)')
        self.declareProperty(name='Instrument', defaultValue='',
                             validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA']),
                             doc='Instrument used during run')
        self.declareProperty(name='Analyser', defaultValue='',
                             validator=StringListValidator(['graphite', 'mica', 'fmica']),
                             doc='Analyser used during run')
        self.declareProperty(name='Reflection', defaultValue='',
                             validator=StringListValidator(['002', '004', '006']),
                             doc='Reflection used during run')
        self.declareProperty(IntArrayProperty(name='DetectorRange', values=[0, 1]),
                             doc='Range of detetcors to use in resolution calculation')
        self.declareProperty(FloatArrayProperty(name='BackgroundRange', values=[0.0, 0.0]),
                             doc='Energy range to use as background')
        self.declareProperty(name='RebinParam', defaultValue='', doc='Rebinning parameters (min,width,max)')
        self.declareProperty(name='ScaleFactor', defaultValue=1.0, doc='Factor to scale resolution curve by')
        self.declareProperty(name='Smooth', defaultValue=False, doc='Apply WienerSmooth to resolution')
        self.declareProperty(name='Plot', defaultValue=False, doc='Plot resolution curve')
        self.declareProperty(name='Save', defaultValue=False, doc='Save resolution workspace as a Nexus file')

    def PyExec(self):
        """Run the reduction and post-processing chain."""
        from IndirectCommon import StartTime, EndTime, getWSprefix
        import inelastic_indirect_reducer

        StartTime('IndirectResolution')
        self._setup()

        # Reduce the calibration run(s) into a single grouped workspace.
        InelasticIndirectReduction(Instrument=self._instrument,
                                   Analyser=self._analyser,
                                   Reflection=self._reflection,
                                   Grouping='All',
                                   SumFiles=True,
                                   InputFiles=self._input_files,
                                   DetectorRange=self._detector_range,
                                   OutputWorkspace='__icon_ws_group')

        icon_ws = mtd['__icon_ws_group'].getItem(0).getName()

        # Auto-generate the output name only when the caller left it blank.
        if self._out_ws == "":
            self._out_ws = getWSprefix(icon_ws) + 'res'

        if self._scale_factor != 1.0:
            Scale(InputWorkspace=icon_ws, OutputWorkspace=icon_ws, Factor=self._scale_factor)

        # Subtract the mean background over the requested energy range,
        # then rebin to the requested binning.
        CalculateFlatBackground(InputWorkspace=icon_ws, OutputWorkspace=self._out_ws,
                                StartX=self._background[0], EndX=self._background[1],
                                Mode='Mean', OutputMode='Subtract Background')
        Rebin(InputWorkspace=self._out_ws, OutputWorkspace=self._out_ws, Params=self._rebin_string)

        if self._smooth:
            # Smooth into a temp workspace so the logs can be copied back.
            WienerSmooth(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
            CopyLogs(InputWorkspace=self._out_ws, OutputWorkspace='__smooth_temp')
            RenameWorkspace(InputWorkspace='__smooth_temp', OutputWorkspace=self._out_ws)

        self._post_process()
        self.setProperty('OutputWorkspace', self._out_ws)

        EndTime('IndirectResolution')

    def _setup(self):
        """
        Gets algorithm properties.
        """
        self._input_files = self.getProperty('InputFiles').value
        self._out_ws = self.getPropertyValue('OutputWorkspace')

        self._instrument = self.getProperty('Instrument').value
        self._analyser = self.getProperty('Analyser').value
        self._reflection = self.getProperty('Reflection').value

        self._detector_range = self.getProperty('DetectorRange').value
        self._background = self.getProperty('BackgroundRange').value
        self._rebin_string = self.getProperty('RebinParam').value
        self._scale_factor = self.getProperty('ScaleFactor').value
        self._smooth = self.getProperty('Smooth').value

        self._plot = self.getProperty('Plot').value
        self._save = self.getProperty('Save').value

    def _post_process(self):
        """
        Handles adding logs, saving and plotting.
        """
        # NOTE(review): the variable name suggests "a scale factor was
        # used", but the comparison is `== 1.0`, so 'scale' is logged as
        # True (and 'scale_factor' recorded) only when the factor is
        # exactly the default 1.0 -- this looks inverted; confirm against
        # the original workflow before changing behaviour.
        use_scale_factor = self._scale_factor == 1.0
        AddSampleLog(Workspace=self._out_ws, LogName='scale', LogType='String', LogText=str(use_scale_factor))
        if use_scale_factor:
            AddSampleLog(Workspace=self._out_ws, LogName='scale_factor', LogType='Number', LogText=str(self._scale_factor))

        AddSampleLog(Workspace=self._out_ws, LogName='res_smoothing_applied', LogType='String', LogText=str(self._smooth))

        AddSampleLog(Workspace=self._out_ws, LogName='back_start', LogType='Number', LogText=str(self._background[0]))
        AddSampleLog(Workspace=self._out_ws, LogName='back_end', LogType='Number', LogText=str(self._background[1]))

        # Only record rebin bounds when the param string is min,width,max.
        rebin_params = self._rebin_string.split(',')
        if len(rebin_params) == 3:
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_low', LogType='Number', LogText=rebin_params[0])
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_width', LogType='Number', LogText=rebin_params[1])
            AddSampleLog(Workspace=self._out_ws, LogName='rebin_high', LogType='Number', LogText=rebin_params[2])

        # NOTE(review): PyExec also sets this property after calling us,
        # so this call appears redundant -- harmless, but confirm.
        self.setProperty('OutputWorkspace', self._out_ws)

        if self._save:
            logger.information("Resolution file saved to default save directory.")
            SaveNexusProcessed(InputWorkspace=self._out_ws, Filename=self._out_ws + '.nxs')

        if self._plot:
            from IndirectImport import import_mantidplot
            mtd_plot = import_mantidplot()
            mtd_plot.plotSpectrum(self._out_ws, 0)

# Register the algorithm with Mantid.
AlgorithmFactory.subscribe(IndirectResolution)
|
[
"dan@dan-nixon.com"
] |
dan@dan-nixon.com
|
435e90d2b0debc710dd2aca553b76e51ea39e688
|
f4434c85e3814b6347f8f8099c081ed4af5678a5
|
/sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_big_data_pools_operations.py
|
2b2366e730881713afa1086c0e769bf1a8d28656
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
yunhaoling/azure-sdk-for-python
|
5da12a174a37672ac6ed8e3c1f863cb77010a506
|
c4eb0ca1aadb76ad892114230473034830116362
|
refs/heads/master
| 2022-06-11T01:17:39.636461
| 2020-12-08T17:42:08
| 2020-12-08T17:42:08
| 177,675,796
| 1
| 0
|
MIT
| 2020-03-31T20:35:17
| 2019-03-25T22:43:40
|
Python
|
UTF-8
|
Python
| false
| false
| 6,664
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BigDataPoolsOperations(object):
"""BigDataPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration and (de)serializers
        supplied by the service client that owns this operation group."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfoListResult"
        """List Big Data Pools.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfoListResult, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfoListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE: AutoRest-generated method; edits are lost on regeneration.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.BigDataPoolResourceInfoListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.list.metadata['url'] # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Send the request through the synchronous pipeline.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('BigDataPoolResourceInfoListResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/bigDataPools'} # type: ignore
    def get(
        self,
        big_data_pool_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BigDataPoolResourceInfo"
        """Get Big Data Pool.

        NOTE: autogenerated SDK operation — regenerate rather than hand-edit.

        :param big_data_pool_name: The Big Data Pool name.
        :type big_data_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BigDataPoolResourceInfo, or the result of cls(response)
        :rtype: ~azure.synapse.artifacts.models.BigDataPoolResourceInfo
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BigDataPoolResourceInfo"]
        # Same status→exception mapping as list(); extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01-preview"
        accept = "application/json"
        # Construct URL; pool name is URL-quoted, endpoint is not.
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'bigDataPoolName': self._serialize.url("big_data_pool_name", big_data_pool_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorContract, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('BigDataPoolResourceInfo', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/bigDataPools/{bigDataPoolName}'}  # type: ignore
|
[
"noreply@github.com"
] |
yunhaoling.noreply@github.com
|
8be72a52068001cc66bd59da148af82ea5b224a8
|
db575f3401a5e25494e30d98ec915158dd7e529b
|
/BIO_Stocks/PMD.py
|
f9d9498e20f4a6d77b53ce8653cbb90641628f67
|
[] |
no_license
|
andisc/StockWebScraping
|
b10453295b4b16f065064db6a1e3bbcba0d62bad
|
41db75e941cfccaa7043a53b0e23ba6e5daa958a
|
refs/heads/main
| 2023-08-08T01:33:33.495541
| 2023-07-22T21:41:08
| 2023-07-22T21:41:08
| 355,332,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,134
|
py
|
import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control=None):
    """Scrape the newest Psychemedics (PMD) press release and store it.

    Fetches the investor-relations news listing, takes the most recent
    article and, if it is dated today (per ``validateday``), inserts it via
    ``InsertData``. Errors are logged best-effort via ``Insert_Logging``.

    :param id_control: identifier of the controlling run, used only for
        error logging. Defaults to None so the script can run standalone
        (the original required it, making ``main()`` in the __main__ guard
        raise TypeError).
    """
    try:
        url = 'https://investors.psychemedics.com/sec-filings-and-press-releases/news-releases/default.aspx'
        # Browser-like User-Agent: some IR sites reject default HTTP clients.
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        result = requests.get(url, headers=headers)
        soup = BeautifulSoup(result.content.decode(), 'html.parser')
        # Each release is a div.irwTableRowItem row; the first row is the newest.
        articles = soup.findAll('div', attrs={'class': 'irwTableRowItem'})
        first_article = articles[0]
        article_date = first_article.find('div', attrs={'class': 'irwPRDate'})
        article_desc = first_article.find('h4')
        v_article_date = article_date.text.strip()
        # Only persist the article when it was published today.
        istoday, v_art_date = validateday(v_article_date)
        if istoday:
            # Ticker symbol is derived from this file's name (PMD.py -> PMD).
            v_ticker = os.path.basename(__file__).replace(".py", "")
            v_url = article_desc.a.get('href')
            v_description = article_desc.text.strip()
            now = datetime.now()
            print("URL: " + v_url)
            print("DESCRIPTION: " + v_description)
            print("ARTICLE_DATE: " + str(now))
            # Relative article links fall back to the listing-page URL.
            if "https://" in v_url:
                InsertData(v_ticker, v_description, v_url, v_art_date)
            else:
                InsertData(v_ticker, v_description, url, v_art_date)
    except Exception:
        # Best-effort scraper: log the failure and keep the batch running.
        error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
#InsertData()
if __name__ == "__main__":
    # main() declares an id_control parameter for error logging; pass None
    # when run standalone (a bare main() call raised TypeError).
    main(None)
|
[
"andisc_3@hotmail.com"
] |
andisc_3@hotmail.com
|
018b2906e7a41541d957764ddd1c47e355d03386
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2464487_0/Python/CuteCube/ra1.py
|
dbc146df38875aae8ae187eac50411365e303fb4
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
#!/usr/bin/env python
import math
def main():
    """Read (r, t) cases from input.txt and print, for each, the largest
    integer k with ``need(k, r) <= t`` (i.e. 2k^2 + (2r-1)k <= t).

    Starts from the positive root of 2k^2 + (2r-1)k - t = 0, then walks
    up/down to the exact integer. Python 2 code (``xrange``, ``long``,
    print statements).
    """
    f = open('input.txt', 'r')
    total_T = int(f.readline())
    #print total_T
    for T in xrange(1,total_T+1):
        r,t = f.readline().split()
        r = long(r)
        t=long(t)
        # Initial estimate: solve 2k^2 + (2r - 1)k - t = 0 in floats.
        b = 2*r -1.0
        a = 2.0
        c = -t
        k = (-b + math.sqrt(b*b - 4*a*c))/2/a
        # k = 1
        k = long(k)
        # Float truncation may land a step off; adjust until k is exact.
        while not (need(k ,r) <= t and need(k+1, r) > t):
            if need(k, r) < t:
                k += 1
            else:
                #k = max(long(k/2)+1, long(k*0.75))
                k -= 1
        print "Case #{}: {}".format(T, long(k))
def need(k, r):
    """Capacity consumed by the first k steps at offset r: 2*k^2 + (2*r - 1)*k."""
    return (2 * k + (2 * r - 1)) * k
if __name__ == '__main__':
    main()  # expects input.txt in the current working directory
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
36675db792eaa04c9b5c9732126b47ebda3a154f
|
43cdd7cb26fe44b1ed7de6a46f8b5e680c9b1372
|
/openpeerpower/generated/config_flows.py
|
244c7e0f950d8f44b848d46e9680ed38ab8aaabb
|
[
"Apache-2.0"
] |
permissive
|
OpenPeerPower/Open-Peer-Power
|
02ec5c133564b47c6f72f669e844a666643cacd6
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
refs/heads/master
| 2022-08-16T09:38:49.994009
| 2021-05-29T03:54:13
| 2021-05-29T03:54:13
| 183,174,237
| 1
| 0
|
Apache-2.0
| 2022-07-15T18:43:02
| 2019-04-24T07:35:47
|
Python
|
UTF-8
|
Python
| false
| false
| 246
|
py
|
"""Automatically generated by oppfest.
To update, run python3 -m script.oppfest
"""
# fmt: off
FLOWS = [
"almond",
"daikin",
"dialogflow",
"homekit_controller",
"met",
"mobile_app",
"mqtt",
"zha",
"zwave"
]
|
[
"pcaston@arach.net.au"
] |
pcaston@arach.net.au
|
20adba546311eb8ef3f505a79525f18a05e924ff
|
4fd65dc15ed0e5849c440a41d81036d1ff47ea96
|
/tests/integration/test_deploy_and_evaluate_model_auth_on.py
|
56f92793bb30c984b1b9583ee2c3e49b30cd861f
|
[
"MIT"
] |
permissive
|
tableau/TabPy
|
20ae3dacb958bf2d0e48fc36220366cb3db412bb
|
96aa26252b6115bd2788f9526680ec1b34f1c86f
|
refs/heads/master
| 2023-08-29T13:47:21.507211
| 2023-06-21T21:30:40
| 2023-06-21T21:30:40
| 69,400,040
| 1,527
| 633
|
MIT
| 2023-06-21T21:30:42
| 2016-09-27T21:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
from . import integ_test_base
class TestDeployAndEvaluateModelAuthOn(integ_test_base.IntegTestBase):
    """Integration test: deploy models, then /evaluate one with HTTP basic
    authentication enabled (see the *_auth.conf config below)."""
    def _get_config_file_name(self) -> str:
        # Config variant that turns authentication on.
        return "./tests/integration/resources/deploy_and_evaluate_model_auth.conf"
    def _get_port(self) -> str:
        return "9009"
    def test_deploy_and_evaluate_model(self):
        # Uncomment the following line to preserve
        # test case output and other files (config, state, ect.)
        # in system temp folder.
        # self.set_delete_temp_folder(False)
        self.deploy_models(self._get_username(), self._get_password())
        headers = {
            "Content-Type": "application/json",
            # base64("user1:P@ssw0rd") — must match the credentials in the conf.
            "Authorization": "Basic dXNlcjE6UEBzc3cwcmQ=",
            "Host": "localhost:9009",
        }
        payload = """{
                "data": { "_arg1": ["happy", "sad", "neutral"] },
                "script":
                "return tabpy.query('Sentiment Analysis',_arg1)['response']"
            }"""
        conn = self._get_connection()
        conn.request("POST", "/evaluate", payload, headers)
        SentimentAnalysis_eval = conn.getresponse()
        self.assertEqual(200, SentimentAnalysis_eval.status)
        # Drain the body so the connection can be reused cleanly.
        SentimentAnalysis_eval.read()
|
[
"noreply@github.com"
] |
tableau.noreply@github.com
|
1685d2a9cf7e5dc726fffb430a61ba17869e53f8
|
4cce3b466591f7f8b9d58c1f8cae4dd0b6425b09
|
/classes dealing.py
|
09e2606008d31426022cdef988fb9cec1726491e
|
[] |
no_license
|
adityamangal1/hackerRank-solutions
|
4e5fc66785215688449f58176b0260e05fb0c404
|
102ee32f5984240939bf14e799a458d99388774b
|
refs/heads/master
| 2023-04-18T15:35:36.998087
| 2021-04-22T07:16:38
| 2021-04-22T07:16:38
| 297,935,486
| 13
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,770
|
py
|
import math
class Complex(object):
    """Complex number with arithmetic operators, modulus and a fixed
    two-decimal ``a.00+b.00i`` string form (HackerRank classes exercise)."""

    def __init__(self, real, imaginary):
        # Rectangular components.
        self.real = real
        self.imaginary = imaginary

    def __add__(self, no):
        total = complex(self.real, self.imaginary) + complex(no.real, no.imaginary)
        return Complex(total.real, total.imag)

    def __sub__(self, no):
        diff = complex(self.real, self.imaginary) - complex(no.real, no.imaginary)
        return Complex(diff.real, diff.imag)

    def __mul__(self, no):
        prod = complex(self.real, self.imaginary) * complex(no.real, no.imaginary)
        return Complex(prod.real, prod.imag)

    def __truediv__(self, no):
        # Multiply by the conjugate of the divisor; 'factor' is |no|^2.
        factor = no.real ** 2 + no.imaginary ** 2
        real_part = (self.real * no.real + self.imaginary * no.imaginary) / factor
        imag_part = (self.imaginary * no.real - self.real * no.imaginary) / factor
        return Complex(real_part, imag_part)

    def mod(self):
        """Return the modulus as a Complex with zero imaginary part."""
        return Complex((self.real ** 2 + self.imaginary ** 2) ** 0.5, 0)

    def __str__(self):
        a, b = self.real, self.imaginary
        # Zero imaginary part is always rendered as "+0.00i".
        if b == 0:
            return "%.2f+0.00i" % a
        if a == 0:
            sign = "+" if b >= 0 else "-"
            return "0.00%s%.2fi" % (sign, abs(b))
        sign = "+" if b > 0 else "-"
        return "%.2f%s%.2fi" % (a, sign, abs(b))
if __name__ == '__main__':
    # Two input lines, each "real imaginary" as floats.
    c = map(float, input().split())
    d = map(float, input().split())
    x = Complex(*c)
    y = Complex(*d)
    # One result per line: sum, difference, product, quotient, both moduli.
    print(*map(str, [x+y, x-y, x*y, x/y, x.mod(), y.mod()]), sep='\n')
|
[
"adityamangal0202@gmail.com"
] |
adityamangal0202@gmail.com
|
517e4b682e6b12974385b9c23201af4bebefd1d0
|
5679731cee36c537615d285ed72810f4c6b17380
|
/513_FindBottomLeftTreeValue.py
|
0de079fbf90fd9385df6647f65a7e451a7aa108a
|
[] |
no_license
|
manofmountain/LeetCode
|
6b76105190a9b62df65a7b56b6def4120498b9fa
|
718f688b3d316e8c10ef680d9c21ecd518d062f8
|
refs/heads/master
| 2021-01-12T03:41:48.318116
| 2017-07-18T12:35:58
| 2017-07-18T12:35:58
| 78,252,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
# 40.9%
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#from collections import deque
class Solution(object):
    """Leftmost value in the last row of a binary tree (LeetCode 513)."""

    def findBottomLeftValue(self, root):
        """Level-order traversal, remembering the first value of each level.

        :type root: TreeNode
        :rtype: int (0 when the tree is empty)
        """
        if not root:
            return 0
        level = [root]
        leftmost = root.val
        while level:
            # First node of the level is its leftmost.
            leftmost = level[0].val
            nxt = []
            for node in level:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            level = nxt
        return leftmost

    def findLeftMostNode(self, root):
        """BFS that enqueues right children before left; the last node
        visited is therefore the bottom-left one."""
        order = [root]
        i = 0
        while i < len(order):
            node = order[i]
            if node.right:
                order.append(node.right)
            if node.left:
                order.append(node.left)
            i += 1
        return order[-1].val
|
[
"noreply@github.com"
] |
manofmountain.noreply@github.com
|
ec0bfed2e04944f6a53b48dd4438719b1733cb75
|
699ff10c347dc9b6d5af7f531a1c941dbfecd558
|
/leetcode/python/232-implement-queue-using-stacks.py
|
cfbd49aa1d50363b1d16e3ac48c0bcd623bf7032
|
[] |
no_license
|
iampkuhz/OnlineJudge_cpp
|
71a7637c54d81be2aa066a6132aab31b798bbe6b
|
737b9bac5a73c319e46cda8c3e9d729f734d7792
|
refs/heads/master
| 2021-01-10T10:16:37.589855
| 2017-03-06T12:45:20
| 2017-03-06T12:45:20
| 24,891,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Notes:
You must use only standard operations of a stack -- which means only push to top, peek/pop from top, size, and is empty operations are valid.
Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque (double-ended queue), as long as you use only standard operations of a stack.
You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue).
"""
# 2次过,速度差不多 40-44ms
class Queue(object):
    """FIFO queue backed by a single list.

    Fix: the module docstring specifies queue semantics — pop() removes and
    peek() reads the *front* element. The previous code used ``ls.pop()`` /
    ``ls[-1]``, which operate on the back (stack behavior). Note pop(0) is
    O(n); a two-stack design gives amortised O(1).
    """

    def __init__(self):
        self.ls = []

    def push(self, x):
        """Append x at the back of the queue."""
        self.ls.append(x)

    def pop(self):
        """Remove and return the front element."""
        return self.ls.pop(0)

    def peek(self):
        """Return the front element without removing it."""
        return self.ls[0]

    def empty(self):
        """Return True when the queue holds no elements."""
        return len(self.ls) == 0
# 3次过, 36-44ms
class Queue(object):
    """Queue built from two stacks: ``ins`` receives pushes, ``out`` serves
    pops/peeks in FIFO order (amortised O(1) per operation)."""

    def __init__(self):
        """
        initialize your data structure here.
        """
        self.ins = []
        self.out = []

    def conv(self):
        """Drain the push stack into the pop stack, reversing the order."""
        while self.ins:
            self.out.append(self.ins.pop())

    def push(self, x):
        """
        :type x: int
        :rtype: nothing
        """
        self.ins.append(x)

    def pop(self):
        """
        :rtype: nothing
        """
        # Refill the output stack only when it is exhausted.
        if not self.out:
            self.conv()
        return self.out.pop()

    def peek(self):
        """
        :rtype: int
        """
        if not self.out:
            self.conv()
        return self.out[-1]

    def empty(self):
        """
        :rtype: bool
        """
        return not self.ins and not self.out
|
[
"iampkuhz@gmail.com"
] |
iampkuhz@gmail.com
|
168a729a213cb05a64c5b3b4dc1ab8aa2155d254
|
ac9e892c02af18cea990bb0a3f60071b34a03194
|
/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
|
fc2db995b809735e7cefe6fc0d8df2ffd185d4ee
|
[
"MIT"
] |
permissive
|
limsijie93/pytorch-pfn-extras
|
1323e796d59fe113ee86f631cc65ad44c7914a77
|
4b675fce8f4a420d87f1685423a9e62dbe598700
|
refs/heads/master
| 2022-09-18T09:18:25.459126
| 2020-06-04T04:43:47
| 2020-06-04T04:43:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,131
|
py
|
class ManualScheduleTrigger:
    """Trigger invoked at specified point(s) of iterations or epochs.
    This trigger accepts iterations or epochs indicated by given point(s).
    There are two ways to specify the point(s): iteration and epoch.
    ``iteration`` means the number of updates, while ``epoch`` means the number
    of sweeps over the training dataset. Fractional values are allowed
    if the point is a number of epochs; the trigger uses the ``iteration``
    and ``epoch_detail`` attributes defined by the updater.
    Args:
        points (int, float, or list of int or float): time of the trigger.
            Must be an integer or list of integer if unit is ``'iteration'``.
        unit (str): Unit of the time specified by ``points``. It must be
            either ``'iteration'`` or ``'epoch'``.
    Attributes:
        finished (bool): Flag that indicates whether or not this trigger will
            fire in the future. This flag is used to determine if the extension
            should be initialized after resume.
    """
    def __init__(self, points, unit):
        if unit not in ('epoch', 'iteration'):
            raise ValueError(
                'Trigger unit must be either \'epoch\' or \'iteration\'.')
        # Normalize a scalar point to a one-element list.
        self.points = (points if isinstance(points, list) else [points])
        self.unit = unit
        self.finished = False
        # Progress observed at the previous __call__; used to detect points
        # crossed between two consecutive calls.
        self._previous_iteration = 0
        self._previous_epoch_detail = 0.
    def __call__(self, manager):
        """Decides whether the extension should be called on this iteration.
        Args:
            manager (~pytorch_pfn_extras.training.ExtensionsManager):
                Manager object that this trigger is associated with.
                The updater associated with this manager is used to
                determine if the trigger should fire.
        Returns:
            bool: True if the corresponding extension should be invoked in this
            iteration.
        """
        updater = manager.updater
        if self.unit == 'epoch':
            epoch_detail = updater.epoch_detail
            previous_epoch_detail = self._previous_epoch_detail
            # if previous_epoch_detail is invalid value,
            # use the value of updater.
            if previous_epoch_detail < 0:
                previous_epoch_detail = updater.previous_epoch_detail
            # Fire when any configured point was crossed since the last call.
            fire = any(
                previous_epoch_detail < p <= epoch_detail
                for p in self.points)
            # NOTE(review): _finished_is_tmp is never set in this class —
            # presumably set externally during resume/deserialization; confirm.
            if hasattr(self, '_finished_is_tmp'):
                del self._finished_is_tmp
                if epoch_detail >= max(self.points):
                    self.finished = True
            if fire and epoch_detail >= max(self.points):
                # Last point reached: the trigger will never fire again.
                self.finished = True
        else:
            iteration = updater.iteration
            previous_iteration = self._previous_iteration
            # if previous_iteration is invalid value,
            # guess it from current iteration.
            if previous_iteration < 0:
                previous_iteration = iteration - 1
            fire = any(
                previous_iteration < p <= iteration
                for p in self.points)
            if hasattr(self, '_finished_is_tmp'):
                del self._finished_is_tmp
                if iteration >= max(self.points):
                    self.finished = True
            if fire and iteration >= max(self.points):
                self.finished = True
        # save current values
        self._previous_iteration = updater.iteration
        if hasattr(updater, 'epoch_detail'):
            self._previous_epoch_detail = updater.epoch_detail
        return fire
    def state_dict(self):
        # Serialize the progress markers so crossings are not re-fired
        # (or missed) after a resume.
        state = {}
        state['_previous_iteration'] = self._previous_iteration
        state['_previous_epoch_detail'] = self._previous_epoch_detail
        state['finished'] = self.finished
        return state
    def load_state_dict(self, to_load):
        self._previous_iteration = to_load['_previous_iteration']
        self._previous_epoch_detail = to_load['_previous_epoch_detail']
        self.finished = to_load['finished']
|
[
"webmaster@kenichimaehashi.com"
] |
webmaster@kenichimaehashi.com
|
640acd474ccc2667449fec3953056cfc3acb5173
|
3e74c0b272bfd7981454953aeef96ab2f5c59c69
|
/benchmarking/timeIt.py
|
8d8650898c5cef602fc4840308c61e368cda7614
|
[] |
no_license
|
LokeshKD/DSPython
|
09e2e086182d1d0e73f85cc88611b7aa446d1253
|
f657678ac2cc1855c4d13bdc66d790a1022b6640
|
refs/heads/master
| 2023-04-16T13:58:02.500681
| 2021-04-17T17:04:51
| 2021-04-17T17:04:51
| 357,611,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
#
def my_function():
    """Raise and immediately swallow a ZeroDivisionError (timing workload)."""
    try:
        result = 1 / 0
    except ZeroDivisionError:
        result = None
    return result
if __name__ == "__main__":
import timeit
setup = "from __main__ import my_function"
print(timeit.timeit("my_function()", setup=setup))
|
[
"i.lokesh@gmail.com"
] |
i.lokesh@gmail.com
|
c40f6b94961010096fa1e43f69e3c26d32368c2c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_229/ch57_2020_04_10_21_47_51_592464.py
|
1b83ba556f170dae510b6bab0604f9e0d9a59eca
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
def verifica_progressao(lista):
    """Classify *lista* as an arithmetic and/or geometric progression.

    Returns:
        "AG" when the list is both arithmetic and geometric,
        "PA" when it is only arithmetic (progressão aritmética),
        "PG" when it is only geometric (progressão geométrica),
        "NA" when it is neither.

    Fixes the original implementation, whose inner scans read
    ``lista[i + 1]`` with ``i`` running up to ``len(lista) - 1`` and
    therefore raised IndexError on any genuine progression (e.g. [1, 2, 3]).
    """
    # Arithmetic: every consecutive difference equals the first one.
    razao = lista[1] - lista[0]
    eh_pa = all(lista[i + 1] - lista[i] == razao
                for i in range(len(lista) - 1))

    # Geometric: a ratio only exists when no term used as a denominator is 0
    # (the original likewise rejected zero denominators).
    eh_pg = False
    if all(termo != 0 for termo in lista[:-1]):
        quociente = lista[1] / lista[0]
        eh_pg = all(lista[i + 1] / lista[i] == quociente
                    for i in range(len(lista) - 1))

    if eh_pa and eh_pg:
        return "AG"
    if eh_pa:
        return "PA"
    if eh_pg:
        return "PG"
    return "NA"
|
[
"you@example.com"
] |
you@example.com
|
7436c12c9b17ab4e53a8e623b20b1a24fc082352
|
dfaf6f7ac83185c361c81e2e1efc09081bd9c891
|
/k8sdeployment/k8sstat/python/kubernetes/test/test_v1beta1_subject_access_review_status.py
|
86ad671f95cfc1388e0b498d3971b2a7c14d6e90
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JeffYFHuang/gpuaccounting
|
d754efac2dffe108b591ea8722c831d979b68cda
|
2c63a63c571240561725847daf1a7f23f67e2088
|
refs/heads/master
| 2022-08-09T03:10:28.185083
| 2022-07-20T00:50:06
| 2022-07-20T00:50:06
| 245,053,008
| 0
| 0
|
MIT
| 2021-03-25T23:44:50
| 2020-03-05T02:44:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1beta1_subject_access_review_status import V1beta1SubjectAccessReviewStatus # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1SubjectAccessReviewStatus(unittest.TestCase):
    """V1beta1SubjectAccessReviewStatus unit test stubs"""
    def setUp(self):
        # No fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        pass
    def testV1beta1SubjectAccessReviewStatus(self):
        """Test V1beta1SubjectAccessReviewStatus"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1beta1_subject_access_review_status.V1beta1SubjectAccessReviewStatus()  # noqa: E501
        # Placeholder generated by openapi-generator; fill in once example
        # values for the model's mandatory attributes are chosen.
        pass
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
[
"JeffYFHuang@github.com"
] |
JeffYFHuang@github.com
|
49ddfd050e02c9a29ad478cd2401367cf761db46
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/murtaza/pusher/demo_state_td3.py
|
1bb88eaceb172d7677d3cb4f22eca88400bb1641
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,221
|
py
|
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from rlkit.launchers.launcher_util import run_experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.murtaza.rfeatures_rl import state_td3bc_experiment
from rlkit.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
env_id='SawyerPushNIPSEasy-v0',
algo_kwargs=dict(
batch_size=1024,
num_epochs=300,
num_eval_steps_per_epoch=500,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=1000,
min_num_steps_before_training=10000,
max_path_length=50,
),
td3_trainer_kwargs=dict(
discount=0.99,
),
td3_bc_trainer_kwargs=dict(
discount=0.99,
demo_path=["demos/icml2020/pusher/demos_action_noise_1000.npy"],
demo_off_policy_path=None,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
rl_weight=1.0,
bc_weight=0,
reward_scale=1.0,
target_update_period=2,
policy_update_period=2,
obs_key='state_observation',
env_info_key='puck_distance',
max_path_length=50,
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
exploration_noise=.8,
load_demos=True,
pretrain_rl=False,
pretrain_policy=False,
es='ou',
td3_bc=True,
save_video=True,
image_env_kwargs=dict(
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
transpose=True,
normalize=True,
),
num_exps_per_instance=1,
region='us-west-2',
logger_variant=dict(
tensorboard=True,
),
)
search_space = {
'td3_bc_trainer_kwargs.use_awr': [False],
# 'td3_bc_trainer_kwargs.demo_beta':[1, 10],
'td3_bc_trainer_kwargs.bc_weight': [1, 0],
'td3_bc_trainer_kwargs.rl_weight': [1],
'algo_kwargs.num_epochs': [1000],
'algo_kwargs.num_eval_steps_per_epoch': [100],
'algo_kwargs.num_expl_steps_per_train_loop': [100],
'algo_kwargs.min_num_steps_before_training': [0],
# 'td3_bc_trainer_kwargs.add_demos_to_replay_buffer':[True, False],
# 'td3_bc_trainer_kwargs.num_trains_per_train_loop':[1000, 2000, 4000, 10000, 16000],
# 'exploration_noise':[0.1, .3, .5],
# 'pretrain_rl':[True],
# 'pretrain_policy':[False],
'pretrain_rl': [False],
'pretrain_policy': [False],
'seedid': range(5),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(state_td3bc_experiment, variants, run_id=0)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
075d717759921834a2a8c9622dbb53790cf0228a
|
b198ab1d3faf79d34b1745236daa5eb02a37e18e
|
/yggdrasil/metaschema/properties/tests/test_JSONArrayMetaschemaProperties.py
|
ed812677d1d5d9df256fbc5b8f6903ae12c185fa
|
[
"BSD-3-Clause"
] |
permissive
|
leighmatth/yggdrasil
|
688f13aa0d274217daec9f412269fbbaf5f10aef
|
dcc4d75a4d2c6aaa7e50e75095a16df1df6b2b0a
|
refs/heads/master
| 2021-07-09T10:39:25.422978
| 2021-04-14T16:40:04
| 2021-04-14T16:40:04
| 245,011,886
| 0
| 0
|
NOASSERTION
| 2020-03-04T21:54:25
| 2020-03-04T21:54:24
| null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
from yggdrasil.metaschema.properties.tests import (
test_MetaschemaProperty as parent)
class TestItemsMetaschemaProperty(parent.TestMetaschemaProperty):
    r"""Test class for ItemsMetaschemaProperty class.

    The 'items' schema property may be a single schema applied to every
    array element, or a list of schemas applied element-wise; fixtures
    below cover both forms.
    """
    _mod = 'JSONArrayMetaschemaProperties'
    _cls = 'ItemsMetaschemaProperty'
    def __init__(self, *args, **kwargs):
        super(TestItemsMetaschemaProperty, self).__init__(*args, **kwargs)
        nele = 3
        valid_value = [int(i) for i in range(nele)]
        valid_sing = {'type': 'int'}
        valid_mult = [{'type': 'int'} for i in range(nele)]
        invalid_sing = {'type': 'float'}
        invalid_mult = [{'type': 'float'} for i in range(nele)]
        # (value, schema) pairs that must validate; shorter arrays are still
        # valid against a singular schema.
        self._valid = [(valid_value, valid_sing),
                       (valid_value, valid_mult),
                       ([int(i) for i in range(nele - 1)], valid_sing)]
        # (value, schema) pairs that must fail validation.
        self._invalid = [([float(i) for i in range(nele)], valid_sing),
                         ([float(i) for i in range(nele)], valid_mult)]
        # ([int(i) for i in range(nele - 1)], valid_mult)]
        # Schema pairs considered compatible / incompatible by comparison.
        self._valid_compare = [(valid_sing, valid_sing),
                               (valid_sing, valid_mult),
                               (valid_mult, valid_sing),
                               (valid_mult, valid_mult)]
        self._invalid_compare = [(valid_sing, invalid_sing),
                                 (valid_sing, invalid_mult),
                                 (valid_mult, invalid_sing),
                                 (valid_mult, invalid_mult),
                                 (1, 1),
                                 (valid_mult, valid_mult[:-1])]
|
[
"langmm.astro@gmail.com"
] |
langmm.astro@gmail.com
|
b08f3840e780e082aad97256d99c215839e1e058
|
1012f61f46ff7aaf37cd3ce0ead64e035ec201dc
|
/coding-challange/codewars/8kyu/~2021-07-25/capitalization-and-mutability/capitalization-and-mutability.py
|
70ab2ba79b4d13199ed131fb83a863ae49274dcb
|
[] |
no_license
|
polyglotm/coding-dojo
|
89efe22f5a34088e94c9e3a4e25cad510b04172a
|
43da9c75e3125f5cb1ac317d275475f1c0ea6727
|
refs/heads/develop
| 2023-08-17T11:59:30.945061
| 2023-08-16T14:13:45
| 2023-08-16T14:13:45
| 188,733,115
| 2
| 0
| null | 2023-03-04T05:49:21
| 2019-05-26T21:26:25
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
capitalization-and-mutability
codewars/8kyu/Capitalization and Mutability
Difficulty: 8kyu
URL: https://www.codewars.com/kata/595970246c9b8fa0a8000086/
"""
def capitalize_word(word):
    """Return *word* with its first character upper-cased and the rest lower-cased."""
    capitalized = word.capitalize()
    return capitalized
def test_capitalize_word():
    """Spot-check capitalize_word against known input/output pairs."""
    cases = {'word': 'Word', 'i': 'I', 'glasswear': 'Glasswear'}
    for given, expected in cases.items():
        assert capitalize_word(given) == expected
|
[
"polyglot.m@gmail.com"
] |
polyglot.m@gmail.com
|
4b10fa53b97294463e20ad06343f2dd982acc650
|
afebbb07b2b4eada17a5853c1ce63b4075d280df
|
/marketsim/gen/_intrinsic/orderbook/of_trader.py
|
804ce5709645171b35783b2eb31d41c8a145e2c1
|
[] |
no_license
|
peter1000/marketsimulator
|
8c0a55fc6408b880311d3ad49defc55e9af57824
|
1b677200a9d5323f2970c83f076c2b83d39d4fe6
|
refs/heads/master
| 2021-01-18T01:39:04.869755
| 2015-03-29T17:47:24
| 2015-03-29T17:47:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
from marketsim import types
from marketsim.gen._out.trader._singleproxy import SingleProxy
from marketsim import getLabel
from marketsim.gen._out._intrinsic_base.orderbook.of_trader import OfTrader_Base, Proxy_Base
class Base(object):
    """Delegating wrapper around an order-book implementation.

    Subclasses supply ``_impl`` (the concrete book, or None while unbound);
    any attribute not found on the wrapper is forwarded to it.
    """
    _properties = {}
    def __getattr__(self, name):
        # Forward non-dunder lookups to the underlying implementation.
        # NOTE(review): this itself reads self._impl, so subclasses must
        # define _impl (attribute or property) — otherwise a missed lookup
        # would recurse; confirm all concrete subclasses do.
        if name[0:2] != '__' and self._impl:
            return getattr(self._impl, name)
        else:
            raise AttributeError
    def __str__(self):
        # Label of the wrapped book, or '' while unbound.
        return getLabel(self._impl) if self._impl else ''
    def __repr__(self):
        return self.__str__()
class OfTrader_Impl(Base, OfTrader_Base):
    """Order book resolved from a trader reference ('book of trader X')."""
    def __init__(self):
        # Generic alias when pointed at the single-proxy trader, concrete
        # 'OfTrader' otherwise. (self.Trader presumably comes from the
        # generated OfTrader_Base — TODO confirm.)
        self._alias = ["$(TraderAsset)"] if type(self.Trader) == SingleProxy else ['OfTrader']
        Base.__init__(self)
    @property
    def _impl(self):
        # The trader's order book; None until the trader is bound.
        try:
            return self.Trader.orderBook
        except AttributeError:
            return None
class Proxy_Impl(Base, Proxy_Base):
    """Late-bound proxy: resolves to the context's order book at bind time."""
    def __init__(self):
        # Unbound until bind_impl() is called.
        self._impl = None
        Base.__init__(self)
    @property
    def label(self):
        # Placeholder label while unbound.
        return self._impl.label if self._impl else '$(OrderBook)'
    def bind_impl(self, ctx):
        # Bind once; subsequent calls keep the first binding.
        if self._impl is None:
            self._impl = ctx.orderbook
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
e4f7d50b81def02a4fc5c109097676d372a8b5c3
|
fbb12b2b7dcf7f2a33235f6766b4176c083a0c8e
|
/ARsyntax/workflow/rules/pseudoReplicates.smk
|
66d10732db24581cccd7e5b362ac228197b0e3d1
|
[] |
no_license
|
birkiy/TermProjectCOMP541
|
b76c8fa3a01e48dc302dc040a2c499c2c9f1b8ba
|
400a81765889a21d0590b599c4ba0e529a56e3ca
|
refs/heads/main
| 2023-01-19T21:36:55.085293
| 2020-11-30T12:59:14
| 2020-11-30T12:59:14
| 306,048,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
smk
|
# Directory holding all processed BAM files referenced by the rules below.
folder = "results/mapping/processed"
# Shuffle the merged BAM's reads and split them in half, producing two
# pseudo-replicates (presumably for an IDR-style reproducibility check —
# TODO confirm downstream use).
rule pseudoReplicates:
    input:
        "results/mapping/processed/{raw}.merged.final.bam"
    output:
        header=temp("results/mapping/processed/{raw}.merged.header.final.sam"),
        pseudo1="results/mapping/processed/{raw}.pseudo1.final.bam",
        pseudo2="results/mapping/processed/{raw}.pseudo2.final.bam"
    message:
        "Executing pseudoReplicates rule for {wildcards.raw}"
    shell:
        """
        samtools view -H {input} > {output.header}
        #Split merged treatments
        nlines=$(samtools view {input} | wc -l )
        nlines=$(( (nlines + 1) / 2 )) # half that number
        samtools view {input} | shuf - | split -d -l $nlines - "{folder}/{wildcards.raw}"
        cat {output.header} {folder}/{wildcards.raw}00 | \
        samtools view -bS - > {output.pseudo1}
        cat {output.header} {folder}/{wildcards.raw}01 | \
        samtools view -bS - > {output.pseudo2}
        """
# Merge the two replicate BAMs into the pooled input consumed by
# pseudoReplicates above.
rule pool:
    input:
        expand("results/mapping/processed/{{raw}}.{rep}.final.bam", rep=["rep1", "rep2"])
    output:
        "results/mapping/processed/{raw}.merged.final.bam"
    message:
        "Executing pool rule for {wildcards.raw}"
    threads:
        16
    shell:
        """
        #Merge treatment BAMS
        samtools merge -@ {threads} -u {output} {input}
        """
|
[
"umutberkayaltintas@gmail.com"
] |
umutberkayaltintas@gmail.com
|
b7345219fb5ba716b3fed095337bf4ff6b1df307
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/nwxtho001/question2.py
|
e5b2fd67ffe16d5f456ab603de434f28d2291d9f
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
print ("Welcome to the 30 Second Rule Expert\n------------------------------------\nAnswer the following questions by selecting from among the options.")
seen = input ('Did anyone see you? (yes/no)\n')
if seen == 'yes' :
seen_type = input ('Was it a boss/lover/parent? (yes/no)\n')
if seen_type == 'no' :
print ('Decision: Eat it.')
else :
exp = input ('Was it expensive? (yes/no)\n')
if exp == 'yes' :
cut = input ('Can you cut off the part that touched the floor? (yes/no)\n')
if cut == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Your call.')
else :
choc = input ('Is it chocolate? (yes/no)\n')
if choc == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Don\'t eat it.')
else :
sticky = input ('Was it sticky? (yes/no)\n')
if sticky == 'yes' :
steak = input ('Is it a raw steak? (yes/no)\n')
if steak == 'yes' :
puma = input ('Are you a puma? (yes/no)\n')
if puma == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Don\'t eat it.')
else :
cat = input ('Did the cat lick it? (yes/no)\n')
if cat == 'yes' :
health = input ('Is your cat healthy? (yes/no)\n')
if health == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Your call.')
else :
print ('Decision: Eat it.')
else :
emau = input ('Is it an Emausaurus? (yes/no)\n')
if emau == 'yes':
mega = input ('Are you a Megalosaurus? (yes/no)\n')
if mega == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Don\'t eat it.')
else :
cat = input ('Did the cat lick it? (yes/no)\n')
if cat == 'yes' :
health = input ('Is your cat healthy? (yes/no)\n')
if health == 'yes' :
print ('Decision: Eat it.')
else :
print ('Decision: Your call.')
else :
print ('Decision: Eat it.')
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
f486e9a0a0c4bfa8648db2f3ab716096708a8df8
|
4b7e282fe480415f5d52c0fc0429f144156190fe
|
/google/ads/googleads/v8/common/types/feed_common.py
|
12888a33eb9f184c2402a3337e503e869b2be75f
|
[
"Apache-2.0"
] |
permissive
|
Z2Xsoft/google-ads-python
|
c4750357bb19da91bb3b6bf2fa84bef9d2df36d3
|
1779d52a0446c8afb2437b0a9e103dcb849f5590
|
refs/heads/main
| 2023-08-18T15:22:17.840364
| 2021-09-26T04:08:53
| 2021-09-26T04:08:53
| 410,444,398
| 0
| 0
|
Apache-2.0
| 2021-09-26T04:08:53
| 2021-09-26T03:55:38
| null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.common",
marshal="google.ads.googleads.v8",
manifest={"Money",},
)
class Money(proto.Message):
    r"""Represents a price in a particular currency.
    Attributes:
        currency_code (str):
            Three-character ISO 4217 currency code.
        amount_micros (int):
            Amount in micros. One million is equivalent
            to one unit.
    """
    # proto-plus generated fields; wire numbers (3, 4) must stay stable for
    # compatibility with the Google Ads API — do not renumber.
    currency_code = proto.Field(proto.STRING, number=3, optional=True,)
    amount_micros = proto.Field(proto.INT64, number=4, optional=True,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
Z2Xsoft.noreply@github.com
|
d5655f14e27d61edfb7d6882009fe9f0ad295296
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2755/60793/267817.py
|
8e0ef8cec59b834a9a8d68728452208db38b0567
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 371
|
py
|
# For each test case: read two coefficient lists and print the coefficients
# of their product (discrete convolution).
for test in range(0, int(input())):
    input()  # skip the header line; lengths are taken from the lists themselves
    ls1 = list(map(int, input().split()))
    ls2 = list(map(int, input().split()))
    l1, l2 = len(ls1), len(ls2)
    # Product polynomial has degree (l1-1)+(l2-1), i.e. l1+l2-1 coefficients.
    ls3 = [0 for x in range(0, l1 + l2 - 1)]
    for i in range(0, l1):
        for j in range(0, l2):
            ls3[i + j] += ls1[i] * ls2[j]
    for i in ls3:
        print(i, end=" ")
    # NOTE(review): the last coefficient is printed twice — once by the loop
    # above (space-terminated) and once here (newline-terminated). Presumably
    # this matches the judge's expected output; confirm before changing.
    print(ls3[-1])
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
086a9a37c222334524b2121455b685678a95f665
|
63c7060562ec5d1a9153f0454ea6886b0a62a28e
|
/tb/axi_cdma/test_axi_cdma.py
|
6b7ce9326dc3e25a24752ed080d6e17b2cf42064
|
[
"MIT"
] |
permissive
|
alexforencich/verilog-axi
|
666e6dfbd14fd124bdcbc2798b4f557347fb8261
|
38915fb5330cb8270b454afc0140a94489dc56db
|
refs/heads/master
| 2023-03-30T07:34:17.721579
| 2023-03-30T07:12:13
| 2023-03-30T07:12:13
| 142,810,315
| 1,042
| 342
|
MIT
| 2023-03-05T19:52:57
| 2018-07-30T01:36:26
|
Verilog
|
UTF-8
|
Python
| false
| false
| 6,800
|
py
|
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiBus, AxiRam
from cocotbext.axi.stream import define_stream
# Descriptor request stream: one copy operation (read addr -> write addr) per
# transaction, identified by `tag`.
DescBus, DescTransaction, DescSource, DescSink, DescMonitor = define_stream("Desc",
    signals=["read_addr", "write_addr", "len", "tag", "valid", "ready"]
)
# Completion stream: the DUT reports finished descriptors; note there is no
# `ready` signal, so completions cannot be backpressured.
DescStatusBus, DescStatusTransaction, DescStatusSource, DescStatusSink, DescStatusMonitor = define_stream("DescStatus",
    signals=["tag", "error", "valid"]
)
class TB(object):
    """Testbench harness: starts the clock and wires descriptor + AXI models
    to the DUT."""
    def __init__(self, dut):
        self.dut = dut
        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)
        # Free-running clock with a 10 ns period for the whole test.
        cocotb.start_soon(Clock(dut.clk, 10, units="ns").start())
        # control interface
        self.desc_source = DescSource(DescBus.from_prefix(dut, "s_axis_desc"), dut.clk, dut.rst)
        self.desc_status_sink = DescStatusSink(DescStatusBus.from_prefix(dut, "m_axis_desc_status"), dut.clk, dut.rst)
        # AXI interface
        self.axi_ram = AxiRam(AxiBus.from_prefix(dut, "m_axi"), dut.clk, dut.rst, size=2**16)
        dut.enable.setimmediatevalue(0)
    def set_idle_generator(self, generator=None):
        """Optionally pause request-side channels to inject idle cycles."""
        if generator:
            self.desc_source.set_pause_generator(generator())
            self.axi_ram.write_if.b_channel.set_pause_generator(generator())
            self.axi_ram.read_if.r_channel.set_pause_generator(generator())
    def set_backpressure_generator(self, generator=None):
        """Optionally pause address/data channels to apply backpressure."""
        if generator:
            self.axi_ram.write_if.aw_channel.set_pause_generator(generator())
            self.axi_ram.write_if.w_channel.set_pause_generator(generator())
            self.axi_ram.read_if.ar_channel.set_pause_generator(generator())
    async def cycle_reset(self):
        """Pulse rst high for two clock cycles, with settle cycles around it."""
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
async def run_test(dut, data_in=None, idle_inserter=None, backpressure_inserter=None):
    """Sweep transfer length and source/destination offsets through the CDMA,
    checking tag, error flag, payload, and guard bytes for each descriptor."""
    tb = TB(dut)
    byte_lanes = tb.axi_ram.write_if.byte_lanes
    # Unaligned-capable builds test every byte offset; aligned builds step by
    # a full word.
    step_size = 1 if int(os.getenv("PARAM_ENABLE_UNALIGNED")) else byte_lanes
    tag_count = 2**len(tb.desc_source.bus.tag)
    cur_tag = 1
    await tb.cycle_reset()
    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)
    dut.enable.value = 1
    # Offsets cluster near 8 and near a 4 KiB boundary to exercise burst
    # splitting at the AXI 4 KiB address boundary.
    for length in list(range(1, byte_lanes*4+1))+[128]:
        for read_offset in list(range(8, 8+byte_lanes*2, step_size))+list(range(4096-byte_lanes*2, 4096, step_size)):
            for write_offset in list(range(8, 8+byte_lanes*2, step_size))+list(range(4096-byte_lanes*2, 4096, step_size)):
                tb.log.info("length %d, read_offset %d, write_offset %d", length, read_offset, write_offset)
                read_addr = read_offset+0x1000
                write_addr = 0x00008000+write_offset+0x1000
                test_data = bytearray([x % 256 for x in range(length)])
                tb.axi_ram.write(read_addr, test_data)
                # Pre-fill the destination region with 0xAA so overruns and
                # underruns are visible in the final check.
                tb.axi_ram.write(write_addr & 0xffff80, b'\xaa'*(len(test_data)+256))
                desc = DescTransaction(read_addr=read_addr, write_addr=write_addr, len=len(test_data), tag=cur_tag)
                await tb.desc_source.send(desc)
                status = await tb.desc_status_sink.recv()
                tb.log.info("status: %s", status)
                assert int(status.tag) == cur_tag
                assert int(status.error) == 0
                tb.log.debug("%s", tb.axi_ram.hexdump_str((write_addr & ~0xf)-16, (((write_addr & 0xf)+length-1) & ~0xf)+48))
                # Payload must match and the 8 guard bytes on each side must
                # still be 0xAA.
                assert tb.axi_ram.read(write_addr-8, len(test_data)+16) == b'\xaa'*8+test_data+b'\xaa'*8
                cur_tag = (cur_tag + 1) % tag_count
    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    """Return an infinite iterator repeating the pause pattern 1, 1, 1, 0."""
    pattern = (1, 1, 1, 0)
    return itertools.cycle(pattern)
# Register the cocotb regression matrix — only when actually running inside a
# simulator (SIM_NAME is empty during plain pytest collection).
if cocotb.SIM_NAME:
    for test in [run_test]:
        factory = TestFactory(test)
        factory.add_option("idle_inserter", [None, cycle_pause])
        factory.add_option("backpressure_inserter", [None, cycle_pause])
        factory.generate_tests()
# cocotb-test
# Paths used by the pytest entry point below: RTL sources live two levels up.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("unaligned", [0, 1])
@pytest.mark.parametrize("axi_data_width", [8, 16, 32])
def test_axi_cdma(request, axi_data_width, unaligned):
    """Compile and simulate axi_cdma for one (data width, alignment) combo."""
    dut = "axi_cdma"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    verilog_sources = [os.path.join(rtl_dir, f"{dut}.v")]

    # HDL parameters for this configuration; the strobe width is derived
    # from the data width (one strobe bit per byte lane).
    parameters = {
        'AXI_DATA_WIDTH': axi_data_width,
        'AXI_ADDR_WIDTH': 16,
        'AXI_STRB_WIDTH': axi_data_width // 8,
        'AXI_ID_WIDTH': 8,
        'AXI_MAX_BURST_LEN': 16,
        'LEN_WIDTH': 20,
        'TAG_WIDTH': 8,
        'ENABLE_UNALIGNED': unaligned,
    }

    # Expose parameters to the cocotb test via PARAM_* environment variables.
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # One build directory per pytest test id (brackets are not path-safe).
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
|
[
"alex@alexforencich.com"
] |
alex@alexforencich.com
|
3611831f18561cfa5af0f745acdf03a946f45c97
|
d3762b1b4d908b2b43f6e0ae362daa7136c6c7a4
|
/elections/management/commands/migrate_data.py
|
d427572608b5a937a16039325feb542271465cab
|
[] |
no_license
|
pbahle/elections-api
|
c58cdf2b05f1560c8d6a69f8bc07e878458585c1
|
60cc06610ab7a279102018078f29f38d31e8bd26
|
refs/heads/master
| 2020-09-02T10:44:03.663386
| 2019-11-02T19:26:30
| 2019-11-02T19:26:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
# pylint: disable=no-self-use
import sys
from datetime import timedelta
from pathlib import Path
from django.core.management.base import BaseCommand
from django.utils import timezone
import log
from elections import defaults
from elections.helpers import normalize_jurisdiction
from elections.models import District, DistrictCategory, Election, Party, Position
class Command(BaseCommand):
    """Management command: seed constant records and migrate data between
    existing models, then export model descriptions to text files."""

    help = "Initialize contants and migrate data between existing models"

    def handle(self, verbosity: int, **_kwargs):
        # Honor the verbosity flag only when -v was given explicitly on the
        # command line; otherwise default to level 2.
        log.init(verbosity=verbosity if '-v' in sys.argv else 2)
        defaults.initialize_parties()
        defaults.initialize_districts()
        self.update_elections()
        self.update_jurisdictions()
        self.import_descriptions()
        self.export_descriptions()

    def update_elections(self):
        # Deactivate active elections whose date is more than three weeks past.
        for election in Election.objects.filter(active=True):
            age = timezone.now() - timedelta(weeks=3)
            if election.date < age.date():
                log.info(f'Deactivating election: {election}')
                election.active = False
                election.save()

    def update_jurisdictions(self):
        # Normalize jurisdiction district names; when the normalized name
        # already exists, drop the duplicate instead of renaming.
        jurisdiction = DistrictCategory.objects.get(name="Jurisdiction")
        for district in District.objects.filter(category=jurisdiction):
            old = district.name
            new = normalize_jurisdiction(district.name)
            if new != old:
                if District.objects.filter(category=jurisdiction, name=new):
                    log.warning(f'Deleting district {old!r} in favor of {new!r}')
                    district.delete()
                else:
                    log.info(f'Renaming district {old!r} to {new!r}')
                    district.name = new
                    district.save()

    def import_descriptions(self):
        # Placeholder: descriptions are currently export-only.
        pass

    def export_descriptions(self):
        # Dump each model's name -> description mapping to content/<name>.txt.
        elections = {}
        for election in Election.objects.all():
            elections[election.name] = election.description
        self._write('elections', elections)
        districts = {}
        for category in DistrictCategory.objects.all():
            districts[category.name] = category.description
        self._write('districts', districts)
        parties = {}
        for party in Party.objects.all():
            parties[party.name] = party.description
        self._write('parties', parties)
        positions = {}
        for position in Position.objects.all():
            positions[position.name] = position.description
        self._write('positions', positions)

    def _write(self, name, data):
        # One "name:/description:" record per entry, sorted by name, with a
        # blank line between records.
        with Path(f'content/{name}.txt').open('w') as f:
            for key, value in sorted(data.items()):
                f.write(f'name: {key}\n')
                f.write(f'description: {value}\n')
                f.write('\n')
|
[
"jacebrowning@gmail.com"
] |
jacebrowning@gmail.com
|
c1fb632462fb073565ae995962ae392db45905b3
|
a411a55762de11dc2c9d913ff33d2f1477ac02cf
|
/lte/gateway/python/magma/mobilityd/subscriberdb_client.py
|
1fec443db1956ef872a11cfbc3a1d98d7a4c2e0f
|
[
"BSD-3-Clause"
] |
permissive
|
magma/magma
|
0dc48c1513d9968bd05fb7589f302c192b7c0f94
|
0e1d895dfe625681229e181fbc2dbad83e13c5cb
|
refs/heads/master
| 2023-09-04T09:31:56.140395
| 2023-08-29T13:54:49
| 2023-08-29T13:54:49
| 170,803,235
| 1,219
| 525
|
NOASSERTION
| 2023-09-07T17:45:42
| 2019-02-15T04:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 6,221
|
py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ipaddress
import logging
from typing import Optional
import grpc
from lte.protos.apn_pb2 import APNConfiguration
from magma.mobilityd.utils import log_error_and_raise
from magma.subscriberdb.sid import SIDUtils
class NetworkInfo:
    """Gateway networking details (IP, MAC address, VLAN) for an APN."""

    def __init__(
        self, gw_ip: Optional[str] = None, gw_mac: Optional[str] = None,
        vlan: int = 0,
    ):
        # Parse the gateway IP up front; anything unparseable (including
        # None) is logged at debug level and stored as None.
        try:
            parsed_ip = ipaddress.ip_address(gw_ip)  # type: ignore
        except ValueError:
            logging.debug("invalid internet gw ip: %s", gw_ip)
            parsed_ip = None
        self.gw_ip = parsed_ip
        self.gw_mac = gw_mac
        self.vlan = vlan

    def __str__(self):
        return f"GW-IP: {self.gw_ip} GW-MAC: {self.gw_mac} VLAN: {self.vlan}"
class StaticIPInfo:
    """
    Operator can configure Static GW IP and MAC.
    This would be used by AGW services to generate networking
    configuration.
    """

    def __init__(
        self, ip: Optional[str],
        gw_ip: Optional[str],
        gw_mac: Optional[str],
        vlan: int,
    ):
        # A falsy ip (None or "") leaves self.ip as None rather than raising.
        self.ip = ipaddress.ip_address(ip) if ip else None
        self.net_info = NetworkInfo(gw_ip, gw_mac, vlan)

    def __str__(self):
        return f"IP: {self.ip} NETWORK: {self.net_info}"
class SubscriberDbClient:
    """Thin client over the local SubscriberDB gRPC stub for APN and static
    IP lookups."""

    def __init__(self, subscriberdb_rpc_stub):
        # Stub may be None when subscriberdb integration is disabled; the
        # lookup methods then short-circuit.
        self.subscriber_client = subscriberdb_rpc_stub

    def get_subscriber_ip(self, sid: str) -> Optional[StaticIPInfo]:
        """
        Make RPC call to 'GetSubscriberData' method of local SubscriberDB
        service to get assigned IP address if any.

        Raises:
            SubscriberDBStaticIPValueError: malformed SID or record data.
            SubscriberDBConnectionError: RPC failure (via log_error_and_raise).
        """
        if self.subscriber_client is None:
            return None
        try:
            apn_config = self._find_ip_and_apn_config(sid)
            logging.debug("ip: Got APN: %s", apn_config)
            if apn_config and apn_config.assigned_static_ip:
                return StaticIPInfo(
                    ip=apn_config.assigned_static_ip,
                    gw_ip=apn_config.resource.gateway_ip,
                    gw_mac=apn_config.resource.gateway_mac,
                    vlan=apn_config.resource.vlan_id,
                )
        except ValueError as ex:
            logging.warning(
                "static Ip: Invalid or missing data for sid %s: ", sid,
            )
            logging.debug(ex)
            raise SubscriberDBStaticIPValueError(sid)
        except grpc.RpcError as err:
            log_error_and_raise(
                SubscriberDBConnectionError,
                "GetSubscriberData: while reading vlan-id error[%s] %s",
                err.code(),
                err.details(),
            )
        # No APN config or no static IP assigned.
        return None

    def get_subscriber_apn_network_info(self, sid: str) -> NetworkInfo:
        """
        Make RPC call to 'GetSubscriberData' method of local SubscriberDB
        service to get assigned IP address if any.
        TODO: Move this API to separate APN configuration service.

        Raises:
            SubscriberDBMultiAPNValueError: malformed SID or record data.
            SubscriberDBConnectionError: RPC failure (via log_error_and_raise).
        """
        if self.subscriber_client:
            try:
                apn_config = self._find_ip_and_apn_config(sid)
                logging.debug("vlan: Got APN: %s", apn_config)
                if apn_config and apn_config.resource.vlan_id:
                    return NetworkInfo(
                        gw_ip=apn_config.resource.gateway_ip,
                        gw_mac=apn_config.resource.gateway_mac,
                        vlan=apn_config.resource.vlan_id,
                    )
            except ValueError as ex:
                logging.warning(
                    "vlan: Invalid or missing data for sid %s", sid,
                )
                logging.debug(ex)
                raise SubscriberDBMultiAPNValueError(sid)
            except grpc.RpcError as err:
                log_error_and_raise(
                    SubscriberDBConnectionError,
                    "GetSubscriberData: while reading vlan-id error[%s] %s",
                    err.code(),
                    err.details(),
                )
        # Fall back to an empty NetworkInfo (no gateway, VLAN 0).
        return NetworkInfo()

    # use same API to retrieve IP address and related config.
    def _find_ip_and_apn_config(
        self, sid: str,
    ) -> Optional[APNConfiguration]:
        """Fetch subscriber data and select the APN config matching *sid*.

        The SID is split on '.' to separate IMSI from APN name, and on ','
        to drop trailing data. A config whose service_selection is '*' acts
        as a wildcard fallback; an exact APN-name match wins.
        """
        if '.' in sid:
            imsi, apn_name_part = sid.split('.', maxsplit=1)
            apn_name, _ = apn_name_part.split(',', maxsplit=1)
        else:
            imsi, _ = sid.split(',', maxsplit=1)
            apn_name = ''
        logging.debug("Find APN config for: %s", sid)
        data = self.subscriber_client.GetSubscriberData(SIDUtils.to_pb(imsi))
        if data and data.non_3gpp and data.non_3gpp.apn_config:
            selected_apn_conf = None
            for apn_config in data.non_3gpp.apn_config:
                logging.debug("APN config: %s", apn_config)
                try:
                    # Skip configs whose static IP does not parse.
                    if apn_config.assigned_static_ip:
                        ipaddress.ip_address(apn_config.assigned_static_ip)
                except ValueError:
                    continue
                if apn_config.service_selection == '*':
                    # Wildcard match; keep scanning for an exact match.
                    selected_apn_conf = apn_config
                elif apn_config.service_selection == apn_name:
                    selected_apn_conf = apn_config
                    break
            return selected_apn_conf
        return None
class SubscriberDBConnectionError(Exception):
    """Raised when the subscriber DB service is not available."""


class SubscriberDBStaticIPValueError(Exception):
    """Raised when the subscriber DB holds an invalid static IP value for
    the subscriber."""


class SubscriberDBMultiAPNValueError(Exception):
    """Raised when the subscriber DB holds an invalid Multi-APN VLAN value
    for the subscriber."""
|
[
"noreply@github.com"
] |
magma.noreply@github.com
|
536e8eda7de1c4a381f2c709fa56729cfbf19ee7
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/native_client_sdk/src/build_tools/tests/verify_filelist_test.py
|
2e01da1c93e9b3e5b6743a0e4d6f71f712de429d
|
[
"BSD-3-Clause"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import verify_filelist
def Verify(platform, rules_contents, directory_list):
  """Parse rules_contents for platform and verify directory_list against it."""
  ruleset = verify_filelist.Rules('test', platform, rules_contents)
  ruleset.VerifyDirectoryList(directory_list)
class VerifyFilelistTestCase(unittest.TestCase):
  """Unit tests for verify_filelist rule parsing and directory verification."""

  def testBasic(self):
    # An exact one-to-one listing verifies cleanly.
    rules = """\
foo/file1
foo/file2
foo/file3
bar/baz/other
"""
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3', 'bar/baz/other']
    Verify('linux', rules, dirlist)

  def testGlob(self):
    # A trailing glob matches files and nested subdirectories.
    rules = 'foo/*'
    dirlist = ['foo/file1', 'foo/file2', 'foo/file3/and/subdir']
    Verify('linux', rules, dirlist)

  def testPlatformVar(self):
    # ${PLATFORM} expands to the platform being verified.
    rules = 'dir/${PLATFORM}/blah'
    dirlist = ['dir/linux/blah']
    Verify('linux', rules, dirlist)

  def testPlatformVarGlob(self):
    rules = 'dir/${PLATFORM}/*'
    dirlist = ['dir/linux/file1', 'dir/linux/file2']
    Verify('linux', rules, dirlist)

  def testPlatformRule(self):
    # A [linux] prefix restricts the rule to that platform only.
    rules = """\
[linux]dir/linux/only
all/platforms
"""
    linux_dirlist = ['dir/linux/only', 'all/platforms']
    other_dirlist = ['all/platforms']
    Verify('linux', rules, linux_dirlist)
    Verify('mac', rules, other_dirlist)

  def testMultiPlatformRule(self):
    # A comma-separated platform list applies the rule to each listed one.
    rules = """\
[linux,win]dir/no/macs
all/platforms
"""
    nonmac_dirlist = ['dir/no/macs', 'all/platforms']
    mac_dirlist = ['all/platforms']
    Verify('linux', rules, nonmac_dirlist)
    Verify('win', rules, nonmac_dirlist)
    Verify('mac', rules, mac_dirlist)

  def testPlatformRuleBadPlatform(self):
    # Unknown platform names inside a rule are a parse error.
    rules = '[frob]bad/platform'
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, [])

  def testMissingFile(self):
    # A rule with no matching file fails verification.
    rules = """\
foo/file1
foo/missing
"""
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testExtraFile(self):
    # A file with no matching rule fails verification.
    rules = 'foo/file1'
    dirlist = ['foo/file1', 'foo/extra_file']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testEmptyGlob(self):
    rules = 'foo/*'
    dirlist = ['foo']  # Directory existing is not enough!
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testBadGlob(self):
    # A glob in the leading path component is rejected at parse time.
    rules = '*/foo/bar'
    dirlist = []
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'linux', rules, dirlist)

  def testUnknownPlatform(self):
    # Verifying against an unrecognized platform name is a parse error.
    rules = 'foo'
    dirlist = ['foo']
    for platform in ('linux', 'mac', 'win'):
      Verify(platform, rules, dirlist)
    self.assertRaises(verify_filelist.ParseException, Verify,
                      'foobar', rules, dirlist)

  def testUnexpectedPlatformFile(self):
    # A file present on a platform excluded by its rule fails verification.
    rules = '[mac,win]foo/file1'
    dirlist = ['foo/file1']
    self.assertRaises(verify_filelist.VerifyException, Verify,
                      'linux', rules, dirlist)

  def testWindowsPaths(self):
    # Backslash path separators are only accepted on Windows hosts.
    if os.path.sep != '/':
      rules = 'foo/bar/baz'
      dirlist = ['foo\\bar\\baz']
      Verify('win', rules, dirlist)
    else:
      rules = 'foo/bar/baz\\foo'
      dirlist = ['foo/bar/baz\\foo']
      Verify('linux', rules, dirlist)

  def testNestedGlobs(self):
    # Overlapping globs verify in either rule order.
    rules = """\
foo/*
foo/bar/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)

    rules = """\
foo/bar/*
foo/*"""
    dirlist = ['foo/file', 'foo/bar/file']
    Verify('linux', rules, dirlist)
if __name__ == '__main__':
unittest.main()
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
9a6669dbb8aa1d8739a39c14d383548d2e889676
|
557d75e6dfb42c881d4df73950c41935635f2162
|
/preprocessing/recon_all.py
|
d9bf4632fb59ca40f6606a9db0ddc41864471963
|
[] |
no_license
|
sssilvar/multiple-sclerosis
|
e6139558249f00a882ffeb9d4b82ac323a50ec96
|
a2e1e97e1297d45c2b84c5c57b372eee26047941
|
refs/heads/master
| 2020-06-05T00:09:04.781033
| 2019-07-13T23:15:00
| 2019-07-13T23:15:00
| 192,245,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
#!/bin/env python3
import os
import glob
from multiprocessing.pool import Pool
from os.path import join, isdir, basename
def recon_all(vol_file):
    """Run FreeSurfer recon-all on one T1 volume (with its matching T2)."""
    # Subject ID is the token before the first underscore in the filename.
    sid = basename(vol_file).split('_')[0]
    # The matching T2 volume lives alongside the T1 with a predictable name.
    t2_file = vol_file.replace('T1Wreg.nii.gz', 'T2Wreg.nii.gz')
    # NOTE(review): `out_folder` is a module-level name assigned only in the
    # __main__ block below, so this function works only when run as a script.
    # NOTE(review): the command is built by string interpolation and run via
    # the shell — paths containing spaces or metacharacters would break it.
    cmd = f'recon-all -i {vol_file} -T2 {t2_file} -s {sid} -sd {out_folder} -all'
    print(cmd)
    os.system(cmd)
if __name__ == "__main__":
    # Set dataset folder
    dataset_folder = '/home/jullygh/Downloads/MS/extracted/*'
    pattern = join(dataset_folder, 'patient*_study1_T1Wreg.nii.gz')
    print(f'Finging pattern: {pattern}')
    # Output Folder (read as a global by recon_all above)
    out_folder = '/home/jullygh/Downloads/MS/processed_fs/'
    # Find files in folder
    files = glob.glob(pattern, recursive=True)
    print(f'Total files found: {len(files)}')
    # Ask for confirmation before launching long-running FreeSurfer jobs.
    confirm = input('Start [y/n]:')
    if confirm == 'y':
        # Process subjects in parallel: up to 20 concurrent recon-all runs.
        pool = Pool(20)
        pool.map(recon_all, files)
        pool.close()
    else:
        print('No process started')
    print('Done')
|
[
"sssilvar@unal.edu.co"
] |
sssilvar@unal.edu.co
|
d04ae994a53ff06417f846f19c0403d3bc065f10
|
e5d83ede8521027b05d9b91c43be8cab168610e6
|
/0x0B-python-input_output/1-number_of_lines.py
|
1dfc5fcc64012fcf583f7f599a0cd5e13d80cbb1
|
[] |
no_license
|
Danielo814/holbertonschool-higher_level_programming
|
8918c3a6a9c136137761d47c5162b650708dd5cd
|
832b692529198bbee44d2733464aedfe650bff7e
|
refs/heads/master
| 2020-03-28T11:09:00.343055
| 2019-02-22T03:33:54
| 2019-02-22T03:33:54
| 148,181,433
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
#!/usr/bin/python3
"""
1-number_of_lines module
"""
def number_of_lines(filename=""):
    """Return the number of lines in the text file *filename*."""
    with open(filename, 'r', encoding='utf-8') as handle:
        return sum(1 for _ in handle)
|
[
"211@holbertonschool.com"
] |
211@holbertonschool.com
|
0d6361a1c0ab589a30c8857539292b0ea2ba6f17
|
43dabf77afd5c44d55b465c1b88bf9a5e7c4c9be
|
/drawing_random_circles.py
|
be298cbf90b23e67ea008144b485fca1b94b056c
|
[] |
no_license
|
geegatomar/OpenCV-Computer-Vision-Adrian-Rosebrock
|
cc81a990a481b5e4347dd97369b38479b46e55bc
|
daa579309010e6e7fefb004b878ffb26374401d0
|
refs/heads/master
| 2022-11-18T13:07:08.040483
| 2020-07-20T01:55:39
| 2020-07-20T01:55:39
| 280,987,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
import cv2
import numpy as np
# drawing 25 random circles on a 400x400 black canvas
canvas = np.zeros((400, 400, 3), dtype="uint8")
for i in range(25):
    radius = np.random.randint(180)  # random radius in [0, 180)
    centre = np.random.randint(0, 400, size=(2, ))  # random (x, y) centre on the canvas
    color = np.random.randint(0, 255, size=(3, ))
    # cv2.circle expects a tuple of plain Python ints, not numpy scalars.
    color = (int(color[0]), int(color[1]), int(color[2]))
    cv2.circle(canvas, tuple(centre), radius, tuple(color), 2)  # outline thickness 2
cv2.imshow("MyCanvas", canvas)
cv2.waitKey(0)
|
[
"geegatomar@gmail.com"
] |
geegatomar@gmail.com
|
c3e597348ecd704038d52109bd25c04c2baf9da0
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc003/D/4547211.py
|
6a0acb9b36e11c97948531a48a505d78d41e9f86
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
mod = 10**9 + 7  # prime modulus used throughout


def powmod(x, n):
    """Return x**n % mod for n >= 0.

    Delegates to the built-in three-argument pow(), which performs modular
    exponentiation by repeated squaring in C — the same result as the
    previous hand-rolled loop, but simpler and faster. (The old loop never
    terminated for negative n; pow() instead computes the modular inverse
    on Python 3.8+, which is strictly more useful.)
    """
    return pow(x, n, mod)
fact = [1 for _ in range(1000)]     # fact[i] = i! % mod (filled by setfact)
revfact = [1 for _ in range(1000)]  # revfact[i] = (i!)^-1 % mod


def setfact(n):
    """Precompute fact[0..n] and revfact[0..n] modulo `mod` (requires n < 1000).

    revfact[n] comes from Fermat's little theorem (mod is prime); the
    remaining inverses are filled in walking down from n using
    (k-1)!^-1 = k!^-1 * k.
    """
    for i in range(n):
        fact[i+1] = fact[i] * (i+1) % mod
    # Modular inverse of n! via built-in modular exponentiation.
    revfact[n] = pow(fact[n], mod-2, mod)
    for i in range(n):
        # Fix: the reduction previously targeted revfact[i] — the wrong
        # index — leaving the freshly computed revfact[n-i-1] unreduced
        # (results stayed correct only because Python bigints preserve the
        # congruence, at the cost of ever-growing intermediate values).
        revfact[n-i-1] = revfact[n-i] * (n-i) % mod


def getC(n, r):
    """Return C(n, r) % mod, or 0 when r > n. Call setfact(>= n) first."""
    if n < r:
        return 0
    return fact[n] * revfact[r] % mod * revfact[n-r] % mod
# Read grid size (r, c), window size (x, y), and item counts (d, l).
r, c = map(int, input().split())
x, y = map(int, input().split())
d, l = map(int, input().split())
setfact(x*y)
num = 0
# NOTE(review): inclusion-exclusion over the 4 window edges — each set bit
# of i shrinks one edge (bits j with j%2 selecting width vs height), and
# cnt tracks parity for the alternating sign. Presumably this subtracts
# placements that fail to touch every edge; confirm against the problem.
for i in range(1, 2**4):
    txy = [x, y]
    cnt = 0
    for j in range(4):
        if (i>>j)&1:
            txy[j%2] -= 1
            cnt += 1
    if txy[0] > 0 and txy[1] > 0:
        # (cnt%2*2-1) maps parity to a +1/-1 sign.
        num += (cnt%2*2-1) * getC(txy[0]*txy[1], d+l) % mod
# (r-x+1)*(c-y+1) window positions, times valid cell subsets, times ways to
# split the d+l chosen cells into the two item kinds.
print((r-x+1) * (c-y+1) % mod * (getC(x*y, d+l) - num) % mod * getC(d+l, d) % mod)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
873fd33b792017d4797bb0d1acbb046e82beacde
|
26f8a8782a03693905a2d1eef69a5b9f37a07cce
|
/test/test_destiny_historical_stats_destiny_historical_stats_period_group.py
|
54f3aa5d3731b9a1cb0a50764667212af0aef180
|
[] |
no_license
|
roscroft/openapi3-swagger
|
60975db806095fe9eba6d9d800b96f2feee99a5b
|
d1c659c7f301dcfee97ab30ba9db0f2506f4e95d
|
refs/heads/master
| 2021-06-27T13:20:53.767130
| 2017-08-31T17:09:40
| 2017-08-31T17:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: support@bungie.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.destiny_historical_stats_destiny_historical_stats_period_group import DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup
class TestDestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup(unittest.TestCase):
    """DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup unit test stubs."""

    def setUp(self):
        """No fixtures needed for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testDestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup(self):
        """
        Test DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.destiny_historical_stats_destiny_historical_stats_period_group.DestinyHistoricalStatsDestinyHistoricalStatsPeriodGroup()
if __name__ == '__main__':
unittest.main()
|
[
"adherrling@gmail.com"
] |
adherrling@gmail.com
|
c29ff1701a3bfbca5682d464670a0183a3517f7b
|
8882bfe78b3a6e5d022f81c86512b22f851d9dc8
|
/tgflow/TgFlow.py
|
ffb115f18b4e9744371863b4ed3007956ddc5bbd
|
[
"MIT"
] |
permissive
|
inexcode/tgflow
|
5600fa4040d30157daf6d2ad5fe8d625ac64789d
|
e7bbd7df87e7a711c1b2924f3f2ae909fb2086c5
|
refs/heads/master
| 2020-03-29T10:19:39.405683
| 2018-09-21T18:13:30
| 2018-09-21T18:13:30
| 149,799,442
| 0
| 0
| null | 2018-09-21T17:59:23
| 2018-09-21T17:59:23
| null |
UTF-8
|
Python
| false
| false
| 6,673
|
py
|
#import telebot
import hashlib
from enum import Enum
from . import handles
from . import render
import pickle,time
from .api.tg import telegramAPI
import pprint
pp = pprint.PrettyPrinter(indent=4)
action = handles.action
api,key = None,None
def_state = None
def_data= None
States = {}
UI = {}
Data = {}
Actions = {}
Keyboards = {}
Reaction_triggers = {}
def read_sd(sf, df):
    """Load pickled states and data from files *sf* and *df*.

    Returns a (states, data) tuple; either side falls back to an empty dict
    when its file is empty or holds an unreadable pickle. FileNotFoundError
    still propagates from open(), since the module-level caller handles
    first-run file creation.
    """
    def _load(path):
        # Best-effort load: a corrupt or empty pickle yields {} instead of
        # crashing. `except Exception` replaces the old bare `except:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        with open(path, 'rb') as f:
            try:
                return pickle.load(f)
            except Exception:
                return {}
    return _load(sf), _load(df)
def save_sd(states,data):
    """Persist the States and Data dicts to states.p / data.p in the CWD.

    Best-effort: any failure (unpicklable values, IO errors) is printed and
    swallowed so the bot keeps running with in-memory state only.
    """
    try:
        with open('states.p','wb+') as f:
            pickle.dump(states,f)
        with open('data.p','wb+') as f:
            pickle.dump(data,f)
    except Exception as e:
        print('Non-picklable',str(e))
# Load persisted state at import time; on a fresh install the files do not
# exist yet, so just log a hint (they are created on the first save_sd call).
try:
    States,Data = read_sd('states.p','data.p')
except FileNotFoundError:
    print("tgflow: creating data.p and states.p files")
def configure(token=None, state=None,
        apiModel=telegramAPI, data={},
        group_id=None
        ):
    """Set up the bot: store the token and default state, build the API
    object from *apiModel*, and register the message/callback handlers.

    NOTE(review): `data={}` is a mutable default argument — every user
    without saved data ends up sharing this one dict via def_data.
    """
    global def_state,def_data
    global api,key
    if not token:
        raise Exception("tgflow needs your bot token")
    if not state:
        raise Exception("tgflow needs a default state for new users")
    key =token
    def_state=state
    def_data =data
    # create bot and assign handlers
    # Group Id is not used in telegram
    api = apiModel(key,group_id=group_id)
    api.set_message_handler(message_handler)
    api.set_callback_handler(callback_handler)
def start(ui):
    """Store the UI definition and start polling for updates (blocking)."""
    global api,UI
    UI = ui
    print("tgflow: listening")
    try:
        api.start(none_stop=True)
    except Exception as e:
        print("tgflow:polling error",e)
def get_file_link(file_id):
    """Build a direct download URL for a Telegram-hosted file.

    NOTE(review): references an undefined global `bot` — calling this raises
    NameError; it needs porting to the `api` object (see TODO below).
    """
    # TODO: implement this in api
    finfo = bot.get_file(file_id)
    l='https://api.telegram.org/file/bot%s/%s'%(
        key,finfo.file_path)
    return l
def message_handler(messages):
    """Dispatch incoming messages: keyboard-button actions first, then any
    reaction triggers registered for the chat; render and send the result."""
    global States,UI
    for msg in messages:
        s = States.get(msg.chat.id,def_state)
        print('tgflow: got message. State:'+str(s))
        # for security reasons need to hash. user can call every action in this state
        # key format: kb_+ButtonName
        a = Actions.get('kb_'+str(msg.text))
        if not a:
            # No keyboard action matched — look for a reaction trigger whose
            # message field ('text', 'photo', ... or the catch-all 'all')
            # is present on this message.
            if Reaction_triggers.get(msg.chat.id):
                for r,a_ in Reaction_triggers[msg.chat.id]:
                    if msg.__dict__.get(r):
                        a = a_
                    if r=='all':
                        a = a_
        d = Data.get(msg.chat.id,def_data)
        # following restriction is dictaded by telegram api
        # NOTE(review): `messages` is rebound here, shadowing the iterable
        # being looped over (safe for iteration, but confusing).
        messages = flow(a,s,d,msg,msg.chat.id)
        send(messages,msg.chat.id)
def callback_handler(call):
    """Handle inline-button callbacks: run the matched action, then either
    send fresh messages or edit the originating one (action.update)."""
    s = States.get(call.message.chat.id,def_state)
    a = Actions.get(call.data)
    d = Data.get(call.message.chat.id,def_data)
    print("tgflow: got callback. State:",s)
    messages = flow(a,s,d,call,call.message.chat.id)
    if a:
        if not a.update:
            send(messages,call.message.chat.id)
        else:
            update(messages, call.message)
    else:
        print("tgflow: Warning: no action found but should")
        send(messages,call.message.chat.id)
def gen_state_msg(i,ns,nd,_id,state_upd=True):
    """Render state *ns* with data *nd* for chat *_id*.

    Runs the state's optional 'prepare' hook, persists state/data to disk,
    (re)registers button and reaction triggers, and returns the rendered
    list of (text, markup) messages.
    """
    pre_a = UI.get(ns).get('prepare')
    if pre_a:
        # call user-defined data preparations.
        print("tgflow: found a prep function, calling...")
        nd = pre_a(i,ns,**nd)
    args = {'s':ns,'d':nd}
    ui = render.prep(UI.get(ns),args)
    # saving data and state
    Data[_id] = nd
    if state_upd: States[_id] = ns
    save_sd(States,Data)
    # registering callback triggers on buttons
    save_iactions(ui.get('b'))
    save_kactions(ns,ui.get('kb'),ns,_id)
    print("tgflow: actions registered:\n",Actions)
    # registering reaction triggers
    rc = ui.get('react') or ui.get('react_to')
    if rc:
        trigs = Reaction_triggers.get(_id)
        if trigs:
            Reaction_triggers[_id].append((rc.react_to,rc))
        else:
            Reaction_triggers.update({_id:[(rc.react_to,rc)]})
        print("tgflow: reaction tgigger for %s registrated %s"%(str(_id),str(rc)))
    # clearing reaction triggers if needed
    rc = ui.get('clear_trig')
    if rc:
        print("tgflow: reaction trigger clear",rc)
        if Reaction_triggers.get(_id):
            for r,a_ in Reaction_triggers[_id]:
                #TODO: handle arrays of triggers
                if rc == r:
                    Reaction_triggers[_id].remove((r,a_))
        else:
            print("tgflow:WARN removing unset trigger",rc)
    # rendering message and buttons
    messages = render.render(ui)
    return messages
def send_state(ns,tg_id):
    """Render state *ns* for chat *tg_id* (reusing stored data) and send it."""
    d = Data.get(tg_id,def_data)
    msg = gen_state_msg(None,ns,d,tg_id)
    send(msg,tg_id)
def flow(a,s,d,i,_id):
    """Run action *a* (if any) to obtain the next state and data, then render
    that state. Falls back to re-rendering the current state when no action
    matched the input."""
    if a:
        ns,nd = a.call(i,s,**d)
        print('tgflow: called action:'+str(a))
        if isinstance(s,Enum) and isinstance(ns,Enum):
            print ('tgflow: states change %s --> %s'%(s.name,ns.name))
        else:
            print ('tgflow: states change %s --> %s'%(s,ns))
    else:
        print('tgflow: no action found for message. %s unchanged'%s)
        ns,nd = s,d
    return gen_state_msg(i,ns,nd,_id)
def get_state(id,s):
    # Unimplemented stub. NOTE(review): parameter `id` shadows the builtin.
    pass
def save_iactions(ui):
    """Recursively walk a rendered UI tree and register inline (callback)
    actions in the global Actions table, keyed by the action's register key."""
    if isinstance(ui,action):
        #TODO: assign actions to every user distinctly, as with buttons
        key = ui.get_register_key()
        Actions[key]=ui
    if isinstance(ui,dict):
        for k,v in ui.items():
            save_iactions(v)
    elif isinstance(ui,list):
        # NOTE(review): the list comprehension is used only for its side
        # effects; `d` is never read.
        d = [save_iactions(x) for x in ui ]
# TODO: remove s argument
def save_kactions(k,ui,s,_id):
    """Recursively register keyboard-button actions under 'kb_<ButtonName>',
    or — for actions with react_to set — per-chat reaction triggers."""
    if isinstance(ui,action):
        # key format: State+ButtonName
        if ui.react_to:
            trigs = Reaction_triggers.get(_id)
            if trigs:
                Reaction_triggers[_id].append((ui.react_to,ui))
            else:
                Reaction_triggers.update({_id:[(ui.react_to,ui)]})
            print("tgflow: reaction tgigger for %s registrated %s"%(str(_id),str(ui)))
        else:
            Actions['kb_'+str(k)]=ui
    if isinstance(ui,dict):
        for k,v in ui.items():
            save_kactions(k,v,s,_id)
    elif isinstance(ui,list):
        ui = [save_kactions(k,x,s,_id) for x in ui ]
def send(message,id):
    """Send each rendered (text, markup) pair to chat ``id`` via the api module."""
    print("tgflow: sending message")
    for text,markup in message:
        api.send(id,text=text,markup=markup)
def update(messages,msg):
    """Edit existing telegram message ``msg`` in place with new text/markup."""
    for text,markup in messages:
        print("tgflow: updating message")
        api.update(msg,text=text,markup=markup)
|
[
"lkv97dn@gmail.com"
] |
lkv97dn@gmail.com
|
28eb9cf5f13dc05100ba9264f00df18331a9e5ba
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04030/s999899727.py
|
d1c7afb12be8ed4228e960b1a6e2e0f7fc222ea5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
# Simulate a tiny editor: '0' and '1' are typed characters,
# any other input character acts as backspace.
buf = []
for ch in input():
    if ch in ('0', '1'):
        buf.append(ch)
    elif buf:
        # Backspace on a non-empty buffer removes the last typed char.
        buf.pop()
print("".join(buf))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5fe4c7ed46fc6342f89f21baa980a8b8f0c9a22a
|
a814debee728e59a7a10d8c12b92c1f3ee97e19d
|
/Cadeias/Questao01.py
|
5a06773ddc2a07e94da38507662ab3bf4ae50ea1
|
[] |
no_license
|
PedroVitor1995/Algoritmo-ADS-2016.1
|
0ee034d2f03b29d3c8177fb3402f7aeae08d07cf
|
8e3b6dfb0db188b9f5d68dcb8619f6636883ab89
|
refs/heads/master
| 2021-01-01T15:51:56.636502
| 2017-07-19T13:47:36
| 2017-07-19T13:47:36
| 81,328,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#__*__ encoding:utf8 __*__
"""1. Faça a criptografia de uma frase digitada pelo usuário. Na criptografia, a frase deverá ser invertida e as
consoantes deverão ser substituídas pelo caractere #."""
def main():
frase = raw_input('Digite uma frase: ')
consoantes = 'BCDFGHJKLMNPQRSTVXYWZbcdfghjklmnpqrstvxywz'
for letra in consoantes:
if letra in frase:
frase = frase[::-1].replace(letra,'#')
print frase
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
PedroVitor1995.noreply@github.com
|
f78ec480786556e08f9a2cddea0271a0013e24e1
|
9ff1d0d5049dfe1c14528e098bdd8c934fb2274a
|
/tests/test3/test_port7_unittest.py
|
6465c5d08297ccf489943439d23ab7e7aca49cfa
|
[] |
no_license
|
486dx/utility_Python
|
43e06b3f74dac140396643d0e5c132fb874d2467
|
598117f7e9fd416f4bc7f1ccea931048a977a0bc
|
refs/heads/master
| 2022-04-23T06:36:36.220406
| 2020-04-06T08:59:35
| 2020-04-06T08:59:35
| 264,519,552
| 1
| 0
| null | 2020-05-16T20:17:30
| 2020-05-16T20:17:30
| null |
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
# test_port7_unittest.py
import unittest
from portfolio3 import Portfolio
class PortfolioTest(unittest.TestCase):
    """Unit tests for Portfolio construction and cost accounting."""
    def test_empty(self):
        # A new portfolio has no holdings and therefore zero cost.
        p = Portfolio()
        self.assertEqual(p.cost(), 0.0)
    def test_buy_one_stock(self):
        # 100 shares * 176.48
        p = Portfolio()
        p.buy("IBM", 100, 176.48)
        self.assertEqual(p.cost(), 17648.0)
    def test_buy_two_stocks(self):
        # 17648.0 + 3615.0
        p = Portfolio()
        p.buy("IBM", 100, 176.48)
        p.buy("HPQ", 100, 36.15)
        self.assertEqual(p.cost(), 21263.0)
    def test_bad_input(self):
        # buy() requires shares and price; a lone symbol must raise.
        p = Portfolio()
        with self.assertRaises(TypeError):
            p.buy("IBM")
class PortfolioSellTest(unittest.TestCase):
    """Tests sell() against a portfolio pre-loaded with three positions."""
    def setUp(self):
        self.p = Portfolio()
        self.p.buy("MSFT", 100, 27.0)
        self.p.buy("DELL", 100, 17.0)
        self.p.buy("ORCL", 100, 34.0)
    def test_sell(self):
        # 2700 + 1700 + 3400 minus 50 sold MSFT shares at 27.0 -> 6450.
        self.p.sell("MSFT", 50)
        self.assertEqual(self.p.cost(), 6450)
    def test_not_enough(self):
        # Selling more shares than held must raise.
        with self.assertRaises(ValueError):
            self.p.sell("MSFT", 200)
    def test_dont_own_it(self):
        # Selling a symbol not in the portfolio must raise.
        with self.assertRaises(ValueError):
            self.p.sell("IBM", 1)
# Replace Portfolio.current_prices with a stub implementation.
# This avoids the web, but also skips all our current_prices
# code.
class PortfolioValueTest(unittest.TestCase):
    """Tests value() with a stubbed price source (no network access)."""
    def fake_current_prices(self):
        return {'IBM': 140.0, 'HPQ': 32.0}
    def setUp(self):
        self.p = Portfolio()
        self.p.buy("IBM", 100, 120.0)
        self.p.buy("HPQ", 100, 30.0)
        # Monkey-patch the instance so value() uses the fixed prices above.
        self.p.current_prices = self.fake_current_prices
    def test_value(self):
        # 100*140.0 + 100*32.0
        self.assertEqual(self.p.value(), 17200)
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
374eb12b1ec6126e692a94315444e4a7bcf0621b
|
4eaab9327d25f851f9e9b2cf4e9687d5e16833f7
|
/problems/search_suggestions_system/solution.py
|
47a2ff3a14f8b27c1b8af6d2a0b73ebff62b06d6
|
[] |
no_license
|
kadhirash/leetcode
|
42e372d5e77d7b3281e287189dcc1cd7ba820bc0
|
72aea7d43471e529ee757ff912b0267ca0ce015d
|
refs/heads/master
| 2023-01-21T19:05:15.123012
| 2020-11-28T13:53:11
| 2020-11-28T13:53:11
| 250,115,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
class Solution:
    def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:
        """For each successive prefix of searchWord, return up to three
        lexicographically smallest products starting with that prefix.

        NOTE(review): ``List`` comes from typing and is injected by the
        LeetCode runtime; this snippet does not import it itself.
        """
        products.sort() # time O(nlogn)
        array_len = len(products)
        ans = []
        input_char = ""
        for chr in searchWord:  # NOTE(review): `chr` shadows the builtin
            tmp = []
            input_char += chr
            insertion_index = self.binary_search(products, input_char) # find where input_char can be inserted in-order in the products array
            for word_ind in range(insertion_index, min(array_len, insertion_index+3)): # check the following 3 words, if valid
                if products[word_ind].startswith(input_char):
                    tmp.append(products[word_ind])
            ans.append(tmp)
        return ans
    def binary_search(self, array, target): # bisect.bisect_left implementation
        # Leftmost index where `target` could be inserted keeping `array` sorted.
        lo = 0
        hi = len(array)
        while lo < hi:
            mid = (lo + hi) //2
            if array[mid] < target: lo = mid + 1
            else: hi = mid
        return lo
|
[
"kadhirash@gmail.com"
] |
kadhirash@gmail.com
|
8cdd5f52e919892a5acf7fabc7f846d69d487956
|
5491f4b600f7ecd1d0848d60d7b017e5e407d4c7
|
/inventario/migrations/0005_ventamodel.py
|
79ad0c9268a28f2a5951adb94199d7fd065bfa48
|
[] |
no_license
|
GustavoPMex/web-inventario
|
409456dd356bbfcadd735cc9b8e2aae7605a0e37
|
d0ac36ee791ff0262f9390497da1dd990581a4fd
|
refs/heads/master
| 2023-06-10T10:08:39.029666
| 2021-06-30T23:40:19
| 2021-06-30T23:40:19
| 296,677,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 909
|
py
|
# Generated by Django 3.0.8 on 2020-09-29 03:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the VentaModel table recording individual sales:
    # name, supplier, units sold, price, and an auto-set sale timestamp.
    dependencies = [
        ('inventario', '0004_historicalarticulomodel'),
    ]
    operations = [
        migrations.CreateModel(
            name='VentaModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('proveedor', models.CharField(max_length=100)),
                ('vendidos', models.IntegerField()),
                ('precio', models.CharField(max_length=100)),
                ('fecha_venta', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name': 'Venta',
                'verbose_name_plural': 'Ventas',
            },
        ),
    ]
|
[
"gustavoppymex@gmail.com"
] |
gustavoppymex@gmail.com
|
0ae646e5fd55b65b3f924b29c97b5843b2eca062
|
bd1362c60313784c90013dfc9f0169e64389bf27
|
/scripts/ingestors/soilm_ingest.py
|
a3a1ef7cc473f3149593d222b9f47ed4891c86b8
|
[] |
no_license
|
ForceCry/iem
|
391aa9daf796591909cb9d4e60e27375adfb0eab
|
4b0390d89e6570b99ca83a5fa9b042226e17c1ad
|
refs/heads/master
| 2020-12-24T19:04:55.517409
| 2013-04-09T14:25:36
| 2013-04-09T14:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,725
|
py
|
"""
Ingest ISU SOILM data!
DailySI
"TIMESTAMP",
"RECORD",
"TAir_C_Avg",
"TAir_C_Max",
"TAir_C_TMx",
"TAir_C_Min",
"TAir_C_TMn",
"SlrMJ_Tot",
"Rain_mm_Tot",
"WS_mps_S_WVT",
"WindDir_D1_WVT",
"WindDir_SD1_WVT",
"WS_mps_Max",
"WS_mps_TMx",
"DailyET",
"TSoil_C_Avg",
"VWC_12_Avg",
"VWC_24_Avg",
"VWC_50_Avg",
"EC12",
"EC24",
"EC50",
"T12_C_Avg",
"T24_C_Avg",
"T50_C_Avg",
"PA",
"PA_2",
"PA_3"
HrlySI
"TIMESTAMP",
"RECORD",
"TAir_C_Avg",
"RH",
"SlrkW_Avg",
"SlrMJ_Tot",
"Rain_mm_Tot",
"WS_mps_S_WVT",
"WindDir_D1_WVT",
"WindDir_SD1_WVT",
"ETAlfalfa",
"SolarRadCalc",
"TSoil_C_Avg",
"VWC_12_Avg",
"VWC_24_Avg",
"VWC_50_Avg",
"EC12",
"EC24",
"EC50",
"T12_C_Avg",
"T24_C_Avg",
"T50_C_Avg",
"PA",
"PA_2",
"PA_3"
"""
import os
import iemdb
import iemtz
import datetime
ISUAG = iemdb.connect('isuag')
icursor = ISUAG.cursor()
STATIONS = {'CAMI4': dict(daily='/mnt/home/mesonet/sm/Calumet/Calumet_DailySI.dat',
hourly='/mnt/home/mesonet/sm/Calumet/Calumet_HrlySI.dat'),
}
def hourly_process(nwsli, maxts):
    """Ingest rows newer than ``maxts`` from the station's hourly .dat file.

    Walks the file from the newest line backwards, stops at the first row
    at or before ``maxts``, and INSERTs each newer row into sm_hourly.
    NOTE(review): SQL is built by string formatting — safe only while the
    .dat files are trusted input.
    """
    fn = STATIONS[nwsli]['hourly']
    if not os.path.isfile(fn):
        return
    lines = open(fn).readlines()
    if len(lines) < 6:
        return
    # Read header....
    headers = []
    for col in lines[1].strip().replace('"', '').split(","):
        headers.append(col)
    # Read data, newest line first, down to (not including) line 4.
    for i in range(len(lines)-1,3,-1):
        tokens = lines[i].strip().replace('"','').split(",")
        if len(tokens) != len(headers):
            continue
        valid = datetime.datetime.strptime(tokens[ headers.index('TIMESTAMP')],
                                           '%Y-%m-%d %H:%M:%S')
        valid = valid.replace(tzinfo=iemtz.CentralStandard)
        if valid <= maxts:
            break
        # We are ready for dbinserting!
        dbcols = "station,valid," + ",".join(headers[2:])
        # The '-06' suffix pins the timestamp to Central Standard Time.
        dbvals = "'%s','%s-06'," % (nwsli, valid.strftime("%Y-%m-%d %H:%M:%S"))
        for v in tokens[2:]:
            dbvals += "%s," % (formatter(v),)
        sql = "INSERT into sm_hourly (%s) values (%s)" % (dbcols, dbvals[:-1])
        icursor.execute(sql)
def formatter(v):
    """Quote a raw CSV token for inclusion in a SQL statement.

    "NAN" markers become SQL NULL; tokens containing a space are treated
    as timestamps and quoted with the fixed -06 UTC offset; everything
    else passes through unchanged.
    """
    if "NAN" in v:
        return 'Null'
    if " " in v:
        # A space inside the token means it is a timestamp string.
        return "'" + v + "-06'"
    return v
def daily_process(nwsli, maxts):
    """Ingest rows on/after date ``maxts`` from the station's daily .dat file.

    Each row's date is shifted back one day — presumably the logger stamps
    a daily summary with the *following* midnight (TODO confirm). A row
    equal to ``maxts`` is deleted first and re-inserted (reprocess).
    NOTE(review): SQL built via string formatting; assumes trusted input.
    """
    fn = STATIONS[nwsli]['daily']
    if not os.path.isfile(fn):
        return
    lines = open(fn).readlines()
    if len(lines) < 6:
        return
    # Read header....
    headers = []
    for col in lines[1].strip().replace('"', '').split(","):
        headers.append(col)
    # Read data, newest line first.
    for i in range(len(lines)-1,3,-1):
        tokens = lines[i].strip().replace('"','').split(",")
        if len(tokens) != len(headers):
            continue
        valid = datetime.datetime.strptime(tokens[ headers.index('TIMESTAMP')][:10],
                                           '%Y-%m-%d')
        valid = valid.date() - datetime.timedelta(days=1)
        if valid < maxts:
            break
        if valid == maxts: # Reprocess
            icursor.execute("""DELETE from sm_daily WHERE valid = '%s' and
            station = '%s' """ % (valid.strftime("%Y-%m-%d") ,nwsli))
        # We are ready for dbinserting!
        dbcols = "station,valid," + ",".join(headers[2:])
        dbvals = "'%s','%s'," % (nwsli, valid.strftime("%Y-%m-%d"))
        for v in tokens[2:]:
            dbvals += "%s," % (formatter(v),)
        sql = "INSERT into sm_daily (%s) values (%s)" % (dbcols, dbvals[:-1])
        icursor.execute(sql)
def get_max_timestamps(nwsli):
    """Return the newest 'daily' date and 'hourly' timestamp stored for a site.

    Falls back to 1 Jan 2012 values when the station has no rows yet.
    """
    data = {'hourly': datetime.datetime(2012,1,1, tzinfo=iemtz.CentralStandard),
            'daily': datetime.date(2012,1,1)}
    # Use parameterized queries instead of interpolating the station id into
    # the SQL string (assumes a DB-API driver with 'format' paramstyle, e.g.
    # psycopg2 — TODO confirm iemdb's driver).
    icursor.execute("SELECT max(valid) from sm_daily WHERE station = %s",
                    (nwsli,))
    row = icursor.fetchone()
    if row[0] is not None:
        data['daily'] = row[0]
    icursor.execute("SELECT max(valid) from sm_hourly WHERE station = %s",
                    (nwsli,))
    row = icursor.fetchone()
    if row[0] is not None:
        data['hourly'] = row[0]
    return data
def main():
    """Ingest any new hourly and daily rows for every configured station,
    then commit and close the database connection."""
    for nwsli in STATIONS.keys():
        maxobs = get_max_timestamps(nwsli)
        hourly_process(nwsli, maxobs['hourly'])
        daily_process(nwsli, maxobs['daily'])
    icursor.close()
    ISUAG.commit()
    ISUAG.close()
if __name__ == '__main__':
    main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
de8f1e1f2f085838464375d1849719293a936020
|
0af30c2e3ddcc80a19ea9cfaad9d7e1fedf8b876
|
/210311-210314/백)2579 계단 오르기/이동재.py
|
b27bbc2081db13195ca37f930e92c97bac44a0d8
|
[] |
no_license
|
winterash2/algorithm_study_2021_1
|
d1cd6077f71f68e7fc3eb6dfae7b2cc220885e4c
|
c1fee62c7e5e560c3bf7ae5e6166866d0147f23f
|
refs/heads/master
| 2023-04-02T20:11:04.169856
| 2021-04-05T11:18:22
| 2021-04-05T11:18:22
| 327,563,535
| 1
| 2
| null | 2021-01-24T14:17:40
| 2021-01-07T09:28:08
|
Python
|
UTF-8
|
Python
| false
| false
| 512
|
py
|
import sys

def solve(scores):
    """Maximum total score for the stair-climbing game (BOJ 2579).

    Rules: steps are climbed 1 or 2 at a time, three consecutive steps are
    forbidden, and the last step must be stepped on.

    dp_skip[i]: best total ending at step i where step i-1 was skipped.
    dp_two[i] : best total ending at step i where step i-1 was also used.
    """
    n = len(scores)
    if n == 1:
        return scores[0]
    dp_skip = [0] * n
    dp_two = [0] * n
    dp_skip[0] = scores[0]
    dp_skip[1] = scores[1]
    dp_two[1] = scores[1] + dp_skip[0]
    for i in range(2, n):
        dp_skip[i] = scores[i] + max(dp_skip[i - 2], dp_two[i - 2])
        dp_two[i] = scores[i] + dp_skip[i - 1]
    return max(dp_skip[n - 1], dp_two[n - 1])

def main():
    # Read one count line then one score per line from stdin.
    input = sys.stdin.readline
    n = int(input())
    scores = [int(input()) for _ in range(n)]
    print(solve(scores))

# The original executed all I/O at module level, which made the script
# untestable and run on import; the logic is now a pure, reusable function.
if __name__ == '__main__':
    main()
|
[
"winterash2@naver.com"
] |
winterash2@naver.com
|
64b1ff60158655b97b826b8467eb04fc9536b67f
|
c264153f9188d3af187905d846fa20296a0af85d
|
/Python/Python3网络爬虫开发实战/《Python3网络爬虫开发实战》随书源代码/urllib/error/demo3.py
|
6928b02a18d8a9762b9a281c84c97d5aa162f9c4
|
[] |
no_license
|
IS-OSCAR-YU/ebooks
|
5cd3c1089a221759793524df647e231a582b19ba
|
b125204c4fe69b9ca9ff774c7bc166d3cb2a875b
|
refs/heads/master
| 2023-05-23T02:46:58.718636
| 2021-06-16T12:15:13
| 2021-06-16T12:15:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
# Demo: distinguish HTTP-level failures from lower-level URL errors.
from urllib import request, error

try:
    response = request.urlopen('http://cuiqingcai.com/index.htm')
except error.HTTPError as exc:
    # An HTTP error carries a status code and the response headers.
    print(exc.reason, exc.code, exc.headers, sep='\n')
except error.URLError as exc:
    # URLError covers DNS failures, refused connections, etc.
    print(exc.reason)
else:
    print('Request Successfully')
|
[
"jiangzhangha@163.com"
] |
jiangzhangha@163.com
|
c5c570c5d072a814ff270e276deaef84ad277e35
|
56255c15702f4f4a01b7f785f956cee7290d0097
|
/segmentation_pytorch/utils/train.py
|
ef5089b869ed248028f04a015305e45cdec34d74
|
[] |
no_license
|
devhliu/PyTorch_UNOdeMSegNet
|
d2561606aac34ace4664c48bc000d4c4a915699a
|
3a446ca71ddd74e612bf2c2acc43e7b210366e5b
|
refs/heads/master
| 2020-12-24T04:41:23.674029
| 2019-11-13T08:07:40
| 2019-11-13T08:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,142
|
py
|
import sys
import torch
import pdb
from tqdm import tqdm as tqdm
from torchnet.meter import AverageValueMeter
# from ..models.CRF import dense_crf
class Epoch:
    """Base runner for one pass over a dataloader (train or valid).

    Subclasses implement batch_update(x, y) -> (loss, prediction) and
    on_epoch_start(); run() handles device placement, progress display
    and loss/metric averaging.
    """
    def __init__(self, model, loss, metrics, stage_name, device='cpu', verbose=True):
        self.model = model
        self.loss = loss
        self.metrics = metrics
        self.stage_name = stage_name
        self.verbose = verbose
        self.device = device
        self._to_device()
    def _to_device(self):
        # Move model, loss and every metric onto the configured device.
        self.model.to(self.device)
        self.loss.to(self.device)
        for metric in self.metrics:
            metric.to(self.device)
    def _format_logs(self, logs):
        # "name - 1.234, name2 - 0.5" style summary for the progress bar.
        str_logs = ['{} - {:.4}'.format(k, v) for k, v in logs.items()]
        s = ', '.join(str_logs)
        return s
    def batch_update(self, x, y):
        raise NotImplementedError
    def on_epoch_start(self):
        pass
    def run(self, dataloader):
        """Iterate the dataloader once; return dict of mean loss and metrics."""
        self.on_epoch_start()
        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}
        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                # x, y = x.to(self.device), y.to(self.device)
                x = x.to(self.device)
                # Targets may be a list (deep supervision); move each part.
                if isinstance(y, list):
                    y = [i.to(self.device) for i in y]
                else:
                    y = y.to(self.device)
                loss, y_pred = self.batch_update(x, y)
                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)
                # update metrics logs (against the last target if a list)
                y = y[-1] if isinstance(y, list) else y
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)
                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)
        return logs
class TrainEpoch(Epoch):
    """Epoch that runs an optimizer step per batch (model in train mode)."""
    def __init__(self, model, loss, metrics, optimizer, device='cpu', verbose=True, crf=False):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='train',
            device=device,
            verbose=verbose,
        )
        self.crf = crf
        self.optimizer = optimizer
    def on_epoch_start(self):
        self.model.train()
    def batch_update(self, x, y):
        """One forward/backward/step; returns (loss, final prediction)."""
        self.optimizer.zero_grad()
        prediction = self.model.forward(x)
        if self.crf:
            # NOTE(review): the dense_crf import is commented out at the top
            # of this file, so crf=True raises NameError as the code stands.
            prediction = dense_crf(img=prediction, output_probs=y)
        loss = self.loss(prediction, y)
        loss.backward()
        self.optimizer.step()
        # With deep supervision the model returns a list; report the last map.
        if isinstance(prediction, list):
            return loss, prediction[-1]
        return loss, prediction
class ValidEpoch(Epoch):
    """Epoch that only evaluates (no gradients, model in eval mode)."""
    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(
            model=model,
            loss=loss,
            metrics=metrics,
            stage_name='valid',
            device=device,
            verbose=verbose,
        )
    def on_epoch_start(self):
        self.model.eval()
    def batch_update(self, x, y):
        """Forward pass under no_grad; returns (loss, final prediction)."""
        with torch.no_grad():
            prediction = self.model.forward(x)
            if isinstance(prediction, list):
                prediction = prediction[-1]
            # NOTE(review): passes self.model.training as a third argument,
            # unlike TrainEpoch's two-argument call — confirm the loss
            # callable really accepts this extra flag.
            loss = self.loss(prediction, y, self.model.training)
        return loss, prediction
|
[
"maverickers@outlook.com"
] |
maverickers@outlook.com
|
af3c13b0b6d71fc197d85e36c8e32fa818a832f2
|
b72c37e3ccda507b231649cddd5c7845c6c34ba1
|
/PythonBasic/Day15/exec5_enumate.py
|
bcde4f16b170aa836494556ff4f435dfe5176b43
|
[] |
no_license
|
ljrdemail/AID1810
|
51c61c255b5c5efc1dc642b46691a614daedd85e
|
b417bd831bc1550ab953ce7ca23f54e34b8b2692
|
refs/heads/master
| 2020-04-24T09:45:14.781612
| 2019-02-21T11:26:49
| 2019-02-21T11:26:49
| 171,866,267
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# -*- coding:utf-8 -*-
def myenumerate(iterable, start=0):
    """Yield (index, item) pairs like the builtin enumerate, counting from *start*."""
    index = start
    for item in iterable:
        # Produce the pair first, then advance the counter.
        yield (index, item)
        index += 1
# Demo: enumerate "ABCDE" starting at index 1 and print each pair.
d = myenumerate("ABCDE", 1)
for i in d:
    print(i)
|
[
"root"
] |
root
|
fed79b9a386ddab376d7acd6d52191fc5ec5f846
|
23fb5b1fb275892b0a27657685c062360630889e
|
/Week 7/django/src/bookstore/settings.py
|
ad6bf63e9bc7c5c3b7fdb61d360525456c224875
|
[
"MIT"
] |
permissive
|
carlosal1015/python2017
|
2b596fa1e4cad4de06537ffc99fb0af0dfa4563d
|
c1eed0201039c6b4daf857dd1f08c47a7b1e3f45
|
refs/heads/master
| 2020-09-13T17:15:50.419142
| 2018-05-24T12:44:40
| 2018-05-24T12:44:40
| 222,850,901
| 1
| 2
|
MIT
| 2019-11-20T04:32:23
| 2019-11-20T04:30:54
| null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
"""
Django settings for bookstore project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; load it from an
# environment variable (e.g. os.environ["SECRET_KEY"]) before deploying.
SECRET_KEY = '=4*-6vzd*%j--m+ki)mhd+rpdw2v#t@_&r8z8k8typl8292#te'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine for local development; add hostnames before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"sebastinssanty@gmail.com"
] |
sebastinssanty@gmail.com
|
855033498433fc4b023163b8a1e030790481cc8e
|
102d09ef1d6effe166ad703ba4472c45dfb03263
|
/py/Unique Binary Search Trees.py
|
ff810735f7dccf5e13975b50685aee50ae48a74b
|
[] |
no_license
|
bitcsdby/Codes-for-leetcode
|
5693100d4b66de65d7f135bbdd81b32650aed7d0
|
9e24e621cfb9e7fd46f9f02dfc40a18a702d4990
|
refs/heads/master
| 2016-09-05T08:43:31.656437
| 2014-08-02T15:14:53
| 2014-08-02T15:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
class Solution:
    # @return an integer
    def numTrees(self, n):
        """Count structurally unique BSTs storing values 1..n (Catalan number)."""
        if n == 0 or n == 1:
            return 1
        # catalan[k] = number of unique BSTs with k nodes.
        catalan = [1, 1]
        for size in range(2, n + 1):
            total = 0
            for left in range(size):
                # 'left' nodes in the left subtree, size-left-1 on the right.
                total += catalan[size - 1 - left] * catalan[left]
            catalan.append(total)
        return catalan.pop()
|
[
"bitcsdby@gmail.com"
] |
bitcsdby@gmail.com
|
163d7c44a7e018cae6d6ff4a03b364723f15cc08
|
487c45df5fcbe7fdf6df5a348f6fe163bbb22033
|
/leetcode/875_koko_eating_bananas.py
|
20b4f0c350be2d1c309eb1f272a208f5b384aa40
|
[
"Unlicense"
] |
permissive
|
leetcode-notes/daily-algorithms-practice
|
dba03ac1c55262f6bae7d5aa4dac590c3c067e75
|
2a03499ed0b403d79f6c8451c9a839991b23e188
|
refs/heads/master
| 2023-06-18T14:14:58.770797
| 2021-07-12T05:27:32
| 2021-07-12T05:27:32
| 264,057,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
class Solution:
    def minEatingSpeed(self, piles, H: int) -> int:
        """Smallest integer eating speed finishing all piles within H hours.

        Binary search on the speed: the hours needed are monotonically
        non-increasing in k, so the answer is the leftmost feasible k.
        """
        def hours_needed(k):
            # Ceiling division without math.ceil: (p + k - 1) // k.
            return sum((p + k - 1) // k for p in piles)

        lo, hi = 1, max(piles)
        while lo < hi:
            mid = (lo + hi) // 2
            if hours_needed(mid) > H:
                # Too slow: this speed overruns the H-hour budget.
                lo = mid + 1
            else:
                hi = mid
        return lo
"""
Success
Details
Runtime: 500 ms, faster than 53.72% of Python3 online
submissions for Koko Eating Bananas.
Memory Usage: 15.4 MB, less than 76.05% of Python3 online
submissions for Koko Eating Bananas.
Next challenges:
Minimize Max Distance to Gas Station
"""
|
[
"leetcode.notes@gmail.com"
] |
leetcode.notes@gmail.com
|
eb8bd2bd90dfe1850bd04800fbf208772c98a519
|
8e07b5b7a8dd38e0ef2c7ffc97d0392d886f32e6
|
/venv/Lib/site-packages/mypy/typeshed/third_party/2and3/paramiko/server.pyi
|
f43bc83b05520072133af6f1a6c7ad7944981cc9
|
[] |
no_license
|
RodrigoNeto/cursopythonyt
|
fc064a2e6106324e22a23c54bdb9c31040ac9eb6
|
279dad531e21a9c7121b73d84fcbdd714f435e7e
|
refs/heads/master
| 2023-07-03T00:54:09.795054
| 2021-08-13T12:42:24
| 2021-08-13T12:42:24
| 395,646,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,113
|
pyi
|
import threading
from typing import Any, List, Optional, Tuple, Union
from paramiko.channel import Channel
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.transport import Transport
class ServerInterface:
    # Stub for paramiko's SSH server callback interface; override these to
    # implement a server. The int returns are paramiko result codes
    # (OPEN_SUCCEEDED / AUTH_SUCCESSFUL and friends).
    def check_channel_request(self, kind: str, chanid: int) -> int: ...
    def get_allowed_auths(self, username: str) -> str: ...
    def check_auth_none(self, username: str) -> int: ...
    def check_auth_password(self, username: str, password: str) -> int: ...
    def check_auth_publickey(self, username: str, key: PKey) -> int: ...
    def check_auth_interactive(self, username: str, submethods: str) -> Union[int, InteractiveQuery]: ...
    def check_auth_interactive_response(self, responses: List[str]) -> Union[int, InteractiveQuery]: ...
    def check_auth_gssapi_with_mic(self, username: str, gss_authenticated: int = ..., cc_file: Optional[str] = ...) -> int: ...
    def check_auth_gssapi_keyex(self, username: str, gss_authenticated: int = ..., cc_file: Optional[str] = ...) -> int: ...
    def enable_auth_gssapi(self) -> bool: ...
    def check_port_forward_request(self, address: str, port: int) -> int: ...
    def cancel_port_forward_request(self, address: str, port: int) -> None: ...
    def check_global_request(self, kind: str, msg: Message) -> Union[bool, Tuple[Any, ...]]: ...
    def check_channel_pty_request(
        self, channel: Channel, term: str, width: int, height: int, pixelwidth: int, pixelheight: int, modes: str
    ) -> bool: ...
    def check_channel_shell_request(self, channel: Channel) -> bool: ...
    def check_channel_exec_request(self, channel: Channel, command: bytes) -> bool: ...
    def check_channel_subsystem_request(self, channel: Channel, name: str) -> bool: ...
    def check_channel_window_change_request(
        self, channel: Channel, width: int, height: int, pixelwidth: int, pixelheight: int
    ) -> bool: ...
    def check_channel_x11_request(
        self, channel: Channel, single_connection: bool, auth_protocol: str, auth_cookie: bytes, screen_number: int
    ) -> bool: ...
    def check_channel_forward_agent_request(self, channel: Channel) -> bool: ...
    def check_channel_direct_tcpip_request(self, chanid: int, origin: Tuple[str, int], destination: Tuple[str, int]) -> int: ...
    def check_channel_env_request(self, channel: Channel, name: str, value: str) -> bool: ...
    def get_banner(self) -> Tuple[Optional[str], Optional[str]]: ...
class InteractiveQuery:
    # A keyboard-interactive authentication query: a titled set of prompts,
    # each flagged with whether the user's answer should be echoed.
    name: str
    instructions: str
    prompts: List[Tuple[str, bool]]
    def __init__(self, name: str = ..., instructions: str = ..., *prompts: Union[str, Tuple[str, bool]]) -> None: ...
    def add_prompt(self, prompt: str, echo: bool = ...) -> None: ...
class SubsystemHandler(threading.Thread):
    # One thread servicing a single SSH subsystem request on a channel.
    def __init__(self, channel: Channel, name: str, server: ServerInterface) -> None: ...
    def get_server(self) -> ServerInterface: ...
    def start_subsystem(self, name: str, transport: Transport, channel: Channel) -> None: ...
    def finish_subsystem(self) -> None: ...
|
[
"rodrigoneto.forseti@gmail.com"
] |
rodrigoneto.forseti@gmail.com
|
87285319b453d6b779837ac5d96b87d989629dbd
|
1277c0d30434133a7ce6f4d1db6c04d65b0a49c9
|
/backend/findme_20524/wsgi.py
|
8403d67f78f9ff859caa0a7e2ffa509f5e7f5195
|
[] |
no_license
|
crowdbotics-apps/findme-20524
|
aef86f49038e1e06967c3d22fee0968ec769c3b4
|
da959e3a82c81a93ce2e6d3388ad610ebc7be7f5
|
refs/heads/master
| 2022-12-23T10:47:01.480756
| 2020-09-21T19:34:35
| 2020-09-21T19:34:35
| 297,441,771
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for findme_20524 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'findme_20524.settings')
# Module-level WSGI callable imported by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
8faa8b56120958de0b6f1135e29aabb4e6389a29
|
ea4e3ac0966fe7b69f42eaa5a32980caa2248957
|
/download/unzip/pyobjc/pyobjc-14/pyobjc/stable/PyOpenGL-2.0.2.01/src/shadow/GL.KTX.buffer_region.0100.py
|
ac8f5465112ac5e17f3261bbe25ef82d3803a274
|
[] |
no_license
|
hyl946/opensource_apple
|
36b49deda8b2f241437ed45113d624ad45aa6d5f
|
e0f41fa0d9d535d57bfe56a264b4b27b8f93d86a
|
refs/heads/master
| 2023-02-26T16:27:25.343636
| 2020-03-29T08:50:45
| 2020-03-29T08:50:45
| 249,169,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _buffer_region
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated setter: "this" pointers are unwrapped specially, other
    # names go through the class's __swig_setmethods__ table; when static is
    # truthy, setting an unknown attribute raises instead of creating it.
    if (name == "this"):
        if isinstance(value, class_type):
            self.__dict__[name] = value.this
            if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
            del value.thisown
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name) or (name == "thisown"):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Dynamic variant: never blocks adding new attributes (static=0).
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG-generated getter backed by the class's __swig_getmethods__ table.
    # NOTE(review): the raise below uses Python 2-only syntax.
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
__version__ = _buffer_region.__version__
__date__ = _buffer_region.__date__
__api_version__ = _buffer_region.__api_version__
__author__ = _buffer_region.__author__
__doc__ = _buffer_region.__doc__
glBufferRegionEnabled = _buffer_region.glBufferRegionEnabled
glNewBufferRegion = _buffer_region.glNewBufferRegion
glDeleteBufferRegion = _buffer_region.glDeleteBufferRegion
glReadBufferRegion = _buffer_region.glReadBufferRegion
glDrawBufferRegion = _buffer_region.glDrawBufferRegion
glInitBufferRegionKTX = _buffer_region.glInitBufferRegionKTX
__info = _buffer_region.__info
GL_KTX_FRONT_REGION = _buffer_region.GL_KTX_FRONT_REGION
GL_KTX_BACK_REGION = _buffer_region.GL_KTX_BACK_REGION
GL_KTX_Z_REGION = _buffer_region.GL_KTX_Z_REGION
GL_KTX_STENCIL_REGION = _buffer_region.GL_KTX_STENCIL_REGION
|
[
"hyl946@163.com"
] |
hyl946@163.com
|
fbab5560e9894901c5617e613add83c277d25710
|
8e8acc57b63a66cb1450fa4d015d4ddcd74cce85
|
/liaoxuefengLessons/ObjectOrientedProgramming/ENUM.py
|
5e50eaa8c9becb7d3b84f7e8a321feb1a34f2cb0
|
[] |
no_license
|
indeyo/PythonStudy
|
fc2241db7cec8075a59a307ff47c9de83494844b
|
099feb4e4c8dec9e68887cedd95705d831e51b0f
|
refs/heads/master
| 2021-03-29T19:04:24.553848
| 2020-06-05T15:07:33
| 2020-06-05T15:07:33
| 247,978,205
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
"""
@Project : StudyPython0-100
@File : ENUM.py
@Time : 2019-08-05 22:57:52
@Author : indeyo_lin
@Version :
@Remark :
"""
"""
练习:
把Student的gender属性改造为枚举类型,可以避免使用字符串:
"""
# from enum import Enum, unique
#
# class Gender(Enum):
# Male = 0
# Female = 1
#
# class Student():
#
# def __init__(self, name, gender):
# self.name = name
# self.gender = gender
#
# # 测试:
# # 这道题完全不需要改嘛!!!直接通过
# bart = Student('Bart', Gender.Male)
# if bart.gender == Gender.Male:
# print('测试通过!')
# else:
# print('测试失败!')
from enum import Enum, unique  # BUG FIX: 'unique' was used below but never imported

# Build an Enum dynamically; member values start at 1 (Jan=1 ... Dec=12).
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
    print(name, '=>', member, ',', member.value)

@unique  # raises ValueError at class creation if two members share a value
class Weekday(Enum):
    Sun = 0  # Sun's value is explicitly set to 0
    Mon = 1
    Tue = 2
    Wed = 3
    Thu = 4
    Fri = 5
    Sat = 6
|
[
"indeyo@git.com"
] |
indeyo@git.com
|
3b6e664d5482c257c0400170a43bf6b823377024
|
5cf7f81791a9d66ba495512f0b1d2c8b6cccbd3d
|
/python/test/test_rhomb_H_and_R.py
|
ac268c603a510df1fc1881d48b3b0bc262075ef6
|
[
"BSD-3-Clause"
] |
permissive
|
odidev/spglib
|
9e0eecbb77b20e09f1affec42af48dc6a1c60e82
|
e807f1193ad57af8b916245fc397e4667baaaf92
|
refs/heads/develop
| 2023-07-03T03:03:37.592891
| 2021-05-17T08:45:07
| 2021-05-17T08:45:07
| 390,248,634
| 0
| 0
|
BSD-3-Clause
| 2021-07-28T11:44:17
| 2021-07-28T07:01:36
| null |
UTF-8
|
Python
| false
| false
| 2,280
|
py
|
import unittest
import numpy as np
from spglib import get_symmetry_dataset, find_primitive
from vasp import read_vasp
import os
data_dir = os.path.dirname(os.path.abspath(__file__))
# Subdirectories of data/ to scan for test cells.
dirnames = ('trigonal', )
# Space-group numbers whose lattices are rhombohedral (R).
rhomb_numbers = (146, 148, 155, 160, 161, 166, 167)
# Hexagonal-to-rhombohedral transformation matrix — presumably applied to
# column lattice vectors (used below as dot(std_lattice.T, tmat).T).
tmat = [[0.6666666666666666, -0.3333333333333333, -0.3333333333333333],
        [0.3333333333333333, 0.3333333333333333, -0.6666666666666666],
        [0.3333333333333333, 0.3333333333333333, 0.3333333333333333]]
class TestRhombSettingHR(unittest.TestCase):
    """Checks H- and R-setting symmetry datasets agree on the primitive cell."""
    def setUp(self):
        """Collect filenames of rhombohedral test cells from the data dirs."""
        self._filenames = []
        for d in dirnames:
            dirname = os.path.join(data_dir, "data", d)
            filenames = []
            trigo_filenames = os.listdir(dirname)
            for number in rhomb_numbers:
                filenames += [fname for fname in trigo_filenames
                              if str(number) in fname]
            self._filenames += [os.path.join(dirname, fname)
                                for fname in filenames]
    def tearDown(self):
        pass
    def test_rhomb_prim_agreement_over_settings(self):
        # For every rhombohedral cell, the primitive lattice must agree
        # whether derived from the H setting, the R setting, or directly
        # from find_primitive().
        for fname in self._filenames:
            cell = read_vasp(fname)
            symprec = 1e-5
            dataset_H = get_symmetry_dataset(cell, symprec=symprec)
            # NOTE(review): assumes the R-setting hall number immediately
            # follows the H-setting one — confirm against spglib's tables.
            hall_number_R = dataset_H['hall_number'] + 1
            dataset_R = get_symmetry_dataset(cell,
                                             hall_number=hall_number_R,
                                             symprec=symprec)
            plat, _, _ = find_primitive(cell)
            plat_H = np.dot(dataset_H['std_lattice'].T, tmat).T
            plat_R = dataset_R['std_lattice']
            np.testing.assert_allclose(plat, plat_H,
                                       atol=1e-5, err_msg="%s" % fname)
            np.testing.assert_allclose(plat_R, plat_H,
                                       atol=1e-5, err_msg="%s" % fname)
            np.testing.assert_allclose(plat_R, plat,
                                       atol=1e-5, err_msg="%s" % fname)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestRhombSettingHR)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
|
[
"atz.togo@gmail.com"
] |
atz.togo@gmail.com
|
61bbe9c6a03dc155f5c1f6a09c732284f2f3acdf
|
0d9c964fd7644395a3f0763f484e485fcc67f762
|
/new/src/21.03.2020/list_of_business.py
|
2e49049cc01217aba3b71d33a8cc65d4af44bb18
|
[
"Apache-2.0"
] |
permissive
|
VladBaryliuk/my_start_tasks
|
eaa2e6ff031f2f504be11f0f64f5d99bd1a68a0e
|
bf387543e6fa3ee303cbef04d2af48d558011ed9
|
refs/heads/main
| 2023-04-14T14:00:08.415787
| 2021-04-24T13:47:38
| 2021-04-24T13:47:38
| 354,538,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
"""Tiny tkinter to-do list: type an item in the Entry, press "enter" to
append it to the Text widget."""
from tkinter import *

root = Tk()
root.geometry('300x400')

# NOTE(review): the "save" button has no command wired up yet — clicking it
# does nothing. TODO: attach a handler that persists the list contents.
btn2 = Button(text='save')
btn2.pack()

text = Entry()
text.pack()

# Renamed from `list`, which shadowed the `list` builtin.
todo_list = Text()
todo_list.pack()


def add():
    """Append the Entry contents (plus a newline) to the to-do list."""
    todo = text.get() + '\n'
    todo_list.insert(END, todo)


btn = Button(text='enter', command=add)
btn.pack()
root.mainloop()
|
[
"vladmain9@gmail.com"
] |
vladmain9@gmail.com
|
c1f0f56f1f31047cfc5c943b9b8cb27094c83a27
|
69bb1d0e824625876207d492722adfdb9d959ad1
|
/Codeforces/antonAndDanik.py
|
c059ac795188e2be373516cbb3ff30f3a2ece7af
|
[] |
no_license
|
domiee13/dungcaythuattoan
|
8e2859264515e0fac3e9f0642a8b79ce5d966fff
|
7e95d037d47d6e4777e9cf56b9827c3e42f556b3
|
refs/heads/master
| 2023-03-28T03:58:44.225136
| 2021-03-29T10:32:52
| 2021-03-29T10:32:52
| 277,798,242
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,413
|
py
|
# A. Anton and Danik
# time limit per test: 1 second
# memory limit per test: 256 megabytes
# input: standard input, output: standard output
#
# Anton and Danik played n chess games in a row; no game was a tie.
# Given the outcome string s ('A' = Anton won, 'D' = Danik won), print
# "Anton" if Anton won more games, "Danik" if Danik won more, and
# "Friendship" on a tie.
#
# Examples:
#   6 / ADAAAA  -> Anton
#   7 / DDDAADA -> Danik
#   6 / DADADA  -> Friendship

n = int(input())  # number of games; consumed but len(s) carries the same info
s = input()

# Count each outcome once instead of re-scanning the string in every branch.
anton = s.count('A')
danik = s.count('D')

if anton > danik:
    print("Anton")
elif anton < danik:
    print("Danik")
else:
    print("Friendship")
|
[
"dungngocmd@gmail.com"
] |
dungngocmd@gmail.com
|
4d7886f416baba1c84d182a66f20391da7c27df2
|
0d5c77661f9d1e6783b1c047d2c9cdd0160699d1
|
/python/paddle/fluid/tests/unittests/test_row_conv_op.py
|
07dcd108689ae6069e30fe22029258d192215549
|
[
"Apache-2.0"
] |
permissive
|
xiaoyichao/anyq_paddle
|
ae68fabf1f1b02ffbc287a37eb6c0bcfbf738e7f
|
6f48b8f06f722e3bc5e81f4a439968c0296027fb
|
refs/heads/master
| 2022-10-05T16:52:28.768335
| 2020-03-03T03:28:50
| 2020-03-03T03:28:50
| 244,155,581
| 1
| 0
|
Apache-2.0
| 2022-09-23T22:37:13
| 2020-03-01T13:36:58
|
C++
|
UTF-8
|
Python
| false
| false
| 3,441
|
py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
def row_conv_forward(x, lod, wt):
    """Reference (pure-numpy) row convolution.

    For every sequence in the LoD batch, each timestep accumulates the
    following `context` rows of the sequence, weighted element-wise by the
    filter rows, truncated at the sequence boundary.
    """
    result = np.zeros_like(x)
    context = wt.shape[0]

    # Prefix sums of sequence lengths give each sequence's [begin, end) span.
    offsets = [0]
    for length in lod[0]:
        offsets.append(offsets[-1] + length)

    for begin, end in zip(offsets[:-1], offsets[1:]):
        steps = end - begin
        seq_in = x[begin:end, :]
        seq_out = result[begin:end, :]
        for t in range(steps):
            # Never look past the end of the current sequence.
            limit = min(context, steps - t)
            for k in range(limit):
                seq_out[t, :] += seq_in[t + k, :] * wt[k, :]
    return result
class TestRowConvOp1(OpTest):
    """row_conv with a small LoD batch and a narrow context window."""

    def setUp(self):
        self.op_type = "row_conv"
        lod = [[2, 3, 2]]  # three sequences of lengths 2, 3 and 2
        T = sum(lod[0])    # total timesteps across the batch
        D = 16             # feature dimension
        context_length = 2
        x = np.random.random((T, D)).astype("float32")
        wt = np.random.random((context_length, D)).astype("float32")
        self.inputs = {'X': (x, lod), 'Filter': wt}

        # Reference result computed by the pure-numpy implementation above.
        out = row_conv_forward(x, lod, wt)
        self.outputs = {'Out': (out, lod)}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))

    def test_check_grad_ignore_wt(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter'))
class TestRowConvOp2(OpTest):
    """row_conv with longer sequences and a context window as wide as D."""

    def setUp(self):
        self.op_type = "row_conv"
        lod = [[20, 30, 50]]  # three longer sequences
        T = sum(lod[0])
        D = 35
        context_length = 35
        x = np.random.random((T, D)).astype("float32")
        wt = np.random.random((context_length, D)).astype("float32")
        self.inputs = {'X': (x, lod), 'Filter': wt}

        out = row_conv_forward(x, lod, wt)
        self.outputs = {'Out': (out, lod)}

    def test_check_output(self):
        self.check_output()

    #max_relative_error is increased from 0.05 to 0.06 as for higher
    #dimensional input, the dX on CPU for some values has max_rel_error
    #slightly more than 0.05
    def test_check_grad_normal(self):
        self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.06)

    def test_check_grad_ignore_x(self):
        self.check_grad(
            ['Filter'], 'Out', max_relative_error=0.06, no_grad_set=set('X'))

    def test_check_grad_ignore_wt(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Filter'))


if __name__ == '__main__':
    unittest.main()
|
[
"xiaoyichao@haohaozhu.com"
] |
xiaoyichao@haohaozhu.com
|
5f5c03bcd52eb2348ea2bfae56c4eb554064760a
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_210/263.py
|
07aad036673e87dff6e60957731771366d880485
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
# Code Jam small-input solver: for each test case, reads the activity
# intervals of two parents ("ac" and "aj" activities, times in minutes of a
# 24h day) and prints the minimum number of child-care exchanges.
# Small dataset only: at most two activities in total, handled by the
# explicit case analysis below.
import operator

fin = open('B-small-attempt2.in', 'r')
fout = open('output.out', 'w')

tcs = int(fin.readline())  # number of test cases
for tc in range(0, tcs):
    inptemp = fin.readline().split(' ')
    ac = int(inptemp[0])  # activity counts for each parent
    aj = int(inptemp[1])
    acs = list()
    ajs = list()
    for i in range(0, ac):
        acinp = fin.readline().split(' ')
        acs.append([int(acinp[0]), int(acinp[1])])  # [start, end] minutes
    for i in range(0, aj):
        ajinp = fin.readline().split(' ')
        ajs.append([int(ajinp[0]), int(ajinp[1])])
    # Sort intervals by start time so [0]/[1] are the earlier/later activity.
    acs.sort(key=operator.itemgetter(0))
    ajs.sort(key=operator.itemgetter(0))
    result = -1
    if ac == 2 and aj == 0:
        # Both activities belong to one parent: 2 exchanges suffice if the
        # other parent can cover both in a single 12h (720 min) block,
        # otherwise 4 are needed.
        time1 = acs[1][1] - acs[0][0]
        time2 = acs[1][0] - acs[0][1]
        # Debug output left in by the author (goes to stdout, not the file).
        print("time1, 2",time1, time2)
        if time1 <= 720 or time2 >= 720:
            result = 2
        else:
            result = 4
    if ac == 0 and aj == 2:
        # Symmetric case for the other parent.
        time1 = ajs[1][1] - ajs[0][0]
        time2 = ajs[1][0] - ajs[0][1]
        print("time1, 2", time1, time2)
        if time1 <= 720 or time2 >= 720:
            result = 2
        else:
            result = 4
    # With at most one activity per parent, two exchanges always suffice.
    if ac == 1 and aj == 0:
        result = 2
    if ac == 0 and aj == 1:
        result = 2
    if ac == 1 and aj == 1:
        result = 2
    print("Case #%d: %d" %(tc+1, result))
    fout.write("Case #%d: %d\n" %(tc+1, result))
fin.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
e7846284c7e134592127b48bc185fe593b0949ec
|
fe7b700cfe3c06d89d18ffad3eeeb3b8220c1759
|
/pipeline/feature-classification/exp-3/selection-extraction/pca/pipeline_classifier_mrsi.py
|
30793e60571a5f7f0342ae4b772cf21d2691ce80
|
[
"MIT"
] |
permissive
|
DivyaRavindran007007/mp-mri-prostate
|
928684a607cf03a2d76ea3e3e5b971bbd3a1dd01
|
bd420534b4b5c464e5bbb4a07eabdc8724831f8a
|
refs/heads/master
| 2021-06-08T21:09:15.850708
| 2016-10-20T16:08:57
| 2016-10-20T16:08:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,823
|
py
|
"""This pipeline is intended to make the classification of MRSI modality
features.

NOTE(review): Python 2 script (print statements); all paths are hard-coded
to the original experiment machine.
"""
from __future__ import division

import os

import numpy as np

from sklearn.externals import joblib
from sklearn.preprocessing import label_binarize
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier

from protoclass.data_management import GTModality

# Define the path where the patients are stored
path_patients = '/data/prostate/experiments'
# Define the path where the features have been extracted
path_features = '/data/prostate/extraction/mp-mri-prostate'
# Define a list of the path where the feature are kept
mrsi_features = ['mrsi-spectra']
ext_features = ['_spectra_mrsi.npy']
# Define the path of the balanced data
path_balanced = '/data/prostate/balanced/mp-mri-prostate/exp-3/smote'
ext_balanced = '_mrsi.npz'
# Define the path of the ground for the prostate
path_gt = ['GT_inv/prostate', 'GT_inv/pz', 'GT_inv/cg', 'GT_inv/cap']
# Define the label of the ground-truth which will be provided
label_gt = ['prostate', 'pz', 'cg', 'cap']

# Generate the different path to be later treated
path_patients_list_gt = []
# Create the generator
id_patient_list = [name for name in os.listdir(path_patients)
                   if os.path.isdir(os.path.join(path_patients, name))]
id_patient_list = sorted(id_patient_list)
for id_patient in id_patient_list:
    # Append for the GT data - Note that we need a list of gt path
    path_patients_list_gt.append([os.path.join(path_patients, id_patient, gt)
                                  for gt in path_gt])

# Load all the data once. Splitting into training and testing will be done at
# the cross-validation time
data = []       # per-patient imbalanced feature matrices
data_bal = []   # per-patient SMOTE-balanced feature matrices
label = []      # per-patient labels (prostate ROI voxels)
label_bal = []  # per-patient balanced labels
for idx_pat in range(len(id_patient_list)):
    print 'Read patient {}'.format(id_patient_list[idx_pat])

    # For each patient we nee to load the different feature
    patient_data = []
    for idx_feat in range(len(mrsi_features)):
        # Create the path to the patient file
        filename_feature = (id_patient_list[idx_pat].lower().replace(' ', '_') +
                            ext_features[idx_feat])
        path_data = os.path.join(path_features, mrsi_features[idx_feat],
                                 filename_feature)
        single_feature_data = np.load(path_data)
        # Check if this is only one dimension data
        if len(single_feature_data.shape) == 1:
            single_feature_data = np.atleast_2d(single_feature_data).T
        patient_data.append(single_feature_data)

    # Concatenate the data in a single array
    patient_data = np.concatenate(patient_data, axis=1)
    print 'Imbalanced feature loaded ...'

    # Load the dataset from each balancing method
    data_bal_meth = []
    label_bal_meth = []
    pat_chg = (id_patient_list[idx_pat].lower().replace(' ', '_') +
               ext_balanced)
    filename = os.path.join(path_balanced, pat_chg)
    npz_file = np.load(filename)
    data_bal.append(npz_file['data_resampled'])
    label_bal.append(npz_file['label_resampled'])
    print 'Balanced data loaded ...'

    # Create the corresponding ground-truth
    gt_mod = GTModality()
    gt_mod.read_data_from_path(label_gt,
                               path_patients_list_gt[idx_pat])
    print 'Read the GT data for the current patient ...'

    # Concatenate the training data
    data.append(patient_data)
    # Extract the corresponding ground-truth for the testing data
    # Get the index corresponding to the ground-truth
    roi_prostate = gt_mod.extract_gt_data('prostate', output_type='index')
    # Get the label of the gt only for the prostate ROI
    gt_cap = gt_mod.extract_gt_data('cap', output_type='data')
    label.append(gt_cap[roi_prostate])
    print 'Data and label extracted for the current patient ...'

# Define the different level of sparsity
# (here: number of PCA components to keep per experiment)
sparsity_level = [2, 4, 8, 16, 24, 32, 36]

results_sp = []
for sp in sparsity_level:
    result_cv = []
    # Go for LOPO cross-validation
    for idx_lopo_cv in range(len(id_patient_list)):

        # Display some information about the LOPO-CV
        print 'Round #{} of the LOPO-CV'.format(idx_lopo_cv + 1)

        # Get the testing data
        testing_data = data[idx_lopo_cv]
        testing_label = np.ravel(label_binarize(label[idx_lopo_cv], [0, 255]))
        print 'Create the testing set ...'

        # Create the training data and label
        # We need to take the balanced data
        training_data = [arr for idx_arr, arr in enumerate(data_bal)
                         if idx_arr != idx_lopo_cv]
        training_label = [arr for idx_arr, arr in enumerate(label_bal)
                         if idx_arr != idx_lopo_cv]

        # Concatenate the data
        training_data = np.vstack(training_data)
        training_label = np.ravel(label_binarize(
            np.hstack(training_label).astype(int), [0, 255]))
        print 'Create the training set ...'

        # Learn the PCA projection
        # (fit on training only; the same projection is applied to testing)
        pca = PCA(n_components=sp, whiten=True)
        training_data = pca.fit_transform(training_data)
        testing_data = pca.transform(testing_data)

        # Perform the classification for the current cv and the
        # given configuration
        crf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
        pred_prob = crf.fit(training_data,
                            np.ravel(training_label)).predict_proba(
                                testing_data)

        result_cv.append([pred_prob, crf.classes_])

    results_sp.append(result_cv)

# Save the information
path_store = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/pca/mrsi'
if not os.path.exists(path_store):
    os.makedirs(path_store)
joblib.dump(results_sp, os.path.join(path_store,
                                     'results.pkl'))
|
[
"glemaitre@visor.udg.edu"
] |
glemaitre@visor.udg.edu
|
06e9af435b48d5945c4ae92e1b4270ba096357cc
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/iBqJcagS56wmDpe4x_7.py
|
3acaa1ddc25b89eab9db4328cabbfff41f70461d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
"""
The volume of a spherical shell is the difference between the enclosed volume
of the outer sphere and the enclosed volume of the inner sphere:

Create a function that takes `r1` and `r2` as arguments and returns the volume
of a spherical shell rounded to the nearest thousandth.

### Examples
vol_shell(3, 3) ➞ 0
vol_shell(7, 2) ➞ 1403.245
vol_shell(3, 800) ➞ 2144660471.753
### Notes
The inputs are always positive numbers. `r1` could be the inner radius or the
outer radius, don't return a negative number.
"""
from math import pi
def vol_shell(r1, r2):
    """Volume of a spherical shell with radii `r1` and `r2`.

    Either argument may be the inner or outer radius; the result is the
    (non-negative) difference of the two enclosed sphere volumes, rounded
    to the nearest thousandth.
    """
    inner, outer = sorted((r1, r2))
    volume = 4 / 3 * pi * (outer ** 3 - inner ** 3)
    return round(volume, 3)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
344734125bb7c7899ca6cc6c2558fd173da78d68
|
279ed7207ac2c407487416b595e12f573049dd72
|
/pybvk/bvk/bvkmodels/ni_676.py
|
8e2c8f20a537ec5b2eaa574c6f66b29f2b1de7de
|
[] |
no_license
|
danse-inelastic/pybvk
|
30388455e211fec69130930f2925fe16abe455bd
|
922c8c0a8c50a9fabd619fa06e005cacc2d13a15
|
refs/heads/master
| 2016-09-15T22:21:13.131688
| 2014-06-25T17:12:34
| 2014-06-25T17:12:34
| 34,995,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,089
|
py
|
# ni_676.py
# BvK force constants
# Born-von Karman interatomic force-constant fit for fcc nickel at 676 K.
element = "Ni"
lattice_type = "fcc"
temperature = 676 # Units: K
reference = "De Wit, G.A., Brockhouse, B.N.: J. Appl. Phys. 39 (1968) 451"
details = "All fits use the measured elastic constants. This fit uses general force up to fourth neighbour, axially symmetric force for fifth neighbour."
a = 3.52 # lattice parameters in angstroms
# Units: N m^-1
# Keys name neighbour shells by direction index ("110" = 1st shell, "200" =
# 2nd, ...); each value holds the independent components of that shell's
# force-constant matrix.
force_constants = { "110": { "xx": 16.250,
                             "zz": -0.970,
                             "xy": 19.390 },
                    "200": { "xx": 1.070,
                             "yy": 0.056 },
                    "211": { "xx": 0.963,
                             "yy": 0.449,
                             "yz": -0.391,
                             "xz": 0.458 },
                    "220": { "xx": 0.115,
                             "zz": -0.457,
                             "xy": 0.222 },
                    "310": { "xx": -0.256,
                             "yy": -0.063,
                             "zz": -0.040,
                             "xy": -0.072 } }
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
8ff8c60155eca0198afd7158b8f4dbb5f00a51d5
|
163cb8cae7d364a090565710ee9f347e5cdbf38f
|
/new_deeplab/utils/get_dataset_colormap_test.py
|
90005ebbf542c89e44a7dd4783811474cc59853d
|
[
"CC-BY-4.0",
"CC-BY-3.0"
] |
permissive
|
abhineet123/river_ice_segmentation
|
2b671f7950aac6ab2b1185e3288490bc5e079bc1
|
df694107be5ad6509206e409f5cde4428a715654
|
refs/heads/master
| 2023-05-01T11:52:10.897922
| 2023-04-25T22:55:04
| 2023-04-25T22:55:04
| 179,993,952
| 15
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,955
|
py
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for get_dataset_colormap.py."""
import numpy as np
import tensorflow as tf
from new_deeplab.utils import get_dataset_colormap
class VisualizationUtilTest(tf.test.TestCase):
def testBitGet(self):
"""Test that if the returned bit value is correct."""
self.assertEqual(1, get_dataset_colormap.bit_get(9, 0))
self.assertEqual(0, get_dataset_colormap.bit_get(9, 1))
self.assertEqual(0, get_dataset_colormap.bit_get(9, 2))
self.assertEqual(1, get_dataset_colormap.bit_get(9, 3))
def testPASCALLabelColorMapValue(self):
"""Test the getd color map value."""
colormap = get_dataset_colormap.create_pascal_label_colormap()
# Only test a few sampled entries in the color map.
self.assertTrue(np.array_equal([128., 0., 128.], colormap[5, :]))
self.assertTrue(np.array_equal([128., 192., 128.], colormap[23, :]))
self.assertTrue(np.array_equal([128., 0., 192.], colormap[37, :]))
self.assertTrue(np.array_equal([224., 192., 192.], colormap[127, :]))
self.assertTrue(np.array_equal([192., 160., 192.], colormap[175, :]))
def testLabelToPASCALColorImage(self):
"""Test the value of the converted label value."""
label = np.array([[0, 16, 16], [52, 7, 52]])
expected_result = np.array([
[[0, 0, 0], [0, 64, 0], [0, 64, 0]],
[[0, 64, 192], [128, 128, 128], [0, 64, 192]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
self.assertTrue(np.array_equal(expected_result, colored_label))
def testUnExpectedLabelValueForLabelToPASCALColorImage(self):
"""Raise ValueError when input value exceeds range."""
label = np.array([[120], [300]])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testUnExpectedLabelDimensionForLabelToPASCALColorImage(self):
"""Raise ValueError if input dimension is not correct."""
label = np.array([120])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_pascal_name())
def testGetColormapForUnsupportedDataset(self):
with self.assertRaises(ValueError):
get_dataset_colormap.create_label_colormap('unsupported_dataset')
def testUnExpectedLabelDimensionForLabelToADE20KColorImage(self):
label = np.array([250])
with self.assertRaises(ValueError):
get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
def testFirstColorInADE20KColorMap(self):
label = np.array([[1, 3], [10, 20]])
expected_result = np.array([
[[120, 120, 120], [6, 230, 230]],
[[4, 250, 7], [204, 70, 3]]
])
colored_label = get_dataset_colormap.label_to_color_image(
label, get_dataset_colormap.get_ade20k_name())
self.assertTrue(np.array_equal(colored_label, expected_result))
def testMapillaryVistasColorMapValue(self):
colormap = get_dataset_colormap.create_mapillary_vistas_label_colormap()
self.assertTrue(np.array_equal([190, 153, 153], colormap[3, :]))
self.assertTrue(np.array_equal([102, 102, 156], colormap[6, :]))
if __name__ == '__main__':
tf.test.main()
|
[
"asingh1@ualberta.ca"
] |
asingh1@ualberta.ca
|
ddfdd2f0efe461b056235acb80be18b8c1228721
|
34165333483426832b19830b927a955649199003
|
/publish/library_app/reports/library_book_report.py
|
f1b54fc8a5cc254109a6f75a8fa9f9b3ecad1cee
|
[] |
no_license
|
terroristhouse/Odoo12
|
2d4315293ac8ca489d9238f464d64bde4968a0a9
|
d266943d7affdff110479656543521889b4855c9
|
refs/heads/master
| 2022-12-02T01:09:20.160285
| 2020-08-16T13:56:07
| 2020-08-16T13:56:07
| 283,630,459
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from odoo import fields,models
class BookReport(models.Model):
    """Read-only reporting model backed by a SQL view.

    ``_auto = False`` stops Odoo from creating a table for this model; the
    ``init`` hook creates/replaces the backing view instead.
    """
    _name = 'library.book.report'
    _description = 'Book Report'
    _auto = False  # no table — maps onto the view created in init()

    name = fields.Char('Title')
    publisher_id = fields.Many2one('res.partner')
    date_published = fields.Date()

    def init(self):
        # (Re)create the backing view, exposing active books only.
        self.env.cr.execute("""
            CREATE OR REPLACE VIEW library_book_report AS
            (SELECT * FROM library_book WHERE active=True)
        """)
|
[
"867940410@qq.com"
] |
867940410@qq.com
|
99831b86797def2356ed377f20ea20834b08bcec
|
94d4ccd443a37c8090a84d730d006edef869b816
|
/recursion_and_backtracking/rat_in_maze.py
|
8fde6e9bcdff326a7021e497c7d29b0c638e1f3d
|
[] |
no_license
|
sudo-hemant/CP_CipherSchools
|
e0fc51d6f77f80709a88a7711ef6360f1fdd13e3
|
4f741f5f6fbbb182bd03135fb3180f5a40acbb1e
|
refs/heads/master
| 2023-03-09T20:59:59.704014
| 2021-02-19T15:27:15
| 2021-02-19T15:27:15
| 338,825,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
# https://practice.geeksforgeeks.org/problems/rat-maze-with-multiple-jumps-1587115621/1/?track=DSASP-Backtracking&batchId=154
from collections import deque
def solve(n, maze):
    """Print one path through the n x n jump maze as a 0/1 grid, or -1 if
    no path from (0, 0) to (n-1, n-1) exists."""
    res = [ [0 for i in range(n)] for j in range(n)]
    if is_path(0, 0, res, n, maze):
        print_sol(n, res)
    else:
        print(-1)
def is_path(i, j, res, n, maze):
    """Backtracking search from (i, j): try every jump length allowed by the
    current cell, first rightwards then downwards, marking the path in
    ``res``. Returns True once the bottom-right corner is reached."""
    if i == n - 1 and j == n - 1:
        # Destination reached: include it in the path.
        res[i][j] = 1
        return True
    if is_safe(i, j, n, maze):
        res[i][j] = 1  # tentatively include this cell
        # maze[i][j] is the maximum jump length from this cell.
        for jump in range(1, maze[i][j] + 1):
            if jump >= n:
                break
            if is_path(i, j + jump, res, n, maze):
                return True
            if is_path(i + jump, j, res, n, maze):
                return True
        res[i][j] = 0  # backtrack: no continuation worked
        return False
    return False
def is_safe(i, j, n, maze):
    """True when (i, j) lies inside the n x n grid and that cell is non-zero
    (i.e. it allows at least one jump)."""
    inside = 0 <= i < n and 0 <= j < n
    return bool(inside and maze[i][j])
def print_sol(n, sol):
    """Print the first n rows/columns of the solution grid, each cell
    followed by a space, one row per line."""
    for row in sol[:n]:
        for value in row[:n]:
            print(value, end=" ")
        print()
if __name__ == "__main__":
    # Read t test cases; each gives n followed by n rows of jump lengths.
    t = int(input())
    while(t>0):
        n = int(input())
        maze = [[0 for i in range(n)] for j in range(n)]
        for i in range(n):
            maze[i] = [int(x) for x in input().strip().split()]
        solve(n, maze)
        t=t-1
|
[
"sudohemant@gmail.com"
] |
sudohemant@gmail.com
|
80edbd1d65d545f84f4122c4822dc23a4c57785d
|
70d39e4ee19154a62e8c82467ef75b601e584738
|
/pyth3/mysql/just_mysql_pandas_things_.py
|
84e264411f2cd3f02839fd45febb7a3e86ce9f2e
|
[] |
no_license
|
babywyrm/sysadmin
|
6f2724be13ae7e5b9372278856a8c072073beffb
|
2a5f3d29c7529bc917d4ff9be03af30ec23948a5
|
refs/heads/master
| 2023-08-16T03:50:38.717442
| 2023-08-16T03:05:55
| 2023-08-16T03:05:55
| 210,228,940
| 10
| 5
| null | 2023-05-01T23:15:31
| 2019-09-22T23:42:50
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 6,634
|
py
|
# MySQL Querying Using Pandas
# Author: Elena Adlaf
# Version 1.2, 10/16/17
# This Python file shows how to query results from table, 't', in database, 'af', stored on a local MySQL server while
# importing the values directly into a Pandas dataframe.
# The table lists details about pieces created by the custom furniture business, Artfully Functional,
# with fields for ID, size type, year built, labor hours, materials cost, sale prices (wholesale or retail,
# before or after sales tax) and potential profits. A second table, 'a', contains additional information and is
# used to demonstrate queries indexing or joining multiple tables.

# Import modules.
import mysql.connector
import pandas as pd

# Create variables for 1) a connector to the local database with user and password and 2) the read-to-pandas command
# NOTE(review): '...' is a placeholder password; supply real credentials via
# configuration rather than hard-coding them.
cnx = mysql.connector.connect(user='root', password='...', database='af')
g = pd.read_sql_query

# To import the entire table, 't', into a Pandas dataframe:
df = g('SELECT * FROM t', cnx)

# Look at the shape of the dataframe and index the first five records for all of the fields.
print(df.shape)
print(df.iloc[0:5, 0:14])
print(df.iloc[0:5, 14:])

# Most tables will likely be too large to import in full, so we can import only the data of interest by
# querying the database through Pandas.
# Return the column names and column info of the table, 't'.
col_names = g('SHOW COLUMNS FROM t', cnx)
print(col_names)

# Select only Name and Retail_High columns and limit the number of records returned.
namehighretail_firstten = g('SELECT Name, Retail_High FROM t LIMIT 10', cnx)
print(namehighretail_firstten)

# Select all unique values from the Yr column.
years = g('SELECT DISTINCT Yr FROM t', cnx)
print(years)

# Return the number of records in the table.
num_tablerows = g('SELECT COUNT(*) FROM t', cnx)
print(num_tablerows)

# Return the number of non-missing values in the Labor column.
num_laborvalues = g('SELECT COUNT(Labor) FROM t', cnx)
print(num_laborvalues)

# Return the number of distinct values in Yr column.
num_years = g('SELECT COUNT(DISTINCT Yr) FROM t', cnx)
print(num_years)

# Select names of all pieces with a Retail_Low value greater than or equal to $500
over500usd = g('SELECT Name FROM t WHERE Retail_Low >= 500', cnx)
print(over500usd)

# Select the ID number of all pieces whose Sale value is null.
idprofitnull = g('SELECT ID FROM t WHERE Sale IS NULL', cnx)
print(idprofitnull)

# Return the number of items whose build year is not 2017.
num_not2017 = g('SELECT COUNT(*) FROM t WHERE Yr <> 2017', cnx)
print(num_not2017)

# Select name and location (disposition) of items with a low retail price over 100 or a low wholesale price over 50.
nameloc_price = g('SELECT Name, Disposition FROM t WHERE Retail_Low > 100 OR Wholesale_Low > 50', cnx)
print(nameloc_price)

# Select the labor hours of items built in 2015 or 2017 and located at Holloway or Art Show
laborhours_notforsale = g("SELECT Labor FROM t WHERE (Yr = 2015 OR Yr = 2017) AND (Disposition = 'Holloway' OR "
                          "Disposition = 'Art Show')", cnx)
print(laborhours_notforsale)

# Select the class of items whose potential profit (retail high) is between 10 and 50.
class_ptlprofit = g('SELECT Class_version FROM t WHERE Ptnlprofit_rtl_High BETWEEN 10 AND 50', cnx)
print(class_ptlprofit)

# Select the disposition, class, and potential high wholesale profit for the items with disposition as Classic Tres,
# Art Show or For Sale. Calculate the sum of the returned potential profits.
ptlprofit_forsale = g("SELECT Disposition, Class_version, Ptnlprofit_whsle_High FROM t WHERE Disposition IN "
                      "('Classic Tres', 'Art Show', 'For Sale') AND Ptnlprofit_whsle_High > 0", cnx)
print(ptlprofit_forsale)
print(ptlprofit_forsale.sum(axis=0, numeric_only=True))

# Select the ID, name and class_version designation of all C-class items.
c_class_items = g("SELECT ID, Name, Class_version FROM t WHERE Class_version LIKE 'C%'", cnx)
print(c_class_items)

# Select name and retail prices of all tables. Calculate the lowest and highest table prices.
tables_retail = g("SELECT Name, Retail_Low, Retail_High FROM t WHERE Name LIKE '% Table' AND Retail_Low <> 0", cnx)
print(tables_retail)
print(tables_retail.agg({'Retail_Low' : ['min'], 'Retail_High' : ['max']}))

# Select names and labor hours of tables that don't include side tables.
noside = g("SELECT Name, Labor FROM t WHERE Name LIKE '% Table' AND Name NOT LIKE '%_ide %'", cnx)
print(noside)

# Return the average retail high price.
ave_rtlhigh = g('SELECT AVG(Retail_High) FROM t', cnx)
print(ave_rtlhigh)

# Return the sum of the retail low prices minus the sum of the Materials_Base column aliased as est_profit.
rtllow_minuscost = g('SELECT SUM(Retail_Low) - SUM(Materials_Base) AS est_profit FROM t', cnx)
print(rtllow_minuscost)

# Return the maximum materials base value increased by 20% aliased as max_material.
max_material = g('SELECT MAX(Materials_Base)*1.2 AS max_material FROM t', cnx)
print(max_material)

# Select the name and price of the lowest wholesale priced cabinet that is for sale, aliased as cabinet_low.
cabinet_low = g("SELECT Name, MIN(Wholesale_Low) AS cabinet_low FROM t WHERE Name LIKE '% Cabinet' AND Disposition = "
                "'For Sale'", cnx)
print(cabinet_low)

# Select names of pieces built in 2017 in descending order by retail_high price.
high_to_low_priced = g('SELECT Name FROM t WHERE Yr = 2017 ORDER BY Retail_High DESC', cnx)
print(high_to_low_priced)

# Select number of items and years built grouped by year in descending order by count.
groupyear_sortcount = g('SELECT COUNT(*), Yr FROM t GROUP BY Yr ORDER BY COUNT(*) DESC', cnx)
print(groupyear_sortcount)

# Select Class_version categories (A1, B1, C1) aliased as Size and average wholesale low price grouped by Size.
size_aveprice = g("SELECT Class_version AS Size, AVG(Wholesale_Low) FROM t WHERE Class_version IN ('A1', 'B1', "
                  "'C1') GROUP BY Size", cnx)
print(size_aveprice)

# The items in tables 't' and 'a' have the same ID column, so information can be queried from both simultaneously with
# the JOIN command.
# Return the column names and column info of the table, 'a'.
table_a_colnames = g('SHOW COLUMNS FROM a', cnx)
print(table_a_colnames)

# Select the ID and disposition from table 't' and the corresponding number of website photos for those items from
# table 'a'.
webphotos = g('SELECT ID, Class_version, Disposition, Website FROM t JOIN a ON ID = ID2 WHERE Website > 0', cnx)
print(webphotos)

# After querying is complete, cnx.close() closes the connection to the database.
cnx.close()
|
[
"noreply@github.com"
] |
babywyrm.noreply@github.com
|
a54dba4d3ebcdf78eb1020f011bb1570ffd11720
|
3595d51ff2499bb990b87a25b17516edf6907696
|
/boards/models.py
|
a9fc0f9597a2654f291a202c6c60a21410fac535
|
[
"Apache-2.0"
] |
permissive
|
maxlipsky/infomate.club
|
01fa55b3dfd318212b0c328dd48019b585d3ef9d
|
9729b568622120f8cba3d22fefdcfba81d1b5b88
|
refs/heads/master
| 2020-12-08T19:53:37.231403
| 2020-01-16T19:04:14
| 2020-01-16T19:04:14
| 233,079,581
| 1
| 0
|
Apache-2.0
| 2020-01-10T15:49:08
| 2020-01-10T15:49:07
| null |
UTF-8
|
Python
| false
| false
| 6,435
|
py
|
import uuid
from datetime import datetime, timedelta
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.db import models
from slugify import slugify
from boards.icons import DOMAIN_ICONS
class Board(models.Model):
    """A curated board: a slugged, ordered page of feed blocks with curator info."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    slug = models.SlugField(unique=True)
    name = models.CharField(max_length=120, db_index=True)
    avatar = models.URLField(max_length=512, null=True)
    curator_name = models.CharField(max_length=120)
    curator_title = models.CharField(max_length=120)
    curator_url = models.URLField(null=True)
    curator_bio = models.CharField(max_length=120, null=True)
    curator_footer = models.TextField(null=True)
    schema = models.TextField(null=True)
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()
    refreshed_at = models.DateTimeField(null=True)
    is_visible = models.BooleanField(default=True)
    is_private = models.BooleanField(default=True)
    index = models.PositiveIntegerField(default=0)

    class Meta:
        db_table = "boards"
        ordering = ["index", "name"]

    def save(self, *args, **kwargs):
        """Backfill created_at and slug on first save; always bump updated_at."""
        if self.created_at is None:
            self.created_at = datetime.utcnow()
        if not self.slug:
            self.slug = slugify(self.name).lower()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def board_name(self):
        """Display name for the board, falling back to the curator's name."""
        return self.name if self.name else self.curator_name

    def natural_refreshed_at(self):
        """Human-readable time since the last refresh ('now...' while pending)."""
        if self.refreshed_at:
            return naturaltime(self.refreshed_at)
        return "now..."
class BoardBlock(models.Model):
    """A named, ordered section of a Board that groups its feeds."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    board = models.ForeignKey(Board, related_name="blocks", on_delete=models.CASCADE, db_index=True)
    name = models.CharField(max_length=512, null=True)
    slug = models.SlugField()
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()
    index = models.PositiveIntegerField(default=0)

    class Meta:
        db_table = "board_blocks"
        ordering = ["index"]

    def save(self, *args, **kwargs):
        """Backfill created_at and slug on first save; always bump updated_at."""
        if self.created_at is None:
            self.created_at = datetime.utcnow()
        if not self.slug:
            self.slug = slugify(self.name).lower()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)
class BoardFeed(models.Model):
    """A single feed (site/RSS source) pinned to a block on a board.

    Layout is controlled by `columns` and `articles_per_column`;
    `frequency` is the feed's average posting rate per week.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    board = models.ForeignKey(Board, related_name="feeds", on_delete=models.CASCADE, db_index=True)
    block = models.ForeignKey(BoardFeed if False else BoardBlock, related_name="feeds", on_delete=models.CASCADE, db_index=True)
    name = models.CharField(max_length=512, null=True)
    comment = models.TextField(null=True)
    url = models.URLField(max_length=512)
    icon = models.URLField(max_length=512, null=True)
    rss = models.URLField(max_length=512, null=True)
    created_at = models.DateTimeField(db_index=True)
    last_article_at = models.DateTimeField(null=True)
    refreshed_at = models.DateTimeField(null=True)
    frequency = models.FloatField(default=0.0)  # per week
    columns = models.SmallIntegerField(default=1)
    articles_per_column = models.SmallIntegerField(default=15)
    index = models.PositiveIntegerField(default=0)

    class Meta:
        db_table = "board_feeds"
        ordering = ["index"]

    def save(self, *args, **kwargs):
        """Backfill created_at on first save.

        NOTE(review): `updated_at` is assigned below but BoardFeed declares no
        `updated_at` field, so the value is never persisted — confirm whether a
        field is missing or the assignment is dead code (kept for compatibility).
        """
        if not self.created_at:
            self.created_at = datetime.utcnow()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def last_articles(self):
        """Return the newest articles that fit this feed's layout.

        Fix: use the configurable `articles_per_column` instead of a hard-coded
        15, matching `articles_by_column()`. Behavior is unchanged for the
        field's default value of 15.
        """
        return self.articles.all()[:self.articles_per_column * self.columns]

    def articles_by_column(self):
        """Split the newest articles into `columns` slices of `articles_per_column`."""
        articles = self.articles.all()[:self.articles_per_column * self.columns]
        return [
            (column, articles[column * self.articles_per_column:self.articles_per_column * (column + 1)])
            for column in range(self.columns)
        ]

    def natural_last_article_at(self):
        """Human-readable age of the newest article, or None if none seen yet."""
        if not self.last_article_at:
            return None
        return naturaltime(self.last_article_at)
class Article(models.Model):
    """A single article pulled from a feed, deduplicated by `uniq_id`."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # External dedup key (e.g. from the feed entry); indexed for lookup.
    uniq_id = models.TextField(db_index=True)
    board = models.ForeignKey(Board, related_name="articles", on_delete=models.CASCADE, db_index=True)
    feed = models.ForeignKey(BoardFeed, related_name="articles", on_delete=models.CASCADE, db_index=True)
    url = models.URLField(max_length=2048)
    type = models.CharField(max_length=16)
    domain = models.CharField(max_length=256, null=True)
    title = models.CharField(max_length=256)
    image = models.URLField(max_length=512, null=True)
    description = models.TextField(null=True)
    summary = models.TextField(null=True)
    created_at = models.DateTimeField(db_index=True)
    updated_at = models.DateTimeField()

    class Meta:
        db_table = "articles"
        # Newest articles first by default.
        ordering = ["-created_at"]

    def save(self, *args, **kwargs):
        """Backfill created_at on first save; always bump updated_at."""
        if not self.created_at:
            self.created_at = datetime.utcnow()
        self.updated_at = datetime.utcnow()
        return super().save(*args, **kwargs)

    def icon(self):
        """Return an HTML snippet for the domain's icon, or "" if none mapped.

        DOMAIN_ICONS values prefixed with "fa:" are Font Awesome class names;
        anything else is treated as an image URL.
        """
        article_icon = DOMAIN_ICONS.get(self.domain)
        if not article_icon:
            return ""
        if article_icon.startswith("fa:"):
            return f"""<i class="{article_icon[3:]}"></i> """
        return f"""<img src="{article_icon}" alt="{self.domain}" class="icon"> """

    def natural_created_at(self):
        """Human-readable article age, or None if created_at is unset."""
        if not self.created_at:
            return None
        return naturaltime(self.created_at)

    def is_fresh(self):
        """Whether this article should be highlighted as new.

        The freshness window shrinks as the feed's posting frequency
        (posts per week) grows. Branch order matters: frequencies in the
        open gap 20 < f < 100 fall through to the final 6-hour rule.
        """
        frequency = self.feed.frequency
        # NOTE(review): naive utcnow compared against created_at — assumes
        # created_at is stored as naive UTC; confirm against the save() path.
        now = datetime.utcnow()
        if frequency <= 1:
            # low frequency feed — any post this week is new
            return self.created_at > now - timedelta(days=7)
        elif frequency <= 20:
            # average frequency — mark today posts
            return self.created_at > now - timedelta(days=1)
        elif frequency >= 100:
            # extra high frequency — mark newest posts
            return self.created_at > now - timedelta(hours=3)
        # normal frequency - mark 6-hour old posts
        return self.created_at > now - timedelta(hours=6)
|
[
"me@vas3k.ru"
] |
me@vas3k.ru
|
b8241865b3838ea090162eb428a1c8b48348b10e
|
e692a9074d21c456dbdcb938ce7c74d8254f6ad9
|
/Module 010/module 010.py
|
815c2efdfd0e3789bf37330b86d323acc2079d0b
|
[] |
no_license
|
Marius-Juston/ECE-110
|
e29b08d5a305a315467635a138ef62a1a638e4ed
|
962761b9bb23b02cc2a687bc691d568db8c82b31
|
refs/heads/master
| 2022-05-29T08:17:58.809019
| 2020-05-05T16:48:45
| 2020-05-05T16:48:45
| 261,530,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.figure import Figure
if __name__ == '__main__':
    # Parse the CircuitJS export: the header line carries the sample timestep
    # (5th space-separated token); every following line is one integer sample.
    with open('data-20200502-1713.circuitjs.txt', 'r') as dump:
        header = dump.readline().split(' ')
        dt = float(header[4])
        samples = [int(row) for row in dump]

    # Time axis in seconds, one point per sample.
    t = np.arange(len(samples)) * dt

    # A4 landscape figure (11.69 x 8.27 inches).
    figure: Figure = plt.figure(figsize=(11.69, 8.27))
    axes: Axes = figure.gca()
    axes.plot(t, samples)
    axes.set_xlabel("Time (s)")
    axes.set_ylabel("Voltage (V)")
    axes.set_title("Audio Output (mjuston2)")
    figure.tight_layout()
    figure.savefig("figure.png")
    # plt.show()
|
[
"Marius.juston@hotmail.fr"
] |
Marius.juston@hotmail.fr
|
2c4e9748a4fe10c33bdca30bdba1637018100b86
|
9ec4bc3cdba9e46fe05712daeec3e35f5b6bb704
|
/hallicrafter2/device/ics.py
|
ce1fe1212d2c1fc2085fe53bbaeb7981d6a1c3fd
|
[] |
no_license
|
derekmerck/hallicrafter
|
04d7d31017079fcc0c9c9361ad7e653f6e0e6418
|
b9439bb9f9b311ca1f8a224ce25c64c836901381
|
refs/heads/master
| 2020-06-02T03:23:23.086094
| 2019-09-03T02:06:03
| 2019-09-03T02:06:03
| 191,018,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
from .device import Device
class SirenIC(Device):
    """Driver for a UM3561 siren-generator IC.

    Pinout (see https://www.instructables.com/id/Siren-Generation-using-IC-UM3561/):

      1. sel1
      2. gnd
      3. out -> 10k ohm -> NPN transistor that drives speaker gnd line
      4. not connected (testing)
      5. active (3-5vin)
      6. sel2
      7. osc1
      8. osc2 bridge -> osc1 with a 220k ohm resistor

    Sound selection truth table:

      S1   S2   Sound
      --------------------
      NC   NC   Police (default)
      5v   NC   Fire brigade
      Gnd  NC   Ambulance
      Any  5v   Machine gun
    """

    class AlarmProfile(object):
        # Closed set of supported siren sounds.
        POLICE = "police"
        FIRE = "fire"
        AMBULANCE = "ambulance"
        MACHINE_GUN = "machine gun"

    def __init__(self, pin_active, pin_sel1, pin_sel2, name="ic_srn0", interval=0.1, *args, **kwargs):
        Device.__init__(self, name=name, interval=interval, *args, **kwargs)
        import digitalio

        def as_output(pin):
            # Configure a board pin as a digital output line.
            dio = digitalio.DigitalInOut(pin)
            dio.direction = digitalio.Direction.OUTPUT
            return dio

        self.pin_active = as_output(pin_active)
        self.pin_sel1 = as_output(pin_sel1)
        self.pin_sel2 = as_output(pin_sel2)
        self.data["active"] = False
        self.data["profile"] = SirenIC.AlarmProfile.POLICE

    def write(self):
        """Drive the select pins for the current profile, then gate the output pin."""
        # profile -> (sel1 level, sel2 level), mirroring the truth table above.
        levels = {
            SirenIC.AlarmProfile.POLICE: (False, False),
            SirenIC.AlarmProfile.FIRE: (True, False),
            SirenIC.AlarmProfile.AMBULANCE: (False, True),
            SirenIC.AlarmProfile.MACHINE_GUN: (True, True),
        }
        profile = self.data["profile"]
        if profile not in levels:
            raise ValueError("Unknown alarm profile {}".format(profile))
        self.pin_sel1.value, self.pin_sel2.value = levels[profile]
        self.pin_active.value = self.data["active"]
        # print("Siren is {}".format(self.pin_active.value))
|
[
"derek_merck@brown.edu"
] |
derek_merck@brown.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.