blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c4abda4240fb87193b4e822cb56db6d9f60a404
|
a1565539ee37e6c25d3b0d1483dd788103aced61
|
/tensorflow/python/training/tracking/layer_utils.py
|
c325884bd1d6b21b88d183f5d8a33c00f1d6195d
|
[
"Apache-2.0"
] |
permissive
|
mrader1248/tensorflow
|
b954d8989706b1792c7d013e54f816a1b1485807
|
3c6a2292fff3fe3ec1b7d62042cb15e457bc6d82
|
refs/heads/master
| 2020-04-07T20:08:16.629986
| 2019-08-05T22:40:18
| 2019-08-05T22:40:18
| 158,676,439
| 0
| 0
|
Apache-2.0
| 2018-11-22T09:44:49
| 2018-11-22T09:44:49
| null |
UTF-8
|
Python
| false
| false
| 4,195
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.tracking import object_identity
def is_layer(obj):
  """Implicit check for Layer-like objects."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  # Classes themselves (as opposed to instances) are never treated as layers.
  if isinstance(obj, type):
    return False
  return hasattr(obj, "_is_layer")
def has_weights(obj):
  """Implicit check for Layer-like objects."""
  # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
  # Classes themselves never count as weight-bearing objects.
  if isinstance(obj, type):
    return False
  try:
    return (hasattr(obj, "trainable_weights")
            and hasattr(obj, "non_trainable_weights"))
  except ValueError:
    # The model/layer might not be built yet, in which case attribute access
    # can raise ValueError; treat the object as an instance with weights.
    return True
def filter_empty_layer_containers(layer_list):
  """Filter out empty Layer-like containers and uniquify."""
  seen = object_identity.ObjectIdentitySet()
  # Depth-first walk; reversed so pop() yields items in original order.
  stack = layer_list[::-1]
  kept = []
  while stack:
    candidate = stack.pop()
    if candidate in seen:
      continue
    seen.add(candidate)
    if is_layer(candidate):
      kept.append(candidate)
    elif hasattr(candidate, "layers"):
      # Trackable data structures will not show up in ".layers" lists, but
      # the layers they contain will.
      stack.extend(candidate.layers[::-1])
  return kept
def gather_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the trainable weights for an object with sub-layers.

  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.

  Returns:
    A list of collected trainable weights/variables.
  """
  # A non-trainable container contributes nothing, regardless of children.
  if not trainable:
    return []
  collected = []
  for layer in sub_layers:
    collected.extend(layer.trainable_weights)
  collected.extend(v for v in extra_variables if v.trainable)
  return collected
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
  """Lists the non-trainable weights for an object with sub-layers.

  Args:
    trainable: Whether the object collecting the variables is trainable.
    sub_layers: A flat list of Layer objects owned by this object, to collect
      variables from.
    extra_variables: Any extra variables to include. Their `.trainable`
      property is used to categorize them.

  Returns:
    A list of collected non-trainable weights/variables.
  """
  trainable_extras = [v for v in extra_variables if v.trainable]
  non_trainable_extras = [v for v in extra_variables if not v.trainable]

  non_trainable = []
  for layer in sub_layers:
    non_trainable.extend(layer.non_trainable_weights)

  if trainable:
    return non_trainable + non_trainable_extras

  # When the container itself is frozen, even "trainable" children are
  # reported as non-trainable, ordered ahead of the rest.
  frozen_trainable = []
  for layer in sub_layers:
    frozen_trainable.extend(layer.trainable_weights)
  return (frozen_trainable + trainable_extras
          + non_trainable + non_trainable_extras)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
7e0ab15b65a9e964e5683463dfe8b82159d7d9fb
|
2d040fde3d3638939718ccfd42eff9e35c352331
|
/EXAMPLES/EDABIT/EXPERT/141_8_bit_arithmetic.py
|
a964ec7e6bb1367a9427d8836bfff645514c243b
|
[] |
no_license
|
ceyhunsahin/TRAINING
|
fcb9837833d2099dc5724f46a675efe4edf3d5f1
|
73cc7dba8447e4066ccfe7adadc727d134ffbf0b
|
refs/heads/master
| 2023-07-13T01:12:53.160300
| 2021-08-17T11:50:51
| 2021-08-17T11:50:51
| 397,591,819
| 1
| 0
| null | 2021-08-18T12:25:45
| 2021-08-18T12:25:44
| null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
"""
https://edabit.com/challenge/cXoBRe9RdDGeLNfaD EXPERT
8 Bit Arithmetic
You will be given a simple string expression representing an addition or subtraction in 8-bit 2's complement arithmetic. Write a function that returns the result in base 10 followed by a binary representation. If any of the values are outside the range of 8-bit 2's complement, return "Overflow".
Examples
eight_bit("3 + 12") ➞ (15, "11 + 1100 = 1111")
eight_bit("3 - 12") ➞ (-9, "11 - 1100 = 11110111")
eight_bit("-18 - 6") ➞ (-24, "11101110 - 110 = 11101000")
eight_bit("65 + 70") ➞ "Overflow"
eight_bit("-127 + 127") ➞ (0, "10000001 + 1111111 = 0")
Notes
Numbers in 8-bit 2's complement notation can range from -128 to 127. The eighth (leftmost) bit signifies a negative number. See Resources for details.
"""
def eight_bit(exp):
    """Evaluate "a +/- b" in 8-bit two's-complement arithmetic.

    Args:
        exp: String of the form "<int> <op> <int>" where <op> is "+" or "-".

    Returns:
        A tuple (result, "binA op binB = binResult") where negative values are
        shown as their 8-bit two's-complement bit pattern, or the string
        "Overflow" if an operand or the result falls outside [-128, 127].
    """
    # Parse explicitly instead of eval()-ing the raw string: safer, and the
    # expression grammar is only ever "<int> <op> <int>".
    a, op, b = exp.split(" ")
    x, y = int(a), int(b)

    def to_bin(v):
        # Positive values print naturally; zero and negatives are masked to
        # their 8-bit two's-complement pattern (0 prints as "0").
        return bin(v)[2:] if v > 0 else bin(v & 255)[2:]

    if not (-128 <= x <= 127) or not (-128 <= y <= 127):
        return "Overflow"
    result = x + y if op == "+" else x - y
    if not (-128 <= result <= 127):
        return "Overflow"
    return (result, "{} {} {} = {}".format(to_bin(x), op, to_bin(y), to_bin(result)))

eight_bit("-18 - 6") #➞ (-24, "11101110 - 110 = 11101000")
#eight_bit("65 + 70") #➞ "Overflow"
|
[
"mustafaankarali35@gmail.com"
] |
mustafaankarali35@gmail.com
|
bb1239f3ba44a52dca073e7a2bb09bd13ee12364
|
29d9e33cf882805017cb50653233c5b3759bba0a
|
/services/core-api/app/api/mines/incidents/models/mine_incident_document_xref.py
|
2789000ba92038d50a310d76ce81ad0b613a9dd0
|
[
"Apache-2.0"
] |
permissive
|
yasserhu/mds
|
e634def83ad9825f58986305a638cf4a5010d40f
|
d7669f4c805e4a4006f30f649f324b3e88bc9aab
|
refs/heads/master
| 2023-08-03T18:20:57.175738
| 2021-06-17T20:44:10
| 2021-06-17T20:44:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from sqlalchemy.ext.associationproxy import association_proxy
from app.api.utils.models_mixins import Base
from app.extensions import db
class MineIncidentDocumentXref(Base):
    """Association (xref) row linking a MineDocument to a MineIncident.

    Each row ties one mine document to one mine incident and records the
    incident-document type code. Several document attributes are exposed
    read-through via association proxies to the joined MineDocument.
    """
    __tablename__ = "mine_incident_document_xref"

    # Surrogate primary key; the value is generated by the database
    # (server_default=FetchedValue()).
    mine_incident_document_xref_guid = db.Column(
        UUID(as_uuid=True), primary_key=True, server_default=FetchedValue())
    # FK to the underlying document record.
    mine_document_guid = db.Column(
        UUID(as_uuid=True), db.ForeignKey('mine_document.mine_document_guid'))
    # FK to the incident this document is attached to; DB supplies a default.
    mine_incident_id = db.Column(
        db.Integer, db.ForeignKey('mine_incident.mine_incident_id'), server_default=FetchedValue())
    # Required classification code for the document within the incident.
    mine_incident_document_type_code = db.Column(
        db.String,
        db.ForeignKey('mine_incident_document_type_code.mine_incident_document_type_code'),
        nullable=False)

    # Related document, eagerly loaded with a JOIN.
    mine_document = db.relationship('MineDocument', lazy='joined')

    # Read-through proxies to fields of the related MineDocument.
    mine_guid = association_proxy('mine_document', 'mine_guid')
    document_manager_guid = association_proxy('mine_document', 'document_manager_guid')
    document_name = association_proxy('mine_document', 'document_name')
    upload_date = association_proxy('mine_document', 'upload_date')

    def __repr__(self):
        return '<MineIncidentDocumentXref %r>' % self.mine_incident_document_xref_guid
|
[
"bcgov-csnr-cd@gov.bc.ca"
] |
bcgov-csnr-cd@gov.bc.ca
|
40598402f54008a18aa6cfc5a2e992fbf8013f3b
|
bc441bb06b8948288f110af63feda4e798f30225
|
/cmdb_sdk/model/topboard/sprint_pb2.pyi
|
91317ff37629e11380429ff0970bac9a86f52ac4
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,313
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from cmdb_sdk.model.topboard.issue_basic_pb2 import (
IssueBasic as cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic,
)
from cmdb_sdk.model.topboard.product_basic_pb2 import (
ProductBasic as cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
# Aliases to the builtins, so generated message fields may shadow names such
# as `bool` or `int` without losing access to the real types.
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
    # Python 2 only: `buffer` and `unicode` do not exist on Python 3.
    builtin___buffer = buffer
    builtin___unicode = unicode
class Sprint(google___protobuf___message___Message):
    """Generated type stub for the `Sprint` protobuf message.

    Auto-generated by generate_proto_mypy_stubs.py — do not edit by hand.
    Scalar fields are `Text`; `product` and `issues` are repeated message
    fields exposed as read-only composite containers.
    """
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    name = ... # type: typing___Text
    instanceId = ... # type: typing___Text
    title = ... # type: typing___Text
    status = ... # type: typing___Text
    goal = ... # type: typing___Text
    startTime = ... # type: typing___Text
    endTime = ... # type: typing___Text

    @property
    def product(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic]: ...

    @property
    def issues(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic]: ...

    def __init__(self,
        *,
        product : typing___Optional[typing___Iterable[cmdb_sdk___model___topboard___product_basic_pb2___ProductBasic]] = None,
        issues : typing___Optional[typing___Iterable[cmdb_sdk___model___topboard___issue_basic_pb2___IssueBasic]] = None,
        name : typing___Optional[typing___Text] = None,
        instanceId : typing___Optional[typing___Text] = None,
        title : typing___Optional[typing___Text] = None,
        status : typing___Optional[typing___Text] = None,
        goal : typing___Optional[typing___Text] = None,
        startTime : typing___Optional[typing___Text] = None,
        endTime : typing___Optional[typing___Text] = None,
        ) -> None: ...

    # FromString accepts a wider input type on Python 2 (buffer/unicode).
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> Sprint: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> Sprint: ...

    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"endTime",b"endTime",u"goal",b"goal",u"instanceId",b"instanceId",u"issues",b"issues",u"name",b"name",u"product",b"product",u"startTime",b"startTime",u"status",b"status",u"title",b"title"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
52fc95ad5b8193fbb06f7d690e85b19da65068ea
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02993/s796060411.py
|
4dd88987d6c5bd8bf3fa6a30abda20e3ca9e5462
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
# Print "Bad" if the input string contains two equal adjacent characters,
# otherwise print "Good".
text = input()
prev = None
seen_any = False
for ch in text:
    if seen_any and prev == ch:
        print("Bad")
        quit()
    prev = ch
    seen_any = True
print("Good")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d7af8673a8c37fb128458cbca49042bd96905906
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2722/60791/263371.py
|
f6c0db0251d1b057fb150e5cd509494e9cbe324d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def solve(i):
    """Print 'YES' when i is a multiple of 5, otherwise 'NO'."""
    if i % 5 == 0:
        print('YES')
    else:
        print('NO')
# Read the number of test cases, then run solve() on each input value.
num_cases = int(input())
for _ in range(num_cases):
    solve(int(input()))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e93c4d081120f85a8ead644f7cd1de2ce0c2c312
|
c8a04384030c3af88a8e16de4cedc4ef8aebfae5
|
/stubs/pandas/tests/sparse/test_indexing.pyi
|
74ad6a2d4deb63a95e1f23407b8600c610472fa1
|
[
"MIT"
] |
permissive
|
Accern/accern-xyme
|
f61fce4b426262b4f67c722e563bb4297cfc4235
|
6ed6c52671d02745efabe7e6b8bdf0ad21f8762c
|
refs/heads/master
| 2023-08-17T04:29:00.904122
| 2023-05-23T09:18:09
| 2023-05-23T09:18:09
| 226,960,272
| 3
| 2
|
MIT
| 2023-07-19T02:13:18
| 2019-12-09T20:21:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,984
|
pyi
|
# Stubs for pandas.tests.sparse.test_indexing (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
# pylint: disable=unused-argument,redefined-outer-name,no-self-use,invalid-name
# pylint: disable=relative-beyond-top-level
from typing import Any
class TestSparseSeriesIndexing:
    """Stub for the sparse-Series indexing test class.

    Auto-generated by stubgen; bodies are intentionally elided (`...`).
    """
    # Fixtures populated in setup_method (types unknown from the stub).
    orig: Any = ...
    sparse: Any = ...
    def setup_method(self, method: Any) -> None:
        ...
    def test_getitem(self) -> None:
        ...
    def test_getitem_slice(self) -> None:
        ...
    def test_getitem_int_dtype(self) -> None:
        ...
    def test_getitem_fill_value(self) -> None:
        ...
    def test_getitem_ellipsis(self) -> None:
        ...
    def test_getitem_slice_fill_value(self) -> None:
        ...
    def test_loc(self) -> None:
        ...
    def test_loc_index(self) -> None:
        ...
    def test_loc_index_fill_value(self) -> None:
        ...
    def test_loc_slice(self) -> None:
        ...
    def test_loc_slice_index_fill_value(self) -> None:
        ...
    def test_loc_slice_fill_value(self) -> None:
        ...
    def test_iloc(self) -> None:
        ...
    def test_iloc_fill_value(self) -> None:
        ...
    def test_iloc_slice(self) -> None:
        ...
    def test_iloc_slice_fill_value(self) -> None:
        ...
    def test_at(self) -> None:
        ...
    def test_at_fill_value(self) -> None:
        ...
    def test_iat(self) -> None:
        ...
    def test_iat_fill_value(self) -> None:
        ...
    def test_get(self) -> None:
        ...
    def test_take(self) -> None:
        ...
    def test_take_fill_value(self) -> None:
        ...
    def test_reindex(self) -> None:
        ...
    def test_fill_value_reindex(self) -> None:
        ...
    def test_fill_value_reindex_coerces_float_int(self) -> None:
        ...
    def test_reindex_fill_value(self) -> None:
        ...
    def test_reindex_nearest(self) -> None:
        ...
    def tests_indexing_with_sparse(self, kind: Any, fill: Any) -> None:
        ...
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
    """Stub for the MultiIndex variant of the sparse-Series indexing tests.

    Auto-generated by stubgen; bodies are intentionally elided (`...`).
    """
    # Fixtures re-declared here; presumably rebuilt with a MultiIndex.
    orig: Any = ...
    sparse: Any = ...
    def setup_method(self, method: Any) -> None:
        ...
    def test_getitem_multi(self) -> None:
        ...
    def test_getitem_multi_tuple(self) -> None:
        ...
    def test_getitems_slice_multi(self) -> None:
        ...
    def test_loc(self) -> None:
        ...
    def test_loc_multi_tuple(self) -> None:
        ...
    def test_loc_slice(self) -> None:
        ...
    def test_reindex(self) -> None:
        ...
class TestSparseDataFrameIndexing:
    """Stub for the sparse-DataFrame indexing test class.

    Auto-generated by stubgen; bodies are intentionally elided (`...`).
    """
    def test_getitem(self) -> None:
        ...
    def test_getitem_fill_value(self) -> None:
        ...
    def test_loc(self) -> None:
        ...
    def test_loc_index(self) -> None:
        ...
    def test_loc_slice(self) -> None:
        ...
    def test_iloc(self) -> None:
        ...
    def test_iloc_slice(self) -> None:
        ...
    def test_at(self) -> None:
        ...
    def test_at_fill_value(self) -> None:
        ...
    def test_iat(self) -> None:
        ...
    def test_iat_fill_value(self) -> None:
        ...
    def test_take(self) -> None:
        ...
    def test_take_fill_value(self) -> None:
        ...
    def test_reindex(self) -> None:
        ...
    def test_reindex_fill_value(self) -> None:
        ...
class TestMultitype:
    """Stub for mixed-dtype sparse indexing tests.

    Auto-generated by stubgen; bodies are intentionally elided (`...`).
    """
    # Fixtures populated in setup_method (types unknown from the stub).
    cols: Any = ...
    string_series: Any = ...
    int_series: Any = ...
    float_series: Any = ...
    object_series: Any = ...
    sdf: Any = ...
    ss: Any = ...
    def setup_method(self, method: Any) -> None:
        ...
    def test_frame_basic_dtypes(self) -> None:
        ...
    def test_frame_indexing_single(self) -> None:
        ...
    def test_frame_indexing_multiple(self) -> None:
        ...
    def test_series_indexing_single(self) -> None:
        ...
    def test_series_indexing_multiple(self) -> None:
        ...
|
[
"josua.krause@gmail.com"
] |
josua.krause@gmail.com
|
71fcc95c25ad1035690e8b7af74cdf1bf2de73f5
|
e467d1860dfc1f42e493eb3358003801b4959677
|
/accounts/serializers.py
|
85a0e0d22dfc8a259c4ea464436f534b6a5cef25
|
[] |
no_license
|
Oswaldinho24k/e-commerce-api-fixtercamp
|
e1c8cc0b1a67c3347a14892ed584873b7d208316
|
c71f2eca9828f04b8272ecfc08d3594eaa0fbdcd
|
refs/heads/master
| 2020-03-15T13:17:14.230879
| 2018-05-11T00:41:09
| 2018-05-11T00:41:09
| 132,163,052
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 877
|
py
|
from .models import Profile, ItemCart, UserCart
from django.contrib.auth.models import User
from rest_framework import serializers
from products.models import Product
from orders.serializers import OrderSerializer
class ProfileSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Profile model."""
    #user = BasicUserSerializer(many=False, read_only=True)
    class Meta:
        model = Profile
        fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer for users with nested read-only orders and profile.

    The password is write-only and is hashed via set_password() on create,
    never stored or echoed back in plain text.
    """
    orders = OrderSerializer(many=True, read_only=True)
    profile = ProfileSerializer(many=False, read_only=True)
    password = serializers.CharField(write_only=True)

    class Meta:
        model = User
        fields = ['username', 'email', 'id', 'password', 'orders', 'profile']

    def create(self, validated_data):
        """Create a User, hashing the supplied password before saving."""
        raw_password = validated_data.pop('password')
        new_user = User.objects.create(**validated_data)
        new_user.set_password(raw_password)
        new_user.save()
        return new_user
|
[
"oswalfut_96@hotmail.com"
] |
oswalfut_96@hotmail.com
|
5c2a2fd9914c3fefccd327f74467755d9217473a
|
0a973640f0b02d7f3cf9211fcce33221c3a50c88
|
/.history/src/main_20210201154004.py
|
4749d7b3238241af15c20e74de05c8a23fe43913
|
[] |
no_license
|
JiajunChen123/IPO_under_review_crawler
|
5468b9079950fdd11c5e3ce45af2c75ccb30323c
|
031aac915ebe350ec816c05a29b5827fde588567
|
refs/heads/main
| 2023-02-26T08:23:09.622725
| 2021-02-04T10:11:16
| 2021-02-04T10:11:16
| 332,619,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
import datetime
import os
from preprocessing import data_process
from szcyb_crawler import data_getter, index_getter
from utils import load_pickle,save_pickle
import random,time
from html_gen import gen_html
def szcyb_check_update():
    """Check the 'szcyb' IPO project index for updates and refresh the cache.

    Loads the saved project index and stocks-info pickles; for every project
    whose "updtdt" is today, re-fetches and re-processes its data, regenerates
    its HTML, and updates the saved stocks-info pickle. If the pickles do not
    exist yet (first run), fetches and processes every project from scratch.
    """
    prjtype = 'ipo'
    try:
        # NOTE: this load doubles as the first-run probe — if the index pickle
        # is missing, FileNotFoundError routes us to the full-fetch branch.
        proj_list_old = load_pickle(os.getcwd() + '/saved_config/szcyb_index.pkl')
        proj_list_new = index_getter(prjtype)
        stocksInfo = load_pickle(os.getcwd() + '/saved_config/szcyb_stocksInfo.pkl')
        today = datetime.date.today().strftime('%Y-%m-%d')
        updated_idx = [index for (index, d) in enumerate(proj_list_new)
                       if d["updtdt"] == today]
        if updated_idx == []:
            print("Nothing has changed!")
            return
        print("there are {} projects have been updated!".format(len(updated_idx)))
        for idx in updated_idx:
            raw_data = data_getter(proj_list_new[idx]['prjid'])
            cleaned_data = data_process(raw_data)
            print('company:', cleaned_data['baseInfo']['cmpName'], 'is updated')
            html = gen_html(cleaned_data)
            # Locate this company's entry in the cached stocksInfo list.
            new_idx = next((index for (index, d) in enumerate(stocksInfo)
                            if d["baseInfo"]['cmpName'] == proj_list_new[idx]['cmpName']),
                           None)
            # BUG FIX: previously wrote stocksInfo[idx], but idx indexes
            # proj_list_new, not stocksInfo; use the located new_idx instead
            # (append when the company is not cached yet).
            if new_idx is not None:
                stocksInfo[new_idx] = cleaned_data
            else:
                stocksInfo.append(cleaned_data)
        save_pickle(stocksInfo, os.getcwd() + '/saved_config/szcyb_stocksInfo.pkl')
        print("all stocksInfo are updated!")
        return
    except FileNotFoundError:
        # First run: no saved index — fetch and process every project.
        proj_list = index_getter(prjtype)
        print('there are total {} stocks in the list'.format(len(proj_list)))
        for i, proj in enumerate(proj_list, 1):
            print('fetching {} project, {}'.format(i, proj['cmpsnm']))
            stockInfo = data_getter(str(proj['prjid']))
            cleaned_data = data_process(stockInfo)
            html = gen_html(cleaned_data)
            # file_getter(stockInfo)
            # Randomized delay to avoid hammering the remote server.
            time.sleep(random.randint(2, 5))
        print('Update completed!!!!')
        return
def update_allStockInfo(market):
    """Rebuild the aggregated stocks-info pickle from saved per-company files.

    Args:
        market: Market key — 'szcyb' (ChiNext) or 'shkcb' (STAR Market);
            selects which data sub-directory to scan.
    """
    if market == 'szcyb':
        mkt = '创业板'
    elif market == 'shkcb':
        mkt = '科创板'
    # Collect every file path under the market's data directory.
    listOfFiles = list()
    for (dirpath, dirnames, filenames) in os.walk(os.getcwd() + '/data/IPO/' + mkt):
        listOfFiles += [os.path.join(dirpath, file) for file in filenames]
    allStock_info = []
    for i in listOfFiles:
        if os.path.basename(i) == 'clean_info.pkl':
            clean_data = load_pickle(i)
            allStock_info.append(clean_data)
            print('clean up company:', os.path.dirname(i))
    # to_dataframe(allStock_info)
    # BUG FIX: previously used the undefined name `prjtype` (NameError) and
    # assigned saved_path twice; key the output pickle on the market argument.
    saved_path = os.getcwd() + '/saved_config/' + market + '_stocksInfo.pkl'
    save_pickle(allStock_info, saved_path)
    return
if __name__ == '__main__':
    # BUG FIX: previously called the undefined name check_update();
    # the function defined in this module is szcyb_check_update().
    szcyb_check_update()
    # update_allStockInfo('szcyb')
|
[
"chenjiajun.jason@outlook.com"
] |
chenjiajun.jason@outlook.com
|
ae5b87fb9080facdde7642f860f15279a5dc96ce
|
c19bcbc98555ef06276f9f0dcffc9ac35942a7c4
|
/jc/parsers/timestamp.py
|
73c9d991a3c4a45fb68f62917214a7362c2b7e28
|
[
"MIT"
] |
permissive
|
kellyjonbrazil/jc
|
4e81a5421cd20be5965baf375f4a5671c2ef0410
|
4cd721be8595db52b620cc26cd455d95bf56b85b
|
refs/heads/master
| 2023-08-30T09:53:18.284296
| 2023-07-30T17:08:39
| 2023-07-30T17:08:39
| 215,404,927
| 6,278
| 185
|
MIT
| 2023-09-08T14:52:22
| 2019-10-15T22:04:52
|
Python
|
UTF-8
|
Python
| false
| false
| 5,273
|
py
|
"""jc - JSON Convert Unix Epoch Timestamp string parser
The naive fields are based on the local time of the system the parser is
run on.
The utc fields are timezone-aware, based on the UTC timezone.
Usage (cli):
$ echo 1658599410 | jc --timestamp
Usage (module):
import jc
result = jc.parse('timestamp', timestamp_string)
Schema:
{
"naive": {
"year": integer,
"month": string,
"month_num": integer,
"day": integer,
"weekday": string,
"weekday_num": integer,
"hour": integer,
"hour_24": integer,
"minute": integer,
"second": integer,
"period": string,
"day_of_year": integer,
"week_of_year": integer,
"iso": string
},
"utc": {
"year": integer,
"month": string,
"month_num": integer,
"day": integer,
"weekday": string,
"weekday_num": integer,
"hour": integer,
"hour_24": integer,
"minute": integer,
"second": integer,
"period": string,
"utc_offset": string,
"day_of_year": integer,
"week_of_year": integer,
"iso": string
}
}
Examples:
$ echo 1658599410 | jc --timestamp -p
{
"naive": {
"year": 2022,
"month": "Jul",
"month_num": 7,
"day": 23,
"weekday": "Sat",
"weekday_num": 6,
"hour": 11,
"hour_24": 11,
"minute": 3,
"second": 30,
"period": "AM",
"day_of_year": 204,
"week_of_year": 29,
"iso": "2022-07-23T11:03:30"
},
"utc": {
"year": 2022,
"month": "Jul",
"month_num": 7,
"day": 23,
"weekday": "Sat",
"weekday_num": 6,
"hour": 6,
"hour_24": 18,
"minute": 3,
"second": 30,
"period": "PM",
"utc_offset": "+0000",
"day_of_year": 204,
"week_of_year": 29,
"iso": "2022-07-23T18:03:30+00:00"
}
}
"""
from datetime import datetime, timezone
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'Unix Epoch Timestamp string parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # Platforms this parser is known to work on.
    compatible = ['linux', 'aix', 'freebsd', 'darwin', 'win32', 'cygwin']
    tags = ['standard', 'string']

__version__ = info.version
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (Dictionary) raw structured data to process
Returns:
Dictionary. Structured data to conform to the schema.
"""
# no further processing
return proc_data
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:  (string)  text data to parse
        raw:   (boolean) unprocessed output if True
        quiet: (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    if not jc.utils.has_data(data):
        return {} if raw else _process({})

    # Keep only the first ten digits: seconds resolution, dropping any
    # sub-second digits appended to the timestamp string.
    epoch = int(data[:10])
    local_dt = datetime.fromtimestamp(epoch)
    utc_dt = datetime.fromtimestamp(epoch, tz=timezone.utc)

    raw_output = {
        'naive': {
            'year': local_dt.year,
            'month': local_dt.strftime('%b'),
            'month_num': local_dt.month,
            'day': local_dt.day,
            'weekday': local_dt.strftime('%a'),
            'weekday_num': local_dt.isoweekday(),
            'hour': int(local_dt.strftime('%I')),
            'hour_24': local_dt.hour,
            'minute': local_dt.minute,
            'second': local_dt.second,
            'period': local_dt.strftime('%p').upper(),
            'day_of_year': int(local_dt.strftime('%j')),
            'week_of_year': int(local_dt.strftime('%W')),
            'iso': local_dt.isoformat()
        },
        'utc': {
            'year': utc_dt.year,
            'month': utc_dt.strftime('%b'),
            'month_num': utc_dt.month,
            'day': utc_dt.day,
            'weekday': utc_dt.strftime('%a'),
            'weekday_num': utc_dt.isoweekday(),
            'hour': int(utc_dt.strftime('%I')),
            'hour_24': utc_dt.hour,
            'minute': utc_dt.minute,
            'second': utc_dt.second,
            'period': utc_dt.strftime('%p').upper(),
            # %z is always populated for an aware datetime; `or None` guards
            # the (unreachable here) empty-string case.
            'utc_offset': utc_dt.strftime('%z') or None,
            'day_of_year': int(utc_dt.strftime('%j')),
            'week_of_year': int(utc_dt.strftime('%W')),
            'iso': utc_dt.isoformat()
        }
    }

    return raw_output if raw else _process(raw_output)
|
[
"kellyjonbrazil@gmail.com"
] |
kellyjonbrazil@gmail.com
|
9fb705dd519ac18646718ae01fe93d8c4c571f54
|
04d8f0b5a291ec6c3470f4498dd64ab9c1845f96
|
/programs/big_prog/ex_socket/ex_3/ex_serv.py
|
298d92aa2dedf00091936d314a8cf34d4af734c6
|
[] |
no_license
|
volitilov/Python_learn
|
8c0f54d89e0ead964320d17eeddeacd5b704b717
|
f89e52655f83a9f1105689f0302ef5b0ee30a25c
|
refs/heads/master
| 2022-01-10T13:39:59.237716
| 2019-07-17T11:39:10
| 2019-07-17T11:39:10
| 70,601,503
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
#!/usr/bin/python3
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
from socketserver import (
TCPServer as TCP,
StreamRequestHandler as SRH
)
from time import ctime
import sys
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
class MyTCPHandler(SRH):
    """Line-echo handler: reads one line from the client and writes it back."""

    def handle(self):
        host = ' ' + str(self.client_address[0])
        port = ' ' + str(self.client_address[1])
        print('Connected client:' + host + port)
        self.data = self.rfile.readline().strip()
        if not self.data:
            # Empty read means the peer closed the connection.
            print(host + ':' + port + ' - disconnected')
        self.wfile.write(self.data)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
if __name__ == "__main__":
if len(sys.argv) < 3 or type(int(sys.argv[2])).__name__ != 'int':
print('Usage: python ex_serv.py hostname port')
sys.exit()
else:
ADDR = (sys.argv[1], int(sys.argv[2]))
server = TCP(ADDR, MyTCPHandler)
try:
print('Waiting for connection...')
server.serve_forever()
except KeyboardInterrupt:
print('\nServer stoped.')
server.server_close()
|
[
"volitilov@gmail.com"
] |
volitilov@gmail.com
|
0ad73b90f00d4e8815c97ebf285a05f7325bae7b
|
f33b30743110532ddae286ba1b34993e61669ab7
|
/Odd Even Linked List.py
|
5310e33a71fe301849961b055d5b3d55e22f05ea
|
[] |
no_license
|
c940606/leetcode
|
fe9dcee7a5daa4d52999d5f53253dd6dd33c348b
|
631df2ce6892a6fbb3e435f57e90d85f8200d125
|
refs/heads/master
| 2021-07-10T14:01:26.164966
| 2020-08-16T10:46:16
| 2020-08-16T10:46:16
| 186,588,449
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly-linked list."""

    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # successor node, or None at the tail
class Solution(object):
    def oddEvenList(self, head):
        """
        Group all odd-position nodes together followed by the even-position
        nodes ("odd"/"even" refers to node index, not node value).

        The problem statement requires an in-place algorithm: O(1) extra
        space and O(nodes) time. The previous implementation copied every
        node into a new list (O(n) extra space); this version relinks the
        original nodes instead, satisfying the stated constraints.

        Example: 1->2->3->4->5->NULL  =>  1->3->5->2->4->NULL

        :type head: ListNode
        :rtype: ListNode
        """
        # Zero or one node: nothing to rearrange.
        if not head or not head.next:
            return head

        odd = head             # tail of the odd-position sublist
        even = head.next       # tail of the even-position sublist
        even_head = even       # saved so the even sublist can be appended

        # Advance both tails two steps at a time, unlinking as we go.
        while even and even.next:
            odd.next = even.next
            odd = odd.next
            even.next = odd.next
            even = even.next

        # Append the even sublist after the last odd node.
        odd.next = even_head
        return head
|
[
"762307667@qq.com"
] |
762307667@qq.com
|
9373c0cd05fa128d62a95b63054c5a5f5d3ec8dc
|
97426aa614cd9e07d53dd761b55472389a3ebd60
|
/python/scripts/marketsim/scheduler.py
|
e4bb7eb635eb6e453927fdca5173fbb21bee0838
|
[] |
no_license
|
antonkolotaev/v2
|
e30a12ea710848838d85ee0b6bbd9224e40602d2
|
db64cd78577cebb366d0b3d849fdfbe694b97f94
|
refs/heads/master
| 2020-12-24T14:35:59.486012
| 2012-08-16T08:24:13
| 2012-08-16T08:24:13
| 10,887,220
| 1
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
import heapq
class _EventHandler(object):
    """Wraps a scheduled callable so the event can be cancelled later."""

    def __init__(self, handler):
        self._handler = handler
        self._cancelled = False

    def __call__(self):
        self._handler()

    def cancel(self):
        """Mark the event as cancelled; it will be skipped when popped."""
        self._cancelled = True

    @property
    def cancelled(self):
        return self._cancelled

    def __repr__(self):
        return "("+repr(self._handler) + ("-> Cancelled" if self.cancelled else "") + ")"

class Scheduler(object):
    """Discrete-event scheduler: runs handlers in action-time order.

    Events are kept in a heap keyed by (actionTime, sequence); the sequence
    number breaks ties so that events scheduled for the same time run in
    FIFO order. (Previously the heap tuples fell back to comparing
    _EventHandler objects on equal times — nondeterministic on Python 2 and
    a TypeError on Python 3.)
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all pending events and rewind the clock to 0."""
        self._elements = []
        self._currentTime = 0.
        self._counter = 0  # monotonically increasing tie-breaker

    def __repr__(self):
        return "(t=" + str(self.currentTime) + ": " + repr(self._elements) + ")"

    @property
    def currentTime(self):
        return self._currentTime

    def schedule(self, actionTime, handler):
        """Schedule handler at absolute time actionTime; returns a cancel callable."""
        assert actionTime >= self.currentTime
        eh = _EventHandler(handler)
        self._counter += 1
        heapq.heappush(self._elements, (actionTime, self._counter, eh))
        return eh.cancel

    def scheduleAfter(self, dt, handler):
        """Schedule handler dt time units after the current time."""
        self.schedule(self.currentTime + dt, handler)

    def workTill(self, limitTime):
        """Run all events strictly before limitTime, then set the clock to it."""
        # BUG FIX: replaced the Python-2-only `<>` operator with a plain
        # truthiness check (equivalent to `!= []`).
        while self._elements and self._elements[0][0] < limitTime:
            (actionTime, _seq, eh) = heapq.heappop(self._elements)
            if not eh.cancelled:
                self._currentTime = actionTime
                eh()
        self._currentTime = limitTime

    def advance(self, dt):
        """Run events during the next dt time units."""
        self.workTill(self.currentTime + dt)

    def process(self, intervalFunc, handler):
        """Repeatedly call handler at intervals drawn from intervalFunc()."""
        def h():
            handler()
            self.scheduleAfter(intervalFunc(), h)
        self.scheduleAfter(intervalFunc(), h)
world = Scheduler()
|
[
"anton.kolotaev@gmail.com"
] |
anton.kolotaev@gmail.com
|
90e2fd31f15d3ba613a447de0e0f4bb4e370a085
|
c67dc92dd0c4dc7661b9185ae7487abf086d4dc6
|
/appraisalproject/settings.py
|
4130eeb0d62b3e1e7b6a41d0a38d16ffe9f025bf
|
[
"MIT"
] |
permissive
|
felkiriinya/Quality-Appraisal
|
1f14339eddaad256994501ab2aa5e1a128b16478
|
5b9e114d96816a9d146eca7646330da7d273b6ef
|
refs/heads/master
| 2023-01-22T22:31:30.052977
| 2020-12-09T14:13:41
| 2020-12-09T14:13:41
| 319,227,932
| 2
| 0
|
MIT
| 2020-12-08T18:46:21
| 2020-12-07T06:43:12
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,515
|
py
|
"""
Django settings for appraisalproject project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import cloudinary
import cloudinary.api
import cloudinary.uploader
import django_heroku
import dj_database_url
from decouple import config,Csv
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
MODE=config("MODE", default="dev")
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
# development
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'bootstrap3',
'appraisalapp.apps.AppraisalappConfig',
'cloudinary',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'appraisalproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'appraisalproject.wsgi.application'
cloudinary.config(
cloud_name = "duhceor4r",
api_key = "988552584751394",
api_secret = "grnCc_TFy5WFWteERzMJRj3t88k"
)
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'appraisal',
'USER': 'felista',
'PASSWORD':'ilovemyself',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
AUTH_PROFILE_MODULE = 'accounts.Profile'
LOGOUT_REDIRECT_URL='/logout/'
LOGIN_REDIRECT_URL='/'
django_heroku.settings(locals())
|
[
"felkiriinya@gmail.com"
] |
felkiriinya@gmail.com
|
f2f8d6a4696af48a294dd7a3760a76943e0fa51a
|
e3fe234510d19c120d56f9a2876b7d508d306212
|
/16paddle/dssm_lm_rank/infer.py
|
46aade009862bd1903c9ce6ade3cb0918b75bd60
|
[
"Apache-2.0"
] |
permissive
|
KEVINYZY/python-tutorial
|
78b348fb2fa2eb1c8c55d016affb6a9534332997
|
ae43536908eb8af56c34865f52a6e8644edc4fa3
|
refs/heads/master
| 2020-03-30T02:11:03.394073
| 2019-12-03T00:52:10
| 2019-12-03T00:52:10
| 150,617,875
| 0
| 0
|
Apache-2.0
| 2018-09-27T16:39:29
| 2018-09-27T16:39:28
| null |
UTF-8
|
Python
| false
| false
| 2,827
|
py
|
# -*- coding: utf-8 -*-
# Author: XuMing <shibing624@126.com>
# Data: 17/10/18
# Brief: 预测
import os
import sys
import paddle.v2 as paddle
import config
import reader
from network import dssm_lm
from utils import logger, load_dict, load_reverse_dict
def infer(model_path, dic_path, infer_path, prediction_output_path, rnn_type="gru", batch_size=1):
logger.info("begin to predict...")
# check files
assert os.path.exists(model_path), "trained model not exits."
assert os.path.exists(dic_path), " word dictionary file not exist."
assert os.path.exists(infer_path), "infer file not exist."
logger.info("load word dictionary.")
word_dict = load_dict(dic_path)
word_reverse_dict = load_reverse_dict(dic_path)
logger.info("dictionary size = %d" % (len(word_dict)))
try:
word_dict["<unk>"]
except KeyError:
logger.fatal("the word dictionary must contain <unk> token.")
sys.exit(-1)
# initialize PaddlePaddle
paddle.init(use_gpu=config.use_gpu, trainer_count=config.num_workers)
# load parameter
logger.info("load model parameters from %s " % model_path)
parameters = paddle.parameters.Parameters.from_tar(
open(model_path, "r"))
# load the trained model
prediction = dssm_lm(
vocab_sizes=[len(word_dict), len(word_dict)],
emb_dim=config.emb_dim,
hidden_size=config.hidden_size,
stacked_rnn_num=config.stacked_rnn_num,
rnn_type=rnn_type,
share_semantic_generator=config.share_semantic_generator,
share_embed=config.share_embed,
is_infer=True)
inferer = paddle.inference.Inference(
output_layer=prediction, parameters=parameters)
feeding = {"left_input": 0, "left_target": 1, "right_input": 2, "right_target": 3}
logger.info("infer data...")
# define reader
reader_args = {
"file_path": infer_path,
"word_dict": word_dict,
"is_infer": True,
}
infer_reader = paddle.batch(reader.rnn_reader(**reader_args), batch_size=batch_size)
logger.warning("output prediction to %s" % prediction_output_path)
with open(prediction_output_path, "w")as f:
for id, item in enumerate(infer_reader()):
left_text = " ".join([word_reverse_dict[id] for id in item[0][0]])
right_text = " ".join([word_reverse_dict[id] for id in item[0][2]])
probs = inferer.infer(input=item, field=["value"], feeding=feeding)
f.write("%f\t%f\t%s\t%s" % (probs[0], probs[1], left_text, right_text))
f.write("\n")
if __name__ == "__main__":
infer(model_path=config.model_path,
dic_path=config.dic_path,
infer_path=config.infer_path,
prediction_output_path=config.prediction_output_path,
rnn_type=config.rnn_type)
|
[
"507153809@qq.com"
] |
507153809@qq.com
|
8164c15ce080bba486b0e97395893638e109f140
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.QVA/Sun-ExtA_16/pdf_to_json_test_Latn.QVA_Sun-ExtA_16.py
|
c9e6eeadc61bf0cfc64ae23cd016123070abc397
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.QVA/Sun-ExtA_16/udhr_Latn.QVA_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
310ef3f7f502ac9fca2d6fc43f37500bd8a533f7
|
3e4c3b6a6ba770fa18e9f072b1cfb58207f96b30
|
/openaddr/compat.py
|
ec93ded08da55c579f23fc715124f0d6f8c05740
|
[
"ISC"
] |
permissive
|
cbmeeks/machine
|
931b53657db3bb0b960006ccc6abd67fd41d704a
|
39652f0614597e2b56973ded9f61a1a2a208da2e
|
refs/heads/master
| 2020-12-26T00:46:01.112727
| 2016-07-31T03:41:06
| 2016-07-31T03:41:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,599
|
py
|
import sys
import io
PY2 = (sys.version_info[0] == 2)
if PY2:
import unicodecsv, subprocess32, uritemplate
unicodecsv.field_size_limit(sys.maxsize)
check_output = subprocess32.check_output
CalledProcessError = subprocess32.CalledProcessError
TimeoutExpired = subprocess32.TimeoutExpired
csvIO = io.BytesIO
def csvreader(file, encoding=None, **kwargs):
''' Pass encoding to unicodecsv
'''
if encoding is not None:
kwargs['encoding'] = encoding
if 'delimiter' in kwargs:
kwargs['delimiter'] = str(kwargs['delimiter'])
return unicodecsv.reader(file, **kwargs)
def csvwriter(file, encoding=None, **kwargs):
''' Pass encoding to unicodecsv
'''
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.writer(file, **kwargs)
def csvDictReader(file, encoding=None, delimiter=None, **kwargs):
''' Pass encoding to unicodecsv
'''
# Python2 unicodecsv requires this be not unicode
if delimiter is not None:
kwargs['delimiter'] = delimiter.encode('ascii')
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.DictReader(file, **kwargs)
def csvDictWriter(file, fieldnames, encoding=None, delimiter=None, **kwargs):
''' Pass encoding to unicodecsv
'''
# Python2 unicodecsv requires this be not unicode
if delimiter is not None:
kwargs['delimiter'] = delimiter.encode('ascii')
if encoding is not None:
kwargs['encoding'] = encoding
return unicodecsv.DictWriter(file, fieldnames, **kwargs)
def csvopen(filename, mode='r', encoding=None):
''' Discard encoding
'''
return io.FileIO(filename, mode=mode)
def expand_uri(template, args):
'''
'''
new_args = {k: v for (k, v) in args.items() if not hasattr(v, 'encode')}
new_args.update({k: v.encode('utf8') for (k, v) in args.items() if hasattr(v, 'encode')})
return uritemplate.expand(template, new_args)
from future import standard_library
standard_library.install_aliases()
else:
import csv, subprocess
from uritemplate import expand as expand_uri
standard_library = None
check_output = subprocess.check_output
CalledProcessError = subprocess.CalledProcessError
TimeoutExpired = subprocess.TimeoutExpired
csvIO = io.StringIO
def csvreader(file, encoding=None, **kwargs):
''' Discard encoding
'''
if 'delimiter' in kwargs:
kwargs['delimiter'] = str(kwargs['delimiter'])
return csv.reader(file, **kwargs)
def csvwriter(file, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.writer(file, **kwargs)
def csvDictReader(file, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.DictReader(file, **kwargs)
def csvDictWriter(file, fieldnames, encoding=None, **kwargs):
''' Discard encoding
'''
return csv.DictWriter(file, fieldnames, **kwargs)
def csvopen(filename, mode='r', encoding=None):
''' Pass encoding to io.open
'''
return io.open(filename, mode=mode, encoding=encoding)
try:
import cairo
except ImportError:
# http://stackoverflow.com/questions/11491268/install-pycairo-in-virtualenv
import cairocffi as cairo
|
[
"mike@teczno.com"
] |
mike@teczno.com
|
d680686b38adb8e9cdfc5bf3e14016b01354af3a
|
d1c6de4e0d4aafbe1e7d15a02487494f86bf9b7e
|
/알고리즘문제/내려가기.py
|
1515a653c108bd21017b437c35fc3fc9e25479c1
|
[] |
no_license
|
kdm604/TIL
|
d2ce2122e0b828a595530ac2a405a4661cf60205
|
554bbd8e884f4e7fbebdefbfa22a1a5eee0fa452
|
refs/heads/master
| 2023-01-11T21:41:57.845549
| 2020-03-24T08:55:10
| 2020-03-24T08:55:10
| 195,938,033
| 0
| 0
| null | 2023-01-05T01:14:37
| 2019-07-09T05:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
import sys
N = int(input())
ans_max = [[0 for _ in range(3)]for _ in range(2)]
ans_min = [[0 for _ in range(3)]for _ in range(2)]
for i in range(1, N+1):
arr = list(map(int, sys.stdin.readline().split()))
ans_max[i % 2][0] = max(ans_max[(i -1)%2][0], ans_max[(i-1) %2][1]) + arr[0]
ans_max[i % 2][1] = max(ans_max[(i - 1) % 2][0], ans_max[(i - 1) % 2][1], ans_max[(i - 1) % 2][2]) + arr[1]
ans_max[i % 2][2] = max(ans_max[(i - 1) % 2][1], ans_max[(i - 1) % 2][2]) + arr[2]
ans_min[i % 2][0] = min(ans_min[(i - 1) % 2][0], ans_min[(i - 1) % 2][1]) + arr[0]
ans_min[i % 2][1] = min(ans_min[(i - 1) % 2][0], ans_min[(i - 1) % 2][1], ans_min[(i - 1) % 2][2]) + arr[1]
ans_min[i % 2][2] = min(ans_min[(i - 1) % 2][1], ans_min[(i - 1) % 2][2]) + arr[2]
print(max(ans_max[N%2][0], ans_max[N%2][1], ans_max[N%2][2]))
print(min(ans_min[N%2][0], ans_min[N%2][1], ans_min[N%2][2]))
|
[
"kdm604@naver.com"
] |
kdm604@naver.com
|
8f3cc002c398732246f1e2d85326681bd76a8411
|
c5a8f6dd4e5ebc43f02923704325620f0787b2f4
|
/visual-experiments/rectangular_visualizer.py
|
657afe5661a8fb7256dba49930c2c02daf9a6eec
|
[] |
no_license
|
alex-berman/tforms
|
50098501d19de75632426423d02025162bbc94e6
|
046476001609dfa8192c2e373a040d4129975ab6
|
refs/heads/master
| 2021-01-01T20:00:00.381901
| 2014-03-16T13:44:09
| 2014-03-16T13:44:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
import visualizer
from visualizer import File, run
from vector import DirectionalVector, Vector2d
import math
class Chunk:
def peer_position(self):
return Visualizer.bearing_to_border_position(
self.peer.bearing, self.visualizer.width, self.visualizer.height)
class Segment(visualizer.Segment, Chunk):
pass
class Peer(visualizer.Peer):
pass
class Visualizer(visualizer.Visualizer):
@staticmethod
def bearing_to_border_position(bearing, width, height):
radius = math.sqrt(width*width + height*height) / 2
midpoint = Vector2d(width/2, height/2)
circle_position = midpoint + DirectionalVector(bearing - 2*math.pi/4, radius)
return circle_position
def pan_segment(self, segment):
relative_x = segment.pan
space_y = 3
space_x = (relative_x - 0.5) * 5
self.orchestra.place_segment(segment.id, space_x, space_y, segment.duration)
|
[
"alex@nimations.com"
] |
alex@nimations.com
|
042476a02c8bf29a0201454a2168abe364601a48
|
a67d999deafb7d3dac60ad95f66234fe3e79030e
|
/Python/Advanted/src/chauthoi/myGUItest1.py
|
3a9a1c4fe3d7059a5e5b5415c33d5c352348e5ae
|
[] |
no_license
|
tielse/Example_Python
|
1282728a3e38725a48f30a1c49a688b5262be485
|
0bc31f86f16ef98cf3b7ad8a524c27978e47775f
|
refs/heads/master
| 2021-01-02T22:36:58.866922
| 2017-08-04T15:25:17
| 2017-08-04T15:25:17
| 99,355,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
#!/usr/bin/env python 2.7
import Tkinter
from Tkinter import *
Widget=Label(None,text='Hello Python')
Widget.pack()
Widget.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
7a15d93ffe5208e8afe7da36fd5f11f27c9fd337
|
59e8a041435b70f1dfb2464ccef298c69cf8466e
|
/058_Length_of_Last_Word/tests.py
|
22dd861ca481516c780a5c55fa2454e7d4fdcbd3
|
[] |
no_license
|
sallowdish/LeetCode
|
f0aa6c5be864711c75a3583f320ce967d50c55d3
|
d12ca00f30a1784802f42f8e76f782d7b72e95a6
|
refs/heads/master
| 2021-01-21T04:32:02.351940
| 2016-06-25T00:12:22
| 2016-06-25T00:12:22
| 33,152,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
#!/usr/bin/python3
from unittest import TestCase, main
from sol1 import Solution
def split(n):
l = []
for i in n:
l.append(list(i))
return l
class Test(TestCase):
sol = None
def setUp(self):
self.sol = Solution()
def test0(self):
n = ""
self.assertEqual(self.sol.lengthOfLastWord(n) ,0)
def test1(self):
n = " "
self.assertEqual(self.sol.lengthOfLastWord(n) ,0)
def test2(self):
n = " a"
self.assertEqual(self.sol.lengthOfLastWord(n) ,1)
def test3(self):
n = " ab"
self.assertEqual(self.sol.lengthOfLastWord(n) ,2)
def test4(self):
n = " aVb "
self.assertEqual(self.sol.lengthOfLastWord(n) ,3)
def test5(self):
n = " ab IUHB POQPEQJ83894e2"
self.assertEqual(self.sol.lengthOfLastWord(n) ,len("POQPEQJ83894e2"))
if __name__ == "__main__":
# logging.basicConfig( stream=sys.stderr )
# logging.getLogger( "Test.testSomething" ).setLevel( logging.DEBUG )
main()
|
[
"zhrud21@gmail.com"
] |
zhrud21@gmail.com
|
dcf94f3467263d06f0cdc6a6fd45814921ae79cf
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/hackerEarth/practice/dataStructures/advancedDataStructures/segmentTrees/researchOnNumbers.py
|
cd843193b38d663316dbb8d7bec57cc27e97e182
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654
| 2023-01-24T14:47:52
| 2023-01-24T14:47:52
| 61,184,927
| 2
| 1
|
MIT
| 2022-03-06T11:07:18
| 2016-06-15T06:57:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
# Research on Numbers
#######################################################################################################################
#
# Bob is studying in a research institute. He is currently researching on integer sequences. He has already done
# some research on famous fibonacci sequence. Now he is trying to investigate patterns
# in a general recursive sequence (Ai)
# Sequence (Ai) is
# Ai = Bi (for i <= k)
# Ai = C1 * Ai-1 + C2 * Ai-2 +.......+ Ck*Ai-k (for i > k)
#
# But while calculating the sequence he realizes that values are growing very fast. So to keep the values small
# he calculates values modulo 109+7 (1000000007) . So that each term of sequence will be less than 109+7.
# While he is busy with his work, his girlfriend is disturbing him a lot. He wants to make her busy with some task.
# He gives her the task of sorting all the terms from Al to Ar of his sequence. She is very quick so he gives
# her same task Q times (of course with different l and r). Since sorting is very boring task so she asks you
# to complete the task.
# You will be given two numbers l and r and you are expected to output all the terms from Al to Ar in non
# decreasing order. But to avoid such a large output, if there are more than 100 terms
# in the output print only first 100.
#
# Input :
# First line contains T, the number of test cases. First line of each test case contains two space separated
# integers Q and k. Next line contains array B of length k. 3rd line contains array C of length k.
# Each of next Q lines contains two space separated integers l and r.
#
# Output :
# For each test case output Q lines. Each line contains terms from Al to Ar in non decreasing order.
# If more than 100 terms are there to output,print only first 100
#
# Constraints :
# 1 <= T <= 3
# 1 <= Q <= 100
# 1 <= k <= 5
# 1 <= Bj,Cj <= 50
# 1 <= l,r <= 10^6
# l <= r
#
# SAMPLE INPUT
# 2
# 4 3
# 1 2 3
# 2 1 1
# 1 5
# 6 8
# 8 9
# 6 9
# 3 4
# 4 5 7 9
# 2 2 1 3
# 2 7
# 10 12
# 100 101
#
# SAMPLE OUTPUT
# 1 2 3 9 23
# 58 148 377
# 377 960
# 58 148 377 960
# 5 7 9 49 138 404
# 9964 29126 85073
# 483689722 905484679
#
#######################################################################################################################
|
[
"sagarnikam123@gmail.com"
] |
sagarnikam123@gmail.com
|
61bdf96e9e66babc6af5fbb50dce07eacb4d3e7e
|
b804260baffde6044d0da699ebd01eefd5524897
|
/tests/loss/test_loss.py
|
db2c74e8c2f0e1a7ffec9783b81e8edcb95589ba
|
[
"MIT"
] |
permissive
|
pfnet/pynif3d
|
d8112e659c3158cd87f4f88ebb77c653c2a0eb7c
|
da3680cce7e8fc4c194f13a1528cddbad9a18ab0
|
refs/heads/main
| 2023-07-15T06:27:27.849842
| 2021-08-18T07:15:13
| 2021-08-18T07:15:13
| 397,141,414
| 72
| 5
|
MIT
| 2021-08-18T07:15:14
| 2021-08-17T06:53:45
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
from unittest import TestCase
import torch
from pynif3d.loss import eikonal_loss
class TestLoss(TestCase):
def test_eikonal_loss(self):
x = torch.as_tensor(
[
[0.2936261892, -1.0289776325, 0.1445489526],
[-0.2577984035, -0.7820385098, 0.3506951332],
[-0.4243153632, 0.8669579029, -0.6295363903],
]
)
loss = float(eikonal_loss(x))
expected_loss = 0.0135356029
self.assertAlmostEqual(loss, expected_loss, places=5)
|
[
"mihaimorariu@gmail.com"
] |
mihaimorariu@gmail.com
|
1b4972c56701c6145e833481d3454ceb0bfc240a
|
62980875b6e08d0099b1662fa3148ae29986fb64
|
/BeautifulSoup/6_bs4.py
|
898014028b10426db05bb94eb1a9f99b419b19ca
|
[] |
no_license
|
kogkuemryong/Python_WebScraping-
|
9db659c9a11c2677074fcac7f7029ec8541cb4f5
|
51cf7e7e71ce7c90b68f70daa43785671350dfb5
|
refs/heads/master
| 2022-12-12T17:01:27.142178
| 2020-09-08T16:48:19
| 2020-09-08T16:48:19
| 293,404,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,019
|
py
|
import requests
from bs4 import BeautifulSoup
url ='https://comic.naver.com/webtoon/weekday.nhn'
res = requests.get(url) # url 를 읽음
res.raise_for_status() # 문제가 생기면 프로그램 종료를 시켜줌
soup = BeautifulSoup(res.text, 'lxml') # 텍스트 형태로 가져온 데이터를 lxml를 통해서
# BeautifulSoup 객체로 만든 것이다.
'''
해당 웹페이지를 잘 알 때 사용
print(soup.title) # <title>네이버 만화 > 요일별 웹툰 > 전체웹툰</title>
print(soup.title.get_text()) # 글자만 빼옴 / 네이버 만화 > 요일별 웹툰 > 전체웹툰
print(soup.a) # soup 전체에서 첫번째 a element 출력
print(soup.a.attrs) # a element의 속성 정보를 출력
print(soup.a['href']) # a element의 href 속성 '값' 정보를 출력`
'''
# print(soup.find('a', attrs={'class' :'Nbtn_upload'})) # class = 'Nbtn_upload' 인 a element를 찾아줘
# print(soup.find(attrs={'class' :'Nbtn_upload'})) # class = 'Nbtn_upload'인 어떤 element 를 찾아줘
# print(soup.find('li', attrs={'class':'rank01'}))
# rank1 = soup.find('li', attrs={'class':'rank01'})
# print(rank1.a.get_text()) # 글자만
# print (rank1.next_sibling) # 아무것도 출력 안됨
# rank2 = rank1.next_sibling.next_sibling # 형제 관계로 넘어가게 해준다.
# rank3 = rank2.next_sibling.next_sibling
# rank4 = rank3.next_sibling.next_sibling
# print(rank4.get_text())
# rank2 = rank3.previous_sibling.previous_sibling # 이전으로 가기
# print(rank1.parent) # 부모로 가기
# rank2 = rank1.find_next_sibling('li')
# print(rank2.a.get_text()) # next.sibling 을 여러번 사용하게 될 때 대신하여 유용하게 사용.
#
# rank3 = rank2.find_next_sibling('li')
# print(rank3.a.get_text())
#
# rank2 = rank3.find_previous_sibling('li')
# print(rank2.a.get_text())
# print (rank1.find_next_siblings('li'))
webtooon = soup.find('a' , text = '인생존망-43화 : 너 뽀뽀하려고 그랬지!!!')
print(webtooon)
|
[
"rmafud93@naver.com"
] |
rmafud93@naver.com
|
2520da0ffe6d528d917b6d76d7e86d7767ae8d15
|
8f4488494507da4cb6f15073b8aa2e6f97fabb35
|
/test/integration/local/test_tensorflow.py
|
c85f8f5d446253c4b38bdc7e634c6851379fd0e4
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-training-toolkit
|
025966a1216aeb78b58f7abab19c6ccb01b0897d
|
e4a765e699e16c5849bbdfd789edbfc9820fdd77
|
refs/heads/master
| 2023-08-21T12:33:59.831391
| 2023-08-08T16:46:40
| 2023-08-08T16:46:40
| 212,439,434
| 415
| 110
|
Apache-2.0
| 2023-09-07T19:58:23
| 2019-10-02T20:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import subprocess
import sys
import pytest
from sagemaker.estimator import Estimator
@pytest.fixture(scope="module", autouse=True)
def container():
try:
command = (
"docker run --name sagemaker-training-toolkit-test "
"sagemaker-training-toolkit-test:tensorflow train"
)
proc = subprocess.Popen(command.split(), stdout=sys.stdout, stderr=subprocess.STDOUT)
yield proc.pid
finally:
subprocess.check_call("docker rm -f sagemaker-training-toolkit-test".split())
def test_tensorflow_exceptions(capsys):
with pytest.raises(Exception):
estimator = Estimator(
image_uri="sagemaker-training-toolkit-test:tensorflow",
role="SageMakerRole",
instance_count=1,
instance_type="local",
)
estimator.fit()
stdout = capsys.readouterr().out
assert "XlaRuntimeError" in stdout
|
[
"noreply@github.com"
] |
aws.noreply@github.com
|
538432edd63d9503879fed091c2da849b88aeb19
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/mall/settings_20211011171802.py
|
d6ac69d215da3f819a7996e8f1d92e8ab5d563bf
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,642
|
py
|
"""
Django settings for mall project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"templates")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-#l0ij4e$3v@&xi3i#y$19f#_@z(yv+5yw$kc+02!-)g%ny%oi8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'category',
'accounts',
'store',
'carts'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'category.context_processors.menu_links',
'cart.cont'
],
},
},
]
WSGI_APPLICATION = 'mall.wsgi.application'
AUTH_USER_MODEL = 'accounts.Account'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR /'static'
STATICFILES_DIRS = [
'mall/static',
]
# media files configuration
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR /'media'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
2ea6a54e6d5e934338510fc52ec20c0e4d55851c
|
ce6cb09c21470d1981f1b459293d353407c8392e
|
/docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_formula1_or.py
|
71314684086751f0563ed538b08bac277bdc9834
|
[
"Apache-2.0"
] |
permissive
|
minefuto/healthbot-py-client
|
c4be4c9c3153ef64b37e5344bf84154e93e7b521
|
bb81452c974456af44299aebf32a73abeda8a943
|
refs/heads/master
| 2022-12-04T07:47:04.722993
| 2020-05-13T14:04:07
| 2020-05-13T14:04:07
| 290,145,286
| 0
| 0
|
Apache-2.0
| 2020-08-25T07:27:54
| 2020-08-25T07:27:53
| null |
UTF-8
|
Python
| false
| false
| 5,021
|
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFormula1Or(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'left_vector': 'str',
'right_vector': 'str'
}
attribute_map = {
'left_vector': 'left-vector',
'right_vector': 'right-vector'
}
def __init__(self, left_vector=None, right_vector=None): # noqa: E501
"""RuleSchemaFormula1Or - a model defined in Swagger""" # noqa: E501
self._left_vector = None
self._right_vector = None
self.discriminator = None
self.left_vector = left_vector
self.right_vector = right_vector
@property
def left_vector(self):
"""Gets the left_vector of this RuleSchemaFormula1Or. # noqa: E501
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:return: The left_vector of this RuleSchemaFormula1Or. # noqa: E501
:rtype: str
"""
return self._left_vector
@left_vector.setter
def left_vector(self, left_vector):
"""Sets the left_vector of this RuleSchemaFormula1Or.
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:param left_vector: The left_vector of this RuleSchemaFormula1Or. # noqa: E501
:type: str
"""
if left_vector is None:
raise ValueError("Invalid value for `left_vector`, must not be `None`") # noqa: E501
if left_vector is not None and not re.search(r'^@[a-z][a-zA-Z0-9_-]*$', left_vector): # noqa: E501
raise ValueError(r"Invalid value for `left_vector`, must be a follow pattern or equal to `/^@[a-z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._left_vector = left_vector
@property
def right_vector(self):
"""Gets the right_vector of this RuleSchemaFormula1Or. # noqa: E501
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:return: The right_vector of this RuleSchemaFormula1Or. # noqa: E501
:rtype: str
"""
return self._right_vector
@right_vector.setter
def right_vector(self, right_vector):
"""Sets the right_vector of this RuleSchemaFormula1Or.
Vector name. Pattern for giving vector name is @[a-z][a-zA-Z0-9_-]* # noqa: E501
:param right_vector: The right_vector of this RuleSchemaFormula1Or. # noqa: E501
:type: str
"""
if right_vector is None:
raise ValueError("Invalid value for `right_vector`, must not be `None`") # noqa: E501
if right_vector is not None and not re.search(r'^@[a-z][a-zA-Z0-9_-]*$', right_vector): # noqa: E501
raise ValueError(r"Invalid value for `right_vector`, must be a follow pattern or equal to `/^@[a-z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._right_vector = right_vector
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the swagger-declared attributes, recursively converting any
        # nested models (objects exposing ``to_dict``) inside lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated models may subclass dict; merge those entries in as well.
        if issubclass(RuleSchemaFormula1Or, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Pretty-print the dict form so nested models stay readable.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Reuse to_str so repr and str output stay consistent.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaFormula1Or):
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        # Delegates to == (and therefore __eq__); needed for Python 2 models.
        return not self == other
|
[
"nitinkr@juniper.net"
] |
nitinkr@juniper.net
|
4c0d4d4150b62d2151f73bd99f474cc1fcdc41af
|
e01dde12be71c40065a9d6d2b1451f837c42a41e
|
/py_trees_ros_viewer/viewer.py
|
754f696ee24634ae00238eb788ed5305d7f1e131
|
[
"BSD-3-Clause"
] |
permissive
|
neelj09/py_trees_ros_viewer
|
29336ce5a7f7592ffb67c0170b42902d16fea5d3
|
1fbd7877fa4bcb53119b3111db26ce87ec8ccebd
|
refs/heads/master
| 2022-04-09T00:48:10.260221
| 2019-08-10T02:54:03
| 2019-08-10T02:54:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,833
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# License: BSD
# https://github.com/splintered-reality/py_trees_ros_viewer/raw/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
A qt-javascript application for viewing executing or replaying py_trees
"""
##############################################################################
# Imports
##############################################################################
import functools
import json
import signal
import sys
import time
import PyQt5.QtCore as qt_core
import PyQt5.QtWidgets as qt_widgets
from . import console
from . import trees
from . import main_window
##############################################################################
# Helpers
##############################################################################
def send_tree_response(reply):
    """Callback for QWebEnginePage.runJavaScript: log render_tree's return value."""
    console.logdebug("reply: '{}' [viewer]".format(reply))
@qt_core.pyqtSlot()
def send_tree(web_view_page, demo_trees, unused_checked):
    """Render the next demo tree in the web view, cycling through trees 0..2.

    The cycle position is kept on the function attribute ``send_tree.index``
    (initialised to 0 below), so each click advances to the next tree.
    """
    send_tree.index = 0 if send_tree.index == 2 else send_tree.index + 1
    demo_trees[send_tree.index]['timestamp'] = time.time()
    console.logdebug("send: tree '{}' [{}][viewer]".format(
        send_tree.index, demo_trees[send_tree.index]['timestamp'])
    )
    # Hand the serialised tree to the javascript side; reply is logged async.
    web_view_page.runJavaScript(
        "render_tree({tree: '%s'});" % json.dumps(demo_trees[send_tree.index]),
        send_tree_response
    )


send_tree.index = 0  # cycle position; the first click advances this to 1
##############################################################################
# Main
##############################################################################
def main():
    """Construct the Qt application, wire up signals and run the event loop."""
    # logging
    console.log_level = console.LogLevel.DEBUG

    # the players
    app = qt_widgets.QApplication(sys.argv)
    demo_trees = trees.create_demo_tree_list()
    window = main_window.MainWindow(
        default_tree=demo_trees[0]
    )

    # sig interrupt handling
    # use a timer to get out of the gui thread and
    # permit python a chance to catch the signal
    # https://stackoverflow.com/questions/4938723/what-is-the-correct-way-to-make-my-pyqt-application-quit-when-killed-from-the-co
    def on_shutdown(unused_signal, unused_frame):
        console.logdebug("received interrupt signal [viewer]")
        window.close()

    signal.signal(signal.SIGINT, on_shutdown)
    timer = qt_core.QTimer()
    # No-op timeout merely wakes the interpreter so pending signals are handled.
    timer.timeout.connect(lambda: None)
    timer.start(250)

    # sigslots: clicking "send" pushes the next demo tree into the web view
    window.ui.send_button.clicked.connect(
        functools.partial(
            send_tree,
            window.ui.web_view_group_box.ui.web_engine_view.page(),
            demo_trees
        )
    )

    # qt bringup
    window.show()
    result = app.exec_()

    # shutdown
    sys.exit(result)
|
[
"d.stonier@gmail.com"
] |
d.stonier@gmail.com
|
15bd7e332a59184de848af3cc92208ff3dcc0330
|
7d1e9acf94a5e4533d3ef5828b568e89c29519a3
|
/11-Message Box/MessageBox.py
|
a6e635c724e37df0204a8b500c9173b5d056455a
|
[] |
no_license
|
abuzarrizvi/Python-GUI-s-With-TKinter
|
c960e3629589d25b72f6720caebb552352e77976
|
d5c7843cdd3203294762ae92b6503ecb55d083f1
|
refs/heads/master
| 2020-07-06T03:17:56.798236
| 2019-08-23T10:56:41
| 2019-08-23T10:56:41
| 202,871,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
from tkinter import *
from PIL import ImageTk, Image
from tkinter import messagebox

# Build the main application window.
root = Tk()
root.title('Learn To Code at Github.com')
root.iconbitmap('Martz90-Circle-Camera.ico')

# Available dialog helpers:
# showinfo, showwarning, showerror, askquestion, askokcancel, askyesno


def popup():
    # Show an error dialog and echo its return value into the window.
    # NOTE(review): showerror returns the string 'ok'; the commented-out
    # yes/no branch below only makes sense with askquestion -- confirm intent.
    response = messagebox.showerror("This is my Popup!", "Hello World!")
    Label(root, text=response).pack()
    # if response == "yes":
    #     Label(root, text="You Clicked Yes! ").pack()
    # else:
    #     Label(root, text="You Clicked No! ").pack()


Button(root, text="Popup", command=popup).pack()
root.mainloop()
|
[
"noreply@github.com"
] |
abuzarrizvi.noreply@github.com
|
728c81d8394209a41c9c13be78e81117b4680432
|
250e692078234b0e3ef22ad20ab7168f807d1d5f
|
/diagonal_matrix.py
|
08b03ebc30dd750a07341d1b062de7ee30082f1c
|
[] |
no_license
|
AnTznimalz/python_prepro
|
694338609985971c5e6eaf8ec463c2a5c62dd836
|
bdc1e49fa03704bebcf2ab69a4c1600e4cd46a74
|
refs/heads/master
| 2022-06-22T23:47:28.396580
| 2020-05-07T15:07:56
| 2020-05-07T15:07:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
""" Diagonal Matrix"""
def mat():
""" Func. mat for calculate matrix """
dim = int(input())
box = list()
a, b = 0, 0
for n in range(dim):
lst = input().split()
box.append(lst)
lst = []
for i in range(dim):
a += int(box[i][i])
b += int(box[i][dim-i-1])
print(abs(a-b))
mat()
|
[
"thuchpunapivitcholachat@gmail.com"
] |
thuchpunapivitcholachat@gmail.com
|
2beb1f616a83a5c13a520bc827faceffac12cedc
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/eve/devtools/script/networkdatamonitor.py
|
ad91d9153d462512bd7775ae06745daf165a0b2d
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,749
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\devtools\script\networkdatamonitor.py
import operator
import carbonui.const as uiconst
from carbonui.primitives.container import Container
from eve.client.script.ui.control.buttons import Button
from eve.client.script.ui.control.eveLabel import Label
from eve.client.script.ui.control.eveWindow import Window
import log
import uthread2
import util
# Stat rows to display: (row label, key in GetConnectionProperties() dict,
# 1 = render the value as kilobytes, 0 = render the raw count).
PROPS = [('Packets out', 'packets_out', 0),
 ('Packets in', 'packets_in', 0),
 ('Kilobytes out', 'bytes_out', 1),
 ('Kilobytes in', 'bytes_in', 1)]


class NetworkDataMonitor(Window):
    """Developer window that samples machoNet connection statistics and
    shows traffic over several trailing time windows (5s..60s) plus totals.
    """
    default_caption = 'Network Data Monitor'
    default_windowID = 'networkdatamonitor'
    default_minSize = (400, 300)
    refreshDelay = 0.5  # seconds between UI refreshes

    def ApplyAttributes(self, attributes):
        """Build the window layout and start the periodic refresh tasklet."""
        self._ready = False
        Window.ApplyAttributes(self, attributes)
        self.Reset()
        self.SetTopparentHeight(4)
        self.settingsContainer = Container(parent=self.sr.main, align=uiconst.TOBOTTOM, height=16, padding=8)
        Button(parent=self.settingsContainer, label='Reset', align=uiconst.CENTER, func=self.Reset)
        container = Container(parent=self.sr.main, align=uiconst.TOALL, padding=8)
        # Header row: one column per trailing window (seconds) plus a total column.
        statusHeader = ' '
        for tme in self.intvals:
            statusHeader += '<t><right>%s' % util.FmtDate(long(tme * 10000), 'ss')
        statusHeader += '<t><right>total'
        self.statusLabels = []
        txt = Label(parent=container, align=uiconst.TOPLEFT, text=statusHeader, tabs=[80,
         130,
         180,
         230,
         280,
         330,
         380], state=uiconst.UI_DISABLED)
        # One label per data row: 4 PROPS rows + outstanding/blocking/blocking-time.
        for i in xrange(7):
            statusLabel = Label(parent=container, text='', top=(i + 1) * txt.height + 1, align=uiconst.TOPLEFT, tabs=[80,
             130,
             180,
             230,
             280,
             330,
             380], state=uiconst.UI_DISABLED)
            self.statusLabels.append(statusLabel)
        self.PopulateLabels()
        uthread2.StartTasklet(self.Refresh)

    def Reset(self, *args):
        """Zero all counters and snapshot current totals as the new baseline."""
        # Trailing window lengths in milliseconds (5s, 10s, 15s, 30s, 60s).
        self.intvals = [5000,
         10000,
         15000,
         30000,
         60000]
        # One sample list per window plus a final list for the running total.
        self.counter = [[],
         [],
         [],
         [],
         [],
         []]
        self.ticker = 0
        self.packets_outTotal = 0
        self.packets_inTotal = 0
        self.bytes_outTotal = 0
        self.bytes_inTotal = 0
        self.laststats = {}
        # Baseline snapshot; later readings are reported relative to this.
        self.lastresetstats = sm.GetService('machoNet').GetConnectionProperties()

    def Refresh(self):
        """Tasklet loop: repopulate the labels every ``refreshDelay`` seconds."""
        while not self.destroyed:
            uthread2.Sleep(self.refreshDelay)
            self.PopulateLabels()

    def PopulateLabels(self, *args):
        """Sample connection stats and rewrite every status label."""
        self.ticker += self.intvals[0]
        if self.ticker > self.intvals[-1]:
            self.ticker = self.intvals[0]
        stats = sm.GetService('machoNet').GetConnectionProperties()
        if self.laststats == {}:
            self.laststats = stats
        # Report values relative to the last Reset() baseline.
        if self.lastresetstats != {}:
            for key in stats.iterkeys():
                stats[key] = stats[key] - self.lastresetstats[key]
        # Append this sample's deltas to each window, trimming each window to
        # its length in samples (one sample per second of window).
        for i in xrange(len(self.counter) - 1):
            self.counter[i].append([ stats[key] - self.laststats[key] for header, key, K in PROPS ])
            self.counter[i] = self.counter[i][-(self.intvals[i] / 1000):]
        self.counter[-1].append([ stats[key] - self.laststats[key] for header, key, K in PROPS ])
        if not self.display:
            # Window hidden: keep sampling but skip the label updates.
            self.laststats = stats
            return
        valueIdx = 0
        for header, key, K in PROPS:
            statusstr = '%s' % header
            for intvals in self.counter:
                value = reduce(operator.add, [ intval[valueIdx] for intval in intvals ], 0)
                if not value:
                    statusstr += '<t><right>-'
                else:
                    # K selects raw count vs kilobyte formatting (see PROPS).
                    statusstr += '<t><right>%s' % [value, '%.1f' % (value / 1024.0)][K]
            self.statusLabels[valueIdx].text = statusstr
            valueIdx += 1
        self.statusLabels[valueIdx].text = 'Outstanding<t><right>%s' % stats['calls_outstanding']
        valueIdx += 1
        self.statusLabels[valueIdx].text = 'Blocking Calls<t><right>%s' % stats['blocking_calls']
        valueIdx += 1
        block_time = stats['blocking_call_times']
        if block_time >= 0:
            secs = util.SecsFromBlueTimeDelta(block_time)
            self.statusLabels[valueIdx].text = 'Blocking time<t><right>%sH<t><right>%sM<t><right>%sS' % util.HoursMinsSecsFromSecs(secs)
        elif not hasattr(self, 'warnedBlockingTimeNegative'):
            # Warn only once per window instance.
            self.warnedBlockingTimeNegative = True
            log.LogTraceback('Blocking time is negative?')
        self.laststats = stats
|
[
"le02005@163.com"
] |
le02005@163.com
|
3b5330ea0aa6a4a8d96e5804f4c85d8878f67ed5
|
d5440edcfc66496937e98c557ab9c33946234808
|
/lifting line theory basic.py
|
750e9c9edd03ecebdd25e481ebf6dc7a98950762
|
[] |
no_license
|
geoffreynyaga/lifting-line-theory
|
4df7fb1baca79b9e3dfb19f5ec6c4ba86fa8fe69
|
352e1379863adf25c5f3e4966e16ae67d38f97ba
|
refs/heads/master
| 2022-08-30T04:18:23.725361
| 2020-02-14T18:55:28
| 2020-02-14T18:55:28
| 99,334,542
| 2
| 0
| null | 2022-06-22T01:09:44
| 2017-08-04T10:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,355
|
py
|
# coding: utf-8
# Lifting-line theory: solve Prandtl's monoplane equation for a tapered,
# twisted wing by expanding circulation in odd sine terms at N spanwise
# stations, then plot the spanwise lift-coefficient distribution.
__author__ = "Geoffrey Nyaga"

import numpy as np  # type: ignore
import math
import matplotlib.pylab as plt  # type: ignore

N: int = 9  # (number of segments - 1)
S: float = 24.39  # wing area m^2
AR: float = 7.8  # Aspect ratio
taper: float = 0.45  # Taper ratio
alpha_twist: float = -2.0  # Twist angle (deg)
i_w: float = 1.0  # wing setting angle (deg)
a_2d: float = 6.8754  # lift curve slope (1/rad)
alpha_0: float = -4.2  # zero-lift angle of attack (deg)

b = math.sqrt(AR * S)  # wing span (m)
MAC = S / b  # Mean Aerodynamic Chord (m)
Croot = (1.5 * (1 + taper) * MAC) / (1 + taper + taper ** 2)  # root chord (m)
# theta = np.arange(math.pi/(2*N), math.pi/2, math.pi/(2*(N)))
# Spanwise stations in the transformed coordinate theta (0..pi/2, half span).
theta = np.linspace((math.pi / (2 * N)), (math.pi / 2), N, endpoint=True)
# alpha =np.arange(i_w+alpha_twist,i_w ,-alpha_twist/(N))
# Local geometric angle of attack: linear twist from tip toward root.
alpha = np.linspace(i_w + alpha_twist, i_w, N)
z = (b / 2) * np.cos(theta)
c = Croot * (1 - (1 - taper) * np.cos(theta))  # Mean Aerodynamics
mu = c * a_2d / (4 * b)
# Monoplane equation: LHS (57.3 converts degrees to radians).
LHS = mu * (np.array(alpha) - alpha_0) / 57.3  # .reshape((N-1),1)# Left Hand Side
RHS = []
# Assemble one RHS column per odd sine mode (1, 3, ..., 2N-1).
for i in range(1, 2 * N + 1, 2):
    RHS_iter = np.sin(i * theta) * (
        1 + (mu * i) / (np.sin(list(theta)))
    )  # .reshape(1,N)
    # print(RHS_iter,"RHS_iter shape")
    RHS.append(RHS_iter)

test = np.asarray(RHS)
x = np.transpose(test)
# Solve the linear system for the Fourier coefficients A_n.
inv_RHS = np.linalg.inv(x)
ans = np.matmul(inv_RHS, LHS)
mynum = np.divide((4 * b), c)
# Reconstruct the local lift coefficient from the nine sine modes.
test = (np.sin((1) * theta)) * ans[0] * mynum
test1 = (np.sin((3) * theta)) * ans[1] * mynum
test2 = (np.sin((5) * theta)) * ans[2] * mynum
test3 = (np.sin((7) * theta)) * ans[3] * mynum
test4 = (np.sin((9) * theta)) * ans[4] * mynum
test5 = (np.sin((11) * theta)) * ans[5] * mynum
test6 = (np.sin((13) * theta)) * ans[6] * mynum
test7 = (np.sin((15) * theta)) * ans[7] * mynum
test8 = (np.sin((17) * theta)) * ans[8] * mynum

CL = test + test1 + test2 + test3 + test4 + test5 + test6 + test7 + test8
CL1 = np.append(0, CL)  # force zero lift at the wing tip
y_s = [b / 2, z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7], z[8]]

plt.plot(y_s, CL1, marker="o")
plt.title("Lifting Line Theory\n Elliptical Lift distribution")
plt.xlabel("Semi-span location (m)")
plt.ylabel("Lift coefficient")
plt.grid()
plt.show()

CL_wing = (
    math.pi * AR * ans[0]
)  # USE THIS CL WITH CRUISE SPEED TO CALCULATE THE ACCURATE LIFT!!!!!!!!!!
print(CL_wing, "CL_wing")
|
[
"geoffreynyagak@gmail.com"
] |
geoffreynyagak@gmail.com
|
ff54639667d43e2a8ef0b80917c081381a5370b5
|
5471de6fd11cc36e8ad9c05ea25d13ae568ad060
|
/ClassesAndInstances/Lab Vet.py
|
0661e0116a2ab4a17184311b5b09a71a094a3404
|
[] |
no_license
|
olgayordanova/PythonOOP
|
75bbf9a20c612be7212de7bed59edccef1e02304
|
2d177d17bf50335b17f6246198b1cf85719de1df
|
refs/heads/main
| 2023-03-30T18:59:56.751037
| 2021-04-03T19:48:37
| 2021-04-03T19:48:37
| 333,202,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
class Vet:
    """A veterinary clinic.

    ``animals`` and ``space`` are class-level: the registry and its capacity
    are shared by every clinic, while each instance also tracks the animals
    registered through it.
    """

    animals = []  # all animals across every clinic (shared registry)
    space = 5     # total capacity shared by all clinics

    def __init__(self, name):
        self.name = name
        self.animals = []  # animals registered via this particular clinic

    def register_animal(self, animal_name):
        """Register an animal if the shared clinic has room."""
        if len(Vet.animals) >= Vet.space:
            return "Not enough space"
        self.animals.append(animal_name)
        Vet.animals.append(animal_name)
        return f"{animal_name} registered in the clinic"

    def unregister_animal(self, animal_name):
        """Remove an animal previously registered through this clinic."""
        if animal_name not in self.animals:
            return f"{animal_name} not in the clinic"
        self.animals.remove(animal_name)
        Vet.animals.remove(animal_name)
        return f"{animal_name} unregistered successfully"

    def info(self):
        """Summarise this clinic's animals and the shared space remaining."""
        return (f"{self.name} has {len(self.animals)} animals."
                f" {Vet.space - len(Vet.animals)} space left in clinic")
# Demo: two clinics sharing the class-level registry and 5-slot capacity.
peter = Vet("Peter")
george = Vet("George")
print(peter.register_animal("Tom"))
print(george.register_animal("Cory"))
print(peter.register_animal("Fishy"))
print(peter.register_animal("Bobby"))
print(george.register_animal("Kay"))
print(george.unregister_animal("Cory"))
print(peter.register_animal("Silky"))
print(peter.unregister_animal("Molly"))
print(peter.unregister_animal("Tom"))
print(peter.info())
print(george.info())
|
[
"noreply@github.com"
] |
olgayordanova.noreply@github.com
|
b8728bf275bb2ca91a768945aac95810d2f474eb
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/client/gui/shared/gui_items/dossier/achievements/Achieved.py
|
abf5a6ed09d5c5dab3a8ed8390af41b1ca9fb8d5
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 668
|
py
|
# 2016.11.19 19:52:48 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/Achieved.py
from abstract import RegularAchievement
from gui.shared.gui_items.dossier.achievements import validators
class Achieved(RegularAchievement):
    """Achievement whose validity check is simply "already earned"."""

    @classmethod
    def checkIsValid(cls, block, name, dossier):
        # NOTE(review): arguments are forwarded as (cls, name, block, dossier),
        # i.e. name/block swapped relative to this signature -- presumably the
        # validator's expected order; confirm against validators.alreadyAchieved.
        return validators.alreadyAchieved(cls, name, block, dossier)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\gui_items\dossier\achievements\Achieved.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:52:48 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
8051de40984a9a2acb43e21095fbc3aae7026551
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_118/ch23_2020_03_11_11_23_45_741474.py
|
c6ecc8ea4e87cf6cff5cef2378d5c6e336252e92
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
def velocidade(c):
v=(c-80)*5
return v
x = float(input('Qual a velocidade? '))
y=velocidade(x)
if y == 0:
print('Não foi multado')
else:
print ('Foi multado em R$ '' {0:.2f}'.format (y))
|
[
"you@example.com"
] |
you@example.com
|
b76eebcce6d333ab9eeb6a635d645bcff821d353
|
cd4bbecc3f713b0c25508d0c5674d9e103db5df4
|
/toontown/estate/FlowerCollection.py
|
ae519a6213959a49508db54bc4af3e2794d78be4
|
[] |
no_license
|
peppythegod/ToontownOnline
|
dce0351cfa1ad8c476e035aa3947fdf53de916a6
|
2e5a106f3027714d301f284721382cb956cd87a0
|
refs/heads/master
| 2020-04-20T05:05:22.934339
| 2020-01-02T18:05:28
| 2020-01-02T18:05:28
| 168,646,608
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,443
|
py
|
import GardenGlobals
from direct.directnotify import DirectNotifyGlobal
import FlowerBase
class FlowerCollection:
    """Set-like container of the distinct flowers a player has collected.

    Cleaned up from decompiled source: no-op ``continue`` statements and a
    stray dead expression in ``getInitialVariety`` were removed; behaviour
    is unchanged.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('FlowerCollection')

    def __init__(self):
        self.flowerlist = []

    def __len__(self):
        return len(self.flowerlist)

    def getFlower(self):
        """Return the underlying list of FlowerBase entries."""
        return self.flowerlist

    def makeFromNetLists(self, speciesList, varietyList):
        """Rebuild the collection from parallel species/variety lists."""
        self.flowerlist = [FlowerBase.FlowerBase(species, variety)
                           for species, variety in zip(speciesList, varietyList)]

    def getNetLists(self):
        """Return [speciesList, varietyList] for network transmission."""
        speciesList = [flower.getSpecies() for flower in self.flowerlist]
        varietyList = [flower.getVariety() for flower in self.flowerlist]
        return [speciesList, varietyList]

    def hasFlower(self, species, variety):
        """Return 1 when the exact species/variety pair is present, else 0."""
        for flower in self.flowerlist:
            if flower.getSpecies() == species and flower.getVariety() == variety:
                return 1
        return 0

    def hasSpecies(self, species):
        """Return 1 when any variety of *species* is present, else 0."""
        for flower in self.flowerlist:
            if flower.getSpecies() == species:
                return 1
        return 0

    def getInitialVariety(self, species):
        """Return the lowest collected variety of *species* (0 when absent)."""
        retVal = 100000  # sentinel larger than any real variety id
        for flower in self.flowerlist:
            if flower.getSpecies() == species and flower.getVariety() < retVal:
                retVal = flower.getVariety()
        if retVal == 100000:
            retVal = 0
        return retVal

    def __collect(self, newFlower, updateCollection):
        # Name-mangles to _FlowerCollection__collect, matching the original
        # decompiled attribute name, so external callers are unaffected.
        for flower in self.flowerlist:
            if (flower.getVariety() == newFlower.getVariety()
                    and flower.getSpecies() == newFlower.getSpecies()):
                return GardenGlobals.COLLECT_NO_UPDATE
        if updateCollection:
            self.flowerlist.append(newFlower)
        return GardenGlobals.COLLECT_NEW_ENTRY

    def collectFlower(self, newFlower):
        """Add *newFlower* if novel; return the resulting COLLECT_* status."""
        return self.__collect(newFlower, updateCollection=1)

    def __str__(self):
        numFlower = len(self.flowerlist)
        txt = 'Flower Collection (%s flowers):' % numFlower
        for flower in self.flowerlist:
            txt += '\n' + str(flower)
        return txt
|
[
"47166977+peppythegod@users.noreply.github.com"
] |
47166977+peppythegod@users.noreply.github.com
|
cc462bc85d0d716ae2e44775a9e09ff96c2e6614
|
d9f52125601ec26f79202f0e912891b31b60ffc4
|
/오전반/30-days-of-code/Day_06/Day_06_YNY.py
|
463f620727b012c7231ca35c7a30dd8078ae48fe
|
[] |
no_license
|
YoungGaLee/2020_Python_coding-study
|
5a4f36a39021c89ac773a3a7878c44bf8b0b811f
|
b876aabc747709afa21035c3afa7e3f7ee01b26a
|
refs/heads/master
| 2022-12-12T13:34:44.729245
| 2020-09-07T04:07:48
| 2020-09-07T04:07:48
| 280,745,587
| 4
| 4
| null | 2020-07-22T03:27:22
| 2020-07-18T21:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
# HackerRank 30-days-of-code Day 6: for each word, print the characters at
# even indices, a space, then the characters at odd indices.
n = int(input())
for _ in range(n):
    word = input()
    print(word[::2], word[1::2])
|
[
"noreply@github.com"
] |
YoungGaLee.noreply@github.com
|
81a8f7309966861e6a73d3cea111f8f0f441759e
|
153d5ff918a33afb1e73fefab9e774672cf4f129
|
/auth_demo_stripe/wsgi.py
|
06be555707e70f63105bf12ae7bbb1f7f8d691c1
|
[] |
no_license
|
meganduffy/auth_demo_stripe
|
a67700e406fab62091ab52bbb72b0eede89c1f72
|
74c6e1d2af19221d78c4eb813513e5f1d36c3abe
|
refs/heads/master
| 2021-01-17T10:01:22.309264
| 2017-03-06T11:44:39
| 2017-03-06T11:44:39
| 84,001,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
"""
WSGI config for auth_demo_stripe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "auth_demo_stripe.settings")
application = get_wsgi_application()
|
[
"meganemilyduffy@gmail.com"
] |
meganemilyduffy@gmail.com
|
26c1bf3393e74fe359f62019bcd01a096dc2a25a
|
f662aa3ce7896ca0283cae38df8ef824c1b80c9a
|
/examples/larson_hue.py
|
e59f5ae5199f37f0ab3c97b555d030f940ee0d49
|
[
"MIT"
] |
permissive
|
pimoroni/plasma
|
bd7ddebbc60ae7cc9c2561408b52fc46bf810672
|
7857c44255285aac061a9064dd033fd63bbbda29
|
refs/heads/master
| 2023-02-10T13:27:17.565867
| 2023-01-30T17:27:28
| 2023-01-30T17:27:28
| 155,544,928
| 12
| 9
|
MIT
| 2021-11-06T04:14:19
| 2018-10-31T11:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,548
|
py
|
#!/usr/bin/env python3
import math
import time
import colorsys
from plasma import auto
NUM_PIXELS = 10 * 4  # total pixel count across the chained strips
FALLOFF = 1.9        # brightness drop per pixel of distance from the scan centre
SCAN_SPEED = 4       # multiplier on elapsed time for the scan oscillation

plasma = auto(default=f"GPIO:14:15:pixel_count={NUM_PIXELS}")

if plasma.get_pixel_count() == 1:
    raise RuntimeError("Uh, you can't larson scan *one* pixel!?")

plasma.set_clear_on_exit()

start_time = time.time()

# Animation loop: runs until interrupted (pixels cleared on exit).
while True:
    delta = (time.time() - start_time)

    # Offset is a sine wave derived from the time delta
    # we use this to animate both the hue and larson scan
    # so they are kept in sync with each other
    offset = (math.sin(delta * SCAN_SPEED) + 1) / 2

    # Use offset to pick the right colour from the hue wheel
    hue = int(round(offset * 360))

    # Maximum value based on NUM_PIXELS
    max_val = plasma.get_pixel_count() - 1

    # Now we generate a value from 0 to max_val
    offset = int(round(offset * max_val))

    for x in range(plasma.get_pixel_count()):
        sat = 1.0
        # Brightness peaks at the scan centre and falls off linearly.
        val = max_val - (abs(offset - x) * FALLOFF)
        val /= float(max_val)  # Convert to 0.0 to 1.0
        val = max(val, 0.0)  # Ditch negative values

        xhue = hue  # Grab hue for this pixel
        xhue += (1 - val) * 10  # Use the val offset to give a slight colour trail variation
        xhue %= 360  # Clamp to 0-359
        xhue /= 360.0  # Convert to 0.0 to 1.0

        r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(xhue, sat, val)]
        plasma.set_pixel(x, r, g, b, val)

    plasma.show()
    time.sleep(0.001)
|
[
"phil@gadgetoid.com"
] |
phil@gadgetoid.com
|
9d14d6702c380b23bdbc1f209bb5f8a3e6a6beb7
|
46bab53f41324fa880626d80c7a175e11ec30f5b
|
/sinar/representatives/setuphandlers.py
|
f322b7a6cba088432f71f03cc810a8c9149343b1
|
[] |
no_license
|
joemariedimzon/sinar.representatives
|
8d21b5447b65f55fbde809c74dc74be6bc0bfdf7
|
11d63647a1d82c739a6d4312363392f8a6ca79ed
|
refs/heads/master
| 2021-01-18T05:00:12.128279
| 2015-07-07T07:51:19
| 2015-07-07T07:51:19
| 38,667,596
| 0
| 0
| null | 2015-07-07T06:07:04
| 2015-07-07T06:07:03
| null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
from collective.grok import gs
from sinar.representatives import MessageFactory as _
@gs.importstep(
    name=u'sinar.representatives',
    title=_('sinar.representatives import handler'),
    description=_(''))
def setupVarious(context):
    """GenericSetup import step for the sinar.representatives profile."""
    # The marker file identifies our own profile; other profiles running
    # through the same import context are ignored.
    if context.readDataFile('sinar.representatives.marker.txt') is None:
        return
    portal = context.getSite()
    # do anything here
|
[
"khairil.yusof@gmail.com"
] |
khairil.yusof@gmail.com
|
ccc95a8679b749bc527794939994aee82257f6dd
|
1d182c8cf1ce19019e0b1cba4a16ee1a2a49751e
|
/data/base.py
|
d4e7c2318658561292e5f341ea1513223aa70af8
|
[
"MIT"
] |
permissive
|
zxt881108/pytorch-cv
|
e30ac8638a8819b637c6bbef717f733264229126
|
6f2d1760f12c9a56a3e7b19ba74bc41451ea284c
|
refs/heads/master
| 2020-06-18T18:16:09.741626
| 2019-04-29T14:11:06
| 2019-04-29T14:11:06
| 196,396,348
| 5
| 0
| null | 2019-07-11T13:06:29
| 2019-07-11T13:06:28
| null |
UTF-8
|
Python
| false
| false
| 4,270
|
py
|
"""Base dataset methods."""
import os
from torch.utils import data
class ClassProperty(object):
    """Readonly @ClassProperty descriptor for internal usage.

    Like ``property``, but the getter receives the owning *class* rather
    than an instance, so the value is also accessible on the class itself.
    """

    def __init__(self, fget):
        # Getter callable; invoked with the owning class as its argument.
        self.fget = fget

    def __get__(self, instance, owner):
        # The instance (if any) is deliberately ignored.
        return self.fget(owner)
class SimpleDataset(data.Dataset):
    """Simple Dataset wrapper for lists and arrays.

    Parameters
    ----------
    data : dataset-like object
        Any object that implements `len()` and `[]`.
    """

    def __init__(self, data):
        self._data = data

    def __len__(self):
        return len(self._data)

    def __getitem__(self, idx):
        # Delegate indexing straight to the wrapped container.
        return self._data[idx]
class _LazyTransformDataset(data.Dataset):
"""Lazily transformed dataset."""
def __init__(self, data, fn):
super(_LazyTransformDataset, self).__init__()
self._data = data
self._fn = fn
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
item = self._data[idx]
if isinstance(item, tuple):
return self._fn(*item)
return self._fn(item)
def transform(self, fn):
self._fn = fn
class VisionDataset(data.Dataset):
    """Base Dataset with directory checker.

    Parameters
    ----------
    root : str
        The root path of xxx.names, by default is '~/.mxnet/datasets/foo', where
        `foo` is the name of the dataset.

    Raises
    ------
    OSError
        If *root* (after ``~`` expansion) is not an existing directory.
    """

    def __init__(self, root):
        super(VisionDataset, self).__init__()
        if not os.path.isdir(os.path.expanduser(root)):
            helper_msg = "{} is not a valid dir. Did you forget to initialize \
                         datasets described in: \
                         `http://gluon-cv.mxnet.io/build/examples_datasets/index.html`? \
                         You need to initialize each dataset only once.".format(root)
            raise OSError(helper_msg)

    @property
    def classes(self):
        # Subclasses must provide the list of category names.
        raise NotImplementedError

    @property
    def num_class(self):
        """Number of categories."""
        return len(self.classes)

    def transform(self, fn, lazy=True):
        """Returns a new dataset with each sample transformed by the
        transformer function `fn`.

        Parameters
        ----------
        fn : callable
            A transformer function that takes a sample as input and
            returns the transformed sample.
        lazy : bool, default True
            If False, transforms all samples at once. Otherwise,
            transforms each sample on demand. Note that if `fn`
            is stochastic, you must set lazy to True or you will
            get the same result on all epochs.

        Returns
        -------
        Dataset
            The transformed dataset.
        """
        trans = _LazyTransformDataset(self, fn)
        if lazy:
            return trans
        # Eager mode: materialise every transformed sample now.
        return SimpleDataset([i for i in trans])
#### for debug (Note: delete)
from PIL import Image
import numpy as np
class DemoDataset(data.Dataset):
    """Debug dataset yielding *num* random 60x60 RGB PIL images.

    Parameters
    ----------
    num : int
        Number of (randomly generated) samples the dataset reports.
    """

    def __init__(self, num):
        self._num = num

    def __len__(self):
        return self._num

    def __getitem__(self, idx):
        # Fresh random noise image on every access; idx is ignored.
        noise = np.random.randint(0, 255, size=(60, 60, 3)).astype(np.uint8)
        return Image.fromarray(noise)

    def transform(self, fn, lazy=True):
        """Returns a new dataset with each sample transformed by the
        transformer function `fn`.

        Parameters
        ----------
        fn : callable
            A transformer function that takes a sample as input and
            returns the transformed sample.
        lazy : bool, default True
            If False, transforms all samples at once. Otherwise,
            transforms each sample on demand. Note that if `fn`
            is stochastic, you must set lazy to True or you will
            get the same result on all epochs.

        Returns
        -------
        Dataset
            The transformed dataset.
        """
        wrapped = _LazyTransformDataset(self, fn)
        return wrapped if lazy else SimpleDataset([sample for sample in wrapped])
|
[
"tinyshine@yeah.net"
] |
tinyshine@yeah.net
|
6235ff1283a1cd1df9f2920ac2d4acc0b4fda5f2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_tubercles.py
|
1fd9350940d02997c44f6017604e905edf183a0b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#class header
class _TUBERCLES():
def __init__(self,):
self.name = "TUBERCLES"
self.definitions = tubercle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['tubercle']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
23259865da4b2ba2241e13dc4a003730ecd8244e
|
f483545d7765c25d1b315027726dbd74bc77b98a
|
/myproject/helloflask/__init__.py
|
3c841b6144c426d612c3be2276bab54c47abc33d
|
[] |
no_license
|
niceman5/pythonProject
|
e51b44a50776100a63443d7da850ba4b8b00f5eb
|
3589fd200b56f68b856d2b4d2031c2a1135168a0
|
refs/heads/master
| 2023-07-10T16:12:57.756944
| 2023-06-27T08:13:54
| 2023-06-27T08:13:54
| 135,047,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
from flask import Flask, g, request, Response, make_response
from flask import session, render_template, Markup, url_for
from datetime import date, datetime, timedelta
import os
from helloflask.init_db import init_database, db_session

app = Flask(__name__)

# These imports register routes/filters on ``app`` and must run after it exists.
import helloflask.views
import helloflask.tests
import helloflask.filters

app.debug = True
app.jinja_env.trim_blocks = True
# config["connect_args"] = {"options": "-c timezone=utc"}


def dated_url_for(endpoint, **values):
    """url_for wrapper that appends a cache-busting ``q`` param (the file's
    mtime) to static-file URLs, forcing browsers to refetch changed assets."""
    if endpoint == 'static':
        filename = values.get('filename', None)
        if filename:
            file_path = os.path.join(app.root_path,
                                     endpoint, filename)
            values['q'] = int(os.stat(file_path).st_mtime)
    return url_for(endpoint, **values)


@app.context_processor
def override_url_for():
    """Expose the cache-busting url_for to all templates."""
    return dict(url_for=dated_url_for)


app.config.update(
    connect_args={"options": "-c timezone=utc"},
    # NOTE(review): secret key is checked into source -- move to env/config.
    SECRET_KEY='X1243yRH!mMwf',
    SESSION_COOKIE_NAME='pyweb_flask_session',
    PERMANENT_SESSION_LIFETIME=timedelta(31)  # 31 days
)


@app.before_first_request
def beforeFirstRequest():
    """Initialise the database once, before the first request is served."""
    print(">> before_first_request!!")
    init_database()  # initialize database


@app.after_request
def afterReq(response):
    """Pass-through after-request hook (logging only)."""
    print(">> after_request!!")
    return response


@app.teardown_request
def teardown_request(exception):
    """Log the end of every request (exception is None on success)."""
    print(">>> teardown request!!", exception)


@app.teardown_appcontext
def teardown_context(exception):
    """Remove the scoped DB session at the end of each app context."""
    print(">>> teardown context!!", exception)
    db_session.remove()  # remove used db-session
|
[
"niceman555@gmail.com"
] |
niceman555@gmail.com
|
7e0e11a25de222a5998cf039e5d07b16e1e5ee3d
|
0cfb5831a748ebd46e438e3ad7e7a09c1d196499
|
/com/chapter_02/section_03/task_2.3.1_string.py
|
0ced5f96b6c94cd49087d941d8d2db0b958d7a97
|
[] |
no_license
|
StevenGeGe/pythonFromIntroductionToPractice01
|
7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108
|
9d2ba499056b30ded14180e6c4719ee48edd9772
|
refs/heads/master
| 2023-02-15T04:08:59.878711
| 2020-12-28T13:27:55
| 2020-12-28T13:27:55
| 310,980,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2020/11/8 14:44
# @Author  : Yong
# @Email   : Yong_GJ@163.com
# @File    : task_2.3.1_string.py
# @Software: PyCharm

# title(): display each word capitalised -- the first letter of every word
# is upper-cased and the remaining letters lower-cased.

# Title-case a lower-case string
name_big = "ada love"
print(name_big.title())  # Output: Ada Love

# Title-case an already-capitalised string (output is unchanged)
name_small = "All The World"
print(name_small.title())  # Output: All The World
|
[
"Yong_GJ@163.com"
] |
Yong_GJ@163.com
|
4592909cbecdc99a76075adfdb88ebecd628f893
|
e247d9261676f257752c0c6beac161954137a81c
|
/src/0670.maximum-swap/maximum-swap.py
|
a768dba246b1ee138757c7df172f980aba66c1ea
|
[
"MIT"
] |
permissive
|
henrymorgen/Just-Code
|
8fbbd8288b485372a44e10b0078b5edb8af61a3b
|
fa03ebb89edd8f2292de7c0644dbab88dc1d924c
|
refs/heads/master
| 2022-10-19T05:59:53.134092
| 2020-06-10T02:26:43
| 2020-06-10T02:26:43
| 273,656,532
| 1
| 2
|
MIT
| 2020-06-20T07:02:38
| 2020-06-20T07:02:38
| null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
class Solution:
    def maximumSwap(self, num: int) -> int:
        """Swap two digits of *num* at most once to maximize its value.

        Scans right-to-left, remembering where the largest digit seen so far
        lives; whenever a strictly smaller digit appears to its left, that
        pair becomes the current best candidate swap.
        """
        digits = list(str(num))
        best_pos = len(digits) - 1
        swap_left = swap_right = 0
        for pos in range(len(digits) - 1, -1, -1):
            if digits[pos] > digits[best_pos]:
                best_pos = pos
            elif digits[pos] < digits[best_pos]:
                swap_left, swap_right = pos, best_pos
        digits[swap_left], digits[swap_right] = digits[swap_right], digits[swap_left]
        return int("".join(digits))
|
[
"yaxe522@163.com"
] |
yaxe522@163.com
|
c288be163fc503676e07dbc33ab1ccc5193348d6
|
f28591fab50d9b7a539c66b5a81fc91d1bc2ce64
|
/py3/def/uint32_rotateleft.py
|
3d8529dece0a6541a402dce9cfeefd84e5370f9e
|
[] |
no_license
|
tnzw/tnzw.github.io
|
b8a5fe1f8479736bbf2b3594d511a1282939a3b3
|
6d95968db793cebcfa77cb49eecd987f821350db
|
refs/heads/master
| 2023-04-21T14:22:49.849859
| 2023-03-31T15:55:01
| 2023-03-31T15:55:01
| 176,712,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
# uint32_rotateleft.py Version 1.0.0
# Copyright (c) 2020 Tristan Cavelier <t.cavelier@free.fr>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def uint32_rotateleft(uint32, n):
    """Rotate the 32-bit unsigned integer *uint32* left by *n* bits.

    *n* may be any integer (negative or >= 32); it is reduced modulo 32.
    Returns the rotated value as a Python int in [0, 2**32).

    The original contained an unreachable `if n < 0: n += 32` branch:
    Python's % with a positive modulus always yields a result in [0, 32).
    """
    n %= 32
    # Mask before the right shift too, so inputs wider than 32 bits cannot
    # leak high bits into the result.
    return (((uint32 << n) & 0xFFFFFFFF) | ((uint32 & 0xFFFFFFFF) >> (32 - n)))
|
[
"tzw56702@outlook.com"
] |
tzw56702@outlook.com
|
5cbbcad90b7a18247ef4129e11896b12752543ab
|
ec827bd5df431c9400946e8d0593448814b5534b
|
/venv/bin/ipython
|
498f13bc79c779676e375d1d51d86e95af3fa922
|
[] |
no_license
|
grantnicholas/pytone
|
7acd70878de8090d06d7a2911a67b3dbb3b64256
|
b89c688cc88588a3758fff288bc9b1364534b42e
|
refs/heads/master
| 2021-01-23T06:19:47.203418
| 2014-09-21T21:52:27
| 2014-09-21T21:52:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
#!/home/grant/Desktop/pytone/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated for this virtualenv: it strips the
# "-script.pyw"/".exe" suffix setuptools may append to argv[0], then hands
# control to IPython's CLI entry point.
import re
import sys
from IPython import start_ipython
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(start_ipython())
|
[
"grantnicholas2015@u.northwestern.edu"
] |
grantnicholas2015@u.northwestern.edu
|
|
56b5cf1eaba651687a7c590fa1649daae00ec525
|
1b0755fafd5993c8fe5c847d0f3b250f0705cc87
|
/perf/__init__.py
|
ccef7a523ee945da1eb514e9d7dade75768eb8dd
|
[
"MIT"
] |
permissive
|
pombredanne/perf
|
65b722b2822daf598798da40917abdc608708ec3
|
da5f2259815c39569957f584a7e1e57cfdbbb927
|
refs/heads/master
| 2021-04-29T11:31:23.533547
| 2016-12-16T14:50:02
| 2016-12-16T14:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
from __future__ import division, print_function, absolute_import
__version__ = '0.9.2'
# Clocks
try:
    # Python 3.3+ (PEP 418)
    from time import monotonic as monotonic_clock, perf_counter
except ImportError:
    # Pre-3.3 fallback: time.time() is the best available monotonic-ish
    # clock; on Windows time.clock() has the higher resolution.
    import sys
    import time
    monotonic_clock = time.time
    if sys.platform == "win32":
        perf_counter = time.clock
    else:
        perf_counter = time.time
    # Keep the module namespace clean — only the clock names stay public.
    del sys, time
__all__ = ['monotonic_clock', 'perf_counter']
# Re-export the package's public API; each import extends __all__ so
# `from perf import *` stays in sync.
from perf._utils import is_significant, python_implementation, python_has_jit  # noqa
__all__.extend(('is_significant', 'python_implementation', 'python_has_jit'))
from perf._metadata import format_metadata  # noqa
__all__.append('format_metadata')
from perf._bench import Run, Benchmark, BenchmarkSuite, add_runs  # noqa
__all__.extend(('Run', 'Benchmark', 'BenchmarkSuite', 'add_runs'))
from perf._runner import Runner  # noqa
__all__.append('Runner')
|
[
"vstinner@redhat.com"
] |
vstinner@redhat.com
|
18038f0af6c237d5b9db5678328e4d466f172dc2
|
57fec0f5928beaaeb2dc66004267204e77bf05a7
|
/scripts/05-gmaps-test.py
|
ca95867cc5fec1d0fc87836f9afd89caf7c679cc
|
[] |
no_license
|
fgolemo/neo-m8p-python
|
a26d382cd0a8d90bd8eca4a6a2c13a51bc1a08b9
|
f9af936cdc804b24a76b697df749b0aca0325bed
|
refs/heads/master
| 2020-06-21T09:55:13.280892
| 2019-07-25T17:36:07
| 2019-07-25T17:36:07
| 197,414,904
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
import cv2
from neom8p.gmaps import get_gmap
# Fetch a map tile centred on the given lat/lon at zoom level 19 and show it
# in an OpenCV window (waitKey(1) keeps the call non-blocking).
# NOTE(review): `map` shadows the builtin of the same name — harmless in this
# short script, but worth renaming if it grows.
map = get_gmap(45.530807,-73.613293, 19)
cv2.imshow("map", map)
cv2.waitKey(1)
print ("yo")
|
[
"fgolemo@gmail.com"
] |
fgolemo@gmail.com
|
a47f8034e2370aec414aa1e5b290f1bff3f65fe2
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_2700486_0/Python/jbaek/codejam3.py
|
66cc08fb16dc343fe03e3fc66bf66e11429e006d
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
from math import *
from itertools import *
import os
from decimal import *
ALLGRIDS = []
def main():
    """Read the Codejam input file and print one answer line per case.

    Python 2 code (print statements). Each case appears to estimate the
    fraction of simulated diamond-fall sequences that leave a diamond at
    column x with height >= y+1 — confirm against the problem statement.
    """
    global ALLGRIDS
    f = open("/home/jackie/Documents/Codejam/in")
    lines = f.readlines()
    cases = int(lines.pop(0))
    for i in range(cases):
        ALLGRIDS = []  # one grid per distinct fall sequence simulated
        print "Case #%d:" % (i+1),
        guide = split_to_int(lines)
        number = guide[0]
        x = guide[1]
        y = guide[2]
        diamonds = []  # NOTE(review): unused
        grid = {}
        # (0, 0) is special-cased as a certain win — presumably every
        # sequence covers the origin cell.
        if x == 0 and y == 0:
            print "1.0"
            continue
        ALLGRIDS.append(grid)
        do_problem(number, diamonds)
        # Count the simulated outcomes where the target cell is reached.
        total = len(ALLGRIDS)
        win = 0
        for grid in ALLGRIDS:
            if x in grid and grid[x] >= y+1:
                win += 1
        # Decimal keeps exact decimal output; force a trailing ".0" for
        # whole numbers so the format is uniform.
        answer = str(Decimal(win)/Decimal(total))
        if "." not in answer:
            answer += ".0"
        print answer
def do_problem(number, diamonds):
    """Drop *number* diamonds, one at a time, into every tracked grid."""
    global ALLGRIDS
    for _ in range(number):
        # Snapshot the count first: helper() may append newly forked grids
        # to ALLGRIDS, and those must not receive this same diamond again.
        existing = len(ALLGRIDS)
        for idx in range(existing):
            helper(ALLGRIDS[idx], 0)
# drops one diamond
def helper(grid, pos):
    """Simulate one diamond landing at column *pos* of *grid*.

    *grid* maps column index -> stack height. When the diamond could slide
    either way, the grid is forked: a copy (sliding right) is appended to
    the global ALLGRIDS and the original continues sliding left, so every
    equally-likely outcome is enumerated.
    """
    global ALLGRIDS
    if pos not in grid:
        grid[pos]=0
    highest = grid[pos]
    if blockedleft(grid, pos):
        if blockedright(grid,pos):
            # Wedged between two taller stacks: the diamond lands on top.
            # NOTE(review): +2 rather than +1 — presumably diamonds are two
            # units tall when stacked; confirm against the problem statement.
            grid[pos]+=2
            return
        else:
            # Only the left side is taller: deterministically slide right.
            helper(grid, pos+1)
            return
    elif blockedright(grid,pos):
        # Only the right side is taller: deterministically slide left.
        helper(grid, pos-1)
        return
    # go on ground
    elif highest == 0:
        grid[pos]=1
        return
    else:
        # Both sides open: fork the simulation.
        # right
        newgrid = grid.copy()
        ALLGRIDS.append(newgrid)
        helper(newgrid, pos+1)
        # left
        helper(grid, pos-1)
def blockedleft(grid, pos):
    """True when the column just left of *pos* exists and is taller."""
    neighbor = pos - 1
    return neighbor in grid and grid[neighbor] > grid[pos]
def blockedright(grid, pos):
    """True when the column just right of *pos* exists and is taller."""
    neighbor = pos + 1
    return neighbor in grid and grid[neighbor] > grid[pos]
# --- general helpers ---
def split_to_int(lines):
    """Pop the first line off *lines* and return its whitespace-separated ints."""
    return list(map(int, lines.pop(0).split()))
def factors(n):
    """Return the set of all positive divisors of *n* (n >= 1).

    The original used the bare `reduce` builtin (Python 2 only) with
    `list.__add__`, which both breaks under Python 3 and concatenates lists
    quadratically; accumulating straight into a set is simpler and works on
    both versions.
    """
    result = set()
    for i in range(1, int(n**0.5) + 1):
        if n % i == 0:
            result.add(i)
            result.add(n // i)  # the paired divisor
    return result
def isPrime(n):
    """Trial-division primality test using the 6k +/- 1 wheel."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0:
        return False
    if n < 9:
        # Remaining odd candidates below 9 (5, 7) are prime.
        return True
    if n % 3 == 0:
        return False
    limit = int(n ** 0.5)
    candidate = 5
    while candidate <= limit:
        # Candidates of the form 6k-1 and 6k+1 only.
        if n % candidate == 0 or n % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
# Leftover debugging scaffold: a grid with two height-1 columns at 0 and 2.
g = {0:1, 2:1}
#helper(g, 0)
#print ALLGRIDS
main()
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
ee9a241f9d288ae78366ae06757b0dee588ce874
|
5acc77c4d594c1750a9b7477499ee25b4c307bca
|
/ehpi_action_recognition/train_ehpi.py
|
3c8f3b90123e199bd9a2df7439bbf06c510462ca
|
[
"MIT"
] |
permissive
|
noboevbo/ehpi_action_recognition
|
bc15a3c260c79b85a82844a2779c9b1ec9cf42fd
|
3b77eeb5103f0f11c8d4be993ec79dddad7e661c
|
refs/heads/master
| 2021-12-29T05:24:31.891044
| 2021-12-19T16:23:36
| 2021-12-19T16:23:36
| 180,351,212
| 113
| 23
| null | 2019-04-23T11:24:27
| 2019-04-09T11:22:45
|
Python
|
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
import os
import random
from typing import List
import torch
from ehpi_action_recognition.config import ehpi_dataset_path
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_torch_lib.configs.training_configs.training_config_base import TrainingConfigBase
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import EhpiDataset, RemoveJointsOutsideImgEhpi, \
ScaleEhpi, TranslateEhpi, FlipEhpi, NormalizeEhpi
from nobos_torch_lib.datasets.samplers.imbalanced_dataset_sampler import ImbalancedDatasetSampler
from nobos_torch_lib.models.detection_models.shufflenet_v2 import ShuffleNetV2
from torch.utils.data import ConcatDataset, DataLoader
from torchvision.transforms import transforms
from ehpi_action_recognition.trainer_ehpi import TrainerEhpi
foot_indexes: List[int] = [11, 14]
knee_indexes: List[int] = [10, 13]
def get_train_set(dataset_path: str, image_size: ImageSize):
    """Build the concatenated EHPI training set from two recorded sessions.

    Each dataset applies the same augmentation pipeline: joints outside the
    image are removed, then the pose image is scaled, translated, randomly
    flipped (with left/right joint indexes swapped) and normalized.
    """
    num_joints = 15
    # Joint index pairs swapped by FlipEhpi when mirroring a pose.
    left_indexes: List[int] = [3, 4, 5, 9, 10, 11]
    right_indexes: List[int] = [6, 7, 8, 12, 13, 14]
    datasets: List[EhpiDataset] = [
        # Set 1
        # NOTE(review): this entry loads dataset_part=DatasetPart.TEST into
        # the *training* set — confirm that is intentional.
        EhpiDataset(os.path.join(dataset_path, "ofp_record_2019_03_11_HSRT_30FPS"),
                    transform=transforms.Compose([
                        RemoveJointsOutsideImgEhpi(image_size),
                        ScaleEhpi(image_size),
                        TranslateEhpi(image_size),
                        FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
                        NormalizeEhpi(image_size)
                    ]), num_joints=num_joints, dataset_part=DatasetPart.TEST),
        # Set 2
        EhpiDataset(os.path.join(dataset_path, "2019_03_13_Freilichtmuseum_30FPS"),
                    transform=transforms.Compose([
                        RemoveJointsOutsideImgEhpi(image_size),
                        ScaleEhpi(image_size),
                        TranslateEhpi(image_size),
                        FlipEhpi(left_indexes=left_indexes, right_indexes=right_indexes),
                        NormalizeEhpi(image_size)
                    ]), num_joints=num_joints, dataset_part=DatasetPart.TRAIN),
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
if __name__ == '__main__':
    batch_size = 128
    seed = 0
    # Seed Python and torch (CPU and all GPUs) for reproducible training.
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Train set
    train_set = get_train_set(ehpi_dataset_path, image_size=ImageSize(1280, 720))
    # Sampler presumably balances class frequencies — see
    # ImbalancedDatasetSampler in nobos_torch_lib for the exact strategy.
    sampler = ImbalancedDatasetSampler(train_set, dataset_type=EhpiDataset)
    train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, num_workers=1)
    # config
    train_config = TrainingConfigBase("ehpi_model", "models")
    train_config.weight_decay = 0
    train_config.num_epochs = 140
    trainer = TrainerEhpi()
    trainer.train(train_loader, train_config, model=ShuffleNetV2(3))
|
[
"Dennis.Ludl@reutlingen-university.de"
] |
Dennis.Ludl@reutlingen-university.de
|
49df46b47998c18b9a1e1cd63e336461b0b668e5
|
5390d79dad71ad0d9ff9d0777435dcaf4aad16b3
|
/chapter_05/toppings5.py
|
bb3053276c058e6ce16e156ef1659461aab3c552
|
[] |
no_license
|
JasperMi/python_learning
|
19770d79cce900d968cec76dac11e45a3df9c34c
|
8111d0d12e4608484864dddb597522c6c60b54e8
|
refs/heads/master
| 2020-11-26T08:57:02.983869
| 2020-03-11T10:14:55
| 2020-03-11T10:14:55
| 218,935,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
# No toppings were requested for this pizza.
requested_toppings = []
if not requested_toppings:
    # An empty list is falsy — double-check the customer's intent.
    print("Are you sure you want a plain pizza?")
else:
    for topping in requested_toppings:
        print("Adding " + topping + ".")
    print("\nFinished making your pizza!")
|
[
"darmi19@163.com"
] |
darmi19@163.com
|
193cb91ce7cabc2daeb6898364f78bd9d496cf4b
|
9fc6604ae98e1ae91c490e8201364fdee1b4222a
|
/eg_delivery_return_disclaimer_msg/wizards/msg_by_unifonic.py
|
1e5e3eb6e45160c46c0dadf6f1a4942c11dc796a
|
[] |
no_license
|
nabiforks/baytonia
|
b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4
|
58cb304d105bb7332f0a6ab685015f070988ba56
|
refs/heads/main
| 2023-03-23T21:02:57.862331
| 2021-01-04T03:40:58
| 2021-01-04T03:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,566
|
py
|
from odoo import models, fields, api
from odoo.exceptions import Warning
class MsgByUnifonic(models.TransientModel):
    """Wizard that sends a delivery-return disclaimer SMS to the customer."""
    _name = "msg.by.unifonic"
    number = fields.Char(string="Number")
    message = fields.Text(string="Message")
    @api.model
    def default_get(self, fields_list):
        # Pre-fill the wizard from the active stock.picking: take the
        # customer's phone (or mobile) and expand the placeholders of the
        # Unifonic SMS instance's return-disclaimer template.
        res = super(MsgByUnifonic, self).default_get(fields_list)
        picking_id = self.env["stock.picking"].browse(self._context.get("active_id"))
        sms_instance_id = self.env["sms.instance"].search([("provider", "=", "unifonic_sms")], limit=1)
        if picking_id and sms_instance_id:
            message = sms_instance_id.return_disclaimer_msg
            dst_number = picking_id.partner_id.phone or picking_id.partner_id.mobile or None
            if message:
                # Public confirmation link for the customer to approve the return.
                url = "https://oddo.baytonia.com/delivery_return/confirm/{}".format(picking_id.id)
                message = message.replace("{{order_number}}", picking_id.name)
                message = message.replace("{{customer_name}}", picking_id.partner_id.name)
                message = message.replace("{{total_amount}}", str(picking_id.total_amount))
                message = message.replace("{{return_approve_url}}", url)
                res["number"] = dst_number
                res["message"] = message
        return res
    @api.multi
    def send_msg_customer_by_unifonic(self):
        # Both fields are required; delegate the actual send to the shared
        # post.sms.wizard helper.
        if self.message and self.number:
            self.env["post.sms.wizard"].send_sms(body=self.message, dst_number=self.number)
        else:
            raise Warning("Number and Message are required")
|
[
"ash@odoxsofthub.com"
] |
ash@odoxsofthub.com
|
7f733621867abbd79a0a8d2784f7d57814b625e5
|
ebd24e400986c57b4bb1b9578ebd8807a6db62e8
|
/InstaGrade-FormBuilder/xlsxwriter/test/comparison/test_chart_errorbars05.py
|
002e0d8055c1d99983bc226195274cbf4b92c183
|
[] |
no_license
|
nate-parrott/ig
|
6abed952bf32119a536a524422037ede9b431926
|
6e0b6ac0fb4b59846680567150ce69a620e7f15d
|
refs/heads/master
| 2021-01-12T10:15:15.825004
| 2016-12-13T21:23:17
| 2016-12-13T21:23:17
| 76,399,529
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,706
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # Output is written next to the reference file; the comparison base
        # class diffs the two workbooks member-by-member.
        self.maxDiff = None
        filename = 'chart_errorbars05.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename
        self.ignore_files = []
        self.ignore_elements = {}
    def test_create_file(self):
        """Test the creation of an XlsxWriter file with error bars."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'bar'})
        # Fixed axis ids so the generated XML matches the Excel reference file.
        chart.axis_ids = [49016832, 49019136]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        # First series carries the x error bars under test.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'x_error_bars': {'type': 'standard_error'},
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })
        worksheet.insert_chart('E9', chart)
        workbook.close()
        self.assertExcelEqual()
|
[
"nateparro2t@gmail.com"
] |
nateparro2t@gmail.com
|
5ad138fa284a69c9c985ba8a2084ea57d9d8d176
|
0071aad01ab5e91b7d32567470bd729c23bac656
|
/g2048.py
|
d75f388736b07dd7f87d31f67252e7ab02cbf060
|
[] |
no_license
|
Hakuyume/2048-rl
|
19c29e24492bd1efaddbbe0dad28474752b2d97f
|
ff0593582b293bcf1c21bd2e26701da6d24c6647
|
refs/heads/master
| 2021-01-22T18:33:36.057004
| 2017-08-26T06:47:37
| 2017-08-26T06:47:37
| 100,769,933
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,058
|
py
|
import numpy as np
import random
class G2048(object):
    """Minimal 2048 game engine on a size x size board.

    Cells store exponents as uint8: 0 is empty and a value v represents the
    tile 2**v, so merging two equal tiles just increments the exponent.
    """
    def __init__(self, size=4):
        self.size = size
        self.board = np.empty((size, size), dtype=np.uint8)
    def reset(self):
        """Clear the board and spawn the two starting tiles."""
        self.score = 0
        self.board[:] = 0
        for _ in range(2):
            self._add()
    @property
    def movability(self):
        """Boolean array of length 4: whether each rotation direction can move.

        For each direction the board is rotated so the move collapses along
        axis 1; a move is possible if a tile has an empty cell before it, or
        two adjacent tiles are equal (mergeable).
        """
        m = np.zeros(4, dtype=bool)
        for d in range(4):
            board = np.rot90(self.board, d)
            if np.logical_and(board[:, :-1] == 0, board[:, 1:] > 0).any():
                m[d] = True
            elif np.logical_and(
                    board[:, :-1] > 0, board[:, :-1] == board[:, 1:]).any():
                m[d] = True
        return m
    @property
    def is_finished(self):
        # Game over when no direction can move.
        return not self.movability.any()
    def _add(self):
        """Spawn a tile on a random empty cell: exponent 1 (tile 2) with
        probability 3/4, otherwise exponent 2 (tile 4)."""
        blank = tuple(zip(*np.where(self.board == 0)))
        if len(blank) > 0:
            u, v = random.choice(blank)
            if random.uniform(0, 1) > 1 / 4:
                self.board[u, v] = 1
            else:
                self.board[u, v] = 2
    def move(self, direction):
        """Collapse every line toward index 0 of the rotated board.

        v scans the source line, w is the write cursor into new_line; a merge
        increments the exponent and adds the merged tile's value to score.
        A spawn happens only if something actually moved or merged.
        """
        change = False
        for line in np.rot90(self.board, direction):
            v, w = 0, 0
            new_line = np.zeros_like(line)
            while v < self.size:
                if line[v] == 0:
                    v += 1
                elif new_line[w] == line[v]:
                    # Merge: bump the exponent; score gains the new tile value.
                    new_line[w] += 1
                    self.score += 1 << new_line[w]
                    change = True
                    v += 1
                    w += 1
                elif new_line[w] == 0:
                    # Slide into an empty slot; counts as a change only if
                    # the tile actually moved (v != w).
                    new_line[w] = line[v]
                    change = change or not v == w
                    v += 1
                else:
                    w += 1
            # np.rot90 returns a view, so writing back updates self.board.
            line[:] = new_line
        if change:
            self._add()
    def normalize(self):
        """Canonicalize the board to the lexicographically smallest of its
        8 symmetries (4 rotations x optional transpose)."""
        self.board[:] = min(
            (np.rot90(b, r)
             for b in (self.board, self.board.transpose())
             for r in range(4)),
            key=lambda b: tuple(b.flatten()))
|
[
"Hakuyume@users.noreply.github.com"
] |
Hakuyume@users.noreply.github.com
|
8ee0c7c66379fbead56732ab779d72356e965329
|
925f199438b3af508cf083ce094cb6a5f208fed8
|
/src/lt_847.py
|
ed54216792f6792912f298fe087f8840d98ee563
|
[] |
no_license
|
oxhead/CodingYourWay
|
b1b50236cdfb06669c123fd9202ce3d87304a3bf
|
e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59
|
refs/heads/master
| 2020-08-06T16:45:21.054650
| 2018-06-26T03:53:38
| 2018-06-26T03:53:38
| 30,577,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,323
|
py
|
"""
https://leetcode.com/contest/weekly-contest-87/problems/shortest-path-visiting-all-nodes/
"""
"""
An undirected, connected graph of N nodes (labeled 0, 1, 2, ..., N-1) is given as graph.
graph.length = N, and j != i is in the list graph[i] exactly once, if and only if nodes i and j are connected.
Return the length of the shortest path that visits every node. You may start and stop at any node, you may revisit nodes multiple times, and you may reuse edges.
Example 1:
Input: [[1,2,3],[0],[0],[0]]
Output: 4
Explanation: One possible path is [1,0,2,0,3]
Example 2:
Input: [[1],[0,2,4],[1,3,4],[2],[1,2]]
Output: 4
Explanation: One possible path is [0,1,4,2,3]
Note:
1 <= graph.length <= 12
0 <= graph[i].length < graph.length
"""
class Solution:
    def shortestPathLength(self, graph):
        """Length of the shortest walk that visits every node at least once.

        :type graph: List[List[int]] -- adjacency lists of a connected,
            undirected graph with 1 <= len(graph) <= 12
        :rtype: int

        The original ran a BFS that started only from a slice of the
        minimum-degree nodes (``endpoints[1:2]``) and never deduplicated
        states -- both incorrect (the optimal walk may start anywhere) and
        exponential. This version is the standard multi-source BFS over
        (node, visited-bitmask) states: O(2**n * n) states, each expanded
        once, and the first full mask popped is optimal.
        """
        from collections import deque

        n = len(graph)
        if n <= 1:
            return 0
        full_mask = (1 << n) - 1
        # Start simultaneously from every node with only itself visited.
        queue = deque((node, 1 << node, 0) for node in range(n))
        seen = {(node, 1 << node) for node in range(n)}
        while queue:
            node, mask, dist = queue.popleft()
            if mask == full_mask:
                return dist
            for neighbor in graph[node]:
                state = (neighbor, mask | (1 << neighbor))
                if state not in seen:
                    seen.add(state)
                    queue.append(state + (dist + 1,))
        return -1  # unreachable for a connected graph
if __name__ == '__main__':
    # Ad-hoc test harness: (input graph, expected answer) pairs; earlier
    # cases are commented out, only the last one is active.
    test_cases = [
        #([[1,2,3],[0],[0],[0]], 4),
        #([[1],[0,2,4],[1,3,4],[2],[1,2]], 4),
        #([[1],[0,2],[1,3],[2],[1,5],[4]], 6),
        #([[1],[0,2,6],[1,3],[2],[5],[4,6],[1,5,7],[6]], 9),
        ([[1,4,6,8,9],[0,6],[9],[5],[0],[7,3],[0,1],[9,5],[0],[0,2,7]], 10),
    ]
    for test_case in test_cases:
        print('case:', test_case)
        output = Solution().shortestPathLength(test_case[0])
        print('output:', output)
        assert output == test_case[1]
|
[
"kmscout@gmail.com"
] |
kmscout@gmail.com
|
a2d189784bb2a282ec8d7cdf005a0c8612dceb9b
|
bd08d0532f20b7285b437c9bf620de1bbcd5b9ea
|
/aalh_iit_buildings_006/populate-iso8601-amerdate.py
|
08c1fdd9ca6bdcee638e2292f3d12d555f36c6ff
|
[
"Unlicense"
] |
permissive
|
johndewees/iitmigration
|
a9e8a31ba6ceb541ce12c22fd612596cc243dbca
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
refs/heads/main
| 2023-03-14T17:06:58.777683
| 2021-03-27T20:44:58
| 2021-03-27T20:44:58
| 320,086,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,719
|
py
|
from openpyxl import load_workbook
import re

# Normalises the date column (col 15) of the metadata sheet to ISO 8601:
#  * American "Month DD, YYYY" dates are parsed and the ISO form is written
#    to column 16;
#  * "M/D/YYYY" slash dates are rewritten in place (column 15) as ISO;
#  * anything else is reported on stdout and left untouched.
# Rewrite of the original: the 50-line month if/elif chain is a lookup
# table, the duplicated cell reads are done once per row, and the bare
# `except:` is narrowed to `except Exception` (it still absorbs any
# unparseable value, matching the original behaviour).

filename = 'aalh_iit_buildings_006.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']

minimumcol = 15
maximumcol = 15
minimumrow = 7
maximumrow = 515
iterationrow = 7
targetcol = 15        # column holding the original date string
isostandardcol = 16   # column receiving the ISO form of American dates

# Month-name fragments (checked title-case then lower-case, Jan..Dec order,
# exactly like the original chain) mapped to two-digit month numbers.
_MONTHS = (
    ('Jan', '01'), ('Feb', '02'), ('Mar', '03'), ('Apr', '04'),
    ('May', '05'), ('Jun', '06'), ('Jul', '07'), ('Aug', '08'),
    ('Sep', '09'), ('Oct', '10'), ('Nov', '11'), ('Dec', '12'),
)


def _american_year(raw):
    """Return the 4-digit year of an American "Month DD, YYYY" date, else None.

    Raises IndexError (handled by the caller) when a comma is present but no
    4-digit year can be found.
    """
    if raw.find(',') == -1:
        print('Not an American formatted date (year)')
        return None
    year = re.findall('\d\d\d\d', raw)[0]
    print(year)
    return year


def _american_month(raw):
    """Two-digit month of an American date; None when no month name matches."""
    if raw.find(',') == -1:
        print('Not an American formatted date (month)')
        return None
    month = None
    for name, number in _MONTHS:
        if name in raw or name.lower() in raw:
            month = number
            break
    print(month)  # may legitimately print None, as the original did
    return month


def _american_day(raw):
    """Zero-padded day-of-month of an American date, else None.

    Raises IndexError (handled by the caller) when the text before the comma
    has no second word.
    """
    if raw.find(',') == -1:
        print('Not an American formatted date (day)')
        return None
    day = raw.split(',')[0].split()[1]
    if not day.startswith(('1', '2', '3')):
        day = '0' + day  # single-digit days get a leading zero
    print(day)
    return day


def _slash_to_iso(raw):
    """Convert "M/D/YYYY" to "YYYY-MM-DD"; return None for non-slash values.

    Raises (handled by the caller) when the parts are missing or non-numeric.
    """
    if raw.find('/') == -1:
        print('Not a date formatted with a slash')
        return None
    parts = raw.split('/')
    year_s = parts[2].strip()
    month = int(parts[0].strip())
    day = int(parts[1].strip())
    return '{}-{:02d}-{:02d}'.format(year_s, month, day)


for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
    print(iterationrow)
    raw = ws.cell(row=iterationrow, column=targetcol).value
    print(raw)
    try:
        year = _american_year(raw)
        month = _american_month(raw)
        day = _american_day(raw)
        iso = _slash_to_iso(raw)
        if iso is not None:
            # Slash dates are normalised in place in the source column.
            ws.cell(row=iterationrow, column=targetcol).value = iso
        if year is None:
            print('Not an American formatted date at all')
        else:
            # Raises TypeError (caught below) when month or day is missing.
            amerdatetrans = year + '-' + month + '-' + day
            ws.cell(row=iterationrow, column=isostandardcol).value = amerdatetrans
            print(amerdatetrans)
    except Exception:
        # Any unparseable value (None cells, missing fields, short splits)
        # lands here, matching the original bare-except behaviour.
        print('Not an American formatted date at all')
    iterationrow = iterationrow + 1

wb.save('aalh_iit_buildings_006.xlsx')
|
[
"noreply@github.com"
] |
johndewees.noreply@github.com
|
5d46d3160485153a72aeaa43b0d98d716859314c
|
5cdd13489c995d825985f8e76fb9641d83675972
|
/PlotConfiguration/ISR/2016/fake_estimation/muon/LLSS/cuts.py
|
313c13d35f546643f1eed5f28fcb69008150737b
|
[] |
no_license
|
CMSSNU/MultiUniv
|
d506cea55b1f57e0694309e04b9584434c859917
|
cb72ac8cba215598a0f09a46725123e071f9137f
|
refs/heads/master
| 2020-04-20T06:23:13.425043
| 2020-03-25T08:11:31
| 2020-03-25T08:11:31
| 168,682,069
| 0
| 4
| null | 2020-02-13T10:14:48
| 2019-02-01T10:35:47
|
Python
|
UTF-8
|
Python
| false
| false
| 509
|
py
|
from CommonPyTools.python.CommonTools import *
SKFlat_WD = os.getenv('SKFlat_WD')
sys.path.insert(0,SKFlat_WD+'/CommonTools/include')
from Definitions import *
# Baseline selection applied to every event; '1==1' keeps everything.
supercut = '1==1'
# for fake estimation
# LL same sign
# NOTE(review): `cuts` (and `os`/`sys`) are not defined in this file --
# presumably provided by the star-imports above; confirm before refactoring.
cuts['detector_level'] = 'is_dimu_tri_passed == 1 && evt_tag_dimuon_rec_Fake == 1 && evt_tag_dielectron_rec_Fake == 0 && evt_tag_analysisevnt_sel_rec_Fake == 1 && dilep_pt_rec_Fake < 100. && dilep_mass_rec_Fake > 40 && evt_tag_oppositecharge_sel_rec_Fake == 0 && evt_tag_LL_rec_Fake == 1 '
|
[
"jhkim@cern.ch"
] |
jhkim@cern.ch
|
a7806cbd020f9a30ef0b3337e9f90d839d99a427
|
da92caf06447ec7e244dfa11e71b551a4dab7d14
|
/src/plugins/evoked_average.py
|
21e26af5a91a55b09c07c45812ed17bb1e6ac9ab
|
[
"MIT"
] |
permissive
|
Frikster/Mesoscale-Brain-Explorer
|
28298adbcb49dc399f85fe4db1c3dc1263468677
|
269d8f18162e2b9dca4619561e73a6beb8ba810c
|
refs/heads/master
| 2020-04-04T22:17:29.714298
| 2017-11-20T16:24:19
| 2017-11-20T16:24:19
| 61,849,037
| 5
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,036
|
py
|
#!/usr/bin/env python3
import os
import numpy as np
import psutil
import qtutil
from PyQt4.QtGui import *
from .util import project_functions as pfs
from .util.plugin import PluginDefault
from .util.plugin import WidgetDefault
class Widget(QWidget, WidgetDefault):
    """Plugin widget that builds an evoked average from multiple image stacks.

    The evoked average is computed frame-by-frame: output frame i is the mean
    of frame i across all selected stacks, truncated to the shortest stack.
    """
    class Labels(WidgetDefault.Labels):
        pass

    class Defaults(WidgetDefault.Defaults):
        # Tag appended to output filenames; the number of averaged files is
        # appended at run time (e.g. "evoked-avg_3").
        manip = 'evoked-avg'

    def __init__(self, project, plugin_position, parent=None):
        super(Widget, self).__init__(parent)
        if not project or not isinstance(plugin_position, int):
            return
        self.avg_button = QPushButton('Generate Evoked Average')
        WidgetDefault.__init__(self, project, plugin_position)

    def setup_ui(self):
        super().setup_ui()
        self.vbox.addWidget(self.avg_button)

    def setup_signals(self):
        super().setup_signals()
        self.avg_button.clicked.connect(self.execute_primary_function)

    def execute_primary_function(self, input_paths=None):
        """Average the selected (or given) .npy stacks and save the result.

        Returns the saved output path, or None when fewer than two stacks
        are selected.
        """
        if not input_paths:
            if not self.selected_videos:
                return
            selected_videos = self.selected_videos
        else:
            selected_videos = input_paths

        progress_global = QProgressDialog('Creating evoked average...', 'Abort', 0, 100, self)
        progress_global.setAutoClose(True)
        progress_global.setMinimumDuration(0)

        def global_callback(x):
            progress_global.setValue(x * 100)
            QApplication.processEvents()

        filenames = selected_videos
        if len(filenames) < 2:
            qtutil.warning('Select multiple files to average.')
            return

        # Memory-map the stacks so only the frames being averaged are loaded.
        stacks = [np.load(f, mmap_mode='r') for f in filenames]
        min_lens = np.min([len(stack) for stack in stacks])
        # BUG FIX: the original allocated (min_lens, shape[2], shape[1]) --
        # transposed relative to the (shape[1], shape[2]) frames assigned
        # into it -- which raised a broadcast error for non-square frames.
        frame_shape = stacks[0].shape[1:]
        trig_avg = np.empty((min_lens,) + frame_shape,
                            np.load(filenames[0], mmap_mode='r').dtype)

        for frame_index in range(min_lens):
            global_callback(frame_index / min_lens)
            # Average this frame across all stacks in float32, then store in
            # the output's native dtype.
            frames_to_avg = np.array([stack[frame_index] for stack in stacks],
                                     dtype=np.float32)
            trig_avg[frame_index] = np.mean(frames_to_avg, axis=0, dtype=np.float32)
        global_callback(1)

        manip = self.Defaults.manip + '_' + str(len(filenames))
        output_path = pfs.save_project(filenames[0], self.project, trig_avg, manip, 'video')
        pfs.refresh_list(self.project, self.video_list,
                         self.params[self.Labels.video_list_indices_label],
                         self.Defaults.list_display_type,
                         self.params[self.Labels.last_manips_to_display_label])
        return output_path

    def setup_whats_this(self):
        super().setup_whats_this()
        self.avg_button.setWhatsThis("Generate evoked average for selected image stacks where each frame is averaged "
                                     "across image stacks for each frame")
class MyPlugin(PluginDefault):
    """Registers the evoked-average widget as a pipeline plugin."""
    def __init__(self, project, plugin_position):
        self.name = 'Evoked Average'
        self.widget = Widget(project, plugin_position)
        super().__init__(self.widget, self.widget.Labels, self.name)
    def check_ready_for_automation(self, expected_input_number):
        # All selected files are loaded for averaging, so make sure their
        # combined size fits in the currently available RAM.
        self.summed_filesize = 0
        for path in self.widget.selected_videos:
            self.summed_filesize = self.summed_filesize + os.path.getsize(path)
        # Index 1 of psutil.virtual_memory() is the 'available' field.
        self.available = list(psutil.virtual_memory())[1]
        if self.summed_filesize > self.available:
            return False
        return True
    def automation_error_message(self):
        # Built from the values cached by check_ready_for_automation().
        return "Not enough memory. All files to be averaged together are of size ~"+str(self.summed_filesize) +\
               " and available memory is: " + str(self.available)
|
[
"dirk.haupt@gmail.com"
] |
dirk.haupt@gmail.com
|
9fa71db652f5ba9a7efaf6487c314e53826c6153
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/tidb/apis/DescribeAvailableDBInfoInternelRequest.py
|
e771d081b365e9d329da6981125f9fced96c4cf4
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168
| 2023-08-30T12:00:25
| 2023-08-30T12:00:25
| 126,276,169
| 18
| 36
|
Apache-2.0
| 2023-09-07T06:54:49
| 2018-03-22T03:47:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,479
|
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class DescribeAvailableDBInfoInternelRequest(JDCloudRequest):
    """
    Query the basic information supported by TiDB.
    """
    def __init__(self, parameters, header=None, version="v1"):
        # GET request against the region-scoped describeAvailableDBInfoInternel
        # action; {regionId} is substituted from the parameters object.
        super(DescribeAvailableDBInfoInternelRequest, self).__init__(
            '/regions/{regionId}/instances:describeAvailableDBInfoInternel', 'GET', header, version)
        self.parameters = parameters
class DescribeAvailableDBInfoInternelParameters(object):
    """Parameter holder for the describeAvailableDBInfoInternel request."""

    def __init__(self,regionId, ):
        """
        :param regionId: region code
        """
        self.regionId = regionId
        # Optional filter; left unset unless setAzs() is called.
        self.azs = None

    def setAzs(self, azs):
        """
        :param azs: (Optional) the caller's availability zones
            (comma-separated when more than one)
        """
        self.azs = azs
|
[
"jdcloud-api@jd.com"
] |
jdcloud-api@jd.com
|
47d31b4ad6d9d3f9ec16487c975797465de7096d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/22/usersdata/112/11794/submittedfiles/av1_2.py
|
5ecfae8a8c59536c3785bab3a905bd43d390601a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math

# Fixed version of the original submission, which did not run at all:
#  * `meio = B` referenced an undefined name B (ant/prox/meio were unused
#    anyway, so they are removed);
#  * `if n=k` / `if j=l` used assignment syntax instead of `==`;
#  * the final branch evaluated the bare string ('falsa') without printing;
#  * the two independent `if`s could print both 'verdadeira' and 'falsa'
#    for the same input -- now a single if/elif/else chain.
n = input('Digite o valor de n:')
j = input('Digite o valor de j:')
k = input('Digite o valor de k:')
l = input('Digite o valor de l:')

# "verdadeira" when exactly one of the pairs (n,k) / (j,l) is equal.
if n == k and j != l:
    print('verdadeira')
elif j == l and n != k:
    print('verdadeira')
else:
    print('falsa')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
870d12fe6a587e970c108504b42268cb10c844f3
|
2ed2dd917afb05d194e87f989d78953b31a5781b
|
/lesson10/mission08.py
|
718005e6a8b1523d4636183b46dc3a00179e899b
|
[] |
no_license
|
RenegaDe1288/pythonProject
|
4058d549db7c37652f77438c31f8b31476497d98
|
801c06f3be22ed63214987b11d6f1b3fd2fe5b44
|
refs/heads/master
| 2023-08-17T13:20:50.777842
| 2021-10-05T10:51:00
| 2021-10-05T10:51:00
| 393,145,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# Draws an ASCII figure: a vertical bar through the middle column, a
# horizontal bar through the middle row, and two diagonals offset five
# columns from the centre.
# NOTE(review): the prompts call these "width" and "length", but the first
# value controls the row count.
total_rows = int(input('Введите ширину '))
total_cols = int(input('Введите длину '))

mid_row = total_rows // 2
mid_col = total_cols // 2
for r in range(total_rows):
    line = ''
    for c in range(total_cols):
        # Order matters: the vertical bar yields to the horizontal bar on
        # the middle row, and both take precedence over the diagonals.
        if c == mid_col and r != mid_row:
            line += '|'
        elif r == mid_row:
            line += '-'
        elif c == mid_col + 5 + r:
            line += '\\'
        elif c == mid_col - r - 5:
            line += '/'
        else:
            line += ' '
    print(line)
|
[
"D121188@yandex.ru"
] |
D121188@yandex.ru
|
473d655633f7f72afa53daced7e8c8a4a90c4f51
|
a209c2238ff97d781fc6f15d9b3ae6ecf9c15b53
|
/utils/preprocess.py
|
6b7077e20c2ba3b9257a3940756e4f54e10dd416
|
[] |
no_license
|
Arcana-2236/Text-Classification
|
1788e05e4c29ce0e7130f38cd16af5ab08fbe6fd
|
69047f0ffdfc621e3cb2d59056ac93d69582090b
|
refs/heads/master
| 2022-04-12T08:30:50.089277
| 2020-03-28T06:09:16
| 2020-03-28T06:09:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,234
|
py
|
import os
import re
import zipfile
import pickle
import jieba
import pandas as pd
import numpy as np
from collections import Counter
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import train_test_split
ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# input file
ZIP_DATA = os.path.join(ROOT, 'data', '百度题库.zip') # 要解压的文件
STOPWORDS = os.path.join(ROOT, 'data', 'stopwords.txt')
# output file path
# BERT
TRAIN_TSV = os.path.join(ROOT, 'data', 'train.tsv') # BERT的数据文件
DEV_TSV = os.path.join(ROOT, 'data', 'dev.tsv')
TEST_TSV = os.path.join(ROOT, 'data', 'test.tsv')
# TextCNN and Transformer
TOKENIZER_BINARIZER = os.path.join(ROOT, 'data', 'tokenizer_binarizer.pickle')
LABELS_FILE = os.path.join(ROOT, 'data', 'label.txt')
X_NPY = os.path.join(ROOT, 'data', 'x.npy') # testcnn 和 transformer的数据文件
Y_NPY = os.path.join(ROOT, 'data', 'y.npy')
def unzip_data():
"""
解压数据
"""
with zipfile.ZipFile(ZIP_DATA, 'r') as z:
z.extractall(os.path.join(ROOT, 'data'))
print("已将压缩包解压至{}".format(z.filename.rstrip('.zip')))
return z.filename.rstrip('.zip')
def combine_data(data_path):
"""
把四门科目内的所有文件合并
"""
r = re.compile(r'\[知识点:\]\n(.*)') # 用来寻找知识点的正则表达式
r1 = re.compile(r'纠错复制收藏到空间加入选题篮查看答案解析|\n|知识点:|\s|\[题目\]') # 简单清洗
data = []
for root, dirs, files in os.walk(data_path):
if files: # 如果文件夹下有csv文件
for f in files:
subject = re.findall('高中_(.{2})', root)[0]
topic = f.strip('.csv')
tmp = pd.read_csv(os.path.join(root, f)) # 打开csv文件
tmp['subject'] = subject # 主标签:科目
tmp['topic'] = topic # 副标签:科目下主题
tmp['knowledge'] = tmp['item'].apply(
lambda x: r.findall(x)[0].replace(',', ' ') if r.findall(x) else '')
tmp['item'] = tmp['item'].apply(lambda x: r1.sub('', r.sub('', x)))
data.append(tmp)
data = pd.concat(data).rename(columns={'item': 'content'}).reset_index(drop=True)
# 删掉多余的两列
data.drop(['web-scraper-order', 'web-scraper-start-url'], axis=1, inplace=True)
return data
def extract_label(df, freq=0.01):
"""
:param df: 合并后的数据集
:param freq: 要过滤的标签占样本数量的比例
:return: DataFrame
"""
knowledges = ' '.join(df['knowledge']).split() # 合并
knowledges = Counter(knowledges)
k = int(df.shape[0] * freq) # 计算对应频率知识点出现的次数
print('过滤掉出现次数少于 %d 次的标签' % k)
top_k = {i for i in knowledges if knowledges[i] > k} # 过滤掉知识点出现次数小于k的样本
df.knowledge = df.knowledge.apply(lambda x: ' '.join([label for label in x.split() if label in top_k]))
df['label'] = df[['subject', 'topic', 'knowledge']].apply(lambda x: ' '.join(x), axis=1)
return df[['label', 'content']]
def create_bert_data(df, small=False):
"""
对于 bert 的预处理
如果small=True:是因为自己的电脑太菜,就用比较小的数据量在本地实现模型
该函数给bert模型划分了3个数据集
"""
df['content'] = df['content'].apply(lambda x: x.replace(' ', ''))
if small:
print('use small dataset to test my local bert model really work')
train = df.sample(128)
dev = df.sample(64)
test = df.sample(64)
else:
train, test = train_test_split(df, test_size=0.2, random_state=2020)
train, dev = train_test_split(train, test_size=0.2, random_state=2020)
print('preprocess for bert!')
print('create 3 tsv file(train, dev, test) in %s' % (os.path.join(ROOT, 'data')))
train.to_csv(TRAIN_TSV, index=None, sep='\t')
dev.to_csv(DEV_TSV, index=None, sep='\t')
test.to_csv(TEST_TSV, index=None, sep='\t')
def load_stopwords():
return {line.strip() for line in open(STOPWORDS, encoding='UTF-8').readlines()}
def sentence_preprocess(sentence):
# 去标点
r = re.compile("[^\u4e00-\u9fa5]+|题目")
sentence = r.sub("", sentence) # 删除所有非汉字字符
# 切词
words = jieba.cut(sentence, cut_all=False)
# 去停用词
stop_words = load_stopwords()
words = [w for w in words if w not in stop_words]
return words
def df_preprocess(df):
"""
合并了去标点,切词,去停用词的操作
:param df:
:return:
"""
df.content = df.content.apply(sentence_preprocess)
return df
def create_testcnn_data(df, num_words=50000, maxlen=128):
# 对于label处理
mlb = MultiLabelBinarizer()
y = mlb.fit_transform(df.label.apply(lambda label: label.split()))
with open(LABELS_FILE, mode='w', encoding='utf-8') as f:
for label in mlb.classes_:
f.write(label+'\n')
# 对content处理
tokenizer = Tokenizer(num_words=num_words, oov_token="<UNK>")
tokenizer.fit_on_texts(df.content.tolist())
x = tokenizer.texts_to_sequences(df.content)
x = pad_sequences(x, maxlen=maxlen, padding='post', truncating='post') # padding
# 保存数据
np.save(X_NPY, x)
np.save(Y_NPY, y)
print('已创建并保存x,y至:\n {} \n {}'.format(X_NPY, Y_NPY))
# 同时还要保存tokenizer和 multi_label_binarizer
# 否则训练结束后无法还原把数字还原成文本
tb = {'tokenizer': tokenizer, 'binarizer': mlb} # 用个字典来保存
with open(TOKENIZER_BINARIZER, 'wb') as f:
pickle.dump(tb, f)
print('已创建并保存tokenizer和binarizer至:\n {}'.format(TOKENIZER_BINARIZER))
def load_testcnn_data():
"""
如果分开保存,那要保存6个文件太麻烦了。
所以采取读取之后划分数据集的方式
"""
# 与之前的bert同步
x = np.load(X_NPY).astype(np.float32)
y = np.load(Y_NPY).astype(np.float32)
# 与之前bert的划分方式统一
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=2020)
train_x, dev_x, train_y, dev_y = train_test_split(train_x, train_y, test_size=0.2, random_state=2020)
return train_x, dev_x, test_x, train_y, dev_y, test_y
def load_tokenizer_binarizer():
"""
读取tokenizer 和 binarizer
:return:
"""
with open(TOKENIZER_BINARIZER, 'rb') as f:
tb = pickle.load(f)
return tb['tokenizer'], tb['binarizer']
def main():
"""
合并以上所有操作
"""
data_path = unzip_data() # 解压
df = combine_data(data_path) # 合并
df = extract_label(df) # 提取标签
# 对于bert的预处理
create_bert_data(df)
# 对于testcnn和transformer的预处理
df = df_preprocess(df) # 切词,分词,去停用词
create_testcnn_data(df, num_words=50000, maxlen=128)
if __name__ == '__main__':
main()
|
[
"435786117@qq.com"
] |
435786117@qq.com
|
4cb105211199b388e964f55bb905a04d35572cf9
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/LArCalorimeter/LArTest/LArConditionsTest/share/FixLArElecCalib_fix6_jobOptions.py
|
59efd81bb72cab9f075cafd0a9f3b68c0147137b
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,813
|
py
|
###############################################################
#
# Job options file 1
#
#==============================================================
#use McEventSelector
include( "AthenaCommon/Atlas_Gen.UnixStandardJob.py" )
from AthenaCommon.DetFlags import DetFlags
DetFlags.Calo_setOn()
DetFlags.ID_setOff()
DetFlags.Muon_setOff()
DetFlags.Truth_setOff()
DetFlags.LVL1_setOff()
DetFlags.digitize.all_setOff()
from AthenaCommon.GlobalFlags import GlobalFlags
GlobalFlags.DataSource.set_geant4()
GlobalFlags.InputFormat.set_pool()
GlobalFlags.DetGeo.set_atlas()
DetDescrVersion = "ATLAS-CSC-02-00-00"
# DetDescrVersion = "ATLAS-DC3-05"
# LArIdMapFix=7
# G4Phys ="QGSP_EMV"
# G4Phys ="QGSP_BERT"
# Switches:
# items
RunNumber = 1
#
RecreateFolder = False
WriteIOV = True
# Objects and its tag
ObjectList = []
TagList = []
# FIX
if DetDescrVersion == "ATLAS-CSC-02-00-00" :
TagNameForFix = "CSC02-F"
else :
TagNameForFix = "Wrong"
print " ERROR: wrong DetDescrVersion"
ObjectList += ["LArNoiseMC#LArNoise#/LAR/ElecCalibMC/Noise"]
ObjectList += ["LAruA2MeVMC#LAruA2MeV#/LAR/ElecCalibMC/uA2MeV"]
ObjectList += ["LArDAC2uAMC#LArDAC2uA#/LAR/ElecCalibMC/DAC2uA"]
ObjectList += ["LArRampMC#LArRamp#/LAR/ElecCalibMC/Ramp"]
TagList += ["LARElecCalibMCNoise-"+TagNameForFix]
TagList += ["LARElecCalibMCuA2MeV-"+TagNameForFix]
TagList += ["LARElecCalibMCDAC2uA-"+TagNameForFix]
TagList += ["LARElecCalibMCRamp-"+TagNameForFix]
OutputPOOLFileName = "LArFCalADC2MeV_13.0.30_v1.pool.root"
#/--------------------------------------------------------------
# Algorithm to fix the LAr Id, if needed
#/-------------------------------
theApp.Dlls += [ "LArConditionsTest" ]
theApp.TopAlg += [ "FixLArElecCalib" ]
FixLArElecCalib = Algorithm("FixLArElecCalib")
# 1=
# 2=fix for IdMapFix=1
# 3=new fsample for CSC-02
# 5=new FCAL noise and minbias
FixLArElecCalib.FixFlag =6
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
theApp.Dlls += [ "LArTools" ]
include ("AtlasGeoModel/SetGeometryVersion.py")
include ("AtlasGeoModel/GeoModelInit.py")
# Other LAr related
include( "LArIdCnv/LArIdCnv_joboptions.py" )
include( "CaloDetMgrDetDescrCnv/CaloDetMgrDetDescrCnv_joboptions.py" )
include( "IdDictDetDescrCnv/IdDictDetDescrCnv_joboptions.py" )
include( "LArConditionsCommon/LArConditionsCommon_MC_jobOptions.py" )
include( "LArConditionsCommon/LArIdMap_MC_jobOptions.py" )
#--------------------------------------------------------------
EventSelector = Service( "EventSelector" )
EventSelector.RunNumber=1
#EventSelector.EventsPerRun=10;
EventSelector.EventsPerRun=2
EventSelector.FirstEvent=1
# theApp.Dlls += [ "PoolSvc", "AthenaPoolCnvSvc", "AthenaPoolCnvSvcPoolCnv", "EventAthenaPoolPoolCnv", "EventSelectorAthenaPool" ]
include( "AthenaPoolCnvSvc/AthenaPool_jobOptions.py" )
theApp.Dlls += [ "AthenaPoolCnvSvc" ]
theApp.Dlls += [ "LArCondAthenaPoolPoolCnv" ]
include( "AthenaSealSvc/AthenaSealSvc_joboptions.py" )
# AthenaSealSvc.CheckDictAtInit = True
include ("LArRawConditions/LArRawConditionsDict_joboptions.py")
# include ("LArTools/LArToolsDict_joboptions.py")
theApp.EvtMax=1
AthenaEventLoopMgr=Service("AthenaEventLoopMgr")
AthenaEventLoopMgr.OutputLevel = INFO
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = INFO
MessageSvc.defaultLimit = 1000000;
MessageSvc.Format = "% F%20W%S%7W%R%T %0W%M"
theApp.Dlls += [ "GaudiAud" ]
theAuditorSvc = AuditorSvc()
theAuditorSvc.Auditors = [ "ChronoAuditor" ]
##############################################
# Writing POOL and COOL
if len(ObjectList)>0 :
# include regstration alg (default is WriteIOV = False)
include("RegistrationServices/OutputConditionsAlg_jobOptions.py")
# List of objects container type#key#foldername
OutputConditionsAlg.ObjectList = ObjectList
OutputConditionsAlg.IOVTagList = TagList
ToolSvc = Service("ToolSvc")
ToolSvc.ConditionsAlgStream.OutputFile = OutputPOOLFileName
# Set flag to register and run interval Run1/Event1 to Run2/Event2
# Usually, only need to set Run1, others go to default
####
OutputConditionsAlg.WriteIOV = WriteIOV
OutputConditionsAlg.Run1 = 0
OutputConditionsAlg.LB1 = 0
# Set the connection string
include ( "IOVDbSvc/IOVDbSvc_jobOptions.py" )
IOVDbSvc = Service( "IOVDbSvc" )
IOVDbSvc.dbConnection="impl=cool;techno=sqlite;schema=LArElecCalib_FCalADC2MeV.db;X:OFLP200"
# For schema creation - only should be used when creating the folder,
# i.e. the first time
IOVRegSvc = Service( "IOVRegistrationSvc" )
IOVRegSvc.OutputLevel = DEBUG
IOVRegSvc.RecreateFolders = RecreateFolder
# PoolSvc.FileOpen = "update"
###########################################################################
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
86e96ae863d4f9f1817fcae036de87f3df2a15ec
|
e694891ff8c9d06df7b7b5def7ba71c1dba03aa8
|
/rabbitmq_rabbitpy/test_rabbitmq.py
|
23f166795359b1166e1d5e54aa4a636cf2e3c2e1
|
[] |
no_license
|
wangyu190810/python-skill
|
78f9abb39ebfa01b92ffb2ec96c7ef57c490d68d
|
719d082d47a5a82ce4a15c57dd481932a9d8f1ba
|
refs/heads/master
| 2020-04-05T17:43:48.005145
| 2019-02-01T01:45:49
| 2019-02-01T01:45:49
| 41,524,479
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
# -*-coding:utf-8-*-
# email:190810401@qq.com
__author__ = 'wangyu'
<<<<<<< HEAD
=======
import rabbitpy
# with rabbitpy.Connection("amqp://guest:guest@localhost:5672/%2F") as conn:
# with conn.channel() as channel:
# amqp = rabbitpy.AMQP(channel)
#
# for message in amqp.basic_consume('queue-name'):
# print(message)
#
# import rabbitpy
with rabbitpy.Connection('amqp://guest:guest@localhost:5672/%2f') as conn:
with conn.channel() as channel:
queue = rabbitpy.Queue(channel, 'example')
while len(queue) > 0:
message = queue.get()
print 'Message:'
print ' ID: %s' % message.properties['message_id']
print ' Time: %s' % message.properties['timestamp'].isoformat()
print ' Body: %s' % message.body
message.ack()
print 'There are %i more messages in the queue' % len(queue)
>>>>>>> 85e7424cf14daa2d8af9040031bec995ac70cde1
|
[
"190810401@qq.com"
] |
190810401@qq.com
|
fb9705a0d1b4b5da9c80db0e6507fd386d90b160
|
f28a261132fbf98f5ebfd004672af4155dfa1cc5
|
/nanodash/service/dataset-description-nano-090.py
|
b6fd62ced21821aab7733a8570b3d22d64d38b3d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
curtislisle/nanomaterial-dashboard
|
8704779b7410747092c8fdb9326fb69b9f6b94ff
|
06de2e0782f53ce56d6edd0937b14cbd738fc22a
|
refs/heads/master
| 2021-01-21T04:41:16.713855
| 2016-07-08T01:07:17
| 2016-07-08T01:07:17
| 54,521,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,213
|
py
|
#import bson
import pymongo
import json
from bson import ObjectId
from pymongo import MongoClient
import string
import tangelo
def run(ipaddress):
# Create an empty response object.
response = {}
response['datasource'] = 'remote'
response['file'] = "http://"+str(ipaddress)+":8080/nanodash/service/dataset-content-nano-090/NanoDB3/Nano_combined_0301"
response['name'] = "Nano Database Dashboard v0.9.0"
response['separator'] = ','
response['skip'] = 0
response['meta'] = [
{ "type": "id", "name": "NanomaterialID" },
{ "type": "string", "name": "Molecular Identity" },
{ "type": "string", "name": "Material Type" },
{ "type": "string", "name": "Molecular Type" },
{"type":"string","name":"Product Name"},
# {'name':'Mean Hydrodynamic Diameter','type':'float'},
{'name':'Mean Primary Particle Size','type':'float'},
# {'name':'Component Molecular Weight','type':'float'},
# {'name':'Molecular Weight','type':'float'},
{'name':'Lambda Max','type':'float'},
# {'name':'Bulk Density','type':'float'},
# {'name':'Primary Particle Size','type':'float'},
{'name':'Specific Surface Area','type':'float'},
{'name':'Zeta Potential','type':'float'}
]
response['sets'] = [
{ "format": "binary", "start": 1, "end": 5}]
response['setlist'] = ['2D Dimensionality','3D Dimensionality','Metal','Metal Oxide','Polymer','Carbohydrate',
'Protein','Nucleic Acid','Group Ii-Vi','Dendrimer','Lipid','Group Iv - Non C',
'Agglomerated','Aggregated','Positive Polarity','Negative Polarity','Purity99+','IsCrystalline',
'Aromatic','Macrocyclic','Sugar','VHQ-R subset', 'UHQ-R subset',
'source_pdf','source_nano_db']
#'Monoclinic','SingleCrystal','Polycrystalline','Amorphous','Anatase','Tetragonal','Rutile','Cubic','Brookite','Wurtzite','Zincite']
response['attributelist'] = []
response['author'] = 'ABCC IVG & KnowledgeVis'
response['description'] = 'Nanomaterial database v2'
response['source'] = "Nanomaterials reference database"
#tangelo.log(str(response))
return json.dumps(response)
|
[
"clisle@knowledgevis.com"
] |
clisle@knowledgevis.com
|
bb32c9b355ff5984723a6f55c49c36cdbc32e17c
|
da280a226bbf15d7243410c0d3930bdca00d0088
|
/firsttry/ex41.py
|
0ba10ceba34cd4003844fa210c2ed0733881e028
|
[] |
no_license
|
c4collins/PyTHWay
|
174cae57c73431ce5bfc90a361613c5db5c846d7
|
135b4b908ef2698084ee1b3fb9f1e5550c3c8843
|
refs/heads/master
| 2021-01-10T18:29:43.998528
| 2012-11-03T22:53:17
| 2012-11-03T22:53:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,767
|
py
|
from sys import exit
from random import randint
def death():
quips = ["You died. You kinda suck at this.", "Your mum would be proud, if she were smarter.", "Such a loser.", "I have a small puppy that's better at this."]
print quips[randint(0, len(quips)-1)]
exit(1)
def princess_lives_here():
print "You see a beautiful princess with a shiny crown."
print "She offers you some cake."
eat_it = raw_input("> ")
if eat_it == "eat it":
print "You explode like a pinata full of frogs."
print "The princess cackles and eats the frogs. Yum!"
return 'death'
elif eat_it == "do not eat it":
print "She throws the cake at you and it cuts off your head."
print "The last thing you see if her munching on your torso. Yum!"
return 'death'
elif eat_it == "make her eat it":
print "The princess screams as you cram the cake in her mouth."
print "The she smiles and cries and thanks you for saving her."
print "She points to a tiny door and says, 'The Koi needs cake too.'"
print "She gives you the very last bit of cake and shoves you in."
return 'gold_koi_pond'
else:
print "The princess looks at you confused and just points at the cake."
return 'princess_lives_here'
def gold_koi_pond():
print "There is a garden with a koi pond in the centre."
print "You walk close and see a massive fin poke out."
print "You peek in and a creepy looking huge Koi stares at you."
print "It opens its mouth waiting for food."
feed_it = raw_input("> ")
if feed_it == "feed it":
print "The Koi jumps up, and rather than eating the cake, eats your arm."
print "You fall in and the Koi shrugs then eats you."
print "You are then pooped out sometime later."
return 'death'
elif feed_it == "do not feed it":
print "The Koi grimaces, then thrashes around for a second."
print "If rushes to the other side of the pong, braces against the wall..."
print "The it *lunges* out of the water, up in the air and over your"
print "entire body, cake and all."
print "You are pooped out about a week later."
return 'death'
elif feed_it == "throw it in":
print "The Koi wiggles, then leaps into the air to eat the cake."
print "You can see it's happy, it gruts, thrashes..."
print "and finally rolls over and poops a magic diamond into the air."
print "It lands at your feet."
return 'bear_with_sword'
else:
print "The Koi gets annoyed and wiggles a bit."
return 'golden_koi_pond'
def bear_with_sword():
print "Puzzled, you are about to pick up the fish poop diamond when"
print "a bear bearing a load bearing sword walks in."
print "\"Hey, that's MY diamond! Where'd you get that!?\""
print "It holds its paw out and looks at you."
give_it = raw_input("> ")
if give_it == "give it":
print "The bear swipes at your hand to grab the diamond and"
print "rips your hand off in the process. It then looks at"
print "your bloody stump and says \"Oh crap, sorry about that.\""
print "It tries to put your hand back on, but you collapse."
print "The last thing you see is the bear shrug and eat you."
return 'death'
elif give_it == "say no":
print "The bear looks shocked. Nobody ever told a bear"
print "with a broadsword 'no'. It asks, "
print "\"Is it because it's not a katana? I could go get one!\""
print "It then runs off and you notice a big iron gate."
print "\"Where the hell did that come from?\" You say."
return 'big_iron_gate'
else:
print "The bear looks puzzled as to why you'd do that."
return 'bear_with_sword'
def big_iron_gate():
print "You walk up to the big iron gate and see there's a handle."
open_it = raw_input("> ")
if open_it == "open it":
print "You open it and you are free!"
print "There are mountains. And berries! And..."
print "Oh, but then the bear comes with his katana and stabs you."
print "\"Who's laughing now!? Love this katana.\""
return 'death'
else:
print "That doesn't seem sensible. I mean, the door's right there."
return 'big_iron_gate'
ROOMS = {'death': death, 'princess_lives_here': princess_lives_here, 'gold_koi_pond': gold_koi_pond, 'big_iron_gate': big_iron_gate, 'bear_with_sword': bear_with_sword}
def runner(map, start):
next = start
while True:
room = map[next]
print "\n--------"
next = room()
runner(ROOMS, 'princess_lives_here')
|
[
"connor.collins@gmail.com"
] |
connor.collins@gmail.com
|
faa87c8e3f067bcd7755c759e47e022742482bb8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/wbhjXmdbPSxCSE5hW_0.py
|
e9536e0fed2a7c9b48f0291977cccbacbce5b686
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
"""
A magic sigil is a glyph which represents a desire one wishes to manifest in
their lives. There are many ways to create a sigil, but the most common is to
write out a specific desire (e.g. " _I HAVE WONDERFUL FRIENDS WHO LOVE ME_ "),
remove all vowels, remove any duplicate letters (keeping the last occurence),
and then design a glyph from what remains.
Using the sentence above as an example, we would remove duplicate letters:
AUFRINDSWHLOVME
And then remove all vowels, leaving us with:
FRNDSWHLVM
Create a function that takes a string and removes its vowels and duplicate
letters. The returned string should not contain any spaces and be in
uppercase.
### Examples
sigilize("i am healthy") ➞ "MLTHY"
sigilize("I FOUND MY SOULMATE") ➞ "FNDYSLMT"
sigilize("I have a job I enjoy and it pays well") ➞ "HVBJNDTPYSWL"
### Notes
* For duplicate letters the **last one** is kept.
* When performing actual sigil magic, you **must** make your sigils **manually**.
* Check the **Resources** tab for more info on sigils if you're interested in the concept.
"""
def sigilize(desire):
a = ''.join(desire.upper().split())
b = sorted(set(a), key=a.rindex)
return ''.join(i for i in b if i not in "AEIOU")
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e70cf9d6e63ff327f4103d60a0c7ba98634ec982
|
4d98abd2553e95856d835519424a60634fc4cdd3
|
/CVE-2016-4437 Apache_Shiro_RCE/ShiroScan_1.2.4/moule/plugins/Spring2.py
|
68bb19cf574477e3533d5a8f8ec6fe04827cd872
|
[] |
no_license
|
ANNS666/my_POC
|
0157fa41bdd2d0f264e464b05bf9c75405083e44
|
b3a38745609c9407a9bc0427f5dd55e4acfe6d70
|
refs/heads/master
| 2023-08-10T19:13:15.521562
| 2021-10-10T04:09:58
| 2021-10-10T04:09:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,658
|
py
|
# -*- coding: utf-8 -*-
# By 斯文beast svenbeast.com
import os
import re
import base64
import uuid
import subprocess
import requests
import sys
import threadpool
from Crypto.Cipher import AES
from ..main import Idea
requests.packages.urllib3.disable_warnings()
JAR_FILE = 'moule/ysoserial.jar'
@Idea.plugin_register('Class26:Spring2')
class Spring2(object):
def process(self,url,command,resKey,func):
self.sendPayload(url,command,resKey)
def gcm_encode(self,resKey,file_body):
mode = AES.MODE_GCM
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv)
ciphertext, tag = encryptor.encrypt_and_digest(file_body)
ciphertext = ciphertext + tag
payload = base64.b64encode(iv + ciphertext)
return payload
def cbc_encode(self,resKey,file_body):
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(resKey), mode, iv) #受key影响的encryptor
payload = base64.b64encode(iv + encryptor.encrypt(file_body))
return payload
def sendPayload(self,url,command,resKey,fp=JAR_FILE):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'Spring2', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ( (BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
file_body = pad(popen.stdout.read()) #受popen影响的file_body
payloadCBC = self.cbc_encode(resKey,file_body)
payloadGCM = self.gcm_encode(resKey,file_body)
header={
'User-agent' : 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:22.0) Gecko/20100101 Firefox/22.0;'
}
try:
x = requests.post(url, headers=header, cookies={'rememberMe': payloadCBC.decode()+"="},verify=False, timeout=20) # 发送验证请求1
y = requests.post(url, headers=header, cookies={'rememberMe': payloadGCM.decode()+"="},verify=False, timeout=20) # 发送验证请求2
#print("payload1已完成,字段rememberMe:看需要自己到源代码print "+payload.decode())
if(x.status_code==200):
print("[+] ****Spring2模块 key: {} 已成功发送! 状态码:{}".format(str(resKey),str(x.status_code)))
else:
print("[-] ****Spring2模块 key: {} 发送异常! 状态码:{}".format(str(resKey),str(x.status_code)))
except Exception as e:
print(e)
return False
|
[
"m18479685120@163.com"
] |
m18479685120@163.com
|
b8fecdcd2f6db4c77f8c2dd91e69e1f8869ea920
|
ff3da62ab2a336ba286ea320b8bf1eba5b1978ea
|
/normalization/time_Info/apm.py
|
e242dc16e93401a0d43eed4f9fa6c779d03c8403
|
[] |
no_license
|
llq20133100095/bert_ner_time
|
9e17e9de77ff12b4ae5267986f646665066e070c
|
9dc3baf5ca8f6d5cc7d4255bcfd913bd695c7b5e
|
refs/heads/master
| 2021-10-28T14:59:17.217552
| 2019-04-24T06:12:22
| 2019-04-24T06:12:22
| 182,626,582
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/24 16:33
# @Author : honeyding
# @File : apm.py
# @Software: PyCharm
import re
class Apm:
apm_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?')
apm_hour_pat = re.compile(u'.*?(明早|傍晚|早上|早晨|凌晨|上午|中午|下午|大晚上|晚上|夜里|今晚|明晚|昨晚|前晚|这晚|晚|清晨|午后).*?([0-9一二三四五六七八九两十]).*?')
def get_apm_info(self, entity, commonParser):
matcher = self.apm_pat.match(entity)
if matcher:
if commonParser:
commonParser.timeUnit[4] = True
return True
return False
def judge_apm_hour(self, entity, commonParser):
matcher = self.apm_hour_pat.match(entity)
if matcher:
if commonParser:
commonParser.timeUnit[4] = True
return True
return False
def adjustHours(self, entity, hour, commonParser):
if u"早" not in entity and u"上午" not in entity and u"晨" not in entity:
if u"中午" in entity:
if hour > 14 or hour > 2 and hour < 10:
print(u'不能是中午。')
commonParser.timeAPMInfo = str(hour) + u"点不能是中午。"
elif hour < 2 and hour > 0:
hour += 12
elif u"下午" not in entity and u"午后" not in entity:
if u"昨晚" in entity or u"明晚" in entity or u"傍晚" in entity or u"晚" in entity or u"晚上" in entity or u"夜里" in entity or u"今晚" in entity:
if hour > 12 and hour < 17 or hour >= 0 and hour < 5:
print(u'不能是晚上。')
commonParser.timeAPMInfo = str(hour) + u"点不能是晚上。"
elif hour >= 4 and hour <= 12:
hour += 12
else:
if hour > 0 and hour <= 12:
hour += 12
# if hour > 19 or hour < 1 or hour > 7 and hour < 12:
# print(u'不能是下午。')
# commonParser.timeAPMInfo = str(hour) + u'不能是下午。'
# elif hour > 0 and hour <= 7:
# hour += 12
elif hour > 12:
print(u'不能是上午或早上。')
commonParser.timeAPMInfo = str(hour) + u'点不能是上午或早上。'
return hour
if __name__ == '__main__':
apm_proc = Apm()
assert apm_proc.get_apm_info(u'早晨') is True
|
[
"1182953475@qq.com"
] |
1182953475@qq.com
|
6e8da8e397cef33da10c132cc14befac799d08b6
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/030_movie_data_analysis/save1_nopass.py
|
de9624e5838b09cfbf6dd63a838b4df2ba2feb25
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
# _______ c__
# ____ c.. _______ d.., n..
# _______ __
# ____ u__.r.. _______ u..
#
# BASE_URL 'https://bites-data.s3.us-east-2.amazonaws.com/'
# TMP '/tmp'
#
# fname 'movie_metadata.csv'
# remote __.p...j.. B.. f..
# local __.p...j.. T.. f..
# u.. ? ?
#
# MOVIE_DATA local
# MIN_MOVIES 4
# MIN_YEAR 1960
#
# Movie n.. 'Movie', 'title year score'
#
#
# ___ get_movies_by_director
# """Extracts all movies from csv and stores them in a dict,
# where keys are directors, and values are a list of movies,
# use the defined Movie namedtuple"""
#
# d d.. l..
# full_list # list
#
# w__ o.. M.. newline='' __ file
# reader c__.D.. ?
# ___ row __ ?
# year ? 'title_year'
# __ ? !_ '' a.. i.. ? > 1960
# f__.a.. ? 'director_name' ? 'movie_title' .s.. i.. ? 'title_year' f__ ? 'imdb_score'
#
# ___ name, movie, year, score __ f..
# d name .a.. ? t.._m.. y.._y.. s.._s..
#
# r.. ?
#
#
# ___ calc_mean_score movies
# """Helper method to calculate mean of list of Movie namedtuples,
# round the mean to 1 decimal place"""
# scores movie.s.. ___ ? __ ?
# r.. r.. s.. ? / l.. ? 1
#
# ___ get_average_scores directors
# """Iterate through the directors dict (returned by get_movies_by_director),
# return a list of tuples (director, average_score) ordered by highest
# score in descending order. Only take directors into account
# with >= MIN_MOVIES"""
#
# p..
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
f6781a69e1b2ae0d198cc5c11ac27d5d185fa49e
|
c3cc755ae500e87b6d5fa839efaa4d7d0f746d43
|
/Part 1/Ch.6 Dictionaries/Nesting/pizza.py
|
f07401d2bb54c94f78013b95d7f88cd48287e6fd
|
[] |
no_license
|
AngryGrizzlyBear/PythonCrashCourseRedux
|
9393e692cdc8e5e28a66077bbc6c1e674642d209
|
28d48fa16fc238cf0409f6e987a3b4b72e956a92
|
refs/heads/master
| 2020-03-28T11:04:44.030307
| 2018-10-20T21:06:27
| 2018-10-20T21:06:27
| 148,175,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 312
|
py
|
# Store information about a pizza being ordered.
pizza = {
'crust': 'thick',
'toppings': ['mushrooms', 'extra cheese'],
}
# Summarized the order
print("You ordered a " + pizza['crust'] + "-crust pizza " +
"with the following toppings:")
for topping in pizza['toppings']:
print("\t" + topping)
|
[
"evanmlongwood@gmail.com"
] |
evanmlongwood@gmail.com
|
a381405f3e7de92702f28ddc67b8a4d3d57494cd
|
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
|
/aoj/aoj-icpc/300/1315.py
|
fc47a7e25bc9e18a6c15f3d4e5a4aeac5a025693
|
[] |
no_license
|
roiti46/Contest
|
c0c35478cd80f675965d10b1a371e44084f9b6ee
|
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
|
refs/heads/master
| 2021-01-17T13:23:30.551754
| 2017-12-10T13:06:42
| 2017-12-10T13:06:42
| 27,001,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
while 1:
n = int(raw_input())
if n == 0: break
exist = set([])
enter = [0]*1000
bless = [0]*1000
for loop in xrange(n):
md,hm,io,p = raw_input().split()
h,m = map(int,hm.split(":"))
t = 60*h+m
p = int(p)
if io == "I":
enter[p] = t
exist.add(p)
else:
exist.remove(p)
if p == 0:
for i in exist: bless[i] += t-max(enter[p],enter[i])
elif 0 in exist:
bless[p] += t-max(enter[0],enter[p])
print max(bless)
|
[
"roiti46@gmail.com"
] |
roiti46@gmail.com
|
15a60453aa5419b4fa377688c031c2632596a4f9
|
7ce479cac0a14d924159db9c784e3325b8f0bce7
|
/schemaorgschemas/Thing/MedicalEntity/MedicalProcedure/__init__.py
|
cbefd8704afe1d477dfc83e65cb81ce50f18686e
|
[] |
no_license
|
EvelineAndreea/AGRe
|
1f0c27237eb047a60bbcfb8d73e3157035406409
|
b952125896a82741f6617c259dd4060954583180
|
refs/heads/master
| 2020-04-08T16:08:11.517166
| 2018-11-28T07:15:56
| 2018-11-28T07:15:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,059
|
py
|
# -*- coding: utf-8 -*-
from schemaorgschemas.Thing import potentialActionProp, nameProp, sameAsProp, imageProp, urlProp, mainEntityOfPageProp, additionalTypeProp, alternateNameProp, descriptionProp
from schemaorgschemas.Thing.MedicalEntity import codeProp, relevantSpecialtyProp, studyProp, guidelineProp, recognizingAuthorityProp, medicineSystemProp
from schemaorgschemas.djangoschema import SchemaObject, SchemaProperty, SchemaEnumProperty, SCHEMA_ORG
from django.conf import settings
class MedicalProcedureSchema(SchemaObject):
"""Schema Mixin for MedicalProcedure
Usage: place after django model in class definition, schema will return the schema.org url for the object
A process of care used in either a diagnostic, therapeutic, or palliative capacity that relies on invasive (surgical), non-invasive, or percutaneous techniques.
"""
def __init__(self):
self.schema = 'MedicalProcedure'
class followupProp(SchemaProperty):
"""
SchemaField for followup
Usage: Include in SchemaObject SchemaFields as your_django_field = followupProp()
schema.org description:Typical or recommended followup care after the procedure is performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'followup'
_expected_schema = None
_enum = False
_format_as = "TextField"
class preparationProp(SchemaProperty):
"""
SchemaField for preparation
Usage: Include in SchemaObject SchemaFields as your_django_field = preparationProp()
schema.org description:Typical preparation that a patient must undergo before having the procedure performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'preparation'
_expected_schema = None
_enum = False
_format_as = "TextField"
class procedureTypeProp(SchemaProperty):
"""
SchemaField for procedureType
Usage: Include in SchemaObject SchemaFields as your_django_field = procedureTypeProp()
schema.org description:The type of procedure, for example Surgical, Noninvasive, or Percutaneous.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
used to reference MedicalProcedureType"""
_prop_schema = 'procedureType'
_expected_schema = 'MedicalProcedureType'
_enum = False
_format_as = "ForeignKey"
class howPerformedProp(SchemaProperty):
"""
SchemaField for howPerformed
Usage: Include in SchemaObject SchemaFields as your_django_field = howPerformedProp()
schema.org description:How the procedure is performed.
prop_schema returns just the property without url#
format_as is used by app templatetags based upon schema.org datatype
"""
_prop_schema = 'howPerformed'
_expected_schema = None
_enum = False
_format_as = "TextField"
# schema.org version 2.0
|
[
"mihai.nechita95@gmail.com"
] |
mihai.nechita95@gmail.com
|
222a8516170dbdfd60052c5217c8dbe791724e6b
|
a6df74bc7c139734bd9ce9f48d51e08fdc7d7efb
|
/article/migrations/0016_auto_20210412_1456.py
|
34b4c21151d60a7d9f4aa95d47c0410f17c749cc
|
[] |
no_license
|
Erlan1998/python_group_7_homework_68_Erlan_Kurbanaliev
|
5a7f210e51f1998e5d52cdeb42538f2786af3f9f
|
fdc92be2c5187c78fecdc713f58e0e3e9fc62cb1
|
refs/heads/master
| 2023-05-03T17:01:59.066596
| 2021-05-26T13:28:41
| 2021-05-26T13:28:41
| 368,165,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
# Generated by Django 3.1.6 on 2021-04-12 14:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('article', '0015_auto_20210412_1444'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'permissions': [('сan_have_piece_of_pizza', 'Может съесть кусочек пиццы')], 'verbose_name': 'Статья', 'verbose_name_plural': 'Статьи'},
),
]
|
[
"kurbanalieverlan@gmail.com"
] |
kurbanalieverlan@gmail.com
|
3f6f9421f822fd2a774361edb18fd8c12c87027d
|
b58b175263f275e15a1b56bf1b0914db0f35ffc8
|
/testcase/testcase_lan.py
|
8d4326cbd823f38fc4d2cbf52a1cb50582dc55ed
|
[] |
no_license
|
zeewii/BHU
|
aa9ff900a4bb6adb368081509b9f9222479f7742
|
1f3c4f634b44845f7a4f84535ff4904de4efc634
|
refs/heads/master
| 2021-01-09T21:49:01.534541
| 2015-09-30T09:21:28
| 2015-09-30T09:21:28
| 43,213,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,828
|
py
|
#coding=utf-8
#描述:该模块为测试lan模块
#作者:曾祥卫
import unittest
from selenium import webdriver
import time,os,commands
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from login import login_control
from data import data
from network.interface import interface_control
from connect import ssh
from publicControl import public_control
from network.interface.lan import lan_business
from network.interface import interface_business
class TestLan(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
#将浏览器最大化
self.driver.maximize_window()
#使用默认ip登录lan页面
lan_business.goin_default_lan(self)
def test_054_055_IP_netmask(self):
u"""修改LAN IP和A,B,C类子网掩码"""
#把4次修改LAN IP和子网掩码后client ping修改后ip的值取出
result = lan_business.step_100msh0054_100msh0055(self)
print result
#如果4次都为0则通过,否则不通过
assert result == [0,0,0,0],u"测试LAN IP和A,B,C类子网掩码失败"
print u"测试LAN IP和A,B,C类子网掩码成功"
def test_056_custom_netmask(self):
u"""lan自定义掩码设置"""
result = lan_business.step_100msh0056(self)
print result
#如果4次都为1则通过,否则不通过
assert result == [1,1,1,1],u"测试lan自定义掩码设置失败"
print u"测试lan自定义掩码设置成功"
def test_057_broadcast(self):
u"""lan广播地址配置有效性测试"""
result = lan_business.step_100msh0057(self)
print result
#如果2次都为1则通过,否则不通过
assert result == [1,1],u"测试lan广播地址配置有效性失败"
print u"测试lan广播地址配置有效性成功"
def test_059_startip(self):
u"""IP地址池默认起始值检查"""
result = lan_business.step_100msh0059(self)
print result
#如果IP地址池默认起始值为100则通过,否则不通过
assert result == '100',u"测试IP地址池默认起始值失败"
print u"测试IP地址池默认起始值成功"
def test_067_068_abnormal_input(self):
u"""lan异常输入测试"""
result = lan_business.step_100msh0067_100msh0068(self)
print result
#如果4次都为1则通过,否则不通过
assert result == [1,1,1,1],u"测试lan异常输入测试失败"
print u"lan测试异常输入测试成功"
#退出清理工作
def tearDown(self):
self.driver.quit()
if __name__=='__main__':
unittest.main()
__author__ = 'zeng'
|
[
"zeewii@sina.com"
] |
zeewii@sina.com
|
5a65c3db8f5241c487aab78f930d7ec197529388
|
5a4d5ee624b375ece06fda1467afe18beb69c14b
|
/Algorithm/SW_Expert/1-46.py
|
e2fcfc033cbfedd0121723aaeb2c5ba1ecc91913
|
[] |
no_license
|
Knightofcydonia51/TIL
|
cd10dab949659bc827118ee42b25d926336dce23
|
78d7e8617f4abed9932a557c12e68bd950f8230d
|
refs/heads/master
| 2022-12-26T00:10:06.262200
| 2022-05-26T01:12:32
| 2022-05-26T01:12:32
| 195,938,010
| 0
| 0
| null | 2022-12-16T01:03:09
| 2019-07-09T05:22:49
|
Python
|
UTF-8
|
Python
| false
| false
| 218
|
py
|
def score(text):
result=list(map(lambda x: 4 if x=='A' else 3 if x=='B' else 2 if x=='C' else 1 ,text))
return sum(result)
print(score('ADCBBBBCABBCBDACBDCAACDDDCAABABDBCBCBDBDBDDABBAAAAAAADADBDBCBDABADCADC'))
|
[
"leavingwill@gmail.com"
] |
leavingwill@gmail.com
|
7cc5e26b3b002ea59b7a91392cf6ad2b4d9042bb
|
12b5584956797fcb0f48e7971bc074ae13a37489
|
/pySpatialTools/release.py
|
b4439a5b0eb2c44ff32c36629289ad36af5e241a
|
[
"MIT"
] |
permissive
|
tgquintela/pySpatialTools
|
a0ef5b032310aa1c140e805f4ee8c4a40fd2d10e
|
e028008f9750521bf7d311f7cd3323c88d621ea4
|
refs/heads/master
| 2020-05-21T22:09:08.858084
| 2017-02-10T11:18:41
| 2017-02-10T11:18:41
| 39,067,763
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,919
|
py
|
"""Release data for pySpatialTools.
The information of the version is in the version.py file.
"""
from __future__ import absolute_import
import os
import sys
import time
import datetime
basedir = os.path.abspath(os.path.split(__file__)[0])
## Quantify the version
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
def write_version_py(filename=None):
cnt = """\
version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'pySpatialTools', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (version))
finally:
a.close()
def write_versionfile():
"""Creates a static file containing version information."""
versionfile = os.path.join(basedir, 'version.py')
text = '''"""
Version information for pySpatialTools, created during installation by
setup.py.
Do not add this file to the repository.
"""
import datetime
version = %(version)r
date = %(date)r
# Development version
dev = %(dev)r
# Format: (name, major, minor, micro, revision)
version_info = %(version_info)r
# Format: a 'datetime.datetime' instance
date_info = %(date_info)r
# Format: (vcs, vcs_tuple)
vcs_info = %(vcs_info)r
'''
# Try to update all information
date, date_info, version, version_info, vcs_info = get_info(dynamic=True)
def writefile():
fh = open(versionfile, 'w')
subs = {
'dev': dev,
'version': version,
'version_info': version_info,
'date': date,
'date_info': date_info,
'vcs_info': vcs_info
}
fh.write(text % subs)
fh.close()
## Mercurial? Change that
if vcs_info[0] == 'mercurial':
# Then, we want to update version.py.
writefile()
else:
if os.path.isfile(versionfile):
# This is *good*, and the most likely place users will be when
# running setup.py. We do not want to overwrite version.py.
# Grab the version so that setup can use it.
sys.path.insert(0, basedir)
from version import version
del sys.path[0]
else:
# Then we write a new file.
writefile()
return version
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
hgdir = os.path.join(basedir, '..', '.hg')
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info,\
vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor), '.', str(micro)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, micro, revision)
return date, date_info, version, version_info, vcs_info
## Version information
name = 'pySpatialTools'
major = "0"
minor = "0"
micro = "0"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = True
description = """Python package for studying spatial irregular heterogenous
data."""
long_description = """
This package is built in order to provide prototyping tools in python to deal
with spatial data in python and model spatial-derived relations between
different elements in a system. In some systems, due to the huge amount of
data, the complexity of their topology their local nature or because other
practical reasons we are forced to use only local information for model the
system properties and dynamics.
pySpatialTools is useful for complex topological systems with different type
of spatial data elements and feature data elements in which we are not able to
study alls at once because of the data size.
pySpatialTools could be not recommendable for treating some specific problems
with homogeneous and/or regular data which could be treated with other python
packages, as for example computational linguistics (nltk), computer vision or
grid data (scipy.ndimage and openCV) or others.
"""
## Main author
author = 'T. Gonzalez Quintela',
author_email = 'tgq.spm@gmail.com',
license = 'MIT'
authors = {'tgquintela': ('T. Gonzalez Quintela', 'tgq.spm@gmail.com')}
maintainer = ""
maintainer_email = ""
url = ''
download_url = ''
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['math', 'data analysis', 'Mathematics', 'spatial networks',
'spatial correlations', 'framework', 'social sciences',
'spatial analysis', 'spatial ecology']
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
# Topic information
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Sociology',
'Topic :: Scientific/Engineering :: Data Analysis',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics']
date, date_info, version, version_info, vcs_info = get_info()
if __name__ == '__main__':
# Write versionfile for nightly snapshots.
write_versionfile()
|
[
"tgq.spm@gmail.com"
] |
tgq.spm@gmail.com
|
645b5682e9763727540ac5d791536bf21623922f
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5020/309005020.py
|
83361578777dc5a5345e3f1329482955522de273
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
from bots.botsconfig import *
from records005020 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'AQ',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'M10', MIN: 1, MAX: 1},
{ID: 'VEH', MIN: 0, MAX: 10},
{ID: 'CII', MIN: 0, MAX: 3},
{ID: 'NM1', MIN: 0, MAX: 999, LEVEL: [
{ID: 'DMG', MIN: 0, MAX: 1},
{ID: 'DMA', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 10},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
]},
{ID: 'P4', MIN: 1, MAX: 20, LEVEL: [
{ID: 'LX', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'M13', MIN: 0, MAX: 1},
{ID: 'M11', MIN: 0, MAX: 1},
{ID: 'N9', MIN: 0, MAX: 999},
{ID: 'N1', MIN: 0, MAX: 20, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 1},
{ID: 'X1', MIN: 0, MAX: 1},
]},
{ID: 'M12', MIN: 0, MAX: 1, LEVEL: [
{ID: 'R4', MIN: 0, MAX: 10},
]},
{ID: 'VID', MIN: 0, MAX: 999, LEVEL: [
{ID: 'M7', MIN: 0, MAX: 5},
{ID: 'N10', MIN: 0, MAX: 999, LEVEL: [
{ID: 'VC', MIN: 0, MAX: 999},
{ID: 'MAN', MIN: 0, MAX: 999},
{ID: 'H1', MIN: 0, MAX: 99, LEVEL: [
{ID: 'H2', MIN: 0, MAX: 99},
]},
]},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
10bcb6a6cca24a31397972415ea766cbddfa555c
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/147_best.py
|
3d1e8b37f5da10cd271490da0e35045823c72455
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head==None:
return None
l=ListNode(0)
p=head
while p!=None:
q=l
while q.next!=None and q.next.val<p.val :
q=q.next
np=p.next
p.next=q.next
q.next=p
p=np
return l.next
|
[
"noelsun@mowennaierdeMacBook-Pro.local"
] |
noelsun@mowennaierdeMacBook-Pro.local
|
ffbba23a3c4c45c2d06645337aa75f9d54d24f4c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_243/ch161_2020_06_15_19_33_27_198209.py
|
f921b1b82956792ae479cd3fccf38b2e9021b5f4
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
def PiWallis(num):
numerador=1
denominador=2
i=0
multi = 1
while i < num:
multi *= numerador/denominador
if i%2 == 0:
denominador += 2
else:
numerador += 2
i+=1
return multi
|
[
"you@example.com"
] |
you@example.com
|
17443d48e14b9c51e3399739df9833c81a42bef8
|
886436fe7993aa2913e339ebe70b0eddfacac44c
|
/build/lib/armin/api/share/utils.py
|
e68eddb20a572579f23515d616640d6bb6bc3c91
|
[] |
no_license
|
singajeet/armin
|
581793cac1ac3b1ab638d274b356965ee5d76750
|
99f61a0ce0f2d5c587002ddf8d2843e83d9538d3
|
refs/heads/master
| 2021-04-28T07:15:42.509397
| 2018-03-19T17:30:09
| 2018-03-19T17:30:09
| 122,219,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
"""
.. module:: source_driver
:platform: Unix, Windows
:synopsis: A default implementation of source system driver
"""
from typing import Type, Dict, Any
import pathlib
from armin.api.share.constants import N, F, V
from tinydb import TinyDB, Query
def get_meta_table(meta_repo_details:Type[Dict]):
"""Returns the table from meta repo based on details passed as args
"""
__db_path = meta_repo_details[N.DB_URI]
if __db_path.find('~') >= 0:
__db_path = pathlib.Path(__db_path).expanduser()
else:
__db_path = pathlib.Path(__db_path).absolute()
__meta_db = TinyDB(__db_path)
if __meta_db is None:
return (F.FAILED, 'Unable to create instance of TinyDB')
__source_sys_meta_table = __meta_db\
.table(meta_repo_details[N.META_TABLE])
if __source_sys_meta_table is None:
return (F.FAILED, 'Inconsistent meta repo. Can not find source\
system details table - %s' % meta_repo_details[N.META_TABLE])
else:
return (F.SUCCESS, __source_sys_meta_table)
def connect_to_meta(meta_repo_details:Type[Dict], name:str) -> (Type[F], Any):
"""Connect to metadata database using the details provided asparameters in the constructor
Args:
meta_repo_details (Dict): Repository details for making connection and query
name (str): Name of the item that needs to be queried
Returns:
status (Tuple): Returns flag Success or Failed and details in case of failure and table record in case of success
"""
__record = None
(status, result_obj) = get_meta_table(meta_repo_details)
if status == F.SUCCESS:
__source_sys_meta_table = result_obj
__record = __source_sys_meta_table\
.get(Query()[N.NAME] == name)
else:
return (status, result_obj)
if __record is not None:
return (F.SUCCESS, __record)
return (F.FAILED, 'Record not found in meta repo')
|
[
"singajeet@gmail.com"
] |
singajeet@gmail.com
|
44ad04a59f6f8b2df27bfda02eaab12a2aa8d256
|
06a045819cf99c7059afde40dca12cf9d3eb5f81
|
/pandas/tests/indexing/test_at.py
|
01315647c464b7573433bf36515371ffed05e411
|
[
"BSD-3-Clause"
] |
permissive
|
MarcoGorelli/pandas
|
b9882c6ac1e4bc753819b7bc7c8b567964efd275
|
86a4ee01c7899ef454d35b95cde11e9593921c9d
|
refs/heads/main
| 2023-08-22T12:35:45.122152
| 2023-05-04T22:11:07
| 2023-05-04T22:11:07
| 164,618,359
| 4
| 1
|
BSD-3-Clause
| 2023-05-05T09:02:23
| 2019-01-08T09:55:54
|
Python
|
UTF-8
|
Python
| false
| false
| 7,983
|
py
|
from datetime import (
datetime,
timezone,
)
import numpy as np
import pytest
from pandas.errors import InvalidIndexError
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
DatetimeIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
def test_at_timezone():
# https://github.com/pandas-dev/pandas/issues/33544
result = DataFrame({"foo": [datetime(2000, 1, 1)]})
result.at[0, "foo"] = datetime(2000, 1, 2, tzinfo=timezone.utc)
expected = DataFrame(
{"foo": [datetime(2000, 1, 2, tzinfo=timezone.utc)]}, dtype=object
)
tm.assert_frame_equal(result, expected)
def test_selection_methods_of_assigned_col():
# GH 29282
df = DataFrame(data={"a": [1, 2, 3], "b": [4, 5, 6]})
df2 = DataFrame(data={"c": [7, 8, 9]}, index=[2, 1, 0])
df["c"] = df2["c"]
df.at[1, "c"] = 11
result = df
expected = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
result = df.at[1, "c"]
assert result == 11
result = df["c"]
expected = Series([9, 11, 7], name="c")
tm.assert_series_equal(result, expected)
result = df[["c"]]
expected = DataFrame({"c": [9, 11, 7]})
tm.assert_frame_equal(result, expected)
class TestAtSetItem:
def test_at_setitem_item_cache_cleared(self):
# GH#22372 Note the multi-step construction is necessary to trigger
# the original bug. pandas/issues/22372#issuecomment-413345309
df = DataFrame(index=[0])
df["x"] = 1
df["cost"] = 2
# accessing df["cost"] adds "cost" to the _item_cache
df["cost"]
# This loc[[0]] lookup used to call _consolidate_inplace at the
# BlockManager level, which failed to clear the _item_cache
df.loc[[0]]
df.at[0, "x"] = 4
df.at[0, "cost"] = 789
expected = DataFrame({"x": [4], "cost": 789}, index=[0])
tm.assert_frame_equal(df, expected)
# And in particular, check that the _item_cache has updated correctly.
tm.assert_series_equal(df["cost"], expected["cost"])
def test_at_setitem_mixed_index_assignment(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
ser.at["a"] = 11
assert ser.iat[0] == 11
ser.at[1] = 22
assert ser.iat[3] == 22
def test_at_setitem_categorical_missing(self):
df = DataFrame(
index=range(3), columns=range(3), dtype=CategoricalDtype(["foo", "bar"])
)
df.at[1, 1] = "foo"
expected = DataFrame(
[
[np.nan, np.nan, np.nan],
[np.nan, "foo", np.nan],
[np.nan, np.nan, np.nan],
],
dtype=CategoricalDtype(["foo", "bar"]),
)
tm.assert_frame_equal(df, expected)
def test_at_setitem_multiindex(self):
df = DataFrame(
np.zeros((3, 2), dtype="int64"),
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
df.at[0, "a"] = 10
expected = DataFrame(
[[10, 10], [0, 0], [0, 0]],
columns=MultiIndex.from_tuples([("a", 0), ("a", 1)]),
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("row", (Timestamp("2019-01-01"), "2019-01-01"))
def test_at_datetime_index(self, row):
# Set float64 dtype to avoid upcast when setting .5
df = DataFrame(
data=[[1] * 2], index=DatetimeIndex(data=["2019-01-01", "2019-01-02"])
).astype({0: "float64"})
expected = DataFrame(
data=[[0.5, 1], [1.0, 1]],
index=DatetimeIndex(data=["2019-01-01", "2019-01-02"]),
)
df.at[row, 0] = 0.5
tm.assert_frame_equal(df, expected)
class TestAtSetItemWithExpansion:
def test_at_setitem_expansion_series_dt64tz_value(self, tz_naive_fixture):
# GH#25506
ts = Timestamp("2017-08-05 00:00:00+0100", tz=tz_naive_fixture)
result = Series(ts)
result.at[1] = ts
expected = Series([ts, ts])
tm.assert_series_equal(result, expected)
class TestAtWithDuplicates:
def test_at_with_duplicate_axes_requires_scalar_lookup(self):
# GH#33041 check that falling back to loc doesn't allow non-scalar
# args to slip in
arr = np.random.randn(6).reshape(3, 2)
df = DataFrame(arr, columns=["A", "A"])
msg = "Invalid call for scalar access"
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]]
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]]
with pytest.raises(ValueError, match=msg):
df.at[:, "A"]
with pytest.raises(ValueError, match=msg):
df.at[[1, 2]] = 1
with pytest.raises(ValueError, match=msg):
df.at[1, ["A"]] = 1
with pytest.raises(ValueError, match=msg):
df.at[:, "A"] = 1
class TestAtErrors:
# TODO: De-duplicate/parametrize
# test_at_series_raises_key_error2, test_at_frame_raises_key_error2
def test_at_series_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=[3, 2, 1])
result = indexer_al(ser)[1]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(ser)["a"]
def test_at_frame_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = indexer_al(df)[1, 0]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(df)["a", 0]
with pytest.raises(KeyError, match="a"):
indexer_al(df)[1, "a"]
def test_at_series_raises_key_error2(self, indexer_al):
# at should not fallback
# GH#7814
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=list("abc"))
result = indexer_al(ser)["a"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(ser)[0]
def test_at_frame_raises_key_error2(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = indexer_al(df)["a", "A"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(df)["a", 0]
def test_at_frame_multiple_columns(self):
# GH#48296 - at shouldn't modify multiple columns
df = DataFrame({"a": [1, 2], "b": [3, 4]})
new_row = [6, 7]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at[5] = new_row
def test_at_getitem_mixed_index_no_fallback(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
with pytest.raises(KeyError, match="^0$"):
ser.at[0]
with pytest.raises(KeyError, match="^4$"):
ser.at[4]
def test_at_categorical_integers(self):
# CategoricalIndex with integer categories that don't happen to match
# the Categorical's codes
ci = CategoricalIndex([3, 4])
arr = np.arange(4).reshape(2, 2)
frame = DataFrame(arr, index=ci)
for df in [frame, frame.T]:
for key in [0, 1]:
with pytest.raises(KeyError, match=str(key)):
df.at[key, key]
def test_at_applied_for_rows(self):
# GH#48729 .at should raise InvalidIndexError when assigning rows
df = DataFrame(index=["a"], columns=["col1", "col2"])
new_row = [123, 15]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at["a"] = new_row
|
[
"noreply@github.com"
] |
MarcoGorelli.noreply@github.com
|
3730426a331bcc75745f9af0cdfc8efaf059a9b9
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/numpy/array_api/tests/test_elementwise_functions.py
|
b2fb44e766f8adfc368d988bd7d17c2ac418b386
|
[
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-or-later",
"GCC-exception-3.1",
"BSD-3-Clause",
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485
| 2022-09-18T20:44:45
| 2022-09-18T20:44:45
| 474,773,752
| 3
| 1
|
MIT
| 2022-11-03T20:07:50
| 2022-03-27T22:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,619
|
py
|
from inspect import getfullargspec
from numpy.testing import assert_raises
from .. import asarray, _elementwise_functions
from .._elementwise_functions import bitwise_left_shift, bitwise_right_shift
from .._dtypes import (
_dtype_categories,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
)
def nargs(func):
return len(getfullargspec(func).args)
def test_function_types():
# Test that every function accepts only the required input types. We only
# test the negative cases here (error). The positive cases are tested in
# the array API test suite.
elementwise_function_input_types = {
"abs": "numeric",
"acos": "floating-point",
"acosh": "floating-point",
"add": "numeric",
"asin": "floating-point",
"asinh": "floating-point",
"atan": "floating-point",
"atan2": "floating-point",
"atanh": "floating-point",
"bitwise_and": "integer or boolean",
"bitwise_invert": "integer or boolean",
"bitwise_left_shift": "integer",
"bitwise_or": "integer or boolean",
"bitwise_right_shift": "integer",
"bitwise_xor": "integer or boolean",
"ceil": "numeric",
"cos": "floating-point",
"cosh": "floating-point",
"divide": "floating-point",
"equal": "all",
"exp": "floating-point",
"expm1": "floating-point",
"floor": "numeric",
"floor_divide": "numeric",
"greater": "numeric",
"greater_equal": "numeric",
"isfinite": "numeric",
"isinf": "numeric",
"isnan": "numeric",
"less": "numeric",
"less_equal": "numeric",
"log": "floating-point",
"logaddexp": "floating-point",
"log10": "floating-point",
"log1p": "floating-point",
"log2": "floating-point",
"logical_and": "boolean",
"logical_not": "boolean",
"logical_or": "boolean",
"logical_xor": "boolean",
"multiply": "numeric",
"negative": "numeric",
"not_equal": "all",
"positive": "numeric",
"pow": "numeric",
"remainder": "numeric",
"round": "numeric",
"sign": "numeric",
"sin": "floating-point",
"sinh": "floating-point",
"sqrt": "floating-point",
"square": "numeric",
"subtract": "numeric",
"tan": "floating-point",
"tanh": "floating-point",
"trunc": "numeric",
}
def _array_vals():
for d in _integer_dtypes:
yield asarray(1, dtype=d)
for d in _boolean_dtypes:
yield asarray(False, dtype=d)
for d in _floating_dtypes:
yield asarray(1.0, dtype=d)
for x in _array_vals():
for func_name, types in elementwise_function_input_types.items():
dtypes = _dtype_categories[types]
func = getattr(_elementwise_functions, func_name)
if nargs(func) == 2:
for y in _array_vals():
if x.dtype not in dtypes or y.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x, y))
else:
if x.dtype not in dtypes:
assert_raises(TypeError, lambda: func(x))
def test_bitwise_shift_error():
# bitwise shift functions should raise when the second argument is negative
assert_raises(
ValueError, lambda: bitwise_left_shift(asarray([1, 1]), asarray([1, -1]))
)
assert_raises(
ValueError, lambda: bitwise_right_shift(asarray([1, 1]), asarray([1, -1]))
)
|
[
"joao.a.severgnini@gmail.com"
] |
joao.a.severgnini@gmail.com
|
0dcf4b6b5bcf74c86dfbcba79e56758e85c90377
|
08c7844a2bd2d94d16e851ce78109a7f33ffc53f
|
/config.py
|
58407e73518f4329eb385d50488e096f33660915
|
[] |
no_license
|
jreiher2003/menu-app
|
dd5bd4a44688f43086f6a284684ebafff74daf2a
|
cc93f6a41539ab00b2d85bae21ee308987c93afe
|
refs/heads/master
| 2021-01-10T09:26:51.673657
| 2015-11-17T19:11:25
| 2015-11-17T19:11:25
| 46,355,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
WTF_CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'menu.db')
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
|
[
"jeffreiher@gmail.com"
] |
jeffreiher@gmail.com
|
b517f0bb5ca6346a38ef4745c26d781ed5b2d2cd
|
e83f2198cb765f048398e6485f138cf4e172199f
|
/src/pywaz/sprite/__init__.py
|
2b4fa577eabb5f9d7b1f852d71ca2119cee7f2c3
|
[] |
no_license
|
giginet/MachiMatch
|
6d1c2cb2a77323043e8e04e90df5d5e1d8e010d5
|
69b0e788f75966bf6e2fbfaba19e66da5ce22415
|
refs/heads/master
| 2021-01-13T01:36:19.399768
| 2011-12-25T02:40:10
| 2011-12-25T02:40:10
| 1,630,776
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,221
|
py
|
import pygame
from pygame.sprite import Sprite
class _Mixin(object):
def draw(self, surface):
for sprite in self.sprites():
if isinstance(sprite, Sprite):
sprite.draw(surface)
else:
surface.blit(sprite.image, sprite.rect)
class _Mixin2(object):
def draw(self, surface):
spritedict = self.spritedict
surface_blit = surface.blit
dirty = self.lostsprites
self.lostsprites = []
dirty_append = dirty.append
for s in self.sprites():
r = spritedict[s]
if isinstance(s, Sprite):
newrect = s.draw(surface)
else:
newrect = surface_blit(s.image, s.rect)
if r is 0:
dirty_append(newrect)
else:
if newrect and newrect.colliderect(r):
dirty_append(newrect.union(r))
elif newrect:
dirty_append(newrect)
dirty_append(r)
spritedict[s] = newrect
return dirty
# group -----------------------------------------------------------------------------------
#
# Notice:
# The order of inheritation is IMPORTANT
#
class Group(_Mixin, pygame.sprite.Group):
pass
class RenderUpdates(_Mixin2, pygame.sprite.RenderUpdates):
pass
class OrderedUpdates(_Mixin2, pygame.sprite.OrderedUpdates):
pass
class LayeredUpdates(_Mixin2, pygame.sprite.LayeredUpdates):
pass
# collide ---------------------------------------------------------------------------------
#
# Notice:
# Only `collide_rect` and `spritecollide` is modified
#
from pygame.sprite import collide_rect_ratio
from pygame.sprite import collide_circle, collide_circle_ratio
from pygame.sprite import collide_mask
from pygame.sprite import groupcollide, spritecollideany
def collide_rect(left, right):
u"""collision detection between two sprites, using `colrect` of each sprite"""
return left.coltest_rect.colliderect(right.coltest_rect)
def spritecollide(sprite, group, dokill, collided = None):
if collided is None:
collided = collide_rect
return pygame.sprite.spritecollide(sprite, group, dokill, collided)
|
[
"giginet.net@gmail.com"
] |
giginet.net@gmail.com
|
5e7eae6b648b87e1195f66e8de1baf28ed5cc3b4
|
176088b355fd48f89aa377d1358bc54fd5d9d35d
|
/backend/task_category/migrations/0001_initial.py
|
9093194c12138f4db006dc787f9880e94c74f40c
|
[] |
no_license
|
crowdbotics-apps/fashion-by-genesis-18024
|
bbf2c78adaefcaf5297b208a23d291ec8c7b0f0f
|
a725add80913c3ecb4f9e049baa3c78c8de3ffbd
|
refs/heads/master
| 2022-10-26T19:09:33.359374
| 2020-06-11T18:21:20
| 2020-06-11T18:21:20
| 271,617,523
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
# Generated by Django 2.2.13 on 2020-06-11 18:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('icon', models.URLField()),
('description', models.TextField(blank=True, null=True)),
('is_recurring', models.BooleanField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Subcategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, null=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subcategory_category', to='task_category.Category')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
5b93c2a71b7fe9860423932a99487ea380b7ad1b
|
e7307703a08ccdc0615bfa3b7a963a2ba2e9e732
|
/bots/courses_bot/data_models/student_profile.py
|
8f2b9d06af726a5cb4e8919a976c563d36878473
|
[] |
no_license
|
liyocee/cs_course_bot
|
7817c43975c56aeb6edf31d28d9a7f553d107c26
|
93354ade3713293bf31a494a75bd11c3229814a8
|
refs/heads/master
| 2023-05-24T23:29:34.309303
| 2020-03-15T14:37:15
| 2020-03-15T14:37:15
| 246,835,877
| 0
| 0
| null | 2023-05-22T22:42:22
| 2020-03-12T13:03:32
|
Python
|
UTF-8
|
Python
| false
| false
| 707
|
py
|
from enum import Enum
from typing import Optional
from botbuilder.schema import Attachment
from .course_unit import CourseUnit
class StudentProfile:
def __init__(
self,
name: str = None,
admission_number: str = None,
course_unit: CourseUnit = None,
picture: Attachment = None
):
self.name: Optional[str] = name
self.admission_number: Optional[str] = admission_number
self.course_unit: Optional[CourseUnit] = course_unit
self.picture: Optional[Attachment] = picture
class StudentProfileAttributes(Enum):
NAME = "name"
ADMISSION_NUMBER = "admission_number"
COURSE_UNIT = "course_unit"
PICTURE = "picture"
|
[
"collinskivale@gmail.com"
] |
collinskivale@gmail.com
|
179a046688ec86cdc0a1838723c43484ef4af058
|
6f57761c60582c546423a2a08c769f18236fd153
|
/benchmarks/data/codes/run_pk_param_space.py
|
da414557736a952df54584c6f3e3878402cbf9b5
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"BSD-2-Clause"
] |
permissive
|
LSSTDESC/CCL
|
30644922fead0b017c1056e628bec23cf6bc4dfb
|
29d46978445678d86a4bee485cb29d30246ff64a
|
refs/heads/master
| 2023-09-03T17:03:17.012019
| 2023-08-08T11:01:33
| 2023-08-08T11:01:33
| 57,389,367
| 118
| 68
|
BSD-3-Clause
| 2023-08-30T13:25:25
| 2016-04-29T14:08:38
|
C
|
UTF-8
|
Python
| false
| false
| 3,398
|
py
|
#!/usr/bin/env python
"""
Generate a set of CLASS power spectra across a set of sample points in
cosmological parameter space, and compare with CCL.
"""
from param_space import *
import os, sys
# Need to specify directory containing 'class' executable
CLASS_ROOT = None
if len(sys.argv) > 1: CLASS_ROOT = sys.argv[1]
assert CLASS_ROOT is not None, \
"Must specify CLASS_ROOT (as argument or in source file)."
PREFIX = "std" # Prefix to use for this run
NSAMP = 100 # No. of sample points in parameter space
SEED = 10 # Random seed to use for sampling
ZVALS = np.arange(0., 3., 0.5) # Redshifts to evaluate P(k) at
# Define parameter space to sample over
param_dict = {
'h': (0.55, 0.8),
'Omega_cdm': (0.15, 0.35),
'Omega_b': (0.018, 0.052),
'A_s': (1.5e-9, 2.5e-9),
'n_s': (0.94, 0.98)
}
# Check that expected output data directories exist
class_datadir = "%s/data/class" % os.path.abspath(".")
ccl_datadir = "%s/data/ccl" % os.path.abspath(".")
if not os.path.exists(class_datadir): os.makedirs(class_datadir)
if not os.path.exists(ccl_datadir): os.makedirs(ccl_datadir)
# Get root filename for CLASS and CCL filenames
root = "%s/%s" % (class_datadir, PREFIX)
ccl_root = "%s/%s" % (ccl_datadir, PREFIX)
# Generate sample points on Latin hypercube
sample_points = generate_latin_hypercube( samples=NSAMP, param_dict=param_dict,
class_root=CLASS_ROOT, seed=SEED )
save_hypercube("%s_params.dat" % root, sample_points)
# Generate CLASS .ini files
print("Writing CLASS linear .ini files")
generate_class_ini(sample_points, root="%s_lin_std" % root,
nonlinear=False, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_lin_pre" % root,
nonlinear=False, redshifts=ZVALS)
print("Writing CLASS nonlinear .ini files")
generate_class_ini(sample_points, root="%s_nl_std" % root,
nonlinear=True, redshifts=ZVALS)
generate_class_ini(sample_points, root="%s_nl_pre" % root,
nonlinear=True, redshifts=ZVALS)
# Run CLASS on generated .ini files
print("Running CLASS on .ini files")
run_class(fname_pattern="%s_lin_std_?????.ini" % root,
class_root=CLASS_ROOT, precision=False)
run_class(fname_pattern="%s_lin_pre_?????.ini" % root,
class_root=CLASS_ROOT, precision=True)
run_class(fname_pattern="%s_nl_std_?????.ini" % root,
class_root=CLASS_ROOT, precision=False)
run_class(fname_pattern="%s_nl_pre_?????.ini" % root,
class_root=CLASS_ROOT, precision=True)
# Run CCL for the same sets of parameters
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_lin_std" % root,
zvals=ZVALS, default_params={'mnu': 0.}, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_lin_pre" % root,
zvals=ZVALS, default_params={'mnu': 0.}, mode='pre')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_nl_std" % root,
zvals=ZVALS, default_params={'mnu': 0.},
nonlin=True, mode='std')
generate_ccl_pspec(sample_points, ccl_root,
class_data_root="%s_nl_pre" % root,
zvals=ZVALS, default_params={'mnu': 0.},
nonlin=True, mode='pre')
|
[
"philbull@gmail.com"
] |
philbull@gmail.com
|
1416874157729825714165b2eecc1af24e692c63
|
d3196fb38078fdbe966bd5af8a8a4f2924a47c20
|
/wandb/sdk/wandb_manager.py
|
69e8c503a571a70e1c710938889cb33c97a665cf
|
[
"MIT"
] |
permissive
|
morganmcg1/client
|
a1ae01ea302f13a6c9850972411ecabcb900dbc6
|
099f7aa938fb62c5a5d3e12f7d2067196498b67c
|
refs/heads/master
| 2023-09-06T01:14:40.282234
| 2021-11-13T03:01:01
| 2021-11-13T03:01:01
| 427,620,124
| 0
| 0
|
MIT
| 2021-11-13T09:22:12
| 2021-11-13T09:22:11
| null |
UTF-8
|
Python
| false
| false
| 3,831
|
py
|
"""Manage wandb processes.
Create a grpc manager channel.
"""
import atexit
import multiprocessing
import os
from typing import Callable, Optional, Tuple, TYPE_CHECKING
from wandb import env
from wandb.sdk.lib.exit_hooks import ExitHooks
if TYPE_CHECKING:
from wandb.sdk.service import service
from wandb.sdk.wandb_settings import Settings
class _ManagerToken:
_token_str: Optional[str]
def __init__(self) -> None:
self._token_str = None
def probe(self) -> None:
token = os.environ.get(env.SERVICE)
if not token:
return
self._token_str = token
def configure(self, port: int) -> None:
version = "1"
pid = os.getpid()
token = "-".join([version, str(pid), str(port)])
os.environ[env.SERVICE] = token
self._token_str = token
def parse(self) -> Tuple[str, int, int]:
assert self._token_str
parts = self._token_str.split("-")
assert len(parts) == 3, f"token must have 3 parts: {parts}"
# TODO: make more robust?
version, pid_str, port_str = parts
pid_int = int(pid_str)
port_int = int(port_str)
return version, pid_int, port_int
@property
def token(self) -> Optional[str]:
return self._token_str
@property
def port(self) -> int:
_, _, port = self.parse()
return port
class _Manager:
_token: _ManagerToken
_atexit_lambda: Optional[Callable[[], None]]
_hooks: Optional[ExitHooks]
def __init__(self) -> None:
# TODO: warn if user doesnt have grpc installed
from wandb.sdk.service import service
self._atexit_lambda = None
self._hooks = None
self._token = _ManagerToken()
self._service = service._Service()
self._setup_mp()
self._setup()
def _setup_mp(self) -> None:
# NOTE: manager does not support fork yet, support coming later
start_method = multiprocessing.get_start_method(allow_none=True)
assert start_method != "fork", "start method 'fork' is not supported yet"
if start_method is None:
multiprocessing.set_start_method("spawn")
def _setup(self) -> None:
self._token.probe()
if not self._token.token:
self._setup_service()
port = self._token.port
self._service.connect(port=port)
def _setup_service(self) -> None:
port = self._service.start()
assert port
self._token.configure(port=port)
self._atexit_setup()
def _atexit_setup(self) -> None:
self._atexit_lambda = lambda: self._atexit_teardown()
self._hooks = ExitHooks()
self._hooks.hook()
atexit.register(self._atexit_lambda)
def _atexit_teardown(self) -> None:
exit_code = self._hooks.exit_code if self._hooks else 0
self._teardown(exit_code)
def _teardown(self, exit_code: int) -> None:
if self._atexit_lambda:
atexit.unregister(self._atexit_lambda)
self._atexit_lambda = None
self._inform_teardown(exit_code)
def _get_service(self) -> "service._Service":
return self._service
def _inform_init(self, settings: "Settings", run_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_init(settings=settings, run_id=run_id)
def _inform_attach(self, attach_id: str) -> None:
svc = self._service
assert svc
svc._svc_inform_attach(attach_id=attach_id)
def _inform_finish(self, run_id: str = None) -> None:
svc = self._service
assert svc
svc._svc_inform_finish(run_id=run_id)
def _inform_teardown(self, exit_code: int) -> None:
svc = self._service
assert svc
svc._svc_inform_teardown(exit_code)
|
[
"noreply@github.com"
] |
morganmcg1.noreply@github.com
|
bfd99c37854c269ae7937012f17c63e5e0f061cd
|
223590e81400eb8192aeb0a56b36b5a80408d4b4
|
/House Robber III.py
|
584a90ed9a8be182d31caf96c6718832136be16d
|
[] |
no_license
|
TianyaoHua/LeetCodeSolutions
|
c47fd3b6ae0bf60c0656ce12fb88290672c129ed
|
418172cee1bf48bb2aed3b84fe8b4defd9ef4fdf
|
refs/heads/master
| 2020-03-06T19:48:13.338630
| 2018-08-10T18:27:52
| 2018-08-10T18:27:52
| 127,037,907
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def value(self, node, dict):
if not node:
return 0
elif node in dict:
return dict[node]
else:
money1 = self.value(node.lert, dict) + self.value(node.right ,dict)
money2 = node.val
if node.left:
money2 += (self.value(node.left.left, dict) + self.value(node.left.right, dict))
if node.right:
money2 += (self.value(node.right.left, dict) + self.value(node.right.right, dict))
money = max(money1, money2)
dict.update({node: money})
return money
def rob(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.value(root, {})
|
[
"hua.tianyao@columbia.edu"
] |
hua.tianyao@columbia.edu
|
1226dd2c5a9a51b542246bedd7bd3c1873fdbad6
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/auth/models/registration_requests.py
|
2b82b271bd15c697c17e87bacc2dcbf1d924edf3
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383
| 2018-04-27T09:28:25
| 2018-04-27T09:28:25
| 101,277,325
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
from __future__ import unicode_literals
from sqlalchemy.dialects.postgresql import ARRAY, JSON
from werkzeug.datastructures import MultiDict
from fossir.core.db import db
from fossir.util.locators import locator_property
from fossir.util.string import format_repr, return_ascii
class RegistrationRequest(db.Model):
__tablename__ = 'registration_requests'
__table_args__ = (
db.CheckConstraint('email = lower(email)', 'lowercase_email'),
{'schema': 'users'}
)
id = db.Column(
db.Integer,
primary_key=True
)
comment = db.Column(
db.Text,
nullable=False,
default=''
)
email = db.Column(
db.String,
unique=True,
nullable=False,
index=True
)
extra_emails = db.Column(
ARRAY(db.String),
nullable=False,
default=[]
)
user_data = db.Column(
JSON,
nullable=False
)
_identity_data = db.Column(
'identity_data',
JSON,
nullable=False
)
settings = db.Column(
JSON,
nullable=False
)
@locator_property
def locator(self):
return {'request_id': self.id}
@property
def identity_data(self):
identity_data = self._identity_data.copy()
# if we have data in identity_data, it was converted from a
# MultiDict so we need to convert it back.
if 'data' in identity_data:
tmp = MultiDict()
tmp.update(self._identity_data['data'])
identity_data['data'] = tmp
return identity_data
@identity_data.setter
def identity_data(self, identity_data):
identity_data = identity_data.copy()
# `identity_data['data']` for multipass-based identities is a
# MultiDict, but json-encoding it would lose all extra values
# for a key, so we convert it to a dict of lists first
if 'data' in identity_data:
identity_data['data'] = dict(identity_data['data'].lists())
self._identity_data = identity_data
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'email')
|
[
"hodardhazwinayo@gmail.com"
] |
hodardhazwinayo@gmail.com
|
7b571d83f84608ebeeaddbfae06938549a457d9b
|
54d17336ca03801bd9c9ef37be8642b332ab71c4
|
/osm/SO/common/python/rift/mano/yang_translator/rwmano/translate_descriptors.py
|
2023db5a8ce00b6b1b6982b49c0b0047939c92fb
|
[] |
no_license
|
dennis-me/Pishahang
|
2428379c4f7d3ee85df4b85727ce92e8fe69957a
|
cdd0abe80a76d533d08a51c7970d8ded06624b7d
|
refs/heads/master
| 2020-09-07T12:35:54.734782
| 2020-01-24T20:11:33
| 2020-01-24T20:11:33
| 220,782,212
| 2
| 0
| null | 2019-11-10T11:46:44
| 2019-11-10T11:46:43
| null |
UTF-8
|
Python
| false
| false
| 8,136
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2016 RIFT.io Inc
import importlib
import os
from rift.mano.yang_translator.common.exception import YangClassAttributeError
from rift.mano.yang_translator.common.exception import YangClassImportError
from rift.mano.yang_translator.common.exception import YangModImportError
from rift.mano.yang_translator.common.utils import _
from rift.mano.yang_translator.conf.config import ConfigProvider \
as translatorConfig
from rift.mano.yang_translator.rwmano.syntax.tosca_resource \
import ToscaResource
class TranslateDescriptors(object):
'''Translate YANG NodeTemplates to RIFT.io MANO Resources.'''
YANG_DESC = (NSD, VNFD) = ('nsd', 'vnfd')
###########################
# Module utility Functions
# for dynamic class loading
###########################
YANG_TO_TOSCA_TYPE = None
def _load_classes(log, locations, classes):
'''Dynamically load all the classes from the given locations.'''
for cls_path in locations:
# Use the absolute path of the class path
abs_path = os.path.dirname(os.path.abspath(__file__))
abs_path = abs_path.replace('rift/mano/yang_translator/rwmano',
cls_path)
log.debug(_("Loading classes from %s") % abs_path)
# Grab all the yang type module files in the given path
mod_files = [f for f in os.listdir(abs_path) if (
f.endswith('.py') and
not f.startswith('__init__') and
f.startswith('yang_'))]
# For each module, pick out the target translation class
for f in mod_files:
f_name, ext = f.rsplit('.', 1)
mod_name = cls_path + '/' + f_name
mod_name = mod_name.replace('/', '.')
try:
mod = importlib.import_module(mod_name)
target_name = getattr(mod, 'TARGET_CLASS_NAME')
clazz = getattr(mod, target_name)
classes.append(clazz)
except ImportError:
raise YangModImportError(mod_name=mod_name)
except AttributeError:
if target_name:
raise YangClassImportError(name=target_name,
mod_name=mod_name)
else:
# TARGET_CLASS_NAME is not defined in module.
# Re-raise the exception
raise
def _generate_type_map(log):
'''Generate YANG translation types map.
Load user defined classes from location path specified in conf file.
Base classes are located within the yang directory.
'''
# Base types directory
BASE_PATH = 'rift/mano/yang_translator/rwmano/yang'
# Custom types directory defined in conf file
custom_path = translatorConfig.get_value('DEFAULT',
'custom_types_location')
# First need to load the parent module, for example 'contrib.mano',
# for all of the dynamically loaded classes.
classes = []
TranslateDescriptors._load_classes(log,
(BASE_PATH, custom_path),
classes)
try:
types_map = {clazz.yangtype: clazz for clazz in classes}
log.debug(_("Type maps loaded: {}").format(types_map.keys()))
except AttributeError as e:
raise YangClassAttributeError(message=e.message)
return types_map
def __init__(self, log, yangs, tosca_template, vnfd_files=None):
self.log = log
self.yangs = yangs
self.tosca_template = tosca_template
self.vnfd_files = vnfd_files
# list of all TOSCA resources generated
self.tosca_resources = []
self.metadata = {}
log.debug(_('Mapping between YANG nodetemplate and TOSCA resource.'))
def translate(self):
if TranslateDescriptors.YANG_TO_TOSCA_TYPE is None:
TranslateDescriptors.YANG_TO_TOSCA_TYPE = \
TranslateDescriptors._generate_type_map(self.log)
return self._translate_yang()
def translate_metadata(self):
"""Translate and store the metadata in instance"""
FIELDS_MAP = {
'ID': 'name',
'vendor': 'vendor',
'version': 'version',
}
metadata = {}
# Initialize to default values
metadata['name'] = 'yang_to_tosca'
metadata['vendor'] = 'RIFT.io'
metadata['version'] = '1.0'
if 'nsd' in self.yangs:
yang_meta = self.yang['nsd'][0]
elif 'vnfd' in self.yangs:
yang_meta = self.yang['vnfd'][0]
for key in FIELDS_MAP:
if key in yang_meta.keys():
metadata[key] = str(yang_meta[FIELDS_MAP[key]])
self.log.debug(_("Metadata {0}").format(metadata))
self.metadata = metadata
def _translate_yang(self):
self.log.debug(_('Translating the descriptors.'))
if self.NSD in self.yangs:
for nsd in self.yangs[self.NSD]:
self.log.debug(_("Translate descriptor of type nsd: {}").
format(nsd))
node_name = nsd.pop(ToscaResource.NAME).replace(' ','_')
node_name = node_name if node_name.endswith('nsd') else ''.join([node_name, '_nsd'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.NSD](
self.log,
node_name,
self.NSD,
nsd,
self.vnfd_files)
self.tosca_resources.append(tosca_node)
vnfd_name_list = []
if self.VNFD in self.yangs:
for vnfd in self.yangs[self.VNFD]:
if vnfd['name'] not in vnfd_name_list:
self.log.debug(_("Translate descriptor of type vnfd: {}").
format(vnfd))
vnfd_name_list.append(vnfd['name'])
tosca_node = TranslateDescriptors. \
YANG_TO_TOSCA_TYPE[self.VNFD](
self.log,
vnfd.pop(ToscaResource.NAME),
self.VNFD,
vnfd)
self.tosca_resources.append(tosca_node)
# First translate VNFDs
for node in self.tosca_resources:
if node.type == self.VNFD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang()
# Now translate NSDs
for node in self.tosca_resources:
if node.type == self.NSD:
self.log.debug(_("Handle yang for {0} of type {1}").
format(node.name, node.type_))
node.handle_yang(self.tosca_resources)
return self.tosca_resources
def find_tosca_resource(self, name):
for resource in self.tosca_resources:
if resource.name == name:
return resource
def _find_yang_node(self, yang_name):
for node in self.nodetemplates:
if node.name == yang_name:
return node
|
[
"github@OrangeOnBlack.de"
] |
github@OrangeOnBlack.de
|
fed38d32e3c3d4c4c31ce116303ad6588a73d350
|
49cd488edb28d0433aaab9686e90ed90d134dd14
|
/tests/test_generator.py
|
c422ffbb35a6f1b2df7ba62d732e99b0d49a368f
|
[
"MIT"
] |
permissive
|
Dmdv/python-fibers
|
349fab65a37475b2fee73bdc53960b1a289227bd
|
20349077843033610864935e45977cf33d16a7e1
|
refs/heads/master
| 2021-01-15T20:53:34.925672
| 2013-08-06T21:19:08
| 2013-08-06T21:19:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
import sys
sys.path.insert(0, '../')
import unittest
import fibers
from fibers import Fiber
class genlet(Fiber):
def __init__(self, *args, **kwds):
self.args = args
self.kwds = kwds
Fiber.__init__(self, target=self.run)
def run(self):
fn, = self.fn
fn(*self.args, **self.kwds)
def __iter__(self):
return self
def __next__(self):
self.parent = fibers.current()
result = self.switch()
if self.is_alive():
return result
else:
raise StopIteration
# Hack: Python < 2.6 compatibility
next = __next__
def Yield(value):
g = fibers.current()
while not isinstance(g, genlet):
if g is None:
raise RuntimeError('yield outside a genlet')
g = g.parent
g.parent.switch(value)
def generator(func):
class generator(genlet):
fn = (func,)
return generator
# ____________________________________________________________
class GeneratorTests(unittest.TestCase):
def test_generator(self):
seen = []
def g(n):
for i in range(n):
seen.append(i)
Yield(i)
g = generator(g)
for k in range(3):
for j in g(5):
seen.append(j)
self.assertEqual(seen, 3 * [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
[
"saghul@gmail.com"
] |
saghul@gmail.com
|
81ade5278aeab0a1197c12c2bde8a62122fad070
|
3f60b999ea7bda83c9586f75f52463dc20337f24
|
/sensitive_user_portrait/cron/attribute/filter_sensitive_uid_text.py
|
d49971916dc61266df2f85bbccec815232885978
|
[] |
no_license
|
jianjian0dandan/sensitive_user_portrait
|
629e49ce71db92b50634bac9c828811cdb5381e9
|
cacc30267ebc0e621b1d48d4f1206277a0f48123
|
refs/heads/master
| 2021-01-20T23:18:07.138057
| 2016-05-22T12:09:40
| 2016-05-22T12:09:40
| 42,869,287
| 0
| 0
| null | 2015-09-21T13:55:12
| 2015-09-21T13:55:11
| null |
UTF-8
|
Python
| false
| false
| 4,249
|
py
|
# -*- coding: utf-8 -*-
import csv
import os
import sys
import time
from elasticsearch import Elasticsearch
from DFA_filter import sensitive_words_extract
reload(sys)
sys.path.append('./../flow1/')
from csv2json import itemLine2Dict, csv2bin
sys.setdefaultencoding('utf-8')
f_file = open('es_error.txt', 'wb')
CSV_FILE_PATH = '/home/ubuntu8/data1309/20130901'
uid_csv_path = './../recommend_in/'
uid_csv = 'sensitive_uid_list.txt'
es = Elasticsearch('219.224.135.93:9206')
count_n = 0
tb = time.time()
uid_set = set()
with open (os.path.join(uid_csv_path, uid_csv), 'rb') as t:
for line in t:
uid = line.strip()
uid_set.add(uid)
count_n += 1
uid_text = file('sensitive_uid_text_1.csv', 'wb')
writer = csv.writer(uid_text)
count = 0
count_f = 0
bulk_action = []
file_list = set(os.listdir(CSV_FILE_PATH))
print "total file is ", len(file_list)
for each in file_list:
with open(os.path.join(CSV_FILE_PATH, each), 'rb') as f:
try:
for line in f:
count_f += 1
weibo_item = itemLine2Dict(line)
if weibo_item:
weibo_item_bin = csv2bin(weibo_item)
if int(weibo_item_bin['sp_type']) != 1:
continue
#if not str(weibo_item_bin['uid']) in uid_set:
# continue
text = weibo_item_bin['text']
message_type = 0
if weibo_item_bin['message_type'] == 1:
write_text = text
message_type = 1
elif weibo_item_bin['message_type'] == 2:
temp = text.split('//@')[0].split(':')[1:]
write_text = ''.join(temp)
message_type = 2
elif weibo_item_bin['message_type'] == 3:
write_text = text
message_type = 3
else:
continue
if not isinstance(write_text, str):
text = text.encode('utf-8', 'ignore')
'''
if text:
sw_dict = sensitive_words_extract(text)
if not sw_dict:
sensitive = 0
else:
seneitive = 1
'''
origin_text = weibo_item_bin['text'].encode('utf-8', 'ignore')
item = [str(weibo_item_bin['uid']), str(weibo_item_bin['mid']), str(weibo_item_bin['send_ip']), str(weibo_item_bin['timestamp']), message_type, str(weibo_item_bin['root_uid']), str(weibo_item_bin['root_mid']), origin_text ]
key_list = ['uid', 'mid', 'ip', 'timestamp', 'message_type','root_uid', 'root_mid', 'text']
item_dict = dict()
for i in range(len(key_list)):
item_dict[key_list[i]] = item[i]
_id = item[1]
action = {'index': {'_id': _id}}
bulk_action.extend([action, item_dict])
count += 1
if count % 1000 == 0:
if bulk_action:
es.bulk(bulk_action, index='weibo_text', doc_type='text', timeout=30)
bulk_action = []
'''
except Exception, r:
time_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
f_file.write(time_date + '\t' + r + '\n')
'''
print count, count_f
#if write_text != "":
# writer.writerow(item)
# count += 1
if count_f % 10000 == 0:
ts = time.time()
print "%s per %s second" %(count_f, ts-tb)
print "have get %s" % count
tb = ts
except SystemError:
print "system error"
except Exception, r:
print Exception, r
print bulk_action
|
[
"1257819385@qq.com"
] |
1257819385@qq.com
|
ef9249722a55ff00c9ec100a856e360d1281320d
|
5e255ad1360c90478393744586663741a9569c21
|
/linebot/v3/audience/models/create_audience_group_request.py
|
3d855e668830bb2b753b6d12e2288f9444ee979f
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
# coding: utf-8
"""
LINE Messaging API
This document describes LINE Messaging API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr, conlist, constr
from linebot.v3.audience.models.audience import Audience
class CreateAudienceGroupRequest(BaseModel):
"""
Create audience for uploading user IDs (by JSON)
https://developers.line.biz/en/reference/messaging-api/#create-upload-audience-group
"""
description: Optional[constr(strict=True, max_length=120)] = Field(None, description="The audience's name. This is case-insensitive, meaning AUDIENCE and audience are considered identical. Max character limit: 120 ")
is_ifa_audience: Optional[StrictBool] = Field(None, alias="isIfaAudience", description="To specify recipients by IFAs: set true. To specify recipients by user IDs: set false or omit isIfaAudience property. ")
upload_description: Optional[StrictStr] = Field(None, alias="uploadDescription", description="The description to register for the job (in jobs[].description). ")
audiences: Optional[conlist(Audience, max_items=10000)] = Field(None, description="An array of user IDs or IFAs. Max number: 10,000 ")
__properties = ["description", "isIfaAudience", "uploadDescription", "audiences"]
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
def to_str(self) -> str:
"""Returns the string representation of the model using alias"""
return pprint.pformat(self.dict(by_alias=True))
def to_json(self) -> str:
"""Returns the JSON representation of the model using alias"""
return json.dumps(self.to_dict())
@classmethod
def from_json(cls, json_str: str) -> CreateAudienceGroupRequest:
"""Create an instance of CreateAudienceGroupRequest from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self):
"""Returns the dictionary representation of the model using alias"""
_dict = self.dict(by_alias=True,
exclude={
},
exclude_none=True)
# override the default output from pydantic.v1 by calling `to_dict()` of each item in audiences (list)
_items = []
if self.audiences:
for _item in self.audiences:
if _item:
_items.append(_item.to_dict())
_dict['audiences'] = _items
return _dict
@classmethod
def from_dict(cls, obj: dict) -> CreateAudienceGroupRequest:
"""Create an instance of CreateAudienceGroupRequest from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return CreateAudienceGroupRequest.parse_obj(obj)
_obj = CreateAudienceGroupRequest.parse_obj({
"description": obj.get("description"),
"is_ifa_audience": obj.get("isIfaAudience"),
"upload_description": obj.get("uploadDescription"),
"audiences": [Audience.from_dict(_item) for _item in obj.get("audiences")] if obj.get("audiences") is not None else None
})
return _obj
|
[
"noreply@github.com"
] |
line.noreply@github.com
|
f8e54ed7de4fa1713441907b2b002188d27537c3
|
d7da288db4fd9fc0bb1c60c5074f290b5f70c8ef
|
/Aulas Python/Conteúdo das Aulas/033/Gabarito/Exercício 1 - Gabarito.py
|
897f4b881fb6433e5d3d0ea8f4c4d834a4d639ac
|
[] |
no_license
|
luizdefranca/Curso-Python-IgnoranciaZero
|
dbf4cf342b3f3efea6fb3b8cf27bf39ed92927e9
|
9fbf2f25e3e6fce1f1582af0bd6bc7dbc5b9f588
|
refs/heads/master
| 2020-04-09T07:17:00.735378
| 2016-09-12T10:51:37
| 2016-09-12T10:51:37
| 67,999,169
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
"""
Faça um programa com uma função chamada somaImposto.
A função possui dois parâmetros formais:
1 - taxaImposto, que é a quantia de imposto sobre vendas expressa em
porcentagem
2 - custo, que é o custo de um item antes do imposto.
A função “altera” o valor de custo para incluir o imposto sobre vendas.
"""
def somaImposto(taxaImposto, custo):
return custo*(1 + taxaImposto/100)
custo_normal = float(input("Digite o custo(R$): "))
taxa = float(input("Digite a taxa de imposto(%): "))
print("O custo recalculado com o imposto é de R$%.2f"%somaImposto(custo_normal, taxa))
|
[
"luizramospe@hotmail.com"
] |
luizramospe@hotmail.com
|
9679197a61ccf26610d250d3868a81a8e7401233
|
3e9cdcc8847da5a2ea8391639ad8fd95592475b1
|
/696.py
|
edda7ebd43c2b347e2386e5ca317ea69007a5d58
|
[] |
no_license
|
mindentropy/leetcode
|
ec790ed671a2224411133af127e605438bbbbe52
|
4a24edca5926c0b10d1a4786262dd403b12d1aee
|
refs/heads/master
| 2023-01-27T11:26:07.949478
| 2023-01-25T19:08:18
| 2023-01-25T19:08:18
| 233,759,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
#!/usr/bin/env python
class Solution(object):
def countBinarySubstrings(self, s):
strcnt = 0
i = 0
while i < len(s) - 1:
j = i + 1
oppcnt = 1
eqflag = True
while j < len(s):
if s[i] == s[j]:
if eqflag == False:
break
oppcnt += 1
else:
oppcnt -= 1
eqflag = False
j += 1
if oppcnt <= 0:
break
if oppcnt == 0:
strcnt += 1
i += 1
return strcnt
class Solution(object):
def countBinarySubstrings(self, s):
group = [1]
for idx in xrange(1, len(s)):
if s[idx - 1] != s[idx]:
group.append(1)
else:
group[-1] += 1
cnt = 0
for idx in xrange(len(group) - 1):
cnt += min(group[idx], group[idx + 1])
return cnt
if __name__ == '__main__':
sol = Solution()
print sol.countBinarySubstrings('00110011')
|
[
"mindentropy@gmail.com"
] |
mindentropy@gmail.com
|
8bf5c9cb87033d334d26c9436c9f04e4b173ba65
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/kusto/azure-mgmt-kusto/generated_samples/kusto_managed_private_endpoints_check_name_availability.py
|
3ccfc9a68d42bd47f54b8ba0ce14082f3885382b
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.kusto import KustoManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-kusto
# USAGE
python kusto_managed_private_endpoints_check_name_availability.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Run the managed-private-endpoint name-availability sample.

    Builds a Kusto management client from ambient Azure credentials and
    asks whether the managed-private-endpoint name "pme1" is free on the
    sample cluster, printing the service's answer.
    """
    mgmt_client = KustoManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="12345678-1234-1234-1234-123456789098",
    )

    availability = mgmt_client.managed_private_endpoints.check_name_availability(
        resource_group_name="kustorptest",
        cluster_name="kustoCluster",
        resource_name={"name": "pme1", "type": "Microsoft.Kusto/clusters/managedPrivateEndpoints"},
    )
    print(availability)


# x-ms-original-file: specification/azure-kusto/resource-manager/Microsoft.Kusto/stable/2023-05-02/examples/KustoManagedPrivateEndpointsCheckNameAvailability.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
f9149adc1d138f483eb14838fe57cbf12e65eec4
|
5de5ae0adb6fb1e73c2e897fbc13b6abf53c559b
|
/Applications/Equations/knapsack-1.py
|
98dc10ab696f6baaedba79c8b32dbe93669eedb8
|
[] |
no_license
|
Trietptm-on-Coding-Algorithms/Learning-Z3
|
af935450226ee3299e10361f21a567945aa0fd5c
|
c5ef7faca49aa164556b3c7e9ccfb4709027cf74
|
refs/heads/master
| 2020-05-13T18:34:38.105308
| 2017-12-23T11:08:43
| 2017-12-23T11:08:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
# Solving knapsack problem with Z3
#
# Use:
#   python knapsack.py
#
from z3 import *

# Menu item counts; prices (in cents) taken from https://www.xkcd.com/287/
fruits, fries, salad, wings, sticks, plate = Ints('fruits fries salad wings sticks plate')

solver = Solver()
# Quantities cannot be negative.
solver.add(fruits >= 0, fries >= 0, salad >= 0, wings >= 0, sticks >= 0, plate >= 0)
# Total must hit exactly $15.05.
solver.add(215*fruits + 275*fries + 225*salad + 355*wings + 420*sticks + 580*plate == 1505)

# Enumerate every satisfying assignment by repeatedly blocking the last model.
result = []
while solver.check() == sat:
    model = solver.model()
    print(model)
    result.append(model)
    # Build a constraint that excludes this exact model.
    block = []
    for decl in model:
        # decl is a declaration; only plain constants are supported here.
        if decl.arity() > 0:
            raise Z3Exception("uninterpreted function are not supported")
        const = decl()
        if is_array(const) or const.sort().kind() == Z3_UNINTERPRETED_SORT:
            raise Z3Exception("arrays and uninterpreted sorts are not supported")
        block.append(const != model[decl])
    solver.add(Or(block))

print(len(result))
# https://stackoverflow.com/questions/141779/solving-the-np-complete-proble
|
[
"me@xathrya.id"
] |
me@xathrya.id
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.