| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k – 681M |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128–8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
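The data rows below follow this schema. As an illustrative sketch only (not part of the dump), records loaded into a pandas DataFrame could be summarised per license bucket as follows; the loading step and the variable name `df` are assumptions:

import pandas as pd

def summarize_by_license(df: pd.DataFrame) -> pd.DataFrame:
    # one row per license_type ("permissive" / "no_license"), with simple aggregates
    return df.groupby("license_type").agg(
        files=("blob_id", "count"),
        total_bytes=("length_bytes", "sum"),
        max_stars=("star_events_count", "max"),
    )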
cc8073a006724d4c3a463c9da8af11bbef0e2d5c
|
19136335b7e88324546fdfed45b4d0b22042202c
|
/rplugin/python3/deoplete/filter/converter_truncate_menu.py
|
90a331d0f918b01825f96575468fc8be3376b89e
|
[
"MIT"
] |
permissive
|
nholik/deoplete.nvim
|
3074fa3cdd5a8a2df5f300d0ac74fedde6555fdf
|
614cd3ddf1f352c977f3405e809d967093571117
|
refs/heads/master
| 2020-05-27T18:05:59.540419
| 2019-05-26T22:26:41
| 2019-05-26T22:26:41
| 188,736,112
| 0
| 0
|
NOASSERTION
| 2019-05-26T22:06:01
| 2019-05-26T22:06:01
| null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
# ============================================================================
# FILE: converter_truncate_menu.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from deoplete.base.filter import Base
from deoplete.util import truncate_skipping
class Filter(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'converter_truncate_menu'
self.description = 'truncate menu converter'
def filter(self, context):
max_width = context['max_menu_width']
if not context['candidates'] or 'menu' not in context[
'candidates'][0] or max_width <= 0:
return context['candidates']
footer_width = max_width / 3
for candidate in context['candidates']:
candidate['menu'] = truncate_skipping(
candidate.get('menu', ''),
max_width, '..', footer_width)
return context['candidates']
|
[
"Shougo.Matsu@gmail.com"
] |
Shougo.Matsu@gmail.com
|
2a2d1d8830e835a1494087e94fb849e401876cc4
|
bf21cd0ef7a94fa106ccd9f91a4bbfdcda7f94ed
|
/python-basic/chapter04/ex01_2.py
|
2b0d435813f0cc5b511a07e9e93529dd676c29ef
|
[] |
no_license
|
juneglee/Deep_Learning
|
fdf8cae1b962aaa0ce557cb53f78a22b6d5ae1e8
|
17a448cf6a7c5b61b967dd78af3d328d63378205
|
refs/heads/master
| 2023-07-15T03:02:55.739619
| 2021-08-19T14:04:55
| 2021-08-19T14:04:55
| 273,253,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# List operators: concatenation (+), repetition (*), len()
# list operators
list_a = [1, 2, 3]
list_b = [4, 5, 6]
print("# 리스트")
print("list_a = ", list_a)
print("list_b = ", list_b)
print()
# basic operators: concatenation (+), repetition (*)
print("# 리스트 기본 연산자")
print("list_a + list_b =", list_a + list_b)
print("list_a * 3 =", list_a * 3)
print()
# getting the length: len()
print("# 길이 구하기")
print("len(list_a) = ", len(list_a))
|
[
"klcpop1@gmail.com"
] |
klcpop1@gmail.com
|
59f257e74467edf2e02f1c12f63bef4bc528fd7e
|
085488720112922ff3aed15f99f3c93911425c4a
|
/vesper/signal/tests/test_s3_byte_sequence.py
|
c6f1f484a9b415154c8d517bf998a2ab6d8b4200
|
[
"MIT"
] |
permissive
|
HaroldMills/Vesper
|
0b61d18bc241af22bfc251088fc87d72add6367b
|
ec92fe5231f54336499db189a3bbc6cb08a19e61
|
refs/heads/master
| 2023-07-05T22:45:27.316498
| 2023-07-04T11:58:14
| 2023-07-04T11:58:14
| 19,112,486
| 49
| 6
|
MIT
| 2023-02-14T16:09:19
| 2014-04-24T14:55:34
|
Python
|
UTF-8
|
Python
| false
| false
| 909
|
py
|
import unittest
import warnings
from vesper.signal.tests.byte_sequence_tests import ByteSequenceTests
from vesper.signal.s3_byte_sequence import S3ByteSequence
from vesper.tests.test_case import TestCase
REGION_NAME = 'us-east-2'
BUCKET_NAME = 'vesper-test'
OBJECT_KEY = 'Bytes 00-FF.dat'
OBJECT_LENGTH = 256
# TODO: Look into ResourceWarning issue mentioned below. Is it safe to
# ignore the warnings?
class S3ByteSequenceTests(TestCase, ByteSequenceTests):
@property
def sequence(self):
return S3ByteSequence(REGION_NAME, BUCKET_NAME, OBJECT_KEY)
def setUp(self):
# Without the following, the `S3ByteSequence` unit tests
# output a ResourceWarning about an unclosed transport to the
# console.
warnings.filterwarnings(
action="ignore", message="unclosed", category=ResourceWarning)
if __name__ == '__main__':
unittest.main()
|
[
"harold.mills@gmail.com"
] |
harold.mills@gmail.com
|
102f709bebff12b32c93c321b66bd7327cd6e92b
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/matchmaking/models/models_query_mock_by.py
|
8e41cf6eec7f84d441d5c2d4e272a292a791f88e
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,879
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Matchmaking Service (2.25.7)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsQueryMockBy(Model):
"""Models query mock by (models.QueryMockBy)
Properties:
timestamp_after: (timestamp_after) REQUIRED int
"""
# region fields
timestamp_after: int # REQUIRED
# endregion fields
# region with_x methods
def with_timestamp_after(self, value: int) -> ModelsQueryMockBy:
self.timestamp_after = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "timestamp_after"):
result["timestamp_after"] = int(self.timestamp_after)
elif include_empty:
result["timestamp_after"] = 0
return result
# endregion to methods
# region static methods
@classmethod
def create(cls, timestamp_after: int, **kwargs) -> ModelsQueryMockBy:
instance = cls()
instance.timestamp_after = timestamp_after
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> ModelsQueryMockBy:
instance = cls()
if not dict_:
return instance
if "timestamp_after" in dict_ and dict_["timestamp_after"] is not None:
instance.timestamp_after = int(dict_["timestamp_after"])
elif include_empty:
instance.timestamp_after = 0
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, ModelsQueryMockBy]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[ModelsQueryMockBy]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[
ModelsQueryMockBy, List[ModelsQueryMockBy], Dict[Any, ModelsQueryMockBy]
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"timestamp_after": "timestamp_after",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"timestamp_after": True,
}
# endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
685eea7db453f95d3b09c7e014f28eeee0ba4439
|
a8123a86db99b9365b10ba76dd509d58caa7bc10
|
/python/practice/start_again/2021/05182021/Day18.3_Darw_a_spriograph.py
|
8814fcac3b147a6c0f49245cd49b4fbe21a8a16f
|
[] |
no_license
|
smohapatra1/scripting
|
c0404081da8a10e92e7c7baa8b540acc16540e77
|
3628c9109204ad98231ae8ee92b6bfa6b27e93cd
|
refs/heads/master
| 2023-08-22T20:49:50.156979
| 2023-08-22T20:43:03
| 2023-08-22T20:43:03
| 147,619,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
#Draw a Spirograph
from turtle import Turtle, Screen
import turtle as t
import random
tim = t.Turtle()
t.colormode(255)
tim.speed("fastest")
#Random Color
def random_color():
r = random.randint(0,255)
g = random.randint(0,255)
b = random.randint(0,255)
color = (r, g, b)
return color
def draw_spirograph(size_of_gap):
for _ in range(int(360/size_of_gap)):
tim.color(random_color())
tim.circle(100)
#current_heading = tim.heading()
# To change the direction
tim.setheading(tim.heading() + size_of_gap )
draw_spirograph(10)
screen = t.Screen()
screen.exitonclick()
|
[
"samarendra.mohapatra121@gmail.com"
] |
samarendra.mohapatra121@gmail.com
|
aaf7c07df0a3a79d0aa83017aa4a3142f7911d98
|
dec5c1416279178c23e81794789ed27e7e806faf
|
/profiles_api/models.py
|
921345fafbd8fe1b8cb4afa2e7952b8838987617
|
[
"MIT"
] |
permissive
|
amitarvindpatil/profiles-rest-api
|
44c7555888e654a2a64362d21834f5a67aeab07a
|
c2092bdc13c77e2f1f3cd4940740f752cc2b180f
|
refs/heads/master
| 2022-09-15T06:53:40.777169
| 2020-05-31T09:01:43
| 2020-05-31T09:01:43
| 260,257,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
# Create your models here.
class UserProfileManager(BaseUserManager):
"""Manager For UserProfile"""
def create_user(self,email,name,password=None):
"""Create New User Prfile"""
if not email:
raise ValueError('User Must have an email address')
email = self.normalize_email(email)
user = self.model(email=email,name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self,email,name,password):
"""create and save new superuser with given details"""
user = self.create_user(email,name,password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
""" DataBase model for user in a system """
email = models.EmailField(max_length=255,unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
""" Retrive Full Name of User"""
return self.name
def get_short_name(self):
""" Retrive Short Name of user """
return self.name
def __str__(self):
""" Retrive String representation of user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile Status Update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.status_text
|
[
"amitpatil04041993@gmail.com"
] |
amitpatil04041993@gmail.com
|
792b61efe2adbe81bfa8e2d488a1dbf4bd884444
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-das/huaweicloudsdkdas/v3/model/export_top_sql_templates_details_response.py
|
1286a8c3db19b4ec0f54ea95567708a585fd8a62
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,619
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportTopSqlTemplatesDetailsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'top_sql_templates': 'list[TopSqlTemplate]',
'total_count': 'int'
}
attribute_map = {
'top_sql_templates': 'top_sql_templates',
'total_count': 'total_count'
}
def __init__(self, top_sql_templates=None, total_count=None):
"""ExportTopSqlTemplatesDetailsResponse
The model defined in huaweicloud sdk
:param top_sql_templates: List of SQL templates.
:type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
:param total_count: Total number of SQL templates.
:type total_count: int
"""
super(ExportTopSqlTemplatesDetailsResponse, self).__init__()
self._top_sql_templates = None
self._total_count = None
self.discriminator = None
if top_sql_templates is not None:
self.top_sql_templates = top_sql_templates
if total_count is not None:
self.total_count = total_count
@property
def top_sql_templates(self):
"""Gets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
List of SQL templates.
:return: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
:rtype: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
"""
return self._top_sql_templates
@top_sql_templates.setter
def top_sql_templates(self, top_sql_templates):
"""Sets the top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
List of SQL templates.
:param top_sql_templates: The top_sql_templates of this ExportTopSqlTemplatesDetailsResponse.
:type top_sql_templates: list[:class:`huaweicloudsdkdas.v3.TopSqlTemplate`]
"""
self._top_sql_templates = top_sql_templates
@property
def total_count(self):
"""Gets the total_count of this ExportTopSqlTemplatesDetailsResponse.
Total number of SQL templates.
:return: The total_count of this ExportTopSqlTemplatesDetailsResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ExportTopSqlTemplatesDetailsResponse.
Total number of SQL templates.
:param total_count: The total_count of this ExportTopSqlTemplatesDetailsResponse.
:type total_count: int
"""
self._total_count = total_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExportTopSqlTemplatesDetailsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d5af0b0faa18fdfc639b31b41dfbdb93a890659b
|
085a6c4ac532bd4f46980f340890659b0cd03824
|
/two_sigma_problems/problem_9.py
|
f2c2e10d39d7f5185f1a978013c9b743178ba7e5
|
[
"MIT"
] |
permissive
|
thinhnguyennt7/Daily-Coding-Problem
|
c66aa51422dc79ee912fbd042fefb2b2cf37a94f
|
16d42e33af1de08aac1d888be518e398b4674bc8
|
refs/heads/master
| 2021-04-04T02:10:52.800504
| 2020-03-18T17:29:44
| 2020-03-18T17:30:01
| 248,416,248
| 1
| 1
|
MIT
| 2020-03-19T05:13:37
| 2020-03-19T05:13:36
| null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
"""This problem was asked by Two Sigma.
Using a function rand5() that returns an integer from 1 to 5 (inclusive) with
uniform probability, implement a function rand7() that returns an integer
from 1 to 7 (inclusive).
"""
|
[
"mxcsyounes@gmail.com"
] |
mxcsyounes@gmail.com
|
d994f4b20a182b9c9b4b26dea314bed2f83d5097
|
da52951c32b37aa75765b718707ce08c0a6208d1
|
/ReinforcementLearning/PolicyGradient/PPO/tf2/main.py
|
b3a0d38e4986d6a9da18c87322ee6faa32643f1d
|
[] |
no_license
|
philtabor/Youtube-Code-Repository
|
08c1a0210f80976df50b01a91f1936a7d5c7b302
|
eb3aa9733158a4f7c4ba1fefaa812b27ffd889b6
|
refs/heads/master
| 2023-08-08T05:28:11.712470
| 2023-03-27T16:07:29
| 2023-03-27T16:07:29
| 144,081,173
| 811
| 568
| null | 2023-07-24T20:00:37
| 2018-08-09T00:21:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
import gym
import numpy as np
from agent import Agent
from utils import plot_learning_curve
if __name__ == '__main__':
env = gym.make('CartPole-v0')
N = 20
batch_size = 5
n_epochs = 4
alpha = 0.0003
agent = Agent(n_actions=env.action_space.n, batch_size=batch_size,
alpha=alpha, n_epochs=n_epochs,
input_dims=env.observation_space.shape)
n_games = 300
figure_file = 'plots/cartpole.png'
best_score = env.reward_range[0]
score_history = []
learn_iters = 0
avg_score = 0
n_steps = 0
for i in range(n_games):
observation = env.reset()
done = False
score = 0
while not done:
action, prob, val = agent.choose_action(observation)
observation_, reward, done, info = env.step(action)
n_steps += 1
score += reward
agent.store_transition(observation, action,
prob, val, reward, done)
if n_steps % N == 0:
agent.learn()
learn_iters += 1
observation = observation_
score_history.append(score)
avg_score = np.mean(score_history[-100:])
if avg_score > best_score:
best_score = avg_score
agent.save_models()
print('episode', i, 'score %.1f' % score, 'avg score %.1f' % avg_score,
'time_steps', n_steps, 'learning_steps', learn_iters)
x = [i+1 for i in range(len(score_history))]
plot_learning_curve(x, score_history, figure_file)
|
[
"ptabor@gmail.com"
] |
ptabor@gmail.com
|
351cca2054fb8641c34017b3bc190680a699b824
|
4b44a299bafbd4ca408ce1c89c9fe4a449632783
|
/python3/10_Modules/Parallel_Processing/a_get_cpu_count.py
|
0a0464db866ec3a6c8aa2be9e3d728d2be413a38
|
[] |
no_license
|
umunusb1/PythonMaterial
|
ecd33d32b2de664eaaae5192be7c3f6d6bef1d67
|
1e0785c55ccb8f5b9df1978e1773365a29479ce0
|
refs/heads/master
| 2023-01-23T23:39:35.797800
| 2020-12-02T19:29:00
| 2020-12-02T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
import multiprocessing as mp
result = '''There are {} processors, in number, in this \
computer'''.format(mp.cpu_count())
print(result)
print(dir(mp))
print(mp.current_process())
|
[
"uday3prakash@gmail.com"
] |
uday3prakash@gmail.com
|
08aea1c4cf86277a51c4d590dbf843a9e116acea
|
3ccd609f68016aad24829b8dd3cdbb535fb0ff6d
|
/python/bpy/types/FILEBROWSER_UL_dir.py
|
d242f98ab9b1a289208ea3db9e875d5ed1fb5d58
|
[] |
no_license
|
katharostech/blender_externs
|
79b2eed064fd927e3555aced3e2eb8a45840508e
|
fdf7f019a460de0fe7e62375c1c94f7ab0e9f68d
|
refs/heads/master
| 2020-04-11T14:00:29.393478
| 2018-10-01T00:40:51
| 2018-10-01T00:40:51
| 161,838,212
| 1
| 1
| null | 2018-12-14T20:41:32
| 2018-12-14T20:41:32
| null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
class FILEBROWSER_UL_dir:
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
pass
|
[
"troyedwardsjr@gmail.com"
] |
troyedwardsjr@gmail.com
|
cb3c52836c92de725f4b0b5bc037f530ce63d13a
|
656b431bf7ac23d5593ddf4fb69c29c251d744cb
|
/zen/layer/base/node.py
|
91c5f8a19f460b42f4d3cf942d8f853c60c39140
|
[] |
no_license
|
knighton/zen-0.14
|
2c8e4f0aa2e6c862d4022eb346a619268250273e
|
7936e43a115d00888bf6c523525bf9f3e7a49256
|
refs/heads/master
| 2021-01-21T05:33:01.494392
| 2018-05-17T15:01:30
| 2018-05-17T15:01:30
| 101,927,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,619
|
py
|
from copy import deepcopy
from ..arch.vee import Vee
class Node(Vee):
"""
A node of a neural network.
They consist of input and non-input nodes (Inputs and LayerNodes).
"""
def __init__(self):
self._out_shape = None
self._out_dtype = None
self._out_data = None
self._out_nodes = []
def out_shape(self):
"""
-> shape (must be built)
"""
return self._out_shape
def out_dtype(self):
"""
-> dtype (must be built)
"""
return self._out_dtype
def out_data(self):
"""
-> data (must be forward()'ed)
"""
return self._out_data
def add_out_node(self, node):
"""
node ->
"""
self._out_nodes.append(node)
def out_nodes(self):
"""
-> node
"""
return self._out_nodes
def try_to_build(self):
raise NotImplementedError
def is_built(self):
raise NotImplementedError
def params(self):
raise NotImplementedError
class InteriorNode(Node):
"""
A non-input node (the normal case).
"""
def __init__(self):
super().__init__()
self._in_nodes = None
self._num_ready_in_nodes = 0
def _gather_shapes_dtypes_for_build(self):
assert self._in_nodes, 'Tried to build an internal node with no inputs.'
in_shapes = []
in_dtypes = []
for node in self._in_nodes:
shape = node.out_shape()
if shape is None:
return False, None, None
in_shapes.append(shape)
dtype = node.out_dtype()
if dtype is None:
return False, None, None
in_dtypes.append(dtype)
return True, in_shapes, in_dtypes
def in_nodes(self):
return self._in_nodes
def to_spec_or_specs(self):
raise NotImplementedError
class LayerNode(InteriorNode):
"""
Neural network node wrapping a single layer.
"""
def __init__(self, spec, in_nodes=None):
super().__init__()
if in_nodes:
for node in in_nodes:
node.add_out_node(self)
self._in_nodes = in_nodes
self._spec = spec
self._layer = None
def __call__(self, *in_nodes):
"""
Return a copy of ourself that is connected to the given feed nodes.
This is how graphs are constructed.
"""
assert not self._in_nodes
return LayerNode(deepcopy(self._spec), in_nodes)
def try_to_build(self):
"""
Try to construct the internal layer of a node given the shapes and
dtypes of its input nodes. Tries to build its output nodes.
Returns true if this node could be built (output nodes will fail if not
all inputs are built yet during graph building).
"""
can_build, in_shapes, in_dtypes = self._gather_shapes_dtypes_for_build()
if not can_build:
return False
self._layer, self._out_shape, self._out_dtype = \
self._spec.build_multi_input(in_shapes, in_dtypes)
for node in self._out_nodes:
node.try_to_build()
return True
def is_built(self):
return self._layer is not None
def params(self):
"""
Build the node if not built, then collect the node's trainable
parameters for the optimizer.
"""
assert self._layer, \
'Not all input nodes have been built (the graph is missing an ' + \
'input or inputs).'
return self._layer.params()
def in_node_is_ready(self, is_training):
"""
Receive notification that one of our input nodes has data. If they all
do, perform a forward pass and notify the nodes that we feed into.
"""
assert self._in_nodes, \
'Called in_node_is_ready() on a node with no inputs.'
assert self._layer, \
'Not all input nodes have been built (the graph is missing an ' + \
'input or inputs).'
self._num_ready_in_nodes += 1
if self._num_ready_in_nodes < len(self._in_nodes):
return
xx = []
for node in self._in_nodes:
x = node.out_data()
assert x is not None
xx.append(x)
self._out_data = self._layer.forward_multi_input(xx, is_training)
for node in self._out_nodes:
node.in_node_is_ready(is_training)
self._num_ready_in_nodes = 0
def to_spec_or_specs(self):
return self._spec
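# Illustrative sketch only, not part of the original module: how the
# copy-and-connect wiring described in LayerNode.__call__ and try_to_build()
# is meant to be used. _IdentitySpec, _IdentityLayer and _FakeInput are made-up
# stand-ins; the real library supplies its own input nodes and layer specs.
class _IdentityLayer:
    def forward_multi_input(self, xx, is_training):
        return xx[0]

    def params(self):
        return []


class _IdentitySpec:
    def build_multi_input(self, in_shapes, in_dtypes):
        # returns (layer, out_shape, out_dtype)
        return _IdentityLayer(), in_shapes[0], in_dtypes[0]


class _FakeInput:
    """Duck-typed stand-in for an input node whose shape/dtype are known up front."""

    def __init__(self, shape, dtype):
        self._shape, self._dtype, self._outs = shape, dtype, []

    def add_out_node(self, node):
        self._outs.append(node)

    def out_shape(self):
        return self._shape

    def out_dtype(self):
        return self._dtype

    def try_to_build(self):
        return all(node.try_to_build() for node in self._outs)


# x = _FakeInput((8,), 'float32')
# h = LayerNode(_IdentitySpec())(x)  # __call__ copies the spec and connects it to x
# x.try_to_build()                   # builds h once its input shape/dtype is known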
|
[
"iamknighton@gmail.com"
] |
iamknighton@gmail.com
|
3e6a874a64e7d69cc870d2a47199ffe654c59f9b
|
904e75e2ceff81c18a432fe1b951b683e859cbed
|
/views/console/voucher.py
|
809b9bed03144310d4c914e793626e5bbf9acd22
|
[] |
no_license
|
PUYUP/plutoborn
|
a42c65fa360de41a1236af00b5718948dc1b9940
|
e6b47b7f183fcff60fa803329e11c2e87de560ef
|
refs/heads/master
| 2022-12-05T17:06:10.049472
| 2020-08-19T09:12:45
| 2020-08-19T09:12:45
| 254,116,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
from django.conf import settings
from django.views import View
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models.functions import Coalesce
from django.db.models import Q, F, Sum, Count, Case, When, Value, Subquery, OuterRef, IntegerField
from utils.pagination import Pagination
from utils.generals import get_model
Voucher = get_model('market', 'Voucher')
@method_decorator(login_required, name='dispatch')
class VoucherListView(View):
template_name = 'console/voucher/list.html'
context = dict()
def get(self, request):
vouchers = Voucher.objects \
.annotate(
total_redeem=Coalesce(Count('voucher_redeems'), 0)
).order_by('-total_redeem')
# paginator
page_num = int(self.request.GET.get('p', 0))
paginator = Paginator(vouchers, settings.PAGINATION_PER_PAGE)
try:
vouchers_pagination = paginator.page(page_num + 1)
except PageNotAnInteger:
vouchers_pagination = paginator.page(1)
except EmptyPage:
vouchers_pagination = paginator.page(paginator.num_pages)
pagination = Pagination(request, vouchers, vouchers_pagination, page_num, paginator)
self.context['vouchers'] = vouchers
self.context['vouchers_total'] = vouchers.count()
self.context['vouchers_pagination'] = vouchers_pagination
self.context['pagination'] = pagination
return render(request, self.template_name, self.context)
|
[
"hellopuyup@gmail.com"
] |
hellopuyup@gmail.com
|
1d6c708e713687a606bcec30490c9970a32b2031
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/94/usersdata/203/55258/submittedfiles/mediaLista.py
|
9f8eabcc98947ef4aefb6758c5a6a0a6eab90482
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# -*- coding: utf-8 -*-
n=int(input('tamanho da lista: '))
l=[]
soma=0
for i in range (1,n+1,1):
l.append(float(input('elemento da lista: ')))
for i in range (0,n,1):
soma=soma+l[i]
media=soma/n
print ('%.2f' %l[0])
print ('%.2f' %l[n-1])
print ('%.2f' %media)
print (l)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
f16c623f2284f4fcc342ceffbc101ff396686148
|
59b3dce3c770e70b2406cc1dd623a2b1f68b8394
|
/python_1/lessons/calculations.py
|
9fc441721ed85e47fac26d241c4db2cfd87301c8
|
[] |
no_license
|
patrickbeeson/python-classes
|
04ed7b54fc4e1152a191eeb35d42adc214b08e39
|
b5041e71badd1ca2c013828e3b2910fb02e9728f
|
refs/heads/master
| 2020-05-20T07:17:36.693960
| 2015-01-23T14:41:46
| 2015-01-23T14:41:46
| 29,736,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
print("""---------------
Some Calculations
---------------""")
print(314159e-5)
print(10**6, 1j**2)
print(3 + 2 * 4, 1 / 3)
print("-" * 20)
print((3.14159 * 16) ** 2)
print(3.14159 * 16 ** 2)
print(20 * "-")
print("------------------\nEnd of Calculations\n--------------")
|
[
"patrickbeeson@gmail.com"
] |
patrickbeeson@gmail.com
|
982f09e06da9b91e11bebb4ecf8d383bc704f702
|
a5cf1d2fc478d490df05eb198d1a0fb77fcb0bc9
|
/flask_oauthlib/contrib/client/__init__.py
|
4b777b4430dcbd3daf2326ea063c4e02dca552ae
|
[
"BSD-3-Clause"
] |
permissive
|
ageis/flask-oauthlib
|
516df1a661441cc46c26ab5e9b07fa328066a5f4
|
9414e002505354e8b5b3aa5f54a0889c836aa732
|
refs/heads/master
| 2021-01-05T05:11:59.090723
| 2020-04-19T07:20:23
| 2020-04-19T07:20:23
| 240,891,932
| 1
| 0
|
BSD-3-Clause
| 2020-04-19T07:20:24
| 2020-02-16T12:58:27
| null |
UTF-8
|
Python
| false
| false
| 3,277
|
py
|
import copy
from flask import current_app
from werkzeug.local import LocalProxy
from .application import OAuth1Application, OAuth2Application
__all__ = ['OAuth', 'OAuth1Application', 'OAuth2Application']
class OAuth(object):
"""The extension to integrate OAuth 1.0a/2.0 to Flask applications.
oauth = OAuth(app)
or::
oauth = OAuth()
oauth.init_app(app)
"""
state_key = 'oauthlib.contrib.client'
def __init__(self, app=None):
self.remote_apps = {}
if app is not None:
self.init_app(app)
def init_app(self, app):
app.extensions = getattr(app, 'extensions', {})
app.extensions[self.state_key] = OAuthState()
def add_remote_app(self, remote_app, name=None, **kwargs):
"""Adds remote application and applies custom attributes on it.
If the application instance's name is different from the argument
provided name, or the keyword arguments is not empty, then the
application instance will not be modified but be copied as a
prototype.
:param remote_app: the remote application instance.
:type remote_app: the subclasses of :class:`BaseApplication`
:param kwargs: the overriding attributes for the application instance.
"""
if name is None:
name = remote_app.name
if name != remote_app.name or kwargs:
remote_app = copy.copy(remote_app)
remote_app.name = name
vars(remote_app).update(kwargs)
if not hasattr(remote_app, 'clients'):
remote_app.clients = cached_clients
self.remote_apps[name] = remote_app
return remote_app
def remote_app(self, name, version=None, **kwargs):
"""Creates and adds new remote application.
:param name: the remote application's name.
:param version: '1' or '2', the version code of OAuth protocol.
:param kwargs: the attributes of remote application.
"""
if version is None:
if 'request_token_url' in kwargs:
version = '1'
else:
version = '2'
if version == '1':
remote_app = OAuth1Application(name, clients=cached_clients)
elif version == '2':
remote_app = OAuth2Application(name, clients=cached_clients)
else:
raise ValueError('unknown version %r' % version)
return self.add_remote_app(remote_app, **kwargs)
def __getitem__(self, name):
return self.remote_apps[name]
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
app = self.remote_apps.get(key)
if app:
return app
raise AttributeError('No such app: %s' % key)
class OAuthState(object):
def __init__(self):
self.cached_clients = {}
def get_cached_clients():
"""Gets the cached clients dictionary in current context."""
if OAuth.state_key not in current_app.extensions:
raise RuntimeError('%r is not initialized.' % current_app)
state = current_app.extensions[OAuth.state_key]
return state.cached_clients
cached_clients = LocalProxy(get_cached_clients)
|
[
"me@lepture.com"
] |
me@lepture.com
|
8113e61753b63a1adf848618b5af0bff3890f601
|
eecbf2f570b46e5a890847288144f2df8097d988
|
/awlsim/core/instructions/insn_zr.py
|
f279e701766a8124bf9e436cb8dc38b157639018
|
[] |
no_license
|
ITI/PLCNet
|
8ebb34dc57862abfc3a635fb3cee197601cade71
|
7f2c1a9d3a8a0ca8d8ab9a8027c65bc0ff0db64c
|
refs/heads/master
| 2020-06-10T00:19:14.916423
| 2016-10-01T06:53:38
| 2016-10-01T06:53:38
| 193,533,866
| 2
| 0
| null | 2019-06-24T15:42:51
| 2019-06-24T15:42:50
| null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
# -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_ZR(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_ZR, rawInsn)
self.assertOpCount(1)
def staticSanityChecks(self):
self.ops[0].assertType(AwlOperator.MEM_Z)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
self.cpu.getCounter(self.ops[0].resolve(True).value.byteOffset).run_ZR(s.VKE)
s.OR, s.NER = 0, 0
|
[
"vig2208@gmail.com"
] |
vig2208@gmail.com
|
f45bd2b725edf19a4c9f528650707dc5900d8683
|
83959c80527cd727042bc3467b6e537fca8bef1a
|
/kbengine_stone_assets/scripts/common/tornado/platform/windows.py
|
b1d701de4fcc5ac181dde0a8d77764622db74e77
|
[] |
no_license
|
shanlihou/ttxiaoyouxi
|
696697807cbf9d1fe41fb10fe64f8f29d5bd8864
|
bca20863c4e1b5d6f3f835fee17c700292918a6c
|
refs/heads/master
| 2020-04-26T13:12:13.153761
| 2019-03-03T12:36:04
| 2019-03-03T12:36:04
| 173,572,763
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
# NOTE: win32 support is currently experimental, and not recommended
# for production use.
#from __future__ import absolute_import, division, print_function, with_statement
#import ctypes # type: ignore
#import ctypes.wintypes # type: ignore
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
#SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
#SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
#SetHandleInformation.restype = ctypes.wintypes.BOOL
#HANDLE_FLAG_INHERIT = 0x00000001
def set_close_exec(fd):
# success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
# if not success:
# raise ctypes.WinError()
pass
|
[
"shanlihou@gmail.com"
] |
shanlihou@gmail.com
|
d50db3f0895bfcfe6b6a9eb5f62c99302983871e
|
a29c6e83ae4f9010941d15c8fd4cfc67680bb054
|
/pandas/pandas_sample.py
|
11ec8f3f3066928a773948c21c7d305883c6a906
|
[] |
no_license
|
ym0179/bit_seoul
|
f1ff5faf4ae20fbc8c0e2ed10a005f8bd4b2c2b8
|
14d1fb2752312790c39898fc53a45c1cf427a4d1
|
refs/heads/master
| 2023-02-27T19:52:23.577540
| 2021-02-08T00:30:16
| 2021-02-08T00:30:16
| 311,265,926
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
#Day9
#2020-11-19
import pandas as pd
import numpy as np
from numpy.random import randn
np.random.seed(100)
data = randn(5,4) # 5 rows, 4 columns
print(data)
df = pd.DataFrame(data, index='A B C D E'.split(),
columns='가 나 다 라'.split())
print(df)
data2 = [[1,2,3,4,], [5,6,7,8], [9,10,11,12],
[13,14,15,16], [17,18,19,20]] #list
df2 = pd.DataFrame(data2, index=['A','B','C','D','E'],
columns=['가','나','다','라'])
print(df2)
# 가 나 다 라
# A 1 2 3 4
# B 5 6 7 8
# C 9 10 11 12
# D 13 14 15 16
# E 17 18 19 20
df3 = pd.DataFrame(np.array([[1,2,3],[4,5,6]]))
print(df3)
print("df2['나'] :\n",df2['나']) #2,6,10,14,18
print("df2['나','라'] :\n",df2[['나','라']]) #2,6,10,14,18
#4,8,12,16,20
# print("df2[0] : ", df2[0]) #에러, 컬럼명으로 해줘야 에러 안남
# print("df2.loc['나'] : \n", df2.loc['나']) #에러, loc 행에서만 사용 가능 (행과 함께 사용)
print("df2.iloc[:,2] : \n", df2.iloc[:, 2]) #3,7,11,15,19
# print("df2[:,2] : \n", df2[:, 2]) #에러
#행
print("df2.loc['A'] : \n", df2.loc['A']) #A행 출력
print("df2.loc['A','C'] : \n", df2.loc[['A','C']]) #A, C행 출력
print("df2.iloc[0] : \n", df2.iloc[0]) #A행 출력
print("df2.iloc[0,1] : \n", df2.iloc[[0,2]]) #A, C행 출력
#행렬
print("df2.loc[['A','B'], ['나','다']] : \n",df2.loc[['A','B'], ['나','다']])
#한개의 값만 확인
print("df2.loc['E','다'] : \n",df2.loc['E','다']) #19
print("df2.iloc[4,2] : \n",df2.iloc[4,2]) #19
print("df2.iloc[4][2] : \n",df2.iloc[4][2]) #19
|
[
"ym4766@gmail.com"
] |
ym4766@gmail.com
|
bf56ed2037a8d92ae1cd83b1ca14a15536c85df2
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2385/60677/251112.py
|
a199b651fd4db5634ac382da13966aee61e6f9bc
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
times=int(input())
for i in range(times):
n=int(input())
k=1
answer=1
while n-2*k+2>0:
answer+=n-2*k+2
k+=1
if n==4:
answer=8
print((answer)%(10**9+7))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
a4c56e977fcf8aa0aa8b1d5700eac711f0e99616
|
e1ffebca6a0f185663c779462e3ca27866f557b8
|
/GROUP_project/project/api/migrations/0002_auto_20191204_0429.py
|
9dcf91bc2afb5abac10e0bf7a31e18ff8156c88e
|
[] |
no_license
|
asselyer/Backend2019
|
d8d85d7850261880fe4aeef9092b0a8c7b1b6767
|
ec5931e2bd22ec62e68592a4199c00184f4dacc3
|
refs/heads/master
| 2020-07-24T13:38:21.246351
| 2019-12-04T03:16:27
| 2019-12-04T03:16:27
| 207,944,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,680
|
py
|
# Generated by Django 2.2.3 on 2019-12-03 22:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='postfile',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postcomment',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='users',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='blog',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blogs', to='api.BlogCategory'),
),
migrations.AddField(
model_name='blog',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_blogs', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='postfile',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_documents', to='api.Post'),
),
migrations.AddField(
model_name='postcomment',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_comments', to='api.Post'),
),
migrations.AddField(
model_name='post',
name='blog',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='api.Blog'),
),
migrations.AddField(
model_name='post',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_posts', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='favoritepost',
name='posts',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='api.Post'),
),
]
|
[
"asel.yer98@gmail.com"
] |
asel.yer98@gmail.com
|
af3d099f71d75651e8da95d4362fc5e824ea06bf
|
1886065d10342822b10063cd908a690fccf03d8b
|
/appengine/findit/waterfall/analyze_build_failure_pipeline.py
|
df1014f2a3c07efd6b1e1306d39bff06edb9fa1f
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/chromium-infra_A6Y5
|
26af0dee12f89595ebc6a040210c9f62d8ded763
|
d27ac0b230bedae4bc968515b02927cf9e17c2b7
|
refs/heads/master
| 2023-03-16T15:33:31.015840
| 2017-01-31T19:55:59
| 2017-01-31T20:06:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common import appengine_util
from common.pipeline_wrapper import BasePipeline
from common.pipeline_wrapper import pipeline
from libs import time_util
from model import analysis_status
from model.wf_analysis import WfAnalysis
from waterfall.detect_first_failure_pipeline import DetectFirstFailurePipeline
from waterfall.extract_deps_info_pipeline import ExtractDEPSInfoPipeline
from waterfall.extract_signal_pipeline import ExtractSignalPipeline
from waterfall.flake.trigger_flake_analyses_pipeline import (
TriggerFlakeAnalysesPipeline)
from waterfall.identify_culprit_pipeline import IdentifyCulpritPipeline
from waterfall.pull_changelog_pipeline import PullChangelogPipeline
from waterfall.start_try_job_on_demand_pipeline import (
StartTryJobOnDemandPipeline)
from waterfall.trigger_swarming_tasks_pipeline import (
TriggerSwarmingTasksPipeline)
class AnalyzeBuildFailurePipeline(BasePipeline):
def __init__(self, master_name, builder_name, build_number, build_completed,
force_rerun_try_job):
super(AnalyzeBuildFailurePipeline, self).__init__(
master_name, builder_name, build_number, build_completed,
force_rerun_try_job)
self.master_name = master_name
self.builder_name = builder_name
self.build_number = build_number
def _LogUnexpectedAborting(self, was_aborted):
"""Marks the WfAnalysis status as error, indicating that it was aborted.
Args:
was_aborted (bool): True if the pipeline was aborted, otherwise False.
"""
if not was_aborted:
return
analysis = WfAnalysis.Get(
self.master_name, self.builder_name, self.build_number)
# Heuristic analysis could have already completed, while triggering the
# try job kept failing and led to the abort.
if not analysis.completed:
analysis.status = analysis_status.ERROR
analysis.result_status = None
analysis.put()
def finalized(self):
self._LogUnexpectedAborting(self.was_aborted)
def _ResetAnalysis(self, master_name, builder_name, build_number):
analysis = WfAnalysis.Get(master_name, builder_name, build_number)
analysis.pipeline_status_path = self.pipeline_status_path()
analysis.status = analysis_status.RUNNING
analysis.result_status = None
analysis.start_time = time_util.GetUTCNow()
analysis.version = appengine_util.GetCurrentVersion()
analysis.end_time = None
analysis.put()
# Arguments number differs from overridden method - pylint: disable=W0221
def run(self, master_name, builder_name, build_number, build_completed,
force_rerun_try_job):
self._ResetAnalysis(master_name, builder_name, build_number)
# The yield statements below return PipelineFutures, which allow subsequent
# pipelines to refer to previous output values.
# https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python
# Heuristic Approach.
failure_info = yield DetectFirstFailurePipeline(
master_name, builder_name, build_number)
change_logs = yield PullChangelogPipeline(failure_info)
deps_info = yield ExtractDEPSInfoPipeline(failure_info, change_logs)
signals = yield ExtractSignalPipeline(failure_info)
heuristic_result = yield IdentifyCulpritPipeline(
failure_info, change_logs, deps_info, signals, build_completed)
# Try job approach.
with pipeline.InOrder():
# Swarming rerun.
# Triggers swarming tasks when first time test failure happens.
# This pipeline will run before build completes.
yield TriggerSwarmingTasksPipeline(
master_name, builder_name, build_number, failure_info)
# Checks if first time failures happen and starts a try job if yes.
yield StartTryJobOnDemandPipeline(
master_name, builder_name, build_number, failure_info,
signals, heuristic_result, build_completed, force_rerun_try_job)
# Trigger flake analysis on flaky tests, if any.
yield TriggerFlakeAnalysesPipeline(
master_name, builder_name, build_number)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
350dcd30a907105662e6bda717ac24f31ad8370f
|
2136701f48ad131084b331039d864f85988cf451
|
/spider/.svn/pristine/35/350dcd30a907105662e6bda717ac24f31ad8370f.svn-base
|
c6842a58e1a64674b74afbe2cc40404b328236bd
|
[] |
no_license
|
cuiyulin77/other
|
9d374a47d482f1c3f9ef0f3ac4429487643b04b9
|
c00cafaf7607452966fa523c4d0b04edb7f153e6
|
refs/heads/master
| 2020-05-18T04:24:26.095929
| 2019-04-30T06:37:53
| 2019-04-30T06:37:53
| 184,169,488
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,692
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymysql import *
import re
class WbUserPipeline(object):
def process_item(self, item, spider):
return item
class DBPipeline(object):
def __init__(self):
# connect to the database
self.connect = connect(
# host='47.92.77.18',
host='192.168.3.15',
# host='127.0.0.1',
db='spider',
user='root',
# password='admin8152', # production server
password='root',
port=3306,
charset='utf8'
)
self.cursor = self.connect.cursor()
def process_item(self, item, spider):
# get the sentiment classification
try:
# insert the data
print("*"*100)
user_id = re.match(r'https\:\/\/m\.weibo\.cn\/u\/(\d+)\?uid.*', item['user_url']).group(1)
print('user_id',user_id)
self.cursor.execute(
"INSERT INTO weibo_user(id,summary,user_name,user_id,user_url,fans,followers,get_time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
('0', str(item['summary']),str(item['user_name']),
str(user_id),
str(item['user_url']),
str(item['fans']),
str(item['followers']),
str(item['get_time'])),
)
self.connect.commit()
print('mysql一条数据插入成功')
except Exception as e:
# print the error log when an error occurs
print(e)
return item
|
[
"494658565@qq.com"
] |
494658565@qq.com
|
|
2e67dafe7fac1cbbc5927705e346ad37a6ed6c89
|
fcde32709c62b8ee86da459bb7c8eee52c848118
|
/爬虫1903/day09/Baidu/Baidu/settings.py
|
6b94193878c3f25ccff9e68ecba1f7857d9f4e73
|
[] |
no_license
|
klaus2015/py_base
|
6b92d362c3d7dc0e09205a037f4d580381dac94d
|
ec32c731c1c2f6a0dab87f1d167397e4fa86b8de
|
refs/heads/master
| 2022-07-28T15:49:30.383648
| 2020-05-11T15:31:43
| 2020-05-11T15:31:43
| 261,777,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,181
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for Baidu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Baidu'
SPIDER_MODULES = ['Baidu.spiders']
NEWSPIDER_MODULE = 'Baidu.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Baidu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 20
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Baidu.middlewares.BaiduSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'Baidu.middlewares.BaiduDownloaderMiddleware': 543,
'Baidu.middlewares.RandomUserAgentDownloaderMiddleware':200,
'Baidu.middlewares.RandomProxyDownloaderMiddleware':250,
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'Baidu.pipelines.BaiduPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"598467866@qq.com"
] |
598467866@qq.com
|
93320c27e48d82cb9a176e9aed8825a5e95f31a2
|
cee3e57aaae9eaeb16f696e3cdad5f32c3af6861
|
/evennia/server/portal/mssp.py
|
5ff0a7b319a0e8139ee16b4a057e3b81c4e8bf0c
|
[
"BSD-3-Clause"
] |
permissive
|
Sa1tC/evennia
|
8ef7fae9cbeb2d46bd6cdf5c5482331f9e0846ff
|
1248428d132fde1b975678b53e22c1ca68a73a43
|
refs/heads/master
| 2021-01-23T12:32:03.594263
| 2017-05-22T06:21:25
| 2017-05-22T06:21:25
| 93,164,000
| 0
| 1
| null | 2017-06-02T12:36:55
| 2017-06-02T12:36:55
| null |
UTF-8
|
Python
| false
| false
| 6,861
|
py
|
"""
MSSP - Mud Server Status Protocol
This implements the MSSP telnet protocol as per
http://tintin.sourceforge.net/mssp/. MSSP allows web portals and
listings to have their crawlers find the mud and automatically
extract relevant information about it, such as genre, how many
active players and so on.
"""
from builtins import object
from django.conf import settings
from evennia.utils import utils
MSSP = chr(70)
MSSP_VAR = chr(1)
MSSP_VAL = chr(2)
# try to get the customized mssp info, if it exists.
MSSPTable_CUSTOM = utils.variable_from_module(settings.MSSP_META_MODULE, "MSSPTable", default={})
class Mssp(object):
"""
Implements the MSSP protocol. Add this to a variable on the telnet
protocol to set it up.
"""
def __init__(self, protocol):
"""
initialize MSSP by storing protocol on ourselves and calling
the client to see if it supports MSSP.
Args:
protocol (Protocol): The active protocol instance.
"""
self.protocol = protocol
self.protocol.will(MSSP).addCallbacks(self.do_mssp, self.no_mssp)
def get_player_count(self):
"""
Get number of logged-in players.
Returns:
count (int): The number of players in the MUD.
"""
return str(self.protocol.sessionhandler.count_loggedin())
def get_uptime(self):
"""
Get how long the portal has been online (reloads are not counted).
Returns:
uptime (int): Number of seconds of uptime.
"""
return str(self.protocol.sessionhandler.uptime)
def no_mssp(self, option):
"""
Called when mssp is not requested. This is the normal
operation.
Args:
option (Option): Not used.
"""
self.protocol.handshake_done()
def do_mssp(self, option):
"""
Negotiate all the information.
Args:
option (Option): Not used.
"""
self.mssp_table = {
# Required fields
"NAME": "Evennia",
"PLAYERS": self.get_player_count,
"UPTIME" : self.get_uptime,
# Generic
"CRAWL DELAY": "-1",
"HOSTNAME": "", # current or new hostname
"PORT": ["4000"], # most important port should be last in list
"CODEBASE": "Evennia",
"CONTACT": "", # email for contacting the mud
"CREATED": "", # year MUD was created
"ICON": "", # url to icon 32x32 or larger; <32kb.
"IP": "", # current or new IP address
"LANGUAGE": "", # name of language used, e.g. English
"LOCATION": "", # full English name of server country
"MINIMUM AGE": "0", # set to 0 if not applicable
"WEBSITE": "www.evennia.com",
# Categorisation
"FAMILY": "Custom", # evennia goes under 'Custom'
"GENRE": "None", # Adult, Fantasy, Historical, Horror, Modern, None, or Science Fiction
"GAMEPLAY": "None", # Adventure, Educational, Hack and Slash, None,
# Player versus Player, Player versus Environment,
# Roleplaying, Simulation, Social or Strategy
"STATUS": "Open Beta", # Alpha, Closed Beta, Open Beta, Live
"GAMESYSTEM": "Custom", # D&D, d20 System, World of Darkness, etc. Use Custom if homebrew
"SUBGENRE": "None", # LASG, Medieval Fantasy, World War II, Frankenstein,
# Cyberpunk, Dragonlance, etc. Or None if not available.
# World
"AREAS": "0",
"HELPFILES": "0",
"MOBILES": "0",
"OBJECTS": "0",
"ROOMS": "0", # use 0 if room-less
"CLASSES": "0", # use 0 if class-less
"LEVELS": "0", # use 0 if level-less
"RACES": "0", # use 0 if race-less
"SKILLS": "0", # use 0 if skill-less
# Protocols (set to 1 or 0)
"ANSI": "1",
"GMCP": "0",
"ATCP": "0",
"MCCP": "0",
"MCP": "0",
"MSDP": "0",
"MSP": "0",
"MXP": "0",
"PUEBLO": "0",
"SSL": "1",
"UTF-8": "1",
"ZMP": "0",
"VT100": "0",
"XTERM 256 COLORS": "0",
# Commercial (set to 1 or 0)
"PAY TO PLAY": "0",
"PAY FOR PERKS": "0",
# Hiring (set to 1 or 0)
"HIRING BUILDERS": "0",
"HIRING CODERS": "0",
# Extended variables
# World
"DBSIZE": "0",
"EXITS": "0",
"EXTRA DESCRIPTIONS": "0",
"MUDPROGS": "0",
"MUDTRIGS": "0",
"RESETS": "0",
# Game (set to 1, 0 or one of the given alternatives)
"ADULT MATERIAL": "0",
"MULTICLASSING": "0",
"NEWBIE FRIENDLY": "0",
"PLAYER CITIES": "0",
"PLAYER CLANS": "0",
"PLAYER CRAFTING": "0",
"PLAYER GUILDS": "0",
"EQUIPMENT SYSTEM": "None", # "None", "Level", "Skill", "Both"
"MULTIPLAYING": "None", # "None", "Restricted", "Full"
"PLAYERKILLING": "None", # "None", "Restricted", "Full"
"QUEST SYSTEM": "None", # "None", "Immortal Run", "Automated", "Integrated"
"ROLEPLAYING": "None", # "None", "Accepted", "Encouraged", "Enforced"
"TRAINING SYSTEM": "None", # "None", "Level", "Skill", "Both"
"WORLD ORIGINALITY": "None", # "All Stock", "Mostly Stock", "Mostly Original", "All Original"
}
# update the static table with the custom one
if MSSPTable_CUSTOM:
self.mssp_table.update(MSSPTable_CUSTOM)
varlist = ''
for variable, value in self.mssp_table.items():
if callable(value):
value = value()
if utils.is_iter(value):
for partval in value:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(partval)
else:
varlist += MSSP_VAR + str(variable) + MSSP_VAL + str(value)
# send to crawler by subnegotiation
self.protocol.requestNegotiation(MSSP, varlist)
self.protocol.handshake_done()
|
[
"griatch@gmail.com"
] |
griatch@gmail.com
|
aef3c3624058a9104e4a84e3fdb7e33668a84b8c
|
90d4b790f9a7198760fdbcfad6abd2da851f2f4e
|
/0x0F-python-object_relational_mapping/3-my_safe_filter_states.py
|
b2b27fdbdcc3fbb2c02bd4aa205bc8225158b438
|
[] |
no_license
|
Murega14/holbertonschool-higher_level_programming
|
2817a532d7d6739ed046e350903e394ed1bae0a3
|
f29a4c4e74c01798cb51bfe5160432569a1ca833
|
refs/heads/master
| 2023-03-15T08:22:06.926537
| 2018-09-09T20:46:33
| 2018-09-09T20:46:33
| 572,548,803
| 1
| 0
| null | 2022-11-30T14:11:06
| 2022-11-30T14:10:32
| null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
#!/usr/bin/python3
# Lists all states matching a user-supplied name, using a parameterized query (SQL-injection safe)
def main(args):
# fetch the matching states safely via a parameterized LIKE filter
if len(args) != 5:
raise Exception("need 4 arguments!")
db = MySQLdb.connect(host='localhost',
user=args[1],
passwd=args[2],
db=args[3])
cur = db.cursor()
cur.execute(
"SELECT * FROM states WHERE name LIKE %s ORDER BY id ASC",
(args[4],))
states = cur.fetchall()
for state in states:
print(state)
if __name__ == "__main__":
import sys
import MySQLdb
main(sys.argv)
|
[
"Dkazemian@gmail.com"
] |
Dkazemian@gmail.com
|
c1ee39b1b2a7ca3e916a559da292bc53bfdc5b74
|
017f62ebc7357dc665723a5b4fa75294f31fda8f
|
/lib/jnpr/eznc/resrc/srx/nat/nat_proxy_arp.py
|
10fe27280370f10dad027ec9771f769faed67709
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
cro/py-junos-eznc
|
c2588d9fde7b65ec523c558d741716f3a19621c7
|
4c111476cc8eb7599462379ddf55743ae30bbf5c
|
refs/heads/master
| 2021-01-20T16:36:20.034788
| 2013-11-19T19:17:32
| 2013-11-19T19:17:32
| 14,535,497
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,235
|
py
|
# 3rd-party modules
from lxml.builder import E
# module packages
from ...resource import Resource
from .... import jxml as JXML
class NatProxyArp( Resource ):
"""
[edit security nat proxy-arp interface <if_name> address <ip_prefix>]
Resource namevar:
tuple(if_name, ip_prefix)
Description:
This resource allows you to add/remove proxy-arp entries for NAT. At
this time, there are no managed properties, so you can simply add or
remove entries by the name tuple(if_name, ip_prefix)
For example, to select an entry directly:
entry = NatProxyArp(jdev, ('reth0.213','198.18.11.5'))
Or using the bind mechanism:
jdev.bind(parp=NatProxyArp)
entry = jdev.parp[('reth0.213', '198.18.11.5')]
To create it, you need to use the 'touch' option when invoking
    write() since there are no properties for proxy-arp entries
if not entry.exists:
entry.write(touch=True)
And to remove the same entry:
entry.delete()
"""
def _xml_at_top(self):
return E.security(E.nat(
E('proxy-arp',
E.interface(E.name( self._name[0] ),
E.address(E.name( self._name[1]))
)
)
))
##### -----------------------------------------------------------------------
##### OVERLOADS
##### -----------------------------------------------------------------------
def rename(self, name):
""" UNSUPPORTED """
raise RuntimeError("Unsupported for Resource: %s" % self.__class__.__name__)
##### -----------------------------------------------------------------------
##### XML read
##### -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//proxy-arp/interface')
def _xml_to_py(self, as_xml, to_py ):
Resource._r_has_xml_status( as_xml, to_py )
##### -----------------------------------------------------------------------
##### Resource List, Catalog
##### -- only executed by 'manager' resources
##### -----------------------------------------------------------------------
def _r_list(self):
raise RuntimeError("@@@ NEED TO IMPLEMENT!")
def _r_catalog(self):
raise RuntimeError("@@@ NEED TO IMPLEMENT!")
|
[
"jschulman@juniper.net"
] |
jschulman@juniper.net
|
9e9bd761750fdacff2550f9144c914ddc1e8529c
|
7bead245354e233f76fff4608938bf956abb84cf
|
/test/test_docx_table_cell.py
|
f1aec4840f9d6c266499020f55fa9f2df8b0c8a9
|
[
"Apache-2.0"
] |
permissive
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
5ba499937b9664f37cb2700509a4ba93952e9d6c
|
dba2fe7257229ebdacd266531b3724552c651009
|
refs/heads/master
| 2021-10-28T23:12:42.698951
| 2021-10-18T03:44:49
| 2021-10-18T03:44:49
| 138,449,321
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_table_cell import DocxTableCell # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxTableCell(unittest.TestCase):
"""DocxTableCell unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocxTableCell(self):
"""Test DocxTableCell"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.docx_table_cell.DocxTableCell() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"35204726+Cloudmersive@users.noreply.github.com"
] |
35204726+Cloudmersive@users.noreply.github.com
|
aa5650cfa845d0f8a1a8b2048a907d06c2b3d36d
|
1061216c2c33c1ed4ffb33e6211565575957e48f
|
/python-legacy/test/test_custom_profile_field.py
|
9c780d683beda23dc85ae0a5a0c376b149184f96
|
[] |
no_license
|
MSurfer20/test2
|
be9532f54839e8f58b60a8e4587348c2810ecdb9
|
13b35d72f33302fa532aea189e8f532272f1f799
|
refs/heads/main
| 2023-07-03T04:19:57.548080
| 2021-08-11T19:16:42
| 2021-08-11T19:16:42
| 393,920,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
# coding: utf-8
"""
Zulip REST API
Powerful open source group chat # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.custom_profile_field import CustomProfileField # noqa: E501
from openapi_client.rest import ApiException
class TestCustomProfileField(unittest.TestCase):
"""CustomProfileField unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test CustomProfileField
            include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.custom_profile_field.CustomProfileField() # noqa: E501
if include_optional :
return CustomProfileField(
id = 56,
type = 56,
order = 56,
name = '',
hint = '',
field_data = ''
)
else :
return CustomProfileField(
)
def testCustomProfileField(self):
"""Test CustomProfileField"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"suyash.mathur@research.iiit.ac.in"
] |
suyash.mathur@research.iiit.ac.in
|
b7c41240fa74e52ba4534e26961d3cbf7322a0d6
|
43ed422113d58b27d5012f5ccf405700a46fc0f2
|
/MaskRCNN/model/loss.py
|
eb4cb8a2d3d03b016b3857b3071a40cc1977da99
|
[] |
no_license
|
wprazuch/DeepLearningPlayground
|
99a86945818e8a42e77408369e566b793ac612b9
|
53859fb4fd7bfc314121c85870afabd47627ce73
|
refs/heads/master
| 2022-12-16T05:50:22.757434
| 2020-09-03T09:23:26
| 2020-09-03T09:23:26
| 187,896,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,227
|
py
|
import tensorflow as tf
import tensorflow.keras.backend as K
from utils import batch_pack_graph
def smooth_l1_loss(y_true, y_pred):
"""Implements Smooth-L1 loss.
y_true and y_pred are typically: [N, 4], but could be any shape.
"""
diff = K.abs(y_true - y_pred)
less_than_one = K.cast(K.less(diff, 1.0), 'float32')
loss = (less_than_one * 0.5 * diff ** 2) + (1 - less_than_one) * (diff - 0.5)
return loss
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
"""RPN anchor classifier loss.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
"""
rpn_match = tf.squeeze(rpn_match, -1)
anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
indices = tf.where(K.not_equal(rpn_match, 0))
rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
anchor_class = tf.gather_nd(anchor_class, indices)
loss = K.sparse_categorical_crossentropy(target=anchor_class, output=rpn_class_logits,
from_logits=True)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
configs: the model configs object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
rpn_bbox = tf.gather_nd(rpn_bbox, indices)
batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
target_bbox = batch_pack_graph(target_bbox, batch_counts,
config.IMAGES_PER_GPU)
loss = smooth_l1_loss(target_bbox, rpn_bbox)
loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
return loss
def mrcnn_class_loss_graph(target_class_ids, pred_class_logits, active_class_ids):
"""Loss for the classifier head of Mask RCNN.
target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
padding to fill in the array.
pred_class_logits: [batch, num_rois, num_classes]
active_class_ids: [batch, num_classes]. Has a value of 1 for
classes that are in the dataset of the image, and 0
for classes that are not in the dataset.
"""
# During model building, Keras calls this function with
# target_class_ids of type float32. Unclear why. Cast it
# to int to get around it.
target_class_ids = tf.cast(target_class_ids, 'int64')
pred_class_ids = tf.argmax(pred_class_logits, axis=2)
# TODO: Update this line to work with batch > 1. Right now it assumes all
# images in a batch have the same active_class_ids
pred_active = tf.gather(active_class_ids[0], pred_class_ids)
# Loss
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_class_ids, logits=pred_class_logits
)
loss = loss * pred_active
loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
return loss
def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
"""Loss for Mask R-CNN bounding box refinement.
target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
target_class_ids: [batch, num_rois]. Integer class IDs.
pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
"""
# Reshape to merge batch and roi dimensions for simplicity.
target_class_ids = K.reshape(target_class_ids, (-1,))
target_bbox = K.reshape(target_bbox, (-1, 4))
pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))
# Only positive ROIs contribute to the loss. And only
# the right class_id of each ROI. Get their indices.
positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64
)
indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)
# Gather the deltas (predicted and true) that contribute to loss
target_bbox = tf.gather(target_bbox, positive_roi_ix)
pred_bbox = tf.gather_nd(pred_bbox, indices)
# Smooth-L1 Loss
loss = K.switch(tf.size(target_bbox) > 0,
smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
tf.constant(0.0))
loss = K.mean(loss)
return loss
def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
"""Mask binary cross-entropy loss for the masks head.
target_masks: [batch, num_rois, height, width].
A float32 tensor of values 0 or 1. Uses zero padding to fill array.
target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
with values from 0 to 1.
"""
# Reshape for simplicity. Merge first two dimensions into one.
target_class_ids = K.reshape(target_class_ids, (-1,))
mask_shape = tf.shape(target_masks)
target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
pred_shape = tf.shape(pred_masks)
pred_masks = K.reshape(pred_masks,
(-1, pred_shape[2], pred_shape[3], pred_shape[4]))
pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])
positive_ix = tf.where(target_class_ids > 0)[:, 0]
positive_class_ids = tf.cast(
tf.gather(target_class_ids, positive_ix), tf.int64
)
indices = tf.stack([positive_ix, positive_class_ids], axis=1)
y_true = tf.gather(target_masks, positive_ix)
y_pred = tf.gather_nd(pred_masks, indices)
loss = K.switch(tf.size(y_true) > 0, K.binary_crossentropy(target=y_true, output=y_pred),
tf.constant(0.0))
loss = K.mean(loss)
return loss
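# --- Illustrative check (added; not part of the original module) ---
# smooth_l1_loss is piecewise: 0.5 * diff**2 where |diff| < 1, otherwise
# |diff| - 0.5. Assuming TF2 eager execution and that the imports above
# resolve, the two elements below come out as 0.125 and 1.5.
if __name__ == '__main__':
    example_true = tf.constant([[0.0, 2.0]])
    example_pred = tf.constant([[0.5, 0.0]])
    print(smooth_l1_loss(example_true, example_pred))  # values: [[0.125, 1.5]]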
|
[
"wojciechprazuch3@gmail.com"
] |
wojciechprazuch3@gmail.com
|
00f4f432b42195fe0d5718333d4fea31f17c3546
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/layout-blt/configs/bert_layout_publaynet_config.py
|
c468d18d59fde1f6a87c790cc4dbb6815ec3e80b
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,219
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default Hyperparameter configuration."""
import ml_collections
def get_config():
"""Gets the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
# Exp info
config.dataset_path = "/path/to/publaynet/"
config.dataset = "PubLayNet"
config.vocab_size = 137
config.experiment = "bert_layout"
config.model_class = "bert_layout"
config.image_size = 256
# Training info
config.seed = 0
config.log_every_steps = 100
config.eval_num_steps = 1000
config.max_length = 130
config.batch_size = 64
config.train_shuffle = True
config.eval_pad_last_batch = False
config.eval_batch_size = 64
config.num_train_steps = 100_000
config.checkpoint_every_steps = 5000
config.eval_every_steps = 5000
config.num_eval_steps = 100
# Model info
config.layout_dim = 2
config.dtype = "float32"
config.autoregressive = False
config.shuffle_buffer_size = 10
config.use_vae = True
config.share_embeddings = True
config.num_layers = 4
config.qkv_dim = 512
config.emb_dim = 512
config.mlp_dim = 2048
config.num_heads = 8
config.dropout_rate = 0.1
config.attention_dropout_rate = 0.3
config.restore_checkpoints = True
config.label_smoothing = 0.
config.sampling_method = "top-p"
config.use_vertical_info = False
# Optimizer info
config.optimizer = ml_collections.ConfigDict()
config.optimizer.type = "adam"
config.optimizer.warmup_steps = 4000
config.optimizer.lr = 5e-3
config.optimizer.beta1 = 0.9
config.optimizer.beta2 = 0.98
config.optimizer.weight_decay = 0.01
config.beta_rate = 1 / 20_000
return config
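# --- Usage sketch (added; not part of the original config file) ---
# get_config() returns an ml_collections.ConfigDict, so hyperparameters can
# be read and overridden attribute-style before training starts.
if __name__ == '__main__':
  config = get_config()
  print(config.dataset, config.batch_size, config.optimizer.lr)  # PubLayNet 64 0.005
  config.batch_size = 32  # example override; any field can be adjusted this way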
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
b0852cf85d9083b3a78990c4c4ecb96b24190dc2
|
191d18fae52df2b10fc3c78676612ce0828c1ad8
|
/essentials/multi_server.py
|
b345cfc321697ef93d206779c4ae5ae4b88e165c
|
[
"MIT"
] |
permissive
|
yada-yoda/pollmaster
|
1e44ef42f68bf971e67b75c84842556d2ef2d687
|
c7431d6b952599671c6408209528dceaad19116e
|
refs/heads/master
| 2021-10-19T14:32:24.222515
| 2019-02-21T18:57:07
| 2019-02-21T18:57:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,061
|
py
|
import time
import discord
from essentials.settings import SETTINGS
from utils.paginator import embed_list_paginated
async def get_pre(bot, message):
'''Gets the prefix for a message.'''
if str(message.channel.type) == 'private':
shared_server_list = await get_servers(bot, message)
if shared_server_list.__len__() == 0:
return 'pm!'
elif shared_server_list.__len__() == 1:
return await get_server_pre(bot, shared_server_list[0])
else:
# return a tuple of all prefixes.. this will check them all!
return tuple([await get_server_pre(bot, s) for s in shared_server_list])
else:
return await get_server_pre(bot, message.server)
async def get_server_pre(bot, server):
'''Gets the prefix for a server.'''
try:
#result = await bot.db.config.find_one({'_id': str(server.id)})
result = bot.pre[str(server.id)]
except AttributeError:
return 'pm!'
if not result: #or not result.get('prefix'):
return 'pm!'
return result #result.get('prefix')
async def get_servers(bot, message, short=None):
'''Get best guess of relevant shared servers'''
if message.server is None:
list_of_shared_servers = []
for s in bot.servers:
if message.author.id in [m.id for m in s.members]:
list_of_shared_servers.append(s)
if short is not None:
query = bot.db.polls.find({'short': short})
if query is not None:
server_ids_with_short = [poll['server_id'] async for poll in query]
servers_with_short = [bot.get_server(x) for x in server_ids_with_short]
shared_servers_with_short = list(set(servers_with_short).intersection(set(list_of_shared_servers)))
if shared_servers_with_short.__len__() >= 1:
return shared_servers_with_short
# do this if no shared server with short is found
if list_of_shared_servers.__len__() == 0:
return []
else:
return list_of_shared_servers
else:
return [message.server]
async def ask_for_server(bot, message, short=None):
server_list = await get_servers(bot, message, short)
if server_list.__len__() == 0:
if short == None:
await bot.say(
                'I could not find a common server where we can see each other. If you think this is an error, please contact the developer.')
else:
await bot.say(f'I could not find a server where the poll {short} exists that we both can see.')
return None
elif server_list.__len__() == 1:
return server_list[0]
else:
text = 'I\'m not sure which server you are referring to. Please tell me by typing the corresponding number.\n'
i = 1
for name in [s.name for s in server_list]:
text += f'\n**{i}** - {name}'
i += 1
embed = discord.Embed(title="Select your server", description=text, color=SETTINGS.color)
server_msg = await bot.send_message(message.channel, embed=embed)
valid_reply = False
nr = 1
while valid_reply == False:
reply = await bot.wait_for_message(timeout=60, author=message.author)
if reply and reply.content:
if reply.content.startswith(await get_pre(bot, message)):
# await bot.say('You can\'t use bot commands while I am waiting for an answer.'
# '\n I\'ll stop waiting and execute your command.')
return False
if str(reply.content).isdigit():
nr = int(reply.content)
if 0 < nr <= server_list.__len__():
valid_reply = True
return server_list[nr - 1]
async def ask_for_channel(bot, server, message):
# if performed from a channel, return that channel
if str(message.channel.type) == 'text':
return message.channel
# if exactly 1 channel, return it
channel_list = [c for c in server.channels if str(c.type) == 'text']
if channel_list.__len__() == 1:
return channel_list[0]
# if no channels, display error
if channel_list.__len__() == 0:
embed = discord.Embed(title="Select a channel", description='No text channels found on this server. Make sure I can see them.', color=SETTINGS.color)
await bot.say(embed=embed)
return False
# otherwise ask for a channel
i = 1
text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
for name in [c.name for c in channel_list]:
to_add = f'\n**{i}** - {name}'
# check if length doesn't exceed allowed maximum or split it into multiple messages
if text.__len__() + to_add.__len__() > 2048:
embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
await bot.say(embed=embed)
text = 'Polls are bound to a specific channel on a server. Please select the channel for this poll by typing the corresponding number.\n'
else:
text += to_add
i += 1
embed = discord.Embed(title="Select a channel", description=text, color=SETTINGS.color)
await bot.say(embed=embed)
valid_reply = False
nr = 1
while valid_reply == False:
reply = await bot.wait_for_message(timeout=60, author=message.author)
if reply and reply.content:
if reply.content.startswith(await get_pre(bot, message)):
# await bot.say('You can\'t use bot commands while I am waiting for an answer.'
# '\n I\'ll stop waiting and execute your command.')
return False
if str(reply.content).isdigit():
nr = int(reply.content)
if 0 < nr <= channel_list.__len__():
valid_reply = True
return channel_list[nr - 1]
|
[
"matnad@gmail.com"
] |
matnad@gmail.com
|
921548cdfb11ada7eb5d4be07398294bf09ce197
|
b9963ffb80aad7e057bc375edb85ac7ed5a837d0
|
/adventofcode2017/03b.py
|
44f43305774184f644e62bce54dfc526c453e223
|
[
"MIT"
] |
permissive
|
matslindh/codingchallenges
|
a2db9f4579e9f35189f5cdf74590863cf84bdf95
|
a846e522f7a31e988c470cda87955ee3ef20a274
|
refs/heads/main
| 2022-12-23T15:56:19.776354
| 2022-12-15T21:03:37
| 2022-12-15T21:03:37
| 76,491,177
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
from itertools import repeat
from math import floor
map = []
s_y = s_x = 1001
for y in range(0, s_y):
map.append(list(repeat(0, s_x)))
x = y = floor(s_x/2)
map[y][x] = 1
x += 1
dir = 'R'
written = 0
while written <= 289326:
if dir == 'R':
if not map[y-1][x]:
dir = 'U'
else:
x += 1
elif dir == 'U':
if not map[y][x-1]:
dir = 'L'
else:
y -= 1
elif dir == 'L':
if not map[y+1][x]:
dir = 'D'
else:
x -= 1
elif dir == 'D':
if not map[y][x+1]:
dir = 'R'
else:
y += 1
written = map[y-1][x-1] + map[y-1][x] + map[y-1][x+1] + \
map[y][x-1] + map[y][x+1] + \
map[y+1][x-1] + map[y+1][x] + map[y+1][x+1]
print(dir, x, y, written)
map[y][x] = written
|
[
"mats@lindh.no"
] |
mats@lindh.no
|
f829374ecf93d80a724d38e00dff9ecc2cb9c16b
|
f68065baf489013c926dcfea9994878716d19586
|
/accounts/views.py
|
323deb2d9a062d75f066d39db1854285279ddd21
|
[] |
no_license
|
groyce/pots
|
06667fdc686b74a897c42879cbed5803e9efb154
|
ac839943c84c3135cb4596a8f734e4a061086e10
|
refs/heads/master
| 2020-04-10T01:42:55.863071
| 2018-12-06T19:47:18
| 2018-12-06T19:47:18
| 160,723,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,905
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm
from django.contrib.auth.decorators import login_required
from .models import Profile
def user_login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
user = authenticate(request,
username=cd['username'],
password=cd['password'])
if user is not None:
if user.is_active:
login(request, user)
return HttpResponse('Authenticated '\
'successfully')
else:
return HttpResponse('Disabled account')
else:
return HttpResponse('Invalid login')
else:
form = LoginForm()
return render(request, 'accounts/login.html', {'form': form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(instance=request.user.profile)
return render(request,
'accounts/edit.html',
{'user_form': user_form,
'profile_form': profile_form})
@login_required
def dashboard(request):
return render(request,
'accounts/dashboard.html',
{'section': 'dashboard'})
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data['password'])
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
return render(request,
'accounts/register_done.html',
{'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request,
'accounts/register.html',
{'user_form': user_form})
|
[
"groyce@unomaha.edu"
] |
groyce@unomaha.edu
|
8cd85855d175d322e73f636de7aed0b6850bdf52
|
2f233b31ea7ffefad4b901b561f341fabe3bbb1f
|
/2017/02a.py
|
77f9ee8c4d1e176ea1331fdbdd314eff205802e3
|
[
"MIT"
] |
permissive
|
cz-fish/advent-of-code
|
066b63c3ac2e3b13bf88ae86843a7a9a7b687e96
|
ecbcef544e8d89ec019464811760ce86f84dbc6e
|
refs/heads/master
| 2023-08-03T19:41:23.186666
| 2023-03-14T08:59:04
| 2023-03-14T08:59:04
| 226,355,674
| 0
| 0
|
MIT
| 2023-07-20T02:51:13
| 2019-12-06T15:17:10
|
Python
|
UTF-8
|
Python
| false
| false
| 593
|
py
|
#!/usr/bin/env python3
grid = []
with open('input02.txt', 'rt') as f:
for ln in f.readlines():
grid.append([int(x) for x in ln.strip().split('\t')])
print(sum([max(l) - min(l) for l in grid]))
print('-----')
s = 0
for ln in grid:
srt = sorted(ln)
stop = False
for i in range(len(srt) - 1):
x = srt[i]
if x == 0:
continue
for j in range(i+1, len(srt)):
y = srt[j]
if y // x * x == y:
s += y // x
stop = True
break
if stop:
break
print(s)
|
[
"filip.simek@gmail.com"
] |
filip.simek@gmail.com
|
f6066d060c195e6f9ef837b859b666ab4f30bdb8
|
096167807fa625681beae7e25919357c90b89e75
|
/emails/models.py
|
1fb86f349ab69c1489f2ef26d7c95be401ff5b2d
|
[] |
no_license
|
bussiere/Sumomo
|
c849484fbae37490998bcc44e232bf6a252fe9d7
|
ac3efc46014e66e193c5f852d121a25dd0a9ec5e
|
refs/heads/master
| 2021-01-19T11:34:42.645970
| 2012-08-31T04:15:32
| 2012-08-31T04:15:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
from django.db import models
# Create your models here.
class Contact(models.Model):
Emails = models.TextField(null=True, blank=True)
class Email(models.Model):
Sender = models.ForeignKey("Contact",related_name="Sender", null=True, blank=True)
Recepter = models.ManyToManyField("Contact", related_name="Recepter",null=True, blank=True)
Title = models.TextField(null=True, blank=True)
Date = models.DateField(null=True, blank=True)
Content = models.TextField(null=True, blank=True)
File = models.ManyToManyField("attachments.File", null=True, blank=True)
Tag = models.ManyToManyField("tags.Tag", null=True, blank=True)
|
[
"bussiere@gmail.com"
] |
bussiere@gmail.com
|
1c9832b0b85c1b52d6843f79ec2dcb1fa84e81b1
|
68ab00c77312827e522151e6e9f2fff166e85b9c
|
/mypy_boto3_builder/structures/collection.py
|
41b5f2550bb9a33bdb6cd53825ecc814e7734f48
|
[
"MIT"
] |
permissive
|
pyto86pri/mypy_boto3_builder
|
2cdfb3ed55ea1ff23cdffd5a9ee5400e71562450
|
e8132dc4632430e0abd4cd330af51a8b1c82028f
|
refs/heads/master
| 2023-01-25T04:06:11.174287
| 2020-12-03T23:39:06
| 2020-12-03T23:39:06
| 319,283,736
| 0
| 0
|
MIT
| 2020-12-07T10:29:52
| 2020-12-07T10:29:51
| null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
"""
Boto3 ServiceResource or Resource collection.
"""
from typing import Set
from mypy_boto3_builder.import_helpers.import_string import ImportString
from mypy_boto3_builder.structures.class_record import ClassRecord
from mypy_boto3_builder.type_annotations.external_import import ExternalImport
from mypy_boto3_builder.type_annotations.fake_annotation import FakeAnnotation
class Collection(ClassRecord):
"""
Boto3 ServiceResource or Resource collection.
"""
def __init__(
self,
name: str,
attribute_name: str,
parent_name: str,
type_annotation: FakeAnnotation,
docstring: str = "",
):
super().__init__(
name=name,
use_alias=True,
docstring=docstring,
bases=[
ExternalImport(
source=ImportString("boto3", "resources", "collection"),
name="ResourceCollection",
)
],
)
self.attribute_name = attribute_name
self.parent_name = parent_name
self.type_annotation = type_annotation
def get_types(self) -> Set[FakeAnnotation]:
types = super().get_types()
types.update(self.type_annotation.get_types())
return types
|
[
"volshebnyi@gmail.com"
] |
volshebnyi@gmail.com
|
97876c1143af3c1bbcf63ea5db171555c18fc239
|
242086b8c6a39cbc7af3bd7f2fd9b78a66567024
|
/python/PP4E-Examples-1.4/Examples/PP4E/Gui/Intro/gui3.py
|
6617d3e8edd2b088131c50e73653265dc000e795
|
[] |
no_license
|
chuzui/algorithm
|
7537d0aa051ac4cbe9f6a7ca9a3037204803a650
|
c3006b24c4896c1242d3ceab43ace995c94f10c8
|
refs/heads/master
| 2021-01-10T13:05:30.902020
| 2015-09-27T14:39:02
| 2015-09-27T14:39:02
| 8,404,397
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
import sys
from tkinter import *
def quit(): # a custom callback handler
print('Hello, I must be going...') # kill windows and process
sys.exit()
widget = Button(None, text='Hello event world', command=quit)
widget.pack()
widget.mainloop()
|
[
"zui"
] |
zui
|
4fd2db085bebdf0fb2594d570603ecce95d71f50
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03227/s459999028.py
|
03236a4d5fabd077163769a4c50f0ed805cccd94
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
def main():
s = input().rstrip()
if len(s) == 2:
print(s)
else:
print(s[::-1])
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d807abdc220d8649a4f546bf8715b4be597aec77
|
5a71ca1f5c964f803350e3c1238cb48986db565c
|
/coinlibbitfinex/tests/test_bitfinex_streamapi.py
|
25331ab7bbc5c1144c8083305e500db7203b9b85
|
[] |
no_license
|
tetocode/coinliball
|
fd644cbc16039ecad7e43228ea4e287ead5c8e5f
|
41ebbac13c1fbba98aedaa766b9a505cb157f374
|
refs/heads/master
| 2022-09-28T21:58:08.130006
| 2020-06-04T03:00:56
| 2020-06-04T03:00:56
| 269,247,318
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,491
|
py
|
import time
from queue import Queue, Empty
import pytest
from coinlib.datatypes.streamdata import StreamData
from coinlibbitbankcc.streamapi import StreamApi
WAIT = 3
N = 10
def test_subscribe(stream_api: StreamApi):
xrp_usd_params = {
'event': 'subscribe',
'channel': 'book',
'pair': 'XRPUSD',
'prec': 'P0',
'freq': 'F0',
'len': '25',
}
xrp_btc_params = xrp_usd_params.copy()
xrp_btc_params['pair'] = 'XRPBTC'
q = Queue()
stream_api.on_raw_data = q.put
stream_api.subscribe(('xrp_usd', xrp_usd_params))
stream_api.subscribe(('xrp_btc', xrp_btc_params))
keys = set()
time.sleep(1)
for _ in range(N):
d: StreamData = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_usd', 'xrp_btc'}
stream_api.unsubscribe('xrp_usd')
time.sleep(1)
for _ in range(q.qsize() + N):
q.get(timeout=WAIT)
keys = set()
for _ in range(q.qsize() + N):
d = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_btc'}
stream_api.unsubscribe('xrp_btc')
with pytest.raises(Empty):
for _ in range(q.qsize() + N):
q.get(timeout=WAIT)
# re-subscribe
stream_api.subscribe(('xrp_usd', xrp_usd_params), ('xrp_btc', xrp_btc_params))
keys = set()
for _ in range(N):
d = q.get(timeout=WAIT)
k = d.key
keys.add(k)
assert keys == {'xrp_usd', 'xrp_btc'}
|
[
"_"
] |
_
|
d8bef9d2257e646945921eef2184ee0089672dc5
|
dabc9c7ec7cce125a12c6243ff67fd91e620d636
|
/tap/line.py
|
2784be13d982350342f9cef81eb316a081153234
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
Mark-E-Hamilton/tappy
|
7634209c2862c9e837b58602d4b59636fd9a8e89
|
62c1a4ef1d9e724d3c7bbb31361c17c3bf071d04
|
refs/heads/master
| 2021-01-15T09:04:09.813683
| 2016-03-21T04:51:45
| 2016-03-21T04:51:45
| 53,630,217
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,921
|
py
|
# Copyright (c) 2016, Matt Layman
class Line(object):
"""Base type for TAP data.
TAP is a line based protocol. Thus, the most primitive type is a line.
"""
@property
def category(self):
raise NotImplementedError
class Result(Line):
"""Information about an individual test line."""
def __init__(
self, ok, number=None, description='', directive=None,
diagnostics=None):
self._ok = ok
if number:
self._number = int(number)
else:
# The number may be an empty string so explicitly set to None.
self._number = None
self._description = description
self.directive = directive
self.diagnostics = diagnostics
@property
def category(self):
""":returns: ``test``"""
return 'test'
@property
def ok(self):
"""Get the ok status.
:rtype: bool
"""
return self._ok
@property
def number(self):
"""Get the test number.
:rtype: int
"""
return self._number
@property
def description(self):
"""Get the description."""
return self._description
@property
def skip(self):
"""Check if this test was skipped.
:rtype: bool
"""
return self.directive.skip
@property
def todo(self):
"""Check if this test was a TODO.
:rtype: bool
"""
return self.directive.todo
def __str__(self):
is_not = ''
if not self.ok:
is_not = 'not '
directive = ''
if self.directive is not None:
directive = ' # {0}'.format(self.directive.text)
diagnostics = ''
if self.diagnostics is not None:
diagnostics = '\n' + self.diagnostics.rstrip()
return "{0}ok {1} - {2}{3}{4}".format(
is_not, self.number, self.description, directive, diagnostics)
class Plan(Line):
"""A plan line to indicate how many tests to expect."""
def __init__(self, expected_tests, directive=None):
self._expected_tests = expected_tests
self.directive = directive
@property
def category(self):
""":returns: ``plan``"""
return 'plan'
@property
def expected_tests(self):
"""Get the number of expected tests.
:rtype: int
"""
return self._expected_tests
@property
def skip(self):
"""Check if this plan should skip the file.
:rtype: bool
"""
return self.directive.skip
class Diagnostic(Line):
"""A diagnostic line (i.e. anything starting with a hash)."""
def __init__(self, text):
self._text = text
@property
def category(self):
""":returns: ``diagnostic``"""
return 'diagnostic'
@property
def text(self):
"""Get the text."""
return self._text
class Bail(Line):
"""A bail out line (i.e. anything starting with 'Bail out!')."""
def __init__(self, reason):
self._reason = reason
@property
def category(self):
""":returns: ``bail``"""
return 'bail'
@property
def reason(self):
"""Get the reason."""
return self._reason
class Version(Line):
"""A version line (i.e. of the form 'TAP version 13')."""
def __init__(self, version):
self._version = version
@property
def category(self):
""":returns: ``version``"""
return 'version'
@property
def version(self):
"""Get the version number.
:rtype: int
"""
return self._version
class Unknown(Line):
"""A line that represents something that is not a known TAP line.
This exists for the purpose of a Null Object pattern.
"""
@property
def category(self):
""":returns: ``unknown``"""
return 'unknown'
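# --- Usage sketch (added; not part of the original module) ---
# A Result renders back to a TAP test line via __str__; the category property
# is what downstream parsers switch on.
if __name__ == '__main__':
    line = Result(ok=True, number=1, description='parses a plan line')
    print(line.category)  # test
    print(line)           # ok 1 - parses a plan line
    print(Plan(expected_tests=3).category)  # plan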
|
[
"matthewlayman@gmail.com"
] |
matthewlayman@gmail.com
|
b11986b3974295a315c63bf1ec08b07e1e0e3087
|
dde9442399c78414c05f7f36803c861638065ca3
|
/Multidimensional-Lists-Exercise/Radioactive-Mutant-Vampire-Bunnies.py
|
a22c9f63fe0ef1c68063385ce0f936bf2bfc334d
|
[] |
no_license
|
Vigyrious/python_advanced
|
6778eed9e951b5a11b22f6c6d8ea5b160c3aa00d
|
67db470e78b194aea1f9a35283d5a88b0f6ab94c
|
refs/heads/main
| 2023-03-23T12:24:59.688699
| 2021-03-12T20:53:04
| 2021-03-12T20:53:04
| 347,192,305
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,396
|
py
|
row, col = map(int, input().split(" "))
matrix = []
[matrix.append(list(input())) for _ in range(row)]
movements = list(input())
player_row, player_col = [[row_index,col_index] for row_index in range(row) for col_index in range(col) if matrix[row_index][col_index] == "P"][0]
is_dead = False
has_won = False
while not is_dead and not has_won:
bunnies = [[bunny_row, bunny_col] for bunny_row in range(row) for bunny_col in range(col) if matrix[bunny_row][bunny_col] == "B"]
current_movement = movements.pop(0)
if current_movement == "U":
if player_row-1 in range(row):
if matrix[player_row-1][player_col] == "B":
player_row -= 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row - 1][player_col] = "P"
player_row -= 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "D":
if player_row+1 in range(row):
if matrix[player_row+1][player_col] == "B":
player_row += 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row + 1][player_col] = "P"
player_row += 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "L":
if player_col-1 in range(col):
if matrix[player_row][player_col - 1] == "B":
player_col -= 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row][player_col - 1] = "P"
player_col -= 1
else:
matrix[player_row][player_col] = "."
has_won = True
elif current_movement == "R":
if player_col+1 in range(col):
if matrix[player_row][player_col + 1] == "B":
player_col += 1
matrix[player_row][player_col] = "B"
is_dead = True
else:
matrix[player_row][player_col] = "."
matrix[player_row][player_col + 1] = "P"
player_col += 1
else:
matrix[player_row][player_col] = "."
has_won = True
for bunny in bunnies:
bunny_row, bunny_col = bunny
if bunny_row+1 in range(row):
if matrix[bunny_row+1][bunny_col] == "P":
is_dead = True
matrix[bunny_row + 1][bunny_col] = "B"
if bunny_row-1 in range(row):
if matrix[bunny_row-1][bunny_col] == "P":
is_dead = True
matrix[bunny_row - 1][bunny_col] = "B"
if bunny_col + 1 in range(col):
if matrix[bunny_row][bunny_col+1] == "P":
is_dead = True
matrix[bunny_row][bunny_col+1] = "B"
if bunny_col - 1 in range(col):
if matrix[bunny_row][bunny_col-1] == "P":
is_dead = True
matrix[bunny_row][bunny_col-1] = "B"
[print(''.join(sub)) for sub in matrix]
print(f"won: {player_row} {player_col}") if has_won else print(f"dead: {player_row} {player_col}")
|
[
"73179295+Vigyrious@users.noreply.github.com"
] |
73179295+Vigyrious@users.noreply.github.com
|
369add1f2e8ed2f7a86b91b166f88feef21733e3
|
63b79eb44cf682ece74be1fc866f7651837db448
|
/powerplay/models/game_content_media.py
|
cc2654fda4508741a4901f39caab2b020b8b674c
|
[] |
no_license
|
bclark86/powerplay-py
|
c8cc4df8acd9ada91299706b7a7113ab9c963645
|
584d754629936a93d95157356ff806a5c68438dc
|
refs/heads/main
| 2023-07-19T04:23:16.510338
| 2021-09-02T13:17:12
| 2021-09-02T13:17:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
# coding: utf-8
"""
NHL API
Documenting the publicly accessible portions of the NHL API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GameContentMedia(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'epg': 'list[AnyOfGameContentMediaEpgItems]',
'milestones': 'GameContentMediaMilestones'
}
attribute_map = {
'epg': 'epg',
'milestones': 'milestones'
}
def __init__(self, epg=None, milestones=None): # noqa: E501
"""GameContentMedia - a model defined in Swagger""" # noqa: E501
self._epg = None
self._milestones = None
self.discriminator = None
if epg is not None:
self.epg = epg
if milestones is not None:
self.milestones = milestones
@property
def epg(self):
"""Gets the epg of this GameContentMedia. # noqa: E501
:return: The epg of this GameContentMedia. # noqa: E501
:rtype: list[AnyOfGameContentMediaEpgItems]
"""
return self._epg
@epg.setter
def epg(self, epg):
"""Sets the epg of this GameContentMedia.
:param epg: The epg of this GameContentMedia. # noqa: E501
:type: list[AnyOfGameContentMediaEpgItems]
"""
self._epg = epg
@property
def milestones(self):
"""Gets the milestones of this GameContentMedia. # noqa: E501
:return: The milestones of this GameContentMedia. # noqa: E501
:rtype: GameContentMediaMilestones
"""
return self._milestones
@milestones.setter
def milestones(self, milestones):
"""Sets the milestones of this GameContentMedia.
:param milestones: The milestones of this GameContentMedia. # noqa: E501
:type: GameContentMediaMilestones
"""
self._milestones = milestones
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GameContentMedia, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GameContentMedia):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
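# --- Usage sketch (added; not part of the generated model) ---
# The swagger-generated model serializes through to_dict()/to_str(); an empty
# epg list and a missing milestones object round-trip as shown.
if __name__ == '__main__':
    media = GameContentMedia(epg=[], milestones=None)
    print(media.to_dict())  # {'epg': [], 'milestones': None}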
|
[
"saiem.gilani@gmail.com"
] |
saiem.gilani@gmail.com
|
d7b4e049f95736c4a3a270a0a6e326a8bc7e03d5
|
887b9fd5f4fd4b9448f32750788b138b2e94be3e
|
/stock/futu/import_requests _income.py
|
6411f1232d8b5870521859d6a0da9b07f5f729fa
|
[] |
no_license
|
hong0396/hotwind_git
|
8fa11b3bc46aadd0b83b297cb6c6919102b7b920
|
544d984d8a8cdc42b422792a5064d19d24e0c831
|
refs/heads/master
| 2020-04-04T01:11:50.010424
| 2018-11-03T07:24:59
| 2018-11-03T07:24:59
| 136,184,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
import requests
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Connection': 'keep-alive',
'Cookie': 'UM_distinctid=165fa9285fb762-07c06f613d5cac-8383268-e1000-165fa9285fc20a; cipher_device_id=1537507232150902; tgw_l7_route=8d34ab350eb9a9772a5a0c377f34d47d',
'Host': 'finance.futunn.com',
'Origin': 'https://www.futunn.com',
'Referer': 'https://www.futunn.com/quote/stock-info?m=us&code=CYTXW&type=finance_analyse',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
url='https://finance.futunn.com/api/finance/balance-sheet?code=CYTXW&label=us&quarter=0&page=0'
r = requests.get(url,headers=headers).json()
print(r.get("data").get("list"))
print(r.get("data").get("pages"))
|
[
"hong0396@126.com"
] |
hong0396@126.com
|
7db05f705d72bdf87180f6a7bff371d915d8b61e
|
299e5934971f9de638692e2667d6e270bcab5cbd
|
/214.最短回文串.py
|
fd576408b90eb365d8d4759abcade422cdf7f582
|
[] |
no_license
|
ycj123/Leetcode-Python3
|
14bcd6c9f4d26191d5d40c77e923df4d0be4c0e5
|
1593960cdf2655ef1dcf68e3517e7121670c6ac3
|
refs/heads/master
| 2022-12-16T23:12:19.326702
| 2020-09-18T00:17:45
| 2020-09-18T00:17:45
| 295,302,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
#
# @lc app=leetcode.cn id=214 lang=python3
#
# [214] Shortest Palindrome
#
# https://leetcode-cn.com/problems/shortest-palindrome/description/
#
# algorithms
# Hard (36.30%)
# Likes: 262
# Dislikes: 0
# Total Accepted: 23.3K
# Total Submissions: 64.2K
# Testcase Example: '"aacecaaa"'
#
# Given a string s, you may convert it into a palindrome by adding characters
# in front of it. Find and return the shortest palindrome you can create by
# performing this transformation.
#
# Example 1:
#
# Input: "aacecaaa"
# Output: "aaacecaaa"
#
#
# Example 2:
#
# Input: "abcd"
# Output: "dcbabcd"
#
#
# @lc code=start
class Solution:
def shortestPalindrome(self, s: str) -> str:
r = s[::-1]
for i in range(len(s) + 1):
if s.startswith(r[i:]):
return r[:i] + s
# @lc code=end
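# --- Quick check against the examples above (added for illustration) ---
if __name__ == '__main__':
    solution = Solution()
    print(solution.shortestPalindrome("aacecaaa"))  # aaacecaaa
    print(solution.shortestPalindrome("abcd"))      # dcbabcd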
|
[
"yangchijiang@icloud.com"
] |
yangchijiang@icloud.com
|
ec7acf98f9484508ac4aef0ff75457eae8bd99f0
|
f05acf7451fe58b64ec11744e8afddf142014efa
|
/crowdsorter/views/_session.py
|
dd75bd5c8d2a06dddfad4e0d7a5a23f7570d19a0
|
[
"MIT"
] |
permissive
|
iCodeIN/crowdsorter
|
899ac58b1df43ca134d3f966dcf2ec1c4a49e0df
|
1c847f1f0284fc810ec1f2dd501acb4dbfa16bbb
|
refs/heads/master
| 2023-03-26T16:09:04.914897
| 2020-11-11T18:25:55
| 2020-11-11T18:25:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,337
|
py
|
from flask import session
VERSION = 2 # increment when session logic changes to clear sessions
VOTED_NAMES = f"voted-names:{VERSION}:"
SKIPPED_NAMES = f"skipped-names:{VERSION}:"
VIEWED_PAIRS = f"viewed-pairs:{VERSION}:"
def get_voted_names(code):
return _get(VOTED_NAMES, code)
def set_voted_names(code, names):
_set(VOTED_NAMES, code, names)
def add_voted_name(code, name):
names = get_voted_names(code)
if name not in names:
names.append(name)
set_voted_names(code, names)
def get_skipped_names(code):
return _get(SKIPPED_NAMES, code)
def set_skipped_names(code, names):
_set(SKIPPED_NAMES, code, names)
def add_skipped_name(code, name):
names = get_skipped_names(code)
if name not in names:
names.append(name)
set_skipped_names(code, names)
def get_viewed_pairs(code):
return _get(VIEWED_PAIRS, code)
def set_viewed_pairs(code, pairs):
_set(VIEWED_PAIRS, code, pairs)
def add_viewed_pair(code, pair):
pairs = get_viewed_pairs(code)
if pair not in pairs:
pairs.append(pair)
set_viewed_pairs(code, pairs)
def _get(prefix, code):
key = prefix + code
value = session.get(key) or []
return value
def _set(prefix, code, value):
key = prefix + code
session[key] = value
session.permanent = True
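# --- Usage sketch (added; not part of the original module) ---
# The helpers need an active Flask request context so that `session` is
# available; the secret key below is a hypothetical stand-in.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = 'dev-only-secret'  # hypothetical; real apps configure this elsewhere
    with app.test_request_context('/'):
        add_voted_name('abc', 'Alice')
        add_voted_name('abc', 'Bob')
        print(get_voted_names('abc'))  # ['Alice', 'Bob']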
|
[
"jacebrowning@gmail.com"
] |
jacebrowning@gmail.com
|
69aa022e185b5ec3bb7d2f6da610e01aedc92957
|
fba1ae1672a770846eb219e4e092ba4c162aec40
|
/2.py
|
01fbf8c10fb3569d0961749560f345f35e124f7d
|
[] |
no_license
|
peterbe/optimize-titles.json
|
9272ad39d7565c448dce2b22a3d844ef0e7524d6
|
ff7f8a01a5a742906ebb350c55cc963ca3b85e73
|
refs/heads/master
| 2022-11-23T05:33:05.004619
| 2020-07-15T18:18:48
| 2020-07-15T18:18:48
| 279,936,544
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import json
import csv
with open('0.json') as f:
data = json.load(f)
with open('2.csv', 'w') as f:
writer = csv.writer(f, delimiter="|")
for each, value in data['titles'].items():
writer.writerow([each, value['title'], value['popularity']])
|
[
"mail@peterbe.com"
] |
mail@peterbe.com
|
986e4045b106ad579041853e9891735e06800efd
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/rottenOranges_20200810191228.py
|
2e8869821ab7c96fc3b8d53eff3ef2e939c3ffb4
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
def markrotten(i, j, row, column, grid):
    # minimal completion of the unfinished helper: mark any fresh orange (1)
    # in the four cells neighbouring the rotten orange at (i, j) as rotten
    for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        ni, nj = i + di, j + dj
        if 0 <= ni < row and 0 <= nj < column and grid[ni][nj] == 1:
            grid[ni][nj] = 2
def oranges(grid):
    # loop through the grid
    # if there is no fresh orange just return 0
    # if there is a two check all its four neighbours
    # recursive call
    # count when a one becomes a two
    row = len(grid)
    column = len(grid[0])
    for i in range(row):
        for j in range(column):
            if grid[i][j] == 2:
                markrotten(i, j, row, column, grid)
oranges([[2, 1, 1], [0, 1, 1], [1, 0, 1]])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d20e606c613d78c5971e9e9c8e93448c465bcbe1
|
68aa9bf99d62a5b991dc5aaa3d794f4bcd6e355a
|
/Programiranje/gui/Capital_Cities.py
|
24cfdaf80e27e2162949498aef012db6a6261742
|
[] |
no_license
|
dujodujo/lemur
|
82c9e695459597ab1b3430e566bc375af84d563c
|
1e6350b33f86f89f89c5bddbd3924364f027160e
|
refs/heads/master
| 2021-01-01T16:49:35.386172
| 2013-11-06T09:59:12
| 2013-11-06T09:59:12
| 14,150,163
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,022
|
py
|
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class Form(QDialog):
def __init__(self,parent = None):
super(Form, self).__init__(parent)
self.get_data()
self.answers = 0
self.count = 0
self.countryLabel = QLabel("Country:")
self.fromCountryLabel = QLabel()
self.fromCountryLabel.setText("Slovenija")
self.capitalLabel = QLabel("Capital:")
self.fromLineEdit = QLineEdit()
self.countLabel = QLabel()
self.resultLabel = QLabel()
grid = QGridLayout()
grid.addWidget(self.countryLabel,0,0)
grid.addWidget(self.fromCountryLabel,0,1)
grid.addWidget(self.capitalLabel,1,0)
grid.addWidget(self.fromLineEdit,1,1)
grid.addWidget(self.countLabel,2,0)
grid.addWidget(self.resultLabel,2,1)
self.setLayout(grid)
self.connect(self.fromLineEdit, SIGNAL("returnPressed()"), self.update_ui)
def select(self):
self.fromCountryLabel.setText(random.choice([x for x in self.capitals.keys()]))
def update_ui(self):
capitals = self.capitals
country = self.fromCountryLabel.text()
name = self.fromLineEdit.text()
if name == capitals[country]:
self.resultLabel.setText("Pravilno")
self.count +=1
else:
self.resultLabel.setText("Nepravilno, pravilni odgovor je " + capitals[country] )
self.answers +=1
self.countLabel.setText("{}/{}".format(self.count,self.answers))
self.fromLineEdit.clear()
self.select()
def get_data(self):
self.capitals = {}
if os.path.exists(os.getcwd() + "\\imena.txt"):
for line in open("imena.txt", "rt"):
line = line.strip()
data = line.split(", ")
country = data[0]
capital = data[1]
self.capitals[country] = capital
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
|
[
"avsic.ziga@gmail.com"
] |
avsic.ziga@gmail.com
|
3e7c227a882f2cd39cdaf02c0f17a021690effc5
|
40a04920dea94179878e25a0804ce4a6b459aca9
|
/Python/Django/Portfolio/apps/first_app/urls.py
|
8e117530ce911208aad1a83f1f376ca1c35e005b
|
[] |
no_license
|
Kryptonian92/pythonAssignments
|
5c7dd9140d07c94b19816ebbcaba579338479136
|
06355e0481307a77e5acd53b86b1fc144e98302a
|
refs/heads/master
| 2021-01-20T15:50:46.355224
| 2017-10-28T19:02:52
| 2017-10-28T19:02:52
| 90,771,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
from django.conf.urls import url
from . import views # This line is new!
urlpatterns = [
url(r'^$', views.index), # This line has changed!
url(r'^testimonials$', views.show)
]
|
[
"ausar_mcgruder@yahoo.com"
] |
ausar_mcgruder@yahoo.com
|
6c7376c8231168cb83ab28cd66f7376c7363aa20
|
0b842bcb3bf20e1ce628d39bf7e11abd7699baf9
|
/oscar/a/sys/platform/manager/blinky/__init__.py
|
856452a17cd28db74867fb490c14dee2212dcaf8
|
[] |
no_license
|
afeset/miner2-tools
|
75cc8cdee06222e0d81e39a34f621399e1ceadee
|
81bcc74fe7c0ca036ec483f634d7be0bab19a6d0
|
refs/heads/master
| 2016-09-05T12:50:58.228698
| 2013-08-27T21:09:56
| 2013-08-27T21:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
#
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: shmulika
#
G_GROUP_NAME_PLATFORM_POWER_BLINKY_ADAPTOR = "power-blinky-adaptor"
G_GROUP_NAME_PLATFORM_FANS_BLINKY_ADAPTOR = "fans-blinky-adaptor"
G_GROUP_NAME_PLATFORM_TEMPERATURE_BLINKY_ADAPTOR = "temperature-blinky-adaptor"
G_GROUP_NAME_PLATFORM_MANAGER_BLINKY_ADAPTOR = "platform-manager-blinky-adaptor"
G_GROUP_NAME_PLATFORM_SOURCE_BLINKY_ADAPTOR = "source-blinky-adaptor"
|
[
"afeset@gmail.com"
] |
afeset@gmail.com
|
b2c1be6d03658e2b794333b2d98e59fda98d2e05
|
7d97daf9b9f46d68bbe29441d8db554918dfcdc4
|
/leetcode/StringtoInteger8.py
|
5a117868d64f0d0ad26bb4ae61baff99e7332feb
|
[] |
no_license
|
hundyoung/LeetCode
|
9a56c4f078dcb4e875a6178c14665b7784c1a0a2
|
803e164d3a21b593cb89206b3a362c1ab1eb9abf
|
refs/heads/master
| 2020-09-23T02:51:13.657444
| 2020-05-06T12:53:07
| 2020-05-06T12:53:07
| 225,383,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
class Solution:
def myAtoi(self, str: str) -> int:
str1 = str.strip()
result = ""
for i in range(len(str1)):
char = str1[i]
            if i == 0 and (char == "+" or char == "-"):
                result = result + char
elif char.isdigit():
result = result + char
else:
break
# print(str1)
try:
result = int(result)
result = min(2**31-1,result)
result = max((-2)**31,result)
return result
except:
return 0
if __name__ == '__main__':
solution = Solution()
print(solution.myAtoi("-5-"))
|
[
"285080301@qq.com"
] |
285080301@qq.com
|
6b1515908b2fe16543fdcf82ee9325387b7d572b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_plagued.py
|
33674872c908833ea1ef79864be519cf6ce0d184
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.verbs._plague import _PLAGUE
#class header
class _PLAGUED(_PLAGUE, ):
def __init__(self,):
_PLAGUE.__init__(self)
self.name = "PLAGUED"
self.specie = 'verbs'
self.basic = "plague"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
2bd765f9129f0648c344eac691a54cae5729812b
|
cc0d06e2aad3d30152c4a3f3356befdc58748313
|
/13nov_til_19nov/17_novin1900.py
|
0e049e544ca995c89370175743c5e1de70beedec
|
[] |
no_license
|
lasse-steinnes/IN1900
|
db0bb4da33fa024d4fe9207337c0f1d956197c50
|
c8d97c2903078471f8e419f88cc8488d9b8fc7da
|
refs/heads/master
| 2020-12-14T15:34:36.429764
| 2020-01-18T19:59:46
| 2020-01-18T19:59:46
| 234,789,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
#### Lecture IN1900 ####
### Final lecture. Slide-based.
## Last part on ODE solvers and modelling of infectious diseases
## Systems of ODEs
### Will build a class hierarchy for ODE solvers.
###
|
[
"lasse.steinnes93@gmail.com"
] |
lasse.steinnes93@gmail.com
|
9a68a892ee7454b8952addae4614751aba7824f7
|
0789e92ff05448f511352982dbc9fcc8b481e806
|
/kikar_hamedina/reporting/management/commands/export_commentator_data_to_csv.py
|
728384914535440e35a1486adb779002cbeca29e
|
[] |
no_license
|
danielhers/kikar-hamedina
|
9645dfc554c004092cb44bb5189b63e9940b3801
|
a838a2fc675ea7100c620477bae438f215c741f7
|
refs/heads/dev
| 2020-06-14T14:08:05.069290
| 2017-05-04T17:22:03
| 2017-05-04T17:22:03
| 75,173,287
| 0
| 0
| null | 2016-11-30T09:47:01
| 2016-11-30T09:47:01
| null |
UTF-8
|
Python
| false
| false
| 6,338
|
py
|
#!encoding utf-8
from csv import DictWriter
from django.utils import timezone
from facebook_feeds.management.commands.kikar_base_commands import KikarBaseCommand
from facebook_feeds.models import Facebook_Feed, Facebook_Status
DELIMITER = '~'
class Command(KikarBaseCommand):
def add_arguments(self, parser):
parser.add_argument('--year',
action='store',
dest='year',
default=None,
help="choose year to filter on"
)
parser.add_argument('--feed',
action='store',
dest='feed',
default=None,
help="choose year to filter on"
)
parser.add_argument('--total',
action='store_true',
dest='total',
default=False,
help="Get statistics for total of activity, not separated by feed"
)
def build_commentator_data(self, statuses, year=None):
years = ['2014', '2015'] if not year else [year]
counter = dict()
counter['unique'] = {'likes_2014': set(), 'likes_2015': set(), 'comments_2014': set(),
'comments_2015': set()}
counter['full'] = {'likes_2014': long(), 'likes_2015': long(), 'comments_2014': long(),
'comments_2015': long()}
for year in years:
for status in statuses.filter(published__year=year).order_by('published'):
if not status.is_comment:
counter['unique']['likes_%s' % year] = counter['unique'][
'likes_%s' % year].union(
set(status.likes.values_list('user', flat=True)))
counter['unique']['comments_%s' % year] = counter['unique'][
'comments_%s' % year].union(
set(status.comments.values_list('comment_from_id', flat=True)))
counter['full']['likes_%s' % year] += status.likes.count()
counter['full']['comments_%s' % year] += status.comments.count()
print('\t%s' % status.published)
return counter
def handle(self, *args, **options):
print('Start.')
feed = options['feed']
feeds = Facebook_Feed.objects.filter(id=feed) if feed else Facebook_Feed.objects.all()
counter = dict()
if options['total']:
statuses = Facebook_Status.objects.all()
counter['total'] = self.build_commentator_data(statuses, year=options['year'])
else:
for feed in feeds.order_by('id'):
print(feed.id)
statuses = feed.facebook_status_set.filter(is_comment=False)
counter[feed.id] = self.build_commentator_data(statuses, year=options['year'])
file_name = 'commentator_data_{}.csv'.format(timezone.now().strftime('%Y_%m_%d_%H_%M_%S'))
with open(file_name, 'wb') as f:
field_names = [
'feed_id',
'link',
'mk_id',
'mk_name',
'mk_party',
'likes_2014_unique',
'likes_2015_unique',
'likes_2014_full',
'likes_2015_full',
'comments_2014_unique',
'comments_2015_unique',
'comments_2014_full',
'comments_2015_full'
]
csv_data = DictWriter(f, fieldnames=field_names, delimiter=DELIMITER)
headers = {field_name: field_name for field_name in field_names}
csv_data.writerow(headers)
if options['total']:
row = {'mk_id': 'total',
'mk_name': 'total',
'mk_party': None,
'feed_id': 'total',
'link': None,
'likes_2014_unique': len(counter['total']['unique']['likes_2014']),
'likes_2015_unique': len(counter['total']['unique']['likes_2015']),
'likes_2014_full': counter['total']['full']['likes_2014'],
'likes_2015_full': counter['total']['full']['likes_2015'],
'comments_2014_unique': len(counter['total']['unique']['comments_2014']),
'comments_2015_unique': len(counter['total']['unique']['comments_2015']),
'comments_2014_full': counter['total']['full']['comments_2014'],
'comments_2015_full': counter['total']['full']['comments_2015']
}
csv_data.writerow(row)
else:
for feed in feeds:
row = {'mk_id': feed.persona.object_id,
'mk_name': unicode(feed.persona.content_object.name).encode(
'utf-8') if feed.persona.content_object else feed.username,
'mk_party': unicode(feed.persona.content_object.current_party.name).encode(
'utf-8') if feed.persona.content_object else None,
'feed_id': feed.id,
'link': 'http://www.facebook.com/{}'.format(feed.vendor_id),
'likes_2014_unique': len(counter[feed.id]['unique']['likes_2014']),
'likes_2015_unique': len(counter[feed.id]['unique']['likes_2015']),
'likes_2014_full': counter[feed.id]['full']['likes_2014'],
'likes_2015_full': counter[feed.id]['full']['likes_2015'],
'comments_2014_unique': len(counter[feed.id]['unique']['comments_2014']),
'comments_2015_unique': len(counter[feed.id]['unique']['comments_2015']),
'comments_2014_full': counter[feed.id]['full']['comments_2014'],
'comments_2015_full': counter[feed.id]['full']['comments_2015']
}
csv_data.writerow(row)
print('Done.')
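# A usage sketch (hedged: the management-command name equals this module's
# filename, which is not shown here -- "export_commentator_data" below is only
# a placeholder):
#
#   python manage.py export_commentator_data --year 2015 --feed 42
#   python manage.py export_commentator_data --total
#
# Either form writes commentator_data_<timestamp>.csv using '~' as the CSV
# field delimiter.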
|
[
"yotammanor@gmail.com"
] |
yotammanor@gmail.com
|
98f50633d7a2f376fa62cba3433f8d1dd51588f3
|
ebe5167148cfff43d24b6c66e44634bb55513b72
|
/solutions/linkedlist/160.Intersection.of.Two.Linked.Lists.py
|
2cf42f3f9cf91b328616216b08a513d48d5ff246
|
[] |
no_license
|
ljia2/leetcode.py
|
c90ac38a25331d61d3ff77fd135b82372da3a09f
|
08c6d27498e35f636045fed05a6f94b760ab69ca
|
refs/heads/master
| 2020-03-25T03:37:13.318582
| 2019-07-18T23:14:41
| 2019-07-18T23:14:41
| 143,351,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,122
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, the following two linked lists:
begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
:type head1, head1: ListNode
:rtype: ListNode
"""
if not headA or not headB:
return None
lenA = self.get_length(headA)
lenB = self.get_length(headB)
# adjust to ensure A is longer than B;
# swap both headA/B and lenA/B
if lenA < lenB:
headA, headB = headB, headA
lenA, lenB = lenB, lenA
stepA = 0
runnerA = headA
while stepA + lenB < lenA and runnerA:
stepA += 1
runnerA = runnerA.next
runnerB = headB
while runnerA and runnerB and runnerA != runnerB:
runnerA = runnerA.next
runnerB = runnerB.next
if runnerA != runnerB:
return None
else:
return runnerA
def get_length(self, head):
length = 0
runner = head
while runner:
length += 1
runner = runner.next
return length
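# A quick self-check sketch (not part of the submission; ListNode is defined
# here because the definition at the top of the file is only a comment):
if __name__ == '__main__':
    class ListNode(object):
        def __init__(self, x):
            self.val = x
            self.next = None

    # A: 4 -> 1 -> 8 -> 4 -> 5, B: 5 -> 0 -> 1 -> 8 -> 4 -> 5, intersecting at 8
    c1, c2, c3 = ListNode(8), ListNode(4), ListNode(5)
    c1.next, c2.next = c2, c3
    a1, a2 = ListNode(4), ListNode(1)
    a1.next, a2.next = a2, c1
    b1, b2, b3 = ListNode(5), ListNode(0), ListNode(1)
    b1.next, b2.next, b3.next = b2, b3, c1
    assert Solution().getIntersectionNode(a1, b1) is c1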
|
[
"ljia@conversantmedia.com"
] |
ljia@conversantmedia.com
|
c797fec39e87cec2724d05c13ea1be0f98111384
|
7f66c66eb82b480e8a23ecbfb8613aae02cb50f7
|
/tests/integration/parity/utils.py
|
572d4c4af3500566de67c6e37afa8c80f6465074
|
[
"MIT"
] |
permissive
|
y19818/web3.py
|
03ddedcfdbd4dde2c1a458b31f5e796509b3c7c6
|
32a85a287ab63220d1e0c06d77be74de595ff02f
|
refs/heads/master
| 2021-06-25T00:30:50.312173
| 2019-12-02T08:21:40
| 2019-12-02T08:21:40
| 225,276,093
| 0
| 0
|
MIT
| 2019-12-02T03:20:47
| 2019-12-02T03:20:47
| null |
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
import signal
import socket
import subprocess
import time
from vns_utils import (
to_text,
)
import requests
def wait_for_socket(ipc_path, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
def wait_for_http(endpoint_uri, timeout=60):
start = time.time()
while time.time() < start + timeout:
try:
requests.get(endpoint_uri)
except requests.ConnectionError:
time.sleep(0.01)
else:
break
def get_process(command_list, terminates=False):
proc = subprocess.Popen(
command_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
if terminates:
wait_for_popen(proc, 30)
try:
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Parity Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
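# A minimal usage sketch (assumption: any short-lived command can stand in for
# a real parity binary when driving the generator by hand; the test fixtures
# that normally consume get_process may do this differently):
def _demo_get_process():
    import sys
    gen = get_process([sys.executable, '--version'], terminates=True)
    proc = next(gen)   # Popen object; already exited because terminates=True
    gen.close()        # runs the finally block: graceful kill + output dump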
|
[
"y19818@gmail.com"
] |
y19818@gmail.com
|
8c4e25032a017464274c3783f28d6988a1017590
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_135/3673.py
|
56a2a64feb4db88351e2187df1ddbb45f569ef30
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 583
|
py
|
import sys
def t_process():
n1 = int(sys.stdin.readline())
n1 -= 1
n1_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
n2 = int(sys.stdin.readline())
n2 -= 1
n2_matrix = [set(map(int, sys.stdin.readline().split())) for _ in range(4)]
sol = list(n1_matrix[n1].intersection(n2_matrix[n2]))
if len(sol) > 1:
return "Bad magician!"
if len(sol) == 0:
return "Volunteer cheated!"
if len(sol) == 1:
return int(sol[0])
def main():
t = int(sys.stdin.readline())
for k in range(1, t + 1):
print("Case #{0}: {1}".format(k, t_process()))
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1152f9facac5c0cb34d89abe0989f056a54199fe
|
0ab3ab2cda94a700f015ff172ef37abc3402ed75
|
/drawfromfile.py
|
3150dd3f92c114e2f97a979d71243be2403f76c8
|
[] |
no_license
|
mikerr/laserPOV
|
719c85493f8a4dc05e92267695e9e0804aac0b64
|
215ee38db2c3a2ff6e92e1c4f5aa18615ec76839
|
refs/heads/master
| 2016-09-06T07:49:40.767385
| 2015-04-11T20:49:39
| 2015-04-11T20:49:39
| 33,660,512
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
#!/usr/bin/python
import subprocess,time
file = open('drawing','r')
x,y = [], []
for line in file:
row = line.split()
x.append(row[0])
y.append(row[1])
SPEED = 0.09
REPS = 10
XOFFSET = 160
YOFFSET = 110
for loop in range (REPS):
for i in range (len(x)):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
for i in reversed (range (len(x))):
xpos = int(x[i]) + XOFFSET
ypos = int(y[i]) + YOFFSET
command = "echo 7=" + str(xpos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
command = "echo 0=" + str(ypos) + ">/dev/servoblaster"
subprocess.call (command, shell=True)
time.sleep(SPEED)
|
[
"pi@raspberrypi.(none)"
] |
pi@raspberrypi.(none)
|
23afbdc21f3c52e6711d6a97008f609df14f55bf
|
a2ad46d4995b2dbe182e645a15b7d5a7047d3b56
|
/2018.12.05.provetta/all-CMS-submissions-2018-12-05/2018-12-05.12:18:30.099314.VR437605.conta_multipli.py
|
e4d78f797d2d4c80a6e99aaa5ded613795628fb5
|
[] |
no_license
|
romeorizzi/temi_prog_public
|
ccf634a0291dd943b503f8dc57ed03de4d9b1a68
|
e9e3e98d4a9a3cddec45d514180b83fd5004fe7b
|
refs/heads/master
| 2020-04-09T04:27:35.687265
| 2019-03-25T14:04:04
| 2019-03-25T14:04:04
| 160,024,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
"""
* user: VR437605
* fname: ANNALISA
* lname: DETTORI
* task: conta_multipli
* score: 100.0
* date: 2018-12-05 12:18:30.099314
"""
#!/usr/bin/env python3
# Template for the conta_multipli solution
from __future__ import print_function
import sys
if sys.version_info < (3, 0):
input = raw_input # in Python 2, raw_input is the equivalent of input
# You must modify the implementation of this function to do
# what the exercise statement requires
def conta_multipli(a, b, c):
p=0
for n in range (1,c+1):
if n%a==0 and n%b!=0 :
p+=1
return p
# Reading input: do not modify the code below this line
a, b, c = map(int, input().split())
print(conta_multipli(a, b, c))
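# Worked example: a=2, b=3, c=10 -> multiples of 2 in [1,10] are 2,4,6,8,10;
# dropping those also divisible by 3 removes 6, leaving 2,4,8,10, so the
# function returns 4.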
|
[
"romeo.rizzi@univr.it"
] |
romeo.rizzi@univr.it
|
497b09aec342a86f55cb820435ec603f2aab872a
|
07fbdae51275b4bab2074524fc4c1ae58ac53d08
|
/List's/Lists Basics/Exercise/Solutions/10. Bread Factory.py
|
ef0ba277e0e12230313d1b24fb7eeaa1489595d6
|
[] |
no_license
|
rimisarK-blue/Python-Fundamental-knowledge
|
85c2afa4401f848c9919f672c7fa3d54a43e761f
|
a182fb1c7c3ce11f9e26ce0afefe5c2069d70e8d
|
refs/heads/main
| 2023-03-09T02:08:34.411768
| 2021-02-15T20:19:52
| 2021-02-15T20:19:52
| 326,009,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
events = input().split('|')
energy = 100
coins = 100
good_day = True
for com in events:
command, value = com.split('-')
value = int(value)
if command == 'rest':
if energy == 100:
print("You gained 0 energy.")
print(f"Current energy: 100.")
elif energy + value > 100:
print(f"You gained {value} energy.")
print(f"Current energy: 100.")
else:
energy += value
print(f"You gained {value} energy.")
print(f"Current energy: {energy}.")
elif command == 'order':
if energy >= 30:
energy -= 30
coins += value
print(f"You earned {value} coins.")
else:
energy += 50
print("You had to rest!")
else:
if coins - value > 0:
coins -= value
print(f"You bought {command}.")
else:
good_day = False
print(f"Closed! Cannot afford {command}.")
break
if good_day and coins > 0 and energy > 0:
print("Day completed!")
print(f"Coins: {coins}")
print(f"Energy: {energy}")
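# Worked example: "order-10|eggs-5|rest-20" -> the order costs 30 energy and
# earns 10 coins (energy 70, coins 110), eggs cost 5 coins (coins 105), rest
# adds 20 energy (90), so the output ends with "Day completed!", Coins: 105,
# Energy: 90.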
|
[
"rimisark92@gmail.com"
] |
rimisark92@gmail.com
|
68cf3e5e2413d7edeffddb03c066dfb7a3e78310
|
4e187a73d451f8c500955098e8f7d466b90d05de
|
/Flasky0.1.py
|
299910de2cdaa6ebb388c7732ee6b2261932d8dc
|
[] |
no_license
|
LinZiYU1996/Flask_Login
|
21c3592b6116ca49a17bab98eb4171ea4721b551
|
420d540cf18f4627054ecf589872611e6e6ff8b6
|
refs/heads/master
| 2021-01-02T08:48:38.669567
| 2017-08-02T03:14:37
| 2017-08-02T03:14:37
| 99,066,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
from flask import Flask,render_template,flash,url_for,redirect
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_wtf import FlaskForm
from flask_login import LoginManager,login_user,UserMixin,logout_user,login_required
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SECRET_KEY']='kkk'
bootstrap = Bootstrap(app)
moment=Moment(app)
login_manger=LoginManager()
login_manger.session_protection='strong'
login_manger.login_view='login'
login_manger.init_app(app)
if __name__ == '__main__':
app.run()
|
[
"2669093302@qq.com"
] |
2669093302@qq.com
|
757bb5db334a4b6518bf2b293c9f9cc451d67ebf
|
5891051796778cfb44a255248ce38789bfef9e70
|
/P_base/python_pdf/kp.py
|
1849b1e321f912c79b6c02533938157eb9a214ea
|
[] |
no_license
|
Faithlmy/Python_base
|
cc546a5d86b123e102a69df1227cde9b6e567493
|
5a43557e6375dc9dbe5f6701d7c10e549873a5ab
|
refs/heads/master
| 2021-01-01T17:07:04.097978
| 2018-03-31T16:44:01
| 2018-03-31T16:44:01
| 98,000,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,258
|
py
|
#!/usr/bin/env python3
# encoding: utf-8
import sys
import importlib
importlib.reload(sys)
from pdfminer.pdfparser import PDFParser,PDFDocument
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal,LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
'''
Parse the PDF text and save it to a txt file.
'''
path = r'/home/faith/Desktop/phtoword.pdf'
def parse():
fp = open(path, 'rb') # open the file in binary read mode
# create a PDF document parser from the file object
praser = PDFParser(fp)
# create a PDF document object
doc = PDFDocument()
# connect the parser with the document object
praser.set_document(doc)
doc.set_parser(praser)
# provide the initialization password
# if there is no password, just pass an empty string
doc.initialize()
# check whether the document allows text extraction; skip it if not
if not doc.is_extractable:
raise PDFTextExtractionNotAllowed
else:
# create a PDF resource manager to manage shared resources
rsrcmgr = PDFResourceManager()
# create a PDF device object
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
# create a PDF interpreter object
interpreter = PDFPageInterpreter(rsrcmgr, device)
# loop over the pages, processing one page at a time
for page in doc.get_pages(): # doc.get_pages() returns the list of pages
interpreter.process_page(page)
# receive the LTPage object for this page
layout = device.get_result()
# print(layout)
# layout here is an LTPage object holding everything parsed from this page, typically LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, etc.; to get the text, read the object's text attribute
for x in layout:
print(x.get_text())
# if (isinstance(x, LTTextBoxHorizontal)):
# with open(r'/root/pdf/t_pdf/turn_pdf2.txt', 'a') as f:
# results = x.get_text().encode('utf-8')
# print(results)
# f.write(results + '\n')
if __name__ == '__main__':
parse()
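# Note: this script targets the legacy pdfminer API. With the newer
# pdfminer.six package the document is created as PDFDocument(parser)
# (no separate initialize() call) and pages are iterated via
# PDFPage.create_pages(doc) instead of doc.get_pages().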
|
[
"lmengyy@126.com"
] |
lmengyy@126.com
|
1bab715b0c564a7a2941200a68f23a04ab4bfd58
|
be2c022b270522fe24475b794d53a3fd973a5de1
|
/영동/05_11049_행렬 곱셈 순서.py
|
9a26a4594789aceefcc502611d23e25d9aedf66e
|
[] |
no_license
|
zeroistfilm/week04
|
ea4a358be0931fe28202b7ce543ed246536a1c50
|
fdb5985e2d899c8b1a60cb81d660937304fa5bcb
|
refs/heads/main
| 2023-02-09T09:35:27.795180
| 2021-01-07T02:29:28
| 2021-01-07T02:29:28
| 325,717,500
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
# https://www.acmicpc.net/problem/11049
import sys
#sys.stdin = open("input.txt", "r")
N = int(sys.stdin.readline())
M = [0 for i in range(N+1)]
for i in range(N):
a,b = map(int, sys.stdin.readline().split())
M[i]=a
M[i+1] = b
Matrix = [[0 for i in range(N)] for i in range(N)]
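# Matrix[r][c] holds the minimum number of scalar multiplications needed to
# multiply the chain of matrices r..c (matrix i has dimensions M[i] x M[i+1]);
# the loop below fills it diagonal by diagonal with the classic recurrence
#   Matrix[r][c] = min over k in [r, c) of
#                  Matrix[r][k] + Matrix[k+1][c] + M[r]*M[k+1]*M[c+1]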
for i in range(1,N):
r = 0
c = i
for _ in range(N,i,-1):
tmp=[]
for k in range(r,c):
tmp.append(Matrix[r][k]+Matrix[k+1][c]+(M[r]*M[k+1]*M[c+1]))
Matrix[r][c]=min(tmp)
r += 1
c += 1
print(Matrix[0][-1])
|
[
"zeroistfilm@naver.com"
] |
zeroistfilm@naver.com
|
1f4ddfa1c8bc8ae0575ee67ac34d8226efa92e7e
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/3.jvm/24-静态分派.py
|
4057e69948dec7c7341531bc1d10fa9e78285067
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120
| 2021-09-11T08:18:17
| 2021-09-11T08:18:17
| 307,636,242
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,314
|
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
xmind_name="jvm"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("静态分派")
r2=s2.getRootTopic()
r2.setTitle("静态分派")
content={
'Java具备面向对象的3个基本特征':[
'继承',
'封装(get/set)',
{'多态':[
'继承,重写(Override),向上转型(Human h=new Man())三大必要条件',
'方法重载:同一个方法名,参数或者类型不同。(Overload)',
'方法重写:父类与子类有同样的方法名和参数,这叫方法覆盖。(Override)'
]}
],
'任务':[
'不等同于【方法执行】,该阶段唯一任务是确定【被调用方法版本】,不涉及方法内部具体运行过程'
],
'五条字节码指令':[
{'invokestatic':[
'调用静态方法'
]},
{'invokespecial':[
'调用实例构造器<init>()方法、私有方法和父类中的方法'
]},
{'invokevirtual':[
'调用所有的虚方法'
]},
{'invokeinterface':[
'调用接口方法,在运行时确定一个实现该接口的对象'
]},
{'invokedynamic':[
'运行时动态解析出调用点限定符所引用的方法,然后再执行该方法'
]}
],
'解析':[
{'定义':[
'静态过程',
'编译期间确定',
'把【符号引用】转变为【直接引用】,确定唯一的【方法调用版本】',
'如能被invokestatic和invokespecial指令调用的方法'
]},
{'分类':[
{'静态方法':[
'与类型直接关联,不能通过【重写】出现别的版本,适合类加载阶段进行解析'
]},
{'私有方法':[
'外部不可被访问,不能通过【继承】出现别的版本,适合类加载阶段进行解析'
]},
'实例构造器',
'父类方法',
{'被final修饰的方法(invokevirtual指令调用)':[
'【无法被覆盖】,没有其他版本的可能'
]}
]},
],
'静态分派':[
{'定义':[
'依赖【静态类型】决定【方法执行版本】',
'发生在【编译阶段】,不由虚拟机来执行的',
{'典型表现':[
'方法重载'
]}
]},
{'重载':[
'通过【参数的静态类型】而不是实际类型作为判定依据的',
'静态类型是在【编译期可知】',
'实际类型在运行期才可确认'
]},
{'重载时目标方法选择(字面量没有显示的静态类型时)':[
'1.char>int>long>float>double的顺序转型进行匹配',
'2.一次自动装箱,封装类型java.lang.Character',
'3.java.lang.Serializable,是java.lang.Character类实现的一个接口,自动装箱之后还是找不到装箱类,会找装箱类所实现的接口类型',
'4.Object,如果有多个父类,那将在继承关系中从下往上开始搜索',
'5.变长参数的重载优先级是最低的'
]}
],
}
# build the xmind content
xmind.build(content,r2)
# save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"xiongmengmeng@qipeipu.com"
] |
xiongmengmeng@qipeipu.com
|
f8c70c1da41cfea53b6d1f02569fd71e0439f618
|
35e00d1996515ccf3151067ff28ff3357078f0b6
|
/samples/generated_samples/pubsub_v1_generated_schema_service_validate_message_async.py
|
add86c6fa8941035f2205bc30efda5abdc9894e2
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-pubsub
|
5bb18674307bd89236a61c0d7c5079f10e19467e
|
1b9724324c58d27bcee42020b751cda58d80fddb
|
refs/heads/main
| 2023-09-03T13:14:22.894233
| 2023-08-28T13:18:36
| 2023-08-28T13:18:36
| 226,992,581
| 321
| 195
|
Apache-2.0
| 2023-09-10T23:29:10
| 2019-12-10T00:09:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,876
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ValidateMessage
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-pubsub
# [START pubsub_v1_generated_SchemaService_ValidateMessage_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google import pubsub_v1
async def sample_validate_message():
# Create a client
client = pubsub_v1.SchemaServiceAsyncClient()
# Initialize request argument(s)
request = pubsub_v1.ValidateMessageRequest(
name="name_value",
parent="parent_value",
)
# Make the request
response = await client.validate_message(request=request)
# Handle the response
print(response)
# [END pubsub_v1_generated_SchemaService_ValidateMessage_async]
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
8152f5de1e216e50d57f2ee029225b5144c4beb2
|
ed2be337ce4b8a3c772862fce99ec99416784a62
|
/play/models.py
|
c889f87afcafa52f0ca12af45ece8a4485629983
|
[
"MIT"
] |
permissive
|
fraferra/PlayPaloAltoServer
|
e5ecc7557a02b2b14750e929f656a121984a560f
|
a7128d363efd6059007df2c9da77f7bd033f7987
|
refs/heads/master
| 2020-05-20T05:30:19.020450
| 2014-07-08T02:34:14
| 2014-07-08T02:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,899
|
py
|
from django.db import models
from datetime import date
from django import forms
from django.contrib.auth.models import User,UserManager
from django.utils import timezone
from django.db.models.signals import post_save
from django.utils.translation import ugettext as _
from utils import *
from social_auth.models import UserSocialAuth
import constants
from django.core.exceptions import *
import charity.models
import shop.models
# Create your models here.
import requests
import datetime
#from social_auth.backends.pipeline.user import update_user_details
class Player(models.Model):
user=models.ForeignKey(User)
custom_auth = forms.BooleanField(initial=False)
token=models.CharField(max_length=100, null=True, default=None)
score=models.DecimalField(max_digits=4, decimal_places=0, null=True, default=20)
experience=models.DecimalField(max_digits=5, decimal_places=0, null=True, default=0)
level=models.DecimalField(max_digits=4, decimal_places=0, null=True, default=0)
picture_url=models.CharField(max_length=400, null=True, default='/static/img/avatar-1.png')
facebook_pic=models.BooleanField(default=True)
def __unicode__(self): # Python 3: def __str__(self):
return unicode(self.user) or u''
def create_user_profile(sender, instance, created, **kwargs):
if created:
Player.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
class CouponHistory(models.Model):
title=models.CharField(max_length=100, null=True)
#coupon=models.ForeignKey(Coupon, related_name='coupon')
shop=models.CharField(max_length=100, null=True)
player=models.ForeignKey(Player)
#shop=models.ForeignKey(Shop, related_name='created')
class EventHistory(models.Model):
date=models.DateTimeField( null=True)
title=models.CharField(max_length=100, null=True)
#event_done=models.ForeignKey(Event, related_name='created')
organization=models.CharField(max_length=100, null=True)
player=models.ForeignKey(Player)
points=models.DecimalField(max_digits=4, decimal_places=0)
event_type=models.CharField(max_length=50,choices=constants.TYPE, default=None, null=True)
#organization=models.ForeignKey(Organization, related_name='organization')
class Idea(models.Model):
title=models.CharField(max_length=100, null=True)
author=models.CharField(max_length=100, null=True)
description=models.TextField(max_length=500, null=True)
points=models.DecimalField(max_digits=4, decimal_places=0)
experience=models.DecimalField(max_digits=5, decimal_places=0, null=True, default=0)
class Comment(models.Model):
comment=models.TextField(max_length=500, null=True)
commenter=models.ForeignKey(Player)
event=models.ForeignKey('charity.Event')
date=models.DateTimeField( null=True, default=datetime.datetime.now)
class Feed(models.Model):
player=models.ForeignKey(Player)
event=models.ForeignKey('charity.Event')
likes= models.DecimalField(max_digits=4, decimal_places=0, default=0)
date=models.DateTimeField( null=True, default=datetime.datetime.now)
class CommentFeed(models.Model):
comment=models.TextField(max_length=500, null=True)
commenter=models.ForeignKey(Player)
feed=models.ForeignKey(Feed)
date=models.DateTimeField( null=True, default=datetime.datetime.now)
class Badge(models.Model):
player=models.ForeignKey(Player)
title=models.CharField(max_length=100, null=True, default='Beginner!')
icon=models.CharField(max_length=50,choices=constants.ICON, default='fa-thumbs-o-up')
'''
def assign_badge(sender, instance, created, **kwargs):
if created:
badge=Badge.objects.create(player=instance.player)
type_event=['Animals', 'Food','Art', 'Shopping', 'Elders', 'Environment']
for tt in type_event:
post_save.connect(assign_badge, sender=EventHistory) '''
|
[
"fraferra@cisco.com"
] |
fraferra@cisco.com
|
fec6a3aa31a220c668b93a5b34d034e735fbae41
|
233087c1eb99e1d13f80de6f43d2cc3264aa9ca6
|
/polyaxon_cli/cli/version.py
|
e1a7f0433468d235fe651db2f75bb5fd16ca9f7f
|
[
"MIT"
] |
permissive
|
DXist/polyaxon-cli
|
e33cd3b3633df5b21b9eb3cc48d7a6affed8e4ec
|
0b01512548f9faea77fb60cb7c6bd327e0638b13
|
refs/heads/master
| 2020-07-08T07:02:43.248549
| 2019-08-15T16:00:05
| 2019-08-15T16:04:31
| 203,601,306
| 0
| 0
|
MIT
| 2019-08-21T14:27:56
| 2019-08-21T14:27:56
| null |
UTF-8
|
Python
| false
| false
| 5,988
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
from polyaxon_deploy.operators.pip import PipOperator
from polyaxon_cli import pkg
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import (
AuthorizationError,
PolyaxonHTTPError,
PolyaxonShouldExitError
)
from polyaxon_cli.logger import clean_outputs, logger
from polyaxon_cli.managers.auth import AuthConfigManager
from polyaxon_cli.managers.cli import CliConfigManager
from polyaxon_cli.utils import indentation
from polyaxon_cli.utils.formatting import Printer, dict_tabulate
from polyaxon_client.exceptions import PolyaxonClientException
PROJECT_CLI_NAME = "polyaxon-cli"
def pip_upgrade(project_name=PROJECT_CLI_NAME):
PipOperator.execute(['install', '--upgrade', project_name], stream=True)
click.echo('polyaxon-cli upgraded.')
def session_expired():
AuthConfigManager.purge()
CliConfigManager.purge()
click.echo('Session has expired, please try again.')
sys.exit(1)
def get_version(package):
import pkg_resources
try:
return pkg_resources.get_distribution(package).version
except pkg_resources.DistributionNotFound:
logger.error('`%s` is not installed', package)
def get_current_version():
return pkg.VERSION
def get_server_version():
try:
return PolyaxonClient().version.get_cli_version()
except AuthorizationError:
session_expired()
sys.exit(1)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get cli version.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_log_handler():
try:
return PolyaxonClient().version.get_log_handler()
except AuthorizationError:
session_expired()
sys.exit(1)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get cli version.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def check_cli_version():
"""Check if the current cli version satisfies the server requirements"""
if not CliConfigManager.should_check():
return
from distutils.version import LooseVersion # pylint:disable=import-error
server_version = get_server_version()
current_version = get_current_version()
CliConfigManager.reset(current_version=current_version,
min_version=server_version.min_version)
if LooseVersion(current_version) < LooseVersion(server_version.min_version):
click.echo("""Your version of CLI ({}) is no longer compatible with server.""".format(
current_version))
if click.confirm("Do you want to upgrade to "
"version {} now?".format(server_version.latest_version)):
pip_upgrade()
sys.exit(0)
else:
indentation.puts("Your can manually run:")
with indentation.indent(4):
indentation.puts("pip install -U polyaxon-cli")
indentation.puts(
"to upgrade to the latest version `{}`".format(server_version.latest_version))
sys.exit(0)
elif LooseVersion(current_version) < LooseVersion(server_version.latest_version):
indentation.puts("New version of CLI ({}) is now available. To upgrade run:".format(
server_version.latest_version
))
with indentation.indent(4):
indentation.puts("pip install -U polyaxon-cli")
elif LooseVersion(current_version) > LooseVersion(server_version.latest_version):
indentation.puts("You version of CLI ({}) is ahead of the latest version "
"supported by Polyaxon Platform ({}) on your cluster, "
"and might be incompatible.".format(current_version,
server_version.latest_version))
@click.command()
@click.option('--cli', is_flag=True, default=False, help='Version of the Polyaxon cli.')
@click.option('--platform', is_flag=True, default=False, help='Version of the Polyaxon platform.')
@clean_outputs
def version(cli, platform):
"""Print the current version of the cli and platform."""
version_client = PolyaxonClient().version
cli = cli or not any([cli, platform])
if cli:
try:
server_version = version_client.get_cli_version()
except AuthorizationError:
session_expired()
sys.exit(1)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get cli version.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
cli_version = get_current_version()
Printer.print_header('Current cli version: {}.'.format(cli_version))
Printer.print_header('Supported cli versions:')
dict_tabulate(server_version.to_dict())
if platform:
try:
platform_version = version_client.get_platform_version()
except AuthorizationError:
session_expired()
sys.exit(1)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get platform version.')
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
chart_version = version_client.get_chart_version()
Printer.print_header('Current platform version: {}.'.format(chart_version.version))
Printer.print_header('Supported platform versions:')
dict_tabulate(platform_version.to_dict())
@click.command()
@clean_outputs
def upgrade():
"""Install/Upgrade polyaxon-cli."""
try:
pip_upgrade(PROJECT_CLI_NAME)
except Exception as e:
logger.error(e)
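# Usage sketch (assumption: these click commands are registered on the main
# `polyaxon` CLI group):
#
#   polyaxon version --cli --platform   # print cli and platform version info
#   polyaxon upgrade                    # pip install --upgrade polyaxon-cli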
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
1be13eebadb30837a50498bf56c567f3ae17a166
|
4f00c6a08db5755b294bd519b9377866f5ff6c19
|
/src/tests/google/appengine/api/xmpp/xmpp_service_stub.py
|
8071acee686aa1637f430255e904c727b3a3af37
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cooljeanius/cauliflowervest
|
02035a8455b1dde469ebfd0b202c02456820a679
|
a9bc209b610a927083bf16274d8451c6c45227bf
|
refs/heads/main
| 2022-12-24T15:28:30.616604
| 2020-09-25T23:55:15
| 2020-09-25T23:55:15
| 303,812,548
| 1
| 0
|
Apache-2.0
| 2023-09-04T16:48:46
| 2020-10-13T19:46:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,161
|
py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the XMPP API, writes messages to logs."""
import logging
import os
from google.appengine.api import apiproxy_stub
from google.appengine.api import app_identity
from google.appengine.api import xmpp
from google.appengine.api.xmpp import xmpp_service_pb
class XmppServiceStub(apiproxy_stub.APIProxyStub):
"""Python only xmpp service stub.
This stub does not use an XMPP network. It prints messages to the console
instead of sending any stanzas.
"""
def __init__(self, log=logging.info, service_name='xmpp'):
"""Initializer.
Args:
log: A logger, used for dependency injection.
service_name: Service name expected for all calls.
"""
super(XmppServiceStub, self).__init__(service_name)
self.log = log
def _Dynamic_GetPresence(self, request, response):
"""Implementation of XmppService::GetPresence.
Returns online if the first character of the JID comes before 'm' in the
alphabet, otherwise returns offline.
Args:
request: A PresenceRequest.
response: A PresenceResponse.
"""
jid = request.jid()
self._GetFrom(request.from_jid())
if jid[0] < 'm':
response.set_is_available(True)
else:
response.set_is_available(False)
def _Dynamic_SendMessage(self, request, response):
"""Implementation of XmppService::SendMessage.
Args:
request: An XmppMessageRequest.
response: An XmppMessageResponse .
"""
from_jid = self._GetFrom(request.from_jid())
self.log('Sending an XMPP Message:')
self.log(' From:')
self.log(' ' + from_jid)
self.log(' Body:')
self.log(' ' + request.body())
self.log(' Type:')
self.log(' ' + request.type())
self.log(' Raw Xml:')
self.log(' ' + str(request.raw_xml()))
self.log(' To JIDs:')
for jid in request.jid_list():
self.log(' ' + jid)
for jid in request.jid_list():
response.add_status(xmpp_service_pb.XmppMessageResponse.NO_ERROR)
def _Dynamic_SendInvite(self, request, response):
"""Implementation of XmppService::SendInvite.
Args:
request: An XmppInviteRequest.
response: An XmppInviteResponse .
"""
from_jid = self._GetFrom(request.from_jid())
self.log('Sending an XMPP Invite:')
self.log(' From:')
self.log(' ' + from_jid)
self.log(' To: ' + request.jid())
def _Dynamic_SendPresence(self, request, response):
"""Implementation of XmppService::SendPresence.
Args:
request: An XmppSendPresenceRequest.
response: An XmppSendPresenceResponse .
"""
from_jid = self._GetFrom(request.from_jid())
self.log('Sending an XMPP Presence:')
self.log(' From:')
self.log(' ' + from_jid)
self.log(' To: ' + request.jid())
if request.type():
self.log(' Type: ' + request.type())
if request.show():
self.log(' Show: ' + request.show())
if request.status():
self.log(' Status: ' + request.status())
def _GetFrom(self, requested):
"""Validates that the from JID is valid.
Args:
requested: The requested from JID.
Returns:
string, The from JID.
Raises:
xmpp.InvalidJidError if the requested JID is invalid.
"""
appid = app_identity.get_application_id()
if requested == None or requested == '':
return appid + '@appspot.com/bot'
node, domain, resource = ('', '', '')
at = requested.find('@')
if at == -1:
self.log('Invalid From JID: No \'@\' character found. JID: %s', requested)
raise xmpp.InvalidJidError()
node = requested[:at]
rest = requested[at+1:]
if rest.find('@') > -1:
self.log('Invalid From JID: Second \'@\' character found. JID: %s',
requested)
raise xmpp.InvalidJidError()
slash = rest.find('/')
if slash == -1:
domain = rest
resource = 'bot'
else:
domain = rest[:slash]
resource = rest[slash+1:]
if resource.find('/') > -1:
self.log('Invalid From JID: Second \'/\' character found. JID: %s',
requested)
raise xmpp.InvalidJidError()
if domain == 'appspot.com' and node == appid:
return node + '@' + domain + '/' + resource
elif domain == appid + '.appspotchat.com':
return node + '@' + domain + '/' + resource
self.log('Invalid From JID: Must be appid@appspot.com[/resource] or '
'node@appid.appspotchat.com[/resource]. JID: %s', requested)
raise xmpp.InvalidJidError()
|
[
"egall@gwmail.gwu.edu"
] |
egall@gwmail.gwu.edu
|
2f50c5d8fbaf7359990e0e5264f56327e41de7cc
|
0e2a58dce33bb412f19d019b98168d68af9bdeec
|
/model.py
|
9d1eb91c29159a77b3e6e6ab01503bc436ef9099
|
[] |
no_license
|
Naveenprabaharan/Salary_Prediction
|
0ea2810a177b7c0d3de8f4044970f35d51efa820
|
c36cf19545667c4e330cb08bb273c45afa74b06a
|
refs/heads/master
| 2023-08-23T16:16:10.834688
| 2021-10-23T15:39:28
| 2021-10-23T15:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pickle
# MODEL TRAINING:
# dataset = pd.read_csv('__demo\Salary_Data.csv')
# X = dataset.iloc[:, :-1].values
# y = dataset.iloc[:, -1].values
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)
# regressor = LinearRegression()
# regressor.fit(X_train, y_train)
# X_test = input("enter year:")
# y_pred = regressor.predict([[X_test]])
# print(y_pred)
# MODEL DEPLOYMENT:
def salaryPrediction(hrs):
model = pickle.load(open('__demo/reg_model.p','rb'))
year = hrs
y_out = model.predict([[year]])
return y_out
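# A usage sketch (assumes __demo/reg_model.p exists relative to the working
# directory; the argument is the years-of-experience value fed to the model):
#
#   print(salaryPrediction(5))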
|
[
"you@example.com"
] |
you@example.com
|
0547381d2254684900dcf79141d5b76f75c00912
|
65fce73a1e6a36718238cdef09a17493b19532a0
|
/8/swagger_client/__init__.py
|
d6ffda23006c1131e1217a75ca10767be0046ebb
|
[
"Apache-2.0"
] |
permissive
|
apitore/apitore-sdk-python
|
eb419589609efb86bd279cd1733c2a03cdc03680
|
c0814c5635ddd09e9a20fcb155b62122bee41d33
|
refs/heads/master
| 2020-03-21T10:06:34.557781
| 2018-06-23T21:26:27
| 2018-06-23T21:26:27
| 138,434,217
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# coding: utf-8
# flake8: noqa
"""
Word2Vec APIs
Word2Vec.<BR />[Endpoint] https://api.apitore.com/api/8 # noqa: E501
OpenAPI spec version: 1.0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.word_2_vec_controller_api import Word2VecControllerApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.analogy_response_entity import AnalogyResponseEntity
from swagger_client.models.distance_entity import DistanceEntity
from swagger_client.models.distance_response_entity import DistanceResponseEntity
from swagger_client.models.similarity_response_entity import SimilarityResponseEntity
from swagger_client.models.vector_distance_response_entity import VectorDistanceResponseEntity
from swagger_client.models.word_vector_response_entity import WordVectorResponseEntity
|
[
"keigoht@gmail.com"
] |
keigoht@gmail.com
|
d973653f84166354990b4df25cb162438aa56b9e
|
ed9b286cc1fba177abae3449540e95cde558b7e3
|
/tests/unit/test_logging.py
|
57a6cff2087deaf7e117e341b0311904534212d9
|
[
"Apache-2.0"
] |
permissive
|
AndrewNg/anchore
|
e706f0a0c47e298be3295d1aa6d167ec58788cd2
|
308e91881be65dd546dbfc79b9d3982b501252a8
|
refs/heads/master
| 2020-09-29T04:58:03.114023
| 2019-12-09T20:07:08
| 2019-12-09T20:07:08
| 226,957,427
| 0
| 0
|
Apache-2.0
| 2019-12-09T20:06:16
| 2019-12-09T20:06:15
| null |
UTF-8
|
Python
| false
| false
| 2,428
|
py
|
import logging
import unittest
import anchore.cli.logs
import anchore.cli.common
import anchore.util
class TestLogging (unittest.TestCase):
@staticmethod
def do_generic(some_logger, name=None):
assert isinstance(some_logger, logging.Logger)
some_logger.debug('debug message - ' + name)
some_logger.info('info message - ' + name)
some_logger.warn('warn message - ' + name)
some_logger.error('error message - ' + name)
try:
raise KeyError('Some key not found')
except KeyError:
some_logger.exception('Some exception caught - ' + name)
@staticmethod
def do_anchore_logging():
print '--ANCHORE LOGGER'
anchore_logger = logging.getLogger('anchore')
TestLogging.do_generic(anchore_logger, 'anchore')
@staticmethod
def do_non_anchore_logging():
print '--NON-ANCHORE LOGGER'
rand_logger = logging.getLogger('somepackage.somemodule')
TestLogging.do_generic(rand_logger, 'non-anchore')
@staticmethod
def reset_logging_config():
logging.root.setLevel('NOTSET')
for f in logging.root.filters:
logging.root.filters.remove(f)
for f in logging.root.handlers:
print 'Removing handler %s' % str(f)
logging.root.handlers.remove(f)
def test_quiet(self):
print '--STARTING TEST: quiet'
TestLogging.reset_logging_config()
anchore.cli.logs.init_output_formatters(output_verbosity='quiet')
TestLogging.do_anchore_logging()
TestLogging.do_non_anchore_logging()
def test_normal(self):
print '--STARTING TEST: normal'
TestLogging.reset_logging_config()
anchore.cli.logs.init_output_formatters(output_verbosity='normal')
TestLogging.do_anchore_logging()
TestLogging.do_non_anchore_logging()
def test_verbose(self):
print '--STARTING TEST: verbose'
TestLogging.reset_logging_config()
anchore.cli.logs.init_output_formatters(output_verbosity='verbose')
TestLogging.do_anchore_logging()
TestLogging.do_non_anchore_logging()
def test_debug(self):
print '--STARTING TEST: debug'
TestLogging.reset_logging_config()
anchore.cli.logs.init_output_formatters(output_verbosity='debug')
TestLogging.do_anchore_logging()
TestLogging.do_non_anchore_logging()
|
[
"nurmi@anchore.com"
] |
nurmi@anchore.com
|
630a17eceb74a3892bd59ab00b61f09ff63f75c5
|
949ebd7bc2ab1526b3d535def4c90c80fab907f0
|
/Decision_Tree_Classification/decision_tree_classification_f1score.py
|
543282a5320bd6834cdfb946ee193307187f8799
|
[] |
no_license
|
mbhushan/ml
|
1c5c0d79f56dbc374f5163a032900da14ca5bc58
|
89441760c489bb265339bcdcbe975888686fc8a5
|
refs/heads/master
| 2021-05-15T05:31:47.801454
| 2018-05-12T17:34:23
| 2018-05-12T17:34:23
| 116,192,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
# Decision Tree Classification
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
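# Note: sklearn.cross_validation was removed in scikit-learn 0.20; on newer
# versions import train_test_split from sklearn.model_selection instead.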
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Fitting Decision Tree Classification to the Training set
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', min_samples_split=25,
random_state = 1, max_depth=3,
min_samples_leaf=5, splitter='best')
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import f1_score
cm = confusion_matrix(y_test, y_pred)
tn, fp, fn, tp = cm.ravel()
print ('TN: %s, FP: %s, FN: %s, TP: %s' %(tn, fp, fn, tp))
precision, recall, fscore, support = precision_recall_fscore_support(y_test, y_pred, average='binary')
print ('fscore: %s' % (fscore))
print ('precision: %s' % (precision))
print ('recall: %s' % (recall))
# f1_score = f1_score(y_test, y_pred, average='binary')
# print ('F1 Score: ', str(f1_score))
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Decision Tree Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Decision Tree Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"manibhushan.cs@gmail.com"
] |
manibhushan.cs@gmail.com
|
7cae145eeb1765e1dc1249a7c25c4f9b5a5a80c0
|
2612f336d667a087823234daf946f09b40d8ca3d
|
/python/lib/Lib/site-packages/django/utils/formats.py
|
c23a37cb2b51c0f4fca23725608c0e1326cc71ee
|
[
"Apache-2.0"
] |
permissive
|
tnorbye/intellij-community
|
df7f181861fc5c551c02c73df3b00b70ab2dd589
|
f01cf262fc196bf4dbb99e20cd937dee3705a7b6
|
refs/heads/master
| 2021-04-06T06:57:57.974599
| 2018-03-13T17:37:00
| 2018-03-13T17:37:00
| 125,079,130
| 2
| 0
|
Apache-2.0
| 2018-03-13T16:09:41
| 2018-03-13T16:09:41
| null |
UTF-8
|
Python
| false
| false
| 6,513
|
py
|
import decimal
import datetime
from django.conf import settings
from django.utils.translation import get_language, to_locale, check_for_language
from django.utils.importlib import import_module
from django.utils.encoding import smart_str
from django.utils import dateformat, numberformat, datetime_safe
from django.utils.safestring import mark_safe
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang):
"""
Does the heavy lifting of finding format modules.
"""
if check_for_language(lang) or settings.USE_L10N:
format_locations = ['django.conf.locale.%s']
if settings.FORMAT_MODULE_PATH:
format_locations.append(settings.FORMAT_MODULE_PATH + '.%s')
format_locations.reverse()
locale = to_locale(lang)
locales = set((locale, locale.split('_')[0]))
for location in format_locations:
for loc in locales:
try:
yield import_module('.formats', location % loc)
except ImportError:
pass
def get_format_modules(reverse=False):
"""
Returns an iterator over the format modules found
"""
lang = get_language()
modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang)))
if reverse:
modules.reverse()
return modules
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, returns the format for the current
language (locale), defaults to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
format_type = smart_str(format_type)
if use_l10n or (use_l10n is None and settings.USE_L10N):
if lang is None:
lang = get_language()
cache_key = (format_type, lang)
try:
return _format_cache[cache_key] or getattr(settings, format_type)
except KeyError:
for module in get_format_modules():
try:
val = getattr(module, format_type)
_format_cache[cache_key] = val
return val
except AttributeError:
pass
_format_cache[cache_key] = None
return getattr(settings, format_type)
def date_format(value, format=None, use_l10n=None):
"""
Formats a datetime.date or datetime.datetime object using a
localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n))
def time_format(value, format=None, use_l10n=None):
"""
Formats a datetime.time object using a localizable format
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n))
def number_format(value, decimal_pos=None, use_l10n=None):
"""
Formats a numeric value using localization settings
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n or (use_l10n is None and settings.USE_L10N):
lang = get_language()
else:
lang = None
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
decimal_pos,
get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
)
def localize(value, use_l10n=None):
"""
Checks if value is a localizable type (date, number...) and returns it
formatted as a string using current locale format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, bool):
return mark_safe(unicode(value))
elif isinstance(value, (decimal.Decimal, float, int, long)):
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
else:
return value
def localize_input(value, default=None):
"""
Checks if an input value is a localizable type and returns it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, (decimal.Decimal, float, int, long)):
return number_format(value)
elif isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = smart_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = smart_str(default or get_format('DATE_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.time):
format = smart_str(default or get_format('TIME_INPUT_FORMATS')[0])
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitizes a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if settings.USE_L10N:
decimal_separator = get_format('DECIMAL_SEPARATOR')
if isinstance(value, basestring):
parts = []
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
parts.append(value.replace(get_format('THOUSAND_SEPARATOR'), ''))
else:
parts.append(value)
value = '.'.join(reversed(parts))
return value
|
[
"dmitry.trofimov@jetbrains.com"
] |
dmitry.trofimov@jetbrains.com
|
112d2d8320692eba6ef70e6342254ab8abb37bd3
|
875921eb2b486923cfef0c2af249e8f456bdf0c9
|
/config.py
|
71df786a370703f818458fa7a87ac6837cb8727c
|
[
"Apache-2.0"
] |
permissive
|
baozixifan/ASRFrame
|
c0d3d477409b0e262fbf760860c6c7b6ddd59caf
|
307596dc729f7611b270b9f6d279fefa05ef488d
|
refs/heads/master
| 2020-06-23T07:06:07.973172
| 2019-07-18T16:26:07
| 2019-07-18T16:26:07
| 198,550,805
| 1
| 0
| null | 2019-07-24T03:23:48
| 2019-07-24T03:23:47
| null |
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
import platform
import os
project_path = os.path.split(os.path.realpath(__file__))[0] #
thu_datapath = None # the directory should contain data/, dev/, etc.
z200_datapath = None # the directory should contain the many G.../-style subdirectories
aishell_datapath = None # the directory should contain the wav/ and transcript/ directories
prime_datapath = None # the directory should contain one json file and one directory
stcmd_datapath = None # the directory should contain the audio files directly
wiki_datapath = None
if platform.system() == "Linux":
thu_datapath = "/data/voicerec/thchs30/data_thchs30"
z200_datapath = "/data/voicerec/z200"
aishell_datapath = "/data/voicerec/ALShell-1/data_aishell"
prime_datapath = "/data/voicerec/Primewords Chinese Corpus Set 1/primewords_md_2018_set1"
stcmd_datapath = "/data/voicerec/Free ST Chinese Mandarin Corpus/ST-CMDS-20170001_1-OS"
wiki_datapath = "/data/voicerec/wiki/wiki_corpus_2"
elif platform.system() == "Windows":
thu_datapath = r"C:\E\jupyter_notebook\voice_reco\Dataset\thchs30"
z200_datapath = r"C:\E\jupyter_notebook\voice_reco\Dataset\z200"
aishell_datapath = r"C:\E\jupyter_notebook\voice_reco\Dataset\data_aishell"
prime_datapath = r"C:\E\jupyter_notebook\voice_reco\Dataset\primewords_md_2018_set1"
stcmd_datapath = r"C:\E\jupyter_notebook\voice_reco\Dataset\ST-CMDS-20170001_1-OS"
model_dir = os.path.join(project_path,"model") # ./model
dict_dir = os.path.join(project_path,"util","dicts") #./util/dicts
acoustic_model_dir = os.path.join(model_dir, "acoustic") # ./acoustic
language_model_dir = os.path.join(model_dir, "language") # ./language
loss_dir = "./loss_plot/"
acoustic_loss_dir = os.path.join(loss_dir,"acoustic") # ./loss_plot/acoustic
language_loss_dir = os.path.join(loss_dir,"language") # ./loss_plot/language
join_model_path = lambda x:os.path.join(model_dir, x)
chs_dict_path = os.path.join(dict_dir,"pure_chs.txt") # ./util/dicts/...
py_dict_path = os.path.join(dict_dir,"pure_py.txt") # ./util/dicts/...
|
[
"sailist@outlook.com"
] |
sailist@outlook.com
|
977922ac36268edcaa041e79fd97eed215a5b6ac
|
179577ecdd7fda84ad970b3aad573a575fef56bc
|
/exercicios/ex034.py
|
cc2175d2d31399159743980d7251f1a8965d04fb
|
[] |
no_license
|
Elvis-Lopes/Curso-em-video-Python
|
6c12fa17a5c38c722a7c8e9677f6d9596bc5653c
|
65f093975af9bd59c8aaa37606ba648b7ba1e1c4
|
refs/heads/master
| 2021-02-11T12:15:13.580496
| 2020-05-05T21:55:06
| 2020-05-05T21:55:06
| 244,490,886
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
sal = float(input('Insira o salário: '))
aumento = float()
if sal > 1250:
aumento = (sal*15)/100
sal = sal + aumento
else:
aumento = (sal*10)/100
sal = sal + aumento
print(f'Novo salario R${sal:.2f}')
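# Worked example: for an input salary of 2000 the raise is 15% (300), so the
# program prints "Novo salario R$2300.00"; for 1000 the raise is 10% (100),
# giving "Novo salario R$1100.00".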
|
[
"elvislopes1996@hotmail.com"
] |
elvislopes1996@hotmail.com
|
3c53e42d5a2371b1683e62b91621f013f2474ebd
|
7e50b94379132a4156fd693bc73d640ff6752ed9
|
/tests/conftest.py
|
6981e1f250018bce62a66937c9462a5ed171ebab
|
[
"MIT"
] |
permissive
|
Pylons/plaster_pastedeploy
|
145ac4c5310babf78ea7a0f7ad0639cc1b3f8a33
|
c0a146cdfac61781057ecaaa1b7938ef53dae9af
|
refs/heads/main
| 2023-06-12T04:08:37.382145
| 2023-01-03T02:44:28
| 2023-01-03T02:44:28
| 60,292,293
| 7
| 8
|
MIT
| 2023-09-09T04:19:56
| 2016-06-02T19:40:32
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
import os.path
import sys
import pkg_resources
import pytest
@pytest.fixture(scope="session")
def fake_packages():
# we'd like to keep this scope more focused but it's proven really
# difficult to fully monkeypatch pkg_resources and so for now we just
# install the packages for the duration of the test suite
test_dir = os.path.dirname(__file__)
info_dir = os.path.join(test_dir, "fake_packages", "FakeApp")
sys.path.insert(0, info_dir)
pkg_resources.working_set.add_entry(info_dir)
|
[
"michael@merickel.org"
] |
michael@merickel.org
|
a73f8302a9249594d2ed5b77f6688c6768dc5b63
|
6a2b0db7d6c4ecef8434f3b35fcaef71eeb0d896
|
/VENV/py3_venv/lib/python3.6/site-packages/pyntc/templates/__init__.py
|
f9a12282a24b39159158a59ac474ea95c08b289c
|
[] |
no_license
|
pseudonode/nornircourse
|
9bf890ecfadd1a08691f113e0cd2acadd4b9bffa
|
1ad0372f9673de784233937cc15779bc2391e267
|
refs/heads/master
| 2022-11-09T20:18:22.714703
| 2019-10-04T08:06:42
| 2019-10-04T08:06:42
| 211,856,983
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
import os
import textfsm
TEMPLATE_PATH_ENV_VAR = "NTC_TEMPLATES"
def get_structured_data(template_name, rawtxt):
"""Returns structured data given raw text using
TextFSM templates
"""
template_file = get_template(template_name)
with open(template_file) as template:
fsm = textfsm.TextFSM(template)
table = fsm.ParseText(rawtxt)
structured_data = []
for row in table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[fsm.header[index].lower()] = element
structured_data.append(temp_dict)
return structured_data
def get_template(template_name):
template_dir = get_template_dir()
return os.path.join(template_dir, template_name)
def get_template_dir():
try:
return os.environ[TEMPLATE_PATH_ENV_VAR]
except KeyError:
return os.path.realpath(os.path.dirname(__file__))
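# A usage sketch (assumption: the template name refers to one of the TextFSM
# templates shipped in this directory, e.g. a "show version" template):
#
#   rows = get_structured_data('cisco_ios_show_version.template', raw_output)
#   # -> list of dicts keyed by the lower-cased TextFSM header names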
|
[
"andre@recursivenet.com"
] |
andre@recursivenet.com
|
3eb6d193517b5ddaa0e343c16513ad7fff94180c
|
216ee8ab7ca468638aa2dc6ccb7f89ea76dd0b35
|
/Project/Solutions/b_print_to_csv/scraper.py
|
53d02a6e14c5fe2a789323404aefe9f094fd9c4d
|
[] |
no_license
|
satishkbe/python-level-2
|
7b44d0f676bc830f0a94f823aeb6e0f628215628
|
834411f74d54019b9675a87004fd39072dc5fba0
|
refs/heads/master
| 2023-03-13T20:32:17.993938
| 2021-03-16T00:19:07
| 2021-03-16T00:19:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
import csv
import requests
from bs4 import BeautifulSoup
URL = "https://en.wikipedia.org/wiki/Member_states_of_the_United_Nations"
# Todo: Update with your info
name = None
email = None
assert name and email
headers = {'User-Agent': f'{name} ({email})'}
response = requests.get(URL, headers=headers)
assert response.status_code == 200, f'Response got {response.status_code}'
html_doc = response.text
soup = BeautifulSoup(html_doc, 'html.parser')
table = soup.find('table', class_='wikitable')
countries = []
for row in table.find_all('tr'):
name_column = row.find('td')
if name_column:
country_dict = {}
name_link = name_column.find_all('a')[1]
name = name_link.string
country_dict['Name'] = name
date_column = row.find_all('td')[1]
date_joined = date_column.span.text
country_dict['Date Joined'] = date_joined
countries.append(country_dict)
with open('data/countries.csv', 'w') as file:
writer = csv.DictWriter(file, fieldnames=('Name', 'Date Joined'))
writer.writeheader()
writer.writerows(countries)
|
[
"ariannedee@gmail.com"
] |
ariannedee@gmail.com
|
1571db30fcb1897a7c23bbc6da84249efffefe12
|
13cf11440998376d3b52a49f1e4fb8936c360ac4
|
/chainer_chemistry/saliency/visualizer/table_visualizer.py
|
4e27c19c5a807d63f9d5844832ecaecdfb772adc
|
[
"MIT"
] |
permissive
|
k-ishiguro/chainer-chemistry
|
87e3db724de0e99042d9585cd4bd5fff38169339
|
aec33496def16e76bdfbefa508ba01ab9f79a592
|
refs/heads/master
| 2021-07-06T22:58:20.127907
| 2019-02-04T02:51:34
| 2019-02-04T02:51:34
| 169,345,375
| 1
| 1
|
MIT
| 2020-07-30T06:04:13
| 2019-02-06T02:27:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,289
|
py
|
import numpy
import matplotlib.pyplot as plt
from chainer_chemistry.saliency.visualizer.base_visualizer import BaseVisualizer # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import abs_max_scaler # NOQA
class TableVisualizer(BaseVisualizer):
"""Saliency visualizer for table data"""
def visualize(self, saliency, feature_names=None, save_filepath=None,
num_visualize=-1, scaler=abs_max_scaler,
sort='descending', title='Feature Importance', color='b',
xlabel='Importance', bbox_inches='tight'):
"""Visualize or save `saliency` in bar plot.
Args:
saliency (numpy.ndarray): 1-dim saliency array (num_feature,)
feature_names (list or numpy.ndarray): Feature names of `saliency`
save_filepath (str or None): If specified, file is saved to path.
num_visualize (int): If positive value is set, only plot specified
number of features.
scaler (callable): function which takes `x` as input and outputs
scaled `x`, for plotting.
sort (str): Below sort options are supported.
none: not sort
ascending: plot in ascending order
descending: plot in descending order
title (str or None): title of plot
color (str): color of bar in plot
xlabel (str): x label legend
bbox_inches (str or Bbox or None): used for `plt.savefig` option.
"""
# --- type check ---
if saliency.ndim != 1:
raise ValueError("[ERROR] Unexpected value saliency.shape={}"
.format(saliency.shape))
num_total_feat = saliency.shape[0]
if feature_names is not None:
# type check
if len(feature_names) != num_total_feat:
raise ValueError(
"feature_names={} must have same length with `saliency`"
.format(feature_names))
else:
feature_names = numpy.arange(num_total_feat)
if sort == 'none':
indices = numpy.arange(num_total_feat)
elif sort == 'ascending':
indices = numpy.argsort(saliency)[::-1]
elif sort == 'descending':
indices = numpy.argsort(saliency)
else:
raise ValueError("[ERROR] Unexpected value sort={}".format(sort))
saliency = saliency[indices]
feature_names = numpy.asarray(feature_names)[indices]
if scaler is not None:
# Normalize to [-1, 1] or [0, 1]
saliency = scaler(saliency)
if num_visualize > 0:
saliency = saliency[:num_visualize]
if feature_names is not None:
feature_names = feature_names[:num_visualize]
else:
num_visualize = num_total_feat
plt.figure()
plt.clf()
if title is not None:
plt.title(title)
plt.barh(range(num_visualize), saliency, color=color, align='center')
plt.yticks(range(num_visualize), feature_names)
plt.xlabel(xlabel)
if save_filepath:
plt.savefig(save_filepath, bbox_inches=bbox_inches)
else:
plt.show()
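
# Usage sketch (illustrative, not part of the library): plot a small synthetic
# saliency vector. The feature names and values below are made up for the example.
if __name__ == '__main__':
    saliency = numpy.array([0.1, -0.4, 0.8, 0.25])
    names = ['mol_weight', 'logp', 'tpsa', 'num_rings']
    TableVisualizer().visualize(saliency, feature_names=names,
                                save_filepath='saliency_table.png')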
|
[
"acc1ssnn9terias@gmail.com"
] |
acc1ssnn9terias@gmail.com
|
af23bfe2581b749cad1c35dc75d23d8ece968b41
|
e756bfb5290cd336d20f0cf0cde04eec2a35caae
|
/src/actions/custom/utils/pick_card.py
|
9847ba74cd50d6deef5415fb9cb4dde04b52eee6
|
[
"MIT"
] |
permissive
|
StetHD/Lonabot
|
ff1b9113f1e8d6618a271a17752e86679e0c6274
|
615ce2c176607d6da71c84d38644d8aaaf0d3a0b
|
refs/heads/master
| 2021-01-22T10:40:55.989293
| 2016-08-24T10:17:29
| 2016-08-24T10:17:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
from actions.action_base import ActionBase
from random import choice, randint
class PickCardAction(ActionBase):
def __init__(self):
super().__init__(name="PICK A CARD FROM THE DECK",
keywords=['pick a card (INT)',
'pick (INT) cards?'])
def act(self, data):
times = data.get_match_int(1, fallback=1)
        if times > 52:  # Avoid too many; 13 ranks x 4 suits = 52 unique cards
            self.send_msg(data,
                          "there are 52 cards in a deck (no joker here), "
                          "how am i supposed to pick {}?!".format(times))
            return
        if times == 52:
            self.send_msg(data, "there are 52 cards in the deck, BUT, if that makes you happy:")
# Add unique choices until we have enough
result = []
while len(result) < times:
# Pick a random value
value = randint(2, 14)
if value == 11:
value = 'jack'
elif value == 12:
value = 'queen'
elif value == 13:
value = 'king'
elif value == 14:
value = 'ace'
# And a random suit
suit = choice(['♠️', '♣️', '♥️', '♦️'])
current = '{}{}'.format(suit, value)
# Add the random value with the choice if it wasn't in yet
if current not in result:
result.append(current)
if times > 4: # If too many times, let's make a pretty table!
row_size = 4
spacing = 7
msg = '```\n'
for i in range(0, times, row_size):
# Join the results from i..i+row_size with a '.'
msg += '.'.join(str(result[j]).ljust(spacing, '.')
for j in range(i, i + row_size) if j < times)
msg += '\n'
msg += '```'
self.send_msg(data, msg, markdown=True)
else: # Else just join multiline
self.send_msg(data, '\n'.join(result), markdown=True)
|
[
"totufals@hotmail.com"
] |
totufals@hotmail.com
|
b26f46287c34e1c977675f1a1da4680ab338880a
|
d578dc0955028ee86656f06423ceaa4a50c5ba92
|
/Final Project/Centroid.py
|
6915378daa05fc205509cf703953b537c1f5ae35
|
[] |
no_license
|
ohsuz/CSI4106-Repository
|
1b17482d9215c0dcfff60edb90494833d11e069a
|
d81c5a2600b7c8bf67dd02fbd30138a7f8245e47
|
refs/heads/master
| 2023-06-11T10:29:33.009843
| 2021-06-26T18:02:50
| 2021-06-26T18:02:50
| 230,781,524
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[33]:
#import packages
import glob
import os
import imutils
import cv2
#read the images from the folder
path = "C:/Users/cjtol/CSI4106/Pocket"
images = [cv2.imread(file) for file in glob.glob(path + "/*.png")]
#process the first image (loop over `images` to handle all of them)
image = images[0]
#convert image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blur to reduce high frequency noise
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#binarize the image with a threshold
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
#thresh = cv2.adaptiveThreshold(blurred,255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 2)[1]
#get the rock
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# In[34]:
# compute the center of the contour
def get_contour(image):
for c in cnts:
M = cv2.moments(c)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
#draw contour and center of shape
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(image, "center", (cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imwrite(os.path.join(path , "output.png"),image)
#display modified image
cv2.imshow("Image", image)
cv2.waitKey(0)
# In[ ]:
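# Hypothetical driver (not in the original notebook): annotate the first image with
# its contour centroids, write it to output.png and display it.
get_contour(image)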
|
[
"gl_gd@naver.com"
] |
gl_gd@naver.com
|
cd6c93b19fecb396cb0458d2561de26a3b8b110a
|
f40ad51a600e64f12710fc4847c356a35cd0a3d2
|
/S08/oop/geometry.py
|
69181f8bd1a3113ef4a969527338183d111a722a
|
[] |
no_license
|
pymft/py-mft1
|
0aa1b854ea80e17e18c0eacc6f4dc7428a71af39
|
f4657fe17e56b6f54bdc8b1076edfc388b85cb05
|
refs/heads/master
| 2020-05-09T09:32:59.020361
| 2019-07-05T13:59:19
| 2019-07-05T13:59:19
| 181,006,072
| 1
| 5
| null | 2019-05-03T20:06:03
| 2019-04-12T12:42:38
|
Python
|
UTF-8
|
Python
| false
| false
| 940
|
py
|
import math
class Parallelogram:
def __init__(self, a, b, angle):
print("paralleogram", self.__class__)
self.a = a
self.b = b
self.angle = angle
@property
def area(self):
return self.a * self.b * math.sin(math.radians(self.angle))
@property
def perimeter(self):
return (self.a + self.b) * 2
class Diamond(Parallelogram):
def __init__(self, a, angle):
print("diamond")
super().__init__(a, a, angle)
class Rectangle(Parallelogram):
def __init__(self, w, h):
print("rect")
super().__init__(w, h, 90)
#
# class Square(Rectangle):
# def __init__(self, a):
# super().__init__(a, a)
class Square(Diamond):
def __init__(self, a):
print("square")
super().__init__(a, 90)
#
# r = Rectangle(10, 4)
# print(r.area, r.perimeter)
s = Diamond(7, 45)
print(s.area, s.perimeter)
#
# print(s, hex(id(s)))
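
# Usage sketch (not in the original): constructing a Square runs the whole chain
# Square -> Diamond -> Parallelogram, so area uses a * b * sin(angle) with angle = 90.
sq = Square(5)
print(sq.area, sq.perimeter)  # 25.0 (sin 90 deg = 1) and 20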
|
[
"naeini.v@gmail.com"
] |
naeini.v@gmail.com
|
56e49ec8b756e2762d4f46ee992731ee54be86f1
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/379.py
|
45af30e36a16a8b8f0a6a9536d9e5d1ddb753e2b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
def hasLine(squares, symbol):
for i in range(4):
if squares[i][0] == squares[i][1] == squares[i][2] == squares[i][3] == symbol:
return True
for i in range(4):
if squares[0][i] == squares[1][i] == squares[2][i] == squares[3][i] == symbol:
return True
if squares[0][0] == squares[1][1] == squares[2][2] == squares[3][3] == symbol:
return True
if squares[0][3] == squares[1][2] == squares[2][1] == squares[3][0] == symbol:
return True
return False
def hasEmpty(squares):
for i in range(4):
for j in range(4):
if squares[i][j] == '.':
return True
return False
file = open("A-large.in")
n = int(file.readline())
for case in range(n):
squares = [list(file.readline()) for i in range(4)]
file.readline()
print("Case #{:d}:".format(case+1)),
Tpos = None
for i in range(4):
if 'T' in squares[i]:
index = squares[i].index('T')
Tpos = (i, index)
break
if Tpos != None:
squares[Tpos[0]][Tpos[1]] = 'X'
if hasLine(squares, 'X'):
print("X won")
else:
if Tpos != None:
squares[Tpos[0]][Tpos[1]] = 'O'
if hasLine(squares, 'O'):
print("O won")
else:
if hasEmpty(squares):
print("Game has not completed")
else:
print("Draw")
file.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
fd7326df55080e803d9ef5dcf9ef75f5bfd70c6c
|
2c872fedcdc12c89742d10c2f1c821eed0470726
|
/pbase/day12/code/text2.py
|
416a82954f3664da8fa1e1eb23e2cb329b0f8028
|
[] |
no_license
|
zuigehulu/AID1811
|
581c3c7a37df9fa928bc632e4891fc9bafe69201
|
10cab0869875290646a9e5d815ff159d0116990e
|
refs/heads/master
| 2020-04-19T16:33:04.174841
| 2019-01-30T07:58:24
| 2019-01-30T07:58:24
| 168,307,918
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
# Write a function fun that computes the sum of the series below
# Sn = 1 + 1/1! + 1/2! + 1/3! + .... + 1/n!
# (hint: use factorial from the math module)
# Find the value of Sn when n is 20
# i.e.:
# print(fun(20)) # 2.718281828...
import math
# def sumfun(n):
# Sn = 1
# for x in range(1,n+1):
# Sn += 1/math.factorial(x)
# return Sn
# print(sumfun(20))
def sumfun(n):
s = sum(map(lambda x :1/math.factorial(x),range(n+1)))
print(s)
sumfun(20)
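
# Quick check (illustrative): the partial sum approaches e, so for n = 20 the
# difference from math.e is effectively zero.
print(abs(sum(1 / math.factorial(k) for k in range(21)) - math.e))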
|
[
"442315617@qq.com"
] |
442315617@qq.com
|
e1797abbb517a5b0d9e49e93536eb28f286dff74
|
a214e706c875e0af7221c0c9ae193d9d93ee20a7
|
/merge_pedmap.py
|
a521b00da550343f20052204c786390bad354afb
|
[] |
no_license
|
inambioinfo/bioinformatics_scripts
|
fa2292e91ad4134204a09ace27c8a91ae70fa34c
|
3a23611f382b7f3dd60e5e2abe841b84408c0d44
|
refs/heads/master
| 2020-03-20T21:17:10.163061
| 2017-03-28T23:41:39
| 2017-03-28T23:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import os
__author__ = "Raony Guimarães"
__copyright__ = "Copyright 2012, Filter Analysis"
__credits__ = ["Raony Guimarães"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Raony Guimarães"
__email__ = "raonyguimaraes@gmail.com"
__status__ = "Production"
#run example
#python gatk.py -i alignment/exome.sorted.bam
parser = OptionParser()
parser.add_option("-p", dest="p1",
help="PED File", metavar="pedfile")
parser.add_option("-q", dest="p2",
help="PED File", metavar="pedfile")
parser.add_option("-o", dest="outfile",
help="PED File", metavar="pedfile")
(options, args) = parser.parse_args()
f1 = ".".join(options.p1.split("/")[-1].split(".")[:-1])
f1 = options.p1.replace('.ped','')
f2 = ".".join(options.p2.split("/")[-1].split(".")[:-1])
f2 = options.p2.replace('.ped','')
outfile = options.outfile
plink_dir = '/projects/relatedness/plink-1.07-x86_64'
#first identify the ones to remove
command = '%s/plink --file %s --merge %s.ped %s.map --recode --out %s --noweb --geno 0' % (plink_dir, f1, f2, f2, outfile)
os.system(command)
#commando remove snps
command = 'mv %s.missnp removesnps' % (outfile)
os.system(command)
print 'remove snps in file one'
command = '%s/plink --file %s --recode --out %s.snpsless --noweb --exclude removesnps' % (plink_dir, f1, f1)
os.system(command)
print 'remove snps in file two'
command = '%s/plink --file %s --recode --out %s.snpsless --noweb --exclude removesnps' % (plink_dir, f2, f2)
os.system(command)
print 'finally merge'
command = '%s/plink --file %s.snpsless --merge %s.snpsless.ped %s.snpsless.map --recode --out %s --noweb --geno 0' % (plink_dir, f1, f2, f2, options.outfile)
os.system(command)
|
[
"raonyguimaraes@gmail.com"
] |
raonyguimaraes@gmail.com
|
dd713f3a180a0c82f82e9b6a9e9358a8c8649ab4
|
f4d78406cda8cb7e8232873dfd4d735763a36f07
|
/result/migrations/0004_auto_20170331_2017.py
|
5ac9fa86e89e90bb810b3663b4339951b7cc5e5d
|
[] |
no_license
|
pratulyab/ipu-results-bot
|
722b646a098f95e21bb12a47bcaff69d7e8a034a
|
aa000f28cad79ad49d14547203877247fae7327d
|
refs/heads/master
| 2022-07-13T18:03:39.107959
| 2019-06-24T20:45:56
| 2019-06-24T20:45:56
| 193,555,061
| 0
| 0
| null | 2022-06-21T22:12:19
| 2019-06-24T18:05:45
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-31 14:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('result', '0003_auto_20170331_1806'),
]
operations = [
migrations.RemoveField(
model_name='score',
name='verdict',
),
migrations.AddField(
model_name='score',
name='passed',
field=models.BooleanField(default=True),
),
]
|
[
"pratulyabubna@outlook.com"
] |
pratulyabubna@outlook.com
|
0b3788d4fbdbbf609b1d07cec5135630f51a7c4b
|
ed7b5c24d9a13d0c717fd6f6293f3464f43d7cbf
|
/demo/sjh_web/demo55.py
|
0dd3351eafd4b929e7b8c9d051f64ed3d14dee2a
|
[] |
no_license
|
befallenStar/python
|
ccb93d456dc161a8087a78220a7aaab21320ab8b
|
e44ce8c11b820f03fe2d60dfa84053d8cc356c80
|
refs/heads/master
| 2022-12-07T18:34:03.091146
| 2020-08-20T02:33:56
| 2020-08-20T02:33:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# -*- encoding: utf-8 -*-
import urllib3
pcUserAgent = {
    'IE-agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
'firefox-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
'chrome-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
mobileUserAgent = {
'Touch capable Windows 8 device': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0; Touch)',
'Kindle Fire': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us; Silk/1.1.0-80) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16 Silk-Accelerated=true',
'iPad': 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10',
'Samsung Galaxy S3': 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
'BlackBerry': 'BlackBerry9700/5.0.0.862 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/331 UNTRUSTED/1.0 3gpp-gba',
'iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3',
'UC standard': 'NOKIA5700/ UCWEB7.0.2.37/28/999'
}
http = urllib3.PoolManager()
r = http.request('GET', 'http://www.baidu.com/s', fields={'wd': 'hello'},
                 headers={'User-Agent': pcUserAgent['firefox-agent']})  # spoof the request headers to fool the server
print(r) # <urllib3.response.HTTPResponse object at 0x000002A0FB49EE88>
print(r.status) # 200
print(r.data.decode('utf-8'))
|
[
"sy5622_5@126.com"
] |
sy5622_5@126.com
|
572b84a3f569162ee860e6f7b20ac524c04a19b9
|
6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5
|
/katas/kyu_7/linked_lists_get_nth_node.py
|
1f2b39f86f418fb40df8cc42b845bc21a735c961
|
[
"MIT"
] |
permissive
|
mveselov/CodeWars
|
e4259194bfa018299906f42cd02b8ef4e5ab6caa
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
refs/heads/master
| 2021-06-09T04:17:10.053324
| 2017-01-08T06:36:17
| 2017-01-08T06:36:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
class Node(object):
def __init__(self, data):
self.data = data
self.next = None
def get_nth(node, index):
current = node
dex = -1
while current is not None:
dex += 1
if dex == index:
return current
current = current.next
raise Exception
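
# Usage sketch (not part of the kata): build the list 1 -> 2 -> 3 and fetch nodes.
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
print(get_nth(head, 0).data)  # 1
print(get_nth(head, 2).data)  # 3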
|
[
"the-zebulan@users.noreply.github.com"
] |
the-zebulan@users.noreply.github.com
|
d47b760098656ec22905595db57af143f04c9a99
|
b5cf99c4ed0ff18e351394ae85a91068a74dcc16
|
/libdemo/bs_demo.py
|
445ff43c30b45cdfd2a3a0e39920958a494e0f96
|
[] |
no_license
|
srikanthpragada/DEMO_PYTHON_19_NOV_2019
|
8966d218af8531c8e77accf7e2740094e2c1902f
|
ac50fdbb7de94d671e0ab5274d6aadd133b70899
|
refs/heads/master
| 2020-09-14T08:19:55.453868
| 2019-12-23T03:00:07
| 2019-12-23T03:00:07
| 223,076,035
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
from bs4 import BeautifulSoup
st = "<html><body><h1>Title1</h1><h2>Title2.1</h2><h2>Title2.2</h2></body></html>"
bs = BeautifulSoup(st, 'html.parser')
for tag in bs.find_all("h2"):
print(tag.text)
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
5a991fed1d4a0e7596274c8eb7335d9e09592e6a
|
8f5f0c3ef83fdd482387973149738f6178477a42
|
/medium/trees/next_right_pointer.py
|
e1eb4ce5802ddc9adc9779869feb56faa06352f2
|
[] |
no_license
|
nicokuzak/leetcode
|
79a5771ad83786cc7dbfd790f8fffcf1ce58794e
|
39b0235dc429a97a7cba0689d44641a6af6d7a32
|
refs/heads/main
| 2023-04-06T21:02:09.553185
| 2021-04-14T22:21:20
| 2021-04-14T22:21:20
| 336,847,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
"""You are given a perfect binary tree where all leaves are on the same level, and every parent has two children. The binary tree has the following definition:
struct Node {
int val;
Node *left;
Node *right;
Node *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Follow up:
You may only use constant extra space.
Recursive approach is fine, you may assume implicit stack space does not count as extra space for this problem.
Example 1:
Input: root = [1,2,3,4,5,6,7]
Output: [1,#,2,3,#,4,5,6,7,#]
Explanation: Given the above perfect binary tree (Figure A), your function should populate each next pointer to point to its next right node, just like in Figure B. The serialized output is in level order as connected by the next pointers, with '#' signifying the end of each level.
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
if root is None or root.left is None:
return root
root.left.next = root.right #Child left -> Child Right
if root.next: #If it is a left node that has something to the right
root.right.next = root.next.left #Child right next is parent right's left
self.connect(root.left)
self.connect(root.right)
return root
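
# Minimal local sketch (not part of the LeetCode judge): a tiny Node with a `next`
# pointer so the solution above can be exercised stand-alone.
class Node:
    def __init__(self, val, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
        self.next = None

if __name__ == "__main__":
    root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))
    Solution().connect(root)
    print(root.left.next.val)        # 3
    print(root.left.right.next.val)  # 6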
|
[
"nicokuzak95@gmail.com"
] |
nicokuzak95@gmail.com
|
5b21a4c2067e74e7ff233876453a7bbb84d6ebc6
|
3bc4b502fdb5ffecdbecc9239a0c25746dc31022
|
/Ch06/p157.py
|
9df9fb2464141935daf597c1cf1f74a731857083
|
[] |
no_license
|
pkc-3/python
|
68da873bbe7ad9a3e0db4e22ddaa412a9377720f
|
d8410d897c3784c6017f7edc215ce8763e557518
|
refs/heads/master
| 2023-05-31T06:40:30.279748
| 2021-06-10T09:00:09
| 2021-06-10T09:00:09
| 361,634,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
#example of using self
class multiply3:
    #no member variables
    #no constructor
    #member variables created/initialized dynamically
    def data(self,x,y):
        self.x = x
        self.y = y
    #multiplication operation
    def mul(self):
        result = self.x * self.y
        self.display(result) #method call
    #print the result
    def display(self, result):
        print("product = %d" % (result))
obj = multiply3() #default constructor
obj.data(10, 20)
obj.mul()
|
[
"pkc_3@naver.com"
] |
pkc_3@naver.com
|
1ad1cdf4c211d1ad2cfc0e6db523776b6a91d5d7
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_859.py
|
a64a24fccf2efc8b865aa813310e625203f34f62
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,330
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((740, 588, 378), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((934, 253, 192), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((18, 558, 379), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((553, 818, 131), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((756, 296, 36), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((816, 91, 319), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((649, 924, 860), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((341, 421, 253), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((623, 816, 736), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((88, 643, 970), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((302, 317, 967), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((635, 925, 161), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((490, 53, 130), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((933, 833, 769), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((362, 701, 371), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((190, 600, 839), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((310, 511, 365), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((295, 883, 14), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((639, 840, 123), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((761, 18, 329), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((107, 498, 442), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
d7f63dcc0bc4be0be92e1b193db9abad6b55f611
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/microsoft_intune/icon_microsoft_intune/actions/get_managed_apps/schema.py
|
950f7ebf08d7248b32cb5c69cb6007c0c35c5b04
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,712
|
py
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Returns InTune manageable apps"
class Input:
APP = "app"
class Output:
MANAGED_APPS = "managed_apps"
class GetManagedAppsInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"app": {
"type": "string",
"title": "App",
"description": "Application ID or name, if empty returns all applications",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetManagedAppsOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"managed_apps": {
"type": "array",
"title": "Managed Apps",
"description": "Application details",
"items": {
"$ref": "#/definitions/value"
},
"order": 1
}
},
"definitions": {
"value": {
"type": "object",
"title": "value",
"properties": {
"@odata.context": {
"type": "string",
"title": "Odata Context",
"description": "Odata context",
"order": 2
},
"@odata.type": {
"type": "string",
"title": "Odata Type",
"description": "Odata type",
"order": 1
},
"appAvailability": {
"type": "string",
"title": "App Availability",
"description": "App availability",
"order": 17
},
"appStoreUrl": {
"type": "string",
"title": "App Store URL",
"description": "App store URL",
"order": 20
},
"createdDateTime": {
"type": "string",
"title": "Created Datetime",
"description": "Created datetime",
"order": 8
},
"description": {
"type": "string",
"title": "Description",
"description": "Description",
"order": 5
},
"developer": {
"type": "string",
"title": "Developer",
"description": "Developer",
"order": 14
},
"displayName": {
"type": "string",
"title": "Display Name",
"description": "Display Name",
"order": 4
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 3
},
"informationUrl": {
"type": "string",
"title": "Information URL",
"description": "Information URL",
"order": 12
},
"isFeatured": {
"type": "boolean",
"title": "Is Featured",
"description": "Is featured",
"order": 10
},
"largeIcon": {
"type": "object",
"title": "Large Icon",
"description": "Large icon",
"order": 7
},
"lastModifiedDateTime": {
"type": "string",
"title": "Last Modified Datetime",
"description": "Last modified datetime",
"order": 9
},
"minimumSupportedOperatingSystem": {
"type": "object",
"title": "Minimum Supported Operating System",
"description": "Minimum supported operating system",
"order": 21
},
"notes": {
"type": "string",
"title": "Notes",
"description": "Notes",
"order": 15
},
"owner": {
"type": "string",
"title": "Owner",
"description": "Owner",
"order": 13
},
"packageId": {
"type": "string",
"title": "Package ID",
"description": "Package ID",
"order": 19
},
"privacyInformationUrl": {
"type": "string",
"title": "Privacy Information URL",
"description": "Privacy information URL",
"order": 11
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Publisher",
"order": 6
},
"publishingState": {
"type": "string",
"title": "Publishing State",
"description": "Publishing state",
"order": 16
},
"version": {
"type": "string",
"title": "Version",
"description": "Version",
"order": 18
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
d09cbbe00b827f394ca2273cd1219aa9bad0fd43
|
9c7091f82a5108261cbc3e5209f0e6df42f55530
|
/node/src/fuzzers/peach_fuzzbang.py
|
3a1d9883ba450b54c00eee87fd997b6a106f6edc
|
[
"MIT"
] |
permissive
|
hatRiot/PeachOrchard
|
881b24bdf8ceb5c1e23c989fdb612f8b70dfd192
|
cd11ab0ccbcce2349408d5c2e4b651eb99a4e9c1
|
refs/heads/master
| 2021-06-18T03:27:03.835834
| 2019-09-23T19:24:02
| 2019-09-23T19:24:02
| 23,305,215
| 46
| 26
|
MIT
| 2021-06-10T19:48:39
| 2014-08-25T07:12:20
|
Python
|
UTF-8
|
Python
| false
| false
| 3,101
|
py
|
from src.core.log import *
from src.core import config
from src.core import utility
from src.core.fuzzer import Fuzzer
from re import findall
import os
class peach_fuzzbang(Fuzzer):
""" Class implements the interface for the Peach fuzzer. This has
been tested with FuzzBang as well as regular ol' Peach.
"""
def __init__(self):
self.name = "Peach FuzzBang"
def fetch_crashes(self):
"""
"""
base = config.MONITOR_DIR + '/' + config.SESSION
crashes = {}
# build a list of files from session root
pot_files = []
for (root, subFolders, files) in os.walk(base):
for file in files:
f = os.path.join(root, file)
pot_files.append(f.replace('\\', '/'))
# massage these into our crashes dictionary
for entry in pot_files:
if '_description.txt' in entry:
# found description entry, parse it
e = entry.rsplit('/', 2)
crashes[e[1]] = entry
return crashes
def get_status(self):
""" Parse the status file and pull the latest iteration update
"""
try:
data = None
spath = config.MONITOR_DIR + '/' + config.SESSION + '/' + 'status.txt'
with open(spath) as f:
data = f.read().split('\n')
# chop it up
status = None
data = [x for x in data if len(x) > 0]
if 'Test finished' in data[:-1]:
status = 'Completed'
else:
(cidx, total) = findall("Iteration (.*?) of (.*?) :", data[-1])[0]
status = '%s/%s' % (cidx, total)
except Exception, e:
utility.msg("Failed to parse status update: %s" % e, ERROR)
status = "Error"
return status
def check_session(self):
"""
"""
valid = False
try:
if config.MONITOR_DIR and os.path.isdir(config.MONITOR_DIR):
if config.SESSION:
# validate session
if config.SESSION not in os.listdir(config.MONITOR_DIR):
utility.msg("Session %s not found in %s" % (config.SESSION, config.MONITOR_DIR))
else:
valid = True
else:
# fetch latest version
tmp = os.listdir(config.MONITOR_DIR)
if len(tmp) <= 0:
utility.msg("No running sessions found", ERROR)
valid = False
else:
config.SESSION = tmp[-1]
utility.msg("Setting session to %s" % config.SESSION, LOG)
valid = True
else:
utility.msg("Directory '%s' not found" % config.MONITOR_DIR, ERROR)
valid = False
except Exception, e:
utility.msg("Error checking session: %s" % e, ERROR)
valid = False
return valid
|
[
"shodivine@gmail.com"
] |
shodivine@gmail.com
|
9dd49b3cf82fa3b52f4bc3b9c1514bcf1c23dca0
|
63ba933a294865f65409635f62e0f1d59f725f37
|
/src/trees/isBalanced.py
|
7ecb0495d36c1aecf3938a94d2007c4730bf1f19
|
[
"CC0-1.0"
] |
permissive
|
way2arun/datastructures_algorithms
|
fc4302bdbb923ef8912a4acf75a286f2b695de2a
|
4ea4c1579c28308455be4dfa02bd45ebd88b2d0a
|
refs/heads/master
| 2021-12-07T04:34:35.732026
| 2021-09-30T12:11:32
| 2021-09-30T12:11:32
| 203,658,808
| 1
| 0
| null | 2020-08-08T15:55:09
| 2019-08-21T20:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
"""
Balanced Binary Tree
Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as:
a binary tree in which the left and right subtrees of every node differ in height by no more than 1.
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: true
Example 2:
Input: root = [1,2,2,3,3,null,null,4,4]
Output: false
Example 3:
Input: root = []
Output: true
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-104 <= Node.val <= 104
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
# Solution 1 - 48 ms
#return self.dfs(root)[1]
# Solution 2 - 28 ms
h, is_b = self.helper(root)
return is_b
def helper(self, root):
if root is None:
return 0, True
hl, lb = self.helper(root.left)
hr, rb = self.helper(root.right)
if lb and rb and abs(hl - hr) <= 1:
return max(hl, hr) + 1, True
else:
return -1, False
def dfs(self, root): # return (depth, isBalance)
if root is None:
return 0, True
leftH, leftB = self.dfs(root.left) # left height, left balance
rightH, rightB = self.dfs(root.right) # right height, right balance
return max(leftH, rightH) + 1, abs(leftH - rightH) <= 1 and leftB and rightB
# Main Call
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
solution = Solution()
print(solution.isBalanced(root))
root = TreeNode(1)
root.right = TreeNode(2)
root.left = TreeNode(2)
root.left.right = TreeNode(3)
root.left.left = TreeNode(3)
root.left.left.right = TreeNode(4)
root.left.left.left = TreeNode(4)
print(solution.isBalanced(root))
|
[
"way2aru@yahoo.com"
] |
way2aru@yahoo.com
|
64728e5c76187cf4177e6d19c48c73b797430c05
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/perso_arabic_norm/describe_splits.py
|
70788b7f48dbdc399d1fcc680fe3b99a08017009
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,067
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Collects basic stats for training and test splits from the results file.
Example:
--------
LANGUAGE=...
cat data/ngrams/results/reading/00/baselines/${LANGUAGE}.*.tsv > /tmp/${LANGUAGE}.tsv
python describe_splits.py \
--results_tsv_file /tmp/${LANGUAGE}.tsv
Dependencies:
-------------
absl
pandas
statsmodels
"""
from typing import Sequence
import logging
from absl import app
from absl import flags
import pandas as pd
import statsmodels.stats.api as sms
flags.DEFINE_string(
"results_tsv_file", "",
"Results text file in tab-separated (tsv) format.")
FLAGS = flags.FLAGS
def _to_str(stats):
"""Retrieves basic stats from the object."""
return f"mean: {stats.mean} var: {stats.var} std: {stats.std}"
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if not FLAGS.results_tsv_file:
raise app.UsageError("Specify --results_tsv_file [FILE]!")
logging.info(f"Reading metrics from {FLAGS.results_tsv_file} ...")
df = pd.read_csv(FLAGS.results_tsv_file, sep="\t", header=None)
logging.info(f"Read {df.shape[0]} samples")
num_train_toks = list(df[0]) # Token can be char or word.
train_stats = sms.DescrStatsW(num_train_toks)
logging.info(f"Train stats: {_to_str(train_stats)}")
num_test_toks = list(df[1])
test_stats = sms.DescrStatsW(num_test_toks)
logging.info(f"Test stats: {_to_str(test_stats)}")
if __name__ == "__main__":
app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
ad55a036719eab54161bb16e9344fa465842a9b0
|
003ffcf8144565404636f3d74590a8d6b10a90a4
|
/620-not-boring-movies/620-not-boring-movies.py
|
649086294562ebc1cd5148e624db643e5a39e3ab
|
[] |
no_license
|
congve1/leetcode
|
fb31edf93049e21210d73f7b3e7b9b82057e1d7a
|
ce1e802b5052da2cdb919d6d7e39eed860e0b61b
|
refs/heads/master
| 2020-05-13T19:19:58.835432
| 2019-05-06T00:44:07
| 2019-05-06T00:44:07
| 181,652,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# Write your MySQL query statement below
select id,movie,description,rating
from cinema
where description != 'boring' and id%2 = 1
order by rating DESC
|
[
"congve1@live.com"
] |
congve1@live.com
|
656400a9a3c0238586b3bc67900a8c9c266c3cfb
|
5891051796778cfb44a255248ce38789bfef9e70
|
/DjangoLearn/bgfaith/urls.py
|
2fae220197d9f146c5fbb61d9e5154182b10d282
|
[] |
no_license
|
Faithlmy/Python_base
|
cc546a5d86b123e102a69df1227cde9b6e567493
|
5a43557e6375dc9dbe5f6701d7c10e549873a5ab
|
refs/heads/master
| 2021-01-01T17:07:04.097978
| 2018-03-31T16:44:01
| 2018-03-31T16:44:01
| 98,000,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
"""bgfaith URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from bgapp.views import *
urlpatterns = [
# url(r'^admin/', include(admin.site.urls)),
url('^bgapp/', include('bgapp.urls', namespace='', app_name=''))
]
|
[
"lmengyy@126.com"
] |
lmengyy@126.com
|
4fbda2699b9145b694ef3f7a10590380ae779cad
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L54/54-23_MD_NVT_rerun/set_4.py
|
5cd6d8796cee46fdf49e2b9f80b0d39eff8896aa
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/mcl1/L54/MD_NVT_rerun/ti_one-step/54_23/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_4.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_4.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.