blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dff0faa0489696fca7561a52f1eed242eed66498 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/mixedreality/azure-mixedreality-authentication/tests/test_static_access_token_credential_async.py | f9b7171460fd4f5c33b8e663c249d8255f338ad3 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 933 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.core.credentials import AccessToken
from devtools_testutils import AzureTestCase
from azure.mixedreality.authentication._shared.aio.static_access_token_credential import StaticAccessTokenCredential
class TestAsyncStaticAccessTokenCredential:
    """Tests for the async StaticAccessTokenCredential wrapper."""
    @AzureTestCase.await_prepared_test
    async def test_get_token(self):
        """get_token() must return the exact AccessToken instance the
        credential was constructed with (here with expires_on=0)."""
        token = "My access token"
        expiration = 0
        access_token = AccessToken(token=token, expires_on=expiration)
        staticAccessToken = StaticAccessTokenCredential(access_token)
        actual = await staticAccessToken.get_token()
        assert access_token == actual
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
7944dbc113b7f3dbd221b524d325116a297c83f5 | f640fcb49bf99ebec5f34603748121fbbe9171dc | /lib_openshift/models/v1_image_change_trigger.py | b8b6b0fc4058951c8cd4fd60243032141171ce3b | [] | no_license | tbielawa/lib_openshift | bea8a11c4904a7d6c815abdd2b206de5a4cc7a93 | 34ca0f6a0c5388624a040223f29552dc4c0f8c49 | refs/heads/master | 2023-06-16T22:41:15.894021 | 2016-07-11T21:26:59 | 2016-07-11T21:26:59 | 63,156,531 | 0 | 0 | null | 2016-07-12T12:35:29 | 2016-07-12T12:35:29 | null | UTF-8 | Python | false | false | 4,909 | py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1ImageChangeTrigger(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Swagger model for an ImageChangeTrigger: a build trigger that fires when
    the referenced image stream tag is updated.
    """
    def __init__(self):
        """
        V1ImageChangeTrigger - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Attribute name -> Swagger-declared type (drives to_dict()).
        self.swagger_types = {
            'last_triggered_image_id': 'str',
            '_from': 'V1ObjectReference'
        }
        # Attribute name -> JSON key in the API definition ('from' is a
        # Python keyword, hence the '_from' attribute name).
        self.attribute_map = {
            'last_triggered_image_id': 'lastTriggeredImageID',
            '_from': 'from'
        }
        self._last_triggered_image_id = None
        self.__from = None

    @property
    def last_triggered_image_id(self):
        """
        Gets the last_triggered_image_id of this V1ImageChangeTrigger.
        LastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build

        :return: The last_triggered_image_id of this V1ImageChangeTrigger.
        :rtype: str
        """
        return self._last_triggered_image_id

    @last_triggered_image_id.setter
    def last_triggered_image_id(self, last_triggered_image_id):
        """
        Sets the last_triggered_image_id of this V1ImageChangeTrigger.
        LastTriggeredImageID is used internally by the ImageChangeController to save last used image ID for build

        :param last_triggered_image_id: The last_triggered_image_id of this V1ImageChangeTrigger.
        :type: str
        """
        self._last_triggered_image_id = last_triggered_image_id

    @property
    def _from(self):
        """
        Gets the _from of this V1ImageChangeTrigger.
        From is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.

        :return: The _from of this V1ImageChangeTrigger.
        :rtype: V1ObjectReference
        """
        return self.__from

    @_from.setter
    def _from(self, _from):
        """
        Sets the _from of this V1ImageChangeTrigger.
        From is a reference to an ImageStreamTag that will trigger a build when updated It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.

        :param _from: The _from of this V1ImageChangeTrigger.
        :type: V1ObjectReference
        """
        self.__from = _from

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # BUG FIX: comparing against an unrelated type (e.g. an int, which
        # has no __dict__) used to raise AttributeError. Returning
        # NotImplemented lets Python fall back to the other operand /
        # identity comparison, so `trigger == 42` is simply False.
        if not isinstance(other, V1ImageChangeTrigger):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"jdetiber@redhat.com"
] | jdetiber@redhat.com |
7065790d2280e02944b4499fb05d3b4af79dbde3 | ce18877752c43eb66f03bdc169e3ef45a1720d15 | /src/apps/shop/__init__.py | e929914882f6d6647c0575acdfc548c0d37f700c | [] | no_license | ajlexgit/robin | 26e8682ae09795acf0f3fc1297d20044285b83df | 25ac1c3455838fc26656cfa16d05b2943d0cbba6 | refs/heads/master | 2021-07-13T22:49:09.177207 | 2017-10-13T07:44:42 | 2017-10-13T07:44:42 | 103,655,240 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | """
Интернет-магазин с корзиной в localStorage и сессии.
Установка:
settings.py:
INSTALLED_APPS = (
...
'mptt',
'shop',
...
)
SUIT_CONFIG = {
...
{
'app': 'shop',
'icon': 'icon-shopping-cart',
'models': (
'ShopOrder',
'ShopProduct',
'ShopCategory',
'ShopConfig',
)
},
...
}
MIDDLEWARE_CLASSES = (
...
'libs.js_storage.middleware.JSStorageMiddleware',
...
'shop.middleware.CartMiddleware',
...
)
urls.py:
...
url(r'^shop/', include('shop.urls', namespace='shop')),
...
При конкретной реализации, нужно вызывать Django-сигналы
для подтверждения, оплаты и отмены заказа:
from .signals import order_confirmed
...
order_confirmed.send(sender=ShopOrder, order=order, request=request)
"""
default_app_config = 'shop.apps.Config'
| [
"pix666@ya.ru"
] | pix666@ya.ru |
8e8a7aaabbfac8bdccff57b2331e7edeaf82e5b9 | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/DistrictUserUploadTask/main.py | 21fca594a2a3b2985d5fe605991b29340f8573b3 | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import logging.config
import ConfigParser
import string
import os
import sys
import re
import getpass
from task import UploadTask
# Logging: load handler/formatter configuration from ./log.conf and use the
# "example01" logger defined there for all of this script's output.
logging.config.fileConfig("./log.conf")
logger = logging.getLogger("example01")
# 获取配置信息
def load_config():
    """Read MySQL connection settings from config.conf.

    Returns a dict of the key/value pairs found in the [mysql] section.
    Returns an empty dict if the file or section is missing, or if
    parsing fails (the error is logged).
    """
    mysql = {}
    try:
        cf = ConfigParser.ConfigParser()
        cf.read("config.conf")
        # Walk every section, but only keep the [mysql] one.
        # (The old per-iteration `task = {}` was dead code and is removed.)
        for sec in cf.sections():
            if sec == "mysql":
                for item in cf.items(sec):
                    mysql[item[0]] = item[1]
    except Exception as e:
        logger.error("加载配置文件出现异常:{}".format(e))
    return mysql
def prn_obj(obj):
    """Print every instance attribute of *obj* as "name:value", one per line."""
    lines = ['%s:%s' % pair for pair in obj.__dict__.items()]
    print('\n'.join(lines))
def main():
    """Entry point: validate file names under ./upload, verify each file's
    Excel records against MySQL, then upload them.

    Exits with status 1 on the first badly named file or on the first file
    whose records fail verification.
    """
    # Python 2: force UTF-8 as the default codec for implicit conversions.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    logger.info("准备运行工具")
    # Load MySQL connection settings from config.conf.
    mysql = load_config()
    logger.info("MySQL配置信息为 %s" % str(mysql))
    # Every upload file must match "...area_<digits>_...<ext>"; abort on the
    # first name that does not.
    upload_files = os.listdir('./upload')
    if len(upload_files) == 0:
        print("上传目录文件为空,请先放置上传文件")
        return
    for filename in upload_files:
        print(filename)
        if not re.match(".*area_(\d+)_.*\..*", filename):
            print("文件名不符合规范. %s" % filename)
            # BUG FIX: os.exit() does not exist (AttributeError at runtime);
            # sys.exit(1) is the intended behavior.
            sys.exit(1)
    # Prompt for the database password (never stored in config.conf).
    password = getpass.getpass("请输入MySQL用户%s密码:" % mysql["username"])
    mysql["password"] = password
    # First pass: verify every file's records before touching the database.
    logger.info("核对Excel记录是否正确")
    for filename in upload_files:
        task = UploadTask(filename, mysql, logger)
        if not task.verify_records():
            print("上传文件中内容格式有误,请按提示修改后,再执行操作")
            # BUG FIX: was os.exit(1), which does not exist.
            sys.exit(1)
    # Second pass: all files verified, perform the actual upload.
    logger.info("准备写入Excel记录到MySQL上")
    for filename in upload_files:
        task = UploadTask(filename, mysql, logger)
        task.upload()
    logger.info("上传完毕")
if __name__ == "__main__":
main() | [
"36821277@qq.com"
] | 36821277@qq.com |
e478a8dd22f338846584b48de48cc352e36990e4 | a574d0c0ebc8e17eb641777f93544c0ae43850c9 | /final_problem_set/3_blackjack.py | 0cf5d30adb0ea1093e5eedc11666ee8dcdb2ceff | [] | no_license | broepke/GTx | 1e33c97d0f86e95124ceb5f0436f965154822466 | e12143c9b1fc93d4489eb0f6c093637503139bf6 | refs/heads/master | 2020-04-08T09:35:41.884572 | 2020-01-03T03:37:34 | 2020-01-03T03:37:34 | 159,230,824 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,883 | py | # -----------------------------------------------------------
# In this problem, we're going to explore a little of how
# game AI works. We'll do this with a simple problem: building
# an agent to play the popular card game Blackjack.
#
# Blackjack is a card game played with a standard 52-card
# deck. Suits do not matter in Blackjack, and so we'll just
# use letters to indicate the different cards: A, 2, 3, 4, 5,
# 6, 7, 8, 9, 10, J, Q, K.
#
# The goal of Blackjack is to get as close to 21 points as
# possible without going higher. Each of the thirteen cards
# above has a point total attached: the numerals are worth
# their given value (2 points for 2, 7 points for 7, etc.).
# J, Q, and K are worth 10 points. A is worth either 1 or 11
# points, whichever is better for the player.
#
# At any time, the player has some number of cards in their
# hand. They must then make a decision of whether to Hit or
# Stay. Hit means they request an additional card, Stay means
# they stop with their current total. Players generally try
# to Hit until it is likely that another card will push them
# over 21. For example, if a player has a 5 and a 7, there is
# a relatively low chance that another card would push them
# over 21 (only J, Q, and K would do so, since 12 + 10 = 22).
# On the other hand, if they have a 5, a 6, and a 7, they will
# likely stay because any card above 3 will push them over 21
# points.
#
# The specific goal in Blackjack is to get closer to 21 than
# the dealer. Dealers must follow a set of prescribed rules
# for when to Hit and Stay. These are the rules we'll use for
# our Blackjack-playing AI.
#
# The rules are:
#
# - The dealer must Hit if their total is below 17.
# - The dealer must Stay as soon as their total is 17 or
# higher.
# - An Ace (A) should be counted as 11 if it puts the
# dealer between 17 and 21 points. If it puts them over
# 21, though, it should be counted as 1.
#
# For example, imagine the dealer's first cards are A and 3.
# Their point total is either 4 or 14, both below 17, so they
# Hit. The next card is a 9. If we count the A as 11, then
# their total is now 23 (11 + 3 + 9), and so we count the
# A as 1. Their total is 13, and so they Hit again. The next
# card is a 7, so their total is 20, so they Stay.
#
# Write a function called next_move. next_move should have
# one parameter, a string. Each character of the string will
# be a card in the dealer's current hand, such as "AK" or
# "175". The function should return one of three strings:
#
# - "Hit" if the dealer should take another card.
# - "Stay" if the dealer should not take another card.
# - "Bust" if the sum is already over 21.
#
# Remember, your function is only responsible for playing
# one move at a time. Take in a string representing the
# current hand, return "Hit", "Stay", or "Bust".
# Add your code here!
def next_move(cards):
    """Decide the dealer's next Blackjack move for the given hand.

    cards: string in which each character is one card ('2'-'9' count face
    value, 'J'/'Q'/'K' count 10, 'A' counts 11 when that keeps the total
    at 21 or below, otherwise 1).

    Returns "Hit" (total < 17), "Stay" (17 <= total <= 21), or
    "Bust" (total > 21).
    """
    face_cards = ["J", "Q", "K"]
    total = 0
    ace_count = 0
    # Sum the non-ace cards first, so each ace can be valued with full
    # knowledge of the rest of the hand.
    for card in cards:
        if card.isdigit():
            total += int(card)
        elif card in face_cards:
            total += 10
        elif card == "A":
            ace_count += 1
    # Value each ace as 11 when that does not bust the hand, else as 1.
    # At most one ace can ever count as 11 (11 + 11 > 21).
    for _ in range(ace_count):
        if total + 11 > 21:
            total += 1
        else:
            total += 11
    # Dealer rules: Hit below 17, Stay on 17-21, Bust above 21.
    # (The old `elif 16 <= total <= 21` boundary was dead — 16 is caught by
    # the Hit branch — and the trailing `return total` was unreachable.)
    if total < 17:
        return "Hit"
    elif total <= 21:
        return "Stay"
    else:
        return "Bust"
# Below are some lines of code that will test your function.
# You can change the value of the variable(s) to test your
# function with different inputs.
#
# If your function works correctly, this will originally
# print: Hit, Hit, Stay, and Bust.
# NOTE(review): only one of the four sample calls is active below, so this
# run prints just "Hit"; add calls such as "AK", "89", "999" to see the rest.
print(next_move("A39"))
| [
"broepke@gmail.com"
] | broepke@gmail.com |
b0c462aa3ac6c9a36bdd6a3d58d5bea208e8680a | 47dd9eadf5d2c7421eeb0c39d1ff0bad440ac513 | /handprint/__version__.py | f6beec44457f0f8b28ee2024b1f5b6b6faa9147e | [
"CC-BY-3.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JanaldoChen/handprint | 53a26f9195e96f1a649dc2c026c85f85d66ce28e | 0bc155b65d222fae8e753f75394a6f203d37e47a | refs/heads/master | 2020-05-02T10:09:46.746419 | 2018-12-22T01:48:09 | 2018-12-22T01:48:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | # =============================================================================
# @file __version__.py
# @brief handprint version info
# @author Michael Hucka <mhucka@caltech.edu>
# @license Please see the file named LICENSE in the project directory
# @website https://github.com/caltechlibrary/handprint
# =============================================================================
# Package metadata constants for the handprint distribution.
__version__ = '0.9.0'
__title__ = 'handprint'
__description__ = 'Handprint: HANDwritten Page RecognitIoN Test for Caltech Archives'
__url__ = 'https://github.com/caltechlibrary/handprint'
__author__ = 'Michael Hucka <mhucka@caltech.edu>'
__email__ = 'helpdesk@library.caltech.edu'
__license__ = 'BSD 3-clause license -- see LICENSE file'
__copyright__ = 'Copyright (C) 2018 by the California Institute of Technology'
| [
"mhucka@caltech.edu"
] | mhucka@caltech.edu |
970f9a3a0c4bb549735c394ed87a95815931f452 | f29336e5442d59961e22f925b3e6ee96ed3bf0ca | /src/mnemotopy/middleware/locale.py | 8f090b2d5fe154c0ff33927932ce488927e364f7 | [] | no_license | louiseGrandjonc/mnemotopy | be00b304924689ecd1cfd714da8416aaac55466c | 7744745a7fcba7b5721577ce18c654df48aaa1ba | refs/heads/master | 2022-12-10T07:32:47.100612 | 2020-01-02T10:01:03 | 2020-01-02T10:01:03 | 84,743,189 | 1 | 1 | null | 2022-12-08T03:22:03 | 2017-03-12T17:18:10 | JavaScript | UTF-8 | Python | false | false | 2,327 | py | from django.conf import settings
from django.conf.urls.i18n import is_language_prefix_patterns_used
from django.http import HttpResponseRedirect
from django.urls import get_script_prefix, is_valid_path
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.middleware.locale import LocaleMiddleware
class CustomLocaleMiddleware(LocaleMiddleware):
    """Locale middleware that activates the request language and, on the
    response side, redirects to a language-prefixed URL only when the
    current path has no language prefix AND the active language is not
    settings.LANGUAGE_CODE.

    NOTE(review): this appears to diverge from Django's stock
    LocaleMiddleware redirect behavior on purpose (the default language
    stays unprefixed) — confirm against the project's URL scheme.
    """
    def process_request(self, request):
        """Detect and activate the request's language.

        The URL path is only consulted for the language when the urlconf
        actually uses i18n_patterns.
        """
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf)
        language = translation.get_language_from_request(request, check_path=i18n_patterns_used)
        translation.activate(language)
        request.LANGUAGE_CODE = translation.get_language()
    def process_response(self, request, response):
        """Redirect unprefixed paths to their language-prefixed form (for
        non-default languages) and set Vary / Content-Language headers."""
        language = translation.get_language()
        language_from_path = translation.get_language_from_path(request.path_info)
        urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
        i18n_patterns_used, prefixed_default_language = is_language_prefix_patterns_used(urlconf)
        if not language_from_path and language != settings.LANGUAGE_CODE:
            # Try the same path with "/<language>" prepended; also accept it
            # if only a trailing slash is missing (and APPEND_SLASH is on).
            language_path = '/%s%s' % (language, request.path_info)
            path_valid = is_valid_path(language_path, urlconf)
            path_needs_slash = (
                not path_valid and (
                    settings.APPEND_SLASH and not language_path.endswith('/') and
                    is_valid_path('%s/' % language_path, urlconf)
                )
            )
            if path_valid or path_needs_slash:
                script_prefix = get_script_prefix()
                # Insert language after the script prefix and before the
                # rest of the URL
                language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
                    script_prefix,
                    '%s%s/' % (script_prefix, language),
                    1
                )
                return self.response_redirect_class(language_url)
        if not (i18n_patterns_used and language_from_path):
            # Response content depends on Accept-Language; tell caches so.
            patch_vary_headers(response, ('Accept-Language',))
        if 'Content-Language' not in response:
            response['Content-Language'] = language
        return response
| [
"louve.grandjonc@gmail.com"
] | louve.grandjonc@gmail.com |
5ffb6b6dd9d960be90008d1703cfd4f5baa5923c | 915c31ce84a826d225bcb1cc5f1e0323e712f6e4 | /phase_1.py | 23d9a60800f43eabfa859f9270499bce8902d0f7 | [
"Apache-2.0"
] | permissive | mac389/overdosed | 64162aaf8f57f7ca57bcc95678d0d18e231cda87 | 434255db4ea36581c9f94c7aa09ca6ca15169e8a | refs/heads/master | 2021-01-10T07:44:41.804936 | 2015-06-25T23:22:51 | 2015-06-25T23:22:51 | 36,990,551 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | import os, gzip, json,tokenize,token
import nltk, csv
from nltk.corpus import stopwords
import utils as tech
import matplotlib.pyplot as plt
from pprint import pprint
#Rule-in component
# Corpus locations. NOTE(review): CONTROL is defined but only the 'case'
# corpus is placed in the dict below — controls are currently skipped.
CONTROL = os.path.join(os.getcwd(), 'data', 'control')
CASE = os.path.join(os.getcwd(), 'data', 'case')
corpus = {'case': CASE}
text = {}
# For each corpus: build data/<condition>/combined.txt from the .gz tweet
# dumps on the first run; on later runs just load the combined file.
for condition, path in corpus.iteritems():
    if not os.path.isfile(os.path.join(path, 'combined.txt')):
        for filename in os.listdir(path):
            if filename.endswith('.gz'):
                '''
                Salvaging attempt, this will only pull one tweet text and id from each file.
                This will hurt controls more than case tweets, which is ok.
                '''
                # Append "<id>\t <text>" (one tweet per .gz file) to combined.txt.
                with gzip.open(os.path.join(path, filename), 'rb') as fid, open(os.path.join(path, 'combined.txt'), 'a+') as outfile:
                    print>>outfile, '\t '.join(tech.get_field_damaged_string(fid.read()))
    else:
        # Cached path: one tweet record per line.
        text[condition] = open(os.path.join(path, 'combined.txt'), 'rb').read().splitlines()
#reddit and r/Drugs in particular another good source of information
#Identify most common words that are not stopwords in case series
| [
"mac389@gmail.com"
] | mac389@gmail.com |
1e543f53fd9bfe1768f6b104c50eb5b05ea71eaf | be1907ef4b4eb56567d8df5703a98a7a6a34c88a | /modules/bsg/bsg_encoder.py | 75081a8fa6698841440e75fcde77b8df308846f4 | [] | no_license | griff4692/LMC | f9bd4803c5b37bbae98cbe5014a719d3c9ea7a2f | f07dfa472d3f6bfd7ce7f7ac7168687beb8efdaf | refs/heads/master | 2023-01-30T00:37:53.505757 | 2020-12-08T17:35:48 | 2020-12-08T17:35:48 | 223,016,633 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,448 | py | import os
import sys
import torch
from torch import nn
import torch.utils.data
home_dir = os.path.expanduser('~/LMC/')
sys.path.insert(0, os.path.join(home_dir, 'utils'))
from compute_utils import compute_att
class BSGEncoder(nn.Module):
    """
    BSG Encoder as described in original BSG paper. It accepts a center word and context words
    and outputs Gaussian parameters mu and sigma.
    Its parameters are the shared variational parameters for the distribution q(z|w, c)
    where z represents latent meaning, w the center word, and c the list of context tokens
    """
    def __init__(self, vocab_size, input_dim=100, hidden_dim=64):
        super(BSGEncoder, self).__init__()
        # Shared word embeddings; index 0 is the padding token.
        self.embeddings = nn.Embedding(vocab_size, input_dim, padding_idx=0)
        self.dropout = nn.Dropout(0.2)
        # BiLSTM input is [center ; context] embeddings concatenated,
        # hence input_dim * 2.
        self.lstm = nn.LSTM(input_dim * 2, hidden_dim, bidirectional=True, batch_first=True)
        # Attention scorer over the BiLSTM states (used via compute_att).
        self.att = nn.Linear(hidden_dim * 2, 1, bias=True)
        # u -> Gaussian mean head; v -> scalar head exponentiated into the
        # variance in forward().
        self.u = nn.Linear(hidden_dim * 2, input_dim, bias=True)
        self.v = nn.Linear(hidden_dim * 2, 1, bias=True)
    def forward(self, center_ids, context_ids, mask, token_mask_p=0.2):
        """
        :param center_ids: LongTensor of batch_size
        :param context_ids: LongTensor of batch_size x 2 * context_window
        :param mask: BoolTensor of batch_size x 2 * context_window (which context_ids are just the padding idx)
        :param token_mask_p: probability of randomly masking each context
            position (word-dropout style regularization); None disables it.
        :return: mu (batch_size, latent_dim), var (batch_size, 1)
        """
        batch_size, num_context_ids = context_ids.shape
        center_embedding = self.embeddings(center_ids)
        if token_mask_p is not None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            # NOTE(review): masked_fill_ mutates the caller's `mask` tensor
            # in place, and this random masking runs whenever token_mask_p
            # is not None — also in eval mode. Confirm both are intended.
            context_mask = torch.FloatTensor(batch_size, num_context_ids).uniform_().to(device) < token_mask_p
            mask.masked_fill_(context_mask, True)
        context_embedding = self.embeddings(context_ids)
        # Tile the center embedding so it pairs with every context token.
        center_embedding_tiled = center_embedding.unsqueeze(1).repeat(1, num_context_ids, 1)
        merged_embeds = torch.cat([center_embedding_tiled, context_embedding], dim=-1)
        merged_embeds = self.dropout(merged_embeds)
        # Zero out embeddings at padded / randomly-dropped positions.
        mask_tiled = mask.unsqueeze(-1).repeat(1, 1, merged_embeds.size()[-1])
        merged_embeds.masked_fill_(mask_tiled, 0)
        h_reps, (h, _) = self.lstm(merged_embeds)
        # Attention-pool the BiLSTM states (compute_att is a project helper),
        # then map the pooled state to the Gaussian parameters.
        h_sum = self.dropout(compute_att(h_reps, mask, self.att))
        return self.u(h_sum), self.v(h_sum).exp()
| [
"griff4692@gmail.com"
] | griff4692@gmail.com |
c56aaf008716622e25169ac587aa548837eeda2e | 3e5cc67b0c8336114110d1558d481a3ff5d0eb3c | /apps/h5/views.py | f565ab6e6efb0570e5fea943a59e3da852b45b65 | [] | no_license | xiaoxiaolulu/mtserver | 32b46488705bc2c962d60c288c482f81dca0a2b0 | 912222bbf1cd7c9bd242e96062570073b6544f6a | refs/heads/master | 2021-02-22T07:42:57.579124 | 2020-03-10T17:50:28 | 2020-03-10T17:50:28 | 245,372,121 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,677 | py | import random
from rest_framework import views
from django.contrib.auth import get_user_model
from apps.h5.serializers import LoginSerializer
from apps.h5.throttles import SMSCodeRateThrottle
from apps.meituan.models import Merchant
from apps.meituan.serializers import MerchantSerializer
from apps.mtauth.authentications import generate_jwt
from apps.mtauth.serializers import UserSerializer
from utils.CCPSDK import CCPRestSDK
from rest_framework.response import Response
from rest_framework import status
from django.core.cache import cache
from django.utils.timezone import now
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.pagination import PageNumberPagination
from rest_framework import generics
from rest_framework import filters
User = get_user_model()
class SmSCodeView(views.APIView):
    """Send a 4-digit SMS verification code to ?tel=<number> and cache it
    for 5 minutes under the phone number (rate-limited per SMSCodeRateThrottle)."""
    throttle_classes = [SMSCodeRateThrottle]
    def __init__(self, *args, **kwargs):
        super(SmSCodeView, self).__init__(*args, **kwargs)
        # Digit pool sampled by generate_sms_code().
        self.number = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    def generate_sms_code(self):
        # random.choices samples with replacement -> 4-digit numeric code.
        return "".join(random.choices(self.number, k=4))
    def get(self, request):
        telephone = request.GET.get('tel')
        if telephone:
            # SECURITY(review): live SMS-provider credentials are hard-coded
            # here — move them to settings / environment variables.
            auth_token = 'a2573d4b2d9a4136b23cc54911a999b7'
            auth_sid = '8aaf070870bf34550170bf6075260039'
            app_id = '8aaf070870bf34550170bf607590003f'
            rest = CCPRestSDK.REST(auth_sid, auth_token, app_id)
            code = self.generate_sms_code()
            # Template SMS args: [code, validity-in-minutes], template id "1".
            # `result` is unused while the debug return below is active.
            result = rest.sendTemplateSMS(telephone, [code, 5], "1")
            # Cache the code under the phone number for 5 minutes.
            cache.set(telephone, code, 60 * 5)
            # NOTE(review): returning the code in the response defeats SMS
            # verification — looks like debug code; the commented-out branch
            # below appears to be the intended production response.
            return Response({"code": code})
            # if result['statusCode'] == '000000':
            #     return Response("success")
            # else:
            #     return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        else:
            return Response(status=status.HTTP_400_BAD_REQUEST)
class LoginView(views.APIView):
    """Phone-number login: look up the user for the submitted telephone,
    creating an account on first login, and return the serialized user
    plus a JWT."""
    def generate_sms_code(self):
        """Return a random 6-digit string (used as a new-user name suffix)."""
        number = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
        return "".join(random.choices(number, k=6))
    def post(self, request):
        serializer = LoginSerializer(data=request.data)
        if serializer.is_valid():
            telephone = serializer.validated_data.get('telephone')
            try:
                user = User.objects.get(telephone=telephone)
                user.last_login = now()
                user.save()
            # BUG FIX: was a bare `except:`, which also swallowed database
            # errors / MultipleObjectsReturned and silently created extra
            # accounts. Only a missing user should trigger account creation.
            except User.DoesNotExist:
                # First login for this phone number: create the account.
                # NOTE(review): password is stored empty — confirm these
                # accounts are meant to authenticate by SMS only.
                username = "美团用户" + self.generate_sms_code()
                password = ""
                user = User.objects.create(username=username, password=password, telephone=telephone, last_login=now())
            serializer = UserSerializer(user)
            token = generate_jwt(user)
            return Response({"user": serializer.data, "token": token})
        else:
            return Response(data={"message": dict(serializer.errors)}, status=status.HTTP_400_BAD_REQUEST)
class MerchantPagination(PageNumberPagination):
    """Paginate merchants 10 per request; page selected via ?page=<n>."""
    page_size = 10
    page_query_param = 'page'
class MerchantViewSet(
    viewsets.GenericViewSet,
    mixins.ListModelMixin,
    mixins.RetrieveModelMixin
):
    """Read-only merchant API: list and retrieve only (no write mixins)."""
    queryset = Merchant.objects.all()
    serializer_class = MerchantSerializer
    pagination_class = MerchantPagination
class MerchantSearchView(generics.ListAPIView):
    """Merchant search endpoint: the query arrives as ?q=<term> and matches
    merchant name, category name, or goods name (per search_fields)."""
    class MerchantSearchFilter(filters.SearchFilter):
        # Override DRF's default ?search= query parameter with ?q=.
        search_param = 'q'
    queryset = Merchant.objects.all()
    serializer_class = MerchantSerializer
    filter_backends = [MerchantSearchFilter]
    search_fields = ['name', 'categories__name', 'categories__goods_list__name']
| [
"546464268@qq.com"
] | 546464268@qq.com |
5d2eb91da355a9b16280bef5d63c1395eae24179 | f421d6ae91120184a7ce1b82e779255fd64fdf85 | /structs/models/conv.py | b8a0b86846e3b9280b45ce61e3af9572f72dbada | [] | no_license | landjbs/MCTS | 54f8a979878ad3ca895e6ff151bd3c872e96859c | db6f490cb3c27b6ffc31b48350ce63c68b59315b | refs/heads/master | 2020-11-27T05:23:09.726422 | 2019-12-27T19:44:01 | 2019-12-27T19:44:01 | 229,320,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | import torch
import torch.nn as nn
class Conv(nn.Module):
    '''
    The Conv model is tasked with predicting a vector (p) of move
    probabilities across the available moves and a scalar (v) of win
    probability at the current state. To avoid shape issues, p always has
    length 8. The input board is a tensor of shape
    (batch, 4, boardSize, boardSize) where the channel order has indices
    (0-shots, 1-walls, 2-enemies, 3-player).
    '''
    def __init__(self, lr, boardSize=20):
        super(Conv, self).__init__()
        # Two 1x1 convolutions: spatial size stays boardSize x boardSize
        # while channels go 4 -> 20 -> 20.
        self.conv1 = nn.Sequential(
            nn.Conv2d(4, 20, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        ) #nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Sequential(
            nn.Conv2d(20, 20, kernel_size=1, stride=1, padding=0),
            nn.ReLU()
        ) #nn.MaxPool2d(kernel_size=2, stride=2)
        self.dropout = nn.Dropout(p=0.3)
        # BUG FIX: the flattened conv output has 20 * boardSize * boardSize
        # features (1x1 convs preserve spatial dims and no pooling is
        # active); the previous hard-coded 3 * 3 * 20 only matched a 3x3
        # board and crashed for the default boardSize of 20. This also makes
        # the boardSize parameter actually take effect.
        self.lin1 = nn.Linear(20 * boardSize * boardSize, 1000)
        self.pLin = nn.Linear(1000, 8)
        self.soft = nn.Softmax(dim=1)
        self.vLin = nn.Linear(1000, 1)
        self.sig = nn.Sigmoid()
        # optimizers and loss
        self.optim = torch.optim.Adam(self.parameters(), lr=lr)
        # self.pCriterion = nn.CrossEntropyLoss()
        self.vCriterion = nn.BCELoss()

    def pCriterion(self, p, target):
        """Negative log-likelihood of the target move index under p
        (assumes a batch of one, consistent with the rest of this class)."""
        pC = p[0, target]
        pLog = torch.log(pC)
        loss = -(pLog)
        return loss

    def forward(self, boardTensor):
        """Return (p, v): move probabilities of shape (batch, 8) that sum
        to 1 per row, and a win probability in [0, 1]."""
        convOut = self.conv1(boardTensor)
        convOut = self.conv2(convOut)
        convOut = convOut.reshape(convOut.size(0), -1)
        convOut = self.dropout(convOut)
        linOut = self.lin1(convOut)
        p = self.soft(self.pLin(linOut))
        v = self.sig(self.vLin(linOut))[0]
        return p, v

    def eval_and_prop(self, pX, vX, pY, vY):
        """Combined policy + value loss, followed by one optimizer step."""
        pLoss = self.pCriterion(pX, pY)
        vLoss = self.vCriterion(vX, torch.tensor([vY], dtype=torch.float))
        loss = pLoss + vLoss
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        return loss

    def train_step(self, x, yP, yV):
        """Forward pass on x followed by one backprop step toward (yP, yV)."""
        p, v = self(x)
        return self.eval_and_prop(p, v, yP, yV)
| [
"landjbs@gmail.com"
] | landjbs@gmail.com |
def word_counter(string):
    """Return the number of whitespace-separated words in *string*.

    Returns 0 for the empty string (and for whitespace-only strings).
    """
    # str.split() with no argument splits on runs of whitespace and yields
    # [] for an empty/whitespace-only input, so len() is the word count
    # directly. The old version looped over every CHARACTER of the string
    # and re-split inside the loop, which (depending on where its return
    # sat) either over-counted or returned None for "".
    return len(string.split())
print(word_counter("Hello world") )# returns 2
print(word_counter("This is a sentence")) # returns 4
print(word_counter("")) # returns 0
| [
"benaimjacob@gmail.com"
] | benaimjacob@gmail.com |
b2524bfa2f3066ef2a19eac93a3434a6ce088a9b | 223b5e108951378f9de33258d2602fc01c17c4cb | /django16/project_name/settings.py | 3e07f544761d6b0d1b95bea8f4090296f58955c3 | [
"MIT"
] | permissive | barscka/django-project-template | 55934fb14f665aae5accd0dfbef23ed7efa0c3ba | 2515d7a4f0964feb8aef2f340db4aa2f820c1d87 | refs/heads/master | 2021-05-31T09:06:17.254535 | 2015-07-28T00:14:10 | 2015-07-28T00:14:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,571 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# NOTE(review): because an import precedes it, the string above is not the
# module docstring — it is an ordinary (harmless) expression statement.
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# '{{ secret_key }}' is a project-template placeholder filled in by
# `django-admin startproject`.
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# NOTE(review): '*' accepts any Host header — fine for a dev template, but
# must be narrowed before any production deployment.
ALLOWED_HOSTS = [
    '*',
]
# Application definition
INSTALLED_APPS = (
    # django core
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third party apps
    # local apps
    # south in the end
    'south',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# =============================================================================
# Django Local settings here
# =============================================================================
ADMINS = (
    ('John Doe', 'johndoe@email.com'),
)
# Development email backend: writes outgoing mail to the console instead of
# sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# =============================================================================
# {{ project_name }} settings here
# =============================================================================
SITE_NAME = '{{ project_name }}'
SITE_DOMAIN = '{{ project_name }}.com'
# =============================================================================
# Load settings_local.py if exists
# =============================================================================
try:
from .settings_local import *
except ImportError:
pass
| [
"allisson@gmail.com"
] | allisson@gmail.com |
4fac3d9ddcb200a86ec0789f848a9079e75c1994 | 0c452a191d35a26499abec71854f8a8cdb1efc68 | /test/unit/backends/msbuild/test_writer.py | 81b41a280bbd23b9d969441b074964552239bd10 | [
"BSD-3-Clause"
] | permissive | luc3001/bfg9000 | fe609d64d7b605fef0ffb375873729c8cf0bd641 | 41452e9dd12f1a44bae68d3bf44f362d283e6802 | refs/heads/master | 2020-09-15T17:10:52.434281 | 2019-11-22T04:33:26 | 2019-11-22T04:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | import mock
from ... import *
from bfg9000.backends.msbuild.writer import version
from bfg9000.versioning import Version
def mock_bad_which(*args, **kwargs):
    """Stand-in for ``bfg9000.shell.which`` that always raises IOError,
    simulating the MSBuild executable not being found."""
    raise IOError()
def mock_bad_execute(*args, **kwargs):
    """Stand-in for ``bfg9000.shell.execute`` that always raises OSError,
    simulating a failure to run the located command."""
    raise OSError()
class TestMsBuildVersion(TestCase):
    """Tests for ``msbuild.writer.version()``: it locates MSBuild via
    ``shell.which``, runs it via ``shell.execute``, and parses the
    reported banner into a ``Version`` (or ``None`` on any failure)."""
    def test_good(self):
        # A parsable "MSBuild <x.y>" banner yields a Version object.
        with mock.patch('bfg9000.shell.which', return_value=['command']), \
             mock.patch('bfg9000.shell.execute',
                        return_value='MSBuild 1.23'): # noqa
            self.assertEqual(version({}), Version('1.23'))
    def test_unrecognized_version(self):
        # A banner without a version number maps to None, not an error.
        with mock.patch('bfg9000.shell.which', return_value=['command']), \
             mock.patch('bfg9000.shell.execute',
                        return_value='MSBuild'): # noqa
            self.assertEqual(version({}), None)
    def test_not_found(self):
        # which() raising IOError (executable missing) also yields None.
        with mock.patch('bfg9000.shell.which', mock_bad_which):
            self.assertEqual(version({}), None)
    def test_bad_execute(self):
        # Execution failure (OSError) is likewise reported as None.
        with mock.patch('bfg9000.shell.which', return_value=['command']), \
             mock.patch('bfg9000.shell.execute', mock_bad_execute): # noqa
            self.assertEqual(version({}), None)
| [
"itsjimporter@gmail.com"
] | itsjimporter@gmail.com |
64ea2ea9564dcd0bab25d5cf5c5a4115f4769d93 | a81c1492783e7cafcaf7da5f0402d2d283b7ce37 | /google/ads/google_ads/v6/proto/enums/shared_set_status_pb2.py | 6b94727fd3b8dc33bf74d4a95e08d59671258bef | [
"Apache-2.0"
] | permissive | VincentFritzsche/google-ads-python | 6650cf426b34392d1f58fb912cb3fc25b848e766 | 969eff5b6c3cec59d21191fa178cffb6270074c3 | refs/heads/master | 2023-03-19T17:23:26.959021 | 2021-03-18T18:18:38 | 2021-03-18T18:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,087 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/enums/shared_set_status.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/enums/shared_set_status.proto',
package='google.ads.googleads.v6.enums',
syntax='proto3',
serialized_options=b'\n!com.google.ads.googleads.v6.enumsB\024SharedSetStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V6.Enums\312\002\035Google\\Ads\\GoogleAds\\V6\\Enums\352\002!Google::Ads::GoogleAds::V6::Enums',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/ads/googleads/v6/enums/shared_set_status.proto\x12\x1dgoogle.ads.googleads.v6.enums\x1a\x1cgoogle/api/annotations.proto\"`\n\x13SharedSetStatusEnum\"I\n\x0fSharedSetStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x0b\n\x07\x45NABLED\x10\x02\x12\x0b\n\x07REMOVED\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v6.enumsB\x14SharedSetStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v6/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V6.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V6\\Enums\xea\x02!Google::Ads::GoogleAds::V6::Enumsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS = _descriptor.EnumDescriptor(
name='SharedSetStatus',
full_name='google.ads.googleads.v6.enums.SharedSetStatusEnum.SharedSetStatus',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENABLED', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REMOVED', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=141,
serialized_end=214,
)
_sym_db.RegisterEnumDescriptor(_SHAREDSETSTATUSENUM_SHAREDSETSTATUS)
_SHAREDSETSTATUSENUM = _descriptor.Descriptor(
name='SharedSetStatusEnum',
full_name='google.ads.googleads.v6.enums.SharedSetStatusEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=214,
)
_SHAREDSETSTATUSENUM_SHAREDSETSTATUS.containing_type = _SHAREDSETSTATUSENUM
DESCRIPTOR.message_types_by_name['SharedSetStatusEnum'] = _SHAREDSETSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SharedSetStatusEnum = _reflection.GeneratedProtocolMessageType('SharedSetStatusEnum', (_message.Message,), {
'DESCRIPTOR' : _SHAREDSETSTATUSENUM,
'__module__' : 'google.ads.googleads.v6.enums.shared_set_status_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.enums.SharedSetStatusEnum)
})
_sym_db.RegisterMessage(SharedSetStatusEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"noreply@github.com"
] | VincentFritzsche.noreply@github.com |
1522d5404ed627f0931f89bb5684acad594c31df | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/client_20210614185239.py | 0b152bdb8714864bface6716198073ed81196b44 | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 5,029 | py | import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
    """Send one calculator request (e.g. "calc 2 + 6") to the UDP server,
    print each reply payload, and ACK until the server sets its FIN bit.

    NOTE(review): the wire format is whatever tcppacket.TCPPacket packs;
    replies are read back here with header layout struct '!HHLLBBH'.
    """
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    oldmsg = msg  # keep the original request text for the result printout
    tcp = tcppacket.TCPPacket(data=msg)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port))
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port) # Sending message to UDP server
    while True:
        data, address = sock.recvfrom(512*1024)
        sock.connect(address)
        # Split the fixed-size header off; the rest is the UTF-8 payload.
        s = struct.calcsize('!HHLLBBH')
        unpackdata = struct.unpack('!HHLLBBH', data[:s])
        msg = data[s:].decode('utf-8')
        print(oldmsg,"is", msg)
        # unpackdata[5] carries flag bits; the low bit is treated as FIN
        # (per the fin_falg name) -- when set, send one last ACK and stop.
        if(unpackdata[5] % 2):
            # fin_falg
            fin_falg = 1
        else:
            fin_falg = 0
        tcp = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_falg)
        tcp.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        sock.sendto(tcp.raw, address)
        if(fin_falg):
            break
def init_new_videoreq_req(i):
    """Request "video 1" from the UDP server, reassemble the chunked
    payload in order, and write it to "<i+1>received.mp4".

    Uses sequence numbers: a chunk is accepted only when its seq (raw[2])
    matches the expected ack_seq; a cumulative ACK is sent after every 3
    accepted chunks (counter == 3) or immediately on error/FIN.
    NOTE(review): raw[7] == 0 appears to mean "no error" -- the else
    branch prints an ERROR message and aborts; confirm against the server.
    """
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    msg = "video 1".encode('utf-8')
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port)
    tcp = tcppacket.TCPPacket(data=msg)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
    recvdata = b''   # reassembled file bytes
    ack_seq = 0      # next chunk sequence number we expect
    seq = 0          # our own outgoing ACK sequence number
    counter = 0      # accepted chunks since the last ACK
    while True:
        data, address = sock.recvfrom(512*1024)
        s = struct.calcsize('!HHLLBBHHH')
        raw = struct.unpack('!HHLLBBHHH', data[:s])
        print("receive packet from ", address,
              "with header", raw)
        if(raw[2] == ack_seq and raw[7] == 0):
            # In-order, error-free chunk: append payload past the header.
            recvdata += data[s:]
            # raw[5] holds the flag bits; the low bit is treated as FIN.
            if(raw[5] % 2):
                # fin_falg
                fin_flag = 1
            else:
                fin_flag = 0
            ack_seq += 1
            counter += 1
        else:
            # Out-of-order or errored packet: force a final ACK and abort.
            print("Receive ERROR packet from ", address)
            fin_flag = 1
            counter = 3
        # --------------------------------------------
        # send ACK
        if(counter == 3):
            tcp = tcppacket.TCPPacket(
                data=str("ACK").encode('utf-8'),
                seq=seq, ack_seq=ack_seq,
                flags_ack=1,
                flags_fin=fin_flag)
            tcp.assemble_tcp_feilds()
            print("ACK send to (IP,port):", address,
                  "with ack seq: ", ack_seq, " and seq: ", seq)
            sock.sendto(tcp.raw, address)
            if(not fin_flag):
                counter = 0
            seq += 1
        # --------------------------------------------
        print(fin_flag)
        if(fin_flag):
            break
    savename = str(i+1)+"received.mp4"
    f = open(savename, "wb")
    f.write(recvdata)
    f.close()
def init_new_dns_req(i):
    """Send a hard-coded "dns google.com" lookup to the UDP server and
    print the answer, ACKing replies until the server sets its FIN bit.

    The parameter ``i`` is unused; it only satisfies the
    Thread(target=..., args=(i,)) call sites at the bottom of this file.
    Reply headers are read with struct layout '!HHLLBBH', matching
    init_new_calc_req.
    """
    # ---------------------
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    oldmsg = msg = "dns google.com"
    msg = msg.encode('utf-8')
    tcp = tcppacket.TCPPacket(data=msg)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port))
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port)
    while True:
        data, address = sock.recvfrom(512*1024)
        sock.connect(address)
        # Header first, then the UTF-8 payload (the resolved address).
        s = struct.calcsize('!HHLLBBH')
        unpackdata = struct.unpack('!HHLLBBH', data[:s])
        msg = data[s:].decode('utf-8')
        print(oldmsg,"is", msg)
        # Low bit of the flags field (unpackdata[5]) is treated as FIN.
        if(unpackdata[5] % 2):
            # fin_falg
            fin_falg = 1
        else:
            fin_falg = 0
        tcp = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_falg)
        tcp.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        sock.sendto(tcp.raw, address)
        if(fin_falg):
            break
# ----------------------
# def init_new
threads = []
# ---- Calculation demo ----
print("Demo calculation function")
# Fire a handful of synchronous calc requests, pausing briefly between
# them so the interleaved server/client prints stay readable.
for expression in ("calc 2 + 6", "calc 2 - 6", "calc 2 * 6",
                   "calc 2 / 6", "calc 2 + 6", "calc 2 + 6"):
    init_new_calc_req(expression)
    sleep(0.25)
# Also exercise the calc request from a background thread, mirroring the
# dns/video thread launches below. (The original appended a Thread here
# outside any loop and passed an undefined variable `i`, which raised
# NameError before the thread could start.)
for i in range(1):
    threads.append(threading.Thread(target=init_new_calc_req,
                                    args=("calc 2 + 6",)))
    threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
threads[-1].start() | [
"tom95011@gmail.com"
] | tom95011@gmail.com |
fbe2b17d459317f2efe8ffe5d2cae5b3c831ec34 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /suning/api/oto/OrderGetRequest.py | 4a5953e4ea31fa728bfb4e0a68491e4b13475f83 | [] | no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
'''
Created on 2016-12-2
@author: suning
'''
from suning.api.abstract import AbstractApi
class OrderGetRequest(AbstractApi):
    """Request wrapper for the ``suning.oto.order.get`` API, which looks
    up a single O2O order by its B2C order id."""

    def __init__(self):
        """Declare ``b2cOrderId`` as a required (non-empty) parameter."""
        AbstractApi.__init__(self)
        self.b2cOrderId = None
        self.setParamRule({'b2cOrderId': {'allow_empty': False}})

    def getApiBizName(self):
        """Business name used when packing the request payload."""
        return 'getOrder'

    def getApiMethod(self):
        """Remote method identifier for this request."""
        return 'suning.oto.order.get'
| [
"945090896@qq.com"
] | 945090896@qq.com |
f7054ba24a5ccaf1ec397da00458afa1b156dfc4 | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /arcseventdata/tests/module/events2IQQQE_TestCase.py | 99df9481553bd7ad7dbd5f5d4277ab0a0d79268b | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 2,272 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import unittest
from unittest import TestCase
class events2IQQQE_TestCase(TestCase):
def test1(self):
'events2IQQQE: default intensity type (int)'
import arcseventdata.arcseventdata as aa
events = aa.readevents( "events.dat", 10 )
import numpy
I = numpy.zeros( 20*20*20*10, 'i' )
Ei = 60.
pixelPositions = aa.readpixelpositions( 'pixelID2position.bin' )
aa.events2IQQQE_numpyarray(
events, 10,
-10, 10, 1.,
-10, 10, 1.,
-10, 10, 1.,
-50, 50, 10. ,
I,
Ei, pixelPositions)
return
def test2(self):
'events2IQQQE: intensity type is "double"'
import arcseventdata.arcseventdata as aa
events = aa.readevents( "events.dat", 10 )
import numpy
I = numpy.zeros( 20*20*20*10, 'double' )
Ei = 60.
pixelPositions = aa.readpixelpositions( 'pixelID2position.bin' )
ntotpixels = 115*8*128
tofUnit = 1e-7
mod2sample = 13.6
toffset = 0
intensity_npy_typecode = numpy.dtype('double').num
aa.events2IQQQE_numpyarray(
events, 10,
-10, 10, 1.,
-10, 10, 1.,
-10, 10, 1.,
-50, 50, 10. ,
I,
Ei, pixelPositions,
ntotpixels, tofUnit,
mod2sample, toffset, intensity_npy_typecode,
)
return
pass # end of events2IQQQE_TestCase
def pysuite():
    """Collect the events2IQQQE test case into a unittest TestSuite."""
    case_suite = unittest.makeSuite(events2IQQQE_TestCase)
    return unittest.TestSuite((case_suite,))
def main():
    """Run every Python-level suite under a verbose text runner."""
    alltests = unittest.TestSuite((pysuite(),))
    unittest.TextTestRunner(verbosity=2).run(alltests)
    return
# version
__id__ = "$Id$"
# End of file
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
9fa8a0e9bb487fcb5ad1e68336225d1649d3661b | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_campaign_feed_type.py | e632ecbca72b678f7a4db97229be13d4f1347343 | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 1,017 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.campaignfeed.model.app_info_shadow_type import AppInfoShadowType
from baiduads.campaignfeed.model.app_info_type import AppInfoType
from baiduads.campaignfeed.model.schedule_type import ScheduleType
globals()['AppInfoShadowType'] = AppInfoShadowType
globals()['AppInfoType'] = AppInfoType
globals()['ScheduleType'] = ScheduleType
from baiduads.campaignfeed.model.campaign_feed_type import CampaignFeedType
class TestCampaignFeedType(unittest.TestCase):
    """Unit-test stubs for the generated CampaignFeedType model."""

    def setUp(self):
        """No fixtures are required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testCampaignFeedType(self):
        """Placeholder: construct a CampaignFeedType with mandatory
        attributes once example values are available, e.g.
        ``model = CampaignFeedType()``  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"tokimekiyxp@foxmail.com"
] | tokimekiyxp@foxmail.com |
d6bcca1d303319ff1a55c17cac8ab8a370e5a279 | a303be0a547d717b0deb19b5bdcc75010e131b51 | /Contests/Others/ Week of Code/ Week of Code 37 /p5.py | e817e377cb4991fe33d589076be6739cb4fb319f | [] | no_license | harrypotter0/competitive-programming | ff883c4dc5aa8d72f1af589bb654a422e32c8a38 | 82a8497e69212dc62e75af74b0d5a3b390b8aca2 | refs/heads/master | 2023-03-23T07:07:14.295053 | 2021-03-17T01:24:45 | 2021-03-17T01:24:45 | 70,964,689 | 16 | 9 | null | 2021-03-17T01:24:49 | 2016-10-15T03:52:53 | Python | UTF-8 | Python | false | false | 6,684 | py | # /*
# *
# ********************************************************************************************
# * AUTHOR : AKASH KANDPAL *
# * Language : Python2 *
# * Motto : The master has failed more times than the beginner has even tried. *
# * IDE used: Atom *
# * My Domain : http://harrypotter.tech/ *
# ********************************************************************************************
# *
# */
from collections import Counter
from math import ceil
from fractions import gcd
import math
import itertools
from itertools import permutations
from itertools import combinations
import calendar
from itertools import product
from datetime import date
from string import ascii_uppercase
def printdec(ans):
print '{0:.6f}'.format(ans)
def countchars(stra):
s=Counter(stra)
return s
def readInts():
return list(map(int, raw_input().strip().split()))
def readInt():
return int(raw_input())
def readStrs():
return raw_input().split()
def readStr():
return raw_input().strip()
def readarr(n):
return [map(int,list(readStr())) for i in xrange(n)]
def readnumbertolist():
a=[int(i) for i in list(raw_input())]
return a
def strlistTostr(list1):
return ''.join(list1)
def numlistTostr(list1):
return ''.join(str(e) for e in list1)
def strTolist(str):
return str.split()
def strlistTointlist(str):
return map(int, str)
def slicenum(number,x):
return int(str(number)[:x])
def precise(num):
return "{0:.10f}".format(num)
def rsorted(a):
return sorted(a,reverse=True)
def binar(x):
return '{0:063b}'.format(x)
def findpermute(word):
perms = [''.join(p) for p in permutations(word)]
perms = list(set(perms))
return perms
def findsubsets(S,m):
return list(set(itertools.combinations(S, m)))
def sort1(yy,index):
return yy.sort(key = lambda x:x[index])
def reversepair(yy):
return yy[::-1]
def checkint(x):
return (x).is_integer()
def sum_digits(n):
    """Return the base-10 digit sum of integer n (intended for n >= 0)."""
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit
    return total
def vowel_count(str):
    """Count the vowels (a/e/i/o/u, either case) appearing in the string."""
    vowels = set("aeiouAEIOU")
    return sum(1 for ch in str if ch in vowels)
def leapyear(year):
return calendar.isleap(year)
def factorial(n):
    """Return n! computed iteratively (1 for n <= 1)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
def primes_sieve(limit):
    """Return all primes <= limit, via a Sieve of Eratosthenes."""
    is_composite = [False] * (limit + 1) if limit >= 0 else []
    primes = []
    for candidate in range(2, limit + 1):
        if is_composite[candidate]:
            continue
        primes.append(candidate)
        for multiple in range(candidate * 2, limit + 1, candidate):
            is_composite[multiple] = True
    return primes
def distinctstr(s):
t =''.join(set(s))
return t
def countdict(s):
d ={}
for i in range(len(s)):
if s[i] not in d.keys():
d[s[i]]=1
else:
d[s[i]]+=1
return d
import operator as op
def nck(n, k):
k = min(n-k,k)
result = 1
for i in range(1, k+1):
result = result* (n-i+1) / i
return result
def gcd(a, b):
    """Greatest common divisor of a and b via Euclid's algorithm."""
    while b > 0:
        b, a = a % b, b
    return a
def lcm(a, b):
return a * b / gcd(a, b)
def matrixcheck(x,y):
faadu = []
directions = zip((0,0,1,-1),(1,-1,0,0))
for dx,dy in directions:
if R>x+dx>=0<=y+dy<C and A[x+dx][y+dy]==0:
faadu.append((x+dx,y+dy))
return faadu
def stringcount(s):
return [s.count(i) for i in "abcdefghijklmnopqrstuvwxyz"]
def bubbleSort(arr):
n = len(arr)
for i in range(n):
for j in range(0, n-i-1):
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
def isSubsetSum(st, n, sm):
    """Return True if some subset of the first n items of st sums to sm.

    Classic O(n*sm) DP: subset[i][j] is True when a subset of st[:i]
    sums to j. The original built the table with
    ``[[True]*(sm+1)]*(n+1)``, which aliases every row to the same list,
    so writes to one row leaked into all rows and (for example)
    isSubsetSum([2], 1, 4) wrongly returned True.
    """
    # Independent rows; column 0 (sum of 0, the empty subset) is True.
    subset = [[False] * (sm + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        subset[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, sm + 1):
            if j < st[i - 1]:
                # Item too big for this target: can only skip it.
                subset[i][j] = subset[i - 1][j]
            else:
                # Either skip the item, or take it and cover j - st[i-1].
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - st[i - 1]]
    return subset[n][sm]
def decimal_to_octal(dec):
decimal = int(dec)
return oct(decimal)
def decimal_to_binary(dec):
decimal = int(dec)
return bin(decimal)
def decimal_to_hexadecimal(dec):
decimal = int(dec)
return hex(decimal)
def find_duplicate(expr):
stack=[]
char_in_between = 0
f =1
for i in range(0, len(expr)):
if expr[i] == '}' or expr[i] == ')':
pair = '{' if expr[i] == '}' else '('
pop=''
while(len(stack) > 0 and pop != pair):
pop = stack.pop()
if (pop != '{' and pop != '('): char_in_between +=1
if char_in_between == 0:
print "Duplicate"
f =0
break
char_in_between = 0
else:
stack.append(expr[i])
return f
def dictlist(keys, values):
    """Group parallel sequences into ``{key: [values...]}``, in order.

    The original used a set comprehension over an undefined name ``d``
    for its side effects and raised NameError on every call; this builds
    the mapping explicitly with dict.setdefault.
    """
    grouped = {}
    for key, value in zip(keys, values):
        grouped.setdefault(key, []).append(value)
    return grouped
def mullistbyconst(my_list,r):
my_new_list = []
for i in my_list:
my_new_list.append(i * r)
return my_new_list
def coinchange(S, m, n):
# (arr,length,sum)
table = [0 for k in range(n+1)]
table[0] = 1
for i in range(0,m):
for j in range(S[i],n+1):
table[j] += table[j-S[i]]
return table[n]
def palincheck(i):
return str(i) == str(i)[::-1]
def days(year1,year2):
begin = date(year1, 1, 1)
end = date(year2, 1, 1)
return (end-begin).days
from functools import reduce
def factors(n):
return set(reduce(list.__add__,
([i, n//i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))
def prelongfact(factt):
for i in reversed(range(1,int(factt**0.5))):
if factt%i==0:
break
return factt/i
def factmul(n,lim,m):
mul=1
ans=1
if(n>=lim):
print 0
else:
for j in range(1,n+1):
mul=(mul*j)%m
ans=(ans*mul)%m
print ans
def knapSack(W , wt , val , n):
if n == 0 or W == 0 :
return 0
if (wt[n-1] > W):
return knapSack(W , wt , val , n-1)
else:
return max(val[n-1] + knapSack(W-wt[n-1] , wt , val , n-1),
knapSack(W , wt , val , n-1))
m = 329885391853
lim = prelongfact(m)
mod = 10 ** 9 + 7
# fact=[1]
# for i in xrange(1,100001):
# fact.append(((arr[i-1]%mod)*(i%mod))%mod)
# for i,j in product(xrange(R),xrange(C)):
# print "Case #{}: {}".format(i+1,ans)
for __ in range(readInt()):
n,k = readInts()
'''
'''
| [
"9654263057akashkandpal@gmail.com"
] | 9654263057akashkandpal@gmail.com |
a06675d8d7e084e4ae02e553a590be3a8e9ce495 | e206ea09a316757e8028d803616634a4a9a50f72 | /atcoder/abc129/b.py | fbb59bcae9ff0c22019f642b3dc086816f80c85a | [] | no_license | seiichiinoue/procon | 46cdf27ab42079002c4c11b8abe84662775b34a4 | f0b33062a5f31cf0361c7973f4a5e81e8d5a428f | refs/heads/master | 2021-06-26T19:02:24.797354 | 2020-11-01T14:12:54 | 2020-11-01T14:12:54 | 140,285,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | n, w = int(input()), list(map(int, input().split()))
# Choose a split point and compare the total weight of the left prefix
# against the right suffix, reporting the minimum |left - right|.
l, r = 0, sum(w)
# Sentinel assumed larger than any achievable difference for the given
# constraints -- TODO confirm against the problem's input bounds.
ans = 10 ** 8
for i in range(len(w)):
    # Move w[i] from the right group into the left group.
    l += w[i]
    r -= w[i]
    tmp = abs(l - r)
    if tmp < ans:
        ans = tmp
# NOTE(review): the last iteration leaves the right group empty; for
# positive weights that difference never beats a proper split.
print(ans)
"d35inou108@gmail.com"
] | d35inou108@gmail.com |
0401788f6eda4715532dae6556764db985193daa | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/pybites/100DaysOfCode-master/081/test_whotweeted.py | 081e54fb084f4de91e390ed679575664df25c697 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,989 | py | import unittest
from unittest.mock import patch
import tweepy
from whotweeted import get_country_code, who_is_output
from whotweeted import load_cache
DATA = dict(AU='875639674244444160',
ES='875669971954806784',
nopb='846302762736504833',
noloc='844092059988508673',
badid='8756396742444441da'
)
def get_tweet(scenario):
    """Return the cached tweet for a DATA scenario key.

    PEP 8 (E731): a ``def`` is preferred over assigning a lambda, and it
    removes the need for the old ``# noqa`` suppression.
    """
    return load_cache(DATA.get(scenario))
class WhoTweetedTestCase(unittest.TestCase):
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('AU'))
def test_julian(self, mock_method):
tweetid = DATA.get('AU')
country = get_country_code(tweetid)
who_is_out = who_is_output(country)
self.assertEqual(country, 'AU')
self.assertIn('Julian', who_is_out)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('ES'))
def test_bob(self, mock_method):
tweetid = DATA.get('ES')
country = get_country_code(tweetid)
who_is_out = who_is_output(country)
self.assertEqual(country, 'ES')
self.assertIn('Bob', who_is_out)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('nopb'))
def test_no_pybites_account(self, mock_method):
tweetid = DATA.get('nopb')
with self.assertRaises(ValueError):
get_country_code(tweetid)
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('noloc'))
def test_no_location_in_tweet(self, mock_method):
tweetid = DATA.get('noloc')
with self.assertRaises(AttributeError):
get_country_code(tweetid)
# not really a return value, it crashes before decorator can cash tweet
@patch.object(tweepy.API, 'get_status', return_value=get_tweet('nopb'))
def test_bad_tweet_id(self, mock_method):
tweetid = DATA.get('badid')
print(tweetid)
with self.assertRaises(ValueError):
get_country_code(tweetid)
if __name__ == '__main__':
unittest.main()
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
fb7e5991c19a66f9ab6e23cb29f7dd6b7855614f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil3353.py | 660ce9caee9673fe64e6ea34e24751bbdfda931e | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | # qubit number=4
# total number=41
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(3) # number=30
prog += CZ(0,3) # number=31
prog += H(3) # number=32
prog += CNOT(0,3) # number=33
prog += X(3) # number=34
prog += CNOT(0,3) # number=35
prog += CNOT(0,3) # number=29
prog += H(1) # number=2
prog += CNOT(2,0) # number=38
prog += Z(2) # number=39
prog += CNOT(2,0) # number=40
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(2) # number=37
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
prog += CNOT(1,0) # number=13
prog += H(0) # number=15
prog += CZ(1,0) # number=16
prog += H(1) # number=20
prog += H(2) # number=19
prog += CNOT(3,0) # number=24
prog += Z(3) # number=25
prog += CNOT(3,0) # number=26
prog += H(0) # number=17
prog += CNOT(2,0) # number=21
prog += X(1) # number=23
prog += CNOT(2,0) # number=22
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
    """Tally the measurement bitstrings into a {bitstring: count} dict."""
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3353.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
928689715808bfb8d54dac1bb4fbce927c5ef862 | 4fb5eb0a9a24fa5c112a4ebc854ee2604b04adda | /python/test/test_account_properties.py | 459f9f00474222b10ed1e1fd2aa16ae83ec57295 | [
"MIT"
] | permissive | KoenBal/OANDA_V20_Client | ed4c182076db62ecf7a216c3e3246ae682300e94 | e67b9dbaddff6ed23e355d3ce7f9c9972799c702 | refs/heads/master | 2020-03-27T20:42:25.777471 | 2019-12-02T15:44:06 | 2019-12-02T15:44:06 | 147,088,130 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | # coding: utf-8
"""
OANDA v20 REST API
The full OANDA v20 REST API Specification. This specification defines how to interact with v20 Accounts, Trades, Orders, Pricing and more. To authenticate use the string 'Bearer ' followed by the token which can be obtained at https://www.oanda.com/demo-account/tpa/personal_token # noqa: E501
OpenAPI spec version: 3.0.23
Contact: api@oanda.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import oanda
from oanda.models.account_properties import AccountProperties # noqa: E501
from oanda.rest import ApiException
class TestAccountProperties(unittest.TestCase):
    """Unit-test stubs for the generated AccountProperties model."""

    def setUp(self):
        """No fixtures are required yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testAccountProperties(self):
        """Placeholder: construct an AccountProperties with mandatory
        attributes once example values are available, e.g.
        ``model = oanda.models.account_properties.AccountProperties()``  # noqa: E501
        """
        pass
if __name__ == '__main__':
unittest.main()
| [
"koen.bal@gmail.com"
] | koen.bal@gmail.com |
25e99ed5ed165357611d11826c5c6747f9a2c1c7 | 015a54d9bfdf81184da6f8513d1870a2cbc17224 | /busshaming/conf/dev_settings.py | 99b6a3342cc982e6ef9d087e331d059103510590 | [
"MIT"
] | permissive | katharosada/bus-shaming | 54e4cc4fa36b55faa1da1b6b8a15a390a2f681a6 | c8d7cd4baf9ff049cda49c92da4d5ca10f68e6a9 | refs/heads/master | 2021-01-15T22:08:33.290718 | 2018-12-03T08:19:24 | 2018-12-03T08:19:24 | 99,885,413 | 44 | 7 | MIT | 2018-01-29T06:44:45 | 2017-08-10T05:38:22 | Python | UTF-8 | Python | false | false | 1,447 | py | """
Django settings for busshaming project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#$sxqt-6%d*d95@7*=j%bg*-32(ic@lst#396=0f$54_4*++r3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
CORS_ORIGIN_WHITELIST = (
'127.0.0.1:8000',
'localhost:8000',
'127.0.0.1:8080',
'localhost:8080',
)
ROOT_URLCONF = 'busshaming.urls'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'busshaming-local',
'HOST': 'localhost',
'PORT': '',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO',
},
}
}
| [
"katie@katharos.id.au"
] | katie@katharos.id.au |
c19cb11d4583210fc5f30bc666a548f7590b7f2d | 1f05211127ded22bad7b947d771578d658f4ef77 | /apostello/templatetags/apostello_extras.py | 1c513fe3ca0769e7b3a0e8f288cbb81540291ced | [
"MIT"
] | permissive | dmccubbing/apostello | 60afa716b756a9619f5a01b690223e7705b98b10 | 859a54e9936097628719680ed3c0a67fe4553947 | refs/heads/master | 2021-01-12T02:38:59.332852 | 2016-12-24T15:10:09 | 2016-12-24T15:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,722 | py | from django import template
from django.urls import reverse
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def fab_button(href, text, icon_name):
"""Output a formatted fab link"""
result = '''
<a class="hvr-backward item" href="{href}">
<i class="large {icon_name} icon"></i>
<div class="content"><div class="header">{text}</div>
</div>
</a>
'''.format(
href=href,
text=text,
icon_name=icon_name,
)
return mark_safe(result)
# Contacts
@register.simple_tag
def fab_new_contact():
return fab_button(reverse('recipient'), 'New Contact', 'plus')
@register.simple_tag
def fab_contacts_archive():
return fab_button(
reverse('recipients_archive'), 'Archived Contacts', 'table'
)
# Groups
@register.simple_tag
def fab_new_group():
return fab_button(reverse('group'), 'New Group', 'plus')
@register.simple_tag
def fab_groups_archive():
return fab_button(
reverse('recipient_groups_archive'), 'Archived Groups', 'table'
)
@register.simple_tag
def fab_groups():
return fab_button(reverse('recipient_groups'), 'Groups', 'table')
# Incoming SMS
@register.simple_tag
def fab_incoming_wall():
return fab_button(reverse('incoming_wall'), 'Live Updates', 'inbox')
@register.simple_tag
def fab_incoming_wall_curator():
return fab_button(
reverse('incoming_wall_curator'), 'Live Curator', 'table'
)
# Keywords
@register.simple_tag
def fab_new_keyword():
return fab_button(reverse('keyword'), 'New Keyword', 'plus')
@register.simple_tag
def fab_keywords():
return fab_button(reverse('keywords'), 'Keywords', 'table')
@register.simple_tag
def fab_keywords_archive():
return fab_button(
reverse('keywords_archive'), 'Archived Keywords', 'table'
)
@register.simple_tag
def fab_keyword_csv(keyword):
return fab_button(
reverse(
'keyword_csv', args=[keyword.pk]
),
'Export {k} responses'.format(k=keyword.keyword),
'download'
)
@register.simple_tag
def fab_keyword_edit(keyword):
return fab_button(reverse('keyword', args=[keyword.pk]), 'Edit', 'edit')
@register.simple_tag
def fab_keyword_responses(keyword):
return fab_button(
reverse(
'keyword_responses', args=[keyword.pk]
),
'Replies ({n})'.format(n=keyword.num_matches),
'inbox'
)
@register.simple_tag
def fab_keyword_responses_archive(keyword):
return fab_button(
reverse(
'keyword_responses_archive', args=[keyword.pk]
),
'Archived Replies ({n})'.format(n=keyword.num_archived_matches),
'inbox'
)
| [
"montgomery.dean97@gmail.com"
] | montgomery.dean97@gmail.com |
499db621a567d7c9bcdf75cb9232c3c6fbe6ad2e | 031b223ef09542b34ad495a941eb895ab367c2fa | /framework/codejam/extract/identifier.py | 2ce2957c989136bb6062e295cd1990ac9bc0d662 | [
"MIT"
] | permissive | neizod/coding-analysis | 7fb41af09cf193b18cac7b58da1f7c4ae085bc6c | cc086bcf204e570032d11b12a46ac819cfe93f2b | refs/heads/master | 2021-01-10T07:02:45.672795 | 2015-05-29T00:15:31 | 2015-05-29T00:15:31 | 36,041,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | import os
import logging
from framework._utils import FunctionHook
class CodeJamExtractIdentifier(FunctionHook):
''' This method will extract all identifiers in submitted source code
from each contestants for futher analysis. '''
@staticmethod
def get_identifiers(directory):
''' returns all identifiers in source code files in a directory. '''
from framework._utils.misc import datapath
from framework._utils.source import SourceCode
identifiers = set()
for filename in os.listdir(directory):
filepath = datapath('codejam', directory, filename)
if not os.path.isfile(filepath):
continue
source_code = SourceCode.open(filepath)
try:
identifiers |= source_code.identifiers().keys()
except NotImplementedError:
continue
return identifiers
def main(self, year, force=False, **_):
from framework._utils import write
from framework._utils.misc import datapath, make_ext
from framework.codejam._helper import iter_submission
os.makedirs(datapath('codejam', 'extract'), exist_ok=True)
outpath = datapath('codejam', 'extract',
make_ext('identifier', year, 'json'))
if not force and os.path.isfile(outpath):
return logging.warn('output file already exists, aborting.')
extracted_data = []
for _, pid, pio, uname in iter_submission(year):
directory = datapath('codejam', 'source', pid, pio, uname)
logging.info('extracting: %i %i %s', pid, pio, uname)
extracted_data += [{
'pid': pid,
'io': pio,
'uname': uname,
'identifiers': sorted(self.get_identifiers(directory)),
}]
write.json(extracted_data, open(outpath, 'w'))
| [
"neizod@gmail.com"
] | neizod@gmail.com |
018091484c376685fd97ef99f65f8580350e4606 | 38c10c01007624cd2056884f25e0d6ab85442194 | /device/hid/hid.gyp | 00b9a90cafed806e6061d85486132b364e924b55 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 2,332 | gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'device_hid',
'type': 'static_library',
'include_dirs': [
'../..',
],
'dependencies': [
'../../components/components.gyp:device_event_log_component',
'../../net/net.gyp:net',
'../core/core.gyp:device_core',
],
'sources': [
'device_monitor_linux.cc',
'device_monitor_linux.h',
'fake_input_service_linux.cc',
'fake_input_service_linux.h',
'hid_collection_info.cc',
'hid_collection_info.h',
'hid_connection.cc',
'hid_connection.h',
'hid_connection_linux.cc',
'hid_connection_linux.h',
'hid_connection_mac.cc',
'hid_connection_mac.h',
'hid_connection_win.cc',
'hid_connection_win.h',
'hid_device_filter.cc',
'hid_device_filter.h',
'hid_device_info.cc',
'hid_device_info.h',
'hid_device_info_linux.cc',
'hid_device_info_linux.h',
'hid_report_descriptor.cc',
'hid_report_descriptor.h',
'hid_report_descriptor_item.cc',
'hid_report_descriptor_item.h',
'hid_service.cc',
'hid_service.h',
'hid_service_linux.cc',
'hid_service_linux.h',
'hid_service_mac.cc',
'hid_service_mac.h',
'hid_service_win.cc',
'hid_service_win.h',
'hid_usage_and_page.cc',
'hid_usage_and_page.h',
'input_service_linux.cc',
'input_service_linux.h',
],
'conditions': [
['use_udev==1', {
'dependencies': [
'../udev_linux/udev.gyp:udev_linux',
],
}, { # use_udev==0
# The Linux implementation is based on Udev.
'sources!': [
'device_monitor_linux.cc',
'device_monitor_linux.h',
'hid_service_linux.cc',
'hid_service_linux.h',
'fake_input_service_linux.cc',
'fake_input_service_linux.h',
'input_service_linux.cc',
'input_service_linux.h',
],
}],
],
},
],
}
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com |
665ecc18a624f9b4460d72d5909f74e5e93b8d51 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC1775.py | aaf9aaec4c26ff1808999763b15d3f1700a67c47 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,846 | py | # qubit number=5
# total number=60
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=31
prog.y(input_qubit[1]) # number=56
prog.cz(input_qubit[1],input_qubit[0]) # number=32
prog.h(input_qubit[1]) # number=52
prog.h(input_qubit[0]) # number=33
prog.h(input_qubit[1]) # number=44
prog.cz(input_qubit[0],input_qubit[1]) # number=45
prog.h(input_qubit[1]) # number=46
prog.cx(input_qubit[0],input_qubit[1]) # number=53
prog.cx(input_qubit[0],input_qubit[1]) # number=57
prog.x(input_qubit[1]) # number=58
prog.cx(input_qubit[0],input_qubit[1]) # number=59
prog.cx(input_qubit[0],input_qubit[1]) # number=55
prog.h(input_qubit[1]) # number=48
prog.cz(input_qubit[0],input_qubit[1]) # number=49
prog.h(input_qubit[1]) # number=50
prog.x(input_qubit[0]) # number=26
prog.cx(input_qubit[1],input_qubit[0]) # number=27
prog.h(input_qubit[1]) # number=37
prog.cz(input_qubit[0],input_qubit[1]) # number=38
prog.h(input_qubit[1]) # number=39
prog.x(input_qubit[1]) # number=35
prog.cx(input_qubit[0],input_qubit[1]) # number=36
prog.x(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[3],input_qubit[2]) # number=43
prog.cx(input_qubit[3],input_qubit[2]) # number=47
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.cx(input_qubit[0],input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
prog.cx(input_qubit[0],input_qubit[1]) # number=24
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[1]) # number=29
prog.y(input_qubit[4]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[3]) # number=51
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1775.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
ecefc29203c6dbd9ad3567e3c4923a92540392cc | 1c390cd4fd3605046914767485b49a929198b470 | /codechef/MODULO3.py | 56225738efe0487b5051721c932373bd79d04918 | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | for t in range(int(input())):
a, b = map(int, input().split())
if a % 3 == 0 or b % 3 == 0:
print(0)
elif a % 3 == b % 3:
print(1)
else:
print(2) | [
"wwwwodddd@gmail.com"
] | wwwwodddd@gmail.com |
6386bb45b6d3ab17a1f9afcb15656b0f1a6892fe | 989b3499948137f57f14be8b2c77d0610d5975e6 | /question_python(resolved)/chapter3_data_type(완결)/ii_replace_it.py | 01732f47a8bda1b2a4f65637446d87f13dd4f158 | [] | no_license | namkiseung/python_BasicProject | 76b4c070934ad4cb9d16ce844efa05f64fb09ac0 | 460d05248b2d1431624aba960e28bece888643e4 | refs/heads/master | 2022-12-13T21:12:06.865241 | 2020-04-23T01:30:08 | 2020-04-23T01:30:08 | 142,980,920 | 1 | 1 | null | 2022-12-08T02:27:40 | 2018-07-31T07:49:17 | Python | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
def replace_it(input_str):
""" 입력된 문자열에 소문자 o를, 대문자 O로 바꿔서 반환하는 함수를 작성해보자
hint: replace
sample data: "google"
expected output: "gOOgle"
"""
result=input_str.replace('o', 'O')
# 여기 작성
return result
if __name__ == "__main__":
input_str = 'google'
print(replace_it(input_str))
pass
| [
"rlzld100@gmail.com"
] | rlzld100@gmail.com |
415e464cf1107b46d06e17330af69e5a62cabaa7 | 86986fc336d87823b45c427ac2326d6d733c7f91 | /social/groups/views.py | ab699144185ce0ecbebdbef63e928827bb1996db | [] | no_license | CryptAthlos/cryptathloscap | b0d83efb1ed9d628677aec47a24841f6fa52ed4c | 1a91ce7a0f57548523e02a7f1544018ccd587d4d | refs/heads/master | 2020-03-15T12:11:36.453570 | 2018-05-29T09:09:50 | 2018-05-29T09:09:50 | 132,138,497 | 0 | 0 | null | 2018-05-29T09:09:51 | 2018-05-04T12:39:09 | Python | UTF-8 | Python | false | false | 1,927 | py | from django.contrib import messages
from django.db import IntegrityError
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.core.urlresolvers import reverse
from django.views import generic
from groups.models import Group, GroupMember
# Create your views here.
class CreateGroup(LoginRequiredMixin, generic.CreateView):
fields = ('name', 'description')
model = Group
class SingleGroup(generic.DetailView):
model = Group
class ListGroups(generic.ListView):
model = Group
class JoinGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})
def get(self, request, *args, **kwargs):
group = get_object_or_404(Group, slug=self.kwargs.get('slug'))
try:
GroupMember.objects.create(user=self.request.user, group=group)
except IntegrityError:
messages.warning(self.request, 'Warning already a memebr.')
else:
messages.success(self.request, 'You are a member')
return super().get(request, *args, **kwargs)
class LeaveGroup(LoginRequiredMixin, generic.RedirectView):
def get_redirect_url(self, *args, **kwargs):
return reverse('groups:single', kwargs={'slug': self.kwargs.get('slug')})
def get(self, request, *args, **kwargs):
try:
membership = GroupMember.objects.filter(
user=self.request.user,
group__slug=self.kwargs.get('slug')
).get()
except GroupMember.DoesNotExist:
messages.warning(self.request, 'You are not in this group')
else:
membership.delete()
messages.success(self.request, 'You have left the group')
return super().get(request, *args, **kwargs)
| [
"jon.froiland@gmail.com"
] | jon.froiland@gmail.com |
a4aebac3982dc6e6551d597587c45f909039ce3b | c3612d29df2fd6224c61b693a0cbd3554b6213f2 | /03_django/06_django_axios/articles/views.py | c25a540c0aa075716a900db72f11ad0f2cf75694 | [] | no_license | mycomax0416/TIL | 5a12b8067e22fc095c7998b9ffb17d6fb210933f | 21d3b62db84b1fd64318a278eddc287bdc9678f0 | refs/heads/master | 2023-01-11T00:59:42.851469 | 2019-11-28T08:48:54 | 2019-11-28T08:48:54 | 195,918,118 | 3 | 0 | null | 2023-01-07T16:48:23 | 2019-07-09T02:31:05 | Jupyter Notebook | UTF-8 | Python | false | false | 6,798 | py | import hashlib
from django.http import JsonResponse, HttpResponseBadRequest
from IPython import embed
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_POST
from .models import Article, Comment, Hashtag
from .forms import ArticleForm, CommentForm
# Create your views here.
def index(request):
# session 에 visit_num 키로 접근해 값을 가져온다.
# 기본적으로 존재하지 않는 키이기 때문에 키가 없다면(방문한적이 없다면) 0 값을 가져오도록 한다.
visits_num = request.session.get('visits_num', 0)
# 그리고 가져온 값은 session 에 visits_num 에 매번 1씩 증가한 값으로 할당한다. (유저의 다음 방문을 위해)
request.session['visits_num'] = visits_num + 1
# session data 안에 있는 새로운 정보를 수정했따면 django 는 수정한 사실을 알아채지 못하기 때문에 다음과 같이 설정.
request.session.modified = True
articles = Article.objects.all()
context = {'articles': articles, 'visits_num': visits_num,}
return render(request, 'articles/index.html', context)
@login_required
def create(request):
if request.method == 'POST':
# form 인스턴스를 생성하고 요청에 의한 데이터를 인자로 받는다. (binding)
# 이 처리과정은 binding 이라고 불리며 유효성 체크를 할 수 있도록 해준다.
form = ArticleForm(request.POST)
# form 이 유효한지 체크한다.
if form.is_valid():
article = form.save(commit=False)
article.user = request.user
article.save()
# hashtag 의 시작점
for word in article.content.split(): # content 를 공백 기준으로 리스트로 변경
if word.startswith('#'): # '#' 으로 시작하는 요소만 선택
hashtag, created = Hashtag.objects.get_or_create(content=word) # word랑 같은 해시태그를 찾는데 있으면 기존 객체(.get), 없으면 새로운 객체를 생성(.create)
article.hashtags.add(hashtag) # created 를 사용하지 않았다면, hashtag[0] 로 작성
return redirect(article)
else:
form = ArticleForm()
# 상황에 따라 context 에 넘어가는 2가지 form
# 1. GET : 기본 form
# 2. POST : 검증에 실패 후의 form(is_valid == False)
context = {'form': form,}
return render(request, 'articles/form.html', context)
def detail(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
comments = article.comment_set.all() # article 의 모든 댓글
person = get_object_or_404(get_user_model(), pk=article.user_id)
comment_form = CommentForm() # 댓글 폼
context = {'article': article, 'comment_form': comment_form, 'comments': comments, 'person': person,}
return render(request, 'articles/detail.html', context)
@require_POST
def delete(request, article_pk):
if request.user.is_authenticated:
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
article.delete()
else:
return redirect(article)
return redirect('articles:index')
@login_required
def update(request, article_pk):
article = get_object_or_404(Article, pk=article_pk)
if request.user == article.user:
if request.method == 'POST':
form = ArticleForm(request.POST, instance=article)
if form.is_valid():
article = form.save()
# hashtag
article.hashtags.clear() # 해당 article 의 hashtag 전체 삭제
for word in article.content.split():
if word.startswith('#'):
hashtag, created = Hashtag.objects.get_or_create(content=word)
article.hashtags.add(hashtag)
return redirect(article)
else:
form = ArticleForm(instance=article)
else:
return redirect('articles:index')
context = {'form': form, 'article': article,}
return render(request, 'articles/form.html', context)
@require_POST
def comments_create(request, article_pk):
if request.user.is_authenticated:
comment_form = CommentForm(request.POST)
if comment_form.is_valid():
# 객체를 Create 하지만, db 에 레코드는 작성하지 않는다.
comment = comment_form.save(commit=False)
comment.article_id = article_pk
comment.user = request.user
comment.save()
return redirect('articles:detail', article_pk)
@require_POST
def comments_delete(request, article_pk, comment_pk):
if request.user.is_authenticated:
comment = get_object_or_404(Comment, pk=comment_pk)
if request.user == comment.user:
comment.delete()
return redirect('articles:detail', article_pk)
return HttpResponse('You are Unauthorized', status=401)
@login_required
def like(request, article_pk):
if request.is_ajax():
article = get_object_or_404(Article, pk=article_pk)
if article.like_users.filter(pk=request.user.pk).exists():
article.like_users.remove(request.user)
liked = False
else:
article.like_users.add(request.user)
liked = True
context = {'liked': liked, 'count': article.like_users.count()}
return JsonResponse(context)
else:
return HttpResponseBadRequest()
# 해당 게시글에 좋아요를 누른 사람들 중에서 현재 접속유저가 있다면 좋아요를 취소
# if request.user in article.like_users.all():
# article.like_users.remove(request.user) # 좋아요 취소
# else:
# article.like_users.add(request.user) # 좋아요
@login_required
def follow(request, article_pk, user_pk):
# 게시글 유저
person = get_object_or_404(get_user_model(), pk=user_pk)
# 접속 유저
user = request.user
if person != user:
# 내(request.user)가 게시글 유저 팔로워 목록에 이미 존재 한다면,
if person.followers.filter(pk=user.pk).exists():
person.followers.remove(user)
else:
person.followers.add(user)
return redirect('articles:detail', article_pk)
def hashtag(request, hash_pk):
hashtag = get_object_or_404(Hashtag, pk=hash_pk)
articles = hashtag.article_set.order_by('-pk')
context = {'hashtag': hashtag, 'articles': articles,}
return render(request, 'articles/hashtag.html', context) | [
"mycomax0416@gmail.com"
] | mycomax0416@gmail.com |
d7269d784f0dd8367a8b8ad07ab5661409480603 | 51d0377511a5da902033fb9d80184db0e096fe2c | /21-deep-learning-in-python/1-basics-of-deep-learning-and-neural-networks/01-coding-the-forward-propagation-algorithm.py | 52eed540367f55989509b534772aa4bdd0b4485f | [] | no_license | sashakrasnov/datacamp | c28c6bda178163337baed646220b2f7dcc36047d | 759f4cec297883907e21118f24a3449d84c80761 | refs/heads/master | 2021-12-07T02:54:51.190672 | 2021-09-17T21:05:29 | 2021-09-17T21:05:29 | 157,093,632 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | '''
Coding the forward propagation algorithm
In this exercise, you'll write code to do forward propagation (prediction) for your first neural network:
https://s3.amazonaws.com/assets.datacamp.com/production/course_3524/datasets/1_4.png
Each data point is a customer. The first input is how many accounts they have, and the second input is how many children they have. The model will predict how many transactions the user makes in the next year. You will use this data throughout the first 2 chapters of this course.
The input data has been pre-loaded as input_data, and the weights are available in a dictionary called weights. The array of weights for the first node in the hidden layer are in weights['node_0'], and the array of weights for the second node in the hidden layer are in weights['node_1'].
The weights feeding into the output node are available in weights['output'].
NumPy will be pre-imported for you as np in all exercises.
'''
import numpy as np
input_data = np.array([3, 5])
weights = {
'node_0': np.array([ 2, 4]),
'node_1': np.array([ 4, -5]),
'output': np.array([ 2, 7])
}
'''
INSTRUCTIONS
* Calculate the value in node 0 by multiplying input_data by its weights weights['node_0'] and computing their sum. This is the 1st node in the hidden layer.
* Calculate the value in node 1 using input_data and weights['node_1']. This is the 2nd node in the hidden layer.
* Put the hidden layer values into an array. This has been done for you.
* Generate the prediction by multiplying hidden_layer_outputs by weights['output'] and computing their sum.
* Hit 'Submit Answer' to print the output!
'''
# Calculate node 0 value: node_0_value
node_0_value = (input_data * weights['node_0']).sum()
# Calculate node 1 value: node_1_value
node_1_value = (input_data * weights['node_1']).sum()
# Put node values into array: hidden_layer_outputs
hidden_layer_outputs = np.array([node_0_value, node_1_value])
# Calculate output: output
output = (hidden_layer_outputs * weights['output']).sum()
# Print output
print(output)
| [
"a@skrasnov.com"
] | a@skrasnov.com |
5b8c798cf4696fcbcf01bc1c454ccfac58142539 | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week2/NestedForLoops/printMysteryStarShape.py | e984963bec5498ff2b46607c922afbc823618747 | [] | no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | def printMysteryStarShape(n):
for row in range(n):
print(row, end=" ")
for col in range(row):
print("*", end=" ")
print()
printMysteryStarShape(5)
| [
"tariqueanwer@outlook.com"
] | tariqueanwer@outlook.com |
39cbb5a9beaf9637dc7fb56113ad2c3884e152c0 | 93c43f774b11d3f44a3c4b83ab94be3600d606df | /Deep-Learning/R-CNN/utils.py | 40a7d2ba1b2752903b37b5576f40183ecfa2485c | [
"Apache-2.0"
] | permissive | sadbb/CVCode | 299afeafb5a09d030b42ec00c6cd9c8087bc718f | c7c8b527af786d8f113122231e6296987b242b59 | refs/heads/master | 2020-04-07T04:24:04.040378 | 2018-11-17T14:13:23 | 2018-11-17T14:13:23 | 158,053,757 | 1 | 0 | Apache-2.0 | 2018-11-18T05:41:43 | 2018-11-18T05:41:42 | null | UTF-8 | Python | false | false | 1,408 | py | # -*- coding:utf-8 -*-
import numpy as np
def get_IoU(ground_truth, region):
# xmin, ymin, xmax, ymax
x1 = max(ground_truth[0], region[0])
y1 = max(ground_truth[1], region[1])
x2 = min(ground_truth[2], region[2])
y2 = min(ground_truth[3], region[3])
if x2 - x1 < 0:
return 0
inter_area = (x2 - x1 + 1) * (y2 - y1 + 1)
outer_area = (region[2] - region[0]) * (region[3] - region[1]) \
+ (ground_truth[2] - ground_truth[0]) * (ground_truth[3] - ground_truth[1]) - inter_area
iou = inter_area / outer_area
return iou
def NMS(nms_sum):
regions = []
nms_sum = nms_sum[nms_sum[:,6]!=20]
for i in range(len(nms_sum)):
i_xmin, i_ymin, i_width, i_height, i_image_region, i_score, i_label = nms_sum[i]
flag = False
for j in range(len(nms_sum)):
if i == j:
continue
j_xmin, j_ymin, j_width, j_height, j_image_region, j_score, j_label = nms_sum[j]
iou = get_IoU([i_xmin, i_xmin+i_width, i_ymin, i_ymin+i_height],
[j_xmin, j_xmin+j_width, j_ymin, j_ymin+j_height])
if iou > 0.5 and i_score > j_score:
flag = True
elif i_score < j_score:
break
if flag == True:
regions.append([[i_xmin, i_ymin, i_width, i_height], i_label])
return np.asarray(regions)
| [
"529768926@qq.com"
] | 529768926@qq.com |
b881534e43a240f73a78798cdc8a12f464a714f5 | 0ddfc02a2cc459e6ccd5322571b430af3b86001b | /book_management/book_management/asgi.py | bec0c1dc1ddae3d2adafba607aa143eb00c27844 | [] | no_license | XMLPro/2020_groupdev | 87e5d6d6f9d91bff79f56c29bf24e02b6322fac8 | 3f01e3e7e74ed76950d2526d74de566158bf3971 | refs/heads/master | 2023-07-18T16:17:23.892887 | 2020-06-09T12:50:07 | 2020-06-09T12:50:07 | 267,032,912 | 1 | 1 | null | 2021-09-22T19:11:47 | 2020-05-26T12:10:05 | Python | UTF-8 | Python | false | false | 407 | py | """
ASGI config for book_management project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'book_management.settings')
application = get_asgi_application()
| [
"sakakazu2468@icloud.com"
] | sakakazu2468@icloud.com |
661012d59719e3422c970f2479d9719ecb5d6489 | 40132307c631dccbf7aa341eb308f69389715c73 | /OLD/idmt/maya/MTD/getInfoByName.py | b34d1be3aea426c52a8e50c35ea69f19dc77fa97 | [] | no_license | Bn-com/myProj_octv | be77613cebc450b1fd6487a6d7bac991e3388d3f | c11f715996a435396c28ffb4c20f11f8e3c1a681 | refs/heads/master | 2023-03-25T08:58:58.609869 | 2021-03-23T11:17:13 | 2021-03-23T11:17:13 | 348,676,742 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | __author__ = 'xuweijian'
import maya.cmds as mc
class getInfoByName():
def getInfoByScene(self):
dict={}
fileName = mc.file(q=1, sn=1,shn=1)
SceneName=fileName
info=SceneName.split('_')
SCinfo=[info[1][1:3],info[1][4:7]]
dict['project']=info[0]
dict['scene']=SCinfo[0]
dict['camera']=SCinfo[1]
dict['part']=info[2]
return dict
def getInfoByAsset(self):
dict={}
fileName = mc.file(q=1, sn=1,shn=1)
SceneName=fileName
info=SceneName.split('_')
SCinfo=[info[1][1:2],info[1][4:6]]
dict['project']=info[0]
dict['type']=info[1]
dict['name']=info[2]
dict['part']=info[3]
return dict | [
"snakelonely@outlook.com"
] | snakelonely@outlook.com |
1df2a7f99628c9b74c20ae77a1c0534f60c03ee2 | 46f2834ae92da9e17463def0c635f75bf05886a1 | /abc/abc138/D/main.py | 9b51ace7c6d8605a5b3a4db56c898dc568cb7d84 | [] | no_license | replu/atcoder | bf3da10c937c955ca1bc3fa33b8f24c74d2d6c50 | a6183d03355058bccc2b89db5e07b7f72598fea3 | refs/heads/master | 2023-03-30T15:03:47.879783 | 2021-03-28T17:08:19 | 2021-03-28T17:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | #!/usr/bin/env python3
import sys
def solve(N: int, Q: int, a: "List[int]", b: "List[int]", p: "List[int]", x: "List[int]"):
return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
N = int(next(tokens)) # type: int
Q = int(next(tokens)) # type: int
a = [int()] * (N - 1) # type: "List[int]"
b = [int()] * (N - 1) # type: "List[int]"
for i in range(N - 1):
a[i] = int(next(tokens))
b[i] = int(next(tokens))
p = [int()] * (Q) # type: "List[int]"
x = [int()] * (Q) # type: "List[int]"
for i in range(Q):
p[i] = int(next(tokens))
x[i] = int(next(tokens))
solve(N, Q, a, b, p, x)
if __name__ == '__main__':
main()
| [
"n.small.island@gmail.com"
] | n.small.island@gmail.com |
89fdfc7929383d46dd47a2c43d1b8f94270d44c9 | e3b9aa9b17ebb55e53dbc4fa9d1f49c3a56c6488 | /duo_auth/vendor/duo_client_python/duo_client/accounts.py | 556580b7f43fe411ee3f8915eed02cbb5f3ca610 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | OSSSP/insightconnect-plugins | ab7c77f91c46bd66b10db9da1cd7571dfc048ab7 | 846758dab745170cf1a8c146211a8bea9592e8ff | refs/heads/master | 2023-04-06T23:57:28.449617 | 2020-03-18T01:24:28 | 2020-03-18T01:24:28 | 248,185,529 | 1 | 0 | MIT | 2023-04-04T00:12:18 | 2020-03-18T09:14:53 | null | UTF-8 | Python | false | false | 1,287 | py | """
Duo Security Accounts API reference client implementation.
<http://www.duosecurity.com/docs/accountsapi>
"""
from __future__ import absolute_import
from . import client
class Accounts(client.Client):
    """Client for the Duo Security Accounts API.

    Reference: <http://www.duosecurity.com/docs/accountsapi>.
    Each endpoint is a POST under /accounts/v1/ and returns the decoded
    JSON body produced by the inherited json_api_call().
    """

    def get_child_accounts(self):
        """Return a list of all child accounts of the integration's account."""
        return self.json_api_call('POST',
                                  '/accounts/v1/account/list',
                                  {})

    def create_account(self, name):
        """Create a new child account of the integration's account."""
        return self.json_api_call('POST',
                                  '/accounts/v1/account/create',
                                  {'name': name})

    def delete_account(self, account_id):
        """Delete a child account of the integration's account."""
        return self.json_api_call('POST',
                                  '/accounts/v1/account/delete',
                                  {'account_id': account_id})
| [
"jonschipp@gmail.com"
] | jonschipp@gmail.com |
5ea84d476170a94be1ff7dcd2b8b67bd8b5db885 | 782316ea755c025a331bf084b89b7d8e775a36eb | /main.py | c94a3077af22bb818b1e35bf4284517ec30f27e5 | [] | no_license | ravenusmc/market | eb7c5f1e1736a3de02d77df7f7e63828dab6bca6 | ad9fed2e24b8789e8ae6b554dfa219cd50d089d6 | refs/heads/master | 2021-08-19T14:49:04.153170 | 2017-11-26T18:34:51 | 2017-11-26T18:34:51 | 106,032,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,564 | py | #importing outside libraries for use in the project
from flask import Flask, session, jsonify, redirect, url_for, escape, render_template, request, flash
import json
import requests
#importing files I made for this project
from food import *
from food_data import *
from quote import *
from user import *
#Setting up Flask
app = Flask(__name__)
#This route takes the user to the landing page
@app.route('/', methods=['GET', 'POST'])
def landing():
    """Landing/login page.

    GET renders the login form; POST checks the submitted credentials via
    User.check() and either starts a session and redirects to /home, or
    flashes an error and re-renders the form.
    """
    if request.method == 'POST':
        # Read the submitted credentials.
        username = request.form['username']
        password = request.form['password']
        # User wraps all account lookups against the database.
        user = User()
        # check() returns three flags: overall success, plus which of the
        # two failure modes (unknown username / wrong password) applies.
        flag, not_found, password_no_match = user.check(username, password)
        if flag == True:
            # Credentials accepted: remember the user for later requests.
            session['username'] = request.form['username']
            return redirect(url_for('home'))
        else:
            # Credentials rejected: tell the user why and fall through to
            # re-render the login page below.
            if not_found:
                flash('Username not found, maybe sign up!')
            elif password_no_match:
                flash('Password does not match! Maybe sign up!')
    return render_template('login.html')
#This route takes the user to the signup page
@app.route('/signup', methods=['GET', 'POST'])
def signup():
    """Sign-up page: create an account and drop the user into /home.

    NOTE(review): no duplicate-username check is visible here — presumably
    User.insert() or the schema enforces uniqueness; verify.
    """
    if request.method == 'POST':
        name = request.form['name']
        username = request.form['username']
        password = request.form['password']
        user = User()
        # Never store the raw password; encrypt_pass returns the hash to save.
        password, hashed = user.encrypt_pass(password)
        user.insert(name, username, hashed)
        # NOTE(review): the session is not set here, so /home will bounce a
        # fresh signup back to /signup — confirm whether that is intended.
        return redirect(url_for('home'))
    return render_template('signup.html')
#This route takes the user to the home page
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Home page: seeds food data on POST and always shows quotes.

    Requires a logged-in session; anonymous visitors are sent to /signup.
    """
    # Gate: only users with an active session may view this page.
    if 'username' not in session:
        return redirect(url_for('signup'))
    # POST (submit button): generate food amounts and persist them.
    if request.method == 'POST':
        food = Food()
        # get_Food() builds the per-type food amounts to store.
        food_list = food.get_Food()
        # Food_Data handles the database writes.
        data = Food_Data()
        data.insert_food(food_list)
    username = session['username']
    # This initial value is immediately overwritten by get_data() below.
    quotes = []
    quote = Quote()
    # Fetch the raw response from the external quote API...
    response = quote.getting_quotes()
    # ...and extract just the quote strings from it.
    quotes = quote.get_data(response)
    return render_template('home.html', name = username, quotes = quotes)
#This route will take the user to the stats page
@app.route('/stats', methods=['GET', "POST"])
def stats():
    """Stats page: per-food and total pounds/profit, plus JSON for charts.

    NOTE(review): unlike /home, this route has no session check, so it is
    reachable while logged out — confirm whether that is intended.
    """
    food = Food_Data()
    # Pull every stored food row out of the database.
    food_data = food.pull_food()
    # Per-food totals...
    pound_data = food.get_pounds(food_data)
    profit_data = food.get_profit(food_data)
    # ...and grand totals across all foods.
    total_pounds = food.total_pounds(pound_data)
    total_profit = food.total_profit(profit_data)
    # The json.dumps copies feed client-side chart code in the template.
    return render_template('stats_page.html', pound_info = pound_data, total_pounds = total_pounds,
    profit_info =profit_data, total_profit = total_profit, pound_data=json.dumps(pound_data),
    profit_data=json.dumps(profit_data))
#This function is what will log out the user.
@app.route('/sign_out')
def logout():
    """End the session and return to the landing page."""
    # pop with a default is a no-op when nobody is logged in.
    session.pop('username', None)
    return redirect(url_for('landing'))
# set the secret key. keep this really secret:
# NOTE(review): a hardcoded secret key committed to source control can sign
# session cookies for anyone who reads the repo — load it from an environment
# variable or config file outside version control instead.
app.secret_key = 'n3A\xef(\xb0Cf^\xda\xf7\x97\xb1x\x8e\x94\xd5r\xe0\x11\x88\x1b\xb9'
# Run the development server when executed directly (debug=True is for
# development only; it must not be enabled in production).
if __name__ == '__main__':
    app.run(debug=True)
| [
"mcuddy77@gmail.com"
] | mcuddy77@gmail.com |
c0eaa6d05399b136b2483494825e6cbc648f18a0 | fd11d784974fc316b57dfd48827c038ff0d98909 | /baselines/jft/experiments/jft300m_vit_base16.py | 108071743048fce2f8459f78789299f0b9641fde | [
"Apache-2.0"
] | permissive | SuhongMoon/uncertainty-baselines | ada6c3382f91aeff548d5fb707863f08acd8fc7b | 1a7b24f86994c7b69d9263bf47be7169736f0da9 | refs/heads/main | 2023-08-05T21:07:12.515521 | 2021-09-13T20:50:57 | 2021-09-13T20:51:22 | 398,609,284 | 0 | 0 | Apache-2.0 | 2021-08-21T16:45:13 | 2021-08-21T16:45:12 | null | UTF-8 | Python | false | false | 3,225 | py | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT-B/16.
"""
# pylint: enable=line-too-long
import ml_collections
import get_fewshot # local file import
def get_config():
  """Config for training a patch-transformer on JFT.

  Returns an ml_collections.ConfigDict describing the dataset splits,
  preprocessing pipeline, ViT-B/16 architecture, Adam optimizer, linear LR
  schedule, and few-shot evaluation settings.
  """
  config = ml_collections.ConfigDict()

  # Dataset section.
  # Directory for the version de-dup'd from BiT downstream test-sets.
  config.dataset = 'jft/entity:1.0.0'
  config.val_split = 'test[:49511]'  # aka tiny_test/test[:5%] in task_adapt
  config.train_split = 'train'  # task_adapt used train+validation so +64167
  config.num_classes = 18291

  config.init_head_bias = -10.0

  config.trial = 0
  config.batch_size = 4096
  config.num_epochs = 7

  # Preprocessing: shared suffix for train and eval pipelines.
  pp_common = '|value_range(-1, 1)'
  pp_common += f'|onehot({config.num_classes})'
  # To use ancestor 'smearing', use this line instead:
  # pp_common += f'|onehot({config.num_classes}, key='labels_extended', key_result='labels')  # pylint: disable=line-too-long
  pp_common += '|keep("image", "labels")'
  config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
  config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
  config.shuffle_buffer_size = 250_000  # Per host, so small-ish is ok.

  config.log_training_steps = 50
  config.log_eval_steps = 1000
  # NOTE: eval is very fast O(seconds) so it's fine to run it often.
  config.checkpoint_steps = 1000

  # Model section: these hyperparameters are the standard ViT-B/16 shape
  # (12 layers, hidden 768, MLP 3072, 12 heads, 16x16 patches).
  config.model = ml_collections.ConfigDict()
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = [16, 16]
  config.model.hidden_size = 768
  config.model.transformer = ml_collections.ConfigDict()
  config.model.transformer.attention_dropout_rate = 0.
  config.model.transformer.dropout_rate = 0.
  config.model.transformer.mlp_dim = 3072
  config.model.transformer.num_heads = 12
  config.model.transformer.num_layers = 12
  config.model.classifier = 'token'  # Or 'gap'
  config.model.representation_size = 768

  # Optimizer section
  config.optim_name = 'Adam'
  config.optim = ml_collections.ConfigDict()
  config.optim.weight_decay = 0.1
  config.optim.beta1 = 0.9
  config.optim.beta2 = 0.999
  config.weight_decay = None  # No explicit weight decay

  # TODO(lbeyer): make a mini-language like preprocessings.
  config.lr = ml_collections.ConfigDict()
  config.lr.base = 8e-4  # LR has to be lower for larger models!
  config.lr.warmup_steps = 10_000
  config.lr.decay_type = 'linear'
  config.lr.linear_end = 1e-5

  # Few-shot eval section
  # NOTE(review): `get_fewshot` is imported as a module at the top of this
  # file but called like a function here — verify the import (it may need to
  # be `from get_fewshot import get_fewshot` or similar).
  config.fewshot = get_fewshot()
  config.fewshot.log_steps = 25_000

  config.args = {}
  return config
def get_sweep(hyper):
  """Return an empty hyperparameter sweep: no axes, i.e. a single trial."""
  sweep_axes = []
  return hyper.product(sweep_axes)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
ffbf4f577cb055f3ba0aff800f1e40d5f72aef31 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/3039.py | c0fc7a7b9f2a2f7aa673c3c70009e8f2ea1a127d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | #!/usr/bin/python
import sys,os
def solve(index1, grid1, index2, grid2):
    """Return the answer for one case of the magic-trick problem.

    The card the volunteer picked must appear in row `index1` of the first
    arrangement and row `index2` of the second:
      * exactly one common card -> that card is the answer;
      * several common cards    -> the magician cannot decide;
      * no common card          -> the volunteer lied.
    """
    common = set(grid1[index1]).intersection(grid2[index2])
    if len(common) == 1:
        return common.pop()
    if common:
        return "Bad magician!"
    return "Volunteer cheated!"
#Shared########################################################################
def main():
    """Read the Code Jam input file named on the command line and print one
    "Case #i: answer" line per test case.

    File layout per case: chosen row (1-based), a 4x4 grid, a second chosen
    row, a second 4x4 grid.
    """
    # NOTE(review): mode 'rU' (universal newlines) is deprecated on Python 3
    # and removed in 3.11 — plain 'r' behaves the same there.
    with open(sys.argv[1], 'rU') as f_in:
        cases = int(f_in.readline().strip())
        for case in range(1,cases+1):
            # Get input data: rows are converted to 0-based indices.
            index1 = int(f_in.readline().strip()) - 1
            grid1 = [[int(x) for x in f_in.readline().strip().split()] for _ in range(4)]
            index2 = int(f_in.readline().strip()) - 1
            grid2 = [[int(x) for x in f_in.readline().strip().split()] for _ in range(4)]
            # Solve and output
            print("Case #{}: {}".format(case, solve(index1, grid1, index2, grid2)))
if __name__ == '__main__':
    # Run the solver when a readable input file is supplied; otherwise
    # explain what went wrong.
    if len(sys.argv) > 1 and os.path.exists(sys.argv[1]):
        main()
    elif len(sys.argv) > 1 and not os.path.exists(sys.argv[1]):
        # Fixed: the Python-2-only `print "..."` statement form is a
        # SyntaxError on Python 3; a single-argument print(...) call prints
        # the identical text on both Python 2 and 3, matching the print()
        # calls already used in main().
        print("File '"+str(sys.argv[1])+"' does not exist!")
    else:
        print("No file supplied! Run program this way: '"+str(sys.argv[0])+" something.in'")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
bae6c38d93ed8a773579e39d1ec375da72b1dfee | 0ef993b4dc63d6ed10e579304f6258c04f5d9f47 | /032.py | 668f6bd489044477e6250af0ad17fe73f9ba29ac | [] | no_license | yu5shi8/yukicoder | c5eb9f77d355cfdb1ae4e36634acca18b1d797f2 | 2bfc5b1b044b8f2c5a33db037585788787ebbc88 | refs/heads/master | 2020-05-02T04:46:17.694923 | 2020-03-12T08:37:28 | 2020-03-12T08:37:28 | 177,757,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # -*- coding: utf-8 -*-
# No.32 貯金箱の憂鬱
# https://yukicoder.me/problems/5
# Read the coin counts: l = 100-yen coins, m = 25-yen coins, n = 1-yen coins.
l = int(input())
m = int(input())
n = int(input())
# (Author's note, translated) In hindsight the `if` guards are unnecessary:
# `//` and `%` leave values below the divisor unchanged, so the carries
# could have been applied unconditionally.
# 1-yen coins: every 25 of them is exchanged for one 25-yen coin.
if n >= 25:
    m = m + (n // 25)
    n = n % 25
# 25-yen coins: every 4 of them is exchanged for one 100-yen coin.
if m >= 4:
    l = l + (m // 4)
    m = m % 4
# 100-yen coins: groups of 10 are exchanged (for bills) and no longer
# counted as coins, so only the remainder stays.
if l >= 10:
    l = l % 10
# Total number of coins remaining.
coin_count = l + m + n
print(coin_count)
'''
【参考回答】
https://yukicoder.me/submissions/321110
l,m,n = [int(input()) for _ in '1'*3]
print(n%25 + (n//25+m)%4 + ((n//25+m)//4+l)%10)
''' | [
"royal_unicorn411@hotmail.co.jp"
] | royal_unicorn411@hotmail.co.jp |
9331f10c63e60f252f6aea317e8e0f8981837a4e | d1c6de4e0d4aafbe1e7d15a02487494f86bf9b7e | /알고리즘문제/공통조상.py | fb4e31b961fce86f0136ebde7630b07013b14abe | [] | no_license | kdm604/TIL | d2ce2122e0b828a595530ac2a405a4661cf60205 | 554bbd8e884f4e7fbebdefbfa22a1a5eee0fa452 | refs/heads/master | 2023-01-11T21:41:57.845549 | 2020-03-24T08:55:10 | 2020-03-24T08:55:10 | 195,938,033 | 0 | 0 | null | 2023-01-05T01:14:37 | 2019-07-09T05:23:00 | Python | UTF-8 | Python | false | false | 1,048 | py | def check(T):
if T:
ans.append(nxn[T][2])
check(nxn[T][2])
def check2(T):
if T:
ans2.append(nxn[T][2])
check2(nxn[T][2])
def number(T):
if T:
num.append(T)
number(nxn[T][0])
number(nxn[T][1])
T = int(input())
for test in range(T):
V, E, A, B = map(int, input().split())
nxn = [[0 for _ in range(3)] for _ in range(V + 1)]
arr = list(map(int, input().split()))
ans = []
ans2 = []
max = 0
b = 0
for i in range(0, len(arr), 2):
if nxn[arr[i]][0] == 0:
nxn[arr[i]][0] = arr[i + 1]
nxn[arr[i + 1]][2] = arr[i]
else:
nxn[arr[i]][1] = arr[i + 1]
nxn[arr[i + 1]][2] = arr[i]
check(A)
check2(B)
for i in range(len(ans)):
for j in range(len(ans2)):
if ans[i] == ans2[j]:
max = ans[i]
b = 1
break
if b == 1:
break
num = []
number(max)
print("#%d %d %d" % (test+1, max, len(num)))
| [
"kdm604@naver.com"
] | kdm604@naver.com |
9e1518600878f1c780387c3d1ba96ee3f7a7b7d8 | f889bc01147869459c0a516382e7b95221295a7b | /test/test_sales_data_shipment_item_creation_interface.py | b6198d3e36b25d454919911ba2064c4129ed7f47 | [] | no_license | wildatheart/magento2-api-client | 249a86f5c0289743f8df5b0324ccabd76f326512 | e6a707f85b37c6c3e4ef3ff78507a7deb8f71427 | refs/heads/master | 2021-07-14T16:01:17.644472 | 2017-10-18T13:33:08 | 2017-10-18T13:33:08 | 107,412,121 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | # coding: utf-8
"""
Magento Community
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.sales_data_shipment_item_creation_interface import SalesDataShipmentItemCreationInterface
class TestSalesDataShipmentItemCreationInterface(unittest.TestCase):
    """ SalesDataShipmentItemCreationInterface unit test stubs

    Auto-generated by swagger-codegen; the test body below is still the
    placeholder it shipped with (see the FIXME) and asserts nothing yet.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testSalesDataShipmentItemCreationInterface(self):
        """
        Test SalesDataShipmentItemCreationInterface
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = swagger_client.models.sales_data_shipment_item_creation_interface.SalesDataShipmentItemCreationInterface()
        pass
if __name__ == '__main__':
unittest.main()
| [
"sander@wildatheart.eu"
] | sander@wildatheart.eu |
31f909913d17784821fc85958b1ace8371fbcc6c | e0378adb07d3a051b65420e98ed8d141aabcbf18 | /Assignment 10/tests/test_repository.py | 1992e7d97bbfc504289619266be344a85d787f3c | [] | no_license | BVlad917/Fundamentals-of-Programming | bc36c961be8d7144dd57aaf97b7419db551bd291 | afd98cd572d456b2dda40c41882c1f2eb8f139f5 | refs/heads/main | 2023-08-06T17:02:26.966825 | 2021-09-20T19:56:11 | 2021-09-20T19:56:11 | 408,119,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,590 | py | import unittest
from domain.person import Person
from repository.custom_repo import Repository
from repository.in_memory_repo import Repository as InMemoryRepo
from repository.repository_exceptions import DeleteException, AddException, RepositoryException
class TestRepository(unittest.TestCase):
    """Unit tests for the custom Repository and the in-memory Repository.

    Both repositories are exercised through the same public API: elements,
    add_to_repo, delete_by_id, get_all_ids, find_by_id and update.
    """
    def setUp(self):
        # Two sample persons and one fresh instance of each repository kind.
        self.pers_1 = Person(1, 'Vlad Bogdan', '0745000111')
        self.pers_2 = Person(2, 'Test Person', '0241234567')
        self.custom_repo = Repository()
        self.in_memory_repo = InMemoryRepo()
    def test_length_repo_attribute(self):
        """len(elements) grows from 0 as persons are added."""
        self.assertEqual(len(self.custom_repo.elements), 0)
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        self.assertEqual(len(self.custom_repo.elements), 2)
    def test_contains_attribute_repo(self):
        """Membership (`in`) reflects exactly what was added."""
        self.custom_repo.add_to_repo(self.pers_1)
        self.assertTrue(self.pers_1 in self.custom_repo.elements)
        self.assertFalse(self.pers_2 in self.custom_repo.elements)
    def test_iter_repo(self):
        """elements supports both iteration and indexing."""
        self.custom_repo.add_to_repo(self.pers_1)
        list_of_pers = []
        # Test to see if repository is iterable
        for pers in self.custom_repo.elements:
            list_of_pers.append(pers)
        # Test to see if repository is 'index-able'
        self.assertEqual(list_of_pers[0], self.custom_repo.elements[0])
    def test_delete_item_from_repo_by_id(self):
        """delete_by_id removes the person; deleting a missing id raises."""
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.delete_by_id(1)
        self.assertEqual(len(self.custom_repo.elements), 0)
        self.assertRaises(DeleteException, self.custom_repo.delete_by_id, 1)
        self.assertRaises(DeleteException, self.in_memory_repo.delete_by_id, 1)
    def test_get_all_ids(self):
        """get_all_ids returns the ids of every stored person."""
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        self.assertEqual(self.custom_repo.get_all_ids(), [1, 2])
    def test_find_by_id(self):
        """find_by_id returns a (person, ...) tuple for an existing id."""
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        self.assertEqual(self.custom_repo.find_by_id(1)[0], self.pers_1)
    def test_add_to_repo(self):
        """Adding grows the repo; adding a duplicate raises AddException."""
        self.assertEqual(len(self.custom_repo.elements), 0)
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        self.assertEqual(len(self.custom_repo.elements), 2)
        self.assertRaises(AddException, self.custom_repo.add_to_repo, self.pers_1)
        self.in_memory_repo.add_to_repo(self.pers_1)
        self.assertRaises(AddException, self.in_memory_repo.add_to_repo, self.pers_1)
    def test_get_all(self):
        """elements returns every stored person, in insertion order."""
        self.assertEqual(self.custom_repo.elements, [])
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        self.assertEqual(self.custom_repo.elements, [self.pers_1, self.pers_2])
    def test_update(self):
        """update replaces the fields of an existing person by id; updating
        an unknown id raises RepositoryException (on both repo kinds)."""
        self.custom_repo.add_to_repo(self.pers_1)
        self.custom_repo.add_to_repo(self.pers_2)
        update_pers = Person(15, 'New Name', '0745 094 735')
        self.assertRaises(RepositoryException, self.custom_repo.update, update_pers)
        update_pers = Person(1, 'New Name', '0745 094 735')
        self.custom_repo.update(update_pers)
        pers1, _ = self.custom_repo.find_by_id(1)
        self.assertEqual(pers1.name, 'New Name')
        self.assertEqual(pers1.phone_number, '0745 094 735')
        self.assertRaises(RepositoryException, self.in_memory_repo.update, self.pers_1)
| [
"noreply@github.com"
] | BVlad917.noreply@github.com |
04196cf405334cb323c1846ac46eeb6777d8eef4 | 8b634dc196162dff328d61bf6f8d4121dfb59bd4 | /Queue/movingAverage.py | 87c8bc033130a1c01772012764b36c6e3cf616d4 | [] | no_license | kqg13/LeetCode | 84268b2146dc8323cb71f041b6664069baaa339c | 1c584f4ca4cda7a3fb3148801a1ff4c73befed24 | refs/heads/master | 2023-08-05T09:46:28.103910 | 2023-07-29T21:02:26 | 2023-07-29T21:02:26 | 165,123,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | # Easy queue problem 346: Moving Average from Data Stream
# Given a stream of integers and a window size, calculate the moving average
# of all integers in the sliding window.
# Example:
# MovingAverage m = new MovingAverage(3);
# m.next(1) = 1
# m.next(10) = (1 + 10) / 2
# m.next(3) = (1 + 10 + 3) / 3
# m.next(5) = (10 + 3 + 5) / 3
from collections import deque
class MovingAverage:
    """Moving average over the last `size` values of a data stream.

    A running total is kept alongside a bounded deque so each call to
    next() costs O(1) instead of re-summing the whole window (the deque's
    maxlen still handles eviction of the oldest value).
    """

    def __init__(self, size):
        """
        Initialize your data structure here.
        :type size: int  (window size, assumed > 0)
        """
        self.size = size
        self.d = deque(maxlen=self.size)
        # Sum of the values currently inside the window.
        self._total = 0

    def next(self, val):
        """Push `val` into the window and return the current average.

        :type val: int
        :rtype: float
        """
        if len(self.d) == self.size:
            # Window is full: the append below will evict d[0], so remove
            # it from the running total first.
            self._total -= self.d[0]
        self.d.append(val)
        self._total += val
        return self._total / len(self.d)
| [
"skg2016@nyu.edu"
] | skg2016@nyu.edu |
6d1846fe2f0d777ba67f4e06e571587e1499e552 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/remove-all-ones-with-row-and-column-flips.py | c8c1b705b138b90f15933da251f8e4dc0c4d8bf7 | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 302 | py | # Time: O(m * n)
# Space: O(1)
class Solution(object):
def removeOnes(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
return all(grid[i] == grid[0] or all(grid[i][j] != grid[0][j] for j in xrange(len(grid[0]))) for i in xrange(1, len(grid)))
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
71f373e412b3cdc65f292f31da7234801c241465 | fb16f7024e0d93ecb07c122e633c1a957a8ab645 | /django/project31/app31/views.py | 879c85bbdf9271ed121d3b5472a6fc43df156373 | [] | no_license | rajeshanu/rajeshprograms | c23cf550e060040c7b336242a805e274d3305371 | 83f0fc9c4a8628bba590d1066ca93fd98137f0bc | refs/heads/master | 2020-04-04T13:17:55.986558 | 2018-11-03T06:42:51 | 2018-11-03T06:42:51 | 155,956,676 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from django.shortcuts import render
from .models import register
from app31.models import login
# Create your views here.
def showindex(request):
    """Render the landing page with the registration/login forms."""
    return render(request,"index.html")
def show(request):
    """Create a `register` row from the sign-up form and confirm it.

    Form fields: t1=name, t2=contact number, t3=email, t4=password.
    NOTE(review): the password is stored in plaintext — it should be hashed
    before saving.
    """
    name=request.POST.get("t1")
    cno=request.POST.get("t2")
    email=request.POST.get("t3")
    password=request.POST.get("t4")
    r=register(name=name,cno=cno,email=email,password=password)
    r.save()
    return render(request,"index.html",{"msg":"data saved"})
def login(request):
    """Check the login form (t5=email, t6=password) against `register`.

    NOTE(review): this def rebinds the module-level name `login`, shadowing
    the model imported via `from app31.models import login` — see
    savefeedback(), which is affected by that shadowing.
    NOTE(review): `register.objects.get` raises DoesNotExist for an unknown
    email instead of showing the "invalid" message — consider handling it.
    """
    email=request.POST.get("t5")
    password=request.POST.get("t6")
    s=register.objects.get(email=email)
    # Plaintext comparison, matching how show() stores the password.
    if s.email==email and s.password==password:
        return render(request,"details.html",{"email":s.email})
    else:
        return render(request,"index.html",{"msg2":"invalid"})
def profile(request):
    """Render the profile page for the email given in the query string."""
    email=request.GET.get("email")
    # `d` is never used; presumably the lookup is only an existence check
    # (it raises DoesNotExist for an unknown email) — verify the intent.
    d=register.objects.get(email=email)
    return render(request,"profile.html",{"email":email})
def feedback(request):
    """Show the feedback form unless this user already gave feedback.

    The session key "email" is set by savefeedback(); if it matches the
    requested user, they are bounced back with a notice.
    NOTE(review): the bare `except:` swallows every error, not just the
    missing session key — `except KeyError:` would be safer.
    """
    uemail=request.GET.get("email")
    try:
        fmail=request.session["email"]
        if uemail==fmail:
            return render(request,"details.html",{"msg1":" already given feedback"})
        else:
            return render(request,"feedback.html",{"email":uemail})
    except:
        return render(request,"feedback.html",{"email":uemail})
def savefeedback(request):
    """Persist a feedback message (form: id=email, msg=text) and re-render
    the details page, remembering in the session that feedback was given.

    Bug fix: at module level the view function ``login`` defined above
    rebinds the name imported via ``from app31.models import login``, so the
    original ``login(feedback=..., uemail=...)`` called the *view* with
    unexpected keyword arguments and raised TypeError. Importing the model
    under an alias restores the intended model instantiation.
    """
    # Local aliased import: the bare name `login` refers to the view above.
    from app31.models import login as LoginModel
    email=request.POST.get("id")
    feed=request.POST.get("msg")
    l=LoginModel(feedback=feed,uemail=email)
    l.save()
    # Mark this user as having given feedback (checked by feedback()).
    request.session["email"]=email
    return render(request,"details.html",{"msg":"feedback given "})
"44720126+rajeshanu@users.noreply.github.com"
] | 44720126+rajeshanu@users.noreply.github.com |
b8997bbbf7cc05c670173ee6e09e86e7926a283b | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/eliben_deep-learning-samples/deep-learning-samples-master/logistic-regression/timer.py | 91f4c60c70cdc0ca403d542731a68b3ae57a0cdd | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 411 | py | from __future__ import print_function
import sys
import time
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
if self.name:
print('[%s] ' % self.name, end='')
sys.stdout.flush()
def __exit__(self, type, value, traceback):
print('Elapsed: %s' % (time.time() - self.tstart))
| [
"659338505@qq.com"
] | 659338505@qq.com |
5583ff3947649c6f8c30dd9edb02cad147b9a2fe | 60543d88cb19e7a264cf30622cb45ea039b243f8 | /mnist/keras/mnist_train.py | 07627f828c25aef98e42601ab9c313a26050aa83 | [] | no_license | andrewsmedina/kaggle | 90b3a9bb6945b839d75ee4a9bb52d76b4b5c9bef | 51bc2fad9e6b57e00ed50e36897870cdbead6372 | refs/heads/master | 2021-01-10T22:19:58.366222 | 2018-07-29T16:39:55 | 2018-07-29T16:39:55 | 69,138,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,973 | py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
model.save('my_model.h5') | [
"andrewsmedina@gmail.com"
] | andrewsmedina@gmail.com |
0819c7e1251822f165d64e5c64fa8b790cb86716 | bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5 | /src/modules/customised/payroll_test_2/payroll_currupt/hr_payroll_arrear/hr_payroll_arrear.py | e84abac33ee604e572e2008b06091712ae5e3934 | [] | no_license | kakamble-aiims/work | ba6cbaf4c525ff7bc28d0a407f16c829d0c35983 | cd392bf0e80d71c4742568e9c1dd5e5211da56a9 | refs/heads/master | 2022-04-02T14:45:58.515014 | 2019-12-31T14:00:51 | 2019-12-31T14:00:51 | 199,015,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from trytond.pool import Pool, PoolMeta
__all__ = [
'HrSalaryRule',
]
class HrSalaryRule(metaclass=PoolMeta):
    """Extends hr.salary.rule with the overtime-allowance (OTA) computation."""
    __name__ = 'hr.salary.rule'

    def calculate_OTA(self, payslip, employee, contract):
        """Return the OTA amount for *employee*, or 0 when none is recorded.

        `payslip` and `contract` are unused here but kept for the common
        rule-calculation signature. The lookup matches on employee and
        salary code; only the first matching record's amount is used.
        """
        ota_model = Pool().get('hr.allowance.ota')
        records = ota_model.search([
            ('employee', '=', employee),
            ('salary_code', '=', employee.salary_code),
            # ('state', '=', 'approve')  # state filter disabled in original
        ])
        if not records:
            return 0
        return records[0].amount
| [
"kakamble.aiims@gmail.com"
] | kakamble.aiims@gmail.com |
6c87fe2f66b078f6b284dbe97e0a53bba2d75771 | 35c8d8762a87f9adf8964d1ec50c7ed69583d92e | /lecturer/migrations/0004_auto_20171213_2143.py | f91ccaab02e610d73c024ecf38f583e4da85ef7a | [
"MIT"
] | permissive | VirginiaNdungu1/Grading-System | f15b9c1346986fc10336ab75938079393331330e | 7f30a81cf2b1717c9f0a5a7f5814404c646eee9d | refs/heads/master | 2021-05-06T06:34:39.719493 | 2017-12-24T14:30:04 | 2017-12-24T14:30:04 | 113,868,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Generated by Django 2.0 on 2017-12-13 18:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Profile.roles and tightens the
    gender/reg_date field definitions (do not hand-edit applied migrations)."""

    dependencies = [
        ('lecturer', '0003_auto_20171213_1916'),
    ]

    operations = [
        # New optional role field with a fixed choice set.
        migrations.AddField(
            model_name='profile',
            name='roles',
            field=models.CharField(blank=True, choices=[('STUDENT', 'Student'), ('TEACHER', 'Teacher'), ('DEAN', 'Dean')], max_length=30),
        ),
        # Gender gains a 'NONE' default and explicit choices.
        migrations.AlterField(
            model_name='profile',
            name='gender',
            field=models.CharField(blank=True, choices=[('FEMALE', 'F'), ('MALE', 'M'), ('NONE', 'None')], default='NONE', max_length=30),
        ),
        # Registration timestamp is now set automatically on creation.
        migrations.AlterField(
            model_name='profile',
            name='reg_date',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"ndungu.wairimu22@gmail.com"
] | ndungu.wairimu22@gmail.com |
99a93a979b8bff83507c7790ad34b390b3361f5b | 3f4edccfbcada494673214bb4556dea5098a7af3 | /dataset/binary_train.py | 9321d1f73510ec2044f114ff440353e7bd602448 | [] | no_license | Below0/konlpy-nf-analyzer | a79b401c641586fd191f518286c2aa54f9f4cf25 | e6abf1181efccdad15bfd1119320f70d709ab440 | refs/heads/master | 2023-01-21T11:29:29.053651 | 2020-11-29T09:54:09 | 2020-11-29T09:54:09 | 315,824,970 | 0 | 0 | null | 2020-11-25T11:34:16 | 2020-11-25T04:05:55 | Jupyter Notebook | UTF-8 | Python | false | false | 3,982 | py | import codecs
import csv
import pandas as pd
import numpy as np
import os
import sys
import urllib.request
from urllib.parse import *
import requests
from bs4 import BeautifulSoup
#from kafka import KafkaProducer
import json
import re
import json
import datetime
from konlpy.tag import *
import konlpy
import re
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.text import tokenizer_from_json
from keras.models import model_from_json
def below_threshold_len(max_len, nested_list):
    """Print and return the percentage of samples whose length is <= max_len.

    :param max_len: inclusive length threshold
    :param nested_list: sequence of token lists, one per sample
    :return: percentage in [0, 100] as a float; 0.0 for an empty input
        (the original implementation raised ZeroDivisionError there).
        The return value is new and backward-compatible — existing callers
        ignore it.
    """
    if not nested_list:
        return 0.0
    cnt = sum(1 for s in nested_list if len(s) <= max_len)
    ratio = (cnt / len(nested_list)) * 100
    print('전체 샘플 중 길이가 %s 이하인 샘플의 비율: %s' % (max_len, ratio))
    return ratio
# Load the labeled review dataset and split it at a fixed row index
# (first 11000 rows train, the rest test).
dataset = pd.read_csv('./dataset3.csv')
train_data = dataset[:11000]
test_data = dataset[11000:]
# Korean particles/punctuation to drop after morphological analysis.
stopwords = ['.', ',', '', '의', '가', '이', '은', '들', '는', '좀', '잘', '걍', '과', '도', '를', '으로', '자', '에', '와', '한', '하다']
# In[72]:
okt = Mecab()
# Keeps only Hangul characters and spaces; everything else is stripped.
hangul = re.compile('[^ ㄱ-ㅣ가-힣]+')
X_train = []
# NOTE(review): this loop and the X_test loop below are identical — a shared
# helper function would remove the duplication.
for sentence in train_data['content']:
    temp_X = okt.morphs(sentence) # tokenize into morphemes
    token_X = []
    for word in temp_X:
        temp = hangul.sub('', word)
        # Drop tokens that become empty after filtering, and stopwords.
        if temp == '' or temp in stopwords:
            continue
        token_X.append(temp)
    X_train.append(token_X)
X_test = []
for sentence in test_data['content']:
    temp_X = okt.morphs(sentence) # tokenize into morphemes
    token_X = []
    for word in temp_X:
        temp = hangul.sub('', word)
        if temp == '' or temp in stopwords:
            continue
        token_X.append(temp)
    X_test.append(token_X)
print('tokenizing complete!')
max_words = 50000
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
tokenizer_json = tokenizer.to_json()
with open('tokenizer3.json', 'w', encoding='utf-8') as f:
f.write(json.dumps(tokenizer_json, ensure_ascii=False))
y_train = []
y_test = []
for i in range(len(train_data['label'])):
if train_data['label'].iloc[i] == 1:
y_train.append([1])
elif train_data['label'].iloc[i] == -1:
y_train.append([0])
for i in range(len(test_data['label'])):
if test_data['label'].iloc[i] == 1:
y_test.append([1])
elif test_data['label'].iloc[i] == -1:
y_test.append([0])
y_train = np.array(y_train)
y_test = np.array(y_test)
print('리뷰의 최대 길이 :', max(len(l) for l in X_train))
print('리뷰의 평균 길이 :', sum(map(len, X_train)) / len(X_train))
below_threshold_len(200, X_train)
# In[77]:
from keras.layers import Embedding, Dense, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
max_len = 200 # pad/truncate every sequence to length 200 (the original comment said 20, which was stale)
X_train = pad_sequences(X_train, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)
# Embedding -> LSTM -> sigmoid: binary sentiment classifier.
model = Sequential()
model.add(Embedding(max_words, 128))
model.add(LSTM(128))
model.add(Dense(1, activation='sigmoid'))
# Stop after 7 epochs without val_loss improvement; keep the weights with
# the best validation accuracy in best_model.h5.
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=7)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=15, callbacks=[es, mc], batch_size=1000, validation_split=0.1)
# Persist the architecture separately from the checkpointed weights.
model_json = model.to_json()
with open("model3.json", "w") as json_file :
    json_file.write(model_json)
# In[78]:
# Rebuild the model from JSON and load the best checkpoint.
with open("model3.json", "r") as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("best_model.h5")
# NOTE(review): the evaluation on the next line (outside this block) uses
# `model` (final-epoch weights), not `loaded_model` (best checkpoint) —
# presumably `loaded_model` was intended; verify.
loaded_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
print(model.evaluate(X_test, y_test)[1] * 100) | [
"l4538@naver.com"
] | l4538@naver.com |
91759c4c00968d80511f1adeb92b86e2bbe92547 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2169/60615/257442.py | 8f2708f19202be73ba8ddf22bf44ef88570c0474 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 606 | py |
time=int(input())
result=[]
opset=['+','-','*','/']
# Evaluate `time` postfix (RPN) expressions, one per input line.
# Operands are single decimal digits; each operator pops the top two
# stack values and pushes the result.
while time>0:
    postfix=[i for i in input()] #231*+9-
    num=[]
    for item in postfix:
        if item not in opset:
            num.append(int(item))
        else:
            # Operator: combine num[-2] (left) with num[-1] (right).
            if item=='+':
                temp=num[-2]+num[-1]
            elif item=='-':
                temp=num[-2]-num[-1]
            elif item=='*':
                temp=num[-2]*num[-1]
            else:
                # '/' is true division, so the result may be a float.
                temp=num[-2]/num[-1]
            del num[-2:]
            num.append(temp)
    result.append(num[0])
    time=time-1
# Print one result per expression, in input order.
for res in result:
    print(res)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
e88db4589a9f0746f6feef2ebb164eba6e4790e2 | 62974b03a14008f950db0c68d5962311a3401d3b | /medium/slidewindow/test_209_Minimum_Size_Subarray_Sum.py | 0932a43778243b2896f0388d188dcf9c386f90e7 | [] | no_license | wuxu1019/leetcode_sophia | 818f39e8324aaf2b588150b1f7edbc2897f64935 | 0e99f9a5226507706b3ee66fd04bae813755ef40 | refs/heads/master | 2018-10-13T13:58:29.762836 | 2018-07-24T04:20:23 | 2018-07-24T04:20:23 | 111,624,101 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | """
Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of which the sum ≥ s. If there isn't one, return 0 instead.
Example:
Input: [2,3,1,2,4,3], s = 7
Output: 2
Explanation: the subarray [4,3] has the minimal length under the problem constraint.
"""
class Solution(object):
    """LeetCode 209: minimal length of a contiguous subarray with sum >= s.

    Three implementations of the same contract: O(n^2) brute force over
    prefix sums, O(n log n) binary search, and the O(n) sliding window.
    Each returns 0 when no such subarray exists.
    """

    def minSubArrayLen_bruteforce(self, s, nums):
        """O(n^2): try every start index i against every end index j.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        ans = float('INF')
        # record[j] = nums[0] + ... + nums[j] (inclusive prefix sums).
        record = [nums[0]]
        for i in range(1, len(nums)):
            record.append(record[i - 1] + nums[i])
        for i in range(len(nums)):
            for j in range(i, len(nums)):
                # Sum of nums[i..j] recovered from the prefix table.
                sm = record[j] - record[i] + nums[i]
                if sm >= s:
                    ans = min(ans, j - i + 1)
                    # Larger j only gives longer windows for this i.
                    break
        return ans if ans != float('INF') else 0

    def minSubArrayLen_binarysearch(self, s, nums):
        """O(n log n): for each start i, binary-search the first prefix
        sum that makes nums[i..j] reach s.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        import bisect  # BUG FIX: `bisect` was used below but never imported.

        if not nums:
            return 0
        ans = float('INF')
        record = [nums[0]]
        for i in range(1, len(nums)):
            record.append(record[i - 1] + nums[i])
        for i in range(0, len(nums)):
            # Smallest prefix value whose window starting at i sums >= s.
            if i == 0:
                to_find = s
            else:
                to_find = s + record[i - 1]
            j = bisect.bisect_left(record, to_find, i)
            if j < len(nums):
                ans = min(ans, j - i + 1)
        return ans if ans != float('INF') else 0

    def minSubArrayLen_onepass(self, s, nums):
        """O(n) sliding window: grow on the right, shrink from the left
        while the window sum still reaches s.

        :type s: int
        :type nums: List[int]
        :rtype: int
        """
        j = 0
        sm = 0
        ans = float('INF')
        for i in range(len(nums)):
            sm += nums[i]
            while sm >= s:
                ans = min(ans, i - j + 1)
                sm -= nums[j]
                j += 1
        return ans if ans != float('INF') else 0
| [
"wuxu1019@hotmail.com"
] | wuxu1019@hotmail.com |
4c75404055009fd4e4fe3fb26886920537e95665 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/3855.py | 20efe8a1143906981815f06e93781546149a4733 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | import sys
class InputFileParser(object):
    """Reads a Code Jam input file.

    The first line holds the number of test cases; each following line
    holds one integer, stored as that case's ``last`` value.
    NOTE: Python 2 only (uses ``xrange``; the file also uses print statements).
    """
    def __init__(self, filename):
        self.test_cases = []
        with open(filename, 'r') as in_f:
            self.test_case_count = int(in_f.readline())
            # Cases are numbered from 1, matching the required output format.
            for i in xrange(1, self.test_case_count + 1):
                tc = TestCase(i)
                tc.last = int(in_f.readline())
                self.test_cases.append(tc)
class TestCase(object):
    """One test case: its 1-based index, the parsed input, and the answer."""

    def __init__(self, index):
        """Create an empty case numbered *index*; input/answer start at 0."""
        self.index = index
        self.last = self.result = 0
def to_digits(number):
    """Split a non-negative integer into its decimal digits.

    Under Python 2 (which this script targets) ``map`` returns a list,
    e.g. to_digits(132) -> [1, 3, 2].
    """
    # Idiom fix: `int` can be passed to map directly; the lambda wrapper
    # (`lambda digit_str: int(digit_str)`) was redundant.
    return map(int, str(number))
def to_number(digits):
    """Reassemble a sequence of digits into the integer they spell."""
    text = ''.join(str(d) for d in digits)
    return int(text)
def drop_last(digits):
    """Drop the final digit and decrement the new last digit by one.

    E.g. [1, 3, 0] -> [1, 2].  The input list is left untouched.
    """
    shortened = list(digits[:-1])
    shortened[-1] -= 1
    return shortened
def is_tidy(digits):
    """True iff the digit sequence never decreases from left to right."""
    return all(a <= b for a, b in zip(digits, digits[1:]))
def last_tidy(last):
    """Largest "tidy" number (non-decreasing digits) that is <= *last*.

    Repeatedly strip digits from the right (drop_last decrements the new
    last digit) until the prefix is tidy, padding removed places with 9s.
    """
    prefix = to_digits(last)
    nines = []
    while not is_tidy(prefix):
        nines.append(9)
        prefix = drop_last(prefix)
    return to_number(prefix + nines)
if __name__ == '__main__':
    # Usage (Python 2): script.py <input file> <output file>
    if len(sys.argv) < 3:
        exit('input and output file not specified!')
    parser = InputFileParser(sys.argv[1])
    with open(sys.argv[2], 'w') as out_f:
        for tc in parser.test_cases:
            tc.result = last_tidy(tc.last)
            # Echo each answer to stdout and append it to the output file.
            print 'Case #{0}: {1}'.format(tc.index, tc.result)
            out_f.write('Case #{0}: {1}\n'.format(tc.index, tc.result))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8433440edf9c893209f3b5aff55141b655b9b5d9 | ae5a285b64731886c736a076b9cb35b6f5e18266 | /blogs/mainapp/urls.py | b25f20645ded6357044666bb29924b915c61d538 | [] | no_license | MichaelDc86/test_task_django | 0c398af6c748532bcdc217f9dd23acdfd48ce0c5 | 04a58a29f6786037e310329a9b28a13efbfe2d41 | refs/heads/master | 2023-04-26T14:48:32.183376 | 2019-06-28T13:29:21 | 2019-06-28T13:29:21 | 193,847,278 | 0 | 0 | null | 2023-04-21T20:33:20 | 2019-06-26T06:57:04 | JavaScript | UTF-8 | Python | false | false | 732 | py | from django.contrib.auth.views import LogoutView
from django.urls import re_path
import mainapp.views as mainapp
# URL namespace used when reversing, e.g. {% url 'mainapp:blog_list' %}.
app_name = 'mainapp'
urlpatterns = [
    # Blog index page.
    re_path(r'^$', mainapp.BlogListView.as_view(), name='blog_list'),
    # Authentication: login/register use custom views, logout is Django's.
    re_path(r'login/$', mainapp.BloggerLogin.as_view(), name='login'),
    re_path(r'register/$', mainapp.BloggerRegister.as_view(), name='register'),
    re_path(r'logout/$', LogoutView.as_view(), name='logout'),
    # Post CRUD; update/delete capture the post's primary key as `pk`.
    re_path(r'post_create/$', mainapp.PostblogCreateView.as_view(), name='post_create'),
    re_path(r'post_update/(?P<pk>\d+)/$', mainapp.PostBlogUpdateView.as_view(), name='post_update'),
    re_path(r'post_delete/(?P<pk>\d+)/$', mainapp.PostBlogDeleteView.as_view(), name='post_delete'),
]
| [
"lenskymiwa@ya.ru"
] | lenskymiwa@ya.ru |
257e6320821547e22d65ca68d945fad58dc00cfe | 276c86a451c4110ba0885dbe8509d46f23c21715 | /esp32-micropython/examples/oled_thermometer.py | e94deebb1ab3bf42ebb3dc5fddf9718e04a3d88f | [] | no_license | vtt-info/octopuslab | 055c9bfdc1466a6e5acf90a3cd0db2826e72dee7 | 3d20933c05bae3eec4d0c033f228bde369e46e07 | refs/heads/master | 2022-07-27T14:19:16.386241 | 2020-05-16T13:50:21 | 2020-05-16T13:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # octopusLAB example - 2019
# simple example: dallas thermometer and oled display
from time import sleep
from util.octopus import oled_init
from util.iot import Thermometer
from util.display_segment import threeDigits
print("init > ")
# Create the thermometer driver and initialise the OLED; both come from
# the octopusLAB util helpers imported above.
ts = Thermometer()
oled = oled_init()
print("start > ")
# Main loop: sample the sensor once per second and render the reading.
while True:
    temp = ts.get_temp()
    print(temp)
    # The display takes an integer in tenths of a degree (25.4 -> 254).
    # The two True flags are passed straight to threeDigits — presumably
    # decimal-point/redraw options; confirm against util.display_segment.
    temp10 = int(temp * 10)
    threeDigits(oled, temp10, True, True)
    sleep(1)
| [
"noreply@github.com"
] | vtt-info.noreply@github.com |
b79572bf6408b286a553a38cf007ee7d293068da | b2db386a35e167dd67d6de90d95c06d5c2ed91cd | /403_FrogJump.py | fcab3104b67560cffd6596e6451af41ef135978a | [] | no_license | rohitmungre/leetcode | 9edb1b8b0cd714eb1a5e1fa847f2e17c455fd624 | d49836b2b46a980f073bb9a6f2e47c4a903e48ac | refs/heads/master | 2020-08-07T16:55:38.699188 | 2020-03-12T11:00:13 | 2020-03-12T11:00:13 | 213,531,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | import copy
class Solution(object):
    """LeetCode 403 (Frog Jump).

    The frog starts on the first stone; its first hop must be exactly
    1 unit, and after a hop of k units the next hop must be k-1, k or
    k+1 units, always forward.  ``canCross`` tells whether the last
    stone is reachable.
    """

    def canCross(self, stones):
        """Return True iff the frog can reach the last stone.

        Replaces the previous exponential DFS — which memoized on string
        keys, copied the whole jump history per call and re-scanned
        ``stones`` with O(n) ``list.index`` — with the standard O(n^2)
        dynamic program over reachable jump sizes.  Results are identical.

        :type stones: List[int]
        :rtype: bool
        """
        if len(stones) == 0:
            return False
        if len(stones) == 1:
            # Already standing on the last stone.
            return True
        # The very first hop must land on position 1.
        if stones[1] != 1:
            return False
        # jumps[pos] = set of hop sizes with which the frog can land on pos.
        jumps = {pos: set() for pos in stones}
        jumps[stones[1]].add(1)
        for pos in stones[1:]:
            # Stones are sorted, so every hop into `pos` originates from a
            # smaller position already processed: jumps[pos] is complete here.
            for k in jumps[pos]:
                for step in (k - 1, k, k + 1):
                    # Only forward hops; a 0-unit step would stay in place.
                    if step > 0 and pos + step in jumps:
                        jumps[pos + step].add(step)
        return bool(jumps[stones[-1]])
| [
"noreply@github.com"
] | rohitmungre.noreply@github.com |
db2c9ab6255eab8ada3b3adeee266a9885a90be2 | 31a928cff4960236923b6bc3b68e34bb2f46f470 | /Speculator/speculator/utils/stats.py | b5694a5daf9476ee9825d50ef9478a2dc65ded64 | [
"MIT",
"BSD-3-Clause"
] | permissive | webclinic017/ml_monorepo | 707df2afd2f986eb0721d26430e6135c917817c6 | 945f0a83d6b94282c547bb6f4805f3381ad9c16a | refs/heads/master | 2021-10-19T21:02:53.322944 | 2019-02-19T20:58:51 | 2019-02-23T20:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | """
Various math functions used throughout Speculator
"""
def avg(vals, count=None):
    """Return the average value.

    Args:
        vals: List of numbers to calculate the average from.
        count: Optional divisor overriding ``len(vals)`` — useful when
            ``vals`` is only part of a larger population.

    Returns:
        Float average of ``vals`` over ``count`` items.

    Raises:
        ZeroDivisionError: If ``count`` (or ``len(vals)``) is 0.
    """
    # Idiom fix: the manual accumulator was named `sum`, shadowing the
    # builtin; use the builtin directly instead.
    total = sum(vals)
    if count is None:
        count = len(vals)
    return float(total) / count
| [
"tmichael.yu@gmail.com"
] | tmichael.yu@gmail.com |
15e688993d21c158b79180b3621f8f9c92e788ca | f5c3841a08c3faa1818d3ee210c8b9921dc9499d | /sql/selection.py | 158e8c8c265a604e3ee97f88126eb69744e3da20 | [] | no_license | villancikos/realpython-book2 | a4e74b51fe1d3a8e5af206c2938ff4966ef00df6 | 6c9a2ef714531f1163f3c78c80fad335661dacf2 | refs/heads/master | 2016-09-06T10:06:49.227106 | 2014-09-22T18:56:58 | 2014-09-22T18:56:58 | 23,493,659 | 1 | 1 | null | 2014-09-19T23:35:40 | 2014-08-30T14:44:52 | Python | UTF-8 | Python | false | false | 304 | py | # SELECT statement
import sqlite3
# The connection context manager commits the transaction on success and
# rolls back on error; it does NOT close the connection.
with sqlite3.connect('new.db') as connection:
    c = connection.cursor()
    #for row in c.execute("SELECT firstname, lastname from employees"):
    #    print row
    # Fetch all employee names at once, then print "first last" per row.
    # NOTE: Python 2 print statement.
    c.execute("SELECT firstname, lastname from employees")
    rows = c.fetchall()
    for r in rows:
        print r[0],r[1]
| [
"villancikos@gmail.com"
] | villancikos@gmail.com |
3e8ef970d06299d87bd03604d228bc22a57da735 | 283bbf2ce575ea72010e9823907285b08d20fce4 | /breathecode/tests/mocks/screenshotmachine/requests_mock.py | bc661c61e3d798348dce1188edecdeee67e37d3f | [] | no_license | AnMora/apiv2 | c084ffcb4ff5b7a0a01dac8fca26f4f4c37aad97 | fa3b3f0ce4a069facdecd18e133c7b4222a0004a | refs/heads/master | 2023-05-19T23:00:34.257230 | 2021-06-08T21:17:56 | 2021-06-08T21:17:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """
Requests mock
"""
class ResponseMock():
    """Minimal stand-in for a ``requests`` response, for use in tests."""
    # Class-level defaults mirror an "empty" response.
    status_code = None
    data = None
    content = None

    def __init__(self, status_code=200, data=''):
        """Capture the fake status and payload.

        A string payload mimics a raw body and lands in ``content``;
        anything else is treated as decoded JSON and lands in ``data``.
        """
        self.status_code = status_code
        target = 'content' if isinstance(data, str) else 'data'
        setattr(self, target, data)

    def json(self) -> dict:
        """Return the decoded payload, mimicking ``Response.json()``."""
        return self.data
def get_mock(url: str, stream=False):
    """Drop-in for ``requests.get``: always a 200 response with body 'ok'."""
    return ResponseMock(status_code=200, data='ok')
| [
"jdefreitaspinto@gmail.com"
] | jdefreitaspinto@gmail.com |
cc7e44a17c3f456e0f8fa5d7f81a928fcb8c2e17 | d680f545562589a34224f3a422c00c1697c84c5d | /mutasi_aset_2021/perkim/pm_perkim_ke_dinas_koperasi/pm_perkim_ke_dinas_koperasi_insert.py | 43c13c1e420a8f0068401f47303524a0b009cc78 | [
"BSD-2-Clause"
] | permissive | muntaza/catatan_openaset_balangan | 9362d0beb4de6e0abc3f4ec5ebb63a5aaff66294 | 01d1eb79ea8f203d231956d74b88e39789d54429 | refs/heads/master | 2022-02-18T10:27:21.494091 | 2022-02-14T10:19:50 | 2022-02-14T10:19:50 | 233,332,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,923 | py | from peralatanmesin.models import PeralatanMesin
from gedungbangunan.clone_object import clone_object
def pm_perkim_ke_dinas_koperasi():
    """Clone every PeralatanMesin asset row transferred (per the module
    name) from Dinas Perkim to Dinas Koperasi.

    The primary keys below are the rows to duplicate.  Looping over the
    tuple replaces 87 copy-pasted ``clone_object(...)`` calls while
    preserving the original order exactly.
    """
    pks = (
        28772, 28773, 28774, 28776, 28777, 28778, 28779, 28781, 28782,
        28783, 28784, 28830, 28832, 28833, 28834, 28838, 28841, 28843,
        28844, 28845, 28846, 28847, 28848, 28849, 52028, 71070, 88871,
        88873, 88874, 88875, 88882, 88883, 88884, 88891, 88892, 88893,
        88898, 88908, 88909, 88910, 88911, 88912, 88913, 88914, 88915,
        89759, 89760, 89761, 89762, 89763, 89764, 89955, 89956, 99266,
        99385, 99386, 99387, 99388, 99389, 99393, 99394, 99395, 99396,
        99410, 99411, 99412, 99413, 99414, 99415, 99416, 99417, 99418,
        99419, 99433, 99436, 99437, 99438, 99466, 99467, 99468, 99469,
        99470, 99471, 99472, 99473, 99474, 99475,
    )
    for pk in pks:
        # .get(pk=...) raises DoesNotExist if a row is missing, exactly
        # like the original one-call-per-row version did.
        clone_object(PeralatanMesin.objects.get(pk=pk))
| [
"muhammad@muntaza.id"
] | muhammad@muntaza.id |
84936bffedb2e3603a2b4acff728d7de87b37826 | cf7c928d6066da1ce15d2793dcf04315dda9b9ed | /Jungol/Lv1_LCoder_Python/py20_변수와입력/Main_JO_725_변수와입력_자가진단6.py | bf0bc102b6ac2ee3d0fcceed1c228fdd9570335d | [] | no_license | refresh6724/APS | a261b3da8f53de7ff5ed687f21bb1392046c98e5 | 945e0af114033d05d571011e9dbf18f2e9375166 | refs/heads/master | 2022-02-01T23:31:42.679631 | 2021-12-31T14:16:04 | 2021-12-31T14:16:04 | 251,617,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | print("Number 1?")
Number1 = int(input())
print("Number 2?")
Number2 = int(input())
# `mul` is computed but never printed (the product is recomputed inline below).
mul = Number1*Number2
div = float(Number1)/Number2
print("%d * %d = %d" % (Number1, Number2, Number1 * Number2))
# Why do the two division outputs differ?  %f rounds to 6 decimal places,
# while the f-string prints the float's full repr.
# print("%d / %d = %f" % (Number1, Number2, Number1 / Number2))
# 79 / 74 = 1.067568
print(f"{Number1} / {Number2} = {div}")
# 79 / 74 = 1.0675675675675675
| [
"refresh6724@gmail.com"
] | refresh6724@gmail.com |
c8aa8d0b005670120daa41f4f2b57539208a43e2 | a5a386c05ea962cd34f27214130d4117e650f1e7 | /awards/serializer.py | ee55415fb39d51df5732b1ccd57ce5a92ccf01d0 | [
"MIT"
] | permissive | iankabugi/Awards | eccba8866b16830ced36227c657c773a1bb5088b | 3d0e58faf679b71cf4e588e59e8c705660830d2f | refs/heads/master | 2021-09-09T09:33:12.535405 | 2019-06-25T22:43:32 | 2019-06-25T22:43:32 | 175,820,927 | 1 | 0 | MIT | 2021-09-08T00:54:20 | 2019-03-15T13:00:27 | Python | UTF-8 | Python | false | false | 370 | py | from rest_framework import serializers
from .models import Profile, Project
class ProfSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Profile's bio, picture and owning user."""
    class Meta:
        model = Profile
        fields = ('bio', 'profile_pic', 'user')
class ProjectSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Project's title, description and URL."""
    class Meta:
        model = Project
        fields = ('title', 'description', 'landing_page')
| [
"iank299@gmail.com"
] | iank299@gmail.com |
b0b14e8a0b6dfcb29da3c2d478cb88bddfc84212 | 68c1cb7e2e7b7bb4174951c817a36b16cf1e9f83 | /pytype/tools/arg_parser.py | c3fe1d9b319c84b1f507a25a020a0c5d6b218caa | [
"Apache-2.0",
"MIT"
] | permissive | priyansh19/pytype | f28f439943859f61e3ce5ff51d8a56a925ae5cb8 | 44b1f6f7cddccb326abac4c21b4f26688369764e | refs/heads/master | 2020-06-19T07:11:11.508597 | 2019-07-11T18:34:08 | 2019-07-11T18:34:08 | 196,610,656 | 2 | 0 | NOASSERTION | 2019-07-12T16:23:13 | 2019-07-12T16:23:13 | null | UTF-8 | Python | false | false | 2,790 | py | """Argument parsing for tools that pass args on to pytype_single."""
import argparse
from pytype import config as pytype_config
class ParserWrapper(object):
    """Proxy around an argparse parser that records every added action."""

    def __init__(self, parser, actions=None):
        """Wrap *parser*; *actions* maps dest -> argparse action."""
        self.parser = parser
        if actions is None:
            actions = {}
        self.actions = actions

    def add_argument(self, *args, **kwargs):
        """Forward to the wrapped parser and remember the action by dest.

        argparse raises ArgumentError for duplicate/conflicting options;
        those are silently skipped (some pytype-single options are masked).
        """
        try:
            action = self.parser.add_argument(*args, **kwargs)
        except argparse.ArgumentError:
            return
        self.actions[action.dest] = action

    def add_argument_group(self, *args, **kwargs):
        """Wrap a new argument group so its actions are recorded too."""
        group = self.parser.add_argument_group(*args, **kwargs)
        return self.__class__(group, actions=self.actions)
def string_to_bool(s):
    """Map the literals 'True'/'False' to booleans; pass anything else through."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    return s
def convert_string(s):
    """Best-effort flag-value conversion: int if possible, else bool-ish.

    Embedded newlines are stripped before conversion is attempted.
    """
    cleaned = s.replace('\n', '')
    try:
        value = int(cleaned)
    except ValueError:
        value = string_to_bool(cleaned)
    return value
class Parser(object):
    """Parser that integrates tool and pytype-single args."""

    def __init__(self, parser, pytype_single_args):
        """Initialize a parser.

        Args:
            parser: An argparse.ArgumentParser or compatible object.
            pytype_single_args: Iterable of args passed on to pytype_single.
        """
        self.parser = parser
        self.pytype_single_args = pytype_single_args

    def create_initial_args(self, keys):
        """Return an argparse.Namespace with every key in *keys* set to None."""
        return argparse.Namespace(**dict.fromkeys(keys))

    def parse_args(self, argv):
        """Parse argv (sys.argv[1:]) and postprocess the pytype-single args."""
        args = self.create_initial_args(self.pytype_single_args)
        self.parser.parse_args(argv, args)
        self.postprocess(args)
        return args

    def postprocess(self, args, from_strings=False):
        """Postprocess the subset of pytype_single_args present in *args*.

        Args:
            args: An argparse.Namespace.
            from_strings: If True, the values are strings and are first
                converted to ints/bools where possible.
        """
        names = {k for k in self.pytype_single_args if hasattr(args, k)}
        if from_strings:
            for k in names:
                setattr(args, k, convert_string(getattr(args, k)))
        pytype_config.Postprocessor(names, args).process()

    def get_pytype_kwargs(self, args):
        """Return kwargs for pytype.config.Options, keyed by pytype-single arg."""
        return {k: getattr(args, k) for k in self.pytype_single_args}
| [
"rechen@google.com"
] | rechen@google.com |
38e12f8b9db38ea3e3eae5f141e881819ae484c8 | 6320fef2ea7376c2b35f97f1a5af004e90f09098 | /1-2주차 실습(복습)/venv/Lib/site-packages/pygame/_numpysndarray.py | 5daa10746bf656d4595f81604d3399fbe65bd257 | [] | no_license | Dplo1514/ploaistudy | 7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9 | e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c | refs/heads/master | 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | ## pygame - Python Game Library
## Copyright (C) 2008 Marcus von Appen
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Marcus von Appen
## mva@sysfault.org
"""pygame module for accessing sound sample data using numpy
Functions to convert between numpy arrays and Sound objects. This module
will only be available when pygame can use the external numpy package.
Sound data is made of thousands of samples per second, and each sample
is the amplitude of the wave at a particular moment in time. For
example, in 22-kHz format, element number 5 of the array is the
amplitude of the wave after 5/22000 seconds.
Each sample is an 8-bit or 16-bit integer, depending on the data format.
A stereo sound file has two values per sample, while a mono sound file
only has one.
"""
import pygame.mixer as mixer
import numpy
def array(sound):
    """pygame._numpysndarray.array(Sound): return array

    Return a fresh array holding a copy of the Sound's samples.

    The result never aliases the Sound's buffer, so mutating it leaves
    the Sound untouched.  The array layout follows the format reported
    by pygame.mixer.get_init().
    """
    # Copying is the contrast with samples(), which returns a view.
    return numpy.array(sound, copy=True)
def samples(sound):
    """pygame._numpysndarray.samples(Sound): return array

    Return an array that aliases the Sound's sample buffer.

    Writes through the returned array modify the Sound directly; the
    layout follows the format from pygame.mixer.get_init().
    """
    # asarray is the no-copy spelling of numpy.array(sound, copy=False).
    return numpy.asarray(sound)
def make_sound(array):
    """pygame._numpysndarray.make_sound(array): return Sound

    Wrap the given sample array in a new playable mixer.Sound.

    The mixer must already be initialized and the array's format must be
    compatible with the mixer audio format.
    """
    return mixer.Sound(array=array)
| [
"dladlsgur3334@gmail.com"
] | dladlsgur3334@gmail.com |
ebca73888ad4378afca4d9cc286766ded94c5a00 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03612/s108764043.py | 931ac55645fd8b56e3c8ddb91cf3650d5feaebe2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | N = int(input())
A = list(map(int, input().split()))
cnt = 0
# Greedy fix-up: whenever A[i] equals its 1-based position i, swap it with
# the next element (or with the previous one when i is the last position)
# so the fixed point disappears; `cnt` counts the swaps performed.
# NOTE: enumerate iterates the live list, so mutations made by earlier
# swaps are visible to later iterations.
for i, a in enumerate(A, 1):
    if a == i:
        if i != len(A):
            # Swap A[i-1] (0-based) with its right neighbour.
            x = A[i - 1]
            y = A[i]
            A[i - 1] = y
            A[i] = x
            cnt += 1
        else:
            # Last position: no right neighbour, swap with the left one.
            x = A[i - 2]
            y = A[i - 1]
            A[i - 2] = y
            A[i - 1] = x
            cnt += 1
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fb1aef471f1ebba3fa245f15ce0af02930d314ec | a37c6678a5630925e6600fe65113828918f80563 | /raylab/envs/environments/hvac.py | c23e639295c021d002a4d06af42c2efd75694361 | [
"MIT"
] | permissive | rudrasohan/raylab | 658186d2955b7f949ab13045fb8f72ac8bd17978 | 12b93003a863caf7f9ead621d3afe4c83c4d7ee1 | refs/heads/master | 2023-03-24T11:03:57.626283 | 2021-03-11T19:53:53 | 2021-03-11T19:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,344 | py | # pylint:disable=missing-docstring,invalid-name
import gym
import numpy as np
import torch
# Default 3-node thermal model: wall adjacency (ADJ, upper-triangular) and
# per-edge resistances (R_*), heat capacities (CAP*), heater air constants,
# comfort band [TEMP_LOW, TEMP_UP] with out-of-band PENALTY, Gaussian
# outside/hall temperature parameters, initial temperatures and horizon.
DEFAULT_CONFIG = {
    "ADJ": [[False, True, True], [False, False, True], [False, False, False]],
    "ADJ_OUTSIDE": [True, True, False],
    "ADJ_HALL": [True, False, True],
    "R_OUTSIDE": [4.0, 4.0, 4.0],
    "R_HALL": [2.0, 2.0, 2.0],
    "R_WALL": [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5], [1.5, 1.5, 1.5]],
    "IS_ROOM": [True, True, True],
    "CAP": [80.0, 80.0, 80.0],
    "CAP_AIR": 1.006,
    "COST_AIR": 1.0,
    "TIME_DELTA": 1.0,
    "TEMP_AIR": 40.0,
    "TEMP_UP": [23.5, 23.5, 23.5],
    "TEMP_LOW": [20.0, 20.0, 20.0],
    "PENALTY": 20000.0,
    "AIR_MAX": [10.0, 10.0, 10.0],
    "TEMP_OUTSIDE_MEAN": [6.0, 6.0, 6.0],
    "TEMP_OUTSIDE_VARIANCE": [1.0, 1.0, 1.0],
    "TEMP_HALL_MEAN": [10.0, 10.0, 10.0],
    "TEMP_HALL_VARIANCE": [1.0, 1.0, 1.0],
    "init": {"temp": [10.0, 10.0, 10.0]},
    "horizon": 40,
}
class HVACEnv(gym.Env):
    """Multi-room HVAC (heating) environment with stochastic outside and
    hallway temperatures.

    State: one temperature per room followed by a normalised timestep
    in [0, 1].  Action: per-room air-flow fraction in [0, 1], scaled by
    the AIR_MAX config entry inside the dynamics.
    """
    metadata = {"render.modes": ["human"]}
    def __init__(self, config=None):
        """Merge ``config`` over DEFAULT_CONFIG and build the gym spaces."""
        self._config = {**DEFAULT_CONFIG, **(config or {})}
        self._num_rooms = len(self._config["init"]["temp"])
        # Observation = unbounded room temperatures + time in [0, 1].
        self.observation_space = gym.spaces.Box(
            low=np.array([-np.inf] * self._num_rooms + [0.0], dtype=np.float32),
            high=np.array([np.inf] * self._num_rooms + [1.0], dtype=np.float32),
        )
        self.action_space = gym.spaces.Box(
            low=np.array([0.0] * self._num_rooms, dtype=np.float32),
            high=np.array([1.0] * self._num_rooms, dtype=np.float32),
        )
        self._horizon = self._config["horizon"]
        self._state = None
        self.reset()
    def reset(self):
        """Reset to the configured initial temperatures at time 0."""
        self._state = np.array(self._config["init"]["temp"] + [0.0])
        return self._state
    @property
    def temp(self):
        """Current room temperatures as a float32 tensor (time excluded)."""
        obs, _ = self._unpack_state(self._state)
        return torch.as_tensor(obs, dtype=torch.float32)
    @torch.no_grad()
    def step(self, action):
        """Advance one timestep; returns (obs, reward, done, info)."""
        state, action = map(torch.as_tensor, (self._state, action))
        next_state, _ = self.transition_fn(state, action)
        reward = self.reward_fn(state, action, next_state).item()
        self._state = next_state.numpy()
        return self._state, reward, self._terminal(), {}
    def transition_fn(self, state, action, sample_shape=()):
        # pylint:disable=missing-docstring
        # Differentiable transition: returns (next_state, log-prob), where
        # the log-prob covers the sampled hall + outside temperatures.
        state, time = self._unpack_state(state)
        AIR_MAX = torch.as_tensor(self._config["AIR_MAX"])
        action = torch.as_tensor(action) * AIR_MAX
        temp_hall, logp_temp_hall = self._temp_hall(sample_shape)
        temp_outside, logp_temp_outside = self._temp_outside(sample_shape)
        next_state = self._temp(action, temp_outside, temp_hall)
        logp = logp_temp_hall + logp_temp_outside
        time = self._step_time(time)
        time = time.expand_as(next_state[..., -1:])
        return torch.cat([next_state, time], dim=-1), logp
    def _temp_hall(self, sample_shape=()):
        """Sample hallway temperatures; returns (rsample, log_prob)."""
        TEMP_HALL_MEAN = torch.as_tensor(self._config["TEMP_HALL_MEAN"])
        # NOTE: despite the name, sqrt(variance) is the standard deviation,
        # which is what Normal expects as its scale argument.
        TEMP_HALL_VARIANCE = torch.sqrt(
            torch.as_tensor(self._config["TEMP_HALL_VARIANCE"])
        )
        dist = torch.distributions.Normal(TEMP_HALL_MEAN, TEMP_HALL_VARIANCE)
        sample = dist.rsample(sample_shape)
        logp = dist.log_prob(sample.detach())
        return sample, logp
    def _temp_outside(self, sample_shape=()):
        """Sample outside temperatures; returns (rsample, log_prob)."""
        TEMP_OUTSIDE_MEAN = torch.as_tensor(self._config["TEMP_OUTSIDE_MEAN"])
        # sqrt(variance) == standard deviation (Normal's scale argument).
        TEMP_OUTSIDE_VARIANCE = torch.sqrt(
            torch.as_tensor(self._config["TEMP_OUTSIDE_VARIANCE"])
        )
        dist = torch.distributions.Normal(TEMP_OUTSIDE_MEAN, TEMP_OUTSIDE_VARIANCE)
        sample = dist.rsample(sample_shape)
        logp = dist.log_prob(sample.detach())
        return sample, logp
    def _temp(self, action, temp_outside, temp_hall):  # pylint:disable=too-many-locals
        """One Euler step of the thermal model.

        Heat flows: heater air into rooms (masked by IS_ROOM), conduction
        through shared walls (ADJ / R_WALL), and exchange with the outside
        (R_OUTSIDE) and the hallway (R_HALL), all scaled by
        TIME_DELTA / heat capacity.
        """
        air = action
        TIME_DELTA = torch.as_tensor(self._config["TIME_DELTA"])
        CAP = torch.as_tensor(self._config["CAP"])
        CAP_AIR = torch.as_tensor(self._config["CAP_AIR"])
        TEMP_AIR = torch.as_tensor(self._config["TEMP_AIR"])
        IS_ROOM = torch.as_tensor(self._config["IS_ROOM"])
        ADJ = torch.as_tensor(self._config["ADJ"])
        ADJ_OUTSIDE = torch.as_tensor(self._config["ADJ_OUTSIDE"])
        ADJ_HALL = torch.as_tensor(self._config["ADJ_HALL"])
        R_OUTSIDE = torch.as_tensor(self._config["R_OUTSIDE"])
        R_HALL = torch.as_tensor(self._config["R_HALL"])
        R_WALL = torch.as_tensor(self._config["R_WALL"])
        temp = self.temp
        # (ADJ | ADJ.T) symmetrises the upper-triangular adjacency so each
        # wall contributes to both of the rooms it separates.
        temp_ = temp + TIME_DELTA / CAP * (
            air * CAP_AIR * (TEMP_AIR - temp) * IS_ROOM
            + ((ADJ | ADJ.T) * (temp[np.newaxis] - temp[np.newaxis].T) / R_WALL).sum(
                dim=-1
            )
            + ADJ_OUTSIDE * (temp_outside - temp) / R_OUTSIDE
            + ADJ_HALL * (temp_hall - temp) / R_HALL
        )
        return temp_
    def _step_time(self, time):
        """Advance the normalised time by one step, clamped to [0, 1]."""
        timestep = torch.round(self._horizon * time)
        return torch.clamp((timestep + 1) / self._horizon, 0, 1)
    def reward_fn(self, state, action, next_state):
        # pylint:disable=unused-argument,missing-docstring
        # Negated cost, summed over rooms (non-room nodes masked out):
        # air cost + out-of-band PENALTY + distance from the band midpoint.
        AIR_MAX = torch.as_tensor(self._config["AIR_MAX"])
        air = torch.as_tensor(action) * AIR_MAX
        temp, _ = self._unpack_state(next_state)
        IS_ROOM = torch.as_tensor(self._config["IS_ROOM"])
        COST_AIR = torch.as_tensor(self._config["COST_AIR"])
        TEMP_LOW = torch.as_tensor(self._config["TEMP_LOW"])
        TEMP_UP = torch.as_tensor(self._config["TEMP_UP"])
        PENALTY = torch.as_tensor(self._config["PENALTY"])
        reward = -(
            IS_ROOM
            * (
                air * COST_AIR
                + ((temp < TEMP_LOW) | (temp > TEMP_UP)) * PENALTY
                + 10.0 * torch.abs((TEMP_UP + TEMP_LOW) / 2.0 - temp)
            )
        ).sum(dim=-1)
        return reward
    def _terminal(self):
        """Episode ends when the normalised time reaches 1."""
        _, time = self._unpack_state(self._state)
        return time.item() >= 1.0
    @staticmethod
    def _unpack_state(state):
        """Split a state array into (temperatures, time) float32 tensors."""
        obs = torch.as_tensor(state[..., :-1], dtype=torch.float32)
        time = torch.as_tensor(state[..., -1:], dtype=torch.float32)
        return obs, time
    def render(self, mode="human"):
        """Rendering is not implemented; intentionally a no-op."""
        pass
| [
"angelolovatto@gmail.com"
] | angelolovatto@gmail.com |
77f6155a68dccbb2a045fcc13e502d7bf2afd801 | d7fac5517b409224584e5ffef20b1bf3dbb895cc | /test/test_generic_throttle_policy.py | 09f96db22b801129ab65e7415ccc4d0ff3aa0864 | [] | no_license | junetigerlee/python-wso2-apim-adminclient | 0bac09899e02a8eee6fd5a0e80cc34472ec6b055 | 7eca928a727e8eb6901c38be83bc1adeb87540cf | refs/heads/master | 2021-01-01T16:12:24.090906 | 2017-07-25T06:22:09 | 2017-07-25T06:22:09 | 97,788,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # coding: utf-8
"""
WSO2 API Manager - Admin
This document specifies a **RESTful API** for WSO2 **API Manager** - Admin Portal. It is written with [swagger 2](http://swagger.io/).
OpenAPI spec version: 0.11.0
Contact: architecture@wso2.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import wso2_apim_adminclient
from wso2_apim_adminclient.rest import ApiException
from wso2_apim_adminclient.models.generic_throttle_policy import GenericThrottlePolicy
class TestGenericThrottlePolicy(unittest.TestCase):
""" GenericThrottlePolicy unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testGenericThrottlePolicy(self):
"""
Test GenericThrottlePolicy
"""
# FIXME: construct object with mandatory attributes with example values
#model = wso2_apim_adminclient.models.generic_throttle_policy.GenericThrottlePolicy()
pass
if __name__ == '__main__':
unittest.main()
| [
"junetigerlee@gmail.com"
] | junetigerlee@gmail.com |
204856232cbe127a20e8e3eae73148857c5dab65 | d5f0377d31e708b0be7f1e79dca714e274deba67 | /hex_game/create_databases.py | 97e08f94919b393048db84a91eca2f71dc2fed7e | [] | no_license | Phyronnaz/TIPE_Hex | b65a2651476661930e52f07654d2a843b1201ddb | fda070968368204dac08ac81d79a4ab0d604296a | refs/heads/master | 2021-01-16T21:52:07.530676 | 2017-06-15T15:34:48 | 2017-06-15T15:34:48 | 68,295,841 | 0 | 0 | null | 2017-05-18T11:46:33 | 2016-09-15T13:22:49 | Jupyter Notebook | UTF-8 | Python | false | false | 477 | py | import os
import sys
# path = os.path.dirname(os.path.realpath(__file__))[:-8]
path = "/home/victor/PycharmProjects/TIPE_Hex/"
if path not in sys.path:
sys.path.insert(0, path)
import numpy as np
from hex_game.q_learning import create_database
size = int(sys.argv[1])
path = os.path.expanduser("~") + "/Hex/database_{}.npy".format(size)
if os.path.exists(path):
database = np.load(path)
else:
database = create_database(size, 10000)
np.save(path, database) | [
"phyronnaz@gmail.com"
] | phyronnaz@gmail.com |
9f230e3838e4776ce4c598ffa0521f145bb1d3d8 | 7a0144da5a567d8497551b09875298ea224bb5bd | /백준/백준 1904.py | 8e1eae03c8e54734ee929270ed6129b7f8fe2c5d | [] | no_license | SLT-DJH/algorithm | dba34614bb0fbbad0ecf5d85b02cb541ab047c5a | e33c843be4efdfc6c6a7300ab4e53b9a7c4b2e67 | refs/heads/master | 2023-03-08T08:27:01.476793 | 2021-02-25T15:06:42 | 2021-02-25T15:06:42 | 297,017,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | def fibo(num) :
if num == 1 :
return 1
elif num == 2 :
return 2
else :
num = num - 2
a = 1
b = 2
while num != 0 :
c = (a+b) % 15746
a = b
b = c
num = num - 1
return b
a = int(input())
print(fibo(a))
| [
"jydwww@naver.com"
] | jydwww@naver.com |
3efae8c54e17d120f845bd7510d914395fad5f69 | 6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5 | /tests/kyu_4_tests/test_roman_numerals_encoder.py | e944d06c5e05cbbdc925b8114ab23e040fd2a232 | [
"MIT"
] | permissive | mveselov/CodeWars | e4259194bfa018299906f42cd02b8ef4e5ab6caa | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | refs/heads/master | 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import unittest
from katas.kyu_4.roman_numerals_encoder import solution
class RomanNumeralsEncoderTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(solution(1), 'I')
def test_equals_2(self):
self.assertEqual(solution(4), 'IV')
def test_equals_3(self):
self.assertEqual(solution(6), 'VI')
| [
"the-zebulan@users.noreply.github.com"
] | the-zebulan@users.noreply.github.com |
5f809c27b6953e210a747515d832cb54e2f3eadb | e34cbf5fce48f661d08221c095750240dbd88caf | /python/homework/day23_cmdb_web/s16MadKing/asset/views.py | 7ded2a7085ab28d0954eacb4d1a5ae9594548088 | [] | no_license | willianflasky/growup | 2f994b815b636e2582594375e90dbcb2aa37288e | 1db031a901e25bbe13f2d0db767cd28c76ac47f5 | refs/heads/master | 2023-01-04T13:13:14.191504 | 2020-01-12T08:11:41 | 2020-01-12T08:11:41 | 48,899,304 | 2 | 0 | null | 2022-12-26T19:46:22 | 2016-01-02T05:04:39 | C | UTF-8 | Python | false | false | 2,631 | py | from django.shortcuts import render, HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from asset import core
from asset import utils
from asset.models import *
import datetime
# Create your views here.
@csrf_exempt
def asset_with_no_asset_id(request):
if request.method == 'POST':
ass_handler = core.Asset(request)
res = ass_handler.get_asset_id_by_sn()
# return render(request,'assets/acquire_asset_id_test.html',{'response':res})
return HttpResponse(json.dumps(res))
@csrf_exempt
@utils.token_required
def asset_report(request):
if request.method == "POST":
ass_handler = core.Asset(request)
if ass_handler.data_is_valid():
ass_handler.data_inject()
return HttpResponse(json.dumps(ass_handler.response))
def login(request):
pass
def index(request):
return render(request, 'index.html', locals())
def asset(request):
from asset.page import PageInfo
all_count = Asset.objects.all().count()
page_info = PageInfo(request.GET.get('p'), 20, all_count, request.path_info, page_range=3)
objs = Asset.objects.all()[page_info.start():page_info.end()]
# result = []
# all_data = Asset.objects.all().values()
# for line in all_data:
# line['create_date'] = line['create_date'].strftime('%Y-%m-%d %H:%M:%S')
# line['update_date'] = line['update_date'].strftime('%Y-%m-%d %H:%M:%S')
# result.append(line)
return render(request, 'asset.html', locals())
def data(request):
# 图表1
asset_list_num = []
asset_list = []
asset_dic = {'server': "服务器", 'networkdevice': "网络设备", 'storagedevice': "存储设备",
'securitydevice': "安全设备", 'idcdevice': "IDC设备",
'accescories': "备件", 'software': "软件"}
for item, value in asset_dic.items():
res = Asset.objects.filter(asset_type=item).count()
asset_list_num.append(res)
asset_list.append(value)
# 图表2
status_list = []
status_result = []
status_dic = {0: '在线', 1: '已下线', 2: '未知', 3: '故障', 4: '备用'}
for item, value in status_dic.items():
status_tmp = {'value': 0, 'name': ""}
res = Asset.objects.filter(status=item).count()
status_list.append(value)
status_tmp['value'] = res
status_tmp['name'] = value
status_result.append(status_tmp)
data = {
'p11': asset_list,
'p12': asset_list_num,
'p21': status_list,
'p22': status_result
}
return HttpResponse(json.dumps(data))
| [
"284607860@qq.com"
] | 284607860@qq.com |
5b510ccf31da37f6df7b6f12e6ab9ac61759eaec | fffa5f1fe7e1d2bd875f302fefa2295563773440 | /posts/migrations/0001_initial.py | 204feab05266016c6f35f1128814ec13ff33dfd0 | [] | no_license | Awalamoo7/blogs-api | 9d2fe2d37837366863e8ffbab4372aa927e49875 | fd8112d666b10d728a3f12cfc284d7379eeedf41 | refs/heads/main | 2023-08-28T13:06:45.637222 | 2021-10-24T01:20:02 | 2021-10-24T01:20:02 | 420,555,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | # Generated by Django 3.2.8 on 2021-10-23 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(auto_created=True)),
('title', models.CharField(max_length=400)),
('description', models.TextField()),
('active', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_time', models.DateTimeField(auto_created=True)),
('text', models.TextField()),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.post')),
],
),
]
| [
"bmayowa25@gmail.com"
] | bmayowa25@gmail.com |
cb9081784eab4efbdda08880b0b3ceeae5077746 | 1d9138d777744fa2d9d6e3b629a43041f2358d06 | /real_time/abc/116/2.py | 30a70f19d2c5f010e386aaa7b16333c9f9b3ca64 | [] | no_license | Yuyats/AtCoderAnswers | f1956b790ee64a4d0b3b48b98791a91679a30244 | fac7e3eb74a888e77ba7a6b6a15d836c589baa3e | refs/heads/master | 2021-06-24T16:19:45.848524 | 2021-06-13T03:51:07 | 2021-06-13T03:51:07 | 198,857,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | def f(n):
if n % 2 == 0:
return n / 2
else:
return 3 * n + 1
s = int(input())
a = []
for i in range(1, 1000000):
if i == 1:
a.append(s)
else:
ai = f(a[-1])
if ai in a:
print(i)
break
else:
a.append(ai)
def f(n):
if n % 2 == 0:
return n / 2
else:
return 3 * n + 1
s = int(input())
a = [s]
for i in range(2, 1000001):
ai = f(a[-1])
if ai in a:
print(i)
break
else:
a.append(ai)
| [
"unitednum@gmail.com"
] | unitednum@gmail.com |
97d7f3bed79bfe6a5b45e7b0f4a83a690de7fcc1 | 10a18920f93008659f49b18fb016b84a21765e73 | /server/venv_ubuntu/lib/python3.8/site-packages/oslo_reports/tests/test_openstack_generators.py | 84bd46deb5bdd50843dced8da3a4d297ad4c65bb | [] | no_license | psitadmin/network-junco | 48fa704878fe9566cda393eddd88dafae07fa47c | 63a1e4afa46514852345e03eb32a2911621540f2 | refs/heads/master | 2023-08-18T07:01:48.836761 | 2021-09-28T09:47:42 | 2021-09-28T09:47:42 | 403,928,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,757 | py | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from unittest import mock
import greenlet
from oslo_config import cfg
from oslotest import base
import six
from oslo_reports.generators import conf as os_cgen
from oslo_reports.generators import threading as os_tgen
from oslo_reports.generators import version as os_pgen
from oslo_reports.models import threading as os_tmod
class TestOpenstackGenerators(base.BaseTestCase):
def test_thread_generator(self):
model = os_tgen.ThreadReportGenerator()()
# self.assertGreaterEqual(len(model.keys()), 1)
self.assertTrue(len(model.keys()) >= 1)
was_ok = False
for val in model.values():
self.assertIsInstance(val, os_tmod.ThreadModel)
self.assertIsNotNone(val.stack_trace)
if val.thread_id == threading.current_thread().ident:
was_ok = True
break
self.assertTrue(was_ok)
model.set_current_view_type('text')
self.assertIsNotNone(six.text_type(model))
def test_thread_generator_tb(self):
class FakeModel(object):
def __init__(self, thread_id, tb):
self.traceback = tb
with mock.patch('oslo_reports.models'
'.threading.ThreadModel', FakeModel):
model = os_tgen.ThreadReportGenerator("fake traceback")()
curr_thread = model.get(threading.current_thread().ident, None)
self.assertIsNotNone(curr_thread, None)
self.assertEqual("fake traceback", curr_thread.traceback)
def test_green_thread_generator(self):
curr_g = greenlet.getcurrent()
model = os_tgen.GreenThreadReportGenerator()()
# self.assertGreaterEqual(len(model.keys()), 1)
self.assertTrue(len(model.keys()) >= 1)
was_ok = False
for tm in model.values():
if tm.stack_trace == os_tmod.StackTraceModel(curr_g.gr_frame):
was_ok = True
break
self.assertTrue(was_ok)
model.set_current_view_type('text')
self.assertIsNotNone(six.text_type(model))
def test_config_model(self):
conf = cfg.ConfigOpts()
conf.register_opt(cfg.StrOpt('crackers', default='triscuit'))
conf.register_opt(cfg.StrOpt('secrets', secret=True,
default='should not show'))
conf.register_group(cfg.OptGroup('cheese', title='Cheese Info'))
conf.register_opt(cfg.IntOpt('sharpness', default=1),
group='cheese')
conf.register_opt(cfg.StrOpt('name', default='cheddar'),
group='cheese')
conf.register_opt(cfg.BoolOpt('from_cow', default=True),
group='cheese')
conf.register_opt(cfg.StrOpt('group_secrets', secret=True,
default='should not show'),
group='cheese')
model = os_cgen.ConfigReportGenerator(conf)()
model.set_current_view_type('text')
# oslo.config added a default config_source opt which gets included
# in our output, but we also need to support older versions where that
# wasn't the case. This logic can be removed once the oslo.config
# lower constraint becomes >=6.4.0.
config_source_line = ' config_source = \n'
try:
conf.config_source
except cfg.NoSuchOptError:
config_source_line = ''
target_str = ('\ncheese: \n'
' from_cow = True\n'
' group_secrets = ***\n'
' name = cheddar\n'
' sharpness = 1\n'
'\n'
'default: \n'
'%s'
' crackers = triscuit\n'
' secrets = ***') % config_source_line
self.assertEqual(target_str, six.text_type(model))
def test_package_report_generator(self):
class VersionObj(object):
def vendor_string(self):
return 'Cheese Shoppe'
def product_string(self):
return 'Sharp Cheddar'
def version_string_with_package(self):
return '1.0.0'
model = os_pgen.PackageReportGenerator(VersionObj())()
model.set_current_view_type('text')
target_str = ('product = Sharp Cheddar\n'
'vendor = Cheese Shoppe\n'
'version = 1.0.0')
self.assertEqual(target_str, six.text_type(model))
def test_package_report_generator_without_vendor_string(self):
class VersionObj(object):
def product_string(self):
return 'Sharp Cheddar'
def version_string_with_package(self):
return '1.0.0'
model = os_pgen.PackageReportGenerator(VersionObj())()
model.set_current_view_type('text')
target_str = ('product = Sharp Cheddar\n'
'vendor = None\n'
'version = 1.0.0')
self.assertEqual(target_str, six.text_type(model))
| [
"enrique.garcia.practicas@telefonica.com"
] | enrique.garcia.practicas@telefonica.com |
376ec43ace0b27750be44e8dc6139fac96157d25 | 9e138b34d78573f70ef9bdb3335efa1fd65712aa | /users/migrations/0002_auto_20210122_0209.py | 138dbebb6e18566f27d87a81983c82259772b95b | [] | no_license | crowdbotics-apps/sample-23979 | 425d0a0f1542ddc4189d68ca941eb3e0b1dcbae6 | 08630ad101da5da1bff294b827630c9114be5f22 | refs/heads/master | 2023-02-20T05:10:47.820119 | 2021-01-22T02:09:03 | 2021-01-22T02:09:03 | 331,809,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.2.17 on 2021-01-22 02:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
19078666f52c0a258fbe2e96b9bbd9da7c8386df | 65485cb1233e59b21dd5c5349cc88a015ad52661 | /ecommerce/store/admin.py | 39642d3a049ee140025f0b9107ed07f2c0ab8b88 | [] | no_license | danielmichaels/django-projects | 966929f889c4e31508b6bbcb728ef02f00549f0b | dc8ca4b9ca788dea6388b434e9d7744e1500128f | refs/heads/master | 2022-12-25T18:50:48.192647 | 2020-10-07T00:45:13 | 2020-10-07T00:45:13 | 289,652,384 | 0 | 0 | null | 2020-10-07T00:45:14 | 2020-08-23T09:13:00 | Python | UTF-8 | Python | false | false | 262 | py | from django.contrib import admin
from .models import Product, Order, OrderItem, Customer, ShippingAddress
admin.site.register(Product)
admin.site.register(OrderItem)
admin.site.register(Order)
admin.site.register(Customer)
admin.site.register(ShippingAddress)
| [
"dans.address@outlook.com"
] | dans.address@outlook.com |
a0b6217aff8ab3fd4a1a3074a6882d2a1be08888 | ac235a23f22be0d6f1818bb53902177f9969813a | /benchmarks/base/run.py | e2c0dda98b3f5b281160157a52897c485b5be00c | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | DataDog/dd-trace-py | f09d6d48c4c69aea68f999fc8a458ade5c6150cf | 1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17 | refs/heads/1.x | 2023-09-01T20:25:26.746324 | 2023-09-01T18:54:37 | 2023-09-01T18:54:37 | 61,572,326 | 461 | 426 | NOASSERTION | 2023-09-14T20:38:57 | 2016-06-20T18:52:23 | Python | UTF-8 | Python | false | false | 993 | py | #!/usr/bin/env python3
import os
import subprocess
import sys
import yaml
def read_config(path):
with open(path, "r") as fp:
return yaml.load(fp, Loader=yaml.FullLoader)
def run(scenario_py, cname, cvars, output_dir):
cmd = [
"python",
scenario_py,
# necessary to copy PYTHONPATH for venvs
"--copy-env",
"--append",
os.path.join(output_dir, "results.json"),
"--name",
cname,
]
for (cvarname, cvarval) in cvars.items():
cmd.append("--{}".format(cvarname))
cmd.append(str(cvarval))
proc = subprocess.Popen(cmd)
proc.wait()
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: {} <output dir>".format(sys.argv[0]))
sys.exit(1)
output_dir = sys.argv[1]
print("Saving results to {}".format(output_dir))
config = read_config("config.yaml")
for (cname, cvars) in config.items():
run("scenario.py", cname, cvars, output_dir)
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
71ea6cbad24da6e055d2aa9a4b7228bb842df0dd | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/mobile_app_category_constant_service_client_config.py | 1f790391c546b7cd2eddb5721b1b18609c8d5f0c | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 837 | py | config = {
"interfaces": {
"google.ads.googleads.v4.services.MobileAppCategoryConstantService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetMobileAppCategoryConstant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
30f1f2705332c4549d124f51b50b3a9a72acb8d7 | 05c5f5bbc2ddfa850d4ae28148f176c5d63a4a7b | /tbkt/apps/sx_normal/urls.py | db94b6c0aa22cb4c88965821420b3e9b7610328a | [] | no_license | GUAN-YE/hd_api_djs | f7234643c06f47c03c348c7740266d45989c5e77 | 1f08cbfccc1ae2123d92670c0afed9b59ae645b8 | refs/heads/master | 2020-03-25T02:32:03.613535 | 2018-08-02T12:52:45 | 2018-08-02T12:52:45 | 143,294,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # coding:utf-8
"""
2017数学常态活动
映射路径
"""
from django.conf.urls import url
from stu import views as stu_views
import views as com_views
# 学生
urlpatterns = [
url(r'stu/sign$', stu_views.r_sign), # 签到
url(r'stu/share$', stu_views.r_share), # 分享
]
# 活动公用方法
urlpatterns += [
url(r'^com/info$', com_views.r_info), # 用户积分详情
url(r'score/detail$', com_views.r_score_detail), # 用户积分信息
url(r'class/ranks$', com_views.r_class_rank), # 积分排名
url(r'award/display$', com_views.r_award_info), # 奖品静态展示
# 河南活动未下线,三门峡活动暂不用
url(r'award/winner$', com_views.r_award_winner), # 奖品静态展示
url(r'score/ranks$', com_views.r_score_rank), # 积分排名
] | [
"15670549987@163.com"
] | 15670549987@163.com |
78bb32bf7c2e23216cb019c336e44b0b0ba11969 | 97c37b210fad85895f35b7db183a52cf6c37504f | /webhook.py | 1f1c1aaeea9975e4e4de3c8e1b1c17e5a5454547 | [] | no_license | polakowo/py-webhook | 9762b4c810f6cc81d73e0451f298070354102f5a | ee1c38b0ceccdc056fcf121ca45d5c7949ac7075 | refs/heads/master | 2021-01-23T23:53:11.194683 | 2018-02-24T14:10:20 | 2018-02-24T14:10:20 | 122,743,148 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python3
from http.server import BaseHTTPRequestHandler, HTTPServer
hostName = ""
hostPort = 8555
def execute():
print("Script is being executed")
import subprocess
subprocess.call(['sh', './script.sh'])
class MyServer(BaseHTTPRequestHandler):
def do_POST(self):
print("Request received")
self.send_response(200)
self.end_headers()
# Redeploy
content_length = int(self.headers['Content-Length'])
post_data = self.rfile.read(content_length).decode("utf-8")
import json
payload = json.loads(post_data)
# Add logic here
execute()
def run():
host_address = (hostName, hostPort)
myServer = HTTPServer(host_address, MyServer)
print("Webhook listening - %s:%s" % host_address)
try:
myServer.serve_forever()
except KeyboardInterrupt:
pass
finally:
myServer.server_close()
print("Webhook stopped - %s:%s" % host_address)
if __name__ == "__main__":
run()
| [
"olegpolakow@gmail.com"
] | olegpolakow@gmail.com |
2993e71ab0448d4ec4870ceb291435ef26e334d5 | 63ace5832d453e325681d02f6496a0999b72edcb | /examples/bip38_ec.py | c454a9f615016da836de3cf8a3ab9a067af476d8 | [
"MIT"
] | permissive | ebellocchia/bip_utils | c9ec04c687f4247e57434319e36b2abab78f0b32 | d15c75ddd74e4838c396a0d036ef6faf11b06a4b | refs/heads/master | 2023-09-01T13:38:55.567370 | 2023-08-16T17:04:14 | 2023-08-16T17:04:14 | 251,130,186 | 244 | 88 | MIT | 2023-08-23T13:46:19 | 2020-03-29T20:42:48 | Python | UTF-8 | Python | false | false | 2,472 | py | """Example of private key encryption/decryption with EC multiplication using BIP38."""
import binascii
from bip_utils import Bip38Decrypter, Bip38EcKeysGenerator, Bip38Encrypter, Bip38PubKeyModes, WifEncoder
# BIP38 passphrase
passphrase = "DummyPassphrase"
# Generate an intermediate passphrase without lot and sequence numbers
int_pass = Bip38EcKeysGenerator.GenerateIntermediatePassphrase(passphrase)
print(f"Intermediate passphrase: {int_pass}")
# Generate an encrypted private key from the intermediate passphrase
priv_key_enc = Bip38EcKeysGenerator.GeneratePrivateKey(int_pass, Bip38PubKeyModes.COMPRESSED)
print(f"Encrypted private key (no lot/sequence): {priv_key_enc}")
# Decrypt
priv_key_dec, pub_key_mode = Bip38Decrypter.DecryptEc(priv_key_enc, passphrase)
print(f"Decrypted private key (bytes): {binascii.hexlify(priv_key_dec)}")
print(f"Decrypted private key (WIF): {WifEncoder.Encode(priv_key_dec, pub_key_mode=pub_key_mode)}")
# Generate an intermediate passphrase with lot and sequence numbers
int_pass = Bip38EcKeysGenerator.GenerateIntermediatePassphrase(passphrase,
lot_num=100000,
sequence_num=1)
print(f"Intermediate passphrase: {int_pass}")
# Generate an encrypted private key from the intermediate passphrase
priv_key_enc = Bip38EcKeysGenerator.GeneratePrivateKey(int_pass, Bip38PubKeyModes.UNCOMPRESSED)
print(f"Encrypted private key (with lot/sequence): {priv_key_enc}")
# Decrypt
priv_key_dec, pub_key_mode = Bip38Decrypter.DecryptEc(priv_key_enc, passphrase)
print(f"Decrypted private key (bytes): {binascii.hexlify(priv_key_dec)}")
print(f"Decrypted private key (WIF): {WifEncoder.Encode(priv_key_dec, pub_key_mode=pub_key_mode)}")
# Or, you can use Bip38Encrypter for generating keys in one-shot
priv_key_enc = Bip38Encrypter.GeneratePrivateKeyEc(passphrase,
Bip38PubKeyModes.COMPRESSED,
lot_num=100000,
sequence_num=1)
print(f"Encrypted private key (with Bip38Encrypter): {priv_key_enc}")
# Decrypt
priv_key_dec, pub_key_mode = Bip38Decrypter.DecryptEc(priv_key_enc, passphrase)
print(f"Decrypted private key (bytes): {binascii.hexlify(priv_key_dec)}")
print(f"Decrypted private key (WIF): {WifEncoder.Encode(priv_key_dec, pub_key_mode=pub_key_mode)}")
| [
"54482000+ebellocchia@users.noreply.github.com"
] | 54482000+ebellocchia@users.noreply.github.com |
bd9959ca27e75ccb48e9805f865384b753390f96 | d35a60fd5242080d87c6408f8f9c2f087c754883 | /server/serverWithDraw.py | 57c22bdc885c0a16c0880c4ae4dfa2911fc17245 | [] | no_license | zhengchengyy/BBDetection | 765bfab717760f319568b991fb38ede64e626629 | 0aa192437dc5cf7526dcceab993a710b712933f7 | refs/heads/master | 2020-04-01T08:34:45.658324 | 2019-11-14T12:47:17 | 2019-11-14T12:47:17 | 153,037,386 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | import pyformulas as pf
import threading
import socketserver
import os
import matplotlib.pyplot as plt
import numpy as np
# configurate the figure
import matplotlib as mpl
mpl.rc('lines', linewidth=1, color='r', linestyle='-')
plt.rcParams['figure.figsize'] = (10.0, 6.0)
class PlotThread(threading.Thread):
def __init__(self, xs, ys):
super(PlotThread, self).__init__()
self.xs = xs
self.ys = ys
self.xindicator = -1
def run(self):
fig = plt.figure()
canvas = np.zeros((480, 640))
screen = pf.screen(canvas, 'Examine')
# plt.ylim(0.4, 1.6)
plt.ylim(0.6, 1.0)
# plt.ylim(-0.5, 2)
# plt.ylim(0.695, 0.705)
# plt.ylim(0.76, 0.77)
# plt.ylim(1730, 1750)
# plt.ylim(0.81,0.8125)
while True:
# threadLock.acquire()
plt.xlim(xs[-1] - 20, xs[-1] + 2)
plt.plot(self.xs, self.ys, c='blue')
# threadLock.release()
fig.canvas.draw()
image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
screen.update(image)
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
self.request = request
self.client_address = client_address
self.server = server
try:
self.handle()
finally:
self.finish()
def updateData(self, x, y):
# threadLock.acquire()
xs.append(x)
ys.append(y)
if len(xs) > 50:
del xs[0]
del ys[0]
# xs.pop(0)
# ys.pop(0)
# threadLock.release()
def handle(self):
# transform original data
data, addr = self.request[1].recvfrom(1024) # 收到字节数组(bytes)数据,request[1]为socket
str = data.decode('utf-8') # 解码成utf-8格式的字符串
dic = eval(str)[0] # 转换成字典,eval()函数用来执行一个字符串表达式,并返回表达式的值。
volt = dic['voltage']
time = dic['time']
# update data
self.updateData(time, volt)
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
if __name__ == "__main__":
threadLock = threading.Lock()
xs = [0]
ys = [0]
plotThread = PlotThread(xs, ys)
plotThread.start()
HOST, PORT = "", 20000
server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
print("Server loop running in thread:", server_thread.name)
print(" .... waiting for connection")
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever() | [
"zhengchengyy@qq.com"
] | zhengchengyy@qq.com |
52b05d3878abcb4c78e0e865d6199cb611986340 | 891127dfec1e5255cd403f4aedbe0ef18b5c8567 | /python-notify2/lilac.py | 8678e267dc883732492e08565ebc85e226202699 | [] | no_license | kktt007/repo | df708b5841474fbbf720994f9278a906a908b63b | fc039d08425081834384f84c8ffe9d3923849165 | refs/heads/master | 2020-04-03T06:03:10.517303 | 2018-10-28T11:23:08 | 2018-10-28T11:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | #!/usr/bin/env python3
#
# This file is the most simple lilac.py file,
# and it suits for most packages in AUR.
#
from lilaclib import *
build_prefix = 'extra-x86_64'
post_build = aur_post_build
def pre_build():
aur_pre_build()
need_rebuild = False
for line in edit_file('PKGBUILD'):
# edit PKGBUILD
if line.strip().startswith("depends="):
words = line.split(" ")
words.insert(-1, "'python-setuptools'")
line = " ".join(words)
if line.strip().startswith("pkgver=0.3"):
need_rebuild = True
if need_rebuild and line.strip().startswith("pkgrel=1"):
line = "pkgrel=2"
print(line)
if __name__ == '__main__':
single_main()
| [
"farseerfc@gmail.com"
] | farseerfc@gmail.com |
ca5216e0c168037fe25b76c19ba7275392c36af3 | f41bd639f249ef6029e310bee84c6ef03f5d6f19 | /databundles/database/__init__.py | 8a086a851a744ff6f4b771d0f9f09df297feec4a | [] | no_license | kball/databundles | 5e3d478c1977a0481d77131dd573c8f199e2c95d | 142f20705c8be6cb136adef3a94c8fa7b7119b88 | refs/heads/master | 2021-01-21T03:30:32.822333 | 2014-01-23T23:57:57 | 2014-01-23T23:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,795 | py | """
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from __future__ import absolute_import
from ..dbexceptions import ConfigurationError
from collections import namedtuple
def new_database(config, bundle=None, class_=None):
service = config['driver']
if 'class' in config and class_ and config['class'] != class_:
raise ConfigurationError("Mismatch in class configuration {} != {}".format(config['class'], class_))
class_ = config['class'] if 'class' in config else class_
k = (service,class_)
if k == ('sqlite',None):
from .sqlite import SqliteBundleDatabase #@UnresolvedImport
return SqliteBundleDatabase(bundle=bundle, **config)
elif k == ('mysql',None):
raise NotImplemented()
elif k == ('postgres',None):
from .relational import RelationalDatabase #@UnresolvedImport
return RelationalDatabase(**config)
elif k == ('postgis',None):
from .postgis import PostgisDatabase #@UnresolvedImport
return PostgisDatabase(**config)
elif k == ('sqlite','bundle'):
from .sqlite import SqliteBundleDatabase #@UnresolvedImport
return SqliteBundleDatabase(bundle=bundle, **config)
elif k == ('sqlite','warehouse'):
from .sqlite import SqliteWarehouseDatabase #@UnresolvedImport
dbname = config['dbname']
del config['dbname']
return SqliteWarehouseDatabase(dbname, **config)
elif k == ('mysql','warehouse'):
raise NotImplemented()
elif k == ('postgres','warehouse'):
raise NotImplemented()
class DatabaseInterface(object):
@property
def name(self):
raise NotImplementedError()
def exists(self):
raise NotImplementedError()
def create(self):
raise NotImplementedError()
def add_post_create(self, f):
raise NotImplementedError()
def delete(self):
raise NotImplementedError()
def open(self):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def inserter(self, table_or_name=None,**kwargs):
raise NotImplementedError()
def updater(self, table_or_name=None,**kwargs):
raise NotImplementedError()
def commit(self):
raise NotImplementedError()
def tables(self):
raise NotImplementedError()
def has_table(self, table_name):
raise NotImplementedError()
def create_table(self, table):
raise NotImplementedError()
def drop_table(self, table_name):
raise NotImplementedError()
| [
"eric@clarinova.com"
] | eric@clarinova.com |
eca77010769b565283b9e1d850d441836c5dc8f3 | 990b92264109dc01dbfddeb6f5e75675037fd829 | /app/cito_engine/poller/pluginpoller.py | 123be6b2b244c4a01e2c2503321bc5c5146f53f8 | [
"Apache-2.0"
] | permissive | CitoEngine/cito_engine | 20efa189abab1b684b60b260c1ea9ed16f6ea0f2 | 95852dd109d86a344726d7b11ed1132d4e48426b | refs/heads/master | 2020-05-21T15:04:24.011603 | 2019-02-08T04:51:42 | 2019-02-08T04:51:42 | 17,123,947 | 9 | 13 | Apache-2.0 | 2019-02-08T04:51:43 | 2014-02-24T03:17:04 | Python | UTF-8 | Python | false | false | 3,548 | py | # Django settings for cito project.
# Copyright (c) 2012-2013 Cyrus Dasadia <cyrus@extremeunix.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
import logging
from cito_engine.models import Plugin, PluginServer
logger = logging.getLogger('poller_logger')
def pluginpoller(server):
url = server.url+'/getallplugins'
try:
response = requests.get(url, verify=server.ssl_verify)
except Exception, e:
logger.error('Could not connect to PluginServer: %s [EXCEPTION] %s' % (url, e))
return False
try:
jsondata = response.json()
except:
logger.error('PluginServer: %s gave invalid JSON response')
return False
logger.info("Found %s plugins" % len(jsondata))
pluginNames = []
#Add or update plugins
for k in jsondata:
pluginNames.append(k['plugins']['name'])
try:
p = Plugin.objects.get(server=server, name__iexact=k['plugins']['name'])
p.description = k['plugins']['description']
p.status = k['plugins']['status']
p.save()
logger.info("Plugin: %s already existed and updated" % k['plugins']['name'])
except Plugin.DoesNotExist:
Plugin.objects.create(server=server,
name=k['plugins']['name'],
description=k['plugins']['description'],
status=k['plugins']['status'])
logger.info('Plugin: %s added' % k['plugins']['name'])
except Plugin.MultipleObjectsReturned:
logger.error("More than one plugin exists for %s, remove the duplicates!" % k['plugins']['name'])
except Exception as e:
logger.error("Could not add plugin, reason:%s" % e)
#Disable all deprecated plugins
plugins = Plugin.objects.filter(server=server)
if plugins is not None:
for p in plugins:
if p.name not in pluginNames:
logger.info("Plugin: %s is deprecated on plugin server, disabling here" % p.name)
p.description = 'WARNING: This plugin has been DEPRECATED on remote server!!!!\n'
p.status = False
p.save()
return True
def update_plugins():
pluginservers = PluginServer.objects.all()
for server in pluginservers:
if server.status:
logger.info("Fetching from %s" % server.name)
pluginpoller(server)
else:
logger.info("Server at %s ignored." % server.name)
| [
"cyrus@extremeunix.com"
] | cyrus@extremeunix.com |
0da2a858c8b43266110789edbbd9fbfc44c0485f | 0ca9e6a6fa9a05231d2248e9991c50d74173e546 | /B0_CNN-RNN-text/predict2.py | d19a91c2ae46186a8cd09254b83dc0537f1e4037 | [] | no_license | Timaos123/LogstashAI | 8aa07b74c0bfd46ab9bd4ff6b10549057e36c477 | 0b9ec6a27ebc07584503dc69e29475e98ebc102b | refs/heads/master | 2020-04-02T14:34:48.648972 | 2018-10-24T16:44:41 | 2018-10-24T16:44:41 | 154,530,107 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,365 | py | import os
import sys
import json
import shutil
import pickle
import logging
import data_helper
import numpy as np
import pandas as pd
import tensorflow as tf
from text_cnn_rnn import TextCNNRNN
logging.getLogger().setLevel(logging.INFO)
def load_trained_params(trained_dir):
params = json.loads(open(trained_dir + 'trained_parameters.json').read())
words_index = json.loads(open(trained_dir + 'words_index.json').read())
labels = json.loads(open(trained_dir + 'labels.json').read())
with open(trained_dir + 'embeddings.pickle', 'rb') as input_file:
fetched_embedding = pickle.load(input_file)
embedding_mat = np.array(fetched_embedding, dtype = np.float32)
return params, words_index, labels, embedding_mat
def load_test_data(test_file, labels):
df = pd.read_csv(test_file)
df.loc[df.severity=="crit","severity"]=1
df.loc[df.severity=="err","severity"]=1
df.loc[df.severity!=1,"severity"]=0
# print(df)
select = ['message']
df = df.dropna(axis=0, how='any', subset=select)
test_examples = df[select[0]].apply(lambda x: data_helper.clean_str(x).split(' ')).tolist()
num_labels = len(labels)
one_hot = np.zeros((num_labels, num_labels), int)
np.fill_diagonal(one_hot, 1)
label_dict = dict(zip(labels, one_hot))
y_ = None
if 'severity' in df.columns:
select.append('severity')
y_ = df[select[1]].apply(lambda x: label_dict[x]).tolist()
not_select = list(set(df.columns) - set(select))
df = df.drop(not_select, axis=1)
return test_examples, y_, df
def map_word_to_index(examples, words_index):
x_ = []
for example in examples:
temp = []
for word in example:
if word in words_index:
temp.append(words_index[word])
else:
temp.append(0)
x_.append(temp)
return x_
def getRP(preY,testY):
yP=list(zip(preY,testY))
tp=0
fp=0
tn=0
fn=0
for yPItem in yP:
if yPItem[0]==yPItem[1] and yPItem[1]==1:
tp=tp+1
if yPItem[0]!=yPItem[1] and yPItem[1]==1:
fp=fp+1
if yPItem[0]==yPItem[1] and yPItem[1]==0:
tn=tn+1
if yPItem[0]!=yPItem[1] and yPItem[1]==0:
fn=fn+1
recall=tp/(tp+fp)
precision=tp/(tp+fn)
return recall,precision
def predict_unseen_data():
trained_dir = "./trained_results_1524214944/"
if not trained_dir.endswith('/'):
trained_dir += '/'
test_file = "valData.csv"
params, words_index, labels, embedding_mat = load_trained_params(trained_dir)
x_, y_, df = load_test_data(test_file, labels)
x_ = data_helper.pad_sentences(x_, forced_sequence_length=params['sequence_length'])
x_ = map_word_to_index(x_, words_index)
x_test, y_test = np.asarray(x_), None
if y_ is not None:
y_test = np.asarray(y_)
timestamp = trained_dir.split('/')[-2].split('_')[-1]
predicted_dir = './predicted_results_' + timestamp + '/'
if os.path.exists(predicted_dir):
shutil.rmtree(predicted_dir)
os.makedirs(predicted_dir)
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn_rnn = TextCNNRNN(
embedding_mat = embedding_mat,
non_static = params['non_static'],
hidden_unit = params['hidden_unit'],
sequence_length = len(x_test[0]),
max_pool_size = params['max_pool_size'],
filter_sizes = map(int, params['filter_sizes'].split(",")),
num_filters = params['num_filters'],
num_classes = len(labels),
embedding_size = params['embedding_dim'],
l2_reg_lambda = params['l2_reg_lambda'])
def real_len(batches):
return [np.ceil(np.argmin(batch + [0]) * 1.0 / params['max_pool_size']) for batch in batches]
def predict_step(x_batch):
feed_dict = {
cnn_rnn.input_x: x_batch,
cnn_rnn.dropout_keep_prob: 1.0,
cnn_rnn.batch_size: len(x_batch),
cnn_rnn.pad: np.zeros([len(x_batch), 1, params['embedding_dim'], 1]),
cnn_rnn.real_len: real_len(x_batch),
}
predictions = sess.run([cnn_rnn.predictions], feed_dict)
return predictions
checkpoint_file = trained_dir + 'best_model.ckpt'
saver = tf.train.Saver(tf.all_variables())
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
logging.critical('{} has been loaded'.format(checkpoint_file))
batches = data_helper.batch_iter(list(x_test), params['batch_size'], 1, shuffle=False)
predictions, predict_labels = [], []
for x_batch in batches:
batch_predictions = predict_step(x_batch)[0]
for batch_prediction in batch_predictions:
predictions.append(batch_prediction)
predict_labels.append(labels[batch_prediction])
# Save the predictions back to file
df['NEW_PREDICTED'] = predict_labels
columns = sorted(df.columns, reverse=True)
df.to_csv(predicted_dir + 'predictions_all.csv', index=False, columns=columns)
if y_test is not None:
y_test = np.array(np.argmax(y_test, axis=1))
accuracy = sum(np.array(predictions) == y_test) / float(len(y_test))
logging.critical('The prediction accuracy is: {}'.format(accuracy))
recall,precision=getRP(np.array(predictions),y_test)
logging.critical('The prediction Recall and Precision are: {},{}'.format(recall,precision))
logging.critical('Prediction is complete, all files have been saved: {}'.format(predicted_dir))
if __name__ == '__main__':
# python3 predict.py ./trained_results_1478563595/ ./data/small_samples.csv
predict_unseen_data()
| [
"noreply@github.com"
] | Timaos123.noreply@github.com |
25d5fc581028991eafab2166d9a68b78fd919440 | 9de28c08400250025f4bdc5676bac3b82f4a404d | /Unified/assignSchema.py | fb2db6eb24f398dbbfc3540c186ddfbc2babaa93 | [] | no_license | AndrewLevin/WmAgentScripts | f4b8edb6b668d128e12d55b396c67f9efb727e6f | 4eab70df00d7a314a4d7d115e1eb0dbdac823165 | refs/heads/master | 2020-12-31T03:35:14.564731 | 2016-12-06T20:37:51 | 2016-12-06T20:37:51 | 14,967,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, PickleType, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
#class McMID(Base):
# __tablename__ = 'mcm'
# id = Column(Integer, primary_key=True)
# pid = Column(String(400))
# ## and whatever else you want
class Workflow(Base):
__tablename__ = 'workflow'
id = Column(Integer, primary_key=True)
name = Column(String(400))
status = Column(String(30),default='considered') ## internal status
wm_status = Column(String(30),default='assignment-approved') ## status in req manager : we might not be carrying much actually since we are between ass-approved and assigned, although announced is coming afterwards
fraction_for_closing = Column(Float,default=0.90)
class Output(Base):
__tablename__ = 'output'
id = Column(Integer, primary_key=True)
datasetname = Column(String(400))
nlumis = Column(Integer)
expectedlumis = Column(Integer)
nevents = Column(Integer)
nblocks = Column(Integer)
dsb_status = Column(String(30)) ## in DBS ?
status = Column(String(30))
## workflow it belongs to
workfow_id = Column(Integer,ForeignKey('workflow.id'))
workflow = relationship(Workflow)
date = Column(Integer)
class Transfer(Base):
__tablename__ = 'transfer'
id = Column(Integer, primary_key=True)
phedexid = Column(Integer)
workflows_id = Column(PickleType)
#status = Column(String(30)) ## to be added ?
engine = create_engine('sqlite:///Unified/assignRecord.db')
Base.metadata.create_all(engine)
| [
"vlimant@cern.ch"
] | vlimant@cern.ch |
89d42651a7c265a1445be2fc09631bf0afbe9c41 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02818/s265134890.py | 8482530bc7f94733b76388fa8ccee8aa0e5fd826 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #!/usr/bin/env python3
# Generated by https://github.com/kyuridenamida/atcoder-tools
from typing import *
import collections
import functools
import itertools
import math
import sys
INF = float('inf')
def solve(A: int, B: int, K: int):
return f'{max(A-K,0)} {max(B-max(0,K-A),0)}'
def main():
sys.setrecursionlimit(10 ** 6)
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
A = int(next(tokens)) # type: int
B = int(next(tokens)) # type: int
K = int(next(tokens)) # type: int
print(f'{solve(A, B, K)}')
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9d871e6ca56da43854bf44e0d344e875c0c9981e | 2ed4ed28dc150a96954c663f203808ba917712c8 | /learning_site/tracks/views.py | e6c04e087c767566263f7b3f4718117d88d2d770 | [] | no_license | Mostacosta/Mosta-learning-website | 66e813c6fe17b018d750ffa824df751428a20ce8 | a70e94649355e07a5d819e75b09100f7dc5ccc59 | refs/heads/master | 2022-04-01T17:36:23.084318 | 2020-02-04T08:56:37 | 2020-02-04T08:56:37 | 200,877,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,211 | py | from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from .models import track,course,lesson,exam,exam_result
from questions.models import question ,answer
from questions.forms import question_form
from datetime import datetime
import dateutil.parser
from django.utils import timezone
from django.views.decorators.cache import cache_page
import random
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from django.contrib import messages
# Create your views here.
def track_list(request):
tracks = track.objects.all()
page = request.GET.get('page', 1)
paginator = Paginator(tracks, 3)
try:
tracks = paginator.page(page)
except PageNotAnInteger:
tracks = paginator.page(1)
except EmptyPage:
tracks = paginator.page(paginator.num_pages)
return render (request,'tracks/track-list.html',{"tracks":tracks})
def course_list(request,pk):
my_track = track.objects.get(pk=pk)
courses = course.objects.filter(track=my_track).order_by("order")
points = my_track.points.split(",")
lessons_ = []
for course_ in courses:
lessons = lesson.objects.filter(course=course_)
lessons_.append(lessons)
zip_ = zip(courses,lessons_)
if request.user.is_authenticated:
pass
else:
messages.error(request,"login to be able to preview lessons")
return render (request,'tracks/course-details.html',{"courses":zip_,"track":my_track,"len":len(courses),"points":points})
def lesson_list(request,pk):
my_course = course.objects.get(pk=pk)
lessons = lesson.objects.filter(course=my_course)
return render (request,'tracks/lesson_list.html',{"lessons":lessons,"course":my_course.name})
def lesson_view (request,pk):
form = question_form()
lesson_ = lesson.objects.get(pk=pk)
if request.method == "POST":
form = question_form(request.POST,request.FILES)
if form.is_valid():
ques_=form.save(commit=False)
ques_.user = request.user
ques_.lesson=lesson_
form.save()
questions = question.objects.filter(lesson=lesson_)
answers = []
for question_ in questions :
answers.append(answer.objects.filter(question=question_))
zip_list = zip (questions,answers)
return render (request,"questions/answer_list.html",{"zip":zip_list,"form":form})
def lesson_watch (request,pk):
lesson_ = lesson.objects.get(pk=pk)
if request.user not in lesson_.watching_users.all():
lesson_.watching_users.add(request.user)
lesson_.save()
return HttpResponse("watched")
@login_required(redirect_field_name="contacts:signup")
@cache_page(60)
def exam_view (request,pk):
course_ = course.objects.get(pk=pk)
questions = sorted(exam.objects.filter(course=course_),key=lambda x: random.random())
try:
result = exam_result.objects.get(course=course_,user=request.user)
except :
result = None
if result :
last_time = result.date
last_time = last_time.replace(tzinfo=None)
dif = datetime.now()-last_time
dif = dif.total_seconds()
expire_date = 259200*result.times
hours = expire_date//(60*60)
if dif < expire_date:
return HttpResponse ("comeback after"+str(hours))
if request.method == "POST":
if request.session.get('time'):
time_ = request.session.get('time')
past_time = dateutil.parser.parse(time_)
now_time = datetime.now()
dif = now_time-past_time
dif = dif.total_seconds()
if dif is not None and dif<1800:
score = 0
for question in questions:
if question.right_answer == request.POST[question.name]:
score +=1
precentage = (score/len(questions)) * 100
if precentage > 50 :
if request.user not in course_. succeeded_users.all():
course_. succeeded_users.add(request.user)
course_.save()
if result:
result.case = 'success'
result.times =1
result.date = timezone.now()
result.degree=precentage
else:
result =exam_result(user=request.user,course=course_,case='success',degree=precentage)
result.save()
return HttpResponse ("you succed")
else :
if result:
result.case = 'failed'
result.times +=1
result.date = timezone.now()
result.degree=precentage
else:
result =exam_result(user=request.user,course=course_,case='failed',degree=precentage)
result.save()
return HttpResponse ("you failed")
else:
return HttpResponse ("no session")
else:
request.session['time'] = datetime.now().isoformat()
print(request.session['time'])
return render (request,'tracks/exam.html',{"questions":questions})
| [
"mostafaelhassan910@gmail.com"
] | mostafaelhassan910@gmail.com |
df3a6464a3e0334638d68082c12f2f987f3ce943 | be1b836b022a52204bc862878ba8d7a9200bd59b | /website/unicode/create_json.py | afea12542356f37620cc445dfd70b497d34b720d | [
"MIT"
] | permissive | templateK/write-math | b418e7c40a59ce2e673f5804b1c042acd8eb527b | ece645f70341431ac7ca14740ce26ad8153a3900 | refs/heads/master | 2021-01-18T21:48:14.725324 | 2016-05-17T17:57:22 | 2016-05-17T17:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | #!/usr/bin/env python
"""
Create a json file which maps unicode decimal codepoints to descriptions.
https://github.com/w3c/xml-entities is used for that.
"""
import json
data = {}
import xml.etree.ElementTree
e = xml.etree.ElementTree.parse('xml-entities/unicode.xml').getroot()
for atype in e.findall('charlist'):
print("## Charlist found")
for character in atype.findall('character'):
try:
dec = int(character.get('dec'))
desc = ''
for description in character.findall('description'):
desc = description.text
# print("%s: - %s" % (dec, desc))
data[dec] = desc
except:
# Just ignore errors
pass
with open('unicode.json', 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=1)
| [
"info@martin-thoma.de"
] | info@martin-thoma.de |
f0ed3ca2be636e6ff87f3318f3e243f68762b6f4 | 4bc24011c65cb5194eb94abfd8d394a6b0dc6a50 | /packages/OpenCV/nodes/OpenCV___BilateralFilter0/OpenCV___BilateralFilter0.py | 6f78c7fc2a7dc5946060fcc88fa05dad405492c9 | [
"MIT"
] | permissive | ManojKumarTiwari/Ryven | 6c76ebdf89599bb7c9b4ce020f195eea135d9da1 | 2b8ef0bdcf05a458a6cf8791cbc2fda6870932f8 | refs/heads/master | 2022-11-12T00:23:45.303378 | 2020-07-08T09:32:10 | 2020-07-08T09:32:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,485 | py | from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
from custom_src.retain import m
import cv2
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
class BilateralFilter_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(BilateralFilter_NodeInstance, self).__init__(parent_node, flow, configuration)
# self.special_actions['action name'] = self.actionmethod ...
self.img_unfiltered = None
self.img_filtered = None
self.initialized()
def update_event(self, input_called=-1):
self.img_unfiltered = self.input(0)
d_val = self.input(1)
d_val = int(d_val)
sigmaColor_val=self.input(2)
sigmaColor_val=int(sigmaColor_val)
sigmaSpace_val=self.input(3)
sigmaSpace_val=int(sigmaSpace_val)
self.img_filtered = cv2.bilateralFilter( self.img_unfiltered, d_val, sigmaColor_val,sigmaSpace_val)
self.main_widget.show_image(self.img_filtered)
self.set_output_val(0, self.img_filtered)
def get_data(self):
data = {}
# ...
return data
def set_data(self, data):
pass
# ...
# optional - important for threading - stop everything here
def removing(self):
pass
| [
"leon.thomm@gmx.de"
] | leon.thomm@gmx.de |
49aad707822cea050e45c3f070042ccf5c2f2dba | a8123a86db99b9365b10ba76dd509d58caa7bc10 | /python/practice/start_again/2020/11032020/Assesment6.py | c505235f1844afeeb02c99144c972776b142ec93 | [] | no_license | smohapatra1/scripting | c0404081da8a10e92e7c7baa8b540acc16540e77 | 3628c9109204ad98231ae8ee92b6bfa6b27e93cd | refs/heads/master | 2023-08-22T20:49:50.156979 | 2023-08-22T20:43:03 | 2023-08-22T20:43:03 | 147,619,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #Use List Comprehension to create a list of the first letters of every word in the string below:
st = 'Create a list of the first letters of every word in this string'
mylist=[w[0] for w in st.split()]
print (mylist) | [
"samarendra.mohapatra121@gmail.com"
] | samarendra.mohapatra121@gmail.com |
4e8b7d7b60995bf843c9a4de85b38976e365d990 | 84a1f9d626828b6ecaee4ef037081f4d8750a990 | /编程/4月/4.13/test_survey.py | 9c1c877c16243557dde6b28b5c1afc599957f8ed | [] | no_license | dujiaojingyu/Personal-programming-exercises | 5a8f001efa038a0cb3b6d0aa10e06ad2f933fe04 | 72a432c22b52cae3749e2c18cc4244bd5e831f64 | refs/heads/master | 2020-03-25T17:36:40.734446 | 2018-10-01T01:47:36 | 2018-10-01T01:47:36 | 143,986,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | __author__ = "Narwhale"
# import unittest
# from survey import AnonymousSurvey
#
# class TestAnonymousSurvey(unittest.TestCase):
# '''测试survey.py'''
# def test_store_single_response(self):
# '''测试单个答案会不会妥善储存'''
# question = "What langage did you first learn to speak?"
# my_survey = AnonymousSurvey(question)
# my_survey.store_response('English')
# self.assertIn('English',my_survey.responses)
#
# def test_store_three_response(self):
# '''测试三个答案会不会妥善储存'''
# question = "What langage did you first learn to speak?"
# my_survey = AnonymousSurvey(question)
# responses = ['English','Spanish','Mandarin']
# for response in responses:
# my_survey.store_response(response)
# for response in responses:
# self.assertIn(response,my_survey.responses)
#
import unittest
from survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
'''测试survey.py'''
def setUp(self):
'''创建一个调查对象和一答案,供使用的测试方法使用'''
question = "What langage did you first learn to speak?"
self.my_survey = AnonymousSurvey(question)
self.responses = ['English','Spanish','Mandarin']
def test_store_single_response(self):
'''测试单个答案会不会妥善储存'''
self.my_survey.store_response(self.responses[0])
self.assertIn(self.responses[0],self.my_survey.responses)
def test_store_three_response(self):
'''测试三个答案会不会妥善储存'''
for response in self.responses:
self.my_survey.store_response(response)
for response in self.responses:
self.assertIn(response,self.my_survey.responses)
| [
"34296128+dujiaojingyu@users.noreply.github.com"
] | 34296128+dujiaojingyu@users.noreply.github.com |
c0ca10e6617bcc841033face0deb5832c499e704 | 3de69270140c915a71611b07f9e5ae7e0ba5d3e6 | /hedgehog/__init__.py | af439387011fcc89560aa07bba8500c8a529cdb3 | [
"MIT"
] | permissive | dongyu1990/hedgehog | f66380f77751d2dd6dc8d888ed4634d3cc8d9225 | 98c97d0c70b4aa01b0bfb1115a1dfbe18f976ae9 | refs/heads/master | 2022-09-15T14:21:15.701239 | 2020-05-31T13:12:40 | 2020-05-31T13:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | import os
from .bayes_net import BayesNet
from .examples import load_alarm
from .examples import load_asia
from .examples import load_grades
from .examples import load_sprinkler
__all__ = [
'BayesNet',
'load_alarm',
'load_asia',
'load_grades',
'load_sprinkler'
]
def cli_hook():
here = os.path.dirname(os.path.realpath(__file__))
os.system(f'streamlit run {here}/gui.py')
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
f2a49225a1fc85adf7640b670ac6c14374ae7785 | 280342a3961132a6f62507e17cb0dadf3598f2ea | /models/extends_financiera_prestamo_cuota.py | 96a9a2953247b4107c7c846ec38bf120d3c9ebfe | [] | no_license | levislibra/financiera_pagos_360 | 5e8f6f2fe43311ea3b918daff359ec126ecadc0b | 4881773281e970ff23c3c9e913ee0a5260e91502 | refs/heads/master | 2023-07-06T01:09:20.089773 | 2023-06-21T14:57:10 | 2023-06-21T14:57:10 | 205,934,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from datetime import datetime, timedelta, date
PAGOS360_MONTO_MINIMO = 10
class ExtendsFinancieraPrestamoCuota(models.Model):
_inherit = 'financiera.prestamo.cuota'
_name = 'financiera.prestamo.cuota'
pagos_360_generar_pago_voluntario = fields.Boolean('Pagos360 - Generar cupon de pago voluntario')
pagos_360_solicitud_id = fields.Integer('Pagos360 - ID de la solicitud')
pagos_360_solicitud_previa1_id = fields.Integer('Pagos360 - ID de la solicitud previa 1')
pagos_360_solicitud_previa1_fecha = fields.Date('Pagos360 - Fecha de la solicitud previa 1')
pagos_360_solicitud_previa2_id = fields.Integer('Pagos360 - ID de la solicitud previa 2')
pagos_360_solicitud_previa2_fecha = fields.Date('Pagos360 - Fecha de la solicitud previa 2')
pagos_360_solicitud_id_origen_pago = fields.Integer('Pagos360 - ID de la solicitud de pago', readonly=1)
pagos_360_solicitud_state = fields.Selection([
('pending', 'Pendiente'), ('paid', 'Pagada'),
('expired', 'Expirada'), ('reverted', 'Revertida')],
string='Pagos360 - Estado', readonly=True, default='pending')
pagos_360_first_due_date = fields.Date('Pagos360 - Primer Vencimiento')
pagos_360_first_total = fields.Float('Pagos360 - Importe', digits=(16,2))
pagos_360_second_due_date = fields.Date('Pagos360 - Segundo Vencimiento')
pagos_360_second_total = fields.Float('Pagos360 - Importe', digits=(16,2))
pagos_360_barcode = fields.Char('Pagos360 - Barcode')
pagos_360_checkout_url = fields.Char('Pagos360 - Url de pago online')
pagos_360_barcode_url = fields.Char('Pagos360 - Url imagen del codigo de barras')
pagos_360_pdf_url = fields.Char('Pagos360 - Url de cupon de pago en pdf')
# Nueva integracion
solicitud_ids = fields.One2many('financiera.pagos360.solicitud', 'cuota_id', 'Solicitudes de Pago')
@api.one
def pagos_360_crear_solicitud(self):
if self.state in ('activa', 'judicial', 'incobrable') and self.saldo >= PAGOS360_MONTO_MINIMO:
solicitud_id = self.env['financiera.pagos360.solicitud'].crear_solicitud(self)
solicitud_id.generar_solicitud()
@api.one
def pagos_360_cobrar_y_facturar(self, payment_date, journal_id, factura_electronica, amount, invoice_date, punitorio_stop_date, solicitud_id=None):
print("pagos_360_cobrar_y_facturar")
partner_id = self.partner_id
fpcmc_values = {
'partner_id': partner_id.id,
'company_id': self.company_id.id,
}
multi_cobro_id = self.env['financiera.prestamo.cuota.multi.cobro'].create(fpcmc_values)
partner_id.multi_cobro_ids = [multi_cobro_id.id]
# Fijar fecha punitorio
self.punitorio_fecha_actual = punitorio_stop_date
print("Punitorio stop date: ", str(punitorio_stop_date))
if self.saldo > 0:
self.confirmar_cobrar_cuota(payment_date, journal_id, amount, multi_cobro_id)
if len(multi_cobro_id.payment_ids) > 0:
if solicitud_id:
solicitud_id.pagos_360_payment_id = multi_cobro_id.payment_ids[0]
# Facturacion cuota
if not self.facturada:
fpcmf_values = {
'invoice_type': 'interes',
'company_id': self.company_id.id,
}
multi_factura_id = self.env['financiera.prestamo.cuota.multi.factura'].create(fpcmf_values)
self.facturar_cuota(invoice_date, factura_electronica, multi_factura_id, multi_cobro_id)
if multi_factura_id.invoice_amount == 0:
multi_factura_id.unlink()
multi_factura_punitorio_id = None
if self.punitorio_a_facturar > 0:
fpcmf_values = {
'invoice_type': 'punitorio',
'company_id': self.company_id.id,
}
multi_factura_punitorio_id = self.env['financiera.prestamo.cuota.multi.factura'].create(fpcmf_values)
self.facturar_punitorio_cuota(invoice_date, factura_electronica, multi_factura_punitorio_id, multi_cobro_id)
if multi_factura_punitorio_id != None and multi_factura_punitorio_id.invoice_amount == 0:
multi_factura_punitorio_id.unlink()
| [
"levislibra@hotmail.com"
] | levislibra@hotmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.