Dataset schema (one row per source file; ranges and class counts as reported by the dataset viewer):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
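Because each row couples raw file content with repository metadata, the rows below can be streamed rather than downloaded wholesale. A minimal sketch, assuming the dump comes from a Hugging Face dataset; the dataset path is a placeholder, not the real source:

```python
# Minimal sketch: stream rows of a Stack-style dataset without downloading it all.
# The dataset path "org/dataset-name" is a placeholder, not the actual source.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    # Each row mirrors the schema above: repo/path metadata plus raw file content.
    if row["language"] == "Python" and not row["is_generated"]:
        print(row["repo_name"], row["path"], row["length_bytes"])
```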

---

blob_id: 739258e7e68025fab2d4f603d719f33d72ba274a · directory_id: 5e7d0921887469c651faf90ecfcf3736d4802f6b
path: /pan/chapter10/X98.py · content_id: 0ef74cbf6ee9c2e7f47d00fdc133ea3e37f78b06
detected_licenses: [] · license_type: no_license · repo_name: tmu-nlp/100knock2021
snapshot_id: 5e4029f2ee330fa95269740d7bd372be533d125d · revision_id: 0a9f8150588648fbd86d51b4c9168acf3e45ed98 · branch_name: refs/heads/main
visit_date: 2023-07-03T14:00:08.604762 · revision_date: 2021-08-05T09:23:16 · committer_date: 2021-08-05T09:23:16
github_id: 359,641,037 · star_events_count: 10 · fork_events_count: 1
gha_license_id: null · gha_event_created_at: 2021-08-05T09:22:25 · gha_created_at: 2021-04-20T00:57:21 · gha_language: Jupyter Notebook
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,028 · extension: py
content:
# 98. Domain Adaptation
# Use translation data such as the Japanese-English Subtitle Corpus (JESC) or
# JParaCrawl to try to improve performance on the KFTT test data.
# (The cell below mixes Python, shell commands, and logged output, as in the
# original notebook; `sp` is the SentencePiece processor built in an earlier
# problem of the same notebook.)
import re
import tarfile

with tarfile.open('en-ja.tar.gz') as tar:
    for f in tar.getmembers():
        if f.name.endswith('txt'):
            text = tar.extractfile(f).read().decode('utf-8')
            break

data = text.splitlines()
data = [x.split('\t') for x in data]
data = [x for x in data if len(x) == 4]
data = [[x[3], x[2]] for x in data]  # keep the (Japanese, English) columns

with open('jparacrawl.ja', 'w') as f, open('jparacrawl.en', 'w') as g:
    for j, e in data:
        print(j, file=f)
        print(e, file=g)

with open('jparacrawl.ja') as f, open('train.jparacrawl.ja', 'w') as g:
    for x in f:
        x = x.strip()
        x = re.sub(r'\s+', ' ', x)
        x = sp.encode_as_pieces(x)
        x = ' '.join(x)
        print(x, file=g)

subword-nmt apply-bpe -c kyoto_en.codes < jparacrawl.en > train.jparacrawl.en

fairseq-preprocess -s ja -t en \
    --trainpref train.jparacrawl \
    --validpref dev.sub \
    --destdir data98 \
    --workers 20

fairseq-train data98 \
    --fp16 \
    --save-dir save98_1 \
    --max-epoch 3 \
    --arch transformer --share-decoder-input-output-embed \
    --optimizer adam --clip-norm 1.0 \
    --lr 1e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \
    --dropout 0.1 --weight-decay 0.0001 \
    --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
    --max-tokens 8000 > 98_1.log

fairseq-interactive --path save98_1/checkpoint3.pt data98 < test.sub.ja | grep '^H' | cut -f3 | sed -r 's/(@@ )|(@@ ?$)//g' > 98_1.out

spacy_tokenize('98_1.out', '98_1.out.spacy')

fairseq-score --sys 98_1.out.spacy --ref test.spacy.en
Namespace(ignore_case=False, order=4, ref='test.spacy.en', sacrebleu=False, sentence_bleu=False, sys='98_1.out.spacy')
BLEU4 = 8.80, 42.9/14.7/6.3/3.2 (BP=0.830, ratio=0.843, syslen=23286, reflen=27625)

fairseq-preprocess -s ja -t en \
    --trainpref train.sub \
    --validpref dev.sub \
    --tgtdict data98/dict.en.txt \
    --srcdict data98/dict.ja.txt \
    --destdir data98_2 \
    --workers 20

fairseq-train data98_2 \
    --fp16 \
    --restore-file save98_1/checkpoint3.pt \
    --save-dir save98_2 \
    --max-epoch 10 \
    --arch transformer --share-decoder-input-output-embed \
    --optimizer adam --clip-norm 1.0 \
    --lr 1e-3 --lr-scheduler inverse_sqrt --warmup-updates 2000 \
    --dropout 0.1 --weight-decay 0.0001 \
    --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
    --max-tokens 8000 > 98_2.log

fairseq-interactive --path save98_2/checkpoint10.pt data98_2 < test.sub.ja | grep '^H' | cut -f3 | sed -r 's/(@@ )|(@@ ?$)//g' > 98_2.out

spacy_tokenize('98_2.out', '98_2.out.spacy')

fairseq-score --sys 98_2.out.spacy --ref test.spacy.en
Namespace(ignore_case=False, order=4, ref='test.spacy.en', sacrebleu=False, sentence_bleu=False, sys='98_2.out.spacy')
BLEU4 = 22.85, 54.9/28.0/16.7/10.7 (BP=0.998, ratio=0.998, syslen=27572, reflen=27625)
authors: ["noreply@github.com"] · author_id: tmu-nlp.noreply@github.com
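The two fairseq-train runs above implement pretrain-then-finetune domain adaptation: the second run warm-starts from save98_1/checkpoint3.pt via --restore-file and continues on in-domain KFTT data, which is what lifts BLEU from 8.80 to 22.85. A minimal sketch of double-checking such a score with sacrebleu; the reference path is a placeholder and sacrebleu is not part of the original pipeline (which uses fairseq-score):

```python
# Minimal sketch: recompute corpus BLEU for the fine-tuned system with sacrebleu.
# File names reuse the artifacts above; 'test.en' is a placeholder for the
# detokenized references, and sacrebleu is an assumption, not the original tool.
import sacrebleu

with open('98_2.out') as f:
    hyps = [line.strip() for line in f]
with open('test.en') as f:
    refs = [line.strip() for line in f]

bleu = sacrebleu.corpus_bleu(hyps, [refs])
print(bleu.score)
```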

---

blob_id: 224a810f309bc00d40ba0479577abd5a86335e19 · directory_id: 736b39df5dbf8ae8c51c7e64a03c5df2b0a925a9
path: /home/views.py · content_id: 5a221cc39ed8beb950c9b5b5be60c798aa0dfcbb
detected_licenses: [] · license_type: no_license · repo_name: Congdohacker/PythonWeb
snapshot_id: da3be23d1ddb3c4e6d9080b8c0a080fc32dcaab8 · revision_id: df4bdced5a4cb6ea1f6b35cc499e6f93e3602dd8 · branch_name: refs/heads/master
visit_date: 2023-07-12T10:14:48.378658 · revision_date: 2021-08-18T12:01:21 · committer_date: 2021-08-18T12:01:21
github_id: 394,562,730 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,072 · extension: py
content:
from django.shortcuts import render
from .forms import RegistrationForm
from django.http import HttpResponseRedirect

# Create your views here.
def index(request):
    return render(request, 'pages/home.html')

def proc(request):
    return render(request, 'pages/proc.html')

def dttdraw(request):
    return render(request, 'pages/thanhcun.html')

def dttdrawabout(request):
    return render(request, 'dttdraw/pages/thanhcunfull/test.html')

def dttdrawdino(request):
    return render(request, 'dttdraw/pages/thanhcunfull/gamedino.html')

def webupdate(request):
    return render(request, 'pages/webupdatecongdo.html')

def contact(request):
    return render(request, 'pages/contact.html')

def error(request, exception):
    return render(request, 'pages/error.html', {'message': exception})

def register(request):
    form = RegistrationForm()
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/')
    return render(request, 'pages/register.html', {'form': form})
authors: ["do7998477@gmail.com"] · author_id: do7998477@gmail.com
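These views only become reachable once they are wired to routes; a minimal sketch of a matching URLconf follows. The app module name ("home.views") and the URL patterns are illustrative guesses, not taken from the repository:

```python
# Minimal sketch of a URLconf for the views above. The module path and
# patterns are illustrative; only the view names come from the file.
from django.urls import path

from home import views

urlpatterns = [
    path('', views.index, name='index'),
    path('proc/', views.proc, name='proc'),
    path('contact/', views.contact, name='contact'),
    path('register/', views.register, name='register'),
]
```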

---

blob_id: ea7cb6905e5ac631d0dded1e0dcfaf1aac45a9dc · directory_id: 8be1525434d53228621526ec19c83d558972690e
path: /libraries/botbuilder-core/botbuilder/core/adapters/test_adapter.py · content_id: c731d6adab57b4059e92b3c34b57d5dcbab48690
detected_licenses: ["MIT", "LicenseRef-scancode-generic-cla"] · license_type: permissive · repo_name: IbisMalko/botbuilder-python
snapshot_id: e24c5db4f213df90c35ba47842f8f65291f18cfc · revision_id: faf4eed121c331ca0e4d373d05e19aa34d8d2877 · branch_name: refs/heads/master
visit_date: 2021-03-10T17:57:28.079769 · revision_date: 2020-03-10T22:52:37 · committer_date: 2020-03-10T22:52:37
github_id: null · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 21,467 · extension: py
content:
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

# TODO: enable this in the future
# With python 3.7 the line below will allow to do Postponed Evaluation of Annotations. See PEP 563
# from __future__ import annotations
import asyncio
import inspect
from datetime import datetime
from uuid import uuid4
from typing import Awaitable, Coroutine, Dict, List, Callable, Union
from copy import copy
from threading import Lock

from botbuilder.schema import (
    ActivityTypes,
    Activity,
    ConversationAccount,
    ConversationReference,
    ChannelAccount,
    ResourceResponse,
    TokenResponse,
)
from botframework.connector.auth import AppCredentials, ClaimsIdentity
from botframework.connector.token_api.models import (
    SignInUrlResponse,
    TokenExchangeResource,
    TokenExchangeRequest,
)

from ..bot_adapter import BotAdapter
from ..turn_context import TurnContext
from ..extended_user_token_provider import ExtendedUserTokenProvider


class UserToken:
    def __init__(
        self,
        connection_name: str = None,
        user_id: str = None,
        channel_id: str = None,
        token: str = None,
    ):
        self.connection_name = connection_name
        self.user_id = user_id
        self.channel_id = channel_id
        self.token = token

    def equals_key(self, rhs: "UserToken"):
        return (
            rhs is not None
            and self.connection_name == rhs.connection_name
            and self.user_id == rhs.user_id
            and self.channel_id == rhs.channel_id
        )


class ExchangeableToken(UserToken):
    def __init__(
        self,
        connection_name: str = None,
        user_id: str = None,
        channel_id: str = None,
        token: str = None,
        exchangeable_item: str = None,
    ):
        super(ExchangeableToken, self).__init__(
            connection_name=connection_name,
            user_id=user_id,
            channel_id=channel_id,
            token=token,
        )
        self.exchangeable_item = exchangeable_item

    def equals_key(self, rhs: "ExchangeableToken") -> bool:
        return (
            rhs is not None
            and self.exchangeable_item == rhs.exchangeable_item
            and super().equals_key(rhs)
        )

    def to_key(self) -> str:
        return self.exchangeable_item


class TokenMagicCode:
    def __init__(self, key: UserToken = None, magic_code: str = None):
        self.key = key
        self.magic_code = magic_code


class TestAdapter(BotAdapter, ExtendedUserTokenProvider):
    __test__ = False

    def __init__(
        self,
        logic: Coroutine = None,
        template_or_conversation: Union[Activity, ConversationReference] = None,
        send_trace_activities: bool = False,
    ):
        """
        Creates a new TestAdapter instance.
        :param logic:
        :param conversation: A reference to the conversation to begin the adapter state with.
        """
        super(TestAdapter, self).__init__()
        self.logic = logic
        self._next_id: int = 0
        self._user_tokens: List[UserToken] = []
        self._magic_codes: List[TokenMagicCode] = []
        self._conversation_lock = Lock()
        self.exchangeable_tokens: Dict[str, ExchangeableToken] = {}
        self.activity_buffer: List[Activity] = []
        self.updated_activities: List[Activity] = []
        self.deleted_activities: List[ConversationReference] = []
        self.send_trace_activities = send_trace_activities
        self.template = (
            template_or_conversation
            if isinstance(template_or_conversation, Activity)
            else Activity(
                channel_id="test",
                service_url="https://test.com",
                from_property=ChannelAccount(id="User1", name="user"),
                recipient=ChannelAccount(id="bot", name="Bot"),
                conversation=ConversationAccount(id="Convo1"),
            )
        )
        if isinstance(template_or_conversation, ConversationReference):
            self.template.channel_id = template_or_conversation.channel_id

    async def process_activity(
        self, activity: Activity, logic: Callable[[TurnContext], Awaitable]
    ):
        self._conversation_lock.acquire()
        try:
            # ready for next reply
            if activity.type is None:
                activity.type = ActivityTypes.message

            activity.channel_id = self.template.channel_id
            activity.from_property = self.template.from_property
            activity.recipient = self.template.recipient
            activity.conversation = self.template.conversation
            activity.service_url = self.template.service_url
            activity.id = str(self._next_id)
            self._next_id += 1
        finally:
            self._conversation_lock.release()

        activity.timestamp = activity.timestamp or datetime.utcnow()
        await self.run_pipeline(TurnContext(self, activity), logic)

    async def send_activities(
        self, context, activities: List[Activity]
    ) -> List[ResourceResponse]:
        """
        INTERNAL: called by the logic under test to send a set of activities. These will be buffered
        to the current `TestFlow` instance for comparison against the expected results.
        :param context:
        :param activities:
        :return:
        """

        def id_mapper(activity):
            self.activity_buffer.append(activity)
            self._next_id += 1
            return ResourceResponse(id=str(self._next_id))

        return [
            id_mapper(activity)
            for activity in activities
            if self.send_trace_activities or activity.type != "trace"
        ]

    async def delete_activity(self, context, reference: ConversationReference):
        """
        INTERNAL: called by the logic under test to delete an existing activity. These are simply
        pushed onto a [deletedActivities](#deletedactivities) array for inspection after the turn
        completes.
        :param reference:
        :return:
        """
        self.deleted_activities.append(reference)

    async def update_activity(self, context, activity: Activity):
        """
        INTERNAL: called by the logic under test to replace an existing activity. These are simply
        pushed onto an [updatedActivities](#updatedactivities) array for inspection after the turn
        completes.
        :param activity:
        :return:
        """
        self.updated_activities.append(activity)

    async def continue_conversation(
        self,
        reference: ConversationReference,
        callback: Callable,
        bot_id: str = None,
        claims_identity: ClaimsIdentity = None,  # pylint: disable=unused-argument
        audience: str = None,
    ):
        """
        The `TestAdapter` just calls parent implementation.
        :param reference:
        :param callback:
        :param bot_id:
        :param claims_identity:
        :return:
        """
        await super().continue_conversation(
            reference, callback, bot_id, claims_identity, audience
        )

    async def receive_activity(self, activity):
        """
        INTERNAL: called by a `TestFlow` instance to simulate a user sending a message to the bot.
        This will cause the adapters middleware pipe to be run and it's logic to be called.
        :param activity:
        :return:
        """
        if isinstance(activity, str):
            activity = Activity(type="message", text=activity)

        # Initialize request.
        request = copy(self.template)
        for key, value in vars(activity).items():
            if value is not None and key != "additional_properties":
                setattr(request, key, value)

        request.type = request.type or ActivityTypes.message
        if not request.id:
            self._next_id += 1
            request.id = str(self._next_id)

        # Create context object and run middleware.
        context = TurnContext(self, request)
        return await self.run_pipeline(context, self.logic)

    def get_next_activity(self) -> Activity:
        return self.activity_buffer.pop(0)

    async def send(self, user_says) -> object:
        """
        Sends something to the bot. This returns a new `TestFlow` instance which can be used to add
        additional steps for inspecting the bots reply and then sending additional activities.
        :param user_says:
        :return: A new instance of the TestFlow object
        """
        return TestFlow(await self.receive_activity(user_says), self)

    async def test(
        self, user_says, expected, description=None, timeout=None
    ) -> "TestFlow":
        """
        Send something to the bot and expects the bot to return with a given reply. This is simply a
        wrapper around calls to `send()` and `assertReply()`. This is such a common pattern that a
        helper is provided.
        :param user_says:
        :param expected:
        :param description:
        :param timeout:
        :return:
        """
        test_flow = await self.send(user_says)
        test_flow = await test_flow.assert_reply(expected, description, timeout)
        return test_flow

    async def tests(self, *args):
        """
        Support multiple test cases without having to manually call `test()` repeatedly. This is a
        convenience layer around the `test()`. Valid args are either lists or tuples of parameters
        :param args:
        :return:
        """
        for arg in args:
            description = None
            timeout = None
            if len(arg) >= 3:
                description = arg[2]
            if len(arg) == 4:
                timeout = arg[3]
            await self.test(arg[0], arg[1], description, timeout)

    def add_user_token(
        self,
        connection_name: str,
        channel_id: str,
        user_id: str,
        token: str,
        magic_code: str = None,
    ):
        key = UserToken()
        key.channel_id = channel_id
        key.connection_name = connection_name
        key.user_id = user_id
        key.token = token

        if not magic_code:
            self._user_tokens.append(key)
        else:
            code = TokenMagicCode()
            code.key = key
            code.magic_code = magic_code
            self._magic_codes.append(code)

    async def get_user_token(
        self,
        context: TurnContext,
        connection_name: str,
        magic_code: str = None,
        oauth_app_credentials: AppCredentials = None,  # pylint: disable=unused-argument
    ) -> TokenResponse:
        key = UserToken()
        key.channel_id = context.activity.channel_id
        key.connection_name = connection_name
        key.user_id = context.activity.from_property.id

        if magic_code:
            magic_code_record = list(
                filter(lambda x: key.equals_key(x.key), self._magic_codes)
            )
            if magic_code_record and magic_code_record[0].magic_code == magic_code:
                # Move the token to long term dictionary.
                self.add_user_token(
                    connection_name,
                    key.channel_id,
                    key.user_id,
                    magic_code_record[0].key.token,
                )
                # Remove from the magic code list.
                idx = self._magic_codes.index(magic_code_record[0])
                self._magic_codes = [self._magic_codes.pop(idx)]

        match = [token for token in self._user_tokens if key.equals_key(token)]
        if match:
            return TokenResponse(
                connection_name=match[0].connection_name,
                token=match[0].token,
                expiration=None,
            )
        # Not found.
        return None

    async def sign_out_user(
        self,
        context: TurnContext,
        connection_name: str = None,
        user_id: str = None,
        oauth_app_credentials: AppCredentials = None,  # pylint: disable=unused-argument
    ):
        channel_id = context.activity.channel_id
        user_id = context.activity.from_property.id

        new_records = []
        for token in self._user_tokens:
            if (
                token.channel_id != channel_id
                or token.user_id != user_id
                or (connection_name and connection_name != token.connection_name)
            ):
                new_records.append(token)
        self._user_tokens = new_records

    async def get_oauth_sign_in_link(
        self,
        context: TurnContext,
        connection_name: str,
        final_redirect: str = None,  # pylint: disable=unused-argument
        oauth_app_credentials: AppCredentials = None,  # pylint: disable=unused-argument
    ) -> str:
        return (
            f"https://fake.com/oauthsignin"
            f"/{connection_name}/{context.activity.channel_id}/{context.activity.from_property.id}"
        )

    async def get_token_status(
        self,
        context: TurnContext,
        connection_name: str = None,
        user_id: str = None,
        include_filter: str = None,
        oauth_app_credentials: AppCredentials = None,
    ) -> Dict[str, TokenResponse]:
        return None

    async def get_aad_tokens(
        self,
        context: TurnContext,
        connection_name: str,
        resource_urls: List[str],
        user_id: str = None,  # pylint: disable=unused-argument
        oauth_app_credentials: AppCredentials = None,  # pylint: disable=unused-argument
    ) -> Dict[str, TokenResponse]:
        return None

    def add_exchangeable_token(
        self,
        connection_name: str,
        channel_id: str,
        user_id: str,
        exchangeable_item: str,
        token: str,
    ):
        key = ExchangeableToken(
            connection_name=connection_name,
            channel_id=channel_id,
            user_id=user_id,
            exchangeable_item=exchangeable_item,
            token=token,
        )
        self.exchangeable_tokens[key.to_key()] = key

    async def get_sign_in_resource_from_user(
        self,
        turn_context: TurnContext,
        connection_name: str,
        user_id: str,
        final_redirect: str = None,
    ) -> SignInUrlResponse:
        return await self.get_sign_in_resource_from_user_and_credentials(
            turn_context, None, connection_name, user_id, final_redirect
        )

    async def get_sign_in_resource_from_user_and_credentials(
        self,
        turn_context: TurnContext,
        oauth_app_credentials: AppCredentials,
        connection_name: str,
        user_id: str,
        final_redirect: str = None,
    ) -> SignInUrlResponse:
        return SignInUrlResponse(
            sign_in_link=f"https://fake.com/oauthsignin/{connection_name}/{turn_context.activity.channel_id}/{user_id}",
            token_exchange_resource=TokenExchangeResource(
                id=str(uuid4()),
                provider_id=None,
                uri=f"api://{connection_name}/resource",
            ),
        )

    async def exchange_token(
        self,
        turn_context: TurnContext,
        connection_name: str,
        user_id: str,
        exchange_request: TokenExchangeRequest,
    ) -> TokenResponse:
        return await self.exchange_token_from_credentials(
            turn_context, None, connection_name, user_id, exchange_request
        )

    async def exchange_token_from_credentials(
        self,
        turn_context: TurnContext,
        oauth_app_credentials: AppCredentials,
        connection_name: str,
        user_id: str,
        exchange_request: TokenExchangeRequest,
    ) -> TokenResponse:
        exchangeable_value = exchange_request.token or exchange_request.uri
        key = ExchangeableToken(
            channel_id=turn_context.activity.channel_id,
            connection_name=connection_name,
            exchangeable_item=exchangeable_value,
            user_id=user_id,
        )
        token_exchange_response = self.exchangeable_tokens.get(key.to_key())
        if token_exchange_response:
            return TokenResponse(
                channel_id=key.channel_id,
                connection_name=key.connection_name,
                token=token_exchange_response.token,
                expiration=None,
            )
        return None


class TestFlow:
    __test__ = False

    def __init__(self, previous: Callable, adapter: TestAdapter):
        """
        INTERNAL: creates a TestFlow instance.
        :param previous:
        :param adapter:
        """
        self.previous = previous
        self.adapter = adapter

    async def test(
        self, user_says, expected, description=None, timeout=None
    ) -> "TestFlow":
        """
        Send something to the bot and expects the bot to return with a given reply. This is simply a
        wrapper around calls to `send()` and `assertReply()`. This is such a common pattern that a
        helper is provided.
        :param user_says:
        :param expected:
        :param description:
        :param timeout:
        :return:
        """
        test_flow = await self.send(user_says)
        return await test_flow.assert_reply(
            expected, description or f'test("{user_says}", "{expected}")', timeout
        )

    async def send(self, user_says) -> "TestFlow":
        """
        Sends something to the bot.
        :param user_says:
        :return:
        """

        async def new_previous():
            nonlocal self, user_says
            if callable(self.previous):
                await self.previous()
            await self.adapter.receive_activity(user_says)

        return TestFlow(await new_previous(), self.adapter)

    async def assert_reply(
        self,
        expected: Union[str, Activity, Callable[[Activity, str], None]],
        description=None,
        timeout=None,  # pylint: disable=unused-argument
        is_substring=False,
    ) -> "TestFlow":
        """
        Generates an assertion if the bots response doesn't match the expected text/activity.
        :param expected:
        :param description:
        :param timeout:
        :param is_substring:
        :return:
        """

        # TODO: refactor method so expected can take a Callable[[Activity], None]
        def default_inspector(reply, description=None):
            if isinstance(expected, Activity):
                validate_activity(reply, expected)
            else:
                assert reply.type == "message", description + f" type == {reply.type}"
                if is_substring:
                    assert expected in reply.text.strip(), (
                        description + f" text == {reply.text}"
                    )
                else:
                    assert reply.text.strip() == expected.strip(), (
                        description + f" text == {reply.text}"
                    )

        if description is None:
            description = ""

        inspector = expected if callable(expected) else default_inspector

        async def test_flow_previous():
            nonlocal timeout
            if not timeout:
                timeout = 3000
            start = datetime.now()
            adapter = self.adapter

            async def wait_for_activity():
                nonlocal expected, timeout
                current = datetime.now()
                if (current - start).total_seconds() * 1000 > timeout:
                    if isinstance(expected, Activity):
                        expecting = expected.text
                    elif callable(expected):
                        expecting = inspect.getsourcefile(expected)
                    else:
                        expecting = str(expected)
                    raise RuntimeError(
                        f"TestAdapter.assert_reply({expecting}): {description} Timed out after "
                        f"{current - start}ms."
                    )
                if adapter.activity_buffer:
                    reply = adapter.activity_buffer.pop(0)
                    try:
                        await inspector(reply, description)
                    except Exception:
                        inspector(reply, description)
                else:
                    await asyncio.sleep(0.05)
                    await wait_for_activity()

            await wait_for_activity()

        return TestFlow(await test_flow_previous(), self.adapter)


def validate_activity(activity, expected) -> None:
    """
    Helper method that compares activities
    :param activity:
    :param expected:
    :return:
    """
    iterable_expected = vars(expected).items()
    for attr, value in iterable_expected:
        if value is not None and attr != "additional_properties":
            assert value == getattr(activity, attr)
authors: ["noreply@github.com"] · author_id: IbisMalko.noreply@github.com
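A minimal sketch of driving this adapter with a trivial echo bot, to show the send/assert flow; the echo logic is illustrative, only TestAdapter comes from the file above:

```python
# Minimal sketch: exercise TestAdapter with an echo bot. The echo logic is
# illustrative; TestAdapter itself is the class defined in the record above.
import asyncio

from botbuilder.core.adapters import TestAdapter


async def echo_logic(context):
    await context.send_activity(f"echo: {context.activity.text}")


async def main():
    adapter = TestAdapter(echo_logic)
    # send() runs the bot and buffers its replies; assert_reply() checks the next one.
    flow = await adapter.send("hi")
    await flow.assert_reply("echo: hi")


asyncio.run(main())
```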

---

blob_id: d5ad0bcdd4c362f6d9c26b8ce2af6352b3c0fe46 · directory_id: ed5f4c50bb3f86bee134f5ee23ae5dea915fc018
path: /mvb_hr/models/hr_salary_process.py · content_id: 0177d5864bee8cb5c9b8785ccedd7253afee284e
detected_licenses: [] · license_type: no_license · repo_name: pvanh2706/naduong
snapshot_id: dd3b2fde3669757f6e1110ce143371dae98b290c · revision_id: 0fc474ce4620453878b2842ff8b2939ad8723029 · branch_name: refs/heads/master
visit_date: 2022-06-23T04:00:11.928660 · revision_date: 2020-05-07T07:18:01 · committer_date: 2020-05-07T07:18:01
github_id: 261,974,019 · star_events_count: 1 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 478 · extension: py
content:
from odoo import api, fields, models


class HrSalaryProcess(models.Model):
    _name = 'hr.salary.process'
    _description = "Quá trình lương của bản thân"  # "Personal salary history"
    _rec_name = 'employee_id'

    time = fields.Char(string="Tháng/Năm")  # "Month/Year"
    level = fields.Char(string="Ngạch/Bậc")  # "Grade/Step"
    coefficients_salary = fields.Float(string='Hệ số lương')  # "Salary coefficient"
    employee_id = fields.Many2one(string="Nhân viên", comodel_name='hr.employee', required=True, ondelete='cascade')  # "Employee"
authors: ["phamvietanh27061996@gmail.com"] · author_id: phamvietanh27061996@gmail.com
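A minimal sketch of creating a record on this model from server-side Odoo code; it runs inside an Odoo environment (e.g. a model method where self.env exists), not as a standalone script, and all values are illustrative:

```python
# Minimal sketch: create an hr.salary.process record from Odoo server code.
# Runs inside an Odoo environment; the field values and the demo employee
# reference ('hr.employee_admin') are illustrative assumptions.
record = self.env['hr.salary.process'].create({
    'time': '05/2020',            # Tháng/Năm (Month/Year)
    'level': 'A1/3',              # Ngạch/Bậc (Grade/Step)
    'coefficients_salary': 2.34,  # Hệ số lương (salary coefficient)
    'employee_id': self.env.ref('hr.employee_admin').id,
})
```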

---

blob_id: a47953c4c60b57c4ae0f8338cc71ac1d6a4881df · directory_id: e1d8218edd21a9b8b933b75a4d267ae9b749cd2b
path: /Loop.py · content_id: 2ff119ba78817114ef4a89f5d5009e2e2fc1e6ec
detected_licenses: [] · license_type: no_license · repo_name: nadinef673/Delta
snapshot_id: ad7dca2b61ff1411d5d6db7ebde0b8d96c6bb867 · revision_id: 12f674ce6e6733cfd4149e8438748dff03534471 · branch_name: refs/heads/main
visit_date: 2022-12-29T18:50:00.436686 · revision_date: 2020-10-16T10:16:48 · committer_date: 2020-10-16T10:16:48
github_id: 304,591,871 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 232 · extension: py
content:
from datetime import datetime, timedelta

mydate = datetime(2020, 10, 16, 11, 58, 12)
x = 0  # unused in the original; kept as-is
dates = []  # never initialized in the original
for i in range(14, 15):
    # The original did `my.date + i * 14` and `my.strftime(...)`, which are
    # NameErrors; the apparent intent (a date i*14 days out) needs a timedelta.
    day = mydate + timedelta(days=i * 14)
    date = day.strftime('%Y-%m-%d')
    dates.append(date)
    print(date)
authors: ["nadine96jodie@gmail.com"] · author_id: nadine96jodie@gmail.com

---

blob_id: 43a00d329425895ad473b6212772b15f2b94fed9 · directory_id: f4b346937c2081f4c93bdd869b00ab71b0e95a8f
path: /venv/bin/django-admin.py · content_id: 3116d8b92ccffb1ad4b6515a1a598903da2a609e
detected_licenses: [] · license_type: no_license · repo_name: maks-nurgazy/django-movie
snapshot_id: fd427d4cc5ff69003e02df35208a779237aa89e5 · revision_id: 427421ae9304e9b98a94f889238f85a5a81fe8c7 · branch_name: refs/heads/master
visit_date: 2021-02-13T03:18:28.640159 · revision_date: 2020-03-03T14:23:48 · committer_date: 2020-03-03T14:23:48
github_id: 244,656,068 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 171 · extension: py
content:
#!/home/maksnurgazy/PycharmProjects/django_movie/venv/bin/python
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
authors: ["maksatbek.bolushov@gmail.com"] · author_id: maksatbek.bolushov@gmail.com

---

blob_id: db286fe458c775a800a157cba1c7dcea084f8871 · directory_id: 5ad1d278e006c1c87a29e2e9567ab983e77b4fbc
path: /mapparser.py · content_id: fb51cf4aa7a07c841b64025889a0abc6d9f41689
detected_licenses: [] · license_type: no_license · repo_name: AlpinistPanda/osm
snapshot_id: a55e5864411c2890715a4e6a2782657e081f4066 · revision_id: 6af610866955b86401543b6c323ec9a64b3d1c55 · branch_name: refs/heads/master
visit_date: 2021-01-24T21:29:51.093680 · revision_date: 2018-06-01T19:20:44 · committer_date: 2018-06-01T19:20:44
github_id: 123,271,826 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 477 · extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Count the number of tags
"""
import xml.etree.cElementTree as ET
import pprint


def count_tags(filename):
    tags = {}
    for event, elem in ET.iterparse(filename, events=('start',)):
        tags[elem.tag] = tags.get(elem.tag, 0) + 1
    return tags


def test():
    tags = count_tags(
        '/Users/ozgunbalaban/Dropbox/Programming/data/singapore.osm')
    pprint.pprint(tags)


if __name__ == "__main__":
    test()
authors: ["ozgunbalaban@yahoo.com"] · author_id: ozgunbalaban@yahoo.com
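count_tags streams the XML with iterparse, but it never frees processed elements, so memory still grows with file size on large OSM extracts. A minimal sketch of the usual fix, clearing each element on its end event; this is a standard ElementTree pattern, not code from the repository:

```python
# Minimal sketch: bounded-memory tag counting with ElementTree's iterparse.
# Clearing each element once it closes keeps memory flat on large files;
# this variant is not part of the original mapparser.py.
import xml.etree.ElementTree as ET


def count_tags(filename):
    tags = {}
    for _, elem in ET.iterparse(filename, events=('end',)):
        tags[elem.tag] = tags.get(elem.tag, 0) + 1
        elem.clear()  # drop the element's children once counted
    return tags
```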

---

blob_id: 732c2b6aa9c3388c7ee3ea4f79ced40b2d745393 · directory_id: 6b2a8dd202fdce77c971c412717e305e1caaac51
path: /solutions_1673486_1/Python/BetaNona/1A.py · content_id: 960634cecd6b8cf7b7b420ce8ec9c04e1cf554f6
detected_licenses: [] · license_type: no_license · repo_name: alexandraback/datacollection
snapshot_id: 0bc67a9ace00abbc843f4912562f3a064992e0e9 · revision_id: 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf · branch_name: refs/heads/master
visit_date: 2021-01-24T18:27:24.417992 · revision_date: 2017-05-23T09:23:38 · committer_date: 2017-05-23T09:23:38
github_id: 84,313,442 · star_events_count: 2 · fork_events_count: 4
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 848 · extension: py
content:
#!/usr/bin/env python
# Python 2 solution (uses the print statement and integer division).
import sys
import string

infile = open(sys.argv[1])


def solve(A, B, ps):
    assert len(ps) == A
    corProb = [1]
    for p in ps:
        corProb.append(corProb[-1] * p)
    # print corProb
    ans = B + 2
    for n in range(len(corProb)):
        if n == 0:
            ex = (1 + B + 1)
        else:
            exok = corProb[n] * ((A - n) + (B - n) + 1)
            exng = (1 - corProb[n]) * ((A - n) + (B - n) + 1 + B + 1)
            ex = exok + exng
        # print A,B,n,exok,exng,ex
        ans = min(ans, ex)
    return ans


for n, line in enumerate(infile):
    if n == 0:
        T = int(line)
        continue
    elif n % 2 == 1:
        A, B = line.split()
        A = int(A)
        B = int(B)
    else:
        ls = [float(x) for x in line.split()]
        ans = solve(A, B, ls)
        print "Case #%d: %f" % (n / 2, ans)
authors: ["eewestman@gmail.com"] · author_id: eewestman@gmail.com
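What solve computes (this reading is inferred from the code, not stated in it): ps holds the probabilities that each of the A already-typed characters is correct, so corProb[n] is the probability the first n are all right, and each option's expected keystroke count is compared. In LaTeX:

```latex
% Expected keystrokes when keeping the first n typed characters (n >= 1):
% (A-n) backspaces + (B-n) remaining characters + 1 Enter, plus a full
% retype of the B-character password and another Enter when it was wrong.
\[
E_n = (A-n) + (B-n) + 1 + \Bigl(1 - \prod_{i=1}^{n} p_i\Bigr)(B+1),
\qquad
E_{\text{enter now}} = 1 + (B+1)
\]
\[
\text{answer} = \min\Bigl(E_{\text{enter now}},\ \min_{1 \le n \le A} E_n\Bigr)
\]
```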

---

blob_id: 07372b8de4b59de236885dff005532fe4fd606ca · directory_id: abacc4d2f78b2c0f56b878ba4723b4d2d9bbc412
path: /camel_tools/calima_star/reinflector.py · content_id: 1c4fee65e98b1b7561b4b4d4c10cd4b6798bf717
detected_licenses: ["MIT"] · license_type: permissive · repo_name: dr-dahou-adrar/camel_tools
snapshot_id: 5258905d2f66a07b01502beb3585cb19be6c4f85 · revision_id: 5572aa2e073330afe554cb7d878c44622d44c007 · branch_name: refs/heads/master
visit_date: 2022-04-08T21:54:58.961676 · revision_date: 2020-03-03T16:59:32 · committer_date: 2020-03-03T16:59:32
github_id: null · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 6,547 · extension: py
content:
# -*- coding: utf-8 -*-

# MIT License
#
# Copyright 2018-2019 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""The reinflector component of CALIMA Star.
"""

from __future__ import absolute_import

from collections import deque
import re

from camel_tools.calima_star.database import CalimaStarDB
from camel_tools.calima_star.analyzer import CalimaStarAnalyzer
from camel_tools.calima_star.generator import CalimaStarGenerator
from camel_tools.calima_star.errors import ReinflectorError
from camel_tools.calima_star.errors import InvalidReinflectorFeature
from camel_tools.calima_star.errors import InvalidReinflectorFeatureValue
from camel_tools.utils.dediac import dediac_ar

_CLITIC_FEATS = frozenset(['enc0', 'prc0', 'prc1', 'prc2', 'prc3'])
_IGNORED_FEATS = frozenset(['diac', 'lex', 'bw', 'gloss', 'source', 'stem',
                            'stemcat', 'lmm', 'dediac', 'caphi', 'catib6',
                            'ud', 'd3seg', 'atbseg', 'd2seg', 'd1seg', 'd1tok',
                            'd2tok', 'atbtok', 'd3tok', 'root', 'pattern',
                            'freq', 'pos_freq', 'lex_freq', 'pos_lex_freq',
                            'stemgloss'])
_SPECIFIED_FEATS = frozenset(['form_gen', 'form_num'])
_CLITIC_IGNORED_FEATS = frozenset(['stt', 'cas', 'mod'])
_FILTER_FEATS = frozenset(['pos', 'lex'])
_ANY_FEATS = frozenset(['per', 'gen', 'num', 'cas', 'stt', 'vox', 'mod',
                        'asp'])
_LEMMA_SPLIT_RE = re.compile(u'-|_')


class CalimaStarReinflector(object):
    """CALIMA Star reinflector component.

    Arguments:
        db (:obj:`~camel_tools.calima_star.database.CalimaStarDB`): Database to
            use for generation. Must be opened in reinflection mode or both
            analysis and generation modes.

    Raises:
        :obj:`~camel_tools.calima_star.errors.ReinflectorError`: If **db** is
            not an instance of
            :obj:`~camel_tools.calima_star.database.CalimaStarDB` or if **db**
            does not support reinflection.
    """

    def __init__(self, db):
        if not isinstance(db, CalimaStarDB):
            raise ReinflectorError('DB is not an instance of CalimaStarDB')
        if not db.flags.generation:
            raise ReinflectorError('DB does not support reinflection')

        self._db = db
        self._analyzer = CalimaStarAnalyzer(db)
        self._generator = CalimaStarGenerator(db)

    def reinflect(self, word, feats):
        """Generate analyses for a given word from a given set of inflectional
        features.

        Arguments:
            word (:obj:`str`): Word to reinflect.
            feats (:obj:`dict`): Dictionary of features.
                See :doc:`/reference/calima_star_features` for more information
                on features and their values.

        Returns:
            :obj:`list` of :obj:`dict`: List of generated analyses.
            See :doc:`/reference/calima_star_features` for more information on
            features and their values.

        Raises:
            :obj:`~camel_tools.calima_star.errors.InvalidReinflectorFeature`:
                If a feature is given that is not defined in database.
            :obj:`~camel_tools.calima_star.errors.InvalidReinflectorFeatureValue`:
                If an invalid value is given to a feature or if 'pos' feature
                is not defined.
        """
        analyses = self._analyzer.analyze(word)

        if not analyses or len(analyses) == 0:
            return []

        for feat in feats:
            if feat not in self._db.defines:
                raise InvalidReinflectorFeature(feat)
            elif self._db.defines[feat] is not None:
                if feat in _ANY_FEATS and feats[feat] == 'ANY':
                    continue
                elif feats[feat] not in self._db.defines[feat]:
                    raise InvalidReinflectorFeatureValue(feat, feats[feat])

        has_clitics = False
        for feat in _CLITIC_FEATS:
            if feat in feats:
                has_clitics = True
                break

        results = deque()

        for analysis in analyses:
            if dediac_ar(analysis['diac']) != dediac_ar(word):
                continue

            if 'pos' in feats and feats['pos'] != analysis['pos']:
                continue

            lemma = _LEMMA_SPLIT_RE.split(analysis['lex'])[0]

            if 'lex' in feats and feats['lex'] != lemma:
                continue

            is_valid = True
            generate_feats = {}

            for feat in analysis.keys():
                if feat in _IGNORED_FEATS:
                    continue
                elif feat in _SPECIFIED_FEATS and feat not in feats:
                    continue
                elif has_clitics and feat in _CLITIC_IGNORED_FEATS:
                    continue
                else:
                    if feat in feats:
                        if feats[feat] == 'ANY':
                            continue
                        elif analysis[feat] != 'na':
                            generate_feats[feat] = feats[feat]
                        else:
                            is_valid = False
                            break
                    elif analysis[feat] != 'na':
                        generate_feats[feat] = analysis[feat]

            if is_valid:
                generated = self._generator.generate(lemma, generate_feats)
                if generated is not None:
                    results.extend(generated)

        return list(results)
authors: ["owo@owobeid.com"] · author_id: owo@owobeid.com
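A minimal sketch of using the reinflector; the database path is a placeholder, and per the class docstring above the DB must be opened with reinflection support (the 'r' flag):

```python
# Minimal sketch: reinflect an Arabic word with CalimaStarReinflector.
# 'path/to/morphology.db' is a placeholder; the 'r' flag opens the DB in
# reinflection mode, as required by the class docstring above.
from camel_tools.calima_star.database import CalimaStarDB
from camel_tools.calima_star.reinflector import CalimaStarReinflector

db = CalimaStarDB('path/to/morphology.db', 'r')
reinflector = CalimaStarReinflector(db)

# Ask for a plural form; each analysis is a feature dict whose 'diac' key
# holds a generated, diacritized surface form.
for analysis in reinflector.reinflect(u'كتاب', {'num': 'p'}):
    print(analysis['diac'])
```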

---

blob_id: d4e16c555e2f3626ac1f8499418ddb2bbc340403 · directory_id: 56fe5027956b6b63b55ab96931f49f208b62dfd6
path: /ocr.py · content_id: cb5c42f06523a906bffb61989f07fd4fb2d376cd
detected_licenses: ["Apache-2.0"] · license_type: permissive · repo_name: JackyXiao98/chinese_ocr
snapshot_id: ed8687ad4506739a81d97100556dc5ea6e3dbe29 · revision_id: 62c45fd6df0959144fad411cf90ee136047802c3 · branch_name: refs/heads/master
visit_date: 2020-06-01T10:51:42.933439 · revision_date: 2019-06-07T14:27:49 · committer_date: 2019-06-07T14:27:49
github_id: 190,755,220 · star_events_count: 1 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,974 · extension: py
content:
# -*- coding:utf-8 -*-
import os
import sys
import cv2
from math import *
import numpy as np
from PIL import Image

sys.path.append(os.getcwd() + '/ctpn')
from ctpn.text_detect import text_detect
from ctpn.lib.fast_rcnn.config import cfg_from_file
from densenet.model import predict as keras_densenet


def sort_box(box):
    """
    Sort the boxes (top to bottom, by the sum of their y coordinates).
    """
    box = sorted(box, key=lambda x: sum([x[1], x[3], x[5], x[7]]))
    return box


def dumpRotateImage(img, degree, pt1, pt2, pt3, pt4):
    height, width = img.shape[:2]
    heightNew = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree))))
    widthNew = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree))))
    matRotation = cv2.getRotationMatrix2D((width // 2, height // 2), degree, 1)
    matRotation[0, 2] += (widthNew - width) // 2
    matRotation[1, 2] += (heightNew - height) // 2
    imgRotation = cv2.warpAffine(img, matRotation, (widthNew, heightNew), borderValue=(255, 255, 255))
    pt1 = list(pt1)
    pt3 = list(pt3)
    [[pt1[0]], [pt1[1]]] = np.dot(matRotation, np.array([[pt1[0]], [pt1[1]], [1]]))
    [[pt3[0]], [pt3[1]]] = np.dot(matRotation, np.array([[pt3[0]], [pt3[1]], [1]]))
    ydim, xdim = imgRotation.shape[:2]
    imgOut = imgRotation[max(1, int(pt1[1])) : min(ydim - 1, int(pt3[1])), max(1, int(pt1[0])) : min(xdim - 1, int(pt3[0]))]
    return imgOut


def charRec(img, text_recs, adjust=False):
    """
    Load the OCR model and run character recognition.
    """
    results = {}
    xDim, yDim = img.shape[1], img.shape[0]
    for index, rec in enumerate(text_recs):
        xlength = int((rec[6] - rec[0]) * 0.1)
        ylength = int((rec[7] - rec[1]) * 0.2)
        if adjust:
            pt1 = (max(1, rec[0] - xlength), max(1, rec[1] - ylength))
            pt2 = (rec[2], rec[3])
            pt3 = (min(rec[6] + xlength, xDim - 2), min(yDim - 2, rec[7] + ylength))
            pt4 = (rec[4], rec[5])
        else:
            pt1 = (max(1, rec[0]), max(1, rec[1]))
            pt2 = (rec[2], rec[3])
            pt3 = (min(rec[6], xDim - 2), min(yDim - 2, rec[7]))
            pt4 = (rec[4], rec[5])
        degree = degrees(atan2(pt2[1] - pt1[1], pt2[0] - pt1[0]))  # skew angle of the text line
        partImg = dumpRotateImage(img, degree, pt1, pt2, pt3, pt4)
        if partImg.shape[0] < 1 or partImg.shape[1] < 1 or partImg.shape[0] > partImg.shape[1]:  # filter out malformed crops
            continue
        image = Image.fromarray(partImg).convert('L')
        text = keras_densenet(image)
        if len(text) > 0:
            results[index] = [rec]
            results[index].append(text)  # recognized text
    return results


def model(img, adjust=False):
    """
    @img: input image
    @adjust: whether to pad the detected text boxes before recognition
    """
    cfg_from_file('./ctpn/ctpn/text.yml')
    text_recs, img_framed, img = text_detect(img)
    text_recs = sort_box(text_recs)
    result = charRec(img, text_recs, adjust)
    return result, img_framed
authors: ["1025965245@qq.com"] · author_id: 1025965245@qq.com
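A minimal sketch of calling this module's entry point on an image; "sample.jpg" is a placeholder path, and the pipeline assumes the CTPN and DenseNet dependencies above are in place:

```python
# Minimal sketch: run the CTPN + DenseNet OCR pipeline defined in ocr.py.
# "sample.jpg" is a placeholder; `ocr` is the module from the record above.
import cv2

import ocr

img = cv2.imread('sample.jpg')
result, img_framed = ocr.model(img, adjust=True)
# Each entry maps a box index to [box_coordinates, recognized_text].
for index in sorted(result):
    box, text = result[index]
    print(index, text)
```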

---

blob_id: 137e4c297ff21901af54db446c00bb36ea82e99f · directory_id: d5601b6f45e273532ebd238391fcdb8cd5eacb79
path: /django/bin/easy_install-3.5 · content_id: f556a8a3ed7fcc0150053f96edb629efc2d1913c
detected_licenses: [] · license_type: no_license · repo_name: dobunchu/my-first-blog
snapshot_id: a7e3fa7f0bf5c9dc6d7e2d26221589fc357472d3 · revision_id: 959518e621d90bfa42322bee56f7979c8c5b2a30 · branch_name: refs/heads/master
visit_date: 2020-04-03T19:16:44.569056 · revision_date: 2018-11-01T04:44:41 · committer_date: 2018-11-01T04:44:41
github_id: 155,267,204 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 266 · extension: 5
content:
#!/Users/aikawafujio/djangogirls/django/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from setuptools.command.easy_install import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
authors: ["happyskull.0101@gmail.com"] · author_id: happyskull.0101@gmail.com

---

blob_id: d4b23573080940fa45bc9386697a88a5f1427622 · directory_id: be79cc449bdcfdae7b87e9e68d85cce63cf10ac9
path: /0x01/strToint.py · content_id: 72a4e4625ded75d1defbcc0e1c3cf13c5023e1e5
detected_licenses: [] · license_type: no_license · repo_name: Paul9inee/Elementary_Algorithm
snapshot_id: 544797c798d5f9f2078070c981b44e99673a955b · revision_id: 262e951173e7b94afd04da42cf741cdb67a2a562 · branch_name: refs/heads/master
visit_date: 2022-12-18T18:51:37.938212 · revision_date: 2020-09-11T14:01:54 · committer_date: 2020-09-11T14:01:54
github_id: 280,383,161 · star_events_count: 1 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 60 · extension: py
content:
def solution(s):
    return int(s)


print(solution("-1234"))
authors: ["lehninger10@gmail.com"] · author_id: lehninger10@gmail.com

---

blob_id: 2ad5f0a62788752e5a9e1e82d5a32b77709ae87f · directory_id: 6d824eae55583dfabc130bbafb797e860914f10c
path: /.svn/pristine/c0/c0d11e020d6a610a4f201fbb569d1f9921648c38.svn-base · content_id: 5c8865d5bd5f93334f4cad8e933066979fc4cf98
detected_licenses: [] · license_type: no_license · repo_name: sproutsbean/o2o
snapshot_id: d97fd4840f983e4ff22746aaaeb1068f4c086755 · revision_id: 2e2dbc35d756f5eda4232f0a737dcb3c074954e7 · branch_name: refs/heads/master
visit_date: 2021-05-05T19:23:20.733118 · revision_date: 2018-01-17T03:30:25 · committer_date: 2018-01-17T03:30:25
github_id: 117,774,569 · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 2,493
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author:user
@file: loan_clear.py
@time: 2018/01/02
"""
from com.ea.common import tools, menu
import time


def loan_clear(driver, payname, loanno, casename, screenshot_path):
    u"""Loan settlement (贷款结清)."""
    expect_result = "审核中"  # "under review"
    pay_date = "2017-10-10"
    print("申请贷款结清开始")  # "loan settlement application started"
    print("使用的贷款单号是:" + loanno)  # "loan number in use"
    try:
        menu.go_to_loan_query(driver)
        from com.ea.pages.loan_clear_page import LoanClearPage
        loanclearpage = LoanClearPage(driver)
        loanclearpage.click_loan_no(loanno)
        loanclearpage.click_loan_clear_button()
        loanclearpage.scroll_to_payman()
        loanclearpage.input_payname(payname)
        loanclearpage.input_pay_date(pay_date)
        loanclearpage.click_saveandstart()
        actual_result = loanclearpage.get_result(loanno)
        assert actual_result == expect_result
        print("申请贷款结清结束")  # "loan settlement application finished"
    except Exception as e:
        tools.get_screenshot(driver, screenshot_path, casename)
        raise e


def loan_clear_approve(driver, loanno, screenshot_path, casename):
    u"""Approve the loan settlement workflow (审批贷款结清流程)."""
    types = "提前还款"  # "early repayment"
    clear_date = "2017-10-10"
    approve_view = "OK"
    expect_result = "还款完成"  # "repayment complete"
    # self.loanno = "SK0027-BP-1801-00002"
    from com.ea.pages.todo_page import TodoPage, LoanClearApprove
    try:
        print("审批贷款结清开始")  # "loan settlement approval started"
        print("使用的贷款单号是:" + loanno)
        todopage = TodoPage(driver)
        loanclearapprovepage = LoanClearApprove(driver)
        menu.go_to_wait_todo_query(driver)
        todopage.input_yewuno(loanno)
        todopage.click_query_all()
        todopage.click_search_button()
        todopage.click_first_row(types)
        loanclearapprovepage.scroll_to_clear_date()
        loanclearapprovepage.input_clear_date(clear_date)
        loanclearapprovepage.click_save_button()
        loanclearapprovepage.click_confirm_button()
        loanclearapprovepage.input_approve_view(approve_view)
        loanclearapprovepage.click_tongguo_button()
        loanclearapprovepage.click_confirm_button()
        time.sleep(5)
        menu.go_to_loan_query(driver)
        actual_result = loanclearapprovepage.get_result(loanno)
        assert actual_result == expect_result
        print("审批贷款结清结束")  # "loan settlement approval finished"
    except Exception as e:
        tools.get_screenshot(driver, screenshot_path, casename)
        raise e
authors: ["lijie"] · author_id: lijie

---

blob_id: 67f23b887c9b025b10d72e2f391c8b5a8095153f · directory_id: 888e302b4b42ec184dee54707e6817e0810e2671
path: /gooapengn/webaap.py · content_id: 4e3232cc85c1d0ef5afaeb20b66745ab0d8caefd
detected_licenses: [] · license_type: no_license · repo_name: armohamm/Remedoc
snapshot_id: 365cfb0be22fd8045f5a2c4405286cfbb1df296d · revision_id: d19a31edd946444f3bb5a0ad5d066ec2abc5bcf8 · branch_name: refs/heads/master
visit_date: 2020-06-12T23:47:42.507633 · revision_date: 2017-04-19T21:32:42 · committer_date: 2017-04-19T21:32:42
github_id: null · star_events_count: 0 · fork_events_count: 0
gha_license_id: null · gha_event_created_at: null · gha_created_at: null · gha_language: null
src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 68,369 · extension: py
content:
# -*- coding: utf-8 -*-
"""
webapp2
=======
Taking Google App Engine's webapp to the next level!
:copyright: 2011 by tipfy.org.
:license: Apache Sotware License, see LICENSE for details.
"""
from __future__ import with_statement
import cgi
import inspect
import logging
import os
import re
import sys
import threading
import traceback
import urllib
import urlparse
from wsgiref import handlers
import webob
from webob import exc
_webapp = _webapp_util = _local = None
try: # pragma: no cover
# WebOb < 1.0 (App Engine Python 2.5).
from webob.statusreasons import status_reasons
from webob.headerdict import HeaderDict as BaseResponseHeaders
except ImportError: # pragma: no cover
# WebOb >= 1.0.
from webob.util import status_reasons
from webob.headers import ResponseHeaders as BaseResponseHeaders
# google.appengine.ext.webapp imports webapp2 in the
# App Engine Python 2.7 runtime.
if os.environ.get('APPENGINE_RUNTIME') != 'python27': # pragma: no cover
try:
from google.appengine.ext import webapp as _webapp
except ImportError: # pragma: no cover
# Running webapp2 outside of GAE.
pass
try: # pragma: no cover
# Thread-local variables container.
from webapp2_extras import local
_local = local.Local()
except ImportError: # pragma: no cover
logging.warning("webapp2_extras.local is not available "
"so webapp2 won't be thread-safe!")
__version_info__ = (2, 5, 2)
__version__ = '.'.join(str(n) for n in __version_info__)
#: Base HTTP exception, set here as public interface.
HTTPException = exc.HTTPException
#: Regex for route definitions.
_route_re = re.compile(r"""
\< # The exact character "<"
([a-zA-Z_]\w*)? # The optional variable name
(?:\:([^\>]*))? # The optional :regex part
\> # The exact character ">"
""", re.VERBOSE)
#: Regex extract charset from environ.
_charset_re = re.compile(r';\s*charset=([^;]*)', re.I)
#: To show exceptions in debug mode.
_debug_template = """<html>
<head>
<title>Internal Server Error</title>
<style>
body {
padding: 20px;
font-family: arial, sans-serif;
font-size: 14px;
}
pre {
background: #F2F2F2;
padding: 10px;
}
</style>
</head>
<body>
<h1>Internal Server Error</h1>
<p>The server has either erred or is incapable of performing
the requested operation.</p>
<pre>%s</pre>
</body>
</html>"""
# Set same default messages from webapp plus missing ones.
_webapp_status_reasons = {
203: 'Non-Authoritative Information',
302: 'Moved Temporarily',
306: 'Unused',
408: 'Request Time-out',
414: 'Request-URI Too Large',
504: 'Gateway Time-out',
505: 'HTTP Version not supported',
}
status_reasons.update(_webapp_status_reasons)
for code, message in _webapp_status_reasons.iteritems():
cls = exc.status_map.get(code)
if cls:
cls.title = message
class Request(webob.Request):
"""Abstraction for an HTTP request.
Most extra methods and attributes are ported from webapp. Check the
`WebOb`_ documentation for the ones not listed here.
"""
#: A reference to the active :class:`WSGIApplication` instance.
app = None
#: A reference to the active :class:`Response` instance.
response = None
#: A reference to the matched :class:`Route`.
route = None
#: The matched route positional arguments.
route_args = None
#: The matched route keyword arguments.
route_kwargs = None
#: A dictionary to register objects used during the request lifetime.
registry = None
# Attributes from webapp.
request_body_tempfile_limit = 0
uri = property(lambda self: self.url)
query = property(lambda self: self.query_string)
def __init__(self, environ, *args, **kwargs):
"""Constructs a Request object from a WSGI environment.
:param environ:
A WSGI-compliant environment dictionary.
"""
if kwargs.get('charset') is None and not hasattr(webob, '__version__'):
# webob 0.9 didn't have a __version__ attribute and also defaulted
# to None rather than UTF-8 if no charset was provided. Providing a
# default charset is required for backwards compatibility.
match = _charset_re.search(environ.get('CONTENT_TYPE', ''))
if match:
charset = match.group(1).lower().strip().strip('"').strip()
else:
charset = 'utf-8'
kwargs['charset'] = charset
super(Request, self).__init__(environ, *args, **kwargs)
self.registry = {}
def get(self, argument_name, default_value='', allow_multiple=False):
"""Returns the query or POST argument with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
:param argument_name:
The name of the query or POST argument.
:param default_value:
The value to return if the given argument is not present.
:param allow_multiple:
Return a list of values with the given name (deprecated).
:returns:
If allow_multiple is False (which it is by default), we return
the first value with the given name given in the request. If it
is True, we always return a list.
"""
param_value = self.get_all(argument_name)
if allow_multiple:
logging.warning('allow_multiple is a deprecated param. '
'Please use the Request.get_all() method instead.')
if len(param_value) > 0:
if allow_multiple:
return param_value
return param_value[0]
else:
if allow_multiple and not default_value:
return []
return default_value
def get_all(self, argument_name, default_value=None):
"""Returns a list of query or POST arguments with the given name.
We parse the query string and POST payload lazily, so this will be a
slower operation on the first call.
:param argument_name:
The name of the query or POST argument.
:param default_value:
The value to return if the given argument is not present,
None may not be used as a default, if it is then an empty
list will be returned instead.
:returns:
A (possibly empty) list of values.
"""
if self.charset:
argument_name = argument_name.encode(self.charset)
if default_value is None:
default_value = []
param_value = self.params.getall(argument_name)
if param_value is None or len(param_value) == 0:
return default_value
for i in xrange(len(param_value)):
if isinstance(param_value[i], cgi.FieldStorage):
param_value[i] = param_value[i].value
return param_value
def arguments(self):
"""Returns a list of the arguments provided in the query and/or POST.
The return value is a list of strings.
"""
return list(set(self.params.keys()))
def get_range(self, name, min_value=None, max_value=None, default=0):
"""Parses the given int argument, limiting it to the given range.
:param name:
The name of the argument.
:param min_value:
The minimum int value of the argument (if any).
:param max_value:
The maximum int value of the argument (if any).
:param default:
The default value of the argument if it is not given.
:returns:
An int within the given range for the argument.
"""
value = self.get(name, default)
if value is None:
return value
try:
value = int(value)
except ValueError:
value = default
if value is not None:
if max_value is not None:
value = min(value, max_value)
if min_value is not None:
value = max(value, min_value)
return value
@classmethod
def blank(cls, path, environ=None, base_url=None,
headers=None, **kwargs): # pragma: no cover
"""Adds parameters compatible with WebOb >= 1.0: POST and **kwargs."""
try:
return super(Request, cls).blank(path, environ=environ,
base_url=base_url,
headers=headers, **kwargs)
except TypeError:
if not kwargs:
raise
data = kwargs.pop('POST', None)
if data is not None:
from cStringIO import StringIO
environ = environ or {}
environ['REQUEST_METHOD'] = 'POST'
if hasattr(data, 'items'):
data = data.items()
if not isinstance(data, str):
data = urllib.urlencode(data)
environ['wsgi.input'] = StringIO(data)
environ['webob.is_body_seekable'] = True
environ['CONTENT_LENGTH'] = str(len(data))
environ['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
base = super(Request, cls).blank(path, environ=environ,
base_url=base_url, headers=headers)
if kwargs:
obj = cls(base.environ, **kwargs)
obj.headers.update(base.headers)
return obj
else:
return base
class ResponseHeaders(BaseResponseHeaders):
"""Implements methods from ``wsgiref.headers.Headers``, used by webapp."""
get_all = BaseResponseHeaders.getall
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example::
h.add_header('content-disposition', 'attachment',
filename='bud.gif')
Note that unlike the corresponding 'email.message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
k = k.replace('_', '-')
if v is not None and len(v) > 0:
v = v.replace('\\', '\\\\').replace('"', r'\"')
parts.append('%s="%s"' % (k, v))
else:
parts.append(k)
self.add(_name, '; '.join(parts))
def __str__(self):
"""Returns the formatted headers ready for HTTP transmission."""
return '\r\n'.join(['%s: %s' % v for v in self.items()] + ['', ''])
class Response(webob.Response):
"""Abstraction for an HTTP response.
Most extra methods and attributes are ported from webapp. Check the
`WebOb`_ documentation for the ones not listed here.
Differences from webapp.Response:
- ``out`` is not a ``StringIO.StringIO`` instance. Instead it is the
response itself, as it has the method ``write()``.
- As in WebOb, ``status`` is the code plus message, e.g., '200 OK', while
in webapp it is the integer code. The status code as an integer is
available in ``status_int``, and the status message is available in
``status_message``.
- ``response.headers`` raises an exception when a key that doesn't exist
is accessed or deleted, differently from ``wsgiref.headers.Headers``.
"""
#: Default charset as in webapp.
default_charset = 'utf-8'
def __init__(self, *args, **kwargs):
"""Constructs a response with the default settings."""
super(Response, self).__init__(*args, **kwargs)
self.headers['Cache-Control'] = 'no-cache'
@property
def out(self):
"""A reference to the Response instance itself, for compatibility with
webapp only: webapp uses `Response.out.write()`, so we point `out` to
`self` and it will use `Response.write()`.
"""
return self
def write(self, text):
"""Appends a text to the response body."""
# webapp uses StringIO as Response.out, so we need to convert anything
# that is not str or unicode to string to keep same behavior.
if not isinstance(text, basestring):
text = unicode(text)
if isinstance(text, unicode) and not self.charset:
self.charset = self.default_charset
super(Response, self).write(text)
def _set_status(self, value):
"""The status string, including code and message."""
message = None
# Accept long because urlfetch in App Engine returns codes as longs.
if isinstance(value, (int, long)):
code = int(value)
else:
if isinstance(value, unicode):
# Status messages have to be ASCII safe, so this is OK.
value = str(value)
if not isinstance(value, str):
raise TypeError(
'You must set status to a string or integer (not %s)' %
type(value))
parts = value.split(' ', 1)
code = int(parts[0])
if len(parts) == 2:
message = parts[1]
message = message or Response.http_status_message(code)
self._status = '%d %s' % (code, message)
def _get_status(self):
return self._status
status = property(_get_status, _set_status, doc=_set_status.__doc__)
def set_status(self, code, message=None):
"""Sets the HTTP status code of this response.
:param code:
The HTTP status string to use
:param message:
A status string. If none is given, uses the default from the
HTTP/1.1 specification.
"""
if message:
self.status = '%d %s' % (code, message)
else:
self.status = code
def _get_status_message(self):
"""The response status message, as a string."""
return self.status.split(' ', 1)[1]
def _set_status_message(self, message):
self.status = '%d %s' % (self.status_int, message)
status_message = property(_get_status_message, _set_status_message,
doc=_get_status_message.__doc__)
def _get_headers(self):
"""The headers as a dictionary-like object."""
if self._headers is None:
self._headers = ResponseHeaders.view_list(self.headerlist)
return self._headers
def _set_headers(self, value):
if hasattr(value, 'items'):
value = value.items()
elif not isinstance(value, list):
raise TypeError('Response headers must be a list or dictionary.')
self.headerlist = value
self._headers = None
headers = property(_get_headers, _set_headers, doc=_get_headers.__doc__)
def has_error(self):
"""Indicates whether the response was an error response."""
return self.status_int >= 400
def clear(self):
"""Clears all data written to the output stream so that it is empty."""
self.body = ''
def wsgi_write(self, start_response):
"""Writes this response using using the given WSGI function.
This is only here for compatibility with ``webapp.WSGIApplication``.
:param start_response:
The WSGI-compatible start_response function.
"""
if (self.headers.get('Cache-Control') == 'no-cache' and
not self.headers.get('Expires')):
self.headers['Expires'] = 'Fri, 01 Jan 1990 00:00:00 GMT'
self.headers['Content-Length'] = str(len(self.body))
write = start_response(self.status, self.headerlist)
write(self.body)
@staticmethod
def http_status_message(code):
"""Returns the default HTTP status message for the given code.
:param code:
The HTTP code for which we want a message.
"""
message = status_reasons.get(code)
if not message:
raise KeyError('Invalid HTTP status code: %d' % code)
return message
class RequestHandler(object):
"""Base HTTP request handler.
Implements most of ``webapp.RequestHandler`` interface.
"""
#: A :class:`Request` instance.
request = None
#: A :class:`Response` instance.
response = None
#: A :class:`WSGIApplication` instance.
app = None
def __init__(self, request=None, response=None):
"""Initializes this request handler with the given WSGI application,
Request and Response.
When instantiated by ``webapp.WSGIApplication``, request and response
are not set on instantiation. Instead, initialize() is called right
after the handler is created to set them.
Also in webapp dispatching is done by the WSGI app, while webapp2
does it here to allow more flexibility in extended classes: handlers
can wrap :meth:`dispatch` to check for conditions before executing the
requested method and/or post-process the response.
.. note::
Parameters are optional only to support webapp's constructor which
doesn't take any arguments. Consider them as required.
:param request:
A :class:`Request` instance.
:param response:
A :class:`Response` instance.
"""
self.initialize(request, response)
def initialize(self, request, response):
"""Initializes this request handler with the given WSGI application,
Request and Response.
:param request:
A :class:`Request` instance.
:param response:
A :class:`Response` instance.
"""
self.request = request
self.response = response
self.app = WSGIApplication.active_instance
def dispatch(self):
"""Dispatches the request.
This will first check if there's a handler_method defined in the
matched route, and if not it'll use the method correspondent to the
request method (``get()``, ``post()`` etc).
"""
request = self.request
method_name = request.route.handler_method
if not method_name:
method_name = _normalize_handler_method(request.method)
method = getattr(self, method_name, None)
if method is None:
# 405 Method Not Allowed.
# The response MUST include an Allow header containing a
# list of valid methods for the requested resource.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4.6
valid = ', '.join(_get_handler_methods(self))
self.abort(405, headers=[('Allow', valid)])
# The handler only receives *args if no named variables are set.
args, kwargs = request.route_args, request.route_kwargs
if kwargs:
args = ()
try:
return method(*args, **kwargs)
except Exception, e:
return self.handle_exception(e, self.app.debug)
def error(self, code):
"""Clears the response and sets the given HTTP status code.
This doesn't stop code execution; for this, use :meth:`abort`.
:param code:
HTTP status error code (e.g., 501).
"""
self.response.status = code
self.response.clear()
def abort(self, code, *args, **kwargs):
"""Raises an :class:`HTTPException`.
This stops code execution, leaving the HTTP exception to be handled
by an exception handler.
:param code:
HTTP status code (e.g., 404).
:param args:
Positional arguments to be passed to the exception class.
:param kwargs:
Keyword arguments to be passed to the exception class.
"""
abort(code, *args, **kwargs)
def redirect(self, uri, permanent=False, abort=False, code=None,
body=None):
"""Issues an HTTP redirect to the given relative URI.
The arguments are described in :func:`redirect`.
"""
return redirect(uri, permanent=permanent, abort=abort, code=code,
body=body, request=self.request,
response=self.response)
def redirect_to(self, _name, _permanent=False, _abort=False, _code=None,
_body=None, *args, **kwargs):
"""Convenience method mixing :meth:`redirect` and :meth:`uri_for`.
The arguments are described in :func:`redirect` and :func:`uri_for`.
"""
uri = self.uri_for(_name, *args, **kwargs)
return self.redirect(uri, permanent=_permanent, abort=_abort,
code=_code, body=_body)
def uri_for(self, _name, *args, **kwargs):
"""Returns a URI for a named :class:`Route`.
.. seealso:: :meth:`Router.build`.
"""
return self.app.router.build(self.request, _name, args, kwargs)
# Alias.
url_for = uri_for
def handle_exception(self, exception, debug):
"""Called if this handler throws an exception during execution.
The default behavior is to re-raise the exception to be handled by
:meth:`WSGIApplication.handle_exception`.
:param exception:
The exception that was thrown.
        :param debug:
            True if the web application is running in debug mode.
"""
raise
class RedirectHandler(RequestHandler):
"""Redirects to the given URI for all GET requests.
    This is intended to be used when defining URI routes. You must provide at
    least the keyword argument *_uri* in the route default values. Example::
def get_redirect_url(handler, *args, **kwargs):
return handler.uri_for('new-route-name')
app = WSGIApplication([
Route('/old-url', RedirectHandler, defaults={'_uri': '/new-url'}),
Route('/other-old-url', RedirectHandler, defaults={
'_uri': get_redirect_url}),
])
    Based on an idea from `Tornado`_.
"""
def get(self, *args, **kwargs):
"""Performs a redirect.
        Three keyword arguments can be passed through the URI route:
        - **_uri**: A URI string or a callable that returns a URI. The callable
          is called passing ``(handler, *args, **kwargs)`` as arguments.
        - **_permanent**: If True (the default), issues a 301 permanent
          redirect; otherwise a 302 is used.
        - **_code**: An explicit redirect status code that overrides the one
          implied by ``_permanent``.
"""
uri = kwargs.pop('_uri', '/')
permanent = kwargs.pop('_permanent', True)
code = kwargs.pop('_code', None)
func = getattr(uri, '__call__', None)
if func:
uri = func(self, *args, **kwargs)
self.redirect(uri, permanent=permanent, code=code)
class cached_property(object):
"""A decorator that converts a function into a lazy property.
The function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
    .. note:: Implementation detail: this property is implemented as a
        non-data descriptor. Non-data descriptors are only invoked if there is
        no entry with the same name in the instance's __dict__.
        This allows us to completely get rid of the access function call
        overhead. If one chooses to invoke __get__ by hand, the property
        will still work as expected because the lookup logic is replicated
        in __get__ for manual invocation.
This class was ported from `Werkzeug`_ and `Flask`_.
"""
_default_value = object()
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = threading.RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, self._default_value)
if value is self._default_value:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
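# A minimal cached_property usage sketch (Settings is a hypothetical class,
# not part of this module):
#
#     class Settings(object):
#         @cached_property
#         def parsed(self):
#             print 'computed once'
#             return {'debug': True}
#
#     s = Settings()
#     s.parsed   # prints 'computed once' and computes the value
#     s.parsed   # served from s.__dict__; the function is not called again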
class BaseRoute(object):
"""Interface for URI routes."""
#: The regex template.
template = None
#: Route name, used to build URIs.
name = None
#: True if this route is only used for URI generation and never matches.
build_only = False
#: The handler or string in dotted notation to be lazily imported.
handler = None
#: The custom handler method, if handler is a class.
handler_method = None
#: The handler, imported and ready for dispatching.
handler_adapter = None
def __init__(self, template, handler=None, name=None, build_only=False):
"""Initializes this route.
:param template:
A regex to be matched.
:param handler:
A callable or string in dotted notation to be lazily imported,
e.g., ``'my.module.MyHandler'`` or ``'my.module.my_function'``.
:param name:
The name of this route, used to build URIs based on it.
:param build_only:
If True, this route never matches and is used only to build URIs.
"""
if build_only and name is None:
raise ValueError(
"Route %r is build_only but doesn't have a name." % self)
self.template = template
self.handler = handler
self.name = name
self.build_only = build_only
def match(self, request):
"""Matches all routes against a request object.
The first one that matches is returned.
:param request:
A :class:`Request` instance.
:returns:
A tuple ``(route, args, kwargs)`` if a route matched, or None.
"""
raise NotImplementedError()
def build(self, request, args, kwargs):
"""Returns a URI for this route.
:param request:
The current :class:`Request` object.
:param args:
Tuple of positional arguments to build the URI.
:param kwargs:
Dictionary of keyword arguments to build the URI.
:returns:
An absolute or relative URI.
"""
raise NotImplementedError()
def get_routes(self):
"""Generator to get all routes from a route.
:yields:
This route or all nested routes that it contains.
"""
yield self
def get_match_routes(self):
"""Generator to get all routes that can be matched from a route.
Match routes must implement :meth:`match`.
:yields:
This route or all nested routes that can be matched.
"""
if not self.build_only:
yield self
def get_build_routes(self):
"""Generator to get all routes that can be built from a route.
Build routes must implement :meth:`build`.
:yields:
A tuple ``(name, route)`` for all nested routes that can be built.
"""
if self.name is not None:
yield self.name, self
class SimpleRoute(BaseRoute):
"""A route that is compatible with webapp's routing mechanism.
    URI building is not implemented, as webapp has only rudimentary support
    for it and it is one of the least-known webapp features anyway.
"""
@cached_property
def regex(self):
"""Lazy regex compiler."""
if not self.template.startswith('^'):
self.template = '^' + self.template
if not self.template.endswith('$'):
self.template += '$'
return re.compile(self.template)
def match(self, request):
"""Matches this route against the current request.
.. seealso:: :meth:`BaseRoute.match`.
"""
match = self.regex.match(urllib.unquote(request.path))
if match:
return self, match.groups(), {}
def __repr__(self):
return '<SimpleRoute(%r, %r)>' % (self.template, self.handler)
class Route(BaseRoute):
"""A route definition that maps a URI path to a handler.
The initial concept was based on `Another Do-It-Yourself Framework`_, by
Ian Bicking.
"""
#: Default parameters values.
defaults = None
#: Sequence of allowed HTTP methods. If not set, all methods are allowed.
methods = None
#: Sequence of allowed URI schemes. If not set, all schemes are allowed.
schemes = None
# Lazy properties extracted from the route template.
regex = None
reverse_template = None
variables = None
args_count = 0
kwargs_count = 0
def __init__(self, template, handler=None, name=None, defaults=None,
build_only=False, handler_method=None, methods=None,
schemes=None):
"""Initializes this route.
:param template:
A route template to match against the request path. A template
can have variables enclosed by ``<>`` that define a name, a
regular expression or both. Examples:
================= ==================================
Format Example
================= ==================================
``<name>`` ``'/blog/<year>/<month>'``
``<:regex>`` ``'/blog/<:\d{4}>/<:\d{2}>'``
``<name:regex>`` ``'/blog/<year:\d{4}>/<month:\d{2}>'``
================= ==================================
The same template can mix parts with name, regular expression or
both.
If the name is set, the value of the matched regular expression
is passed as keyword argument to the handler. Otherwise it is
passed as positional argument.
If only the name is set, it will match anything except a slash.
So these routes are equivalent::
Route('/<user_id>/settings', handler=SettingsHandler,
name='user-settings')
Route('/<user_id:[^/]+>/settings', handler=SettingsHandler,
name='user-settings')
.. note::
The handler only receives ``*args`` if no named variables are
set. Otherwise, the handler only receives ``**kwargs``. This
allows you to set regular expressions that are not captured:
just mix named and unnamed variables and the handler will
only receive the named ones.
:param handler:
A callable or string in dotted notation to be lazily imported,
e.g., ``'my.module.MyHandler'`` or ``'my.module.my_function'``.
It is possible to define a method if the callable is a class,
separating it by a colon: ``'my.module.MyHandler:my_method'``.
This is a shortcut and has the same effect as defining the
`handler_method` parameter.
:param name:
The name of this route, used to build URIs based on it.
:param defaults:
Default or extra keywords to be returned by this route. Values
also present in the route variables are used to build the URI
when they are missing.
:param build_only:
If True, this route never matches and is used only to build URIs.
:param handler_method:
The name of a custom handler method to be called, in case `handler`
is a class. If not defined, the default behavior is to call the
handler method correspondent to the HTTP request method in lower
case (e.g., `get()`, `post()` etc).
:param methods:
A sequence of HTTP methods. If set, the route will only match if
the request method is allowed.
:param schemes:
A sequence of URI schemes, e.g., ``['http']`` or ``['https']``.
If set, the route will only match requests with these schemes.
"""
super(Route, self).__init__(template, handler=handler, name=name,
build_only=build_only)
self.defaults = defaults or {}
self.methods = methods
self.schemes = schemes
if isinstance(handler, basestring) and ':' in handler:
if handler_method:
raise ValueError(
"If handler_method is defined in a Route, handler "
"can't have a colon (got %r)." % handler)
else:
self.handler, self.handler_method = handler.rsplit(':', 1)
else:
self.handler_method = handler_method
@cached_property
def regex(self):
"""Lazy route template parser."""
        regex, self.reverse_template, self.args_count, self.kwargs_count, \
                self.variables = _parse_route_template(self.template,
                                                       default_suffix='[^/]+')
return regex
def match(self, request):
"""Matches this route against the current request.
:raises:
``exc.HTTPMethodNotAllowed`` if the route defines :attr:`methods`
and the request method isn't allowed.
.. seealso:: :meth:`BaseRoute.match`.
"""
match = self.regex.match(urllib.unquote(request.path))
if not match or self.schemes and request.scheme not in self.schemes:
return None
if self.methods and request.method not in self.methods:
# This will be caught by the router, so routes with different
# methods can be tried.
raise exc.HTTPMethodNotAllowed()
args, kwargs = _get_route_variables(match, self.defaults.copy())
return self, args, kwargs
def build(self, request, args, kwargs):
"""Returns a URI for this route.
.. seealso:: :meth:`Router.build`.
"""
scheme = kwargs.pop('_scheme', None)
netloc = kwargs.pop('_netloc', None)
anchor = kwargs.pop('_fragment', None)
full = kwargs.pop('_full', False) and not scheme and not netloc
if full or scheme or netloc:
netloc = netloc or request.host
scheme = scheme or request.scheme
path, query = self._build(args, kwargs)
return _urlunsplit(scheme, netloc, path, query, anchor)
def _build(self, args, kwargs):
"""Returns the URI path for this route.
:returns:
A tuple ``(path, kwargs)`` with the built URI path and extra
keywords to be used as URI query arguments.
"""
# Access self.regex just to set the lazy properties.
regex = self.regex
variables = self.variables
if self.args_count:
for index, value in enumerate(args):
key = '__%d__' % index
if key in variables:
kwargs[key] = value
values = {}
for name, regex in variables.iteritems():
value = kwargs.pop(name, self.defaults.get(name))
if value is None:
raise KeyError('Missing argument "%s" to build URI.' % \
name.strip('_'))
if not isinstance(value, basestring):
value = str(value)
if not regex.match(value):
                raise ValueError('URI building error: Value "%s" is not '
                    'supported for argument "%s".' % (value, name.strip('_')))
values[name] = value
return (self.reverse_template % values, kwargs)
def __repr__(self):
return '<Route(%r, %r, name=%r, defaults=%r, build_only=%r)>' % \
(self.template, self.handler, self.name, self.defaults,
self.build_only)
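# A hedged example of Route matching and building (the handler path and
# route name below are hypothetical):
#
#     route = Route(r'/blog/<year:\d{4}>/<month:\d{2}>',
#                   handler='blog.MonthHandler', name='blog-month')
#     # A request for '/blog/2012/08' calls the handler with
#     # kwargs {'year': '2012', 'month': '08'} and no positional args.
#     # Building the URI back: uri_for('blog-month', year='2012', month='08')
#     # returns '/blog/2012/08'.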
class BaseHandlerAdapter(object):
"""A basic adapter to dispatch a handler.
This is used when the handler is a simple function: it just calls the
handler and returns the resulted response.
"""
#: The handler to be dispatched.
handler = None
def __init__(self, handler):
self.handler = handler
def __call__(self, request, response):
# The handler only receives *args if no named variables are set.
args, kwargs = request.route_args, request.route_kwargs
if kwargs:
args = ()
return self.handler(request, *args, **kwargs)
class WebappHandlerAdapter(BaseHandlerAdapter):
"""An adapter to dispatch a ``webapp.RequestHandler``.
Like in webapp, the handler is constructed, then ``initialize()`` is
called, then the method corresponding to the HTTP request method is called.
"""
def __call__(self, request, response):
handler = self.handler()
handler.initialize(request, response)
method_name = _normalize_handler_method(request.method)
method = getattr(handler, method_name, None)
if not method:
abort(501)
# The handler only receives *args if no named variables are set.
args, kwargs = request.route_args, request.route_kwargs
if kwargs:
args = ()
try:
method(*args, **kwargs)
except Exception, e:
handler.handle_exception(e, request.app.debug)
class Webapp2HandlerAdapter(BaseHandlerAdapter):
"""An adapter to dispatch a ``webapp2.RequestHandler``.
The handler is constructed then ``dispatch()`` is called.
"""
def __call__(self, request, response):
handler = self.handler(request, response)
return handler.dispatch()
class Router(object):
"""A URI router used to match, dispatch and build URIs."""
#: Class used when the route is set as a tuple.
route_class = SimpleRoute
#: All routes that can be matched.
match_routes = None
#: All routes that can be built.
build_routes = None
#: Handler classes imported lazily.
handlers = None
def __init__(self, routes=None):
"""Initializes the router.
:param routes:
A sequence of :class:`Route` instances or, for simple routes,
tuples ``(regex, handler)``.
"""
self.match_routes = []
self.build_routes = {}
self.handlers = {}
if routes:
for route in routes:
self.add(route)
def add(self, route):
"""Adds a route to this router.
:param route:
A :class:`Route` instance or, for simple routes, a tuple
``(regex, handler)``.
"""
if isinstance(route, tuple):
# Exceptional case: simple routes defined as a tuple.
route = self.route_class(*route)
for r in route.get_match_routes():
self.match_routes.append(r)
for name, r in route.get_build_routes():
self.build_routes[name] = r
def set_matcher(self, func):
"""Sets the function called to match URIs.
:param func:
            A function that receives ``(router, request)`` and returns
            a tuple ``(route, args, kwargs)`` if any route matches, raises
            ``exc.HTTPNotFound`` if no route matched, or raises
            ``exc.HTTPMethodNotAllowed`` if a route matched but the HTTP
            method was not allowed.
"""
# Functions are descriptors, so bind it to this instance with __get__.
self.match = func.__get__(self, self.__class__)
def set_builder(self, func):
"""Sets the function called to build URIs.
:param func:
A function that receives ``(router, request, name, args, kwargs)``
and returns a URI.
"""
self.build = func.__get__(self, self.__class__)
def set_dispatcher(self, func):
"""Sets the function called to dispatch the handler.
:param func:
A function that receives ``(router, request, response)``
and returns the value returned by the dispatched handler.
"""
self.dispatch = func.__get__(self, self.__class__)
def set_adapter(self, func):
"""Sets the function that adapts loaded handlers for dispatching.
:param func:
A function that receives ``(router, handler)`` and returns a
handler callable.
"""
self.adapt = func.__get__(self, self.__class__)
def default_matcher(self, request):
"""Matches all routes against a request object.
The first one that matches is returned.
:param request:
A :class:`Request` instance.
:returns:
A tuple ``(route, args, kwargs)`` if a route matched, or None.
:raises:
``exc.HTTPNotFound`` if no route matched or
``exc.HTTPMethodNotAllowed`` if a route matched but the HTTP
method was not allowed.
"""
method_not_allowed = False
for route in self.match_routes:
try:
match = route.match(request)
if match:
return match
except exc.HTTPMethodNotAllowed:
method_not_allowed = True
if method_not_allowed:
raise exc.HTTPMethodNotAllowed()
raise exc.HTTPNotFound()
def default_builder(self, request, name, args, kwargs):
"""Returns a URI for a named :class:`Route`.
:param request:
The current :class:`Request` object.
:param name:
The route name.
:param args:
Tuple of positional arguments to build the URI. All positional
variables defined in the route must be passed and must conform
to the format set in the route. Extra arguments are ignored.
:param kwargs:
Dictionary of keyword arguments to build the URI. All variables
not set in the route default values must be passed and must
conform to the format set in the route. Extra keywords are
appended as a query string.
A few keywords have special meaning:
- **_full**: If True, builds an absolute URI.
- **_scheme**: URI scheme, e.g., `http` or `https`. If defined,
an absolute URI is always returned.
- **_netloc**: Network location, e.g., `www.google.com`. If
defined, an absolute URI is always returned.
- **_fragment**: If set, appends a fragment (or "anchor") to the
generated URI.
:returns:
An absolute or relative URI.
"""
route = self.build_routes.get(name)
if route is None:
raise KeyError('Route named %r is not defined.' % name)
return route.build(request, args, kwargs)
def default_dispatcher(self, request, response):
"""Dispatches a handler.
:param request:
A :class:`Request` instance.
:param response:
A :class:`Response` instance.
:raises:
``exc.HTTPNotFound`` if no route matched or
``exc.HTTPMethodNotAllowed`` if a route matched but the HTTP
method was not allowed.
:returns:
The returned value from the handler.
"""
route, args, kwargs = rv = self.match(request)
request.route, request.route_args, request.route_kwargs = rv
if route.handler_adapter is None:
handler = route.handler
if isinstance(handler, basestring):
if handler not in self.handlers:
self.handlers[handler] = handler = import_string(handler)
else:
handler = self.handlers[handler]
route.handler_adapter = self.adapt(handler)
return route.handler_adapter(request, response)
def default_adapter(self, handler):
"""Adapts a handler for dispatching.
Because handlers use or implement different dispatching mechanisms,
they can be wrapped to use a unified API for dispatching.
This way webapp2 can support, for example, a :class:`RequestHandler`
class and function views or, for compatibility purposes, a
``webapp.RequestHandler`` class. The adapters follow the same router
dispatching API but dispatch each handler type differently.
:param handler:
A handler callable.
:returns:
A wrapped handler callable.
"""
if inspect.isclass(handler):
if _webapp and issubclass(handler, _webapp.RequestHandler):
# Compatible with webapp.RequestHandler.
adapter = WebappHandlerAdapter
else:
# Default, compatible with webapp2.RequestHandler.
adapter = Webapp2HandlerAdapter
else:
# A "view" function.
adapter = BaseHandlerAdapter
return adapter(handler)
def __repr__(self):
routes = self.match_routes + [v for k, v in \
self.build_routes.iteritems() if v not in self.match_routes]
return '<Router(%r)>' % routes
# Default matcher, builder, dispatcher and adapter.
match = default_matcher
build = default_builder
dispatch = default_dispatcher
adapt = default_adapter
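# Sketch of overriding a Router hook with set_dispatcher() (the logging
# wrapper below is hypothetical):
#
#     def logging_dispatcher(router, request, response):
#         logging.info('dispatching %s', request.path)
#         return router.default_dispatcher(request, response)
#
#     app = WSGIApplication([...])
#     app.router.set_dispatcher(logging_dispatcher)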
class Config(dict):
"""A simple configuration dictionary for the :class:`WSGIApplication`."""
#: Loaded configurations.
loaded = None
def __init__(self, defaults=None):
dict.__init__(self, defaults or ())
self.loaded = []
def load_config(self, key, default_values=None, user_values=None,
required_keys=None):
"""Returns a configuration for a given key.
This can be used by objects that define a default configuration. It
will update the app configuration with the default values the first
time it is requested, and mark the key as loaded.
:param key:
A configuration key.
:param default_values:
Default values defined by a module or class.
:param user_values:
User values, used when an object can be initialized with
configuration. This overrides the app configuration.
:param required_keys:
            Keys that cannot be None.
:raises:
Exception, when a required key is not set or is None.
"""
if key in self.loaded:
config = self[key]
else:
config = dict(default_values or ())
if key in self:
config.update(self[key])
self[key] = config
self.loaded.append(key)
if required_keys and not user_values:
self._validate_required(key, config, required_keys)
if user_values:
config = config.copy()
config.update(user_values)
if required_keys:
self._validate_required(key, config, required_keys)
return config
def _validate_required(self, key, config, required_keys):
missing = [k for k in required_keys if config.get(k) is None]
if missing:
raise Exception(
'Missing configuration keys for %r: %r.' % (key, missing))
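# A minimal Config.load_config sketch (the key and values are hypothetical):
#
#     config = Config({'my.module': {'foo': 'bar'}})
#     cfg = config.load_config('my.module',
#                              default_values={'foo': 'default', 'baz': 1},
#                              required_keys=['foo'])
#     # cfg == {'foo': 'bar', 'baz': 1}: app values override the defaults,
#     # and 'my.module' is now marked as loaded.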
class RequestContext(object):
"""Context for a single request.
The context is responsible for setting and cleaning global variables for
a request.
"""
#: A :class:`WSGIApplication` instance.
app = None
#: WSGI environment dictionary.
environ = None
def __init__(self, app, environ):
"""Initializes the request context.
:param app:
            A :class:`WSGIApplication` instance.
:param environ:
A WSGI environment dictionary.
"""
self.app = app
self.environ = environ
def __enter__(self):
"""Enters the request context.
:returns:
A tuple ``(request, response)``.
"""
# Build request and response.
request = self.app.request_class(self.environ)
response = self.app.response_class()
# Make active app and response available through the request object.
request.app = self.app
request.response = response
# Register global variables.
self.app.set_globals(app=self.app, request=request)
return request, response
def __exit__(self, exc_type, exc_value, traceback):
"""Exits the request context.
        This releases the context locals, except when an exception is caught
        in debug mode. In that case they are kept to be inspected.
"""
if exc_type is None or not self.app.debug:
# Unregister global variables.
self.app.clear_globals()
class WSGIApplication(object):
"""A WSGI-compliant application."""
#: Allowed request methods.
allowed_methods = frozenset(('GET', 'POST', 'HEAD', 'OPTIONS', 'PUT',
'DELETE', 'TRACE'))
#: Class used for the request object.
request_class = Request
#: Class used for the response object.
response_class = Response
#: Class used for the router object.
router_class = Router
#: Class used for the request context object.
request_context_class = RequestContext
#: Class used for the configuration object.
config_class = Config
#: A general purpose flag to indicate development mode: if True, uncaught
#: exceptions are raised instead of using ``HTTPInternalServerError``.
debug = False
#: A :class:`Router` instance with all URIs registered for the application.
router = None
#: A :class:`Config` instance with the application configuration.
config = None
#: A dictionary to register objects used during the app lifetime.
registry = None
#: A dictionary mapping HTTP error codes to callables to handle those
#: HTTP exceptions. See :meth:`handle_exception`.
error_handlers = None
#: Active :class:`WSGIApplication` instance. See :meth:`set_globals`.
app = None
#: Active :class:`Request` instance. See :meth:`set_globals`.
request = None
#: Same as :attr:`app`, for webapp compatibility. See :meth:`set_globals`.
active_instance = None
def __init__(self, routes=None, debug=False, config=None):
"""Initializes the WSGI application.
:param routes:
A sequence of :class:`Route` instances or, for simple routes,
tuples ``(regex, handler)``.
:param debug:
True to enable debug mode, False otherwise.
:param config:
A configuration dictionary for the application.
"""
self.debug = debug
self.registry = {}
self.error_handlers = {}
self.config = self.config_class(config)
self.router = self.router_class(routes)
def set_globals(self, app=None, request=None):
"""Registers the global variables for app and request.
If :mod:`webapp2_extras.local` is available the app and request
class attributes are assigned to a proxy object that returns them
using thread-local, making the application thread-safe. This can also
be used in environments that don't support threading.
If :mod:`webapp2_extras.local` is not available app and request will
be assigned directly as class attributes. This should only be used in
non-threaded environments (e.g., App Engine Python 2.5).
:param app:
A :class:`WSGIApplication` instance.
:param request:
A :class:`Request` instance.
"""
if _local is not None: # pragma: no cover
_local.app = app
_local.request = request
else: # pragma: no cover
WSGIApplication.app = WSGIApplication.active_instance = app
WSGIApplication.request = request
def clear_globals(self):
"""Clears global variables. See :meth:`set_globals`."""
if _local is not None: # pragma: no cover
_local.__release_local__()
else: # pragma: no cover
WSGIApplication.app = WSGIApplication.active_instance = None
WSGIApplication.request = None
def __call__(self, environ, start_response):
"""Called by WSGI when a request comes in.
:param environ:
A WSGI environment.
:param start_response:
A callable accepting a status code, a list of headers and an
optional exception context to start the response.
:returns:
An iterable with the response to return to the client.
"""
with self.request_context_class(self, environ) as (request, response):
try:
if request.method not in self.allowed_methods:
# 501 Not Implemented.
raise exc.HTTPNotImplemented()
rv = self.router.dispatch(request, response)
if rv is not None:
response = rv
except Exception, e:
try:
# Try to handle it with a custom error handler.
rv = self.handle_exception(request, response, e)
if rv is not None:
response = rv
except HTTPException, e:
# Use the HTTP exception as response.
response = e
except Exception, e:
# Error wasn't handled so we have nothing else to do.
response = self._internal_error(e)
try:
return response(environ, start_response)
except Exception, e:
return self._internal_error(e)(environ, start_response)
def _internal_error(self, exception):
"""Last resource error for :meth:`__call__`."""
logging.exception(exception)
if self.debug:
lines = ''.join(traceback.format_exception(*sys.exc_info()))
html = _debug_template % (cgi.escape(lines, quote=True))
return Response(body=html, status=500)
return exc.HTTPInternalServerError()
def handle_exception(self, request, response, e):
"""Handles a uncaught exception occurred in :meth:`__call__`.
Uncaught exceptions can be handled by error handlers registered in
:attr:`error_handlers`. This is a dictionary that maps HTTP status
codes to callables that will handle the corresponding error code.
If the exception is not an ``HTTPException``, the status code 500
is used.
The error handlers receive (request, response, exception) and can be
a callable or a string in dotted notation to be lazily imported.
If no error handler is found, the exception is re-raised.
        Based on an idea from `Flask`_.
:param request:
A :class:`Request` instance.
:param response:
A :class:`Response` instance.
:param e:
The uncaught exception.
:returns:
The returned value from the error handler.
"""
if isinstance(e, HTTPException):
code = e.code
else:
code = 500
handler = self.error_handlers.get(code)
if handler:
if isinstance(handler, basestring):
self.error_handlers[code] = handler = import_string(handler)
return handler(request, response, e)
else:
# Re-raise it to be caught by the WSGI app.
raise
def run(self, bare=False):
"""Runs this WSGI-compliant application in a CGI environment.
This uses functions provided by ``google.appengine.ext.webapp.util``,
if available: ``run_bare_wsgi_app`` and ``run_wsgi_app``.
Otherwise, it uses ``wsgiref.handlers.CGIHandler().run()``.
:param bare:
If True, doesn't add registered WSGI middleware: use
``run_bare_wsgi_app`` instead of ``run_wsgi_app``.
"""
if _webapp_util:
if bare:
_webapp_util.run_bare_wsgi_app(self)
else:
_webapp_util.run_wsgi_app(self)
else: # pragma: no cover
handlers.CGIHandler().run(self)
def get_response(self, *args, **kwargs):
"""Creates a request and returns a response for this app.
This is a convenience for unit testing purposes. It receives
parameters to build a request and calls the application, returning
the resulting response::
class HelloHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello, world!')
            app = webapp2.WSGIApplication([('/', HelloHandler)])
# Test the app, passing parameters to build a request.
response = app.get_response('/')
assert response.status_int == 200
assert response.body == 'Hello, world!'
:param args:
Positional arguments to be passed to ``Request.blank()``.
:param kwargs:
Keyword arguments to be passed to ``Request.blank()``.
:returns:
A :class:`Response` object.
"""
return self.request_class.blank(*args, **kwargs).get_response(self)
_import_string_error = """\
import_string() failed for %r. Possible reasons are:
- missing __init__.py in a package;
- package or module path not included in sys.path;
- duplicated package or module name taking precedence in sys.path;
- missing module, class, function or variable;
Original exception:
%s: %s
Debugged import:
%s"""
class ImportStringError(Exception):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = _import_string_error
name = ''
tracked = []
for part in import_name.split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, imported.__file__))
else:
track = ['- %r found in %r.' % rv for rv in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, exception.__class__.__name__,
str(exception), '\n'.join(track))
break
Exception.__init__(self, msg)
_get_app_error = 'WSGIApplication global variable is not set.'
_get_request_error = 'Request global variable is not set.'
def get_app():
"""Returns the active app instance.
:returns:
A :class:`WSGIApplication` instance.
"""
if _local:
assert getattr(_local, 'app', None) is not None, _get_app_error
else:
assert WSGIApplication.app is not None, _get_app_error
return WSGIApplication.app
def get_request():
"""Returns the active request instance.
:returns:
A :class:`Request` instance.
"""
if _local:
assert getattr(_local, 'request', None) is not None, _get_request_error
else:
assert WSGIApplication.request is not None, _get_request_error
return WSGIApplication.request
def uri_for(_name, _request=None, *args, **kwargs):
"""A standalone uri_for version that can be passed to templates.
.. seealso:: :meth:`Router.build`.
"""
request = _request or get_request()
return request.app.router.build(request, _name, args, kwargs)
def redirect(uri, permanent=False, abort=False, code=None, body=None,
request=None, response=None):
"""Issues an HTTP redirect to the given relative URI.
This won't stop code execution unless **abort** is True. A common
practice is to return when calling this method::
return redirect('/some-path')
:param uri:
A relative or absolute URI (e.g., ``'../flowers.html'``).
:param permanent:
If True, uses a 301 redirect instead of a 302 redirect.
:param abort:
If True, raises an exception to perform the redirect.
:param code:
        The redirect status code. Supported codes are 301, 302, 303, 305,
        and 307. 300 is not supported because it's not a real redirect,
        and 304 is not supported because it's the answer for a request
        with defined ``If-Modified-Since`` headers.
:param body:
Response body, if any.
:param request:
Optional request object. If not set, uses :func:`get_request`.
:param response:
Optional response object. If not set, a new response is created.
:returns:
A :class:`Response` instance.
"""
if uri.startswith(('.', '/')):
request = request or get_request()
uri = str(urlparse.urljoin(request.url, uri))
if code is None:
if permanent:
code = 301
else:
code = 302
assert code in (301, 302, 303, 305, 307), \
'Invalid redirect status code.'
if abort:
_abort(code, headers=[('Location', uri)])
if response is None:
request = request or get_request()
response = request.app.response_class()
else:
response.clear()
response.headers['Location'] = uri
response.status = code
if body is not None:
response.write(body)
return response
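# Typical use inside a handler, returning the response as the docstring
# recommends (OldPageHandler is hypothetical):
#
#     class OldPageHandler(RequestHandler):
#         def get(self):
#             return self.redirect('/new-page', permanent=True)   # 301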
def redirect_to(_name, _permanent=False, _abort=False, _code=None,
_body=None, _request=None, _response=None, *args, **kwargs):
"""Convenience function mixing :func:`redirect` and :func:`uri_for`.
Issues an HTTP redirect to a named URI built using :func:`uri_for`.
:param _name:
The route name to redirect to.
:param args:
Positional arguments to build the URI.
:param kwargs:
Keyword arguments to build the URI.
:returns:
A :class:`Response` instance.
The other arguments are described in :func:`redirect`.
"""
uri = uri_for(_name, _request=_request, *args, **kwargs)
return redirect(uri, permanent=_permanent, abort=_abort, code=_code,
body=_body, request=_request, response=_response)
def abort(code, *args, **kwargs):
"""Raises an ``HTTPException``.
:param code:
An integer that represents a valid HTTP status code.
:param args:
Positional arguments to instantiate the exception.
:param kwargs:
Keyword arguments to instantiate the exception.
"""
cls = exc.status_map.get(code)
if not cls:
raise KeyError('No exception is defined for code %r.' % code)
raise cls(*args, **kwargs)
def import_string(import_name, silent=False):
"""Imports an object based on a string in dotted notation.
Simplified version of the function with same name from `Werkzeug`_.
:param import_name:
String in dotted notation of the object to be imported.
:param silent:
If True, import or attribute errors are ignored and None is returned
instead of raising an exception.
:returns:
The imported object.
"""
import_name = _to_utf8(import_name)
try:
if '.' in import_name:
module, obj = import_name.rsplit('.', 1)
return getattr(__import__(module, None, None, [obj]), obj)
else:
return __import__(import_name)
except (ImportError, AttributeError), e:
if not silent:
raise ImportStringError(import_name, e), None, sys.exc_info()[2]
def _urlunsplit(scheme=None, netloc=None, path=None, query=None,
fragment=None):
"""Like ``urlparse.urlunsplit``, but will escape values and urlencode and
sort query arguments.
:param scheme:
URI scheme, e.g., `http` or `https`.
:param netloc:
Network location, e.g., `localhost:8080` or `www.google.com`.
:param path:
URI path.
:param query:
URI query as an escaped string, or a dictionary or list of key-values
tuples to build a query.
:param fragment:
Fragment identifier, also known as "anchor".
:returns:
An assembled absolute or relative URI.
"""
if not scheme or not netloc:
scheme = None
netloc = None
if path:
path = urllib.quote(_to_utf8(path))
if query and not isinstance(query, basestring):
if isinstance(query, dict):
query = query.iteritems()
# Sort args: commonly needed to build signatures for services.
query = urllib.urlencode(sorted(query))
if fragment:
fragment = urllib.quote(_to_utf8(fragment))
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
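# A small _urlunsplit sketch (all values hypothetical):
#
#     _urlunsplit('http', 'example.com', '/a b', {'y': 2, 'x': 1}, 'frag')
#     # -> 'http://example.com/a%20b?x=1&y=2#frag'
#     # The path is quoted and dict query args are sorted before encoding.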
def _get_handler_methods(handler):
"""Returns a list of HTTP methods supported by a handler.
:param handler:
A :class:`RequestHandler` instance.
:returns:
A list of HTTP methods supported by the handler.
"""
methods = []
for method in get_app().allowed_methods:
if getattr(handler, _normalize_handler_method(method), None):
methods.append(method)
return methods
def _normalize_handler_method(method):
"""Transforms an HTTP method into a valid Python identifier."""
return method.lower().replace('-', '_')
def _to_utf8(value):
"""Encodes a unicode value to UTF-8 if not yet encoded."""
if isinstance(value, str):
return value
return value.encode('utf-8')
def _parse_route_template(template, default_suffix=''):
"""Lazy route template parser."""
variables = {}
reverse_template = pattern = ''
args_count = last = 0
for match in _route_re.finditer(template):
part = template[last:match.start()]
name = match.group(1)
        expr = match.group(2) or default_suffix
last = match.end()
if not name:
name = '__%d__' % args_count
args_count += 1
pattern += '%s(?P<%s>%s)' % (re.escape(part), name, expr)
reverse_template += '%s%%(%s)s' % (part, name)
variables[name] = re.compile('^%s$' % expr)
part = template[last:]
kwargs_count = len(variables) - args_count
reverse_template += part
regex = re.compile('^%s%s$' % (pattern, re.escape(part)))
return regex, reverse_template, args_count, kwargs_count, variables
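# What _parse_route_template() extracts from a template (hypothetical input):
#
#     regex, reverse, nargs, nkwargs, variables = \
#         _parse_route_template(r'/<year:\d{4}>/<:\w+>', default_suffix='[^/]+')
#     # regex matches '/2012/notes' with groups year='2012', __0__='notes';
#     # reverse == '/%(year)s/%(__0__)s', nargs == 1 (one unnamed variable),
#     # nkwargs == 1 (one named variable).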
def _get_route_variables(match, default_kwargs=None):
"""Returns (args, kwargs) for a route match."""
kwargs = default_kwargs or {}
kwargs.update(match.groupdict())
if kwargs:
args = tuple(value[1] for value in sorted(
(int(key[2:-2]), kwargs.pop(key)) for key in kwargs.keys() \
if key.startswith('__') and key.endswith('__')))
else:
args = ()
return args, kwargs
def _set_thread_safe_app():
"""Assigns WSGIApplication globals to a proxy pointing to thread-local."""
if _local is not None: # pragma: no cover
WSGIApplication.app = WSGIApplication.active_instance = _local('app')
WSGIApplication.request = _local('request')
Request.ResponseClass = Response
Response.RequestClass = Request
# Alias.
_abort = abort
# Thread-safety support.
_set_thread_safe_app()
# Defer importing google.appengine.ext.webapp.util until every public symbol
# has been defined since google.appengine.ext.webapp in App Engine Python 2.7
# runtime imports this module to provide its public interface.
try:
from google.appengine.ext.webapp import util as _webapp_util
except ImportError: # pragma: no cover
    pass
# ---- /paopu2.py (rexl2018/run_lz_in_gg) ----
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import re
def __shell__(cmd):
os.system(cmd)
def __shell2__(cmd):
return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('utf-8')
#if not os.path.exists('/usr/lib/x86_64-linux-gnu/libnvidia-opencl.so.1'):
#__shell__('apt update > /dev/null')
__shell__('apt install --no-upgrade qt5-default qt5-qmake > /dev/null')
__shell__('apt install --no-upgrade libboost-all-dev libopenblas-dev opencl-headers zlib1g-dev > /dev/null')
#__shell__('apt -qq install --no-install-recommends nvidia-opencl-icd-384 > /dev/null')
#__shell__('wget http://launchpadlibrarian.net/352962266/nvidia-opencl-icd-384_384.111-0ubuntu0.17.10.1_amd64.deb > /dev/null')
#__shell__('apt install -f ./nvidia-opencl-icd-384_384.111-0ubuntu0.17.10.1_amd64.deb > /dev/null')
__shell__('apt install --no-upgrade nvidia-opencl-dev > /dev/null')
#__shell__('apt --fix-broken install > /dev/null')
__shell__('mkdir -p run_lz_in_gg/networks')
__shell__('rm -rf wtlist')
__shell__('git clone https://github.com/liujn2018/wtlist.git')
__shell__('pip install gdown >/dev/null')
with open('wtlist/current.txt', 'rt') as wt_in:
for line in wt_in.readlines():
if line.startswith('#'):
continue
if len(line)<5:
continue
        dlinfo = __shell2__('gdown --id {0}'.format(line.strip()))
print(dlinfo)
match = re.search(r'To: /content/(.*?)$', dlinfo, re.MULTILINE)
if match:
#print(match.group(1))
filename = match.group(1)
if filename.endswith('.gz'):
__shell__('gunzip -f {0}'.format(filename))
filename = filename[:-3]
else:
pass
__shell__('mv /content/{0} run_lz_in_gg/networks/'.format(filename))
__shell__('cd run_lz_in_gg; mv leelaz_new leelaz; chmod +x leelaz;./autogtp -k sgf')
# ---- /backend/manage.py (crowdbotics-apps/scribe-learn-25489) ----
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scribe_learn_25489.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
# ---- /evaluating_expressions/binary_tree.py (justinBlackford/python) ----
# Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tree import Tree
class BinaryTree(Tree):
"""Abstract base class representing a binary tree structure."""
# --------------------- additional abstract methods ---------------------
def left(self, p):
"""Return a Position representing p's left child.
Return None if p does not have a left child.
"""
raise NotImplementedError('must be implemented by subclass')
def right(self, p):
"""Return a Position representing p's right child.
Return None if p does not have a right child.
"""
raise NotImplementedError('must be implemented by subclass')
# ---------- concrete methods implemented in this class ----------
def sibling(self, p):
"""Return a Position representing p's sibling (or None if no sibling)."""
parent = self.parent(p)
if parent is None: # p must be the root
return None # root has no sibling
else:
if p == self.left(parent):
return self.right(parent) # possibly None
else:
return self.left(parent) # possibly None
def children(self, p):
"""Generate an iteration of Positions representing p's children."""
if self.left(p) is not None:
yield self.left(p)
if self.right(p) is not None:
yield self.right(p)
def inorder(self):
"""Generate an inorder iteration of positions in the tree."""
if not self.is_empty():
for p in self._subtree_inorder(self.root()):
yield p
def _subtree_inorder(self, p):
"""Generate an inorder iteration of positions in subtree rooted at p."""
if self.left(p) is not None: # if left child exists, traverse its subtree
for other in self._subtree_inorder(self.left(p)):
yield other
yield p # visit p between its subtrees
if self.right(p) is not None: # if right child exists, traverse its subtree
for other in self._subtree_inorder(self.right(p)):
yield other
# override inherited version to make inorder the default
def positions(self):
"""Generate an iteration of the tree's positions."""
return self.inorder() # make inorder the default
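# Usage sketch assuming a concrete subclass such as the book's
# LinkedBinaryTree (not defined in this file):
#
#     t = LinkedBinaryTree()
#     root = t._add_root('+')
#     t._add_left(root, '3')
#     t._add_right(root, '4')
#     print([p.element() for p in t.inorder()])    # ['3', '+', '4']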
# ---- /test/functional/rpc_net.py (Catycoin/catycoin, MIT license) ----
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
from test_framework.messages import CAddress, msg_addr, NODE_NETWORK, NODE_WITNESS
class NetTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]]
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
self._test_getpeerinfo()
self._test_getnodeaddresses()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
for before, after in zip(peer_info, peer_info_after_ping):
assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(state=False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
# Wait a bit for all sockets to close
wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
self.nodes[0].setnetworkactive(state=True)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(node=ip_port, command='add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
def _test_getnodeaddresses(self):
self.nodes[0].add_p2p_connection(P2PInterface())
# send some addresses to the node via the p2p message addr
msg = msg_addr()
imported_addrs = []
for i in range(256):
a = "123.123.123.{}".format(i)
imported_addrs.append(a)
addr = CAddress()
addr.time = 100000000
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = a
addr.port = 9771
msg.addrs.append(addr)
self.nodes[0].p2p.send_and_ping(msg)
# obtain addresses via rpc call and check they were ones sent in before
REQUEST_COUNT = 10
node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
assert_equal(len(node_addresses), REQUEST_COUNT)
for a in node_addresses:
assert_greater_than(a["time"], 1527811200) # 1st June 2018
assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
assert a["address"] in imported_addrs
assert_equal(a["port"], 9771)
assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
# addrman's size cannot be known reliably after insertion, as hash collisions may occur
# so only test that requesting a large number of addresses returns less than that
LARGE_REQUEST_COUNT = 10000
node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
if __name__ == '__main__':
NetTest().main()
# ---- /mainapp/models.py (liveseyy/Django3-Ecommerce) ----
from django.db import models
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils import timezone
User = get_user_model()
class Category(models.Model):
"""Категория товара"""
name = models.CharField(max_length=255, verbose_name="Название категории")
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('category_detail', kwargs={'slug': self.slug})
class Product(models.Model):
"""Товар"""
category = models.ForeignKey(Category, verbose_name="Категория", on_delete=models.CASCADE)
title = models.CharField(max_length=255, verbose_name="Наименование")
slug = models.SlugField(unique=True)
image = models.ImageField(verbose_name="Изображение")
description = models.TextField(verbose_name="Описание", null=True)
price = models.DecimalField(max_digits=9, decimal_places=2, verbose_name="Цена")
def __str__(self):
return self.title
def get_model_name(self):
return self.__class__.__name__.lower()
def get_absolute_url(self):
return reverse('product_detail', kwargs={'slug': self.slug})
class CartProduct(models.Model):
"""Модель, связывающая корзины и находящиеся в них товары"""
user = models.ForeignKey('Customer', verbose_name="Покупатель", on_delete=models.CASCADE)
cart = models.ForeignKey('Cart', verbose_name="Корзина", on_delete=models.CASCADE, related_name="related_products")
product = models.ForeignKey(Product, verbose_name="Товар", on_delete=models.CASCADE)
qty = models.PositiveIntegerField(default=1)
final_price = models.DecimalField(max_digits=9, decimal_places=2, verbose_name="Общая цена")
def __str__(self):
return f"Продукт: {self.product.title} (для корзины)"
def save(self, *args, **kwargs):
self.final_price = self.qty * self.product.price
super().save(*args, **kwargs)
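# Hedged example of the save() override above (the objects and price are
# hypothetical):
#     item = CartProduct(user=customer, cart=cart, product=product, qty=3)
#     item.save()   # final_price is recomputed as 3 * product.price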
class Cart(models.Model):
""""Корзина покупателя"""
owner = models.ForeignKey('Customer', verbose_name="Владелец", null=True, on_delete=models.CASCADE)
products = models.ManyToManyField(CartProduct, blank=True, related_name="related_cart")
total_products = models.PositiveIntegerField(default=0)
final_price = models.DecimalField(max_digits=9, default=0, decimal_places=2, verbose_name="Общая цена")
in_order = models.BooleanField(default=False)
for_anonymous_user = models.BooleanField(default=False)
def __str__(self):
return str(self.id)
class Customer(models.Model):
"""Покупатель"""
user = models.ForeignKey(User, verbose_name="Пользователь", on_delete=models.CASCADE)
phone = models.CharField(max_length=20, verbose_name="Номер телефона", null=True, blank=True)
address = models.CharField(max_length=255, verbose_name="Адрес", null=True, blank=True)
orders = models.ManyToManyField('Order', verbose_name='Заказы покупателя', related_name='related_customer')
def __str__(self):
return f"Покупатель: {self.id}"
class Order(models.Model):
STATUS_NEW = 'new'
STATUS_IN_PROGRESS = 'in_progress'
STATUS_READY = 'is_ready'
STATUS_COMPLETED = 'completed'
BUYING_TYPE_SELF = 'self'
BUYING_TYPE_DELIVERY = 'delivery'
STATUS_CHOICES = (
(STATUS_NEW, 'Новый заказ'),
(STATUS_IN_PROGRESS, 'Заказ в обработке'),
(STATUS_READY, 'Заказа готов'),
(STATUS_COMPLETED, 'Заказ выполнен')
)
BUYING_TYPE_CHOICES = (
(BUYING_TYPE_SELF, 'Самовызов'),
(BUYING_TYPE_DELIVERY, 'Доставка')
)
customer = models.ForeignKey(Customer, verbose_name='Покупатель', on_delete=models.CASCADE, related_name='related_orders')
first_name = models.CharField(max_length=255, verbose_name='Имя')
last_name = models.CharField(max_length=255, verbose_name='Фамилия')
phone = models.CharField(max_length=20, verbose_name='Телефон')
cart = models.ForeignKey(Cart, verbose_name='Корзина', on_delete=models.CASCADE, null=True, blank=True)
address = models.CharField(max_length=1024, verbose_name='Адрес', null=True, blank=True)
status = models.CharField(
max_length=100,
verbose_name='Статус заказа',
choices=STATUS_CHOICES,
default=STATUS_NEW
)
buying_type = models.CharField(
max_length=100,
verbose_name='Тип заказа',
choices=BUYING_TYPE_CHOICES,
default=BUYING_TYPE_SELF
)
comment = models.TextField(verbose_name='Комментарий к заказу', null=True, blank=True)
created_at = models.DateTimeField(auto_now=True, verbose_name='Дата создания заказа')
order_date = models.DateField(verbose_name='Дата получения заказа', default=timezone.now)
def __str__(self):
return str(self.id)
# ---- /tests/test_main.py (jblythe/mock-cloud-service) ----
import base64
import copy
import json
import pytest
import main
from unittest.mock import patch
from tests.lib.gcp import firestore, pubsub_v1, bigquery
@pytest.mark.parametrize(
"directory, event_filename, expected_filename, config, event_data_tag", [
("process_event", "1_log.json", "1_expected.json", "InboundConverter", "attributes"),
]
)
def test_process_event(
directory, create_event_from_json, event_data_tag,
event_filename, generate_context,
generate_expected, expected_filename, config, monkeypatch
):
with patch('main.firestore.Client') as mock_firestore:
with patch('main.pubsub_v1.PublisherClient') as mock_pubsub_v1:
with patch("main.CONFIG", config):
                # We have to patch the return value of apply_config_rules because it generates a real
                # datetime that we have no way of mocking at that depth. Instead we rely on the tests of
                # the individual rules, plus a dedicated test of apply_config_rules itself, to verify that part.
with patch("main.apply_config_rules") as config_rules:
config_rules.return_value = generate_expected
monkeypatch.setenv("IBC_STORE_UNMATCHED", "1")
mock_firestore.return_value = firestore.Client()
mock_pubsub_v1.return_value = pubsub_v1.PublisherClient()
assert main.process_event(create_event_from_json, generate_context) == generate_expected
@pytest.mark.parametrize(
"directory, event_filename, expected_filename, config, event_data_tag", [
("handle_failed_event", "failed_event_1.json", "1_expected.json", "WriteInboundConverterErrors", "data"),
]
)
def test_handle_failed_event(directory, create_event_from_json, event_filename,
generate_context, generate_expected, expected_filename,
config, event_data_tag):
with patch('main.bigquery.Client') as mock_bigquery:
with patch.object(mock_bigquery, 'insert_rows_json', None):
with patch("main.CONFIG", config):
mock_bigquery.return_value = bigquery.Client()
                # Decode the payload for reference; this test only smoke-checks that the handler runs without raising.
                data = base64.b64decode(copy.deepcopy(create_event_from_json)['data'].decode('UTF-8')).decode('UTF-8')
                main.handle_failed_event(create_event_from_json, generate_context)
|
[
"jblythe@mac-jblythe.local"
] |
jblythe@mac-jblythe.local
|
77b16b2a511015f60e11f1200dd8a822618ddd7e
|
0ebbbcba0654b01faf153863fa88bbf63f18505c
|
/students/models.py
|
991323b259db64e1319912183ae7c95d2ecc5876
|
[] |
no_license
|
Hitoki/students
|
c1cb399939edd2cd99d79ca8ff1aaedb743268bd
|
6e691bb1aa9f221c2f1189a51794ec361dd84d84
|
refs/heads/master
| 2020-04-09T03:39:34.102825
| 2014-12-12T11:50:05
| 2014-12-12T11:50:05
| 27,124,048
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from django.db import models
from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver
class Student(models.Model):
first_name = models.CharField(max_length=255)
second_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
birth_date = models.DateField(auto_now=False)
student_card = models.IntegerField()
group = models.ForeignKey('StudentGroup')
def __unicode__(self):
return self.second_name
class Meta:
ordering = ['last_name']
def student_name(self):
return "{} {} {}".format(self.first_name, self.last_name, self.second_name)
class StudentGroup(models.Model):
title = models.CharField(max_length=255)
steward = models.ForeignKey(Student, null=True, blank=True)
def __unicode__(self):
return self.title
class Log(models.Model):
add_date = models.DateTimeField(auto_now=True)
log = models.CharField(max_length=255)
model = models.CharField(max_length=125, null=True, blank=True)
def __unicode__(self):
return str(self.add_date)
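# Audit trail: the signal receivers below write a Log row for every save/delete on Student or StudentGroup.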
@receiver(pre_save, sender=Student)
@receiver(pre_save, sender=StudentGroup)
def stud_and_group_saver(sender, instance, **kwargs):
saver = Log()
saver.model = str(sender._meta.model_name)
try:
sender.objects.get(pk=instance.pk)
saver.log = 'edit'
except sender.DoesNotExist:
saver.log = 'create'
saver.log = 'Object {} {}'.format(instance, saver.log)
saver.save()
@receiver(pre_delete, sender=Student)
@receiver(pre_delete, sender=StudentGroup)
def stud_and_group_delete(sender, instance, **kwargs):
deleter = Log()
deleter.model = str(sender._meta.model_name)
deleter.log = 'Object {} was deleted'.format(instance)
deleter.save()
|
[
"hitoki@mail.ru"
] |
hitoki@mail.ru
|
44c9e72b343ce099691f1a69a3ff2cd9a10fb4fd
|
f945c8ba9d711cc9242432040b74d0655bf00a45
|
/work.py
|
038043fe4304892e7de6f6ad37a3ae8ea5ccfd9e
|
[] |
no_license
|
Wynnlin329/Pyetl
|
44ab0fd636ef3103bc0869b113738d9d0f38d607
|
bd226cb04db30c48384ba029ebdafda196f3a4ba
|
refs/heads/master
| 2022-10-29T17:51:07.431429
| 2019-11-21T09:04:42
| 2019-11-21T09:04:42
| 223,125,734
| 0
| 1
| null | 2022-10-15T11:45:37
| 2019-11-21T08:31:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,536
|
py
|
import requests
from bs4 import BeautifulSoup
import os
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
headers = {'User-Agent':user_agent}
path = r'./res_gossiping_work'
if not os.path.exists(path):
os.mkdir(path)
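# Crawl two index pages (39291-39292) of the PTT Gossiping board; the over18 cookie bypasses the age gate.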
page = 39291
while page < 39293:
url = 'https://www.ptt.cc/bbs/Gossiping/index%s.html'%(page)
res=requests.get(url,headers=headers,cookies={'over18':'1'})
soup = BeautifulSoup(res.text,'html.parser')
title = soup.select('div[class="title"]')
    for title_all in range(len(title)):
        try:
            print(title[title_all].a.text, '\n', 'https://www.ptt.cc' + title[title_all].a['href'])
            title_url = 'https://www.ptt.cc' + title[title_all].a['href']
        except AttributeError as e:
            # Deleted posts have no <a> tag; skip them instead of requesting a stale title_url.
            print('---------------------------')
            print(e.args)
            print('---------------------------')
            continue
        article_res = requests.get(title_url, headers=headers, cookies={'over18': '1'})
        soup_article = BeautifulSoup(article_res.text, 'html.parser')
        title_content = soup_article.select('div[id="main-content"]')
        article_str = title_content[0].text.split('--')[0]  # keep the text before the signature separator
        # Tally push (up) and boo (down) votes from the comment section.
        push_up = 0
        push_dw = 0
        score = 0
push_info_list = soup_article.select('div[class="push"] span')
for info in push_info_list:
if '推' in info.text:
push_up +=1
if '噓' in info.text:
push_dw +=1
score = push_up - push_dw
article_str += '--split--' + '\n'
article_str += '推' + str(push_up) + '\n'
article_str += '噓' + str(push_dw) + '\n'
article_str += '分數' + str(score) + '\n'
try:
with open('%s/%s.txt'%(path,title[title_all].a.text.replace('?',' ')),'w',encoding='utf-8') as f:
f.write(article_str)
except AttributeError as e:
print(e.args)
print(article_str)
page +=1
|
[
"summer54a21@gmail.com"
] |
summer54a21@gmail.com
|
7826fe2a935f7f0f1f2e48edeb9a89d23ecd7018
|
23e868036a088139e968b55f80283a9f7c996f8f
|
/test/functional/feature_settings.py
|
d996bab72e3433195ee0c19f48fd6710d8a61e51
|
[
"MIT"
] |
permissive
|
hiphopcoin24/hiphopcoin24
|
ec972602d502df0d131818eae7f903e3acc7e550
|
09b780546ba9e28b452a8641863aafa90def40d1
|
refs/heads/master
| 2023-05-14T18:44:02.844736
| 2021-06-09T08:51:27
| 2021-06-09T08:51:27
| 369,224,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,824
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Hiphopcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import json
from pathlib import Path
from test_framework.test_framework import HiphopcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class SettingsTest(HiphopcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.wallet_names = []
def run_test(self):
node, = self.nodes
settings = Path(node.datadir, self.chain, "settings.json")
conf = Path(node.datadir, "hiphopcoin.conf")
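        # settings.json lives in the chain-specific datadir; the config file sits at the datadir root.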
# Assert empty settings file was created
self.stop_node(0)
with settings.open() as fp:
assert_equal(json.load(fp), {})
# Assert settings are parsed and logged
with settings.open("w") as fp:
json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp)
with node.assert_debug_log(expected_msgs=[
'Ignoring unknown rw_settings value bool',
'Ignoring unknown rw_settings value list',
'Ignoring unknown rw_settings value null',
'Ignoring unknown rw_settings value num',
'Ignoring unknown rw_settings value string',
'Setting file arg: string = "string"',
'Setting file arg: num = 5',
'Setting file arg: bool = true',
'Setting file arg: null = null',
'Setting file arg: list = [6,7]',
]):
self.start_node(0)
self.stop_node(0)
# Assert settings are unchanged after shutdown
with settings.open() as fp:
assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]})
# Test invalid json
with settings.open("w") as fp:
fp.write("invalid json")
node.assert_start_raises_init_error(expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid json object
with settings.open("w") as fp:
fp.write('"string"')
node.assert_start_raises_init_error(expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file containing duplicate keys
with settings.open("w") as fp:
fp.write('{"key": 1, "key": 2}')
node.assert_start_raises_init_error(expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file is ignored with command line -nosettings
with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']):
self.start_node(0, extra_args=["-nosettings"])
self.stop_node(0)
# Test invalid settings file is ignored with config file -nosettings
        with conf.open('a') as conf_file:  # avoid shadowing the Path object
            conf_file.write('nosettings=1\n')
with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']):
self.start_node(0)
self.stop_node(0)
# Test alternate settings path
altsettings = Path(node.datadir, "altsettings.json")
with altsettings.open("w") as fp:
fp.write('{"key": "value"}')
with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']):
self.start_node(0, extra_args=["-settings={}".format(altsettings)])
self.stop_node(0)
if __name__ == '__main__':
SettingsTest().main()
|
[
"hiphopcoin24@gmail.com"
] |
hiphopcoin24@gmail.com
|
e8e863017e1d3e14b898fc2f86ad155b24a033c5
|
d9eda3d6f14bd35229d25118493a1d8157bdcb8b
|
/Interview/Code_Snippets/python/regex_excercise_2.py
|
98c56e10fa644f86ad4c3a19d9649ab93b0f2e9b
|
[] |
no_license
|
marannan/repo_1
|
070618cafd3762733f9fca11ae866988b2fac5a9
|
4667a2d761423368675bd463c888918d2cdaf828
|
refs/heads/master
| 2021-01-17T17:45:06.869090
| 2016-07-06T19:52:58
| 2016-07-06T19:52:58
| 47,296,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
class Solution:
    def is_match(self, str_1, str_2):
        # Handles only the trivial cases: exact match, ".*", and "." against a single character.
        if str_1 == str_2:
            return True
        if str_2 == ".*":
            return True
        if str_2 == "." and len(str_1) == 1:
            return True
        return False
if __name__ == "__main__":
    solution = Solution()
    print(solution.is_match("aa", "aa"))
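    # A few more illustrative checks (expected results assume this simplified matcher):
    print(solution.is_match("ab", ".*"))  # True: ".*" matches anything
    print(solution.is_match("a", "."))    # True: "." matches any single character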
|
[
"marannan@wisc.edu"
] |
marannan@wisc.edu
|
17c088fb564f316fa829691f859c32a64b526e32
|
77d6f3a8d4935ca3fff581e0bb3f3f14b7db2e47
|
/workbench/projects/migrations/0001_initial.py
|
21d9b9413bbff30dc693b9ec493457888ca8f873
|
[
"MIT"
] |
permissive
|
jayvdb/workbench
|
192705cf03eaaf96627b1bedde6c7eea6cf54ca7
|
a591c8a8aa8266e31095fea23f3d541cee68a7f3
|
refs/heads/main
| 2023-02-21T16:05:01.855731
| 2021-01-24T16:44:00
| 2021-01-24T16:44:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,000
|
py
|
# Generated by Django 2.1.7 on 2019-03-04 21:39
from decimal import Decimal
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("contacts", "0001_initial"),
("offers", "0001_initial"),
("services", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="Cost",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200, verbose_name="title")),
(
"cost",
models.DecimalField(
decimal_places=2,
default=None,
max_digits=10,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="cost",
),
),
(
"third_party_costs",
models.DecimalField(
blank=True,
decimal_places=2,
default=None,
help_text="Total incl. tax for third-party services.",
max_digits=10,
null=True,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="third party costs",
),
),
(
"position",
models.PositiveIntegerField(default=0, verbose_name="position"),
),
],
options={
"verbose_name": "cost",
"verbose_name_plural": "costs",
"ordering": ("position", "pk"),
},
),
migrations.CreateModel(
name="Effort",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200, verbose_name="title")),
(
"billing_per_hour",
models.DecimalField(
decimal_places=2,
default=None,
max_digits=10,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="billing per hour",
),
),
(
"hours",
models.DecimalField(
decimal_places=1,
max_digits=4,
validators=[
django.core.validators.MinValueValidator(Decimal("0.1"))
],
verbose_name="hours",
),
),
],
options={
"verbose_name": "effort",
"verbose_name_plural": "efforts",
"ordering": ("service_type",),
},
),
migrations.CreateModel(
name="Project",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200, verbose_name="title")),
(
"description",
models.TextField(
blank=True,
verbose_name="project description",
help_text="Do not use this for the offer description. You can add the offer description later.",
),
),
(
"status",
models.PositiveIntegerField(
choices=[
(10, "Acquisition"),
(20, "Work in progress"),
(30, "Finished"),
(40, "Declined"),
],
default=10,
verbose_name="status",
),
),
(
"created_at",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="created at"
),
),
(
"invoicing",
models.BooleanField(
default=True,
help_text="This project is eligible for invoicing.",
verbose_name="invoicing",
),
),
(
"maintenance",
models.BooleanField(
default=False,
help_text="This project is used for maintenance work.",
verbose_name="maintenance",
),
),
("_code", models.IntegerField(verbose_name="code")),
(
"contact",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="contacts.Person",
verbose_name="contact",
),
),
(
"customer",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to="contacts.Organization",
verbose_name="customer",
),
),
(
"owned_by",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
verbose_name="responsible",
),
),
],
options={
"verbose_name": "project",
"verbose_name_plural": "projects",
"ordering": ("-id",),
},
),
migrations.CreateModel(
name="Service",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created_at",
models.DateTimeField(
default=django.utils.timezone.now, verbose_name="created at"
),
),
("title", models.CharField(max_length=200, verbose_name="title")),
(
"description",
models.TextField(blank=True, verbose_name="description"),
),
(
"position",
models.PositiveIntegerField(default=0, verbose_name="position"),
),
(
"effort_hours",
models.DecimalField(
decimal_places=1,
default=0,
max_digits=4,
validators=[
django.core.validators.MinValueValidator(Decimal("0.1"))
],
verbose_name="effort hours",
),
),
(
"cost",
models.DecimalField(
decimal_places=2,
default=0,
max_digits=10,
validators=[django.core.validators.MinValueValidator(0)],
verbose_name="cost",
),
),
(
"offer",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="services",
to="offers.Offer",
verbose_name="offer",
),
),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="services",
to="projects.Project",
verbose_name="project",
),
),
],
options={
"verbose_name": "service",
"verbose_name_plural": "services",
"ordering": ("position", "created_at"),
},
),
migrations.AddField(
model_name="effort",
name="service",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="efforts",
to="projects.Service",
verbose_name="service",
),
),
migrations.AddField(
model_name="effort",
name="service_type",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="services.ServiceType",
verbose_name="service type",
),
),
migrations.AddField(
model_name="cost",
name="service",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="costs",
to="projects.Service",
verbose_name="service",
),
),
migrations.AlterUniqueTogether(
name="effort", unique_together={("service", "service_type")}
),
]
|
[
"mk@feinheit.ch"
] |
mk@feinheit.ch
|
51845ec7d6a9fc92b999c2b3fa61e91ba56bb685
|
74c88de08bd96529bcfab113f1bdb73e56a6f76c
|
/on_hpc/scripts/hpc_queuer
|
49d09aa58759dcd27feeeb831e6ff2c83135cf4a
|
[
"Apache-2.0"
] |
permissive
|
AudiovisualMetadataPlatform/hpc_batch
|
18f83ada8629637b3d371f916a882219189f8d47
|
0de95813fd5714a285dc8e925ecb520f82649219
|
refs/heads/master
| 2023-03-18T10:27:53.016842
| 2021-03-08T14:40:15
| 2021-03-08T14:40:15
| 286,835,605
| 0
| 0
|
Apache-2.0
| 2021-02-19T15:16:54
| 2020-08-11T19:53:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,921
|
#!/usr/bin/env python3
# Queue the hpc_runner script if necessary
import argparse
from pathlib import Path
import logging
import subprocess
import sys
import getpass
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--debug", default=False, action="store_true", help="Turn on debugging")
parser.add_argument("--partition", default=None, help="HPC Partition")
parser.add_argument("--email", default=None, help="HPC Status Email")
parser.add_argument("--memory", default=32, type=int, help="Memory allocation")
parser.add_argument("--gpu", default=None, help="GPU Resource")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
stream=sys.stderr,
format="%(asctime)s %(levelname)s %(message)s")
workspace = Path(f"{sys.path[0]}/../workspace")
if not workspace.exists():
logging.error("Workspace doesn't exist!")
exit(1)
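    # Each job is a subdirectory of workspace/ containing a job.sh; a finished.out file marks completion.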
# check if any jobs are currently outstanding
active = 0
for job in workspace.iterdir():
if not job.is_dir() or (job / "finished.out").exists():
# skip non-directories and those which are finished.
continue
if (job / "job.sh").exists():
active += 1
logging.debug(f"There are {active} active jobs waiting")
if not active:
exit(0)
# we need to handle those, let's see if the runner is already queued.
proc = subprocess.run(['squeue', '-u', getpass.getuser(), '--name', 'hpc_runner'],
check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if len(proc.stdout.splitlines()) > 1:
logging.debug("hpc_runner is already queued or running")
exit(0)
logging.debug("hpc_runner isn't in slurm and there are waiting jobs")
# create a shell script which will call the hpc_runner
trampoline = Path(f"{sys.path[0]}/../workspace/hpc_runner.sh")
logging.debug(f"hpc_runner trampoline is at {trampoline}")
with open(trampoline, "w") as f:
f.write("#!/bin/bash\n")
if args.debug:
f.write(f"{sys.path[0]}/hpc_runner --debug\n")
else:
f.write(f"{sys.path[0]}/hpc_runner\n")
trampoline.chmod(0o755)
# submit the trampoline into slurm.
cmd = ['sbatch', f'--mem={args.memory}G',
"-e", f"{workspace}/hpc_runner_stderr.txt",
"-o", f"{workspace}/hpc_runner_stdout.txt",
'-J', 'hpc_runner',
'-t', '480']
if args.email:
cmd.append('--mail-type=ALL')
cmd.append(f'--mail-user={args.email}')
if args.partition:
cmd.append("-p")
cmd.append(args.partition)
if args.gpu:
cmd.append("--gres")
cmd.append(args.gpu)
    cmd.append(str(trampoline))
    subprocess.run(cmd, check=True)  # raise if the sbatch submission fails
if __name__ == "__main__":
main()
|
[
"bdwheele@indiana.edu"
] |
bdwheele@indiana.edu
|
|
605e5d03ee299aca753ba487a894fa465e01cc2f
|
c18f04bc63abe1f17c5dc4d966aa3967d57c4936
|
/agenda/webservice/serializers.py
|
704d0af1d1c0725a7cd6fd9317fd941bef4d1286
|
[] |
no_license
|
Rahmon/Lied-agenda
|
bcba920c43261a43319c10f10d75ee2403a3a1a5
|
03b4833d48cbcc1725dd85acd77cd857fe83a8c1
|
refs/heads/master
| 2021-01-13T08:02:40.738554
| 2016-09-28T22:05:43
| 2016-09-28T22:05:43
| 69,505,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
from agenda.webservice.models import Contato
from rest_framework import serializers
class ContatoSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Contato
fields = ('id', 'nome', 'apelido', 'idade', 'telefone')
|
[
"ramondead@gmail.com"
] |
ramondead@gmail.com
|
a173c7a636c54784967ecf2f9e47658e475fcf92
|
6c7f22d553051c6a5ed5391488593392ac4268a8
|
/test_bus.py
|
f2fdb4a3d0b38ebb604d00c4aa39dc84453ee6a8
|
[] |
no_license
|
darksv/dashboard
|
d6d27bda1581768f8c1f0cbcabae8e01c758f92a
|
f320af04759cdca97e81f08c8a50e6ac326fc3f8
|
refs/heads/master
| 2021-01-22T21:21:52.750181
| 2018-01-27T13:19:00
| 2018-01-27T13:19:00
| 85,413,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
import pytest
from bus import Bus
@pytest.mark.asyncio
async def test_emit():
bus = Bus()
handled = 0
@bus.on('tick')
async def test(event, data):
nonlocal handled
handled += 1
await bus.emit('tick')
await bus.emit('tick')
assert handled == 2
|
[
"darek969-12@o2.pl"
] |
darek969-12@o2.pl
|
85f53434661d94c4034ad9c4efa982e8d5885ea7
|
638f3a8eb1923043778315e9cc36ba590ba751a2
|
/models/user_models.py
|
d5f0e575408911d69b55fbfa2378db0a60670231
|
[
"MIT"
] |
permissive
|
htrismicristo/finanzapp_api
|
8a6e7575cb2e049bc5718e38f9e9948da8a1d019
|
3d3ed061256c1908664f3d20dd8fe67bd765e9a1
|
refs/heads/main
| 2023-02-02T19:44:58.718221
| 2020-12-20T15:47:38
| 2020-12-20T15:47:38
| 320,917,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from pydantic import BaseModel
class UserIn(BaseModel):
username: str
password: str
    budget: int = 0
# class UserSign(BaseModel):
# username: str
# password: str
# budget: int
class UserOut(BaseModel):
username: str
budget: str
class Config:
orm_mode = True
|
[
"hermes.romero@unipamplona.edu.co"
] |
hermes.romero@unipamplona.edu.co
|
4d4e95b09d324d42793c9a5a0914a2509f411127
|
aa3c9a58978a7991a1802b661cf31be38ba27532
|
/compecon/chebyc.py
|
9b45d82c2ee85ab7afd90e61a7f506a205eb7a52
|
[
"MIT"
] |
permissive
|
snowdj/CompEcon-python
|
65cd00f30d0f4d1b3036006991f1565416566784
|
883ac75750800e2792218a7b13f97e681498a389
|
refs/heads/master
| 2021-06-24T03:21:23.335960
| 2020-11-08T01:07:56
| 2020-11-08T01:07:56
| 92,573,061
| 0
| 0
|
MIT
| 2020-11-08T01:07:57
| 2017-05-27T05:23:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
# __author__ = 'Randall'
# import numpy as np
# from numba import jit, float64, void
#
#
# ''' Usual Python version '''
#
# def cheby(z, bas):
# #bas[0] = 1
# bas[1] = z
# z *= 2
# for k in range(2, bas.shape[0]):
# bas[k] = np.multiply(z, bas[k - 1]) - bas[k - 2]
# return None
#
# ''' Numba optimized version'''
#
# @jit(void(float64[:], float64[:, :]), nopython=True)
# def cheby_numba(z, bas):
# for node in range(z.size):
# bas[0, node] = 1
# bas[1, node] = z[node]
# z[node] *= 2
# for k in range(2, bas.shape[0]):  # start at 2: the recurrence reads bas[k-1] and bas[k-2]
# bas[k, node] = z[node] * bas[k - 1, node] - bas[k - 2, node]
# return None
#
#
# def test(func, n):
# zval = np.linspace(-1.0, 1.0, n)
# npolys = n
# phi = np.ones((npolys, n))
# func(zval, phi)
# return None
#
# test(cheby, 18)
# test(cheby_numba, 18)
|
[
"romero-aguilar.1@osu.edu"
] |
romero-aguilar.1@osu.edu
|
98626c5f1a14fc4da3c9d037c2a8bd90e503fb5c
|
71449daa1820c4b883a6a053f2da5fdc5b6ae178
|
/ship.py
|
bb81c86712fc300b6addf04e689c5bb62157ea18
|
[] |
no_license
|
yytql/Game
|
eddab00218c392fd68b5040518c0d7aed9f2460e
|
0c284888adaa8a7ba4cddd7012dcee5be2094953
|
refs/heads/master
| 2020-07-25T05:34:33.972169
| 2019-09-13T02:08:31
| 2019-09-13T02:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
import pygame
class Ship(object):
    # Initialize the Ship class
def __init__(self, screen, ai_settings):
self.screen = screen
self.ai_settings = ai_settings
        # Load the ship image
self.image = pygame.image.load('image/spacecraft.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
        # Place the ship at the bottom center of the screen
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
        # Initialize movement flags
self.moving_right = False
self.moving_left = False
def blitme(self):
        # Draw the ship
self.screen.blit(self.image, self.rect)
    # Movement handling
def move(self):
if self.moving_right and self.rect.centerx < self.ai_settings.screen_width:
self.rect.centerx += self.ai_settings.ship_speed
elif self.moving_left and self.rect.centerx > 0:
self.rect.centerx -= self.ai_settings.ship_speed
|
[
"2907047490@qq.com"
] |
2907047490@qq.com
|
33834dd13eb2f3936555afb18ffb370a0ec92dfe
|
b287837b42e00dc3b6b3b18f47c118f0e90136db
|
/W15_FDD_dcgan/dcgan.py
|
57e0a564b3fc44cd95cc906c3d48797b8d9f9080
|
[] |
no_license
|
GoareguerLucas/GAN-SDPC
|
5adb0be8a9a3d52f27aad5f8429a5c498d6c5ec1
|
3142daa400502b7c3af73ffe6c00b4fe5a1531ec
|
refs/heads/master
| 2023-03-31T06:21:59.340058
| 2019-11-20T10:25:15
| 2019-11-20T10:25:15
| 178,813,378
| 2
| 0
| null | 2023-03-24T22:48:50
| 2019-04-01T07:56:20
|
Python
|
UTF-8
|
Python
| false
| false
| 13,821
|
py
|
import argparse
import os
import numpy as np
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import sys
import matplotlib.pyplot as plt
import time
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--runs_path", type=str, default='FDD/200e64i64b/',
help="Dossier de stockage des résultats sous la forme : Experience_names/parameters/")
parser.add_argument("-e", "--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("--lrD", type=float, default=0.00004, help="adam: learning rate for D")
parser.add_argument("--lrG", type=float, default=0.0004, help="adam: learning rate for G")
parser.add_argument("--eps", type=float, default=0.1, help="batchnorm: espilon for numerical stability")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--latent_dim", type=int, default=6, help="dimensionality of the latent space")
parser.add_argument("-i", "--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("-s", "--sample_interval", type=int, default=10, help="interval between image sampling")
parser.add_argument("--sample_path", type=str, default='images')
parser.add_argument("-m", "--model_save_interval", type=int, default=2500,
help="interval between image sampling. If model_save_interval > n_epochs, no save")
parser.add_argument('--model_save_path', type=str, default='models')
parser.add_argument('--load_model', action="store_true",
help="Load model present in model_save_path/Last_*.pt, if present.")
parser.add_argument("-d", "--depth", action="store_true",
help="Utiliser si utils.py et SimpsonsDataset.py sont deux dossier au dessus.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Afficher des informations complémentaire.")
opt = parser.parse_args()
print(opt)
# Particular import
depth = ""
if opt.depth:
depth = "../"
sys.path.append(depth + "../") # ../../GAN-SDPC/
from SimpsonsDataset import SimpsonsDataset, FastSimpsonsDataset
from utils import *
from plot import *
# Handle the time tag
try:
tag = datetime.datetime.now().isoformat(sep='_', timespec='seconds')
except TypeError:
# Python 3.5 and below
# 'timespec' is an invalid keyword argument for this function
tag = datetime.datetime.now().replace(microsecond=0).isoformat(sep='_')
tag = tag.replace(':','.')
cuda = True if torch.cuda.is_available() else False
class Generator(nn.Module):
def __init__(self, verbose=opt.verbose):
super(Generator, self).__init__()
def generator_block(in_filters, out_filters, kernel=4, stride=2):
#block = [nn.Conv2d(in_filters, out_filters, kernel, stride=stride, padding=2), nn.Upsample(scale_factor=2), nn.BatchNorm2d(out_filters, opt.eps), nn.LeakyReLU(0.2, inplace=True)]
block = [nn.ConvTranspose2d(in_filters, out_filters, kernel, stride=stride, padding=1),
nn.BatchNorm2d(out_filters, opt.eps), nn.LeakyReLU(0.2, inplace=True)]
return block
self.verbose = verbose
self.max_filters = 512
self.init_size = opt.img_size // 8
self.l1 = nn.Sequential(nn.Linear(opt.latent_dim, self.max_filters *
self.init_size ** 2), nn.LeakyReLU(0.2, inplace=True))
self.conv1 = nn.Sequential(*generator_block(self.max_filters, 256),)
self.conv2 = nn.Sequential(*generator_block(256, 128),)
self.conv3 = nn.Sequential(*generator_block(128, 64),)
self.conv_blocks = nn.Sequential(
nn.Conv2d(64, opt.channels, 3, stride=1, padding=1),
nn.Tanh(),
)
def forward(self, z):
if self.verbose:
print("G")
# Dim : opt.latent_dim
out = self.l1(z)
if self.verbose:
print("l1 out : ", out.shape)
out = out.view(out.shape[0], self.max_filters, self.init_size, self.init_size)
# Dim : (self.max_filters, opt.img_size/8, opt.img_size/8)
if self.verbose:
print("View out : ", out.shape)
out = self.conv1(out)
# Dim : (self.max_filters/2, opt.img_size/4, opt.img_size/4)
if self.verbose:
print("Conv1 out : ", out.shape)
out = self.conv2(out)
# Dim : (self.max_filters/4, opt.img_size/2, opt.img_size/2)
if self.verbose:
print("Conv2 out : ", out.shape)
out = self.conv3(out)
# Dim : (self.max_filters/8, opt.img_size, opt.img_size)
if self.verbose:
print("Conv3 out : ", out.shape)
img = self.conv_blocks(out)
# Dim : (opt.chanels, opt.img_size, opt.img_size)
if self.verbose:
print("Channels Conv out : ", img.shape)
return img
def _name(self):
return "Generator"
class Discriminator(nn.Module):
def __init__(self, verbose=opt.verbose):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True, kernel=4, stride=2, padding=1):
block = [nn.Conv2d(in_filters, out_filters, kernel, stride, padding=padding),
nn.LeakyReLU(0.2, inplace=True)] # , nn.Dropout2d(0.25)
if bn:
block.append(nn.BatchNorm2d(out_filters, opt.eps))
return block
self.max_filters = 512
self.verbose = verbose
self.conv1 = nn.Sequential(*discriminator_block(opt.channels, 64, bn=False),)
self.conv2 = nn.Sequential(*discriminator_block(64, 128),)
self.conv3 = nn.Sequential(*discriminator_block(128, 256, stride=1, padding=2),)
self.conv4 = nn.Sequential(*discriminator_block(256, self.max_filters),)
# The height and width of downsampled image
self.init_size = opt.img_size // 8
self.adv_layer = nn.Sequential(nn.Linear(self.max_filters * self.init_size ** 2, 1)) # , nn.Sigmoid()
def forward(self, img):
if self.verbose:
print("D")
# Dim : (opt.chanels, opt.img_size, opt.img_size)
if self.verbose:
print("Image shape : ", img.shape)
out = self.conv1(img)
# Dim : (self.max_filters/8, opt.img_size/2, opt.img_size/2)
if self.verbose:
print("Conv1 out : ", out.shape)
out = self.conv2(out)
# Dim : (self.max_filters/4, opt.img_size/4, opt.img_size/4)
if self.verbose:
print("Conv2 out : ", out.shape)
out = self.conv3(out)
# Dim : (self.max_filters/2, opt.img_size/4, opt.img_size/4)
if self.verbose:
print("Conv3 out : ", out.shape)
out = self.conv4(out)
# Dim : (self.max_filters, opt.img_size/8, opt.img_size/8)
if self.verbose:
print("Conv4 out : ", out.shape)
out = out.view(out.shape[0], -1)
if self.verbose:
print("View out : ", out.shape)
validity = self.adv_layer(out)
# Dim : (1)
if self.verbose:
print("Val out : ", validity.shape)
return validity
def _name(self):
return "Discriminator"
# Loss function
adversarial_loss = torch.nn.BCEWithLogitsLoss()
sigmoid = nn.Sigmoid()
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
print_network(generator)
print_network(discriminator)
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
dataloader = load_data(depth + "../../FDD/", opt.img_size, opt.batch_size, rand_hflip=False, FDD=True)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lrG, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lrD, betas=(opt.b1, opt.b2))
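# Note the asymmetric learning rates (lrD = 4e-5 vs lrG = 4e-4), presumably to keep D from overpowering G.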
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Load models
# ----------
start_epoch = 1
if opt.load_model:
start_epoch = load_models(discriminator, optimizer_D, generator, optimizer_G, opt.n_epochs, opt.model_save_path)
# ----------
# Tensorboard
# ----------
path_data1 = depth + "../runs/" + opt.runs_path
path_data2 = depth + "../runs/" + opt.runs_path + tag[:-1] + "/"
# Runs are saved in a "runs" folder at the project root, inside an opt.runs_path subfolder.
os.makedirs(path_data1, exist_ok=True)
os.makedirs(path_data2, exist_ok=True)
writer = SummaryWriter(log_dir=path_data2)
# ----------
# Training
# ----------
nb_batch = len(dataloader)
nb_epochs = 1 + opt.n_epochs - start_epoch
hist = init_hist(nb_epochs, nb_batch)
# Fixed z vector used for sampling
fixed_noise = Variable(Tensor(np.random.normal(0, 1, (24, opt.latent_dim))))
t_total = time.time()
for j, epoch in enumerate(range(start_epoch, opt.n_epochs + 1)):
t_epoch = time.time()
for i, (imgs, _, _) in enumerate(dataloader):
t_batch = time.time()
# Adversarial ground truths
valid_smooth = Variable(Tensor(imgs.shape[0], 1).fill_(
float(np.random.uniform(0.9, 1.0, 1))), requires_grad=False)
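        # (One-sided label smoothing: real targets are drawn from [0.9, 1.0] to regularize D.)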
valid = Variable(Tensor(imgs.size(0), 1).fill_(1), requires_grad=False)
fake = Variable(Tensor(imgs.size(0), 1).fill_(0), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(Tensor))
# Generate a batch of images
z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
gen_imgs = generator(z)
#print("Max : ",gen_imgs.max()," Min :",gen_imgs.min())
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Real batch
        # Discriminator decision
d_x = discriminator(real_imgs)
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(d_x, valid_smooth)
# Backward
real_loss.backward()
# Fake batch
        # Discriminator decision
d_g_z = discriminator(gen_imgs.detach())
# Measure discriminator's ability to classify real from generated samples
fake_loss = adversarial_loss(d_g_z, fake)
# Backward
fake_loss.backward()
d_loss = real_loss + fake_loss
optimizer_D.step()
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
        # New discriminator decision, since we just updated D
d_g_z = discriminator(gen_imgs)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(d_g_z, valid)
# Backward
g_loss.backward()
optimizer_G.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [Time: %fs]"
% (epoch, opt.n_epochs, i + 1, len(dataloader), d_loss.item(), g_loss.item(), time.time() - t_batch)
)
        # Compensate for BCEWithLogitsLoss: apply the sigmoid to recover probabilities
d_x = sigmoid(d_x)
d_g_z = sigmoid(d_g_z)
# Save Losses and scores for Tensorboard
save_hist_batch(hist, i, j, g_loss, d_loss, d_x, d_g_z)
# Tensorboard save
iteration = i + nb_batch * j
writer.add_scalar('g_loss', g_loss.item(), global_step=iteration)
writer.add_scalar('d_loss', d_loss.item(), global_step=iteration)
writer.add_scalar('d_x_mean', hist["d_x_mean"][i], global_step=iteration)
writer.add_scalar('d_g_z_mean', hist["d_g_z_mean"][i], global_step=iteration)
writer.add_scalar('d_x_cv', hist["d_x_cv"][i], global_step=iteration)
writer.add_scalar('d_g_z_cv', hist["d_g_z_cv"][i], global_step=iteration)
writer.add_histogram('D(x)', d_x, global_step=iteration)
writer.add_histogram('D(G(z))', d_g_z, global_step=iteration)
writer.add_scalar('D_x_max', hist["D_x_max"][j], global_step=epoch)
writer.add_scalar('D_x_min', hist["D_x_min"][j], global_step=epoch)
writer.add_scalar('D_G_z_min', hist["D_G_z_min"][j], global_step=epoch)
writer.add_scalar('D_G_z_max', hist["D_G_z_max"][j], global_step=epoch)
# Save samples
if epoch % opt.sample_interval == 0:
tensorboard_sampling(fixed_noise, generator, writer, epoch)
# Save models
if epoch % opt.model_save_interval == 0:
num = str(int(epoch / opt.model_save_interval))
save_model(discriminator, optimizer_D, epoch, opt.model_save_path + "/" + num + "_D.pt")
save_model(generator, optimizer_G, epoch, opt.model_save_path + "/" + num + "_G.pt")
print("[Epoch Time: ", time.time() - t_epoch, "s]")
durer = time.gmtime(time.time() - t_total)
print("[Total Time: ", durer.tm_mday - 1, "j:", time.strftime("%Hh:%Mm:%Ss", durer), "]", sep='')
# Save model for futur training
if opt.model_save_interval < opt.n_epochs + 1:
save_model(discriminator, optimizer_D, epoch, opt.model_save_path + "/last_D.pt")
save_model(generator, optimizer_G, epoch, opt.model_save_path + "/last_G.pt")
writer.close()
|
[
"lucas.goareguer@etu.univ-amu.fr"
] |
lucas.goareguer@etu.univ-amu.fr
|
4b81aa81f60b69515e27c15d9eb45410ce34361c
|
e162e7e276665226312cd1a03a5a3f614340bd32
|
/textacy/similarity.py
|
c9714c10ca7944d2fae6d5b2d37705fd845ff1df
|
[
"Apache-2.0"
] |
permissive
|
nigeljyng/textacy
|
6b34f3919574a4f0c91fb4c729a65919d4ac551e
|
17833f63103850f9ea7e8e22b16378758dae6fce
|
refs/heads/master
| 2021-01-23T01:56:32.224345
| 2017-03-23T13:34:14
| 2017-03-23T13:39:43
| 85,949,374
| 2
| 0
| null | 2017-03-23T12:53:59
| 2017-03-23T12:53:59
| null |
UTF-8
|
Python
| false
| false
| 9,468
|
py
|
"""
Collection of semantic similarity metrics.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import re
import warnings
from cytoolz import itertoolz
from Levenshtein import (distance as _levenshtein,
hamming as _hamming,
jaro_winkler as _jaro_winkler,
ratio as _ratio)
import numpy as np
from pyemd import emd
from sklearn.metrics import pairwise_distances
from spacy.strings import StringStore
import textacy
from textacy import extract
from textacy.compat import string_types, unicode_, bytes_, bytes_to_unicode
NONWORDCHARS_REGEX = re.compile(r'\W+', flags=re.IGNORECASE | re.UNICODE)
def word_movers(doc1, doc2, metric='cosine'):
"""
Measure the semantic similarity between two documents using Word Movers
Distance.
Args:
doc1 (``textacy.Doc`` or ``spacy.Doc``)
doc2 (``textacy.Doc`` or ``spacy.Doc``)
metric ({'cosine', 'euclidean', 'l1', 'l2', 'manhattan'})
Returns:
float: similarity between `doc1` and `doc2` in the interval [0.0, 1.0],
where larger values correspond to more similar documents
References:
Ofir Pele and Michael Werman, "A linear time histogram metric for improved
SIFT matching," in Computer Vision - ECCV 2008, Marseille, France, 2008.
Ofir Pele and Michael Werman, "Fast and robust earth mover's distances,"
in Proc. 2009 IEEE 12th Int. Conf. on Computer Vision, Kyoto, Japan, 2009.
Kusner, Matt J., et al. "From word embeddings to document distances."
Proceedings of the 32nd International Conference on Machine Learning
(ICML 2015). 2015. http://jmlr.org/proceedings/papers/v37/kusnerb15.pdf
"""
stringstore = StringStore()
n = 0
word_vecs = []
for word in itertoolz.concatv(extract.words(doc1), extract.words(doc2)):
if word.has_vector:
if stringstore[word.text] - 1 == n: # stringstore[0] always empty space
word_vecs.append(word.vector)
n += 1
distance_mat = pairwise_distances(np.array(word_vecs), metric=metric).astype(np.double)
distance_mat /= distance_mat.max()
vec1 = collections.Counter(
stringstore[word.text] - 1
for word in extract.words(doc1)
if word.has_vector)
vec1 = np.array([vec1[word_idx] for word_idx in range(len(stringstore))]).astype(np.double)
vec1 /= vec1.sum() # normalize word counts
vec2 = collections.Counter(
stringstore[word.text] - 1
for word in extract.words(doc2)
if word.has_vector)
vec2 = np.array([vec2[word_idx] for word_idx in range(len(stringstore))]).astype(np.double)
vec2 /= vec2.sum() # normalize word counts
return 1.0 - emd(vec1, vec2, distance_mat)
def word2vec(obj1, obj2):
"""
Measure the semantic similarity between one Doc or spacy Doc, Span, Token,
or Lexeme and another like object using the cosine distance between the
objects' (average) word2vec vectors.
Args:
obj1 (``textacy.Doc``, ``spacy.Doc``, ``spacy.Span``, ``spacy.Token``, or ``spacy.Lexeme``)
obj2 (``textacy.Doc``, ``spacy.Doc``, ``spacy.Span``, ``spacy.Token``, or ``spacy.Lexeme``)
    Returns:
float: similarity between `obj1` and `obj2` in the interval [0.0, 1.0],
where larger values correspond to more similar objects
"""
if isinstance(obj1, textacy.Doc) and isinstance(obj2, textacy.Doc):
obj1 = obj1.spacy_doc
obj2 = obj2.spacy_doc
return obj1.similarity(obj2)
def jaccard(obj1, obj2, fuzzy_match=False, match_threshold=0.8):
"""
Measure the semantic similarity between two strings or sequences of strings
using Jaccard distance, with optional fuzzy matching of not-identical pairs
when `obj1` and `obj2` are sequences of strings.
Args:
obj1 (str or Sequence[str])
obj2 (str or Sequence[str]): if str, both inputs are treated as sequences
of *characters*, in which case fuzzy matching is not permitted
fuzzy_match (bool): if True, allow for fuzzy matching in addition to the
usual identical matching of pairs between input vectors
match_threshold (float): value in the interval [0.0, 1.0]; fuzzy comparisons
with a score >= this value will be considered matches
Returns:
float: similarity between `obj1` and `obj2` in the interval [0.0, 1.0],
where larger values correspond to more similar strings or sequences
of strings
Raises:
ValueError: if `fuzzy_match` is True but `obj1` and `obj2` are strings
"""
if isinstance(match_threshold, int) and 1 <= match_threshold <= 100:
msg = '`match_threshold` should be a float in [0.0, 1.0]; it was automatically converted from the provided int in [0, 100]'
warnings.warn(msg)
match_threshold /= 100
set1 = set(obj1)
set2 = set(obj2)
intersection = len(set1 & set2)
union = len(set1 | set2)
if (fuzzy_match is True and
not isinstance(obj1, string_types) and
not isinstance(obj2, string_types)):
for item1 in set1.difference(set2):
if max(token_sort_ratio(item1, item2) for item2 in set2) >= match_threshold:
intersection += 1
for item2 in set2.difference(set1):
if max(token_sort_ratio(item2, item1) for item1 in set1) >= match_threshold:
intersection += 1
elif fuzzy_match is True:
raise ValueError('fuzzy matching not possible with str inputs')
return intersection / union
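# Worked example (illustrative): jaccard('abc', 'abd') treats each string as a set of characters,
# so intersection {'a', 'b'} (2) over union {'a', 'b', 'c', 'd'} (4) gives 0.5.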
def hamming(str1, str2):
"""
Measure the similarity between two strings using Hamming distance, which
simply gives the number of characters in the strings that are different i.e.
the number of substitution edits needed to change one string into the other.
Args:
str1 (str)
str2 (str)
Returns:
float: similarity between `str1` and `str2` in the interval [0.0, 1.0],
where larger values correspond to more similar strings
.. note:: This uses a *modified* Hamming distance in that it permits strings
of different lengths to be compared.
"""
len_str1 = len(str1)
len_str2 = len(str2)
if len_str1 == len_str2:
distance = _hamming(str1, str2)
else:
# make sure str1 is as long as or longer than str2
if len_str2 > len_str1:
str1, str2 = str2, str1
len_str1, len_str2 = len_str2, len_str1
# distance is # of different chars + difference in str lengths
distance = len_str1 - len_str2
distance += _hamming(str1[:len_str2], str2)
distance /= len_str1
return 1.0 - distance
def levenshtein(str1, str2):
"""
Measure the similarity between two strings using Levenshtein distance, which
gives the minimum number of character insertions, deletions, and substitutions
needed to change one string into the other.
Args:
str1 (str)
str2 (str)
Returns:
float: similarity between `str1` and `str2` in the interval [0.0, 1.0],
where larger values correspond to more similar strings
"""
distance = _levenshtein(str1, str2)
distance /= max(len(str1), len(str2))
return 1.0 - distance
def jaro_winkler(str1, str2, prefix_weight=0.1):
"""
Measure the similarity between two strings using Jaro-Winkler similarity
metric, a modification of Jaro metric giving more weight to a shared prefix.
Args:
str1 (str)
str2 (str)
prefix_weight (float): the inverse value of common prefix length needed
to consider the strings identical
Returns:
float: similarity between `str1` and `str2` in the interval [0.0, 1.0],
where larger values correspond to more similar strings
"""
return _jaro_winkler(str1, str2, prefix_weight)
def token_sort_ratio(str1, str2):
"""
Measure of similarity between two strings based on minimal edit distance,
where ordering of words in each string is normalized before comparing.
Args:
str1 (str)
str2 (str)
Returns:
float: similarity between ``str1`` and ``str2`` in the interval [0.0, 1.0],
where larger values correspond to more similar strings.
"""
if not str1 or not str2:
return 0
str1 = _force_unicode(str1)
str2 = _force_unicode(str2)
str1_proc = _process_and_sort(str1)
str2_proc = _process_and_sort(str2)
return _ratio(str1_proc, str2_proc)
def _force_unicode(s):
"""Force ``s`` into unicode, or die trying."""
if isinstance(s, unicode_):
return s
elif isinstance(s, bytes_):
return bytes_to_unicode(s)
else:
return unicode_(s)
def _process_and_sort(s):
"""Return a processed string with tokens sorted then re-joined."""
return ' '.join(sorted(_process(s).split()))
def _process(s):
"""
Remove all characters but letters and numbers, strip whitespace,
and force everything to lower-case.
"""
if not s:
return ''
return NONWORDCHARS_REGEX.sub(' ', s).lower().strip()
|
[
"burton@chartbeat.com"
] |
burton@chartbeat.com
|
3418d69cbbbc96c1bf28aef84615116ef1518ea1
|
34d86073d4dfc71aef2b0dd0b3d1cfdde455122c
|
/src/uniform-tilings/tiling.py
|
8d74f468cfc2e54dadfff2a392e99e7dda454cfe
|
[
"MIT"
] |
permissive
|
mohi7solanki/pywonderland
|
76fcc467f8003f87e733bd8b47e212a1c68ffa8e
|
2b9d61a8414d4cfa92d34325e5e2b9b5d501abca
|
refs/heads/master
| 2020-11-24T07:05:27.832124
| 2020-03-04T14:35:25
| 2020-03-04T14:35:25
| 228,021,605
| 0
| 0
|
MIT
| 2019-12-14T12:45:32
| 2019-12-14T12:45:31
| null |
UTF-8
|
Python
| false
| false
| 9,261
|
py
|
from itertools import combinations
from functools import partial
import numpy as np
import helpers
from coxeter import CoxeterGroup
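# Cyclically rotate a list one step to the left, e.g. [a, b, c] -> [b, c, a].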
def rot(arr):
return arr[1:] + [arr[0]]
class Face(object):
"""This class holds the information of a face and is
used to color the faces alternatively.
"""
def __init__(self, word, center, points, domain1, domain2):
"""
word: the word in the symmetry group that transforms the fundamental face to
this face.
center: center of this face.
points: coordinates of the vertices of this face.
        domain1 and domain2: the two alternating subdomains that together form the face.
"""
self.word = word
self.center = center
self.points = points
self.domain1 = domain1
self.domain2 = domain2
class UniformTiling(object):
def __init__(self, coxeter_diagram, init_dist):
self.cox_mat = helpers.get_coxeter_matrix(coxeter_diagram)
self.G = CoxeterGroup(self.cox_mat)
self.active = tuple(bool(x) for x in init_dist)
self.words = None
self.mirrors = self.get_mirrors(coxeter_diagram)
self.init_v = helpers.get_point_from_distance(self.mirrors, init_dist)
self.reflections = self.get_reflections(init_dist)
# to be calculated later
self.vertices_coords = []
self.num_vertices = None
self.num_edges = None
self.num_faces = None
self.edge_indices = {}
self.face_indices = {}
def build_geometry(self, depth=None, maxcount=20000):
self.G.init()
self.words = tuple(self.G.traverse(depth, maxcount))
self.get_vertices()
self.get_edges()
self.get_faces()
def get_vertices(self):
parabolic = tuple(i for i, x in enumerate(self.active) if not x)
coset_reps = set([self.G.get_coset_representative(w, parabolic) for w in self.words])
self.vwords = self.G.sort_words(coset_reps)
self.vtable = self.G.get_coset_table(self.vwords, parabolic)
self.num_vertices = len(self.vwords)
self.vertices_coords = [self.transform(word, self.init_v) for word in self.vwords]
def get_edges(self):
for i in range(len(self.active)):
if self.active[i]:
elist = []
coset_reps = set([self.G.get_coset_representative(w, (i,)) for w in self.words])
for word in self.G.sort_words(coset_reps):
v1 = self.G.move(self.vtable, 0, word)
v2 = self.G.move(self.vtable, 0, word + (i,))
if v1 is not None and v2 is not None:
if (v1, v2) not in elist and (v2, v1) not in elist:
elist.append((v1, v2))
self.edge_indices[i] = elist
self.num_edges = sum(len(L) for L in self.edge_indices.values())
def get_faces(self):
for i, j in combinations(range(len(self.active)), 2):
f0 = []
m = self.cox_mat[i][j]
parabolic = (i, j)
if self.active[i] and self.active[j]:
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (i, j) * k))
f0.append(self.G.move(self.vtable, 0, (i, j) * k + (i,)))
elif self.active[i] and m > 2:
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (j, i) * k))
elif self.active[j] and m > 2:
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (i, j) * k))
else:
continue
coset_reps = set([self.G.get_coset_representative(w, parabolic) for w in self.words])
flist = []
for word in self.G.sort_words(coset_reps):
f = tuple(self.G.move(self.vtable, v, word) for v in f0)
if None not in f and not helpers.check_duplicate_face(f, flist):
flist.append(f)
self.face_indices[(i, j)] = flist
self.num_faces = sum(len(L) for L in self.face_indices.values())
def transform(self, word, v):
for w in reversed(word):
v = self.reflections[w](v)
return v
def get_reflections(self, init_dist):
raise NotImplementedError
def get_fundamental_triangle_vertices(self):
raise NotImplementedError
def project(self, v):
raise NotImplementedError
def get_mirrors(self, coxeter_diagram):
raise NotImplementedError
class EuclideanTiling(UniformTiling):
def project(self, v):
return helpers.project_euclidean(v)
def get_mirrors(self, coxeter_diagram):
return helpers.get_spherical_or_affine_mirrors(coxeter_diagram)
def get_reflections(self, init_dist):
def reflect(v, normal, dist):
"""(affine) reflection.
"""
return v - 2 * (np.dot(v, normal) + dist) * normal
return [partial(reflect, normal=n, dist=d) for n, d in zip(self.mirrors, init_dist)]
class Poincare2D(UniformTiling):
def __init__(self, coxeter_diagram, init_dist):
super().__init__(coxeter_diagram, init_dist)
# vertices of the fundamental triangle
self.fundamental_triangle = self.get_fundamental_triangle_vertices()
# middle points of the edges of the fundamental triangle
self.edge_points = self.get_edge_points()
def project(self, v):
return helpers.project_hyperbolic(v)
def get_mirrors(self, coxeter_diagram):
return helpers.get_hyperbolic_mirrors(coxeter_diagram)
def get_reflections(self, init_dist):
def reflect(v, normal):
"""(affine) reflection.
"""
return v - 2 * np.dot(v, normal) * normal
return [partial(reflect, normal=n) for n in self.mirrors]
def get_fundamental_triangle_vertices(self):
return [helpers.get_point_from_distance(self.mirrors, d) for d in -np.eye(3)]
def get_edge_points(self):
d0 = (0, -1, -1)
d1 = (-1, 0, -1)
d2 = (-1, -1, 0)
return [helpers.get_point_from_distance(self.mirrors, d) for d in (d0, d1, d2)]
def get_faces(self):
for i, j in combinations(range(len(self.active)), 2):
f0 = []
m = self.cox_mat[i][j]
parabolic = (i, j)
P1 = P2 = None
c0 = self.fundamental_triangle[2 * (i + j) % 3]
if self.active[i] and self.active[j]:
P1 = self.edge_points[i]
P2 = self.edge_points[j]
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (i, j) * k))
f0.append(self.G.move(self.vtable, 0, (i, j) * k + (i,)))
elif self.active[i] and m > 2:
P1 = self.edge_points[i]
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (j, i) * k))
elif self.active[j] and m > 2:
P2 = self.edge_points[j]
for k in range(m):
f0.append(self.G.move(self.vtable, 0, (i, j) * k))
else:
continue
coset_reps = set([self.G.get_coset_representative(w, parabolic) for w in self.words])
flist = []
for word in self.G.sort_words(coset_reps):
f = tuple(self.G.move(self.vtable, v, word) for v in f0)
if None not in f and not helpers.check_duplicate_face(f, flist):
center = self.transform(word, c0)
coords = [self.vertices_coords[k] for k in f]
coords2 = rot(coords)
if P1 is None or P2 is None:
mids = [helpers.normalize((P + Q) / 2) for P, Q in zip(coords, coords2)]
domain1 = [(V, P) for V, P in zip(coords, mids)]
domain2 = [(P, V) for P, V in zip(mids, coords2)]
else:
mids1 = [helpers.normalize((P + Q) / 2) for P, Q in zip(coords[::2], coords[1::2])]
mids2 = [helpers.normalize((P + Q) / 2) for P, Q in zip(coords[1::2], rot(coords[::2]))]
domain1 = [(Q1, V, Q2) for Q1, V, Q2 in zip(mids1, coords[1::2], mids2)]
domain2 = [(Q1, V, Q2) for Q1, V, Q2 in zip(mids2, rot(coords[::2]), rot(mids1))]
if len(word) % 2 == 1:
domain1, domain2 = domain2, domain1
face = Face(word, center, coords, domain1, domain2)
flist.append(face)
self.face_indices[(i, j)] = flist
self.num_faces = sum(len(L) for L in self.face_indices.values())
class SphericalTiling(UniformTiling):
def project(self, v):
return helpers.project_spherical(v)
def get_mirrors(self, coxeter_diagram):
return helpers.get_spherical_or_affine_mirrors(coxeter_diagram)
def get_reflections(self, init_dist):
def reflect(v, normal):
"""(affine) reflection.
"""
return v - 2 * np.dot(v, normal) * normal
return [partial(reflect, normal=n) for n in self.mirrors]
|
[
"mathzhaoliang@gmail.com"
] |
mathzhaoliang@gmail.com
|
e067434af74bd851e32e2e47d171d908516e6bfc
|
04ce78237dc07ba78ff1ba5afbe6432d50221cf4
|
/INI4.py
|
61d01d39e94481e072adcc0d23227ee0a0be744b
|
[] |
no_license
|
yevfurman/Rosalind
|
abbeee46d04d5a74f0d2308163aa9574b531c688
|
efafb88a9cef5239dccaf58d6a97231eff73d8b6
|
refs/heads/master
| 2023-01-18T17:10:41.018903
| 2020-11-18T15:45:55
| 2020-11-18T15:45:55
| 313,915,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
f = open("Files/rosalind_ini4.txt")
a=list(map(int,(f.read()).split(" ")))
f.close()
summ=0
for i in range(a[0],a[1]+1):
if i%2!=0:
summ+=i
print(summ)
|
[
"yevfurman@gmail.com"
] |
yevfurman@gmail.com
|
669d02831d39675b6f8ad30798c49f99960262c1
|
ea3ff8c600a93dce9ae47f8daf244bd40d494072
|
/inmobiliaria/apps/propietarios/forms.py
|
f95351af86cd4d3f3251a0dec59733447d11010f
|
[] |
no_license
|
andretxu-6/Inmobiliaria
|
dc8e4d6f2bff6f139ebbb9f6798b767bf751da7e
|
551542407354dceda7b8cf5ffbf669aaf4ed6c4e
|
refs/heads/master
| 2021-01-23T05:50:33.627686
| 2017-06-06T13:26:55
| 2017-06-06T13:26:55
| 92,991,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
from django import forms
from apps.propietarios.models import Propietario
class PropietarioForm(forms.ModelForm):
class Meta:
model = Propietario
fields = [
'nombre',
'apellidos',
'fecha_nacimiento',
'direccion',
'telefono',
'email',
]
labels = {
'nombre': 'Nombre',
'apellidos': 'Apellidos',
'fecha_nacimiento': 'Fecha de nacimiento',
'direccion': 'Direccion',
'telefono': 'Telefono',
'email': 'Email',
}
widgets ={
'nombre': forms.TextInput(attrs={'class':'form-control','placeholder':'Introduce tu nombre'}),
'apellidos': forms.TextInput(attrs={'class':'form-control','placeholder':'Introduce tu apellido/s'}),
'fecha_nacimiento': forms.TextInput(attrs={'class':'form-control','placeholder':'Fecha de nacimiento en formato dd/mm/YYYY'}),
'direccion': forms.TextInput(attrs={'class':'form-control','placeholder':'Introduce la direccion de tu vivienda actual'}),
'telefono': forms.TextInput(attrs={'class':'form-control','placeholder':'numero de telefono (con prefijo si es necesario)'}),
'email': forms.TextInput(attrs={'class':'form-control','placeholder':'direccion de correo electronico'}),
}
|
[
"andretxu6@gmail.com"
] |
andretxu6@gmail.com
|
2f3929550179f750f485efe10ef575e0e503512c
|
0cc64681027fdb16b1925259d6c155f7f7d19c2d
|
/assets/migrations/0002_auto_20210222_0109.py
|
00f187350a8a9670a0612f1ec6e2f75612a365a1
|
[] |
no_license
|
Christian-Oliveira/desafio-inquest
|
8ca666aa9ce8492f942fa0d6724338971e05b38a
|
ee4c14ec258602927117660f0972755feff413d6
|
refs/heads/master
| 2023-03-10T09:45:58.671336
| 2021-02-23T00:51:04
| 2021-02-23T00:51:04
| 340,673,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
# Generated by Django 3.1.7 on 2021-02-22 01:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assets', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='assetsmodel',
old_name='person_id',
new_name='person',
),
]
|
[
"christianoliveirati@gmail.com"
] |
christianoliveirati@gmail.com
|
3af70ae14dfb6cb3951f1b1a9ea0922c1f8baced
|
0a84d748400b9c7fc9121b4a76a2bf8d1d75de14
|
/plotting/plot_psycho.py
|
36c1714b4b2a43c164beb6e5b32bdd3de4d9f6f3
|
[] |
no_license
|
dangpzanco/disciplina-fala
|
3649a103777370fb7a8b15fce37e97446074c9f4
|
04b4213084969304d44efa3adb6198af101cfd83
|
refs/heads/master
| 2020-07-30T21:35:56.252159
| 2019-12-04T14:19:40
| 2019-12-04T14:19:40
| 210,365,686
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,354
|
py
|
import pathlib
import soundfile as sf
import librosa
import numpy as np
import numpy.random as rnd
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import trange
def format_figure(fig, ax):
    figsize = np.array([16, 10])/2.54  # centimetres to inches
par_ticks = {'fontname': 'Arial',
'weight': 'normal',
'fontsize' : '10'}
par_labels = {'family': 'Times New Roman',
'weight' : 'normal',
'fontsize' : '12'}
# Adjust the axis inside the figure
fig.subplots_adjust(left=0.09, right=0.97, bottom=0.14, top=0.975, wspace=0, hspace=0)
#fig.subplots_adjust(left=0.116, right=0.965, bottom=0.17, top=0.98, wspace=0, hspace=0)
    # Use for 9 cm by 4.5 cm figures
# Adjust figure size
fig.set_size_inches(figsize)
# legend_title = ax.get_legend().get_title().get_text().capitalize()
# legend_text = [text.get_text().capitalize() for text in ax.get_legend().get_texts()]
# print(legend_title, legend_text)
# ax.legend(legend_text, title=legend_title)
# Adjust the font of x and y labels
ax.set_xlabel(ax.get_xlabel(), **par_labels)
ax.set_ylabel(ax.get_ylabel(), **par_labels)
# Set the font name for axis tick labels
for tick in ax.get_xticklabels():
tick.set_fontname(par_ticks['fontname'])
tick.set_fontsize(par_ticks['fontsize'])
tick.set_weight(par_ticks['weight'])
for tick in ax.get_yticklabels():
tick.set_fontname(par_ticks['fontname'])
tick.set_fontsize(par_ticks['fontsize'])
tick.set_weight(par_ticks['weight'])
# fig.set_tight_layout(0.01)
# results = pd.read_csv('../analysis/speechmetrics_results.csv')
exp_metadata = pd.read_csv('../experiment/exp_metadata.csv')
results = pd.read_csv('../experiment/results/results.csv')
subject_names = ['bruno','celso','felipe']
subject_names = results.columns[1:]
results['mos'] = results[subject_names].mean(axis=1)
results['SNR'] = exp_metadata['SNR'].values
results['technique'] = exp_metadata['technique'].values
# results = pd.melt(results, id_vars=['id'], value_vars=subject_names)
# ind = results['id'].values
# results['SNR'] = exp_metadata['SNR'].values[ind]
# results['technique'] = exp_metadata['technique'].values[ind]
# results['mos'] = results['value']
# results['subject'] = results['variable']
# results = results.drop(['value', 'variable'], axis=1)
print(results)
# Drop SNR = Inf dB
# drop_index = (results['SNR'] == np.inf).values
# drop_index = np.where(drop_index)[0]
# results = results.drop(drop_index)
metric_list = ['pesq', 'stoi', 'srmr', 'llr', 'csii']
kind = 'violin'
kind = 'box'
# kind = 'bar'
# hue_order = ['noisy', 'wiener', 'bayes', 'binary']
hue_order = ['noisy', 'binary', 'wiener', 'bayes']
metric = 'mos'
# df = pd.wide_to_long(results, stubnames='sub_', i='id', j='mos')
fig, ax = plt.subplots(figsize=(10,7))
snsfig = sns.catplot(x='SNR', y=metric, hue='technique', ax=ax,
hue_order=hue_order, data=results, kind=kind)
plt.close(snsfig.fig)
ax.set_ylabel(metric.upper())
ax.set_xlabel('SNR [dB]')
format_figure(fig, ax)
fig.savefig(f'../images/psycho_{metric}.pdf', format='pdf', transparent=True)
fig.savefig(f'../images/psycho_{metric}.png', format='png', transparent=True)
plt.show()
|
[
"dangpzanco@gmail.com"
] |
dangpzanco@gmail.com
|
4d246179c8121464ab611878e52376016465c3d6
|
14b0d41a7a35de8fae46e550e020ef4092611325
|
/CPT_secondscreen.pyde
|
913c87c68d39f3be9011de835238ca8b6092f344
|
[] |
no_license
|
ICS2O-Gallo/cpt-nameera-m
|
f23e04c9bfe02f7f4f2f24c13246ad1c39116dcc
|
2626dcb304326c1d949caea29a5e87be5096b179
|
refs/heads/master
| 2021-09-04T20:02:22.817289
| 2018-01-22T01:16:09
| 2018-01-22T01:16:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
pyde
|
def setup():
size(1000, 800)
def draw():
background(255)
fill(0)
textSize(100)
text("Select a level:", 25, 100)
rect(25, 200, 400, 50)
textSize(32)
fill(255)
text("Easy", 175, 230)
fill(0)
rect(25, 300, 400, 50)
textSize(32)
fill(255)
text("Moderate", 175, 330)
fill(0)
rect(25, 400, 400, 50)
textSize(32)
fill(255)
text("Hard", 175, 430)
|
[
"noreply@github.com"
] |
ICS2O-Gallo.noreply@github.com
|
93b59da3810633c2d02d8018a78721e75bda1964
|
83932f1d956a6b7818c6e58a31205e6e26f2fb5c
|
/0x0B-python-input_output/100-append_after.py
|
e2eba919c2e97d3a4d7ba4547e15fee17ccf5d08
|
[] |
no_license
|
Nzparra/holbertonschool-higher_level_programming
|
a17834b8239e477a7284119acac69da0e7d7261e
|
6cf7a44a10db7a10be3c3c02cbacfea9a7b897f2
|
refs/heads/master
| 2020-09-29T02:45:04.458850
| 2020-05-14T21:12:45
| 2020-05-14T21:12:45
| 226,930,910
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
#!/usr/bin/python3
def append_after(filename="", search_string="", new_string=""):
val = ""
with open(filename, "r", encoding="utf8") as file:
for line in file:
val += line
if search_string in line:
val += new_string
with open(filename, "w", encoding="utf8") as file:
file.write(val)
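
# Usage sketch (hypothetical file, not part of the exercise): if "demo.txt"
# contains a line with "Holberton", then
#   append_after("demo.txt", "Holberton", "is cool\n")
# rewrites the file with "is cool" inserted right after each matching line.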
|
[
"nzparra@gmail.com"
] |
nzparra@gmail.com
|
91d754a06736a368aa954f6d40efd194e2baecda
|
98d41ace3bc5e82323c4cd21b6e7d16ea9d16f69
|
/Mambu/sites/admin.py
|
6ce1ac97d63cae55a65525d4d55e7208ebb614a8
|
[] |
no_license
|
pius-ng3a/MambuSiteV1
|
1152b15f99ae10918bb3404311e98cbf478e0661
|
0af11fb5e9da003098c1f9d9306d9529d7f0f1e1
|
refs/heads/master
| 2022-12-04T02:07:37.703323
| 2020-08-30T16:52:39
| 2020-08-30T16:52:39
| 289,721,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
from django.contrib import admin
from .models import Site
# Register your models here.
class SiteAdmin(admin.ModelAdmin):
    """Admin layout for the Site model."""
    fieldsets = [
        ("Site Name ", {'fields': ['name']}),
        # NOTE: 'descritption' is assumed to match the field name as (mis)spelled on the model
        ("Description ", {'fields': ['descritption']}),
        ("Picture ", {'fields': ['site_image']}),
    ]
# register with SiteAdmin so the fieldsets above are actually used
admin.site.register(Site, SiteAdmin)
|
[
"pius2016@gmail.com"
] |
pius2016@gmail.com
|
f273f614070b3ec157327a711f055db28e4399be
|
6346e04a9672ca4232115ce51d42487168863e08
|
/Scripts/sendgotitemailtotarget.py
|
e5ca85623d196d34a1049296f46f1c0176937064
|
[] |
no_license
|
MikeDawg/content
|
97b7edf4a6e0a84dc9e81863cb07f6cb25e56aa5
|
4d7a8a131eb07689e157ff510995e5315efab9db
|
refs/heads/master
| 2021-01-23T15:30:27.743903
| 2016-09-02T17:57:24
| 2016-09-02T17:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
# Find the recipient of the email - should be the target
target = ''
for t in demisto.incidents()[0]['labels']:
if t['type'] == 'Email/from':
target = t['value']
break
if target == '':
for t in demisto.incidents()[0]['labels']:
if t['type'] == 'Email':
target = t['value']
break
if target == '':
demisto.results({'Type': entryTypes['error'], 'ContentsFormat': formats['text'], 'Contents': 'Could not find the target email'})
else:
from string import Template
import textwrap
defaultBody = """\
Hi $target,
We've received your email and are investigating. Please do not touch the email until further notice.
Cheers,
Your friendly security team"""
body = demisto.args()['body'] if demisto.get(demisto.args(), 'body') else defaultBody
actualBody = Template(body)
subject = demisto.args()['subject'] if demisto.get(demisto.args(), 'subject') else 'Security Email Re: ' + demisto.incidents()[0]['name']
demisto.results(demisto.executeCommand('send-mail', {'to': target, 'subject': subject, 'body': textwrap.dedent(actualBody.safe_substitute(target=target))}))
|
[
"lior@demisto.com"
] |
lior@demisto.com
|
df15f189ad8abcc014d97b41efa1b7b819a861d7
|
4dc918ea87efb657c25a85cefc1249dd22dd6999
|
/graphscoring/main.py
|
75b6b5e7ca271de9ee3f8bcf6c8ba1cf001fe793
|
[] |
no_license
|
DevyaniBajaj/TextSummarization
|
bc72c5aa829c6b1e358435fd010ca861730dfef0
|
35591154ab5e9df8da7eeb13d8ccdbcfc8b58608
|
refs/heads/master
| 2020-03-14T17:48:26.766020
| 2018-05-01T07:03:16
| 2018-05-01T07:03:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
from helper import dissect
from graphscoring import text_rank
import pandas as pd
import itertools
summarized_list = []
labels_list = []
def summarize(mainText):
sentences = dissect.get_sentences(mainText)
#print("------", sentences, '\n')
S = text_rank.get_similarity_matrix(sentences)
sent_scores = text_rank.get_ranks(S)
#print("sent_scores: ", sent_scores, '\n')
labels, summary = dissect.get_summarized_sentences(sent_scores.tolist(), sentences)
labels_list.append(labels)
summarized_list.append(summary)
def perform_textrank(textArray):
for s in textArray:
summarize(s)
single_labels_list = list(itertools.chain.from_iterable(labels_list))
return single_labels_list, summarized_list
|
[
"dhanya.akhila@iiitb.org"
] |
dhanya.akhila@iiitb.org
|
0bfb0cfe7a239dcd9b4917a272d9a3e218e3a6ba
|
56ba14e240574dd123cad8e831273db5b7bcbc6b
|
/100 days of code/GUI (tkinter and turtle)/miles-km-converter/miles_to_km_converter.py
|
a2ce257eba3645389c4588e6ce9e8c351c4bc85b
|
[
"MIT"
] |
permissive
|
araschermer/python-code
|
4b7971b3e6c6cbddf908e3423f5382f80b2f370d
|
e4f30601779f423476659d5b0b5f5c9df71718f7
|
refs/heads/main
| 2023-04-23T17:16:19.578997
| 2021-05-18T15:41:31
| 2021-05-18T15:41:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
from tkinter import *
def miles_to_km():
    # use the spinbox value when the manual entry is empty, otherwise the entry
    # (focus_force() returns an empty string, so it cannot select a branch)
    if user_input.get() == "":
        miles = float(miles_spinbox.get())
    else:
        miles = float(user_input.get())
    km = miles * 1.609344
    km_result.config(text = f"{round(km, 2)}")
window = Tk()
window.title("Miles to Kilometers Converter")
window.config(padx = 15, pady = 15, )
miles_spinbox = Spinbox(from_ = float(0), to = float(1000), width = 10, command = miles_to_km)
miles_spinbox.grid(column = 0, row = 0)
user_input = Entry(width = 7)
user_input.grid(column = 1, row = 0)
miles_label = Label(text = "Miles")
miles_label.grid(column = 2, row = 0)
is_equal = Label(text = "is equal to")
is_equal.grid(column = 0, row = 1)
km_label = Label(text = "Km")
km_label.grid(column = 2, row = 1)
km_result = Label(text = "0")
km_result.grid(column = 1, row = 1)
calculate_button = Button(text = "Calculate", command = miles_to_km)
calculate_button.grid(column = 1, row = 2)
window.mainloop()
|
[
"44481677+abdelkha@users.noreply.github.com"
] |
44481677+abdelkha@users.noreply.github.com
|
d464fc27a55b40b584be9cb0268a95e3833f7065
|
03dab80497ef1da431ffaae9556c42c7b2927116
|
/polls/views.py
|
ce035d48bb49a24250de2bc62305800da7fd16e3
|
[] |
no_license
|
monyoudom/sample
|
6f536d975b3fef936e8e69636307b8d261636a35
|
a8872cd1a28f002fc6f2b4ae319c8ba4f0ceef3d
|
refs/heads/master
| 2020-04-24T19:57:08.354494
| 2019-03-03T15:58:19
| 2019-03-03T15:58:19
| 172,228,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from .forms import InformaitonForms
# Create your views here.
def index(request):
    if request.method == 'POST':
        form = InformaitonForms(request.POST)
        if form.is_valid():  # only save and redirect when the submitted data validates
            form.save()
            return HttpResponseRedirect('polls/thanks/')
    else:
        form = InformaitonForms()
    return render(request, 'index.html', {'form': form})
def thanks(request):
return render(request, 'thank.html')
|
[
"thongmonyoudom@outlook.com"
] |
thongmonyoudom@outlook.com
|
94a65ce3023cea924288f6a78545a7984fa3552b
|
3d4085e97301b13ed69914b4ecc3db74febcac05
|
/NEMA17motorRun.py
|
7a115d6954ef1d023366054cfa9e53994b07fe6d
|
[
"Apache-2.0"
] |
permissive
|
Kitaarspeler/Rubiks-Cube
|
60d6c0a1b8613d25176aeab54c389209c6d80107
|
dc1b4c83839a1130ac1162faf862fd977bb6ba1a
|
refs/heads/master
| 2020-05-19T00:24:21.937783
| 2019-05-03T10:11:30
| 2019-05-03T10:11:30
| 184,735,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
import time
import RPi.GPIO as GPIO
DirPin = 2
StepPin = 3
CW = 0
CCW = 1
Delay = 0.005
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(DirPin, GPIO.OUT)
GPIO.setup(StepPin, GPIO.OUT)
try:
while True:
        Steps = int(input('Enter number of steps: '))  # range() needs an int
        Dir = input('Enter a Dir!!!!!!!!!!!!!!').upper()
        if Dir == 'CW':
            GPIO.output(DirPin, CW)
        elif Dir == 'CCW':
            GPIO.output(DirPin, CCW)
        else:
            print('Enter a Dir you twat')
            continue  # re-prompt instead of stepping with an unset direction
for step in range(Steps):
GPIO.output(StepPin, True)
time.sleep(Delay)
GPIO.output(StepPin, False)
time.sleep(Delay)
except KeyboardInterrupt:
print('Fuck off then')
GPIO.cleanup()
File "/home/pi/Rubiks-Cube/PiBasicRotation.py", line 26, in SetupGPIOPins
GPIO.setup(DirPin, GPIO.OUT)
NameError: name 'DirPin' is not defined
|
[
"noreply@github.com"
] |
Kitaarspeler.noreply@github.com
|
fb19cd7a1143a7fb2827c7255f5e850b4b83328b
|
0d0846b64d0fabaeb3f96ee971006baa8cdb8615
|
/script/DNS_tcp_forwarder.py
|
2a3e6a987e2801f15b99b9be86b5f36d7e86ca9e
|
[] |
no_license
|
songlinjian/DNS_ATR
|
ea06ef9ae145be5b87faed10d6bd4578a4304a5a
|
ecb9f027faa52a882aba666cb63f601c410f9de5
|
refs/heads/master
| 2021-01-23T04:48:48.041265
| 2018-08-02T05:47:50
| 2018-08-02T05:47:50
| 102,452,704
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,166
|
py
|
#!/usr/bin/env python
# coding: utf-8
import ConfigParser
from gevent import monkey
from gevent.server import StreamServer
import dns.resolver
import dns.message
import dns.rrset
import dns.query
import struct
import socket
monkey.patch_all()
#import socket
# try dnspython module here
def dns_forward(dns_message, port=5353):
ans = dns.query.tcp(dns_message, dns_default, port=port)
# return a dns.message.Message object to wire format data
print ans.question[0].name
for i in ans.answer:
print i.to_text()
return ans
# read the DNS wireformat data and reply
def forward(data, addr, sock):
dns_message = dns.message.from_wire(data)
qname = dns_message.question[0].name
qtype = dns_message.question[0].rdtype
qclass = dns_message.question[0].rdclass
# note: the id of the query(qid) should be record in case the query is hit
# the cache. The id of the cached answer will be replace by this new qid
qid = dns_message.id
# check the cache
#ans = DNSServer.cache.get((qname, qtype, qclass))
# if cache miss
#if ans is None:
# print 'Cache miss! Forward the query to ', dns_default
dns_ans = dns_forward(dns_message)
#ans = dns.resolver.Answer(qname, qtype, qclass, dns_ans)
# DNSServer.cache.put((qname, qtype, qclass), ans)
# print 'Cache max_size is :', DNSServer.cache.max_size
# print 'Cache len is :', len(DNSServer.cache.data)
# if cache hit
#else:
# print 'Cache hit! Good!!!'
# ans.response.id = qid
dns_ans_wire = dns_ans.to_wire()
s_len = len(dns_ans_wire)
print s_len
    # convert the length from host to network byte order
    # and pack it into a 2-byte binary string
    two_byte = struct.pack("H", socket.htons(s_len))
    # print socket.ntohs(struct.unpack("h", two_byte)[0])  # reversing gives s_len back
TCP_DNS_wire = '%s%s' % (two_byte, dns_ans_wire)
#answers, soa = query(str(qname).rstrip('.'))
#answer_dns = pack_dns(dns, answers, soa)
    # store the answer packet in the LRUCache for later use
    #DNSServer.dns_cache[qname] = dns_response_wire
    # send the reply
    #sock.sendto(dns_response_wire, addr)
sock.sendall(TCP_DNS_wire)
# def _init_cache_queue():
# while True:
# data, addr, sock = DNSServer.deq_cache.get()
# print data
# gevent.spawn(handler, data, addr, sock)
def handle(socket, address):
    # receive the request data
print "receive a query from :", address
wire_data = socket.recv(4096)
# from RFC1035: The message is prefixed with a two byte length
# field which gives the message length, excluding the two byte
# length field.
    # ntohs: network byte order to host byte order, 16 bits
    # struct.unpack("H", wire_data[:2]) reads the two binary bytes as a short int
# print socket.ntohs(struct.unpack("h", wire_data[:2])[0])
wire_message = wire_data[2:]
#print len(wire_message)
    # the cache queue stores tuples: (request packet, client address, sock)
#DNSServer.deq_cache.put((wire_message, self.client_address[0], self.request))
forward(wire_message, address, socket)
class DNSServer(object):
@staticmethod
def start():
        # cache queue: incoming requests would be queued here first, then taken out for processing
        #DNSServer.cache = dns.resolver.LRUCache(lru_size)
        # start the DNS server with gevent.server
print 'Start DNS server at %s:%d\n' % (ip, port)
dns_server = StreamServer((ip, port), handle)
dns_server.serve_forever()
def load_config(filename):
with open(filename, 'r') as fc:
cfg = ConfigParser.ConfigParser()
cfg.readfp(fc)
return dict(cfg.items('DEFAULT'))
if __name__ == '__main__':
# read the config file
#config_file = os.path.basename(__file__).split('.')[0] + '.ini'
config_file = 'config.ini'
config_dict = load_config(config_file)
ip, port = config_dict['ip'], int(config_dict['port'])
deq_size, lru_size = int(
config_dict['deq_size']), int(
config_dict['lru_size'])
db = config_dict['db']
dns_default = config_dict['dns']
    # start the server
DNSServer.start()
|
[
"songlinjian@gmail.com"
] |
songlinjian@gmail.com
|
872f3024dbbef360a41e18d04b0dbf0fbd56efcc
|
d164e3478f1d80c51d6c8af657f2ad138a7a99c5
|
/Design Patterns/Creational/Builder/builder.py
|
93af79f4f405ba40c7c51c82f027a1e301138a8e
|
[] |
no_license
|
Kanthus123/Python
|
23d50e9c2128e5b6315b7146fc2ccc2707766d55
|
dfddc428829353d1ef93c5121f0980eac02e2fbd
|
refs/heads/master
| 2021-10-27T11:48:52.680957
| 2019-04-17T00:51:35
| 2019-04-17T00:51:35
| 126,108,636
| 0
| 1
| null | 2019-04-17T00:41:43
| 2018-03-21T02:00:14
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,551
|
py
|
# Allows you to create different flavors of an object while avoiding constructor
# pollution. Useful when there could be several flavors of an object, or when
# there are a lot of steps involved in creating one.
# Imagine you are at Hardee's and you order a specific deal, let's say a
# "Big Hardee", and they hand it over to you without any questions;
# that is an example of a simple factory.
# But there are cases when the creation logic involves more steps. For example,
# for a customized Subway deal you have several options in how your burger is
# made: what bread do you want? What sauces? What cheese? etc. In such cases the
# builder pattern comes to the rescue.
class Burger:
def __init__(self, builder):
self.size = builder.size
self.cheese = builder.cheese
self.pepperoni = builder.pepperoni
self.lettuce = builder.lettuce
self.tomato = builder.tomato
class BurgerBuilder:
def __init__(self, size):
self.size = size
self.cheese = False
self.pepperoni = False
self.lettuce = False
self.tomato = False
def add_cheese(self):
self.cheese = True
def add_pepperoni(self):
self.pepperoni = True
def add_lettuce(self):
self.lettuce = True
def add_tomato(self):
        self.tomato = True
def build(self):
return Burger(self)
if __name__ == '__main__':
    builder = BurgerBuilder(6)
    builder.add_cheese()
    builder.add_lettuce()
    burger = builder.build()  # keep the built burger
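    # illustrative check: the built burger carries the chosen flavor,
    # e.g. burger.cheese -> True, burger.tomato -> False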
|
[
"paulo.csm@outlook.com"
] |
paulo.csm@outlook.com
|
effe82cef561cb5a8768ebe6ce2f9da7cae5968c
|
53799fef98e0e7907a366e1dedb62d4056408a34
|
/devel/lib/python2.7/dist-packages/costmap_2d/cfg/VoxelPluginConfig.py
|
48c3a7da07de1f5ce3b1a1a956b35f892704c457
|
[] |
no_license
|
carebare47/catkin_ws
|
c22a21d2a0defa83434d1555fdcb7761d71c03d3
|
bd782e4de0b75aa4a1693b453b5ea2891a88b1bd
|
refs/heads/master
| 2021-01-11T03:14:26.131246
| 2016-10-16T22:05:15
| 2016-10-16T22:05:15
| 71,080,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
## *********************************************************
##
## File autogenerated for the costmap_2d package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 235, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 280, 'description': 'Whether to use this plugin or not', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'enabled', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 280, 'description': "Whether to clear the robot's footprint of lethal obstacles", 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'footprint_clearing_enabled', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 280, 'description': 'Max Obstacle Height', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_obstacle_height', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'The z origin of the map in meters.', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'origin_z', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'The z resolution of the map in meters/cell.', 'max': 50.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_resolution', 'edit_method': '', 'default': 0.2, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'The number of voxels to in each vertical column.', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'z_voxels', 'edit_method': '', 'default': 10, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 280, 'description': 'The number of unknown cells allowed in a column considered to be known', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'unknown_threshold', 'edit_method': '', 'default': 15, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 280, 'description': 'The maximum number of marked cells allowed in a column considered to be free', 'max': 16, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'mark_threshold', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 280, 'description': 'Method for combining two layers', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'combination_method', 'edit_method': "{'enum_description': 'Method for combining layers enum', 'enum': [{'srcline': 16, 'description': 'b', 'srcfile': '/home/tom/catkin_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Overwrite'}, {'srcline': 17, 'description': 'a', 'srcfile': '/home/tom/catkin_ws/src/navigation/costmap_2d/cfg/VoxelPlugin.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Maximum'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
VoxelPlugin_Overwrite = 0
VoxelPlugin_Maximum = 1
|
[
"tom_q@hotmail.co.uk"
] |
tom_q@hotmail.co.uk
|
70331ae1b1b8af587418c1f3f3165a9bffcc460a
|
63a45fb5f435b105a80613d74d5ac6cd7d087ea8
|
/project2_withgui.py
|
c3d37eb14878b307aeeebf4e0966b88c65ae3b47
|
[] |
no_license
|
tszdanger/HITSZ_DATABASE
|
d7a1af79b3cf6d7996067b8d49ca5069762d33f2
|
5e4bb1fa17ca78dd471da8474c663135fa689a07
|
refs/heads/master
| 2021-03-21T15:11:50.645514
| 2020-07-03T07:46:25
| 2020-07-03T07:46:25
| 247,306,559
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,846
|
py
|
import pymysql
import wx
def ask_need(num):
if num==1:
ENAME = input("请输入领导名字")
return ENAME
elif num==2:
PLOCATION = input("请输入项目所在地")
return PLOCATION
elif num==3:
PNAME = input("请输入项目名称")
return PNAME
elif num==4:
ADDRESS = input("请输入领导地址")
SALARY = input("请输入工资阈值")
return ['%'+ADDRESS+'%',SALARY]
elif num==5:
PNO = input("请输入项目编号")
return PNO
elif num==6:
MGRSTARTDATE = input("请输入时间xxxx-mm-dd")
return MGRSTARTDATE
elif num==7:
HOURS = input("请输入小时数")
return HOURS
elif num==8:
HOURS = input("请输入小时数")
return HOURS
elif num==9:
HOURS = input("请输入小时数")
N = input("请输入项目数")
return [N,HOURS]
elif num==10:
data1 = input("请输入新的employee1:ename,essn,address,salary,superssn,dno")
data1 = data1.split(",")
data2 = input("请输入新的employee2:ename,essn,address,salary,superssn,dno")
data2 = data2.split(",")
data =[tuple(data1),tuple(data2)]
return data
elif num==11:
return
elif num==12:
return
else:
print("别输那些乱七八糟的")
def sql_sen(num):
ss = ["select employee.essn from employee where employee.superssn in (select essn from employee where ename=%s)",\
"select department.dname from department where department.dno in (select project.dno from project where project.plocation=%s)",\
"select employee.ename, employee.address from employee where employee.essn in (select works_on.essn from works_on where works_on.pno in (select project.pno from project where project.pname =%s))",\
"select employee.ename, employee.address from employee where employee.superssn in (select employee.essn from employee where employee.address like %s) and employee.salary >= %s",\
"select employee.ename from employee where employee.essn not in (select works_on.essn from works_on where works_on.pno =%s)",\
"select department.dname from department where department.mgrstartdate>%s",\
"select project.pname from project where project.pno in (select works_on.pno from works_on group by pno having sum(works_on.hours)>%s)",\
"select project.pname from project where project.pno in (select works_on.pno from works_on group by works_on.pno having sum(works_on.hours)/count(*)<%s)",\
"select employee.ename from employee where (employee.essn in (select works_on.essn from works_on group by works_on.essn having count(*)>%s and sum(works_on.hours)>%s) )",\
"insert into employee(ename,essn,address,salary,superssn,dno) values (%s,%s,%s,%s,%s,%s);",\
"update employee set address='深圳市南山区西丽大学城哈工大(深圳)' where employee.essn='251'",\
"delete from employee where employee.essn='250'"
]
return ss[num-1]
def OnCloseMe(event):
    # wx passes the triggering event as the first argument
    dlg = wx.TextEntryDialog(None, u"请在下面文本框中输入内容:", u"文本输入框标题", u"默认内容")
    if dlg.ShowModal() == wx.ID_OK:
        message = dlg.GetValue()  # get the value typed into the text box
        dlg_tip = wx.MessageDialog(None, message, u"标题信息", wx.OK | wx.ICON_INFORMATION)
        if dlg_tip.ShowModal() == wx.ID_OK:
            frame.Close(True)  # close the top-level frame; events have no Close()
        dlg_tip.Destroy()
    dlg.Destroy()
db = pymysql.connect(host="localhost", user="root", password="123456", db="company", port=3306)
cur = db.cursor()
app = wx.App()
frame = wx.Frame(None, title="Gui Test Editor", pos=(1000, 200), size=(500, 400))
text = wx.StaticText(frame, wx.ID_ANY, "查询直接领导为%ENAME%的员工编号", (0,7 ), (250,250), wx.ALIGN_LEFT)
# 1. Query the employee numbers whose direct supervisor is %ENAME%
basicText = wx.TextCtrl(frame, -1, "请输入员工编号", pos=(230,5) , size=(125, -1))
button1 = wx.Button(frame, label="查询", pos=(370, 5), size=(50, 24))
button1.Bind(wx.EVT_BUTTON, lambda event: ask_need(1))  # ask_need expects a query number, not the event
button = wx.Button(frame, label=u'关闭', pos=(250, 20), size=(100, 60))
button.Bind(wx.EVT_BUTTON,OnCloseMe)
# frame.Bind(wx.EVT_BUTTON, OnCloseMe, button)
# path_text = wx.TextCtrl(frame, pos=(5, 5), size=(350, 24))
# open_button = wx.Button(frame, label="打开", pos=(370, 5), size=(50, 24))
# open_button.Bind(wx.EVT_BUTTON, openfile)  # bind the open-file event to the open_button
#
# save_button = wx.Button(frame, label="保存", pos=(430, 5), size=(50, 24))
#
# content_text = wx.TextCtrl(frame, pos=(5, 39), size=(475, 300), style=wx.TE_MULTILINE)
# wx.TE_MULTILINE shows multi-line text with a scrollbar; without it the text is rendered on a single line
frame.Show()
app.MainLoop()
|
[
"1142383654@qq.com"
] |
1142383654@qq.com
|
8ffdabee580c4cc1edc9747a17bec45b8f9c1dbe
|
d09f292e9b9be761cf64aa53b342e6af6d0e3cab
|
/api/routes.py
|
1ac358061c46f441d624812d32e8b3e65321a87a
|
[
"MIT"
] |
permissive
|
sosolidkk/sigaa-api
|
86b8a02b0c012c4a98a7cf7862a0444f2976490b
|
ea98a057999d6307ecc734254429a5e63bc02461
|
refs/heads/master
| 2022-12-14T18:34:54.627426
| 2021-02-19T20:23:00
| 2021-02-19T20:23:00
| 237,333,730
| 4
| 0
|
MIT
| 2022-12-08T03:31:53
| 2020-01-31T00:36:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
from api import app, scraping, base_models
from starlette.responses import RedirectResponse
@app.get("/")
def index():
return RedirectResponse(url="/docs")
@app.post("/info")
def return_info(user: base_models.User):
session, response = scraping.login(user.username, user.password)
info = scraping.grab_user_data(session, response)
return info
@app.post("/ver-notas")
def return_grades(user: base_models.User):
session, response = scraping.login(user.username, user.password)
info = scraping.see_all_grades(session, response)
return info
@app.post("/disciplinas")
def return_subjects(user: base_models.User):
_, response = scraping.login(user.username, user.password)
info = scraping.see_all_subjects(response)
return info
@app.post("/historico")
def return_history(user: base_models.User):
session, response = scraping.login(user.username, user.password)
payload = scraping.grab_user_history(session, response)
return payload
@app.post("/declaracao")
def return_registration_statement(user: base_models.User):
session, response = scraping.login(user.username, user.password)
payload = scraping.grab_user_registration_statement(session, response)
return payload
|
[
"pedrochaveslimas3@gmail.com"
] |
pedrochaveslimas3@gmail.com
|
5a53f4119dc264ff45ef9fefc5dbdeeb92c01750
|
0983d459f6dfecc6178dfbb40dcf7434e454a85b
|
/project/plot_fitness.py
|
74bd5cf52907729c1a2445b9fc6a9748d54e2ff5
|
[] |
no_license
|
felixbartel/gaec_project
|
6049aff53a2120324193bb2ac28b95d5d6c7e1a5
|
7feb09f0f708c4126d669b4d02d0e1476e6ca85f
|
refs/heads/master
| 2020-04-02T12:44:21.820619
| 2019-01-08T14:23:27
| 2019-01-08T14:23:27
| 154,449,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('filename', nargs = '?', default = 'fitness.pkl')
parser.add_argument('--gens', type=int)  # --gens must be numeric for np.arange below
args = parser.parse_args()
with open(args.filename, 'rb') as f:
fitness = pickle.load(f)
if args.gens is None:
gen = len(fitness)
else:
gen = args.gens
fig = plt.figure(figsize=(10,6))
font = {'family' : 'normal',
'size' : 26}
plt.rc('font', **font)
ax = plt.axes()
ax.plot(np.arange(gen), np.mean(fitness, axis = 1), 'k')
ax.plot(np.arange(gen), np.max(fitness, axis = 1), 'k')
ax.plot(np.arange(gen), np.min(fitness, axis = 1), 'k')
yres = 23
y_grid = np.linspace(0, 1.0001, yres)
zi = [ [ np.sum(np.less_equal(y_grid[j],fitness[k])*np.less(fitness[k],y_grid[j+1])) for j in range(len(y_grid)-1)] for k in range(gen) ]
zi = np.transpose(np.array(zi))
zi = np.concatenate((zi, np.zeros([1,gen])))
xi, yi = np.meshgrid(np.arange(0,gen)-0.5, y_grid)
tmp = ax.pcolor(xi, yi, zi, cmap=plt.cm.BuGn)
cb = plt.colorbar(tmp)
cb.set_label('% of population')
tmp.set_clim(vmin=0, vmax=20)
ax.set_xlabel('generation')
ax.set_ylabel('fitness')
ax.set_xlim([0,gen-1.5])
ax.set_ylim([0,1])
plt.tight_layout()
plt.savefig(args.filename[0:-3] + 'png')
#plt.show()
|
[
"felixbartel@protonmail.com"
] |
felixbartel@protonmail.com
|
4e51c7a42d48c49f2018033eab4e6d2e92b003ee
|
ccf3ded1aff85e6e2ef25e00547756e16c107ef6
|
/abash/__main__.py
|
2a16ea9de95fd8631a9ee7f447384f05466ee04d
|
[] |
no_license
|
bijurakhul/abash
|
33d4d5df18f7fe3dc82ca857f87e54c0b5581be7
|
a7cb5b2eb6e8dc7bc0cb34a2fb93d967f37a0bf7
|
refs/heads/master
| 2022-10-31T00:22:12.366273
| 2020-06-21T03:52:14
| 2020-06-21T03:52:14
| 273,695,465
| 0
| 0
| null | 2020-06-21T03:52:15
| 2020-06-20T11:32:08
|
Python
|
UTF-8
|
Python
| false
| false
| 76
|
py
|
from abash import alpha
if __name__ == "__main__":
alpha.AlphaTerminal()
|
[
"bijurakhul.si@zohocorp.com"
] |
bijurakhul.si@zohocorp.com
|
abcffa137cdccf986aae19e9ec672ec9d050c5e1
|
0930b6c994225d44818887716ce4e8771af86b81
|
/exercisesDosCapitulos/10-arquivosEExcecoes/10.10-palavrasComuns/palavrasComuns.py
|
08bf589ce86f8f3304a714cc9244cb036a039de5
|
[] |
no_license
|
jonasht/cursoIntesivoDePython
|
44d81b08f1652c4fa7a6d14a0e3f62ee8e06944c
|
fb5f5c9884fb1a6062a7c4e7676e5cc3b13c0ebb
|
refs/heads/master
| 2023-05-23T20:44:34.372825
| 2021-06-19T12:13:46
| 2021-06-19T12:13:46
| 293,325,804
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
nomeArquivo = 'arquivo.txt'
with open(nomeArquivo) as a:
    conteudo = a.read()

def qtd(palavra):
    # count how many times `palavra` appears in the file contents
    print(f'quantidade de "{palavra}":', conteudo.count(palavra))
qtd('you')
qtd('are')
qtd('we')
qtd('eat')
qtd('is')
qtd('girl')
qtd('boy')
qtd('man')
qtd('woman')
|
[
"jhenriquet@outlook.com.br"
] |
jhenriquet@outlook.com.br
|
ebe5354204cdaa3cb5de8b1e84bb7f93b397ac7c
|
cbcde1da56efe2c2c3d6154bedd6f7ebbb95a8fe
|
/gaussian.py
|
aa3d473a7507b0bcc6fd8c71e7a7f7a743d77fb1
|
[] |
no_license
|
brianlan/udacity-introduction-to-computer-vision
|
7af7157016ffa19f1c964140b2bcf1acf9fdfbaa
|
17f9b3f34973925881bdf6e7ab239bf7883590ab
|
refs/heads/master
| 2020-12-23T13:49:16.291018
| 2020-02-02T09:18:09
| 2020-02-02T09:18:09
| 237,171,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
import numpy as np
def gaussian_function_2d_particular(x, sigma=1.0):
"""It's a simplified formula from
[Multivariate normal distribution](https://en.wikipedia.org/wiki/Multivariate_normal_distribution)
for our special case here, where mu is fixed to 0 and the sigmas from both dimensions are the same."""
return np.exp(-0.5 * (1 / sigma) * (x * x).sum()) / (2 * sigma * np.pi)
def generate_gaussian_kernel(kernel_size, sigma=1.0):
side = (kernel_size - 1) / 2
x, y = np.meshgrid(np.arange(kernel_size) - side, np.arange(kernel_size) - side)
kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
for i in range(kernel_size):
for j in range(kernel_size):
kernel[i, j] = gaussian_function_2d_particular(
np.array([x[i, j], y[i, j]]), sigma=sigma
)
return kernel
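
# Quick sanity check (illustrative, not part of the original module):
if __name__ == "__main__":
    kernel = generate_gaussian_kernel(5, sigma=1.0)
    print(kernel.shape)           # (5, 5)
    print(kernel / kernel.sum())  # normalized kernel, largest at the center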
|
[
"brianlanbo@gmail.com"
] |
brianlanbo@gmail.com
|
8c108d9d93a33e58ad1f0d1a88808865a89181ca
|
6ebb9e37df6ec59d8d200a304ef3eb0c2691111f
|
/customer/views.py
|
b9033886f715ee016fa0ccea00162f5e7597515f
|
[] |
no_license
|
NikeshMaharjan1217/django
|
a8a7460dc910811e6a4077bd4f00eac0e23022ae
|
1971958d3a84da3b78fbdde346a611dd21166575
|
refs/heads/master
| 2023-04-01T21:39:32.913633
| 2021-04-01T11:03:33
| 2021-04-01T11:03:33
| 353,650,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import customer
from bus.models import bus
from django.urls import reverse
# Create your views here.
def index(request):
    return render(request, 'customer.html')
def insert(request):
name = request.GET['customer_name']
name_save = customer(customer_name= name)
name_save.save()
return HttpResponse("Saved!")
def display(request):
    buses = bus.objects.all()
    user = customer.objects.all()
    context = {'dis': buses, 'use': user}
    return render(request, 'show.html', context)
def book(request):
bu = bus.objects.filter(pk=request.GET['bus_id'])
a = request.GET['tic']
user = customer.objects.get(pk=a)
context={'b': bu,'u': user}
return render(request,'bill.html',context)
def show(request):
user = customer.objects.all()
context = {'users':user}
return render(request,'u_list.html',context)
# def ticket(request):
# user = customer.objects.all()
# context = {'use':user}
# return render(request,'show.html',context)
def delete(request,pk):
dele = customer.objects.get(id=pk)
dele.delete()
    return redirect(reverse('customer:show'))
|
[
"nikeshmaharjan1217.com"
] |
nikeshmaharjan1217.com
|
f7ec3c60c9e50c0ab72df091a2df9a32e0665485
|
c6af5dcdb1a3cd9d20abdf50c5571836a1b76298
|
/event_scraper/scraping_tasks.py
|
215aee086655795c1f0d9352e8e28bccfca687f2
|
[] |
no_license
|
mikelambert/dancedeets
|
82b1cb0c32b14485cd9cbbc051421d1cb7499830
|
8dd51007bb2faa56d835a149b60740141d472c25
|
refs/heads/master
| 2021-01-21T00:30:09.963623
| 2016-11-29T12:04:00
| 2016-11-29T12:04:00
| 42,857,923
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import app
import base_servlet
from . import auto_add
from . import potential_events_reloading
from . import thing_db
from . import thing_scraper
@app.route('/tools/auto_add_potential_events')
class AutoAddPotentialEventsHandler(base_servlet.BaseTaskFacebookRequestHandler):
def get(self):
past_event = self.request.get('past_event', None)
if past_event == '1':
past_event = True
elif past_event == '0':
past_event = False
auto_add.mr_classify_potential_events(self.fbl, past_event)
@app.route('/tools/export_sources')
class ExportSourcesHandler(base_servlet.BaseTaskFacebookRequestHandler):
def get(self):
queue = self.request.get('queue', 'fast-queue')
thing_db.mapreduce_export_sources(self.fbl, queue=queue)
@app.route('/tasks/count_source_stats')
class CountSourceStatsHandler(base_servlet.BaseTaskFacebookRequestHandler):
def get(self):
queue = self.request.get('queue', 'slow-queue')
thing_db.mr_count_potential_events(self.fbl, queue=queue)
@app.route('/tasks/load_potential_events_for_user')
class LoadPotentialEventsForUserHandler(base_servlet.UserIdOperationHandler):
user_id_operation = staticmethod(potential_events_reloading.load_potential_events_for_user_ids)
@app.route('/tasks/load_all_potential_events')
class LoadAllPotentialEventsHandler(base_servlet.BaseTaskFacebookRequestHandler):
def get(self):
# this calls a map function wrapped by mr_user_wrap, so it works correctly on a per-user basis
potential_events_reloading.mr_load_potential_events(self.fbl)
@app.route('/tasks/load_potential_events_from_wall_posts')
class LoadPotentialEventsFromWallPostsHandler(base_servlet.BaseTaskFacebookRequestHandler):
def get(self):
min_potential_events = int(self.request.get('min_potential_events', '0'))
queue = self.request.get('queue', 'super-slow-queue')
thing_scraper.mapreduce_scrape_all_sources(self.fbl, min_potential_events=min_potential_events, queue=queue)
|
[
"mlambert@gmail.com"
] |
mlambert@gmail.com
|
6c1493a32df36d5b7d529c458be757975a7fbf8f
|
719f1ba6a7bc673900e576e250b8928daadab7c0
|
/models/model.py
|
d2b0fa1b3e7eeaa919ae7669f7b928ed7109c334
|
[
"MIT"
] |
permissive
|
Zsunflower/mnist
|
a35c3d1eb73a9ae26699150d3a7a8bd7b82b2404
|
0fab636cb55074d320c92bae5436b9a0491a7fdf
|
refs/heads/main
| 2023-01-11T05:29:58.960585
| 2020-11-07T16:25:05
| 2020-11-07T16:25:05
| 310,884,023
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=(3, 3), padding=(1, 1))
self.conv2 = nn.Conv2d(32, 64, kernel_size=(3, 3), padding=(1, 1))
self.conv3 = nn.Conv2d(64, 128, kernel_size=(3, 3), padding=(1, 1))
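        # 1152 = 128 * 3 * 3: with the usual 28x28 MNIST input, three 2x2
        # max-poolings shrink the feature map to 3x3 over 128 channels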
self.fc = nn.Linear(1152, 10)
def forward(self, X):
Y = self.conv1(X)
Y = F.max_pool2d(Y, (2, 2))
Y = F.relu(Y)
Y = self.conv2(Y)
Y = F.max_pool2d(Y, (2, 2))
Y = F.relu(Y)
Y = self.conv3(Y)
Y = F.max_pool2d(Y, (2, 2))
Y = Y.view(Y.size(0), -1)
Y = self.fc(Y)
return Y
|
[
"cuong29594@gmail.com"
] |
cuong29594@gmail.com
|
7deab789f7d11ead2b3be59b5119a7782399cef3
|
dd146e691d11e4caf1878c537dee924460945dab
|
/scenario/display/display_sc.py
|
290e179985b2766326dc9bc59f062167d4f1b0b2
|
[] |
no_license
|
gilderf/chempy
|
67a2f4ac4ac5142efe88afa4002aafbfe87e5b97
|
5f144fc1b0f107278fe5cf9265ad334937c89a75
|
refs/heads/master
| 2022-01-04T17:30:59.823776
| 2019-07-06T13:24:39
| 2019-07-06T13:24:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# -*- coding: utf-8 -*-
"""
@author: ALA
Script for display functions
"""
# %load_ext autoreload
# %autoreload 2
# Import chempy and numpy packages
import chempy as cp
import numpy as np
# Let's import some data
# First, import spectral data
X = cp.read2div('./data_set/X1.CSV')
# Then, import Y values
Y = cp.read2div('./data_set/Y1.CSV')
# Use curve quickly to display X data
cp.curve(X)
# Only display some indexes of X
cp.curve(X, row=[1,2])
# Use grouping to get list of div and display them with different colors
grouping_obj = cp.grouping(X, [1,2])
# Display curve with different colors and specify the filters expression as legend label
cp.curve(grouping_obj.div_list, legend_label=grouping_obj.filter_list)
# Select y column for color curve figure
Ycolor = cp.selectcol(Y, [18])
cp.curve(X, ycolor = Ycolor, cmap='Greens')
# Let's test a PCA
pca_obj = cp.pca(X)
# Maps the scores : X axis is the first score, Y axis is the second score
cp.map2(pca_obj.scores_div,0 ,1)
# Only display indexes from 10 to 99
cp.map2(pca_obj.scores_div,0,1,row=list(range(10,100)))
# Add a colormap with respect to Y div
cp.map2(pca_obj.scores_div,0,1,ycolor=Ycolor, cmap='Greens')
# Use grouping
grouping_obj = cp.grouping(pca_obj.scores_div, [1,2])
cp.map2(grouping_obj.div_list,0,1,legend_label=grouping_obj.filter_list, cmap='Set1')
|
[
"antoine.laborde@greentropism.com"
] |
antoine.laborde@greentropism.com
|
f4dfdb41f2fc1c2d350c10f7f84445adc6ac9dc8
|
31009efe0b3882551f03dcaa9c71756c7c6f6ede
|
/src/main/resources/twisted/cred/portal.py
|
bbb0af8e20a48242e7cfbe7031a3b7969faa2459
|
[
"Apache-2.0",
"ZPL-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
riyafa/autobahntestsuite-maven-plugin
|
b533433c75f7daea2757158de54c6d80d304a962
|
737e6dad2d3ef794f30f0a2013a77e28decd2ec4
|
refs/heads/master
| 2020-08-16T13:31:39.349124
| 2019-10-16T09:20:55
| 2019-10-16T09:20:55
| 215,506,990
| 0
| 0
|
Apache-2.0
| 2019-10-16T09:18:34
| 2019-10-16T09:18:34
| null |
UTF-8
|
Python
| false
| false
| 5,339
|
py
|
# -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
"""
The realm connects application-specific objects to the
authentication system.
"""
def requestAvatar(avatarId, mind, *interfaces):
"""
Return avatar which provides one of the given interfaces.
@param avatarId: a string that identifies an avatar, as returned by
L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
(via a Deferred). Alternatively, it may be
C{twisted.cred.checkers.ANONYMOUS}.
@param mind: usually None. See the description of mind in
L{Portal.login}.
@param interfaces: the interface(s) the returned avatar should
implement, e.g. C{IMailAccount}. See the description of
L{Portal.login}.
@returns: a deferred which will fire a tuple of (interface,
avatarAspect, logout), or the tuple itself. The interface will be
one of the interfaces passed in the 'interfaces' argument. The
'avatarAspect' will implement that interface. The 'logout' object
is a callable which will detach the mind from the avatar.
"""
class Portal:
"""
A mediator between clients and a realm.
A portal is associated with one Realm and zero or more credentials checkers.
When a login is attempted, the portal finds the appropriate credentials
checker for the credentials given, invokes it, and if the credentials are
valid, retrieves the appropriate avatar from the Realm.
This class is not intended to be subclassed. Customization should be done
in the realm object and in the credentials checker objects.
"""
def __init__(self, realm, checkers=()):
"""
Create a Portal to a L{IRealm}.
"""
self.realm = realm
self.checkers = {}
for checker in checkers:
self.registerChecker(checker)
def listCredentialsInterfaces(self):
"""
Return list of credentials interfaces that can be used to login.
"""
return self.checkers.keys()
def registerChecker(self, checker, *credentialInterfaces):
if not credentialInterfaces:
credentialInterfaces = checker.credentialInterfaces
for credentialInterface in credentialInterfaces:
self.checkers[credentialInterface] = checker
def login(self, credentials, mind, *interfaces):
"""
@param credentials: an implementor of
L{twisted.cred.credentials.ICredentials}
@param mind: an object which implements a client-side interface for
your particular realm. In many cases, this may be None, so if the
word 'mind' confuses you, just ignore it.
@param interfaces: list of interfaces for the perspective that the mind
wishes to attach to. Usually, this will be only one interface, for
example IMailAccount. For highly dynamic protocols, however, this
may be a list like (IMailAccount, IUserChooser, IServiceInfo). To
expand: if we are speaking to the system over IMAP, any information
that will be relayed to the user MUST be returned as an
IMailAccount implementor; IMAP clients would not be able to
understand anything else. Any information about unusual status
would have to be relayed as a single mail message in an
otherwise-empty mailbox. However, in a web-based mail system, or a
PB-based client, the ``mind'' object inside the web server
(implemented with a dynamic page-viewing mechanism such as a
Twisted Web Resource) or on the user's client program may be
intelligent enough to respond to several ``server''-side
interfaces.
@return: A deferred which will fire a tuple of (interface,
avatarAspect, logout). The interface will be one of the interfaces
passed in the 'interfaces' argument. The 'avatarAspect' will
implement that interface. The 'logout' object is a callable which
will detach the mind from the avatar. It must be called when the
user has conceptually disconnected from the service. Although in
some cases this will not be in connectionLost (such as in a
web-based session), it will always be at the end of a user's
interactive session.
"""
for i in self.checkers:
if i.providedBy(credentials):
return maybeDeferred(self.checkers[i].requestAvatarId, credentials
).addCallback(self.realm.requestAvatar, mind, *interfaces
)
ifac = providedBy(credentials)
return defer.fail(failure.Failure(error.UnhandledCredentials(
"No checker for %s" % ', '.join(map(reflect.qual, ifac)))))
|
[
"nmaurer@redhat.com"
] |
nmaurer@redhat.com
|
654799805644ed40fd4fea289c966bebaa7563af
|
693af9eb4914f714d00a0656b2fbb258f8af73b1
|
/phi/field/_angular_velocity.py
|
99e59b3b4063baf4dba774e84affcf1db90026c3
|
[
"MIT"
] |
permissive
|
caecfd/PhiFlow
|
6c43a3ded59078495b5aa3e4251f6f8ac6f4e540
|
8545866448a7f6709c7bb1a5c38251f740f93c98
|
refs/heads/master
| 2023-09-03T09:06:48.397872
| 2021-11-05T10:00:36
| 2021-11-05T12:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
from collections.abc import Callable  # Callable lives in collections.abc on Python 3.3+
from numbers import Number
from phi import math
from ._field import Field
from ..geom import Geometry
from ..math import Shape, GLOBAL_AXIS_ORDER, spatial
class AngularVelocity(Field):
"""
Model of a single vortex or set of vortices.
The falloff of the velocity magnitude can be controlled.
Without a specified falloff, the velocity increases linearly with the distance from the vortex center.
This is the case with rotating rigid bodies, for example.
"""
def __init__(self,
location: math.Tensor or tuple or list or Number,
strength: math.Tensor or Number = 1.0,
falloff: Callable = None,
component: str = None):
location = math.wrap(location)
strength = math.wrap(strength)
assert location.shape.channel.names == ('vector',), "location must have a single channel dimension called 'vector'"
assert location.shape.spatial.is_empty, "location tensor cannot have any spatial dimensions"
self.location = location
self.strength = strength
self.falloff = falloff
self.component = component
spatial_names = [GLOBAL_AXIS_ORDER.axis_name(i, location.vector.size) for i in range(location.vector.size)]
self._shape = location.shape & spatial(**{dim: 1 for dim in spatial_names})
def _sample(self, geometry: Geometry) -> math.Tensor:
points = geometry.center
distances = points - self.location
strength = self.strength if self.falloff is None else self.strength * self.falloff(distances)
velocity = math.cross_product(strength, distances)
velocity = math.sum(velocity, self.location.shape.batch.without(points.shape))
if self.component:
velocity = velocity.vector[self.component]
return velocity
@property
def shape(self) -> Shape:
return self._shape
def __getitem__(self, item: dict):
assert all(dim == 'vector' for dim in item), f"Cannot slice AngularVelocity with {item}"
if 'vector' in item:
assert item['vector'] == 0 or self.component is None
component = self.shape.spatial.names[item['vector']]
return AngularVelocity(self.location, self.strength, self.falloff, component)
else:
return self
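
# Usage sketch (hypothetical values): per the asserts in __init__, `location`
# must carry a single channel dim named 'vector', e.g.
#   vortex = AngularVelocity(location=math.wrap((0., 0.), math.channel(vector='x,y')))
# Sampling a geometry then yields strength x cross(distance) per cell.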
|
[
"philipp.holl@tum.de"
] |
philipp.holl@tum.de
|
414abe3401f4285ac96a17991d65239a1ef154fd
|
609f9a721e7d043584847444752cdbb39be7f604
|
/regress_task/solution.py
|
823d4081db9c8c57a227d1e01f1ceb7909743d22
|
[] |
no_license
|
Serafim-End/regress_model
|
2f675ccce48de94abe7c95b332a62b9ec76e8b0d
|
efed8989d40a84c02e00c93659b81a81baabaf6a
|
refs/heads/master
| 2021-01-10T15:41:29.669999
| 2015-12-02T22:55:10
| 2015-12-02T22:55:10
| 47,293,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,387
|
py
|
import os
from pprint import pprint
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from preparation import prepare_data
def solution(prepared_data, degree=1, test_data=None):
"""
solution of task
if test data is None put train_data into test_data
:param prepared_data: train data with structure
which determined in preparation file
:param degree: number of max polynomial degree
:param test_data: test data with structure like prepared_data
:return: regress coefficients and r_square
"""
sparse_matrix = prepared_data['parameters']
y = prepared_data['connections']
model = Pipeline([('poly', PolynomialFeatures(degree=degree)),
('linear', LinearRegression(fit_intercept=False))])
model = model.fit(sparse_matrix, y)
regress_coef = model.named_steps['linear'].coef_
regression = LinearRegression()
if test_data is None:
regression.fit(sparse_matrix, y)
r_square = regression.score(sparse_matrix, y)
else:
if not isinstance(test_data, dict) or 'connections' not in test_data \
or 'parameters' not in test_data:
            raise ValueError('put correct data into test_data')
regression.fit(sparse_matrix, y)
r_square = regression.score(test_data['parameters'],
test_data['connections'])
return regress_coef, r_square
def prepare_reply(parameters_paths, prepared_data, test_data,
                  degree=1, index_year=1):
    """
    print the R^2 and regression coefficients for one year
    :param parameters_paths: parameter file names, used as coefficient labels
    :param prepared_data: train data, as produced by prepare_data
    :param test_data: test data with the same structure as prepared_data
    :param degree: number of max polynomial degree
    :param index_year: index of the year being reported
    :return: None, the reply is printed
    """
regress_coef, r_square = solution(prepared_data, degree, test_data)
reply = {path.split('.')[0][4:]: regress_coef[i]
for i, path in enumerate(parameters_paths)}
print 'Index of year: {}'.format(index_year)
print 'R^2: {}'.format(r_square)
print 'Coefficients: '
pprint(reply)
print
def main():
"""
connection_paths - path from DATA directory with connections of students
if you want to add year just add file to directory
and write name of file to this list
paths - files with parameters if you want to add parameter
just add file to DATA directory and write name to this tuple
IMPORTANT
test_data in prepare_reply (red color): change value of this parameter
from data to your real test data
P.S. do not forget to make prepare_data for test data too.
:return: reply
"""
connection_paths = ('s50-network1.dat', 's50-network2.dat',
's50-network3.dat')
connections_filenames = [os.path.join('DATA', path)
for path in connection_paths]
paths = (
's50-alcohol.dat', 's50-drugs.dat',
's50-familyevent.dat', 's50-smoke.dat', 's50-sport.dat'
)
paths_to_parameters = [os.path.join('DATA', path) for path in paths]
for i in xrange(len(connection_paths)):
data = prepare_data(connections_filenames[i], paths_to_parameters,
year_index=i + 1)
prepare_reply(paths, data, test_data=data, degree=1, index_year=i + 1)
if __name__ == '__main__':
main()
|
[
"endnikita@gmail.com"
] |
endnikita@gmail.com
|
dceeb5bc5bcb3adc052c93352f9dc3f98ecd5462
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/common/types/targeting_setting.py
|
188b0586295425dcced843d28e639677e48a62c9
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 4,064
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import (
targeting_dimension as gage_targeting_dimension,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v9.common",
marshal="google.ads.googleads.v9",
manifest={
"TargetingSetting",
"TargetRestriction",
"TargetRestrictionOperation",
},
)
class TargetingSetting(proto.Message):
r"""Settings for the targeting-related features, at the campaign
and ad group levels. For more details about the targeting
setting, visit https://support.google.com/google-
ads/answer/7365594
Attributes:
target_restrictions (Sequence[google.ads.googleads.v9.common.types.TargetRestriction]):
The per-targeting-dimension setting to
restrict the reach of your campaign or ad group.
target_restriction_operations (Sequence[google.ads.googleads.v9.common.types.TargetRestrictionOperation]):
The list of operations changing the target
restrictions.
Adding a target restriction with a targeting
dimension that already exists causes the
existing target restriction to be replaced with
the new value.
"""
target_restrictions = proto.RepeatedField(
proto.MESSAGE, number=1, message="TargetRestriction",
)
target_restriction_operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="TargetRestrictionOperation",
)
class TargetRestriction(proto.Message):
r"""The list of per-targeting-dimension targeting settings.
Attributes:
targeting_dimension (google.ads.googleads.v9.enums.types.TargetingDimensionEnum.TargetingDimension):
The targeting dimension that these settings
apply to.
bid_only (bool):
Indicates whether to restrict your ads to show only for the
criteria you have selected for this targeting_dimension, or
to target all values for this targeting_dimension and show
ads based on your targeting in other TargetingDimensions. A
value of ``true`` means that these criteria will only apply
bid modifiers, and not affect targeting. A value of
``false`` means that these criteria will restrict targeting
as well as applying bid modifiers.
This field is a member of `oneof`_ ``_bid_only``.
"""
targeting_dimension = proto.Field(
proto.ENUM,
number=1,
enum=gage_targeting_dimension.TargetingDimensionEnum.TargetingDimension,
)
bid_only = proto.Field(proto.BOOL, number=3, optional=True,)
class TargetRestrictionOperation(proto.Message):
r"""Operation to be performed on a target restriction list in a
mutate.
Attributes:
operator (google.ads.googleads.v9.common.types.TargetRestrictionOperation.Operator):
Type of list operation to perform.
value (google.ads.googleads.v9.common.types.TargetRestriction):
The target restriction being added to or
removed from the list.
"""
class Operator(proto.Enum):
r"""The operator."""
UNSPECIFIED = 0
UNKNOWN = 1
ADD = 2
REMOVE = 3
operator = proto.Field(proto.ENUM, number=1, enum=Operator,)
value = proto.Field(proto.MESSAGE, number=2, message="TargetRestriction",)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
cc56806e83bafd003e54528b808b1397bdd7e6e5
|
00c1ade0ee277cfc08e55e5094ae274c2aa75948
|
/data_analysis.py
|
4d4bd0b9798c0cffe27a42430d7421e29e057ccd
|
[] |
no_license
|
Najib-Haq/Pokemon_Maester
|
1ddb66e827a202bd618b3de2035c0d0ed9cda29d
|
0801c5cb40c7ed6316a8b5b32c9faedbde927798
|
refs/heads/master
| 2020-09-10T16:48:56.582068
| 2019-11-22T20:08:01
| 2019-11-22T20:08:01
| 221,766,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,823
|
py
|
import loading_saving_dictionaries as lsd
import get_image as gm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
types = ['Grass', 'Dark', 'Rock', 'Fighting', 'Psychic', 'Dragon', 'Water', 'Bug', 'Electric', 'Ice', 'Ghost', 'Ground', 'Steel', 'Fire', 'Flying', 'Normal', 'Poison', 'Fairy']
colors = ['lime', 'darkslateblue', 'silver', 'olive', 'gold', 'purple', 'cornflowerblue', 'lawngreen','yellow','skyblue','black','peru','dimgrey','orangered','azure','tan','magenta','pink']
color_dict = {}
for i in range(len(types)):
color_dict[types[i]] = colors[i]
def add_to_dict(counter, key):
    # Increment the count for key, skipping NaN keys.
    if not pd.isnull(key):
        if key in counter:
            counter[key] += 1
        else:
            counter[key] = 1
def add_dict_to_dict(nested, key1, key2):
    # Increment nested[key1][key2], skipping NaN keys.
    if pd.isnull(key1) or pd.isnull(key2):
        return
    if key1 in nested:
        if key2 in nested[key1]:
            nested[key1][key2] += 1
        else:
            nested[key1][key2] = 1
    else:
        nested[key1] = {key2: 1}
def get_max3(counter):
    # Keys of the five largest counts; ties no longer repeat the same key.
    return sorted(counter, key=counter.get, reverse=True)[:5]
df = pd.read_csv("Dataset/combats.csv")
index = np.random.randint(0,len(df))
name2index = lsd.load_dict('name2index')
index2name = lsd.load_dict('index2name')
#################### TESTING THE DATASET #############################
# pokemon1 = df.iloc[index]['First_pokemon']
# print(pokemon1)
# pokemon2 = df.iloc[index]['Second_pokemon']
# print(pokemon2)
# winner = df.iloc[index]['Winner']
#
# pokemon1 = index2name[pokemon1].split(" ")[0]
# print(pokemon1)
# pokemon2 = index2name[pokemon2].split(" ")[0]
# print(pokemon2)
# winner = index2name[winner]
# gm.show(pokemon1, pokemon2, winner)
# count of names more than 1 string is 97
# TODO : can't show images from the site. any way to solve?
df_pok = pd.read_csv("Dataset/pokemon.csv")
# count = 0
# for i in range(len(df_pok)):
# if len(df_pok.iloc[i]['Name'].split(" "))!=1:
# count += 1
#
# print(count)
# TODO : data analysis
#################### DATASET ANALYSIS #############################
# find which type has the most appearences
# get all the types
type_set = {}
#save data
# for i in range(len(df_pok)):
# type1 = df_pok.iloc[i]['Type 1']
# type2 = df_pok.iloc[i]['Type 2']
# if type1 in type_set.keys():
# type_set[type1] = type_set[type1] + 1
# else:
# type_set[type1] = 1
# if not pd.isnull(type2):
# if type2 in type_set.keys():
# type_set[type2] = type_set[type2] + 1
# else:
# type_set[type2] = 1
#
# lsd.save_dict(type_set, 'type')
# load data
type_set = lsd.load_dict('type')
#
# print(type_set.keys())
# plt.figure()
# plt.bar(type_set.keys(), type_set.values())
# plt.title("Number of pokemon by type")
# plt.xticks(rotation=50)
# plt.xlabel("Maximum five types are = " + str(get_max3(type_set)))
# plt.savefig("Analysis/types_hist.png", bbox_inches = 'tight')
#
# plt.figure()
# plt.pie([float(v) for v in type_set.values()], labels=[k for k in type_set.keys()],
# autopct='%1.2f')
# plt.title("Percentage of pokemon by type")
# plt.savefig("Analysis/types_pie.png", bbox_inches = 'tight')
# get percentage of types based on combats
types_combats = {}
winner_combats = {}
loser_combats = {}
#load data
# for i in range(len(df)):
# pok1 = df.iloc[i]['First_pokemon']
# pok2 = df.iloc[i]['Second_pokemon']
# winner = df.iloc[i]['Winner']
# pok1_t1 = df_pok.iloc[pok1-1]['Type 1']
# pok1_t2 = df_pok.iloc[pok1 - 1]['Type 2']
# pok2_t1 = df_pok.iloc[pok2 - 1]['Type 1']
# pok2_t2 = df_pok.iloc[pok2 - 1]['Type 2']
# #print("First pokemon is : "+str(pok1) + " and in dict " + str(df_pok.iloc[pok1-1]["#"]))
# if i%1000 == 0:
# print(str(i)+"/"+str(len(df)) + " done.")
# add_to_dict(types_combats, pok1_t1)
# add_to_dict(types_combats, pok1_t2)
# add_to_dict(types_combats, pok2_t1)
# add_to_dict(types_combats, pok2_t2)
# if pok1 == winner:
# (winner_t1, winner_t2) = (pok1_t1, pok1_t2)
# (loser_t1, loser_t2) = (pok2_t1, pok2_t2)
# else:
# (winner_t1, winner_t2) = (pok2_t1, pok2_t2)
# (loser_t1, loser_t2) = (pok1_t1, pok1_t2)
#
# add_to_dict(winner_combats, winner_t1)
# add_to_dict(winner_combats, winner_t2)
# add_to_dict(loser_combats, loser_t1)
# add_to_dict(loser_combats, loser_t2)
#
# lsd.save_dict(types_combats,"type_combat")
# lsd.save_dict(winner_combats, "winners")
# lsd.save_dict(loser_combats, "losers")
#load_data
types_combats = lsd.load_dict("type_combat")
winner_combats = lsd.load_dict("winners")
loser_combats = lsd.load_dict("losers")
#load data
# plt.figure()
# plt.bar(types_combats.keys(), types_combats.values(), color = 'cyan')
# plt.title("Pokemon in combat by type")
# plt.xlabel("Maximum five types are = " + str(get_max3(types_combats)))
# plt.xticks(rotation=50)
# plt.savefig("Analysis/combat_types_hist.png", bbox_inches = 'tight')
#
# plt.figure()
# plt.pie([float(v) for v in types_combats.values()], labels=[k for k in types_combats.keys()],
# autopct='%1.2f')
# plt.title("Percentage of pokemon in combat list by type")
# plt.savefig("Analysis/combat_types_pie.png", bbox_inches = 'tight')
sorted_winner_percentage = {}
for key in types_combats.keys():
sorted_winner_percentage[key] = (winner_combats[key] / types_combats[key])*100
# plt.figure()
# plt.bar(sorted_winner_percentage.keys(), sorted_winner_percentage.values(), color = 'seagreen')
# plt.title("Most winning ratio by type")
# plt.xlabel("Maximum five types are = " + str(get_max3(sorted_winner_percentage)))
# plt.xticks(rotation=50)
# plt.savefig("Analysis/winning_ratio_hist.png", bbox_inches = 'tight')
#
# plt.figure()
# plt.pie([float(v) for v in sorted_winner_percentage.values()], labels=[k for k in sorted_winner_percentage.keys()],
# autopct='%1.2f')
# plt.title("Percentage of winner ratio list by type")
# plt.savefig("Analysis/winning_ratio_pie.png", bbox_inches = 'tight')
# plt.show()
# type_advantage = {}
# type_disadvantage = {}
# for i in range(len(df)):
# if i % 1000 == 0:
# print(str(i)+"/"+str(len(df)) + " done.")
# winner_t1 = df_pok.iloc[df.iloc[i]['Winner'] - 1]['Type 1']
# winner_t2 = df_pok.iloc[df.iloc[i]['Winner'] - 1]['Type 2']
# if df.iloc[i]['Winner'] == df.iloc[i]['First_pokemon']:
# loser_t1 = df_pok.iloc[df.iloc[i]['Second_pokemon']-1]['Type 1']
# loser_t2 = df_pok.iloc[df.iloc[i]['Second_pokemon'] - 1]['Type 2']
# else:
# loser_t1 = df_pok.iloc[df.iloc[i]['First_pokemon'] - 1]['Type 1']
# loser_t2 = df_pok.iloc[df.iloc[i]['First_pokemon'] - 1]['Type 2']
#
# add_dict_to_dict(type_advantage, winner_t1, loser_t1)
# #add_dict_to_dict(type_advantage, winner_t1, loser_t2)
# #add_dict_to_dict(type_advantage, winner_t2, loser_t1)
# #add_dict_to_dict(type_advantage, winner_t2, loser_t2)
#
# add_dict_to_dict(type_disadvantage, loser_t1, winner_t1)
# #add_dict_to_dict(type_disadvantage, loser_t2, winner_t1)
# #add_dict_to_dict(type_disadvantage, loser_t1, winner_t2)
# #add_dict_to_dict(type_disadvantage, loser_t2, winner_t2)
#
# lsd.save_dict(type_advantage, 'type_advantage')
# lsd.save_dict(type_disadvantage, 'type_disadvantage')
type_advantage = lsd.load_dict('type_advantage')
type_disadvantage = lsd.load_dict('type_disadvantage')
for i in type_disadvantage.keys():
for j in types:
if j not in type_disadvantage[i].keys():
type_disadvantage[i][j] = 0
type_superiority = {}
for i in type_advantage.keys():
type_superiority[i] = {}
for j in type_advantage[i].keys():
winning = type_advantage[i][j]
total = type_advantage[i][j] + type_disadvantage[i][j]
type_superiority[i][j] = (winning/total)*100
# for i in type_superiority.keys():
# plt.figure()
# type = []
# vals = []
# colors = []
# for j in sorted(type_superiority[i], key=lambda k: type_superiority[i][k]):
# type.append(j)
# vals.append(type_superiority[i][j])
# colors.append(color_dict[j])
# plt.bar(type, vals, color=colors)
# plt.title(str(i) + " type winnings")
# plt.xticks(rotation=50)
# plt.savefig("Analysis/Type_advantage/" + str(i)+" type.png", bbox_inches = 'tight')
# plt.show()
######## CORRELATION MATRIX #############
# print(type_advantage['Fighting'].keys())
# print(type_disadvantage['Fighting'].keys())
# print(type_superiority['Fighting'].keys())
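# Build the 18x18 matrix of winning percentages (row type vs. column type).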
list2d = []
for type in types:
list_type = []
for against_type in types:
        try:
            list_type.append(type_superiority[type][against_type])
        except KeyError:
            # No recorded match-up between these two types; treat as 0%.
            type_superiority[type][against_type] = 0
            list_type.append(0)
list2d.append(list_type)
data_2d = np.array(list2d)
fig, ax = plt.subplots()
image = ax.imshow(data_2d)
cbar = ax.figure.colorbar(image, ax=ax)
cbar.ax.set_ylabel("Winning percentage", rotation=-90, va="bottom")
ax.set_xticks(np.arange(len(types)))
ax.set_yticks(np.arange(len(types)))
ax.set_xticklabels(types)
ax.set_yticklabels(types)
plt.setp(ax.get_xticklabels(), rotation=45, ha = 'right', rotation_mode = 'anchor')
#write the data
# for i in range(len(types)):
# for j in range(len(types)):
# text = ax.text(j,i,list2d[i][j], ha='center', va='center', color='w')
ax.set_title("Winning percentage relation by type")
# fig.tight_layout()
plt.savefig("Analysis/Correlation graph.png", bbox_inches = 'tight')
plt.show()
|
[
"najibhaq98@gmail.com"
] |
najibhaq98@gmail.com
|
282fe020affa423a1227fc0cf8540aa0ac8ae99c
|
ef8239b2f564d6692270236b69bff25792ea27d1
|
/day17/day17.py
|
20aa8b13e82743d61ce7094773cbf900c5d9f2dc
|
[] |
no_license
|
riccardosven/adventofcode2019
|
30bdd637ed0f2ccd121d058c3713023ad9df57f5
|
2ffc427f42332ca76813c0732e170ceaffadf6dc
|
refs/heads/master
| 2020-12-04T20:58:59.826386
| 2020-01-14T22:07:41
| 2020-01-14T22:07:41
| 231,899,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,371
|
py
|
from intcodecomputer import IntcodeComputer
class VacuumRobot:
"ASCIIRobot"
def __init__(self):
with open("input", 'r') as fhandle:
program = fhandle.read().strip()
self.map = None
self.brain = IntcodeComputer(program)
def getmap(self):
"Explore area to find alignment parameters"
view = []
try:
while True:
char = chr(self.brain.step(None))
view.append(char)
except StopIteration:
pass
view = list("".join(view).strip().split("\n"))
width = len(view[0])
height = sum(1 for row in view if len(row) == len(view[0]))
assert height == len(view)
alignpar = 0
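        # An intersection is a scaffold cell ('#') with scaffold on all four
        # sides; the alignment parameter sums row * column over intersections.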
for i in range(len(view)):
for j in range(len(view[0])):
if view[i][j] == "#":
                    neighbors = 0
                    neighbors += 1 if i > 0 and view[i-1][j] == "#" else 0
                    neighbors += 1 if i < height-1 and view[i+1][j] == "#" else 0
                    neighbors += 1 if j > 0 and view[i][j-1] == "#" else 0
                    neighbors += 1 if j < width-1 and view[i][j+1] == "#" else 0
                    if neighbors == 4:
alignpar += i*j
self.map = view
return alignpar
    def showmap(self):
        "print map from viewport"
        # getmap() returns the alignment parameter; the view itself is
        # stored on self.map as a list of row strings.
        self.getmap()
        print("\n".join(self.map))
def execute(self, instructions):
for instruction in instructions:
self.brain.step(ord(instruction))
def override(self, program, funcA, funcB, funcC, video="y"):
self.brain.code[0] = 2
# Main Program
self.execute(program)
# Function A
self.execute(funcA)
# Function B
self.execute(funcB)
# Function C
self.execute(funcC)
# Video
self.execute(video+"\n")
while True:
self.brain.step(0)
def star1():
bot = VacuumRobot()
print("Star 1:", bot.getmap())
def star2():
bot = VacuumRobot()
try:
bot.override("A,B,A,C,B,C,A,C,B,C\n", "L,8,R,10,L,10\n", "R,10,L,8,L,8,L,10\n", "L,4,L,6,L,8,L,8\n")
except StopIteration:
pass
    print("Star 2:", bot.brain.outval)
if __name__ == "__main__":
star1()
|
[
"riccardosven@gmail.com"
] |
riccardosven@gmail.com
|
37654f22a2fa9c7ce7f464d46b677ecfbe81feb7
|
8c91660020fd2b83892d3dd9b0b3af10a13448d4
|
/CALAMP-SE.py
|
bb0328f276e0f0f06722bfd347e2bfba20ff95ee
|
[] |
no_license
|
krzakala/RandomPythonCodes
|
7d5495563071616ff16f34de50ee533ef4540112
|
c4568df50c4cdbc169591380edca8e0befaf0bcb
|
refs/heads/master
| 2020-04-13T08:55:13.355550
| 2019-01-29T06:15:59
| 2019-01-29T06:15:59
| 163,096,035
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,122
|
py
|
from math import exp, sqrt, erf, erfc, pi
from scipy.integrate import quad
import numpy as np
import matplotlib.pyplot as plt
# Routines
def Gauss(x):
return exp(-x * x / 2) / sqrt(2 * pi)
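# State-evolution update of the overlap mx: integrate the squared input
# denoiser f_x against Gaussian measures for the zero and non-zero
# components of the prior, weighted by 1 - rho and rho.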
def new_mx(mhat, alpha, rhoX, f_x):
def f_to_int(x):
return Gauss(x) * \
(f_x(1. / (alpha * mhat), 0 + x / sqrt((alpha * mhat)), rhoX)) ** 2
(int1, err1) = quad(f_to_int, -10, 10)
int2 = 0
if (rhoX > 0.001):
def g_to_int(x):
return (Gauss(x) *
(f_x(1. / (alpha * mhat), x *
sqrt(1 + 1. / (alpha * mhat)), rhoX))**2)
(int2, err2) = quad(g_to_int, -10, 10)
return (1 - rhoX) * int1 + (rhoX) * int2
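# Score function g_out of the output channel; the erfc terms suggest a
# binary output Y in {-1, +1} obtained by thresholding |Z| at theta.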
def gout(w, Y, V, theta=1):
V = V + 1e-6
A = ((2 * Y) / (sqrt(2 * pi * V)))
B = exp(-(theta**2 + w**2) / (2 * V) - theta * w / V) \
* (exp(2 * theta * w / V) - 1)
if (w > 0):
B = exp(-(theta**2 + w**2) / (2 * V) + theta * w / V) \
* (1 - exp(-2 * theta * w / V))
C = 1E-5 + \
erfc(-Y * (theta + w) / (sqrt(2 * V)))\
- Y * erfc((theta - w) / (sqrt(2 * V)))
return A * B / C
def new_mhat(mx, Z02, theta=1):
V_eff = max(Z02 - mx, 1e-5)
mx = mx + 1e-5
def g(x):
return (gout(x * sqrt(mx), 1, V_eff, theta)**2 *
(1 - 0.5 * erfc((theta + x * sqrt(mx)) / sqrt(2 * V_eff)) -
0.5 * erfc((theta - x * sqrt(mx)) / sqrt(2 * V_eff))) +
(gout(x * sqrt(mx), -1, V_eff, theta)**2) *
(0.5 * erfc((theta + x * sqrt(mx)) / sqrt(2 * V_eff)) + 0.5 *
erfc((theta - x * sqrt(mx)) / sqrt(2 * V_eff)))
)
def f(x):
return Gauss(x) * g(x)
(int1, err1) = quad(f, -5, 5)
return (int1)
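# Scalar denoiser for a Gauss-Bernoulli prior (sparsity rho, mean m,
# variance s2): F_a is the posterior mean, F_b its second-moment term.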
def f_gaussbernoulli(S2, R, rho=0.5, m=0, s2=1):
Z = (1 - rho) * \
exp(-R * R / (2 * S2)) \
+ rho * sqrt(S2 / (S2 + s2)) * exp(-((R - m)**2) / (2 * (S2 + s2)))
UP2 = rho * (1 - rho) \
* exp(- R * R / (2 * S2) - ((R - m)**2) / (2 * (S2 + s2))) \
* (sqrt(S2) / (S2 + s2)**(2.5)) \
* (s2 * S2 * (S2 + s2) + (m * S2 + R * s2)**2)\
+ rho * rho * exp(-((R - m)**2) / ((S2 + s2))) \
* (s2 * S2**2) / (s2 + S2)**2
UP1 = rho * exp(-((R - m)**2) / (2 * (S2 + s2)))\
* (sqrt(S2) / (S2 + s2)**(1.5)) * (m * S2 + R * s2)
F_a = UP1 / Z
F_b = UP2 / Z**2
return F_a, F_b
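# Damped fixed-point iteration of the state-evolution equations: alternate
# the mhat and mx updates (with a 1/2 damping factor) until the overlap
# converges or tmax iterations are reached.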
def perform_DE(mxstart, rhoX, alpha, f_x, theta=0, criterion=1e-6, tmax=1000):
# First compute Z02 and init values
Z02 = rhoX
mx = mxstart - 1e-6
diff = 1
t = 0
mhat = 0
while ((diff > criterion and t < tmax)):
mhat = new_mhat(mx, Z02, theta)
t = t + 1
mx_new = 0.5 * new_mx(mhat, alpha, rhoX, f_x) + 0.5 * mx
diff = abs(mx_new - mx)
mx = mx_new
if (abs(Z02 - mx) < criterion):
break
return Z02 - mx, mx, t
def compute_MSE_range_alpha(rhoX, rangealpha, f_x, theta=0):
valMSEX = np.zeros(rangealpha.size)
valM = np.zeros(rangealpha.size)
valt = np.zeros(rangealpha.size)
mxstart = 0.01
print("alpha, M, t")
for j in np.arange(1, rangealpha.size, 1):
(MSEX, M, t) = perform_DE(mxstart, rhoX, rangealpha[j], f_x, theta)
valMSEX[j] = MSEX
valM[j] = M
valt[j] = t
mxstart = M
print(rangealpha[j], M, t)
return valMSEX, valM, valt
theta = 0.674489
rhoX = 1
def f_x(x, y, z):
return f_gaussbernoulli(x, y, z, 0, 1)[0]
rangealpha = np.arange(0.01, 2, 0.01)
(X1, M1, T1) = compute_MSE_range_alpha(rhoX, rangealpha, f_x, theta)
rangealpha2 = np.arange(2, 0.01, -0.01)
(X2, M2, T2) = compute_MSE_range_alpha(rhoX, rangealpha2, f_x, theta)
plt.subplot(1, 3, 1)
plt.plot(rangealpha, M1, 'b*')
plt.plot(rangealpha2, M2, 'r-')
plt.ylabel('overlap')
plt.xlabel('alpha')
plt.subplot(1, 3, 2)
plt.plot(rangealpha, T1, 'b*')
plt.plot(rangealpha2, T2, 'r-')
plt.ylabel('iteration time')
plt.xlabel('alpha')
plt.subplot(1, 3, 3)
plt.plot(rangealpha, X1, 'b*')
plt.plot(rangealpha2, X2, 'r-')
plt.ylabel('MSE')
plt.xlabel('alpha')
plt.show()
|
[
"florent.krzakala@gmail.com"
] |
florent.krzakala@gmail.com
|
940e8a9191060dc89709ec9eec13b63431f71163
|
173e0aed80b0d0c01252dd2891be6967f60ce008
|
/run/idp-sql/test/e2e_test.py
|
b645759923d4219e4b34249b19f35db6cd81f52d
|
[
"Apache-2.0"
] |
permissive
|
yuriatgoogle/python-docs-samples
|
a59298504c73d7f272637b033662d920dfcc314b
|
9fb1bf82b447e920fe9b80564cc110d1e50f43ab
|
refs/heads/master
| 2023-04-08T03:36:18.691386
| 2021-02-28T17:17:57
| 2021-02-28T17:17:57
| 337,138,011
| 1
| 0
|
Apache-2.0
| 2021-02-28T17:17:58
| 2021-02-08T16:30:52
|
Python
|
UTF-8
|
Python
| false
| false
| 6,062
|
py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This test creates a Cloud SQL instance, a Cloud Storage bucket, associated
# secrets, and deploys a Django service
import json
import os
import subprocess
import uuid
import firebase_admin # noqa: F401
from firebase_admin import auth # noqa: F401
import pytest
import requests
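# Initialize the default Firebase app from Application Default Credentials.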
default_app = firebase_admin.initialize_app()
# Unique suffix to create distinct service names
SUFFIX = uuid.uuid4().hex[:10]
GOOGLE_CLOUD_PROJECT = os.environ.get("GOOGLE_CLOUD_PROJECT", None)
if not GOOGLE_CLOUD_PROJECT:
raise Exception("'GOOGLE_CLOUD_PROJECT' env var not found")
SERVICE_NAME = os.environ.get("SERVICE_NAME", None)
if not SERVICE_NAME:
print(
"'SERVICE_NAME' envvar not found. Defaulting to 'idp-sql' with a unique suffix"
)
SERVICE_NAME = f"idp-sql-{SUFFIX}"
SAMPLE_VERSION = os.environ.get("SAMPLE_VERSION", None)
REGION = "us-central1"
PLATFORM = "managed"
# Retrieve Cloud SQL test config
POSTGRES_INSTANCE = os.environ.get("POSTGRES_INSTANCE", None)
if not POSTGRES_INSTANCE:
raise Exception("'POSTGRES_INSTANCE' env var not found")
# Presuming POSTGRES_INSTANCE comes in the form project:region:instance
# Require the short form in some cases.
# POSTGRES_INSTANCE_FULL: project:region:instance
# POSTGRES_INSTANCE_NAME: instance only
if ":" in POSTGRES_INSTANCE:
POSTGRES_INSTANCE_FULL = POSTGRES_INSTANCE
POSTGRES_INSTANCE_NAME = POSTGRES_INSTANCE.split(":")[-1]
else:
POSTGRES_INSTANCE_FULL = f"{GOOGLE_CLOUD_PROJECT}:{REGION}:{POSTGRES_INSTANCE}"
POSTGRES_INSTANCE_NAME = POSTGRES_INSTANCE
POSTGRES_DATABASE = f"idp-sql-database-{SUFFIX}"
POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD", None)
if not POSTGRES_PASSWORD:
raise Exception("'POSTGRES_PASSWORD' env var not found")
# Firebase key to create Id Tokens
IDP_KEY = os.environ.get("IDP_KEY", None)
if not IDP_KEY:
raise Exception("'IDP_KEY' env var not found")
@pytest.fixture
def deployed_service() -> str:
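    # Adjacent f-strings below concatenate into a single comma-separated
    # value for the gcloud --substitutions flag.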
substitutions = [
f"_SERVICE={SERVICE_NAME},"
f"_PLATFORM={PLATFORM},"
f"_REGION={REGION},"
f"_DB_NAME={POSTGRES_DATABASE},"
f"_DB_INSTANCE={POSTGRES_INSTANCE_NAME},"
f"_DB_PASSWORD={POSTGRES_PASSWORD},"
f"_CLOUD_SQL_CONNECTION_NAME={POSTGRES_INSTANCE_FULL},"
]
if SAMPLE_VERSION:
substitutions.append(f"_SAMPLE_VERSION={SAMPLE_VERSION}")
subprocess.run(
[
"gcloud",
"builds",
"submit",
"--project",
GOOGLE_CLOUD_PROJECT,
"--config",
"./test/e2e_test_setup.yaml",
"--substitutions",
]
+ substitutions,
check=True,
)
service_url = (
subprocess.run(
[
"gcloud",
"run",
"services",
"describe",
SERVICE_NAME,
"--project",
GOOGLE_CLOUD_PROJECT,
"--platform",
PLATFORM,
"--region",
REGION,
"--format",
"value(status.url)",
],
stdout=subprocess.PIPE,
check=True,
)
.stdout.strip()
.decode()
)
yield service_url
# Cleanup
substitutions = [
f"_SERVICE={SERVICE_NAME},"
f"_PLATFORM={PLATFORM},"
f"_REGION={REGION},"
f"_DB_NAME={POSTGRES_DATABASE},"
f"_DB_INSTANCE={POSTGRES_INSTANCE_NAME},"
]
if SAMPLE_VERSION:
substitutions.append(f"_SAMPLE_VERSION={SAMPLE_VERSION}")
subprocess.run(
[
"gcloud",
"builds",
"submit",
"--project",
GOOGLE_CLOUD_PROJECT,
"--config",
"./test/e2e_test_cleanup.yaml",
"--substitutions",
]
+ substitutions,
check=True,
)
@pytest.fixture
def jwt_token() -> str:
custom_token = auth.create_custom_token("a-user-id").decode("UTF-8")
resp = requests.post(
f"https://identitytoolkit.googleapis.com/v1/accounts:signInWithCustomToken?key={IDP_KEY}",
data=json.dumps({"token": custom_token, "returnSecureToken": True}),
)
response = resp.json()
assert "error" not in response.keys()
assert "idToken" in response.keys()
id_token = response["idToken"]
yield id_token
# no cleanup required
def test_end_to_end(jwt_token: str, deployed_service: str) -> None:
token = jwt_token
service_url = deployed_service
client = requests.session()
# Can successfully make a request
response = client.get(service_url)
assert response.status_code == 200
# Can make post with token
response = client.post(
service_url, data={"team": "DOGS"}, headers={"Authorization": f"Bearer {token}"}
)
assert response.status_code == 200
assert "Vote successfully cast" in response.content.decode("UTF-8")
# Confirm updated results
response = client.get(service_url)
assert response.status_code == 200
assert "🐶" in response.content.decode("UTF-8")
# Cannot make post with bad token
response = client.post(
service_url,
data={"team": "DOGS"},
headers={"Authorization": "Bearer iam-a-token"},
)
assert response.status_code == 403
# Cannot make post with no token
response = client.post(service_url, data={"team": "DOGS"})
assert response.status_code == 401
|
[
"noreply@github.com"
] |
yuriatgoogle.noreply@github.com
|
212e410ccad21beacde4913713929dda8af4a759
|
75d481023d5ee8a6e23903cd48a001c467a90c66
|
/02/002_2.py
|
10910c1aa2cb8790e1652010d3b0979e23efe0c7
|
[] |
no_license
|
martholomew/AdventofCode
|
695e7fafb9d3b436c9fa19210dbf87192130b278
|
4c590c59e86e0861944cbb6788d29d446e596619
|
refs/heads/master
| 2020-09-22T14:04:36.990389
| 2019-12-13T02:06:36
| 2019-12-13T02:06:36
| 225,232,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
#!/bin/python
import json
with open("input.json", "r") as f:
array_orig = json.load(f)
output = 0
for noun in range(100):
for verb in range(100):
        array = list(array_orig)  # fresh copy of the program for each attempt
array[1] = noun
array[2] = verb
op = 0
inst = 0
        while inst != 99:
            inst = array[op]
            if inst == 1:
                array[array[op + 3]] = array[array[op + 1]] + array[array[op + 2]]
            elif inst == 2:
                array[array[op + 3]] = array[array[op + 1]] * array[array[op + 2]]
            elif inst == 99:
                output = array[0]
                if output == 19690720:
                    print(noun)
                    print(verb)
                    exit()
            else:
                # Unknown opcode: dump state and bail out.
                print("ERROR")
                print(array)
                print(array[op])
                exit()
            op += 4
|
[
"18152455+martholomew@users.noreply.github.com"
] |
18152455+martholomew@users.noreply.github.com
|
5dcffca1e87e376e5887cd67845551e61f6561f1
|
8d7617bc97792cc5a3efa083dced987e11ea0d49
|
/RegionEntry:Exit/Detection/utils/temp_for_testing.py
|
5bbed87c3f64f0631f17ea6939bc309371308cb4
|
[] |
no_license
|
adityabansal98/AI_City_Track1_Insight-DCU
|
257f319a874c2059cecef3093257b24d006dc200
|
0a98fc05fe0e1a6fe4fd18930345c8f7a1097972
|
refs/heads/master
| 2022-06-24T19:56:47.092444
| 2020-05-10T19:24:31
| 2020-05-10T19:24:31
| 262,855,208
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,362
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Multiple
"""
import os
import glob
import matplotlib.pyplot as plt
import csv
import re
import numpy as np
from ast import literal_eval
from dipy.tracking.streamline import set_number_of_points
# from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import ResampleFeature
from dipy.segment.metric import AveragePointwiseEuclideanMetric
from dipy.io.streamline import load_tractogram
from dipy.tracking.streamline import Streamlines
from dipy.segment.clustering import QuickBundles
from dipy.io.pickles import save_pickle
from dipy.data import get_fnames
from dipy.viz import window, actor
import pandas as pd
from sklearn.cluster import KMeans
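# Parse per-object trajectories from the tracker CSV: each row holds an object
# ID, a class label, and a list of (x, y, w, h) boxes; box centres are derived.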
def read_track_trajectories(filepath):
track_info = {}
with open(filepath, mode = 'r') as csv_file:
reader = csv.reader(csv_file)
linecount = 0
for row in reader:
obj_ID, obj_class, obj_trajectory = row
obj_ID = int(obj_ID)
track_info[obj_ID] = {}
track_info[obj_ID]['obj_class'] = obj_class
obj_trajectory = literal_eval(obj_trajectory)
track_info[obj_ID]['traj'] = obj_trajectory
track_info[obj_ID]['traj_point'] = []
for pos in obj_trajectory:
x = int(pos[0] + pos[2]/2)
y = int(pos[1] + pos[3]/2)
track_info[obj_ID]['traj_point'].append([x,y])
linecount += 1
return track_info
def main(args=None):
# fname = get_fnames('fornix')
#
# fornix = load_tractogram(fname, 'same', bbox_valid_check=False)
# streamlines = fornix.streamlines
#
# temp_data = streamlines.data[0]
#
# print(temp_data)
#
# print(len(streamlines.data))
#
# qb = QuickBundles(threshold=10.)
# clusters = qb.cluster(streamlines)
filepath = '/home/venkatesh/Desktop/Vehicle_counting_pipeline (extract.me)/Results/track_history/cam_7_dawn.csv'
track_information = read_track_trajectories(filepath)
plt.figure(figsize=(6, 6))
obj_trajectory = []
x_pos = []
y_pos = []
data = []
for obj_id in track_information:
cord_points = np.array(track_information[obj_id]['traj_point'])
# data = []
# x_pos = []
# y_pos = []
for index in range(len(cord_points)):
x_pos.append(cord_points[index, 0])
y_pos.append(cord_points[index, 1])
data.append([cord_points[index, 0], cord_points[index, 1]])
obj_trajectory.append(np.asarray(data, dtype = 'int32'))
# print(obj_trajectory[0])
# qb = QuickBundles(threshold=4.)
streamlines = set_number_of_points(obj_trajectory, nb_points=50)
# print(streamlines[0])
# clusters = qb.cluster(streamlines)
# Streamlines will be resampled to 24 points on the fly.
# feature = ResampleFeature(nb_points=24)
# metric = AveragePointwiseEuclideanMetric(feature=streamlines) # a.k.a. MDF
qb = QuickBundles(threshold=200.)
clusters = qb.cluster(streamlines)
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", list(map(len, clusters)))
# Enables/disables interactive visualization
interactive = False
ren = window.Renderer()
ren.SetBackground(1, 1, 1)
ren.add(actor.streamtube(streamlines, window.colors.white))
window.record(ren, out_path='fornix_initial.png', size=(600, 600))
if interactive:
window.show(ren)
# wcss = []
# for i in range(1, 11):
# kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0)
# kmeans.fit(streamlines)
# wcss.append(kmeans.inertia_)
# plt.plot(range(1, 11), wcss)
# plt.title('Elbow Method')
# plt.xlabel('Number of clusters')
# plt.ylabel('WCSS')
# plt.show()
#
# kmeans = KMeans(n_clusters=4, init='k-means++', max_iter=300, n_init=10, random_state=0)
# pred_y = kmeans.fit_predict(obj_trajectory)
# plt.plot(cord_points[:, 0], cord_points[:, 1])
# plt.plot(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1])
# # plt.plot(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red')
# plt.show()
return 0
if __name__ == '__main__':
main()
|
[
"f20160088@goa.bits-pilani.ac.in"
] |
f20160088@goa.bits-pilani.ac.in
|
436f0796ac7d5fc630c86f58ebf6c8955c84a182
|
53f9174bf0a2792ac7f16c052e7d9f7750fd55b3
|
/object/page/base_page/new_thing.py
|
612e4accecb175ed3dec4bc7854e5bc7cbe788f8
|
[] |
no_license
|
restart759/Test_framework
|
cbab59e3d0708587da73b815860e987ae6cc78a6
|
5bf51be27938f473e916533a096dff60ef4a482b
|
refs/heads/master
| 2020-12-28T18:58:15.892855
| 2020-02-05T12:50:59
| 2020-02-05T12:50:59
| 238,451,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,348
|
py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from object.baseuse.base_func import BaseFunc
from object.page.base_page.page_function import PageFunction
from object.page.apply_page.new_apply_page import NewApplyPage
from object.page.house_assessment_page.new_assessment_page import NewAssessment
import time
import os
class NewThing(BaseFunc):
    # New application form - create action
def new_apply(self, customer_name, id_no, loan_type, sub_type, client_type):
sub_type1 = '//label[contains(text(),"车辆质押")]'
sub_type2 = '//label[contains(text(),"车辆抵押")]'
sub_type3 = '//label[contains(text(),"车辆让与担保")]'
sub_type4 = '// label[contains(text(), "过桥")]'
new_button = '//span[text()=" 业务交单"]/following::button[text()=" 新建申请件"][2]'
f_new_button = '//span[text()=" 业务交单"]/following::button[text()=" 新建申请件"]'
company = '//label[contains(text(),"企业客户")]'
client = '//span[contains(text(),"客户姓名")]/following::input[1]'
id = '//span[contains(text(),"身份证")]/following::input[1]'
heshi = '//button[contains(text(),"核实")]'
new = '//button[contains(text(),"新建申请单")]'
pop_up_window = '// div[text() = "新建申请件"]'
applyno = '//div[contains(text(),"借款信息[")]'
self.wait_until_clickable(new_button)
if loan_type == '过桥信审' or loan_type == '车贷信审':
self.find_element(new_button).click()
elif loan_type == '房抵信审':
self.find_element(f_new_button).click()
self.wait_until_visible(pop_up_window)
if sub_type == '过桥':
self.find_element(sub_type4).click()
elif sub_type == '车辆质押':
self.find_element(sub_type1).click()
elif sub_type == '车辆抵押':
self.find_element(sub_type2).click()
elif sub_type == '车辆让与担保':
self.find_element(sub_type3).click()
        # Verify the customer information
if client_type == '企业':
self.find_element(company).click()
self.find_element(client).send_keys(customer_name)
self.find_element(id).send_keys(id_no)
self.find_element(heshi).click()
self.wait_until_visible(new)
self.find_element(new).click()
self.wait_until_visible(applyno)
apply_no = self.find_element(applyno).text
apply = apply_no.split('[')[-1].split(']')[0]
return apply
    # New assessment work order - create action
def new_assessment(self):
new_button = '//button[text()=" 新建评估申请"]'
upload_button = '//button[contains(text(),"权利人页照片")]'
customer_name = '//span[contains(text(),"产权人姓名")]/following::input[1]'
house_property = '//span[contains(text(),"房产类型")]/following::input[1]'
earth_usage_cert_no = '//span[contains(text(),"房屋所有权证号")]/following::input[1]'
room = '//span[text()="居室"]/following::input[1]'
city_code = '//span[text()="省份"]/following::input[1]'
city_name = '//span[text()="城市"]/following::input[1]'
district = '//span[text()="区/县"]/following::input[1]'
detail_address = '//span[contains(text(),"详细地址")]/following::input[1]'
com_name = '//span[contains(text(),"小区名称")]/following::input[1]'
submit = '//button[text()="提交"]'
self.wait_until_visible(new_button)
self.find_element(new_button).click()
self.wait_until_visible(upload_button)
self.find_element(upload_button).click()
time.sleep(1)
os.system("C:\\Users\\Administrator\\PycharmProjects\\Test_framework\\data\\up_pic.exe")
self.wait_until_visible(customer_name)
        # Verify the property ownership certificate information
self.find_element(customer_name).send_keys('张一')
self.driver.execute_script('arguments[0].removeAttribute("readonly")', self.find_element(house_property))
self.find_element(house_property).clear()
self.find_element(house_property).send_keys("住宅")
self.find_element(earth_usage_cert_no).send_keys('十堰房权证茅箭区字第20176637号')
self.driver.execute_script('arguments[0].removeAttribute("readonly")', self.find_element(room))
self.find_element(room).clear()
self.find_element(room).send_keys("一居室")
self.driver.execute_script('arguments[0].removeAttribute("readonly")', self.find_element(city_code))
self.find_element(city_code).clear()
self.find_element(city_code).send_keys("江苏省")
self.driver.execute_script('arguments[0].removeAttribute("readonly")', self.find_element(city_name))
self.find_element(city_name).clear()
self.find_element(city_name).send_keys("连云港市")
self.driver.execute_script('arguments[0].removeAttribute("readonly")', self.find_element(district))
self.find_element(district).clear()
self.find_element(district).send_keys("赣榆区")
self.find_element(detail_address).send_keys('茅箭区二堰街办堰桥街5号1幢1-2-1')
self.find_element(com_name).send_keys('吉普三号')
self.find_element(submit).click()
return NewAssessment(self.driver)
|
[
"418129463@qq.com"
] |
418129463@qq.com
|
e89d14798538b838b4d7372b3443deeb39e42792
|
bb20aaef5af38df41e35ff5f996d91e407f2d152
|
/diesis/TagHelper.py
|
83ddaac7491c0c0a07705f63821763e3c399ac0b
|
[
"MIT"
] |
permissive
|
RyanJ93/diesis
|
540c53c7c6c940e4fda4322571a720618b2a2f7b
|
56b98f36bae56d63fa8558f3ac1d0dfdca79e1a6
|
refs/heads/master
| 2021-07-12T04:34:58.868738
| 2020-07-19T00:58:54
| 2020-07-19T00:58:54
| 170,932,348
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,062
|
py
|
from typing import Set, Any
from mutagen.mp4 import MP4, MP4Cover
from mutagen.id3 import ID3, TIT2, TPE1, TALB, TYER, TCON, TCOM, USLT, TPOS, TRCK, APIC, COMM, TPE2, WOAF, PictureType, TEXT
from mutagen.flac import FLAC, Picture
from mutagen.aiff import AIFF
from diesis import Song, Config
import mutagen
import base64
class TagHelper:
SUPPORTED_FORMATS: Set[str] = {'m4a', 'mp3', 'flac', 'aiff', 'aif', 'ogg'}
song: Song = None
@staticmethod
def __str(value: Any) -> str:
"""
Converts a given value into a string converting "None" values into an empty string instead of a literal "None".
:param value: The value to convert.
:type value: Any
:return: The string representation of the given value.
:rtype: str
"""
if value is None:
return ''
return str(value)
def __save_m4a(self) -> None:
"""
Sets the file tags according to song properties using the format required by M4A files.
"""
tags: Any = self.song.get_tag_object()
# Set the tags value according to song properties.
tags['©nam'] = [TagHelper.__str(self.song.get_title())]
tags['©ART'] = [TagHelper.__str(self.song.get_artist())]
tags['aART'] = [TagHelper.__str(self.song.get_album_artist())]
tags['©alb'] = [TagHelper.__str(self.song.get_album())]
tags['©day'] = [TagHelper.__str(self.song.get_year())]
tags['©gen'] = [TagHelper.__str(self.song.get_genre())]
tags['©wrt'] = [TagHelper.__str(self.song.get_composer())]
tags['©grp'] = [TagHelper.__str(self.song.get_group())]
if self.song.get_explicit():
tags['rtng'] = [1]
else:
tags['rtng'] = [2]
tags['©lyr'] = [TagHelper.__str(self.song.get_lyrics())]
        # TODO: Song properties not currently supported: track_url, album_url, lyrics_writer
i: int = self.song.get_disc_number()
total: int = self.song.get_disc_count()
if i > 0 and total > 0:
tags['disk'] = [(
self.song.get_disc_number(),
self.song.get_disc_count()
)]
i = self.song.get_track_number()
total = self.song.get_track_count()
if i > 0 and total > 0:
tags['trkn'] = [(
self.song.get_track_number(),
self.song.get_track_count()
)]
# Generate the tag representation of the cover image, if defined.
if self.song.get_cover_path() is not None:
with open(self.song.get_cover_path(), 'rb') as cover:
tags['covr'] = [MP4Cover(cover.read(), imageformat=MP4Cover.FORMAT_JPEG)]
if Config.Config.get_watermark():
# Add the application watermark.
tags['©cmt'] = Config.Config.get_watermark_text()
# Save the edited tags to the file.
tags.save()
def __save_id3(self) -> None:
"""
Sets the file tags according to song properties using the ID3 format.
"""
tags: Any = self.song.get_tag_object()
# Set the tags value according to song properties.
tags['TIT2'] = TIT2(encoding=3, text=TagHelper.__str(self.song.get_title()))
tags['TPE1'] = TPE1(encoding=3, text=TagHelper.__str(self.song.get_artist()))
tags['TPE2'] = TPE2(encoding=3, text=TagHelper.__str(self.song.get_album_artist()))
tags['TALB'] = TALB(encoding=3, text=TagHelper.__str(self.song.get_album()))
tags['TYER'] = TYER(encoding=3, text=TagHelper.__str(self.song.get_year()))
tags['TCON'] = TCON(encoding=3, text=TagHelper.__str(self.song.get_genre()))
        tags['TCOM'] = TCOM(encoding=3, text=TagHelper.__str(self.song.get_composer()))
        tags['WOAF'] = WOAF(url=TagHelper.__str(self.song.get_track_url()))
tags['USLT'] = USLT(encoding=3, text=TagHelper.__str(self.song.get_lyrics()))
tags['TEXT'] = TEXT(encoding=3, text=TagHelper.__str(self.song.get_lyrics_writer()))
        # TODO: Song properties not currently supported: explicit, album_url, group
i: int = self.song.get_disc_number()
total: int = self.song.get_disc_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_disc_number()) + '/' + TagHelper.__str(self.song.get_disc_count())
tags['TPOS'] = TPOS(encoding=3, text=c)
i = self.song.get_track_number()
total = self.song.get_track_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_track_number()) + '/' + TagHelper.__str(self.song.get_track_count())
tags['TRCK'] = TRCK(encoding=3, text=c)
# Generate the tag representation of the cover image, if defined.
if self.song.get_cover_path() is not None:
with open(self.song.get_cover_path(), 'rb') as cover:
tags['APIC'] = APIC(encoding=3, mime='image/jpeg', type=3, data=cover.read())
if Config.Config.get_watermark():
# Add the application watermark.
tags['COMM'] = COMM(encoding=3, text=Config.Config.get_watermark_text())
# Save the edited tags to the file.
tags.save()
def __save_flac(self) -> None:
"""
Sets the file tags according to song properties using the format required by FLAC files.
"""
tags: Any = self.song.get_tag_object()
# Set the tags value according to song properties.
tags['title'] = [TagHelper.__str(self.song.get_title())]
tags['artist'] = [TagHelper.__str(self.song.get_artist())]
tags['albumartist'] = [TagHelper.__str(self.song.get_album_artist())]
tags['album'] = [TagHelper.__str(self.song.get_album())]
tags['year'] = [TagHelper.__str(self.song.get_year())]
tags['genre'] = [TagHelper.__str(self.song.get_genre())]
tags['composer'] = [TagHelper.__str(self.song.get_composer())]
tags['wwwaudiofile'] = [TagHelper.__str(self.song.get_track_url())]
tags['wwwartist'] = [TagHelper.__str(self.song.get_album_url())]
i: int = self.song.get_disc_number()
total: int = self.song.get_disc_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_disc_number()) + '/' + TagHelper.__str(self.song.get_disc_count())
tags['discnumber'] = [c]
i = self.song.get_track_number()
total = self.song.get_track_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_track_number()) + '/' + TagHelper.__str(self.song.get_track_count())
tags['tracknumber'] = [c]
tags['lyrics'] = [TagHelper.__str(self.song.get_lyrics())]
tags['lyricist'] = [TagHelper.__str(self.song.get_lyrics_writer())]
tags['grouping'] = [TagHelper.__str(self.song.get_group())]
        # TODO: Song properties not currently supported: explicit
if self.song.get_cover_path() is not None:
with open(self.song.get_cover_path(), 'rb') as cover:
# Generate the picture object representing the cover image.
picture = Picture()
picture.data = cover.read()
picture.type = PictureType.COVER_FRONT
picture.mime = u'image/jpeg'
picture.width = 1000
picture.height = 1000
picture.depth = 16
# Remove all the pictures from this file.
tags.clear_pictures()
# Add the picture that has been found.
tags.add_picture(picture)
if Config.Config.get_watermark():
# Add the application watermark.
tags['comment'] = [Config.Config.get_watermark_text()]
# Save the edited tags to the file.
tags.save()
def __save_aiff(self) -> None:
"""
Sets the file tags according to specification for AIFF audio format.
"""
        # AIFF files carry ID3 tags, just like MP3 files do.
        self.__save_id3()
def __save_ogg(self) -> None:
"""
Sets the file tags according to the standard used in OGG files.
"""
tags: Any = self.song.get_tag_object()
# Set the tags value according to song properties.
tags['title'] = [TagHelper.__str(self.song.get_title())]
tags['artist'] = [TagHelper.__str(self.song.get_artist())]
tags['albumartist'] = [TagHelper.__str(self.song.get_album_artist())]
tags['album'] = [TagHelper.__str(self.song.get_album())]
tags['year'] = [TagHelper.__str(self.song.get_year())]
tags['genre'] = [TagHelper.__str(self.song.get_genre())]
tags['composer'] = [TagHelper.__str(self.song.get_composer())]
tags['wwwaudiofile'] = [TagHelper.__str(self.song.get_track_url())]
tags['wwwartist'] = [TagHelper.__str(self.song.get_album_url())]
i: int = self.song.get_disc_number()
total: int = self.song.get_disc_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_disc_number()) + '/' + TagHelper.__str(self.song.get_disc_count())
tags['discnumber'] = [c]
i = self.song.get_track_number()
total = self.song.get_track_count()
if i > 0 and total > 0:
c: str = TagHelper.__str(self.song.get_track_number()) + '/' + TagHelper.__str(self.song.get_track_count())
tags['tracknumber'] = [c]
tags['lyrics'] = [TagHelper.__str(self.song.get_lyrics())]
tags['lyricist'] = [TagHelper.__str(self.song.get_lyrics_writer())]
tags['grouping'] = [TagHelper.__str(self.song.get_group())]
        if self.song.get_cover_path() is not None:
            with open(self.song.get_cover_path(), 'rb') as cover:
                # Vorbis expects a base64-encoded FLAC picture block, not raw bytes.
                picture = Picture()
                picture.data = cover.read()
                picture.type = PictureType.COVER_FRONT
                picture.mime = u'image/jpeg'
                tags['METADATA_BLOCK_PICTURE'] = [base64.b64encode(picture.write()).decode('ascii')]
if Config.Config.get_watermark():
# Add the application watermark.
tags['comment'] = [Config.Config.get_watermark_text()]
tags.save()
@staticmethod
def get_supported_formats() -> Set[str]:
"""
Returns the extensions of all the supported file types.
:return: A set containing the supported file extensions.
:rtype: Set[str]
"""
return TagHelper.SUPPORTED_FORMATS
@staticmethod
def generate_tag_object(song: Song) -> Any:
"""
Generates the object that allows to handle the tags embedded in the audio file.
:param song: An instance of the class "Song" representing the audio file to process.
:type song: Song
:return: An object representing the song's tags.
:rtype: Any
:raise ValueError: If an unsupported file type has been defined in the song object.
"""
extension: str = song.get_extension()
path: str = song.get_path()
if not extension or not path:
return None
if extension == 'm4a':
# Generate the object to process some MPEG based files such as Apple ALAC.
return MP4(path)
if extension == 'mp3':
# Generate the object to process MP3 and similar formats.
return ID3(path)
elif extension == 'flac':
# Generate the object to process FLAC encoded files.
return FLAC(path)
elif extension == 'aif' or extension == 'aiff':
# Generate the object to process AIFF encoded files.
return AIFF(path)
elif extension == 'ogg':
# Generate the object to process OGG files.
return mutagen.File(path)
else:
raise ValueError('Unsupported file type.')
def set_song(self, song: Song) -> None:
"""
Sets the song that will be processed.
:param song: An instance of the class "Song" representing the song to process.
:type song: Song
"""
self.song = song
def get_song(self) -> Song:
"""
Returns the song that will be processed.
:return: An instance of the class "Song" representing the song to process.
:rtype Song
"""
return self.song
def __init__(self, song: Song):
"""
The class constructor.
:param song: An instance of the class "Song" representing the audio file to process.
:type song: Song
"""
self.set_song(song)
def fetch(self) -> None:
"""
Loads the tags from the audio file defined and then set them into the song object.
:raise ValueError: If no song has been defined.
:raise ValueError: If an unsupported file type has been defined in the song object.
"""
if self.song is None:
raise ValueError('No song has been defined.')
tags: Any = self.song.get_tag_object()
extension: str = self.song.get_extension()
# Load the song title and artists required to build the search query used by iTunes API and lyrics look up.
if extension == 'm4a':
if '©nam' in tags and len(tags['©nam']) > 0:
self.song.set_title(tags['©nam'][0])
if '©ART' in tags and len(tags['©ART']) > 0:
self.song.set_artist(tags['©ART'][0])
elif extension == 'mp3':
if 'TIT2' in tags:
self.song.set_title(str(tags['TIT2']))
if 'TPE1' in tags:
self.song.set_artist(str(tags['TPE1']))
elif extension == 'flac':
if 'title' in tags and len(tags['title']) > 0:
self.song.set_title(tags['title'][0])
if 'artist' in tags and len(tags['artist']) > 0:
self.song.set_artist(tags['artist'][0])
elif extension == 'aif' or extension == 'aiff':
if 'TIT2' in tags:
self.song.set_title(str(tags['TIT2']))
if 'TPE1' in tags:
self.song.set_artist(str(tags['TPE1']))
elif extension == 'ogg':
if 'title' in tags and len(tags['title']) > 0:
self.song.set_title(tags['title'][0])
if 'artist' in tags and len(tags['artist']) > 0:
self.song.set_artist(tags['artist'][0])
else:
raise ValueError('Unsupported file type.')
def save(self) -> None:
"""
Sets the file tags according to song properties.
"""
if self.song is None:
raise ValueError('No song has been defined.')
extension: str = self.song.get_extension()
if extension == 'm4a':
self.__save_m4a()
elif extension == 'mp3':
self.__save_id3()
elif extension == 'flac':
self.__save_flac()
elif extension == 'aif' or extension == 'aiff':
self.__save_aiff()
elif extension == 'ogg':
self.__save_ogg()
else:
raise ValueError('Unsupported file type.')
|
[
"info@enricosola.com"
] |
info@enricosola.com
|
ba130b78ed7362d869af01cbe21ac072174c435e
|
1d38f9d4383b4beb9ac642b04926b99c87ce7ef8
|
/harryCloak.py
|
1e4e3e735df16b6c277d2e8f32e7f50f81116e0f
|
[] |
no_license
|
gouelp/invisibility_cloak
|
b172f3006e9952e7f705b3d9795af8c7e37579f0
|
3ac7751146b7ca83bc152b1d3057c5ee0a4a7b87
|
refs/heads/master
| 2022-06-19T20:53:51.988776
| 2020-05-12T14:19:59
| 2020-05-12T14:19:59
| 263,350,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,404
|
py
|
#! /usr/bin/env python
# Author Pierre-Vincent Gouel (pg58)
import roslib
import sys
import rospy
import cv2
import numpy as np
import time
# Import the necessary libraries
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# Initialise the ROS node, an OpenCV bridge instance, and the start time
rospy.init_node('kinect_observer',anonymous=True)
bridge = CvBridge()
ref_image = None
start_time = time.time()
# Callback invoked every time the camera sends a new image
def callback(img):
global bridge, ref_image
    # Convert the ROS image message to an OpenCV BGR image
cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    # Save the background: give the person time to leave the frame, then
    # capture the background between seconds 3 and 4
    current_time = time.time() - start_time
    if 3 < current_time < 4:
        ref_image = cv_image
        print "getting image"
    # Once the program has run long enough (the background is captured)
    elif current_time > 4:
        # Convert the image to HSV, which makes the cloak's color range easy to detect
hsv_img = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)
# Creation of the mask of the red color
lower_cloak1 = np.array([170,150,70])
upper_cloak1 = np.array([180,255,255])
lower_cloak2 = np.array([0,150,70])
upper_cloak2 = np.array([15,255,255])
mask1 = cv2.inRange(hsv_img,lower_cloak1,upper_cloak1)
mask2 = cv2.inRange(hsv_img,lower_cloak2,upper_cloak2)
mask = mask1 + mask2
# Creation of the form used for the filtering and applies erosion and dilation to have a smoother image
kernel = np.ones((3,3),np.uint8)
mask = cv2.erode(mask,kernel,iterations = 1)
mask = cv2.dilate(mask,kernel,iterations =1)
# Create a copy of the background with only the pixels at cloak position visible
trans_img = cv2.bitwise_and(ref_image,ref_image,mask=mask)
        # Black out the cloak region in the real-time captured image
front_img = cv_image
front_img[mask>0] = [0,0,0]
# Superpose the 2 images
final_img = cv2.bitwise_or(front_img,trans_img)
# Show the final image
cv2.imshow("raw image",final_img)
else:
print "moove"
cv2.waitKey(3)
# Subscribe to the rostopic where the image is shown
image_sub = rospy.Subscriber("/image_raw",Image,callback)
rospy.spin()
|
[
"gouelpierrevincent@gmail.com"
] |
gouelpierrevincent@gmail.com
|
9dd216914cd709b3ef97f209c4eaadcfa0b1771d
|
d113a8cec43bd852d0d6e64287ad5880150ce80e
|
/main.py
|
ccae696cd1ebb80e2e6e0d6a084dccad4da7b3b1
|
[] |
no_license
|
Zenzelya/PythonGit
|
cf1affb10076eab5341f91ae1a5b56cae0902a61
|
468df1486699712cf00bf9de21050f7d2b02016a
|
refs/heads/master
| 2021-01-10T14:46:36.999277
| 2015-11-10T11:22:25
| 2015-11-10T11:22:25
| 45,866,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,231
|
py
|
import pygame
import sys
import random
pygame.init()
pygame.font.init()
# Arguments :
size = (840, 640)
snake_size = 1
window = pygame.display.set_mode(size)
pygame.display.set_caption("Snake")
clock = pygame.time.Clock()
done = False
screen = pygame.Surface((400, 400))
punkt = 0
font_menu = pygame.font.Font(None, 50)
def start_cord():
    # Random grid cell (3..28) snapped to 20 px per cell; defined before its
    # first use (the original def appeared only further down the file).
    return random.randint(3, 28) * 20
rect_x = start_cord()
rect_y = start_cord()
black = (0, 0, 0)
white = (255, 255, 255)
blue = (0, 0, 255)
green = (0, 255, 0)
red = (255, 0, 0)
points_font = pygame.font.Font(None, 32)
rect_change_x = rect_x
rect_change_y = rect_y
rect_h = 20
rect_w = 20
moove_direction = random.randint(0, 3)
food_rect_x = start_cord()
food_rect_y = start_cord()
food_rect_h = 20
food_rect_w = 20
points = 0
# -- MOVE:
moove_left = -20
moove_right = +20
moove_up = -20
moove_down = +20
snake_x = []
snake_y = []
snake_dir = []
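# The snake is stored as three parallel lists: segment x, segment y, and the
# direction each segment moves (0 = up, 1 = right, 2 = down, 3 = left).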
#*****************************************************************************
def new_snake():
    # 'nonlocal' is invalid at module scope; these are module-level globals.
    global snake_x, snake_y, snake_dir, rect_x, rect_y, moove_direction
    snake_x = []
    snake_y = []
    snake_dir = []
    rect_x = start_cord()
    rect_y = start_cord()
    moove_direction = random.randint(0, 3)
    snake_x.append(rect_x)
    snake_y.append(rect_y)
    snake_dir.append(moove_direction)
class Menu:
    def __init__(self, punkts=((1024, 600, 'Punkts', (250, 0, 0), (250, 30, 250), 0),)):  # render() expects a sequence of punkt tuples
self.punkts = punkts
def render(self, poverhnost, font, num_punkt):
for i in self.punkts:
if num_punkt == i[5]:
poverhnost.blit(font.render(i[2], 1, i[4]), (i[0], i[1]))
else:
poverhnost.blit(font.render(i[2], 1, i[3]), (i[0], i[1]))
def menu(self):
done_menu = True
punkt = 0
font_menu = pygame.font.Font(None, 50)
pygame.key.set_repeat(0,0)
pygame.mouse.set_visible(True)
""" ----------------------------------- """
while done_menu:
window.blit(screen, (220, 120))
screen.fill((0,100,200))
mp = pygame.mouse.get_pos()
# print(mp)
for i in self.punkts:
if mp[0] > i[0] + 200 and mp[0] < i[0] +300 and mp[1] > i[1] + 110 and mp[1] < i[1] + 160:
punkt = i[5]
self.render(screen, font_menu, punkt)
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_SPACE:
if punkt == 0:
done_menu = False
elif punkt == 1:
sys.exit()
done_menu = False
if e.key == pygame.K_F4:
sys.exit()
if e.key == pygame.K_UP:
if punkt > 0:
punkt -= 1
if e.key == pygame.K_DOWN:
if punkt < len(self.punkts) - 1:
punkt += 1
if e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
if punkt == 0:
done_menu = False
elif punkt == 1:
sys.exit()
pygame.display.flip()
# ----------------------------------= P E R E M E N S =------------------------------------------
snake_x.append(rect_x)
snake_y.append(rect_y)
snake_dir.append(moove_direction)
if snake_dir[0] == 0:
for kor in range(2):
snake_x.append(rect_x)
rect_y += moove_up
snake_y.append(rect_y)
snake_dir.append(snake_dir[0])
if snake_dir[0] == 1:
    for kor in range(2):
        rect_x += moove_right
        snake_x.append(rect_x)
        snake_y.append(rect_y)
        snake_dir.append(moove_direction)
if snake_dir[0] == 2:
    for kor in range(2):
        snake_x.append(rect_x)
        rect_y += moove_down
        snake_y.append(rect_y)
        snake_dir.append(moove_direction)
if snake_dir[0] == 3:
for kor in range(2):
rect_x += moove_left
snake_x.append(rect_x)
snake_y.append(rect_y)
snake_dir.append(moove_direction)
punkts =[(160, 140, 'Play', (250,250,30), (250, 30, 250), 0),
(160, 210, 'Quit', (250, 250, 30), (250, 30, 250), 1)]
game = Menu(punkts)
game.menu()
# ----------------------------------= GO GO GO =------------------------------------------
while not done:
    # ALL EVENT HANDLING MUST GO BELOW THIS COMMENT
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
game.menu()
pygame.key.set_repeat(1,1)
pygame.mouse.set_visible(False)
if snake_dir[0] == 0:
if event.key == pygame.K_LEFT:
snake_dir[0] = 3
if event.key == pygame.K_RIGHT:
snake_dir[0] = 1
if snake_dir[0] == 1:
if event.key == pygame.K_UP:
snake_dir[0] = 0
if event.key == pygame.K_DOWN:
snake_dir[0] = 2
if snake_dir[0] == 2:
if event.key == pygame.K_LEFT:
snake_dir[0] = 3
if event.key == pygame.K_RIGHT:
snake_dir[0] = 1
if snake_dir[0] == 3:
if event.key == pygame.K_UP:
snake_dir[0] = 0
if event.key == pygame.K_DOWN:
snake_dir[0] = 2
end = len(snake_x)
if snake_dir[0] == 0:
snake_y[0] += moove_up
if snake_dir[0] == 1:
snake_x[0] += moove_right
if snake_dir[0] == 2:
snake_y[0] += moove_down
if snake_dir[0] == 3:
snake_x[0] += moove_left
window.fill((50, 50, 50))
    window.blit(points_font.render('Score: ' + str(points), 1, red), (700, 60))
pygame.draw.rect(window, green, [40, 20, 600, 20])
pygame.draw.rect(window, green, [40, 20, 20, 600])
pygame.draw.rect(window, green, [640, 20, 20, 600])
pygame.draw.rect(window, green, [40, 600, 600, 20])
for kub in range(end):
pygame.draw.rect(window, white, [snake_x[kub], snake_y[kub], rect_h, rect_w])
pygame.draw.rect(window, red, [food_rect_x, food_rect_y, food_rect_h, food_rect_w])
snake_x[1 : end] = snake_x[0: end - 1]
snake_y[1: end] = snake_y[0: end - 1]
snake_dir[1 : end] = snake_dir[0: end - 1]
if snake_x[0] == food_rect_x and snake_y[0] == food_rect_y:
        snake_size += 1
points += 8
snake_x.append(food_rect_x)
snake_y.append(food_rect_y)
snake_dir.append(snake_dir[end-1])
        choise = True
        while choise:
            food_rect_x = start_cord() + 20
            food_rect_y = start_cord() + 20
            # Accept the new food cell only if it does not overlap the snake
            choise = False
            for i in range(end):
                if food_rect_x == snake_x[i] and food_rect_y == snake_y[i]:
                    choise = True
                    break
if snake_x[0] < 60 or snake_x[0] > 620 or snake_y[0] < 40 or snake_y[0] > 580:
snake_x = []
snake_y = []
snake_dir =[]
points = 0
rect_x = start_cord()
rect_y = start_cord()
snake_x.append(rect_x)
snake_y.append(rect_y)
        moove_direction = random.randint(0, 3)
snake_dir.append(moove_direction)
kor = 0
if snake_dir[0] == 0:
for kor in range(2):
snake_x.append(rect_x)
rect_y += moove_up
snake_y.append(rect_y)
snake_dir.append(snake_dir[0])
        if snake_dir[0] == 1:
            for kor in range(2):
                rect_x += moove_right
                snake_x.append(rect_x)
                snake_y.append(rect_y)
                snake_dir.append(moove_direction)
        if snake_dir[0] == 2:
            for kor in range(2):
                snake_x.append(rect_x)
                rect_y += moove_down
                snake_y.append(rect_y)
                snake_dir.append(moove_direction)
if snake_dir[0] == 3:
for kor in range(2):
rect_x += moove_left
snake_x.append(rect_x)
snake_y.append(rect_y)
snake_dir.append(moove_direction)
game.menu()
pygame.key.set_repeat(1, 1)
pygame.mouse.set_visible(False)
# rect_y += rect_change_y
    # ALL EVENT HANDLING MUST BE ABOVE THIS COMMENT
    # Limit the frame rate
pygame.display.flip()
clock.tick(6)
print("Hello")
pygame.quit()
|
[
"avkelgankin@yandex.ru"
] |
avkelgankin@yandex.ru
|
1b85020cca1d6037289cb80431aceb03e230e0d0
|
5c87c0f15df878b9b8e7aea2cac7a08f717053a9
|
/flaskr.py
|
8b50319699fbef48a17388f1641ad5b23d2747d3
|
[] |
no_license
|
peihongda/flasklearn
|
9209e3ee1490d8b505084fbdbe4eef457b458042
|
c6f8ab22aac215c0acceaf1214166951b3b1ff79
|
refs/heads/master
| 2020-07-05T13:32:55.023174
| 2017-02-08T03:25:08
| 2017-02-08T03:25:08
| 74,116,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,228
|
py
|
#all the imports
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
#create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
#Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database."""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
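# Open one connection per application context and cache it on flask.g.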
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
def init_db():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
init_db()
    print 'Initialized the database.'
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)', [request.form['title'], request.form['text']])
db.commit()
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
#return show_entries()
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
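# A minimal entry-point sketch (added; not part of the original tutorial file,
# which is presumably run via the `flask` CLI): it assumes the standard Flask
# development server is acceptable.
if __name__ == '__main__':
    app.run(debug=True)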
|
[
"pei_hongda@126.com"
] |
pei_hongda@126.com
|
64c40be02493f4a1096d29b2d28a4437cbb7d4e0
|
0578aa89abdcad375f40c15d52ec034556f8e05f
|
/tests/test_atom.py
|
067013948666050dc3bfbc0ba1098e45e728d092
|
[
"BSD-3-Clause"
] |
permissive
|
dbrattli/python-elements
|
bfe9f0eb5152a75a07a8cbc7db13ead6c0cb7cc5
|
032867f749b113607d7d82a59eb3a42e1ec8e6b1
|
refs/heads/master
| 2020-06-04T04:31:42.898857
| 2013-01-12T12:45:02
| 2013-01-12T12:45:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
test_atom.py
Created by Dag Brattli on 2008-03-02.
Copyright (c) 2008, Dag Brattli, All rights reserved.
"""
import sys
import os
import logging
logging.basicConfig(level=logging.WARNING)
from elements import atom
def test_atom(filenames=None):
filenames = filenames or ["tests/atom.xml", "tests/atom_old_app.xml"]
for filename in filenames:
data = open(filename).read()
d = atom.Feed()
d.from_string(data)
print d.to_string()
# Render as verbose dict
x = d.to_dict()
print x
assert(x['feed']['title']['$t'] == "Example Feed")
assert(x['feed']['entry'][0]['summary']['$t'] == "Some text.")
assert(x['feed']['entry'][0]['link'][0]['href'] == "http://example.org/2003/12/13/atom03")
assert(x['feed']['entry'][0]['app$control']['app$draft']['$t'] == "Testing Draft")
# Render as compact dict
y = d.to_dict(compact=True)
# print y
assert(y['feed']['title'] == "Example Feed")
assert(y['feed']['entry'][0]['summary'] == "Some text.")
assert(y['feed']['entry'][0]['link'][0]['href'] == "http://example.org/2003/12/13/atom03")
assert(y['feed']['entry'][0]['app$control']['app$draft'] == "Testing Draft")
# j = d.to_json()
# print j
# Parse verbose dict
d = atom.Feed()
d.from_dict(x)
print d.to_string()
assert(d.title.text == "Example Feed")
assert(d.entry[0].summary.text == "Some text.")
assert(d.entry[0].link[0].href == "http://example.org/2003/12/13/atom03")
assert(d.entry[0].control.draft.text == "Testing Draft")
# Parse compact dict
d = atom.Feed()
d.from_dict(y)
## print d.to_string()
assert(d.title.text == "Example Feed")
assert(d.entry[0].summary.text == "Some text.")
assert(d.entry[0].link[0].href == "http://example.org/2003/12/13/atom03")
assert(d.entry[0].control.draft.text == "Testing Draft")
if __name__ == '__main__':
test_atom(["atom.xml", "atom_old_app.xml"])
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
c8189a87f1a250069ea88bbb99174dd7dfc953e1
|
1896a925fbf08ed0a397a398e066f0dc8a430a0c
|
/product/views.py
|
b8e2b6b29ca282f3d554ab0c2bd47ef900682d16
|
[] |
no_license
|
xeroz/prueba_django
|
be1460598e4b6d10ddd65d641a0f590c5e1de533
|
69632e0638d1a566c5c02b62a5dbbe8abaff582f
|
refs/heads/master
| 2023-04-22T21:00:18.097011
| 2021-05-03T20:03:45
| 2021-05-03T20:03:45
| 364,000,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
from product.models import Category
from product.serializers import (
CategorySerializer, ProductByCategorySerializer,
ProductNameByCategorySerializer)
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
def get_object(pk):
try:
return Category.objects.get(pk=pk)
except Category.DoesNotExist:
raise Http404
class CategoryList(APIView):
def get(self, request, format=None):
categories = Category.objects.all()
serializer = CategorySerializer(categories, many=True)
return Response(serializer.data)
class ProductByCategoryList(APIView):
def get(self, request, pk, format=None):
data = get_object(pk)
serializer = ProductByCategorySerializer(data)
return Response(serializer.data)
class ProductNameByCategoryList(APIView):
def get(self, request, pk, format=None):
data = get_object(pk)
serializer = ProductNameByCategorySerializer(data)
return Response(serializer.data)
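# Hedged wiring sketch (added; names and paths are assumptions, not from the
# original repo): these APIViews would typically be exposed in product/urls.py
# roughly like this.
#
# from django.urls import path
# from product import views
#
# urlpatterns = [
#     path('categories/', views.CategoryList.as_view()),
#     path('categories/<int:pk>/products/', views.ProductByCategoryList.as_view()),
#     path('categories/<int:pk>/product-names/', views.ProductNameByCategoryList.as_view()),
# ]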
|
[
"chavezvasquezjuan@gmail.com"
] |
chavezvasquezjuan@gmail.com
|
7846b343496b71324598c0ac2f7efeb37b14fa5a
|
4cacc43716ef24b50e4b336f3f6e920d9ef0e564
|
/Nasa2021/GenFASTA.py
|
92b3c60903b278e496be6393d3c16d0f1e28de10
|
[
"MIT"
] |
permissive
|
ShepherdCode/ShepherdML
|
141e78943ecc8e387eeb84e4c4bbbf3448a46da0
|
335382730edf4537713ece62896e79a5a4430fb2
|
refs/heads/master
| 2023-08-17T15:02:28.985054
| 2023-07-25T15:49:46
| 2023-07-25T15:49:46
| 230,672,524
| 2
| 0
|
MIT
| 2023-08-29T11:55:04
| 2019-12-28T21:54:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
import random
import itertools
import os
import sys
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
Pairs = ("A","C", "G", "T")
Codons = itertools.product(Pairs, repeat=3)
all_codons = ["".join(codon) for codon in Codons]
start = "ATG"
stop = ("TAA", "TAG", "TGA")
no_stops = [codon for codon in all_codons if codon not in stop]
'''
prepends a random A, G, C, or T for the input frame
e.g. prepends 1 character if in frame 2, and 2 if in frame 3
'''
def shift_frame(input_seq,frame = 2):
output = input_seq
if frame in (1,2,3):
for i in range(frame-1):
output.insert(0, random.choice(("A","G","C","T")))
return output
else:
raise ValueError("Frame Must Be 1, 2 or 3. Frame Entered: " +frame)
'''
Places random codons length times, and uses choices from no_stops if coding,
and all_codons if not coding
'''
def codon_placer(length, coding = True):
lno_stops = no_stops
lall_codons = all_codons
if coding == True:
return random.choices(lno_stops, k=length)
else:
return random.choices(lall_codons,k=length)
"""
returns random indexes a start and stop codon should be placed
arbitrarily chooses a variation within 1/3rd the 2nd and 4th Quintile
"""
def get_index_placement(total_codons):
quintile = total_codons // 5
variation = quintile // 3
start_placement = quintile + random.randint(-variation, variation)
stop_placement = (quintile*4) + random.randint(-variation, variation)
return start_placement, stop_placement
"""
Generates a random (hypothesized) coding or non-coding sequence of length characters in frame
length: Number of characters the sequence should be
coding: Whether or not the sequence should have stop codons placed, or randomly generated (True, False respectively)
frame: The frame the sequence should be in which determines how many bases are appended to the sequences start. Must be 1, 2 or 3
"""
def generate_seq(length, coding = False, frame = 1):
codons_to_place = (length//3) + 1
if coding and frame in (1,2,3):
start_index, stop_index = get_index_placement(codons_to_place)
UTR_5_len = start_index-1
orf_length = stop_index-start_index - 2
UTR_3_len = codons_to_place - stop_index + 1
UTR_5 = codon_placer(UTR_5_len, False)
sequence_orf = codon_placer(orf_length, True)
sequence_orf.insert(0, start)
sequence_orf.append(random.choice(stop))
UTR_3 = codon_placer(UTR_3_len, False)
UTR_5.extend(sequence_orf)
UTR_5.extend(UTR_3)
output = shift_frame(UTR_5, frame)
output = ''.join(output)
return output[0:length], coding, frame, (start_index, stop_index)
elif not coding and frame in (1,2,3):
placed_codons = codon_placer(codons_to_place, coding)
output = shift_frame(placed_codons, frame)
output = ''.join(output)
return (output[0:length] , coding, frame)
else:
raise ValueError("Frame must be 1, 2 or 3")
"""
#Todo Implement argparse
Lines (int) is the amount of sequences a file should contain
Coding (boolean) is the whether the sequence generated is mRNA(True) or lncRNA(False)
Frame(int) Can be 1 2 or 3, and is the frame the sequence should be generated/ read in
outputfile_name(string) The name of the outputfile. must end in .fasta to be compatible.
"""
def CLI_GEN(lines, coding, frame, outputfile_name):
lines = int(lines)
working = coding=="True"
frame = int(frame)
# The minimum number of bases in a sequence
#MIN = 800
# The Maximum number of bases in a sequence
#MAX = 1000
if working:
with open(outputfile_name, 'w') as file:
headerframe = f">GENPC{frame}"
fastaheader = [headerframe, ".1"]
for i in range(1, lines+1):
padded = f'{i:010}'
#sequence, target_coding, target_frame, placed_indices = generate_seq(random.randint(MIN, MAX+1), coding, frame)
sequence, target_coding, target_frame, placed_indices = generate_seq(1000, coding, frame)
file.write(f"{(padded).join(fastaheader)} Coding {target_coding} Frame {target_frame} Start_Index {placed_indices[0]} Stop_Index {placed_indices[1]}" +"\n")
file.write(sequence+"\n")
return True
else:
with open(outputfile_name, 'w') as file:
headerframe = f">GENNC{frame}"
fastaheader = [headerframe, ".1"]
for i in range(1, lines+1):
padded = f'{i:010}'
#sequence, target_coding, target_frame, placed_indices = generate_seq(random.randint(MIN, MAX+1), coding, frame)
sequence, target_coding, target_frame = generate_seq(1000, working, frame)
file.write(f"{(padded).join(fastaheader)} Coding {target_coding} Frame {target_frame}" +"\n")
file.write(sequence+"\n")
return False
CLI_GEN(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
|
[
"noreply@github.com"
] |
ShepherdCode.noreply@github.com
|
196eb4a993e9d8ac55c36d502bfb5a5a2afa0f7b
|
4372fbc652b0fd3e02030047ccd8a8011d9b4820
|
/OC20_examples/train_model_gpu_lmdb_multigpu.py
|
1405885dad22b8cead05f06814aa9db14c27a1c2
|
[] |
no_license
|
ray38/GMP_AmpTorch_Tests
|
1fd03f0f6ad629f41a7eb5d30fda719db38d979c
|
09e5bdcb24cf495663b8b56c58b5d6f4217273d6
|
refs/heads/main
| 2023-04-05T00:42:38.056673
| 2021-04-08T02:49:00
| 2021-04-08T02:49:00
| 335,775,441
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,263
|
py
|
import numpy as np
import random
import torch
from ase import Atoms
from ase.calculators.emt import EMT
from amptorch.ase_utils import AMPtorch
from amptorch.trainer import AtomsTrainer
import sys
import os
trail_num = sys.argv[1]
checkpoint_name = sys.argv[2]
sigmas_index = int(sys.argv[3])
MCSHs_index = int(sys.argv[4])
num_nodes = int(sys.argv[5])
num_layers = int(sys.argv[6])
batch_size = int(sys.argv[7])
num_data = int(sys.argv[8])
num_max_dataset = int(num_data / 5000)
lr = float(sys.argv[9])
epochs = int(sys.argv[10])
n_gpu = int(sys.argv[11])
num_val = 50000
num_val_max_dataset = int(num_val / 5000)
#val_split = float(sys.argv[11])
val_split = num_val / (num_data + num_val)
print("total num data: {}".format(num_data))
print("batch size: {}".format(batch_size))
print("learning rate: {}".format(lr))
print("num epochs: {}".format(epochs))
print("val split: {}".format(val_split))
folder_name = "trial_{}".format(trail_num)
os.chdir(folder_name)
lmdb_paths = ["./lmdbs_sigma{}_MCSH{}/{}.lmdb".format(sigmas_index,MCSHs_index,i) for i in range(num_max_dataset)]
random.Random(1).shuffle(lmdb_paths)
lmdb_paths += ["./lmdbs_sigma{}_MCSH{}/val_{}.lmdb".format(sigmas_index,MCSHs_index,i) for i in range(num_val_max_dataset)]
config = {
"model": {
"name":"singlenn",
"get_forces": False,
"num_layers": num_layers,
"num_nodes": num_nodes,
"batchnorm": True
},
"optim": {
"gpus":n_gpu,
"force_coefficient": 0.0,
"lr": lr,
"batch_size": batch_size,
"epochs":epochs,
"loss": "mae",
},
"dataset": {
"lmdb_path": lmdb_paths,
"val_split": val_split,
"val_split_mode": "inorder",
"cache": "full"
},
"cmd": {
"debug": False,
"run_dir": "./",
"seed": 1,
"identifier": "test",
"verbose": True,
# Weights and Biases used for logging - an account(free) is required
"logger": False,
},
}
trainer = AtomsTrainer(config)
if os.path.isdir(checkpoint_name):
trainer.load_pretrained(checkpoint_name)
else:
print("**** WARNING: checkpoint not found: {} ****".format(checkpoint_name))
print("training")
trainer.train()
print("end training")
|
[
"xlei38@atl1-1-01-013-6-l.pace.gatech.edu"
] |
xlei38@atl1-1-01-013-6-l.pace.gatech.edu
|
8ea888b7bd7e7d85aea266699789690004b57144
|
331ed33890f103ce95318abe0d4bd255929e8f4d
|
/source/dictclass.py
|
5067800d7b853bc1e52365b77ca385d695bd28da
|
[] |
no_license
|
manon2012/e
|
fa15ce55a72fa6ee20f10d06e9f670ade207209a
|
c20a345e96ccd702b56a802e2efbd924f1cd808d
|
refs/heads/master
| 2021-01-22T19:25:41.688876
| 2018-09-20T09:59:29
| 2018-09-20T09:59:29
| 102,418,775
| 0
| 0
| null | 2017-09-22T08:21:04
| 2017-09-05T01:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
#! /usr/bin/env python
#coding=utf-8
"""
二:定义一个字典类:dictclass。完成下面的功能:
dict = dictclass({你需要操作的字典对象})
1 删除某个key
del_dict(key)
2 判断某个键是否在字典里,如果在返回键对应的值,不存在则返回"not found"
get_dict(key)
3 返回键组成的列表:返回类型;(list)
get_key()
4 合并字典,并且返回合并后字典的values组成的列表。返回类型:(list)
update_dict({要合并的字典})
"""
class dictClass(object):
def __init__(self,dict):
self.dict=dict
def del_dict(self,key):
del self.dict[key]
def get_dict(self,key):
if self.dict.has_key(key):
print self.dict[key]
else:
print "not found"
def get_key(self):
print self.dict.keys()
def get_value(self):
print self.dict.values()
def update_dict(self,newdict):
return self.dict.update(newdict)
a=dictClass({'qq':22,'yy':44})
print a
a.get_dict('qq')
a.get_key()
a.get_value()
a.del_dict('qq')
a.get_key()
#newdict={'aa':11}
b=a.update_dict({'aa':11})
print b
|
[
"manon2012@126.com"
] |
manon2012@126.com
|
944516153eb6d12d53a047eb79850cefe503b1d4
|
ff97897f8fe5e9e76c634b8fbe85757648aa7b44
|
/source/splash.py
|
023c4cc9ae5fec65701fcfb5de35eb39d6e60743
|
[] |
no_license
|
MerlinSmiles/lithocontrol
|
af3cef8d0a03099b6268f47fc1f9377a95a5fd2f
|
6811a369c65635ebf68d7f858c06d6d5a63b44fc
|
refs/heads/master
| 2021-01-19T01:25:57.072049
| 2016-02-23T23:14:03
| 2016-02-23T23:14:15
| 39,294,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import time
class Form(QDialog):
""" Just a simple dialog with a couple of widgets
"""
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.browser = QTextBrowser()
self.setWindowTitle('Just a dialog')
self.lineedit = QLineEdit("Write something and press Enter")
self.lineedit.selectAll()
layout = QVBoxLayout()
layout.addWidget(self.browser)
layout.addWidget(self.lineedit)
self.setLayout(layout)
self.lineedit.setFocus()
self.connect(self.lineedit, SIGNAL("returnPressed()"),
self.update_ui)
def update_ui(self):
self.browser.append(self.lineedit.text())
if __name__ == "__main__":
import sys, time
app = QApplication(sys.argv)
# Create and display the splash screen
splash_pix = QPixmap(r'./splash2.png')
splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
# adding progress bar
progressBar = QProgressBar(splash)
splash.setMask(splash_pix.mask())
splash.show()
for i in range(0, 100):
progressBar.setValue(i)
t = time.time()
while time.time() < t + 0.1:
app.processEvents()
# Simulate something that takes time
time.sleep(2)
form = Form()
form.show()
splash.finish(form)
app.exec_()
|
[
"merlin@nbi.dk"
] |
merlin@nbi.dk
|
d9bee16f4a5ff9ebecbac9ccd798271b14dc937c
|
9e6aaf9104cf3b77d2a03c676db7bc5574632837
|
/Assignments/inheritance_example.py
|
a6a03a060a279eec4d038182d81e36abbedec2b6
|
[] |
no_license
|
shaudeus/Python
|
b1ba840a890754daa8f8aaa5a645871f2dd2b3da
|
b9f1de2de81a51264b9587ac1b23c668616fe043
|
refs/heads/master
| 2020-04-19T08:26:47.887902
| 2019-01-29T02:49:29
| 2019-01-29T02:49:29
| 168,072,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
# Inheritance Example in OOP
# Programmer: Rashaun Forrest
# Date: 06/02/2016
# Purpose: Show how classes can inherit data from other classes
# Create a Parent Class called Date
class Date(object): # Inherit from object class built into python
def get_date(self):
return "2016-06-02"
# Create a child class of Date called Time
class Time(Date): # Inherit from Date class
def get_time(self):
return "06:30:06"
dt = Date() # Date obkect created from Date class
print(dt.get_date())
tm = Time() # Time object created from Time class
print(tm.get_time()) # Initiate attribute lookup from instance
print(tm.get_date()) # got this method from inherited class Date from Time object
|
[
"noreply@github.com"
] |
shaudeus.noreply@github.com
|
d4107327d578e5128c43327bd1ec324e5ef91ab6
|
a7ef015bd3f110014a40e1e385b33c6f6c17f4ce
|
/Spatial_Regress-R1.py
|
f470d435a40da7edbaf4a365771e86db31d0d7c8
|
[] |
no_license
|
jfhawkin/GTA-Real-Estate
|
ddc8c95893536e0fbf737bfbbce90e303871066d
|
47b16b52ff58d5e2eddda0a804bac1db9490eb4e
|
refs/heads/master
| 2020-09-01T02:23:45.067252
| 2017-06-15T03:45:22
| 2017-06-15T03:45:22
| 94,396,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,649
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 22:18:30 2017
@author: jason
"""
import numpy as np
import pysal as ps
import pandas as pd
import matplotlib.pyplot as plt
import collections
TYPE_TUPLE = [('C','Condo'), ('H','House'), ('T','Townhouse')]
TYPE_DICT = collections.OrderedDict(TYPE_TUPLE)
# Prices measured in $10,000, price per sqft measured in $/sqft, and income measured in $10,000
# Population density measured in persons per sqkm
X_LISTS = [['AveIncome','PActive','SQFT','EmpCode','DistCBD', 'PopDens', 'SubwayAdj','StreetCarAdj','GoTrainAdj','SW','NW','NE'],['AveIncomeC','PActive','SQFT','EmpCode','DistCBD', 'PopDens', 'SubwayAdj','StreetCarAdj','GoTrainAdj','SW','NW','NE']]
Y_LIST = ['AvePrice','RelAvePrice','LnAvePrice','AvePriceSQFT']
# Read-in each of the input data files
# All files are sorted in increasing alphabetic by TREB zone
regIn = pd.read_csv('./regressInputs.csv')
shp = ps.open('./TREB-Zones-Data.shp')
# Spatial weights using a Rook-based contiguity method
w = ps.weights.Rook.from_shapefile('./TREB-Zones-Data.shp')
w.transform = 'r'
ymList = regIn['YearMonth'].unique()
with open("Model_Results.txt", "w") as rst_file, open("Beta_Results.txt", "w") as beta_file:
for i, yVar in enumerate(Y_LIST):
for j, xList in enumerate(X_LISTS):
seriesT = [('C',[]),('H',[]),('T',[])]
seriesT = collections.OrderedDict(seriesT)
seriesD = []
for k,v in TYPE_DICT.items():
series = {}
sD = []
for ym in ymList:
yrTxt = str(ym)[:4]
data = regIn[(regIn['Type']==k)&(regIn['YearMonth']==ym)]
y_name = "PRICE"
x = data[xList].as_matrix()
y = data[yVar].as_matrix()
y = y[:,np.newaxis]
mllag = ps.spreg.ml_lag.ML_Lag(y,x,w,name_y=y_name, name_x=xList)
sD.append(mllag.betas[5])
for b in range(0,len(xList)):
if mllag.z_stat[b+1][1]<=0.05:
tmp = series.get(b,[[],[]])
tmp[0].append(yrTxt)
tmp[1].extend(mllag.betas[b+1])
series[b] = tmp
# Write the results of the regression to a text file for each
# pair of dependent and independent variables, for each structure
# type and year/month pair
rst_file.write("{0}_{1}_{2}_{3}".format(str(i),str(j),k,str(ym)))
rst_file.write(mllag.summary)
rst_file.write("\n")
seriesT[k] = series
seriesD.append(sD)
for b,xVar in enumerate(xList):
fig, ax = plt.subplots()
plt.tight_layout()
numC = 0
numH = 0
numT = 0
if b in seriesT['C'].keys():
numC = str(len(seriesT['C'][b][0]))
ax.plot(seriesT['C'][b][0],seriesT['C'][b][1],'ro',label='Condos N={0}'.format(numC))
ax.set_xlim([1995,2020])
beta_file.write("{0}_{1}_C".format(str(i),str(j)))
x = np.array(seriesT['C'][b][1])
x = x[:,np.newaxis]
y = np.array([int(w) for w in seriesT['C'][b][0]])
y = y[:,np.newaxis]
if x.shape[0]==x.shape[1] or y[0]==y[1]:
beta_file.write("Nothing")
else:
ols = ps.spreg.OLS(x, y)
beta_file.write(ols.summary)
beta_file.write("\n")
if b in seriesT['H'].keys():
numH = str(len(seriesT['H'][b][0]))
ax.plot(seriesT['H'][b][0],seriesT['H'][b][1],'bo',label='Houses N={0}'.format(numH))
ax.set_xlim([1995,2020])
beta_file.write("{0}_{1}_H".format(str(i),str(j)))
x = np.array(seriesT['H'][b][1])
x = x[:,np.newaxis]
y = np.array([int(w) for w in seriesT['H'][b][0]])
y = y[:,np.newaxis]
if x.shape[0]==x.shape[1] or y[0]==y[1]:
beta_file.write("Nothing")
else:
ols = ps.spreg.OLS(x, y)
beta_file.write(ols.summary)
beta_file.write("\n")
if b in seriesT['T'].keys():
numT = str(len(seriesT['T'][b][0]))
ax.plot(seriesT['T'][b][0],seriesT['T'][b][1],'go',label='Townhouses N={0}'.format(numT))
ax.set_xlim([1995,2020])
beta_file.write("{0}_{1}_T".format(str(i),str(j)))
x = np.array(seriesT['T'][b][1])
x = x[:,np.newaxis]
y = np.array([int(w) for w in seriesT['T'][b][0]])
y = y[:,np.newaxis]
# If there is only one entry then kluge it!
if x.shape[0]==x.shape[1] or y[0]==y[1]:
beta_file.write("Nothing")
else:
ols = ps.spreg.OLS(x, y)
beta_file.write(ols.summary)
beta_file.write("\n")
ax.legend(loc='upper right')
ax.set_xlabel('Year')
ax.set_ylabel('{0} vs {1}'.format(yVar,xVar))
plt.savefig('{0}_{1}.png'.format(yVar,xVar),bbox_inches='tight')
fig, ax = plt.subplots()
plt.tight_layout()
yrs = regIn['Year'].unique()
yrs = np.repeat(yrs,2)[:-1]
N = str(len(yrs))
ax.plot(yrs,seriesD[0],'ro',label='Condos N={0}'.format(N))
ax.set_xlim([1995,2020])
ax.plot(yrs,seriesD[1],'bo',label='Houses N={0}'.format(N))
ax.set_xlim([1995,2020])
ax.plot(yrs,seriesD[2],'go',label='Townhouses N={0}'.format(N))
ax.set_xlim([1995,2020])
ax.legend(loc='upper right')
ax.set_xlabel('Year')
ax.set_ylabel('{0} vs Dist'.format(yVar))
plt.savefig('{0}_DIST_Years.png'.format(yVar),bbox_inches='tight')
rst_file.close()
beta_file.close()
|
[
"noreply@github.com"
] |
jfhawkin.noreply@github.com
|
cd5047c96c8fedd9c5fc735dc66efb1118b7f87e
|
3fe4e0c5024e6afafde5f961e2e2df0e1aaa2016
|
/app/setting.py
|
6da18e31b4048c420be055f6ea4b004c5c1f6068
|
[] |
no_license
|
6148694/fisher
|
b2c383575ebec616b4c5d5058be140d4eecc713b
|
96e1a398736280cef27ff621d2a31921c7090fe4
|
refs/heads/main
| 2023-06-05T16:06:46.730631
| 2021-06-22T10:50:54
| 2021-06-22T10:50:54
| 378,862,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
DEBUG=True
# store the less important settings here
PER_PAGE = 15
name ="wanglei"
print("I am {}".format(name))
print(f"I am {'32432434'}{'wqewqeqw'}")
|
[
"wanglei@ezijing.com"
] |
wanglei@ezijing.com
|
22fc1171ca94edcbef0ae470b2eb05b429992ce9
|
2ae8d5cfa720054e4052b4f5c33e9b66febf4b66
|
/src/nlp/nlpengine.py
|
ad72c29e86c49f56afc77040da77cc45ecd3f64d
|
[] |
no_license
|
luffy1937/pynlg
|
7bf4b5ab0d299e9e5cf6a8197e2663011dac63c6
|
3c6e37d0cdb226b2d944cd6db4f1a516e62a3cc9
|
refs/heads/master
| 2021-01-12T13:06:37.892348
| 2017-01-02T09:09:40
| 2017-01-02T09:09:40
| 69,409,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,036
|
py
|
#coding=utf-8
# Read a file and return a list in which each element corresponds to one line of the file
import codecs
import math
import jieba
import jieba.posseg as pseg
def ngram(seqtogram, n):
    """
    @param seqtogram: sequence of tokens to combine into n-grams
    @param n: size of each n-gram
    @return: list of n-gram tuples; empty if the sequence is shorter than n
    """
    seqlen = len(seqtogram)
    if seqlen < n:
        return []
    else:
        # slide n staggered copies of the sequence against each other
        return zip(*[seqtogram[i:] for i in range(n)])
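# Usage sketch (added): ngram(['a', 'b', 'c'], 2) -> [('a', 'b'), ('b', 'c')]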
def readline(encode,filename):
fr=codecs.open(filename,'r',encode)
returnseq=[]
for line in fr:
        returnseq.append(line[:-1])  # the last character is "\n"
fr.close()
return returnseq
# Tokenize and POS-tag: takes a list of sentences (or phrases), returns jieba's cut result for each sentence
def cut(seq):
jieba.initialize()
returnseq=[pseg.cut(item) for item in seq]
return returnseq
# Read the stop-word file and return the stop words as a list
def readstopwords():
fr=codecs.open('..\data\stopwords.txt','r','u8')
stopwords=[]
for line in fr:
stopwords.append(line[:-1])
fr.close()
return stopwords
def saveCutResultWithoutStopword(possegcutseq):
    """
    Legacy method.
    Stores jieba's tokenization/POS-tagging results (which arrive as
    iterators) while filtering by POS tag and by a stop-word list.
    In hindsight this approach is poor: filtering should happen after the
    phrases are formed, otherwise it destroys the relations between words
    that were originally separated by a stop word.
    @param possegcutseq: the iterator returned by jieba's cut
    @return:
    """
returnseq=[]
stopword=readstopwords()
stopflag=['x','uj','u','f','p','c','y']
# "stopwords num:",len(stopword),repr(stopword[0]),repr(stopword[1100])
for j in possegcutseq:
tempseq=[]
for i in j:
if i.word not in stopword and i.flag not in stopflag:
tempseq.append((i.word,i.flag))
if len(tempseq)!=0:
returnseq.append(tempseq)
#returnseq=[[(i.word,i.flag) for i in j if i.word not in stopword] for j in possegcutseq]
# print returnseq[1: 10]
return returnseq
def saveCutResult(possegcutseq):
"""
    Iterate over jieba's cut results and store them as lists of (word, flag)
@param possegcutseq:
@return:
"""
returnseq=[]
for j in possegcutseq:
tempseq=[(i.word,i.flag) for i in j]
if len(tempseq)!=0:
returnseq.append(tempseq)
return returnseq
# From jieba's cut results, join the POS flags of each sentence into one string and return the list (should be changed to take saveCutResult's return value as its parameter)
def cixingrules(cutseq):
returnseq=[]
for item in cutseq:
cixing=""
for w in item:
cixing=cixing+'/'+w.flag
returnseq.append(cixing)
return returnseq
# From jieba's cut results, flatten every (word, flag) pair into one list and return it
def allWordWithFlag(possegcutseq):
returnseq=[]
for p in possegcutseq:
for item in p:
returnseq.append(item)
return returnseq
# Turn a dict into a list of (key, value) tuples (for sorting)
def dict2seq(d):
returnseq=[]
for item in d:
returnseq.append((item,d[item]))
return returnseq
# Take the first n elements of list s and return them as a dict
def seq2dict(s,n):
returndict={}
for cixing,num in s[1:n+1]:
returndict[cixing]=num
return returndict
import nltk
# From jieba's POS-tagged results for the sentence list, return all possible bigram combinations
def bigramsWithFlag(possegcutseq):
returnseq=[]
for j in possegcutseq:
# returnseq+=ngram(j,2)
returnseq+=nltk.bigrams(j)
return returnseq
# Return all possible trigram combinations
def trigramsWithFlag(possegcutseq):
returnseq=[]
for j in possegcutseq:
# returnseq+=ngram(j,3)
returnseq+=nltk.trigrams(j)
return returnseq
# Return all n-gram combinations
def ngramsWithFlag(possegcutseq,n):
returnseq=[]
for j in possegcutseq:
# returnseq+=nltk.ngrams(j,n)
returnseq+=ngram(j,n)
return returnseq
# Return bigram combinations of the words only (POS flags excluded)
def bigrams(jiebaseq):
returnseq=[]
for j in jiebaseq:
tempseq=[i.word for i in j]
returnseq+=nltk.bigrams(tempseq)
return returnseq
# Compute 2-gram mutual information (MI and word frequencies computed per document)
def mi2(allwordseq,gram2seq):
returnMIseq=[]
fd=nltk.FreqDist(gram2seq)
gram2set=set(gram2seq)
fw1=0
fw2=0
for g2 in gram2set:
fw1=allwordseq.count(g2[0])
fw2=allwordseq.count(g2[1])
returnMIseq.append((g2,float(fd[g2])/(fw1+fw2-fd[g2]),fd[g2]))
return returnMIseq
def mi2_2(allwordseq,gram2seq):
"""
    Pointwise mutual information
@param allwordseq:
@param gram2seq:
@return:
"""
returnMIseq=[]
gram2set=set(gram2seq)
fw1=0
fw2=0
fw12=0
allwordslen=len(allwordseq)
gram2len=len(gram2seq)
for g2 in gram2set:
fw1=float(allwordseq.count(g2[0]))/ allwordslen
# print allwordseq.count(g2[0]),len(allwordseq),fw1
fw2=float(allwordseq.count(g2[1]))/ allwordslen
#print allwordseq.count(g2[1]),len(allwordseq),fw2
fw12=float(gram2seq.count(g2))/gram2len
# print fd[g2],len(gram2seq),fw12
#print g2[0][0],g2[1][0],allwordseq.count(g2[0]),allwordseq.count(g2[1]),fd[g2],len(allwordseq),len(gram2seq),math.log(fw12/(fw1*fw2),2)
returnMIseq.append((g2,math.log(fw12/(fw1*fw2),2),gram2seq.count(g2)))
return returnMIseq
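# For reference (added note, not in the original source): mi2_2 computes
# pointwise mutual information,
#   PMI(w1, w2) = log2( p(w1, w2) / (p(w1) * p(w2)) )
# with p(w1), p(w2) as unigram relative frequencies over allwordseq and
# p(w1, w2) as the bigram relative frequency over gram2seq.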
def gram2entropy(mi2_2_return,trigramsWithFlag_return,bigramsWithFlag_return):
returnseq=[]
cfdright=nltk.ConditionalFreqDist([((item[0],item[1]),item[2]) for item in trigramsWithFlag_return])
cfdleft=nltk.ConditionalFreqDist([((item[1],item[2]),item[0]) for item in trigramsWithFlag_return])
for item in mi2_2_return:
leftEntropy=0.0
rightEntropy=0.0
fdright=cfdright[item[0]]
fdleft=cfdleft[item[0]]
pitem=bigramsWithFlag_return.count(item[0])
#print pitem
if fdright.N()!=0:
p=0
for f in fdright:
p=float(fdright[f])/pitem
rightEntropy-=p*math.log(p,2)
if fdleft.N()!=0:
p=0
for f in fdleft:
p=float(fdleft[f])/pitem
leftEntropy-=p*math.log(p,2)
#print leftEntropy,rightEntropy
returnseq.append((item[0],item[1],item[2],leftEntropy,rightEntropy))
return returnseq
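# For reference (added note, not in the original source): the left/right
# entropies above are intended as the standard H = -sum_w p(w) * log2 p(w),
# where w ranges over the words seen immediately to the bigram's left/right
# in the trigram counts.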
# From processAllTxtTxt_return[0], return the document frequency (DF) of each bigram: {((word,flag),(word,flag)): num, ...}
def documentGram2FreqDist(processAllTxtTxt_return_0):
    tempseq=[]  # flattened [set(bigrams of doc 1), set(bigrams of doc 2), ...]
for d in processAllTxtTxt_return_0:
tempseq+=list(set(bigramsWithFlag(d)))
return nltk.FreqDist(tempseq)
# Compute document frequency (DF)
def documentFreq(gram2entropy_return,documentGram2Freq_return):
returnseq=[]
for g in gram2entropy_return:
returnseq.append((g[0],g[1],g[2],g[3],g[4],documentGram2Freq_return[g[0]]))
return returnseq
|
[
"yuefeng_liu@foxmail.com"
] |
yuefeng_liu@foxmail.com
|
967ea41fda5c0d8f6945f03d291055136abe72d7
|
55aecec1c92df04b79c22f20fe6b69a2231e382f
|
/PythonImproving/udp发送信息.py
|
49b8192de2c9841ac3a736e868f95c9c7575e37c
|
[] |
no_license
|
lirryyes/filesystem
|
6b3e4023c71d0129a6d9785645bad74767695672
|
c2fa5a3ad2df345566a8c278f10eb725f6ed41df
|
refs/heads/master
| 2021-07-08T23:13:01.959878
| 2020-12-17T00:53:19
| 2020-12-17T00:53:19
| 219,093,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
import socket
def main():
    # create a UDP socket
udp_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    # read input from the keyboard and send it inside a while loop
    while True:
        nei = input("Enter content: ")
        # exit the loop if the user types "exit"
        if nei == "exit":
            break
        # send the data; no local port was bound, so the OS assigns the program a random (ephemeral) port
        udp_socket.sendto(nei.encode("utf-8"), ("10.1.163.190", 8989))
udp_socket.close()
if __name__ == "__main__":
main()
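# Hedged companion sketch (added; not part of the original script). Assumptions:
# the receiver runs on the target host and binds the same port 8989 used above.
def recv_main():
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp_socket.bind(("", 8989))  # bind so the OS delivers datagrams sent to port 8989
    while True:
        data, addr = udp_socket.recvfrom(1024)  # up to 1024 bytes per datagram
        print(data.decode("utf-8"), addr)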
|
[
"liuchun@liuchundeMacBook-Pro.local"
] |
liuchun@liuchundeMacBook-Pro.local
|
5218f807eadb8df4f7ef4ea70a4633c047e45513
|
7cd70cc97f0ca800a088efc15e81ca0cc0e6944c
|
/lists/views.py
|
4b5b25962015af3e08c711d2214ae08cdc3d00d0
|
[] |
no_license
|
odraude1022/test-driven_development_with_python
|
64856df28109ff72e729da9040636db220637e13
|
9e55238238dca7c6519a6770a0244926db0ef5da
|
refs/heads/master
| 2021-06-18T14:01:52.190970
| 2019-12-05T15:29:39
| 2019-12-05T15:29:39
| 216,088,073
| 0
| 0
| null | 2021-06-10T23:39:24
| 2019-10-18T18:50:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 931
|
py
|
from django.core.exceptions import ValidationError
from django.http import HttpResponse
from django.shortcuts import redirect, render
from lists.forms import ExistingListItemForm, ItemForm
from lists.models import Item, List
# Create your views here.
def home_page(request):
return render(request, 'home.html', {'form': ItemForm()})
def new_list(request):
form = ItemForm(data=request.POST)
if form.is_valid():
list_ = List.objects.create()
form.save(for_list=list_)
return redirect(list_)
else:
return render(request, 'home.html', {"form": form})
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
form = ExistingListItemForm(for_list=list_)
if request.method == 'POST':
form = ExistingListItemForm(for_list=list_, data=request.POST)
if form.is_valid():
form.save()
return redirect(list_)
return render(request, 'list.html', {'list': list_, "form": form})
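# Note (added, hedged): redirect(list_) relies on List defining
# get_absolute_url(); a typical definition in lists/models.py would be
#
#     def get_absolute_url(self):
#         return reverse('view_list', args=[self.id])
#
# where 'view_list' is an assumed URL pattern name.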
|
[
"eriglesias@ufl.edu"
] |
eriglesias@ufl.edu
|
564a9d8f064da37385a85f4b7775a6d6e28d47de
|
062f41448db9e335d49bb333f83235b6ec2d850c
|
/test/functional/feature_logging.py
|
56aec6fb97abbdc2c413987b94d8233eb2bc62cf
|
[
"MIT"
] |
permissive
|
arcana-coin/arcana-core
|
2496d593e2e0a2a0bdcc57d57fec095630f97a99
|
b5d6d71968d1f19c42dc3f351aff17800da5af36
|
refs/heads/master
| 2021-05-11T02:21:38.815732
| 2018-01-22T00:52:28
| 2018-01-22T00:52:28
| 118,348,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bytcoyn Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test debug logging."""
import os
from test_framework.test_framework import BitcoinTestFramework
class LoggingTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
# test default log file name
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
# test alternative log file name in datadir
self.restart_node(0, ["-debuglogfile=foo.log"])
assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
# test alternative log file name outside datadir
tempname = os.path.join(self.options.tmpdir, "foo.log")
self.restart_node(0, ["-debuglogfile=%s" % tempname])
assert os.path.isfile(tempname)
# check that invalid log (relative) will cause error
invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
invalidname = os.path.join("foo", "foo.log")
self.stop_node(0)
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (relative) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) will cause error
self.stop_node(0)
invdir = os.path.join(self.options.tmpdir, "foo")
invalidname = os.path.join(invdir, "foo.log")
self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
"Error: Could not open debug log file")
assert not os.path.isfile(os.path.join(invdir, "foo.log"))
# check that invalid log (absolute) works after path exists
self.stop_node(0)
os.mkdir(invdir)
self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
assert os.path.isfile(os.path.join(invdir, "foo.log"))
if __name__ == '__main__':
LoggingTest().main()
|
[
"arcanacoin@protonmail.com"
] |
arcanacoin@protonmail.com
|
3a976d251070aeaee63151823702bf929f9487a0
|
01eede62351f9a853d208cfcaa893ad014a30b0c
|
/firmware/examples/i2cslave-test.py
|
72dcc72e92c26e8f6ec0b9b1338487fa75cb0a37
|
[] |
no_license
|
sarfata/shirtty-addon
|
33dea6b1972e647e70f3fe6287123e65a39f4bce
|
0070cce90ec879dc2f8234b372307b2d5e51bf68
|
refs/heads/master
| 2020-08-22T20:19:54.407291
| 2020-04-24T19:28:18
| 2020-04-24T19:28:18
| 216,470,964
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,395
|
py
|
import board
from i2cslave import I2CSlave
regs = [0] * 16
index = 0
# Master-side example (an `i2c` master object is not defined in this slave-side script):
# i2c.writeto(0x40, bytes([0x05]), stop=False)
with I2CSlave(board.SCL, board.SDA, (0x40, 0x41)) as slave:
print("Waiting for i2c command")
while True:
r = slave.request()
if not r:
# Maybe do some housekeeping
continue
with r: # Closes the transfer if necessary by sending a NACK or feeding the master dummy bytes
print("got request! {}".format(repr(r)))
if r.address == 0x40:
if not r.is_read: # Master write which is Slave read
b = r.read(1)
if not b or b[0] > 15:
break
index = b[0]
b = r.read(1)
if b:
regs[index] = b[0]
elif r.is_restart: # Combined transfer: This is the Master read message
n = r.write(bytes([regs[index]]))
#else:
# A read transfer is not supported in this example
# If the Master tries, it will get 0xff byte(s) by the ctx manager (r.close())
elif r.address == 0x41:
if not r.is_read:
b = r.read(1)
if b and b[0] == 0xde:
# do something
pass
|
[
"thomas@sarlandie.net"
] |
thomas@sarlandie.net
|
2d8079a8ec8568c6f6f7bc940b08a9fce56854ce
|
5a5dc72c579e484ea0292e33edf6a9dc9d788817
|
/Chess_Pieces/pieces.py
|
ff416a43f6fb7506d4aefea3483df24ef2208f32
|
[] |
no_license
|
kirtan517/Chess-AI
|
b5ecd26215c9a5c0eae9a36f84a75cdc985d3680
|
1bb7dd74a37d17dd0f3e0db2751fd61761890cf0
|
refs/heads/master
| 2023-04-06T22:53:59.681183
| 2021-04-19T05:37:58
| 2021-04-19T05:37:58
| 359,645,998
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,985
|
py
|
# from abc import ABC,abstractmethod
import pygame
import os
import sys
PATH_CHANGE = r"C:\Users\mohit\Documents\pygame Projects\CHESS_AI"
sys.path.append(PATH_CHANGE)
from Board import board, pos
board(800)
class piece:
def __init__(self, img_dir):
self.img = pygame.image.load(img_dir)
self.width = self.img.get_width()
self.height = self.img.get_height()
def draw(self, win, x, y):
x = x - self.width // 2
y = y - self.height // 2
win.blit(self.img, (x, y))
    def erase(self):
        pass
class King(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
def draw(self, win):
super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "king"
class Queen(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
    def draw(self, win):
        super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "queen"
class Rook(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
    def draw(self, win):
        super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "rook"
class Bishop(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
    def draw(self, win):
        super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "bishop"
class Knight(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
    def draw(self, win):
        super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "knight"
class Pawn(piece):
def __init__(self, x, y, image):
super().__init__(image)
self.x = x
self.y = y
    def draw(self, win):
        super().draw(win, self.x, self.y)
    def move(self, newx, newy):
        self.x = newx
        self.y = newy
    def name(self):
        return "pawn"
class Black:
path = PATH_CHANGE+"\Images"
king_img = os.path.join(path, "black_king.png")
queen_img = os.path.join(path, "black_queen.png")
rook_img = os.path.join(path, "black_rook.png")
bishop_img = os.path.join(path, "black_bishop.png")
knight_img = os.path.join(path, "black_knight.png")
pawn_img = os.path.join(path, "black_pawn.png")
def __init__(self):
self.list = {
            pos["e8"]: King(pos["e8"][0], pos["e8"][1], self.king_img),
            pos["d8"]: Queen(pos["d8"][0], pos["d8"][1], self.queen_img),
            pos["c8"]: Bishop(pos["c8"][0], pos["c8"][1], self.bishop_img),
            pos["f8"]: Bishop(pos["f8"][0], pos["f8"][1], self.bishop_img),
            pos["b8"]: Knight(pos["b8"][0], pos["b8"][1], self.knight_img),
            pos["g8"]: Knight(pos["g8"][0], pos["g8"][1], self.knight_img),
            pos["a8"]: Rook(pos["a8"][0], pos["a8"][1], self.rook_img),
            pos["h8"]: Rook(pos["h8"][0], pos["h8"][1], self.rook_img),
            pos["a7"]: Pawn(pos["a7"][0], pos["a7"][1], self.pawn_img),
            pos["b7"]: Pawn(pos["b7"][0], pos["b7"][1], self.pawn_img),
            pos["c7"]: Pawn(pos["c7"][0], pos["c7"][1], self.pawn_img),
            pos["d7"]: Pawn(pos["d7"][0], pos["d7"][1], self.pawn_img),
            pos["e7"]: Pawn(pos["e7"][0], pos["e7"][1], self.pawn_img),
            pos["f7"]: Pawn(pos["f7"][0], pos["f7"][1], self.pawn_img),
            pos["g7"]: Pawn(pos["g7"][0], pos["g7"][1], self.pawn_img),
            pos["h7"]: Pawn(pos["h7"][0], pos["h7"][1], self.pawn_img),
}
self.name_list = []
    def getpositin(self, xold, yold, xnew, ynew):
        self.list[(xold, yold)].move(xnew, ynew)
def draw_pieces(self, win):
for key, value in self.list.items():
value.draw(win)
class White:
path = PATH_CHANGE+"\Images"
king_img = os.path.join(path, "white_king.png")
queen_img = os.path.join(path, "white_queen.png")
rook_img = os.path.join(path, "white_rook.png")
bishop_img = os.path.join(path, "white_bishop.png")
knight_img = os.path.join(path, "white_knight.png")
pawn_img = os.path.join(path, "white_pawn.png")
def __init__(self):
self.list = {
            pos["e1"]: King(pos["e1"][0], pos["e1"][1], self.king_img),
            pos["d1"]: Queen(pos["d1"][0], pos["d1"][1], self.queen_img),
            pos["c1"]: Bishop(pos["c1"][0], pos["c1"][1], self.bishop_img),
            pos["f1"]: Bishop(pos["f1"][0], pos["f1"][1], self.bishop_img),
            pos["b1"]: Knight(pos["b1"][0], pos["b1"][1], self.knight_img),
            pos["g1"]: Knight(pos["g1"][0], pos["g1"][1], self.knight_img),
            pos["a1"]: Rook(pos["a1"][0], pos["a1"][1], self.rook_img),
            pos["h1"]: Rook(pos["h1"][0], pos["h1"][1], self.rook_img),
            pos["a2"]: Pawn(pos["a2"][0], pos["a2"][1], self.pawn_img),
            pos["b2"]: Pawn(pos["b2"][0], pos["b2"][1], self.pawn_img),
            pos["c2"]: Pawn(pos["c2"][0], pos["c2"][1], self.pawn_img),
            pos["d2"]: Pawn(pos["d2"][0], pos["d2"][1], self.pawn_img),
            pos["e2"]: Pawn(pos["e2"][0], pos["e2"][1], self.pawn_img),
            pos["f2"]: Pawn(pos["f2"][0], pos["f2"][1], self.pawn_img),
            pos["g2"]: Pawn(pos["g2"][0], pos["g2"][1], self.pawn_img),
            pos["h2"]: Pawn(pos["h2"][0], pos["h2"][1], self.pawn_img),
}
self.name_list = []
    def getpositin(self, xold, yold, xnew, ynew):
        self.list[(xold, yold)].move(xnew, ynew)
def draw_pieces(self, win):
for key, value in self.list.items():
value.draw(win)
|
[
"17bce064@nirmaui.ac.in"
] |
17bce064@nirmaui.ac.in
|
51d5820b31840fd530291a77c5a2da06986b432c
|
5bf872c7da7ab846eb2f1b2b2796c043bf1cfc30
|
/prime_numbers.py
|
b13d141a1c702fe1d29daa44f32a044faf8dc100
|
[] |
no_license
|
mdeora/codeeval
|
faa3173799cabf2da988781f8efb25a5541476c5
|
2ac3e452339d5c119ca545ee6a9eabebc4c72f7f
|
refs/heads/master
| 2021-05-29T13:53:56.520270
| 2013-08-17T01:17:18
| 2013-08-17T01:17:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
'''
Prime Numbers - CodeEval
https://www.codeeval.com/open_challenges/46/
Print out the prime numbers less than a given number N. For bonus points your solution
should run in N*(log(N)) time or better. You may assume that N is always a positive integer.
'''
import math
import sys
def prime_numbers(n):
primes = []
for i in range(2, n):
if is_prime(i):
primes.append(i)
return ','.join([str(number) for number in primes])
def is_prime(n):
'''
Checking divisibility up to the square root of the number because if n is divisible by i,
then it is also divisible by n/i
'''
for i in range(2, int(math.sqrt(n))+1):
if n%i == 0:
return False
return True
with open(sys.argv[1]) as f:
for test in f:
print prime_numbers(int(test))
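# Hedged alternative sketch (added; not from the original solution): a Sieve of
# Eratosthenes meets the stated N*log(N) bonus bound (it is about N*log(log(N))),
# whereas the trial-division version above costs roughly N*sqrt(N).
def prime_numbers_sieve(n):
    sieve = [True] * max(n, 2)
    sieve[0] = sieve[1] = False
    for i in range(2, int(math.sqrt(n)) + 1):
        if sieve[i]:
            for j in range(i * i, n, i):
                sieve[j] = False  # every multiple of a prime is composite
    return ','.join(str(i) for i in range(2, n) if sieve[i])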
|
[
"einas@haddad"
] |
einas@haddad
|
ebbe1edd923cd285915654d7e20ebf999b795d1d
|
d0b85d001f7c640e7d47beb834d263b465a73712
|
/apps/users/models.py
|
f48e6c0ffbb5be2184b479532accd085a86b3322
|
[] |
no_license
|
pag0dy/t3
|
32932bf9c44def8346437ac6bab92b9741f9b0c8
|
5a9a33bf430a5fb6974d7171daf4f71a8cef2458
|
refs/heads/master
| 2023-04-26T16:35:52.320840
| 2021-05-19T01:34:43
| 2021-05-19T01:34:43
| 355,368,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
from django.db import models
from django.contrib import messages
from django.core.validators import MinLengthValidator, RegexValidator, validate_slug, EmailValidator
from .validators import letters_only, confirm_pass
import bcrypt
class UserManager(models.Manager):
def validar_inicio(self, postData):
errors = {}
este_usuario = User.objects.filter(email = postData['email'])
if len(este_usuario) == 0:
            errors['no_existe'] = 'The email entered is not registered'
else:
este_usuario = este_usuario[0]
if bcrypt.checkpw(postData['password'].encode(), este_usuario.password.encode()):
pass
else:
                errors['clave_erronea'] = 'The password entered does not match this user'
return errors
class Company(models.Model):
    company_name = models.CharField(max_length=100, validators = [MinLengthValidator(limit_value = 2, message = 'The name must be longer than two characters')])
n_workers = models.PositiveIntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.company_name
class User(models.Model):
LEVELS = (
('a', 'admin'),
('b', 'project manager'),
('c', 'people manager'),
('d', 'staff')
)
ACCOUNT_TYPES = (
('SU', 'Solo User'),
('TA', 'Team Account')
)
    name = models.CharField(max_length=100, validators = [MinLengthValidator(limit_value = 2, message = 'The name must be longer than two characters')])
    lastname = models.CharField(max_length=100, validators = [MinLengthValidator(limit_value = 2, message = 'The last name must be longer than two characters')])
    jobtitle = models.CharField(max_length=100, validators = [MinLengthValidator(limit_value = 3, message = 'The job title must be longer than three characters')])
    account_type = models.CharField(max_length=2, choices=ACCOUNT_TYPES, default='SU')
    email = models.EmailField(validators=[EmailValidator(message = 'Please enter a valid email address')])
    password = models.CharField(max_length=80, validators = [MinLengthValidator(limit_value = 8, message = 'The password must be longer than eight characters')])
permission_level = models.CharField(max_length=1, choices=LEVELS)
company = models.ForeignKey(Company, related_name='staff_members', on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
def __str__(self):
return self.email
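# Hedged sketch (added; not part of the original app): bcrypt.checkpw in
# UserManager.validar_inicio expects User.password to hold a bcrypt hash, so
# registration code would hash the raw password roughly like this before
# creating the User.
def hash_password(raw_password):
    return bcrypt.hashpw(raw_password.encode(), bcrypt.gensalt()).decode()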
|
[
"paulina.godoydelc@gmail.com"
] |
paulina.godoydelc@gmail.com
|
dec84c4c23a5de5141a339d91e711f010c2473e2
|
3f6c16ea158a8fb4318b8f069156f1c8d5cff576
|
/.PyCharm2019.1/system/python_stubs/-1317042838/_multiprocessing.py
|
9cbc8da17dcd253bb7d91fde2e035a8ecbb2e86b
|
[] |
no_license
|
sarthak-patidar/dotfiles
|
08494170d2c0fedc0bbe719cc7c60263ce6fd095
|
b62cd46f3491fd3f50c704f0255730af682d1f80
|
refs/heads/master
| 2020-06-28T23:42:17.236273
| 2019-10-01T13:56:27
| 2019-10-01T13:56:27
| 200,369,900
| 0
| 0
| null | 2019-08-03T12:56:33
| 2019-08-03T11:53:29
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,778
|
py
|
# encoding: utf-8
# module _multiprocessing
# from /usr/lib/python3.6/lib-dynload/_multiprocessing.cpython-36m-x86_64-linux-gnu.so
# by generator 1.147
# no doc
# no imports
# functions
def sem_unlink(*args, **kwargs): # real signature unknown
pass
# classes
class SemLock(object):
""" Semaphore/Mutex type """
def acquire(self, *args, **kwargs): # real signature unknown
""" acquire the semaphore/lock """
pass
def release(self, *args, **kwargs): # real signature unknown
""" release the semaphore/lock """
pass
def _after_fork(self, *args, **kwargs): # real signature unknown
""" rezero the net acquisition count after fork() """
pass
def _count(self, *args, **kwargs): # real signature unknown
""" num of `acquire()`s minus num of `release()`s for this process """
pass
def _get_value(self, *args, **kwargs): # real signature unknown
""" get the value of the semaphore """
pass
def _is_mine(self, *args, **kwargs): # real signature unknown
""" whether the lock is owned by this thread """
pass
def _is_zero(self, *args, **kwargs): # real signature unknown
""" returns whether semaphore has value zero """
pass
@classmethod
def _rebuild(cls, *args, **kwargs): # real signature unknown
pass
def __enter__(self, *args, **kwargs): # real signature unknown
""" enter the semaphore/lock """
pass
def __exit__(self, *args, **kwargs): # real signature unknown
""" exit the semaphore/lock """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
handle = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
kind = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
maxvalue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
SEM_VALUE_MAX = 2147483647
# variables with complex values
flags = {
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 1,
}
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x7f9deeb4d7f0>'
__spec__ = None # (!) real value is "ModuleSpec(name='_multiprocessing', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x7f9deeb4d7f0>, origin='/usr/lib/python3.6/lib-dynload/_multiprocessing.cpython-36m-x86_64-linux-gnu.so')"
|
[
"sarthakpatidar15@gmail.com"
] |
sarthakpatidar15@gmail.com
|
a9b9bced9a310284ee24881ad1910f3a3524e550
|
9d8d42a5fdc6c44b58172b1259f359cfe74a24a6
|
/python3/index.py
|
72c2b6e35afce8a7f02c88629d0405d001089b2f
|
[] |
no_license
|
stackvana/microcule-examples
|
312a47fafb21e6f15a387330806fb63a5e8be8c2
|
084718e532d6cb6230379ab9af070d3428421267
|
refs/heads/master
| 2021-10-26T02:38:57.220556
| 2019-04-09T22:27:48
| 2019-04-09T22:27:48
| 69,416,005
| 13
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
print("Hook['params'] is populated with request parameters")
print(Hook['params'])
print("Hook['req'] is the http request")
print(Hook['req']['url'])
|
[
"marak.squires@gmail.com"
] |
marak.squires@gmail.com
|
87fd505d61791e2e88fb23d94053866a4344edb8
|
820e6f2263976de4284c56b723bf02f286b2552d
|
/multi_detect.py
|
3cb54643de1067a7b4e55b229df54cd2864c3a3a
|
[] |
no_license
|
yanchaomars/py_speech_seg
|
f4fdff8edbe73f37a9b26639319b8795d0aafdf7
|
abe0b7b69a6e5318068b01cb01a3f313da2fa811
|
refs/heads/master
| 2021-05-15T05:36:42.067771
| 2017-08-15T02:23:26
| 2017-08-15T02:23:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
# -*- coding:UTF-8 -*-
from __future__ import print_function
import speech_segmentation as seg
frame_size = 256
frame_shift = 128
sr = 16000
seg_point = seg.multi_segmentation("dialog4.wav",sr,frame_size,frame_shift,plot_seg=True)
print(seg_point)
|
[
"wblk24@yeah.net"
] |
wblk24@yeah.net
|
6102038333bae6a1d5561cbd67c8c01f97d295c9
|
385dca5832a1c1f91ab6405a32630c99e605a004
|
/test_core/tests_DAS_core_models_class.py
|
fed915a33cc0888a501e9cc66b98f139323f2d25
|
[] |
no_license
|
jefbsi20111/DAS
|
0294b0adac21b1fabc6863f92553fea4cb583246
|
87e1c69a5e244e4290fd3a6e48712f6daa5e924d
|
refs/heads/master
| 2021-01-10T18:45:05.175787
| 2015-06-19T00:51:03
| 2015-06-19T00:51:03
| 37,693,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,788
|
py
|
# _*_ coding:utf-8 _*_
from django.test import TestCase
class TestClassTelefone(TestCase):
    def test_existe(self):
        """ Does the Telefone class exist? """
        try:
            from DAS_core.models import Telefone
        except ImportError:
            self.fail('Telefone does not exist!')
def test_create(self):
from DAS_core.models import Telefone
t = Telefone(numero="1234-5678",tipo="CE")
t.save()
class TestClassEndereco(TestCase):
    def test_existe(self):
        """ Does the Endereco class exist? """
        try:
            from DAS_core.models import Endereco
        except ImportError:
            self.fail('Endereco does not exist!')
def test_create(self):
from DAS_core.models import Endereco
t = Endereco(rua="Mareita",cep="0909090",bairro="LogoAli",cidade="Parelhas",estado="RN")
t.save()
class TestClassHorario(TestCase):
    def test_existe(self):
        """ Does the Horario class exist? """
        try:
            from DAS_core.models import Horario
        except ImportError:
            self.fail('Horario does not exist!')
def test_create(self):
from DAS_core.models import Horario
t = Horario(dia="Seg",horario="M12")
t.save()
class TestClassPeriodo(TestCase):
    def test_existe(self):
        """ Does the Periodo class exist? """
        try:
            from DAS_core.models import Periodo
        except ImportError:
            self.fail('Periodo does not exist!')
def test_create(self):
from DAS_core.models import Periodo
t = Periodo(periodo="2015.1")
t.save()
class TestClassCod_nome(TestCase):
    def test_existe(self):
        """ Does the Cod_nome class exist? """
        try:
            from DAS_core.models import Cod_nome
        except ImportError:
            self.fail('Cod_nome does not exist!')
class TestClassPessoa(TestCase):
    def test_existe(self):
        """ Does the Pessoa class exist? """
        try:
            from DAS_core.models import Pessoa
        except ImportError:
            self.fail('Pessoa does not exist!')
class TestClassDisciplina(TestCase):
    def test_existe(self):
        """ Does the Disciplina class exist? """
        try:
            from DAS_core.models import Disciplina
        except ImportError:
            self.fail('Disciplina does not exist!')
def test_create(self):
from DAS_core.models import Disciplina
t = Disciplina(nome="Lógica",cod="BSI0909")
t.save()
class TestClassCurso(TestCase):
    def test_existe(self):
        """ Does the Curso class exist? """
        try:
            from DAS_core.models import Curso
        except ImportError:
            self.fail('Curso does not exist!')
def test_create(self):
from DAS_core.models import Curso
from DAS_core.models import Disciplina
t = Curso(nome="BSI",cod="BSI00")
t.save()
class TestClassAluno(TestCase):
    def test_existe(self):
        """ Does the Aluno class exist?
        """
        try:
            from DAS_core.models import Aluno
        except ImportError:
            self.fail('Aluno does not exist!')
class TestClassProfessor(TestCase):
    def test_existe(self):
        """ Does the Professor class exist? """
        try:
            from DAS_core.models import Professor
        except ImportError:
            self.fail('Professor does not exist!')
class TestClassUnidade(TestCase):
    def test_existe(self):
        """ Does the Unidade class exist? """
        try:
            from DAS_core.models import Unidade
        except ImportError:
            self.fail('Unidade does not exist!')
class TestClassNoticia(TestCase):
    def test_existe(self):
        """ Does the Noticia class exist? """
        try:
            from DAS_core.models import Noticia
        except ImportError:
            self.fail('Noticia does not exist!')
class TestClassArquivo(TestCase):
    def test_existe(self):
        """ Does the Arquivo class exist? """
        try:
            from DAS_core.models import Arquivo
        except ImportError:
            self.fail('Arquivo does not exist!')
class TestClassTurma(TestCase):
    def test_existe(self):
        """ Does the Turma class exist? """
        try:
            from DAS_core.models import Turma
        except ImportError:
            self.fail('Turma does not exist!')
class TestClassNota(TestCase):
    def test_existe(self):
        """ Does the Nota class exist? """
        try:
            from DAS_core.models import Nota
        except ImportError:
            self.fail('Nota does not exist!')
|
[
"jefbsi2011.1@hotmail.com"
] |
jefbsi2011.1@hotmail.com
|
a20291ac3b0a7e07673ea87d5e28411b07ff7c15
|
7c060e484e48542583198dd50f304e61b174b21b
|
/feature_test_1.0.0/pixelbetweenpoints.py
|
964ec7bad3c77b664421a5e8f711c893830ff3f0
|
[] |
no_license
|
CharlesYang1996/cell_detection_1.0.0
|
5951110f44f9042387265330e6a5bac912f92381
|
6f96552a3fbc41c491fa44222bdf71f14784827f
|
refs/heads/master
| 2022-12-10T07:38:36.314190
| 2020-08-31T14:13:38
| 2020-08-31T14:13:38
| 289,374,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
py
|
def pixel_between_two_points(x1, x2, y1, y2):
two_points_list=[]
x_distance = (x2 - x1)
y_distance = (y2 - y1)
a=0
b=0
if x_distance>0:
a=1
else:
a=-1
if y_distance>0:
b=1
else:
b=-1
    if abs(y_distance) < abs(x_distance):
        div_1 = y_distance / x_distance
        for i in range(0, x_distance, a):
            pixel = [x1 + i, y1 + round(i * div_1)]
            two_points_list.append(pixel)
    else:
        div_1 = x_distance / y_distance
        for i in range(0, y_distance, b):
            pixel = [x1 + round(i * div_1), y1 + i]
            two_points_list.append(pixel)
return two_points_list
test=pixel_between_two_points(168, 132,
179, 150)
print(test)
print(test[2])
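# Note (added, hedged): this is DDA-style line rasterization; the end point
# (x2, y2) itself is never emitted because range() excludes its stop value.
# A small worked example (round() uses banker's rounding, so round(0.5) == 0):
# pixel_between_two_points(0, 4, 0, 2) -> [[0, 0], [1, 0], [2, 1], [3, 2]]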
|
[
"61850593+CharelsYang@users.noreply.github.com"
] |
61850593+CharelsYang@users.noreply.github.com
|
31f42b6786e051d277fb45e670d6cc7f7d5ac4bf
|
6b495f1ac4b4fb40274034f713c5da076519913e
|
/src/utils/influence_analysis.py
|
755f4610b430feda138039b05deb300472ea3f23
|
[] |
no_license
|
cirillodavide/tw_coronavirus_nets
|
ef313fca6d10731fa4976c1d4831afd7d0a7690d
|
00ac6be06d304018dfdf6ec35d1f9195aeb47bf0
|
refs/heads/master
| 2021-05-20T10:42:07.290431
| 2020-04-09T11:17:43
| 2020-04-09T11:17:43
| 252,255,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
import pandas as pd
tweets = '../sna/graphs/retweets_040420.csv'
replies = '../sna/graphs/replies_040420.csv'
quotes = '../sna/graphs/quotes_040420.csv'
df = pd.read_csv(tweets)
|
[
"root@DESKTOP-G6RATH4.localdomain"
] |
root@DESKTOP-G6RATH4.localdomain
|
d73b8acb589f083ff131f27fa661169071e9e3f6
|
a4f3529bd7b330da2abdf29f97dab851e3df554a
|
/assignment/3.py
|
8bddfabecabd1249407c1039aa88aaa469d0c9d3
|
[] |
no_license
|
sbkaji/python-programs
|
fe3a3c080e7d0673b7b395fc32898076a7d10dc2
|
a4526c8c688e6a94144ff068e988af5f5d1a2778
|
refs/heads/master
| 2023-01-27T17:42:47.811378
| 2020-11-29T09:34:16
| 2020-11-29T09:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 23:45:31 2020
@author: Subarna Basnet
"""
"""3.Write a Python program to iterate over dictionaries using for loops."""
data = {'A': 10, 'B': 20, 'C': 30}
for dict_key, dict_value in data.items():
print(dict_key,'->',dict_value)
|
[
"noreply@github.com"
] |
sbkaji.noreply@github.com
|
3746b044270b010e9be1b327f678c016fc06f424
|
71e2ccdd49aad1b5a52e020402e922bce5016a68
|
/classifer/classifier/fangzhongjie/radius_analysis.py
|
2bc60b5db725df54a398e7c44de323f4e78dea92
|
[] |
no_license
|
dingchaoz/banshang
|
2ddfe52aff1ea5968be9cd92a2a37b4b85f9aeb3
|
02eee8052d28c9e2339892f5a33cb2fd5373503a
|
refs/heads/master
| 2020-12-31T07:33:59.041259
| 2017-04-19T02:33:16
| 2017-04-19T02:33:16
| 86,532,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,578
|
py
|
import pandas as pd
import numpy as np
# utm is installed by running python setup.py install in the directory at ejlq@da74wbedge1 [/home/ejlq/utm-0.4.1]
import utm
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from dateutil.relativedelta import relativedelta  # used by getTenure below
# The agent pols data is depracated, need to use the new one once is available
# will replace it later, use for now for just some areas analysis
DFAgentPols = pd.read_csv('/san-data/usecase/agentpm/AgentProductionModel/datapull/agents_and_policies_auto.csv',header = None)
# Add headers to the agent pol data
agentPolHeaders = ['POLICY_NUMBER','POLICY_KIND_CODE','PLCY_TYPE_CD','AGENT','AGENT_ASSIGN_TYPE_CD','AGENT_SERV_ST_CD_AGENT_CD','QMSLAT','QMSLON','STATUS_CODE','LOC_ADDR_01','LOC_CITY_NAME','LOC_ST_ABBR','LOC_ZIP','MARKET_UNIT_ID','DATE_EFFECTIVE_DATE','DATE_EXPIRATION_DATE','DATE_AGENT_ASSIGN_DATE','COUNTY','CITY','STATE','POSTL_ST_CD','STATE1','ZIP']
DFAgentPols.columns = agentPolHeaders
DFAgentPols.drop(DFAgentPols.columns[-2],axis = 1,inplace = True) #Drop the duplicated state1 column
# Convert the state column from int to string, padding single digits with a leading 0
DFAgentPols['STATE'] = [str(x).zfill(2) for x in DFAgentPols['STATE']]
# Load agent loc info as of 2016-09
DFAgentLoc = pd.read_csv('/san-data/usecase/agentpm/AgentProductionModel/Agents_Sep_2016.csv')
# Build the state+agent code column for DFAgentPols; the corresponding column in DFAgentLoc is ST_AGT_CD
DFAgentPols['AGENT_SERV_ST_CD_AGENT_CD'] = [x+str(y) for x,y in zip(DFAgentPols['STATE'],DFAgentPols['AGENT'])]
# # Get agents located in Chicago, NY and SF which are urban metropolitans
# sanfAgents = DFAgentLoc[DFAgentLoc.CITY == 'San Francisco']
# newyorkAgents = DFAgentLoc[DFAgentLoc.CITY == 'New York']
# chicagoAgents = DFAgentLoc[DFAgentLoc.CITY == 'Chicago']
# # Merge chicago agents with their pols
# chicagoAgentPols = chicagoAgents.merge(DFAgentPols,left_on = 'ST_AGT_CD',right_on = 'AGENT_SERV_ST_CD_AGENT_CD')
# # Remove not needed or duplicated columns
# chicagoAgentPols.drop(chicagoAgentPols[['ZIP','POSTL_ST_CD_y','STATE','CITY_y','COUNTY','AGENT_SERV_ST_CD_AGENT_CD','SF_RGN_CD','PREF_NM','ORGZN_NM']],axis = 1,inplace = True)
# Make pol lat and long values valid
def divide(x):
try:
return float(x)/1000000
except:
pass
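# Worked example (added, hedged): the raw QMSLAT/QMSLON values appear to be
# integer-encoded microdegrees, so divide('41881234') -> 41.881234 (degrees)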
# chicagoAgentPols['QMSLAT'] = chicagoAgentPols['QMSLAT'].apply(divide)
# chicagoAgentPols['QMSLON'] = chicagoAgentPols['QMSLON'].apply(divide)
# chicagoAgentPols['lat_long_pol'] = chicagoAgentPols[['QMSLAT', 'QMSLON']].apply(tuple, axis=1)
# chicagoAgentPols['lat_long_agent'] = chicagoAgentPols[['LATITUDE', 'LONGITUDE']].apply(tuple, axis=1)
# Convert lat/lon to UTM; utm.from_latlon returns
# (easting, northing, zone_number, zone_letter), so [:2] keeps the
# planar coordinates in meters
def toUTM(x):
    try:
        return utm.from_latlon(x[0], x[1])[:2]
    except:
        pass
# chicagoAgentPols['utm_pol'] = chicagoAgentPols['lat_long_pol'].apply(toUTM)
# chicagoAgentPols['utm_agent'] = chicagoAgentPols['lat_long_agent'].apply(toUTM)
# scipy spatial distance: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html
# A good discussion around distance computation: http://stackoverflow.com/questions/13079563/how-does-condensed-distance-matrix-work-pdist
#pdist(np.array([chicagoAgentPols['utm_pol'][0],chicagoAgentPols['utm_agent'][0]]))
def getDist(x):
    try:
        # Stack the two (easting, northing) pairs and take the single
        # pairwise Euclidean distance pdist returns for two points
        y = np.array([x[0], x[1]])
        return pdist(y)[0]
    except:
        pass
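# Worked example (added, hedged): with exactly two points pdist returns a
# one-element condensed distance array, e.g.
# getDist(((0.0, 0.0), (3.0, 4.0))) -> 5.0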
# Extract the year (first four characters) from a date string
def getYear(x):
    return x[:4]
# Get the first 5 digits of the 9-digit ZIP format
def getF5Zip(x):
return x[:5]
def getAssignDate(x):
return x.DATE_AGENT_ASSIGN_DATE.dropna().value_counts().idxmax()
def getTenure(x,y):
try:
deltaYr = relativedelta(pd.to_datetime(x), pd.to_datetime(y)).years
return deltaYr
except:
pass
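# Worked example (added, hedged): relativedelta counts whole elapsed years,
# e.g. getTenure('2016-09-01', '2010-03-15') -> 6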
#newyorkAgentPols['TENURE'] = np.vectorize(getTenure)(newyorkAgentPols.DATE_EFFECTIVE_DATE,newyorkAgentPols.DATE_AGENT_ASSIGN_DATE)
#newyorkAgentPols.groupby(['ST_AGT_CD','TENURE']).POL_ZIP.nunique() #Return policy holder count zip for each agent
# newyorkAgentPols.groupby(['ST_AGT_CD','POL_YR']).POL_ZIP.nunique()
# newyorkAgentPols.groupby(['ST_AGT_CD','POL_YR','ZIP_CD']).POL_ZIP.nunique()
# newyorkAgentPols.groupby(['POL_YR']).POL_ZIP.nunique() # show trend overall year for certain areas
# Build the merged agent/policy frame for one city (with distances, policy year and ZIP)
def cityAgentsPrep(cityName):
cityAgents = DFAgentLoc[DFAgentLoc.CITY == cityName]
    # Merge the city's agents with their policies
cityAgentPols = cityAgents.merge(DFAgentPols,left_on = 'ST_AGT_CD',right_on = 'AGENT_SERV_ST_CD_AGENT_CD')
# Remove not needed or duplicated columns
cityAgentPols.drop(cityAgentPols[['ZIP','POSTL_ST_CD_y','STATE','CITY_y','COUNTY','AGENT_SERV_ST_CD_AGENT_CD','SF_RGN_CD','PREF_NM','ORGZN_NM']],axis = 1,inplace = True)
# Prepare lat lon in the right format
cityAgentPols['QMSLAT'] = cityAgentPols['QMSLAT'].apply(divide)
cityAgentPols['QMSLON'] = cityAgentPols['QMSLON'].apply(divide)
cityAgentPols['lat_long_pol'] = cityAgentPols[['QMSLAT', 'QMSLON']].apply(tuple, axis=1)
cityAgentPols['lat_long_agent'] = cityAgentPols[['LATITUDE', 'LONGITUDE']].apply(tuple, axis=1)
# Convert to utm from lat lon
cityAgentPols['utm_pol'] = cityAgentPols['lat_long_pol'].apply(toUTM)
cityAgentPols['utm_agent'] = cityAgentPols['lat_long_agent'].apply(toUTM)
cityAgentPols['dist'] = cityAgentPols[['utm_agent','utm_pol']].apply(getDist,axis = 1)
    # Fill in NaN for assignment date (NEED TO GET THE RIGHT AGENT APPOINTMENT DATE)
#cityAgentPols['DATE_AGENT_ASSIGN_DATE'].fillna(method = 'backfill',inplace = True)
cityAgentPols['POL_YR'] = cityAgentPols.DATE_EFFECTIVE_DATE.apply(getYear)
cityAgentPols['POL_ZIP'] = cityAgentPols.LOC_ZIP.apply(getF5Zip)
return cityAgentPols
def groupbyAnalysis(cityAgentPols):
groupedSTAGT = cityAgentPols.groupby('ST_AGT_CD')
DF_Dist_STAGT = groupedSTAGT['dist'].agg([np.mean,np.std])
DF_Dist_STAGT['80Perc'] = groupedSTAGT['dist'].quantile(.8)
DF_Dist_STAGT['90Perc'] = groupedSTAGT['dist'].quantile(.9)
groupedZIP = cityAgentPols.groupby('ZIP_CD')
DF_Dist_ZIP = groupedZIP['dist'].agg([np.mean,np.std])
DF_Dist_ZIP['80Perc'] = groupedZIP['dist'].quantile(.8)
DF_Dist_ZIP['90Perc'] = groupedZIP['dist'].quantile(.9)
POL_ZIP_CNT = cityAgentPols.groupby(['ST_AGT_CD','POL_YR','ZIP_CD']).POL_ZIP.nunique()
return DF_Dist_STAGT,DF_Dist_ZIP,POL_ZIP_CNT
newyorkAgentPols = cityAgentsPrep('New York')
NY_Dist_STAGT,NY_Dist_ZIP,NY_POL_ZIP = groupbyAnalysis(newyorkAgentPols)
chicagoAgentPols = cityAgentsPrep('Chicago')
CHG_Dist_STAGT,CHG_Dist_ZIP,CHG_POL_ZIP = groupbyAnalysis(chicagoAgentPols)
sanfAgentPols = cityAgentsPrep('San Francisco')
SF_Dist_STAGT,SF_Dist_ZIP,SF_POL_ZIP = groupbyAnalysis(sanfAgentPols)
#Reset index and add state column
NY_Dist_ZIP.reset_index(inplace = True)
NY_Dist_ZIP['ST'] = 'NY'
SF_Dist_ZIP.reset_index(inplace = True)
SF_Dist_ZIP['ST'] = 'SF'
CHG_Dist_ZIP.reset_index(inplace = True)
CHG_Dist_ZIP['ST'] = 'CHG'
DFZIPs = pd.concat([CHG_Dist_ZIP,SF_Dist_ZIP,NY_Dist_ZIP])
### TO-DO:
# Return neighborhood zips' number of agents, number of households, premium sum prediction, etc.
# for all zips, plus agent tenure in the neighborhood (using existing zips and agents as training data)
# The returned values will be the training Xs
########
# Seaborn plot analysis; agents in NY ZIP 10040 are outliers
########
## Look into agts10040
#agts10040 = newyorkAgentPols[newyorkAgentPols['ZIP_CD'] == '10040']
# chicagoAgentPols['dist'] = chicagoAgentPols[['utm_agent','utm_pol']].apply(getDist,axis = 1)
# grouped = chicagoAgentPols.groupby('ST_AGT_CD')
# grouped['dist'].agg([np.mean,np.std])
|
[
"dingchaozhang@g.harvard.edu"
] |
dingchaozhang@g.harvard.edu
|
4245ac1f7815184c9d58e63557e6ad588b3a76f6
|
54b9cb96a2bf43d73eadc043fd127ec3f42a6587
|
/budget/urls.py
|
42fc942fe256f2b9ef2577d19153a4ff6523165d
|
[] |
no_license
|
jayinting/budget_sandbox
|
a6bdf3d57d60885a3b3979374ee6e1db0000a2b3
|
47e3f33f719cf240441f16067e747f5af78a59a4
|
refs/heads/master
| 2021-01-01T15:54:35.783848
| 2012-10-15T13:44:37
| 2012-10-15T13:44:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
from django.conf.urls import patterns, url
from budget import views as budView
urlpatterns = patterns('',
#--------------------------------------------------------------------------------------------------
# General Views
#--------------------------------------------------------------------------------------------------
url(r'^$', (budView.Index_View.as_view()), name="Index_View"),
url(r'^csv/$', (budView.Import_Csv.as_view()), name="Import_Csv"),
)
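# Note (added, hedged): django.conf.urls.patterns() was deprecated in Django
# 1.8 and removed in 1.10; on modern Django the equivalent is a plain list:
# urlpatterns = [
#     url(r'^$', budView.Index_View.as_view(), name="Index_View"),
#     url(r'^csv/$', budView.Import_Csv.as_view(), name="Import_Csv"),
# ]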
|
[
"jayinting@yahoo.com"
] |
jayinting@yahoo.com
|