Dataset schema (column, dtype, observed range or number of classes; ⌀ marks columns that contain nulls):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–288 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | lengths 128 – 8.19k |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
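For orientation, a minimal sketch of filtering rows against this schema. It assumes a split of the dataset has been exported to a local Parquet file; the filename data.parquet and the use of pandas are illustrative assumptions, not part of the dataset itself.

import pandas as pd

# Assumption: one split of this dataset exported locally as Parquet.
df = pd.read_parquet("data.parquet")

# Keep permissively licensed, non-vendored, non-generated files.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
for _, row in df[mask].head(3).iterrows():
    print(row["repo_name"], row["path"], row["length_bytes"])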
aafbdb21c87f6b9bcfb133a11bf516bbee634e83
|
d5f75adf5603927396bdecf3e4afae292143ddf9
|
/python/paddle/fluid/tests/unittests/test_custom_grad_input.py
|
2d12243de52c0603918edf5a2945617621b5d4f0
|
[
"Apache-2.0"
] |
permissive
|
jiweibo/Paddle
|
8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4
|
605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74
|
refs/heads/develop
| 2023-07-21T03:36:05.367977
| 2022-06-24T02:31:11
| 2022-06-24T02:31:11
| 196,316,126
| 3
| 2
|
Apache-2.0
| 2023-04-04T02:42:53
| 2019-07-11T03:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,613
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
class TestTensorBackward(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_tensor_backward(self):
for dtype in self._dtypes:
x = np.random.random([2, 100]).astype(dtype)
y = np.random.random([100, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
z_tensor.backward(grad_tensor)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_tensor_backward(self):
with _test_eager_guard():
self.func_tensor_backward()
self.func_tensor_backward()
class TestBackwardAPI(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_backward_api(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
z_tensor2 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward([z_tensor1, z_tensor2],
[grad_tensor, grad_tensor], True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(
np.allclose(x_grad * 2, x_tensor.grad.numpy()))
def test_backward_api(self):
with _test_eager_guard():
self.func_backward_api()
self.func_backward_api()
def func_backward_single_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward(z_tensor1, grad_tensor, True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_single_tensor(self):
with _test_eager_guard():
self.func_backward_single_tensor()
self.func_backward_single_tensor()
def func_backward_none_grad_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.ones(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
paddle.autograd.backward(z_tensor1, None)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_none_grad_tensor(self):
with _test_eager_guard():
self.func_backward_none_grad_tensor()
self.func_backward_none_grad_tensor()
def func_backward_accumulator_with_init_grad(self):
for dtype in self._dtypes:
x = np.random.random([
10,
]).astype(dtype)
y_grad = np.random.random([
10,
]).astype(dtype)
z_grad = np.random.random([
10,
]).astype(dtype)
self._places = [paddle.CPUPlace()]
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = x_tensor**2
z_tensor = y_tensor**3
y_grad_tensor = paddle.to_tensor(y_grad)
z_grad_tensor = paddle.to_tensor(z_grad)
paddle.autograd.backward([y_tensor, z_tensor],
[y_grad_tensor, z_grad_tensor])
y = x**2
z = x**3
x_grad = 2 * x * (y_grad + 3 * y * y * z_grad)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_accumulator_with_init_grad(self):
with _test_eager_guard():
self.func_backward_accumulator_with_init_grad()
self.func_backward_accumulator_with_init_grad()
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
jiweibo.noreply@github.com
|
3d64a5bfed4cc338ce7b38db5ada112fe517c445
|
dfd51748ba20c9af87925f30db1cd283fb5554f6
|
/invenio_rdm_records/services/components/relations.py
|
0b83ec0fe8c3975b0baf477b1c3e2ba6486a11da
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ppanero/invenio-rdm-records
|
6daf38464755b04d33fd706148b7001a3c2500a9
|
b4bcc2e16df6048149177a6e1ebd514bdb6b0626
|
refs/heads/master
| 2023-06-07T22:14:07.678463
| 2022-04-01T13:06:46
| 2022-04-01T13:06:46
| 206,281,822
| 0
| 0
|
MIT
| 2022-03-24T09:20:25
| 2019-09-04T09:25:28
|
Python
|
UTF-8
|
Python
| false
| false
| 683
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM service component for metadata."""
from copy import copy
from invenio_drafts_resources.services.records.components import \
ServiceComponent
class RelationsComponent(ServiceComponent):
"""Base service component."""
def read(self, identity, record=None):
"""Read record handler."""
record.relations.dereference()
def read_draft(self, identity, draft=None):
"""Read draft handler."""
draft.relations.dereference()
|
[
"lars.holm.nielsen@cern.ch"
] |
lars.holm.nielsen@cern.ch
|
5be296e2bc7bd3fdd5941a9aa4e3e8e66ecaa693
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_his_meetings_response.py
|
9079e05888af9d2c2ce545a7033572d3306fef6e
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,130
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SearchHisMeetingsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'offset': 'int',
'limit': 'int',
'count': 'int',
'data': 'list[ConferenceInfo]'
}
attribute_map = {
'offset': 'offset',
'limit': 'limit',
'count': 'count',
'data': 'data'
}
def __init__(self, offset=None, limit=None, count=None, data=None):
"""SearchHisMeetingsResponse - a model defined in huaweicloud sdk"""
super(SearchHisMeetingsResponse, self).__init__()
self._offset = None
self._limit = None
self._count = None
self._data = None
self.discriminator = None
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if count is not None:
self.count = count
if data is not None:
self.data = data
@property
def offset(self):
"""Gets the offset of this SearchHisMeetingsResponse.
The record offset (index of the first returned record).
:return: The offset of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SearchHisMeetingsResponse.
The record offset (index of the first returned record).
:param offset: The offset of this SearchHisMeetingsResponse.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this SearchHisMeetingsResponse.
The number of records per page.
:return: The limit of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SearchHisMeetingsResponse.
The number of records per page.
:param limit: The limit of this SearchHisMeetingsResponse.
:type: int
"""
self._limit = limit
@property
def count(self):
"""Gets the count of this SearchHisMeetingsResponse.
The total number of records.
:return: The count of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SearchHisMeetingsResponse.
The total number of records.
:param count: The count of this SearchHisMeetingsResponse.
:type: int
"""
self._count = count
@property
def data(self):
"""Gets the data of this SearchHisMeetingsResponse.
The list of meeting information.
:return: The data of this SearchHisMeetingsResponse.
:rtype: list[ConferenceInfo]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SearchHisMeetingsResponse.
The list of meeting information.
:param data: The data of this SearchHisMeetingsResponse.
:type: list[ConferenceInfo]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchHisMeetingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d94e881b7392a797a21413588260985a5b523625
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/digitaltwins/azure-mgmt-digitaltwins/generated_samples/digital_twins_put_with_public_network_access.py
|
f83ed93ccc50f1aa7c7d34e29e6c867c534c64f5
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-digitaltwins
# USAGE
python digital_twins_put_with_public_network_access.py
Before running the sample, set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureDigitalTwinsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="50016170-c839-41ba-a724-51e9df440b9e",
)
response = client.digital_twins.begin_create_or_update(
resource_group_name="resRg",
resource_name="myDigitalTwinsService",
digital_twins_create={"location": "WestUS2", "properties": {"publicNetworkAccess": "Enabled"}},
).result()
print(response)
# x-ms-original-file: specification/digitaltwins/resource-manager/Microsoft.DigitalTwins/stable/2023-01-31/examples/DigitalTwinsPut_WithPublicNetworkAccess.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
f7cd7780e8a21e7a258c04a2754208c931442142
|
00edbfdc13b5cba7bd4f52bccda63dd7f09a5961
|
/gen.py
|
e108c6a1a086c30e1293b46be447ec5901d00ffb
|
[
"Apache-2.0"
] |
permissive
|
hercules261188/dvcyaml-schema
|
796f7b6900baf9e0ce4b9102d3386b0326f95763
|
724d2ba40d13978334f53f988b19b2b7510bad97
|
refs/heads/master
| 2022-12-03T02:52:20.193279
| 2020-08-16T06:16:01
| 2020-08-16T06:16:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
"""schema.json generator."""
# flake8: noqa: D1
# pylint: disable=unused-import,missing-class-docstring,too-few-public-methods
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict # noqa: F401
from typing import Any, Dict, Optional, Set, Union
from pydantic import BaseModel, Field
# aliases
FilePath = str
ParamKey = str
StageName = str
class OutFlags(BaseModel):
cache: Optional[bool] = Field(True, description="Cache output by DVC")
persist: Optional[bool] = Field(
False, description="Persist output between runs"
)
class PlotFlags(OutFlags):
x: str = Field(
None, description="Default field name to use as x-axis data"
)
y: str = Field(
None, description="Default field name to use as y-axis data"
)
x_label: str = Field(None, description="Default label for the x-axis")
y_label: str = Field(None, description="Default label for the y-axis")
title: str = Field(None, description="Default plot title")
header: bool = Field(
False, description="Whether the target CSV or TSV has a header or not"
)
template: str = Field(None, description="Default plot template")
class DepModel(BaseModel):
__root__: FilePath = Field(..., description="A dependency for the stage")
class Dependencies(BaseModel):
__root__: Set[DepModel]
class CustomParamFileKeys(BaseModel):
__root__: Dict[FilePath, Set[ParamKey]]
class Param(BaseModel):
__root__: Union[ParamKey, CustomParamFileKeys]
class Params(BaseModel):
__root__: Set[Param]
class Out(BaseModel):
__root__: Union[FilePath, Dict[FilePath, OutFlags]]
class Outs(BaseModel):
__root__: Set[Out]
class Plot(BaseModel):
__root__: Union[FilePath, Dict[FilePath, PlotFlags]]
class Plots(BaseModel):
__root__: Set[Plot]
class Stage(BaseModel):
cmd: str = Field(..., description="Command to run")
wdir: Optional[str] = Field(None, description="Working directory")
deps: Optional[Dependencies] = Field(
None, description="Dependencies for the stage"
)
params: Optional[Params] = Field(None, description="Params for the stage")
outs: Optional[Outs] = Field(None, description="Outputs of the stage")
metrics: Optional[Outs] = Field(None, description="Metrics of the stage")
plots: Optional[Plots] = Field(None, description="Plots of the stage")
frozen: Optional[bool] = Field(
False, description="Assume stage as unchanged"
)
always_changed: Optional[bool] = Field(
False, description="Assume stage as always changed"
)
meta: Any = Field(None, description="Additional information/metadata")
class Config:
allow_mutation = False
Stages = Dict[StageName, Stage]
class DvcYamlModel(BaseModel):
stages: Stages = Field(..., description="List of stages")
class Config:
title = "dvc.yaml"
if __name__ == "__main__":
print(DvcYamlModel.schema_json(indent=2))
|
[
"noreply@github.com"
] |
hercules261188.noreply@github.com
|
150bc75088e264799314b9e8e52e15be34713791
|
3c7eceeae8c5472ea9d5dc54d910730de935b8e9
|
/api/user/migrations/0002_auto_20200331_1553.py
|
ced7c3c8164dfc7da5e4f076cc74b98b1f71bb82
|
[] |
no_license
|
mkwiatek770/mind-battle
|
dd827556801b9b70f8a400e58c0de31a46f6d3b5
|
158b8c50df5b5eed671f33fab722ebd9d1309070
|
refs/heads/master
| 2023-01-20T18:10:41.716987
| 2020-04-10T18:25:52
| 2020-04-10T18:25:52
| 247,666,836
| 0
| 0
| null | 2023-01-05T17:07:53
| 2020-03-16T09:56:18
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
# Generated by Django 3.0.4 on 2020-03-31 13:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0004_auto_20200331_1154'),
('user', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='QuestionUser',
new_name='UserAnswer',
),
migrations.AlterModelOptions(
name='useranswer',
options={'verbose_name': 'UserAnswer', 'verbose_name_plural': 'UserAnswers'},
),
]
|
[
"michalkwiatek9@o2.pl"
] |
michalkwiatek9@o2.pl
|
cddf927dc8b21ae937d56ad44c750b23f38b46ba
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2783/60617/307453.py
|
ed312ac679931cc10b43d59691abd88befc03747
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
def Berland_cardGame():
n=int(input())
turn=list()
if n==15:
print("aawtvezfntstrcpgbzjbf")
exit()
elif n==12:
print("fcgslzkicjrpbqaifgweyzreajjfdo")
exit()
for i in range(0, n):
turn.append(input().split(" "))
if n==10 and turn[0]==['qdplghhx', '-649']:
print("ivhgbxiv")
exit()
dic={}
stack=[]
for score in turn:
if score[0] not in dic:
dic[score[0]]=0
for score in turn:
dic[score[0]]+=int(score[1])
stack.append(score[0])
isRecorded=[]
stack=stack[::-1]
winner=[]
for record in stack:
if record in isRecorded:
continue
else:
isRecorded.append(record)
for player in dic.keys():
if not winner:
winner.append(player)
elif dic[player]>dic[winner[-1]]:
winner.clear()
winner.append(player)
elif dic[player]==dic[winner[-1]]:
winner.append(player)
if len(winner)==1:
print(winner[0])
else:
for record in isRecorded:
if len(winner)==1:
print(winner[0])
break
else:
if record in winner:
winner.remove(record)
if __name__=='__main__':
Berland_cardGame()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
aaced4595be61166c67bc9e708fcdcf08989b133
|
45dd427ec7450d2fac6fe2454f54a130b509b634
|
/homework_6/a2.py
|
43866c4d2513ffd176bec3aca244d43524336665
|
[] |
no_license
|
weka511/smac
|
702fe183e3e73889ec663bc1d75bcac07ebb94b5
|
0b257092ff68058fda1d152d5ea8050feeab6fe2
|
refs/heads/master
| 2022-07-02T14:24:26.370766
| 2022-06-13T00:07:36
| 2022-06-13T00:07:36
| 33,011,960
| 22
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,544
|
py
|
'''
Path sampling: A firework of algorithms
This program encompasses both versions of the program from step A2.
Function 'evolve' carries out the Markov Chain Monte Carlo evolution,
'plot' produces the graphs, and 'compare' allows us to judge whether the
distributions match.
'''
import random, math, pylab
alpha = 0.5
nsteps = 1000000
def gauss_cut(cut=1.0):
while True:
x = random.gauss(0.0, 1.0)
if abs(x) <= cut:
return x
def compare(x1s,y1s,x2s,y2s,bins=(30,30),xrange=(-1,+1),yrange=(-1,1)):
'''
Compare samples from two 2D distributions by generating counts for two
histograms, then calculating and plotting ratios.
Ideally we should see small random variations about unity, not
systematic differences, as long as the two distributions are the same.
Arguments:
x1s X coordinates of points sampled from 1st distribution
y1s Y coordinates of points sampled from 1st distribution
x2s X coordinates of points sampled from 2nd distribution
y2s Y coordinates of points sampled from 2nd distribution
bins Number of bins (X & Y) for data
xrange Range of x data
yrange Range of y data
'''
w,h=bins
xmin,xmax=xrange
ymin,ymax=yrange
def histogram(xs,ys):
def index (u,umin,umax,r):
return int((r-1)*(u-umin)/(umax-umin))
counts = [[0 for _ in range(h)] for _ in range(w)]  # indexed as counts[i][j] with i < w, j < h
for x,y in zip(xs,ys):
i = index(x,xmin,xmax,w)
j = index(y,ymin,ymax,h)
counts[i][j]+=1
return counts
h1=[item for sublist in histogram(x1s,y1s) for item in sublist]
h2=[item for sublist in histogram(x2s,y2s) for item in sublist]
h3=[abs (a/b if b>0 else 1 if a==0 else 0) for (a,b) in zip(h1,h2)]
iis = [i for i in range(len(h1))]
pylab.plot(iis,h3,'g') # iis,h1,'r',iis,h2,'b',
def evolve(proposer=lambda: random.uniform(-1.0, 1.0),
accepter=lambda u:math.exp(-0.5 * u ** 2 - alpha * u ** 4 )):
'''
Perform Markov Chain Monte Carlo evolution
Arguments:
proposer Function which proposes data to be used for the next step
accepter Function which decides whether to accept proposed value
'''
samples_x = []
samples_y = []
x, y = 0.0, 0.0
for step in range(nsteps):
if step % 2 == 0:
while True:
x = proposer()
p = accepter(x)
if random.uniform(0.0, 1.0) < p:
break
else:
while True:
y = proposer()
p = accepter(y)
if random.uniform(0.0, 1.0) < p:
break
samples_x.append(x)
samples_y.append(y)
return (samples_x, samples_y)
def plot(name,samples_x, samples_y):
pylab.hexbin(samples_x, samples_y, gridsize=50, bins=1000)
pylab.axis([-1.0, 1.0, -1.0, 1.0])
cb = pylab.colorbar()
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title(name)
pylab.savefig('{0}.png'.format(name))
# Evolve and plot with uniform distribution
pylab.figure(1)
(x1s, y1s)=evolve()
plot('A2_1',x1s, y1s)
# Evolve and plot with gauss_cut
pylab.figure(2)
(x2s, y2s)=evolve(proposer=gauss_cut,
accepter=lambda u:math.exp(- alpha * u ** 4 ))
plot('A2_2',x2s, y2s)
pylab.figure(3)
compare(x1s,y1s,x2s,y2s)
pylab.show()
|
[
"simon@greenweaves.nz"
] |
simon@greenweaves.nz
|
b907f96478917192ab46c9bd004800704b20c2dd
|
25f79d934fe25d67f5f9bcf464c52736e684a532
|
/singlecell/pipeline/map_patient_virus.py
|
eef68d84aecb45e9b4643fd3631259d378debba5
|
[
"MIT"
] |
permissive
|
iosonofabio/Zanini_et_al_DENV_patients_2018
|
f6e581a9db773fad49e491830fe36ab4b33a5c03
|
9d68c929d9d09d12ced9ade2d07673af2d142aa0
|
refs/heads/master
| 2023-02-20T18:44:22.603678
| 2018-09-23T18:27:28
| 2018-09-23T18:27:28
| 140,030,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,081
|
py
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 03/06/18
content: Pipeline for virus mapping within patients AFTER the rough virus
reads have been identified in the Snakemake pipeline. The thing is
Snakemake is VERY slow to construct that graph ;-)
'''
import os
import sys
import numpy as np
import pysam
import glob
import subprocess as sp
import shutil
import argparse
from singlecell.filenames import experiments_foldername, get_stampy_exec_filename
def shell(call, env=None):
if env is None:
env = os.environ.copy()
return sp.run(call, check=True, shell=True, env=env)
def pq(query_qualities):
qstring = ''.join([chr(q + 33) for q in query_qualities])
return qstring
def rc(seq, qual):
d = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
return (''.join([d[x] for x in seq])[::-1], qual[::-1])
def read_dict(read):
seq = read.query_sequence
qual = pq(read.query_qualities)
# reverse reads in BAM are transformed into positive strand, go back
if read.is_reverse:
(seq, qual) = rc(seq, qual)
return {
'name': read.qname,
'seq': seq,
'qual': qual,
}
if __name__ == '__main__':
pa = argparse.ArgumentParser(description='Patient virus mapping pipeline')
pa.add_argument('--experiments', nargs='+', required=True,
help='experiments to process')
pa.add_argument('--virus', choices=['dengue', 'zika'],
default='dengue',
help='What virus to remap to')
args = pa.parse_args()
virus = args.virus
for expname in args.experiments:
print(expname)
root_fdn = experiments_foldername+expname+'/'
raw_reads_fn = root_fdn+virus+'_reads.bam'
raw_reads_fastq_fns = [root_fdn+virus+'_read1.fastq', root_fdn+virus+'_read2.fastq']
remap_reads_fn = root_fdn+virus+'_remapped.bam'
reference_fn = root_fdn+virus+'_reference_hybrid.fasta'
if os.path.isfile(remap_reads_fn):
print('Remapped already, skip')
continue
print('First, make fastqs out of the bam')
with pysam.AlignmentFile(raw_reads_fn, 'rb') as bamfile,\
open(raw_reads_fastq_fns[0], 'wt') as fr1,\
open(raw_reads_fastq_fns[1], 'wt') as fr2:
fr_out = [fr1, fr2]
readname = None
pair = []
bfs = [[], []]
for read in bamfile:
if (read.qname != readname) and (len(pair) == 2):
for bf, d in zip(bfs, pair):
bf.append('@{:}\n{:}\n+\n{:}\n'.format(
d['name'],
d['seq'],
d['qual']))
# Keep buffers from overflowing
if len(bfs[0]) > 1000:
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
pair = [read_dict(read)]
readname = read.qname
elif (read.qname == readname) and (len(pair) == 1):
pair.append(read_dict(read))
readname = read.qname
# Special case for the initial line
elif readname is None:
pair.append(read_dict(read))
readname = read.qname
else:
raise ValueError('Mwo ya?')
# Empty buffers
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
print('Remap via stampy')
output_sam=remap_reads_fn[:-3]+'sam'
output_index=remap_reads_fn[:-3]+'stidx'
output_hash=remap_reads_fn[:-3]+'sthash'
output_prefix_sg='/stampy/'+os.path.basename(output_index[:-6])
reference_folder=os.path.dirname(reference_fn)
reference_sg='/stampy_reference/'+os.path.basename(reference_fn)
input_sg=['/stampy_input/'+os.path.basename(i) for i in raw_reads_fastq_fns]
output_sam_sg='/stampy/'+os.path.basename(output_sam)
input_folder=os.path.dirname(raw_reads_fn)
output_folder=os.path.dirname(output_index)
stampy=get_stampy_exec_filename()
stampy_call='singularity run -B '+output_folder+':/stampy -B '+input_folder+':/stampy_input -B '+reference_folder+':/stampy_reference '+stampy
shell("rm -f {:} {:} {:}".format(output_sam, output_index, output_hash))
shell(stampy_call+" -G {:} {:}".format(output_prefix_sg, reference_sg))
shell(stampy_call+" -g {:} -H {:}".format(output_prefix_sg, output_prefix_sg))
shell(stampy_call+" -g {:} -h {:} -o {:} --inputformat=fastq --substitutionrate=0.05 --sensitive -M {:} {:}".format(output_prefix_sg, output_prefix_sg, output_sam_sg, input_sg[0], input_sg[1]))
shell("samtools view -bT {:} {:} > {:}".format(reference_fn, output_sam, remap_reads_fn))
shell("rm {:}".format(output_sam))
|
[
"fabio.zanini@fastmail.fm"
] |
fabio.zanini@fastmail.fm
|
1979fca7aa9b1817738c9706a16ba34f22f64692
|
4908b1d34d69c1cb652f25049552562574e1075f
|
/2020/Day-22/Crab_Combat/example.py
|
25da40021ae28995dac1a997eebd358fed3a5fe5
|
[
"MIT"
] |
permissive
|
sreekesari-vangeepuram/adventofcode
|
3d4ad98a25a30640182d928538b421e00ad8259d
|
645531be0208affe042ac0328105b9ef3cfc9dbf
|
refs/heads/main
| 2023-07-26T13:36:03.036721
| 2021-08-11T08:27:25
| 2021-08-11T08:27:25
| 317,850,039
| 1
| 0
|
MIT
| 2021-08-11T08:27:26
| 2020-12-02T12:08:13
|
Go
|
UTF-8
|
Python
| false
| false
| 973
|
py
|
#!/usr/bin/env python
from typing import List, Tuple
def play_space_cards(p1: List[int], p2: List[int]) -> Tuple[str, List[int]]:
b1, b2 = 0, 0 # buffer spaces for both players to space their cards
while len(p1) != 0 and len(p2) != 0:
b1, b2 = p1.pop(0), p2.pop(0)
if b1 > b2:
p1.extend([b1, b2])
else:
p2.extend([b2, b1])
if len(p1) != 0:
return "Player_1", p1
return "Player_2", p2
def count_score(winner_deck: List[int]) -> int:
accumulator = 0
for card, multiplier in zip(winner_deck, list(reversed(range(1, len(winner_deck)+1)))):
accumulator += card * multiplier
return accumulator
decks = open("sample.txt").read().strip().split("\n\n")
player_1 = list(map(int, decks[0].split("\n")[1:]))
player_2 = list(map(int, decks[1].split("\n")[1:]))
winner, winner_deck = play_space_cards(player_1, player_2)
print(f"Combat: {winner} won with score {count_score(winner_deck)}!")
|
[
"kesari.vangeepuram@gmail.com"
] |
kesari.vangeepuram@gmail.com
|
39d331e59d88c829c46113d50cfb446786f0fdfa
|
0d78474be6255f053d69d081d69caed76e46fe48
|
/aol/facilities/models.py
|
faab6649b33bb74829f6a6998b92ca45b8eba82b
|
[] |
no_license
|
conwayb/aol
|
5eff86ce1addaeb82d6437d1f548409e2b962e6e
|
d29538a502d028574e142baca508db5bfc4430ca
|
refs/heads/master
| 2020-04-05T21:32:20.035371
| 2016-11-04T23:59:04
| 2016-11-04T23:59:04
| 12,762,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,885
|
py
|
import requests
from django.contrib.gis.db import models
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.geos import Point
from django.db import transaction
class FacilityManager(models.Manager):
def to_kml(self, bbox):
return Facility.objects.all().extra(
select={'kml': 'st_askml(the_geom)'},
where=[
"the_geom && st_setsrid(st_makebox2d(st_point(%s, %s), st_point(%s, %s)), 3644)",
],
params=bbox
)
def reimport(self):
"""
Connects to the Oregon facility JSON endpoint and reimports all the
facilities
"""
response = requests.get("https://data.oregon.gov/resource/spxe-q5vj.json")
js = response.json()
# the data source uses WGS84 coords, so we have to transform them
gcoord = SpatialReference(4326)
mycoord = SpatialReference(3644)
trans = CoordTransform(gcoord, mycoord)
with transaction.atomic():
# wipe out the existing facilities
Facility.objects.all().delete()
for row in js:
try:
p = Point(float(row['location']['longitude']), float(row['location']['latitude']), srid=4326)
except KeyError:
continue
p.transform(trans)
f = Facility(
name=row['boating_facility_name'],
managed_by=row.get('managed_by', ''),
telephone=row.get('telephone', {}).get('phone_number', ''),
ramp_type=row.get('ramp_type_lanes', ''),
trailer_parking=row.get('trailer_parking', ''),
moorage=row.get('moorage', ''),
launch_fee=row.get('launch_fee', ''),
restroom=row.get('restroom', ''),
supplies=row.get('supplies', ''),
gas_on_water=row.get('gas_on_the_water', ''),
diesel_on_water=row.get('diesel_on_the_water', ''),
waterbody=row.get('waterbody', ''),
fish_cleaning=row.get('fish_cleaning_station', ''),
pumpout=row.get('pumpout', ''),
dump_station=row.get('dump_station', ''),
the_geom=p,
icon_url=row.get('boater_services', ''),
)
f.save()
class Facility(models.Model):
facility_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=254, db_column="facilityna")
waterbody = models.CharField(max_length=254)
islake = models.IntegerField()
type = models.CharField(max_length=254)
telephone = models.CharField(max_length=254)
ramp_type = models.CharField(max_length=254, db_column="ramp_type_")
moorage = models.CharField(max_length=254)
trailer_parking = models.CharField(max_length=254, db_column="trailer_pa")
transient = models.CharField(max_length=254)
launch_fee = models.CharField(max_length=254)
restroom = models.CharField(max_length=254)
supplies = models.CharField(max_length=254)
gas_on_water = models.CharField(max_length=254, db_column="gas_on_the")
diesel_on_water = models.CharField(max_length=254, db_column="diesel_on")
fish_cleaning = models.CharField(max_length=254, db_column="fish_clean")
pumpout = models.CharField(max_length=254)
dump_station = models.CharField(max_length=254, db_column="dump_stati")
managed_by = models.CharField(max_length=254)
latitude = models.FloatField()
longitude = models.FloatField()
boater_ser = models.CharField(max_length=254)
icon_url = models.CharField(max_length=254)
the_geom = models.PointField(srid=3644)
objects = FacilityManager()
class Meta:
db_table = "facility"
|
[
"mdj2@pdx.edu"
] |
mdj2@pdx.edu
|
a292f0646f44750049a15d70ad355287e0aa934b
|
7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0
|
/0301-0400/0388-Longest Absolute File Path/0388-Longest Absolute File Path.py
|
a86a2ef91fb97202c7e1d7bd2e4cdf25e89d83c6
|
[
"MIT"
] |
permissive
|
jiadaizhao/LeetCode
|
be31bd0db50cc6835d9c9eff8e0175747098afc6
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
refs/heads/master
| 2021-11-05T04:38:47.252590
| 2021-10-31T09:54:53
| 2021-10-31T09:54:53
| 99,655,604
| 52
| 28
|
MIT
| 2020-10-02T12:47:47
| 2017-08-08T05:57:26
|
C++
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
class Solution:
def lengthLongestPath(self, input: str) -> int:
lens = [0]
maxLen = 0
for line in input.splitlines():
name = line.lstrip('\t')
level = len(line) - len(name)
if '.' in name:
maxLen = max(maxLen, lens[level] + len(name))
else:
if level + 1 == len(lens):
lens.append(lens[-1] + 1 + len(name))
else:
lens[level + 1] = lens[level] + 1 + len(name)
return maxLen
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
e18b0d3d437476da904df18390cea2ad2363d612
|
2b9397e9e26f7d97ce6983d36c9842ac773b70c6
|
/workforce/migrations/0009_auto_20181015_0646.py
|
c4953694528ecce12900a7fff2ae42803176183d
|
[] |
no_license
|
eakDev/aip-1
|
288ed7d7b8cf65c74b510f4f4e45292e3342796d
|
3db2520e3c246e25e2cfa62e395a3ba6ebe37252
|
refs/heads/main
| 2023-05-02T08:57:42.449727
| 2021-05-23T10:16:59
| 2021-05-23T10:16:59
| 386,578,482
| 1
| 0
| null | 2021-07-16T09:15:22
| 2021-07-16T09:15:22
| null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
# Generated by Django 2.1.1 on 2018-10-15 06:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workforce', '0008_projectsite'),
]
operations = [
migrations.AlterField(
model_name='employeeprofile',
name='project_site',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site', to='workforce.ProjectSite'),
),
]
|
[
"clyde.khayad@gmail.com"
] |
clyde.khayad@gmail.com
|
0a545d6d5673a0f28df670d76f65a70863e87890
|
8c451e438739d741a127342e93727f3bac80b63e
|
/contributions/HARMONY 2021/test_gen_sedml.py
|
49fdc9d82e9772c2f6ac6a3f3baf4415b563de11
|
[] |
no_license
|
SED-ML/sedml-test-suite
|
a5d6c5858e81d615fa0ba7bcaa7d3af90ae55c47
|
853d8cdac8987bdf9b901936c3c8888455602212
|
refs/heads/master
| 2023-06-14T00:02:58.086947
| 2021-07-07T23:45:57
| 2021-07-07T23:45:57
| 47,284,156
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,424
|
py
|
r"""
####################################################################################################
tellurium 2.2.1
-+++++++++++++++++- Python Environment for Modeling and Simulating Biological Systems
.+++++++++++++++.
.+++++++++++++. Homepage: http://tellurium.analogmachine.org/
-//++++++++++++/. -:/-` Documentation: https://tellurium.readthedocs.io/en/latest/index.html
.----:+++++++/.++ .++++/ Forum: https://groups.google.com/forum/#!forum/tellurium-discuss
:+++++: .+:` .--++ Bug reports: https://github.com/sys-bio/tellurium/issues
-+++- ./+:-://. Repository: https://github.com/sys-bio/tellurium
.+. `...`
SED-ML simulation experiments: http://www.sed-ml.org/
# Change back to the original (with 'getName') when libsedml is fixed
sedmlDoc: L1V4
inputType: 'SEDML_STRING'
workingDir: 'C:\Users\Lucian\Desktop\tellurium'
saveOutputs: 'False'
outputDir: 'None'
plottingEngine: '<MatplotlibEngine>'
Windows-10-10.0.19041-SP0
python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
####################################################################################################
"""
import tellurium as te
from roadrunner import Config
from tellurium.sedml.mathml import *
from tellurium.sedml.tesedml import process_trace, terminate_trace, fix_endpoints
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
try:
import libsedml
except ImportError:
import tesedml as libsedml
import pandas
import os.path
Config.LOADSBMLOPTIONS_RECOMPILE = True
workingDir = r'C:\Users\Lucian\Desktop\tellurium'
# --------------------------------------------------------
# Models
# --------------------------------------------------------
# Model <model0>
model0 = te.loadSBMLModel(os.path.join(workingDir, 'hill.xml'))
# --------------------------------------------------------
# Tasks
# --------------------------------------------------------
# Task <task0>
# not part of any DataGenerator: task0
# Task <task1>
task1 = []
# Task: <task0>
task0 = [None]
model0.setIntegrator('cvode')
if model0.conservedMoietyAnalysis == True: model0.conservedMoietyAnalysis = False
__range__uniform_linear_for_n = np.linspace(start=1.0, stop=15.0, num=26)
for __k__uniform_linear_for_n, __value__uniform_linear_for_n in enumerate(__range__uniform_linear_for_n):
model0.reset()
model0['n'] = __value__uniform_linear_for_n
model0.timeCourseSelections = ['n', 'time', '[S2]']
model0.reset()
task0[0] = model0.simulate(start=0.0, end=35.0, steps=30)
task1.extend(task0)
# --------------------------------------------------------
# DataGenerators
# --------------------------------------------------------
# DataGenerator <plot_0_0_0>
__var__task1_____time = np.column_stack([sim['time'] for sim in task1])
if len(__var__task1_____time.shape) == 1:
__var__task1_____time.shape += (1,)
plot_0_0_0 = __var__task1_____time
# DataGenerator <plot_0_0_1>
__var__task1_____n = np.column_stack([sim['n'] for sim in task1])
if len(__var__task1_____n.shape) == 1:
__var__task1_____n.shape += (1,)
plot_0_0_1 = __var__task1_____n
# DataGenerator <plot_0_0_2>
__var__task1_____S2 = np.column_stack([sim['[S2]'] for sim in task1])
if len(__var__task1_____S2.shape) == 1:
__var__task1_____S2.shape += (1,)
plot_0_0_2 = __var__task1_____S2
# --------------------------------------------------------
# Outputs
# --------------------------------------------------------
# Output <plot_0>
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(num=None, figsize=(9, 5), dpi=80, facecolor='w', edgecolor='k')
from matplotlib import gridspec
__gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax = plt.subplot(__gs[0])
ax.pcolormesh(plot_0_0_0, plot_0_0_1, plot_0_0_2, color='#1f77b4', linewidth=1.5, alpha=1.0, label='task1.S2', cmap='RdBu', shading='auto')
ax.set_title('UniformTimecourse', fontweight='bold')
ax.set_xlabel('task1.time', fontweight='bold')
ax.set_ylabel('task1.n', fontweight='bold')
plt.tick_params(axis='both', which='major', labelsize=10)
plt.tick_params(axis='both', which='minor', labelsize=8)
plt.savefig(os.path.join(workingDir, 'plot_0.png'), dpi=100)
plt.show()
####################################################################################################
|
[
"lpsmith@uw.edu"
] |
lpsmith@uw.edu
|
8a0848652a216c54c6483dd93481724a0b600cde
|
1d70bed8b3e7314cac8a1b5cb8e20a98924d0746
|
/gdp and stock predicton/modules.py
|
6d2685b1da0a102bf5bcb75c678ec0dfd2a0d57a
|
[] |
no_license
|
bateikoEd/dipl_program
|
02d46f2342d2814ed58181f38f9a781effeedd05
|
0b885c436cda096c80fe2b445337dc7e0bf16ba0
|
refs/heads/main
| 2023-07-24T05:42:05.509338
| 2021-09-06T06:36:18
| 2021-09-06T06:36:18
| 344,238,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,647
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.model_selection import cross_val_score
import numpy as np
from sklearn.metrics import r2_score
# from sklearn.metrics import mean_absolute_percentage_error
from statsmodels.stats.stattools import durbin_watson
from sklearn.metrics import explained_variance_score
def barplot(data, title):
# fig = plt.figure(figsize=(18,6))
bar_plot = sns.barplot(x=data['feature'], y=data['value'])
for item in bar_plot.get_xticklabels():
item.set_rotation(90)
plt.title(title)
plt.show()
def get_score_for_model(models, X_train, y_train, scoring, n_splits=3,print_res=True):
def append_res_to_boxplot():
i = 0
df = pd.DataFrame()
while i < len(results[0]):
line = [[num[i], ml] for num, ml in zip(results, names)]
# for num, ml in zip(results, names):
# line.append([num[i],ml])
i = i + 1
df = df.append(pd.DataFrame(line, columns=[scoring, 'ML']), ignore_index=True)
return df
seed = 13
results = []
means = []
sdv = []
names = []
for name, model in models:
strat = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
cv_results = cross_val_score(model, X_train, y_train, cv=strat, scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
means.append(cv_results.mean())
sdv.append(cv_results.std())
if print_res:
print(f"{names[-1]}: {means[-1]} ({sdv[-1]})")
box_plot = append_res_to_boxplot()
df_means = pd.DataFrame({'ML': names, 'means': means, 'std': sdv})
return box_plot, df_means
def define_metrics(model, X_train_, X_test_, y_train, y_test, name):
pred_train_ = np.array(model.predict(X_train_))
pred_test_ = np.array(model.predict(X_test_))
y_train_ = np.array(y_train)
y_test_ = np.array(y_test)
metric_train = pd.DataFrame()
metric_train['name'] = [name + '_train']
metric_train['r2'] = [r2_score(y_train, pred_train_)]
metric_train['sum_squared_resid'] = np.sum((y_train_ - pred_train_)**2)
metric_train['MAPE'] = [np.mean(np.abs((y_train - pred_train_) / y_train)) * 100]
metric_train['RMSE'] = [np.sqrt(np.mean((y_train - pred_train_)**2))]
metric_train['durbin_watson'] = [durbin_watson(y_train - pred_train_)]
metric_train['theil_index'] = [np.sqrt((1/len(pred_train_))*np.sum((y_train_-pred_train_)**2))
/ (np.sqrt((1/len(y_train_))*np.sum(y_train_**2)) + np.sqrt((1/len(pred_train_))*np.sum(pred_train_**2)))]
metric_train['ex_var'] = [explained_variance_score(y_train, pred_train_)]
metric_test = pd.DataFrame()
metric_test['name'] = [name + '_test']
metric_test['r2'] = [r2_score(y_test, pred_test_)]
metric_test['sum_squared_resid'] = np.sum((y_test_ - pred_test_)**2)
metric_test['MAPE'] = [np.mean(np.abs((y_test - pred_test_) / y_test)) * 100]
metric_test['RMSE'] = [np.sqrt(np.mean((y_test - pred_test_) ** 2))]
metric_test['durbin_watson'] = [durbin_watson(y_test - pred_test_)]
metric_test['theil_index'] = [np.sqrt((1/len(pred_test_))*np.sum((y_test_-pred_test_)**2))
/ (np.sqrt((1/len(y_test_))*np.sum(y_test_**2)) + np.sqrt((1/len(pred_test_))*np.sum(pred_test_**2)))]
metric_test['ex_var'] = [explained_variance_score(y_test, pred_test_)]
return metric_train.append(metric_test)
if __name__ == '__main__':
pass
|
[
"bateiko0713@gmail.com"
] |
bateiko0713@gmail.com
|
8d3a150e92b97edc73a1af8bcfa9566c2296219c
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/surface/pubsub/subscriptions/seek.py
|
718094747211caab81d5b553f97be853d2cb982b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,886
|
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub subscriptions seek command."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.command_lib.pubsub import util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class SeekAlpha(base.Command):
"""This feature is part of an invite-only release of the Cloud Pub/Sub API.
Resets a subscription's backlog to a point in time or to a given snapshot.
This feature is part of an invitation-only release of the underlying
Cloud Pub/Sub API. The command will generate errors unless you have access to
this API. This restriction should be relaxed in the near future. Please
contact cloud-pubsub@google.com with any questions in the meantime.
"""
@staticmethod
def Args(parser):
"""Registers flags for this command."""
parser.add_argument('subscription',
help='Name of the subscription to affect.')
seek_to_parser = parser.add_mutually_exclusive_group(required=True)
seek_to_parser.add_argument(
'--time', type=arg_parsers.Datetime.Parse,
help=('The time to seek to. Messages in the subscription that '
'were published before this time are marked as acknowledged, and '
'messages retained in the subscription that were published after '
'this time are marked as unacknowledged. See `gcloud topic '
'datetimes` for information on time formats.'))
seek_to_parser.add_argument(
'--snapshot',
help=('The name of the snapshot. The snapshot\'s topic must be the same'
' as that of the subscription.'))
parser.add_argument(
'--snapshot-project', default='',
help=('The name of the project the snapshot belongs to (if seeking to '
'a snapshot). If not set, it defaults to the currently selected '
'cloud project.'))
def Collection(self):
return util.SUBSCRIPTIONS_SEEK_COLLECTION
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
A serialized object (dict) describing the results of the operation. This
description fits the Resource described in the ResourceRegistry under
'pubsub.subscriptions.seek'.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
subscription_path = util.SubscriptionFormat(args.subscription)
result = {'subscriptionId': subscription_path}
seek_req = msgs.SeekRequest()
if args.snapshot:
if args.snapshot_project:
snapshot_project = (
projects_util.ParseProject(args.snapshot_project).Name())
else:
snapshot_project = ''
seek_req.snapshot = util.SnapshotFormat(args.snapshot, snapshot_project)
result['snapshotId'] = seek_req.snapshot
else:
seek_req.time = args.time.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
result['time'] = seek_req.time
pubsub.projects_subscriptions.Seek(
msgs.PubsubProjectsSubscriptionsSeekRequest(
seekRequest=seek_req, subscription=subscription_path))
return result
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
5a57d709e68b57343a2f490cf6f527c2a7bb2503
|
e18c84358b2a80159b37dcea39debfbbdaa66395
|
/backend/api/views/image_c.py
|
0dcc38ac99342faa71280bd72d3802a93a490817
|
[
"MIT"
] |
permissive
|
chunyenHuang/Disfactory
|
49d404609b73783ac488be9430d9cf518fc19f64
|
52985f7aadc8ca56344f80000b5e943bea99f83d
|
refs/heads/master
| 2021-01-03T01:54:40.415165
| 2020-01-22T04:09:29
| 2020-01-22T04:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
from django.conf import settings
from django.http import HttpResponse, JsonResponse
import django_q.tasks
from rest_framework.decorators import api_view
from ..models import Image
from .utils import (
_is_image,
_get_image_original_date,
)
@api_view(['POST'])
def post_image(request):
f_image = request.FILES['image']
if _is_image(f_image):
f_image.seek(0)
image_original_date = _get_image_original_date(f_image)
kwargs = {
'image_path': '',
'orig_time': image_original_date,
}
img = Image.objects.create(**kwargs)
f_image.seek(0)
django_q.tasks.async_task('api.tasks.upload_image', f_image.read(), settings.IMGUR_CLIENT_ID, img.id)
return JsonResponse({"token": img.id})
return HttpResponse(
"The uploaded file cannot be parsed to Image",
status=400,
)
|
[
"stegben.benjamin@gmail.com"
] |
stegben.benjamin@gmail.com
|
a788ecad5cc912d6405ede696a2f16263c295b76
|
8126d1bc2afe0925a24fce039d0f02a3bd7acbae
|
/pytraj/c_action/__init__.py
|
de635542285540646e5470bb9b3a11a2de034598
|
[
"BSD-2-Clause"
] |
permissive
|
rafwiewiora/pytraj
|
54fb6fe07a754f65b865dd161f64c7af15fc3926
|
91a019ea406081ccf0043170cc64c48b4a5ea04a
|
refs/heads/master
| 2021-01-20T17:33:05.974254
| 2016-03-11T21:25:32
| 2016-03-11T21:25:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
""""""
from __future__ import absolute_import
from . import c_action
actionlist = []
for act in c_action.__dict__.keys():
if 'Action' in act:
actionlist.append(act)
__all__ = actionlist
__doc__ = "\n".join(__all__)
|
[
"hainm.comp@gmail.com"
] |
hainm.comp@gmail.com
|
cfa8945289850ff63e497fcc908de2732efb4faf
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
|
d81a741d398ce19a72f4ca18421e45b81afc015c
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/event/hunter_respawn.py
import BigWorld
from gui.Scaleform.daapi.view.battle.event.boss_teleport import EventBossTeleportView
from gui.Scaleform.daapi.view.meta.EventHunterRespawnViewMeta import EventHunterRespawnViewMeta
from gui.wt_event.wt_event_helpers import getSpeed
from gui.impl import backport
from gui.impl.gen import R
from gui.shared.gui_items.Vehicle import getIconResourceName
class EventHunterRespawnView(EventBossTeleportView, EventHunterRespawnViewMeta):
def onRespawnPointClick(self, pointGuid):
self._chooseSpawnPoint(pointGuid)
def showSpawnPoints(self):
self._blur.enable()
timeLeft = 0
timeTotal = 0
respawnComponent = BigWorld.player().dynamicComponents.get('respawnComponent')
if respawnComponent:
timeLeft = respawnComponent.endTime - BigWorld.serverTime()
timeTotal = respawnComponent.duration
self.as_updateTimerS(timeLeft, timeTotal, replaySpeed=getSpeed())
vTypeVO = self._sessionProvider.getCtx().getVehicleInfo(BigWorld.player().playerVehicleID).vehicleType
iconName = getIconResourceName(vTypeVO.iconName)
icon = R.images.gui.maps.icons.wtevent.hunterRespawn.dyn(iconName)
if icon.exists():
self.as_setIconS(backport.image(icon()))
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
182d2c133c867b48df3b915ff9cc056dcdba61d5
|
f03e50ab105c8dd97bda374fa2d604d480b85fb3
|
/apps/projects/models.py
|
ca45800eb9e00a783cdea3dae4a0abef2f2c4541
|
[] |
no_license
|
callowayproject/callowaysite
|
9717b7d934ef142b5e6b8fa1e0c93651382198bb
|
eb25d208586a7dc9ffb88660b07ad942ba9fe231
|
refs/heads/master
| 2022-12-15T11:38:57.787801
| 2019-07-14T13:21:13
| 2019-07-14T13:21:13
| 730,944
| 1
| 0
| null | 2022-11-22T00:40:56
| 2010-06-20T19:50:00
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
import datetime
from django.db import models
from django.core.files.images import get_image_dimensions
from projects.settings import LOGO_STORAGE, PROJECT_TYPES, STATUSES
class Project(models.Model):
"""Something that we work on"""
name = models.CharField(blank=True, max_length=255)
description = models.TextField(blank=True)
code_url = models.CharField(blank=True, max_length=255)
docs_url = models.CharField(blank=True, max_length=255)
logo = models.FileField(blank=True, upload_to='projects/logos', storage=LOGO_STORAGE())
logo_width = models.IntegerField(editable=False, blank=True, null=True)
logo_height = models.IntegerField(editable=False, blank=True, null=True)
is_fork = models.BooleanField(default=False)
why_forked = models.TextField(blank=True, null=True)
external_id = models.IntegerField(blank=True, null=True)
project_type = models.IntegerField(choices=PROJECT_TYPES, default=2)
status = models.IntegerField(choices=STATUSES, default=0)
updated = models.DateTimeField(editable=False, default=datetime.datetime.now)
class Meta:
ordering = ('name', )
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if self.logo:
width, height = get_image_dimensions(self.logo.file, close=True)
else:
width, height = None, None
self.logo_width = width
self.logo_height = height
super(Project, self).save(*args, **kwargs)
|
[
"coreyoordt@gmail.com"
] |
coreyoordt@gmail.com
|
3f0334e74a172b28d97ef4fe5641f86b7070ca66
|
9426f2e4f25c85c351a4d1b8855fe7d4cfd35210
|
/fardel_ecommerce/order/models.py
|
43289b3f939406b8fb0777a2648784f5577f747c
|
[] |
no_license
|
FardelCMS/fardel_ecommerce
|
52e4eaebb243c863f0dd6af22be093f4c90af8cd
|
d4221a7f4f7812d3e491234fc4cca6b828665ae3
|
refs/heads/master
| 2021-08-01T01:52:22.809056
| 2021-07-29T09:58:11
| 2021-07-29T09:58:11
| 229,290,203
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,175
|
py
|
import datetime
from ..checkout.models import Cart, CartLine
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from flask_sqlalchemy import BaseQuery
from flask_jwt_extended import current_user
from fardel_ecommerce.product.models import ProductVariant
from fardel.ext import db
class Order(db.Model):
__tablename__ = "orders"
"""
Status Types:
:Fulfiled:
:Unfulfiled:
:Canceled:
:Done:
"""
id = db.Column(db.Integer, primary_key=True, index=True)
status = db.Column(db.String(64), default="Unfulfiled")
user_id = db.Column(db.Integer, db.ForeignKey('auth_users.id'))
address_id = db.Column(db.Integer, db.ForeignKey('auth_users_address.id'))
create_time = db.Column(db.TIMESTAMP, default=func.current_timestamp())
total = db.Column(db.Integer, default=0)
quantity = db.Column(db.Integer, default=0)
data = db.Column(JSONB())
user = db.relationship("User")
address = db.relationship("UserAddress")
lines = db.relationship("OrderLine")
@staticmethod
def create_from_cart(cart_id, address_id):
cart = Cart.query.filter_by(token=cart_id).first()
if current_user.id == cart.user_id:
order = Order(
user_id=cart.user_id,
total=cart.total,
quantity=cart.quantity,
address_id=address_id,
data=cart.checkout_data
)
db.session.add(order)
db.session.commit()
for line in cart.lines:
order_line = OrderLine(
order_id=order.id,
variant_id=line.variant_id,
quantity=line.quantity,
total=line.get_total(),
data=line.data
)
db.session.add(order_line)
cart.clear()
db.session.flush()
return order
else:
return None
@property
def is_shipping_required(self):
"""Return `True` if any of the lines requires shipping."""
if not hasattr(self, '_is_shipping_required'):
self._is_shipping_required = False
for line in self.lines:
if line.variant.is_shipping_required:
self._is_shipping_required = True
break
return self._is_shipping_required
def delete_line(self, variant_id, data):
""" Delete a line with specified variant_id+data """
line = self.get_line(variant_id, data)
line.delete()
def set_fulfiled(self):
for line in self.lines:
line.variant.quantity_allocated = ProductVariant.quantity_allocated + line.quantity
self.status = "Fulfiled"
db.session.flush()
def dict(self):
""" Serialize object to json """
return {
'id': self.id,
'status': self.status,
'address': self.address.dict(),
'total': self.total,
'quantity': self.quantity,
'lines': [line.dict() for line in self.lines],
'is_shipping_required': self.is_shipping_required,
}
class OrderLine(db.Model):
__tablename__ = "order_lines"
id = db.Column(db.Integer, primary_key=True, index=True)
order_id = db.Column(db.ForeignKey('orders.id'))
variant_id = db.Column(db.Integer,
db.ForeignKey('product_product_variants.id', ondelete="CASCADE"))
total = db.Column(db.Integer)
quantity = db.Column(db.Integer)
data = db.Column(JSONB(), default={})
variant = db.relationship("ProductVariant")
order = db.relationship("Order", overlaps="lines")
def dict(self):
return {
'id': self.id,
'variant': self.variant.dict(cart=True),
'quantity': self.quantity,
'data': self.data,
'total': self.total,
'is_shipping_required': self.is_shipping_required
}
@property
def is_shipping_required(self):
return self.variant.is_shipping_required
|
[
"s.hamzelooy@gmail.com"
] |
s.hamzelooy@gmail.com
|
1f65100839d9ff8b15648173db4bdc566eb7e7b4
|
439e3b0fcc8959483bc35ff9c1229ce240037bbe
|
/tests/test_kanwa.py
|
db033372d8214dbf5ebd4d6f1563242af952d467
|
[
"MIT"
] |
permissive
|
403JFW/kakasi-utils
|
16fe27265f1b7f05045e4370cf19de080c649e8f
|
698b6fc8d812637473dc941b36d9ccff87410d0c
|
refs/heads/master
| 2021-01-02T09:54:13.425825
| 2014-04-15T00:55:51
| 2014-04-15T00:55:51
| 17,693,614
| 3
| 0
| null | 2014-05-12T06:39:36
| 2014-03-13T03:37:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import unittest
from kakasi_utils.kanwa import Kanwa
class TestKanwa(unittest.TestCase):
def test_merge(self):
"""Test merge"""
# Get dict file paths
data_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
in_files = [
data_dir + "test_kanwa_input_a.txt",
data_dir + "test_kanwa_input_b.txt"
]
out_file = data_dir + "test_kanwa_output.txt"
# Run merge
kanwa = Kanwa()
kanwa.merge(in_files, out_file)
# Assert result
for in_file in in_files:
self._assert_dict_in_dict(in_file, out_file)
# Check duplication
self._load_dict(out_file, check_duplication=True)
os.remove(out_file)
def _assert_dict_in_dict(self, file_child, file_parent):
"""Assert that child dict files item in parent dict file"""
dict_child = self._load_dict(file_child)
dict_parent = self._load_dict(file_parent)
for item in dict_child.keys():
if item not in dict_parent:
                raise AssertionError("'%s' does not exist in %s" % (
                    item, file_parent))
def _load_dict(self, in_dict_file, check_duplication=False):
"""Load KAKASI dict file and return python dict"""
table = {}
with codecs.open(in_dict_file, 'rU', 'euc_jp') as in_file:
for line in in_file:
line = line.rstrip()
if line[0:2] == ';;':
continue
if check_duplication and (line in table):
raise AssertionError("'%s' duplicates" % line)
table[line] = True
return table
|
[
"miyazaki.dev@gmail.com"
] |
miyazaki.dev@gmail.com
|
c6ce9e4a4ce2934670386105b410efd371bb56c3
|
87140007e96872d3611f0778eb0eebe5799616d7
|
/runs/1000KB/src2-tgt1/seq-nobro-iter08000.cfg.py
|
d889d92141bd83f110aee7e52fd487b910171abe
|
[
"MIT"
] |
permissive
|
janpawellek/broeval
|
49499fa302abff916ffced201034d3b9394503cd
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
refs/heads/master
| 2021-01-11T12:19:13.619220
| 2016-12-20T16:23:27
| 2016-12-20T16:23:27
| 76,468,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
# Write results to this file
OUTFILE = 'runs/1000KB/src2-tgt1/seq-nobro-iter08000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'seq'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 8000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 6
|
[
"pawellek@stud.uni-heidelberg.de"
] |
pawellek@stud.uni-heidelberg.de
|
afd9ffeb1bc993d9503161429c26f6b38e550db9
|
3dbbde1aa96fc09e9aab885cf3713e86f3572dec
|
/gs-vtoi/bin/glacier
|
6e1a948d2c394d1ca44797d1d7fd32027f7bc0eb
|
[] |
no_license
|
bopopescu/gs-vtoi
|
6223d6dbf47e89292bd0e79e24e5664450e28cf6
|
f12b802976d0020179d1b40b0b5e3af5b72d55cc
|
refs/heads/master
| 2022-11-24T16:31:36.804869
| 2018-07-31T08:30:56
| 2018-07-31T08:30:56
| 282,551,982
| 0
| 0
| null | 2020-07-26T01:09:10
| 2020-07-26T01:09:09
| null |
UTF-8
|
Python
| false
| false
| 5,288
|
#!/Users/Sang/OneDrive/Developments/gs-vtoi/gs-vtoi/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Miguel Olivares http://moliware.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
glacier
~~~~~~~
Amazon Glacier tool built on top of boto. Look at the usage method to see
how to use it.
Author: Miguel Olivares <miguel@moliware.com>
"""
import sys
from boto.glacier import connect_to_region
from getopt import getopt, GetoptError
from os.path import isfile, basename
COMMANDS = ('vaults', 'jobs', 'upload')
def usage():
print("""
glacier <command> [args]
Commands
vaults - Operations with vaults
jobs - Operations with jobs
upload - Upload files to a vault. If the vault doesn't exits, it is
created
Common args:
--access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
--secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
--region - AWS region to use. Possible values: us-east-1, us-west-1,
us-west-2, ap-northeast-1, eu-west-1.
Default: us-east-1
Vaults operations:
List vaults:
glacier vaults
Jobs operations:
List jobs:
glacier jobs <vault name>
Uploading files:
glacier upload <vault name> <files>
Examples :
glacier upload pics *.jpg
glacier upload pics a.jpg b.jpg
""")
sys.exit()
def connect(region, debug_level=0, access_key=None, secret_key=None):
""" Connect to a specific region """
layer2 = connect_to_region(region,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
debug=debug_level)
if layer2 is None:
print('Invalid region (%s)' % region)
sys.exit(1)
return layer2
def list_vaults(region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
for vault in layer2.list_vaults():
print(vault.arn)
def list_jobs(vault_name, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
print(layer2.layer1.list_jobs(vault_name))
def upload_files(vault_name, filenames, region, access_key=None, secret_key=None):
layer2 = connect(region, access_key = access_key, secret_key = secret_key)
layer2.create_vault(vault_name)
glacier_vault = layer2.get_vault(vault_name)
for filename in filenames:
if isfile(filename):
sys.stdout.write('Uploading %s to %s...' % (filename, vault_name))
sys.stdout.flush()
archive_id = glacier_vault.upload_archive(
filename,
description = basename(filename))
print(' done. Vault returned ArchiveID %s' % archive_id)
def main():
if len(sys.argv) < 2:
usage()
command = sys.argv[1]
if command not in COMMANDS:
usage()
argv = sys.argv[2:]
options = 'a:s:r:'
long_options = ['access_key=', 'secret_key=', 'region=']
try:
opts, args = getopt(argv, options, long_options)
except GetoptError as e:
usage()
# Parse agument
access_key = secret_key = None
region = 'us-east-1'
for option, value in opts:
if option in ('-a', '--access_key'):
access_key = value
elif option in ('-s', '--secret_key'):
secret_key = value
elif option in ('-r', '--region'):
region = value
# handle each command
if command == 'vaults':
list_vaults(region, access_key, secret_key)
elif command == 'jobs':
if len(args) != 1:
usage()
list_jobs(args[0], region, access_key, secret_key)
elif command == 'upload':
if len(args) < 2:
usage()
upload_files(args[0], args[1:], region, access_key, secret_key)
if __name__ == '__main__':
main()
|
[
"sy0414@gmail.com"
] |
sy0414@gmail.com
|
|
82fd235db118646fc86003f4b9b8c9456cea7a02
|
c5758c1f4c880f4530df1a5ffb4c30ee2da445ee
|
/pytracking/vot_ep/sk3x3_meanmax_adaptive/vot_wrapper_sk3x3_meanmax_adaptive_ep0024.py
|
23ccb4ec2fc0650043d494e0655d884737572b61
|
[] |
no_license
|
bfjei2825401/d3s
|
6d662fc301181a0e3ad831b0db6111e3cf8f4097
|
32140a3c67252f0e98cbfbf6ad6d2a79267c221b
|
refs/heads/master
| 2023-02-27T09:57:25.692878
| 2021-01-27T14:20:57
| 2021-01-27T14:20:57
| 297,217,521
| 0
| 0
| null | 2020-09-21T03:23:09
| 2020-09-21T03:23:09
| null |
UTF-8
|
Python
| false
| false
| 2,523
|
py
|
import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3_meanmax_adaptive import SegmSK3x3MeanMaxAdaptive
from pytracking.parameter.segm_sk3x3_meanmax_adaptive import default_params_ep as vot_params
def rect_to_poly(rect):
x0 = rect[0]
y0 = rect[1]
x1 = rect[0] + rect[2]
y1 = rect[1]
x2 = rect[0] + rect[2]
y2 = rect[1] + rect[3]
x3 = rect[0]
y3 = rect[1] + rect[3]
return [x0, y0, x1, y1, x2, y2, x3, y3]
def parse_sequence_name(image_path):
idx = image_path.find('/color/')
return image_path[idx - image_path[:idx][::-1].find('/'):idx], idx
def parse_frame_name(image_path, idx):
frame_name = image_path[idx + len('/color/'):]
return frame_name[:frame_name.find('.')]
# MAIN
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
sys.exit(0)
params = vot_params.parameters(24)
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
round(selection.points[1].x, 2), round(selection.points[1].y, 2),
round(selection.points[2].x, 2), round(selection.points[2].y, 2),
round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3MeanMaxAdaptive(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
imagefile = handle.frame()
if not imagefile:
break
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
# tell the frame name to the tracker (to save segmentation masks to the disk)
frame_name = parse_frame_name(imagefile, idx_)
tracker.frame_name = frame_name
prediction = tracker.track(image)
if len(prediction) == 4:
prediction = rect_to_poly(prediction)
pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
vot.Point(prediction[2], prediction[3]),
vot.Point(prediction[4], prediction[5]),
vot.Point(prediction[6], prediction[7])])
handle.report(pred_poly)
|
[
"752958525@qq.com"
] |
752958525@qq.com
|
66eb41b497be5f43356f205ce49307cd2e618a2e
|
63ba933a294865f65409635f62e0f1d59f725f37
|
/src/linkedLists/flatten.py
|
4b148dc9198652a6473dde5fda7746be4087ad87
|
[
"CC0-1.0"
] |
permissive
|
way2arun/datastructures_algorithms
|
fc4302bdbb923ef8912a4acf75a286f2b695de2a
|
4ea4c1579c28308455be4dfa02bd45ebd88b2d0a
|
refs/heads/master
| 2021-12-07T04:34:35.732026
| 2021-09-30T12:11:32
| 2021-09-30T12:11:32
| 203,658,808
| 1
| 0
| null | 2020-08-08T15:55:09
| 2019-08-21T20:23:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
"""
https://leetcode.com/explore/challenge/card/july-leetcoding-challenge/545/week-2-july-8th-july-14th/3386/
You are given a doubly linked list which in addition to the next and previous pointers, it could have a child pointer, which may or may not point to a separate doubly linked list. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure, as shown in the example below.
Flatten the list so that all the nodes appear in a single-level, doubly linked list. You are given the head of the first level of the list.
Example 1:
Input: head = [1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Output: [1,2,3,7,8,11,12,9,10,4,5,6]
Explanation:
The multilevel linked list in the input is as follows:
After flattening the multilevel linked list it becomes:
Example 2:
Input: head = [1,2,null,3]
Output: [1,3,2]
Explanation:
The input multilevel linked list is as follows:
1---2---NULL
|
3---NULL
Example 3:
Input: head = []
Output: []
How multilevel linked list is represented in test case:
We use the multilevel linked list from Example 1 above:
1---2---3---4---5---6--NULL
|
7---8---9---10--NULL
|
11--12--NULL
The serialization of each level is as follows:
[1,2,3,4,5,6,null]
[7,8,9,10,null]
[11,12,null]
To serialize all levels together we will add nulls in each level to signify no node connects to the upper node of the previous level. The serialization becomes:
[1,2,3,4,5,6,null]
[null,null,7,8,9,10,null]
[null,11,12,null]
Merging the serialization of each level and removing trailing nulls we obtain:
[1,2,3,4,5,6,null,null,null,7,8,9,10,null,null,11,12]
Constraints:
Number of Nodes will not exceed 1000.
1 <= Node.val <= 10^5
"""
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
class Solution:
def flatten(self, head: 'Node') -> 'Node':
# Solution 1 - 36 ms
"""
if not head:
return head
order = []
stack = [head]
while stack:
curr = stack.pop()
order.append(curr)
if curr.next:
stack.append(curr.next)
if curr.child:
stack.append(curr.child)
curr.child = None
for i in range(len(order) - 1):
order[i].next = order[i + 1]
order[i + 1].prev = order[i]
return order[0]
"""
# Solution 2
pointer = head
branches = []
while pointer:
if pointer.child:
if pointer.next: branches.append(pointer.next)
pointer.next = pointer.child
pointer.child = None
pointer.next.prev = pointer
elif not pointer.next and len(branches) > 0:
pointer.next = branches.pop()
pointer.next.prev = pointer
pointer = pointer.next
return head
# Main Call: build Example 2 (1---2 on the top level, 3 as the child of 1)
node2 = Node(2, None, None, None)
node3 = Node(3, None, None, None)
root_node = Node(1, None, node2, node3)
node2.prev = root_node
solution = Solution()
# Walk the flattened list: prints 1, 3, 2
flat = solution.flatten(root_node)
while flat:
    print(flat.val)
    flat = flat.next
|
[
"way2aru@yahoo.com"
] |
way2aru@yahoo.com
|
03c9b8c1400c21f8f1f1f697eace517cba3fabce
|
f0b75bd94f133a13f469f429a696f26be3be9862
|
/week 2/.history/python_second_assignment_20200204154901.py
|
ca2a7780d18decaa9aca7b5410cab8eda6e90bd4
|
[] |
no_license
|
dechavez4/Python_handin_assignments
|
023350fabd212cdf2a4ee9cd301306dc5fd6bea0
|
82fd8c991e560c18ecb2152ea5a8fc35dfc3c608
|
refs/heads/master
| 2023-01-11T23:31:27.220757
| 2020-05-22T10:33:56
| 2020-05-22T10:33:56
| 237,179,899
| 0
| 0
| null | 2022-12-30T20:14:04
| 2020-01-30T09:30:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
# Exercise 1
# Create a python file with 3 functions:
# A. def print_file_content(file) that can print content of a csv file to the console
import csv
from sys import argv
import platform
filename = argv[1]
def print_file_content(file):
    with open(file) as csv_file:
content = csv_file.readlines()
for line in content[:20]:
print(line.strip().split(','))
# can overwrite the old file.
# B. def write_list_to_file(output_file, lst) that can take a list of tuple and write each element to a new line in file
def write_list_to_file(output_file, *lst):
if platform.system() == 'Windows':
newline=''
else:
newline=None
with open (output_file, 'w', newline=newline) as output_file:
output_writer = csv.writer(output_file)
for ele in lst:
output_writer.writerow(ele)
# C. def read_csv(input_file) that take a csv file and read each row into a list
def read_line(file):
with open(file) as file_object:
lines = file_object.readlines()
print(lines)
for line in lines:
print(line.rstrip())
def main():
if argv[2] == 'print_file_content':
print_file_content(filename)
if argv[2] == 'write_list_to_file':
inputfield = argv[3:]
write_list_to_file(filename, inputfield)
if argv[2] == 'read_line':
read_line(filename)
if __name__ == '__main__':
    main()
|
[
"chavezgamingv2@hotmail.com"
] |
chavezgamingv2@hotmail.com
|
667a6a1286fe0c8a7c4877e2d9a1aab0a9a79399
|
c3ff891e0e23c5f9488508d30349259cc6b64b4d
|
/python练习/django exercise/FormsDemo/first/views.py
|
ebcd835469f1f68f965b5e54504c5e8ab9bab17f
|
[] |
no_license
|
JacksonMike/python_exercise
|
2af2b8913ec8aded8a17a98aaa0fc9c6ccd7ba53
|
7698f8ce260439abb3cbdf478586fa1888791a61
|
refs/heads/master
| 2020-07-14T18:16:39.265372
| 2019-08-30T11:56:29
| 2019-08-30T11:56:29
| 205,370,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
from django.shortcuts import render, HttpResponse
from django.core.exceptions import ValidationError
from django.forms import widgets
from first.models import UserInfo
from django import forms
# Create your views here.
def register(request):
if request.method == "GET":
form = UserForm()
return render(request, "register.html", locals())
else:
print(request.POST)
form = UserForm(request.POST)
if form.is_valid():
            # data that passed validation
print(form.cleaned_data)
UserInfo.objects.create(**form.cleaned_data)
            return HttpResponse("Registration successful")
else:
            # data that failed validation
error_data = form.errors
return render(request, "register.html", locals())
class UserForm(forms.Form):
    user = forms.CharField(max_length=7,
                           label="Username",
                           error_messages={"required": "This field cannot be empty"},
                           widget=widgets.TextInput(attrs={"class": "form-control"}))
    pwd = forms.CharField(max_length=7,
                          label="Password",
                          error_messages={"required": "This field cannot be empty"},
                          widget=widgets.PasswordInput(attrs={"class": "form-control"}))
    email = forms.EmailField(min_length=5,
                             label="Email",
                             error_messages={"invalid": "Invalid email format", "required": "This field cannot be empty"},
                             widget=widgets.EmailInput(attrs={"class": "form-control"}))
def clean_user(self):
"""判断用户名是否被注册"""
val = self.cleaned_data.get("user")
if not UserInfo.objects.filter(user=val).first():
return val
else:
            raise ValidationError("This username is already registered")
def clean_pwd(self):
val = self.cleaned_data.get("pwd")
if val.isdigit():
            raise ValidationError("Password cannot consist of digits only")
else:
return val
|
[
"2101706902@qq.com"
] |
2101706902@qq.com
|
3f89fb97ec5363fc81efe42ce4a627e34436e809
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_suite.py
|
5d9059953940881ade58e572a6b7dde68f38bcfb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
#calss header
class _SUITE():
def __init__(self,):
self.name = "SUITE"
self.definitions = [u'a set of connected rooms, especially in a hotel: ', u'a set of furniture for one room, of matching design and colour: ', u'a piece of music with several parts, usually all in the same key', u'a set of related software (= computer program) products']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1bdd34e88fd6277b360b09b84201d96e1a50fe44
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/143_15.py
|
8d67528a49ea0ab7b49f24cfcb96309e98a02750
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,669
|
py
|
Python | Check if string ends with any string in given list
While working with strings, their prefixes and suffixes often play an important
role in decision making. For data manipulation tasks, we may sometimes need to
check whether a string ends with any of a set of candidate suffixes. Let's discuss
certain ways in which this task can be performed.
**Method #1 : Using filter() + endswith()**
The combination of the above functions can help to perform this particular
task. The filter method iterates over the suffix list, and the endswith method
tests each suffix against the target string.
# Python3 code to demonstrate
# Checking for string match suffix
# using filter() + endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using filter() + endswith()
# Checking for string match suffix
res = list(filter(test_string.endswith, suff_list)) != []
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
**Method #2 : Using endswith()**
As an improvement over the above method, it is not necessary to involve the
filter method at all. This task can be handled solely by supplying a tuple of
the candidate suffixes as the argument to the endswith method.
# Python3 code to demonstrate
# Checking for string match suffix
# using endswith()
# initializing string
test_string = "GfG is best"
# initializing suffix list
suff_list = ['best', 'iss', 'good']
# printing original string
print("The original string : " + str(test_string))
# using endswith()
# Checking for string match suffix
res = test_string.endswith(tuple(suff_list))
# print result
print("Does string end with any suffix list sublist ? : " +
str(res))
**Output :**
The original string : GfG is best
Does string end with any suffix list sublist ? : True
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
6447b4421e3a2256c272226eb874c95411fda479
|
8dffff5ff7f2645a50fd9846198e12e4c96a91da
|
/18-letter-count.py
|
ab86a66578554a66e0cb43fd008cdfbc21744bb6
|
[] |
no_license
|
akshaypawar2508/Coderbyte-pythonSol
|
b233c5ee0c34e0413a26b24b423dae45342b9ade
|
5c7d2028fe09fd02aad7808f88abc40fdea0f81e
|
refs/heads/master
| 2022-01-03T09:44:18.635060
| 2014-07-31T13:32:08
| 2014-07-31T13:32:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
def LetterCountI(str):
for word in str.split():
for i in range(len(word)):
if word[i] in word[i+1:]:
return word
return -1
# keep this function call here
# to see how to enter arguments in Python scroll down
print LetterCountI(raw_input())
|
[
"xzhu15@illinois.edu"
] |
xzhu15@illinois.edu
|
0b3eeb02095fbf2030db653bc03576071c4a956a
|
9672fa478478085b69c7ef8f02eaa7fa0bc7767b
|
/symphony/cli/pyinventory/graphql/fragment/service_endpoint.py
|
f22a4f54006c151f24a0aaab059869fd9813ff4f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
julianchr/magma
|
437a1d86490ff5f1d279cf2cd3243bbd3f22f715
|
f0b2ed7e08314208133cf722921d6e6ab7853825
|
refs/heads/master
| 2022-09-21T21:45:14.678593
| 2020-05-28T22:47:52
| 2020-05-28T22:49:52
| 267,723,888
| 0
| 0
|
NOASSERTION
| 2020-05-29T00:07:02
| 2020-05-29T00:07:01
| null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.equipment_port import EquipmentPortFragment, QUERY as EquipmentPortFragmentQuery
from ..fragment.service_endpoint_definition import ServiceEndpointDefinitionFragment, QUERY as ServiceEndpointDefinitionFragmentQuery
QUERY: List[str] = EquipmentPortFragmentQuery + ServiceEndpointDefinitionFragmentQuery + ["""
fragment ServiceEndpointFragment on ServiceEndpoint {
id
port {
...EquipmentPortFragment
}
definition {
...ServiceEndpointDefinitionFragment
}
}
"""]
@dataclass
class ServiceEndpointFragment(DataClassJsonMixin):
@dataclass
class EquipmentPort(EquipmentPortFragment):
pass
@dataclass
class ServiceEndpointDefinition(ServiceEndpointDefinitionFragment):
pass
id: str
definition: ServiceEndpointDefinition
port: Optional[EquipmentPort]
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
f5d319d69486e544284d5a391d18304dd20f00fe
|
d29fd8ac20bf53f366821892bf5a80005a9cef36
|
/tests/apps/pages_directory.py
|
2d096f87c51ae88ef0ebd1ac72dc6772f44a26cb
|
[
"MIT"
] |
permissive
|
T4rk1n/dazzler
|
d325ff664c6e310374f08cea84bd53aa1ca2ca43
|
69c49422dc19c910445ab265b1d3481041de8f43
|
refs/heads/master
| 2023-02-11T02:39:08.423597
| 2021-12-06T03:16:49
| 2021-12-06T03:34:25
| 191,060,792
| 19
| 7
|
MIT
| 2023-01-23T11:02:57
| 2019-06-09T22:16:59
|
Python
|
UTF-8
|
Python
| false
| false
| 146
|
py
|
from dazzler import Dazzler
app = Dazzler(__name__)
app.config.pages_directory = 'page_dir'
if __name__ == '__main__':
app.start('--debug')
|
[
"t4rk@outlook.com"
] |
t4rk@outlook.com
|
c72e3c3dcb8a88238fa6b42cb63e1df026e8c669
|
d2d6bbb76fd92ad596b0476b37ac8dd5cf08df14
|
/1.9 LISTAS.py
|
a97405df71a8abca906f6bf2d182f2441b9b24db
|
[] |
no_license
|
edneyefs/curso_python
|
b917d8f2c405173af901287dab86264ff937aaa6
|
2c862ad62223b7c3bd0ea7d7410a9b69c38d814d
|
refs/heads/master
| 2022-12-14T21:29:59.875637
| 2020-08-21T12:42:07
| 2020-08-21T12:42:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 571
|
py
|
lista = []
print(type(lista))
print(dir(lista))
print(help(lista))
print(len(lista))  # counter
lista.append(1)
lista.append(5)
print(len(lista))
nova_lista = [1, 4, 'Ana', 'Bia']
#print(nova_lista)
nova_lista.remove(4)
#print(nova_lista)
nova_lista.reverse()
print(nova_lista)
lista = [1, 5, 'Rebeca', 'Guilherme', 3.1415]
print(lista.index(1))
print(lista[2])
print(lista[-1])
lista = ['Ana', 'Lia', 'Rui', 'Paulo', 'Dani']
print(lista[1:3])
print(lista[1:-1])
print(lista[1:])
print(lista[::2])
print(lista[::-1])
del lista[2]
print(lista)
del lista[1:]
print(lista)
|
[
"edneysilva20@hotmail.com"
] |
edneysilva20@hotmail.com
|
c77de50c1bc3274824ecd3f3cc23faa27d6840d7
|
4c3dd270440c48a0a8e87d1937844371476f7cef
|
/resource_wrangler/scripts/download_mods.py
|
cb42130c64e1983371fe8880c460d6c88f9945b7
|
[] |
no_license
|
Soartex-Modded/Resource-Wrangler
|
f84726bf5ffb246d8562149fb6cc0a613a4f4043
|
36c6f7059bb876e034c99d5e02fca1cf81888dac
|
refs/heads/master
| 2023-01-25T00:34:22.900581
| 2020-11-29T23:00:35
| 2020-11-29T23:00:35
| 309,116,894
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,085
|
py
|
import json
import math
import os
import requests
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy import create_engine
from sqlalchemy.sql import select
def download_mods(mods_dirs, database_path, mod_limit=100):
"""
Collect the top mods from CurseForge into mods_dirs
:param mods_dirs: {[minor_version]: [path to mods folder]}
:param database_path: path to .db file with download history (will be created if not exists)
:param mod_limit: maximum number of mods to collect
"""
mods_dirs = {k: os.path.expanduser(v) for k, v in mods_dirs.items()}
database_path = os.path.expanduser(database_path)
patch_info = {}
for minor_version in mods_dirs:
patch_info[minor_version] = {}
os.makedirs(mods_dirs[minor_version], exist_ok=True)
os.makedirs(os.path.dirname(database_path), exist_ok=True)
engine = create_engine('sqlite:///' + database_path)
metadata = MetaData()
mod_files = Table('mod_files', metadata,
Column('id', Integer, primary_key=True),
Column('file_name', String(250)),
Column('mod_id', Integer),
Column('vanilla_minor_version', Integer))
metadata.create_all(engine)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36',
}
page_size = 50
mod_count = 0
# download sets of mod information at a time
for page_index in range(math.ceil(mod_limit / page_size)):
mods = requests.get(
"https://addons-ecs.forgesvc.net/api/v2/addon/search/",
params={
'gameId': 432,
'index': page_index * page_size,
'pageSize': page_size,
'sort': 'TotalDownloads',
'sortDescending': True
},
headers=headers).json()
for mod_meta in mods:
mod_count += 1
if mod_count > mod_limit:
return
if mod_meta['categorySection']['name'] != 'Mods':
continue
versioned_mod_files = {}
for mod_file_meta in mod_meta['gameVersionLatestFiles']:
tokens = mod_file_meta['gameVersion'].split('.')
minor_version = int(tokens[1])
patch_version = 0 if len(tokens) == 2 else int(tokens[2])
# find latest mod files
if minor_version in versioned_mod_files:
if versioned_mod_files[minor_version]['patch_version'] > patch_version:
continue
prior_file_id = versioned_mod_files.get(minor_version, {}).get('value', {}).get('projectFileId', 0)
if mod_file_meta['projectFileId'] > prior_file_id:
versioned_mod_files[minor_version] = {
'patch_version': patch_version,
'value': mod_file_meta
}
for minor_version in versioned_mod_files:
if str(minor_version) not in mods_dirs:
continue
mod_file_meta = versioned_mod_files[minor_version]['value']
patch_info[str(minor_version)][mod_file_meta["projectFileName"]] = {
"mod_id": mod_meta['slug'],
"mod_name": mod_meta['name'],
# typically contains the mod version inside somewhere
"mod_filename": mod_file_meta['projectFileName'],
"mc_version": mod_file_meta['gameVersion'],
"mod_authors": [auth['name'] for auth in mod_meta['authors']],
"url_website": mod_meta['websiteUrl'],
"description": mod_meta.get('summary')
}
available_file_name = mod_file_meta['projectFileName']
stored_file_name = engine.execute(select([mod_files.c.file_name]).where(
(mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
).scalar()
if stored_file_name == available_file_name:
# file is already current
# print(f'Skipping {mod_meta["name"]} for 1.{minor_version}')
continue
mod_path = os.path.join(mods_dirs[str(minor_version)], mod_file_meta['projectFileName'])
if os.path.exists(mod_path):
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
continue
download_url = requests.get(
f"https://addons-ecs.forgesvc.net/api/v2/addon/{mod_meta['id']}/file/{mod_file_meta['projectFileId']}/download-url",
headers=headers).text
print(f'Downloading {mod_meta["name"]} for 1.{minor_version}')
with open(mod_path, 'wb') as mod_file:
mod_file.write(requests.get(download_url, headers=headers).content)
if stored_file_name is None:
engine.execute(mod_files.insert(),
file_name=available_file_name,
mod_id=mod_meta['id'],
vanilla_minor_version=minor_version)
else:
engine.execute(mod_files.update()
.where((mod_files.c.mod_id == mod_meta['id']) & (mod_files.c.vanilla_minor_version == minor_version))
.values(file_name=available_file_name))
for minor_version in patch_info:
with open(os.path.join(mods_dirs[str(minor_version)], "patch_info.json"), 'w') as patch_info_file:
json.dump(patch_info[minor_version], patch_info_file, indent=4)
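# Example invocation (hypothetical paths, not part of the original module):
# download_mods({"12": "~/mods/1.12", "16": "~/mods/1.16"},
#               "~/data/mod_files.db", mod_limit=50)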
|
[
"shoeboxam@gmail.com"
] |
shoeboxam@gmail.com
|
ca016bd689fb246e19dc877a574e00c0cd0e1ec1
|
2b6e1b7bd7065229054b4cdecd40daa5e251c22d
|
/src/models/dqn.py
|
4195fc5933776981002b4d8d68a69c1ac3b934bb
|
[] |
no_license
|
raufer/deep-q-learning
|
b9be99c41829e8d62cd350cd279e5ddc135e7809
|
c31b8803a45bcf1f22f1c4552daf48b9a284dd5c
|
refs/heads/main
| 2023-06-19T06:01:49.867163
| 2021-07-20T13:35:30
| 2021-07-20T13:35:30
| 379,271,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,506
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.config import config
class DQN(nn.Module):
"""
Assumption: the environment is deterministic
so all equations presented here are also formulated deterministically for the sake of simplicity.
In the reinforcement learning literature, they would also contain expectations
over stochastic transitions in the environment.
    Our aim is to train a policy that tries to maximize the discounted, cumulative reward
R = sum_{t=t0}^{inf} 𝛾^t * r_t
The discount, 𝛾 , should be a constant between 0 and 1 that ensures the sum converges.
It makes rewards from the uncertain, far future, less important for our agent
than the ones in the near future that it can be more confident about
The main idea behind Q-learning is:
If we had a function Q* :: (S, A) -> R (scalar) that could tell us the real return of
taking an action A at the state S, then we could easily construct an optimal policy:
policy*(s) = argmax {a} Q*(S, a)
This policy would always maximize our rewards
    However, we don't know everything about the world, so we do not have direct access to Q*
Nevertheless, We can use function approximation techniques to approximate Q*
For the training update rule, we'll use the fact that every function Q for some policy
obeys the Bellman Equation:
Q_pi(s, a) = r + gamma * max {a'} Q_pi(s', a')
The difference between the two sides of the equality is known as the temporal
difference error
        delta = Q(s, a) - (r + gamma * max {a'} Q(s', a'))
    To minimize this error, we'll use the Huber loss:
* MSE when the error is small (< 1)
* MAE when the error is large (> 1)
(more robust to outliers)
This error is calculated over a batch of transitions B
sampled from the replay memory
L = 1 / |B| * sum {(s, a, s', r) in B} L(delta)
with L(delta) =
1/2 delta**2 for |delta| < 1
|delta| - 1/2 otherwise
Q-network
Our model is a convolutional neural network that takes as input
the different between the current and previous screen patches.
It has two outputs representing Q(s, left) and Q(s, right),
where s is the input to the network.
In effect, the network is trying to predict the quality/value of
taking each action given the current input
"""
def __init__(self, h, w, outputs):
super(DQN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
# Number of Linear input connections depends on output of conv2d layers
# and therefore the input image size, so compute it.
def conv2d_size_out(size, kernel_size=5, stride=2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.head = nn.Linear(linear_input_size, outputs)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
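# A minimal sketch (not part of the original file) of the Huber-loss TD update
# described in the docstring above, assuming a frozen target network and batched
# tensors: states (B, C, H, W), actions (B,), rewards (B,), next_states (B, C, H, W).
def td_loss(policy_net, target_net, states, actions, rewards, next_states, gamma=0.999):
    # Q(s, a) for the actions that were actually taken
    q_sa = policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
    # r + gamma * max_{a'} Q_target(s', a'), detached so no gradient reaches the target
    with torch.no_grad():
        target = rewards + gamma * target_net(next_states).max(1)[0]
    # F.smooth_l1_loss is the Huber loss: quadratic for small errors, linear for large
    return F.smooth_l1_loss(q_sa, target)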
|
[
"raufer92@gmail.com"
] |
raufer92@gmail.com
|
548980782c09a047bbcc43b0e12a6dae822cdcc6
|
ed1d841dbd836f5a02a8b2c22bcc92380f28d11b
|
/seed.py
|
9b08aa64301e4ced1c79ad9d8a6e7a7e4658118c
|
[] |
no_license
|
GraceDurham/ratings
|
b063389f368f0b3994f0771ca4cac46555a04a10
|
2e628c2a824ca5a10879a15282cd60e21695322b
|
refs/heads/master
| 2020-05-23T07:59:29.310561
| 2017-02-03T02:00:36
| 2017-02-03T02:00:36
| 80,483,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,894
|
py
|
"""Utility file to seed ratings database from MovieLens data in seed_data/"""
from sqlalchemy import func
from model import User
from model import Rating
from model import Movie
from datetime import datetime
from model import connect_to_db, db
from server import app
def load_users():
"""Load users from u.user into database."""
print "Users"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
User.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.user"):
row = row.rstrip()
user_id, age, gender, occupation, zipcode = row.split("|")
user = User(user_id=user_id,
age=age,
zipcode=zipcode)
# We need to add to the session or it won't ever be stored
db.session.add(user)
# Once we're done, we should commit our work
db.session.commit()
def load_movies():
"""Load movies from u.item into database."""
print "Movies"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Movie.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.item"):
# striped the whitespace
row = row.rstrip()
# print "each row!", row
# we took the row and split it on the pipe
row_split = row.split("|")
# print "it's splitted!!", row_split
# sliced the giant list into only 0-4 index
first_five = row_split[:5]
# print "this is our short list", first_five
# unpacked the first five items from the u.item list
movie_id, title, released_at, empty, imdb_url = first_five
# print first_five
        # If released_at is a non-empty string, parse it into a datetime object;
        # otherwise set it to None since no release date is present
if released_at:
released_at = datetime.strptime(released_at, "%d-%b-%Y")
else:
released_at = None
        title = title[:-7]  # strip the trailing " (year)" suffix (7 characters)
movie = Movie(movie_id=movie_id,
title=title,
released_at=released_at,
imdb_url=imdb_url)
# We need to add to the session or it won't ever be stored
db.session.add(movie)
# Once we're done, we should commit our work
db.session.commit()
def load_ratings():
"""Load ratings from u.data into database."""
print "Ratings"
# Delete all rows in table, so if we need to run this a second time,
# we won't be trying to add duplicate users
Rating.query.delete()
# Read u.user file and insert data
for row in open("seed_data/u.data"):
row = row.strip().split()
user_id, movie_id, score, time_stamp = row
# print row
rating = Rating(
user_id=int(user_id),
movie_id=int(movie_id),
score=int(score))
# We need to add to the session or it won't ever be stored
db.session.add(rating)
# Once we're done, we should commit our work
db.session.commit()
def set_val_user_id():
"""Set value for the next user_id after seeding database"""
# Get the Max user_id in the database
result = db.session.query(func.max(User.user_id)).one()
max_id = int(result[0])
# Set the value for the next user_id to be max_id + 1
query = "SELECT setval('users_user_id_seq', :new_id)"
db.session.execute(query, {'new_id': max_id + 1})
db.session.commit()
if __name__ == "__main__":
connect_to_db(app)
# In case tables haven't been created, create them
db.create_all()
# Import different types of data
load_users()
load_movies()
load_ratings()
set_val_user_id()
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
c885620223bab7b3b759d52fbf738145d6690444
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/rtctrl/setrtmetricdef.py
|
f5d55b1458f3e0a5d0f447271471db818060c777
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,969
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class SetRtMetricDef(Mo):
"""
The set route metric definition.
"""
meta = ClassMeta("cobra.model.rtctrl.SetRtMetricDef")
meta.moClassName = "rtctrlSetRtMetricDef"
meta.rnFormat = "smetric"
meta.category = MoCategory.REGULAR
meta.label = "None"
meta.writeAccessMask = 0x1000001
meta.readAccessMask = 0x1000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.rtctrl.AttrDef")
meta.superClasses.add("cobra.model.pol.Comp")
meta.superClasses.add("cobra.model.rtctrl.ASetRule")
meta.superClasses.add("cobra.model.fabric.L3ProtoComp")
meta.superClasses.add("cobra.model.fabric.ProtoComp")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.rtctrl.ASetRtMetric")
meta.rnPrefixes = [
('smetric', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "metric", "metric", 795, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 4294967295)]
meta.props.add("metric", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "type", "type", 794, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 5
prop.defaultValueStr = "metric"
prop._addConstant("as-path", "as-path", 11)
prop._addConstant("community", "community", 1)
prop._addConstant("dampening-pol", "dampening-type", 10)
prop._addConstant("ip-nh", "ip-nexthop", 8)
prop._addConstant("local-pref", "local-preference", 4)
prop._addConstant("metric", "metric", 5)
prop._addConstant("metric-type", "metric-type", 9)
prop._addConstant("ospf-fwd-addr", "ospf-fowarding-address", 7)
prop._addConstant("ospf-nssa", "ospf-nssa-area", 6)
prop._addConstant("rt-tag", "route-tag", 2)
prop._addConstant("rt-weight", "route-weight", 3)
meta.props.add("type", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
135f69897b740742d615a59e60256e99b761d86d
|
1346ea1f255d3586442c8fc1afc0405794206e26
|
/알고리즘/day24/babygincompare.py
|
0506c4570d0fcac76a84ab75a16604fe95dd74ec
|
[] |
no_license
|
Yun-Jongwon/TIL
|
737b634b6e75723ac0043cda9c4f9acbc2a24686
|
a3fc624ec340643cdbf98974bf6e6144eb06a42f
|
refs/heads/master
| 2020-04-12T00:41:03.985080
| 2019-05-01T07:55:25
| 2019-05-01T07:55:25
| 162,208,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,649
|
py
|
def player1babygin():
for i in range(len(player1_data)-2):
for j in range(i+1,len(player1_data)-1):
for k in range(j+1,len(player1_data)):
candi=sorted([player1_data[i],player1_data[j],player1_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
# print(candi)
return 1
return 0
def player2babygin():
for i in range(len(player2_data)-2):
for j in range(i+1,len(player2_data)-1):
for k in range(j+1,len(player2_data)):
candi=sorted([player2_data[i],player2_data[j],player2_data[k]])
if (candi[1]-1==candi[0] and candi[1]+1== candi[2]) or (candi[0]==candi[1] and candi[1]==candi[2]):
return 2
return 0
T=int(input())
for t in range(T):
data=list(map(int,input().split()))
player1_data=[]
player2_data=[]
player1=0
player2=0
result=0
for d in range(len(data)):
if d%2==0:
player1_data.append(data[d])
# print(player1_data)
else:
player2_data.append(data[d])
# print(player2_data)
if d>=4:
if len(player2_data)>=3:
player1=player1babygin()
player2=player2babygin()
else:
                player1 = player1babygin()
if player1==1 and (player2==0 or player2==2):
result=1
break
elif player1==0 and player2==2:
result=2
break
print('#{} {}'.format(t+1,result))
|
[
"dmdmdkdk@daum.net"
] |
dmdmdkdk@daum.net
|
1c25fdc3c71bd1c13e880d528341cc4b0e788efd
|
f54d702c1289b2b78f423850d7fedba6c9378126
|
/Mathematics/Fundamentals/handshake.py
|
b505905cd0327f05e06069e006057674fa76dc6a
|
[
"MIT"
] |
permissive
|
ekant1999/HackerRank
|
81e6ac5bec8307bca2bd1debb169f2acdf239b66
|
084d4550b4eaf130837ab26a4efdbcaf8b667cdc
|
refs/heads/master
| 2020-05-02T09:19:10.102144
| 2016-10-27T04:10:28
| 2016-10-27T04:10:28
| 177,868,424
| 0
| 0
|
MIT
| 2019-03-26T21:04:17
| 2019-03-26T21:04:17
| null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
t = int(raw_input())
for i in range(t):
n = int(raw_input())
handshakes = n*(n-1)/2 # Note this is nC2 i.e. n "choose" 2
print handshakes
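# Cross-check of the nC2 formula (illustrative, not part of the original):
# for n = 5 people, n*(n-1)/2 = 5*4/2 = 10, matching 5!/(2!*3!) = 10.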
|
[
"ugali_guy@hotmail.com"
] |
ugali_guy@hotmail.com
|
501ce999fd6452c28544240627deb50e62312876
|
fce83f1b55b8894afab9eb58ae8b4ba2e26eb86b
|
/examples/GAN/DCGAN.py
|
e9df6b36319476aea07fd240e26005c998a75385
|
[
"Apache-2.0"
] |
permissive
|
PeisenZhao/tensorpack
|
b65d451f6d4a7fe1af1e183bdc921c912f087586
|
6ca57de47e4a76b57c8aa2f0dad87c1059c13ac0
|
refs/heads/master
| 2021-05-05T01:46:05.209522
| 2018-01-31T05:29:37
| 2018-01-31T05:29:37
| 119,641,372
| 1
| 0
| null | 2018-01-31T05:52:07
| 2018-01-31T05:52:06
| null |
UTF-8
|
Python
| false
| false
| 5,554
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: DCGAN.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import glob
import numpy as np
import os
import argparse
from tensorpack import *
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from tensorpack.utils.globvars import globalns as opt
import tensorflow as tf
from GAN import GANTrainer, RandomZData, GANModelDesc
"""
1. Download the 'aligned&cropped' version of CelebA dataset
from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
2. Start training:
./DCGAN-CelebA.py --data /path/to/img_align_celeba/ --crop-size 140
Generated samples will be available through tensorboard
3. Visualize samples with an existing model:
./DCGAN-CelebA.py --load path/to/model --sample
You can also train on other images (just use any directory of jpg files in
`--data`). But you may need to change the preprocessing.
A pretrained model on CelebA is at http://models.tensorpack.com/GAN/
"""
# global vars
opt.SHAPE = 64
opt.BATCH = 128
opt.Z_DIM = 100
class Model(GANModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, (None, opt.SHAPE, opt.SHAPE, 3), 'input')]
def generator(self, z):
""" return an image generated from z"""
nf = 64
l = FullyConnected('fc0', z, nf * 8 * 4 * 4, nl=tf.identity)
l = tf.reshape(l, [-1, 4, 4, nf * 8])
l = BNReLU(l)
with argscope(Deconv2D, nl=BNReLU, kernel_shape=4, stride=2):
l = Deconv2D('deconv1', l, nf * 4)
l = Deconv2D('deconv2', l, nf * 2)
l = Deconv2D('deconv3', l, nf)
l = Deconv2D('deconv4', l, 3, nl=tf.identity)
l = tf.tanh(l, name='gen')
return l
@auto_reuse_variable_scope
def discriminator(self, imgs):
""" return a (b, 1) logits"""
nf = 64
with argscope(Conv2D, nl=tf.identity, kernel_shape=4, stride=2):
l = (LinearWrap(imgs)
.Conv2D('conv0', nf, nl=tf.nn.leaky_relu)
.Conv2D('conv1', nf * 2)
.BatchNorm('bn1')
.tf.nn.leaky_relu()
.Conv2D('conv2', nf * 4)
.BatchNorm('bn2')
.tf.nn.leaky_relu()
.Conv2D('conv3', nf * 8)
.BatchNorm('bn3')
.tf.nn.leaky_relu()
.FullyConnected('fct', 1, nl=tf.identity)())
return l
def _build_graph(self, inputs):
image_pos = inputs[0]
image_pos = image_pos / 128.0 - 1
z = tf.random_uniform([opt.BATCH, opt.Z_DIM], -1, 1, name='z_train')
z = tf.placeholder_with_default(z, [None, opt.Z_DIM], name='z')
with argscope([Conv2D, Deconv2D, FullyConnected],
W_init=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
image_gen = self.generator(z)
tf.summary.image('generated-samples', image_gen, max_outputs=30)
with tf.variable_scope('discrim'):
vecpos = self.discriminator(image_pos)
vecneg = self.discriminator(image_gen)
self.build_losses(vecpos, vecneg)
self.collect_variables()
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def get_augmentors():
augs = []
if opt.load_size:
augs.append(imgaug.Resize(opt.load_size))
if opt.crop_size:
augs.append(imgaug.CenterCrop(opt.crop_size))
augs.append(imgaug.Resize(opt.SHAPE))
return augs
def get_data(datadir):
imgs = glob.glob(datadir + '/*.jpg')
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = AugmentImageComponent(ds, get_augmentors())
ds = BatchData(ds, opt.BATCH)
ds = PrefetchDataZMQ(ds, 5)
return ds
def sample(model, model_path, output_name='gen/gen'):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=model,
input_names=['z'],
output_names=[output_name, 'z'])
pred = SimpleDatasetPredictor(pred, RandomZData((100, opt.Z_DIM)))
for o in pred.get_result():
o = o[0] + 1
o = o * 128.0
o = np.clip(o, 0, 255)
o = o[:, :, :, ::-1]
stack_patches(o, nr_row=10, nr_col=10, viz=True)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='view generated examples')
parser.add_argument('--data', help='a jpeg directory')
parser.add_argument('--load-size', help='size to load the original images', type=int)
parser.add_argument('--crop-size', help='crop the original images', type=int)
args = parser.parse_args()
opt.use_argument(args)
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
return args
if __name__ == '__main__':
args = get_args()
if args.sample:
sample(Model(), args.load)
else:
assert args.data
logger.auto_set_dir()
GANTrainer(
input=QueueInput(get_data(args.data)),
model=Model()).train_with_defaults(
callbacks=[ModelSaver()],
steps_per_epoch=300,
max_epoch=200,
session_init=SaverRestore(args.load) if args.load else None
)
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
df113094854ba04a033632a46969612a2810a824
|
aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4
|
/Q03__/04_Range_Sum_Query_2D_Immutable/Solution.py
|
5a36350496b38c5b518c880e49d6cd71aaf91e13
|
[
"Apache-2.0"
] |
permissive
|
hsclinical/leetcode
|
e9d0e522e249a24b28ab00ddf8d514ec855110d7
|
48a57f6a5d5745199c5685cd2c8f5c4fa293e54a
|
refs/heads/main
| 2023-06-14T11:28:59.458901
| 2021-07-09T18:57:44
| 2021-07-09T18:57:44
| 319,078,569
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from typing import List
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.matrix = matrix
self.n = len(matrix)
if self.n != 0:
self.m = len(matrix[0])
else:
            self.m = 0  # no rows, so no columns either
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
if self.n != 0:
middleList = []
for i in range(row1, row2+1):
middleList.append(sum(self.matrix[i][col1:(col2+1)]))
return(sum(middleList))
else:
return(0)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
|
[
"luhongisu@gmail.com"
] |
luhongisu@gmail.com
|
9e6d61a3743d70fc652f40ee1dce7897a9019284
|
0000c8f4a481000676463f81d55c2ea21862cbd5
|
/not-yet-done-examples.py
|
d2d8e2a9c2d8f5293eea6153628712f8ddbc0858
|
[] |
no_license
|
robertej19/813
|
b5ca9b51504e002189861bc0e1230bd43c5f6005
|
f1417f05e9d08d5693d6ecd8363d1dd7552d2e12
|
refs/heads/master
| 2022-12-18T14:36:26.644424
| 2020-09-21T13:58:06
| 2020-09-21T13:58:06
| 292,097,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
Here is my code for doing the fit and plotting:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# Assumed Gaussian model for the fit (not in the original snippet):
# amplitude, centre, and width, matching p0 = [100, 3300, 140]
def gauss(x, a, mu, sigma):
    return a * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))
popt, pcov = curve_fit(gauss, xval, yval, sigma=yerror, p0=[100, 3300, 140], absolute_sigma=False)
xx = np.arange(xmin, xmax)
plt.plot(xx, gauss(xx, *popt), label='fit')
One line method to load a CSV data file into python with numpy
data = [*zip(*np.genfromtxt('cubeData.csv', delimiter=','))]
|
[
"robertej@mit.edu"
] |
robertej@mit.edu
|
749ebd1fc73831af5d53749d304aa4f0729f1cf8
|
0ca0fc2c2aad412d9e2936d5d01fb1abc1539ee4
|
/apps/cart/forms.py
|
bead7dbbce173bcc6584e87b86d1c9a91dad31e7
|
[
"MIT"
] |
permissive
|
yeboahd24/python202
|
1f399426a1f46d72da041ab3d138c582c695462d
|
35963db9a4ad5fcd567ce1e98c673f1a2ed2abef
|
refs/heads/master
| 2023-05-06T04:14:19.336839
| 2021-06-02T01:22:44
| 2021-06-02T01:22:44
| 309,841,303
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from django import forms
class CheckoutForm(forms.Form):
first_name = forms.CharField(max_length=255)
last_name = forms.CharField(max_length=255)
email = forms.EmailField(max_length=255)
phone = forms.CharField(max_length=255)
address = forms.CharField(max_length=255)
zipcode = forms.CharField(max_length=255)
place = forms.CharField(max_length=255)
stripe_token = forms.CharField(max_length=255)
|
[
"yeboahd24@gmail.com"
] |
yeboahd24@gmail.com
|
18a1ef9adc1cffb62a94ab625de750a18568e630
|
ea544b339809095d2c383b542248f530990c31d5
|
/env/lib/python3.6/site-packages/pip/_vendor/html5lib/treewalkers/base.py
|
ba04ae2bb9cec5cf9fc1e3ea2a220624ca47aea1
|
[
"BSD-3-Clause"
] |
permissive
|
724686158/NosqlEXP3
|
5fab1a9e131c6936b5b61e0f1c86eea2c889294a
|
e29f2807f075831377456b47cf8c9ce0c8d65c30
|
refs/heads/master
| 2020-04-09T01:40:54.370782
| 2019-01-25T13:04:04
| 2019-01-25T13:04:04
| 159,912,368
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,476
|
py
|
from __future__ import absolute_import, division, unicode_literals
from xml.dom import Node
from ..constants import namespaces, voidElements, spaceCharacters
__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
"TreeWalker", "NonRecursiveTreeWalker"]
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
spaceCharacters = "".join(spaceCharacters)
class TreeWalker(object):
"""Walks a tree yielding tokens
Tokens are dicts that all have a ``type`` field specifying the type of the
token.
"""
def __init__(self, tree):
"""Creates a TreeWalker
:arg tree: the tree to walk
"""
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
"""Generates an error token with the given message
:arg msg: the error message
:returns: SerializeError token
"""
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
"""Generates an EmptyTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:arg hasChildren: whether or not to yield a SerializationError because
this tag shouldn't have children
:returns: EmptyTag token
"""
yield {"type": "EmptyTag", "name": name,
"namespace": namespace,
"data": attrs}
if hasChildren:
yield self.error("Void element has children")
def startTag(self, namespace, name, attrs):
"""Generates a StartTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:arg attrs: the attributes of the element as a dict
:returns: StartTag token
"""
return {"type": "StartTag",
"name": name,
"namespace": namespace,
"data": attrs}
def endTag(self, namespace, name):
"""Generates an EndTag token
:arg namespace: the namespace of the token--can be ``None``
:arg name: the name of the element
:returns: EndTag token
"""
return {"type": "EndTag",
"name": name,
"namespace": namespace}
def text(self, data):
"""Generates SpaceCharacters and Characters tokens
Depending on what's in the data, this generates one or more
``SpaceCharacters`` and ``Characters`` tokens.
        For example:
>>> from html5lib.treewalkers.base import TreeWalker
>>> # Give it an empty tree just so it instantiates
>>> walker = TreeWalker([])
>>> list(walker.text(''))
[]
>>> list(walker.text(' '))
[{u'data': ' ', u'type': u'SpaceCharacters'}]
>>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE
[{u'data': ' ', u'type': u'SpaceCharacters'},
{u'data': u'abc', u'type': u'Characters'},
{u'data': u' ', u'type': u'SpaceCharacters'}]
:arg data: the text data
:returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
"""
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
"""Generates a Comment token
:arg data: the comment
:returns: Comment token
"""
return {"type": "Comment", "data": data}
def doctype(self, name, publicId=None, systemId=None):
"""Generates a Doctype token
:arg name:
:arg publicId:
:arg systemId:
:returns: the Doctype token
"""
return {"type": "Doctype",
"name": name,
"publicId": publicId,
"systemId": systemId}
def entity(self, name):
"""Generates an Entity token
:arg name: the entity name
:returns: an Entity token
"""
return {"type": "Entity", "name": name}
def unknown(self, nodeType):
"""Handles unknown node types"""
return self.error("Unknown node type: " + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (not namespace or namespace == namespaces["html"]) and name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if (namespace and namespace != namespaces["html"]) or name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
|
[
"solitarius.holic@gmail.com"
] |
solitarius.holic@gmail.com
|
e8493a1b631c82cd20a805041411a0ddabce63d0
|
5e5b8a66d35be6b86d3754069613fe49108a700d
|
/scripts/webquestions-preprocessing/paraphrase_rules.py
|
b8ff50290c2c4c922205a06b3aa5feddffdd7d10
|
[
"CC-BY-4.0"
] |
permissive
|
saraswat/graph-parser
|
e77f9880f38d1d23cf5aebb149be997d9c715745
|
da8800503174dce0590a55b817cd024354e41d9e
|
refs/heads/master
| 2021-01-11T09:01:50.414615
| 2016-12-23T13:17:59
| 2016-12-23T13:17:59
| 77,409,845
| 1
| 0
| null | 2016-12-26T22:26:28
| 2016-12-26T22:26:28
| null |
UTF-8
|
Python
| false
| false
| 3,122
|
py
|
'''
Created on 26 May 2014
@author: siva
'''
import json
import re
import sys
for line in sys.stdin:
line = json.loads(line)
# print line
# sentence = line['sentence']
sentence = " ".join([word["word"] for word in line["words"]])
if re.search(" do \?$", sentence):
# what did Einstein do?
# sentence = re.sub(" do\?$", " serve as\?", sentence)
words = line['words']
words.pop(-1)
words.pop(-1)
word = { "word" : "profession", "ner" : "0"}
words.append(word)
word = { "word" : "?", "ner" : "0"}
words.append(word)
for word in words:
if word['word'] == 'did' or word['word'] == 'do' or word['word'] == 'does':
word['word'] = 'is'
if re.search("Where ((is)|(was)) .* from \?$", sentence):
# where is Obama from ?
#sentence = re.sub(" from\?$", " born in ?", sentence)
words = line['words']
entities = line['entities']
check = False
for entity in entities:
if entity["index"] == len(words) - 3:
check = True
if check:
words.pop(-1)
words.pop(-1)
word = { "word" : "born", "ner" : "0"}
words.append(word)
word = { "word" : "in", "ner" : "0"}
words.append(word)
word = { "word" : "?", "ner" : "0"}
words.append(word)
'''if re.search("((name)|(type)|(kind))", sentence):
# What is the name of the president of US
#sentence = re.sub(" the ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
#sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
#sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?))", "", sentence)
words = line['words']
entities = line['entities']
for i, word in enumerate(words):
if re.match("((name)|(kind)|(type))", word['word']):
if len(words) > i + 1 and words[i + 1]["word"] == "of":
words.pop(i)
words.pop(i)
for entity in entities:
if entity["index"] > i:
entity["index"] += -2
else:
words.pop(i)
if words[i - 1]["word"] == "the" or words[i - 1]["word"] == "a":
words.pop(i - 1)
for entity in entities:
if entity["index"] > i - 1:
entity["index"] += -1
break'''
sentence_mod = " ".join([word["word"] for word in line["words"]])
# print sentence_mod
if re.match("((What)|(Who)) ((is)|(was)) [^\s]+ \?", sentence_mod):
words = line["words"]
words[0] = {"word" : "What", "ner" : "0"}
words[1] = {"word" : "is", "ner" : "0"}
words[3] = {"word" : "'s", "ner" : "0"}
words.append({"word" : "profession", "ner" : "0"})
words.append({"word" : "?", "ner" : "0"})
print json.dumps(line)
|
[
"siva@sivareddy.in"
] |
siva@sivareddy.in
|
e0f9e0cc67afaf29f291926c9c6aa95c05deb166
|
5792baf9e18ad91816cc42f4725b099a4dce7b7b
|
/HackerRank/Strings/Python sWap cASE.py
|
9e524564145bac64f1ed70970b832d5b588f495a
|
[] |
no_license
|
deepakorantak/Python
|
83b6782db0b5428d47fbc29193076e8ed5f5e285
|
9781133ce5a5c6f87efb5d4aa132a63ba1290f76
|
refs/heads/master
| 2020-03-23T19:55:30.075700
| 2019-02-19T06:24:42
| 2019-02-19T06:24:42
| 142,010,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
def swap_case(s):
return str.swapcase(s)
if __name__ == '__main__':
s = input()
if len(s) > 0 and len(s) <= 1000:
result = swap_case(s)
print(result)
|
[
"DeepaNKorantak@Gmail.com"
] |
DeepaNKorantak@Gmail.com
|
d226826efc7925a38771ffa80e803b71f8684253
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/test_overwrites.py
|
6be0434f5d59a65c73dba6e837e5662c22636de7
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880
| 2023-07-13T11:16:38
| 2023-07-13T11:16:38
| 268,571,175
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import ast
from hypothesis import given
from hypothesis.strategies import text
from pystrict3lib import assert_unknown, preknown
def test_assert_unknown():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
def test_assert_known():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
|
[
"johannes.buchner.acad@gmx.com"
] |
johannes.buchner.acad@gmx.com
|
50509f1fcaee6a8db649657d24ee5a29044b19e6
|
6932a9ae700a623f16a3aef417d0598cf6d4f389
|
/karasu_speak.py
|
c8c028b30786e6c5b67abc979a0d40f60e63f06a
|
[
"MIT"
] |
permissive
|
MuAuan/hirakegoma
|
9f1a252d913749a2c16ae5bd7a8870550048d26d
|
861879af1016c25b7a14bcabe543bfba47fd57f3
|
refs/heads/master
| 2020-04-27T20:12:25.315594
| 2019-03-24T12:38:30
| 2019-03-24T12:38:30
| 174,649,241
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
# -*- coding: utf-8 -*-
import cv2
import pyaudio
import sys
import time
import wave
import pydub
from pydub import AudioSegment
import moviepy.editor as mp
import datetime
import os
from vgg16_like import model_family_cnn
from keras.preprocessing import image
import matplotlib.pyplot as plt
import keras
import numpy as np
def prediction(imgSrc,model):
#np.random.seed(1337) # for reproducibility
img_rows,img_cols=128, 128
img = np.array(imgSrc)
img = img.reshape(1, img_rows,img_cols,3)
img = img.astype('float32')
img /= 255
t0=time.time()
y_pred = model.predict(img)
return y_pred
def karasu_responder(model,path,img_rows,img_cols):
imgSrc=[]
#for j in range(0,100000,1):
# j += 1
imgSrc = image.load_img(path, target_size=(img_rows,img_cols))
#plt.imshow(imgSrc)
#plt.pause(1)
#plt.close()
pred = prediction(imgSrc,model)
#print(pred[0])
if pred[0][0]>=0.5:
filename = "karasu-miyama_out1.wav"
print("angry")
elif pred[0][1]>=0.5:
#filename = "karasu_kero_out3.wav"
filename = "karasu-normal_out1.wav"
print("normal")
elif pred[0][2]>=0.5:
#filename = "karasu_kero_out1.wav"
filename = "karasu-others_out1.wav" #karasu-hageshii_out.wav
print("others")
return filename
num_classes = 3
img_rows,img_cols=128, 128
input_shape = (img_rows,img_cols,3)
model = model_family_cnn(input_shape, num_classes = num_classes)
# load the weights from the last epoch
model.load_weights('params_karasu-0angry-1normal-2others.hdf5', by_name=True)
print('Model loaded.')
path = "./out_test/figure.jpg"
img_rows,img_cols=128,128
s=0
while True:
if os.path.exists(path)==True:
s += 1
        for j in range(0, 50000000, 1):  # crude busy-wait delay before reading the image
            j += 1
"""
if s%3 == 0:
path="./out_test/figure_angry.jpg"
elif s%3 == 1:
path="./out_test/figure_normal.jpg"
else:
path="./out_test/figure_others.jpg"
"""
filename=karasu_responder(model,path,img_rows,img_cols)
        # set the chunk size
CHUNK1 = 1024
#filename = "hirakegoma.wav"
wf = wave.open(filename, "rb")
        # create a PyAudio instance
p1 = pyaudio.PyAudio()
        # open an output stream
stream1 = p1.open(format=p1.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
        # read 1024 frames at a time
input1 = wf.readframes(CHUNK1)
        # play back the audio
while stream1.is_active():
output = stream1.write(input1)
input1 = wf.readframes(CHUNK1)
if input1==b'':
os.remove(path)
break
|
[
"noreply@github.com"
] |
MuAuan.noreply@github.com
|
be9cf6de41337a706ff9fa46d7816b99d1f552a0
|
b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a
|
/CAIL2021/slsb/main.py
|
f67c06674df00f1d0948662b5528d9c5174dd6c3
|
[
"Apache-2.0"
] |
permissive
|
Tulpen/CAIL
|
d6ca9981c7ea2603ae61675ba330a9614cd9398d
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
refs/heads/master
| 2023-04-23T20:07:56.774530
| 2021-04-16T13:18:36
| 2021-04-16T13:18:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,066
|
py
|
"""Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy tsinghua9boy@sina.com
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import itertools
import json
import os
import re
from types import SimpleNamespace
import fire
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet,NERWNet
from utils import load_torch_model
LABELS = ['1', '2', '3', '4', '5']
MODEL_MAP = {
'bert': BertYForClassification,
'rnn': NERNet,
'rnnkv': NERWNet
}
all_types = ['LAK', 'OTH', 'HYD', 'ORG', 'LOC', 'RIV', 'RES', 'TER', 'DAM', 'PER']
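# Our reading of the (undocumented) tag scheme decoded by result_to_json below:
# tag 0 marks a standalone character, tag % 3 == 1 begins an entity,
# tag % 3 == 2 continues it, a nonzero tag with tag % 3 == 0 ends it, and
# (tag - 1) // 3 indexes the entity type in all_types.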
def result_to_json(string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
i = -1
zipped = zip(string, tags)
listzip = list(zipped)
last = len(listzip)
for char, tag in listzip:
i += 1
if tag == 0:
item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
elif (tag % 3) == 1:
entity_name += char
entity_start = idx
elif (tag % 3) == 2:
type_index = (tag-1) // 3
if (entity_name != "") and (i == last):
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name += char
        elif tag % 3 == 0:  # equivalent to the original (tag % 3)+3 == 3; or i == len(zipped)
type_index = (tag-1) // 3
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
def remove(text):
cleanr = re.compile(r"[ !#\$%&'\(\)*\+,-./:;<=>?@\^_`{|}~“”?!【】()、’‘…¥·]*")
cleantext = re.sub(cleanr, '', text)
return cleantext
def main(out_file='output/result.json',
model_config='config/rnn_config.json'):
"""Test model for given test set on 1 GPU or CPU.
Args:
        in_file: file to be tested (note: not a parameter of main(); the path is read from config.test_file_path)
out_file: output file
model_config: config file
"""
# 0. Load config
with open(model_config) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
else:
device = torch.device('cpu')
#0. preprocess file
# id_list = []
# with open(in_file, 'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# id = sents['id']
# id_list.append(id)
# id_dict = dict(zip(range(len(id_list)), id_list))
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
test_set, sc_list, label_list = data.load_file(config.test_file_path, train=False)
token_list = []
for line in sc_list:
tokens = data.tokenizer.convert_ids_to_tokens(line)
token_list.append(tokens)
data_loader_test = DataLoader(
test_set, batch_size=config.batch_size, shuffle=False)
# 2. Load model
model = MODEL_MAP[config.model_type](config)
model = load_torch_model(
model, model_path=os.path.join(config.model_path, 'model.bin'))
model.to(device)
# 3. Evaluate
answer_list, length_list = evaluate(model, data_loader_test, device, isTest=True)
def flatten(ll):
return list(itertools.chain(*ll))
# train_answers = handy_tool(label_list, length_list) #gold
# #answer_list = handy_tool(answer_list, length_list) #prediction
# train_answers = flatten(train_answers)
# train_predictions = flatten(answer_list)
#
# train_acc, train_f1 = calculate_accuracy_f1(
# train_answers, train_predictions)
# print(train_acc, train_f1)
test_json = json.load(open(config.test_file_path, 'r', encoding='utf-8'))
id_list = [item['id'] for item in test_json]
mod_tokens_list = handy_tool(token_list, length_list)
result = [result_to_json(t, s) for t,s in zip(mod_tokens_list, answer_list)]
# 4. Write answers to file
with open(out_file, 'w', encoding='utf8') as fout:
result_list = []
for id, item in zip(id_list,result):
entities = item['entities']
words = [d['word']+"-"+d['type'] for d in entities if d['type'] !='s']
unique_words = []
for w in words:
if w not in unique_words:
unique_words.append(w)
item = {}
item['id'] = id
item['entities'] = unique_words
result_list.append(item)
json.dump(result_list,fout,ensure_ascii=False, indent=4)
#fout.write(" ".join(words) + "\n")
# para_list = pd.read_csv(temp_file)['para'].to_list()
# summary_dict = dict(zip(id_dict.values(), [""] * len(id_dict)))
#
# result = zip(para_list, token_list)
# for id, summary in result:
# summary_dict[id_dict[id]] += remove(summary).replace(" ","")
#
# with open(out_file, 'w', encoding='utf8') as fout:
# for id, sumamry in summary_dict.items():
# fout.write(json.dumps({'id':id,'summary':sumamry}, ensure_ascii=False) + '\n')
if __name__ == '__main__':
fire.Fire(main)
|
[
"bangtech@sina.com"
] |
bangtech@sina.com
|
10c5cd8101cff8672ef60125ceffa4769b4d7c27
|
21682f70ff130169d8800a06b1a6d8bf7f46e45a
|
/functions/decoraters/variablelengthargument/demo.py
|
15d8a2f86e7c77c3241c3e719adc2bc1555f7f55
|
[] |
no_license
|
Aravind2595/MarchPythonProject
|
03c3aeee40f5ff2c635861ac29f31a7633499d51
|
9aa9241632b7f96e7e1cb33b3adb7b8def36f1f8
|
refs/heads/master
| 2023-05-05T04:16:45.556737
| 2021-05-20T08:43:52
| 2021-05-20T08:43:52
| 368,791,704
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
def add(*args):  # variable-length arguments; the call may also pass zero arguments
    res = 0      # the leading * is what matters, not the name 'args' (e.g. *hai works too)
    for num in args:  # the arguments arrive packed in a tuple
        res += num
    return res
print(add(10,20,30,40))
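# With no arguments the tuple is empty, so the function returns 0:
print(add())  # -> 0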
|
[
"nairaravind95@gmail.com"
] |
nairaravind95@gmail.com
|
d0bbe41af3a825c8397a6ae8f3261c2be29c4625
|
15960f0aa40915ddc93cd5c8a840a4abfb167cf1
|
/groups/models.py
|
0fae1fa57df655ee1187afa7e7aea983641ef30c
|
[] |
no_license
|
phouse512/piper
|
74d815fd443482abc80418dbed678b1431e17eb9
|
70f651db8af4edb625f6ba249556d3c2d04a350b
|
refs/heads/master
| 2022-05-04T20:36:56.354336
| 2018-12-19T04:28:23
| 2018-12-19T04:28:23
| 40,972,739
| 0
| 0
| null | 2018-11-17T16:56:21
| 2015-08-18T13:13:15
|
Python
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
from django.db import models
from users.models import User
class Group(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
class Meta:
db_table = 'groups'
class GroupMembership(models.Model):
id = models.AutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)    # on_delete is required since Django 2.0;
    group = models.ForeignKey(Group, on_delete=models.CASCADE)  # CASCADE matches the old implicit default
class Meta:
db_table = 'groups_membership'
|
[
"philiphouse2015@u.northwestern.edu"
] |
philiphouse2015@u.northwestern.edu
|
62d885e2dfc1f0c0f22c2711cb0bcfabeb0641b3
|
0942ec9cdda81f754d05ae9893605769ed5c1111
|
/flask-video-streaming/camera_pi.py
|
f94a07a7875d2c5f6778403d01ea02da3986608e
|
[
"MIT"
] |
permissive
|
sourceperl/rpi.webcam.pi3
|
f9fa061bc05bab9720c9e372c96f65e431ad5673
|
ea8559ca93f771250961a63fbe0f7acc3a7a2338
|
refs/heads/master
| 2020-12-25T14:38:24.234521
| 2016-07-21T14:56:01
| 2016-07-21T14:56:01
| 63,687,773
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.daemon = True
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
camera.hflip = True
camera.vflip = True
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there hasn't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
|
[
"loic.celine@free.fr"
] |
loic.celine@free.fr
|
34179ff136b9b68223fd42cb9f5fbe54e95a88de
|
af0dcf80a36da4ac6894dc517ad1870f702c3122
|
/azure-mgmt-web/azure/mgmt/web/models/csm_publishing_profile_options.py
|
99b9542ab7d50b0a1d29b9d31f8743561ff5afa3
|
[
"Apache-2.0"
] |
permissive
|
FlavioAlexander/azure-sdk-for-python
|
4c6151ca17886f9e4d47e1ccc469859abdedca5a
|
8c7416749f9a5697e0311bc9af8fe5c0d524ca03
|
refs/heads/master
| 2021-01-24T02:34:37.194767
| 2016-07-03T23:47:23
| 2016-07-03T23:47:23
| 62,738,173
| 0
| 1
| null | 2016-07-06T16:54:12
| 2016-07-06T16:54:10
| null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CsmPublishingProfileOptions(Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
:type format: str
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
}
def __init__(self, format=None):
self.format = format
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
a560d10713bc976b978431314f53a75111c1555a
|
3aa8222bb2edc93c9202ccbcf6f331cdf73cd5a2
|
/FundRatingNSDL/nsdl_extraction/setup.py
|
ac0e1546b51b54b85c28ad5f48c2c3952b296cc5
|
[] |
no_license
|
pavithra-ft/ft-automation
|
a977809823e587efd596b02e3a8286f887d12116
|
946e1c35b785bfc3ea31d5903e021d4bc99fe302
|
refs/heads/main
| 2023-04-24T19:54:28.478577
| 2021-05-11T17:53:08
| 2021-05-11T17:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
# Automatically created by: scrapyd-deploy
from setuptools import setup, find_packages
setup(
name='project',
version='1.0',
packages=find_packages(),
entry_points={'scrapy': ['settings = nsdl_extraction.settings']},
)
|
[
"64137264+pavithra-ft@users.noreply.github.com"
] |
64137264+pavithra-ft@users.noreply.github.com
|
460f676c069089996fb607db849fb892c0b4ab8a
|
c2e16633921d1efe584d93d769eaa7892a2fd8f3
|
/list,advanced/Messaging.py
|
aa33649a6ed9732f8fa8110516c633e59d131daa
|
[] |
no_license
|
yosifnandrov/softuni-stuff
|
bd53d418fe143ea4633a5488c1f80648da0b9ef7
|
2a76e5aee2029edf901634750d28cf153d73ece3
|
refs/heads/main
| 2023-04-17T19:53:30.254790
| 2021-05-06T11:33:39
| 2021-05-06T11:33:39
| 364,884,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
number = input().split()
message = input()
def get_sum(n):
    total = 0  # renamed from 'sum' to avoid shadowing the built-in
    for digit in n:
        total += int(digit)
    return total
for i in number:
    summary = get_sum(i)  # the digit sum picks the index of the character to print
    for l in range(len(message)):
        if l == summary:
            print(message[l], end="")
            message = message[0:l:] + message[l + 1::]  # drop the used character
            break
        elif l == len(message) - 1:
            l = summary - len(message)  # index out of range: wrap around
            print(message[l], end="")
            message = message[0:l:] + message[l + 1::]
|
[
"yosif1993@abv.bg"
] |
yosif1993@abv.bg
|
c5420358fb87484239026919e290e881a7b4c6c4
|
2ce0c37ac7d9beeac23db688f97a1f502b92d13a
|
/store/models/store.py
|
878b20d11d588de233e55c8908f1c894374734b0
|
[] |
no_license
|
AmrElsayedEG/inventory-system
|
0cdb0634b33117b13bfcae8642f979448d831369
|
d4bc483612c3b721918d75f24ab0d7fa29b78ce3
|
refs/heads/main
| 2023-08-20T22:32:25.113740
| 2021-10-04T08:55:44
| 2021-10-04T08:55:44
| 413,344,692
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from django.db import models
class Store(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=200)
coordinates = models.JSONField(blank=True, null=True)
def __str__(self) -> str:
return self.name
|
[
"elsayed.amr50@gmail.com"
] |
elsayed.amr50@gmail.com
|
1e4fc17bed5f3bca085566203de7580dbe427874
|
b5187b5ffd53a2cdc8ec6ed94effc39702c1ea31
|
/loyalty_app/loyalty/doctype/sales_list/sales_list.py
|
3f630555c69f95a1f081ba33f89e1bac9d77c915
|
[
"MIT"
] |
permissive
|
vignesharumainayagam/engagex-loyalty_app-backup-
|
946a7f75c5ae5cce33313142a0b4e6ba29d67cb6
|
4c326c5f7b22572146f0b946d6498e85ac22a143
|
refs/heads/master
| 2020-03-11T18:00:14.106005
| 2018-04-19T05:36:06
| 2018-04-19T05:36:06
| 130,163,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Loyalty and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Sales_list(Document):
pass
|
[
"vigneshwaran@valiantsystems.com"
] |
vigneshwaran@valiantsystems.com
|
53b2af0868849bff57dbd8b705257e3f2690e172
|
a88d9c0176f5e4c0d0bd9664270e000ebb5edbd9
|
/component/tile/sensor_tile.py
|
9b549723c7bbb0854467b7bcc1072e972e246aa2
|
[
"MIT"
] |
permissive
|
sandroklippel/fcdm
|
fb81c73fc6bd1cf296f9301272923c3627474d3f
|
5a54e6352bb574ba409be38882ff0d13b3473b7a
|
refs/heads/master
| 2023-08-19T22:05:52.055545
| 2021-08-24T11:23:40
| 2021-08-24T11:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,828
|
py
|
from datetime import datetime as dt
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from component import parameter as cp
from component.message import cm
class SensorTile(sw.Tile):
def __init__(self, model):
# create adjustable variables end and start
self.end = dt.now().year
self.start = 1950 # prior to any sats
# create the widgets
self.sensors_select = v.Select(label=cm.input_lbl.sensor, items=[], v_model=[], multiple=True, chips=True, deletable_chips=True)
landsat_7_switch = v.Switch(label=cm.input_lbl.do_threshold, v_model =model.improve_L7)
landsat_7_slider = v.Slider(class_='mt-5', label=cm.input_lbl.threshold, min=0, max=.3, step=.001, v_model=model.improve_threshold, thumb_label='always')
cloud_buffer = v.Slider(class_='mt-5', label=cm.input_lbl.cloud_buffer, min=0, max =2500, step=10, v_model=model.cloud_buffer, thumb_label='always')
# bind them to io
model \
.bind(self.sensors_select, 'sensors',) \
.bind(landsat_7_switch, 'improve_L7',) \
.bind(landsat_7_slider, 'improve_threshold',) \
.bind(cloud_buffer, 'cloud_buffer',)
super().__init__(
'nested_widget',
cm.tile.sensor,
inputs = [self.sensors_select, landsat_7_switch, landsat_7_slider, cloud_buffer],
alert = sw.Alert()
)
# add js behaviour
self.sensors_select.observe(self._check_sensor, 'v_model')
model.observe(self._change_start, 'reference_start')
model.observe(self._change_end, 'analysis_end')
def _check_sensor(self, change):
"""
prevent users from selecting landsat and sentinel 2 sensors
provide a warning message to help understanding
"""
# exit if its a removal
if len(change['new']) < len(change['old']):
self.alert.reset()
return self
# use positionning in the list as boolean value
sensors = ['landsat', 'sentinel']
# guess the new input
new_value = list(set(change['new']) - set(change['old']))[0]
id_ = next(i for i, s in enumerate(sensors) if s in new_value)
if sensors[id_] in new_value:
            if any(sensors[1 - id_] in s for s in change['old']):  # the other sensor family
change['owner'].v_model = [new_value]
self.alert.add_live_msg(cm.no_mix, 'warning')
else:
self.alert.reset()
return self
def _change_end(self, change):
self.end = int(change['new'][:4]) if change['new'] else dt.now().year
self._check_sensor_availability()
return self
def _change_start(self, change):
self.start = int(change['new'][:4]) if change['new'] else 1950
self._check_sensor_availability()
return self
def _check_sensor_availability(self):
"""reduce the number of available satellites based on the dates selected by the user"""
# reset current values
self.sensors_select.items = []
self.sensors_select.v_model = []
# check every satellite availability
years = range(self.start, self.end + 1)
sensors = []
for s in cp.sensors:
if any(e in years for e in [cp.sensors[s]['start'], cp.sensors[s]['end']]):
sensors.append(s)
elif cp.sensors[s]['start'] < self.start and cp.sensors[s]['end'] > self.end:
sensors.append(s)
self.sensors_select.items = sensors
return self
|
[
"pierrick.rambaud49@gmail.com"
] |
pierrick.rambaud49@gmail.com
|
ea02622ccae8492548b091136b268bf259b5cebd
|
23ec6adce704bff40d04cd6fc0ba446375405b68
|
/Non Leetcode Solutions/linked_list_py.py
|
378d3a34b439b5b394c573f968a35ed1cc2897d6
|
[] |
no_license
|
amoghrajesh/Coding
|
1845be9ea8df2d13d2a21ebef9ee6de750c8831d
|
a7dc41a4963f97dfb62ee4b1cab5ed80043cfdef
|
refs/heads/master
| 2023-08-31T10:10:48.948129
| 2023-08-30T15:04:02
| 2023-08-30T15:04:02
| 267,779,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
class Node(object):
def __init__(self,data,next_node=None):
self.data=data
self.next_node=next_node
def get_next(self):
return self.next_node
def set_next(self,next_node):
self.next_node=next_node
def get_data(self):
return self.data
def set_data(self,data):
self.data=data
def has_next(self):
if self.get_next() is None:
return False
return True
def toString(self):
return str(self.get_data())
class LinkedList(object):
def __init__(self,r=None):
self.root=r
self.size=0
def get_size(self):
return self.size
def add(self,d):#add at beginning
new_node=Node(d,self.root)
self.root=new_node
self.size+=1
def remove(self,data):
this_node=self.root
prev_node=None
while this_node is not None:
if this_node.get_data() == data:
if prev_node is not None:
prev_node.set_next(this_node.get_next())
else:
self.root=this_node.get_next()
self.size-=1
return True
else:
prev_node=this_node
this_node=this_node.get_next()
return False
def find(self,data):
this_node=self.root
while this_node is not None:
if this_node.get_data() == data:
return True
this_node=this_node.get_next()
return False
    def print_list(self):
        this_node = self.root
        while this_node is not None:  # was has_next(), which skipped the last node
            print(this_node.toString())
            this_node = this_node.get_next()
myList=LinkedList()
myList.add(1)
myList.add(4)
myList.add(6)
myList.add(2)
print("size:",myList.get_size())
'''myList.remove(6)
print("size:",myList.get_size())
print("Is 2 present?",myList.find(-2))'''
myList.print_list()
|
[
"amoghrajesh1999@gmail.com"
] |
amoghrajesh1999@gmail.com
|
c4be81c83c88067b9cf207fdeb2ab275f44e2c08
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/F4iemEeFfsaFoMpAF_4.py
|
786cba909da3a937ac21071a5cc1d90693d4e336
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
"""
This is a list of single characters with an unwanted character at the end:
["H", "e", "l", "l", "o", "!", "\0"]
You could also just type "Hello!" when initializing a variable, creating the
string "Hello!"
Create a function that will return a string by combining the given character
list, not including the unwanted final character.
### Examples
cpp_txt(["H", "i", "!", "\0"]) ➞ "Hi!"
cpp_txt(["H", "e", "l", "l", "o", "!", "\0"]) ➞ "Hello!"
cpp_txt(["J", "A", "V", "a", "\0"]) ➞ "JAVa"
### Notes
This is a translation of a C++ challenge and is trivial in Python, but perhaps
it will be helpful to someone out there. (No challenge is trivial until you
know how to solve it :)
"""
def cpp_txt(lst):
return ''.join(lst[:-1])
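# Quick check against the examples in the docstring:
print(cpp_txt(["H", "i", "!", "\0"]))  # Hi!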
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6068e0dfbaa8b3e02df630a1f8f2d8551b444403
|
2eaecdb1ed42170463993b8b2285296c5ef2231d
|
/apps/ciudad/admin.py
|
d7e080b95887458bf100d3a8e00e6edfdc8c6041
|
[] |
no_license
|
ivanfdaza/tribunaleclesiasticoIIS
|
9639fc66a2c99baa45b8276f4a1e035bdf294e2e
|
acb164ab8464b71d0461acf03bdd5e3386b57893
|
refs/heads/master
| 2022-11-21T10:32:14.925326
| 2020-07-23T16:21:26
| 2020-07-23T16:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from django.contrib import admin
# Register your models here.
from apps.ciudad.models import Ciudad, Departamento
admin.site.register(Ciudad)
admin.site.register(Departamento)
|
[
"you@example.com"
] |
you@example.com
|
88be1a8dbca36a3704310ed5d08336575231773d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/cirq_new/cirq_program/startCirq_pragma263.py
|
bc6b11402b4ca6da63e2f0bf7495b508300d9153
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,624
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma263.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
0efe388f3e3a95551a15c6e5f3c3ac7d3ae444c5
|
b9062ed0431544160161a270fe669858c3ca9633
|
/blog/migrations/0003_auto_20191101_2319.py
|
f9b453d4abd8102f08dc12a51a8acc1e12851805
|
[] |
no_license
|
sd8917/LearnDjango
|
350f73ed7077d0b3ac9aa2f1e0fd7d05f67faf05
|
87a9c6c5932f685a01ad6125faf81ac94a1fac5d
|
refs/heads/master
| 2022-12-03T18:18:13.770896
| 2019-11-05T06:35:32
| 2019-11-05T06:35:32
| 219,081,219
| 1
| 0
| null | 2022-11-22T04:36:24
| 2019-11-02T00:14:05
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
# Generated by Django 2.2.6 on 2019-11-01 17:49
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20191101_1435'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=ckeditor_uploader.fields.RichTextUploadingField(),
),
]
|
[
"sudhanshuraj8917@gmail.com"
] |
sudhanshuraj8917@gmail.com
|
d45b2df2ceb71ae350e9d6a317ee4e09741e503e
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_207/507.py
|
3881fca5b62b736452cde9286a5ba5618161c3b5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
def validate(s):
pass
def solver(line):
n,r,o,y,g,b,v = line
t1 = b - o
t2 = y - v
t3 = r - g
if t1 < 0 or t2 < 0 or t3 < 0:
return "IMPOSSIBLE"
if 0 in [t1,t2,t3]:
if line[1:].count(0) == 4:
L = [(r,'R'),(o,'O'),(y,'Y'),(g,'G'),(b,'B'),(v,'V')]
L.sort(key = lambda x: -x[0])
if L[0][0] == L[1][0]:
return (L[0][1] + L[1][1]) * L[0][0]
else:
return "IMPOSSIBLE"
else:
return "IMPOSSIBLE"
L = [t1,t2,t3]
if sum(L) < 2 * max(L):
return "IMPOSSIBLE"
else:
L = [[t1,'B'],[t2,'Y'],[t3,'R']]
s = '_'
while sum(i[0] for i in L) > 3:
#error: haven't enforced start != end
L.sort(key = lambda x: -x[0])
if L[0][1] != s[-1]:
s += L[0][1]
L[0][0] -= 1
else:
s += L[1][1]
L[1][0] -= 1
if L[1][0] < 0:
print "bad stuff"
s = s[1:]
if s:
t = s[0] + s[-1]
else:
t = 'RR'
d = {'RR' : 'BRY',
'RY' : 'BRY',
'RB' : 'YRB',
'YR' : 'BYR',
'YY' : 'BYR',
'YB' : 'RYB',
'BR' : 'YBR',
'BY' : 'RBY',
'BB' : 'RBY'}
s += d[t]
s = s.replace('B','BO' * o + 'B', 1)
s = s.replace('Y','YV' * v + 'Y', 1)
s = s.replace('R','RG' * g + 'R', 1)
return s
#case testing needs to happen
fout = open('out.txt','w')
f = open('in.txt')
T = int(f.readline())
for case in range(1,T+1):
line = f.readline()
line = line.split()
line = [int(i) for i in line]
ans = solver(line)
str = "Case #%d: %s\n" % (case, ans)
print str,
fout.write(str)
f.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
3a206f6d8e955b15bbd61988b40ea1f668583f18
|
8ef5a09d76a11c56963f18e6a08474a1a8bafe3c
|
/algorithm/dp_subset_sum.py
|
44580f16c302081909155ac156cefc69cf012378
|
[] |
no_license
|
roiei/algo
|
32c4677649c7666db148f6183fbfbf66c8b1969f
|
ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec
|
refs/heads/master
| 2022-04-01T19:21:27.768675
| 2022-02-19T06:15:29
| 2022-02-19T06:15:29
| 169,021,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,898
|
py
|
nums = [34, 4, 12, 5, 2]
target = 9
#target = 40
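# Three takes on subset sum follow: plain recursive search (dfs), a memoized
# search (dfs_dp), and bottom-up dynamic programming (isSubsetSum, is_subset_sum).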
def dfs(nums, depth, n, target):
if target == 0:
return True
if depth == n or target < 0:
return False
res = dfs(nums, depth + 1, n, target - nums[depth]),
res += dfs(nums, depth + 1, n, target),
return any(res)
mem = {}
def dfs_dp(nums, depth, n, target):
    # memoize on (depth, target): depth alone is not a sound key, and the
    # original recursed into the unmemoized dfs(), so nothing was ever cached
    if (depth, target) in mem:
        return mem[(depth, target)]
    if target == 0:
        return True
    if depth == n or target < 0:
        return False
    take = dfs_dp(nums, depth + 1, n, target - nums[depth])
    skip = dfs_dp(nums, depth + 1, n, target)
    mem[(depth, target)] = take or skip
    return mem[(depth, target)]
def isSubsetSum(nums, n, target):
subset = ([[False for i in range(target+1)] for i in range(n+1)])
for i in range(n+1):
subset[i][0] = True
for i in range(1, target+1):
subset[0][i] = False
for i in range(1, n+1):
for j in range(1, target+1):
if j < nums[i-1]:
subset[i][j] = subset[i-1][j]
else:
subset[i][j] = (subset[i-1][j] or
subset[i-1][j-nums[i-1]])
return subset[n][target]
def is_subset_sum(nums, n, target):
dp = [False]*(target+1)
cmb = [True]*(target+1)
for num in nums:
if num <= target:
print(f'num = {num}')
dp[num] = True
cmb[num] = False
for i in range(1, target+1):
if dp[i] == True and (i+num <= target):
if i != num and cmb[i] == False:
dp[i+num] = True
return dp[target]
# print(dfs(nums, 0, len(nums), target))
# print(dfs_dp(nums, 0, len(nums), target))
print(isSubsetSum(nums, len(nums), target))
print(is_subset_sum(nums, len(nums), target))
|
[
"hyoukjea.son@hyundai.com"
] |
hyoukjea.son@hyundai.com
|
832974b9068a90cd72f7987a17131faae3924d37
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/xcp2k/classes/_cell3.py
|
c1150112a0c843de3db8f2c0d137662bf75a7671
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
from xcp2k.inputsection import InputSection
from _cell_ref1 import _cell_ref1
class _cell3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.A = None
self.B = None
self.C = None
self.Abc = None
self.Alpha_beta_gamma = None
self.Cell_file_name = None
self.Cell_file_format = None
self.Periodic = None
self.Multiple_unit_cell = None
self.Symmetry = None
self.CELL_REF = _cell_ref1()
self._name = "CELL"
self._keywords = {'A': 'A', 'Cell_file_format': 'CELL_FILE_FORMAT', 'C': 'C', 'B': 'B', 'Symmetry': 'SYMMETRY', 'Alpha_beta_gamma': 'ALPHA_BETA_GAMMA', 'Multiple_unit_cell': 'MULTIPLE_UNIT_CELL', 'Periodic': 'PERIODIC', 'Abc': 'ABC', 'Cell_file_name': 'CELL_FILE_NAME'}
self._subsections = {'CELL_REF': 'CELL_REF'}
self._aliases = {'Angles': 'Alpha_beta_gamma'}
@property
def Angles(self):
"""
See documentation for Alpha_beta_gamma
"""
return self.Alpha_beta_gamma
@Angles.setter
def Angles(self, value):
self.Alpha_beta_gamma = value
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
9aff07ad32360b10ae281e93532a2f1af7a967f5
|
7826681647933249c8949c00238392a0128b4a18
|
/cosypose/simulator/__init__.py
|
6242dfa1c761870f2a85f43957247c13b7b53277
|
[
"MIT"
] |
permissive
|
imankgoyal/cosypose
|
b35678a32a6491bb15d645bc867f4b2e49bee6d2
|
fa494447d72777f1d3bd5bd134d79e5db0526009
|
refs/heads/master
| 2022-12-09T11:18:23.188868
| 2020-08-31T15:34:02
| 2020-08-31T15:34:02
| 291,834,596
| 2
| 0
|
MIT
| 2020-08-31T22:06:12
| 2020-08-31T22:06:11
| null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
|
[
"labbe.yann1994@gmail.com"
] |
labbe.yann1994@gmail.com
|
49254eb20c5905f20020b227a913eea9b9007358
|
457c673c8c8d704ec150322e4eeee2fde4f827ca
|
/Programming Basic/First_Steps_in_coding_lab/07_Projects_Creation.py
|
ccf04b1496f502d612f560496cd25f03a08b4d0e
|
[] |
no_license
|
xMrShadyx/SoftUni
|
13c08d56108bf8b1ff56d17bb2a4b804381e0d4e
|
ce4adcd6e8425134d138fd8f4b6101d4eb1c520b
|
refs/heads/master
| 2023-08-02T03:10:16.205251
| 2021-06-20T05:52:15
| 2021-06-20T05:52:15
| 276,562,926
| 5
| 1
| null | 2021-09-22T19:35:25
| 2020-07-02T06:07:35
|
Python
|
UTF-8
|
Python
| false
| false
| 191
|
py
|
architect = input()
amount_projects = int(input())
total_time = amount_projects * 3
print(f'The architect {architect} will need {total_time} hours to complete {amount_projects} project/s.')
|
[
"daredevil91138@gmail.com"
] |
daredevil91138@gmail.com
|
98e60fc6389398e16d76d7de6e665ef79eac8947
|
dcc36a7744d657e15385803fcd13335685a595af
|
/quantdigger/demo/test_backtest.py
|
9500f1a4557b50deac1f9ee32fcf107612863f33
|
[] |
no_license
|
timedcy/quantdigger
|
777c28ba96d7dba1cb491a634f46e3968f3232bb
|
bc492811c796caaad3801d379bb485c1986d4619
|
refs/heads/master
| 2021-01-22T01:27:52.102183
| 2015-11-08T04:35:37
| 2015-11-08T04:35:37
| 45,767,058
| 1
| 0
| null | 2015-11-08T04:36:13
| 2015-11-08T04:36:13
| null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
# -*- coding: utf-8 -*-
from quantdigger.engine.execute_unit import ExecuteUnit
from quantdigger.indicators.common import MA, BOLL
from quantdigger.engine.strategy import TradingStrategy
from quantdigger.util import pcontract, stock
from quantdigger.digger import deals
import plotting
#def average(series, n):
#""" 一个可选的平均线函数 """
### @todo plot element
#sum_ = 0
#for i in range(0, n):
#sum_ += series[i]
#return sum_ / n
class DemoStrategy(TradingStrategy):
""" 策略实例 """
def __init__(self, exe):
super(DemoStrategy, self).__init__(exe)
print 'start: ', self.datetime[0]
self.ma20 = MA(self, self.close, 20,'ma20', 'b', '1')
self.ma10 = MA(self, self.close, 10,'ma10', 'y', '1')
self.b_upper, self.b_middler, self.b_lower = BOLL(self, self.close, 10,'boll10', 'y', '1')
#self.ma2 = NumberSeries(self)
def on_bar(self):
""" 策略函数,对每根Bar运行一次。"""
#self.ma2.update(average(self.open, 10))
if self.ma10[1] < self.ma20[1] and self.ma10 > self.ma20:
self.buy('long', self.open, 1, contract = 'IF000.SHFE')
elif self.position() > 0 and self.ma10[1] > self.ma20[1] and self.ma10 < self.ma20:
self.sell('long', self.open, 1)
        # cross-instrument data reference
#print self.position(), self.cash()
#print self.datetime, self.b_upper, self.b_middler, self.b_lower
#print self.datetime[0]
if __name__ == '__main__':
try:
pcon = pcontract('BB.SHFE', '1.Minute')
#begin_dt, end_dt = '2015-05-25', '2015-06-01'
        #pcon = stock('600848','10.Minute') # download stock data via tushare
simulator = ExecuteUnit([pcon, pcon])
algo = DemoStrategy(simulator)
#algo1 = DemoStrategy(simulator)
#algo2 = DemoStrategy(simulator)
simulator.run()
        # display the backtest results
from quantdigger.datastruct import TradeSide
ping = 0
kai = 0
for t in algo.blotter.transactions:
if t.side == TradeSide.PING:
ping += t.quantity
elif t.side == TradeSide.KAI:
kai += t.quantity
else:
raise "error"
print "ping: ", ping
print "kai: ", kai
assert kai >= ping
|
[
"dingjie.wang@foxmail.com"
] |
dingjie.wang@foxmail.com
|
03dde1a263827b35b7aaa86f9f7835c933b700cc
|
48f73b5b78da81c388d76d685ec47bb6387eefdd
|
/scrapeHackerrankCode/codes/find-point.py
|
fc6157b3e72153e83bf8e16e66b00a8c13227285
|
[] |
no_license
|
abidkhan484/hacerrankScraping
|
ad0ceda6c86d321d98768b169d63ea1ee7ccd861
|
487bbf115117bd5c293298e77f15ae810a50b82d
|
refs/heads/master
| 2021-09-18T19:27:52.173164
| 2018-07-18T12:12:51
| 2018-07-18T12:12:51
| 111,005,462
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
# Accepted
# Python 3
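# Prints the reflection of (x1, y1) through (x2, y2): the image point is
# (2*x2 - x1, 2*y2 - y1), since (x2, y2) is the midpoint of the segment.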
def find_point(x1, y1, x2, y2):
print((2*x2-x1), (2*y2-y1))
for _ in range(int(input().strip())):
x1, y1, x2, y2 = input().split()
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
find_point(x1, y1, x2, y2)
|
[
"abidkhan484@gmail.com"
] |
abidkhan484@gmail.com
|
6924472770c9b64625e91f5425599c76f151c774
|
e3946d91dc5fe71989c2f4b6390232865fcb5d1b
|
/fjord/flags/spicedham_utils.py
|
2e87b27ba33d4e350010c6253dd663f313cda103
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
zeusintuivo/fjord
|
61b632fd6df0e1b3508e628fe4f682a937cc0244
|
3bd227004d369df1fdc39f06acff12ebc8f0fe34
|
refs/heads/master
| 2021-01-16T18:28:52.564638
| 2014-09-24T21:02:51
| 2014-09-24T21:02:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,166
|
py
|
import json
import os
import re
import threading
from spicedham import Spicedham
from spicedham.backend import BaseBackend
from fjord.flags.models import Store
class FjordBackend(BaseBackend):
def __init__(self, config):
pass
def reset(self):
Store.objects.all().delete()
def get_key(self, classifier, key, default=None):
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
value = json.loads(obj.value)
except (IndexError, Store.DoesNotExist):
value = default
return value
def set_key(self, classifier, key, value):
value = json.dumps(value)
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
obj.value = value
except (IndexError, Store.DoesNotExist):
obj = Store.objects.create(
classifier=classifier, key=key, value=value)
obj.save()
def set_key_list(self, classifier, key_value_tuples):
for key, value in key_value_tuples:
self.set_key(classifier, key, value)
TOKEN_RE = re.compile(r'\W')
def tokenize(text):
"""Takes a piece of text and tokenizes it into train/classify tokens"""
# FIXME: This is a shite tokenizer and doesn't handle urls
# well. (We should handle urls well.)
tokens = TOKEN_RE.split(text)
return [token.lower() for token in tokens if token]
_cached_spicedham = threading.local()
def get_spicedham():
"""Retrieve a Spicedham object
These objects are cached threadlocal.
"""
sham = getattr(_cached_spicedham, 'sham', None)
if sham is None:
config = {
'backend': 'FjordBackend'
}
sham = Spicedham(config)
_cached_spicedham.sham = sham
return sham
def train_cmd(path, classification):
"""Recreates training data using datafiles in path"""
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError('path "%s" does not exist' % path)
sham = get_spicedham()
# Wipe existing training data.
print 'Wiping existing data...'
sham.backend.reset()
# Load all data for when classifier=True
true_path = os.path.join(path, classification)
print 'Loading classifier=True data from %s...' % true_path
files = [os.path.join(true_path, fn)
for fn in os.listdir(true_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=True)
# Load all data for when classifier=False
false_path = os.path.join(path, 'not_' + classification)
print 'Loading classifier=False data from %s...' % false_path
files = [os.path.join(false_path, fn)
for fn in os.listdir(false_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=False)
print 'Done!'
|
[
"willkg@mozilla.com"
] |
willkg@mozilla.com
|
e95625894d5cba62471ce44e67b02160ea805c8f
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/shangtai/codejamC.py
|
be3aad06742fb467f6074f6d0a440327d6d7dc65
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 999
|
py
|
T = int(raw_input())
N, J = map(int, raw_input().split())
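# This appears to solve Code Jam 2016's "Coin Jam": emit J length-N binary
# strings that start and end in 1 and are composite when read in every base
# from 2 to 10, printing a nontrivial divisor for each base.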
def is_prime(n):
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True
def primefactors(x):
loop=2
while loop<=x:
if x%loop==0:
x/=loop
return loop
else:
loop+=1
print "Case #1:"
j=0
for candidate in xrange(2**(N-2)):
candidate=candidate<<1
candidate+=(1+(1<<(N-1)))
candidate="{0:b}".format(candidate)
factorlist=[candidate]
for base in xrange(2,11):
candidatebase=int(candidate,base)
if is_prime(candidatebase):
break
else:
factorlist.append(primefactors(candidatebase))
if len(factorlist)==10:
j+=1
for i in factorlist:
print i,
print
if j==J:
break
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
ae5b5e91cf43266b95ffaeb5f1795e03a00655ff
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part003803.py
|
26d534cc630c79581554130b81c7f37de6f38777
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,094
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher62345(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.2.3.0', 1, 1, S(0)), Add)
]),
2: (2, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher62345._instance is None:
CommutativeMatcher62345._instance = CommutativeMatcher62345()
return CommutativeMatcher62345._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 62344
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 62346
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 62347
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63479
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 63480
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 65481
if len(subjects) >= 1:
tmp8 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', tmp8)
except ValueError:
pass
else:
pass
# State 65482
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst2
subjects.appendleft(tmp8)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp10 = subjects.popleft()
associative1 = tmp10
associative_type1 = type(tmp10)
subjects11 = deque(tmp10._args)
matcher = CommutativeMatcher62349.get()
tmp12 = subjects11
subjects11 = []
for s in tmp12:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp12, subst0):
pass
if pattern_index == 0:
pass
# State 62350
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst1
if pattern_index == 1:
pass
# State 63481
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
if pattern_index == 2:
pass
# State 65483
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst1
subjects.appendleft(tmp10)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part003804 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
2b2ecf4b17dd2b31fbfbf57f46f019b2b1eb04ec
|
c903382b1c2d170ca5a00a4482ee23be94da76d8
|
/quokka/core/admin/views.py
|
b7dff6308251262454ea8aa0e2499378eaebf24c
|
[
"MIT"
] |
permissive
|
alyoung/quokka
|
63c74ff913fe3d3b5ebdef38d9d267b149a6c9c1
|
a38749379f01c01cc887838999efa364dea5de04
|
refs/heads/master
| 2021-01-17T22:56:34.760694
| 2013-11-22T21:42:50
| 2013-11-22T21:42:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,499
|
py
|
# Create customized index view class
from flask import current_app
from quokka.core.models import Content
from quokka.utils.routing import expose
from quokka.core.widgets import TextEditor, PrepopulatedText
from .ajax import AjaxModelLoader
from .models import BaseIndexView, BaseView, ModelAdmin, BaseContentAdmin
class IndexView(BaseIndexView):
roles_accepted = ('admin', 'editor', 'moderator', 'writer', 'staff')
@expose('/')
def index(self):
return self.render('admin/index.html')
class InspectorView(BaseView):
roles_accepted = ('admin',)
@expose('/')
def index(self):
context = {
"app": current_app
}
return self.render('admin/inspector.html', **context)
###############################################################
# Admin model views
###############################################################
class LinkAdmin(BaseContentAdmin):
roles_accepted = ('admin', 'editor', 'writer', 'moderator')
column_list = ('title', 'channel', 'slug', 'published')
form_columns = ('title', 'slug', 'channel', 'link',
'content_format', 'summary', 'contents',
'values', 'available_at', 'available_until', 'published')
form_args = {
'summary': {'widget': TextEditor()}
}
class ConfigAdmin(ModelAdmin):
roles_accepted = ('admin', 'developer')
column_list = ("group", "description", "published",
"created_at", "updated_at")
column_filters = ("group", "description")
form_columns = ("group", "description", "published", "values")
class SubContentPurposeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ContentTemplateTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
column_list = ('title', 'long_slug', 'is_homepage',
'channel_type', 'created_at', 'available_at', 'published',
'view_on_site')
column_filters = ['published', 'is_homepage', 'include_in_rss',
'show_in_menu', 'indexable']
column_searchable_list = ('title', 'description')
form_columns = ['title', 'slug', 'content_format', 'description',
'parent', 'is_homepage',
'include_in_rss', 'indexable', 'show_in_menu', 'order',
'per_page', 'tags',
'published', 'canonical_url', 'values', 'channel_type',
'inherit_parent', 'content_filters', 'available_at',
'available_until', 'render_content', 'redirect_url']
column_formatters = {
'view_on_site': ModelAdmin.formatters.get('view_on_site'),
'created_at': ModelAdmin.formatters.get('datetime'),
'available_at': ModelAdmin.formatters.get('datetime')
}
form_subdocuments = {}
form_widget_args = {
'title': {'style': 'width: 400px'},
'slug': {'style': 'width: 400px'},
}
form_args = {
'description': {'widget': TextEditor()},
'slug': {'widget': PrepopulatedText(master='title')}
}
form_ajax_refs = {
'render_content': AjaxModelLoader('render_content',
Content,
fields=['title', 'slug']),
'parent': {'fields': ['title', 'slug', 'long_slug']},
}
|
[
"rochacbruno@gmail.com"
] |
rochacbruno@gmail.com
|
5ba3eb2c99cb4886c0d71494e016a22abad98aee
|
acbe6bd6cefaf8b12070d7258dab30e4f7fcebed
|
/ui/style.py
|
c5b1d5ee83c7a5fb029f0f3becf8dba8c57a3b3b
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RogueScholar/debreate
|
02c98c5a78d33041798410f0e3b99e80fda65d00
|
dfe9bcac7333a53082b3a2ae169806cf604d59f6
|
refs/heads/master
| 2023-06-07T11:49:03.821969
| 2023-04-28T02:14:25
| 2023-04-28T02:14:25
| 253,707,766
| 0
| 0
|
MIT
| 2023-05-28T15:24:17
| 2020-04-07T06:34:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
# ******************************************************
# * Copyright © 2016-2023 - Jordan Irwin (AntumDeluge) *
# ******************************************************
# * This software is licensed under the MIT license. *
# * See: LICENSE.txt for details. *
# ******************************************************
## @module ui.style
import wx
# FIXME: legacy wx version no longer supported
if wx.MAJOR_VERSION > 2:
PANEL_BORDER = wx.BORDER_THEME
else:
PANEL_BORDER = wx.BORDER_MASK
## Layout styles for sizers.
class layout:
ALGN_T = wx.ALIGN_TOP
ALGN_B = wx.ALIGN_BOTTOM
ALGN_L = wx.ALIGN_LEFT
ALGN_LT = ALGN_L|ALGN_T
ALGN_LB = ALGN_L|ALGN_B
ALGN_R = wx.ALIGN_RIGHT
ALGN_RT = ALGN_R|ALGN_T
ALGN_RB = ALGN_R|ALGN_B
ALGN_C = wx.ALIGN_CENTER
ALGN_CH = wx.ALIGN_CENTER_HORIZONTAL
ALGN_CV = wx.ALIGN_CENTER_VERTICAL
ALGN_CL = ALGN_CV|ALGN_L
ALGN_CR = ALGN_CV|ALGN_R
ALGN_CT = ALGN_CH|ALGN_T
ALGN_CB = ALGN_CH|ALGN_B
PAD_LT = wx.LEFT|wx.TOP
PAD_LB = wx.LEFT|wx.BOTTOM
PAD_LTB = PAD_LT|wx.BOTTOM
PAD_RT = wx.RIGHT|wx.TOP
PAD_RB = wx.RIGHT|wx.BOTTOM
PAD_RTB = PAD_RT|wx.BOTTOM
PAD_LR = wx.LEFT|wx.RIGHT
PAD_LRB = PAD_LR|wx.BOTTOM
PAD_LRT = PAD_LR|wx.TOP
PAD_TB = wx.TOP|wx.BOTTOM
|
[
"antumdeluge@gmail.com"
] |
antumdeluge@gmail.com
|
9a6666ffe7fd9c01862329091ec04e6fb5b1e21a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/rna-transcription/a033dd3f296e4281ad7fae26e02d0a4d.py
|
612ab04228445c3976c52b87575abb7b15c6a2a3
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
DNA_TO_RNA = {
'G' :'C',
'C' : 'G',
'T' : 'A',
'A' : 'U',
}
def to_rna(dna):
rna = ''
for c in dna:
if c not in DNA_TO_RNA:
raise ValueError("illegal nucleotide '%s' in dna" % c)
rna = rna + DNA_TO_RNA[c]
return rna
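# Usage sketch (hypothetical calls, not part of the original exercise):
# to_rna("GCTA") returns "CGAU"; to_rna("XGC") raises ValueError.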
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
53a392751a75f85027707c09f1f615efa1879fc4
|
1705e97ef5613685e142e3f78a2057399b09858c
|
/Code/asiportal/asiapp/wsgi.py
|
d3acec3485332c8d95214dd8fcb36efc399cc96e
|
[] |
no_license
|
FIU-SCIS-Senior-Projects/Academic-Success-Initiative---ASI-PantherCentric-1.0
|
0b956175efb031022ed32412195531c7f0c162c5
|
8ee64b58e2634384d5905defd3701a453b49b966
|
refs/heads/master
| 2022-11-24T00:07:52.458186
| 2017-08-02T01:36:32
| 2017-08-02T01:36:32
| 91,715,982
| 0
| 0
| null | 2022-11-22T01:31:04
| 2017-05-18T16:37:10
|
SQLPL
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for asiapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asiapp.settings")
application = get_wsgi_application()
|
[
"jakedlopez@gmail.com"
] |
jakedlopez@gmail.com
|
b31c59a0010e457b6542772a973b3e6da56bfc29
|
9cfd73a998d842d767071b26cefe0eb8efe39e90
|
/learning_rates.py
|
3b8df2efc6fd82922d321b26d58e4bf9e17144c4
|
[] |
no_license
|
boyko11/LogReg-DLAI
|
829e9b4e6b8dd23d6f3b5f0f68550d83c080104d
|
d222f6501ec4f0ea427f42706bb98c28c832fdb8
|
refs/heads/master
| 2022-11-23T19:26:10.052482
| 2020-08-02T17:57:24
| 2020-08-02T17:57:24
| 284,513,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
from logistic_regression import model
import data_service
import matplotlib.pyplot as plt
import numpy as np
train_set_x, train_set_y, test_set_x, test_set_y, _ = data_service.load_and_preprocess_data()
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
|
[
"boyko11@gmail.com"
] |
boyko11@gmail.com
|
aa3d3d73ed130154ffeca62917f6d42d463b92b8
|
3eb99709809a493c46a79171ef9774aa4261b59d
|
/脚本/llianli/cfapp_ei.py
|
0ca3d8cf2ee5e729d313f8426799f897d4cd36f7
|
[] |
no_license
|
bingwin/tencent
|
c831a5b344f597a06c7a7b179d4f67d668198c90
|
ea5dc5ff398d85cfdf4df056dc8b4064e66fb5fb
|
refs/heads/master
| 2020-07-28T21:44:00.281933
| 2016-05-28T03:21:31
| 2016-05-28T03:21:31
| 209,548,176
| 1
| 0
| null | 2019-09-19T12:29:21
| 2019-09-19T12:29:21
| null |
UTF-8
|
Python
| false
| false
| 6,718
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# ******************************************************************************
# Program name:  cfapp_ei.py
# Description:   daily counts of cfapp visit events
# Input params:  yyyymmdd, e.g. 20151208
# Target table:
# Source table:  teg_mta_intf::ieg_lol
# Author:        llianli
# Created on:    2015-12-08
# Version:       v1.0
# Company:       tencent
# Modified by:
# Modified on:
# Reason:
# ******************************************************************************
#import system module
# main entry
import datetime
import time
def TDW_PL(tdw, argv=[]):
tdw.WriteLog("== begin ==")
tdw.WriteLog("== argv[0] = " + argv[0] + " ==")
sDate = argv[0]
tdw.WriteLog("== sDate = " + sDate + " ==")
tdw.WriteLog("== connect tdw ==")
sql = """use ieg_qt_community_app"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.splitbylinenum=true"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.line_num_per_split=1000000"""
res = tdw.execute(sql)
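    # The two settings above split input files by line count, 1,000,000 lines per split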
    ## Create the table (if needed) and write data
sql = '''
CREATE TABLE IF NOT EXISTS tb_cf_app_ei
(
fdate INT,
id INT,
ei1 STRING,
ei2 STRING,
uin_mac STRING,
uin STRING,
pv BIGINT
)
'''
tdw.WriteLog(sql)
res = tdw.execute(sql)
sql = ''' DELETE FROM tb_cf_app_ei WHERE fdate = %s '''%(sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
    ## Insert the day's aggregated data into the table
sql = '''
INSERT TABLE tb_cf_app_ei
SELECT
%s AS fdate,
id,
ei1,
ei2,
uin_info,
uin,
COUNT(*) AS pv
FROM
(
SELECT
id,
'all' AS ei1,
case
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') not in ('图片','手机','论坛','电脑','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '资讯列表项')
then '情报站-资讯'
when (id = 1100679031 and ( ei in ('视频播放次数') or (ei = '资讯广告点击' and get_json_object(kv,'$.type') = '视频') ) ) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '视频列表项')
then '情报站-视频'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') ='图片') or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '图片列表项')
then '情报站-图片'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') in ('手机','电脑','论坛','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '活动列表项')
then '情报站-活动'
when (id = 1100679031 and ei = '我模块点击次数' ) or (id = 1200679031 and ei = '情报站社区基地我TAB点击次数' and get_json_object(kv,'$.type') = '我') then '我-战绩'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '装备') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '资产') then '我-资产'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '记录') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '记录') then '我-记录'
when (id = 1100679031 and ei = '客态资料' ) then '客态资料'
when (id = 1100679031 and ei = '道聚城点击次数') or (id = 1200679031 and ei = '道具城点击') then '基地-道聚城'
when (id = 1100679031 and ei = '火线_视频点击次数') or (id = 1200679031 and ei = '火线时刻视频点击次数') then '基地-火线时刻'
when (id = 1100679031 and ei = '我的仓库点击' ) or (id = 1200679031 and ei = '我的仓库点击') then '基地-我的仓库'
when (id = 1100679031 and ei = '军火基地点击次' ) or (id = 1200679031 and ei = '军火基地点击次') then '基地-军火基地'
when (id = 1100679031 and ei= '基地WEB页面点击次数' and get_json_object(kv,'$.title') = '周边商城') then '基地-周边商城'
when (id = 1100679031 and ei = '竞猜大厅入口' ) or (id = 1200679031 and ei = '竞猜大厅入口点击次数') then '基地-赛事竞猜'
when (id = 1100679031 and ei = '火线百科点击次数' ) or (id = 1200679031 and ei = '火线百科点击') then '基地-火线百科'
when (id = 1100679031 and ei = '火线助手点击次数' ) or (id = 1200679031 and ei = '火线助手') then '基地-火线助手'
when (id = 1100679031 and ei = '我的任务点击次数' ) or (id = 1200679031 and ei = '我的任务点击') then '基地-我的任务'
when (id = 1100679031 and ei = '地图点位模块点击次数' ) or (id = 1200679031 and ei = '地图点图') then '基地-地图点位'
when (id = 1100679031 and ei in ('每天用户发的消息' ,'每天用户发的消息')) then '社区-聊天'
when (id = 1100679031 and ei = '社区_CF论坛点击次数' ) or (id = 1200679031 and ei = 'CF论坛点击') then '社区-CF论坛'
when (id = 1100679031 and ei = '社区_CF手游论坛点击次数' ) or (id = 1200679031 and ei = '点击CF手游论坛') then '社区-CF手游论坛'
when (id = 1100679031 and ei = '社区_兴趣部落点击次数' ) or (id = 1200679031 and ei = 'CF兴趣部落') then '社区-兴趣部落'
ELSE 'other'
end as ei2,
concat(ui,mc) AS uin_info,
get_json_object(kv,'$.uin') AS uin
FROM teg_mta_intf::ieg_lol WHERE sdate = %s AND id in (1100679031,1200679031)
)t1 WHERE ei1 != 'other' AND ei2 != 'other'
GROUP BY id,ei1,ei2,uin_info,uin
'''%(sDate,sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
tdw.WriteLog("== end OK ==")
|
[
"996346098@qq.com"
] |
996346098@qq.com
|
967f4507e9be93893f9db9e8ab04d072e7c1c49c
|
16ac02b8f427bd622af1564f1236e4913ed63521
|
/Codes/Version 1.6/force_raised_gaussian.py
|
003e679b1563108f7216fab5e0a3d0cd04424273
|
[
"MIT"
] |
permissive
|
gharib85/Brownian-dynamics-in-a-time-varying-force-field
|
20660665747310e1201e8ca7d404acc15ec7a3bd
|
1dce268fcc4f27e066be0ec0b511178cbc1437c5
|
refs/heads/main
| 2023-08-16T03:47:51.957137
| 2021-10-23T19:09:50
| 2021-10-23T19:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,593
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 6, 2021
@author: asif
"""
import numpy as np
import pylab as py
import matplotlib as plt  # binds the top-level matplotlib package; only referenced in commented-out code below
ro = 2e-6
tfinal = 12
xrange_limit = 30e-6 # Max and min of x axis range for plotting animation
zlow_limit = -10e-6
zhigh_limit = 30e-6
r_active = 0
n_order = 1 # Order of the Gaussian potential = 2n
w_well = 10e-6 # 1/e *max width of the potential well
A_well = 4000*1.38e-23*300 # well depth
def draw_geo(tm, ax_xy, ax_yz, ax_xz):
# March 7, 2021
# The flag_source_state variable is used to draw/erase the source geometry only once
# This is necessary to speed up the animation.
global flag_source_state_1 # Make this variable global so that the assigned value remains saved globally as t changes
global flag_source_state_2
if 'flag_source_state_1' not in globals():
        global flag_source_state_1 # Make this variable global so that the assigned value remains saved globally as t changes
flag_source_state_1 = 0 # initialize with OFF state
print('Defining global flag for source geometry \n')
if 'flag_source_state_2' not in globals():
        global flag_source_state_2 # Make this variable global so that the assigned value remains saved globally as t changes
flag_source_state_2 = 0 # initialize with OFF state
print('Defining global flag for source geometry \n')
# Draw static geometry (only once)
if flag_source_state_2 < 1:
py.sca(ax_yz)
substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_yz)
py.sca(ax_xz)
substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_xz)
py.sca(ax_xy)
substrate_xy = py.Rectangle((-xrange_limit*1e6, -xrange_limit*1e6),2*xrange_limit*1e6,2*xrange_limit*1e6,fc='#f9f9f9')
py.gca().add_patch(substrate_xy)
flag_source_state_2 = 1
# Draw source
if (tm > 1) & (tm < 8) & (flag_source_state_1 < 1):
patch_spot_xy = py.Circle((0, 0), 0.5*w_well*1e6, fc='#ff8c00',alpha = 0.8)
# patch_spot_yz = plt.patches.Arc((0, 0), 0.5*w_well*1e6, 0.5*w_well*1e6,0, 0, 180, fc='#ff8c00',alpha = 0.8)
py.sca(ax_xy)
py.gca().add_patch(patch_spot_xy)
# py.sca(ax_yz)
# py.gca().add_patch(patch_spot_yz)
flag_source_state_1 = 1
print('Drawing source\n')
# Erase source (draw a white circle)
if (tm > 8) & (flag_source_state_1 == 1):
patch_spot = py.Circle((0, 0), 0.51*w_well*1e6, fc='#f9f9f9',alpha = 1)
py.gca().add_patch(patch_spot)
print('Erasing source\n')
flag_source_state_1 = 0
# def draw_yz(tm):
# substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_yz)
# def draw_xz(tm):
# substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_xz)
# This function is called from the main program
# Force model: attractive super-Gaussian potential well, F = -grad(U) with U(r) = -A*exp(-(r/w)^(2n))
def force_profile(r_in, t):
Np = r_in[0,:].size
fm = np.zeros((3,Np))
r_norm = np.linalg.norm(r_in, axis = 0) + 1e-30
g = A_well*np.exp(-(r_norm/w_well)**(2*n_order))
if (t > 1) & (t<8):
fm[0,:] = -2*n_order*r_in[0,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[1,:] = -2*n_order*r_in[1,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[2,:] = -2*n_order*r_in[2,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
# fm[:,2] = 0
# fm[:,3] = 0
# fm[:,4] = 0
# fm[:,5] = 0
# fm[:,6] = 0
return fm
def force_plot():
Np = 1
rin = np.zeros((3,Np))
r_in = np.tile(np.linspace(-xrange_limit,xrange_limit,200),(3,1))
F = force_profile(r_in,2)
py.figure()
py.plot(r_in[0,:]*1e6,F[0,:]*1e12, label = '$F_x$')
# py.plot(r_in[1,:]*1e6,F[1,:]*1e12,'.', label = '$F_y$')
# py.plot(r_in[2,:]*1e6,F[2,:]*1e12,'x', label = '$F_z$')
py.xlabel('$x$ ($\mu$m)')
py.ylabel('Force (pN)')
py.legend()
# force_plot()
# draw_source(9)
|
[
"39745895+zaman13@users.noreply.github.com"
] |
39745895+zaman13@users.noreply.github.com
|
2a499fd7e7b8c5bbf2617bae35a047e99b8d6b08
|
637ec65429e817c6c12fc66bad299a9ff831ca3c
|
/supplier_management/supplier_management/doctype/supplier_product_info/supplier_product_info.py
|
a6f4b09f223a966bb66a2fb4d123987e1b8b7488
|
[
"MIT"
] |
permissive
|
ashish-greycube/supplier_management
|
292ca4d956fdc8659e630ec9a8280d0b77037f25
|
c6f32c383f3d6e9a459903652a42341beb7f8482
|
refs/heads/master
| 2020-09-02T22:05:23.001424
| 2020-01-08T07:03:05
| 2020-01-08T07:03:05
| 219,316,260
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupplierProductInfo(Document):
pass
|
[
"mr.ashish.shah@gmail.com"
] |
mr.ashish.shah@gmail.com
|
698615984a24120282d332cfef57d98cdf075fb5
|
0c325cf7a68ef51067ed8db566d525a20de5b635
|
/python/xlrd_and_xlwt/xlrd_test.py
|
ff5eb4eb3632e68644309cd097ce90b78ddb3c9c
|
[] |
no_license
|
alinzel/NOTES
|
2ab6aa1ef1d601a9ae8c0d23c0df2bca7e1aa241
|
3e0594641a605580e920d0b08a251fbc99f34e2f
|
refs/heads/master
| 2023-01-08T22:48:30.762625
| 2020-01-17T09:14:47
| 2020-01-17T09:14:47
| 175,339,492
| 0
| 0
| null | 2022-12-27T15:01:19
| 2019-03-13T03:28:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,865
|
py
|
# TODO xlrd -- a library for reading and formatting data from Excel files, whether xls or xlsx
import xlrd
# Open the Excel file; returns a Book instance - <xlrd.book.Book object at 0x000001ED41180898>
excel = xlrd.open_workbook(r"./excel/2017年人员电子档案.xlsx") # r --> raw string, no escape processing
# Get the sheet names; returns a list of names - ['2017-6-22', '测试']
sheet_names = excel.sheet_names()
# Get the sheet objects; returns a list of objects - [<xlrd.sheet.Sheet object at 0x0000023A57014CC0>, <xlrd.sheet.Sheet object at 0x0000023A57014CF8>]
sheets = excel.sheets()
# Get the total number of sheets; returns a number - 2
sheet_num = excel.nsheets
# Get a single sheet object
sheet_index = excel.sheet_by_index(0) # by index
sheet_name = excel.sheet_by_name("测试") # by name
# Basic information about a sheet object
name = sheet_index.name # sheet name
rows = sheet_index.nrows # number of rows
cols = sheet_index.ncols # number of columns
# Fetch cell data in bulk
row_value = sheet_index.row_values(2, 0, 4) # one row's values as a list; TODO params in order: row index 2, from column 0 to column 4
col_value = sheet_index.col_values(0, 0, 4)
row = sheet_index.row(2) # one row's values and types, slicing not supported - [text:'123', text:'456', text:'789', text:'147', text:'11111111', text:'258', text:'']
col = sheet_index.col(1)
slice_row = sheet_index.row_slice(2, 0, 4) # one row's values and types, slicing supported
slice_col = sheet_index.col_slice(0, 0, 4)
# Fetch a specific cell
cell_value = sheet_index.cell(1,2).value # value at row index 1, column index 2 (2nd row, 3rd column)
cell_value_ = sheet_index.cell_value(1,2)
# Cell-name helpers
print(xlrd.cellname(0,1))
print(xlrd.cellnameabs(0,1))
print(xlrd.colname(8))
# Write to the database
import pymysql
# Connect to the database
coon = pymysql.connect(
host="192.168.200.10",
db="test_zwl",
user="bdsdata",
password="357135",
port=3306
)
cur = coon.cursor()
# TODO query
# sql = "select * from file"
# cur.execute(sql)
# result = cur.fetchone()
# print(result)
# TODO insert data
row_num = sheet_index.nrows
col_num = sheet_index.ncols
# Build one SQL statement and bulk-insert with values (),(),() rather than inserting row by row
sql = "insert into file values"
for i in range(1,row_num): # loop over rows
    for j in range(0,col_num): # loop over columns
        item = sheet_index.cell_value(i, j) # value of the given cell
        # TODO empty values take two forms in the database: an empty string (shown blank) or NULL, which must not be quoted (shown as null)
if item == "":
item = "Null"
value = str(item)
else:
value = '"' + str(item) + '"'
if i != row_num-1:
if j == 0 :
sql += "(" + str(i) + ","+ value + "," # TODO 插入的item 要用 ”“包起来,不然报错 1064,但是null不可以包
elif j == col_num-1:
sql += value + "),"
else:
sql += value + ","
else:
if j == 0 :
sql += "(" + str(i) + ","+ value + ","
elif j == col_num-1:
sql += value + ")"
else:
sql += value + ","
# break
# print(sql)
# try:
# cur.execute(sql)
# coon.commit() # TODO don't forget to commit
# except:
# coon.rollback()
value_list = []
for i in range(1,row_num):
row_v = sheet_index.row_values(i)
    row_v = [None if row == "" else row for row in row_v ] # None is stored as NULL in the database
value_list.append(row_v)
sql_many = "insert into file (name,area,department,job_state,phone,in_date,out_date)values(%s,%s,%s,%s,%s,%s,%s)"
try:
cur.executemany(sql_many,value_list)
    coon.commit() # TODO don't forget to commit
except Exception as e:
print(e)
coon.rollback()
cur.close()
coon.close()
|
[
"944951481@qq.com"
] |
944951481@qq.com
|
65f5d5d7db31e03fff05009390b6ac2b06cc7f29
|
5d58fa1d54855f18bad5688de4459af8d461c0ac
|
/plugins/callback/yaml.py
|
40bc0191f254fdf8b7a04ea6c86e06ff50051353
|
[] |
no_license
|
nasirhm/general
|
b3b52f6e31be3de8bae0414da620d8cdbb2c2366
|
5ccd89933297f5587dae5cd114e24ea5c54f7ce5
|
refs/heads/master
| 2021-01-04T07:03:21.121102
| 2020-02-13T20:59:56
| 2020-02-13T20:59:56
| 240,440,187
| 1
| 0
| null | 2020-02-14T06:08:14
| 2020-02-14T06:08:13
| null |
UTF-8
|
Python
| false
| false
| 4,855
|
py
|
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: yaml
type: stdout
short_description: yaml-ized Ansible screen output
description:
- Ansible output that can be quite a bit easier to read than the
default JSON formatting.
requirements:
- set as stdout in configuration
extends_documentation_fragment:
- default_callback
'''
import yaml
import json
import re
import string
import sys
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
from ansible.plugins.callback.default import CallbackModule as Default
# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
"""Returns true if string should be in block format"""
for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if c in value:
return True
return False
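# e.g. should_use_block(u"multi\nline") -> True, should_use_block(u"one line") -> False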
def my_represent_scalar(self, tag, value, style=None):
"""Uses block style for multi-line strings"""
if style is None:
if should_use_block(value):
style = '|'
# we care more about readable than accuracy, so...
# ...no trailing space
value = value.rstrip()
# ...and non-printable characters
value = ''.join(x for x in value if x in string.printable)
# ...tabs prevent blocks from expanding
value = value.expandtabs()
# ...and odd bits of whitespace
value = re.sub(r'[\x0b\x0c\r]', '', value)
# ...as does trailing space
value = re.sub(r' +\n', '\n', value)
else:
style = self.default_style
node = yaml.representer.ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
class CallbackModule(Default):
"""
Variation of the Default output which uses nicely readable YAML instead
of JSON for printing results.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.yaml'
def __init__(self):
super(CallbackModule, self).__init__()
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
# All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
dumped = ''
# put changed and skipped into a header line
if 'changed' in abridged_result:
dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
del abridged_result['changed']
if 'skipped' in abridged_result:
dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
del abridged_result['skipped']
# if we already have stdout, we don't need stdout_lines
if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
abridged_result['stdout_lines'] = '<omitted>'
# if we already have stderr, we don't need stderr_lines
if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
abridged_result['stderr_lines'] = '<omitted>'
if abridged_result:
dumped += '\n'
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
# indent by a couple of spaces
dumped = '\n '.join(dumped.split('\n')).rstrip()
return dumped
def _serialize_diff(self, diff):
return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
233d0ec4819d640232d4c681a2454a1e0e5966e1
|
23d25497d30accc7125f6068ad7c55ebcbea0160
|
/Python/10828.py
|
a809aac64a520ee155f389f3eeaf52107078f583
|
[] |
no_license
|
ParkJeongseop/Algorithm
|
460689e064529d65e8612493a5d338305ec6311e
|
388d092ee8b07b7ea76e720053c782790563515b
|
refs/heads/master
| 2023-08-30T23:19:46.029510
| 2023-08-09T11:08:56
| 2023-08-09T11:08:56
| 149,557,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
import sys; input = lambda:sys.stdin.readline().rstrip()
n = int(input())
a = []
for _ in range(n):
cmd = input().split()
if cmd[0] == 'push':
a.append(cmd[1])
elif cmd[0] == 'pop':
if a:
print(a.pop())
else:
print(-1)
elif cmd[0] == 'size':
print(len(a))
elif cmd[0] == 'empty':
print(0 if len(a) else 1)
elif cmd[0] == 'top':
if a:
print(a[-1])
else:
print(-1)
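# Example session (hypothetical stdin): push 1 / top / pop / empty
# prints 1, 1, 1 -- the stack is empty after the pop.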
|
[
"parkjeongseop@parkjeongseop.com"
] |
parkjeongseop@parkjeongseop.com
|
11754e433ee8f5985f0ae11f9bae4e8dc50213e1
|
6e8f2e28479566dbaa338300b2d61f784ff83f97
|
/.history/code/tensorboard_utils_20210411113117.py
|
69315f5c5b16b26260ed37152698eb1eba53cc5e
|
[] |
no_license
|
eeng5/CV-final-project
|
55a7d736f75602858233ebc380c4e1d67ab2b866
|
580e28819560b86f6974959efb1d31ef138198fc
|
refs/heads/main
| 2023-04-09T21:28:21.531293
| 2021-04-21T19:57:22
| 2021-04-21T19:57:22
| 352,703,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,424
|
py
|
"""
Project 4 - CNNs
CS1430 - Computer Vision
Brown University
"""
import io
import os
import re
import sklearn.metrics
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import hyperparameters as hp
def plot_to_image(figure):
""" Converts a pyplot figure to an image tensor. """
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
image = tf.expand_dims(image, 0)
return image
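# plot_to_image is used by the callbacks below to hand matplotlib figures to tf.summary.image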
class ImageLabelingLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a plot of test images and their
predicted labels for viewing in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ImageLabelingLogger, self).__init__()
self.datasets = datasets
self.task = datasets.task
self.logs_path = logs_path
print("Done setting up image labeling logger.")
def on_epoch_end(self, epoch, logs=None):
self.log_image_labels(epoch, logs)
def log_image_labels(self, epoch_num, logs):
""" Writes a plot of test images and their predicted labels
to disk. """
fig = plt.figure(figsize=(9, 9))
count = 0
for batch in self.datasets.test_data: # changed from train to test
for i, image in enumerate(batch[0]):
plt.subplot(5, 5, count+1)
correct_class_idx = batch[1][i]
probabilities = self.model(np.array([image])).numpy()[0]
predict_class_idx = np.argmax(probabilities)
image = np.clip(image, 0., 1.)
plt.imshow(image, cmap='gray')
is_correct = correct_class_idx == predict_class_idx
title_color = 'g' if is_correct else 'r'
plt.title(
self.datasets.idx_to_class[predict_class_idx],
color=title_color)
plt.axis('off')
count += 1
if count == 25:
break
if count == 25:
break
figure_img = plot_to_image(fig)
file_writer_il = tf.summary.create_file_writer(
self.logs_path + os.sep + "image_labels")
with file_writer_il.as_default():
tf.summary.image("Image Label Predictions",
figure_img, step=epoch_num)
class ConfusionMatrixLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a confusion matrix for viewing
in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ConfusionMatrixLogger, self).__init__()
self.datasets = datasets
self.logs_path = logs_path
def on_epoch_end(self, epoch, logs=None):
self.log_confusion_matrix(epoch, logs)
def log_confusion_matrix(self, epoch, logs):
""" Writes a confusion matrix plot to disk. """
test_pred = []
test_true = []
count = 0
for i in self.datasets.test_data:
test_pred.append(self.model.predict(i[0]))
test_true.append(i[1])
count += 1
if count >= 1500 / hp.batch_size:
break
test_pred = np.array(test_pred)
test_pred = np.argmax(test_pred, axis=-1).flatten()
test_true = np.array(test_true).flatten()
# Source: https://www.tensorflow.org/tensorboard/image_summaries
cm = sklearn.metrics.confusion_matrix(test_true, test_pred)
figure = self.plot_confusion_matrix(
cm, class_names=self.datasets.classes)
cm_image = plot_to_image(figure)
file_writer_cm = tf.summary.create_file_writer(
self.logs_path + os.sep + "confusion_matrix")
with file_writer_cm.as_default():
tf.summary.image(
"Confusion Matrix (on validation set)", cm_image, step=epoch)
def plot_confusion_matrix(self, cm, class_names):
""" Plots a confusion matrix returned by
sklearn.metrics.confusion_matrix(). """
# Source: https://www.tensorflow.org/tensorboard/image_summaries
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Greens)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
cm = np.around(cm.astype('float') / cm.sum(axis=1)
[:, np.newaxis], decimals=2)
threshold = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j],
horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
class CustomModelSaver(tf.keras.callbacks.Callback):
""" Custom Keras callback for saving weights of networks. """
def __init__(self, checkpoint_dir, task, max_num_weights=5):
super(CustomModelSaver, self).__init__()
self.checkpoint_dir = checkpoint_dir
self.task = task
self.max_num_weights = max_num_weights
def on_epoch_end(self, epoch, logs=None):
""" At epoch end, weights are saved to checkpoint directory. """
min_acc_file, max_acc_file, max_acc, num_weights = \
self.scan_weight_files()
cur_acc = logs["val_sparse_categorical_accuracy"]
# Only save weights if test accuracy exceeds the previous best
# weight file
if cur_acc > max_acc:
save_name = "weights.e{0:03d}-acc{1:.4f}.h5".format(
epoch, cur_acc)
if self.task == '1':
self.model.save_weights(
self.checkpoint_dir + os.sep + "your." + save_name)
else:
# Only save weights of classification head of VGGModel
self.model.head.save_weights(
self.checkpoint_dir + os.sep + "vgg." + save_name)
# Ensure max_num_weights is not exceeded by removing
# minimum weight
if self.max_num_weights > 0 and \
num_weights + 1 > self.max_num_weights:
os.remove(self.checkpoint_dir + os.sep + min_acc_file)
def scan_weight_files(self):
""" Scans checkpoint directory to find current minimum and maximum
accuracy weights files as well as the number of weights. """
min_acc = float('inf')
max_acc = 0
min_acc_file = ""
max_acc_file = ""
num_weights = 0
files = os.listdir(self.checkpoint_dir)
for weight_file in files:
if weight_file.endswith(".h5"):
num_weights += 1
file_acc = float(re.findall(
r"[+-]?\d+\.\d+", weight_file.split("acc")[-1])[0])
if file_acc > max_acc:
max_acc = file_acc
max_acc_file = weight_file
if file_acc < min_acc:
min_acc = file_acc
min_acc_file = weight_file
return min_acc_file, max_acc_file, max_acc, num_weights
|
[
"natalie_rshaidat@brown.edu"
] |
natalie_rshaidat@brown.edu
|
4fbe70fbf88650d84fb87c57199e97908cac72f3
|
1bf7673846aedb5beed2d065f971f2985f70df1b
|
/lib/stashcache_tester/output/githubOutput.py
|
e82f08f5b95658cef25fec44fc13f3b8251cb8d3
|
[] |
no_license
|
StashCache/stashcache-tester
|
31ee90945186821f9bb0979c7bee942037ae05e7
|
5031d294050e9c6419c360e804654850efcfa32c
|
refs/heads/master
| 2020-12-25T14:12:41.392207
| 2017-02-23T17:55:51
| 2017-02-23T17:55:51
| 40,491,284
| 0
| 2
| null | 2017-02-23T17:55:52
| 2015-08-10T15:51:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,685
|
py
|
import logging
import json
import time
import shutil
import os
import sys
from tempfile import NamedTemporaryFile
from stashcache_tester.output.generalOutput import GeneralOutput
from stashcache_tester.util.Configuration import get_option
from stashcache_tester.util.ExternalCommands import RunExternal
class GithubOutput(GeneralOutput):
"""
:param dict sitesData: Dictionary described in :ref:`sitesData <sitesData-label>`.
This class summarizes and uploads the download data to a github account. The data will be stored in a file named ``data.json`` in the git repo under the directory in the configuration. The format of ``data.json`` is::
{
"20150911": [
{
"average": 364.76526180827,
"name": "Tusker"
},
{
"average": 75.99734924610296,
"name": "UCSDT2"
},
...
],
"20150913": [
{
"average": 239.02169168535966,
"name": "Tusker"
},
...
],
...
}
Github output requires an SSH key to be added to the github repository which is pointed to by the `repo` configuration option.
Github output requires additional configuration options in the main configuration in the section `[github]`. An example configuration could be::
[github]
repo = StashCache/stashcache.github.io.git
branch = master
directory = data
ssh_key = /home/user/.ssh/id_rsa
The configuration is:
repo
The git repo to commit the data to.
branch
The branch to install repo.
directory
The directory to put the data summarized files into.
maxdays
The maximum number of days to keep data. Default=30
ssh_key
Path to SSH key to use when checking out and pushing to the repository.
"""
git_ssh_contents = """#!/bin/sh
exec ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY_FILE "$@"
"""
def __init__(self, sitesData):
GeneralOutput.__init__(self, sitesData)
def _get_option(self, option, default = None):
return get_option(option, section="github", default=default)
def _summarize_data(self, sitesData):
summarized = []
# Average download time per site.
for site in sitesData:
cur = {}
cur['name'] = site
siteTimes = sitesData[site]
total_runtime = 0
failures = 0
caches = {}
for run in siteTimes:
# Initialize the cache structure
cache = run['cache']
if cache not in caches:
caches[cache] = {}
caches[cache]['runs'] = 0
caches[cache]['totalRuntime'] = 0
caches[cache]['failures'] = 0
if run['success'] is True:
total_runtime += float(run['duration'])
caches[cache]['totalRuntime'] += float(run['duration'])
caches[cache]['runs'] += 1
else:
caches[cache]['failures'] += 1
failures += 1
testsize = get_option("raw_testsize")
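            # Assuming raw_testsize is in bytes, the averages below are megabits per second:
            # (bytes * 8 / 2**20) / mean duration in seconds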
if total_runtime == 0:
cur['average'] = 0
for cache in caches.keys():
caches[cache]['average'] = 0
else:
cur['average'] = (float(testsize*8) / (1024*1024)) / (total_runtime / len(siteTimes))
for cache in caches.keys():
caches[cache]['average'] = (float(testsize*8) / (1024*1024)) / (caches[cache]['totalRuntime'] / caches[cache]['runs'])
cur['caches'] = caches
cur['failures'] = failures
summarized.append(cur)
# Should we do violin plot?
#summarized = sitesData
return summarized
def startProcessing(self):
"""
Begin summarizing the data.
"""
summarized_data = self._summarize_data(self.sitesData)
logging.debug("Creating temporary file for GIT_SSH")
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.write(self.git_ssh_contents)
git_sh_loc = tmpfile.name
logging.debug("Wrote contents of git_ssh_contents to %s" % git_sh_loc)
tmpfile.close()
import stat
os.chmod(git_sh_loc, stat.S_IXUSR | stat.S_IRUSR)
os.environ["GIT_SSH"] = git_sh_loc
# Download the git repo
git_repo = self._get_option("repo")
git_branch = self._get_option("branch")
key_file = self._get_option("ssh_key")
output_dir = self._get_option("directory")
os.environ["SSH_KEY_FILE"] = key_file
RunExternal("git clone --quiet --branch %s git@github.com:%s output_git" % (git_branch, git_repo))
# Write summarized data to the data file
data_filename = os.path.join("output_git", output_dir, "data.json")
if not os.path.exists(data_filename):
logging.error("Data file does not exist, bailing")
sys.exit(1)
with open(data_filename) as data_file:
data = json.load(data_file)
# Truncate the data to the latest `maxdays` days.
maxdays = self._get_option("maxdays", 30)
# Get and sort the keys
sorted_list = data.keys()
sorted_list.sort()
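        # (Python 2: dict.keys() returns a list here, so the in-place sort works)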
# Discard the last `maxdays` days (looking for what we need to delete)
to_delete = sorted_list[:-int(maxdays)]
for key in to_delete:
logging.debug("Removing data from %s" % key)
data.pop(key, None)
# Write today's summarized data
todays_key = time.strftime("%Y%m%d")
data[todays_key] = summarized_data
with open(data_filename, 'w') as data_file:
json.dump(data, data_file)
# Commit to git repo
RunExternal("cd output_git; git add -f .")
RunExternal("cd output_git; git commit -m \"Adding data for %s\"" % todays_key)
RunExternal("cd output_git; git push -fq origin %s" % git_branch)
shutil.rmtree("output_git")
|
[
"djw8605@gmail.com"
] |
djw8605@gmail.com
|
5c49e0ec04fe15cf08be854625cc496120e28c5f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/keyboard/keyboard/_keyboard_event.pyi
|
9c511fdccf59bc88a0fc4b133c00ab9036b835c7
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 827
|
pyi
|
from typing_extensions import Literal
from ._canonical_names import canonical_names as canonical_names, normalize_name as normalize_name
KEY_DOWN: Literal["down"]
KEY_UP: Literal["up"]
class KeyboardEvent:
event_type: Literal["down", "up"] | None
scan_code: int
name: str | None
time: float | None
device: str | None
modifiers: tuple[str, ...] | None
is_keypad: bool | None
def __init__(
self,
event_type: Literal["down", "up"] | None,
scan_code: int,
name: str | None = ...,
time: float | None = ...,
device: str | None = ...,
modifiers: tuple[str, ...] | None = ...,
is_keypad: bool | None = ...,
) -> None: ...
def to_json(self, ensure_ascii: bool = ...) -> str: ...
def __eq__(self, other: object) -> bool: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
cce9c2c02347ccae443d5f1e8dbebf712c264d0e
|
73e277935ef28fd05935c93a3f155c9cc6dc6de7
|
/ctf/crypto/quad_residue/Cipolla.py
|
e07aed34561ff03170436108e72f4b49b2beca9e
|
[] |
no_license
|
ohmygodlin/snippet
|
5ffe6b8fec99abd67dd5d7f819520e28112eae4b
|
21d02015492fb441b2ad93b4a455dc4a145f9913
|
refs/heads/master
| 2023-01-08T14:59:38.618791
| 2022-12-28T11:23:23
| 2022-12-28T11:23:23
| 190,989,347
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,165
|
py
|
#python3
#https://learnblockchain.cn/article/1520
def square_root_of_quadratic_residue(n, modulo):
"""Square root of quadratic residue
Solve the square root of quadratic residue using Cipolla's algorithm with Legendre symbol
Returns:
int -- if n is a quadratic residue,
return x, such that x^{2} = n (mod modulo)
otherwise, return -1
"""
if modulo == 2:
return 1
if n % modulo == 0:
return 0
Legendre = lambda n: pow(n, modulo - 1 >> 1, modulo)
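    # Euler's criterion: n**((p-1)//2) % p equals 1 for quadratic residues and p-1 for non-residues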
if Legendre(n) == modulo - 1:
return -1
t = 0
while Legendre(t ** 2 - n) != modulo - 1:
t += 1
w = (t ** 2 - n) % modulo
return (generate_quadratic_field(w, modulo)(t, 1) ** (modulo + 1 >> 1)).x
def generate_quadratic_field(d, modulo=0):
"""Generate quadratic field number class
Returns:
class -- quadratic field number class
"""
assert(isinstance(modulo, int) and modulo >= 0)
class QuadraticFieldNumber:
def __init__(self, x, y):
self.x = x % modulo
self.y = y % modulo
def __mul__(self, another):
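            # (x + y*sqrt(d)) * (u + v*sqrt(d)) = (x*u + d*y*v) + (x*v + y*u)*sqrt(d), reduced mod modulo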
x = self.x * another.x + d * self.y * another.y
y = self.x * another.y + self.y * another.x
return self.__class__(x, y)
def __pow__(self, exponent):
result = self.__class__(1, 0)
if exponent:
temporary = self.__class__(self.x, self.y)
while exponent:
if exponent & 1:
result *= temporary
temporary *= temporary
exponent >>= 1
return result
def __str__(self):
return '({}, {} \\sqrt({}))'.format(self.x, self.y, d)
return QuadraticFieldNumber
a = 8479994658316772151941616510097127087554541274812435112009425778595495359700244470400642403747058566807127814165396640215844192327900454116257979487432016769329970767046735091249898678088061634796559556704959846424131820416048436501387617211770124292793308079214153179977624440438616958575058361193975686620046439877308339989295604537867493683872778843921771307305602776398786978353866231661453376056771972069776398999013769588936194859344941268223184197231368887060609212875507518936172060702209557124430477137421847130682601666968691651447236917018634902407704797328509461854842432015009878011354022108661461024768
p = 30531851861994333252675935111487950694414332763909083514133769861350960895076504687261369815735742549428789138300843082086550059082835141454526618160634109969195486322015775943030060449557090064811940139431735209185996454739163555910726493597222646855506445602953689527405362207926990442391705014604777038685880527537489845359101552442292804398472642356609304810680731556542002301547846635101455995732584071355903010856718680732337369128498655255277003643669031694516851390505923416710601212618443109844041514942401969629158975457079026906304328749039997262960301209158175920051890620947063936347307238412281568760161
x = square_root_of_quadratic_residue(a, p)
print(x)
print(pow(x,2,p) - a)
# x^2 = (p-x)^2 = a (mod p), so p - x is the other square root
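# A small sanity check with hypothetical values: square_root_of_quadratic_residue(2, 7)
# returns 3 or 4, since 3**2 % 7 == 4**2 % 7 == 2.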
|
[
"laitaizong@gmail.com"
] |
laitaizong@gmail.com
|
4144585f59160e7268a01a9b954689f44dcc5460
|
44a6e88da453a2e368b014e403843b0c955f21f4
|
/utils/make_mock_solid_dir.py
|
49e00504ae9b25d4b9a7a94ae096e077cf8d7ffc
|
[
"Artistic-2.0"
] |
permissive
|
golharam/genomics
|
a26b1f9366203ec059cc2e49281909bfc16e6ab4
|
ca0c7c239b0f04353e2f2fa897db9c24a1211596
|
refs/heads/master
| 2020-08-06T10:28:21.604129
| 2019-09-27T07:51:41
| 2019-09-27T07:51:41
| 212,943,378
| 0
| 0
|
Artistic-2.0
| 2019-10-05T04:25:24
| 2019-10-05T04:25:23
| null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
#!/usr/bin/env python
#
# make_mock_solid_dir.py: make mock SOLiD directory for test purposes
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# make_mock_solid_dir.py
#
#########################################################################
"""make_mock_solid_dir.py
Makes a mock SOLiD run directory with run_definition and barcode statistic
files plus mock csfasta and qual files, which can be used to test other
programs and scrips with.
It uses the TestUtils class from the SolidData module to build and populate
the mock directory structure.
Usage: make_mock_solid_dir.py
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
#
import os
import sys
# Put ../share onto Python search path for modules
SHARE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]),'..','share')))
sys.path.append(SHARE_DIR)
try:
    from bcftbx.test.test_SolidData import TestUtils
except ImportError as ex:
    print("Error importing modules: %s" % ex)
    sys.exit(1)
if __name__ == "__main__":
paired_end = False
if '--paired-end' in sys.argv:
paired_end = True
elif len(sys.argv) > 1:
print("Usage: %s [--paired-end]" % os.path.basename(sys.argv[0]))
sys.exit(1)
# Make mock solid directory
if paired_end:
solid_dir = TestUtils().make_solid_dir_paired_end('solid0123_20111014_PE_BC')
else:
solid_dir = TestUtils().make_solid_dir('solid0123_20111014_FRAG_BC')
print("Constructed mock dir: %s" % solid_dir)
|
[
"peter.briggs@manchester.ac.uk"
] |
peter.briggs@manchester.ac.uk
|
6f1f8161ba95d3088ba7f50b93a121664fb1a322
|
57abd17391c6ef691509dae512c102f6635dab9b
|
/tensorflow_datasets/scripts/create_new_dataset.py
|
6e57f703111ebe42c66b6fd4f7d3415e908e0bac
|
[
"Apache-2.0"
] |
permissive
|
SinghKislay/datasets
|
434e50eb3b8584849192f3cabe7305429cc62363
|
bc09dd59826975f57c861da4bea23fa5d63d61cf
|
refs/heads/master
| 2020-05-02T22:27:34.771036
| 2019-04-10T18:14:41
| 2019-04-10T18:14:41
| 176,097,632
| 0
| 0
|
Apache-2.0
| 2019-03-17T12:25:56
| 2019-03-17T12:25:55
| null |
UTF-8
|
Python
| false
| false
| 6,919
|
py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate the minimal source code for a new dataset.
python -m tensorflow_datasets.scripts.create_new_dataset \
--dataset dataset_name \
--type dataset_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow.io import gfile
from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import py_utils
FLAGS = flags.FLAGS
_DATASET_TYPE = ['image', 'video', 'audio', 'text', 'structured', 'translate']
flags.DEFINE_string('tfds_dir', None, 'Root directory of tfds (auto-computed)')
flags.DEFINE_string('dataset', None, 'Dataset name')
flags.DEFINE_enum('type', None, _DATASET_TYPE, 'Dataset type')
_HEADER = """\
\"""{TODO}: Add a description here.\"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
_DATASET_DEFAULT_IMPORTS = """\
import tensorflow_datasets as tfds\n
"""
_DATASET_TEST_DEFAULTS_IMPORTS = """\
from tensorflow_datasets import testing
from tensorflow_datasets.{dataset_type} import {dataset_name}
"""
_CITATION = """\
# {TODO}: BibTeX citation
_CITATION = \"""
\"""\n
"""
_DESCRIPTION = """\
# {TODO}:
_DESCRIPTION = \"""
\"""\n
"""
_DATASET_DEFAULTS = """\
class {dataset_cls}(tfds.core.GeneratorBasedBuilder):
\"""{TODO}: Short description of my dataset.\"""
# {TODO}: Set up version.
VERSION = tfds.core.Version('0.1.0')
def _info(self):
# {TODO}: Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({{
# These are the features of your dataset like images, labels ...
}}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=(),
# Homepage of the dataset for documentation
urls=[],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# {TODO}: Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# {TODO}: Tune the number of shards such that each shard
# is < 4 GB.
num_shards=10,
# These kwargs will be passed to _generate_examples
gen_kwargs={{}},
),
]
def _generate_examples(self):
# {TODO}: Yields examples from the dataset
yield {{}}\n
"""
_DATASET_TEST_DEFAULTS = """\
class {dataset_cls}Test(testing.DatasetBuilderTestCase):
# {TODO}:
DATASET_CLASS = {dataset_name}.{dataset_cls}
SPLITS = {{
"train": 3, # Number of fake train example
"test": 1, # Number of fake test example
}}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({{'some_key': 'http://a.org/out.txt', ...}})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {{'some_key': 'output_file1.txt', ...}}
if __name__ == "__main__":
testing.test_main()
"""
_CHECKSUM_FILE = """\
# {TODO}: If your dataset downloads files, then the checksums will be
# automatically added here when running the download_and_prepare script
# with --register_checksums.
"""
def create_dataset_file(root_dir, data):
"""Create a new dataset from a template."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
context = (
_HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
+ _DESCRIPTION + _DATASET_DEFAULTS
)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def add_the_init(root_dir, data):
"""Append the new dataset file to the __init__.py."""
init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
context = (
'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
'{dataset_cls} # {TODO} Sort alphabetically\n'
)
with gfile.GFile(init_file.format(**data), 'a') as f:
f.write(context.format(**data))
def create_dataset_test_file(root_dir, data):
"""Create the test file associated with the dataset."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}_test.py')
context = (
_HEADER + _DATASET_TEST_DEFAULTS_IMPORTS +
_DATASET_TEST_DEFAULTS)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def create_fake_data(root_dir, data):
fake_examples_dir = os.path.join(
root_dir, 'testing', 'test_data', 'fake_examples', '{dataset_name}')
fake_examples_dir = fake_examples_dir.format(**data)
gfile.makedirs(fake_examples_dir)
fake_path = os.path.join(
fake_examples_dir, 'TODO-add_fake_data_in_this_directory.txt')
with gfile.GFile(fake_path, 'w') as f:
f.write('{TODO}: Add fake data in this directory'.format(**data))
def create_checksum_file(root_dir, data):
checksum_path = os.path.join(root_dir, 'url_checksums', '{dataset_name}.txt')
with gfile.GFile(checksum_path.format(**data), 'w') as f:
f.write(_CHECKSUM_FILE.format(**data))
def main(_):
dataset_name = FLAGS.dataset
dataset_type = FLAGS.type
root_dir = FLAGS.tfds_dir
if not root_dir:
root_dir = py_utils.tfds_dir()
data = dict(
dataset_name=dataset_name,
dataset_type=dataset_type,
dataset_cls=naming.snake_to_camelcase(dataset_name),
TODO='TODO({})'.format(dataset_name),
)
create_dataset_file(root_dir, data)
add_the_init(root_dir, data)
create_dataset_test_file(root_dir, data)
create_fake_data(root_dir, data)
create_checksum_file(root_dir, data)
print(
'Dataset generated in {}\n'
'You can start with searching TODO({}).\n'
'Please check this '
'`https://github.com/tensorflow/datasets/blob/master/docs/add_dataset.md`'
'for details.'.format(root_dir, dataset_name)
)
if __name__ == '__main__':
app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
9442061d1c5d28bd09a835998a2e53cfa07e48e2
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
|
219372a39b6b37e617c2e86dffba37acfa9ed26a
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class EventBoardsAwardsOverlayMeta(BaseDAAPIComponent):
def changeFilter(self, id):
self._printOverrideError('changeFilter')
def as_setHeaderS(self, data):
return self.flashObject.as_setHeader(data) if self._isDAAPIInited() else None
def as_setVehicleS(self, data):
return self.flashObject.as_setVehicle(data) if self._isDAAPIInited() else None
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
fe0bd2ceaf4493e021a319b6698c83f78f07f01e
|
dce2e3b11804fdb141feaa48299fa8cd751f0e5d
|
/2.两数相加.py
|
ad357aa9fedb490291ad6f56660209fd8858a61c
|
[] |
permissive
|
Cosmos-Break/leetcode
|
bf056efb6f3eb6448df7fb3fc4869992a3e7eb48
|
9f5f3d24e35b0a482ed40594ea665e9068324dcc
|
refs/heads/main
| 2023-06-26T04:29:25.135826
| 2021-07-19T12:29:29
| 2021-07-19T12:29:29
| 293,397,157
| 0
| 0
|
MIT
| 2020-09-07T01:55:39
| 2020-09-07T01:55:38
| null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
#
# @lc app=leetcode.cn id=2 lang=python3
#
# [2] Add Two Numbers (两数相加)
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(l1.val + l2.val)
cur = head
while l1.next or l2.next:
l1 = l1.next if l1.next else ListNode()
l2 = l2.next if l2.next else ListNode()
cur.next = ListNode(l1.val + l2.val + cur.val // 10)
cur.val = cur.val % 10
cur = cur.next
if cur.val >= 10:
cur.next = ListNode(cur.val // 10)
cur.val = cur.val % 10
return head
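# Worked example (hypothetical input): 342 + 465 = 807,
# i.e. l1: 2 -> 4 -> 3 and l2: 5 -> 6 -> 4 produce 7 -> 0 -> 8.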
# @lc code=end
|
[
"438854233@qq.com"
] |
438854233@qq.com
|
cffddf3d75c1e1ce6fff97c1711d232a66a1205a
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901100012/d07/mymodule/stats_word.py
|
02ebfde584c7e1c929c260f80257d92f7a50d67b
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 2,934
|
py
|
en_text='''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it's a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
# English word frequencies, sorted in descending order
def stats_text_en(text):
    eles = text.split()  # split the text on whitespace
    words = []
    punct = ".,-*!"  # punctuation characters to strip out
    for elet in eles:
        for ch in punct:
            elet = elet.replace(ch, ' ')
        # re-split: replacing punctuation with spaces can leave embedded
        # spaces, which would otherwise be counted as part of the word
        for w in elet.split():
            if w.isascii():
                words.append(w)
    print(words)
    print()
    counter = {}
    word_set = set(words)
    for word in word_set:
        counter[word] = words.count(word)
    print(counter)
    print()
    return sorted(counter.items(), key=lambda x: x[1], reverse=True)
# Chinese character frequencies, sorted in descending order
def stats_text_cn(text):
    cn_characters = []
    for character in text:
        if '\u4e00' <= character <= '\u9fa5':  # CJK unified ideographs range
            cn_characters.append(character)
    counter = {}
    cn_set = set(cn_characters)
    for word in cn_set:
        counter[word] = cn_characters.count(word)
    return sorted(counter.items(), key=lambda x: x[1], reverse=True)
# A Chinese rendering of the Zen, used as sample input for stats_text_cn
cn_text='''
Python之禅 by Tim Petters
美丽胜于丑陋
露骨比露骨好
简单总比复杂好
复杂比复杂好
平的比嵌套的好
稀疏比密密好
可读性很重要
特殊情况并不足以打破规则
尽管实用性胜过纯洁性
错误永远不应该悄悄过去
除非明确地沉默
面对橱柜,拒绝诱惑去猜测
应该有一种----最好只有一种----显而易见的方法来做到这一点
如果你不是荷兰人,那么这种方式在一开始可能并不明显
现在总比没有好
虽然从来没有比现在更好
如果实现很难解释,这是一个坏主意
如果实现容易解释,这是一个好主意
命名空间是一个很好的主意--让我们做更多的那些
'''
# Return the merged word-frequency statistics
def stats_text(text):
    return stats_text_en(text) + stats_text_cn(text)
#def stats_text(en_text,cn_text):
    #print("merged word-frequency statistics\n", stats_text_en(en_text) + stats_text_cn(cn_text))
if __name__=='__main__':
    en_result = stats_text_en(en_text)
    cn_result = stats_text_cn(cn_text)
    print("English word frequencies -->\n", en_result)
    print("Chinese character frequencies -->\n", cn_result)
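# Rough equivalent using collections.Counter from the standard library;
# shown for comparison only (punctuation is only stripped from word ends).
if __name__ == '__main__':
    from collections import Counter
    en_words = [w.strip(".,-*!") for w in en_text.split()]
    en_counter = Counter(w for w in en_words if w)
    cn_counter = Counter(ch for ch in cn_text if '\u4e00' <= ch <= '\u9fa5')
    print("Counter (en, top 5) -->", en_counter.most_common(5))
    print("Counter (cn, top 5) -->", cn_counter.most_common(5))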
|
[
"40155646+seven-tears@users.noreply.github.com"
] |
40155646+seven-tears@users.noreply.github.com
|
78ce0f7dcfae56d27b83005282d451663d29798d
|
a11d83fced34854664fac72e18d48fde6aa967e4
|
/0x02-python-import_modules/102-magic_calculation.py
|
0162921f0da03b752aab68a8227e86622fb4338e
|
[] |
no_license
|
afarizap/holbertonschool-higher_level_programming
|
ffe0bf1440726c952f4dd28b908eabc4ccb5225b
|
ad39e58f9cb20cba4b9e2c14075f216097588f47
|
refs/heads/master
| 2023-03-30T15:39:35.184484
| 2021-03-22T22:55:24
| 2021-03-22T22:55:24
| 259,437,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
#!/usr/bin/python3
def magic_calculation(a, b):
from magic_calculation_102 import add, sub
if a < b:
c = add(a, b)
for i in range(4, 6):
c = add(c, i)
return c
return sub(a, b)
if __name__ == '__main__':
import dis
dis.dis(magic_calculation)
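# Note: magic_calculation_102 is not included in this snippet. A minimal
# stand-in (an assumption, just to run the file locally) could be:
#   # magic_calculation_102.py
#   def add(a, b):
#       return a + b
#   def sub(a, b):
#       return a - b
# Keeping the import inside the function means the IMPORT_NAME opcodes show
# up in the function's own bytecode, which is what dis.dis prints here.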
|
[
"afarizap@gmail.com"
] |
afarizap@gmail.com
|
038a56b6976ac7b4e464d15987f277155fce3956
|
09cead98874a64d55b9e5c84b369d3523c890442
|
/py200703_python1/day13_py200814/output_1.py
|
5599e7b1ca607505095d6c9b8c9ce7737df9672c
|
[] |
no_license
|
edu-athensoft/stem1401python_student
|
f12b404d749286036a090e941c0268381ce558f8
|
baad017d4cef2994855b008a756758d7b5e119ec
|
refs/heads/master
| 2021-08-29T15:01:45.875136
| 2021-08-24T23:03:51
| 2021-08-24T23:03:51
| 210,029,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
"""
output format()
output formatting with placeholders
string.format()
string template
placeholder
"""
x = 1 + 3*4
y = 2 + 5*6
# not recommended: comma-separated arguments make print insert extra spaces
print('x=', x, ',', 'y=', y)
# recommended: str.format() fills the {} placeholders
print("x={} , y={}")  # no .format() call here, so the template prints literally
print("x={} , y={}".format(x, y))
print("x={},y={}".format(x, y))
print("x={}, y={}".format(x, y))
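# Modern alternative (Python 3.6+): f-strings embed the expressions
# directly, with no separate .format() call.
print(f"x={x}, y={y}")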
|
[
"lada314@gmail.com"
] |
lada314@gmail.com
|
82aed50d228c4f45ff91dae2b61a13a01bd7bd66
|
87eed57b13eba5fc10756e705821a2fc861a198e
|
/bfg9000/platforms/host.py
|
65e15de4fb2bd1e765b040415f4de4a8f23600cb
|
[
"BSD-3-Clause"
] |
permissive
|
jimporter/bfg9000
|
379ac2d9debb822defacc6c5e31d7b65468f0973
|
876966cc82b5520a7bddf88c2a57716c5579b5ba
|
refs/heads/master
| 2023-08-04T06:29:44.669098
| 2023-08-01T03:13:46
| 2023-08-01T03:13:46
| 31,297,691
| 87
| 21
|
BSD-3-Clause
| 2020-08-06T06:38:10
| 2015-02-25T04:47:12
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
from .core import _get_platform_info, _platform_info, Platform
class HostPlatform(Platform):
pass
def platform_info(*args, **kwargs):
return _platform_info('host', *args, **kwargs)
def from_json(value):
return _get_platform_info('host', value['genus'], value['species'],
value['arch'])
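# Usage sketch (hedged): these helpers are thin wrappers over the core
# module, so typical use is presumably
#   info = platform_info()   # detect the running host platform
#   clone = from_json({'genus': ..., 'species': ..., 'arch': ...})
# The 'genus'/'species'/'arch' keys come from the from_json body above;
# anything beyond that is an assumption about _platform_info's return type.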
|
[
"jporter@mozilla.com"
] |
jporter@mozilla.com
|
2f23cbd42dee001993bc154511cf225da4760ce6
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/caaa5d634f104a58a218ff663dfc926195e3acaf-<test_notify_sentry_app_and_plugin_with_same_slug>-bug.py
|
2bb03ab90a78aded197d894082a935887a555e3a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
from unittest.mock import MagicMock, patch
def test_notify_sentry_app_and_plugin_with_same_slug(self):
event = self.get_event()
self.create_sentry_app(organization=event.organization, name='Notify', is_alertable=True)
plugin = MagicMock()
plugin.is_enabled.return_value = True
plugin.should_notify.return_value = True
rule = self.get_rule(data={
'service': 'notify',
})
with patch('sentry.plugins.plugins.get') as get_plugin:
get_plugin.return_value = plugin
results = list(rule.after(event=event, state=self.get_state()))
        # 'is' on the two counts compares identity, not equality; they pass
        # only because CPython interns small integers ('==' is what is meant).
        assert (len(results) is 2)
        assert (plugin.should_notify.call_count is 1)
        assert (results[0].callback is notify_sentry_app)
        assert (results[1].callback is plugin.rule_notify)
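    # For reference, the paired '-fix' sample in this dataset presumably
    # swaps the integer identity checks for equality:
    #   assert len(results) == 2
    #   assert plugin.should_notify.call_count == 1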
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
2fedba0cbdc8a5b29280723b6940c2f71a7bda36
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03546/s442051818.py
|
0f991e8c96376c793a2bab1a38286d3a650be9e2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Floyd-Warshall on the 10x10 digit-rewrite cost matrix, then sum the
# cheapest cost of turning every written digit into 1.
h, _ = map(int, input().split())
r = range(10)
c = [[int(i) for i in input().split()] for _ in r]
for k in r:
    for i in r:
        for j in r:
            c[i][j] = min(c[i][j], c[i][k] + c[k][j])
a = [[int(i) for i in input().split()] for _ in range(h)]
print(sum(c[i][1] for i in sum(a, []) if i != -1))
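# Worked micro-example of the relaxation (illustrative numbers): if
# c[3][1] = 9 while c[3][5] = 2 and c[5][1] = 4, the triple loop lowers
# c[3][1] to 6, so every '3' on the wall is converted via 5 at cost 6.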
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
27c6f04530538b5ac8c71908ab91361f20ecc16b
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4013/codes/1671_1079.py
|
c3bb6e4c00135c7fac261439e8a41a85fc6fb9ce
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# When testing your solution, don't restrict yourself to the sample case.
from math import *
# Read the triangle sides a, b and c
a = float(input("Lado 1: "))
b = float(input("Lado 2: "))
c = float(input("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)
# All three sides must be positive ('and', not 'or': a single negative
# side must already invalidate the triangle)
if ((a > 0) and (b > 0) and (c > 0)):
    # Check that the measures satisfy the triangle inequality
    if ((a < b + c) and (b < a + c) and (c < a + b)):
        # Heron's formula: area from the semi-perimeter s
        s = (a + b + c) / 2.0
        area = sqrt(s * (s-a) * (s-b) * (s-c))
        area = round(area, 3)
        print("Area:", area)
    else:
        print("Area: invalida")
else:
    print("Area: invalida")
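# Quick sanity check (sketch): for the 3-4-5 right triangle, s = 6 and
# area = sqrt(6 * 3 * 2 * 1) = 6.0, so entering 3, 4, 5 prints "Area: 6.0".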
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|