Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 to 12.7k |
| extension | string | 142 classes |
| content | string | length 128 to 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
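A minimal sketch of reading rows with this schema, assuming the dump comes from a Hugging Face dataset; `org/dataset-name` is a placeholder, not the real identifier:

from datasets import load_dataset

# Stream the split so the full dump is never materialized in memory.
ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    break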
8e39496238ae7abc1a45eade812fa000e74ef3bb
|
11e81ec279ec17196bdbc75ce334305e95929b52
|
/8주차 이분탐색,그래프/파티/김승욱.py
|
526773d77fb4084503893cb8d8223ccb823b07d7
|
[] |
no_license
|
wheejoo/PythonCodeStudy
|
70992e1723d621fec933786dd2b3faa5d2518763
|
9e324f9deee1be044c07b64e7480c6bfac42876c
|
refs/heads/main
| 2023-07-28T23:11:19.335117
| 2021-10-03T11:37:09
| 2021-10-03T11:37:09
| 382,866,256
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
# I first tried going from x to the farthest vertex and then measuring the
# distance from there back to x, but the two legs of a round trip can differ,
# so that approach is wrong. Instead: compute the shortest distance from every
# vertex to x, then from x back to every vertex, add the two legs,
# and take the maximum.
import heapq
import sys

input = sys.stdin.readline

n, m, x = map(int, input().split())
INF = sys.maxsize
graph = [[] for _ in range(n + 1)]
maxx = 0
for i in range(m):
    a, b, c = map(int, input().split())
    graph[a].append((c, b))


def dijkstra(index):
    queue = []
    distance[index] = 0
    heapq.heappush(queue, (0, index))
    while queue:
        wei, now = heapq.heappop(queue)
        if distance[now] < wei:
            continue
        for w, nxt in graph[now]:
            next_wei = w + wei
            if next_wei < distance[nxt]:
                distance[nxt] = next_wei
                heapq.heappush(queue, (next_wei, nxt))


result = []
for i in range(1, n + 1):
    distance = [INF for _ in range(n + 1)]
    dijkstra(i)
    result.append(distance[x])

distance = [INF for _ in range(n + 1)]
dijkstra(x)
for i in range(len(result)):
    maxx = max(maxx, result[i] + distance[i + 1])
print(maxx)
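The script above runs Dijkstra from every vertex, which is O(V * E log V) overall. A common alternative for this kind of round-trip problem is to run Dijkstra just twice: once on the graph as given and once on a copy with every edge reversed. A minimal sketch under that assumption (`forward`, `reverse_graph`, and `dijkstra_from` are illustrative names, not from the original):

import heapq

def dijkstra_from(start, adj, n):
    # Single-source shortest paths; adj[u] holds (weight, v) pairs.
    dist = [float("inf")] * (n + 1)
    dist[start] = 0
    pq = [(0, start)]
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue
        for w, v in adj[u]:
            if d + w < dist[v]:
                dist[v] = d + w
                heapq.heappush(pq, (d + w, v))
    return dist

# forward[a] holds (c, b) for each edge a -> b; reverse_graph[b] holds (c, a).
# to_x = dijkstra_from(x, reverse_graph, n)   # shortest i -> x for every i
# from_x = dijkstra_from(x, forward, n)       # shortest x -> i for every i
# answer = max(to_x[i] + from_x[i] for i in range(1, n + 1))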
|
[
"rlatmd0829@naver.com"
] |
rlatmd0829@naver.com
|
ddc056b2b37e8b40bdb6264c89407197605144ef
|
566ce57c0a56a492895dc5b2e4b7ae1b49b301dd
|
/job/urls.py
|
2ca32cf10b8d4220993e331027bcaabdfa5591c6
|
[] |
no_license
|
ingafter60/job-portal
|
178e81c0f1d3fc4527242cf8c9bcc29c5d413ac9
|
6af2460927a29c914df74ea10172a731fcd528c6
|
refs/heads/master
| 2022-11-26T09:27:23.543390
| 2020-07-15T03:40:47
| 2020-07-15T03:40:47
| 279,142,881
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# job/urls.py
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('users/', include('users.urls')),
    path('', include('jobs.urls')),
    path('admin/', admin.site.urls),
]
|
[
"inyoman_gurnitha@yahoo.com"
] |
inyoman_gurnitha@yahoo.com
|
68adf7e197ced3869d0d0a33c74355024394305e
|
0beaf9d78d03100b2aebaaac38fb343d425f2b6a
|
/tests/regression/gsheet/test_chrome_gsheet_100r_number_chars_image.py
|
bc85a9757ae1b561082d29b318d73a1b600482a8
|
[] |
no_license
|
digitarald/Hasal
|
462fc044bb4a754c8d76c0bfb0df519f1786fdcc
|
c496afae6ec2e3743148f3a6288b78f120100513
|
refs/heads/master
| 2021-01-13T14:29:44.471037
| 2016-11-04T10:49:19
| 2016-11-04T10:49:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
from lib.perfBaseTest import PerfBaseTest


class TestSikuli(PerfBaseTest):

    def setUp(self):
        super(TestSikuli, self).setUp()

    def test_chrome_gsheet_100r_number_chars_image(self):
        self.test_url = self.env.GSHEET_TEST_URL_SPEC % self.env.TEST_TARGET_ID_100R_NUMBER_ENCHAR_IMAGE
        self.sikuli_status = self.sikuli.run_test(self.env.test_name, self.env.output_name, test_target=self.test_url, script_dp=self.env.test_script_py_dp)
|
[
"sho@mozilla.com"
] |
sho@mozilla.com
|
f89f2605057672e195be30599e8b17bd6843fffa
|
3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/container/gkeonprem/flags.py
|
65a11cb83a4f743b2ab1aef526415180dfa7ce70
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
twistedpair/google-cloud-sdk
|
37f04872cf1ab9c9ce5ec692d2201a93679827e3
|
1f9b424c40a87b46656fc9f5e2e9c81895c7e614
|
refs/heads/master
| 2023-08-18T18:42:59.622485
| 2023-08-15T00:00:00
| 2023-08-15T12:14:05
| 116,506,777
| 58
| 24
| null | 2022-02-14T22:01:53
| 2018-01-06T18:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for flags in commands for Anthos GKE On-Prem clusters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import parser_arguments
from googlecloudsdk.calliope.concepts import concepts
from googlecloudsdk.command_lib.util.concepts import concept_parsers
from googlecloudsdk.core import resources
def GetAdminClusterMembershipResource(membership_name):
  return resources.REGISTRY.ParseRelativeName(
      membership_name, collection='gkehub.projects.locations.memberships'
  )


def AdminClusterMembershipAttributeConfig():
  return concepts.ResourceParameterAttributeConfig(
      name='admin_cluster_membership',
      help_text=(
          'admin cluster membership of the {resource}, in the form of'
          ' projects/PROJECT/locations/global/memberships/MEMBERSHIP. '
      ),
  )


def LocationAttributeConfig():
  """Gets Google Cloud location resource attribute."""
  return concepts.ResourceParameterAttributeConfig(
      name='location',
      help_text='Google Cloud location for the {resource}.',
  )


def GetAdminClusterMembershipResourceSpec():
  return concepts.ResourceSpec(
      'gkehub.projects.locations.memberships',
      resource_name='admin_cluster_membership',
      membershipsId=AdminClusterMembershipAttributeConfig(),
      locationsId=LocationAttributeConfig(),
      projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
  )


def AddAdminClusterMembershipResourceArg(
    parser: parser_arguments.ArgumentInterceptor, positional=True, required=True
):
  """Adds a resource argument for a VMware admin cluster membership.

  Args:
    parser: The argparse parser to add the resource arg to.
    positional: bool, whether the argument is positional or not.
    required: bool, whether the argument is required or not.
  """
  name = (
      'admin_cluster_membership' if positional else '--admin-cluster-membership'
  )
  # TODO(b/227667209): Add fallthrough from cluster location when regional
  # membership is implemented.
  concept_parsers.ConceptParser.ForResource(
      name,
      GetAdminClusterMembershipResourceSpec(),
      'membership of the admin cluster. Membership can be the membership ID or'
      ' the full resource name.',
      required=required,
      flag_name_overrides={
          'location': '--admin-cluster-membership-location',
      },
  ).AddToParser(parser)
  parser.set_defaults(admin_cluster_membership_location='global')


def AddBinauthzEvaluationMode(parser):
  parser.add_argument(
      '--binauthz-evaluation-mode',
      choices=['DISABLED', 'PROJECT_SINGLETON_POLICY_ENFORCE'],
      default=None,
      help='Set Binary Authorization evaluation mode for this cluster.',
      hidden=True,
  )
|
[
"gcloud@google.com"
] |
gcloud@google.com
|
02cc868019621649a62b17392c2f8891804a69a6
|
696e35ccdf167c3f6b1a7f5458406d3bb81987c9
|
/mash/DEPS
|
681fd163d9adfc6508b88a7df485d548ae8c59da
|
[
"BSD-3-Clause"
] |
permissive
|
mgh3326/iridium-browser
|
064e91a5e37f4e8501ea971483bd1c76297261c3
|
e7de6a434d2659f02e94917be364a904a442d2d0
|
refs/heads/master
| 2023-03-30T16:18:27.391772
| 2019-04-24T02:14:32
| 2019-04-24T02:14:32
| 183,128,065
| 0
| 0
|
BSD-3-Clause
| 2019-11-30T06:06:02
| 2019-04-24T02:04:51
| null |
UTF-8
|
Python
| false
| false
| 279
|
include_rules = [
  "+ash/public",
  "+components/prefs",
  "+components/viz/common",
  "+mojo/converters",
  "+mojo/public",
  "+services/catalog/public",
  "+services/service_manager",
  "+services/ws/common",
  "+services/ws/public",
  "+third_party/skia/include",
  "+ui",
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
6a83f1a2c02e53428cf6622fc3cc28737370842f
|
c47340ae6bcac6002961cc2c6d2fecb353c1e502
|
/test/test_known_hosts.py
|
1c2c8b6867e85b61f8a35c508543f3fccb4a802f
|
[
"MIT"
] |
permissive
|
rafaeldelrey/controlm_py
|
6d9f56b8b6e72750f329d85b932ace6c41002cbd
|
ed1eb648d1d23e587321227217cbfcc5065535ab
|
refs/heads/main
| 2023-04-23T09:01:32.024725
| 2021-05-19T00:25:53
| 2021-05-19T00:25:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
# coding: utf-8

"""
    Control-M Services

    Provides access to BMC Control-M Services  # noqa: E501

    OpenAPI spec version: 9.20.115
    Contact: customer_support@bmc.com
    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

from __future__ import absolute_import

import unittest

import controlm_py
from controlm_py.models.known_hosts import KnownHosts  # noqa: E501
from controlm_py.rest import ApiException


class TestKnownHosts(unittest.TestCase):
    """KnownHosts unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testKnownHosts(self):
        """Test KnownHosts"""
        # FIXME: construct object with mandatory attributes with example values
        # model = controlm_py.models.known_hosts.KnownHosts()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"dcompane@gmail.com"
] |
dcompane@gmail.com
|
c0367113441a8db2c3f75729fd8ff004260f1624
|
fdbcbb95a34e05e015c3ece6a071b04915c70346
|
/code_interview/settings.py
|
abae8f9ec0cbf5a01bc576629629e91621ec3687
|
[
"Apache-2.0"
] |
permissive
|
nonbeing/codeinterview-backend
|
ed0e8f4d4dddbc25e235a817cfbc22e491c5c5c9
|
f44a592640a4a663daebef06561063c062c8acb0
|
refs/heads/master
| 2022-09-04T11:28:04.105984
| 2020-06-01T05:13:22
| 2020-06-01T05:13:22
| 268,828,769
| 1
| 0
|
Apache-2.0
| 2020-06-02T14:49:35
| 2020-06-02T14:49:35
| null |
UTF-8
|
Python
| false
| false
| 3,870
|
py
|
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = 'j%4s9n79np!^nrq3&h4=6a8r2c^ex9s)gg3s(zsx((o@qll2yj'

DEBUG = True

ALLOWED_HOSTS = []

# Celery config
CELERY_BROKER_URL = 'pyamqp://'
CELERY_RESULT_BACKEND = 'redis://'
CELERY_TASK_ROUTES = {
    # WARNING: room.tasks still need explicit queue name when chaining.
    # see rooms.signals.dispatch_run_task. Help?
    'rooms.tasks.*': {'queue': 'callbacks'},
    'tasks.sandbox.run_user_code': {'queue': 'sandbox'}
}

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'django_filters',
    'channels',
    'rooms',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'code_interview.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'code_interview.wsgi.application'
ASGI_APPLICATION = "code_interview.routing.application"

##### Channels-specific settings
redis_url = 'redis://'

# Channel layer definitions
# http://channels.readthedocs.io/en/latest/topics/channel_layers.html
CHANNEL_LAYERS = {
    # "default": {
    #     # This example app uses the Redis channel layer implementation channels_redis
    #     "BACKEND": "channels_redis.core.RedisChannelLayer",
    #     "CONFIG": {
    #         "hosts": [redis_url,],
    #     },
    # },
    "default": {
        "BACKEND": "channels.layers.InMemoryChannelLayer"
    }
}

CORS_ORIGIN_WHITELIST = [
    'http://localhost:8080'  # front-end
]

# REST framework
default_renderers = []
if DEBUG:
    default_renderers = ['rest_framework.renderers.BrowsableAPIRenderer']

REST_FRAMEWORK = {
    'DEFAULT_RENDERER_CLASSES': [
        'rest_framework.renderers.JSONRenderer',
    ] + default_renderers,
}

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"areebbeigh@gmail.com"
] |
areebbeigh@gmail.com
|
3e02711859000bf568d3065860caad09b02d70f6
|
8aa5b087176a5de567664a9377eda56641cda628
|
/binaryapi/ws/chanels/contract_update_history.py
|
3a6a664953571745700447201f3bdf44426e7027
|
[] |
no_license
|
pabloapdz/binaryapi
|
ce55d2f4d125b2725ae0c5b23b953521c792cf27
|
c2cca1ec144d10b885df7aeda03c7c63dbe673de
|
refs/heads/master
| 2022-11-19T16:10:04.567328
| 2020-07-23T03:00:40
| 2020-07-23T03:00:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,316
|
py
|
"""Module for Binary contract_update_history websocket chanel."""
from binaryapi.ws.chanels.base import Base
# https://developers.binary.com/api/#contract_update_history
class ContractUpdateHistory(Base):
"""Class for Binary contract_update_history websocket chanel."""
name = "contract_update_history"
def __call__(self, contract_id: int, limit=None, passthrough=None, req_id: int = None):
"""Method to send message to contract_update_history websocket chanel.
Update Contract History (request)
Request for contract update history.
:param contract_id: Internal unique contract identifier.
:type contract_id: int
:param limit: [Optional] Maximum number of historical updates to receive.
:type limit:
:param passthrough: [Optional] Used to pass data through the websocket, which may be retrieved via the `echo_req` output field.
:type passthrough:
:param req_id: [Optional] Used to map request to response.
:type req_id: int
"""
data = {
"contract_update_history": int(1),
"contract_id": int(contract_id)
}
if limit:
data['limit'] = limit
return self.send_websocket_request(self.name, data, passthrough=passthrough, req_id=req_id)
|
[
"mdn522@gmail.com"
] |
mdn522@gmail.com
|
2fba0ba36775b47980f0366230afbcb1da18fd79
|
749efa5eb1ea53a5372b31832ed74d85dc15f641
|
/temporal_graph/structure_network/structure_graph.py
|
75ee3f2cae8ea41f57d59fa4cfb7acf6a2211fdc
|
[] |
no_license
|
summukhe/TemporalGraph
|
ac8f7be103620b5d53aeae6e6f462d2f74cd5f8c
|
722fedaf737950ac32d4dabd830afbd241a03a2c
|
refs/heads/master
| 2020-05-05T03:17:05.918359
| 2019-06-01T05:40:49
| 2019-06-01T05:40:49
| 179,666,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,220
|
py
|
import numpy as np
from copy import deepcopy
from temporal_graph.network_analysis import GeometricGraph3d
from temporal_graph.spatial_ds import *
from temporal_graph.pdb_processor import *
from temporal_graph.force_field import *

__version__ = "1.0"
__all__ = ['contact_graph',
           'potential_contact_graph',
           'contact_energy_graph']


def contact_graph(pdb_structure,
                  cutoff=12,
                  potential='charmm',
                  weight_normalizer=FFNormalizer()):
    assert isinstance(pdb_structure, PDBStructure) or \
        isinstance(pdb_structure, CaTrace)
    if isinstance(cutoff, DistanceCutoff):
        cutoff = cutoff.cutoff
    assert cutoff > 0
    assert potential in ['energy', 'charmm', 'mj']
    if potential == 'energy':
        if isinstance(pdb_structure, CaTrace):
            return potential_contact_graph(pdb_structure,
                                           cutoff=DistanceCutoff(def_cutoff=cutoff),
                                           potential='charmm')
        else:
            g = contact_energy_graph(pdb_structure,
                                     contact_radius=cutoff,
                                     energy_score=weight_normalizer)
    else:
        if isinstance(pdb_structure, PDBStructure):
            structure = pdb_to_catrace(pdb_structure)
        else:
            structure = deepcopy(pdb_structure)
        g = potential_contact_graph(structure,
                                    cutoff=DistanceCutoff(def_cutoff=cutoff),
                                    potential=potential)
    return g


def potential_contact_graph(ca_trace, cutoff=DistanceCutoff(), potential='mj'):
    assert isinstance(ca_trace, CaTrace)
    assert isinstance(cutoff, DistanceCutoff)
    assert potential in ['mj', 'charmm']
    res_ids = ca_trace.residue_ids
    c_graph = GeometricGraph3d(directed=False)
    for r in res_ids:
        amino_key = ca_trace.key(r)
        amino_crd = Coordinate3d(*ca_trace.xyz(r))
        c_graph.add_vertex(amino_key, attribute=amino_crd)
    for ri in res_ids:
        amino_i = ca_trace.get_amino(ri)
        x_i, y_i, z_i = ca_trace.xyz(ri)
        for rj in res_ids:
            if ri < rj:
                amino_j = ca_trace.get_amino(rj)
                x_j, y_j, z_j = ca_trace.xyz(rj)
                c = cutoff(amino_i, amino_j)
                d = np.sqrt((x_i-x_j)**2 + (y_i-y_j)**2 + (z_i-z_j)**2)
                if d <= c:
                    p = get_pair_potential(amino_i, amino_j, d, pot_type=potential)
                    c_graph.add_edge('%s%d' % (amino_i, ri),
                                     '%s%d' % (amino_j, rj),
                                     weight=p)
    return c_graph


def contact_energy_graph(pdb_struct,
                         contact_radius=12,
                         epsilon=1.,
                         elec_only=False,
                         summed=True,
                         energy_score=FFNormalizer()):
    assert isinstance(pdb_struct, PDBStructure)
    assert isinstance(energy_score, FFNormalizer)
    ca_trace = pdb_to_catrace(pdb_struct)
    residues = ca_trace.residue_ids
    x_lst, y_lst, z_lst = [], [], []
    for r in residues:
        x, y, z = ca_trace.xyz(r)
        x_lst.append(x)
        y_lst.append(y)
        z_lst.append(z)
    grid = Grid3D(max_coord=Coordinate3d(np.max(x_lst), np.max(y_lst), np.max(z_lst)),
                  min_coord=Coordinate3d(np.min(x_lst), np.min(y_lst), np.min(z_lst)),
                  spacing=2)
    for r in residues:
        grid.register_obj(r, Coordinate3d(*ca_trace.xyz(r)))
    neighbors = dict()
    for r1 in residues:
        neighbors[r1] = {r2: 0 for r2 in grid.neighbors(r1, contact_radius)}
    ff = FFManager()
    for r1 in neighbors:
        residue_name1 = pdb_struct.residue_name(r1)
        atom_names1 = pdb_struct.atom_names(r1)
        for r2 in neighbors[r1]:
            residue_name2 = pdb_struct.residue_name(r2)
            atom_names2 = pdb_struct.atom_names(r2)
            for atom1 in atom_names1:
                for atom2 in atom_names2:
                    d = distance(Coordinate3d(*pdb_struct.xyz(r1, atom1)),
                                 Coordinate3d(*pdb_struct.xyz(r2, atom2)))
                    neighbors[r1][r2] += ff.energy(residue_name1,
                                                   atom1,
                                                   residue_name2,
                                                   atom2,
                                                   distance=d,
                                                   epsilon=epsilon,
                                                   elec_only=elec_only,
                                                   summed=summed)
    c_graph = GeometricGraph3d(directed=False)
    for r in residues:
        c_graph.add_vertex(pdb_struct.key(r),
                           attribute=Coordinate3d(*pdb_struct.xyz(r, 'CA')))
    for r1 in neighbors:
        for r2 in neighbors[r1]:
            c_graph.add_edge(pdb_struct.key(r1),
                             pdb_struct.key(r2),
                             weight=energy_score(neighbors[r1][r2]))
    return c_graph
|
[
"sumant199@gmail.com"
] |
sumant199@gmail.com
|
58e4e79d5aff5d675c44d475f8eb78c71f373b12
|
20d9130fdc21756c4f8fe255583922352f5c5762
|
/src/DIRAC/DataManagementSystem/Service/LcgFileCatalogProxyHandler.py
|
ba7ea68222b2cc3e09dd20556e2c46165ff46f1b
|
[] |
no_license
|
bopopescu/bes3-jinr
|
095314e43f41f08bd48b248fe3ca627a5c009f58
|
fdfd852c92a56192b8ee9970b66f0136e6e0afff
|
refs/heads/master
| 2022-11-26T06:01:36.718508
| 2014-03-17T06:03:50
| 2014-03-17T06:03:50
| 282,113,617
| 0
| 0
| null | 2020-07-24T03:30:10
| 2020-07-24T03:30:09
| null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
########################################################################
# $HeadURL $
# File: LcgFileCatalogProxyHandler.py
########################################################################
""" :mod: LcgFileCatalogProxyHandler
    ================================

    .. module: LcgFileCatalogProxyHandler
    :synopsis: This is a service which represents a DISET proxy to the LCG File Catalog
"""
## imports
import os
from types import StringType, DictType, TupleType
## from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager

__RCSID__ = "6bbf469 (2013-01-31 23:24:59 +0100) Andrei Tsaregorodtsev <atsareg@in2p3.fr>"

def initializeLcgFileCatalogProxyHandler( _serviceInfo ):
  """ service initialisation """
  return S_OK()

class LcgFileCatalogProxyHandler( RequestHandler ):
  """
  .. class:: LcgFileCatalogProxyHandler
  """

  types_callProxyMethod = [ StringType, TupleType, DictType ]

  def export_callProxyMethod( self, name, args, kargs ):
    """ A generic method to call methods of the Storage Element. """
    res = pythonCall( 120, self.__proxyWrapper, name, args, kargs )
    if res['OK']:
      return res['Value']
    else:
      return res

  def __proxyWrapper( self, name, args, kwargs ):
    """ The wrapper will obtain the client proxy and set it up in the environment.
        The required functionality is then executed and returned to the client.

    :param self: self reference
    :param str name: fcn name
    :param tuple args: fcn args
    :param dict kwargs: fcn keyword args
    """
    res = self.__prepareSecurityDetails()
    if not res['OK']:
      return res
    try:
      fileCatalog = FileCatalog( ['LcgFileCatalogCombined'] )
      method = getattr( fileCatalog, name )
    except AttributeError, error:
      errStr = "LcgFileCatalogProxyHandler.__proxyWrapper: No method named %s" % name
      gLogger.exception( errStr, name, error )
      return S_ERROR( errStr )
    try:
      result = method( *args, **kwargs )
      return result
    except Exception, error:
      errStr = "LcgFileCatalogProxyHandler.__proxyWrapper: Exception while performing %s" % name
      gLogger.exception( errStr, name, error )
      return S_ERROR( errStr )

  def __prepareSecurityDetails( self ):
    """ Obtains the connection details for the client """
    try:
      credDict = self.getRemoteCredentials()
      clientDN = credDict[ 'DN' ]
      clientUsername = credDict['username']
      clientGroup = credDict['group']
      gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
      res = gProxyManager.downloadVOMSProxy( clientDN, clientGroup )
      if not res['OK']:
        return res
      chain = res['Value']
      proxyBase = "/tmp/proxies"
      if not os.path.exists( proxyBase ):
        os.makedirs( proxyBase )
      proxyLocation = "%s/%s-%s" % ( proxyBase, clientUsername, clientGroup )
      gLogger.debug( "Obtained proxy chain, dumping to %s." % proxyLocation )
      res = gProxyManager.dumpProxyToFile( chain, proxyLocation )
      if not res['OK']:
        return res
      gLogger.debug( "Updating environment." )
      os.environ['X509_USER_PROXY'] = res['Value']
      return res
    except Exception, error:
      exStr = "__getConnectionDetails: Failed to get client connection details."
      gLogger.exception( exStr, '', error )
      return S_ERROR( exStr )
|
[
"gavelock@gmail.com"
] |
gavelock@gmail.com
|
ff091989b08d7360d35edcde8d929dbfa9826630
|
185960fa68ac9ff6377eff50e3afc3900827c264
|
/one_model_n_data.py
|
29e8ab68e888ca6f56013c0a4b34873b52af055a
|
[] |
no_license
|
enjoysport2022/Incident_Detection
|
0a9158e163ff5c74c7d2bb1ebae6fad8adb53de4
|
87b6848ae3f038e7eab9c0c40c0ea19efa27208c
|
refs/heads/master
| 2023-02-22T11:29:00.385996
| 2015-12-07T03:21:07
| 2015-12-07T03:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,730
|
py
|
# -*- coding: UTF-8 -*-
# Running this program requires numpy, scipy and sklearn.
# Required libraries:
import time
import requests
import conf
start = time.clock()
import csv
from sklearn.svm import SVC
import numpy as np
from sklearn import preprocessing, neighbors
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier

####################################################################################
# Read feature values
# featureList
allElectronicsData = open(r'F:\TrafficFlow\pems\7606\7606bu.csv', 'rb')  # ****bu.csv feature file
reader = csv.reader(allElectronicsData)
headers = reader.next()
temp1 = []
# temp2=[]
for row in reader:
    c1 = []
    c2 = []
    for i in range(1, 9):  # read columns 2 through 9
        b = 0
        b = float(row[i]) - float(row[i+8])
        c1.append(b)
    temp1.append(c1)
    # for i in range(9,17):  # read columns 10 through 16
    #     b=0
    #     b=float(row[i])
    #     c2.append(b)
    # temp2.append(c2)
# print("temp1:")
# print(temp1)
# print("temp2:")
# print(temp2)
n = len(temp1)
# print(n)
featureList1 = []
for i in range(0, n-4):
    k1 = []
    k1 = temp1[i]
    k2 = temp1[i+1]
    k3 = temp1[i+2]
    k4 = temp1[i+3]
    k5 = temp1[i+4]
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    featureList1.append(k1)
featureList1 = np.array(featureList1)
# print("featureList1:")
# temp1=np.array(temp1)
# print(featureList1.shape)
# print(len(featureList))
# print("temp2:")
# temp2=np.array(temp2)
# print(temp2)
# featureList=temp1-temp2
# print(featureList.shape)
# f=open('featurelist.txt','w')
# f.write(str(featureList))
# f.close()

####################################################################################
# Read feature values
# featureList
allElectronicsData = open(r'F:\TrafficFlow\pems\6080\6080bu.csv', 'rb')  # ****bu.csv feature file
reader = csv.reader(allElectronicsData)
headers = reader.next()
temp1 = []
# temp2=[]
for row in reader:
    c1 = []
    c2 = []
    for i in range(1, 9):  # read columns 2 through 9
        b = 0
        b = float(row[i]) - float(row[i+8])
        c1.append(b)
    temp1.append(c1)
    # for i in range(9,17):  # read columns 10 through 16
    #     b=0
    #     b=float(row[i])
    #     c2.append(b)
    # temp2.append(c2)
# print("temp1:")
# print(temp1)
# print("temp2:")
# print(temp2)
n = len(temp1)
# print(n)
featureList2 = []
for i in range(0, n-4):
    k1 = []
    k1 = temp1[i]
    k2 = temp1[i+1]
    k3 = temp1[i+2]
    k4 = temp1[i+3]
    k5 = temp1[i+4]
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    featureList2.append(k1)
featureList2 = np.array(featureList2)
# print("featureList2:")
# temp1=np.array(temp1)
# print(featureList2.shape)
# print(len(featureList))
# print("temp2:")
# temp2=np.array(temp2)
# print(temp2)
# featureList=temp1-temp2
# print(featureList.shape)
# f=open('featurelist.txt','w')
# f.write(str(featureList))
# f.close()

featureList = np.vstack((featureList1, featureList2))
# print(featureList.shape)

##########################################################################################
# Read labels
# labelList
incidentData = open(r'F:\TrafficFlow\pems\7606\7606label.csv', 'rb')  # ****label.csv label file
label = csv.reader(incidentData)
headers = label.next()
# print(headers)
labelList1 = []
for row in label:
    labelList1.append(row[len(row)-1])
# print(labelList)
lb = preprocessing.LabelBinarizer()
dummyY1 = lb.fit_transform(labelList1)
# dummyY=np.array(dummyY)
# print(dummyY)
# print(len(dummyY))
# print("dummyY:"+str(dummyY))

##########################################################################################
# Read labels
# labelList
incidentData = open(r'F:\TrafficFlow\pems\6080\6080label.csv', 'rb')  # ****label.csv label file
label = csv.reader(incidentData)
headers = label.next()
# print(headers)
labelList2 = []
for row in label:
    labelList2.append(row[len(row)-1])
# print(labelList)
lb = preprocessing.LabelBinarizer()
dummyY2 = lb.fit_transform(labelList2)
# dummyY=np.array(dummyY)
# print(dummyY)
# print(len(dummyY))
# print("dummyY:"+str(dummyY))

dummyY = np.vstack((dummyY1, dummyY2))
# print(dummyY.shape)

# Split the data into training and test samples:
X_train, X_test, y_train, y_test = train_test_split(featureList, dummyY, test_size=0.1)
print("Fitting the classifier to the training set---->")
# Support vector machine model:
# clf=SVC(kernel='rbf',C=1e3,gamma=0.001)
# kernel, C and gamma are tunable
# Nearest-neighbors algorithm:
n_neighbors = 15
# n_neighbors is tunable
weights = 'uniform'
# weights='distance'
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
# Random forest classifier:
# clf = RandomForestClassifier(n_estimators=10)
# Decision tree algorithm:
# clf = DecisionTreeClassifier()

##################################################
# Train the model:
clf.fit(X_train, y_train)

########################################################
####################################################
# Test the model:
print("Predicting test set--->")
predictions = []
for x in range(len(X_test)):
    result = clf.predict(X_test[x])
    predictions.append(result)
    print('>predicted= '+repr(result)+',actual='+repr(y_test[x][-1]))
# Confusion matrix
y_pred = clf.predict(X_test)
conm = confusion_matrix(y_test, y_pred, labels=range(2))
print(conm)
# Accuracy:
a = float(conm[0][0])
b = float(conm[0][1])
c = float(conm[1][0])
d = float(conm[1][1])
DR = (a/(a+c))
DR = DR*100
FAR = (b/(a+b))
FAR = FAR*100
print('Detection rate: '+repr(DR)+'%')
print('False alarm rate: '+repr(FAR)+'%')
# print(accuracy)

############################################################
######################################################
# Read new sample data for prediction:
# p_featureList
allElectronicsData = open(r'F:\TrafficFlow\pems\3245\3245bu.csv', 'rb')  # ****bu.csv feature file
reader = csv.reader(allElectronicsData)
headers = reader.next()
temp1 = []
# temp2=[]
for row in reader:
    c1 = []
    c2 = []
    for i in range(1, 9):  # read columns 2 through 9
        b = 0
        b = float(row[i]) - float(row[i+8])
        c1.append(b)
    temp1.append(c1)
n = len(temp1)
# print(n)
p_featureList = []
for i in range(0, n-4):
    k1 = []
    k1 = temp1[i]
    k2 = temp1[i+1]
    k3 = temp1[i+2]
    k4 = temp1[i+3]
    k5 = temp1[i+4]
    k1.extend(k2)
    k1.extend(k3)
    k1.extend(k4)
    k1.extend(k5)
    p_featureList.append(k1)
print('predict------------->')
kk = clf.predict(k1)
if kk == 0:
    r = requests.post(conf.dz, data={"key": "value", "key": "value", "key": "value"})
print(kk)
p = clf.predict(p_featureList)

###########################################################
print("all together:")
print(p)
end = time.clock()
print "time: %f s" % (end - start)
|
[
"946691288@qq.com"
] |
946691288@qq.com
|
280d992fa5b09c52dc7e19f51da135e40cdd64ec
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/ABC-C/112probC3.py
|
c3bbb316c6ac065245c203087a439022efbf8c8b
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
N = int(input())
P = [list(map(int, input().split())) for _ in range(N)]

for cx in range(101):
    for cy in range(101):
        H = -1
        ok = True
        for x, y, h in P:
            if h == 0:
                continue
            Hi = abs(x-cx) + abs(y-cy) + h
            if H == -1:
                H = Hi
                continue
            if Hi != H:
                ok = False
                break
        if not ok:
            continue
        for x, y, h in P:
            if h != max(H-abs(cx-x)-abs(cy-y), 0):
                ok = False
        if ok:
            print(cx, cy, H)
            break
    if ok:
        break
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
388b1376ecae66dea21f6eb31224085c51938a87
|
426f216e3d38d2030d337c8be6463cc4cd7af6c3
|
/day08/monitor/server/conf/services/generic.py
|
abc781d82c426c6dae6cf7b1f236a2e0e65bc7f2
|
[
"Apache-2.0"
] |
permissive
|
zhangyage/Python-oldboy
|
c7b43801935fc9e08e973ee0b852daa8e8667fb7
|
a95c1b465929e2be641e425fcb5e15b366800831
|
refs/heads/master
| 2021-01-23T02:59:37.574638
| 2019-10-27T05:35:58
| 2019-10-27T05:35:58
| 86,039,220
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Generic module:
defines the basic monitoring items (services)
'''

class BaseService(object):
    def __init__(self):
        self.name = 'BaseService'
        self.interval = 300  # monitoring interval
        self.last_time = 0
        self.plugin_name = 'your_plugin'  # monitoring plugin
        self.triggers = {}  # monitoring thresholds
|
[
"zhangyage2015@163.com"
] |
zhangyage2015@163.com
|
3af48aef1784b977a278aaf5bba88fe616c0c1b5
|
cc64a1dfa57d4b667129efdadb97506bafce73f0
|
/letsencrypt-nginx/setup.py
|
a37b8222b05d005279f96320447da3dd50ac18f6
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ryanwinchester-forks/letsencrypt
|
83e027e3f4e78c5b4fad5fc3cc5676d2cde1f8e9
|
9bff9c0edf0f1fa28684332c17729473aa42ebca
|
refs/heads/master
| 2021-01-18T02:00:13.041678
| 2015-10-20T16:43:55
| 2015-10-20T16:43:55
| 44,626,314
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import sys

from setuptools import setup
from setuptools import find_packages

version = '0.1.0.dev0'

install_requires = [
    'acme=={0}'.format(version),
    'letsencrypt=={0}'.format(version),
    'PyOpenSSL',
    'pyparsing>=1.5.5',  # Python3 support; perhaps unnecessary?
    'setuptools',  # pkg_resources
    'zope.interface',
]

if sys.version_info < (2, 7):
    install_requires.append('mock<1.1.0')
else:
    install_requires.append('mock')

setup(
    name='letsencrypt-nginx',
    version=version,
    description="Nginx plugin for Let's Encrypt client",
    url='https://github.com/letsencrypt/letsencrypt',
    author="Let's Encrypt Project",
    author_email='client-dev@letsencrypt.org',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Plugins',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    entry_points={
        'letsencrypt.plugins': [
            'nginx = letsencrypt_nginx.configurator:NginxConfigurator',
        ],
    },
)
|
[
"jakub@warmuz.org"
] |
jakub@warmuz.org
|
ae580464cc83075118e66de8f98e34b16370cc90
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/fractions_20200802121517.py
|
abb8224836def842553093f1a7ab4d84e93a4fd7
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
def fractions(numerator, denominator):
    if denominator == 0:
        return str(numerator)
    number = numerator / denominator
    if numerator % denominator == 0:
        return str(numerator // denominator)
    newStr = str(number)
    print(newStr)
    largeStr = newStr.split(".")
    if len(largeStr[1]) > 1:
        return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
    return newStr


def frac(numerator, denominator):
    res = []
    if numerator == 0:
        return "0"
    if denominator == 0:
        return "undefined"
    if (numerator < 0 and denominator > 0) or (numerator > 0 and denominator < 0):
        res.append("-")
    numerator = abs(numerator)
    denominator = abs(denominator)
    if numerator % denominator == 0:
        return str(numerator // denominator)
    else:
        # this means it has a remainder
        res.append(str(numerator // denominator))
        res.append(".")
        newDict = {}
        rem = numerator % denominator
        while rem != 0:
            print(newDict)
            if rem in newDict:
                res.insert(newDict[rem], "(")
                res.append(")")
                break
            newDict[rem] = len(res)
            rem *= 10
            res_part = rem // denominator
            res.append(str(res_part))
            rem = rem % denominator
        return "".join(res)


print(frac(4, 333))
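A few spot checks, hand-traced against the remainder-cycle logic in `frac` above (the dictionary records where each remainder first appeared, so a repeated remainder marks the start of the repeating block):

assert frac(1, 2) == "0.5"
assert frac(1, 3) == "0.(3)"
assert frac(4, 333) == "0.(012)"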
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
aed016ed1a15167c9ddb335b87695a86c7f128b7
|
0738d9f85b98a7e30d7f33b4fe94ceb58f44b123
|
/Algorithm/Backtracking/Python/N_queen_problem.py
|
69015ddf23b8dd022f18b5baa90d480ccbfc7420
|
[] |
no_license
|
arifkhan1990/Competitive-Programming
|
e51606b6bafc0671e8799df4297529b27eb6488e
|
9b8ca6e8f2ec4c74ea314e8d80657ac97489a315
|
refs/heads/master
| 2022-10-28T19:31:16.818123
| 2022-10-14T21:32:51
| 2022-10-14T21:32:51
| 199,027,360
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 778
|
py
|
def solveNQ(n):
    col = set()
    posDiag = set()  # r+c
    negDiag = set()  # r-c

    ans = []
    board = [['0'] * n for _ in range(n)]

    def backtracking(r):
        if r == n:
            copy = ["".join(row) for row in board]
            ans.append(copy)
            return
        for c in range(n):
            if c in col or (r+c) in posDiag or (r-c) in negDiag:
                continue
            col.add(c)
            posDiag.add(r+c)
            negDiag.add(r-c)
            board[r][c] = "Q"
            backtracking(r+1)
            col.remove(c)
            posDiag.remove(r+c)
            negDiag.remove(r-c)
            board[r][c] = "0"

    backtracking(0)
    return ans


print(solveNQ(4))
|
[
"arifkhanshubro@gmail.com"
] |
arifkhanshubro@gmail.com
|
2e559d799618df295505bb747e712d4de41097c4
|
09efb7c148e82c22ce6cc7a17b5140aa03aa6e55
|
/env/lib/python3.6/site-packages/pandas/tests/tslibs/test_parse_iso8601.py
|
a6e7aee46b485b6b524f363351f1bb011b8b0b0e
|
[
"MIT"
] |
permissive
|
harryturr/harryturr_garmin_dashboard
|
53071a23b267116e1945ae93d36e2a978c411261
|
734e04f8257f9f84f2553efeb7e73920e35aadc9
|
refs/heads/master
| 2023-01-19T22:10:57.374029
| 2020-01-29T10:47:56
| 2020-01-29T10:47:56
| 235,609,069
| 4
| 0
|
MIT
| 2023-01-05T05:51:27
| 2020-01-22T16:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
from datetime import datetime

import pytest

from pandas._libs import tslib


@pytest.mark.parametrize(
    "date_str, exp",
    [
        ("2011-01-02", datetime(2011, 1, 2)),
        ("2011-1-2", datetime(2011, 1, 2)),
        ("2011-01", datetime(2011, 1, 1)),
        ("2011-1", datetime(2011, 1, 1)),
        ("2011 01 02", datetime(2011, 1, 2)),
        ("2011.01.02", datetime(2011, 1, 2)),
        ("2011/01/02", datetime(2011, 1, 2)),
        ("2011\\01\\02", datetime(2011, 1, 2)),
        ("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)),
        ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)),
    ],
)
def test_parsers_iso8601(date_str, exp):
    # see gh-12060
    #
    # Test only the ISO parser - flexibility to
    # different separators and leading zero's.
    actual = tslib._test_parse_iso8601(date_str)
    assert actual == exp


@pytest.mark.parametrize(
    "date_str",
    [
        "2011-01/02",
        "2011=11=11",
        "201401",
        "201111",
        "200101",
        # Mixed separated and unseparated.
        "2005-0101",
        "200501-01",
        "20010101 12:3456",
        "20010101 1234:56",
        # HHMMSS must have two digits in
        # each component if unseparated.
        "20010101 1",
        "20010101 123",
        "20010101 12345",
        "20010101 12345Z",
    ],
)
def test_parsers_iso8601_invalid(date_str):
    msg = 'Error parsing datetime string "{s}"'.format(s=date_str)

    with pytest.raises(ValueError, match=msg):
        tslib._test_parse_iso8601(date_str)


def test_parsers_iso8601_invalid_offset_invalid():
    date_str = "2001-01-01 12-34-56"
    msg = "Timezone hours offset out of range " 'in datetime string "{s}"'.format(
        s=date_str
    )

    with pytest.raises(ValueError, match=msg):
        tslib._test_parse_iso8601(date_str)


def test_parsers_iso8601_leading_space():
    # GH#25895 make sure isoparser doesn't overflow with long input
    date_str, expected = ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30))
    actual = tslib._test_parse_iso8601(" " * 200 + date_str)
    assert actual == expected
|
[
"griffin.harrisonn@gmail.com"
] |
griffin.harrisonn@gmail.com
|
ea41839b6a0a26d593362635192d222831c3f1b4
|
18ad97292b34a679b8dea8a85090541c5bbf6174
|
/candlebox.py
|
72e9d35d1aae4302ce3baca36368789c9c7073a3
|
[] |
no_license
|
Jyotirm0y/kattis
|
b941044e39dc36d169450480fc33fd33bd2e0f8e
|
2b9c1819ba29419bbea3db2e8ad7851155abbb3a
|
refs/heads/master
| 2023-05-31T21:11:38.350044
| 2021-06-12T08:21:47
| 2021-06-12T08:21:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
import math
d = int(input())
r = int(input())
t = int(input())
x = math.ceil(math.sqrt(2*(r+6)))
y = x - d
while (x*(x+1)//2)-6 + (y*(y+1)//2)-3 != r + t:
    x -= 1
    y -= 1
print(r-((x*(x+1)//2)-6))
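My reading of the arithmetic above (an assumption, not stated in the original): with T(k) = k*(k+1)//2, one person's true candle count at age x is T(x) - 6 and the other's at age y = x - d is T(y) - 3, so the answer is how many candles in the first box are not its owner's. A brute-force cross-check of the same equation:

def brute_force(d, r, t):
    # Search ages directly instead of starting from the sqrt estimate;
    # the total T(x)-6 + T(x-d)-3 is strictly increasing in x, so the
    # first match is the only one.
    T = lambda k: k * (k + 1) // 2
    for x in range(d + 1, 100000):
        if (T(x) - 6) + (T(x - d) - 3) == r + t:
            return r - (T(x) - 6)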
|
[
"ainunnajib@gmail.com"
] |
ainunnajib@gmail.com
|
9684ed8877dc1d5f50b40a958207c71a45c2687a
|
259cc507d97bfeff84d21de3a0ab56640676a9eb
|
/venv1/Lib/site-packages/tensorflow/contrib/eager/python/datasets.py
|
f9b6f54e4482a424f8f775e0fbbf659cfb0a31e7
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Soum-Soum/Tensorflow_Face_Finder
|
c3ef71b6f718f6720b80f8760d28b6ca6e11e6d2
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
refs/heads/master
| 2020-03-22T20:31:39.606644
| 2018-07-12T13:47:56
| 2018-07-12T13:47:56
| 140,607,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,168
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iteration over tf.data.Datasets when eager execution is enabled."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops

_uid_counter = 0
_uid_lock = threading.Lock()


def _generate_shared_name(prefix):
  with _uid_lock:
    global _uid_counter
    uid = _uid_counter
    _uid_counter += 1
  return "{}{}".format(prefix, uid)


class Iterator(object):
  """An iterator producing tf.Tensor objects from a tf.data.Dataset."""

  def __init__(self, dataset):
    """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator object
    was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      RuntimeError: When invoked without eager execution enabled.
    """
    if not context.executing_eagerly():
      raise RuntimeError(
          "{} objects can only be used when eager execution is enabled, use "
          "tf.data.Dataset.make_initializable_iterator or "
          "tf.data.Dataset.make_one_shot_iterator for graph construction".
          format(type(self)))
    with ops.device("/device:CPU:0"):
      ds_variant = dataset._as_variant_tensor()  # pylint: disable=protected-access
      self._output_classes = dataset.output_classes
      self._output_types = dataset.output_types
      self._output_shapes = dataset.output_shapes
      self._flat_output_types = nest.flatten(
          sparse.as_dense_types(self._output_types, self._output_classes))
      self._flat_output_shapes = nest.flatten(
          sparse.as_dense_shapes(self._output_shapes, self._output_classes))
      self._resource = gen_dataset_ops.iterator(
          shared_name="",
          container=_generate_shared_name("eageriterator"),
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      gen_dataset_ops.make_iterator(ds_variant, self._resource)
      # Delete the resource when this object is deleted
      self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._resource, handle_device="/device:CPU:0")
    self._device = context.context().device_name
    self._buffer_resource_handle = None
    if not context.context().device_spec.device_type:
      is_remote_device = False
    else:
      is_remote_device = context.context().device_spec.device_type != "CPU"
    if is_remote_device:
      with ops.device("/device:CPU:0"):
        iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
            self._resource)

      @function.Defun(dtypes.string)
      def remote_fn(h):
        remote_iterator = iterator_ops.Iterator.from_string_handle(
            h, self._output_types, self._output_shapes)
        return remote_iterator.get_next()

      remote_fn.add_to_graph(None)
      target = constant_op.constant("/device:CPU:0")
      with ops.device(self._device):
        self._buffer_resource_handle = prefetching_ops.function_buffering_resource(  # pylint: disable=line-too-long
            string_arg=iter_string_handle,
            f=remote_fn,
            target_device=target,
            buffer_size=10,
            thread_pool_size=1,
            container="",
            shared_name=_generate_shared_name("function_buffer_resource"))
        self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter(  # pylint: disable=line-too-long
            handle=self._buffer_resource_handle,
            handle_device=self._device)

  def __iter__(self):
    return self

  def __next__(self):  # For Python 3 compatibility
    return self.next()

  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    with ops.device(self._device):
      if self._buffer_resource_handle is not None:
        ret = prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffer_resource_handle,
            output_types=self._flat_output_types)
      else:
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
        # because in eager mode this code will run synchronously on the calling
        # thread. Therefore we do not need to make a defensive context switch
        # to a background thread, and can achieve a small constant performance
        # boost by invoking the iterator synchronously.
        ret = gen_dataset_ops.iterator_get_next_sync(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      return sparse.deserialize_sparse_tensors(
          nest.pack_sequence_as(self._output_types, ret), self._output_types,
          self._output_shapes, self._output_classes)

  def next(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    try:
      return self._next_internal()
    except errors.OutOfRangeError:
      raise StopIteration

  @property
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.

    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return self._output_classes

  @property
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return self._output_shapes

  @property
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return self._output_types

  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation. Currently unused.

    Returns:
      A nested structure of `tf.Tensor` objects.

    Raises:
      `tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
    """
    del name
    return self._next_internal()
|
[
"pes.carceller@gmail.com"
] |
pes.carceller@gmail.com
|
66358a8cd5f8c3683912d15e44b84dc84ab92762
|
0b2ffe7092e4008c73fdbf8791d107a2ce583c5d
|
/terraform_builder/release.py
|
e459a6e688e3c9d51565d16f56827ef2e2a73d4d
|
[
"MIT"
] |
permissive
|
mrlesmithjr/terraform-builder
|
1f960741ca5a37a862e2498b8ad81a31dffc13b2
|
08ed71333e988682ce50c6ef865fdd8ba27de395
|
refs/heads/master
| 2022-10-16T03:05:34.351002
| 2020-06-19T03:53:53
| 2020-06-19T03:53:53
| 248,327,103
| 8
| 0
|
MIT
| 2022-09-30T20:02:28
| 2020-03-18T19:43:30
|
Python
|
UTF-8
|
Python
| false
| false
| 160
|
py
|
"""terraform_builder/release.py"""
# Version tracking for package.
__author__ = 'Larry Smith Jr.'
__version__ = '0.1.0'
__package_name__ = 'terraform_builder'
|
[
"mrlesmithjr@gmail.com"
] |
mrlesmithjr@gmail.com
|
bcabfd47909ebc6b12e84806dd30400748c428f8
|
60aae6fe961b6fadcbefa0154976012e84d29e6a
|
/molotov/tests/example5.py
|
4228ca09bfcc30279829df5c3a827295e5b6c979
|
[
"Apache-2.0"
] |
permissive
|
tarekziade/molotov
|
33aefd27e08be35b0f010a4d125f287e798a24c2
|
27f7599d9d04d86964878f3cac78e91c8b231d61
|
refs/heads/main
| 2023-08-03T12:07:54.036876
| 2023-07-18T06:58:50
| 2023-07-18T06:58:50
| 35,025,578
| 45
| 4
|
Apache-2.0
| 2023-07-20T16:33:12
| 2015-05-04T09:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 534
|
py
|
"""
This Molotov script demonstrates how to hook events.
"""
import molotov
@molotov.events()
async def print_request(event, **info):
if event == "sending_request":
print("=>")
@molotov.events()
async def print_response(event, **info):
if event == "response_received":
print("<=")
@molotov.scenario(100)
async def scenario_one(session):
async with session.get("http://localhost:8080") as resp:
res = await resp.json()
assert res["result"] == "OK"
assert resp.status == 200
|
[
"tarek@ziade.org"
] |
tarek@ziade.org
|
1d65e5ff19c250a211d18eccd6cf2e6535690ff3
|
17c0eeede746d8dc164d27ef1f1eea3167aa0484
|
/array/215.kth_largest.py
|
0fce094deb14b5fdfde0e6bb5179c82d2c0c7d95
|
[] |
no_license
|
mirzasaad/leetcode-solutions
|
a778c70e6ea5a94f9874fb90ec24d16d877ca5f2
|
53a3eb91411d5b732c91cbe7dafe44ed0ea7335f
|
refs/heads/master
| 2022-12-11T11:45:57.830633
| 2020-09-03T16:23:01
| 2020-09-03T16:23:01
| 264,158,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
from random import randint
from typing import List


# LeetCode-style solution methods (normally defined inside a Solution class,
# hence the `self` parameter).
def findKthLargest(self, nums: List[int], k: int) -> int:
    def partition(A, lo, hi):
        pivot = A[hi]
        left = lo
        for i in range(lo, hi):
            if A[i] < pivot:
                A[left], A[i] = A[i], A[left]
                left += 1
        A[left], A[hi] = A[hi], A[left]
        return left

    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        index = partition(nums, lo, hi)
        if index == len(nums) - k:
            return nums[index]
        elif index < len(nums) - k:
            lo = index + 1
        else:
            hi = index - 1
    return -1


# random pivot
def findKthLargest(self, nums, k):
    """
    :type nums: List[int]
    :type k: int
    :rtype: int
    """
    def partition(l, r):
        ri = randint(l, r)
        nums[r], nums[ri] = nums[ri], nums[r]
        for i, v in enumerate(nums[l: r+1], l):
            if v >= nums[r]:
                nums[l], nums[i] = nums[i], nums[l]
                l += 1
        return l - 1

    l, r, k = 0, len(nums) - 1, k - 1
    while True:
        pos = partition(l, r)
        if pos < k:
            l = pos + 1
        elif pos > k:
            r = pos - 1
        else:
            return nums[pos]
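A standard-library cross-check for the quickselect solutions above (quickselect runs in expected O(n), worst case O(n^2); `heapq.nlargest` is O(n log k)):

import heapq

def kth_largest_check(nums, k):
    # Reference answer for validating either implementation above.
    return heapq.nlargest(k, nums)[-1]

assert kth_largest_check([3, 2, 1, 5, 6, 4], 2) == 5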
|
[
"saaad.mirxa@gmail.com"
] |
saaad.mirxa@gmail.com
|
7ab2d68fe32381cc7e34646cb4a849d9d429ff60
|
9981e61fd113fac5af9825b78b57617e001160e0
|
/test/test/spiders/dmoz_spider.py
|
a88147643fae839c825207112f340fcb96388b17
|
[] |
no_license
|
yfjelley/scrapy
|
9191ee94e4ed3732287bd040b5d2d2c16476ec12
|
dc9f1dc1e76603ea623e0ab9608084f0aedba802
|
refs/heads/master
| 2020-03-29T12:02:38.981328
| 2014-09-30T08:29:28
| 2014-09-30T08:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
from scrapy.spider import Spider


class DmozSpider(Spider):
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        filename = response.url.split("/")[-2]
        open(filename, 'wb').write(response.body)
|
[
"549636719@qq.com"
] |
549636719@qq.com
|
38b7548ce83a179d7b6f7597294f350513664810
|
d7516481db51c31ae1690892a4bb19246c831ac4
|
/examples/reinforcement_learning_examples/dueling_ddqn_cartpole.py
|
4ebc65acb4014e727a5930cfdab6c1a1c501ad0b
|
[
"MIT"
] |
permissive
|
gyunt/polyaxon
|
0f7c3b026635ad62d28316bf68fc806c51fc4ccb
|
0c99cca9ae9a2a4e957febe1970bf6508225f292
|
refs/heads/master
| 2021-09-01T07:02:51.453682
| 2017-12-20T23:02:37
| 2017-12-20T23:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from polyaxon_schemas.losses import HuberLossConfig
from polyaxon_schemas.optimizers import SGDConfig
from polyaxon_schemas.rl.explorations import DecayExplorationConfig

import polyaxon as plx
import tensorflow as tf


def main(*args):
    """Creates a DQN agent for the OpenAI Gym CartPole environment."""
    env = plx.envs.GymEnvironment('CartPole-v0')

    def graph_fn(mode, features):
        return plx.layers.Dense(units=512)(features['state'])

    def model_fn(features, labels, mode):
        model = plx.models.DDQNModel(
            mode,
            graph_fn=graph_fn,
            loss=HuberLossConfig(),
            num_states=env.num_states,
            num_actions=env.num_actions,
            optimizer=SGDConfig(learning_rate=0.01),
            exploration_config=DecayExplorationConfig(),
            target_update_frequency=10,
            summaries='all')
        return model(features, labels)

    memory = plx.rl.memories.Memory()
    estimator = plx.estimators.Agent(
        model_fn=model_fn, memory=memory, model_dir="/tmp/polyaxon_logs/ddqn_cartpole")

    estimator.train(env)


if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run()
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
29ad398e603eb50a181b953682529ba792218ca0
|
a8750439f200e4efc11715df797489f30e9828c6
|
/HackerEarth/lcs_3.py
|
dbc637ba24752327cc743e3495ecfbebd8558e33
|
[] |
no_license
|
rajlath/rkl_codes
|
f657174305dc85c3fa07a6fff1c7c31cfe6e2f89
|
d4bcee3df2f501349feed7a26ef9828573aff873
|
refs/heads/master
| 2023-02-21T10:16:35.800612
| 2021-01-27T11:43:34
| 2021-01-27T11:43:34
| 110,989,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
def longest_increasing_subsequence(d):
    'Return one of the L.I.S. of list d'
    l = []
    for i in range(len(d)):
        l.append(max([l[j] for j in range(i) if l[j][-1] < d[i]] or [[]], key=len)
                 + [d[i]])
    return max(l, key=len)


if __name__ == '__main__':
    for d in [[4, 2, 6, 3, 8]]:
        print('a L.I.S. of %s is %s' % (d, longest_increasing_subsequence(d)))
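The construction above is quadratic, with an extra factor for the list copies. When only the length is needed, the usual O(n log n) patience-sorting variant is a common alternative; a sketch (`lis_length` is not part of the original file):

from bisect import bisect_left

def lis_length(d):
    # tails[i] is the smallest tail of any increasing subsequence of length i+1.
    tails = []
    for x in d:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert lis_length([4, 2, 6, 3, 8]) == 3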
|
[
"raj.lath@gmail.com"
] |
raj.lath@gmail.com
|
8b80f1b6b8ed8568ac76e0489c295bc5f828cb2f
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/codewar/_CodeWars-Python-master/solutions/All_that_is_open_must_be_closed.py
|
d1806dce261468de3df26a597cc6e422df9c3fe6
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
"""
All that is open must be closed...
http://www.codewars.com/kata/55679d644c58e2df2a00009c/train/python
"""
___ is_balanced(source, caps
count # dict
stack # list
___ c __ source:
__ c __ caps:
i caps.i.. c)
__ i % 2 __ 0:
__ caps[i] __ caps[i + 1]:
__ caps[i] __ count:
count[caps[i]] += 1
____
count[caps[i]] 1
____
stack.a..(c)
____
__ caps[i - 1] __ caps[i]:
__ caps[i] __ count:
count[caps[i]] += 1
____
count[caps[i]] 1
____
__ l..(stack) __ 0 o. stack.p.. ) !_ caps[i - 1]:
r.. F..
r.. (l..(stack) __ 0) a.. ((s..([v ___ k, v __ count.i..] % 2 __ 0)
print(is_balanced("(Sensei says yes!)", "()") __ T..)
print(is_balanced("(Sensei says no!", "()") __ F..)
print(is_balanced("(Sensei [says] yes!)", "()[]") __ T..)
print(is_balanced("(Sensei [says) no!]", "()[]") __ F..)
print(is_balanced("Sensei says -yes-!", "--") __ T..)
print(is_balanced("Sensei -says no!", "--") __ F..)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
7bb259878309e08221c4eed095a0919d1ca02770
|
314cf05e7acdfb2b83bf4a56de4ee65310bd28f2
|
/tests/outcomes/plot/bar/universal_tests/data_simple/matplotlib_x_column_string_plt.py
|
c460d965e0851ce752035f391d3b6331c3e2f2a1
|
[] |
no_license
|
hyperskill/hs-test-python
|
9f0201904cb68f3eb35275bb0c3b9bb70164a1e7
|
260313395d0534d148738e031753eb8f60de2e13
|
refs/heads/master
| 2023-05-10T17:49:26.400853
| 2023-04-26T11:49:52
| 2023-04-26T11:49:52
| 214,279,373
| 20
| 7
| null | 2023-04-26T11:49:53
| 2019-10-10T20:28:03
|
Python
|
UTF-8
|
Python
| false
| false
| 343
|
py
|
def plot():
    try:
        import pandas as pd
        import numpy as np
        import matplotlib.pyplot as plt
    except ModuleNotFoundError:
        return

    df = pd.DataFrame(np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
                      columns=['one', 'two'])
    plt.bar(df['one'], df['two'])
    plt.show()


plot()
|
[
"aaaaaa2493@yandex.ru"
] |
aaaaaa2493@yandex.ru
|
73af4638f94ae74093fb40dec034e500c1ac23d7
|
5b3bd326998606188b45a7870852643eda024a97
|
/utils/test_utils_test.py
|
9ad7254bc0a5ca873d88901f4d4b83e24ee085cc
|
[] |
no_license
|
KuznetsovIllya/clearml_od_toy
|
31556d0726d15a054c1c18317c361d97801381a4
|
92f15f04a023d4e0e165a250fddc3129144913d0
|
refs/heads/main
| 2023-04-11T05:55:56.248478
| 2021-04-14T15:59:40
| 2021-04-14T15:59:40
| 357,827,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:27f15ba16a39d8a04be71ec29510f423d102dac19cdfa5d0e7f09e8e55f55b4b
size 3328
|
[
"illia.kuznietsov@modern-expo.com"
] |
illia.kuznietsov@modern-expo.com
|
883988c4f6204ea5858a3976c048f2751e24b4f0
|
0c66e605e6e4129b09ea14dbb6aa353d18aaa027
|
/diventi/feedbacks/migrations/0006_auto_20181007_2139.py
|
b9a3a92a2f8c2db58839655d1c35c38662b0de2a
|
[
"Apache-2.0"
] |
permissive
|
flavoi/diventi
|
58fbc8c947f387cbcc1ce607878a59a6f2b72313
|
c0b1efe2baa3ff816d6ee9a8e86623f297973ded
|
refs/heads/master
| 2023-07-20T09:32:35.897661
| 2023-07-11T19:44:26
| 2023-07-11T19:44:26
| 102,959,477
| 2
| 1
|
Apache-2.0
| 2023-02-08T01:03:17
| 2017-09-09T14:10:51
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
# Generated by Django 2.0.8 on 2018-10-07 19:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feedbacks', '0005_auto_20181007_1951'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='survey',
),
migrations.AddField(
model_name='question',
name='survey',
field=models.ManyToManyField(blank=True, null=True, related_name='questions', to='feedbacks.Survey'),
),
]
|
[
"flavius476@gmail.com"
] |
flavius476@gmail.com
|
dd362d139002a4217fdd1735daa4f34396aee423
|
efceab61588936151e49bf9311fe6f949cdd81c8
|
/context_utils/regression_analysis/contexts/feature_weights.py
|
55efd2312a5d416e950262951e999744467a7568
|
[] |
no_license
|
GiovanniCassani/distributional_bootstrapping
|
c74b4dddcb91a083c8cc0c55263228bc1acff54c
|
324176f659835a29cfd6859febb570e99c1bad31
|
refs/heads/master
| 2021-03-19T11:45:38.364181
| 2018-09-10T15:24:37
| 2018-09-10T15:24:37
| 113,855,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
__author__ = 'GCassani'
"""Compute feature weights for the relevant contexts given an input co-occurrence vector space"""
import os
from collections import defaultdict
def compute_feature_weights(training_space, output_file):
"""
:param training_space: the path to a file containing the co-occurrence count matrix derived from the training
corpus
:param output_file: the path to a file where the weight of each context will be written
:return weights: a dictionary mapping each context to 4 strings, each indicating one of the possible
weighting schemes: gain ratio ('gr'), information gain ('ig'), X-square ('x2'), and shared
                            variance ('sv'). Each string maps to the weight of the corresponding context under the
weighting scheme at hand. All scores are stored for later processing.
"""
weights = defaultdict(dict)
with open(training_space, 'r') as t:
first_line = t.readline()
n = len(first_line.strip().split("\t")) + 100
train_space = ' -f ' + training_space
out_file = ' -W ' + output_file
timbl_cmd = 'timbl -mN:I1 -N' + str(n) + train_space + out_file
print(timbl_cmd)
os.system(timbl_cmd)
with open(output_file, "r") as f:
gr, ig, x2, sv = [0, 0, 0, 0]
for line in f:
if line.strip() == '# gr':
gr, ig, x2, sv = [1, 0, 0, 0]
elif line.strip() == '# ig':
gr, ig, x2, sv = [0, 1, 0, 0]
elif line.strip() == '# x2':
gr, ig, x2, sv = [0, 0, 1, 0]
elif line.strip() == '# sv':
gr, ig, x2, sv = [0, 0, 0, 1]
if any([gr, ig, x2, sv]):
try:
feature, weight = line.strip().split("\t")
if gr:
weights[int(feature) - 2]['gr'] = float(weight)
elif ig:
weights[int(feature) - 2]['ig'] = float(weight)
elif x2:
weights[int(feature) - 2]['x2'] = float(weight)
elif sv:
weights[int(feature) - 2]['sv'] = float(weight)
except ValueError:
pass
return weights
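# Illustrative shape of the return value (numbers are hypothetical):
#   weights = {0: {'gr': 0.12, 'ig': 0.08, 'x2': 153.2, 'sv': 0.04}, 1: {...}, ...}
# keys are context indices (TiMBL feature number minus 2) and each inner dict
# holds the four weighting schemes parsed from the TiMBL output file.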
|
[
"cassani.giovanni@gmail.com"
] |
cassani.giovanni@gmail.com
|
7ed3e17232f1e18ac0f0b32b4082aea884541ced
|
a1e6c25d701eacb0dd893802f7d3db316768dbc4
|
/featureExtrator/feature_extractor.py
|
e461e884f6bcae25d455741367a851a7a26da379
|
[] |
no_license
|
zhengchengyy/BBDataProcessing
|
c04f5053266881116f1ab764041f7fd4901561ab
|
5d4f98e4b4b7d7e98db2677d00b618d2bb6a74c8
|
refs/heads/master
| 2021-07-20T22:31:28.894304
| 2020-05-17T14:04:30
| 2020-05-17T14:04:30
| 165,455,028
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,672
|
py
|
'''
Modified from the original feature_extractor: data that used to be processed by sample count
is now processed by time window. New and changed code is marked with paired ------ lines.
Feature extraction uses the observer pattern: the feature extractor (Subject) depends on several
feature-extraction modules (Observers). The extractor registers the modules, and when its state
changes (i.e. new data arrives) it notifies every module to update its state (compute new feature values).
'''
from abc import ABC, abstractmethod
from queue import Queue
class ProcessModule(ABC):
"""Observer的抽象类,表示处理数据的模块。
每一个继承ProcessModule的类都包含一个存储数据的队列queue。
继承该类需要重写processFullQueue方法。
"""
# 改变为时间存储后,构造函数改变为如下
# ------
def __init__(self, interval=1, rate=0.5, size=0):
"""构造方法中,参数中的interval表示每次特征提取的时间跨度,
rate表示间隔多长时间进行一次特征提取,
上述参数的单位均为秒
"""
if (isinstance(interval, float) or isinstance(interval, int)) \
and (isinstance(rate, float) or isinstance(rate, int)):
if interval <= 0 or rate <= 0 or rate > interval:
raise ModuleProcessException("Illegal rate or interval.")
else:
raise ModuleProcessException("Interval and rate both should be float or int.")
self.interval = interval
self.rate = rate
self.size = size
        # The sampling rate and the analysis window may both change, so no maximum queue length is set
self.queue = Queue(maxsize=0)
super(ProcessModule, self).__init__()
# ------
@abstractmethod
def processFullQueue(self):
"""处理满队列中的所有元素,通常为统计值。需要返回值。"""
pass
def process(self, value):
"""Observer的update(),接收一个字典值{time:t,volt:v},将其添加到队列中,
如果队列中头尾的数据达到了interval定义的时间差,则进行处理,
并在处理后移除rate定义的时间差的数据。
"""
self.queue.put(value)
self.size += 1
if value['time'] - self.queue.queue[0]['time'] >= self.interval:
result = self.processFullQueue()
t = value['time']
while (value['time'] - self.queue.queue[0]['time']) > (self.interval - self.rate):
self.queue.get()
self.size -= 1
return result
class FeatureExtractor:
"""Subject提取特征值的类,该类需要配合ProcessModule使用。
FeatureExtractor中有一个用于存储ProcessModule的列表,使用register函数可以向列表中添加ProcessModule。
当FeatureExtractor接受到一个数据的时候,会让该列表中的所有PrcessModule接收这个数据并分别处理。
"""
def __init__(self):
self.modules = []
def register(self, processModule):
"""添加一个ProcessModule"""
self.modules.append(processModule)
def process(self, value):
"""Subject的notify(),接收一个value值,让self.modules中的每一个ProcessModule处理该值"""
result = {}
for module in self.modules:
output = module.process(value)
if (output != None):
result[module.FEATURE_NAME] = output
return result
def clear(self):
"""清理所有的ProcessModule"""
for module in self.modules:
module.clear()
class ModuleProcessException(Exception):
pass
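# A minimal usage sketch (illustrative addition; MeanModule is a hypothetical Observer
# that averages the 'volt' values currently held in its time window):
if __name__ == "__main__":
    class MeanModule(ProcessModule):
        FEATURE_NAME = "mean"

        def processFullQueue(self):
            items = list(self.queue.queue)
            return sum(item['volt'] for item in items) / len(items)

    extractor = FeatureExtractor()
    extractor.register(MeanModule(interval=1, rate=0.5))
    for t in range(20):
        features = extractor.process({'time': t * 0.1, 'volt': t * 0.01})
        if features:
            print(features)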
|
[
"zhengchengyy@qq.com"
] |
zhengchengyy@qq.com
|
c6d92dc424e95b6378c43eb99d934375630c943d
|
e1fe66628d58e66b25d910c6d2f173a7dfa74301
|
/1. Edge AI Fundamentals with OpenVINO/4. Inference Engine/Workspaces/3. Integrate into an Edge App/solution/app-custom.py
|
48e61b9d9b66578fc97dd81f97cebbf7727c6f5e
|
[
"MIT"
] |
permissive
|
mmphego/Udacity-EdgeAI
|
7c5443c4f19eaaf4f6eb44739f7e6413ba26e106
|
25af22f85772adc25ff9d5a59ba8a33a1e5551cd
|
refs/heads/master
| 2022-12-31T15:39:07.077926
| 2020-10-20T11:45:58
| 2020-10-20T11:45:58
| 258,950,438
| 8
| 1
|
MIT
| 2020-05-25T14:38:30
| 2020-04-26T05:48:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,277
|
py
|
import argparse
import cv2
from inference import Network
INPUT_STREAM = "test_video.mp4"
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
def get_args():
"""
Gets the arguments from the command line.
"""
parser = argparse.ArgumentParser("Run inference on an input video")
# -- Create the descriptions for the commands
m_desc = "The location of the model XML file"
i_desc = "The location of the input file"
d_desc = "The device name, if not 'CPU'"
### TODO: Add additional arguments and descriptions for:
### 1) Different confidence thresholds used to draw bounding boxes
### 2) The user choosing the color of the bounding boxes
c_desc = "The color of the bounding boxes to draw; RED, GREEN or BLUE"
ct_desc = "The confidence threshold to use with the bounding boxes"
# -- Add required and optional groups
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
optional = parser.add_argument_group("optional arguments")
# -- Create the arguments
required.add_argument("-m", help=m_desc, required=True)
optional.add_argument("-i", help=i_desc, default=INPUT_STREAM)
optional.add_argument("-d", help=d_desc, default="CPU")
optional.add_argument("-c", help=c_desc, default="BLUE")
optional.add_argument("-ct", help=ct_desc, default=0.5)
args = parser.parse_args()
return args
def convert_color(color_string):
"""
Get the BGR value of the desired bounding box color.
Defaults to Blue if an invalid color is given.
"""
colors = {"BLUE": (255, 0, 0), "GREEN": (0, 255, 0), "RED": (0, 0, 255)}
out_color = colors.get(color_string)
if out_color:
return out_color
else:
return colors["BLUE"]
def draw_boxes(frame, result, args, width, height):
"""
Draw bounding boxes onto the frame.
"""
for box in result[0][0]: # Output shape is 1x1x100x7
conf = box[2]
if conf >= args.ct:
xmin = int(box[3] * width)
ymin = int(box[4] * height)
xmax = int(box[5] * width)
ymax = int(box[6] * height)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), args.c, 1)
return frame
def infer_on_video(args):
# Convert the args for color and confidence
args.c = convert_color(args.c)
args.ct = float(args.ct)
### TODO: Initialize the Inference Engine
plugin = Network()
### TODO: Load the network model into the IE
plugin.load_model(args.m, args.d, CPU_EXTENSION)
net_input_shape = plugin.get_input_shape()
# Get and open video capture
cap = cv2.VideoCapture(args.i)
cap.open(args.i)
# Grab the shape of the input
width = int(cap.get(3))
height = int(cap.get(4))
# Create a video writer for the output video
# The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
# on Mac, and `0x00000021` on Linux
out = cv2.VideoWriter("out.mp4", 0x00000021, 30, (width, height))
# Process frames until the video ends, or process is exited
while cap.isOpened():
# Read the next frame
flag, frame = cap.read()
if not flag:
break
key_pressed = cv2.waitKey(60)
### TODO: Pre-process the frame
p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
p_frame = p_frame.transpose((2, 0, 1))
p_frame = p_frame.reshape(1, *p_frame.shape)
### TODO: Perform inference on the frame
plugin.async_inference(p_frame)
### TODO: Get the output of inference
if plugin.wait() == 0:
result = plugin.extract_output()
### TODO: Update the frame to include detected bounding boxes
frame = draw_boxes(frame, result, args, width, height)
# Write out the frame
out.write(frame)
# Break if escape key pressed
if key_pressed == 27:
break
# Release the out writer, capture, and destroy any OpenCV windows
out.release()
cap.release()
cv2.destroyAllWindows()
def main():
args = get_args()
infer_on_video(args)
if __name__ == "__main__":
main()
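# Example invocation (model path and video name are placeholders):
#   python app-custom.py -m frozen_inference_graph.xml -i test_video.mp4 -c GREEN -ct 0.6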
|
[
"mpho112@gmail.com"
] |
mpho112@gmail.com
|
3cd47a7d7dd674e5aa584bc0ac1049be6c8bdf48
|
b01646abacbef23719926477e9e1dfb42ac0f6a9
|
/Rebrov/training/673K/673K_O088N12_Rebrov_lib_and_all_families/input.py
|
d9cd3c9eddcac08e009b6e6733f837aefa7982fc
|
[] |
no_license
|
Tingchenlee/Test
|
41b0fd782f4f611d2b93fda6b63e70956881db33
|
37313c3f594f94cdc64c35e17afed4ae32d3e4e6
|
refs/heads/master
| 2023-06-02T05:38:32.884356
| 2021-06-10T11:59:02
| 2021-06-10T11:59:02
| 349,764,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
# Microkinetic model for ammonia oxidation
# E.V. Rebrov, M.H.J.M. de Croon, J.C. Schouten
# Development of the kinetic model of platinum catalyzed ammonia oxidation in a microreactor
# Chemical Engineering Journal 90 (2002) 61–76
database(
thermoLibraries=['surfaceThermoPt111', 'primaryThermoLibrary', 'thermo_DFT_CCSDTF12_BAC','DFT_QCI_thermo', 'GRI-Mech3.0-N', 'NitrogenCurran', 'primaryNS', 'CHON'],
reactionLibraries = ['Surface/CPOX_Pt/Deutschmann2006','Surface/Nitrogen','Surface/Rebrov_Pt111'],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = ['surface','default'],
kineticsEstimator = 'rate rules',
)
catalystProperties(
metal = 'Pt111'
)
generatedSpeciesConstraints(
allowed=['input species','seed mechanisms','reaction libraries'],
maximumNitrogenAtoms=2,
maximumOxygenAtoms=3,
)
# List of species
species(
label='X',
reactive=True,
structure=adjacencyList("1 X u0"),
)
species(
label='O2',
reactive=True,
structure=adjacencyList(
"""
multiplicity 3
1 O u1 p2 c0 {2,S}
2 O u1 p2 c0 {1,S}
"""),
)
species(
label='H2O',
reactive=True,
structure=SMILES("O"),
)
species(
label='N2',
reactive=True,
structure=SMILES("N#N"),
)
species(
label='NO',
reactive=True,
structure=adjacencyList(
"""
multiplicity 2
1 N u1 p1 c0 {2,D}
2 O u0 p2 c0 {1,D}
"""),
)
species(
label='NH3',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p1 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
"""),
)
species(
label='N2O',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p2 c-1 {2,D}
2 N u0 p0 c+1 {1,D} {3,D}
3 O u0 p2 c0 {2,D}
"""),
)
species(
label='He',
reactive=False,
structure=adjacencyList(
"""
1 He u0 p1 c0
"""),
)
#-------------
#temperature from 523-673K
surfaceReactor(
temperature=(673,'K'),
initialPressure=(1.0, 'bar'),
nSims=12,
initialGasMoleFractions={
"NH3": 0.12,
"O2": 0.88,
"He": 0.0,
"NO":0.0,
"H2O":0.0,
"N2O":0.0,
"N2":0.0,
},
initialSurfaceCoverages={
"X": 1.0,
},
surfaceVolumeRatio=(2.8571428e4, 'm^-1'), #A/V = 280µm*π*9mm/140µm*140µm*π*9mm = 2.8571428e4^m-1
terminationConversion = {"NH3":0.99,},
#terminationTime=(10, 's'),
)
simulator( #default for surface reaction atol=1e-18,rtol=1e-12
atol=1e-18, #absolute tolerance are 1e-15 to 1e-25
rtol=1e-12, #relative tolerance is usually 1e-4 to 1e-8
)
model(
toleranceKeepInEdge=0.01, #recommend setting toleranceKeepInEdge to not be larger than 10% of toleranceMoveToCore
toleranceMoveToCore=0.1,
toleranceInterruptSimulation=1e8, #This value should be set to be equal to toleranceMoveToCore unless the advanced pruning feature is desired
#to always enable pruning should be set as a high value, e.g. 1e8
maximumEdgeSpecies=5000, #set up less than 200000
minCoreSizeForPrune=50, #default value
#toleranceThermoKeepSpeciesInEdge=0.5,
minSpeciesExistIterationsForPrune=2, #default value = 2 iteration
)
options(
units='si',
saveRestartPeriod=None,
generateOutputHTML=True,
generatePlots=True,
saveEdgeSpecies=True,
saveSimulationProfiles=True,
)
|
[
"lee.ting@northeastern.edu"
] |
lee.ting@northeastern.edu
|
3071ce5a9c9fdebf28175ab07ecb3091a84ba165
|
edfa045d12b8efb65de20261ff80a86160298e44
|
/checkout/migrations/0003_orderitem_order.py
|
97c09cc1a4f31557128d3510eed3d113a9d9b7d2
|
[
"MIT"
] |
permissive
|
yusif763/Unistore-pro
|
1d559a89bb71f3db8b5d1e89df64ed7113f00f2a
|
41ad0fa209c79a201d3f6a7aa68ec0ace707dcad
|
refs/heads/main
| 2023-04-24T02:50:30.085011
| 2021-04-29T11:00:11
| 2021-04-29T11:00:11
| 362,782,688
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
# Generated by Django 3.1.7 on 2021-04-13 09:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('checkout', '0002_auto_20210413_0746'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orderwish', to='checkout.checkout', verbose_name='CheckOut'),
),
]
|
[
"you@example.com"
] |
you@example.com
|
4936d190287dd5249daf34313e71e03c891daab6
|
fb5d9f9b4ae3d7059d582ebb390916c2f9528852
|
/util/__init__.py
|
231dd87f66aa7d5f32d08714cbb4ea33acfaa90f
|
[] |
no_license
|
tianxiaguixin002/Code-Implementation-of-Super-Resolution-ZOO
|
32d4168f4d8d031968b7a601cf68b50730b15b06
|
f6ccf309c7653a27173de5184d17bb5933baab14
|
refs/heads/master
| 2022-11-13T17:09:11.484532
| 2020-07-06T01:51:25
| 2020-07-06T01:51:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,476
|
py
|
"""This package includes a miscellaneous collection of useful helper functions."""
import os
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def remove_pad_for_tensor(tensor, HR_GT_h_w, factor, LR_flag=True):
assert len(tensor.shape) == 4
_, _, now_h, now_w = tensor.shape
des_h, des_w = HR_GT_h_w
assert des_h % factor == 0 and des_w % factor == 0
if LR_flag:
des_h = des_h // factor
des_w = des_w // factor
assert now_h >= des_h and now_w >= des_w
delta_h = now_h - des_h
delta_w = now_w - des_w
if LR_flag:
start_h = delta_h // 2
start_w = delta_w // 2
return tensor[..., start_h: start_h + des_h, start_w: start_w + des_w]
else:
assert delta_w % factor == 0 and delta_h % factor == 0
delta_h = delta_h // factor
delta_w = delta_w // factor
start_h = delta_h // 2
start_w = delta_w // 2
return tensor[..., start_h*factor: start_h*factor + des_h, start_w*factor: start_w*factor + des_w]
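# A minimal self-check (illustrative addition; assumes PyTorch is installed):
if __name__ == "__main__":
    import torch
    padded_lr = torch.zeros(1, 3, 6, 6)  # LR tensor padded beyond 16 // 4 = 4
    cropped = remove_pad_for_tensor(padded_lr, (16, 16), factor=4, LR_flag=True)
    assert cropped.shape == (1, 3, 4, 4)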
|
[
"chenyx.cs@gmail.com"
] |
chenyx.cs@gmail.com
|
38e065c61bb431a8fc4dd5b8d0a8130d39cb9dfd
|
4ddf4fa6a4a499d64b23fb99d70a7bb3802fd1b0
|
/exercises/flask_regression.py
|
4fd8a87deed5370e70ec83805bf14ac80fa11aac
|
[] |
no_license
|
biterbilen/MVML
|
2b318b3883c00ed1908ef75924077e3aab639094
|
76a79ded26d09452234b7ae2b4809e47aa93df70
|
refs/heads/master
| 2023-01-13T10:04:10.269589
| 2020-11-16T18:55:19
| 2020-11-16T18:55:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
import pickle
from flask import Flask, request, render_template
import pandas as pd
app = Flask(__name__)
with open('exercises/model.pkl', 'rb') as f:
model = pickle.load(f)
@app.route("/")
def index():
pass
@app.route("/result", methods=["POST"])
def predict():
new = pd.DataFrame({'X': [20]})
y = float(model.predict(new)[0])
return pass
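# To exercise the endpoint (illustrative; assumes the app runs on Flask's default port):
#   FLASK_APP=exercises/flask_regression.py flask run
#   curl -X POST http://localhost:5000/result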
|
[
"max.humber@gmail.com"
] |
max.humber@gmail.com
|
686a68dd2426c857f28b7069b29021b4b28d8624
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_monsters.py
|
e7f2bb60af124f30bf240705f5e9a379d84687ad
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from xai.brain.wordbase.verbs._monster import _MONSTER
# class header
class _MONSTERS(_MONSTER, ):
def __init__(self,):
_MONSTER.__init__(self)
self.name = "MONSTERS"
self.specie = 'verbs'
self.basic = "monster"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
90088be62e540370be33da0a6c2da6c4e57b429a
|
d3426a5d1bbecde0fe480e7af64a54bfdb8295eb
|
/students/migrations/0005_auto_20170802_1524.py
|
24c508cf44b2fa93d7d609ad89d31875eb8a08a5
|
[
"MIT"
] |
permissive
|
pu6ki/elsyser
|
5a3b83f25f236b4a4903180985f60ced98b3fb53
|
52261c93b58422b0e39cae656ae9409ea03a488d
|
refs/heads/master
| 2021-01-12T18:06:18.375185
| 2017-12-10T18:18:34
| 2017-12-10T18:18:34
| 71,325,732
| 5
| 4
|
MIT
| 2017-12-10T18:18:35
| 2016-10-19T06:26:47
|
Python
|
UTF-8
|
Python
| false
| false
| 739
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-02 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0004_auto_20170801_1243'),
]
operations = [
migrations.DeleteModel(
name='CustomUser',
),
migrations.AddField(
model_name='student',
name='activation_key',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AddField(
model_name='teacher',
name='activation_key',
field=models.CharField(blank=True, max_length=40, null=True),
),
]
|
[
"wencakisa@gmail.com"
] |
wencakisa@gmail.com
|
0d243a771485ed631550fd265ff1c7dd644c4b81
|
d8af7c6372aff57012c80d3b8a9dfaab81499f71
|
/AIDStudy/01-PythonBase/day07/exercise05.py
|
ab36bcf266a5024e045bea117f549cbbf81366f4
|
[] |
no_license
|
fanxiao168/pythonStudy
|
4843c56019b8f997fd7fc566904a9e0162e9a541
|
f94e2238d40c41ee54ff4184c500d659c6820c03
|
refs/heads/master
| 2021-02-04T20:54:10.850770
| 2020-05-28T08:55:35
| 2020-05-28T08:55:35
| 243,708,800
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# A two-dimensional list with 3 rows and 5 columns
list01 = [
[0, 1, 2, 3, 4],
[1, 28, 45, 6, 7],
[20, 7, 3, 65, 2]
]
# Print the elements of the second row
for item in list01[1]:
print(item, end=' ')
# Print the first column
print(list01[0][0])
print(list01[1][0])
print(list01[2][0])
for i in range(len(list01)):
print(list01[i][0])
# Print every element
for i in range(len(list01)):
for item in list01[i]:
print(item, end=' ')
print()
|
[
"fanxiao168@126.com"
] |
fanxiao168@126.com
|
46077844615c08090f6de524a45ad3b9f9d1e776
|
e415e4cdab3d1cd04a4aa587f7ddc59e71977972
|
/datetime/datetime.timedelta.py
|
77ed0bd2af1f40780439d18c8c5f04973b36cd10
|
[] |
no_license
|
nixawk/hello-python3
|
8c3ebba577b39f545d4a67f3da9b8bb6122d12ea
|
e0680eb49d260c5e3f06f9690c558f95a851f87c
|
refs/heads/master
| 2022-03-31T23:02:30.225702
| 2019-12-02T10:15:55
| 2019-12-02T10:15:55
| 84,066,942
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
class datetime.timedelta
A duration expressing the difference between two date, time,
or datetime instances to microsecond resolution.
"""
import datetime
def datetime_timedelta():
# def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):
_timedelta = datetime.timedelta(
days=1,
seconds=0,
microseconds=0,
milliseconds=0,
minutes=0,
hours=0,
weeks=1
)
print(str(_timedelta)) # 8 days, 0:00:00
if __name__ == '__main__':
datetime_timedelta()
# reference
# https://docs.python.org/3/library/datetime.html
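# Illustrative arithmetic (not from the original file): timedeltas support +, - and *
#   datetime.timedelta(days=1) * 2 + datetime.timedelta(hours=12) == datetime.timedelta(hours=60)  # True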
|
[
"hap.ddup@gmail.com"
] |
hap.ddup@gmail.com
|
b7020ef6386d6ad70f6a34fd08ff52d6e5aac54a
|
bf4f5e90fff95800f3ab944efcdb9aace29be71d
|
/banshee-master/api/mms/balance/mall_balance.py
|
2f573caaf12c82cd5229034b31716e1636ce01a9
|
[] |
no_license
|
tanlu01/testfan
|
c77c833d4e164a4786f20d7f28ffbb99cd6dcb2e
|
de8cf936dc7e80a9e2847fa47ae5c909729675b1
|
refs/heads/master
| 2023-04-20T17:12:57.515484
| 2021-05-10T15:53:15
| 2021-05-10T15:53:15
| 365,418,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,918
|
py
|
from api.mms.mms_ import Mms
class MallBalance(Mms):
method = 'post'
api = '/api/mall/Balance'
data = {}
error_resp = {
'code': 400000,
'message': '没有可以购买的商品'
}
expected_schema = {
"$schema": "http://json-schema.org/draft-06/schema#",
"title": "expected_data",
"type": "object",
"required": ["code", "payload"],
"properties": {
"code": {
"type": "number"
},
"payload": {
"type": "object",
"required": ["id", "mall_id", "goods_payment_income", "goods_payment_expend", "shop_deposit_cash", "activity_deposit_cash", "activity_subsidy", "created_at", "update_at", "default_shop_deposit_cash", "goods_payment_withdrawing", "shop_deposit_withdrawing", "activity_deposit_withdrawing", "activity_subsidy_withdrawing", "goods_payment_freeze", "is_open", "loan_withdraw_times", "remain_withdraw_times", "activation_status", "sub_mch_state", "address", "need_annual_fee", "has_factory_info"],
"properties": {
"id": {
"type": "string"
}, "mall_id": {
"type": "string"
}, "goods_payment_income": {
"type": "string"
}, "goods_payment_expend": {
"type": "string"
}, "shop_deposit_cash": {
"type": "string"
}, "activity_deposit_cash": {
"type": "string"
}, "activity_subsidy": {
"type": "string"
}, "created_at": {
"type": "string"
}, "update_at": {
"type": "string"
}, "default_shop_deposit_cash": {
"type": "string"
}, "goods_payment_withdrawing": {
"type": "string"
}, "shop_deposit_withdrawing": {
"type": "string"
}, "activity_deposit_withdrawing": {
"type": "string"
}, "activity_subsidy_withdrawing": {
"type": "string"
}, "goods_payment_freeze": {
"type": "string"
}, "is_open": {
"type": "string"
},
"punishment":{
"type": "object",
"required": [],
"properties": {}
},
"activity_forbidden":{
"type": "object",
"required": [],
"properties": {}
},
"loan_withdraw_times": {
"type": "number"
}, "remain_withdraw_times": {
"type": "number"
}, "activation_status": {
"type": "string"
}, "sub_mch_state": {
"type": "object",
"required": ["status", "info"],
"properties": {
"status": {
"type": "string"
},
"info": {
"type": "string"
},
}
}, "address": {
"type": "number"
}, "need_annual_fee": {
"type": "boolean"
}, "has_factory_info": {
"type": "boolean"
},
}
}
}
}
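    # Illustrative validation sketch (assumes the jsonschema package; response_json
    # is a placeholder for a real API response body):
    #   import jsonschema
    #   jsonschema.validate(instance=response_json, schema=MallBalance.expected_schema)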
|
[
"tanlu01@100.me"
] |
tanlu01@100.me
|
eb7268205ffc0e037565f404d2dc6e35a518804e
|
5cfc22491d6c83e807b21883ce71fdb958a2c53f
|
/identify/trend.py
|
826bf058e30afc7807df0bd377da5c8348ad60e1
|
[] |
no_license
|
Jeffin-Studios/stocktool
|
ce29824bfa663927eac1b17dad3ed7830dbedf1f
|
96f4e96feb4de9a54028e4b5ef56bd8554f122eb
|
refs/heads/master
| 2020-03-28T03:27:27.271479
| 2018-10-18T08:03:08
| 2018-10-18T08:03:08
| 147,646,775
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,466
|
py
|
#rolling average for both stock and market
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class stocktrend():
# Initialization requires a ticker symbol
def __init__(self, stock_name, start_date = None, end_date = None, draw_graph = False):
self.name = stock_name.upper()
self.start = start_date
self.end = end_date
self.graph = draw_graph
self.stock = data.DataReader(stock_name, 'yahoo', start_date, end_date)
# Basic Historical Plots and Basic Statistics
def plot_stock(self, stats=[], series = [], serieslabels = [], xlabel="Date", ylabel="Price"):
fig, ax = plt.subplots(figsize=(16,9))
fig.suptitle(self.name, fontsize=20)
for stat in stats:
ax.plot(self.stock[stat].index, self.stock[stat], label=stat)
        for i, ser in enumerate(series):  # 'ser' avoids shadowing the pandas_datareader 'data' module imported above
            ax.plot(ser.index, ser, label=serieslabels[i])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
plt.axhline(0, color='black')
plt.grid()
plt.show()
def rollingAverage(self, strict = False):
close_price = self.stock['Close']
if (strict):
# Getting all weekdays
all_weekdays = pd.date_range(start=self.start, end=self.end, freq='B')
close_price = close_price.reindex(all_weekdays)
close_price = close_price.fillna(method='ffill')
# weekly_roll
short_rolling_stock = close_price.rolling(window=5).mean()
medium_rolling_stock = close_price.rolling(window=20).mean()
long_rolling_stock = close_price.rolling(window=60).mean()
if (self.graph):
self.plot_stock(series=[close_price, short_rolling_stock, medium_rolling_stock, long_rolling_stock], serieslabels=["Closing Price", "5 days rolling", "20 days rolling", "60 days rolling"])
return (short_rolling_stock, medium_rolling_stock)
# Buy when this is at a zero or high positive slope
def daily_change(self):
if ('Adj. Close' not in self.stock.columns):
self.stock['Adj. Close'] = self.stock['Close']
self.stock['Adj. Open'] = self.stock['Open']
self.stock['y'] = self.stock['Adj. Close']
self.stock['Daily Change'] = self.stock['Adj. Close'] - self.stock['Adj. Open']
if (self.graph):
self.plot_stock(stats=['Daily Change'], ylabel="Change in Price")
def get_rsi(self, n=14):
prices = self.stock['Close']
dates = prices.index
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
for i in range(n, len(prices)):
delta = deltas[i-1] # cause the diff is 1 shorter
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
if (self.graph):
fig, ax = plt.subplots(figsize=(16,9))
fig.suptitle(self.name, fontsize=20)
ax.plot(dates, rsi, color = "purple", linewidth=1.5, label='RSI')
ax.axhline(70, color="red")
ax.axhline(30, color="green")
ax.fill_between(dates, rsi, 70, where=(rsi>=70), facecolor="red", edgecolor="red", alpha=0.5)
ax.fill_between(dates, rsi, 30, where=(rsi<=30), facecolor="green", edgecolor="green", alpha=0.5)
ax.set_yticks([30,70])
ax.legend()
ax.tick_params(axis='y')
ax.tick_params(axis='x')
ax.set_xlabel("Date")
ax.set_ylabel("Momentum")
ax.grid()
plt.show()
return rsi
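    # RSI recap (illustrative note): rs is the ratio of smoothed average gains to
    # smoothed average losses over n periods and rsi = 100 - 100/(1 + rs); readings
    # above 70 are conventionally "overbought" and below 30 "oversold", matching
    # the shaded bands drawn above.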
######## Need to make this real time, to detect when climb starts and when dip starts
def fluctuation(self):
(short_rolling_stock, medium_rolling_stock) = self.rollingAverage()
self.stock["Fluctuation"] = short_rolling_stock - medium_rolling_stock
# Starts climbing when short term average passes long term average
# Starts dipping when short term average goes below long term average
### Code determines if change is at a zero, evaluates slope of the change, to see if its climbing or dipping, also concavity to make sure.
if (self.graph):
self.plot_stock(stats=['Fluctuation'], ylabel="Deviation From Average")
# return short_rolling_stock.std(ddof=0)
#How wildy stock prices fluctuate (look at daily changes)
def get_volatility(self):
return
if __name__ == "__main__":
start="2018-08-01"
end="2018-9-20"
tickers = ['CRON', 'ECOM', 'CLDR', 'HMI']
stock = stocktrend(tickers[3], start, end, draw_graph = True)
volatility = stock.fluctuation()
print(volatility)
stock.daily_change()
stock.get_rsi()
|
[
"richardbao419@gmail.com"
] |
richardbao419@gmail.com
|
0e361c2d25f1dc4f3d81ae0f99eff2116e37073d
|
eec0d71067c95a772e3fdeeadab242230cd2b31f
|
/mysite/settings.py
|
e3abb79a4fe1388ad0999909201edfd4ccbc3f9b
|
[] |
no_license
|
amcmy/my-first-blog
|
d543ef051ab9decca1a5ff073c3bb40ef552f74a
|
5bb9d7284633f31797e5cc83fcd3d14b9eed8960
|
refs/heads/master
| 2020-05-17T15:21:36.295933
| 2019-04-27T15:14:09
| 2019-04-27T15:14:09
| 183,788,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p+#p6o^(-%rxrujd$8wda&+c%!9ejyotrr&)hc0mwd&j(iomen'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"you@example.com"
] |
you@example.com
|
8214a4f2f5d28141284cdcea9a1c77e386cbbf48
|
cb4736c3db7288ca3868e319ace6c19364302d2f
|
/module_template/repeater.py
|
e453e33ab9530bcdf5872230c59496fffeb4f50f
|
[
"MIT"
] |
permissive
|
rstms/repeat
|
0928d9833463e5048071a2564916b1a2eb233fb6
|
af9f1aa01cc0d5d5fd8e6ff688e7c84267b8600f
|
refs/heads/master
| 2023-07-27T16:40:04.187344
| 2021-09-11T03:43:31
| 2021-09-11T03:43:31
| 399,602,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,784
|
py
|
# -*- coding: utf-8 -*-
"""repeater object for repeat command line tool
This simple module contains a Repeater() class with a run() method that
does the work of the program.
Example:
    To use the class, open an input stream and an output stream in text
    mode, then call the run method, specifying the number of times to repeat
    the copy of the input.
    To use it from Python code::
        >>> from repeat import Repeater
        >>> Repeater().run(sys.stdin, sys.stdout, 1)
The function operates in text mode and uses line buffering. It is designed
to be called by the click api.
"""
import io
import tempfile
import sys
from .exception import ParameterError
class Repeater():
"""Repeat stream input to output mutliple times"""
def __init__(self):
"""Context constructor"""
pass
def run(self, infile, outfile, count=1, prefix=False):
"""Copy stdin to stdout and a buffer, then output the buffer multiple times.
Arguments
infile (:obj: `File`): input file
outfile (:obj: `File`): output file
count (int): number of output repetitions
prefix (bool): begin output with line count
"""
if count < 0:
raise ParameterError('I refuse to repeat a negative number of times.')
# see if the stream is binary or text
if isinstance(infile, io.TextIOBase):
mode = 'w+'
else:
mode = 'wb+'
buf = tempfile.SpooledTemporaryFile(mode=mode)
        length = 0
        for length, line in enumerate(infile, start=1):
            buf.write(line)
        if prefix:
            # length is the number of input lines copied; 0 when the input was empty
            outfile.write(f'{length}\n')
for _ in range(0, count):
buf.seek(0)
for line in buf:
outfile.write(line)
|
[
"mkrueger@rstms.net"
] |
mkrueger@rstms.net
|
4f57c4e198a2dc78dc00b90d05162dd65d57d004
|
ebd1d1bbaa0fe30590e2c8c0d19a9d7eff180320
|
/arp/scripts/xtalk_preview.py
|
7110e508cd826977ff0fc86692a60f78449baab7
|
[] |
no_license
|
HERA-Team/hera_sandbox
|
3093009c21e13a79bf3914d64b521b8fbc4bc733
|
f9d292f4a91c0599947e3c013b48114b2097d76d
|
refs/heads/master
| 2021-11-26T16:54:38.665721
| 2021-10-05T19:41:40
| 2021-10-05T19:41:40
| 95,712,482
| 2
| 6
| null | 2017-07-10T19:46:22
| 2017-06-28T21:17:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
#! /usr/bin/env python
import aipy as a, capo as C, pylab as p, numpy as n
import sys
CH0,CH1 = 16,182
fqs = n.linspace(.1,.2,203)
jy2T = C.pspec.jy2T(fqs)
fqs = fqs[CH0:CH1]
aa = a.cal.get_aa('psa6240_v003', n.array([.15]))
t,dat,flg = C.arp.get_dict_of_uv_data(sys.argv[1:], 'cross', 'I')
window = a.dsp.gen_window(fqs.size,'blackman-harris')
norm = n.fft.ifft(window)[0]
tau = n.fft.fftfreq(fqs.size, fqs[1]-fqs[0])
tau = n.fft.fftshift(tau)
#for filename in sys.argv[1:]:
# t,dat,flg = C.arp.get_dict_of_uv_data([filename], 'cross', 'I')
for bl in dat:
i,j = a.miriad.bl2ij(bl)
print i,j
for pol in dat[bl]:
d = n.sum(dat[bl][pol], axis=0) * jy2T
if (aa[i] - aa[j])[1] < 0: d = d.conj()
w = n.sum(n.logical_not(flg[bl][pol]).astype(n.int), axis=0)
d = n.where(w[CH0:CH1] > 0, d[CH0:CH1]/w[CH0:CH1], 0)
w = n.where(w > 0, 1, 0)
p.subplot(121); p.plot(fqs, d)
_d = n.fft.ifft(window*d) / norm
_d = n.fft.fftshift(_d)
p.subplot(122); p.plot(tau, n.abs(_d))
p.xlabel('Delay [ns]')
p.ylabel('Power [mK]')
p.show()
|
[
"aparsons@berkeley.edu"
] |
aparsons@berkeley.edu
|
b3bb756dcc5fa36b2ee79947713b1d3b50e1fdda
|
03dddfda1c488f08ae4b7914e83dd96c24f1e3d7
|
/meadow_mall/meadow_mall/apps/oauth/utils.py
|
25c69391d72c9ec60ab021d7c100769fbc532cd7
|
[] |
no_license
|
zizle/MeadowMall
|
0223ed134baf164eb71358e0c0d3c7a0fbf58782
|
d5cc05e71a666724726bc324c1eb4841b828b284
|
refs/heads/master
| 2020-03-21T00:14:47.853180
| 2018-07-11T00:38:55
| 2018-07-11T00:38:55
| 137,884,601
| 0
| 0
| null | 2018-07-11T00:38:56
| 2018-06-19T11:50:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,856
|
py
|
# _*_ coding:utf-8 _*_
# QQ login helper
from django.conf import settings
from urllib.parse import urlencode, parse_qs
from urllib.request import urlopen
from itsdangerous import TimedJSONWebSignatureSerializer as TJWSSerializer, BadData
import logging
import json
from .exceptions import OAuthQQAPIError
from . import constants
logger = logging.getLogger('django')
class OAuthQQ(object):
"""QQ认证登录辅助"""
def __init__(self, client_id=None, client_secret=None, redirect_uri=None, state=None):
self.state = state or settings.QQ_STATE
self.client_id = client_id if client_id else settings.QQ_CLIENT_ID
self.client_secret = client_secret if client_secret else settings.QQ_CLIENT_SECRET
self.redirect_uri = redirect_uri if redirect_uri else settings.QQ_REDIRECT_URI
def get_qq_login_url(self):
"""获取QQ登录的url"""
params = {
'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'state': self.state,
'scope': 'get_user_info',
}
url = 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(params)
return url
def get_access_token(self, code):
"""获取access_token"""
params = {
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri,
}
url = 'https://graph.qq.com/oauth2.0/token?' + urlencode(params)
        try:
            # Request the access_token from the QQ server
            response = urlopen(url)
            response_data = response.read().decode()
            # access_token = FE04 ** ** ** ** CCE2 & expires_in = 7776000 & refresh_token = 88E4 *** ** ** ** BE14
            # The parsed data has the form {access_token: [xxx], }
            response_dict = parse_qs(response_data)
        except Exception as e:
            logger.error('Exception while getting access_token: %s' % e)
            # Raise the error
            raise OAuthQQAPIError
else:
access_token = response_dict.get('access_token')
return access_token[0]
def get_openid(self, access_token):
"""
获取openid
:param access_token: 向QQ服务器获取openid必须参数
:return: openid
"""
url = 'https://graph.qq.com/oauth2.0/me?access_token=' + access_token
response = urlopen(url)
response_data = response.read().decode()
# callback({"client_id": "YOUR_APPID", "openid": "YOUR_OPENID"})\n;
        try:
            # Parse the data
            response_dict = json.loads(response_data[10:-4])
            print('response_dict when fetching openid:', response_dict)
        except Exception as e:
            data = parse_qs(response_data)
            logger.error('code=%s msg=%s' % ((data.get('code'), data.get('msg'))))
            raise OAuthQQAPIError
        # Get the openid
openid = response_dict.get('openid', None)
return openid
@staticmethod
def generate_access_token(openid):
"""
生成access_token
:return: token
"""
serializer = TJWSSerializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
token = serializer.dumps({"openid": openid})
return token.decode()
@staticmethod
def check_save_user_token(token):
"""
检验我们生成的access_token
:param token:
:return:
"""
serializer = TJWSSerializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
try:
data = serializer.loads(token)
except BadData:
return None
else:
return data.get('openid')
|
[
"zizle_lin@163.com"
] |
zizle_lin@163.com
|
df21b265a6cacdff01901795d819235d4a0eb590
|
f7e0780b4d73ebf6e50fe4053c01fd3cc4d6b227
|
/auctioning_platform/shipping_infrastructure/shipping_infrastructure/repositories/address.py
|
8ec2f8f1ec18b584e7ea6e8fffc9eacf0ddcf650
|
[
"MIT"
] |
permissive
|
Enforcer/clean-architecture
|
78d663585f913c51a0460bcafa4af35515cdf549
|
f0c1c0a8364996d309e7381b44933807529200b1
|
refs/heads/master
| 2023-02-20T01:40:24.653512
| 2022-08-02T20:59:03
| 2022-08-02T20:59:03
| 208,138,785
| 454
| 51
|
MIT
| 2023-02-16T01:31:26
| 2019-09-12T20:16:08
|
Python
|
UTF-8
|
Python
| false
| false
| 589
|
py
|
import uuid
import faker
from shipping import AddressRepository
from shipping.domain.entities import Address
from shipping.domain.value_objects import ConsigneeId
class FakeAddressRepository(AddressRepository):
def get(self, consignee_id: ConsigneeId) -> Address:
fake = faker.Faker()
return Address(
uuid=uuid.uuid4(),
street=fake.street_name(),
house_number=fake.building_number(),
city=fake.city(),
state=fake.state(),
zip_code=fake.zipcode(),
country=fake.country(),
)
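# Illustrative usage (the consignee id value is a placeholder; get ignores it
# and fabricates a fresh random Address on every call):
#   repo = FakeAddressRepository()
#   address = repo.get(consignee_id=1)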
|
[
"nnplaya@gmail.com"
] |
nnplaya@gmail.com
|
eb9f0a5bbe7e49980ce27a7e5656855d9bfe7f04
|
14427b4c73fef188791affb42c9fffe8e25b7dc1
|
/tests/solr_tests/tests/admin.py
|
082e19f81b3cb37860f748e658e952a736a542f1
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
markng/django-haystack
|
697ea05dd49980be43002e0fb6bf5c2b6357a015
|
78160bb2f530f7fadc0caf22f2f8babbac89ef32
|
refs/heads/master
| 2021-01-17T23:13:00.755184
| 2010-10-21T01:02:38
| 2010-10-21T01:02:38
| 1,010,851
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,667
|
py
|
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from haystack import backends
from core.models import MockModel
from solr_tests.tests.solr_backend import SolrMockModelSearchIndex, clear_solr_index
class SearchModelAdminTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SearchModelAdminTestCase, self).setUp()
# With the models registered, you get the proper bits.
import haystack
from haystack.sites import SearchSite
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_site = haystack.site
test_site = SearchSite()
test_site.register(MockModel, SolrMockModelSearchIndex)
haystack.site = test_site
# Wipe it clean.
clear_solr_index()
# Force indexing of the content.
mockmodel_index = test_site.get_index(MockModel)
mockmodel_index.update()
superuser = User.objects.create_superuser(
username='superuser',
password='password',
email='super@user.com',
)
def tearDown(self):
# Restore.
import haystack
haystack.site = self.old_site
settings.DEBUG = self.old_debug
super(SearchModelAdminTestCase, self).tearDown()
def test_usage(self):
backends.reset_search_queries()
self.assertEqual(len(backends.queries), 0)
self.assertEqual(self.client.login(username='superuser', password='password'), True)
# First, non-search behavior.
resp = self.client.get('/admin/core/mockmodel/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 0)
self.assertEqual(resp.context['cl'].full_result_count, 23)
# Then search behavior.
resp = self.client.get('/admin/core/mockmodel/', data={'q': 'Haystack'})
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 2)
self.assertEqual(resp.context['cl'].full_result_count, 7)
# Ensure they aren't search results.
self.assertEqual(isinstance(resp.context['cl'].result_list[0], MockModel), True)
self.assertEqual(resp.context['cl'].result_list[0].id, 17)
# Make sure only changelist is affected.
resp = self.client.get('/admin/core/mockmodel/1/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 2)
self.assertEqual(resp.context['original'].id, 1)
|
[
"daniel@toastdriven.com"
] |
daniel@toastdriven.com
|
d6d19a882acc2888a4c56b24b29d7ea83b450ec4
|
b2cfcacbd898f758a56d095f2140681934205d89
|
/GeekShop_mentor/src_lesson_8/step_6(CBV_DetailView)/geekshop/geekshop/urls.py
|
f61dfb9bfdf3de46c576ad4a7bb2d44badcae26a
|
[] |
no_license
|
AndreySperansky/Django_1
|
7d3be3ea2ede8e46d932fdae146ce4a7c4e300b4
|
0fec0a9a02b887fd8b45a5b763b7da5dc6377208
|
refs/heads/master
| 2022-12-15T19:56:23.611288
| 2020-09-21T17:40:40
| 2020-09-21T17:40:40
| 284,131,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
from django.conf.urls import url, include
from django.contrib import admin
import mainapp.views as mainapp
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', mainapp.main, name='main'),
url(r'^products/', include('mainapp.urls', namespace='products')),
url(r'^contact/', mainapp.contact, name='contact'),
url(r'^auth/', include('authapp.urls', namespace='auth')),
url(r'^basket/', include('basketapp.urls', namespace='basket')),
url(r'^admin/', include('adminapp.urls', namespace='admin')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"andrey.speransky@gmail.com"
] |
andrey.speransky@gmail.com
|
60f8ee32eec66d5a1aceb50eae7ede2d9486b5ac
|
12068d6ebe90fcbd13c635f35c23ce7c73884b12
|
/Lecture/Lecture_basic/TSP.py
|
bdc30261cdae927d3d5f5be96d23f11de1767206
|
[] |
no_license
|
jngcii/python-algorithm
|
6ffb7a90eb0288cadc64bc3b2bf2d103455d2337
|
e976b0033beeeb14217e7acc8d67f201a79184f2
|
refs/heads/master
| 2020-04-21T09:09:13.353113
| 2020-01-23T17:36:02
| 2020-01-23T17:36:02
| 169,439,831
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,119
|
py
|
def next_permutation(a):  # rearrange a in place into its next lexicographic permutation
    # From the back, find the first position whose left neighbour is smaller than it.
    i = len(a) - 1
    while i > 0 and a[i] <= a[i - 1]:
        i -= 1
    # Reached the front without finding one ======> e.g. 987654321 is already the last permutation
    if i <= 0:
        return False
    # Now a[i-1] is guaranteed to be smaller than a[i].
    # Among a[i:], find the smallest element that is still larger than a[i-1]:
    # a[i:] is sorted in descending order (e.g. 87654), so scan from the back
    # and keep the first element larger than a[i-1].
    j = len(a) - 1
    while a[j] <= a[i - 1]:
        j -= 1
    # Swap the stored j with position i-1
    a[i - 1], a[j] = a[j], a[i - 1]
    # Even after the swap, a[i:] is still in descending order,
    # so reverse it back into ascending order.
    j = len(a) - 1
    while j > i:
        a[i], a[j] = a[j], a[i]
        i += 1
        j -= 1
    # A next permutation was found and the rearrangement is complete.
    return True
n = int(input())  # read the number of cities
d = list(range(n))  # name the cities 0 .. n-1
w = [list(map(int, input().split())) for _ in range(n)]  # read the fare from each city to each city
res = 1000000000000
while True:  # idiom: loop on True and escape with if/break inside the loop
    ok = True
    s = 0  # cost of the current tour, reset for every permutation
    # Walk every leg of the current tour (starting with the leg from the last city back to the first);
    # if any leg is blocked (fare 0), set ok=False and abandon this tour.
    for k in range(-1, n - 1):
        if w[d[k]][d[k + 1]] == 0:
            ok = False
            break
        else:
            s += w[d[k]][d[k + 1]]
    # If the walk completed, s holds the full tour cost; keep the smaller of s and res.
    if ok:
        res = min(s, res)
    # Advance d to its next permutation; stop when there is none.
    if not next_permutation(d):
        break
    # Only tours starting at city 0 are considered; stop as soon as d[0] moves past 0.
    if d[0] != 0:
        break
print(res)
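# Sample run (illustrative): for the classic 4-city fare matrix
#   4
#   0 10 15 20
#   5 0 9 10
#   6 13 0 12
#   8 8 9 0
# the program prints 35 (tour 0 -> 1 -> 3 -> 2 -> 0).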
|
[
"concotree@gmail.com"
] |
concotree@gmail.com
|
f04ac3ed7b2bb5e3752f0ef2d96c1cb9e22b29a2
|
1670af31dd78a82915b8cda16c181c0aa31a1c3a
|
/favs/urls.py
|
4e213700c57ab1fa1133e8a7999e64f4f4711de4
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
chyuting/dj4e-samples
|
ecd2eca6edc50402a55dee487c11f74ec873cb2d
|
bbe3dcd214789e4be96dd2460018524f8078b4e3
|
refs/heads/master
| 2020-09-15T18:12:47.472948
| 2019-11-22T20:28:57
| 2019-11-22T20:28:57
| 223,524,434
| 1
| 0
|
MIT
| 2019-11-23T03:24:03
| 2019-11-23T03:24:02
| null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
from django.urls import path, reverse_lazy
from . import views
from django.views.generic import TemplateView
# In urls.py reverse_lazy('favs:all')
# In views.py class initialization reverse_lazy('favs:all')
# In views.py methods reverse('favs:all')
# In templates {% url 'favs:thing_update' thing.id %}
app_name='favs'
urlpatterns = [
path('', views.ThingListView.as_view(), name='all'),
path('thing/<int:pk>', views.ThingDetailView.as_view(), name='thing_detail'),
path('thing/create',
views.ThingCreateView.as_view(success_url=reverse_lazy('favs:all')), name='thing_create'),
path('thing/<int:pk>/update',
views.ThingUpdateView.as_view(success_url=reverse_lazy('favs:all')), name='thing_update'),
path('thing/<int:pk>/delete',
views.ThingDeleteView.as_view(success_url=reverse_lazy('favs:all')), name='thing_delete'),
path('thing/<int:pk>/favorite',
views.AddFavoriteView.as_view(), name='thing_favorite'),
path('thing/<int:pk>/unfavorite',
views.DeleteFavoriteView.as_view(), name='thing_unfavorite'),
]
|
[
"csev@umich.edu"
] |
csev@umich.edu
|
dbc327a8c84469e18f9c25854df9fec96a476708
|
0fb505765604b586c3a46e608fc23930f8501db5
|
/venv/lib/python2.7/site-packages/django/contrib/staticfiles/apps.py
|
ae69667b7b99c779598188e2cff803417bb26aa0
|
[
"MIT"
] |
permissive
|
domenicosolazzo/practice-django
|
b05edecc302d97c97b7ce1de809ea46d59e2f0e6
|
44e74c973384c38bd71e7c8a1aacd1e10d6a6893
|
refs/heads/master
| 2021-08-19T15:36:22.732954
| 2015-01-22T18:42:14
| 2015-01-22T18:42:14
| 25,118,384
| 0
| 0
|
MIT
| 2021-06-10T19:50:51
| 2014-10-12T12:08:47
|
Python
|
UTF-8
|
Python
| false
| false
| 206
|
py
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class StaticFilesConfig(AppConfig):
name = 'django.contrib.staticfiles'
verbose_name = _("Static Files")
|
[
"solazzo.domenico@gmail.com"
] |
solazzo.domenico@gmail.com
|
7601589c18fcecc2f0a8da320cabeceadd1e1dbe
|
9ef6d625945ecdebb476614a96a83a2139a92e9b
|
/nlstruct/datasets/quaero.py
|
8d1aedb5e67c9965c45a5d4f2bc23add2d4fe78c
|
[
"MIT"
] |
permissive
|
percevalw/nlstruct
|
f0cdf8ed86a32a5a96204b1c787eb35b1a4a804a
|
7b5fa2230a555331e2e68b25fbb2b25e4c9404a0
|
refs/heads/master
| 2023-08-09T00:07:13.782518
| 2023-07-03T14:37:43
| 2023-07-03T14:37:43
| 229,176,303
| 13
| 7
|
MIT
| 2023-07-23T00:52:19
| 2019-12-20T02:38:34
|
Python
|
UTF-8
|
Python
| false
| false
| 6,137
|
py
|
import os
import random
import zipfile
from sklearn.datasets._base import RemoteFileMetadata
from nlstruct.datasets.brat import load_from_brat
from nlstruct.datasets.base import NetworkLoadMode, ensure_files, NormalizationDataset
class QUAERO(NormalizationDataset):
REMOTE_FILES = [
RemoteFileMetadata(
url="https://quaerofrenchmed.limsi.fr/QUAERO_FrenchMed_brat.zip",
checksum="2cf8b5715d938fdc1cd02be75c4eaccb5b8ee14f4148216b8f9b9e80b2445c10",
filename="QUAERO_FrenchMed_brat.zip")
]
def __init__(self, path, terminology=None, sources=("EMEA", "MEDLINE"), version="2016", val_split=None, seed=False, debug=False,
map_concepts=False, unmappable_concepts="raise", relabel_with_semantic_type=False, preprocess_fn=None):
assert version in ("2015", "2016")
if val_split is not None or seed is not False:
assert version == "2015", "As validation split already exist for Quaero 2016, leave val_split=None and seed=False"
val_split = val_split
if not isinstance(sources, (tuple, list)):
sources = (sources,)
self.sources = sources = tuple(sources)
train_data, val_data, test_data = self.download_and_extract(path, version, sources, val_split, seed, debug)
super().__init__(
train_data=train_data,
val_data=val_data,
test_data=test_data,
terminology=terminology,
map_concepts=map_concepts,
unmappable_concepts=unmappable_concepts,
relabel_with_semantic_type=relabel_with_semantic_type,
preprocess_fn=preprocess_fn,
)
def download_and_extract(self, path, version, sources=("EMEA", "MEDLINE"), val_split=False, seed=False, debug=False):
"""
Loads the Quaero dataset
Parameters
----------
path: str
Location of the Quaero files
version: str
Version to load, either '2015' or '2016'
val_split: float
Will only be used if version is '2015' since no dev set was defined for this version
seed: int
Will only be used if version is '2015' since no dev set was defined for this version
sources: tuple of str
Which sources to load, ie EMEA, MEDLINE
Returns
-------
Dataset
"""
[file] = ensure_files(path, self.REMOTE_FILES, mode=NetworkLoadMode.AUTO)
zip_ref = zipfile.ZipFile(file, "r")
zip_ref.extractall(path)
zip_ref.close()
train_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/train/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/train/MEDLINE"))],
]
train_data = [doc for doc in train_data if doc["source"] in sources]
val_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/dev/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/dev/MEDLINE"))],
]
val_data = [doc for doc in val_data if doc["source"] in sources]
test_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/test/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/test/MEDLINE"))],
]
test_data = [doc for doc in test_data if doc["source"] in sources]
if version == "2015":
if val_split:
shuffled_data = list(train_data)
if seed is not False:
random.Random(seed).shuffle(shuffled_data)
offset = val_split if isinstance(val_split, int) else int(val_split * len(shuffled_data))
val_data = shuffled_data[:offset]
train_data = shuffled_data[offset:]
else:
val_data = []
subset = slice(None) if not debug else slice(0, 50)
train_data = train_data[subset]
val_data = val_data[subset]
test_data = test_data # Never subset the test set, we don't want to give false hopes
return train_data, val_data, test_data
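# Illustrative usage (the path argument is a placeholder):
#   dataset = QUAERO(path="data/quaero", version="2016", sources=("EMEA",))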
|
[
"perceval.wajsburt@gmail.com"
] |
perceval.wajsburt@gmail.com
|
1ecc6c19485b08f78b1da7819afdbb6fb6669109
|
93713f46f16f1e29b725f263da164fed24ebf8a8
|
/Library/bin3/jupyter-kernelspec
|
e82d73045ac485f46b7e7879db1c57e894321d7b
|
[
"Python-2.0",
"BSD-3-Clause"
] |
permissive
|
holzschu/Carnets
|
b83d15136d25db640cea023abb5c280b26a9620e
|
1ad7ec05fb1e3676ac879585296c513c3ee50ef9
|
refs/heads/master
| 2023-02-20T12:05:14.980685
| 2023-02-13T15:59:23
| 2023-02-13T15:59:23
| 167,671,526
| 541
| 36
|
BSD-3-Clause
| 2022-11-29T03:08:22
| 2019-01-26T09:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 344
|
#!/var/mobile/Containers/Data/Application/966C455F-0658-40E2-B076-EC684AFD0415/Library/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelspecapp import KernelSpecApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(KernelSpecApp.launch_instance())
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
|
e49d78dfbaf2eab206ce9b59421933eb775a7f3e
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/424.py
|
25219c47f183e713f54738fc014afb70c66da0ff
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204
| 2022-05-09T14:05:50
| 2022-05-09T14:05:50
| 209,430,056
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
class Solution(object):
    def characterReplacement(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: int
        """
        counts = [0] * 26  # occurrences of each uppercase letter inside the window
        tail = 0           # left edge of the sliding window
        res = 0
        for i in range(len(s)):
            counts[ord(s[i]) - ord('A')] += 1
            maxv = max(counts)  # count of the most frequent letter in the window
            # Shrink the window until at most k characters would need replacing.
            while i - tail + 1 - maxv > k:
                counts[ord(s[tail]) - ord('A')] -= 1
                tail += 1
                maxv = max(counts)
            if i - tail + 1 > res:
                res = i - tail + 1
        return res
s = "AABABBA"
k = 1
a = Solution()
print(a.characterReplacement(s, k))  # expected output: 4
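# Complexity note (illustrative): the window scan itself is O(n), but recomputing
# max(counts) on every step adds a 26x constant factor; tracking a running maximum
# of the window counts would make the loop strictly O(n).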
|
[
"1533441387@qq.com"
] |
1533441387@qq.com
|
170f17debf8e0aa5216ed9e03bf1456f06c0cc04
|
a7205bcd48196c1391d8c56414a1e20c39b52aa7
|
/train_online.py
|
2f18b62b4240057a1e6866d5f6aabb0846fd5164
|
[] |
no_license
|
CS-433/cs-433-project-2-fesenjoon
|
f9f58ef0caa54b08c6d59ffca0cbf2a08642ecb5
|
cb0f7519901f16ae0cb1bb9b1ae8b89761a0b519
|
refs/heads/master
| 2021-04-18T19:56:06.737876
| 2020-12-17T13:42:15
| 2020-12-17T13:42:15
| 314,034,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,511
|
py
|
import argparse
import json
from datetime import datetime
import os
import numpy as np
import torch
import models
import datasets
from utils import train_one_epoch, eval_on_dataloader
try:
from tensorboardX import SummaryWriter
except:
from torch.utils.tensorboard import SummaryWriter
def build_parser():
parser = argparse.ArgumentParser(description="""Trains models in the online setting described in the original paper.""")
parser.add_argument('--title', type=str)
parser.add_argument('--exp-dir', type=str, default=None)
parser.add_argument('--model', type=str, default='resnet18', choices=models.get_available_models())
# parser.add_argument('--dataset', type=str, default='cifar10', choices=datasets.get_available_datasets())
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--split-size', type=int, default=5000)
parser.add_argument('--random-seed', type=int, default=42)
parser.add_argument('--convergence-epochs', type=int, default=5) # If the minimum val loss does not decrease in 3 epochs training will stop
# parser.add_argument('--save-per-epoch', action='store_true', default=False)
parser.add_argument('--checkpoint', default=None)
parser.add_argument('--checkpoint-shrink', default=1.0, type=float)
parser.add_argument('--checkpoint-perturb', default=0.0, type=float)
return parser
def main(args):
print("Running with arguments:")
args_dict = {}
for key in vars(args):
if key == "default_function":
continue
args_dict[key] = getattr(args, key)
print(key, ": ", args_dict[key])
print("---")
experiment_time = datetime.now().strftime('%b%d_%H-%M-%S')
if args.exp_dir:
experiment_dir = args.exp_dir
else:
experiment_dir = os.path.join('exp', args.title, experiment_time)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.json"), "w") as f:
json.dump(args_dict, f, indent=4, sort_keys=True, default=lambda x: x.__name__)
if torch.cuda.is_available():
device = torch.device('cuda:0')
print("CUDA Recognized")
else:
device = torch.device('cpu')
try:
summary_writer = SummaryWriter(logdir=experiment_dir)
except:
summary_writer = SummaryWriter(experiment_dir)
print("Starting Online Learning")
#Online learning setup
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
model = models.get_model(args.model).to(device)
criterion = torch.nn.CrossEntropyLoss()
loaders = datasets.get_dataset("online_with_val_cifar10", split_size=args.split_size)
number_of_samples_online = []
test_accuracies_online = []
training_times_online = []
epoch = 0
for i, train_loader in enumerate(loaders['train_loaders']):
t_start = datetime.now()
n_train = (i + 1) * args.split_size
number_of_samples_online.append(n_train)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
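        # Shrink-and-perturb warm start: scale the current weights by
        # `checkpoint_shrink` and mix in `checkpoint_perturb` times freshly
        # initialized weights (a no-op with the defaults 1.0 / 0.0).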
random_model = models.get_model(args.model).to(device)
with torch.no_grad():
for real_parameter, random_parameter in zip(model.parameters(), random_model.parameters()):
real_parameter.mul_(args.checkpoint_shrink).add_(random_parameter, alpha=args.checkpoint_perturb)
train_accuracies = []
while True:
if epoch % 5 == 0:
print(f"Starting training in epoch {epoch + 1}")
train_loss, train_accuracy = train_one_epoch(device, model, optimizer, criterion, train_loader)
val_loss, val_accuracy = eval_on_dataloader(device, criterion, model, loaders['val_loader'])
test_loss, test_accuracy = eval_on_dataloader(device, criterion, model, loaders['test_loader'])
train_accuracies.append(train_accuracy)
epoch += 1
summary_writer.add_scalar("test_accuracy", test_accuracy, epoch)
summary_writer.add_scalar("test_loss", test_loss, epoch)
summary_writer.add_scalar("train_accuracy", train_accuracy, epoch)
summary_writer.add_scalar("train_loss", train_loss, epoch)
summary_writer.add_scalar("val_accuracy", val_accuracy, epoch)
summary_writer.add_scalar("val_loss", val_loss, epoch)
#if len(train_accuracies) >= args.convergence_epochs and \
# max(train_accuracies) not in train_accuracies[-args.convergence_epochs:]:
if train_accuracy >= 0.99:
print("Convergence condition met")
break
val_loss, val_accuracy = eval_on_dataloader(device, criterion, model, loaders['val_loader'])
test_loss, test_accuracy = eval_on_dataloader(device, criterion, model, loaders['test_loader'])
summary_writer.add_scalar("online_val_accuracy", val_accuracy, n_train)
summary_writer.add_scalar("online_val_loss", val_loss, n_train)
summary_writer.add_scalar("online_test_accuracy", test_accuracy, n_train)
summary_writer.add_scalar("online_test_loss", test_loss, n_train)
t_end = datetime.now()
training_time = (t_end - t_start).total_seconds()
training_times_online.append(training_time)
summary_writer.add_scalar("online_train_time", training_time, n_train)
summary_writer.close()
if __name__ == "__main__":
parser = build_parser()
args = parser.parse_args()
main(args)
|
[
"akmohtashami97@gmail.com"
] |
akmohtashami97@gmail.com
|
0b01562f680ea36d596485130d22202338aa0262
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/9b4h6mK9CBMLwyGiY_6.py
|
1abf1ae0a48f31137fb13996b1512d64f8d6b0d6
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
"""
In this challenge, you have to find the distance between two points placed on
a Cartesian plane. Knowing the coordinates of both the points, you have to
apply the **Pythagorean theorem** to find the distance between them.

Given two dictionaries `a` and `b` being the two points coordinates ( **x**
and **y** ), implement a function that returns the distance between the
points, rounded to the nearest thousandth.
### Examples
get_distance({"x": -2, "y": 1}, {"x": 4, "y": 3}) ➞ 6.325
get_distance({"x": 0, "y": 0}, {"x": 1, "y": 1}) ➞ 1.414
get_distance({"x": 10, "y": -5}, {"x": 8, "y": 16}) ➞ 21.095
### Notes
* Take a look at the **Resources** tab if you need a refresher on the geometry related to this challenge.
* The "distance" is the shortest distance between the two points, or the straight line generated from `a` to `b`.
"""
def get_distance(a, b):
    # Pythagorean theorem on the coordinate deltas; index the keys explicitly
    # instead of relying on dict value ordering.
    dis = ((a["x"] - b["x"]) ** 2 + (a["y"] - b["y"]) ** 2) ** 0.5
    return round(dis, 3)
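# Sanity checks taken from the docstring examples above:
assert get_distance({"x": -2, "y": 1}, {"x": 4, "y": 3}) == 6.325
assert get_distance({"x": 0, "y": 0}, {"x": 1, "y": 1}) == 1.414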
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
ee12d3b89ca259032c8f090a344825f2320900a2
|
45cfc0bf573d0419ff4c2e9cc8d73256be9bf8bb
|
/lws/translate.py
|
9a4d17f3e2092934a1d8e61e371afa9ac8f16708
|
[
"MIT"
] |
permissive
|
KEHANG/Life_Web_Services
|
c79c0f7df9aa78ca9efee79652c2fc8525076aae
|
2f519d38692492088fb6ba80a648d2099828b07e
|
refs/heads/master
| 2021-06-19T01:02:15.520716
| 2021-01-03T19:58:02
| 2021-01-03T19:58:02
| 149,108,564
| 0
| 0
|
MIT
| 2018-09-22T22:55:59
| 2018-09-17T10:27:50
|
Python
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
import json
import requests
from flask_babel import _
from flask import current_app
def translate(text, source_language, dest_language):
if 'MS_TRANSLATOR_KEY' not in current_app.config or not current_app.config['MS_TRANSLATOR_KEY']:
return _('Error: the translation service is not configured.')
auth = {'Ocp-Apim-Subscription-Key': current_app.config['MS_TRANSLATOR_KEY']}
    # Pass the query via `params` so `requests` URL-encodes it; interpolating
    # raw text into the URL breaks on '&', '=' and non-ASCII input.
    r = requests.get('https://api.microsofttranslator.com/v2/Ajax.svc/Translate',
                     params={'text': text, 'from': source_language, 'to': dest_language},
                     headers=auth)
if r.status_code != 200:
return _('Error: the translation service failed.')
return json.loads(r.content.decode('utf-8-sig'))
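# Minimal usage sketch (hypothetical `app` object; assumes MS_TRANSLATOR_KEY is
# set in the Flask config and an application context is active):
#   with app.app_context():
#       print(translate('Hello world', 'en', 'fr'))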
|
[
"kehanghan@gmail.com"
] |
kehanghan@gmail.com
|
3130f019200ef90cb43098229e4eae8adf5a1006
|
d08e6e7a2abcc7568fd3b9a6022c79091ebd0efa
|
/Logic/verify_no_duplicate_seqs_test.py
|
7924d76700dab6cb85ace88791fd019b1f32ef3e
|
[] |
no_license
|
hz-xmz/Kaggle-LANL
|
dd20b6951743fc068a866c799de93d9a89a2429d
|
0236d3bcaeb2f3f4c960902d6c31ef0c49e749c4
|
refs/heads/master
| 2020-09-11T01:25:06.633524
| 2019-06-07T16:50:41
| 2019-06-07T16:50:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
import numpy as np
import pandas as pd
data_folder = '/media/tom/cbd_drive/Kaggle/LANL/Data/'
if not 'test_combined' in locals():
test_combined = pd.read_csv(
data_folder + 'test_combined.csv').values.reshape(-1, 150000)
extreme_length = 10000
first_id = 0
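# Longest run in which the tail of test segment `first_id` matches a
# contiguous window inside the first `extreme_length` values of segment
# `sec_id` -- used to detect overlapping test snippets.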
def get_max_shared_seq_len(first_id, sec_id):
valid_ids = np.where(
start_test[sec_id] == end_test[first_id, extreme_length-1])[0]
longest_match = 0
while valid_ids.size:
longest_match += 1
valid_ids = valid_ids[np.logical_and(
valid_ids >= longest_match,
end_test[first_id, extreme_length-longest_match-1] == start_test[
sec_id, valid_ids-longest_match])]
return longest_match
#longest_match_sequence.max()
#longest_match_sequence.argmax()
#get_max_shared_seq_len(1418, 1232) # Longest sequence
num_test_files = test_combined.shape[0]
start_test = test_combined[:, :extreme_length]
end_test = test_combined[:, -extreme_length:]
longest_match_sequence = np.zeros((num_test_files, num_test_files))
for first_id in range(num_test_files):
print(first_id)
for sec_id in range(num_test_files):
longest_match_sequence[first_id, sec_id] = get_max_shared_seq_len(
first_id, sec_id)
|
[
"tvdwiele@gmail.com"
] |
tvdwiele@gmail.com
|
3a5d3b8bcd880c5bb11c80e625c2118f69744913
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/SjShObligationCall/YW_GGQQ_YWFSJHA_GOU_084.py
|
e0ca6b636bc265d2295b0d6957247055bbcce54d
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,913
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/service")
from OptMainService import *
from OptQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/mysql")
from Opt_SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryOrderErrorMsg import queryOrderErrorMsg
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_GGQQ_YWFSJHA_GOU_084(xtp_test_case):
def setUp(self):
sql_transfer = Opt_SqlData_Transfer()
sql_transfer.transfer_fund_asset('YW_GGQQ_YWFSJHA_GOU_084')
clear_data_and_restart_sh()
Api.trade.Logout()
Api.trade.Login()
def test_YW_GGQQ_YWFSJHA_GOU_084(self):
title = '买平(义务方平仓):市价剩余转限价-验资(可用资金<=0且下单导致可用资金增加后变成正数)'
        # Define the expected values for the current test case
        # Expected status: initial / unfilled / partially filled / fully filled / partial-cancel reported / partially cancelled / reported awaiting cancel / cancelled / invalid order / cancel rejected / internal cancel
        # xtp_ID and cancel_xtpID default to 0 and need not be changed
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Params: ticker, market, security type, security status, trading status, side (B = buy, S = sell), expected status, Api
stkparm = QueryStkPriceQty('10001318', '1', '*', '1', '0', 'C', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
logger.error('查询结果为False,错误原因: {0}'.format(
json.dumps(rs['测试错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_OPTION'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_CLOSE'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 1
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
if rs['用例测试结果']:
logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
else:
logger.warning('执行结果为{0},{1},{2}'.format(
str(rs['用例测试结果']), str(rs['用例错误源']),
json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True) # 4
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
96cfc8f20622043eeff69349f2ee5eda49f0f6a3
|
7769cb512623c8d3ba96c68556b2cea5547df5fd
|
/mmdet/ops/carafe/grad_check.py
|
06820be2459c9766113796cbe34e76db7ae9c108
|
[
"MIT"
] |
permissive
|
JialeCao001/D2Det
|
0e49f4c76e539d574e46b02f278242ca912c31ea
|
a76781ab624a1304f9c15679852a73b4b6770950
|
refs/heads/master
| 2022-12-05T01:00:08.498629
| 2020-09-04T11:33:26
| 2020-09-04T11:33:26
| 270,723,372
| 312
| 88
|
MIT
| 2020-07-08T23:53:23
| 2020-06-08T15:37:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,118
|
py
|
import os.path as osp
import sys
import mmcv
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip
from mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip
from mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip
feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 100, 6, 6, requires_grad=True, device='cuda:0').sigmoid().double()
print('Gradcheck for carafe...')
test = gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
print('Gradcheck for carafe naive...')
test = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_forward = 0
time_backward = 0
bar = mmcv.ProgressBar(loop_num)
timer = mmcv.Timer()
for i in range(loop_num):
x = carafe(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_backward += timer.since_last_check()
bar.update()
print('\nCARAFE time forward: {} ms/iter | time backward: {} ms/iter'.format(
(time_forward + 1e-3) * 1e3 / loop_num,
(time_backward + 1e-3) * 1e3 / loop_num))
time_naive_forward = 0
time_naive_backward = 0
bar = mmcv.ProgressBar(loop_num)
timer = mmcv.Timer()
for i in range(loop_num):
x = carafe_naive(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
print('\nCARAFE naive time forward: {} ms/iter | time backward: {} ms/iter'.
format((time_naive_forward + 1e-3) * 1e3 / loop_num,
(time_naive_backward + 1e-3) * 1e3 / loop_num))
|
[
"connor@tju.edu.cn"
] |
connor@tju.edu.cn
|
6f925f97b7f04bf835e0d8185ead4532b0c99e7b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_orbs.py
|
66df141052b56b070130b15e4bf528655d9c8b3a
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from xai.brain.wordbase.nouns._orb import _ORB
#calss header
class _ORBS(_ORB, ):
def __init__(self,):
_ORB.__init__(self)
self.name = "ORBS"
self.specie = 'nouns'
self.basic = "orb"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
61886c57f514349c383865d61b354268510431e7
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/1367_Linked_List_in_Binary_Tree/solution_test.py
|
3b9df505ae99770b2edb20625a60761479c57dfc
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
'''
1367. Linked List in Binary Tree
Level: Medium
https://leetcode.com/problems/linked-list-in-binary-tree
'''
import unittest
class TestSum(unittest.TestCase):
def test_sum(self):
self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")
def test_sum_tuple(self):
self.assertEqual(sum((1, 2, 2)), 6, "Should be 6")
if __name__ == '__main__':
unittest.main()
|
[
"benjaminhuanghuang@gmail.com"
] |
benjaminhuanghuang@gmail.com
|
d8ffdcf84d2027052d36f9dd6c7668b3dec09237
|
17fb5e4cdcf8e557bd0ab8606dfd88074dc4d525
|
/ticket_26758/tests.py
|
0c5f0bcdd0c36962169c74cd5c324dbae1b70edd
|
[] |
no_license
|
charettes/django-ticketing
|
0b17c85afa049d1b73db244e1199798feb9a4b73
|
78ed6a345e760ea46434690e9385ae4d26fc2810
|
refs/heads/master
| 2021-01-17T06:38:35.337305
| 2016-06-15T02:33:38
| 2016-06-15T02:33:38
| 45,122,368
| 0
| 1
| null | 2016-02-09T20:21:48
| 2015-10-28T15:30:59
|
Python
|
UTF-8
|
Python
| false
| false
| 291
|
py
|
from django.test import TestCase
from django.db.models import Count
from .models import Company
class FooTests(TestCase):
def test_foo(self):
qs = Company.objects.annotate(ticketcount=Count('srservice')).exclude(ticketcount=0).order_by('-ticketcount')
print(list(qs))
|
[
"charette.s@gmail.com"
] |
charette.s@gmail.com
|
768bbacc62ddda437780f7a4469c365ecc15cccc
|
a31c21bcc4486fd44dd2c5b7f364e8f0320f7dd3
|
/mlsh/lab/lab/envs/mujoco/ant_obstaclesgen.py
|
9d1881775735ec9b7f26b3bb7e0f778226fe12c2
|
[
"MIT"
] |
permissive
|
SynthAI/SynthAI
|
0cb409a4f5eb309dfc6a22d21ac78447af075a33
|
4e28fdf2ffd0eaefc0d23049106609421c9290b0
|
refs/heads/master
| 2020-03-19T12:49:07.246339
| 2018-06-07T23:27:51
| 2018-06-07T23:27:51
| 136,542,297
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,907
|
py
|
import numpy as np
from lab import utils
from lab.envs.mujoco import mujoco_env
class AntObstaclesGenEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.count = 0
self.realgoal = 0
mujoco_env.MujocoEnv.__init__(self, 'ant_obstacles_gen.xml', 5)
utils.EzPickle.__init__(self)
self.randomizeCorrect()
def randomizeCorrect(self):
self.realgoal = self.np_random.choice([0, 1, 2, 3, 4, 5, 6, 7, 8])
# 0 = obstacle. 1 = no obstacle.
self.realgoal = 6
def _step(self, a):
self.count += 1
if self.count % 200 == 0:
n_qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
n_qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
n_qpos[:2] = self.data.qpos[:2,0]
n_qpos[-11:] = self.data.qpos[-11:,0]
self.set_state(n_qpos, n_qvel)
goal = np.array([8, 24])
if self.realgoal == 0:
goal = np.array([8, 24])
if self.realgoal == 1:
goal = np.array([8, -24])
if self.realgoal == 2:
goal = np.array([24, 24])
if self.realgoal == 3:
goal = np.array([24, -24])
if self.realgoal == 4:
goal = np.array([48, 0])
if self.realgoal == 5:
goal = np.array([40, 24])
if self.realgoal == 6:
goal = np.array([40, -24])
if self.realgoal == 7:
goal = np.array([32, 16])
if self.realgoal == 8:
goal = np.array([32, -16])
# reward = -np.sum(np.square(self.data.qpos[:2,0] - goal)) / 100000
xposbefore = self.data.qpos[0,0]
yposbefore = self.data.qpos[1,0]
self.do_simulation(a, self.frame_skip)
xposafter = self.data.qpos[0,0]
yposafter = self.data.qpos[1,0]
if xposbefore < goal[0]:
forward_reward = (xposafter - xposbefore)/self.dt
else:
forward_reward = -1*(xposafter - xposbefore)/self.dt
if yposbefore < goal[1]:
forward_reward += (yposafter - yposbefore)/self.dt
else:
forward_reward += -1*(yposafter - yposbefore)/self.dt
ctrl_cost = .1 * np.square(a).sum()
reward = forward_reward - ctrl_cost
# print(reward)
done = False
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
return np.concatenate([
self.data.qpos.flat[:-11],
self.data.qvel.flat[:-11],
# self.data.qpos.flat,
# self.data.qvel.flat,
])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
# self.realgoal = 4
if self.realgoal == 0:
qpos[-11:] = np.array([80,0,0,80,0,0,0,0,0, 8, 24])
if self.realgoal == 1:
qpos[-11:] = np.array([0,0,0,80,0,0,80,0,0, 8, -24])
if self.realgoal == 2:
qpos[-11:] = np.array([0,80,0,80,80,0,0,0,0, 24, 24])
if self.realgoal == 3:
qpos[-11:] = np.array([0,0,0,80,80,0,0,80,0, 24, -24])
if self.realgoal == 4:
qpos[-11:] = np.array([0,0,0,80,80,80,0,0,0, 48, 0])
if self.realgoal == 5:
qpos[-11:] = np.array([0,0,80,80,80,80,0,0,0, 40, 24])
if self.realgoal == 6:
qpos[-11:] = np.array([0,0,0,80,80,80,0,0,80, 40, -24])
if self.realgoal == 7:
qpos[-11:] = np.array([80,80,0,80,0,0,0,0,0, 32, 16])
if self.realgoal == 8:
qpos[-11:] = np.array([0,0,0,80,0,0,80,80,0, 32, -16])
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.6
|
[
"foxxymoxxy@secmail.pro"
] |
foxxymoxxy@secmail.pro
|
55a0831d34cbfef8d1eeff1fafad9e2e71e5f77b
|
ea378480ba678eb123ef826e3ca0c3eb8f4e538f
|
/paused/02.DXF write/dxfwrite-1.1.0/tests/test_abstract_entity.py
|
d9db54a5c2ba688be3762d19b23babb1cd18b1fc
|
[] |
no_license
|
msarch/py
|
67235643666b1ed762d418263f7eed3966d3f522
|
dcd25e633a87cdb3710e90224e5387d3516c1cd3
|
refs/heads/master
| 2021-01-01T05:21:58.175043
| 2017-05-25T08:15:26
| 2017-05-25T08:15:26
| 87,453,820
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
#!/usr/bin/env python
#coding:utf-8
# Created: 15.11.2010
# Copyright (C) 2010, Manfred Moitzi
# License: GPLv3
__author__ = "mozman <mozman@gmx.at>"
try:
# Python 2.6 and earlier need the unittest2 package
# try: pip install unittest2
# or download source from: http://pypi.python.org/pypi/unittest2
import unittest2 as unittest
except ImportError:
import unittest
from dxfwrite.entities import _Entity, Line
class MockEntity(_Entity):
DXF_ENTITY_NAME = Line.DXF_ENTITY_NAME
DXF_ATTRIBUTES = Line.DXF_ATTRIBUTES
class TestEntity(unittest.TestCase):
def test_init(self):
e = MockEntity()
self.assertEqual(e['layer'], '0')
def test_init_with_kwargs(self):
e = MockEntity(layer='1')
self.assertEqual(e['layer'], '1')
def test_set_get_attribute(self):
e = MockEntity()
e['layer'] = '1'
self.assertEqual(e['layer'], '1')
def test_get_attribute_error(self):
e = MockEntity()
with self.assertRaises(KeyError):
result = e['mozman']
def test_set_attribute_error(self):
e = MockEntity()
with self.assertRaises(KeyError):
e['mozman'] = 'test'
if __name__=='__main__':
unittest.main()
|
[
"msarch@free.fr"
] |
msarch@free.fr
|
238d011d53f467d54bd37298772793a292644147
|
d87483a2c0b50ed97c1515d49d62c6e9feaddbe0
|
/.history/buy_top_fc_20210203213703.py
|
da47d36dfa2e2de55eb512db4b137d0e379b7d01
|
[
"MIT"
] |
permissive
|
HopperKremer/hoptrader
|
0d36b6e33922414003cf689fb81f924da076a54b
|
406793c10bc888648290fd15c7c2af62cf8c6c67
|
refs/heads/main
| 2023-06-12T15:51:00.910310
| 2021-07-06T16:15:41
| 2021-07-06T16:15:41
| 334,754,936
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
# Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
import os, sys
import time
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
from selenium import webdriver
import json
token_path = 'token'
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"
try:
c = auth.client_from_token_file(token_path, config.api_key)
except FileNotFoundError:
c = auth.client_from_login_flow(
driver, config.api_key, redirect_uri, token_path
)
#All this scraping code works
driver.get("https://financhill.com/screen/stock-score")
time.sleep(2)
driver.find_element_by_css_selector(
'span[data-sort-name="stock_score_normalized"]'
).click()
time.sleep(2)
tickers = driver.find_elements_by_tag_name("td")
i = 0
# [0]:Ticker, [1]:Share Price, [2]:Rating, [3]:Score, [4]:Rating Change Date, [5]:Price Change %
while i < 40:
ticker = str(tickers[i].text)
print(ticker)
# How many dollars of each stock to buy:
desired_dollar_amount = 1000
num_shares = round(desired_dollar_amount/float(tickers[i+1].text))
print(num_shares)
# order = equity_buy_market(ticker, 1)
# r = c.place_order(config.tda_acct_num, order)
# time.sleep(2)
# print(r.status_code)
i += 10
driver.quit()
|
[
"hopperkremer@gmail.com"
] |
hopperkremer@gmail.com
|
c315767ae48f4b9f82aeda1f355e3cd3dc81471b
|
7ddded3d38469cd3238a702d7d62cf816cb0d5d5
|
/cmsplugin_cascade/segmentation/mixins.py
|
92dd20dd334d71023b13787c88506ee2f5282004
|
[
"MIT"
] |
permissive
|
sayonetech/djangocms-cascade
|
8e249ab83bba97dad27aee468a2a6fce0eb58f3b
|
699d645cefae926d32879fbc6837693082f84c78
|
refs/heads/master
| 2022-05-25T02:31:58.059789
| 2020-04-25T07:33:29
| 2020-04-25T07:33:29
| 259,930,919
| 0
| 0
|
MIT
| 2020-04-29T13:20:54
| 2020-04-29T13:20:53
| null |
UTF-8
|
Python
| false
| false
| 7,488
|
py
|
from django import VERSION as DJANGO_VERSION
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.http import HttpResponse, HttpResponseBadRequest
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _, ungettext
from django.utils.html import format_html
from cms.constants import REFRESH_PAGE
class SegmentPluginModelMixin(object):
"""
TODO: whenever cmsplugin_cascade drops support for django-CMS < 3.4, this mixin class
shall be added to the plugin rather than to the model
"""
def get_context_override(self, request):
"""
Return a dictionary to override the request context object during evaluation with
alternative values. Normally this is an empty dict. However, when a staff user overrides
the segmentation, then update the context with this returned dict.
"""
return {}
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
context.update(self.get_context_override(context['request']))
content = super().render_plugin(context, placeholder, admin, processors)
context.pop()
return content
class EmulateUserModelMixin(SegmentPluginModelMixin):
UserModel = get_user_model()
def get_context_override(self, request):
"""
Override the request object with an emulated user.
"""
context_override = super().get_context_override(request)
try:
if request.user.is_staff:
user = self.UserModel.objects.get(pk=request.session['emulate_user_id'])
context_override.update(user=user)
except (self.UserModel.DoesNotExist, KeyError):
pass
return context_override
class EmulateUserAdminMixin(object):
UserModel = get_user_model()
@staticmethod
def populate_toolbar(segmentation_menu, request):
active = 'emulate_user_id' in request.session
segmentation_menu.add_sideframe_item(
_("Emulate User"),
url=reverse('admin:emulate-users'),
active=active,
)
segmentation_menu.add_ajax_item(
_("Clear emulations"),
action=reverse('admin:clear-emulations'),
on_success=REFRESH_PAGE,
disabled=not active,
)
def get_urls(self):
return [
url(r'^emulate_users/$', self.admin_site.admin_view(self.emulate_users), name='emulate-users'),
url(r'^emulate_user/(?P<user_id>\d+)/$', self.admin_site.admin_view(self.emulate_user), name='emulate-user'),
url(r'^clear_emulations/$', self.admin_site.admin_view(self.clear_emulations), name='clear-emulations'),
] + super().get_urls()
def emulate_user(self, request, user_id):
try:
request.session['emulate_user_id'] = int(user_id)
return HttpResponse('OK')
except TypeError as err:
return HttpResponseBadRequest(err.message)
def emulate_users(self, request):
"""
The list view
"""
def display_as_link(obj):
try:
identifier = getattr(user_model_admin, list_display_link)(obj)
except AttributeError:
identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
emulate_user_id = request.session.get('emulate_user_id')
if emulate_user_id == obj.id:
return format_html('<strong>{}</strong>', identifier)
fmtargs = {
'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
'identifier': identifier,
}
return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)
opts = self.UserModel._meta
app_label = opts.app_label
user_model_admin = self.admin_site._registry[self.UserModel]
request._lookup_model = self.UserModel
list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
# replace first entry in list_display_links by customized method display_as_link
list_display_link = list_display_links[0]
try:
list_display = list(user_model_admin.segmentation_list_display)
except AttributeError:
list_display = list(user_model_admin.list_display)
list_display.remove(list_display_link)
list_display.insert(0, 'display_as_link')
display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9
try:
display_as_link.short_description = user_model_admin.identifier.short_description
except AttributeError:
display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
self.display_as_link = display_as_link
ChangeList = self.get_changelist(request)
if DJANGO_VERSION < (2, 1):
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self)
else:
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self,
None)
cl.formset = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': str(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
'is_popup': cl.is_popup,
'cl': cl,
'media': self.media,
'has_add_permission': False,
'opts': cl.opts,
'app_label': app_label,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
}
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
def clear_emulations(self, request):
request.session.pop('emulate_user_id', None)
return HttpResponse('OK')
|
[
"jacob.rief@gmail.com"
] |
jacob.rief@gmail.com
|
b86fb3e8323c7fac53da50672674d2d083ec8e83
|
c137d7fb6eaa1c1900a63b8dae6b027176a98b6f
|
/MxShop/apps/user_operation/migrations/0002_auto_20191215_1324.py
|
67ebf35770168b3b304df8aed21406cfe8fb758c
|
[] |
no_license
|
LasterSmithKim/vuedjango
|
22220414ad2f928f0a0df1a0e68c9083e90c1cc7
|
4a5b7fee4dd3f2d31255d7dc9188ea977a75db29
|
refs/heads/master
| 2022-12-10T19:52:25.014956
| 2019-12-23T16:23:01
| 2019-12-23T16:23:01
| 225,315,491
| 0
| 0
| null | 2022-11-22T04:52:05
| 2019-12-02T07:47:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 495
|
py
|
# Generated by Django 2.2.7 on 2019-12-15 13:24
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0003_auto_20191201_0823'),
('user_operation', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='userfav',
unique_together={('user', 'goods')},
),
]
|
[
"kingone@yeah.net"
] |
kingone@yeah.net
|
dac682aff595a8ac60201e3480935c04e5b62c3b
|
83cf642504313b6ef6527dda52158a6698c24efe
|
/scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/obstacle.py
|
7598560cf2b91178e54ad5d8d3e0f5402501e10f
|
[] |
no_license
|
PyrokinesisStudio/Fluid-Designer-Scripts
|
a4c40b871e8d27b0d76a8025c804d5a41d09128f
|
23f6fca7123df545f0c91bf4617f4de7d9c12e6b
|
refs/heads/master
| 2021-06-07T15:11:27.144473
| 2016-11-08T03:02:37
| 2016-11-08T03:02:37
| 113,630,627
| 1
| 0
| null | 2017-12-09T00:55:58
| 2017-12-09T00:55:58
| null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
from . struct import Struct
from . bpy_struct import bpy_struct
import mathutils
class Obstacle(bpy_struct):
@property
def rna_type(self):
'''(Struct) RNA type definition'''
return Struct()
@property
def name(self):
'''(String) Unique name used in the code and scripting'''
return str()
@property
def bp_name(self):
'''(String)'''
return str()
@property
def base_point(self):
'''(String)'''
return str()
|
[
"dev.andrewpeel@gmail.com"
] |
dev.andrewpeel@gmail.com
|
358c3dc0cf6ca85640bf9dce63f0bc8db277b92d
|
c5291e50a3c72c885922378573a0ad423fcedf05
|
/EasyPay/billpay/controllers/transaction.py
|
5d51ce3230840967ef71d5b124a63aff15748a5b
|
[] |
no_license
|
raghurammanyam/django-projects
|
bcc3ed6285882af437a2995514cef33760fb063e
|
dd20ae354f7f111a0176a1cc047c099bd23e9f05
|
refs/heads/master
| 2022-12-12T19:22:31.698114
| 2018-12-09T09:41:45
| 2018-12-09T09:41:45
| 137,443,359
| 0
| 0
| null | 2022-11-22T03:01:07
| 2018-06-15T05:08:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core import serializers
from django.http import JsonResponse
from billpay.models import transactions
from billpay.serializers.transactionserializer import transactionSerializer,gettransactionSerializer
from django.http import Http404
from rest_framework import status
from django.conf import settings
import logging
logger = logging.getLogger('billpay.transaction')
class addtransaction(APIView):
def get(self,request,*args,**kwargs):
try:
get=transactions.objects.all()
serializer=gettransactionSerializer(get,many=True)
return JsonResponse({"success":True,"data":serializer.data})
except transactions.DoesNotExist:
raise Http404
def post(self,request,format=None):
try:
serializer=transactionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
logger.info(serializer.data)
return JsonResponse({'data':serializer.data,'message':'transaction added suceesfully'})
logger.error(serializer.errors)
return JsonResponse({"message":serializer.errors})
except Http404:
return JsonResponse({"success":False,"message":"transaction not added"})
|
[
"manyamraghuram@gmail.com"
] |
manyamraghuram@gmail.com
|
3e2c4eec3c3fd761e5f94ad65d35d6609cc1d30a
|
c41edf53089b1ee466ea578aa74f0c1e9b95a8b3
|
/aqi_v3.0.py
|
04b339479b8e885f186fe65eb4e7373bb5804cc2
|
[] |
no_license
|
gjw199513/aqidemo
|
fb4e81e6d1f6b40c14bbcc5401ce57eac11beb2b
|
64dc64def274916aa513890cb24e18777518c375
|
refs/heads/master
| 2021-05-11T12:33:20.256133
| 2018-01-16T09:17:49
| 2018-01-16T09:17:49
| 117,661,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'gjw'
__time__ = '2018/1/16 0016 上午 9:33'
# AQI calculation
# Read the JSON file
import json
import csv
def process_json_file(filepath):
"""
    Decode the JSON file
:param filepath:
:return:
"""
f = open(filepath, mode='r', encoding='utf-8')
city_list = json.load(f)
return city_list
def main():
"""
    Main function
:return:
"""
    filepath = input("Enter the JSON file name: ")
city_list = process_json_file(filepath)
city_list.sort(key=lambda city: city['aqi'])
lines = []
    # Column names
lines.append(list(city_list[0].keys()))
for city in city_list:
lines.append(list(city.values()))
f = open('aqi.csv', 'w', encoding='utf-8', newline='')
writer = csv.writer(f)
for line in lines:
writer.writerow(line)
f.close()
if __name__ == '__main__':
main()
|
[
"gjw199513@163.com"
] |
gjw199513@163.com
|
55bd0b4a233bda1faa5458145b221dc1f947400a
|
e7c3d2b1fd7702b950e31beed752dd5db2d127bd
|
/code/pythagorean_tree/sol_395.py
|
a058d0ed47da7c5635ddf92ee8c14e841b46c257
|
[
"Apache-2.0"
] |
permissive
|
Ved005/project-euler-solutions
|
bbadfc681f5ba4b5de7809c60eb313897d27acfd
|
56bf6a282730ed4b9b875fa081cf4509d9939d98
|
refs/heads/master
| 2021-09-25T08:58:32.797677
| 2018-10-20T05:40:58
| 2018-10-20T05:40:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
# -*- coding: utf-8 -*-
'''
File name: code\pythagorean_tree\sol_395.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #395 :: Pythagorean tree
#
# For more information see:
# https://projecteuler.net/problem=395
# Problem Statement
'''
The Pythagorean tree is a fractal generated by the following procedure:
Start with a unit square. Then, calling one of the sides its base (in the animation, the bottom side is the base):
Attach a right triangle to the side opposite the base, with the hypotenuse coinciding with that side and with the sides in a 3-4-5 ratio. Note that the smaller side of the triangle must be on the 'right' side with respect to the base (see animation).
Attach a square to each leg of the right triangle, with one of its sides coinciding with that leg.
Repeat this procedure for both squares, considering as their bases the sides touching the triangle.
The resulting figure, after an infinite number of iterations, is the Pythagorean tree.
It can be shown that there exists at least one rectangle, whose sides are parallel to the largest square of the Pythagorean tree, which encloses the Pythagorean tree completely.
Find the smallest area possible for such a bounding rectangle, and give your answer rounded to 10 decimal places.
'''
# Solution
# Solution Approach
'''
'''
|
[
"vaidic@vaidicjoshi.in"
] |
vaidic@vaidicjoshi.in
|
4921921057dceaa1a35447e580f62d0b41e5898e
|
e6590826c10648c472743c20b898655ec0ef3ce5
|
/7.14.py
|
1417073e3f79cdfa391f4ccd257ba21c5e5cc211
|
[] |
no_license
|
suddencode/pythontutor_2018
|
a770fbf476af049dc8f04c8b0f81cce7922a63c4
|
767cdd0d980be290a613ebda455a49daad1a7902
|
refs/heads/master
| 2020-03-24T00:15:07.744889
| 2019-02-11T20:17:42
| 2019-02-11T20:17:42
| 142,281,968
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
a = [int(s) for s in input().split()]
c = ''
for f in range(len(a)):
if a.count(a[f]) == 1:
c += str(a[f]) + ' '
print(c)
|
[
"noreply@github.com"
] |
suddencode.noreply@github.com
|
5c740df3c278cbd5a651616a8c6362d2a129e5df
|
0a46b027e8e610b8784cb35dbad8dd07914573a8
|
/scripts/venv/lib/python2.7/site-packages/cogent/struct/selection.py
|
03f31e74ff518727fac35b01df2868657cf41af5
|
[
"MIT"
] |
permissive
|
sauloal/cnidaria
|
bb492fb90a0948751789938d9ec64677052073c3
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
refs/heads/master
| 2021-01-17T13:43:17.307182
| 2016-10-05T14:14:46
| 2016-10-05T14:14:46
| 33,726,643
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,229
|
py
|
"""Contains functions to select and group structural entities."""
from cogent.core.entity import StructureHolder, ModelHolder, ChainHolder, \
ResidueHolder, AtomHolder, HIERARCHY
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
def select(entities, level, *args, **kwargs):
"""Shorthand for ``einput`` and subsequent ``selectChildren``. Returns
Returns a ``Holder`` instance. The "name" can be specified.
Additional arguments and keyworded arguments are passed to the
``selectChildren`` method of the holder instance.
"""
try:
name = kwargs.pop('name')
except KeyError:
name = 'select'
holder = einput(entities, level)
selection = holder.selectChildren(*args, **kwargs)
try:
holder = einput(selection.values(), level, name)
except ValueError:
raise ValueError('No entities have been selected')
return holder
def einput(entities, level, name=None):
"""Creates a ``XyzHolder`` instance of entities at the specified level. Where
Xyz is 'Structure', 'Model', 'Chain', Residue' or 'Atom'.
Arguments:
- entities: ``Entity`` instance or sequence of entities.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
- name: optional name of the ``XyzHolder`` instance.
"""
# Keep it bug-free
all = {}
index = HIERARCHY.index(level)
for entity in entities: # __iter__ override in Entity
if index > HIERARCHY.index(entity.level): # call for children
all.update(get_children(entity, level))
elif index < HIERARCHY.index(entity.level): # call for parents
all.update(get_parent(entity, level))
else:
all.update({entity.getFull_id():entity}) # call for self
higher_level = HIERARCHY[index - 1] # one up;)
if all:
name = name or higher_level
if higher_level == 'C':
holder = ResidueHolder(name, all)
elif higher_level == 'R':
holder = AtomHolder(name, all)
elif higher_level == 'M':
holder = ChainHolder(name, all)
elif higher_level == 'S':
holder = ModelHolder(name, all)
elif higher_level == 'H':
holder = StructureHolder(name, all)
else:
        raise ValueError, "einput got no input entities."
holder.setSort_tuple()
return holder
def get_children(entity, level):
"""Return unique entities of lower or equal level
Arguments:
- entity: any ``Entity`` instance.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
"""
entity.setTable()
return entity.table[level]
def get_parent(entity, level):
"""Returns unique entities of higher level.
Arguments:
- entity: any ``Entity`` instance.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
"""
parent = entity.getParent(level) # get the correct parent
return {parent.getFull_id(): parent}
|
[
"sauloal@gmail.com"
] |
sauloal@gmail.com
|
653912fc3bb836925fa3ad1cc2c80b096d3ce413
|
c36d9d70cbb257b2ce9a214bcf38f8091e8fe9b7
|
/1480_running_sum_of_1d_array.py
|
2963386c35e2ba41f6b22c1535d379e20a42c43a
|
[] |
no_license
|
zdadadaz/coding_practice
|
3452e4fc8f4a79cb98d0d4ea06ce0bcae85f96a0
|
5ed070f22f4bc29777ee5cbb01bb9583726d8799
|
refs/heads/master
| 2021-06-23T17:52:40.149982
| 2021-05-03T22:31:23
| 2021-05-03T22:31:23
| 226,006,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
n = len(nums)
if n == 1:
return nums
for i in range(1,n):
nums[i] += nums[i-1]
return nums
|
[
"zdadadaz5566@gmail.com"
] |
zdadadaz5566@gmail.com
|
1cbbbcd8c445f2dc867f77a6fe853653e4a2819d
|
0b69a011c9ffee099841c140be95ed93c704fb07
|
/problemsets/Codeforces/Python/A586.py
|
d7228d5b1012ec2539b03decd80c92a6c9012979
|
[
"Apache-2.0"
] |
permissive
|
juarezpaulino/coderemite
|
4bd03f4f2780eb6013f07c396ba16aa7dbbceea8
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
refs/heads/main
| 2023-01-31T11:35:19.779668
| 2020-12-18T01:33:46
| 2020-12-18T01:33:46
| 320,931,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
import re
input()
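# Drop spaces and leading/trailing zeros, remove every run of two or more
# zeros, then print how many characters remain.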
print(len(''.join(re.split('00+',input().replace(' ','').strip('0')))))
|
[
"juarez.paulino@gmail.com"
] |
juarez.paulino@gmail.com
|
9bf48177a873f6126e4cfa4661fed22742055db9
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/795.py
|
219e317d76df5272a163058483e9e3d06bc08a8e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
#!/usr/bin/python2
def won(player):
return rows(player) or cols(player) or diag1(player) or diag2(player)
def rows(player):
for i in range(0,4):
if row(i,player):
return True
return False
def cols(player):
for j in range(0,4):
if col(j,player):
return True
return False
def row(i, player):
for j in range(0,4):
if board[i][j] not in [player,"T"]:
return False
return True
def col(j, player):
for i in range(0,4):
if board[i][j] not in [player,"T"]:
return False
return True
def diag1(player):
for i in range(0,4):
if board[i][i] not in [player,"T"]:
return False
return True
def diag2(player):
for j in range(0,4):
i = 3-j
if board[i][j] not in [player,"T"]:
return False
return True
def evaluate(board):
if won("X"):
return "X won"
if won("O"):
return "O won"
for i in range(0,4):
for j in range(0,4):
if board[i][j] == ".":
return "Game has not completed"
return "Draw"
lines = open("A-large.in").readlines()
num_cases = int(lines[0])
x = 1
y = 4
for n in range(0,num_cases):
board = lines[x:y+1]
#print board
print "Case #{0}: {1}".format(n+1, evaluate(board))
x += 5
y += 5
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
85f5a86b401a45080844f34bc6d9b786c830a113
|
71c7683331a9037fda7254b3a7b1ffddd6a4c4c8
|
/Phys/BsJPsiKst/python/BsJPsiKst/syst.py
|
64b1b8e87dad10cd219df5f4cd53e134b168dbff
|
[] |
no_license
|
pseyfert-cern-gitlab-backup/Urania
|
edc58ba4271089e55900f8bb4a5909e9e9c12d35
|
1b1c353ed5f1b45b3605990f60f49881b9785efd
|
refs/heads/master
| 2021-05-18T13:33:22.732970
| 2017-12-15T14:42:04
| 2017-12-15T14:42:04
| 251,259,622
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
from math import *
eps = 1.011
f = 0.951
k = 1.063
def err(s1,s2,v):
a1 = abs(s1-v)
a2 = abs(s2-v)
return max(a1,a2)
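# e.g. err(1.02, 0.98, 1.00) -> ~0.02, the largest deviation of s1/s2 from v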
|
[
"liblhcb@cern.ch"
] |
liblhcb@cern.ch
|
bb64dbc2667d5b007272f60222fdce55f6a7ba45
|
21f98d8bb31264c94e7a98fb8eb806d7f5bd396e
|
/Binary Search/081. Search in Rotated Sorted Array II.py
|
996738cbc12be3cb5963f0b2d735bfec29253141
|
[] |
no_license
|
mcfair/Algo
|
e1500d862a685e598ab85e8ed5b68170632fdfd0
|
051e2a9f6c918907cc8b665353c46042e7674e66
|
refs/heads/master
| 2021-10-31T01:25:46.917428
| 2021-10-08T04:52:44
| 2021-10-08T04:52:44
| 141,460,584
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
"""
When the array contains duplicates of the pivot value (nums[0] == nums[-1]), the solution for LC33 no longer applies directly.
"""
class Solution(object):
def search(self, nums, target):
if not nums: return False
if target == nums[0]: return True
pivot = nums[0]
l, r = 0, len(nums)
#this while loop has worst case O(n)
while r>0 and nums[r-1] == pivot:
r-=1
while l<r:
mid = (l+r)//2
compare = nums[mid]
if (target < pivot) ^ (compare < pivot):
if target < pivot:
compare = float('-inf')
else:
compare = float('inf')
if compare == target:
return True
if compare < target:
l = mid + 1
else:
r = mid
return False
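# Quick check (LeetCode 81 samples):
#   Solution().search([2, 5, 6, 0, 0, 1, 2], 0)  -> True
#   Solution().search([2, 5, 6, 0, 0, 1, 2], 3)  -> False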
|
[
"noreply@github.com"
] |
mcfair.noreply@github.com
|
c381edbdc210fa7e13c9646b2572c064b2fb4914
|
9b68d23d4409af5921a51e2d91344090996787e5
|
/main_backup20190512.py
|
ce5655b0b8aebe49a859eeb899f1dea4d9abaf20
|
[] |
no_license
|
sasakishun/segm
|
48381b77b9a876c44edfbcdbb4a2ef19096b48f4
|
d48158243853d737b7056815108aef4e0ac5b6ca
|
refs/heads/master
| 2020-05-05T10:23:12.673182
| 2019-05-14T07:46:07
| 2019-05-14T07:46:07
| 179,943,924
| 0
| 0
| null | 2019-04-07T09:31:08
| 2019-04-07T09:31:08
| null |
UTF-8
|
Python
| false
| false
| 7,122
|
py
|
from model import Model
from load_data import Datagen, plot_data
import tensorflow as tf
from util import plot_segm_map, calc_iou
import numpy as np
import networkx as nx
import adjacency
import matplotlib.pyplot as plt
import scipy
import math
np.set_printoptions(threshold=np.inf)
G = nx.Graph()
nodes = np.array(list(range(32 * 32)))
G.add_nodes_from(nodes)
# Generate the full list of edges for a grid-shaped graph
edges = []
grid = adjacency.grid_points([32, 32])
for _grid in grid:
edges.append((_grid[0], _grid[1]))
edges.append((_grid[1], _grid[0]))
# Add the grid edges to the graph
G.add_edges_from(edges)
"""
# Display the graph
pos = nx.spring_layout(G)
nx.draw_networkx(G, pos, with_labels=True)
plt.axis("off")
plt.show()
"""
# Prepare the matrices needed for graph convolution
A = nx.adjacency_matrix(G).astype("float32")
D = nx.laplacian_matrix(G).astype("float32") + A
for i in range(G.number_of_nodes()):
D[i, i] = 1 / math.sqrt(D[i, i])
A_chil_ = D.dot(A.dot(D))
# Helper to convert scipy.sparse -> tf.SparseTensor
def convert_sparse_matrix_to_sparse_tensor(X):
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
return tf.SparseTensor(indices, coo.data, coo.shape)
A_chil = convert_sparse_matrix_to_sparse_tensor(A_chil_)
# Builds a single GCN layer
def GCN_layer(A, layer_input, W, activation):
if activation is None:
return tf.matmul(tf.sparse.matmul(A, layer_input), W)
else:
return activation(tf.matmul(tf.sparse.matmul(A, layer_input), W))
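# Note: with A_chil = D^{-1/2} A D^{-1/2} (built above), each call computes a
# GCN-style propagation H' = activation(A_chil @ H @ W) (Kipf & Welling, 2017),
# though unlike the canonical formulation no self-loops are added to A here.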
d = 1  # dimension of the final output
hidden_size = 8  # output size of the first layer
learning_rate = 1e-3  # learning rate
# Model definition
X = tf.placeholder(tf.float32, shape=[G.number_of_nodes(), 256])
# _segm_map = tf.sparse_placeholder(tf.float32)
_segm_map = tf.placeholder(tf.float32, shape=[32*32, 1])
# _segm_map = convert_sparse_matrix_to_sparse_tensor(_segm_map)
# _segm_map = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[32*32, 1])
W_1 = tf.Variable(tf.random_normal([256, hidden_size]), dtype=tf.float32)
W_2 = tf.Variable(tf.random_normal([hidden_size, d]), dtype=tf.float32)
L1 = GCN_layer(A_chil, X, W_1, tf.nn.relu)
L2 = GCN_layer(A_chil, L1, W_2, None)
print("W_1:{}".format(tf.shape(W_1)))
print("W_2:{}".format(tf.shape(W_2)))
print("A_chil:{}".format(tf.shape(A_chil)))
print("L1:{}".format(tf.shape(L1)))
print("L2:{}".format(tf.shape(L2)))
A_rec = tf.sigmoid(tf.matmul(L2, tf.transpose(L2)))
# loss = tf.nn.l2_loss(tf.sparse.add(-1 * A_rec, A_chil))
# L2 = tf.sparse.to_dense(L2)
loss = tf.nn.l2_loss(tf.add(-1 * L2, _segm_map))
# loss = tf.nn.l2_loss(tf.sparse.add(-1 * L2, _segm_map))
# loss = tf.transpose(loss)
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
batch_size = 1
dg = Datagen('data/mnist', 'data/cifar')
data, segm_map = dg.sample(batch_size, norm=False)
# 学習部分
epoch = 10000
# x = np.identity(G.number_of_nodes(), dtype="float32")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
loss_list = list()
for e in range(epoch):
        # Training step
data_batch, segm_map_batch = dg.sample(batch_size, norm=False)
data_batch = data_batch.reshape([3, 1024])[0]
data_batch = np.array(data_batch, dtype=np.int64)
data_batch = np.identity(256)[data_batch]
# print("#{} databatch:{}".format(e, data_batch))
x = data_batch.reshape([G.number_of_nodes(), 256])
segm_map_batch = np.array(segm_map_batch, dtype=np.int64)
# print("segm_map_batch.shape:{}".format(segm_map_batch.shape))
"""
indices = [[], []]
values = []
for i in range(segm_map_batch[0].shape[0]):
for j in range(segm_map_batch[0].shape[1]):
if segm_map_batch[0, i, j] != 0:
indices[0].append(i)
indices[1].append(j)
values.append(segm_map_batch[0, i, j])
print("indices:{}".format(indices))
print("values:{}".format(values))
indices = np.array([[i for i in indices[0]],
[i for i in indices[1]]], dtype=np.int64)
values = np.array([i for i in values], dtype=np.float32)
shape = np.array([32 * 32, 1], dtype=np.int64)
"""
segm_map_batch = segm_map_batch.reshape([32 * 32, 1])
# segm_map_batch = scipy.sparse.lil_matrix(segm_map_batch)
# print("x:{}".format(x.shape))
# print("L1 in sess:{}".format(sess.run(tf.shape(L1), feed_dict={X: x})))
# print("L2 in sess:{}".format(sess.run(tf.shape(L2), feed_dict={X: x})))
# print("A_rec in sess:{}".format(sess.run(tf.shape(A_rec), feed_dict={X: x})))
# print("segm_map_batch:{}".format(segm_map_batch.shape))
tloss, _ = sess.run([loss, train], feed_dict={X: x, _segm_map: segm_map_batch})
# segm_map_batch})
loss_list.append(tloss)
print("#{} loss:{}".format(e, tloss))
if e % 100 == 0:
test_loss, segm_map_pred = sess.run([loss, L2],
feed_dict={X: data_batch, _segm_map: segm_map_batch})
print("shapes input:{} output:{} target:{}".format(np.shape(data_batch), np.shape(segm_map_batch), np.shape(segm_map_pred)))
plot_segm_map(np.squeeze(data_batch), np.squeeze(segm_map_batch), np.squeeze(segm_map_pred))
"""
    # Output the training results
if (e + 1) % 100 == 0:
emb = sess.run(L2, feed_dict={X: x})
fig, ax = plt.subplots()
for i in range(G.number_of_nodes()):
ax.scatter(emb[i][0], emb[i][1], color=color[i])
plt.title("epoch" + str(e + 1))
plt.show()
plt.title("epoch" + str(e + 1))
nx.draw_networkx(G, pos=emb, node_color=color)
plt.show()
"""
"""
batch_size = 64
dropout = 0.7
dg = Datagen('data/mnist', 'data/cifar')
data, segm_map = dg.sample(batch_size)
model = Model(batch_size, dropout)
num_iter = 500
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for iter in range(num_iter):
data_batch, segm_map_batch = dg.sample(batch_size)
train_loss, _ = sess.run([model.total_loss, model.train_step],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
if iter % 50 == 0:
data_batch, segm_map_batch = dg.sample(batch_size, dataset='test')
test_loss, segm_map_pred = sess.run([model.total_loss, model.h4],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
print('iter %5i/%5i loss is %5.3f and mIOU %5.3f' % (
iter, num_iter, test_loss, calc_iou(segm_map_batch, segm_map_pred)))
# Final run
data_batch, segm_map_batch = dg.sample(batch_size, dataset='test')
test_loss, segm_map_pred = sess.run([model.total_loss, model.h4],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
plot_segm_map(data_batch, segm_map_batch, segm_map_pred)
"""
|
[
"Pakka-xeno@keio.jp"
] |
Pakka-xeno@keio.jp
|
7ef60b70a910574209907c3a9e4d6c0dc73d5b45
|
67b0d2d2e1e3b2b9fba4cfc14adc31f503bb0b91
|
/AirConditioningV2/filters.py
|
54b56a16c2e40a45057929cf6936b15a8652dbdb
|
[] |
no_license
|
Hk4Fun/qtstudy
|
02a5059555462f5e7fe626632e351f4af69206f6
|
7b38853e29c8e055f9db2828c34815000158bf28
|
refs/heads/master
| 2020-03-14T11:29:32.501540
| 2018-06-16T07:48:35
| 2018-06-16T07:48:35
| 131,591,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
__author__ = 'Hk4Fun'
__date__ = '2018/5/15 17:24'
import time
import datetime
from PyQt5.QtCore import QDate
from AirConditioningV2.settings import *
def mapWindSpeed_c2w(wind_speed):
return {LOW_WIND: '低风', MID_WIND: '中风', HIGH_WIND: '高风'}[wind_speed]
def mapWindSpeed_w2c(wind_speed):
return {'低风': LOW_WIND, '中风': MID_WIND, '高风': HIGH_WIND}[wind_speed]
def mapMode_c2w(mode):
return {COLD_MODE: '制冷', WARM_MODE: '制热'}[mode]
def mapUserLevel_c2w(userLevel):
return {USER_NORMAL: '普通用户', USER_VIP: 'VIP'}[userLevel]
def timeFormat(timeStamp):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timeStamp))
def isSettle(orderId):
return '未结帐' if orderId == '0' else orderId
def mapDiscount(userLevel):
return {USER_NORMAL: NORMAL_DISCOUNT, USER_VIP: VIP_DISCOUNT}[userLevel]
def discountFormat(discount):
if discount == 1:
return '无'
return str(discount * 100) + '%'
def durationFormat(start, end): # start,end -- timestamp
    start = datetime.datetime.fromtimestamp(start).replace(microsecond=0)  # microsecond=0 drops the milliseconds
end = datetime.datetime.fromtimestamp(end).replace(microsecond=0)
return str(end - start)
def isEqDate(date, queryDate):
t = datetime.datetime.strptime(queryDate, "%Y-%m-%d %H:%M:%S")
return date == QDate(t.year, t.month, t.day)
|
[
"941222165chenhongwen@gmail.com"
] |
941222165chenhongwen@gmail.com
|
029838e1292b4def0204d31ffebfc8890bbee7bc
|
8906e04870524f190a11f3eb3caf8fe377ab3a24
|
/Chapter14/Chapter_14/multiprocessing_env.py
|
9c110a17857d5a7f8028ddace2f37a2e3e7c1954
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
|
8719c086c8410a2da2b4fb9852b029a4c8f67f60
|
609d63ee5389b80b760a17f7f43abe632d99a9bb
|
refs/heads/master
| 2023-02-08T19:35:30.005167
| 2023-01-30T09:09:07
| 2023-01-30T09:09:07
| 231,567,217
| 54
| 32
|
MIT
| 2022-04-21T06:47:24
| 2020-01-03T10:43:21
|
Python
|
UTF-8
|
Python
| false
| false
| 4,886
|
py
|
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
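# --- Usage sketch (editor's addition, not part of the original file) ---
# SubprocVecEnv takes a list of zero-argument environment factories, one per
# subprocess. 'CartPole-v1' and the classic env.seed() call are assumptions
# matching the gym API this baselines code was written against.
if __name__ == '__main__':
    import gym

    def make_env(seed):
        def _thunk():
            env = gym.make('CartPole-v1')
            env.seed(seed)  # older gym API; newer versions use reset(seed=seed)
            return env
        return _thunk

    envs = SubprocVecEnv([make_env(i) for i in range(4)])
    obs = envs.reset()  # stacked observations, shape (4, obs_dim)
    obs, rews, dones, infos = envs.step([envs.action_space.sample()] * 4)
    envs.close()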
|
[
"josephs@packt.com"
] |
josephs@packt.com
|
74b490218935aa0cdad3e8d7ddf4d99da22c41ec
|
2542edca28a456aa5c217be10419e56fc581fa4e
|
/dataread.py
|
ad68b69900a80d898c714dc5dd7e53ff56578f0a
|
[] |
no_license
|
neeshuagr/profile_match_score
|
16bfef57c38e6993148175bf40c29acae6e7e9a1
|
77f4f8035746f075b8232e466907f18aa3eef641
|
refs/heads/master
| 2020-04-29T08:22:35.568600
| 2019-03-16T15:28:11
| 2019-03-16T15:28:11
| 175,984,719
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,041
|
py
|
#!/usr/bin/python3.4
import datareadfiletypes
import config
import filemanager
import utility
import dcrnlp
import custom
import datetime
import dictionaries
import pyodbc
import dbmanager
def route_dataread(filepaths):
data_read_count = int(utility.read_from_file(
config.ConfigManager().ExecutioncountFile, 'r'))
file_read_count = 0
file_path_count = 0
configdocs = custom.retrieve_data_from_DB(int(config.ConfigManager().MongoDBPort), config.ConfigManager(
).DataCollectionDB, config.ConfigManager().ConfigCollection)
docid_count = int(configdocs[0]['docid_count'])
connection = dbmanager.mongoDB_connection(
int(config.ConfigManager().MongoDBPort))
utility.write_to_file(config.ConfigManager().LogFile,
'a', 'dataread running')
for filepath in filepaths:
data_text = ''
try:
file_path_count += 1
print('File number: ' + str(file_path_count))
print('Processing file..' + filepath)
if filepath[-4:].lower() == ".txt":
data_text = datareadfiletypes.read_text_text(
filepath, data_text)
elif filepath[-4:].lower() == ".pdf":
data_text = datareadfiletypes.read_pdf_text(
filepath, data_text)
elif filepath[-5:].lower() == ".docx":
data_text = datareadfiletypes.read_docx_text(
filepath, data_text)
elif filepath[-4:].lower() == ".doc":
data_text = datareadfiletypes.read_doc_text(
filepath, data_text)
elif filepath[-4:].lower() == ".xls":
# data_text = datareadfiletypes.read_excel_text(
# filepath, data_text)
docid_count = custom.process_excel_rowdata(
filepath, docid_count)
elif filepath[-5:].lower() == ".xlsx":
# data_text = datareadfiletypes.read_excel_text(
# filepath, data_text)
docid_count = custom.process_excel_rowdata(
filepath, docid_count)
elif filepath[-4:].lower() == ".csv":
data_text = datareadfiletypes.read_csv_text(
filepath, data_text)
elif filepath[-4:].lower() == ".odt":
data_text = datareadfiletypes.read_odt_text(
filepath, data_text)
elif filepath[-4:].lower() == ".xml":
docid_count = custom.process_xml_data(filepath, docid_count)
if not data_text == '':
docid_count += 1
file_read_count += 1
# dcrnlp.extract_nounphrases_sentences(data_text)
noun_phrases = ''
dictionaries.DataProperties['description'] = data_text
dictionaries.DataProperties['nounPhrases'] = noun_phrases
dictionaries.DataProperties[
'documentType'] = utility.filefolder_from_filepath(filepath)
dictionaries.DataProperties[
'dataSource'] = config.ConfigManager().Misc # config.ConfigManager().JobPortal
dictionaries.DataProperties['doc_id'] = docid_count
dictionaries.DataProperties[
'documentTitle'] = utility.filename_from_filepath(filepath)
dictionaries.DataProperties['documentDesc'] = (
dictionaries.DataProperties['description'])[0:200]
jsonfordatastore = custom.prepare_json_for_datastore(
dictionaries.DataProperties)
jsonfordatastore_deserialized = utility.jsonstring_deserialize(
jsonfordatastore)
custom.insert_data_to_DB(
jsonfordatastore_deserialized, connection)
phrases_file_data = custom.prepare_phrases_file_data(
noun_phrases, data_read_count, file_read_count)
utility.write_to_file(
config.ConfigManager().PhraseFile, 'a', phrases_file_data)
except BaseException as ex:
exception_message = '\n' + 'Exception:' + \
str(datetime.datetime.now()) + '\n'
exception_message += 'File: ' + filepath + '\n'
exception_message += '\n' + str(ex) + '\n'
exception_message += '-' * 100
utility.write_to_file(
config.ConfigManager().LogFile, 'a', exception_message)
data_read_count += 1
utility.write_to_file(config.ConfigManager(
).ExecutioncountFile, 'w', str(data_read_count))
dictionaries.UpdateTemplateWhere['_id'] = configdocs[0]['_id']
dictionaries.UpdateTemplateSet['docid_count'] = docid_count
dictionaries.DBSet['$set'] = dictionaries.UpdateTemplateSet
custom.update_data_to_Db_noupsert(int(config.ConfigManager().MongoDBPort), config.ConfigManager().DataCollectionDB, config.ConfigManager(
).ConfigCollection, dictionaries.UpdateTemplateWhere, dictionaries.DBSet, connection)
if __name__ == "__main__":
file_paths = []
directory_list = []
directory_list = utility.string_to_array(
config.ConfigManager().DirectoryList, ',', directory_list)
file_paths = filemanager.directory_iterate(directory_list)
route_dataread(file_paths)
# utility.archive_content(
# file_paths, config.ConfigManager().ArchiveDirectory)
#connection = dbmanager.mongoDB_connection(int(config.ConfigManager().MongoDBPort))
# configdocs = custom.retrieve_data_from_DB(int(config.ConfigManager(
#).MongoDBPort), config.ConfigManager().DataCollectionDB, config.ConfigManager().ConfigCollection)
#docid_count = int(configdocs[0]['docid_count'])
# docid_count = custom.data_from_DB(config.ConfigManager().STConnStr, config.ConfigManager(
#).STJobQueryId, config.ConfigManager().JobDetails, config.ConfigManager().ST, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().STConnStr, config.ConfigManager(
#).STCandidateQueryId, config.ConfigManager().CandidateDetails, config.ConfigManager().ST, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().XchangeConnStr, config.ConfigManager(
#).XchangeJobQueryId, config.ConfigManager().JobDetails, config.ConfigManager().Xchange, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().XchangeConnStr, config.ConfigManager(
#).XchangeCandidateQueryId, config.ConfigManager().CandidateDetails, config.ConfigManager().Xchange, docid_count)
#dictionaries.UpdateTemplateWhere['_id'] = configdocs[0]['_id']
#dictionaries.UpdateTemplateSet['docid_count'] = docid_count
#dictionaries.DBSet['$set'] = dictionaries.UpdateTemplateSet
# custom.update_data_to_Db_noupsert(int(config.ConfigManager().MongoDBPort), config.ConfigManager().DataCollectionDB,
# config.ConfigManager().ConfigCollection,
# dictionaries.UpdateTemplateWhere, dictionaries.DBSet, connection)
|
[
"noreply@github.com"
] |
neeshuagr.noreply@github.com
|
24f8c5f6b848ba541a0f03a6241859851b481ca3
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_386/ch92_2019_10_02_18_01_55_591754.py
|
c842af9ea317fe2aab5bd222878e1be5c6911078
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
dicionario = {}  # renamed from 'dict' to avoid shadowing the builtin
dicionario['felipe'] = "lacombe"
dicionario['ohara'] = "shiba"
def simplifica_dict(dicionario):
    dictLista = []
    for key, value in dicionario.items():
        dictLista.append([key, value])  # append each [key, value] pair instead of rebinding the list to itself
    print(dictLista)
    for i in dictLista:
        print(i)
simplifica_dict(dicionario)
|
[
"you@example.com"
] |
you@example.com
|
f408e84b593fd962f105e3137ee4041a8f4baee6
|
ea4e262f3dc18a089895fef08bedefc60b66e373
|
/supervised_learning/0x01-classification/4-neuron.py
|
5ceefc3c0dea7f50a83093a29231b625e4e999eb
|
[] |
no_license
|
d1sd41n/holbertonschool-machine_learning
|
777899d4914e315883ba0c887d891c0c8ab01c8a
|
5f86dee95f4d1c32014d0d74a368f342ff3ce6f7
|
refs/heads/main
| 2023-07-17T09:22:36.257702
| 2021-08-27T03:44:24
| 2021-08-27T03:44:24
| 317,399,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,257
|
py
|
#!/usr/bin/env python3
"""Single neuron performing binary classification."""
import numpy as np


class Neuron:
    """A single neuron with sigmoid activation."""

    def __init__(self, nx):
        """Initialize the neuron.

        Args:
            nx (int): number of input features.

        Raises:
            TypeError: if nx is not an integer.
            ValueError: if nx is less than 1.
        """
        if not isinstance(nx, int):
            raise TypeError("nx must be an integer")
        if nx < 1:
            raise ValueError("nx must be a positive integer")
        self.__W = np.random.normal(size=(1, nx))
        self.__b = 0
        self.__A = 0

    @property
    def W(self):
        """numpy.ndarray: the weights vector, shape (1, nx)."""
        return self.__W

    @property
    def b(self):
        """float: the bias."""
        return self.__b

    @property
    def A(self):
        """float or numpy.ndarray: the activated output."""
        return self.__A

    def forward_prop(self, X):
        """Compute the forward propagation.

        Args:
            X (numpy.ndarray): input data, shape (nx, m).

        Returns:
            numpy.ndarray: the activated output, shape (1, m).
        """
        Z = np.matmul(self.__W, X) + self.__b
        self.__A = self.sigmoid(Z)
        return self.__A

    def sigmoid(self, Z):
        """Apply the sigmoid activation element-wise."""
        return 1 / (1 + np.exp(-Z))

    def cost(self, Y, A):
        """Compute the logistic regression (cross-entropy) cost.

        Args:
            Y (numpy.ndarray): correct labels, shape (1, m).
            A (numpy.ndarray): activated outputs, shape (1, m).

        Returns:
            float: the cost.
        """
        m = Y.shape[1]
        cost = -1 * (1 / m) * np.sum(Y * np.log(A) +
                                     (1 - Y) * np.log(1.0000001 - A))
        return cost

    def evaluate(self, X, Y):
        """Evaluate the neuron's predictions.

        Args:
            X (numpy.ndarray): input data, shape (nx, m).
            Y (numpy.ndarray): correct labels, shape (1, m).

        Returns:
            tuple: predicted labels (1 where A >= 0.5, else 0) and the cost.
        """
        A = self.forward_prop(X)
        Y_hat = np.where(A >= 0.5, 1, 0)
        cost = self.cost(Y, A)
        return Y_hat, cost
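# --- Usage sketch (editor's addition, not part of the original file) ---
# A single neuron on random data; shapes follow the (nx features, m examples)
# convention used by forward_prop and evaluate above.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.randn(3, 5)            # 3 features, 5 examples
    Y = np.random.randint(0, 2, (1, 5))  # binary labels
    neuron = Neuron(3)
    predictions, cost = neuron.evaluate(X, Y)
    print(predictions, cost)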
|
[
"1498@holbertonschool.com"
] |
1498@holbertonschool.com
|
cb884614e8d42df912c3aefa67005b33d068d5f6
|
73758dde83d1a1823c103e1a4ba71e7c95168f71
|
/nsd2003/py01/day02/login2.py
|
4eefdabd2111bbcb78f2b762df349d7530f21edb
|
[] |
no_license
|
tonggh220/md_5_nsd_notes
|
07ffdee7c23963a7a461f2a2340143b0e97bd9e1
|
a58a021ad4c7fbdf7df327424dc518f4044c5116
|
refs/heads/master
| 2023-07-02T01:34:38.798929
| 2021-05-12T08:48:40
| 2021-05-12T08:48:40
| 393,885,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import getpass
username = input('username: ')
password = getpass.getpass('password: ')
if username == 'bob' and password == '123456':
print('\033[32;1m登陆成功\033[0m')
else:
print('\033[31;1m登陆失败\033[0m')
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
11852eaac17d838c7e11ed135d73821c3fb707bd
|
8985adb377e92b4764d1b138b5c23c92fab44244
|
/tests/test_open_file.py
|
380fbd698fba2697e062433f5e6d758c6238ff2a
|
[
"MIT"
] |
permissive
|
vuillaut/pyeventio
|
b8f2b4ecabb9b2c94d1e3f7d651b8c5ef83e4a5f
|
f7fd0f3546099e34fdab1ccb3f43f5bc515f2143
|
refs/heads/master
| 2020-04-11T04:44:59.932722
| 2019-01-18T07:51:59
| 2019-01-18T07:51:59
| 161,524,392
| 0
| 0
| null | 2018-12-12T17:45:57
| 2018-12-12T17:45:56
| null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
import eventio
from os import path
import pkg_resources
def test_is_install_folder_a_directory():
dir_ = path.dirname(eventio.__file__)
assert path.isdir(dir_)
def test_can_open_file():
testfile = 'tests/resources/one_shower.dat'
eventio.EventIOFile(testfile)
def test_file_is_iterable():
testfile = 'tests/resources/one_shower.dat'
f = eventio.EventIOFile(testfile)
for event in f:
pass
def test_file_has_correct_types():
testfile = 'tests/resources/one_shower.dat'
f = eventio.EventIOFile(testfile)
types = [o.header.type for o in f]
assert types == [1200, 1212, 1201, 1202, 1203, 1204, 1209, 1210]
def test_types_gzipped():
    # presumably intended to read the gzipped copy of the test file; the
    # original repeated the plain .dat path (assumed fix)
    testfile = 'tests/resources/one_shower.dat.gz'
    f = eventio.EventIOFile(testfile)
    types = [o.header.type for o in f]
    assert types == [1200, 1212, 1201, 1202, 1203, 1204, 1209, 1210]
|
[
"maximilian.noethe@tu-dortmund.de"
] |
maximilian.noethe@tu-dortmund.de
|
05329d547998d9406a4a55e93a03a512da6d1f66
|
494e3fbbdff5cf6edb087f3103ad5f15acbc174f
|
/company/migrations/0005_employee_checkedin.py
|
6dd66f7f62dbe154d700daa4addec76dd04d087a
|
[] |
no_license
|
TalentoUnicamp/my
|
1209048acdedbb916b8ae8ec80761d09f6ad7754
|
3d87a33cd282d97dbbbd5f62658f231456f12765
|
refs/heads/master
| 2020-03-23T21:12:58.316033
| 2018-08-14T06:11:36
| 2018-08-14T06:11:36
| 142,090,262
| 11
| 0
| null | 2018-08-17T05:13:26
| 2018-07-24T01:53:23
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 2.0.3 on 2018-08-12 02:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0004_company_access_level'),
]
operations = [
migrations.AddField(
model_name='employee',
name='checkedin',
field=models.BooleanField(default=False),
),
]
|
[
"gustavomaronato@gmail.com"
] |
gustavomaronato@gmail.com
|
a03f8b20320b2b0cd2a0c8a40c42c197b83f1048
|
18ad97292b34a679b8dea8a85090541c5bbf6174
|
/chess.py
|
0fa35ed4de237177a9e5fefa36517a277692e1e0
|
[] |
no_license
|
Jyotirm0y/kattis
|
b941044e39dc36d169450480fc33fd33bd2e0f8e
|
2b9c1819ba29419bbea3db2e8ad7851155abbb3a
|
refs/heads/master
| 2023-05-31T21:11:38.350044
| 2021-06-12T08:21:47
| 2021-06-12T08:21:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
# Bishop moves: reach (s[2], s[3]) from (s[0], s[1]) in at most two diagonal
# moves on an 8x8 board, or report "Impossible" when the square colors differ.
n = int(input())
r = 'ABCDEFGH'
for _ in range(n):
s = input().split()
a = r.index(s[0])
b = int(s[1])-1
x = r.index(s[2])
y = int(s[3])-1
m = abs(x-a)
n = abs(y-b)
if a == x and b == y:
print(0,s[0],s[1])
elif m == n:
print(1, s[0],s[1], s[2],s[3])
elif (m+n)%2 != 0:
print("Impossible")
else:
if m > n:
# move horizontally
extra = (m-n)//2
extraplus = extra + n
if x > a: # move to right
if max(b,y)+extra < 8:
# up first
if y <= b:
print(2, s[0],s[1], r[a+extra],(max(b,y)+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a+extraplus],(max(b,y)+extra+1), s[2],s[3])
elif min(b,y)-extra >= 0:
# down first
if y >= b:
print(2, s[0],s[1], r[a+extra],(min(b,y)-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a+extraplus],(min(b,y)-extra+1), s[2],s[3])
elif x < a: # move to left
if max(b,y)+extra < 8:
# up first
if y <= b:
print(2, s[0],s[1], r[a-extra],(max(b,y)+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a-extraplus],(max(b,y)+extra+1), s[2],s[3])
elif min(b,y)-extra >= 0:
# down first
if y >= b:
print(2, s[0],s[1], r[a-extra],(min(b,y)-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a-extraplus],(min(b,y)-extra+1), s[2],s[3])
elif m < n:
# move vertically
extra = (n-m)//2
extraplus = extra + m
if y > b: # move up
if max(a,x)+extra < 8:
# right first
if x <= a:
print(2, s[0],s[1], r[max(a,x)+extra],(b+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[max(a,x)+extra],(b+extraplus+1), s[2],s[3])
elif min(a,x)-extra >= 0:
# left first
if x >= a:
print(2, s[0],s[1], r[min(a,x)-extra],(b+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[min(a,x)-extra],(b+extraplus+1), s[2],s[3])
elif y < b: # move down
if max(a,x)+extra < 8:
# right first
if x <= a:
print(2, s[0],s[1], r[max(a,x)+extra],(b-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[max(a,x)+extra],(b-extraplus+1), s[2],s[3])
elif min(a,x)-extra >= 0:
# left first
if x >= a:
print(2, s[0],s[1], r[min(a,x)-extra],(b-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[min(a,x)-extra],(b-extraplus+1), s[2],s[3])
|
[
"ainunnajib@gmail.com"
] |
ainunnajib@gmail.com
|
2d0753b8960fa3c7bb7014d2317bc0555cd17999
|
fcbe21026e7ae483c535f6eb38ffbfaaa2aa06c2
|
/.history/main_20210412094009.py
|
7efdf4db3a5b77a3a63e5eb7f311427f5aca19c3
|
[] |
no_license
|
robertstrauss/discordAIchatbot
|
8e4c85920f73d49daeb1394afbd1ce8baffbb97b
|
8347e6a591d352ace1b8fe4c4629e831763eb0ba
|
refs/heads/master
| 2023-04-03T12:55:07.641825
| 2021-04-13T17:54:57
| 2021-04-13T17:54:57
| 357,641,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
import discord
import re
import requests
import random
import time
import asyncio
import torch  # fixed: the PyTorch package imports as 'torch', not 'pytorch'
import pandas as pd
client = discord.Client()
guildID = 755473404643115170
guild = None
channelID = 831188849946394654
talkchannel = None
# state used by the handlers below; defined here so the file actually runs
collecting = False
data = pd.DataFrame(columns=['content', 'time', 'author'])
# async def pingean():
# while True:
# await asyncio.sleep(pinginterval)
# print(pinging)
# if (pinging):
# role = random.choice(ean.roles)
# await pingchannel.send('{}'.format(role.mention))
# def startpingean():
# global pinging
# pinging = True
# def stoppingean():
# global pinging
# pinging = False
@client.event
async def on_ready():
    global guild, talkchannel
    print('We have logged in as {0.user}'.format(client))
    guild = client.get_guild(guildID)
    talkchannel = client.get_channel(channelID)  # was the undefined name pingEanID
@client.event
async def on_message(message):
    global data
    if message.author == client.user:
        return
    if collecting:
        data = data.append({'content': message.content,
                            'time': message.created_at,
                            'author': message.author.name}, ignore_index=True)
async def collectmessages(channel, limit=100000):  # must be async to use 'async for'
    data = pd.DataFrame(columns=['content', 'time', 'author'])
    async for msg in channel.history(limit=limit):
        if msg.author != client.user:  # fixed 'client..user' syntax error
            data = data.append({'content': msg.content,
                                'time': msg.created_at,
                                'author': msg.author.name}, ignore_index=True)
        if len(data) == limit:
            break
    data.to_csv('messagelogs/{}'.format(channel.name))
with open('token.txt', 'r') as tokentxt:
    # pingean() is commented out above, so no background task is scheduled here
    client.run(tokentxt.read())
|
[
"robert.strauss@protonmail.com"
] |
robert.strauss@protonmail.com
|
7199acf0e62fd850cfe1c8e99807a92710e8e2f2
|
854d0673d18cf1db557d2b9b27c248dd879ba28a
|
/test/test1.py
|
3c51d8306463a0078a64f3dbd5b8dc13fba22485
|
[] |
no_license
|
Miao1127/code-charpter3
|
51e141c0e463f1ea63f371a498d967b520f59853
|
313dae0b53f1f68fb7ce713ac3eab7e1a2d1b001
|
refs/heads/master
| 2023-07-15T21:27:22.688910
| 2021-08-23T01:13:59
| 2021-08-23T01:13:59
| 398,937,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# _*_ coding:utf-8 _*_
# Developer: 103 Zhongshan Squad - Miao Runlong
# Created: 2019/12/16 20:50
# File: test1.py
# IDE: PyCharm
# Purpose: test global variables
import time
import sys
def show():
global a
print(a)
a = 3
def s():
global a
print(a)
a = 5
if __name__ == '__main__':
a = 1
show()
print(a)
s()
print(a)
|
[
"Miao@DESKTOP-AJA95IE"
] |
Miao@DESKTOP-AJA95IE
|
df98aec7cbcf6664ef07942ff574d25f1dc989c7
|
599913671c29ca3c427514fa6cb22260d1420d35
|
/st2actions/st2actions/runners/remote_script_runner.py
|
d0fc84e70bce2ff066dd85ba069a41d4e6435b31
|
[
"Apache-2.0"
] |
permissive
|
grengojbo/st2
|
2c9642c157f95b57a95175b04d17de7a312164cf
|
24956136df165a8b8ac629ba6be1f6de0d77385a
|
refs/heads/master
| 2020-12-30T21:42:57.664608
| 2015-08-20T14:23:56
| 2015-08-20T14:23:56
| 39,152,248
| 0
| 0
| null | 2015-07-15T17:58:43
| 2015-07-15T17:58:42
| null |
UTF-8
|
Python
| false
| false
| 3,618
|
py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from oslo_config import cfg
from st2common import log as logging
from st2actions.runners.fabric_runner import BaseFabricRunner
from st2actions.runners.fabric_runner import RUNNER_REMOTE_DIR
from st2common.models.system.action import FabricRemoteScriptAction
__all__ = [
'get_runner',
'RemoteScriptRunner'
]
LOG = logging.getLogger(__name__)
def get_runner():
return RemoteScriptRunner(str(uuid.uuid4()))
class RemoteScriptRunner(BaseFabricRunner):
def run(self, action_parameters):
remote_action = self._get_remote_action(action_parameters)
LOG.debug('Will execute remote_action : %s.', str(remote_action))
result = self._run(remote_action)
LOG.debug('Executed remote_action: %s. Result is : %s.', remote_action, result)
status = self._get_result_status(result, cfg.CONF.ssh_runner.allow_partial_failure)
return (status, result, None)
def _get_remote_action(self, action_parameters):
# remote script actions without entry_point don't make sense, user probably wanted to use
# "run-remote" action
if not self.entry_point:
msg = ('Action "%s" is missing entry_point attribute. Perhaps wanted to use '
'"run-remote" runner?')
raise Exception(msg % (self.action_name))
script_local_path_abs = self.entry_point
pos_args, named_args = self._get_script_args(action_parameters)
named_args = self._transform_named_args(named_args)
env_vars = self._get_env_vars()
remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,
cfg.CONF.ssh_runner.remote_dir)
remote_dir = os.path.join(remote_dir, self.liveaction_id)
return FabricRemoteScriptAction(self.action_name,
str(self.liveaction_id),
script_local_path_abs,
self.libs_dir_path,
named_args=named_args,
positional_args=pos_args,
env_vars=env_vars,
on_behalf_user=self._on_behalf_user,
user=self._username,
password=self._password,
private_key=self._private_key,
remote_dir=remote_dir,
hosts=self._hosts,
parallel=self._parallel,
sudo=self._sudo,
timeout=self._timeout,
cwd=self._cwd)
|
[
"tomaz@tomaz.me"
] |
tomaz@tomaz.me
|
b2ea215b35210f9d73283dae43957dc12d943e32
|
29345337bf86edc938f3b5652702d551bfc3f11a
|
/python/src/main/python/pyalink/alink/tests/examples/from_docs/test_softmaxpredictstreamop.py
|
ed44324ed0eac0718b830e65b094dc71dad0a64e
|
[
"Apache-2.0"
] |
permissive
|
vacaly/Alink
|
32b71ac4572ae3509d343e3d1ff31a4da2321b6d
|
edb543ee05260a1dd314b11384d918fa1622d9c1
|
refs/heads/master
| 2023-07-21T03:29:07.612507
| 2023-07-12T12:41:31
| 2023-07-12T12:41:31
| 283,079,072
| 0
| 0
|
Apache-2.0
| 2020-07-28T02:46:14
| 2020-07-28T02:46:13
| null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestSoftmaxPredictStreamOp(unittest.TestCase):
def test_softmaxpredictstreamop(self):
df_data = pd.DataFrame([
[2, 1, 1],
[3, 2, 1],
[4, 3, 2],
[2, 4, 1],
[2, 2, 1],
[4, 3, 2],
[1, 2, 1],
[5, 3, 3]
])
batchData = BatchOperator.fromDataframe(df_data, schemaStr='f0 int, f1 int, label int')
dataTest = StreamOperator.fromDataframe(df_data, schemaStr='f0 int, f1 int, label int')
colnames = ["f0","f1"]
lr = SoftmaxTrainBatchOp().setFeatureCols(colnames).setLabelCol("label")
model = batchData.link(lr)
predictor = SoftmaxPredictStreamOp(model).setPredictionCol("pred")
predictor.linkFrom(dataTest).print()
StreamOperator.execute()
pass
|
[
"shaomeng.wang.w@gmail.com"
] |
shaomeng.wang.w@gmail.com
|
20d782976fa64be463fcafcdcad085eb498f3c78
|
3034e86347c71bf7e7af9e5f7aa44ab5ad61e14b
|
/pweb/day06/queue2.py
|
1e303bf16b69ae88861efeb393b81111e60f2ba4
|
[] |
no_license
|
jason12360/AID1803
|
bda039b82f43d6609aa8028b0d9598f2037c23d5
|
f0c54a3a2f06881b3523fba7501ab085cceae75d
|
refs/heads/master
| 2020-03-17T00:43:42.541761
| 2018-06-29T10:07:44
| 2018-06-29T10:07:44
| 133,127,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
py
|
from multiprocessing import Process,Queue
import time
# create the message queue shared between the two processes
q = Queue()
def fun1():
    time.sleep(1)
    q.put('我是进程1')  # "I am process 1"
def fun2():
    time.sleep(2)
    print('取消息', q.get())  # prints "got message:" followed by what fun1 put
p1 = Process(target = fun1)
p2 = Process(target = fun2)
p1.start()
p2.start()
p1.join()
p2.join()
|
[
"370828117@qq.com"
] |
370828117@qq.com
|
3f84cd518baad07c427aacb915704490f4665028
|
452e376385cf9294c7f416748e353ddbfeb8460c
|
/migrations/versions/d2d0eb2fe55a_create_info_type_table.py
|
41952b64c1710a34e5225043c0bb4c6c68964741
|
[] |
no_license
|
huozhihui/atuiguang
|
e7576de0acc53998a67669f577cb371f386a001d
|
7e6db16746e6124cd95a2cf71d063db67a9b0d1f
|
refs/heads/master
| 2021-01-20T08:37:50.479023
| 2017-09-05T14:28:46
| 2017-09-05T14:28:46
| 101,567,814
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
"""create info_type table
Revision ID: d2d0eb2fe55a
Revises: 818a0a098c23
Create Date: 2017-09-02 18:54:31.644588
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd2d0eb2fe55a'
down_revision = '818a0a098c23'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('info_types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=30), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('info_types')
# ### end Alembic commands ###
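# Usage note (editor's addition): a migration like this is applied with the
# standard Alembic CLI, assuming a configured alembic.ini:
#   alembic upgrade head     # creates the info_types table
#   alembic downgrade -1     # drops it again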
|
[
"240516816@qq.com"
] |
240516816@qq.com
|
0eee740e8053dfaa728e96765f202ca5904ce08d
|
37c3b81ad127c9e3cc26fa9168fda82460ca9bda
|
/Baekjoon/boj_2628.py
|
3860cbf720e66de2d4735521eb108e758f043ee3
|
[] |
no_license
|
potomatoo/TIL
|
5d85b69fdaed68966db7cfe2a565b7c64ed3e816
|
395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c
|
refs/heads/master
| 2021-07-08T16:19:40.410097
| 2021-04-19T02:33:40
| 2021-04-19T02:33:40
| 238,872,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
# BOJ 2628: cut a w x h sheet of paper along the given lines and print the
# area of the largest remaining piece.
w, h = map(int, input().split())
n = int(input())
garo = []  # horizontal cut positions (garo = "horizontal" in Korean)
sero = []  # vertical cut positions (sero = "vertical" in Korean)
for i in range(n):
where, idx = map(int,input().split())
if where == 0:
garo.append(idx)
else:
sero.append(idx)
garo.append(0)
sero.append(0)
garo.append(h)
sero.append(w)
garo.sort()
sero.sort()
ls1 = []
ls2 = []
for i in range(len(garo)-1):
ls1.append(garo[i+1] - garo[i])
for i in range(len(sero)-1):
ls2.append(sero[i+1] - sero[i])
print(max(ls1)*max(ls2))
|
[
"duseh73@gmail.com"
] |
duseh73@gmail.com
|
c76607201a54c4b15b585cebd9ac427bc7e11dd0
|
af32cdbbf31d52d01753ddfa5e813e851411403b
|
/tests/job/test_databaseproperties.py
|
5b4cbb469a7497b64814c7ed06d2148275bafbf3
|
[
"BSD-3-Clause"
] |
permissive
|
pmrv/pyiron_base
|
3e72298df7790f4c3b84d78c990b5d7e12e9016b
|
af1729708a8226575ca2c84f574e7cb046b7f7cd
|
refs/heads/master
| 2022-12-25T03:15:09.270651
| 2020-09-13T13:56:40
| 2020-09-13T13:56:40
| 294,116,811
| 0
| 0
|
BSD-3-Clause
| 2020-09-09T13:10:31
| 2020-09-09T13:10:31
| null |
UTF-8
|
Python
| false
| false
| 5,610
|
py
|
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import datetime
import os
from pyiron_base.project.generic import Project
from pyiron_base.job.core import DatabaseProperties
class TestDatabaseProperties(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.database_entry = {
"id": 150,
"parentid": None,
"masterid": None,
"projectpath": "/Users/jan/PyIron_data/projects/",
"project": "2019-02-14-database-properties/test/",
"job": "vasp",
"subjob": "/vasp",
"chemicalformula": "Fe2",
"status": "finished",
"hamilton": "Vasp",
"hamversion": "5.4",
"username": "pyiron",
"computer": "pyiron@MacBook-Pro-4.local#1",
"timestart": datetime.datetime(2019, 2, 14, 8, 4, 7, 248427),
"timestop": datetime.datetime(2019, 2, 14, 8, 4, 8, 366365),
"totalcputime": 1.0,
}
cls.database_property = DatabaseProperties(job_dict=cls.database_entry)
def test_properties(self):
self.assertEqual(self.database_property.id, 150)
self.assertEqual(self.database_property.parentid, None)
self.assertEqual(self.database_property.masterid, None)
self.assertEqual(
self.database_property.projectpath, "/Users/jan/PyIron_data/projects/"
)
self.assertEqual(
self.database_property.project, "2019-02-14-database-properties/test/"
)
self.assertEqual(self.database_property.job, "vasp")
self.assertEqual(self.database_property.subjob, "/vasp")
self.assertEqual(self.database_property.chemicalformula, "Fe2")
self.assertEqual(self.database_property.status, "finished")
self.assertEqual(self.database_property.hamilton, "Vasp")
self.assertEqual(self.database_property.hamversion, "5.4")
self.assertEqual(self.database_property.username, "pyiron")
self.assertEqual(
self.database_property.computer, "pyiron@MacBook-Pro-4.local#1"
)
self.assertEqual(
self.database_property.timestart,
datetime.datetime(2019, 2, 14, 8, 4, 7, 248427),
)
self.assertEqual(
self.database_property.timestop,
datetime.datetime(2019, 2, 14, 8, 4, 8, 366365),
)
self.assertEqual(self.database_property.totalcputime, 1.0)
def test_dir(self):
self.assertEqual(
sorted(list(self.database_entry.keys())),
sorted(dir(self.database_property)),
)
def test_bool(self):
self.assertTrue(bool(self.database_property))
self.assertFalse(bool(DatabaseProperties()))
with self.assertRaises(AttributeError):
_ = DatabaseProperties().job
class DatabasePropertyIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.file_location, "database_prop"))
cls.ham = cls.project.create_job('ScriptJob', "job_test_run")
cls.ham.save()
@classmethod
def tearDownClass(cls):
project = Project(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "database_prop")
)
ham = project.load(project.get_job_ids()[0])
ham.remove()
project.remove(enable=True)
def test_properties(self):
job_db_entry_dict = self.ham.project.db.get_item_by_id(self.ham.job_id)
self.assertIsNotNone(job_db_entry_dict)
self.assertEqual(self.ham.database_entry.id, job_db_entry_dict["id"])
self.assertEqual(
self.ham.database_entry.parentid, job_db_entry_dict["parentid"]
)
self.assertEqual(
self.ham.database_entry.masterid, job_db_entry_dict["masterid"]
)
self.assertEqual(self.ham.database_entry.projectpath, self.project.root_path)
self.assertEqual(self.ham.database_entry.project, self.project.project_path)
self.assertEqual(self.ham.database_entry.job, "job_test_run")
self.assertEqual(self.ham.database_entry.subjob, "/job_test_run")
self.assertEqual(self.ham.database_entry.status, "created")
self.assertEqual(self.ham.database_entry.hamilton, 'Script')
self.assertEqual(self.ham.database_entry.hamversion, "0.1")
self.assertEqual(self.ham.database_entry.username, "pyiron")
def test_inspect_job(self):
job_inspect = self.project.inspect(self.ham.job_name)
self.assertIsNotNone(job_inspect)
self.assertEqual(job_inspect.database_entry.parentid, None)
self.assertEqual(job_inspect.database_entry.masterid, None)
self.assertEqual(job_inspect.database_entry.projectpath, self.project.root_path)
self.assertEqual(job_inspect.database_entry.project, self.project.project_path)
self.assertEqual(job_inspect.database_entry.job, "job_test_run")
self.assertEqual(job_inspect.database_entry.subjob, "/job_test_run")
self.assertEqual(job_inspect.database_entry.status, "created")
self.assertEqual(job_inspect.database_entry.hamilton, 'Script')
self.assertEqual(job_inspect.database_entry.hamversion, "0.1")
self.assertEqual(job_inspect.database_entry.username, "pyiron")
if __name__ == "__main__":
unittest.main()
|
[
"janssen@mpie.de"
] |
janssen@mpie.de
|
3fb2cae71bf50676b6228b742a5f2bc1b3d9856b
|
3012e5a0f34dd54fbac568c70506826973192ce1
|
/pylib/lines.py
|
0568e2c4370b658c9fb453b1094d7360c2fd0918
|
[] |
no_license
|
metatab-packages/civicknowledge.com-osm-demosearch
|
89999227bda7bae91259c10bd651f220ae35c52f
|
d4ecb7775662a50413c848c3ae5a901b147ef532
|
refs/heads/master
| 2023-05-14T12:39:25.328559
| 2021-06-08T13:52:39
| 2021-06-08T13:52:39
| 334,572,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,999
|
py
|
"""
"""
from itertools import chain
from pathlib import Path
import fiona # Import first to work around an import bug
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely
from demosearch import FileCache
from demosearch.util import run_mp
from shapely.wkt import loads as loads_wkt
from tqdm.notebook import tqdm
from .util import get_cache
tqdm.pandas()
import logging
lines_logger = logging.getLogger(__name__)
class LPError(Exception):
pass
hw_type = {
'residential': 'r',
'primary': '1',
'secondary': '2',
'tertiary': '3',
'motorway': 'm',
'motorway_link ': 'l',
'trunk': 't'
}
# Process each of the separate files, then
# write them back out for later recombination
#
# Write out the lines files into chunks so we can run it in multiple
# processes
def estimate_lines(fp):
"""Estimate the number of lines in a very long line-oriented file"""
lengths = []
means = []
sz = Path(fp).stat().st_size
mean = 1
std = 1
ln = 1
    tq = tqdm(total=6000)  # should take fewer than 6K lines to get an estimate
with fp.open() as f:
while True:
l = f.readline()
if not l or (len(l) > 1000 and std < 2):
return int(sz / mean)
lengths.append(len(l))
mean = np.mean(lengths).round(0)
means.append(mean)
std = np.std(means[-500:]).round(0)
tq.update(1)
tq.set_description(f"Est #lines {int(sz / mean)}")
ln += 1
def split_lines(pkg, limit=None):
cache = get_cache(pkg)
try:
# Returned the cached keys if this is already done
return cache.get('splits/splits_keys')
except KeyError:
pass
fp = pkg.reference('lines').resolved_url.fspath
try:
approx_lines = cache.config['lines_file_size']
except KeyError:
approx_lines = estimate_lines(fp)
cache.config['lines_file_size'] = approx_lines
chunksize = 10000
total = int(approx_lines / chunksize)
splits = []
with pd.read_csv(fp, chunksize=chunksize, low_memory=False) as reader:
for i, df in tqdm(enumerate(reader), total=total, desc='Split file'):
if limit and i > limit:
break
key = f'splits/{i}'
if not cache.exists(key):
cache.put_df(key, df)
splits.append(key)
cache.put('splits/splits_keys', splits)
return splits
def ro_key(rec_key):
return f"recombine/{Path(rec_key).name}"
def f_run_overlay(cache_dir, key, okey):
cache = FileCache(cache_dir)
if cache.exists(okey):
return okey
t = cache.get_df(key)
utm = cache.get_df('utm_grid')
t = t[t.highway.isin(list(hw_type.keys()))]
t['highway'] = t.highway.replace(hw_type) # Cuts file size by 100M
t['geometry'] = t.geometry.apply(shapely.wkt.loads)
if len(t) == 0:
return None
gdf = gpd.GeoDataFrame(t, crs=4326)
    try:
        t = gpd.overlay(gdf, utm)
        try:
            cache.put_df(okey, t)
        except Exception:
            if cache.exists(okey):
                cache.delete(okey)  # drop the partially written entry (original deleted the input 'key')
            raise
    except IndexError as e:
        raise LPError(f"Failed for {key} gdf:{len(gdf)} hashes:{len(utm)}: {e}")
return okey
def run_overlay(pkg, splits, force=False):
cache = get_cache(pkg)
if not force:
try:
# Returned the cached keys if this is already done
recombine_keys = cache.get('recombine/recombine_keys')
if len(recombine_keys) == len(splits):
return recombine_keys
except KeyError:
pass
tasks = [[cache.root, e, ro_key(e)] for e in splits]
recombine_keys = run_mp(f_run_overlay, tasks, desc='Overlay Geohash')
cache.put('recombine/recombine_keys', recombine_keys)
return list(filter(bool, recombine_keys))
def f_simplify_lines(cache_dir, key):
cache = FileCache(cache_dir)
if not key:
return []
try:
df = cache.get_df(key)
except EOFError as e:
raise LPError(f"Failed to load key {key}: {e}")
except AttributeError as e:
raise LPError(f"Failed to load key {key}: {e}")
okeys = []
for idx, g in df.groupby('epsg'):
_, fn = key.split('/')
okey = f'simplified/{idx}/{fn}'
if not cache.exists(okey):
geometry = g.to_crs(epsg=idx).geometry \
.simplify(20, False) \
.to_crs(4326) \
.apply(lambda e: shapely.wkt.dumps(e, rounding_precision=0))
g = pd.DataFrame(g).assign(geometry=geometry)
cache.put_df(okey, g)
okeys.append(okey)
return okeys
def simplify_lines(pkg, recombine_keys):
cache = get_cache(pkg)
try:
# Returned the cached keys if this is already done
return cache.get('simplified/simplified_keys')
except KeyError:
pass
simplified_keys = run_mp(f_simplify_lines, [(cache.root, e) for e in recombine_keys],
desc='Simplify')
simplified_keys = list(chain(*simplified_keys))
cache.put('simplified/simplified_keys', simplified_keys)
return simplified_keys
def write_files(pkg, simplified_keys):
pkg_root = Path(pkg.path).parent
cache = FileCache(pkg_root.joinpath('data', 'cache'))
f1 = pkg_root.joinpath('data', 'residential_roads.csv')
f2 = pkg_root.joinpath('data', 'nonres_roads.csv')
if f1.exists() and f2.exists():
lines_logger.info('Both roads files exists, not writing')
return
t = pd.concat([cache.get_df(e) for e in simplified_keys])
t = t[['zone', 'epsg', 'us_state','cus_state', 'highway', 'geometry']]
residential_roads = t[t.highway == 'r']
nonres_roads = t[t.highway != 'r']
if not f1.exists():
residential_roads.to_csv(f1, index=False)
if not f2.exists():
nonres_roads.to_csv(f2, index=False)
def build_lines(pkg):
    open_cache(pkg)  # ensure the reference geoframes are cached before the pipeline runs
    lines_logger.info('Split the input file')
    splits = split_lines(pkg)
    lines_logger.info(f' {len(splits)} splits keys')
    lines_logger.info('Run the overlay process')
    recombine_keys = run_overlay(pkg, splits)  # run_overlay takes (pkg, splits, force); passing cache here made force truthy
    lines_logger.info(f' {len(recombine_keys)} recombine keys')
if False:
lines_logger.info('Simplify lines')
simplified_keys = simplify_lines(pkg, recombine_keys)
lines_logger.info(f' {len(simplified_keys)} simplified keys')
else:
simplified_keys = recombine_keys
lines_logger.info('Write the roads files')
write_files(pkg, simplified_keys)
def open_cache(pkg):
cache = get_cache(pkg)
if not cache.exists('hashes'):
hashes = pkg.reference('us_geohashes').geoframe()
cache.put_df('hashes', hashes)
if not cache.exists('utm_grid'):
utm_grid = pkg.reference('utm_grid').geoframe()
cache.put_df('utm_grid', utm_grid)
return cache
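# --- Usage sketch (editor's addition, not part of the original file) ---
# Driving the pipeline from a metatab package; 'metapack' and the package
# path are assumptions, and the package must define the 'lines', 'utm_grid'
# and 'us_geohashes' references used above.
if __name__ == '__main__':
    import metapack as mp
    pkg = mp.open_package('.')
    build_lines(pkg)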
|
[
"eric@civicknowledge.com"
] |
eric@civicknowledge.com
|
1049de780f9f5d1abb00009e1767374fd69fa854
|
1b5404b8099de74d4e39e0a41b1d04c61defa8d4
|
/fractals/pythagoras_tree.py
|
9aaeb7710c9cda5161272fbed91510d7491c7690
|
[] |
no_license
|
ipeterov/random-stuff
|
5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd
|
dbb38d42331f636919fd149b23783e02ee2c9afb
|
refs/heads/master
| 2023-05-14T00:41:51.122251
| 2023-05-04T12:10:26
| 2023-05-04T12:10:26
| 206,028,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,292
|
py
|
import math
from types import SimpleNamespace
import pygame
from abstract_drawer import AbstractDrawer
SIZE = WIDTH, HEIGHT = 500, 500
class PythagorasTreeDrawer(AbstractDrawer):
DEFAULT_PARAMS = {
'angle': 45,
'starting_size': 100,
'max_depth': 2,
}
PARAMS_SCHEMA = {
'angle': float,
'starting_size': float,
'max_depth': int,
}
@staticmethod
def counterclockwise_rotate(points, anchor_index, angle):
rad_angle = math.radians(-angle)
anchor = points[anchor_index]
new_points = []
for point in points:
if point == anchor:
new_points.append(point)
continue
anc_point = point - anchor
new_point = anchor + pygame.math.Vector2(
anc_point.x * math.cos(rad_angle) - anc_point.y * math.sin(rad_angle),
anc_point.x * math.sin(rad_angle) + anc_point.y * math.cos(rad_angle),
)
new_points.append(new_point)
return new_points
def draw_square(self, start, size, lean, angle):
assert lean in ('left', 'right')
if lean == 'left':
left, bottom = start
anchor_index = 0
else:
left, bottom = start[0] - size, start[1]
anchor_index = 1
angle *= -1
points = [
pygame.math.Vector2(left, bottom),
pygame.math.Vector2(left + size, bottom),
pygame.math.Vector2(left + size, bottom - size),
pygame.math.Vector2(left, bottom - size),
]
points = self.counterclockwise_rotate(points, anchor_index, angle)
pygame.draw.polygon(
self.screen,
(255, 255, 255),
points,
)
square = SimpleNamespace()
square.points = points
square.size = size
square.angle = angle
return square
def draw_small_squares(self, big_square, depth):
angle = self.params['angle']
rad_angle = math.radians(angle)
left_square = self.draw_square(
big_square.points[-1],
math.cos(rad_angle) * big_square.size,
lean='left',
angle=big_square.angle + angle,
)
right_square = self.draw_square(
big_square.points[-2],
math.sin(rad_angle) * big_square.size,
lean='right',
angle=90 - angle - big_square.angle,
)
if depth < self.params['max_depth']:
self.draw_small_squares(left_square, depth + 1)
self.draw_small_squares(right_square, depth + 1)
def _get_default_start(self, width, height):
return width / 2 - self.params['starting_size'] / 2, height
def _draw(self, start):
starting_square = self.draw_square(
start,
self.params['starting_size'],
lean='left',
angle=0,
)
self.draw_small_squares(
starting_square,
depth=1,
)
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode(SIZE)
drawer = PythagorasTreeDrawer(screen)
drawer.draw()
pygame.display.flip()
input('Press any key to quit... ')
|
[
"ipeterov1@gmail.com"
] |
ipeterov1@gmail.com
|