blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
60d4b90698ef3312ded89560fe60b0bb05059590
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_luring.py
|
d57b90bae21a1db2e394effd996a53fd1662c59d
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _LURING():
def __init__(self,):
self.name = "LURING"
self.definitions = lure
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['lure']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d9f79170b88b22d84a6fd118b71d63ef8095c4fb
|
4a0e0608ae505df68a43604efd363beaec742159
|
/src/muypicky/settings/base.py
|
69f2cdabe2214781ab498528588edb21fd7f7808
|
[] |
no_license
|
ionescuig/trydjango1-11
|
728c65a7481f7d54cdffa35d5456474a9eb69889
|
99e205880af8d307e2ef496afc983d3deeb435e8
|
refs/heads/master
| 2021-04-29T19:24:15.710487
| 2018-04-13T00:18:06
| 2018-04-13T00:18:06
| 121,702,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,813
|
py
|
"""
Django settings for muypicky project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5h97=bi^k5_rdp-6urx1(3)z_ldcv^g^($@@9-847)*%3*-ju8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'youremail@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'YourName <youremail@gmail.com>'
ADMINS = (
'YourName <youremail@gmail.com>',
)
MANAGERS = ADMINS
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'menus',
'profiles',
'restaurants',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'muypicky.urls'
LOGIN_URL = '/login/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'muypicky.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
LOGOUT_REDIRECT_URL = '/'
LOGIN_REDIRECT_URL = '/'
CORS_REPLACE_HTTPS_REFERER = False
HOST_SCHEME = "http://"
SECURE_PROXY_SSL_HEADER = None
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
SECURE_HSTS_SECONDS = None
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_FRAME_DENY = False
|
[
"ionescuig@yahoo.com"
] |
ionescuig@yahoo.com
|
bf64b0f79d6364e6e1c98299288c5bd337a03015
|
f10db3b11131ddf2bf5026e42cdd72c275e49693
|
/ToolsX/leetcode/0012/0012_1.py
|
86533e91b9177214e14e1ab80ed1278373daa4c7
|
[] |
no_license
|
JunLei-MI/PythonX
|
36def40e33c9ebb64ce28af2b5da010393b08356
|
efea806d49f07d78e3db0390696778d4a7fc6c28
|
refs/heads/master
| 2023-04-07T10:58:45.647430
| 2021-01-25T16:54:37
| 2021-04-15T13:41:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
class Solution:
    def intToRoman(self, num: int) -> str:
        """
        1
        https://leetcode.com/problems/integer-to-roman/discuss/6274/Simple-Solution
        (that answer made me laugh)
        >>> Solution().intToRoman(3)
        'III'
        >>> Solution().intToRoman(4)
        'IV'
        >>> Solution().intToRoman(9)
        'IX'
        >>> Solution().intToRoman(58)
        'LVIII'
        >>> Solution().intToRoman(1994)
        'MCMXCIV'
        """
        # One precomputed numeral table per decimal place; each decimal digit
        # of `num` indexes directly into its table (valid for 0..3999).
        thousands = ['', 'M', 'MM', 'MMM']
        hundreds = ['', 'C', 'CC', 'CCC', 'CD', 'D', 'DC', 'DCC', 'DCCC', 'CM']
        tens = ['', 'X', 'XX', 'XXX', 'XL', 'L', 'LX', 'LXX', 'LXXX', 'XC']
        ones = ['', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX']
        roman = thousands[num // 1000]
        roman += hundreds[(num % 1000) // 100]
        roman += tens[(num % 100) // 10]
        roman += ones[num % 10]
        return roman


if __name__ == '__main__':
    import doctest

    doctest.testmod(verbose=True)
|
[
"pingfangx@pingfangx.com"
] |
pingfangx@pingfangx.com
|
aba1e97c6943b979394ff3086c1a6feb58340f0e
|
eafddc14e1381db53b87d42e7aa12dfb4bcf2d6e
|
/pandemonium/implementations/sarsa.py
|
a66e3d0d8b81ef0035bbce22252096518a5927b5
|
[] |
no_license
|
konichuvak/pandemonium
|
b9d1d2f8c3529b6869f6bda1d6ca10c6c0f94052
|
57083b311ea209fe156f8575cc682e6c88211b74
|
refs/heads/master
| 2022-11-23T21:57:21.276033
| 2020-07-26T03:42:51
| 2020-07-26T03:42:51
| 240,851,837
| 1
| 0
| null | 2020-07-06T19:54:41
| 2020-02-16T07:45:09
|
Python
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
from pandemonium.demons.control import SARSA, OfflineTDControl, OnlineTDControl
from pandemonium.demons.offline_td import TTD
from pandemonium.utilities.utilities import get_all_classes


# Concrete SARSA variants assembled purely by mixin composition; the bodies
# are intentionally empty (`...`) -- all behavior comes from the base classes.
class MultistepSARSA(SARSA, OfflineTDControl, TTD):
    ...


class OnlineSARSA(SARSA, OnlineTDControl):
    ...


# Export every class defined in this module as the public API.
__all__ = get_all_classes(__name__)
|
[
"arialinvlad@gmail.com"
] |
arialinvlad@gmail.com
|
79c71fa20f12d5ed5a0151bf6692b97ba1da8d44
|
4fcad69a9b2aec97fa29e0010d82f0f085cdc446
|
/tsampi/pypy/lib_pypy/distributed/test/test_socklayer.py
|
6870eda430d4ec75a8c7d5f2e3475edfb0534513
|
[] |
no_license
|
tsampi/tsampi-0
|
b64d4457f58314343630b04232c6ecc74c7bfda1
|
5e0183e80718d5668b4b5b96631853942e344b64
|
refs/heads/master
| 2021-01-19T04:35:05.640785
| 2016-09-12T18:34:25
| 2016-09-12T18:34:25
| 49,612,767
| 1
| 3
| null | 2016-03-25T10:35:41
| 2016-01-14T01:02:18
|
Python
|
UTF-8
|
Python
| false
| false
| 995
|
py
|
import py
from pypy.conftest import gettestobjspace


def setup_module(mod):
    # Skip the whole module unless the optional dependency is available.
    py.test.importorskip("pygreen")  # found e.g. in py/trunk/contrib


# XXX think how to close the socket
class AppTestSocklayer:
    def setup_class(cls):
        # App-level object space: transparent proxies plus the modules the
        # green socket layer needs.
        cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
                                       "usemodules": ("_stackless", "_socket", "select")})

    def test_socklayer(self):
        # NOTE(review): this runs at app level; `skip` appears to be injected
        # by pypy's AppTest machinery rather than imported here -- confirm.
        class X(object):
            z = 3
        x = X()
        try:
            import py
        except ImportError:
            skip("pylib not importable")
        from pygreen.pipe.gsocke import GreenSocket
        from distributed.socklayer import socket_loop, connect
        from pygreen.greensock2 import oneof, allof

        def one():
            # Server side: expose `x` over a proxying socket loop on localhost.
            socket_loop(('127.0.0.1', 21211), {'x': x}, socket=GreenSocket)

        def two():
            # Client side: connect and read the proxied attribute back.
            rp = connect(('127.0.0.1', 21211), GreenSocket)
            assert rp.x.z == 3

        # Run both greenlets until one finishes.
        oneof(one, two)
|
[
"tim@readevalprint.com"
] |
tim@readevalprint.com
|
05fa5d26d2b5020a17dd191dd3777319051f64af
|
d1626536c867604efdb1b9b1d8305729f28233df
|
/tests/types/test_urls.py
|
f5483ca750cd1176a1aea6c1ed6716e369de006a
|
[
"MIT"
] |
permissive
|
Rafiot/followthemoney
|
3fb056604ab672de1d9eea3330cd890af794c01e
|
18dd0ec410e598f8766f300b1f820e484034920f
|
refs/heads/master
| 2022-09-15T10:02:18.145766
| 2020-05-05T08:37:05
| 2020-05-05T08:37:05
| 259,663,871
| 1
| 0
|
MIT
| 2020-05-04T22:25:09
| 2020-04-28T14:41:07
| null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import unittest

from followthemoney.types import registry

# Shared handle to the URL property-type singleton under test.
urls = registry.url


class UrlsTest(unittest.TestCase):
    """Tests for validation, normalization and specificity of the URL type."""

    def test_is_url(self):
        # validate() accepts a well-formed URL and rejects None or plain text.
        self.assertTrue(urls.validate('http://foo.org'))
        self.assertFalse(urls.validate(None))
        self.assertFalse(urls.validate('hello'))

    def test_unicode_url(self):
        utext = 'http://ko.wikipedia.org/wiki/위키백과:대문'
        self.assertTrue(urls.validate(utext))
        # Raw non-UTF-8 bytes are not accepted as a URL value.
        self.assertFalse(urls.validate(utext.encode('euc-kr')))

    def test_parse_url(self):
        # clean() normalizes: adds trailing slash, strips fragments, sorts
        # query parameters, lower-cases the host (path case is preserved).
        self.assertEqual(urls.clean('http://foo.com'), 'http://foo.com/')
        self.assertEqual(urls.clean('http://foo.com/#lala'), 'http://foo.com/')
        self.assertEqual(urls.clean('http://foo.com?b=1&a=2'),
                         'http://foo.com/?a=2&b=1')
        self.assertEqual(urls.clean('http://FOO.com'), 'http://foo.com/')
        self.assertEqual(urls.clean('http://FOO.com/A'), 'http://foo.com/A')

    def test_specificity(self):
        self.assertEqual(urls.specificity('http://foo.com/'), 1)
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
db89ca96687b30bfe5e1575b3c951f4d67d15153
|
e8574a1eb466c37ab15a3722f762702023998ce8
|
/tests/test_parse/test_tinyseq.py
|
6ba1597dd6aa6e106a3e600f21714cbd4ff86c7f
|
[
"BSD-3-Clause"
] |
permissive
|
cogent3/c3test
|
0415210e5aee197ea7256fd3747698073a5b29c3
|
e200ed18a7fbc317abf7ebe76871fb2a7004375c
|
refs/heads/master
| 2021-04-16T12:35:36.652805
| 2020-05-19T00:03:10
| 2020-05-19T00:03:10
| 249,356,408
| 0
| 1
|
BSD-3-Clause
| 2020-05-04T02:46:01
| 2020-03-23T06:50:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
#!/usr/bin/env python
import xml.dom.minidom

from io import StringIO  # NOTE(review): unused in this chunk -- possibly needed elsewhere
from unittest import TestCase, main

from cogent3.parse.tinyseq import TinyseqParser

__author__ = "Matthew Wakefield"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Matthew Wakefield"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Matthew Wakefield"
__email__ = "wakefield@wehi.edu.au"
__status__ = "Production"

# NCBI TinySeq XML fixture: one nucleotide record (accession AY286018.1).
data = """<?xml version="1.0"?>
<!DOCTYPE TSeqSet PUBLIC "-//NCBI//NCBI TSeq/EN" "http://www.ncbi.nlm.nih.gov/dtd/NCBI_TSeq.dtd">
<TSeqSet>
<TSeq>
<TSeq_seqtype value="nucleotide"/>
<TSeq_gi>31322957</TSeq_gi>
<TSeq_accver>AY286018.1</TSeq_accver>
<TSeq_taxid>9315</TSeq_taxid>
<TSeq_orgname>Macropus eugenii</TSeq_orgname>
<TSeq_defline>Macropus eugenii medium wave-sensitive opsin 1 (OPN1MW) mRNA, complete cds</TSeq_defline>
<TSeq_length>99</TSeq_length>
<TSeq_sequence>GGCAGGGAAAGGGAAGAAAGTAAAGGGGCCATGACACAGGCATGGGACCCTGCAGGGTTCTTGGCTTGGCGGCGGGACGAGAACGAGGAGACGACTCGG</TSeq_sequence>
</TSeq>
</TSeqSet>
"""

# Expected FASTA rendering and annotation repr for the fixture above.
sample_seq = ">AY286018.1\nGGCAGGGAAAGGGAAGAAAGTAAAGGGGCCATGACACAGGCATGGGACCCTGCAGGGTTCTTGGCTTGGCGGCGGGACGAGAACGAGGAGACGACTCGG\n"
sample_annotations = (
    '[genbank_id "AY286018.1" at [0:99]/99, organism "Macropus eugenii" at [0:99]/99]'
)


class ParseTinyseq(TestCase):
    def test_parse(self):
        # The parser accepts either raw XML text or a pre-parsed DOM.
        for name, seq in [
            next(TinyseqParser(data)),
            next(TinyseqParser(xml.dom.minidom.parseString(data))),
        ]:
            self.assertEqual(name, "AY286018.1")
            self.assertEqual(sample_seq, seq.to_fasta(block_size=len(sample_seq)))
            self.assertEqual(str(seq.annotations), sample_annotations)
        pass


if __name__ == "__main__":
    main()
|
[
"Gavin.Huttley@anu.edu.au"
] |
Gavin.Huttley@anu.edu.au
|
2ac2aedd914fe42678d31bbf51cad93ef6ff6d78
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/tests/snuba/api/endpoints/test_discover_homepage_query.py
|
71f3bc1e624c3e770c60f1a832a392d146a6f94c
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029
| 2023-01-09T15:09:44
| 2023-01-09T15:09:44
| 48,165,782
| 0
| 0
|
BSD-3-Clause
| 2022-12-16T19:13:54
| 2015-12-17T09:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 5,947
|
py
|
from django.urls import reverse
from sentry.api.serializers import serialize
from sentry.discover.models import DiscoverSavedQuery
from tests.snuba.api.endpoints.test_discover_saved_queries import DiscoverSavedQueryBase
# Feature flags required for every request against the Discover endpoints.
FEATURES = ("organizations:discover-query",)


class DiscoverHomepageQueryTest(DiscoverSavedQueryBase):
    """Endpoint tests for a user's Discover "homepage" saved query
    (GET/PUT/DELETE on sentry-api-0-discover-homepage-query)."""

    def setUp(self):
        super().setUp()
        self.url = reverse("sentry-api-0-discover-homepage-query", args=[self.org.slug])
        self.query = {"fields": ["test"], "conditions": [], "limit": 10}
        self.project_ids = [
            self.create_project(organization=self.org).id,
            self.create_project(organization=self.org).id,
        ]

    def test_returns_no_response_if_no_homepage_query_for_user(self):
        with self.feature(FEATURES):
            response = self.client.get(self.url)

        # 204 with empty data: the user has no homepage query saved yet.
        assert response.status_code == 204, response.content
        assert response.data is None

    def test_returns_serialized_saved_query_if_homepage_is_set(self):
        saved_query = DiscoverSavedQuery.objects.create(
            organization=self.org,
            created_by=self.user,
            name="Test query",
            query=self.query,
            is_homepage=True,
        )
        with self.feature(FEATURES):
            response = self.client.get(self.url)

        assert response.status_code == 200, response.content
        assert response.data == serialize(saved_query)

    def test_put_updates_existing_homepage_query_to_reflect_new_data(self):
        saved_query = DiscoverSavedQuery.objects.create(
            organization=self.org,
            created_by=self.user,
            name="Test query",
            query=self.query,
            is_homepage=True,
        )
        with self.feature(FEATURES):
            response = self.client.put(
                self.url,
                {
                    "name": "A new homepage query update",
                    "projects": self.project_ids,
                    "fields": ["field1", "field2"],
                },
            )

        assert response.status_code == 200, response.content
        # The existing row is updated in place, not replaced.
        saved_query.refresh_from_db()
        assert response.data == serialize(saved_query)
        assert saved_query.query["fields"] == ["field1", "field2"]
        assert set(saved_query.projects.values_list("id", flat=True)) == set(self.project_ids)

    def test_put_creates_new_discover_saved_query_if_none_exists(self):
        homepage_query_payload = {
            "version": 2,
            "name": "New Homepage Query",
            "projects": self.project_ids,
            "environment": ["alpha"],
            "fields": ["environment", "platform.name"],
            "orderby": "-timestamp",
            "range": None,
        }
        with self.feature(FEATURES):
            response = self.client.put(self.url, data=homepage_query_payload)

        # 201: a homepage query row was created for this user/org.
        assert response.status_code == 201, response.content
        new_query = DiscoverSavedQuery.objects.get(
            created_by=self.user, organization=self.org, is_homepage=True
        )
        assert response.data == serialize(new_query)
        assert new_query.query["fields"] == homepage_query_payload["fields"]
        assert new_query.query["environment"] == homepage_query_payload["environment"]
        assert set(new_query.projects.values_list("id", flat=True)) == set(self.project_ids)

    def test_put_responds_with_saved_empty_name_field(self):
        # NOTE(review): payload carries a non-empty "name" yet the assertions
        # expect "" to be persisted -- homepage queries apparently discard the
        # name server-side; confirm against the endpoint implementation.
        homepage_query_payload = {
            "version": 2,
            "name": "New Homepage Query",
            "projects": self.project_ids,
            "environment": ["alpha"],
            "fields": ["environment", "platform.name"],
            "orderby": "-timestamp",
            "range": None,
        }
        with self.feature(FEATURES):
            response = self.client.put(self.url, data=homepage_query_payload)

        assert response.status_code == 201, response.content
        new_query = DiscoverSavedQuery.objects.get(
            created_by=self.user, organization=self.org, is_homepage=True
        )
        assert new_query.name == ""
        assert response.data["name"] == ""

    def test_put_with_no_name(self):
        homepage_query_payload = {
            "version": 2,
            "name": "",
            "projects": self.project_ids,
            "environment": ["alpha"],
            "fields": ["environment", "platform.name"],
            "orderby": "-timestamp",
            "range": None,
        }
        with self.feature(FEATURES):
            response = self.client.put(self.url, data=homepage_query_payload)

        assert response.status_code == 201, response.content
        new_query = DiscoverSavedQuery.objects.get(
            created_by=self.user, organization=self.org, is_homepage=True
        )
        assert new_query.name == ""
        assert response.data["name"] == ""

    def test_post_not_allowed(self):
        homepage_query_payload = {
            "version": 2,
            "name": "New Homepage Query",
            "projects": ["-1"],
            "environment": ["alpha"],
            "fields": ["environment", "platform.name"],
            "orderby": "-timestamp",
            "range": None,
        }
        with self.feature(FEATURES):
            response = self.client.post(self.url, data=homepage_query_payload)

        # Only GET/PUT/DELETE are supported by this endpoint.
        assert response.status_code == 405, response.content

    def test_delete_resets_saved_query(self):
        DiscoverSavedQuery.objects.create(
            organization=self.org,
            created_by=self.user,
            name="Test query",
            query=self.query,
            is_homepage=True,
        )
        with self.feature(FEATURES):
            response = self.client.delete(self.url)

        # DELETE removes the homepage flag/row entirely.
        assert response.status_code == 204
        assert not DiscoverSavedQuery.objects.filter(
            created_by=self.user, organization=self.org, is_homepage=True
        ).exists()
|
[
"noreply@github.com"
] |
nagyist.noreply@github.com
|
565b2f83dc13e0eb8d5e64aed59cdbec399264f0
|
78a20d2ceac95d3afb55b215305a8c40670e873f
|
/QsimMaster/clients/kiethley_control/kiethley_controller.py
|
2b777b9dee35479f43c628755f50cc07e298445e
|
[] |
no_license
|
johnpalsberg/John-Palsberg
|
9957dd2fb2a6da62e5118c4e2a9a471d5e2f4c9b
|
91aa6476bc319524c6f6a4bfc5561ca6aa95e6c4
|
refs/heads/master
| 2023-01-13T07:26:04.250541
| 2023-01-05T08:48:17
| 2023-01-05T08:48:17
| 206,402,363
| 0
| 0
| null | 2022-06-20T16:29:42
| 2019-09-04T19:57:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,269
|
py
|
from common.lib.clients.qtui.QCustomSpinBox import QCustomSpinBox
from twisted.internet.defer import inlineCallbacks
from PyQt4 import QtGui
from common.lib.clients.qtui.q_custom_text_changing_button import \
    TextChangingButton


# GUI client for a Keithley 2230G power supply; the "kiethley" spelling is
# kept from the original project so external references keep working.
class kiethleyclient(QtGui.QWidget):

    def __init__(self, reactor, parent = None):
        """Initialize the widget and kick off the asynchronous connection.

        reactor: the qt4reactor-installed Twisted reactor driving the GUI.
        """
        super(kiethleyclient, self).__init__()
        self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        # connect() is an inlineCallbacks coroutine; it runs asynchronously
        # and builds the GUI once the server connection is ready.
        self.connect()
        self.reactor = reactor

    @inlineCallbacks
    def connect(self):
        """Create an asynchronous LabRAD connection, select device 0 on the
        Keithley 2230G server, then build the GUI."""
        from labrad.wrappers import connectAsync
        from labrad.units import WithUnit as U
        self.U = U
        self.cxn = yield connectAsync(name = "kiethley client")
        self.server = self.cxn.keithley_2230g_server
        yield self.server.select_device(0)
        self.initializeGUI()

    def initializeGUI(self):
        """Lay out the two channel-voltage spin boxes inside a group box."""
        layout = QtGui.QGridLayout()
        self.setWindowTitle('kiethley Control')
        qBox = QtGui.QGroupBox('Kiethley 2230G')
        subLayout = QtGui.QGridLayout()
        qBox.setLayout(subLayout)
        layout.addWidget(qBox, 0, 0)
        self.volt1widget = QCustomSpinBox('Amplitude (Vpp)', (0, 30))
        self.volt2widget = QCustomSpinBox('Amplitude (Vpp)', (0, 30))
        # The default-argument trick binds each channel number at definition
        # time; `value` is replaced by the signal's payload on emission.
        self.volt1widget.spinLevel.valueChanged.connect(lambda value = self.volt1widget.spinLevel.value(), chan = 1 : self.voltchanged(chan, value))
        self.volt2widget.spinLevel.valueChanged.connect(lambda value = self.volt2widget.spinLevel.value(), chan = 2 : self.voltchanged(chan, value))
        subLayout.addWidget(self.volt1widget, 1,1)
        subLayout.addWidget(self.volt2widget, 1,3)
        self.setLayout(layout)

    @inlineCallbacks
    def voltchanged(self, chan, value):
        # Wrap the raw float in volt units before sending it to the server.
        value = self.U(value, 'V')
        yield self.server.voltage(chan, value)

    def closeEvent(self, x):
        # Closing the window stops the shared reactor (ends the app).
        self.reactor.stop()


if __name__ == "__main__":
    a = QtGui.QApplication([])
    import qt4reactor
    qt4reactor.install()
    from twisted.internet import reactor
    kiethleyWidget = kiethleyclient(reactor)
    kiethleyWidget.show()
    run = reactor.run()
|
[
"jpalsberg2023@chadwickschool.org"
] |
jpalsberg2023@chadwickschool.org
|
2f90b60ed7ed37e8e46b21c3764967ab7950903c
|
57ea759b2f400f1dc155b7637533732c78a3f1b7
|
/marketplace/settings.py
|
23ff23f48b98d6562bca294310e93b56e9313612
|
[] |
no_license
|
hanifmisbah/Marketplace
|
219dce7b8cfe2a9509a6549729fea3febbe9cd3b
|
ee40a9a624021d10ff57befac88f306b1730bac7
|
refs/heads/master
| 2023-01-01T06:56:07.605659
| 2020-10-29T09:49:26
| 2020-10-29T09:49:26
| 308,255,051
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
"""
Django settings for marketplace project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f*^(1n3zwav8smf-rb4n&&08@q6u&or2d@5g^fy1nxpt^#7_gu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# ALLOWED_HOSTS = ['192.168.43.164']
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'crispy_forms',
'sales',
'toko',
'customers',
'products',
'suppliers',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'marketplace.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'marketplace.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'market',
'USER': 'postgres',
'PASSWORD':'hanifmisbah97',
'HOST' : 'localhost',
'PORT' : '',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS =[
BASE_DIR + "/static",
'var/www/static',
]
USE_THOUSAND_SEPARATOR = True
|
[
"hanifmisbah97@gmail.com"
] |
hanifmisbah97@gmail.com
|
25e3e1ad2d878f59ad263ae884e96026e3554a26
|
0cb42f98050eef8689f3d87067367b688871bd47
|
/petshop/api/urls.py
|
81f5d9a65074aae151c6af209f32f083baf07932
|
[] |
no_license
|
infsolution/AtividadeFinalAngular
|
b42554add907a5b502940997460451a7410ecb54
|
1071c38968d2e2d74c81030b7bd380442045c327
|
refs/heads/master
| 2023-01-12T09:07:10.807024
| 2019-07-14T03:46:07
| 2019-07-14T03:46:07
| 196,790,171
| 0
| 0
| null | 2023-01-07T07:45:41
| 2019-07-14T03:39:09
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
from django.urls import path

from . import views

# REST-style routing: one list + detail pair per resource.  Route names come
# from the view classes themselves so reverse() stays in sync with the views.
urlpatterns = [
    path('', views.index, name='index'),
    path('user/', views.UserList.as_view(), name=views.UserList.name),
    path('user/<int:pk>/', views.UserDetail.as_view(), name=views.UserDetail.name),
    path('pet/', views.PetList.as_view(), name=views.PetList.name),
    path('pet/<int:pk>/', views.PetDetail.as_view(), name=views.PetDetail.name),
    path('atendimento/', views.AtendimentoList.as_view(), name=views.AtendimentoList.name),
    path('atendimento/<int:pk>/', views.AtendimentoDetail.as_view(), name=views.AtendimentoDetail.name),
    path('medicamento/', views.MedicamentoList.as_view(), name=views.MedicamentoList.name),
    path('medicamento/<int:pk>/', views.MedicamentoDetail.as_view(), name=views.MedicamentoDetail.name),
]
|
[
"clsinfsolution@gmail.com"
] |
clsinfsolution@gmail.com
|
d3ad1ddb9912f5b9b310d13e2e040d02b687faba
|
9cc76b1b1dd0064ab6613cbca6ce93bc179db355
|
/ros_ws/devel/lib/python3/dist-packages/object_finder/msg/_objectFinderGoal.py
|
16fd1b88d6e38231d2f5710965045d1b42331d05
|
[] |
no_license
|
ABCaps35/learning_ros_ready_ws
|
1131c32b2ecadffa8dd186c9ebcfdba7284f30ad
|
1aa9c512d5006584e8bc84101a715e16a222a47d
|
refs/heads/main
| 2023-04-03T20:32:58.671255
| 2021-04-13T23:41:13
| 2021-04-13T23:41:13
| 357,715,306
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,668
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from object_finder/objectFinderGoal.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False  # True when running under Python 3
import genpy
import struct
# Autogenerated ROS action-goal message; comments below are review notes only.
class objectFinderGoal(genpy.Message):
  _md5sum = "8657c16ee27b175765f0d86cc8f66fbd"
  _type = "object_finder/objectFinderGoal"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#objectFinder.action
#goal:
#get object ID codes from <object_manipulation_properties/object_ID_codes.h>
#goal fields to fill in:
int32 object_id
#boolean to declare if object is on a horizontal surface of known height:
bool known_surface_ht
#if surface ht is known, fill it in
float32 surface_ht
"""
  # Fixed field set: int32 object_id, bool known_surface_ht, float32 surface_ht.
  __slots__ = ['object_id','known_surface_ht','surface_ht']
  _slot_types = ['int32','bool','float32']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       object_id,known_surface_ht,surface_ht
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(objectFinderGoal, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.object_id is None:
        self.object_id = 0
      if self.known_surface_ht is None:
        self.known_surface_ht = False
      if self.surface_ht is None:
        self.surface_ht = 0.
    else:
      # no arguments: start from all-zero defaults
      self.object_id = 0
      self.known_surface_ht = False
      self.surface_ht = 0.

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # single packed write: little-endian int32, uint8(bool), float32
      buff.write(_get_struct_iBf().pack(_x.object_id, _x.known_surface_ht, _x.surface_ht))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # (generated code: parameter name `str` shadows the builtin)
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 9
      (_x.object_id, _x.known_surface_ht, _x.surface_ht,) = _get_struct_iBf().unpack(str[start:end])
      # normalize the unpacked uint8 back to a Python bool
      self.known_surface_ht = bool(self.known_surface_ht)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_iBf().pack(_x.object_id, _x.known_surface_ht, _x.surface_ht))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      _x = self
      start = end
      end += 9
      (_x.object_id, _x.known_surface_ht, _x.surface_ht,) = _get_struct_iBf().unpack(str[start:end])
      self.known_surface_ht = bool(self.known_surface_ht)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
# Shared Struct for the uint32 length prefix, provided by genpy.
_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_iBf = None
def _get_struct_iBf():
global _struct_iBf
if _struct_iBf is None:
_struct_iBf = struct.Struct("<iBf")
return _struct_iBf
|
[
"acapelli345@gmail.com"
] |
acapelli345@gmail.com
|
580c7380ba8954b267f1ada589a644997b08598f
|
8ee12ccce396e0d43bd8473ec9f0a13c9c7844c7
|
/Mani_Vijay/python buit-in functions/002_raw+add.py
|
c440b472c2479eeb4a1cccf56783f1d859f043e9
|
[] |
no_license
|
Purushotamprasai/Python
|
4ed44e26ca5cec7bb39c5561f545bfc68499bcfd
|
ed6fbd0f73cc7be91661f544f464222030197097
|
refs/heads/master
| 2023-06-05T13:39:04.602783
| 2020-01-23T14:30:25
| 2020-01-23T14:30:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
# wap addition of two numbers using raw_input functions
# NOTE(review): Python 2 only (print statements, raw_input).  Despite the
# header comment it reads a single value and converts it to complex.
def main( ):
    var = raw_input("enter some data")
    var = complex(var)
    print "var = ",var
    print " type of var ",type(var)
    print " id of var ",id(var)

if(__name__ == "__main__"):
    main()
|
[
"noreply@github.com"
] |
Purushotamprasai.noreply@github.com
|
05197295c0708c220fb104fc0d6cdc664e16e627
|
1b15b42087d58002432daff45fafb7eb4d0ca2d8
|
/100_same_tree.py
|
7a4573991e2ee2aa8d8e23c58b17e4b3cb9268a4
|
[] |
no_license
|
georgebzhang/Python_LeetCode
|
2b92be66880eaf4642a603897386622dc81fbaf3
|
c1703358ceeed67e3e85de05eda74447f31176a2
|
refs/heads/master
| 2020-04-26T01:38:33.750580
| 2019-06-21T21:51:13
| 2019-06-21T21:51:13
| 173,209,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 962
|
py
|
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def isSameTree(self, p, q):
        """Return True iff trees p and q have identical structure and equal values.

        Fixed: the original serialized both trees into full preorder lists using
        the string 'null' as a gap marker, which (a) can collide with nodes whose
        value is the string 'null' and (b) never short-circuits.  A direct
        recursive comparison needs no sentinel and stops at the first mismatch.
        """
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        if p.val != q.val:
            return False
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)

    def print_answer(self, ans):
        # Helper used by test(): just echoes the result.
        print(ans)

    def test(self):
        """Smoke test: two identical three-node trees must compare equal."""
        p = TreeNode(1)
        p.left = TreeNode(2)
        p.right = TreeNode(3)
        q = TreeNode(1)
        q.left = TreeNode(2)
        q.right = TreeNode(3)
        ans = self.isSameTree(p, q)
        self.print_answer(ans)
if __name__ == '__main__':
    # Run the built-in smoke test when executed as a script.
    s = Solution()
    s.test()
|
[
"georgebzhang5@gmail.com"
] |
georgebzhang5@gmail.com
|
358b4ca183c5bb806e89aba3e3424841ea807616
|
8f4c691f190a1d4ffd4261ea6dca6a2d3a96284c
|
/csa/csa/doctype/union/union.py
|
86d5969dfee1ff7a1083ecbd7571ff80e1308109
|
[
"MIT"
] |
permissive
|
Jishnu70055/usermanagement
|
57abb738160fb213acdc2c71b40244eae4b06cee
|
f7b526335c2b99899afac188696071fa35df09ca
|
refs/heads/master
| 2023-09-03T17:30:50.147750
| 2021-10-21T13:27:38
| 2021-10-21T13:27:38
| 399,362,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
# Copyright (c) 2021, s and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Union(Document):
	# Frappe DocType controller for "Union"; no custom behaviour yet.
	pass
|
[
"jishnudq70055@gmail.com"
] |
jishnudq70055@gmail.com
|
c72fd3b58d7b69c030a63601b5e20b4f72228f2a
|
7966fa31437cc8a539621a5a0642ce24c1c9de50
|
/PycharmProjects/leetcode/knapsack/139单词拆分.py
|
202f72100f96201909c7949f236c9d1a17e238ce
|
[] |
no_license
|
crystal30/DataStructure
|
4f938508f4c60af9c5f8ec5520d5acedbe2dc90e
|
c55b0cfd2967a2221c27ed738e8de15034775945
|
refs/heads/master
| 2021-06-25T17:49:03.048853
| 2021-01-22T00:37:04
| 2021-01-22T00:37:04
| 192,374,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
class Solution:
    """Memoised word-break solver (LeetCode 139 variant)."""
    def __init__(self):
        # memo maps a remaining string -> whether it can be segmented.
        self.memo = dict()
    def wordBreak(self, s, wordDict) -> bool:
        """Return True when s can be fully consumed by repeatedly stripping
        dictionary words from either end.

        NOTE(review): stripping from the *end* as well as the start differs
        from the canonical prefix-only word-break recursion -- verify this is
        the intended semantics.
        """
        # s and wordDict are both non-empty (problem guarantee).
        if len(s) == 0:
            return True
        if s in self.memo.keys():
            return self.memo[s]
        re = []
        for word in wordDict:
            re_s = self.word_isvalid(s, word)
            # A shorter remainder means `word` matched one end of s.
            if len(s) != len(re_s):
                re.append(self.wordBreak(re_s, wordDict))
        for sub_re in re:
            if sub_re is True:
                self.memo[s] = True
                return True
        self.memo[s] = False
        return False
    def word_isvalid(self, s, word):
        """Strip `word` from the front (preferred) or back of s; return s unchanged if neither matches."""
        len_word = len(word)
        if s[:len_word] == word:
            return s[len_word:]
        elif s[-len_word:] == word:
            return s[:-len_word]
        else:
            return s
if __name__ == "__main__":
    so = Solution()
    # s = "catsandog"
    # wordDict = ["cats", "dog", "san", "and", "cat"]
    s = "catskicatcats"
    wordDict = ["cats", "cat", "dog", "ski"]
    # s = "leetcode"
    # wordDict = ["leet", "code"]
    # s = "applepenapple"
    # wordDict = ["apple", "pen"]
    re = so.wordBreak(s, wordDict)
    print(re)
    # re = so.word_isvalid(s, "cats")
    # print(re)
|
[
"zhao_crystal@126.com"
] |
zhao_crystal@126.com
|
c4744c241dfcca1e7a26ee3f2f6e104452cc97f7
|
cb61ba31b27b232ebc8c802d7ca40c72bcdfe152
|
/Misc/subarray_sum_negative.py
|
0847ef7b4914676b35fe5134c25da1ab1f5c7345
|
[
"Apache-2.0"
] |
permissive
|
saisankargochhayat/algo_quest
|
c7c48187c76b5cd7c2ec3f0557432606e9096241
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
refs/heads/master
| 2021-07-04T15:21:33.606174
| 2021-02-07T23:42:43
| 2021-02-07T23:42:43
| 67,831,927
| 5
| 1
|
Apache-2.0
| 2019-10-28T03:51:03
| 2016-09-09T20:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
# Reads t test cases of (N, X) plus N numbers each, then delegates to subarray().
t=int(input())
for i in range(t):
    arr=list()
    N,X=map(int,input().split())
    for j in range(N):
        num=int(input())
        arr.append(num)
    subarray(arr,N,X)  # NOTE(review): subarray() is not defined in this file -- presumably provided elsewhere; verify
|
[
"saisankargochhayat@gmail.com"
] |
saisankargochhayat@gmail.com
|
acdc0bb6b71acfc1b51ebfe3911a130e22a71455
|
6a74ae0a776dfa50e946651362ff97326fc9f6e1
|
/200/pt3/089.py
|
49dee26b7d7005bec0e64fda65c4c349d6b6596a
|
[] |
no_license
|
teotiwg/studyPython
|
799c1307d50ad77a27b8a8ca59c79b79f07c29cd
|
fd0c7f8af9b0ba9d832818d42aec320386bf857b
|
refs/heads/master
| 2023-02-17T17:04:23.809231
| 2021-01-11T09:27:20
| 2021-01-11T09:27:20
| 327,474,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
# Read a line and report whether it parses as an integer, a float, or neither.
numstr = input('숫자를 입력하세요')
try:
    num = int(numstr)
    print("입력한 수는 정수 %d 입니다." %num)
except ValueError:
    # int() failed -- fall back to parsing as a float.
    # Fixed: the original bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        num = float(numstr)
        print('입력한 수는 실수 %f 입니다.' %num)
    except ValueError:
        print('+++숫자를 입력하세요+++')
|
[
"kes2521@naver.com"
] |
kes2521@naver.com
|
8e4e9a84ac90cc3c82cd6cdaf5e67eb084cac8c2
|
d5ba475a6a782b0eed5d134b66eb8c601c41421c
|
/terrascript/resource/kubernetes.py
|
b36d593401d9adecbf53bb7af121910a44601d7d
|
[
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
amlodzianowski/python-terrascript
|
ab42a06a5167e53ad8093b656a9bf14a03cb031d
|
142b1a4d1164d1012ac8865d12fdcc72f1e7ae75
|
refs/heads/master
| 2021-05-19T11:59:47.584554
| 2020-03-26T07:13:47
| 2020-03-26T07:13:47
| 251,688,045
| 0
| 0
|
BSD-2-Clause
| 2020-03-31T18:00:22
| 2020-03-31T18:00:22
| null |
UTF-8
|
Python
| false
| false
| 2,743
|
py
|
# terrascript/resource/kubernetes.py
# One empty subclass per Terraform "kubernetes_*" resource type; all behaviour
# is inherited from terrascript.Resource.
import terrascript
class kubernetes_api_service(terrascript.Resource):
    pass
class kubernetes_cluster_role(terrascript.Resource):
    pass
class kubernetes_cluster_role_binding(terrascript.Resource):
    pass
class kubernetes_config_map(terrascript.Resource):
    pass
class kubernetes_cron_job(terrascript.Resource):
    pass
class kubernetes_daemonset(terrascript.Resource):
    pass
class kubernetes_deployment(terrascript.Resource):
    pass
class kubernetes_endpoints(terrascript.Resource):
    pass
class kubernetes_horizontal_pod_autoscaler(terrascript.Resource):
    pass
class kubernetes_ingress(terrascript.Resource):
    pass
class kubernetes_job(terrascript.Resource):
    pass
class kubernetes_limit_range(terrascript.Resource):
    pass
class kubernetes_namespace(terrascript.Resource):
    pass
class kubernetes_network_policy(terrascript.Resource):
    pass
class kubernetes_persistent_volume(terrascript.Resource):
    pass
class kubernetes_persistent_volume_claim(terrascript.Resource):
    pass
class kubernetes_pod(terrascript.Resource):
    pass
class kubernetes_pod_disruption_budget(terrascript.Resource):
    pass
class kubernetes_priority_class(terrascript.Resource):
    pass
class kubernetes_replication_controller(terrascript.Resource):
    pass
class kubernetes_role_binding(terrascript.Resource):
    pass
class kubernetes_resource_quota(terrascript.Resource):
    pass
class kubernetes_role(terrascript.Resource):
    pass
class kubernetes_secret(terrascript.Resource):
    pass
class kubernetes_service(terrascript.Resource):
    pass
class kubernetes_service_account(terrascript.Resource):
    pass
class kubernetes_stateful_set(terrascript.Resource):
    pass
class kubernetes_storage_class(terrascript.Resource):
    pass
# Explicit public API: keep in sync with the class definitions above.
__all__ = [
    "kubernetes_api_service",
    "kubernetes_cluster_role",
    "kubernetes_cluster_role_binding",
    "kubernetes_config_map",
    "kubernetes_cron_job",
    "kubernetes_daemonset",
    "kubernetes_deployment",
    "kubernetes_endpoints",
    "kubernetes_horizontal_pod_autoscaler",
    "kubernetes_ingress",
    "kubernetes_job",
    "kubernetes_limit_range",
    "kubernetes_namespace",
    "kubernetes_network_policy",
    "kubernetes_persistent_volume",
    "kubernetes_persistent_volume_claim",
    "kubernetes_pod",
    "kubernetes_pod_disruption_budget",
    "kubernetes_priority_class",
    "kubernetes_replication_controller",
    "kubernetes_role_binding",
    "kubernetes_resource_quota",
    "kubernetes_role",
    "kubernetes_secret",
    "kubernetes_service",
    "kubernetes_service_account",
    "kubernetes_stateful_set",
    "kubernetes_storage_class",
]
|
[
"markus@juenemann.net"
] |
markus@juenemann.net
|
b6ae8c8519f939cc6a51e7ee6f2fefd15c8b2259
|
3de2a746243ad1cb000994a06a0f9699db9a901f
|
/abc184b.py
|
64694289e5c4aec649f12bd9438abd27db4e5b01
|
[] |
no_license
|
takumi152/atcoder
|
71d726ffdf2542d8abac0d9817afaff911db7c6c
|
ebac94f1227974aa2e6bf372e18605518de46441
|
refs/heads/master
| 2022-10-30T12:14:41.742596
| 2022-09-29T19:49:32
| 2022-09-29T19:49:32
| 181,502,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
def main():
    """AtCoder ABC184 B: apply quiz results to a non-negative score.

    Reads `n x` then a string of 'o'/'x'; 'o' adds a point, 'x' removes one
    but never below zero.
    """
    size, initial = map(int, input().split())
    results = input()
    rating = initial
    for idx in range(size):
        mark = results[idx]
        if mark == 'o':
            rating += 1
        elif mark == 'x' and rating > 0:
            rating -= 1
    print(rating)

if __name__ == '__main__':
    main()
|
[
"takumi152@hotmail.com"
] |
takumi152@hotmail.com
|
931bfaa4cc8c3856ad42bb1d98c301e63b8c1bd6
|
c1fcfa74629b0ab3cf806c2a565aa869f7fea3d9
|
/Contents/Libraries/Shared/resources/lib/proxies/anonymster.py
|
b424e52f3034d1352d01ebb78a5228d0b7a1245b
|
[] |
no_license
|
gus4520/FMoviesPlus.bundle
|
e884e37f0aca68ac1d4c1e8d7dc7ff741ea323eb
|
102baa1a5c7cef3ef3f728db226e01fbdf34da7f
|
refs/heads/master
| 2020-03-10T02:43:18.087100
| 2018-01-15T20:07:11
| 2018-01-15T20:07:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,657
|
py
|
import re,urllib,urlparse,base64,time,json
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib.libraries import control
from resources.lib import resolvers
# Web Proxy
name = 'Anonymster'
loggertxt = []
PROXY_URL = "https://proxy.anonymster.com/browse.php?b=2&u="
class proxy:
    """Web-proxy wrapper that fetches pages through proxy.anonymster.com (Python 2 module)."""
    def __init__(self):
        del loggertxt[:]
        self.ver = '0.0.1'
        self.update_date = 'Dec. 19, 2017'
        log(type='INFO', method='init', err=' -- Initializing %s %s %s Start --' % (name, self.ver, self.update_date))
        self.base_link = 'https://proxy.anonymster.com'
        self.name = name
        self.loggertxt = []
        self.disabled = False
        self.captcha = False
        self.ssl = True
        self.speedtest = 0
        self.headers = {'Connection' : 'keep-alive', 'User-Agent' : client.randomagent()}
        # testSite() performs a live HTTP probe, so constructing a proxy does network I/O.
        self.working = self.testSite()
        log(type='INFO', method='init', err=' -- Initializing %s %s %s End --' % (name, self.ver, self.update_date))
    def getLog(self):
        # Expose the module-level log buffer through the instance.
        self.loggertxt = loggertxt
        return self.loggertxt
    def testSite(self):
        """Probe base_link and return True when it answers with a good HTTP status; also records the probe latency in self.speedtest."""
        try:
            if self.disabled == True:
                log('INFO','testSite', 'Plugin Disabled')
                return False
            x1 = time.time()
            http_res = client.request(url=self.base_link, output='responsecode')
            self.speedtest = time.time() - x1
            if http_res in client.HTTP_GOOD_RESP_CODES:
                log('SUCCESS', 'testSite', 'HTTP Resp : %s for %s' % (http_res,self.base_link))
                return True
            log('ERROR', 'testSite', 'HTTP Resp : %s via proxy for %s' % (http_res,self.base_link))
            return False
        except Exception as e:
            log('ERROR','testSite', '%s' % e)
            return False
    def request(self, url, close=True, redirect=True, followredirect=False, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30', httpsskip=False, use_web_proxy=False, XHR=False, IPv4=False):
        """Delegate to requestdirect() unless the startup probe failed (returns None then)."""
        if self.working == False:
            log("Proxy working status is %s" % self.working)
            return None
        if headers == None:
            headers = self.headers
        return requestdirect(url=url, close=close, redirect=redirect, followredirect=followredirect, error=error, proxy=proxy, post=post, headers=headers, mobile=mobile, limit=limit, referer=referer, cookie=cookie, output=output, timeout=timeout, httpsskip=httpsskip, use_web_proxy=use_web_proxy, XHR=XHR, IPv4=IPv4)
def requestdirect(url, close=True, redirect=True, followredirect=False, error=False, proxy=None, post=None, headers=None, mobile=False, limit=None, referer=None, cookie=None, output='', timeout='30', httpsskip=False, use_web_proxy=False, XHR=False, IPv4=False):
    """Fetch *url* through the anonymster web proxy and scrub the response.

    Strips <script> blocks and the proxy's own URL-rewriting artifacts from the
    page, then returns it in the shape requested by *output*.  Returns None on
    any failure.  (Python 2 module: relies on `unicode` and py2 urlparse.)
    """
    try:
        urlhost = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
        if headers == None:
            headers = {'Connection' : 'keep-alive'}
            headers['User-Agent'] = client.randomagent()
        res = client.request(url = PROXY_URL + url, close=close, redirect=redirect, followredirect=followredirect, error=error, proxy=proxy, post=post, headers=headers, mobile=mobile, limit=limit, referer=referer, cookie=cookie, output=output, timeout=timeout, httpsskip=httpsskip, use_web_proxy=use_web_proxy, XHR=XHR, IPv4=IPv4)
        page_data_string = client.getPageDataBasedOnOutput(res, output)
        #print page_data_string
        # Drop scripts injected/kept by the proxy before any further cleanup.
        pattern = re.compile('<script[\s\S]+?/script>')
        page_data_string = re.sub(pattern, '', page_data_string)
        try:
            page_data_string = page_data_string.replace('\n','')
            #page_data_string = page_data_string.replace('\r','r').replace('\n','<br/>').replace('\w','').replace('\.','').replace('\t','').replace('\ ','')
        except Exception as e:
            log('FAIL','requestdirect-1', '%s' % e, dolog=False)
        #print page_data_string
        # Round-trip through JSON to flatten escape sequences; best effort only.
        try:
            page_data_stringx = json.dumps(page_data_string)
            page_data_stringx = page_data_stringx.replace('\\','')
            page_data_stringx = page_data_stringx[1:-1]
            page_data_string = page_data_stringx
        except Exception as e:
            log('FAIL','requestdirect-2', '%s' % e, dolog=False)
        #print page_data_string
        #page_data_string = str(page_data_string)
        try:
            r = unicode(page_data_string, "utf-8")
            page_data_string = r
        except Exception as e:
            log('FAIL','requestdirect-3', '%s' % e, dolog=False)
        try:
            r = str(page_data_string)
            page_data_string = r
        except Exception as e:
            log('FAIL','requestdirect-4', '%s' % e, dolog=False)
        # Remove the proxy's URL-rewriting prefixes so links point at the origin again.
        page_data_string = page_data_string.replace('https://proxy.anonymster.com/browse.php?', '')
        page_data_string = page_data_string.replace('/browse.php?u=', '')
        page_data_string = page_data_string.replace('&b=2', '')
        page_data_string = page_data_string.replace('b=2', '')
        page_data_string = page_data_string.replace('u=', '')
        page_data_string = page_data_string.replace('&http', 'http')
        page_data_string = page_data_string.replace('/http', 'http')
        try:
            page_data_string = page_data_string.decode('utf-8')
        except:
            pass
        try:
            page_data_string = urllib.unquote_plus(page_data_string)
        except:
            pass
        try:
            page_data_string = page_data_string.encode('utf-8')
        except:
            pass
        return client.getResponseDataBasedOnOutput(page_data_string, res, output)
    except Exception as e:
        log('ERROR','requestdirect', '%s' % e)
        return None
def log(type='INFO', method='undefined', err='', dolog=True, logToControl=False, doPrint=True):
    """Append a timestamped message to the module log buffer and optionally
    forward it to the Plex control log and/or stdout.  (Python 2: print statement.)
    NOTE: the `type` parameter shadows the builtin; kept because callers pass it by keyword.
    """
    try:
        msg = '%s: %s > %s > %s : %s' % (time.ctime(time.time()), type, name, method, err)
        if dolog == True:
            loggertxt.append(msg)
        if logToControl == True:
            control.log(msg)
        if control.doPrint == True and doPrint == True:
            print msg
    except Exception as e:
        control.log('Error in Logging: %s >>> %s' % (msg,e))
|
[
"coderalphaakv@gmail.com"
] |
coderalphaakv@gmail.com
|
c69f07208e4745e5ea251ca85528efeaedd27a54
|
e21330ac23917670799616e7fc44d3a73171042d
|
/algorithm_project/users/urls.py
|
4af926f1a6913423f31c430f03386b4e9e85ebd4
|
[
"MIT"
] |
permissive
|
godwon2095/algorithm_project
|
813b25e16a26723d0d1748ee24b23b4d16e70974
|
c8140f75a14535592cac06a62c480be13c45d7c1
|
refs/heads/master
| 2020-05-28T06:28:22.094441
| 2019-05-27T21:37:35
| 2019-05-27T21:37:35
| 188,909,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
from django.urls import path
from algorithm_project.users.views import (
user_redirect_view,
user_update_view,
user_detail_view,
)
# URL namespace: routes reverse as "users:redirect", "users:update", "users:detail".
app_name = "users"
urlpatterns = [
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    path("<str:username>/", view=user_detail_view, name="detail"),
]
|
[
"jsw2095@naver.com"
] |
jsw2095@naver.com
|
cfd4a4929001cfecb69580906667d2c26c280741
|
664646ccbeb6575582299e7d1c6ccc696f07ccba
|
/tools/oneforall/modules/datasets/passivedns_api.py
|
13da6b1fa0bfbddd994246c0aad0e6be01b09d64
|
[] |
no_license
|
0xss/bayonet
|
3f1ce5832a06eef7e60b198c6c56cf59e4543199
|
d723dbf0299ac86d9a4419741a197985558e283c
|
refs/heads/master
| 2021-02-25T20:21:11.342592
| 2020-03-06T04:40:14
| 2020-03-06T04:40:14
| 245,462,098
| 0
| 1
| null | 2020-03-06T16:02:33
| 2020-03-06T16:02:32
| null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
from config import Oneforall
from tools.oneforall.common.query import Query
class PassiveDnsAPI(Query):
    """Subdomain collector backed by the PassiveDNS dataset API."""
    def __init__(self, domain):
        Query.__init__(self)
        self.domain = self.register(domain)
        self.module = 'Dataset'
        self.source = 'PassiveDnsQuery'
        # Falls back to the public endpoint when no address is configured.
        self.addr = Oneforall.passivedns_api_addr or 'http://api.passivedns.cn'
        self.token = Oneforall.passivedns_api_token
    def query(self):
        """
        Query the API for subdomains and match them against the target domain.
        """
        self.header = self.get_header()
        self.header.update({'X-AuthToken': self.token})
        self.proxy = self.get_proxy(self.source)
        url = self.addr + '/flint/rrset/*.' + self.domain
        resp = self.get(url)
        if not resp:
            return
        subdomains = self.match(self.domain, str(resp.json()))
        # Merge the matched subdomains into the accumulated result set.
        self.subdomains = self.subdomains.union(subdomains)
    def run(self):
        """
        Execution entry point: check config, query, then persist results.
        """
        if not self.check(self.addr):
            return
        self.begin()
        self.query()
        self.finish()
        self.save_json()
        self.gen_result()
        self.save_db()
self.save_db()
def do(domain): # unified entry-point name, convenient for multi-threaded invocation
    """
    Unified calling entry point for the module.
    :param str domain: domain name to enumerate
    """
    query = PassiveDnsAPI(domain)
    query.run()
if __name__ == '__main__':
    do('example.com')
|
[
"1767986993@qq.com"
] |
1767986993@qq.com
|
209bbad24dacc64dfcafafba3760d51026cf9ce4
|
7ec35bd037077e9b65d3fa26a91978e8652c7409
|
/Stream-3/Full-Stack-Development/21.Django REST Framework/2.Serializers-And-Class-Based-Views/django_todo/todo/views.py
|
f79744a2bc2b32f9331530f531e9a4543b000663
|
[
"MIT"
] |
permissive
|
GunnerJnr/_CodeInstitute
|
8f743abef66c33a77ce13ca719963e93ffe22607
|
efba0984a3dc71558eef97724c85e274a712798c
|
refs/heads/master
| 2023-01-05T10:53:57.536047
| 2020-01-27T13:04:12
| 2020-01-27T13:04:12
| 99,014,961
| 8
| 6
|
MIT
| 2022-12-26T20:24:35
| 2017-08-01T15:15:55
|
CSS
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from todo.serializers import TodoSerializer
from todo.models import Todo
# Create your views here.
class TodoView(APIView):
    """
    TodoView: used to handle the incoming requests relating to `todo` items
    """
    def get(self, request):
        """
        Fetch every stored `todo` item, serialize the queryset to JSON with
        `TodoSerializer`, and return the serialized payload.
        """
        queryset = Todo.objects.all()
        payload = TodoSerializer(queryset, many=True).data
        return Response(payload)
|
[
"gunnerjnr@live.co.uk"
] |
gunnerjnr@live.co.uk
|
cbe47d4a7fded25f5b1a067869bebac87802ba37
|
4f325c55882487af0aab8853179a7ed5867953b9
|
/pssshclient.py
|
15bd33ee5692b7fcac7d53d0c6cd2adec6d166e1
|
[] |
no_license
|
ravijaya/oct26
|
6ce7112f85db86fa41fbb2ff6f9f01650055ad6a
|
64f7dbe759ddc652297365c8a635b239e1ef0cba
|
refs/heads/master
| 2020-08-28T12:29:54.644234
| 2019-10-26T11:41:41
| 2019-10-26T11:41:41
| 217,700,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
"""single threaded ssh client"""
import paramiko
def ssh_client(host, port, user, pwd, job):
    """Run *job* on host:port over SSH and return its decoded output.

    Returns stdout when the command produced any, otherwise stderr.
    NOTE(review): AutoAddPolicy blindly trusts unknown host keys -- fine for a
    training exercise, unsafe in production.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, port, user, pwd)
    stdin, stdout, stderr = ssh.exec_command(job)
    output = stdout.read()
    response = output if output else stderr.read()  # if else conditional operator
    ssh.close()
    return response.decode()
if __name__ == '__main__':
    # Demo invocation against a fixed training host (live network call).
    r = ssh_client('52.66.251.190', '22', 'training', 'training', 'lscpu')
    print(r)
|
[
"ravijaya@localhost.localdomain"
] |
ravijaya@localhost.localdomain
|
c12bee6b4ad787e6305c5304f580559473b1b30b
|
4c45bd5cb5d71e8563c8aca3e706e7275965e5fd
|
/users/tests.py
|
5ea92a18481ef8300ddf38bc8967401bb7f7ce4b
|
[
"BSD-2-Clause"
] |
permissive
|
diogobaeder/n2n
|
4794faa6de46e1dadb9ac6c3611cb9ac738411c3
|
aca2488d3d54067d4aea2f69ec37643a897735eb
|
refs/heads/master
| 2021-01-23T10:43:14.405346
| 2017-06-01T18:19:52
| 2017-06-01T18:19:52
| 93,084,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,584
|
py
|
from uuid import UUID
from django.test import TestCase
from .models import *
class UserTest(TestCase):
    """Model tests for User: creation, project M2M, company FK, uuid lookup."""
    def test_creates_basic_user(self):
        user = User.objects.create(name='John')
        self.assertEqual(user.name, 'John')
        self.assertIsInstance(user.uuid, UUID)
    def test_adds_a_project(self):
        user = User.objects.create(name='John')
        project = Project.objects.create(name='Lunch')
        user.projects.add(project)
        self.assertEqual(user.projects.first().name, 'Lunch')
    def test_can_belong_to_a_company(self):
        company = Company.objects.create(name='Acme')
        user = User.objects.create(name='John', company=company)
        self.assertEqual(user.company.name, 'Acme')
    def test_gets_by_uuid(self):
        user = User.objects.create(name='John')
        retrieved = User.objects.get_by_uuid(user.uuid)
        self.assertEqual(retrieved.id, user.id)
class CompanyTest(TestCase):
    """Model tests for Company: creation and uuid lookup."""
    def test_creates_basic_company(self):
        company = Company.objects.create(name='Acme')
        self.assertEqual(company.name, 'Acme')
        self.assertIsInstance(company.uuid, UUID)
    def test_gets_by_uuid(self):
        company = Company.objects.create(name='Acme')
        retrieved = Company.objects.get_by_uuid(company.uuid)
        self.assertEqual(retrieved.id, company.id)
class ProjectTest(TestCase):
    """Model tests for Project: basic creation."""
    def test_creates_basic_project(self):
        project = Project.objects.create(name='Lunch')
        self.assertEqual(project.name, 'Lunch')
        self.assertIsInstance(project.uuid, UUID)
|
[
"diogobaeder@yahoo.com.br"
] |
diogobaeder@yahoo.com.br
|
4a2d45213d44ffeeaded394b450ea7daf5b65bb7
|
bb198232df12a1adb9e8a6164ff2a403bf3107cf
|
/wifi-dump-parser-3/bar_graph_plot.py
|
4c8876a1e9badd818c8ba52c278afd5a7bcc5621
|
[] |
no_license
|
vanello/wifi-arsenal
|
9eb79a43dfdd73d3ead1ccd5d2caf9bad9e327ee
|
1ca4c5a472687f8f017222893f09a970652e9a51
|
refs/heads/master
| 2021-01-16T22:00:37.657041
| 2015-09-03T03:40:43
| 2015-09-03T03:40:43
| 42,060,303
| 1
| 0
| null | 2015-09-07T15:24:11
| 2015-09-07T15:24:11
| null |
UTF-8
|
Python
| false
| false
| 5,829
|
py
|
#Author : Abhinav Narain
#Date : 9-sept-2013
#Purpose : To plot the #devices,AP inside homes
from magicplott import *
def pickle_reader(input_folder):
    """Collect per-router AP and device MAC lists from pickled dump files.

    Each file in *input_folder* pickles
    [router_id, ap_macs, device_macs, ap_map, device_map, rate_map].
    Returns [home_ap_table, home_device_table], both keyed by router_id.
    """
    print("the pickle reader called ")
    data_fs = os.listdir(input_folder)
    home_device_table = defaultdict(list)
    home_ap_table = defaultdict(list)
    for f_name in data_fs:
        # Fixed: 'with' guarantees the file handle is closed; the original
        # pickle.load(open(...)) leaked one handle per file.
        with open(input_folder + f_name, 'rb') as f:
            _f_content = pickle.load(f)
        router_id = _f_content[0]
        home_ap_table[router_id] = _f_content[1]
        home_device_table[router_id] = _f_content[2]
    return [home_ap_table, home_device_table]
def pickle_reader_time_map(input_folder):
    """Collect per-router AP and device *time maps* from pickled dump files.

    Same file layout as pickle_reader(), but keeps the time-keyed maps
    (indices 3 and 4) instead of the flat MAC lists.
    Returns [home_ap_table, home_device_table], both keyed by router_id.
    """
    print("the pickle reader called ")
    data_fs = os.listdir(input_folder)
    home_device_table = defaultdict(list)
    home_ap_table = defaultdict(list)
    for f_name in data_fs:
        # Fixed: 'with' closes the handle; the original open() leaked it.
        with open(input_folder + f_name, 'rb') as f:
            _f_content = pickle.load(f)
        router_id = _f_content[0]
        home_ap_table[router_id] = _f_content[3]
        home_device_table[router_id] = _f_content[4]
    return [home_ap_table, home_device_table]
if __name__=='__main__':
    '''
    This main function is for plotting the number
    of distinct devices and Access Points seen by
    the BISmark Access Points inside homes
    (Python 2 script: print statement, dict.iteritems).
    '''
    if len(sys.argv) !=4:
        print "usage : python unpickeler.py <data_folder_2GHz> <data_folder_5GHz> <filename(without png extention)> "
        sys.exit(0)
    input_folder = sys.argv[1]
    input_folder5 = sys.argv[2]
    outfile_name = sys.argv[3]
    home_ap_2_table=defaultdict(list)
    home_ap_5_table=defaultdict(list)
    home_device_2_table=defaultdict(list)
    home_device_5_table=defaultdict(list)
    [home_ap_2_table,home_device_2_table]=pickle_reader(input_folder)
    [home_ap_5_table,home_device_5_table]=pickle_reader(input_folder5)
    # Build [router, #non-AP devices, #APs] rows for the 2.4 GHz band.
    new_list_2=[]
    for k,v in home_ap_2_table.iteritems():
        list_devices=home_device_2_table[k]
        new_list_devices= [x for x in list_devices if x not in v]
        new_list_2.append([k,len(new_list_devices),len(v)])
    new_list_2.sort(key=lambda x: x[1])
    labels_2,home_device_count_2,home_ap_count_2=[],[],[]
    for i in new_list_2 :
        labels_2.append(i[0])
        home_device_count_2.append(i[1])
        home_ap_count_2.append(i[2])
    # Same rows for the 5 GHz band.
    new_list_5=[]
    for k,v in home_ap_5_table.iteritems():
        list_devices=home_device_5_table[k]
        new_list_devices= [x for x in list_devices if x not in v]
        new_list_5.append([k,len(new_list_devices),len(v)])
    new_list_5.sort(key=lambda x: x[1])
    labels_5,home_device_count_5,home_ap_count_5=[],[],[]
    for i in new_list_5 :
        labels_5.append(i[0])
        home_device_count_5.append(i[1])
        home_ap_count_5.append(i[2])
    bar_graph_plotter(labels_5,
                      home_device_count_5,
                      'RouterID',
                      'Device Count',
                      'Number of Devices observed in homes(5 GHz)',
                      outfile_name+'5_devices.png'
                      )
    bar_graph_plotter(labels_2,
                      home_device_count_2,
                      'RouterID',
                      'Device Count',
                      'Number of Devices observed in homes(2.4 GHz)',
                      outfile_name+'2_4_devices.png'
                      )
    # Re-sort by AP count for the AP plots.
    new_list_2.sort(key=lambda x: x[2])
    labels_2,home_device_count_2,home_ap_count_2=[],[],[]
    for i in new_list_2 :
        labels_2.append(i[0])
        home_device_count_2.append(i[1])
        home_ap_count_2.append(i[2])
    new_list_5.sort(key=lambda x: x[2])
    labels_5,home_device_count_5,home_ap_count_5=[],[],[]
    for i in new_list_5 :
        labels_5.append(i[0])
        home_device_count_5.append(i[1])
        home_ap_count_5.append(i[2])
    bar_graph_plotter(labels_5,
                      home_ap_count_5,
                      'RouterID',
                      'Access Points Count',
                      'Number of Access Points observed in homes(5 GHz)',
                      outfile_name+'5_ap.png'
                      )
    bar_graph_plotter(labels_2,
                      home_ap_count_2,
                      'RouterID',
                      'Device Count',
                      'Number of Devices and Access Points observed in homes(2.4 GHz)',
                      outfile_name+'2_4_ap.png'
                      )
#Date : 15 Sept, 2012
#Partially written; needs to be completed
# NOTE(review): disabled with `if 0:`; everything after sys.exit(1) is dead code
# that references names (list_devices, v, k) not defined in this scope.
if 0:# __name__=='__main__':
    '''
    This function is for plotting the number of Devices
    and Access Points witnessed by BISmark Access Point
    *persistently*
    '''
    if len(sys.argv) !=3:
        print "usage : python unpickeler.py data_folder_2GHz filename.png "
        sys.exit(0)
    input_folder = sys.argv[1]
    outfile_name = sys.argv[2]
    home_ap_2_table=defaultdict(list)
    home_device_2_table=defaultdict(list)
    [home_ap_2_table,home_device_2_table]=pickle_reader_time_map(input_folder)
    new_list_2=[]
    for k,ap_time_map in home_ap_2_table.iteritems():
        for time,list_of_aps in ap_time_map.iteritems():
            print time, len(list_of_aps)
        print "++++"
    sys.exit(1)
    new_list_devices= [x for x in list_devices if x not in v]
    new_list_2.append([k,len(new_list_devices),len(v)])
    new_list_2.sort(key=lambda x: x[1])
    labels_2,home_device_count_2,home_ap_count_2=[],[],[]
    for i in new_list_2 :
        labels_2.append(i[0])
        home_device_count_2.append(i[1])
        home_ap_count_2.append(i[2])
|
[
"oleg.kupreev@gmail.com"
] |
oleg.kupreev@gmail.com
|
eecd8f9f717bf3993d28bdb0a215b4bf7bcc4cf9
|
8ffccb986def3f1f669b475a8575e05dccd77163
|
/py02프로그래밍기초/py02_09ex3_average.py
|
05c1e29c77f959eaded24fb96d3d2e583ce1fbf5
|
[] |
no_license
|
pythonJhu/testProject
|
09f5d412b0271e17b31f78fd7133d6e54464c41a
|
74f27b0f5c2b798122c3edbd66f7b485a8add6d5
|
refs/heads/master
| 2021-01-01T10:55:56.370576
| 2020-03-08T07:09:33
| 2020-03-08T07:09:33
| 239,248,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
# Read two subject scores, report their average with a grade message.
value1 = input("첫번째 과목 점수를 입력하세요 : ")
value2 = input("두번째 과목 점수를 입력하세요 : ")
value1 = int(value1)  # convert the string value1 to an integer (type conversion)
value2 = int(value2)  # convert the string value2 to an integer (type conversion)
total = value1 + value2  # renamed from 'sum' to stop shadowing the builtin sum()
average = total / 2
print(' ------------------ value1 = ', value1, ' value2 = ', value2, '-------------------- ')
if average >= 95:
    print('verry good')
else:
    print('just good')
print(' ------------------- average = ', average, '------------------- ')
|
[
"bvyuguy@md.com"
] |
bvyuguy@md.com
|
7e174e8fb9a2e4c5b0c0a5341a661db12f49e731
|
b9cee0411d39d25a534c44d7e43e18924af02214
|
/highway_env/road/lane.py
|
46acc9e73f7079c07e9687b19374e078548c74ba
|
[
"MIT"
] |
permissive
|
galleon/highway-env
|
1a848b0c742d6bb5c888da680a98c9f21d665b31
|
2fba2bda9c6b29218db3a6d2a7c2d7de2f1a4bf1
|
refs/heads/master
| 2022-11-06T17:46:48.533001
| 2020-06-22T05:31:48
| 2020-06-22T05:31:48
| 272,162,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
import numpy as np
class LineType:
    """
    A lane side line type.

    Integer codes used (e.g. as the default StraightLane.line_types) to tag
    how each lane edge is marked.
    """
    NONE = 0
    STRIPED = 1
    CONTINUOUS = 2
    CONTINUOUS_LINE = 3
class StraightLane:
    """
    A straight line lane running from `beg` to `end`.

    Fixes over the original: __init__ was missing `self`; width_at returned the
    nonexistent attribute `self.longitudinal`; local_coordinates referenced the
    undefined `self.start` and `self.dot`.
    """
    def __init__(self, beg, end, width=4, line_types=None, forbidden=False, speed_limit=20, priority=0):
        """Store lane geometry and metadata.

        :param beg: lane start point (array-like of 2 floats)
        :param end: lane end point (array-like of 2 floats)
        :param width: constant lane width
        :param line_types: pair of LineType codes for the two edges
        """
        self.beg = beg
        self.end = end
        self.width = width
        self.heading = 0
        self.length = np.linalg.norm(self.end - self.beg)
        self.line_types = line_types or [LineType.STRIPED, LineType.STRIPED]
        # Unit vector along the lane and its 90-degree rotation (lateral axis).
        self.direction = (self.end - self.beg) / self.length
        self.direction_lateral = np.array([-self.direction[1], self.direction[0]])
        self.forbidden = forbidden
        self.priority = priority
        self.speed_limit = speed_limit
    def position(self, longitudinal, lateral):
        """Convert (longitudinal, lateral) lane coordinates to a world position."""
        return self.beg + longitudinal*self.direction + lateral*self.direction_lateral
    def heading_at(self, longitudinal):
        # A straight lane has constant heading everywhere.
        return self.heading
    def width_at(self, longitudinal):
        # Fixed: a straight lane has constant width (was `return self.longitudinal`).
        return self.width
    def local_coordinates(self, position):
        """Project a world position onto the lane's (longitudinal, lateral) frame."""
        delta = position - self.beg  # fixed: was self.start (undefined)
        longitudinal = np.dot(delta, self.direction)
        lateral = np.dot(delta, self.direction_lateral)  # fixed: was self.dot
        return longitudinal, lateral
|
[
"guillaume.alleon@gmail.com"
] |
guillaume.alleon@gmail.com
|
39f5ffac6c1fa9cc6b5aca33866d41cba6910f02
|
b7125b27e564d2cc80a2ce8d0a6f934aa22c8445
|
/.history/sudoku_20201103142305.py
|
4850f9b602a9fe01ba0698b7ba97c70a49c9bd0d
|
[] |
no_license
|
JensVL96/Puzzle-solver-for-fun
|
4c15dcd570c3705b7ac555efb56b52913e81083c
|
6d8a4378a480372213a596a336a4deca727a00fc
|
refs/heads/master
| 2021-07-15T05:19:42.185495
| 2020-11-08T13:59:49
| 2020-11-08T13:59:49
| 224,855,888
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,312
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
from math import floor
import pygame as pg
import numpy as np
# For error highlighting
def set_highlight(row, col, blk, lock):
global input_lock
input_lock = lock
global row_index
row_index = row
global col_index
col_index = blk
global blk_index
blk_index = col
def get_cord(pos):
    """Translate a pixel position into board cell indices, stored as globals.

    TOP_LX/TOP_LY/BLOCK_SIZE presumably come from the star-imported config
    module -- TODO confirm.
    """
    global box_index_x
    box_index_x = int((pos[0] - TOP_LX)//BLOCK_SIZE)
    global box_index_y
    box_index_y = int((pos[1] - TOP_LY)//BLOCK_SIZE)
def valid(grid, x, y, val):
    """Return True when placing `val` at grid[x][y] breaks no Sudoku rule.

    On conflict, records the offending row/column/block via set_highlight()
    and returns False.  NOTE: the local `input_lock` shadows the module-level
    global of the same name; only set_highlight() updates the global.
    """
    input_lock = 0
    row = col = blk = (0, 0)
    for index in range(9):
        # Scan the line of cells sharing x (labelled "column" here; orientation
        # follows get_cord's axis convention -- verify against the UI code).
        if grid[x][index] == val:
            col = (x, index)
            input_lock = 1
        # Scan the line of cells sharing y.
        if grid[index][y] == val:
            row = (index, y)
            input_lock = 1
    # Finds the 3x3 block containing (x, y).
    index_x = x // 3 # integer division
    index_y = y // 3
    # Check if value in block
    for i in range(index_x * 3, index_x * 3 + 3):
        for j in range (index_y * 3, index_y * 3 + 3):
            if grid[i][j] == val:
                blk = (i, j)
                input_lock = 1
    if input_lock == 1:
        set_highlight(row, col, blk, input_lock)
        return False
    return True
def valid_cdt(cdt_list, val):
    """Return True when *val* may be added to a cell's candidate list.

    A plain int cell (committed value) is ignored and always counts as valid;
    a candidate list is valid only while it holds at most 9 entries and does
    not already contain *val*.
    """
    if type(cdt_list) == int:  # committed cell: nothing to check
        return True
    if len(cdt_list) > 9:
        return False
    return val not in cdt_list
class Main():
    """Application object: owns the pygame window and runs the event loop."""

    def __init__(self):
        # Placeholder; run() builds the actual board locally and blocks
        # until the user quits.
        self.board = []
        self.run()

    def run(self):
        """Initialise pygame, then loop: handle input, validate, redraw."""
        pg.init()
        self.screen = pg.display.set_mode(SCREEN_RES)
        pg.display.set_caption('Sudoku solver')
        display = Display_board(self.screen)
        val = 0               # digit typed by the user this frame (0 = none)
        blink = False         # True while a board cell is selected
        alpha = 1             # blink overlay alpha
        a_change = True       # blink fade direction
        blink_color = GREEN   # switches to RED after an invalid entry
        candidates = []       # pencil-mark candidates for the current cell
        get_cord(INITIAL_CORDS)
        set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
        board = create_board().board
        while 1:
            for event in pg.event.get():
                if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                    exit()
                if event.type == pg.MOUSEBUTTONDOWN and input_lock != 1:
                    pos = pg.mouse.get_pos()
                    get_cord(pos)
                    # Checks if selection is on the board
                    if pos[0] < TOP_LX or pos[1] < TOP_LY or pos[0] > int(BOT_RX) or pos[1] > int(BOT_RY):
                        blink = False
                    else:
                        blink = True
                if event.type == pg.KEYDOWN and input_lock != 1:
                    if event.key == pg.K_1:
                        val = 1
                    if event.key == pg.K_2:
                        val = 2
                    if event.key == pg.K_3:
                        val = 3
                    if event.key == pg.K_4:
                        val = 4
                    if event.key == pg.K_5:
                        val = 5
                    if event.key == pg.K_6:
                        val = 6
                    if event.key == pg.K_7:
                        val = 7
                    if event.key == pg.K_8:
                        val = 8
                    if event.key == pg.K_9:
                        val = 9
                    if event.key == pg.K_BACKSPACE:
                        board[int(box_index_x)][int(box_index_y)] = 0
                elif event.type == pg.KEYDOWN and input_lock == 1:
                    # Board is locked after a bad entry: only backspace clears it.
                    if event.key == pg.K_BACKSPACE:
                        val = 0
                        set_highlight(INITIAL_CORDS, INITIAL_CORDS, INITIAL_CORDS, INITIAL_LOCK)
                        blink_color = GREEN
                        board[int(box_index_x)][int(box_index_y)] = 0
            if val != 0:
                # display.draw_val(val, box_index_x, box_index_y)
                candidates = []
                print("value: ", val,"occupying position: ", board[box_index_x][box_index_y])
                if valid(board, box_index_x, box_index_y, val):
                    # NOTE(review): `type(...) == 0` can never be true (a type
                    # object never equals 0) — probably meant
                    # `board[...] == 0`; confirm before relying on this chain.
                    if type(board[box_index_x][box_index_y]) == 0: # No candidates
                        board[box_index_x][box_index_y] = val
                    elif valid_cdt(board[box_index_x][box_index_y], val): # Switching from number to list
                        candidates.append(val)
                        board[box_index_x][box_index_y] = candidates
                    elif valid_cdt(board[box_index_x][box_index_y], val): # Adding candidate to list
                        # NOTE(review): identical condition to the branch above,
                        # so this branch is unreachable as written.
                        candidates = board[box_index_x][box_index_y]
                        candidates.append(val)
                        board[box_index_x][box_index_y] = candidates
                    else:
                        board[box_index_x][box_index_y] = val
            # Draws the screen
            pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
            self.screen.fill(BEIGE)
            # Draws the board
            display.draw(board)
            # Check if cell is selected
            if blink:
                cell = display.find_cell(box_index_x, box_index_y)
                # NOTE(review): `blink` is reused for the (alpha, direction)
                # tuple returned by display.blink(); a non-empty tuple is
                # truthy, which keeps the cell blinking — confirm intended.
                blink = display.blink(alpha, a_change)
                alpha = blink[0]
                a_change = blink[1]
                myRect = pg.Rect(cell)
                rectSurf = pg.Surface(myRect.size, pg.SRCALPHA)
                rectSurf.fill(blink_color)
                rectSurf.set_alpha(alpha)
                self.screen.blit(rectSurf, (myRect.x, myRect.y))
            # Check if incorrect input
            if input_lock == 1 and val != 0:
                display.update(board, row_index, col_index, blk_index)
                blink_color = RED
            val = 0
            # display.draw_box()
            pg.display.update()
        # NOTE(review): `while 1` above only exits via exit(), so these two
        # lines appear unreachable as written.
        self.solution = solve_board(board)
        self.solution.assign_flags(board)
|
[
"jle040@uit.no"
] |
jle040@uit.no
|
72f861f016dcd6fdff6b6467986d2be6d78a4439
|
927fc31a0144c308a5c8d6dbe46ba8f2728276c9
|
/tasks/final_tasks/iterator/1.simple_string_iterator.py
|
2f13e87860aabfa853eaa9c7ff6fea24321ff01b
|
[] |
no_license
|
ChandraSiva11/sony-presamplecode
|
b3ee1ba599ec90e357a4b3a656f7a00ced1e8ad3
|
393826039e5db8a448fa4e7736b2199c30f5ed24
|
refs/heads/master
| 2023-01-14T00:09:19.185822
| 2020-11-23T02:07:00
| 2020-11-23T02:07:00
| 299,527,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
# First simple iterator program in Python.
def main():
    """Walk a string one character at a time using iter()/next().

    Prints each character on its own line.  The try/except demonstrates
    that an extra next() call past the end would raise StopIteration.
    """
    string = "Hi Hellow world"
    itr_obj = iter(string)
    try:
        # One next() per character — replaces 15 hard-coded print(next(...))
        # calls that silently broke whenever the string length changed.
        for _ in range(len(string)):
            print(next(itr_obj))
        # print(next(itr_obj))  # uncomment to trigger StopIteration
    except Exception as error:
        print('next iteration Error : ', error)


if __name__ == '__main__':
    main()
|
[
"chandra2.s@aricent.com"
] |
chandra2.s@aricent.com
|
714df2c4872aa8297b8ca60a4c4ec251566f37f7
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/wPMgjmQMoCwm3G6mt_5.py
|
1c4eae077e1c33c1d203e3a596da095b005adddc
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
def upload_count(dates, month):
    """Return how many entries in *dates* begin with the prefix *month*."""
    prefix_len = len(month)
    return sum(1 for date in dates if date[:prefix_len] == month)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
f4cbe2ace7b56a43abc0197ceb831adbd5082b8d
|
5b7c2feb27a71837edf526315d413706a6bf82ff
|
/tests/utils/test_env.py
|
5a4a4d3a3b4e68fd3b764190b3d69f00f25ea3ec
|
[
"BSD-3-Clause"
] |
permissive
|
facebookresearch/mmf
|
df675223566dc8fb2359aa3e1a2d49db5e3c2b9a
|
63f76fbcfe2d056b88734fc41a983251d20e6c61
|
refs/heads/main
| 2023-08-23T23:40:46.827046
| 2023-07-11T06:18:50
| 2023-07-11T06:18:50
| 138,831,170
| 2,432
| 592
|
NOASSERTION
| 2023-08-11T20:26:11
| 2018-06-27T04:52:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,585
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import os
import sys
import unittest
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env
from mmf.utils.env import import_user_module, setup_imports
from mmf.utils.general import get_mmf_root
from mmf_cli.run import run
from tests.test_utils import make_temp_dir, search_log
class TestUtilsEnvE2E(unittest.TestCase):
    """End-to-end tests for mmf's user-directory import machinery.

    Each test imports a small model/dataset from tests/data/user_dir and
    verifies registration, module bookkeeping and (for the e2e runs) that
    training on the trivial dataset reaches accuracy 1.0.
    """

    def _delete_dirty_modules(self):
        # Drop every module imported during the test so later tests start clean.
        for key in list(sys.modules.keys()):
            if key not in self._initial_modules:
                del sys.modules[key]

    def _sanitize_registry(self):
        # Remove the test dataset/model registrations and reset shared state.
        registry.mapping["builder_name_mapping"].pop("always_one", None)
        registry.mapping["model_name_mapping"].pop("simple", None)
        registry.mapping["state"] = {}

    def _get_user_dir(self, abs_path=True):
        """Return the test user_dir, absolute by default."""
        if abs_path:
            return os.path.join(get_mmf_root(), "..", "tests", "data", "user_dir")
        else:
            return os.path.join("tests", "data", "user_dir")

    def setUp(self):
        setup_imports()
        # Snapshot the module table so _delete_dirty_modules can diff it later.
        self._initial_modules = set(sys.modules)
        self._sanitize_registry()

    def tearDown(self):
        self._delete_dirty_modules()
        self._sanitize_registry()

    def _test_user_import_e2e(self, extra_opts=None):
        """Train the user-dir model end to end and check logged accuracies."""
        if extra_opts is None:
            extra_opts = []
        MAX_UPDATES = 50
        user_dir = self._get_user_dir()
        with make_temp_dir() as temp_dir:
            opts = [
                "model=simple",
                "run_type=train_val_test",
                "dataset=always_one",
                "config=configs/experiment.yaml",
                f"env.user_dir={user_dir}",
                "training.seed=1",
                "training.num_workers=3",
                f"training.max_updates={MAX_UPDATES}",
                f"env.save_dir={temp_dir}",
            ]
            opts = opts + extra_opts
            out = io.StringIO()
            # Silence run()'s console output; assertions read the log file.
            with contextlib.redirect_stdout(out):
                run(opts)
            train_log = os.path.join(temp_dir, "train.log")
            log_line = search_log(
                train_log,
                search_condition=[
                    lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
                    lambda x: "best_val/always_one/accuracy" in x,
                ],
            )
            self.assertEqual(float(log_line["val/always_one/accuracy"]), 1)
            log_line = search_log(
                train_log,
                search_condition=[
                    lambda x: x["progress"] == f"{MAX_UPDATES}/{MAX_UPDATES}",
                    lambda x: "test/always_one/accuracy" in x,
                ],
            )
            self.assertEqual(float(log_line["test/always_one/accuracy"]), 1)

    def test_user_import_e2e(self):
        self._test_user_import_e2e()

    def test_cpu_evaluation_e2e(self):
        self._test_user_import_e2e(extra_opts=["evaluation.use_cpu=True"])

    def test_import_user_module_from_directory_absolute(self, abs_path=True):
        """Importing a user dir registers its builder and model."""
        # Make sure the modules are not available first
        self.assertIsNone(registry.get_builder_class("always_one"))
        self.assertIsNone(registry.get_model_class("simple"))
        self.assertFalse("mmf_user_dir" in sys.modules)
        # Now, import and test
        user_dir = self._get_user_dir(abs_path)
        import_user_module(user_dir)
        self.assertIsNotNone(registry.get_builder_class("always_one"))
        self.assertIsNotNone(registry.get_model_class("simple"))
        self.assertTrue("mmf_user_dir" in sys.modules)
        self.assertTrue(user_dir in get_mmf_env("user_dir"))

    def test_import_user_module_from_directory_relative(self):
        self.test_import_user_module_from_directory_absolute(abs_path=False)
        user_dir = self._get_user_dir(abs_path=False)
        self.assertEqual(user_dir, get_mmf_env("user_dir"))

    def test_import_user_module_from_file(self):
        """Importing a single file registers only what that file defines."""
        self.assertIsNone(registry.get_builder_class("always_one"))
        self.assertIsNone(registry.get_model_class("simple"))
        user_dir = self._get_user_dir()
        user_file = os.path.join(user_dir, "models", "simple.py")
        import_user_module(user_file)
        # Only model should be found and build should be none
        self.assertIsNone(registry.get_builder_class("always_one"))
        self.assertIsNotNone(registry.get_model_class("simple"))
        self.assertTrue("mmf_user_dir" in sys.modules)
        self.assertTrue(user_dir in get_mmf_env("user_dir"))
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
d82b06ed955306e40a89c3f9dae61aae64c70312
|
c51eef37bb983a9c35635c7ccc96a0cf689a7438
|
/lecture/lecture_gn3/week3/appendix_pandas.py
|
0344d0f2f1232e55f1999224d4ea9967a9fa725b
|
[] |
no_license
|
Kyeongrok/python_crawler
|
0a717b43be36584af1b0f7c1ad0c79108a5d11e0
|
5a5da8af7bb080f752a9a066741ac8adab136a3a
|
refs/heads/master
| 2022-09-13T03:15:08.053639
| 2022-08-02T15:45:03
| 2022-08-02T15:45:03
| 124,719,435
| 40
| 34
| null | 2019-02-27T08:29:52
| 2018-03-11T03:20:32
|
HTML
|
UTF-8
|
Python
| false
| false
| 134
|
py
|
import pandas as pd

# Read the 'code' column (stock/ticker codes) from jongmok.xlsx.
df = pd.read_excel("./jongmok.xlsx")
print(df['code'])
# Codes are stored with surrounding quote characters (e.g. "'005930'");
# strip the quotes before printing each one.
for item in df['code']:
    print(item.replace("'", ""))
|
[
"oceanfog1@gmail.com"
] |
oceanfog1@gmail.com
|
118d9f8fd993b2d062494e29f3c3420c020ff27b
|
894e2bc2b02226a23fcaff30d5d75c53b111fbe9
|
/www/models.py
|
29d709a9450cf26880b06894b2678e47a52e5236
|
[] |
no_license
|
frank-xman/python_web
|
26b57614e70a7f58fe5ccfffef77340e4dff28be
|
76bb0a590ebd113d391f9fd75bc6f3756ac920da
|
refs/heads/master
| 2020-03-20T08:14:55.974308
| 2018-06-17T11:27:23
| 2018-06-17T11:27:23
| 137,302,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
import time, uuid
from orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
    """Build a unique 50-char id: 15-digit ms timestamp + uuid4 hex + '000'."""
    return f"{int(time.time() * 1000):015d}{uuid.uuid4().hex}000"
class User(Model):
    """ORM model for the `users` table."""
    __table__ = 'users'

    # 50-char id from next_id(): ms timestamp + uuid4 hex, unique and sortable.
    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    email = StringField(ddl='varchar(50)')
    # NOTE(review): stored as a plain varchar — confirm hashing happens upstream.
    password = StringField(ddl='varchar(50)')
    admin = BooleanField()
    name = StringField(ddl='varchar(50)')
    image = StringField(ddl='varchar(500)')   # avatar URL
    created_at = FloatField(default=time.time)  # epoch seconds at creation
class Blog(Model):
    """ORM model for the `blogs` table."""
    __table__ = 'blogs'

    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    # Author name/image stored alongside user_id — presumably denormalised
    # from User to avoid a join when listing blogs; verify against callers.
    user_id = StringField(ddl='varchar(50)')
    user_name = StringField(ddl='varchar(50)')
    user_image = StringField(ddl='varchar(500)')
    name = StringField(ddl='varchar(50)')       # blog title
    summary = StringField(ddl='varchar(200)')
    content = TextField()
    created_at = FloatField(default=time.time)  # epoch seconds at creation
class Comment(Model):
    """ORM model for the `comments` table (comments attached to a blog)."""
    __table__ = 'comments'

    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    blog_id = StringField(ddl='varchar(50)')    # owning Blog.id
    # Commenter name/image stored alongside user_id — same denormalisation
    # pattern as Blog; verify against callers.
    user_id = StringField(ddl='varchar(50)')
    user_name = StringField(ddl='varchar(50)')
    user_image = StringField(ddl='varchar(500)')
    content = TextField()
    created_at = FloatField(default=time.time)  # epoch seconds at creation
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
3fb64a3e8d1896b2dbb68da8318452a55210d6b7
|
4521bb234771215d678890ed084f6336e3653542
|
/_examples/seqs/test.py
|
9ba8745db744cd39dd83292b9f5b362bf39e73e9
|
[
"BSD-3-Clause"
] |
permissive
|
mingrammer/gopy
|
16b8cfcec31ff993e62a383032192600b33b7681
|
3b8a754b6c689175fac23de448e31b96c231e001
|
refs/heads/master
| 2021-01-01T06:28:35.777032
| 2017-07-13T23:04:38
| 2017-07-16T13:19:44
| 97,432,545
| 1
| 0
| null | 2017-07-17T03:42:30
| 2017-07-17T03:42:30
| null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
# Copyright 2015 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

## py2/py3 compat
from __future__ import print_function

import seqs

# NOTE(review): this script uses the Python 2 builtin `xrange`, so despite
# the print_function import it only runs under Python 2.

### test docs
print("doc(seqs): %r" % (seqs.__doc__,))

# Exercise the generated Array wrapper with an iterable.
print("arr = seqs.Array(xrange(2))")
arr = seqs.Array(xrange(2))
print("arr = %s" % (arr,))

# Exercise the Slice wrapper: empty, from lists, from (x)range iterables.
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))

print("s = seqs.Slice([1,2])")
s = seqs.Slice([1,2])
print("s = %s" % (s,))

print("s = seqs.Slice(range(10))")
s = seqs.Slice(range(10))
print("s = %s" % (s,))

print("s = seqs.Slice(xrange(10))")
s = seqs.Slice(xrange(10))
print("s = %s" % (s,))

# In-place concatenation should append the list elements to the slice.
print("s = seqs.Slice()")
s = seqs.Slice()
print("s = %s" % (s,))

print("s += [1,2]")
s += [1,2]
print("s = %s" % (s,))

print("s += [10,20]")
s += [10,20]
print("s = %s" % (s,))
|
[
"binet@cern.ch"
] |
binet@cern.ch
|
8b4ed7b0a5ab69cd77a18bc6d948271caf27517a
|
15514b8cdb9ef2bb25a33e44a2abe79e5eb86439
|
/analyze_in_vivo/analyze_domnisoru/check_basic/velocity_threshold.py
|
326534e21640372aed2da1a278da69d0f76db9a4
|
[] |
no_license
|
cafischer/analyze_in_vivo
|
389ce0d51c6cbeb3e39648aaff13263f0c99060a
|
e38e1057420b5329504f7095f1ee89e2a293df23
|
refs/heads/master
| 2021-06-10T00:18:47.741793
| 2019-09-14T08:47:53
| 2019-09-14T08:47:53
| 100,512,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,288
|
py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as pl
import os
from analyze_in_vivo.load.load_domnisoru import load_cell_ids, load_data, get_celltype, get_track_len
from analyze_in_vivo.analyze_domnisoru.check_basic.in_out_field import threshold_by_velocity
from scipy.ndimage.filters import convolve
from analyze_in_vivo.analyze_domnisoru.position_vs_firing_rate import get_spike_train
pl.style.use('paper')


# For every grid cell, measure how much recording time and how many spikes
# are discarded when data below the velocity threshold is removed, then plot
# the percentages per cell.  NOTE(review): Python 2 script (print statements).
if __name__ == '__main__':
    save_dir_img = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/results/domnisoru/check/velocity'
    save_dir = '/home/cf/Phd/programming/projects/analyze_in_vivo/analyze_in_vivo/data/domnisoru'
    cell_type = 'grid_cells'
    cell_ids = load_cell_ids(save_dir, cell_type)
    param_list = ['Vm_ljpc', 'Y_cm', 'vel_100ms', 'spiketimes']  # TODO ['Vm_ljpc', 'Y_cm', 'fY_cm', 'vel_100ms', 'spiketimes']
    threshold = 1  # cm/s

    save_dir_img = os.path.join(save_dir_img, cell_type)
    if not os.path.exists(save_dir_img):
        os.makedirs(save_dir_img)

    time_lost = np.zeros(len(cell_ids))
    spikes_lost = np.zeros(len(cell_ids))
    for cell_idx, cell_id in enumerate(cell_ids):
        print cell_id

        # load
        data = load_data(cell_id, param_list, save_dir)
        v = data['Vm_ljpc']
        t = np.arange(0, len(v)) * data['dt']
        dt = data['dt']
        position = data['Y_cm']
        velocity_domnisoru = data['vel_100ms']

        # spike train
        AP_max_idxs = data['spiketimes']
        spike_train = get_spike_train(AP_max_idxs, len(v))

        # # velocity from position
        # velocity = np.concatenate((np.array([0]), np.diff(position) / (np.diff(t)/1000.)))
        #
        # # put velocity at switch from end of track to the beginning to 0
        # run_start_idxs = np.where(np.diff(position) < -get_track_len(cell_id)/2.)[0] + 1  # +1 because diff shifts one to front
        # velocity[run_start_idxs] = 0
        #
        # # smoothed by a 100 ms uniform sliding window
        # window = np.ones(int(round(100. / data['dt'])))
        # window /= np.sum(window)
        # velocity_smoothed = convolve(velocity, window, mode='nearest')
        #
        # # threshold by velocity
        # [position_thresholded], _ = threshold_by_velocity([position], velocity)
        #
        # # check same length
        # print 'Length Domnisoru - me (s): ', (len(data['fY_cm']) - len(position_thresholded)) * data['dt'] / 1000
        #
        # pl.figure()
        # pl.plot(np.arange(len(position_thresholded)) * data['dt'], position_thresholded)
        # pl.plot(np.arange(len(data['fY_cm'])) * data['dt'], data['fY_cm'])
        # pl.show()

        # threshold by velocity; percentages of samples/spikes removed
        [t_thresholded, spike_train_thresholded], vel = threshold_by_velocity([t, spike_train], velocity_domnisoru, threshold)
        time_lost[cell_idx] = (len(t) - len(t_thresholded)) / float(len(t)) * 100  # %
        spikes_lost[cell_idx] = (np.sum(spike_train) - np.sum(spike_train_thresholded)) / float(np.sum(spike_train)) * 100  # %

        # print time_lost[cell_idx]
        # print spikes_lost[cell_idx]

        # pl.figure()
        # pl.plot(t, velocity_domnisoru, 'k')
        # pl.plot(t[velocity_domnisoru < threshold], velocity_domnisoru[velocity_domnisoru < threshold], 'ro', markersize=2)

        # pl.figure()
        # pl.plot(t, spike_train, 'k')
        # pl.plot(t[velocity_domnisoru < threshold], spike_train[velocity_domnisoru < threshold], 'ro',
        #         markersize=2)

        # pl.figure()
        # pl.plot(np.arange(0, len(vel))*dt, vel, 'k')
        # pl.show()

    # One bar chart per cell (3x9 grid); stars/triangles mark cell types.
    if cell_type == 'grid_cells':
        n_rows = 3
        n_columns = 9
        fig, axes = pl.subplots(n_rows, n_columns, sharex='all', sharey='all', figsize=(14, 8.5))
        cell_idx = 0
        for i1 in range(n_rows):
            for i2 in range(n_columns):
                if cell_idx < len(cell_ids):
                    if get_celltype(cell_ids[cell_idx], save_dir) == 'stellate':
                        axes[i1, i2].set_title(cell_ids[cell_idx] + ' ' + u'\u2605', fontsize=12)
                    elif get_celltype(cell_ids[cell_idx], save_dir) == 'pyramidal':
                        axes[i1, i2].set_title(cell_ids[cell_idx] + ' ' + u'\u25B4', fontsize=12)
                    else:
                        axes[i1, i2].set_title(cell_ids[cell_idx], fontsize=12)

                    axes[i1, i2].bar(0, time_lost[cell_idx], color='0.5')
                    axes[i1, i2].bar(1, spikes_lost[cell_idx], color='0.5')
                    axes[i1, i2].set_xlim(-1, 2)
                    axes[i1, i2].set_ylim(0, 100)
                    axes[i1, i2].set_xticks([0, 1])
                    axes[i1, i2].set_xticklabels(['Time \nlost', '#APs \nlost'], fontsize=12)
                    if i2 == 0:
                        axes[i1, i2].set_ylabel('Percentage')
                else:
                    # Hide axes of unused subplots in the grid.
                    axes[i1, i2].spines['left'].set_visible(False)
                    axes[i1, i2].spines['bottom'].set_visible(False)
                    axes[i1, i2].set_xticks([])
                    axes[i1, i2].set_yticks([])
                cell_idx += 1
        pl.tight_layout()
        pl.savefig(os.path.join(save_dir_img, 'loss.png'))
        pl.show()
|
[
"coralinefischer@gmail.com"
] |
coralinefischer@gmail.com
|
d8e0bc2788db178b1f857236383ca57f5278fc99
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5010/568005010.py
|
8f5b66f935d9f1b463871ee861e0a2ab2b098725
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
from bots.botsconfig import *
from records005010 import recorddefs

# Grammar for X12 005010 transaction set 568, consumed by the bots EDI
# translator.  `syntax` sets envelope options; `structure` is the segment
# tree (segment ID, min/max occurrences, nested LEVELs).
syntax = {
    'version' : '00403',    #version of ISA to send
    'functionalgroup' : 'D5',
    }

structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BGN', MIN: 1, MAX: 1},
    {ID: 'AMT', MIN: 1, MAX: 2},
    {ID: 'N1', MIN: 0, MAX: 10, LEVEL: [
        {ID: 'N2', MIN: 0, MAX: 2},
        {ID: 'N3', MIN: 0, MAX: 2},
        {ID: 'N4', MIN: 0, MAX: 1},
        {ID: 'PER', MIN: 0, MAX: 1},
    ]},
    {ID: 'CS', MIN: 0, MAX: 99999, LEVEL: [
        {ID: 'N9', MIN: 0, MAX: 3},
        {ID: 'DTM', MIN: 0, MAX: 1},
        {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
            {ID: 'LQ', MIN: 1, MAX: 100},
        ]},
        {ID: 'REF', MIN: 0, MAX: 99999, LEVEL: [
            {ID: 'LX', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'N9', MIN: 1, MAX: 1},
                {ID: 'AMT', MIN: 0, MAX: 99999},
                {ID: 'QTY', MIN: 0, MAX: 1},
                {ID: 'LM', MIN: 0, MAX: 10, LEVEL: [
                    {ID: 'LQ', MIN: 1, MAX: 100},
                ]},
                {ID: 'N1', MIN: 0, MAX: 1, LEVEL: [
                    {ID: 'N2', MIN: 0, MAX: 2},
                    {ID: 'N3', MIN: 0, MAX: 2},
                    {ID: 'N4', MIN: 0, MAX: 1},
                    {ID: 'N9', MIN: 0, MAX: 2},
                ]},
            ]},
            {ID: 'FA1', MIN: 0, MAX: 99999, LEVEL: [
                {ID: 'FA2', MIN: 1, MAX: 99999},
            ]},
        ]},
    ]},
    {ID: 'BAL', MIN: 0, MAX: 99999, LEVEL: [
        {ID: 'N9', MIN: 0, MAX: 99999},
        {ID: 'RTE', MIN: 0, MAX: 99999},
    ]},
    {ID: 'CTT', MIN: 0, MAX: 1},
    {ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
a767d8388d15206e4a7a88d87019fd89dfd13dfd
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/create_image_sync_repo_request_body.py
|
cb2762d95c71188bd7d4b164190eeca326be4fed
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,587
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateImageSyncRepoRequestBody:
    """Request body for creating an image-sync rule on a SWR repository.

    Attributes:
        openapi_types (dict):  The key is attribute name
                               and the value is attribute type.
        attribute_map (dict):  The key is attribute name
                               and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'remote_region_id': 'str',
        'remote_namespace': 'str',
        'sync_auto': 'bool',
        'override': 'bool'
    }

    attribute_map = {
        'remote_region_id': 'remoteRegionId',
        'remote_namespace': 'remoteNamespace',
        'sync_auto': 'syncAuto',
        'override': 'override'
    }

    def __init__(self, remote_region_id=None, remote_namespace=None, sync_auto=None, override=None):
        """CreateImageSyncRepoRequestBody - a model defined in huaweicloud sdk"""
        self._remote_region_id = None
        self._remote_namespace = None
        self._sync_auto = None
        self._override = None
        self.discriminator = None

        self.remote_region_id = remote_region_id
        self.remote_namespace = remote_namespace
        # Optional flags: the private slots stay None unless supplied.
        if sync_auto is not None:
            self.sync_auto = sync_auto
        if override is not None:
            self.override = override

    @property
    def remote_region_id(self):
        """Gets the remote_region_id of this CreateImageSyncRepoRequestBody.

        Target region ID.

        :return: The remote_region_id of this CreateImageSyncRepoRequestBody.
        :rtype: str
        """
        return self._remote_region_id

    @remote_region_id.setter
    def remote_region_id(self, remote_region_id):
        """Sets the remote_region_id of this CreateImageSyncRepoRequestBody.

        Target region ID.

        :param remote_region_id: The remote_region_id of this CreateImageSyncRepoRequestBody.
        :type: str
        """
        self._remote_region_id = remote_region_id

    @property
    def remote_namespace(self):
        """Gets the remote_namespace of this CreateImageSyncRepoRequestBody.

        Target namespace (organization).

        :return: The remote_namespace of this CreateImageSyncRepoRequestBody.
        :rtype: str
        """
        return self._remote_namespace

    @remote_namespace.setter
    def remote_namespace(self, remote_namespace):
        """Sets the remote_namespace of this CreateImageSyncRepoRequestBody.

        Target namespace (organization).

        :param remote_namespace: The remote_namespace of this CreateImageSyncRepoRequestBody.
        :type: str
        """
        self._remote_namespace = remote_namespace

    @property
    def sync_auto(self):
        """Gets the sync_auto of this CreateImageSyncRepoRequestBody.

        Whether to sync automatically; defaults to false.

        :return: The sync_auto of this CreateImageSyncRepoRequestBody.
        :rtype: bool
        """
        return self._sync_auto

    @sync_auto.setter
    def sync_auto(self, sync_auto):
        """Sets the sync_auto of this CreateImageSyncRepoRequestBody.

        Whether to sync automatically; defaults to false.

        :param sync_auto: The sync_auto of this CreateImageSyncRepoRequestBody.
        :type: bool
        """
        self._sync_auto = sync_auto

    @property
    def override(self):
        """Gets the override of this CreateImageSyncRepoRequestBody.

        Whether to overwrite existing images; defaults to false.

        :return: The override of this CreateImageSyncRepoRequestBody.
        :rtype: bool
        """
        return self._override

    @override.setter
    def override(self, override):
        """Sets the override of this CreateImageSyncRepoRequestBody.

        Whether to overwrite existing images; defaults to false.

        :param override: The override of this CreateImageSyncRepoRequestBody.
        :type: bool
        """
        self._override = override

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models / lists / dicts; mask any
        # attribute named in sensitive_list.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CreateImageSyncRepoRequestBody):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
ea786f962c4bb8d037ea2e65e05397c5a17f9a0c
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/ytta/resp_func/redesign/rf_filament.py
|
8567adadf509a4ecf5e11c731371ecc5217a1c3a
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 3,894
|
py
|
#-------------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
#
# Created on Jun 2, 2010 by: rch
from enthought.traits.api import \
HasTraits, Float, Str, implements
from i_rf import \
IRF
from rf import \
RF
import os
from numpy import sign, linspace, array
from matplotlib import pyplot as plt
from scipy.weave import inline, converters
from types import ListType
def Heaviside(x):
    """Heaviside step function: 0 for x < 0, 0.5 at 0, 1 for x > 0.

    Works element-wise on numpy arrays via numpy's sign().
    """
    return 0.5 * (sign(x) + 1.0)
class Filament( RF ):
    '''Linear elastic, brittle filament.

    Response function of a single filament with random breaking strain
    (xi), delayed activation strain (theta), slack ratio (lambd),
    cross-section area (A) and Young's modulus (E_mod).
    '''

    implements( IRF )

    title = Str( 'brittle filament' )

    # Breaking strain of the filament.
    xi = Float( 0.017857, auto_set = False, enter_set = True,
                distr = ['weibull_min', 'uniform'],
                scale = 0.0178, shape = 4.0 )

    # Activation strain (strain at which the filament starts carrying load).
    theta = Float( 0.01, auto_set = False, enter_set = True,
                   distr = ['uniform', 'norm'],
                   loc = 0.01, scale = 0.001 )

    # Slack ratio.
    lambd = Float( 0.2, auto_set = False, enter_set = True,
                   distr = ['uniform'],
                   loc = 0.0, scale = 0.1 )

    # Cross-sectional area [m^2].
    A = Float( 5.30929158457e-10, auto_set = False, enter_set = True,
               distr = ['weibull_min', 'uniform', 'norm'],
               scale = 5.3e-10, shape = 8 )

    # Young's modulus [Pa].
    E_mod = Float( 70.0e9, auto_set = False, enter_set = True,
                   distr = ['weibull_min', 'uniform', 'norm'],
                   scale = 70e9, shape = 8 )

    # Control variable: the global strain.
    eps = Float( ctrl_range = ( 0, 0.2, 20 ), auto_set = False, enter_set = True )

    # C implementation of __call__ for the weave-based fast path.
    C_code = '''
            double eps_ = ( eps - theta * ( 1 + lambd ) ) /
                             ( ( 1 + theta ) * ( 1 + lambd ) );
            // Computation of the q( ... ) function
            if ( eps_ < 0 || eps_ > xi ){
                q = 0.0;
            }else{
                  q = E_mod * A * eps_;
            }
        '''

    def __call__( self, eps, xi, theta, lambd, A, E_mod ):
        '''
        Implements the response function with arrays as variables.
        first extract the variable discretizations from the orthogonal grid.
        '''
        # NOTE: as each variable is an array oriented in different direction
        # the algebraic expressions (-+*/) perform broadcasting,. i.e. performing
        # the operation for all combinations of values. Thus, the resulgin eps
        # is contains the value of local strain for any combination of
        # global strain, xi, theta and lambda
        #
        eps_ = ( eps - theta * ( 1 + lambd ) ) / ( ( 1 + theta ) * ( 1 + lambd ) )

        # cut off all the negative strains due to delayed activation
        #
        eps_ *= Heaviside( eps_ )

        # broadcast eps also in the xi - dimension
        # (by multiplying with array containing ones with the same shape as xi )
        #
        eps_grid = eps_ * Heaviside( xi - eps_ )

        # cut off all the realizations with strain greater than the critical one.
        #
        # eps_grid[ eps_grid >= xi ] = 0

        # transform it to the force
        #
        q_grid = E_mod * A * eps_grid

        return q_grid
return q_grid
if __name__ == '__main__':
    # NOTE(review): Python 2 script — the print statements below are a
    # syntax error under Python 3.
    f = Filament()
    f.configure_traits()

    print 'keys', f.param_keys
    print 'values', f.param_list
    print 'uniform', f.traits( distr = lambda x: x != None and 'uniform' in x )

    # Evaluate the response function over a strain range and plot it.
    X = linspace( 0, 0.05, 100 )
    Y = []
    for eps in X:
        Y.append( f( eps, .017, .01, .2, 5.30929158457e-10, 70.e9 ) )
    plt.plot( X, Y, linewidth = 2, color = 'navy' )
    plt.show()
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
89f1ae74a600f564db6992784235bc311a79bfe9
|
beed259c9aaf824c5307d93ffa736255f2d98831
|
/month06/Machine_learning/day06/01_grid_search_demo.py
|
6e9ec278441e7aa3b3c2e35ec90effc1330900e1
|
[
"Apache-2.0"
] |
permissive
|
chaofan-zheng/python_learning_code
|
21345f97ebf74c3cad0ef488a93ec8a7fd771a63
|
5d05848911d55aa49eaee4afd7ffd80536fad7aa
|
refs/heads/main
| 2023-05-27T16:17:18.130492
| 2021-06-06T14:23:31
| 2021-06-06T14:23:31
| 338,234,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,889
|
py
|
# 01_grid_search_demo.py
# Grid-search example
""" Grid search
1) One way of searching for a good hyper-parameter combination
2) Exhaustively evaluates the candidate combinations and keeps the best one
3) Simplifies the parameter-selection process
"""
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

# Read the data: comma-separated rows, last column is the class label.
x, y = [], []
with open("../data/multiple2.txt", "r") as f:
    for line in f.readlines():
        data = [float(substr)
                for substr in line.split(",")]
        x.append(data[:-1])
        y.append(data[-1])

x = np.array(x)
y = np.array(y)

# Candidate parameter grids to search (one dict per kernel family).
params = [
    {
        "kernel": ["linear"],
        "C": [1, 10, 100, 1000]
    },
    {
        "kernel": ["poly"],
        "C": [1],
        "degree": [2, 3]
    },
    {
        "kernel": ["rbf"],
        "C": [1, 10, 100, 1000],
        "gamma": [1, 0.1, 0.01, 0.001]
    }
]
model = ms.GridSearchCV(svm.SVC(),  # base model
                        params,     # parameter grids to validate
                        cv=5)       # number of CV folds
model.fit(x, y)  # train

# "best score" / "best parameter combination" (labels kept in Chinese).
print("最好成绩:", model.best_score_)
print("最优组合:", model.best_params_)

# Build a dense mesh over the input range and plot the decision regions.
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(np.arange(l, r, h), np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)

mp.figure("SVM RBF Classifier", facecolor="lightgray")
mp.title("SVM RBF Classifier", fontsize=14)
mp.xlabel("x", fontsize=14)
mp.ylabel("y", fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap="gray")
C0, C1 = (y == 0), (y == 1)
mp.scatter(x[C0][:, 0], x[C0][:, 1], c="orangered", s=80)
mp.scatter(x[C1][:, 0], x[C1][:, 1], c="limegreen", s=80)
mp.show()
|
[
"417355570@qq.com"
] |
417355570@qq.com
|
08ee0a9717d7a5b84c5ebec7fd711e4b60ec1a77
|
65d93b3db37f488356faa1789f1001f17191e345
|
/isi_mip/core/migrations/0005_headerlink_menu_items.py
|
bbd567159b1e8dd19206dd7ca3b9554ba3cbeffa
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ISI-MIP/isimip
|
b4a19310dd772356eef87259783084836107cf4a
|
c2a78c727337e38f3695031e00afd607da7d6dcb
|
refs/heads/master
| 2021-09-14T15:42:14.453031
| 2021-05-25T09:33:45
| 2021-05-25T09:33:45
| 237,446,232
| 0
| 0
|
MIT
| 2020-01-31T14:27:04
| 2020-01-31T14:27:03
| null |
UTF-8
|
Python
| false
| false
| 803
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-06 12:23
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10.4).

    Adds a nullable ``menu_items`` StreamField to the ``HeaderLink`` model,
    allowing editors to mix two block types: ``jump_link`` (name + raw URL)
    and ``page_link`` (name + Wagtail page chooser).
    """
    dependencies = [
        ('core', '0004_auto_20170118_1443'),
    ]
    operations = [
        migrations.AddField(
            model_name='headerlink',
            name='menu_items',
            # blank=True/null=True keeps existing HeaderLink rows valid.
            field=wagtail.core.fields.StreamField((('jump_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('link', wagtail.core.blocks.URLBlock())))), ('page_link', wagtail.core.blocks.StructBlock((('name', wagtail.core.blocks.CharBlock()), ('page', wagtail.core.blocks.PageChooserBlock()))))), blank=True, null=True),
        ),
    ]
|
[
"hi@brueck.io"
] |
hi@brueck.io
|
097d789ff031bc02e11ed666f3d663f1e107cd89
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/api_version_detail.py
|
3c337f810bd10e1917aa9bfeff016dbb057dce3c
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,327
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ApiVersionDetail:
    """SDK model describing one supported version of the KMS REST API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'id': 'str',
        'links': 'list[ApiLink]',
        'version': 'str',
        'status': 'str',
        'updated': 'str',
        'min_version': 'str'
    }
    attribute_map = {
        'id': 'id',
        'links': 'links',
        'version': 'version',
        'status': 'status',
        'updated': 'updated',
        'min_version': 'min_version'
    }
    def __init__(self, id=None, links=None, version=None, status=None, updated=None, min_version=None):
        """ApiVersionDetail - a model defined in huaweicloud sdk.

        All fields are optional; only non-None arguments are assigned.
        """
        self._id = None
        self._links = None
        self._version = None
        self._status = None
        self._updated = None
        self._min_version = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if links is not None:
            self.links = links
        if version is not None:
            self.version = version
        if status is not None:
            self.status = status
        if updated is not None:
            self.updated = updated
        if min_version is not None:
            self.min_version = min_version
    @property
    def id(self):
        """Gets the id of this ApiVersionDetail.
        Version ID (version number), e.g. "v1.0".
        :return: The id of this ApiVersionDetail.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this ApiVersionDetail.
        Version ID (version number), e.g. "v1.0".
        :param id: The id of this ApiVersionDetail.
        :type: str
        """
        self._id = id
    @property
    def links(self):
        """Gets the links of this ApiVersionDetail.
        JSON object; see the data-structure description of the ``links`` field.
        :return: The links of this ApiVersionDetail.
        :rtype: list[ApiLink]
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this ApiVersionDetail.
        JSON object; see the data-structure description of the ``links`` field.
        :param links: The links of this ApiVersionDetail.
        :type: list[ApiLink]
        """
        self._links = links
    @property
    def version(self):
        """Gets the version of this ApiVersionDetail.
        If this API version supports microversions, the maximum supported
        microversion; otherwise an empty string.
        :return: The version of this ApiVersionDetail.
        :rtype: str
        """
        return self._version
    @version.setter
    def version(self, version):
        """Sets the version of this ApiVersionDetail.
        If this API version supports microversions, the maximum supported
        microversion; otherwise an empty string.
        :param version: The version of this ApiVersionDetail.
        :type: str
        """
        self._version = version
    @property
    def status(self):
        """Gets the status of this ApiVersionDetail.
        Version status, one of: CURRENT (the recommended version),
        SUPPORTED (an older but still supported version),
        DEPRECATED (a deprecated version that may be removed later).
        :return: The status of this ApiVersionDetail.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this ApiVersionDetail.
        Version status, one of: CURRENT (the recommended version),
        SUPPORTED (an older but still supported version),
        DEPRECATED (a deprecated version that may be removed later).
        :param status: The status of this ApiVersionDetail.
        :type: str
        """
        self._status = status
    @property
    def updated(self):
        """Gets the updated of this ApiVersionDetail.
        Version release time, expressed in UTC, e.g. 2014-06-28T12:20:21Z
        for the release of v1.
        :return: The updated of this ApiVersionDetail.
        :rtype: str
        """
        return self._updated
    @updated.setter
    def updated(self, updated):
        """Sets the updated of this ApiVersionDetail.
        Version release time, expressed in UTC, e.g. 2014-06-28T12:20:21Z
        for the release of v1.
        :param updated: The updated of this ApiVersionDetail.
        :type: str
        """
        self._updated = updated
    @property
    def min_version(self):
        """Gets the min_version of this ApiVersionDetail.
        If this API version supports microversions, the minimum supported
        microversion; otherwise an empty string.
        :return: The min_version of this ApiVersionDetail.
        :rtype: str
        """
        return self._min_version
    @min_version.setter
    def min_version(self, min_version):
        """Sets the min_version of this ApiVersionDetail.
        If this API version supports microversions, the minimum supported
        microversion; otherwise an empty string.
        :param min_version: The min_version of this ApiVersionDetail.
        :type: str
        """
        self._min_version = min_version
    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models and masking attributes listed in ``sensitive_list``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string (JSON) representation of the model."""
        import simplejson as json
        # Python 2 only: force the default encoding to UTF-8 so the
        # Chinese field content serializes without UnicodeDecodeError.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ApiVersionDetail):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
734d7779a3e742141face3d32132d429ae9aaf73
|
c2e16633921d1efe584d93d769eaa7892a2fd8f3
|
/OOP/project.zoo/animal_init.py
|
4a1d1f4ef87641924483dded75122042ed61a8ed
|
[] |
no_license
|
yosifnandrov/softuni-stuff
|
bd53d418fe143ea4633a5488c1f80648da0b9ef7
|
2a76e5aee2029edf901634750d28cf153d73ece3
|
refs/heads/main
| 2023-04-17T19:53:30.254790
| 2021-05-06T11:33:39
| 2021-05-06T11:33:39
| 364,884,782
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
def initialization_animals(self, *args):
    """Assign *args positionally onto the annotated attributes of ``self``.

    Attribute order follows ``self.__annotations__`` (the enclosing class's
    annotation order). ``zip`` stops at the shorter sequence, so surplus
    annotations or surplus arguments are silently ignored.
    """
    for name, value in zip(self.__annotations__, args):
        setattr(self, name, value)
|
[
"yosif1993@abv.bg"
] |
yosif1993@abv.bg
|
b82c181401e82d543fbec8baf19ab692ca195486
|
1280ef2fd358ddb094f4147a25d1c21c5d718c43
|
/socialNewsApp/src/mikroblog/urls.py
|
bbc8ecb1247ff3a183a88abc172e69b049bd33b7
|
[] |
no_license
|
PatkaSm/SocialNewsApp
|
aeac569c76f554b637efa2235afad8230bf5bb44
|
bea67724e86523939fad20411807513202c1a1b8
|
refs/heads/master
| 2023-04-06T11:52:31.755797
| 2021-04-21T07:35:54
| 2021-04-21T07:35:54
| 279,606,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
from comment.views import delete_micropost_comment, like_micropost_comment
from django.urls import path
from mikroblog.views import MicroPostListView, micro_post_delete, micro_post_like
# URL routes for the "mikroblog" app: the micro-post list page, delete/like
# actions on micro-posts (keyed by pk), and delete/like actions on
# micro-post comments (keyed by id).
# NOTE(review): route prefixes mix spellings ("mikro-post" vs "wpis") —
# presumably intentional Polish naming; confirm before normalizing.
urlpatterns = [
    path('', MicroPostListView.as_view(), name='mikroblog'),
    path('mikro-post/delete/<int:pk>',micro_post_delete , name='micro-post-delete'),
    path('mikro-post/like/<int:pk>', micro_post_like, name='micro-post-like'),
    path('wpis/comment/delete/<int:id>/', delete_micropost_comment, name='micropost-comment-delete'),
    path('wpis/comment/like/<int:id>/', like_micropost_comment, name='micropost-comment-like'),
]
|
[
"patrycja.s449@gmail.com"
] |
patrycja.s449@gmail.com
|
fb60256c701c3ceddc6c6bad0eed101f230346b1
|
db7aac75e31d35c4a18c966170b46f269d015d0b
|
/webgl_canvas_gadget/apps/projects/migrations/0044_auto_20160819_0115.py
|
3c13e9f0687f9d9d8dba9341538656d11f099466
|
[] |
no_license
|
jjpastprojects/Django
|
12fbf3cf27a9230db98a21cc1013216aeadaae1e
|
c55562be7226f29b4ec213f8f018b6c2dd50c420
|
refs/heads/master
| 2022-12-12T22:15:49.493289
| 2017-09-05T12:51:20
| 2017-09-05T12:51:20
| 101,995,798
| 0
| 0
| null | 2022-12-07T23:21:46
| 2017-08-31T11:55:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 622
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-08-18 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9).

    Adds camera distance bounds to the ``Model3d`` model:
    ``camera_max_distance`` (default 6) and ``camera_min_distance``
    (default 1.5), so existing rows get sensible defaults.
    """
    dependencies = [
        ('projects', '0043_auto_20160816_2354'),
    ]
    operations = [
        migrations.AddField(
            model_name='model3d',
            name='camera_max_distance',
            field=models.FloatField(default=6),
        ),
        migrations.AddField(
            model_name='model3d',
            name='camera_min_distance',
            field=models.FloatField(default=1.5),
        ),
    ]
|
[
"sam.noreaksey@outlook.com"
] |
sam.noreaksey@outlook.com
|
84214983efbd775549489eac0f074bacbb7d4429
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/day06/day6/3.今日内容.py
|
95e2682ddf4ad17f4ff7f20bcc9fad627442c403
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 343
|
py
|
# 内置模块
# re模块讲完
# os模块:和操作系统打交道
# 时间模块 time和datetime
# sys模块 :和python解释器相关的
# 序列化模块 :json pickle
# logging模块:打印日志用,规范日志格式
# hashlib模块 :摘要算法的集合 - 密文的登录验证
# 下周开始:开面向对象
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
f967e820b74cfdce9295d196ecf1cddad3653ee7
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/67/67.add-binary.282298911.Accepted.leetcode.python3.py
|
bacf67b5bc939359b252bd80e3cb448d5e8ac740
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504
| 2023-07-16T07:38:36
| 2023-07-16T07:38:36
| 143,352,065
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,476
|
py
|
"""
Problem Link: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
# there are two cases for every bit
# 1. they are equal
# - 1 and 1 -> 0 but result will depend on carry.
# - 0 and 0 -> 0 but result will depend on carry
# [if carry is 1, res = 1, if carry is 0, res = 0]
# 2. they are unequal
# - 1 and 0 -> result carry + 1 -> result = carry === 0 ? 1 : 0
# - 0 and 1 -> result carry + 1 -> result = carry === 0 ? 1 : 0
# don't touch the carry because if it was 0, it will remain 0 even after this sum
# why?
# - carry = 0, now bits are: 1 + 0 = 1. Add carry: 1 + 0(carry) -> carry will remain 0
# - carry = 1, now bits are: 1 + 0 = 1. Add carry: 1 + 1(carry) -> carry will remain 1
class Solution:
    """Three equivalent implementations of binary-string addition
    (LeetCode 67): character comparison, integer arithmetic, and XOR."""
    # Method 1: pure character comparison.
    # When the two bits are equal, the result bit is the incoming carry and
    # the new carry is the shared bit value; when they differ, the result is
    # the complement of the carry and the carry is unchanged.
    def addBinary(self, a: str, b: str) -> str:
        out = []
        carry = '0'
        i, j = len(a) - 1, len(b) - 1
        while i >= 0 or j >= 0:
            bit_a = a[i] if i >= 0 else '0'
            bit_b = b[j] if j >= 0 else '0'
            if bit_a == bit_b:
                out.append(carry)
                carry = bit_a
            else:
                out.append('1' if carry == '0' else '0')
            i, j = i - 1, j - 1
        if carry == '1':
            out.append(carry)
        return ''.join(reversed(out))
    # Method 2: column-wise integer addition; each column's digit is
    # (sum mod 2) and the carry is (sum div 2).
    def addBinary_integer(self, a: str, b: str) -> str:
        out = []
        carry = 0
        i, j = len(a) - 1, len(b) - 1
        while i >= 0 or j >= 0:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            out.append(str(total % 2))
            carry = total // 2
        if carry:
            out.append(str(carry))
        return ''.join(reversed(out))
    # Method 3: bitwise. The result bit is a XOR b XOR carry; the new carry
    # is (a + b + carry) >> 1. Folding the carry into the loop condition
    # removes the trailing fix-up step.
    def addBinary_xor(self, a: str, b: str) -> str:
        out = []
        carry = 0
        i, j = len(a) - 1, len(b) - 1
        while i >= 0 or j >= 0 or carry:
            bit_a = ord(a[i]) - ord('0') if i >= 0 else 0
            bit_b = ord(b[j]) - ord('0') if j >= 0 else 0
            out.append(str(bit_a ^ bit_b ^ carry))
            carry = (bit_a + bit_b + carry) >> 1
            i -= 1
            j -= 1
        return ''.join(reversed(out))
|
[
"huangyingw@gmail.com"
] |
huangyingw@gmail.com
|
8df572327d1476dac62dac98652932fcec33a758
|
124df74bce796598d224c4380c60c8e95756f761
|
/com.raytheon.viz.avnconfig/localization/aviation/python/TafViewer.py
|
063ff9b84526cb0322a560a288dcb2089c769676
|
[] |
no_license
|
Mapoet/AWIPS-Test
|
19059bbd401573950995c8cc442ddd45588e6c9f
|
43c5a7cc360b3cbec2ae94cb58594fe247253621
|
refs/heads/master
| 2020-04-17T03:35:57.762513
| 2017-02-06T17:17:58
| 2017-02-06T17:17:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,841
|
py
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Name:
# TafViewer.py
# GFS1-NHD:A6644.0000-SCRIPT;1.10
#
# Status:
# DELIVERED
#
# History:
# Revision 1.10 (DELIVERED)
# Created: 14-MAY-2007 10:04:47 OBERFIEL
# Removed references to the obsolete prototype XTF product.
# Allow decoder and encoder to format TAF in two different
# ways. New format will be triggered by day and time to be
# specified at a later date.
#
# Revision 1.9 (DELIVERED)
# Created: 23-JAN-2006 08:23:18 TROJAN
# stdr 956
#
# Revision 1.8 (DELIVERED)
# Created: 19-SEP-2005 13:47:39 TROJAN
# spr 7011
#
# Revision 1.7 (DELIVERED)
# Created: 06-JUL-2005 18:16:42 TROJAN
# spr 6548
#
# Revision 1.6 (DELIVERED)
# Created: 07-MAY-2005 11:39:14 OBERFIEL
# Added Item Header Block
#
# Revision 1.5 (DELIVERED)
# Created: 24-JAN-2005 21:18:48 TROJAN
# spr 6612
#
# Revision 1.4 (APPROVED)
# Created: 09-JUL-2004 19:11:05 OBERFIEL
# Replaced busy dialogs
#
# Revision 1.3 (APPROVED)
# Created: 01-JUL-2004 14:59:55 OBERFIEL
# Update
#
# Revision 1.2 (DELIVERED)
# Created: 08-JAN-2004 21:40:30 PCMS
# Updating for code cleanup
#
# Revision 1.1 (APPROVED)
# Created: 06-NOV-2003 16:46:22 OBERFIEL
# date and time created -2147483647/-2147483648/-2147481748
# -2147483648:-2147483648:-2147483648 by oberfiel
#
# Change Document History:
# 1:
# Change Document: GFS1-NHD_SPR_7277
# Action Date: 19-MAR-2008 07:59:13
# Relationship Type: In Response to
# Status: CLOSED
# Title: AvnFPS: (OB8.2) AvnFPS decoders need to conform to new ICAO format for TAFs
#
#
import logging, time
from Tkinter import *
import Pmw
from Balloon import Balloon
import Avn, AvnLib, Busy, Globals
TAG = 'warning'
_Logger = logging.getLogger(__name__)
class Viewer(object):
    """Read-only TAF display pane (Tkinter/Pmw).

    Shows one or more TAFs in a scrolled text widget, optionally with WMO
    headers, and can highlight forecast groups (tag ``warning``) that
    conflict with METAR data supplied by a MetarViewer.
    """
    def __init__(self, master, getcmd, editcmd):
        # master: parent widget (page in a notebook)
        # getcmd: data access method returning a record with .header, .text
        #         and .dcd (decoded TAF) attributes
        # editcmd: callback that opens the current forecast in the editor
        self._master = master
        self._id = None
        self._taf = None
        self._getcmd = getcmd
        self._editcmd = editcmd
        # Display options backed by Tk option database defaults.
        self._tkShowHeaders = IntVar()
        self._tkShowHeaders.set(int(master.option_get('showHeaders', '')))
        self._tkNumTaf = IntVar()
        self._tkNumTaf.set(int(master.option_get('numTafs', '')))
        # Top control bar: editor button, TAF-count menu, header checkbox.
        frame = Frame(master)
        btnbox = Pmw.ButtonBox(frame)
        btn = btnbox.add('Text Editor', command=self._editcmd)
        Balloon().bind(btn, 'Initializes editor page with current forecast')
        btnbox.alignbuttons()
        btnbox.pack(side='left', expand='no', fill='x')
        menu = Pmw.OptionMenu(frame,
                              labelpos='w',
                              label_text='Num TAFs',
                              menubutton_width=3,
                              menubutton_textvariable=self._tkNumTaf,
                              items=('1', '3', '99'),
                              command=self.load,
                              )
        menu.pack(side='right', expand='no', fill='x', padx=2)
        Balloon().bind(menu, 'Number of TAFs to display')
        cb = Checkbutton(frame,
                         text='Show Headers',
                         variable=self._tkShowHeaders,
                         command=self.load,
                         )
        cb.pack(side='right', padx=5)
        Balloon().bind(cb, 'Display WMO header')
        frame.pack(side='top', expand='no', fill='x')
        # Main read-only text area; highlight tag renders on red background.
        self.text = Pmw.ScrolledText(master,
                                     borderframe = 1,
                                     vscrollmode='static',
                                     text_state='disabled',
                                     text_wrap='word',
                                     )
        self.text.pack(side='top', expand='yes', fill='both')
        self.text.tag_configure(TAG, background='red')
        # Right-click context menu with a single Copy entry.
        self.text.component('text').bind('<Button-3>', self.__popupMenu)
        self.popmenu = Menu(master,
                            tearoff=0,
                            type='normal',
                            )
        self.popmenu.add_command(label='Copy', command=self.__copy)
    def __copy(self):
        """Copy the current text selection to the clipboard."""
        try:
            t = self.text.component('text')
            t.selection_own()
            selection = t.selection_get()
            t.clipboard_clear()
            t.clipboard_append(selection)
        except:
            # NOTE(review): bare except — presumably meant to swallow the
            # TclError raised when nothing is selected; confirm and narrow.
            pass
    def __popupMenu(self, e):
        """Show the context menu at the mouse-event position."""
        self.popmenu.tk_popup(e.widget.winfo_rootx() + e.x,
                              e.widget.winfo_rooty() + e.y)
    ##############################################################################
    # public methods
    ##############################################################################
    def highlight(self, mtrdata):
        # called by MetarViewer
        # needs to change logic if other viewers use this method
        # Finds the forecast group valid "now" and tags every element whose
        # METAR status severity >= 2 (except 'tempo') with the warning tag.
        if not self._taf or not 'group' in self._taf.dcd \
            or not self._taf.dcd['group']:
            return
        p = self._taf.dcd['group'][0]
        # Clamp to the start of the first group so an old TAF still matches.
        t = max(time.time(), p['prev']['time']['from'])
        for p in self._taf.dcd['group']:
            if t < p['prev']['time']['to']:
                if 'ocnl' in p and \
                   p['ocnl']['time']['from'] <= t < p['ocnl']['time']['to']:
                    tempo = p['ocnl']
                else:
                    tempo = None
                prev = p['prev']
                break
        else:
            # No group covers the current time: nothing to highlight.
            return
        # Text indices must be offset by the header's line count when shown.
        if self._taf.header and self._tkShowHeaders.get():
            hlen = self._taf.header.count('\n')
        else:
            hlen = 0
        for e in [e for e in mtrdata['status'] if \
                  mtrdata['status'][e].severity >= 2 and e != 'tempo']:
            for ix in AvnLib.findIndex(e, prev, hlen):
                self.text.tag_add(TAG, *ix)
            if tempo:
                for ix in AvnLib.findIndex(e, tempo, hlen):
                    self.text.tag_add(TAG, *ix)
    def load(self, arg=None):
        """(Re)populate the text widget with up to numTafs TAFs for the
        current site; on failure, log and optionally show a warning."""
        self.text.configure(text_state='normal')
        self.text.clear()
        try:
            self._taf = self._getcmd(self._id)
            if self._taf is None:
                raise Avn.AvnError('')
            if self._tkShowHeaders.get():
                self.text.insert('end', self._taf.header)
            # first line of most recent TAF
            self.text.insert('end', self._taf.text+'\n')
            for taf in Globals.DRC.getTafs(self._id, False, 0,
                self._tkNumTaf.get())[1:]:
                if self._tkShowHeaders.get():
                    self.text.insert('end', taf.header)
                self.text.insert('end', taf.text+'\n')
        except Exception:
            msg = 'Cannot load data for %s' % self._id
            _Logger.exception(msg)
            if self._master.winfo_ismapped():
                Busy.showwarning(msg, self._master)
        self.text.configure(text_state='disabled')
    def setSite(self, id):
        """Select the site whose TAFs load() should display."""
        self._id = id
|
[
"joshua.t.love@saic.com"
] |
joshua.t.love@saic.com
|
a7dac8a6c149989da09d918c09c3700c2a8ee2d9
|
08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2
|
/kubernetes/client/models/v1_subject.py
|
1e131c521a19b4ff2a12fcdefa5275c6f8708630
|
[
"Apache-2.0"
] |
permissive
|
ex3cv/client-python
|
5c6ee93dff2424828d064b5a2cdbed3f80b74868
|
2c0bed9c4f653472289324914a8f0ad4cbb3a1cb
|
refs/heads/master
| 2021-07-12T13:37:26.049372
| 2017-10-16T20:19:01
| 2017-10-16T20:19:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,118
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1Subject(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
        swagger_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'api_group': 'str',
        'kind': 'str',
        'name': 'str',
        'namespace': 'str'
    }
    attribute_map = {
        'api_group': 'apiGroup',
        'kind': 'kind',
        'name': 'name',
        'namespace': 'namespace'
    }
    def __init__(self, api_group=None, kind=None, name=None, namespace=None):
        """
        V1Subject - a model defined in Swagger
        """
        self._api_group = None
        self._kind = None
        self._name = None
        self._namespace = None
        self.discriminator = None
        if api_group is not None:
          self.api_group = api_group
        # 'kind' and 'name' are required: their setters raise ValueError
        # when passed None (unlike the optional api_group/namespace).
        self.kind = kind
        self.name = name
        if namespace is not None:
          self.namespace = namespace
    @property
    def api_group(self):
        """
        Gets the api_group of this V1Subject.
        APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.
        :return: The api_group of this V1Subject.
        :rtype: str
        """
        return self._api_group
    @api_group.setter
    def api_group(self, api_group):
        """
        Sets the api_group of this V1Subject.
        APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.
        :param api_group: The api_group of this V1Subject.
        :type: str
        """
        self._api_group = api_group
    @property
    def kind(self):
        """
        Gets the kind of this V1Subject.
        Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.
        :return: The kind of this V1Subject.
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1Subject.
        Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.
        :param kind: The kind of this V1Subject.
        :type: str
        """
        if kind is None:
            raise ValueError("Invalid value for `kind`, must not be `None`")
        self._kind = kind
    @property
    def name(self):
        """
        Gets the name of this V1Subject.
        Name of the object being referenced.
        :return: The name of this V1Subject.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this V1Subject.
        Name of the object being referenced.
        :param name: The name of this V1Subject.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name
    @property
    def namespace(self):
        """
        Gets the namespace of this V1Subject.
        Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.
        :return: The namespace of this V1Subject.
        :rtype: str
        """
        return self._namespace
    @namespace.setter
    def namespace(self, namespace):
        """
        Sets the namespace of this V1Subject.
        Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.
        :param namespace: The namespace of this V1Subject.
        :type: str
        """
        self._namespace = namespace
    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1Subject):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
36d3e7e80d615098ce61bc85d31fe8ad49acb08b
|
bc2d2d99ed7a814fd36eab5ca3defc4fbfc9ea29
|
/hesab/__init__.py
|
c91d821b54591fd6ac370087fb20c187a97e7785
|
[
"MIT"
] |
permissive
|
soul4code/django-hesab
|
f4ba7d8167ff1e42ab775e0bdea61a7c11bdfb98
|
3ec72def22283475d958a60abc3a572d8ccb63cc
|
refs/heads/master
| 2021-12-30T08:47:44.131542
| 2016-07-30T20:14:00
| 2016-07-30T20:14:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Package metadata for django-hesab.
__title__ = 'django-hesab'
__version__ = '1.0.0'
__author__ = 'Rolf Haavard Blindheim'
__license__ = 'MIT License'
# Backwards-compatible alias for __version__.
VERSION = __version__
|
[
"rhblind@gmail.com"
] |
rhblind@gmail.com
|
eb26945e850a7ec79e2f98859ec3962e49ed2159
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_315/ch4_2020_09_14_20_47_35_320810.py
|
fcf55ef1a3a2c80781e08fabde27b771f5906d87
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
def classifica_idade(x):
    """Classify an age in years.

    Returns 'Criança' for x <= 11, 'Adolescentes' for 12 <= x <= 17,
    and 'Adulto' otherwise.
    """
    if x <= 11:
        return ('Criança')
    # Bug fix: the original condition was `12 >= x <= 17`, i.e.
    # (x <= 12 and x <= 17), so ages 13-17 were wrongly classified
    # as 'Adulto'. The intended range check is 12 <= x <= 17.
    elif 12 <= x <= 17:
        return ('Adolescentes')
    else:
        return ('Adulto')
idade =
print (classifica_idade(idade))
|
[
"you@example.com"
] |
you@example.com
|
ca6dc956467dab844f128b9f78f9895994507ef7
|
f0354782628e51b1a301eba1a69e9808b4adc664
|
/Problem/1837.py
|
3079b66b692fc1dbe6e5fc8cf1cfaff6a2a29e35
|
[] |
no_license
|
HyunIm/Baekjoon_Online_Judge
|
9b289ea27440c150ef34372dc91e6f92f4102659
|
f3a4670ea2b6ee81fa4b1bdcad3412cb995e64f2
|
refs/heads/master
| 2023-05-26T16:54:39.643360
| 2023-05-23T04:07:08
| 2023-05-23T04:07:08
| 119,958,249
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
def get_prime_list(n):
    """Return all primes p with 2 <= p < n (sieve of Eratosthenes).

    Returns [] for n < 2. Improvements over the original: guard for tiny n,
    idiomatic truthiness instead of `== True`, and the inner loop starts at
    i*i (multiples below i*i were already crossed off by smaller primes).
    """
    if n < 2:
        return []
    sieve = [True] * n
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            for j in range(i * i, n, i):
                sieve[j] = False
    return [i for i in range(2, n) if sieve[i]]
def isGood(P, primeList):
    """Return 'GOOD' when no prime in primeList divides P;
    otherwise 'BAD <p>' for the first prime p in primeList that does."""
    divisor = next((p for p in primeList if P % p == 0), None)
    return 'GOOD' if divisor is None else 'BAD ' + str(divisor)
# Read P (the candidate number) and K from stdin, then report whether any
# prime below K divides P ('BAD <p>') or not ('GOOD').
P, K = map(int, input().split())
primeList = get_prime_list(K)
result = isGood(P, primeList)
print(result)
|
[
"hyunzion@gmail.com"
] |
hyunzion@gmail.com
|
c16aabe7c644a2982c6ea02d45ae4030dc12cb68
|
3a891a79be468621aae43defd9a5516f9763f36e
|
/desktop/core/ext-py/docutils-0.14/test/test_dependencies.py
|
298e8a43bbe4945559d1c54dd12072619e493bde
|
[
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-3.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
oyorooms/hue
|
b53eb87f805063a90f957fd2e1733f21406269aa
|
4082346ef8d5e6a8365b05752be41186840dc868
|
refs/heads/master
| 2020-04-15T20:31:56.931218
| 2019-01-09T19:02:21
| 2019-01-09T19:05:36
| 164,998,117
| 4
| 2
|
Apache-2.0
| 2019-01-10T05:47:36
| 2019-01-10T05:47:36
| null |
UTF-8
|
Python
| false
| false
| 4,961
|
py
|
#! /usr/bin/env python
# $Id: test_dependencies.py 8059 2017-04-19 16:47:35Z milde $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
Test module for the --record-dependencies option.
"""
import os.path
import unittest
import sys
import DocutilsTestSupport # must be imported before docutils
import docutils.core
import docutils.utils
import docutils.io
from docutils.parsers.rst.directives.images import PIL
# docutils.utils.DependencyList records POSIX paths,
# i.e. "/" as a path separator even on Windows (not os.path.join).
# Maps symbolic fixture names to the dependency paths the tests expect
# to find in the generated record file.
paths = {'include': u'data/include.txt', # included rst file
         'raw': u'data/raw.txt', # included raw "HTML file"
         'scaled-image': u'../docs/user/rst/images/biohazard.png',
         'figure-image': u'../docs/user/rst/images/title.png',
         'stylesheet': u'data/stylesheet.txt',
         }
class RecordDependenciesTests(unittest.TestCase):
    """Tests for the --record-dependencies option across writers."""
    def get_record(self, **settings):
        """Run a docutils conversion with dependency recording enabled and
        return the recorded dependency paths as a list of lines."""
        recordfile = 'record.txt'
        recorder = docutils.utils.DependencyList(recordfile)
        # (Re) create the record file by running a conversion:
        settings.setdefault('source_path',
                            os.path.join('data', 'dependencies.txt'))
        settings.setdefault('settings_overrides', {})
        settings['settings_overrides'].update(_disable_config=True,
                                              record_dependencies=recorder)
        docutils.core.publish_file(destination=DocutilsTestSupport.DevNull(),
                                   **settings)
        recorder.close()
        # Read the record file:
        record = docutils.io.FileInput(source_path=recordfile,
                                       encoding='utf8')
        return record.read().splitlines()
    def test_dependencies(self):
        """XML writer records the include/raw (and figure image) files."""
        # Note: currently, raw input files are read (and hence recorded) while
        # parsing even if not used in the chosen output format.
        # This should change (see parsers/rst/directives/misc.py).
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image']
        expected = [paths[key] for key in keys]
        record = self.get_record(writer_name='xml')
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_dependencies_html(self):
        """HTML writer additionally records the scaled image (needs PIL)."""
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image', 'scaled-image']
        expected = [paths[key] for key in keys]
        # stylesheets are tested separately in test_stylesheet_dependencies():
        so = {'stylesheet_path': None, 'stylesheet': None}
        record = self.get_record(writer_name='html', settings_overrides=so)
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_dependencies_latex(self):
        """LaTeX writer records only files it actually accesses."""
        # since 0.9, the latex writer records only really accessed files, too.
        # Note: currently, raw input files are read (and hence recorded) while
        # parsing even if not used in the chosen output format.
        # This should change (see parsers/rst/directives/misc.py).
        keys = ['include', 'raw']
        if PIL:
            keys += ['figure-image']
        expected = [paths[key] for key in keys]
        record = self.get_record(writer_name='latex')
        # the order of the files is arbitrary
        record.sort()
        expected.sort()
        self.assertEqual(record, expected)
    def test_csv_dependencies(self):
        """csv-table source files are recorded (skipped if csv is missing)."""
        try:
            import csv
            csvsource = os.path.join('data', 'csv_dep.txt')
            self.assertEqual(self.get_record(source_path=csvsource),
                             ['data/csv_data.txt'])
        except ImportError:
            pass
    def test_stylesheet_dependencies(self):
        """Stylesheets are recorded only when embedded into the output."""
        stylesheet = paths['stylesheet']
        so = {'stylesheet_path': paths['stylesheet'],
              'stylesheet': None}
        so['embed_stylesheet'] = False
        record = self.get_record(writer_name='html', settings_overrides=so)
        self.assertTrue(stylesheet not in record,
                        '%r should not be in %r' % (stylesheet, record))
        record = self.get_record(writer_name='latex', settings_overrides=so)
        self.assertTrue(stylesheet not in record,
                        '%r should not be in %r' % (stylesheet, record))
        so['embed_stylesheet'] = True
        record = self.get_record(writer_name='html', settings_overrides=so)
        self.assertTrue(stylesheet in record,
                        '%r should be in %r' % (stylesheet, record))
        record = self.get_record(writer_name='latex', settings_overrides=so)
        self.assertTrue(stylesheet in record,
                        '%r should be in %r' % (stylesheet, record))
if __name__ == '__main__':
unittest.main()
|
[
"ranade@cloudera.com"
] |
ranade@cloudera.com
|
7cc8883ebb5f0dd25746047288ce32ce7fa4947c
|
72ec201effe17c3875f3d26ab98d6e56f808b0ac
|
/aoomuki_comp/app/migrations/0031_auto_20210118_1549.py
|
aa56a163b40bbbd5237d06285a7476af7d32fbd8
|
[
"MIT"
] |
permissive
|
Kamelgasmi/aoomuki_competences
|
549f9c9167f82d084ef6048cec72e87fe90f4c35
|
e02f3546f7efb54b825dbcfab968296607775903
|
refs/heads/master
| 2023-04-06T17:48:35.921460
| 2021-04-16T08:49:15
| 2021-04-16T08:49:15
| 330,929,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
# Generated by Django 2.1 on 2021-01-18 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add many-to-many 'interest' and 'level' relations to Competence."""

    dependencies = [
        ('app', '0030_collaborater_interest'),
    ]

    operations = [
        migrations.AddField(
            model_name='competence',
            name='interest',
            field=models.ManyToManyField(blank=True, related_name='competences', to='app.ListInterest'),
        ),
        migrations.AddField(
            model_name='competence',
            name='level',
            field=models.ManyToManyField(blank=True, related_name='competences', to='app.ListLevel'),
        ),
    ]
|
[
"kam_7@hotmail.fr"
] |
kam_7@hotmail.fr
|
ee6bdb9c84c524d31881c748f2c22d11a57d93ab
|
1fb60677cf35066e631d618ec002d48e21aeda7a
|
/profil3r/modules/domain/domain.py
|
cfe36160927867d187c6c8c800da38fbc2629d67
|
[
"MIT"
] |
permissive
|
dannymas/Profil3r
|
32c57b7f17f1c0718c7486b89ff90efed616afba
|
7e3ed9d832c5bdc5a55516b5a60df4f34524d41a
|
refs/heads/main
| 2023-04-24T23:45:27.699593
| 2021-05-03T19:56:50
| 2021-05-03T19:56:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,604
|
py
|
import requests
import time
class Domain:
    """Probes which permutations of a name exist as registered domains."""

    def __init__(self, config, permutations_list):
        # Delay between HTTP probes, converted from milliseconds to seconds.
        self.delay = config['plateform']['domain']['rate_limit'] / 1000
        # Candidate name template, e.g. "{permutation}.{domain}".
        self.format = config['plateform']['domain']['format']
        # Top level domains to try; more can be added in the config file.
        self.tld = config['plateform']['domain']['TLD']
        # Domains are not case sensitive, so normalise to lower case.
        self.permutations_list = [perm.lower() for perm in permutations_list]
        # Result type label (e.g. "domain") copied into the output dict.
        self.type = config['plateform']['domain']['type']

    def possible_domains(self):
        """Return every candidate domain name (permutation x TLD)."""
        possible_domains = []
        # search all TLD (.com, .net, .org...), you can add more in the config.json file
        for domain in self.tld:
            for permutation in self.permutations_list:
                possible_domains.append(self.format.format(
                    permutation = permutation,
                    domain = domain
                ))
        return possible_domains

    def search(self):
        """Probe each candidate domain and collect the ones that respond.

        Returns a dict of the form {"type": ..., "accounts": [{"value": domain}, ...]}.
        """
        domains_lists = {
            "type": self.type,
            "accounts": []
        }
        possible_domains_list = self.possible_domains()
        for domain in possible_domains_list:
            # BUG FIX: previously a failed request left `r` unbound (crash on
            # the first failure) or stale from the previous iteration
            # (phantom duplicate hits). Also catch the broader
            # RequestException so timeouts (timeout=5) do not abort the scan.
            r = None
            try:
                r = requests.head(domain, timeout=5)
            except requests.RequestException:
                pass
            # If the domain exists (any non-error HTTP status)
            if r is not None and r.status_code < 400:
                domains_lists["accounts"].append({"value": domain})
            # Honour the configured rate limit even after failures.
            time.sleep(self.delay)
        return domains_lists
|
[
"r0g3r5@protonmail.com"
] |
r0g3r5@protonmail.com
|
c55f523611483c34014135fc431d81dfc7d59241
|
7c7a258f52a6a2a1710507b3543a0c082933250b
|
/webscaff/commands/run/dj.py
|
cf5b4d01d7b8c54242efc464153dd9b9a997fd2d
|
[
"BSD-3-Clause"
] |
permissive
|
idlesign/webscaff
|
c487407da7a1a89bbfb52d803b219b49e15a8c18
|
407bbd3e1870aaab80036b3131054599b58072de
|
refs/heads/master
| 2022-12-25T06:42:42.009768
| 2022-12-09T14:56:30
| 2022-12-09T14:56:30
| 211,610,572
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,125
|
py
|
from pathlib import Path
from invoke import task
from .fs import create_dir
from ..sys import fs as sys_fs
from ..utils import link_config, echo
@task
def manage(ctx, cmd):
    """Runs Django manage command(s) on the remote as the project user.

    :param str|list cmd: a single management command or a list of them

    """
    if not isinstance(cmd, list):
        cmd = [cmd]
    project_name = ctx.project.name
    for command in cmd:
        # pty=True keeps interactive commands (e.g. createsuperuser) usable.
        ctx.sudo(f'{project_name} {command}', pty=True, user=project_name)
def rollout(ctx):
    """Rolls out migrations and statics (typical post-deploy step)."""
    migrate(ctx)
    manage(ctx, 'collectstatic --noinput')
@task
def migrate(ctx):
    """Runs Django manage command for project to launch migrations."""
    manage(ctx, 'migrate')
def create_superuser(ctx):
    """Runs Django manage command for project to create a superuser.

    Tries to get e-mail from settings, and username from e-mail
    (the part before the '@').

    """
    command = 'createsuperuser'
    username = ''
    email = ctx.project.email or ''
    if email:
        username = email.partition('@')[0]
        command += f' --email {email} --username {username}'
    # BUG FIX: `%` binds tighter than the conditional expression, so the
    # ternary previously applied to the whole formatted message and the
    # echo collapsed to '' whenever no username was known. Parenthesize
    # so only the '[username]' fragment is optional.
    echo('\nCreating Django superuser %s ...' % (f'[{username}]' if username else ''))
    manage(ctx, command)
def bootstrap(ctx):
    """Puts Django production settings file to remote.

    Also creates the media/static state directories, runs migrations and
    creates the initial superuser.
    """
    # Create media and static directories.
    dir_state = ctx.paths.remote.project.state
    create_dir(ctx, dir_state.static)
    create_dir(ctx, dir_state.media)
    # Symlink the local production settings into the remote settings dir.
    link_config(
        ctx,
        title='Django',
        name_local='env_production.py',
        name_remote='env_production.py',
        dir_remote_confs=Path(ctx.paths.remote.project.base) / 'settings'
    )
    migrate(ctx)
    create_superuser(ctx)
def dump(ctx, target_dir):
    """Dumps Django related stuff into a target directory.

    Currently only the uploaded media directory is archived (gzipped).
    """
    sys_fs.gzip_dir(
        ctx,
        ctx.paths.remote.project.state.media,
        target_dir,
    )
def restore(ctx, source_dir):
    """Restores Django related stuff from a source directory.

    Counterpart of dump(): extracts media.tar.gz back into the remote
    media directory.
    """
    sys_fs.gzip_extract(
        ctx,
        archive=source_dir / 'media.tar.gz',
        target_dir=ctx.paths.remote.project.state.media
    )
|
[
"idlesign@yandex.ru"
] |
idlesign@yandex.ru
|
78f74a8eb59a7952d622fd4781ae53f2185f223d
|
0f923ef8d024915edbe4088ce0da24ee952cd63e
|
/venv/Scripts/pip3-script.py
|
4c1d9b27d8335a7cb149c92842c69228cf3051f7
|
[] |
no_license
|
Rpaul88/POM_Naukrii
|
95c53cb1d314eca8798a07f5b6be01e34aaded0e
|
f399248a29c8e08bf173864f8080828ed34184e4
|
refs/heads/master
| 2020-06-18T03:01:25.009516
| 2019-07-10T06:26:23
| 2019-07-10T06:26:23
| 196,144,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
#!"C:\Users\Guest User\PycharmProjects\POM_Practice1\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: resolves the 'pip3'
# entry point of pip 10.0.1 and executes it.
__requires__ = 'pip==10.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
|
[
"you@example.com"
] |
you@example.com
|
d8ff7cb47e5c9e6b1480a7b97b390f610bd742e8
|
9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15
|
/186 Reverse Words in a String II.py
|
5d43c1616b0c5b156304fff51182135263cb7232
|
[
"MIT"
] |
permissive
|
Aminaba123/LeetCode
|
178ed1be0733cc7390f30e676eb47cc7f900c5b2
|
cbbd4a67ab342ada2421e13f82d660b1d47d4d20
|
refs/heads/master
| 2020-04-20T10:40:00.424279
| 2019-01-31T08:13:58
| 2019-01-31T08:13:58
| 168,795,374
| 1
| 0
|
MIT
| 2019-02-02T04:50:31
| 2019-02-02T04:50:30
| null |
UTF-8
|
Python
| false
| false
| 789
|
py
|
"""
Premium Question
"""
__author__ = 'Daniel'
class Solution(object):
    def reverseWords(self, s):
        """Reverse the word order of *s* in place, no extra space.

        :type s: a list of 1 length strings (List[str])
        :rtype: nothing
        """
        # First flip the entire character list, then flip each word back,
        # which leaves the words intact but in reverse order.
        self.reverse(s, 0, len(s))
        length = len(s)
        start = 0
        while start < length:
            # Find the end of the current word (next space or list end).
            end = start + 1
            while end < length and s[end] != " ":
                end += 1
            self.reverse(s, start, end)
            start = end + 1

    def reverse(self, s, start, end):
        """Reverse the slice s[start:end] in place via two pointers."""
        lo, hi = start, end - 1
        while lo < hi:
            s[lo], s[hi] = s[hi], s[lo]
            lo += 1
            hi -= 1
if __name__ == "__main__":
    # Smoke test: word order is reversed in place.
    lst = list("the sky is blue")
    Solution().reverseWords(lst)
    assert "".join(lst) == "blue is sky the"
|
[
"zhangdanyangg@gmail.com"
] |
zhangdanyangg@gmail.com
|
472e0f1f2bcbc12ab46ecc36e9b0b889f774b546
|
d8ea695288010f7496c8661bfc3a7675477dcba0
|
/examples/sound/baidu_to_mp3.py
|
18dc62be42aa2966c7e393740f7042699d421b77
|
[] |
no_license
|
dabolau/demo
|
de9c593dabca26144ef8098c437369492797edd6
|
212f4c2ec6b49baef0ef5fcdee6f178fa21c5713
|
refs/heads/master
| 2021-01-17T16:09:48.381642
| 2018-10-08T10:12:45
| 2018-10-08T10:12:45
| 90,009,236
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
from aip import AipSpeech

""" 你的百度 APPID AK SK
https://console.bce.baidu.com/ai/#/ai/speech/app/list 应用列表
http://ai.baidu.com/docs#/TTS-Online-Python-SDK/top API
"""
# Baidu AI platform credentials (app list / API docs linked above).
# NOTE(review): credentials are hard-coded and committed to source; they
# should live in configuration or environment variables instead.
APP_ID = '9288864'
API_KEY = '7OOA9UFvHwC3pplzPZnqQ9pF'
SECRET_KEY = '4ea30a42379528355abb0fa6e31516a2'

client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)

# Prompt the user for the text to synthesize.
text = input('请输入要转换为语音的文本:')

# Synthesize as Chinese speech ('zh'), client type 1, volume 5.
result = client.synthesis(text, 'zh', 1, {
    'vol': 5,
})

# On success the API returns raw audio bytes; on error it returns a dict
# with an error code, so only write the file when the result is not a dict.
if not isinstance(result, dict):
    with open('auido.wav', 'wb') as f:
        f.write(result)
|
[
"dabolau@qq.com"
] |
dabolau@qq.com
|
99c12bbc5aaaf4c9ef74e2e7c22addb524520f57
|
3ab7e700203054e104e6c60295c0a8455bc388b1
|
/i_entity_extractor/extractors/financing_events/financing_events_extractor.py
|
5aed8640d1b6ed90cab151ea36dcd81fd788ed26
|
[] |
no_license
|
youfeng243/crawler
|
e8114ab5ef68bb9fd7e4296452d63b53d3d4080a
|
59eaabef94de67444f09cfe5b25d481034d10f29
|
refs/heads/master
| 2021-07-11T10:12:27.946819
| 2017-10-12T11:35:27
| 2017-10-12T11:35:27
| 106,583,181
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,315
|
py
|
# coding=utf8
import sys
import re
import time
sys.path.append('../../')
sys.path.append('../../../')
from i_entity_extractor.extractors.default.default_extractor import DefaultExtractor
from i_entity_extractor.common_parser_lib import etl_tool
import copy
class FinancingEventsExtractor(DefaultExtractor):
    """Entity extractor that normalises raw financing-event records."""

    def __init__(self, topic_info, log):
        DefaultExtractor.__init__(self, topic_info, log)

    def format_extract_data(self, extract_data, topic_id):
        """Parse and normalise the extracted entity data.

        Returns a deep copy of *extract_data* with region, industry,
        amount and date fields cleaned up; the input dict is untouched.
        """
        item = copy.deepcopy(extract_data)
        # Strip N/A-style placeholders from the share-transfer ratio.
        proportion_share_transfer = item.get('proportion_share_transfer', '')
        item['proportion_share_transfer'] = etl_tool.regex_remove_na(proportion_share_transfer).strip()

        # Industry: split into hierarchy levels, re-join with '/'.
        industry = item.get('industry', '')
        lst_industry = etl_tool.regex_chinese(industry)
        item['industry'] = '/'.join(lst_industry)

        # Region: split into province / city / district components and
        # copy each component into its own field.
        lst_location = etl_tool.regex_chinese(item.get('region', ''))
        map_region = etl_tool.map_region(lst_location)
        for region_key in map_region.keys():
            item[region_key] = map_region[region_key]
        item['region'] = ''.join(lst_location)

        # Financing amount: parse value, unit and currency; the currency
        # defaults to CNY when the parser returns none.
        amount = item.get('amount', '')
        res_amount = self.parser_tool.money_parser.new_trans_money(amount, u"万", False)
        item['amounts'] = res_amount[0]
        item['units'] = res_amount[1]
        item['currency'] = res_amount[2] if res_amount[2] else u'人民币'

        # Publication date: normalise to YYYY-MM-DD, or '' when unparseable.
        public_date = item.get(u'public_date', u'')
        public_date = etl_tool.str2datetime(public_date, '%Y-%m-%d %H:%M:%S')
        item["public_date"] = '' if not public_date else public_date.strftime("%Y-%m-%d")
        return item
if __name__ == '__main__':
    # Ad-hoc smoke test (Python 2 style): load the toml config, build the
    # extractor for topic 33 and run it on a hard-coded sample record.
    import pytoml
    import sys
    import time
    from common.log import log
    sys.path.append('../../')
    with open('../../entity.toml', 'rb') as config:
        conf = pytoml.load(config)
    log.init_log(conf, console_out=conf['logger']['console'])
    conf['log'] = log
    topic_id = 33
    from i_entity_extractor.entity_extractor_route import EntityExtractorRoute
    from i_entity_extractor.common_parser_lib.mongo import MongDb
    import json
    route = EntityExtractorRoute(conf)
    topic_info = route.all_topics.get(topic_id, None)
    begin_time = time.time()
    obj = FinancingEventsExtractor(topic_info, log)
    # Sample raw record as produced by the crawler.
    extract_data = {
        "_site_record_id": "http://need.pedata.cn/265460.html",
        "amount": "",
        "amounts": "NaN",
        "city": "",
        "currency": "",
        "describe": "投资界消息,拟融资企业无锡睿泰科技有限公司参加“无锡服务外包企业投融资合作对接洽谈会”。",
        "district": "",
        "enterprise_full_name": "无锡睿泰科技有限公司",
        "enterprise_short_name": "",
        "enterprise_short_name_en": "",
        "enterprise_site": "",
        "industry": "软件外包",
        "information_sources": "投资界资讯",
        "innotree_score": "",
        "mode": "私募融资",
        "phone": "",
        "project_highlights": "",
        "project_name": "睿泰科技",
        "proportion_share_transfer": "",
        "province": "",
        "public_date": "2011-11-01",
        "region": "北京 · 朝阳区",
        "round": "A",
        "source_site": "私募通",
        "tag": [],
        "units": "万元"
    }
    entity_data = obj.format_extract_data(extract_data,topic_id)
    # Dump the normalised record field by field.
    print "-----------------------------"
    for key, value in entity_data.items():
        if isinstance(value, list):
            for v in value:
                print key, ":", v
        elif isinstance(value, dict):
            for key2, value2 in value.items():
                print key2, ":", value2
        else:
            print key, ":", value
    keys = ['units', 'source_site', 'describe', 'currency', 'tag', 'city', 'enterprise_short_name_en', 'district', 'amounts', 'innotree_score', 'public_date', 'founders', 'province', 'project_name', 'phone', 'enterprise_full_name', 'information_sources', 'proportion_share_transfer', 'enterprise_short_name', 'industry', 'region', 'enterprise_site', 'amount', 'project_highlights', 'mode', 'round', '_in_time', '_src', '_record_id', '_id']
    # NOTE(review): transfer_data is not defined or imported anywhere in
    # this module, so this line raises NameError when reached — confirm
    # which helper was intended.
    transfer_data(keys, 'financing_events')
    print keys
|
[
"you@163.com"
] |
you@163.com
|
cb98b3436e683f576b7be3d1ab7586b93b861a14
|
631b26854aa790915fc3ee65dc68a7c9bd1fed5b
|
/2048 Game/main.py
|
77358e640f0730a2d973005d69316862e6533d66
|
[] |
no_license
|
Aniketthani/Kivy-Projects
|
6e7dc1c079255697acc812497a481f1ffdeada54
|
0aec55f10d82b8f756a132fdc833c5aaab695a6f
|
refs/heads/main
| 2023-04-29T10:17:15.262192
| 2021-05-05T17:29:02
| 2021-05-05T17:29:02
| 341,630,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,085
|
py
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import BorderImage,Color
from kivy.core.window import Window,Keyboard
from kivy.utils import get_color_from_hex
from kivy.properties import NumericProperty,ListProperty
import random
from kivy.animation import Animation
from kivy.vector import Vector
from kivy.uix.label import Label
from kivy.uix.button import Button
# Pixel gap between board cells and around the border.
spacing=10

# Arrow keys mapped to board movement direction vectors (dx, dy).
key_vectors={Keyboard.keycodes['up']:(0,1),Keyboard.keycodes['right']:(1,0),Keyboard.keycodes['down']:(0,-1),Keyboard.keycodes['left']:(-1,0)}

# Tile background colors (hex strings) for values 2, 4, 8, ..., 2048.
colors=['EEE4DA','EDE0C8','F2B179','F59563','F6765F','F65E3B','EDCF72','EDCC61','EDC850','EDC53F','EDC22E']
tile_colors={2**i:color for i,color in enumerate(colors,start=1)}
def all_cells(flip_x=False, flip_y=False):
    """Yield every (x, y) cell of the 4x4 board.

    flip_x / flip_y reverse the iteration order of the corresponding
    axis, so tile moves can be processed starting from the edge the
    tiles slide towards.

    BUG FIX: the original parenthesization
    ``reversed(range(4) if flip_x else range(4))`` applied ``reversed``
    to the x axis unconditionally, so x was always iterated 3..0 and
    leftward moves were processed in the wrong order.
    """
    for x in (reversed(range(4)) if flip_x else range(4)):
        for y in (reversed(range(4)) if flip_y else range(4)):
            yield (x, y)
class Tile(Widget):
    """One numbered tile on the 2048 board."""
    # Kivy properties so the .kv template can bind to them.
    font_size=NumericProperty(24)
    number=NumericProperty(2)
    color=ListProperty(get_color_from_hex(tile_colors[2]))
    number_color=ListProperty(get_color_from_hex('776E65'))

    def __init__(self,number=2,**kwargs):
        super(Tile,self).__init__(**kwargs)
        # Label is rendered at half the tile width.
        self.font_size=self.width*0.5
        self.number=number
        self.update_colors()

    def update_colors(self):
        # Background color tracks the tile value; tiles above 4 use a
        # light number color for contrast.
        self.color=get_color_from_hex(tile_colors[self.number])
        if self.number>4:
            self.number_color=get_color_from_hex('F9F6F2')

    def resize(self,pos,size):
        # Reposition/rescale the tile and keep the font at half the width.
        self.pos=pos
        self.size=size
        self.font_size=0.5*self.width
class Board(Widget):
    """The 4x4 2048 playing field: tile grid, moves, merges, drawing."""
    # game_won freezes the board once 2048 is reached; moving is True
    # while a slide animation is in flight; b is the 4x4 grid of Tile
    # widgets (None = empty cell).
    game_won=False
    moving=False
    b=None

    def is_deadlocked(self):
        # Deadlocked = board full and no right/up neighbor pair is equal.
        for x,y in all_cells():
            if self.b[x][y] is None:
                return False
            number=self.b[x][y].number
            if (self.can_combine(x+1,y,number) or self.can_combine(x,y+1,number)):
                return False
        return True

    def can_combine(self,board_x,board_y,number):
        # True when the given cell holds a tile of the same value.
        return (self.valid_cells(board_x,board_y) and self.b[board_x][board_y] is not None and self.b[board_x][board_y].number==number)

    def on_touch_up(self,touch):
        # Turn a swipe gesture into a unit direction vector and move.
        v=Vector(touch.pos) - Vector(touch.opos) #touch.opos is initial position of touch
        if v.length() <20: #discarding small touches or taps
            return
        # Keep only the dominant axis of the swipe.
        if abs(v.x) > abs(v.y):
            v.y=0
        else:
            v.x=0
        v=v.normalize()
        v.x=int(v.x)
        v.y=int(v.y)
        self.move(*v)

    def valid_cells(self,board_x,board_y):
        # True when the coordinates lie on the 4x4 board.
        return(board_x>=0 and board_y>=0 and board_x<=3 and board_y<=3)

    def can_move(self,board_x,board_y):
        # True when the cell exists and is empty.
        return(self.valid_cells(board_x,board_y) and self.b[board_x][board_y] is None)

    def move(self,dir_x,dir_y):
        """Slide all tiles in direction (dir_x, dir_y), merging equal pairs."""
        if self.game_won:
            return
        if self.moving:
            return
        # Process cells starting from the edge tiles move towards so each
        # tile slides into already-settled space.
        for board_x,board_y in all_cells(dir_x>0,dir_y>0):
            tile=self.b[board_x][board_y]
            if not tile:
                continue
            x,y=board_x,board_y
            # Slide while the next cell in the direction is empty.
            while self.can_move(x+dir_x,y+dir_y):
                self.b[x][y]=None
                x+=dir_x
                y+=dir_y
                self.b[x][y]=tile
            # Merge with an equal neighbor: the moved tile absorbs it.
            if self.can_combine(x+dir_x,y+dir_y,tile.number):
                self.b[x][y]=None
                x+=dir_x
                y+=dir_y
                self.remove_widget(self.b[x][y])
                self.b[x][y]=tile
                self.b[x][y].number *=2
                self.b[x][y].update_colors()
                if self.b[x][y].number==2048:
                    # Player reached 2048: show the win UI and freeze.
                    message_box=self.parent.ids.message_box
                    message_box.add_widget(Label(text="Congratulation !!You Won The Game",font_size=20,color=(0,0,0,1),bold=True))
                    message_box.add_widget(Button(text="New Game", font_size=20,on_press=app.new_game))
                    self.game_won=True
            if x==board_x and y==board_y:
                # Tile did not move; no animation needed.
                continue
            anim=Animation(pos=self.cell_pos(x,y),duration=0.25,transition="linear")
            if not self.moving:
                # Spawn the next tile when the first animation completes.
                anim.on_complete=self.new_tile
                self.moving=True
            anim.start(tile)

    def new_tile(self,*args):
        # Place a new tile (value 2) in a random empty cell.
        empty_cells=[(x,y) for x,y in all_cells() if self.b[x][y]==None]
        x,y=random.choice(empty_cells)
        tile=Tile(pos=self.cell_pos(x,y),size=self.cell_size)
        self.b[x][y]=tile
        self.add_widget(tile)
        if len(empty_cells)==1 and self.is_deadlocked():
            # That was the last free cell and no merge is possible.
            message_box=self.parent.ids.message_box
            message_box.add_widget(Label(text="Game over (board is deadlocked)",font_size=20,color=(0,0,0,1),bold=True))
            message_box.add_widget(Button(text="New Game", font_size=20,on_press=app.new_game))
        self.moving=False

    def reset(self,*args):
        # Fresh empty 4x4 grid seeded with two starting tiles.
        self.b=[[None for i in range(4)] for j in range(4)]
        self.new_tile()
        self.new_tile()

    def __init__(self, **kwargs):
        super(Board, self).__init__(**kwargs)
        self.resize()

    def resize(self, *args):
        """Redraw the background and reposition all tiles after a size/pos change."""
        # Four cells plus five gaps span the board width.
        self.cell_size=(0.25*(self.width-5*spacing),)*2
        self.canvas.before.clear()
        with self.canvas.before:
            BorderImage(pos=self.pos, size=self.size, source='images/board.png')
            Color(*get_color_from_hex("ccc0b4"))
            for board_x,board_y in all_cells():
                BorderImage(pos=self.cell_pos(board_x,board_y),size=self.cell_size,source="images/cell.png")
        if not self.b:
            return
        for board_x,board_y in all_cells():
            tile=self.b[board_x][board_y]
            if tile:
                tile.resize(pos=self.cell_pos(board_x,board_y),size=self.cell_size)

    def on_key_down(self,window,key,*args):
        # Arrow keys map to direction vectors via key_vectors.
        if key in key_vectors:
            self.move(*key_vectors[key])

    def cell_pos(self,board_x,board_y):
        # Pixel position of a board cell, offset by the spacing gaps.
        return (self.x + spacing + (self.cell_size[0] + spacing)*board_x , self.y + spacing + (self.cell_size[1] + spacing)*board_y)

    # Kivy property hooks: any move/resize of the widget redraws the board.
    on_pos = resize
    on_size = resize
class GameApp(App):
    """Kivy application wrapper for the 2048 board."""

    def on_start(self):
        # Seed the board and route keyboard input to it.
        board = self.root.ids.board
        board.reset()
        Window.bind(on_key_down=board.on_key_down)

    def new_game(self, *args):
        """Clear any end-of-game UI and restart with a fresh board."""
        # Remove the win / game-over message widgets (iterate a copy,
        # since remove_widget mutates the children list).
        message_box = self.root.ids.message_box
        for w in message_box.children[:]:
            message_box.remove_widget(w)

        # Remove all tile widgets from the board.
        board = self.root.ids.board
        for wid in board.children[:]:
            board.remove_widget(wid)

        board.b = [[None for i in range(4)] for j in range(4)]
        board.new_tile()
        board.new_tile()
        # BUG FIX: the flag checked in Board.move lives on the Board, not
        # the App. Resetting `self.game_won` left board.game_won True
        # after a win, freezing every subsequent game.
        board.game_won = False
if __name__ == '__main__':
    # White window background; `app` is read as a module-level global by
    # Board (the "New Game" buttons call app.new_game).
    Window.clearcolor=(1,1,1,1)
    app=GameApp()
    app.run()
|
[
"noreply@github.com"
] |
Aniketthani.noreply@github.com
|
cee91f204aa9e2228d918ecb5502764b58f7d261
|
9912570da4f0fc380c3eece7797b8deb5a4240c1
|
/colcon_core/topological_order.py
|
73d49ac0ed96cfd3cfa8a06bbc31696f2fe91b63
|
[
"Apache-2.0"
] |
permissive
|
ruffsl/colcon-core
|
2c26aa3d71ed4a1178e0e94e0371d4a13fab9aff
|
8dc3d7ec98e36397f349ede03e487da0cad336f4
|
refs/heads/master
| 2021-06-25T13:10:24.936062
| 2019-10-17T20:44:19
| 2019-10-17T20:44:19
| 215,967,652
| 0
| 0
|
Apache-2.0
| 2019-10-18T07:38:14
| 2019-10-18T07:38:12
| null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from collections import OrderedDict
import copy
from colcon_core.package_decorator import PackageDecorator
def topological_order_packages(
    descriptors, direct_categories=None, recursive_categories=None,
):
    """
    Order packages topologically.

    :param descriptors: the package descriptors
    :type descriptors: set of
      :py:class:`colcon_core.package_descriptor.PackageDescriptor`
    :param direct_categories: dependency categories considered for direct
      dependencies, forwarded to ``get_recursive_dependencies``
    :param recursive_categories: dependency categories considered for
      recursive dependencies, forwarded to ``get_recursive_dependencies``
    :returns: list of package decorators
    :rtype: list of :py:class:`colcon_core.package_decorator.PackageDecorator`
    :raises RuntimeError: if the dependency graph contains a cycle
    """
    # get recursive dependencies for all packages
    queued = set()
    for descriptor in descriptors:
        rec_deps = descriptor.get_recursive_dependencies(
            descriptors,
            direct_categories=direct_categories,
            recursive_categories=recursive_categories)
        d = _PackageDependencies(
            descriptor=descriptor,
            recursive_dependencies=rec_deps,
            # deep copy so the shrinking working set does not mutate the
            # reported recursive dependencies
            remaining_dependencies=copy.deepcopy(rec_deps),
        )
        queued.add(d)

    ordered = OrderedDict()
    while len(ordered) < len(descriptors):
        # remove dependencies on already ordered packages
        ordered_names = {descriptor.name for descriptor in ordered.keys()}
        for q in queued:
            q.remaining_dependencies -= ordered_names
        # find all queued packages without remaining dependencies
        ready = list(filter(lambda q: not q.remaining_dependencies, queued))
        if not ready:
            # no package became ready: there must be a dependency cycle
            lines = [
                '%s: %s' % (
                    q.descriptor.name, sorted(q.remaining_dependencies))
                for q in queued]
            lines.sort()
            raise RuntimeError(
                'Unable to order packages topologically:\n' + '\n'.join(lines))
        # order ready jobs alphabetically for a deterministic order
        ready.sort(key=lambda d: d.descriptor.name)
        # add all ready jobs to ordered dictionary
        for r in ready:
            ordered[r.descriptor] = r.recursive_dependencies
            queued.remove(r)

    # create ordered list of package decorators
    decorators = []
    ordered_keys = [descriptor.name for descriptor in ordered.keys()]
    for descriptor, recursive_dependencies in ordered.items():
        decorator = PackageDecorator(descriptor)
        # reorder recursive dependencies according to the topological ordering
        decorator.recursive_dependencies = sorted(
            (d for d in recursive_dependencies if d in ordered_keys),
            key=ordered_keys.index)
        decorators.append(decorator)
    return decorators
class _PackageDependencies:
__slots__ = (
'descriptor', 'recursive_dependencies', 'remaining_dependencies')
def __init__(
self, descriptor, recursive_dependencies, remaining_dependencies,
):
self.descriptor = descriptor
self.recursive_dependencies = recursive_dependencies
self.remaining_dependencies = remaining_dependencies
|
[
"dirk-thomas@users.noreply.github.com"
] |
dirk-thomas@users.noreply.github.com
|
12005d23ef4de33c4530ad05d94c0ad17d294d7d
|
6ca3acb227e340edbee80668591e7008cc256b5b
|
/flask_appbuilder/security/forms.py
|
4f908185ef48991a76bb2d12b026c79356b05360
|
[
"BSD-3-Clause"
] |
permissive
|
tuxskar/Flask-AppBuilder
|
4c69dce5c13f85b930d5b4761945b33ffb231ef7
|
4f65bbbd7edc6e7ca7c5f62a499677565e0662e1
|
refs/heads/master
| 2021-01-12T21:04:22.702263
| 2014-12-17T00:20:58
| 2014-12-17T00:20:58
| 28,113,074
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
from wtforms import StringField, BooleanField, PasswordField
from flask.ext.wtf.recaptcha import RecaptchaField
from flask.ext.babelpkg import lazy_gettext
from wtforms.validators import DataRequired, EqualTo, Email
from ..fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from ..forms import DynamicForm
from ..validators import Unique
class LoginForm_oid(DynamicForm):
    """OpenID login form: an OpenID URL plus optional username."""
    openid = StringField(lazy_gettext('openid'), validators=[DataRequired()])
    username = StringField(lazy_gettext('User Name'))
    remember_me = BooleanField(lazy_gettext('remember_me'), default=False)
class LoginForm_db(DynamicForm):
    """Database-auth login form: username and password, both required."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()])
    password = PasswordField(lazy_gettext('Password'), validators=[DataRequired()])
class ResetPasswordForm(DynamicForm):
    """Password reset form: new password entered twice, must match."""
    password = PasswordField(lazy_gettext('Password'),
                             description=lazy_gettext(
                                 'Please use a good password policy, this application does not check this for you'),
                             validators=[DataRequired()],
                             widget=BS3PasswordFieldWidget())
    conf_password = PasswordField(lazy_gettext('Confirm Password'),
                                  description=lazy_gettext('Please rewrite the password to confirm'),
                                  validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
                                  widget=BS3PasswordFieldWidget())
class RegisterUserDBForm(DynamicForm):
    """Self-registration form for database auth, captcha-protected."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
    password = PasswordField(lazy_gettext('Password'),
                             description=lazy_gettext(
                                 'Please use a good password policy, this application does not check this for you'),
                             validators=[DataRequired()],
                             widget=BS3PasswordFieldWidget())
    conf_password = PasswordField(lazy_gettext('Confirm Password'),
                                  description=lazy_gettext('Please rewrite the password to confirm'),
                                  validators=[EqualTo('password', message=lazy_gettext('Passwords must match'))],
                                  widget=BS3PasswordFieldWidget())
    recaptcha = RecaptchaField()
class RegisterUserOIDForm(DynamicForm):
    """Self-registration form for OpenID auth (no password fields)."""
    username = StringField(lazy_gettext('User Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    first_name = StringField(lazy_gettext('First Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    last_name = StringField(lazy_gettext('Last Name'), validators=[DataRequired()], widget=BS3TextFieldWidget())
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Email()], widget=BS3TextFieldWidget())
    recaptcha = RecaptchaField()
|
[
"danielvazgaspar@gmail.com"
] |
danielvazgaspar@gmail.com
|
44a597405bfe6e771c71784c3ec66d5a28942841
|
72d70d86bde200aab004ffe019b84f89f0978fd1
|
/postfix_mta_sts_resolver/internal_cache.py
|
30607f837ca672c5e3b9b1db6896707d403da275
|
[
"MIT"
] |
permissive
|
Kernel-Error/postfix-mta-sts-resolver
|
4da8d198e449d4bb6460a19a78a64d1a5783fdf7
|
5d1fba4d45d022bdd419e4a352d8555c4c66a3a3
|
refs/heads/master
| 2020-05-01T16:58:06.581584
| 2019-03-28T07:19:05
| 2019-03-28T07:19:05
| 177,587,050
| 0
| 0
|
MIT
| 2019-03-25T12:59:44
| 2019-03-25T12:59:44
| null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
import collections
class InternalLRUCache(object):
    """In-memory LRU cache with an async get/set interface."""

    def __init__(self, capacity=10000):
        # Maximum number of entries kept before the oldest is evicted.
        self._capacity = capacity
        # OrderedDict keeps keys in recency order: oldest first.
        self._cache = collections.OrderedDict()

    async def get(self, key):
        """Return the cached value, refreshing its recency; None if absent."""
        try:
            self._cache.move_to_end(key)
        except KeyError:
            return None
        return self._cache[key]

    async def set(self, key, value):
        """Store *value* under *key*, evicting the LRU entry when full."""
        if key in self._cache:
            # Re-insert below so the key becomes most recently used.
            del self._cache[key]
        elif len(self._cache) >= self._capacity:
            # Evict the least recently used entry (front of the dict).
            self._cache.popitem(last=False)
        self._cache[key] = value
|
[
"vladislav@vm-0.com"
] |
vladislav@vm-0.com
|
ac825cfa60bf4b58f7ffaaeb66f3d47eb41b178a
|
a718de5d51c8d430e791aca6092669c04548fd64
|
/Census-Analyser-master/census_analyser/stateCensusAnalyser.py
|
65bb4d6c9734650262bd48799052c4630712bd47
|
[] |
no_license
|
santoshikalaskar/Basic_Advance_python_program
|
d0fef4134ed4b14f84ff05a3b37e1773c111a2d1
|
84df5c336d5304c3c727102194ba62417640643a
|
refs/heads/master
| 2023-01-22T15:06:24.909145
| 2020-12-02T14:01:29
| 2020-12-02T14:01:29
| 314,511,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,858
|
py
|
import pandas as pd
from custom_exceptions import (FileIsNotCSVTypeException,
EmptyFileException,
InvalidDelimiterException)
from abc import ABC, abstractmethod
import json
'''
StateCensusAnalyser class will load StateCensus data
'''
class StateCensusAnalyser:
    """Holds the column headers expected in the India state census CSV."""

    def __init__(self):
        # Expected header names, in file order.
        self.state = 'State'
        self.population = 'Population'
        self.areaInSqKm = 'AreaInSqKm'
        self.densityPerSqKm = 'DensityPerSqKm'

    def __repr__(self):
        # Comma-joined header list; CSVStateCensus splits this on ','.
        headers = (self.state, self.population, self.areaInSqKm, self.densityPerSqKm)
        return ','.join(headers)
'''
CSVState class will load data from state code csv file
'''
class CSVState:
    """Holds the column headers expected in the state code CSV."""

    def __init__(self):
        # Expected header names, in file order.
        self.srNo = 'SrNo'
        self.stateName = 'StateName'
        self.tin = 'TIN'
        self.stateCode = 'StateCode'

    def __repr__(self):
        # Comma-joined header list; CSVStateCensus splits this on ','.
        return ','.join((self.srNo, self.stateName, self.tin, self.stateCode))
'''
CSVStateCensus class will inherit StateCensusAnalyser and CSVState to load data from csv file.
'''
class CSVStateCensus(StateCensusAnalyser, CSVState):
    """Loads and validates a census / state-code CSV with pandas."""

    def __init__(self, file_name):
        # Path of the CSV to load; also selects which header set applies.
        self.file_name = file_name

    @property
    def col_list(self):
        """Expected column names, taken from the matching parent's repr."""
        if self.file_name == 'StateCode.csv':
            col_list = repr(CSVState()).split(",")
        else:
            col_list = repr(StateCensusAnalyser()).split(",")
        return col_list

    @property
    def load_CSV(self):
        """Read the CSV into a DataFrame, validating type and content.

        Raises FileIsNotCSVTypeException for a non-.csv extension,
        EmptyFileException for an empty file, and
        InvalidDelimiterException when parsing produced NaNs (taken as a
        sign of a wrong delimiter). Returns the string "InvalidHeader"
        when the expected columns are missing.
        """
        if self.file_name[-4:] != '.csv':
            raise FileIsNotCSVTypeException
        try:
            df = pd.read_csv(self.file_name, usecols=self.col_list)
            if df.isnull().values.any():
                raise InvalidDelimiterException
            return df
        except pd.errors.EmptyDataError:
            raise EmptyFileException
        except ValueError:
            # usecols missing from the file raises ValueError.
            return "InvalidHeader"

    def iterate_df(self, dataframe):  # Convert DataFrame rows into plain lists
        df_list = [list(row) for row in dataframe.values]
        return df_list

    def number_of_records(self, dataframe):  # Number of data rows (row count minus one)
        return len(dataframe) - 1
'''
SortData class provides sorting methods for the census data and saves each sorted result as JSON.
'''
class SortData(CSVStateCensus):
    """Sorting helpers over the census / state-code data frames.

    Each private helper sorts one data frame by a column, writes the
    result to a JSON file and returns the parsed JSON (list of row dicts).
    """

    def __init__(self):
        # Load both CSVs eagerly; CSVStateCensus.load_CSV validates them.
        self.code_data_frame = CSVStateCensus("StateCode.csv").load_CSV
        self.census_data_frame = CSVStateCensus("IndiaStateCensusData.csv").load_CSV

    def __sorting_function(self,dataframe,col_name,ascending=True):  # Generic column-sort helper
        return dataframe.sort_values([col_name],ascending=ascending)

    def __sort_InidaCensusData_in_alphabetical_order_in_JSON(self):  # Census data sorted by State
        sorted_df = self.__sorting_function(self.census_data_frame,"State")
        sorted_df.to_json(r'IndiStateCensusData.json', orient='records')
        with open('IndiStateCensusData.json','r') as json_file:
            census = json.load(json_file)
        return census

    def __sort_StateCode_in_stateCode_order_in_JSON(self):  # State code data sorted by StateCode
        sorted_df = self.__sorting_function(self.code_data_frame,'StateCode')
        sorted_df.to_json(r'StateCode.json', orient='records')
        with open('StateCode.json','r') as json_file:
            census = json.load(json_file)
        return census

    def __sort_InidaCensusData_in_asc_population_order_in_JSON(self):  # Census data sorted by Population (asc)
        sorted_df = self.__sorting_function(self.census_data_frame,'Population')
        sorted_df.to_json(r'IndiStateCensusData_asc_population.json', orient='records')
        with open('IndiStateCensusData_asc_population.json','r') as json_file:
            census = json.load(json_file)
        return census

    def __sort_InidaCensusData_in_asc_population_density_order_in_JSON(self):  # Census data sorted by DensityPerSqKm (asc)
        sorted_df = self.__sorting_function(self.census_data_frame,"DensityPerSqKm")
        sorted_df.to_json(r'IndiStateCensusData_asc_populationDensity.json', orient='records')
        with open('IndiStateCensusData_asc_populationDensity.json','r') as json_file:
            census = json.load(json_file)
        return census

    def __sort_InidaCensusData_in_desc_area_order_in_JSON(self):  # Census data sorted by AreaInSqKm (desc)
        sorted_df = self.__sorting_function(self.census_data_frame,"AreaInSqKm",ascending=False)
        sorted_df.to_json(r'IndiStateCensusData_desc_area.json', orient='records')
        with open('IndiStateCensusData_desc_area.json','r') as json_file:
            census = json.load(json_file)
        return census
'''
Mapping class inherits SortData and maps the state census data to state codes.
'''
class Mapping(SortData):
    """Joins census data with state codes on the state name."""

    def __map_state_census_with_state_code_according_to_code(self):
        # Inner join on StateCode.StateName == Census.State, drop the
        # serial-number column, then sort by StateCode and round-trip
        # through a JSON file.
        merge_inner = pd.merge(left=self.code_data_frame, right=self.census_data_frame,left_on='StateName',right_on='State')
        merged_data = merge_inner.drop(['SrNo'], axis=1)
        sort_state_code = merged_data.sort_values('StateCode')
        sort_state_code.to_json(r'Mapped_data_acc_to_stateCode.json', orient='records')
        with open('Mapped_data_acc_to_stateCode.json','r') as map_file:
            map_data = json.load(map_file)
        return map_data
# file_name = "IndiaStateCensusData.csv"
# invalid_header_file = "csv_with_invalid_header.csv"
# invalid_delimiter_file = "csv_with_invalid_delimiter.csv"
# demo_empty_csv = "demo_empty.csv"
# demo_txt = "demo_empty.txt"
# code_csv = 'StateCode.csv'
# obj = CSVStateCensus(file_name)
# df = obj.load_CSV
# d = df.sort_values(['State'])
# print(d)
# s = sort_ref.sort_InidaCensusData_in_asc_population_density_order_in_JSON(df)
# print(s)
# print(sorted_df)
# print(df)
# df_list = obj.iterate_df(df)
# print(df_list)
# if df.isnull().values.any():
# print("yes")
# for index in df.index:
# print(df['DensityPerSqKm'][index])
# if df['DensityPerSqKm'][index] == None:
# print("Invalid")
# print(df)
# print(df._engine.data.dialect.delimiter)
# total_records = obj.number_of_records(df)
# print(total_records)
# print(len(df))
# obj.iterate_df(df)
# df_goa = df.loc[df["State"]=="Goa"]
# print(df_goa)
# print(df_goa['Population'])
# for ind in df.index:
# print(df['State'][ind])
# s = SortData()
# d = s._SortData__sort_InidaCensusData_in_asc_population_density_order_in_JSON()
# print(d)
# for data in d:
# print(data['State'])
# m = Mapping()
# c = m._Mapping__map_state_census_with_state_code_according_to_code()
# print(c)
|
[
"kalaskars1996@gmail.com"
] |
kalaskars1996@gmail.com
|
ce93ee342b53ef4659e02efb6bf0f51b77633dac
|
eb7ef1340440c36a51b155943a3536f6e37fc6da
|
/codeonline/migrations/0015_auto_20160801_1430.py
|
b89f90a6231a391b265c768ac5a1db499a9c6628
|
[] |
no_license
|
shineforever/codeonlinesystem
|
48140d113ba36646c0d91cf7c6e7980a31cc1f41
|
d3fd33238b1a0148e3bce86a60e29fedf34b93f1
|
refs/heads/master
| 2020-06-12T13:09:45.238748
| 2016-09-12T09:36:58
| 2016-09-12T09:36:58
| 75,816,574
| 0
| 1
| null | 2016-12-07T08:42:15
| 2016-12-07T08:42:15
| null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``maintenance_manager_comfirm_time``
    on ``onlinerequest`` optional (nullable and blankable).
    """

    dependencies = [
        ('codeonline', '0014_auto_20160801_1429'),
    ]

    operations = [
        migrations.AlterField(
            model_name='onlinerequest',
            # NOTE(review): "comfirm" looks like a typo for "confirm", but the
            # name must match the model field, so it is kept as-is.
            name='maintenance_manager_comfirm_time',
            # verbose_name decodes to the Chinese label "运维经理确认时间"
            # ("operations manager confirmation time").
            field=models.DateTimeField(null=True, verbose_name='\u8fd0\u7ef4\u7ecf\u7406\u786e\u8ba4\u65f6\u95f4', blank=True),
        ),
    ]
|
[
"zhaobin022@qq.com"
] |
zhaobin022@qq.com
|
ec2bd921cea11a71e77592eb7d65b7d23ba058d8
|
0cdcee391e178092d7073734957075c72681f037
|
/leetcode/LeetCode-150/Linked-Lists/146-LRU-Cache.py
|
6b25ffca12654a902b028bc202ab82000ff7cde2
|
[] |
no_license
|
hrishikeshtak/Coding_Practises_Solutions
|
6b483bbf19d5365e18f4ea1134aa633ff347a1c1
|
86875d7436a78420591a60b716acd2780287b4a8
|
refs/heads/master
| 2022-10-06T18:44:56.992451
| 2022-09-25T03:29:03
| 2022-09-25T03:29:03
| 125,744,102
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,607
|
py
|
"""
146. LRU Cache
"""
class DLLNode:
    """Doubly linked list node holding one cached (key, value) pair."""

    def __init__(self, key, val):
        self.key, self.val = key, val
        self.prev = None
        self.next = None


class LRUCache:
    """Fixed-capacity LRU cache: dict for O(1) lookup plus a doubly linked
    list for O(1) recency ordering.

    The list runs from least recently used (just after ``self.left``) to
    most recently used (just before ``self.right``); the two sentinel nodes
    eliminate edge cases at the ends.
    """

    def __init__(self, capacity: int):
        self.cache = {}  # key -> DLLNode
        self.capacity = capacity
        # Sentinels: left <-> right; real nodes live between them.
        self.left = DLLNode(0, 0)
        self.right = DLLNode(0, 0)
        self.left.next = self.right
        self.right.prev = self.left

    def insert(self, node):
        """Splice *node* in just before the right sentinel (MRU slot)."""
        before = self.right.prev
        before.next = node
        node.prev = before
        node.next = self.right
        self.right.prev = node

    def remove(self, node):
        """Unlink *node* from wherever it sits in the list."""
        node.prev.next = node.next
        node.next.prev = node.prev

    def get(self, key: int) -> int:
        """Return the value for *key* (refreshing it to MRU), or -1 if absent."""
        node = self.cache.get(key)
        if node is None:
            return -1
        self.remove(node)
        self.insert(node)
        return node.val

    def put(self, key: int, value: int) -> None:
        """Store *key* -> *value*, evicting the LRU entry when over capacity."""
        if key in self.cache:
            self.remove(self.cache[key])
        node = DLLNode(key, value)
        self.cache[key] = node
        self.insert(node)
        if len(self.cache) > self.capacity:
            oldest = self.left.next  # LRU node sits right after the sentinel
            self.remove(oldest)
            del self.cache[oldest.key]
# Your LRUCache object will be instantiated and called as such:
# Demo: capacity-2 cache. get(1) on the refreshed key still returns 1 at the
# end, because put(3, 3) evicted key 2 (the least recently used) instead.
capacity = 2
obj = LRUCache(capacity)
print(f"get 1: {obj.get(1)}")
obj.put(1, 1)
obj.put(2, 2)
print(f"get 1: {obj.get(1)}")
obj.put(3, 3)
print(f"get 1: {obj.get(1)}")
|
[
"hrishikesh.tak@ness.com"
] |
hrishikesh.tak@ness.com
|
09155e31de77f9be942b8e3ed3e5fe489bea2798
|
0deefdcfc6219f20fa1ff9ff8d766baa30af993a
|
/smappdragon/tools/tweet_cleaner.py
|
5e4ae3993517fd74494508526ab58fe179285b3b
|
[
"MIT"
] |
permissive
|
yinleon/smappdragon
|
c743af04ff9b46ef1221ca1b7824c4cc6fd00a24
|
691cb66f26543e47293f38cf5658c4172d676013
|
refs/heads/master
| 2021-01-19T04:14:57.897437
| 2017-03-29T20:17:58
| 2017-03-29T20:17:58
| 87,361,256
| 1
| 0
| null | 2017-04-05T22:14:05
| 2017-04-05T22:14:05
| null |
UTF-8
|
Python
| false
| false
| 1,774
|
py
|
import os
import csv
import glob
import bson
import json
def clean_tweets(input_file_path, output_file_path, error_file_path):
    """Copy well-formed JSON tweet lines from *input_file_path* to
    *output_file_path* (re-serialized, one per line); write lines that fail
    to parse verbatim to *error_file_path*.

    Fixes over the original: the input handle is now closed via ``with``
    even if an exception occurs, and only JSON parse errors are caught
    (the bare ``except`` also swallowed KeyboardInterrupt and real bugs).
    """
    with open(input_file_path, 'r', encoding='utf-8') as json_handle, \
         open(output_file_path, 'w', encoding='utf-8') as fo, \
         open(error_file_path, 'w', encoding='utf-8') as f:
        for line in json_handle:
            try:
                tweet = json.loads(line)
            except ValueError:  # json.JSONDecodeError subclasses ValueError
                f.write(line)
            else:
                fo.write(json.dumps(tweet))
                fo.write('\n')
def clean_tweets_multiple(input_file_pattern, output_file_path, error_file_path):
    """Like :func:`clean_tweets`, but over every file matching the glob
    *input_file_pattern* (``~`` is expanded); results are APPENDED to the
    output and error files.

    Fixes over the original: input handles are closed via ``with`` even on
    error, the output files are opened once instead of once per input file,
    and only JSON parse errors are caught instead of a bare ``except``.
    """
    # Resolve the input set before creating the output files, so the outputs
    # can never match the pattern and be read back as input.
    paths = glob.glob(os.path.expanduser(input_file_pattern))
    with open(output_file_path, 'a', encoding='utf-8') as fo, \
         open(error_file_path, 'a', encoding='utf-8') as f:
        for path in paths:
            with open(path, 'r', encoding='utf-8') as json_handle:
                for line in json_handle:
                    try:
                        tweet = json.loads(line)
                    except ValueError:  # json.JSONDecodeError
                        f.write(line)
                    else:
                        fo.write(json.dumps(tweet))
                        fo.write('\n')
class SmappError(Exception):
    """Base exception type for smappdragon-specific errors."""
    pass
'''
@yvan
Can be used to clean tweets in a general, catch-all sense. Kept separate
from the data sources (bson_collection, etc.) to keep the datasource
implementation simple; it is also not a core function — really more of an
extra. How you clean your data is up to you; we just offer this way.
Methods for cleaning data can get big and out of hand very quickly, so I am
putting this here in an attempt to keep other parts of the code from
getting too crazy and unmaintainable.
'''
|
[
"yvanscher@gmail.com"
] |
yvanscher@gmail.com
|
c6ae54111ef04b17b81e9f854d4bd1972e7d553a
|
74f5a4630c708e71224af55bb3eb11a503014f6f
|
/test01/wifi.py
|
03924499a81ca6e6519bb39a0059234054c32e4c
|
[] |
no_license
|
msgpo/lopy_tests
|
e6bbc3c77d701a7303567e5c53856e9e04615b2b
|
513178b01422e46cab3cc5f39b4c65d1e5f5a408
|
refs/heads/master
| 2021-06-12T23:07:26.697394
| 2017-02-13T16:05:32
| 2017-02-13T16:05:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
# Wifi Configuration
#
# MicroPython (LoPy) Wi-Fi helper: joins the network stored in an optional
# ``wifi_config`` module, falling back to access-point mode on failure.
import machine
from network import WLAN
import time

# Module-wide WLAN handle; starts in station (client) mode.
_wlan = WLAN(mode=WLAN.STA)
# connect to wifi
def connect():
    """Scan for the SSID in ``wifi_config`` and try to join it.

    Retries the association for up to ~15 s; on failure (or if the network
    is not visible at all) switches the board into access-point mode.
    """
    global wifi_config, _wlan
    nets = _wlan.scan()
    for net in nets:
        if net.ssid == wifi_config.name:
            print('Wifi: network %s found.' % net.ssid)
            # auth tuple is (security type, password); 5000 ms connect timeout.
            _wlan.connect(net.ssid, auth=(net.sec, wifi_config.password), timeout=5000)
            tries=15
            for i in range(tries):
                print("%d/%d. Trying to connect." %(i+1, tries))
                machine.idle()  # yield to the system while waiting
                time.sleep(1)
                if _wlan.isconnected(): break
            break
    if _wlan.isconnected():
        print('Wifi: connection succeeded!')
        print(_wlan.ifconfig())
    else:
        print('Wifi: connection failed!')
        accesspoint()
def accesspoint():
    """Switch the board into access-point mode (fallback when STA fails)."""
    global _wlan
    print('Wifi: activating accesspoint.')
    _wlan = WLAN(mode=WLAN.AP)
def connected():
    """Return True if currently associated with a network."""
    return _wlan.isconnected()
def config():
    """Return the current interface configuration tuple from the WLAN driver."""
    return _wlan.ifconfig()
def delete():
    """Remove the stored credentials file and fall back to AP mode."""
    import os
    os.remove("wifi_config.py")
    # TODO: clear internal wifi assignment
    accesspoint()
def remove():
    """Alias for :func:`delete`."""
    delete()
def scan():
    """Return the SSIDs of all networks currently visible to the radio."""
    return [net.ssid for net in _wlan.scan()]
# write config and connect
def setup(name, password):
    """Persist *name*/*password* to wifi_config.py, then connect with them."""
    global wifi_config
    with open("wifi_config.py", "w") as cfg:
        cfg.write("name=\"%s\"\npassword=\"%s\"" % (name, password))
    wifi_config.name = name
    wifi_config.password = password
    connect()
# Try to find wifi_config
try:
    import wifi_config
    connect()
except ImportError:
    # No stored credentials yet: create an empty placeholder namespace so
    # setup() can attach name/password attributes to it later.
    class wifi_config():
        pass
|
[
"devel@mail.ulno.net"
] |
devel@mail.ulno.net
|
f0277cc63056e502ced1d713fdc7bbe567df39d1
|
2157782cf5875767f8d1fe0bb07243da2e87600d
|
/test_from_myself/djangoTest/Testsite/Testsite/settings.py
|
fbac8fa33d4eb59c9d67cff359e472346d15d4b0
|
[] |
no_license
|
mouday/SomeCodeForPython
|
9bc79e40ed9ed851ac11ff6144ea080020e01fcd
|
ddf6bbd8a5bd78f90437ffa718ab7f17faf3c34b
|
refs/heads/master
| 2021-05-09T22:24:47.394175
| 2018-05-11T15:34:22
| 2018-05-11T15:34:22
| 118,750,143
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,224
|
py
|
"""
Django settings for Testsite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; rotate it and load it
# from the environment before any production use.
SECRET_KEY = ')(3%s0c#_7ns&a-7-5x@-io63fuy1^ojl+a#$1$60#yj9@n599'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): "*" accepts any Host header; restrict to real hostnames
# outside development.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Testsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "blog/templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Testsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'myblog',
        'USER': 'root',
        # NOTE(review): credentials are hard-coded in the settings file;
        # move them to environment variables before sharing or deploying.
        'PASSWORD': '123456',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
|
[
"1940607002@qq.com"
] |
1940607002@qq.com
|
261d930a3beec9c0e30f08186acacbe12e3b86f9
|
22f5818e99c3593dc1c405a155ea6d7aa0c765a1
|
/backend/home/migrations/0002_load_initial_data.py
|
c6809ed748daf29a7b5fb9d036d76db99016723e
|
[] |
no_license
|
crowdbotics-apps/new-expo-app-dev-8468
|
2ecc8f7824505ab68ae538eb07aeb1969c7a206a
|
d249a27a9db5b32efb5111bd2a96d31bc54b6ac3
|
refs/heads/master
| 2022-11-28T01:23:00.770574
| 2020-08-04T16:56:17
| 2020-08-04T16:56:17
| 285,042,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
    # Seed the home app's CustomText record with the application title.
    CustomText = apps.get_model("home", "CustomText")
    customtext_title = "New expo app"
    CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
    # Seed the landing page with the default Crowdbotics HTML blurb.
    HomePage = apps.get_model("home", "HomePage")
    homepage_body = """
    <h1 class="display-4 text-center">New expo app</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at this app."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "new-expo-app-dev-8468.botics.co"
    defaults = {"name": "New expo app"}
    # Only set the domain when one is configured, mirroring the template
    # this migration was generated from.
    if custom_domain:
        defaults["domain"] = custom_domain
    Site.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: seed CustomText, HomePage and the default Site."""

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
c58fe73f582a1e2ec908eb34aa02357ebd5bb5c3
|
acff427a36d6340486ff747ae9e52f05a4b027f2
|
/main/x11/misc/xdm/actions.py
|
02b788d616e653a2c5eb2ed167bd24162614505b
|
[] |
no_license
|
jeremie1112/pisilinux
|
8f5a03212de0c1b2453132dd879d8c1556bb4ff7
|
d0643b537d78208174a4eeb5effeb9cb63c2ef4f
|
refs/heads/master
| 2020-03-31T10:12:21.253540
| 2018-10-08T18:53:50
| 2018-10-08T18:53:50
| 152,126,584
| 2
| 1
| null | 2018-10-08T18:24:17
| 2018-10-08T18:24:17
| null |
UTF-8
|
Python
| false
| false
| 1,282
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
    """Regenerate the build system and configure xdm with Pisi's options."""
    autotools.autoreconf("-vif")
    autotools.configure("--disable-static \
                         --enable-unix-transport \
                         --enable-tcp-transport \
                         --enable-local-transport \
                         --enable-secure-rpc \
                         --enable-xpm-logos \
                         --enable-xdm-auth \
                         --with-pam \
                         --with-libaudit \
                         --with-xdmconfigdir=/etc/X11/xdm \
                         --with-default-vt=vt7 \
                         --with-config-type=ws \
                         --with-xft \
                         --with-pixmapdir=/usr/share/X11/xdm/pixmaps \
                        ")
    # Inject -Wl,-O1,--as-needed into libtool's shared-link invocation.
    pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
    """Compile the package."""
    autotools.make()
def install():
    """Install into the packaging sandbox and ship docs plus /var/lib/xdm."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodir("/var/lib/xdm")
    pisitools.dodoc("AUTHORS", "COPYING", "README")
|
[
"erkanisik@yahoo.com"
] |
erkanisik@yahoo.com
|
6b9b1ca6b331ddb6677f82ecb8c974dfb1bc9620
|
974dc8113a265ebe0d54b818333be78f000d293f
|
/google-api-client-generator/src/googleapis/codegen/import_definition.py
|
03b506f51f213eaaa0605a157fef431809c0d777
|
[
"Apache-2.0"
] |
permissive
|
googleapis/discovery-artifact-manager
|
9e0948600ec4c2f05a889d7f157c1eaec12ea6b4
|
19f268e0b7935ea3d87d0d124c7791efb5a78646
|
refs/heads/master
| 2023-08-30T02:49:49.476556
| 2023-08-29T18:24:14
| 2023-08-29T18:24:14
| 72,768,841
| 43
| 53
|
Apache-2.0
| 2023-09-14T21:56:14
| 2016-11-03T17:17:02
|
Java
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/python2.7
# Copyright 2011 Google Inc. All Rights Reserved.
"""Contains information necessary to define an import."""
__author__ = 'rmistry@google.com (Ravi Mistry)'
class ImportDefinition(object):
  """Bundles the imports and extra template values a data type requires.

  Intended for use in the type_format_to_datatype_and_imports dictionaries.
  """

  def __init__(self, imports=None, template_values=None):
    """Construct a definition of an import.

    Args:
      imports: (sequence of str) All imports required by a data type.
      template_values: (sequence of str) Additional template values that
        must be set to handle the imports.
    """
    self._imports = imports if imports else []
    self._template_values = template_values if template_values else []

  @property
  def imports(self):
    """The imports required by the data type."""
    return self._imports

  @property
  def template_values(self):
    """The extra template values required to handle the imports."""
    return self._template_values
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
5fee350412b5d59a8441bdc2cc29a017d518de6b
|
9657d72ca77081e699c472241f4d565e03cda32e
|
/fable/fable_sources/libtbx/command_line/sge_available_slots.py
|
5354cace06085aee47e03bc6afadc19bbbb26a54
|
[
"BSD-3-Clause-LBNL",
"MIT"
] |
permissive
|
hickerson/bbn
|
b37a4a64a004982dd219a3bd92e519b62f4ea2b3
|
17ef63ad1717553ab2abb50592f8de79228c8523
|
refs/heads/master
| 2021-01-17T09:50:23.863448
| 2018-08-12T19:04:58
| 2018-08-12T19:04:58
| 20,693,028
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
from __future__ import division
def run(args):
  """Print the number of available SGE slots parsed from ``qstat -g c``.

  Negative codes flag parse failures: -1 qstat wrote to stderr, -2 too few
  output lines, -3 unexpected header, -4 missing separator line.
  (Python 2 module: uses the ``print`` statement.)
  """
  assert len(args) == 0
  from libtbx import easy_run
  qstat_buffer = easy_run.fully_buffered(command="qstat -g c")
  el = qstat_buffer.stderr_lines
  ol = qstat_buffer.stdout_lines
  if (len(el) != 0):
    print -1
  elif (len(ol) < 3):
    print -2
  elif ( " ".join(ol[0].split())
           != "CLUSTER QUEUE CQLOAD USED AVAIL TOTAL aoACDS cdsuE"):
    print -3
  elif (not ol[1].startswith("----------")):
    print -4
  else:
    sum_available = 0
    # Data rows have 7 fields ("CLUSTER QUEUE" is one column); index 3 = AVAIL.
    for line in ol[2:]:
      flds = line.split()
      assert len(flds) == 7
      sum_available += int(flds[3])
    print sum_available
# Script entry point: expects no positional arguments.
if (__name__ == "__main__"):
  import sys
  run(sys.argv[1:])
|
[
"hickerson@gmail.com"
] |
hickerson@gmail.com
|
225125f179e96196d2131b86cc89a97751d4cf96
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02848/s598772866.py
|
282413bef5247516e1db318c8edad90021b349bc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
# Caesar-shift the uppercase string S forward by N positions, wrapping at Z.
N = int(input())
S = input()
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Map each letter to the one N places later. The modulo handles any N >= 0;
# the original doubled-alphabet lookup table raised IndexError for N > 26.
dic = {c: ALPHABET[(i + N) % 26] for i, c in enumerate(ALPHABET)}
ans = "".join(dic[ch] for ch in S)
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c7530d80f8eab8e9d0c69ff3835ccebf526d4d34
|
5bab92faf12fe329b78743b877f0c154da6b0498
|
/aula_1/ex_5.py
|
4090c1a596e7660bab8bf3d2918b0d041410ca89
|
[] |
no_license
|
elrrocha/python-521
|
943bd4ab23c7bb4bae2e9653085c93bf43b73256
|
8837e5f911bb8b0e389bb45f5d1684e23648aebe
|
refs/heads/master
| 2020-06-27T22:28:01.979671
| 2019-08-01T20:15:20
| 2019-08-01T20:15:20
| 200,068,830
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
import requests

DOMAIN_URL = 'https://gen-net.herokuapp.com/api/users/{}'

user_id = input('Digite seu id: ')
name = input('Digite seu nome: ')
email = input('Digite seu email: ')
# Bug fix: the prompt string was assigned directly to ``password`` (missing
# input() call), so the literal prompt was sent instead of the user's entry.
password = input('Digite sua senha: ')

payload = {
    'name': name,
    'email': email,
    'password': password
}

# Bug fix: a stray requests.put(DOMAIN_URL) used to fire before any data was
# collected, hitting the unformatted URL; only the real update is sent now.
response = requests.put(DOMAIN_URL.format(user_id), payload)

if response.status_code == 200:
    print('Usuário atualizado com sucesso')
else:
    print('Erro ao atualizar o usuário')
|
[
"leonardo.mendes@4linux.com.br"
] |
leonardo.mendes@4linux.com.br
|
5343e91b21a1d17b86b3d2134e35ceb2a8c7d7b9
|
fb63c25920d15f9b48b8e9e604ffbbde99a7aeae
|
/svmrfe/run_sgdrfe.py
|
c60a611d3bd8fcb5299bfd8e0ea8fa28aa157d0c
|
[] |
no_license
|
kellyhennigan/cueexp_scripts
|
763385c96772867a4875c734732bb32a3b051d1c
|
7264b0033a9cc83cc362c96cd04d94706728c994
|
refs/heads/master
| 2023-07-21T20:35:11.607859
| 2022-01-10T20:18:12
| 2022-01-10T20:18:12
| 55,538,490
| 1
| 1
| null | 2023-07-06T21:16:20
| 2016-04-05T19:17:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
"""
Script to run svmrfe on a given subject with a number of different parameters.
usage: python run_sgdrfe.py [test subject id]
"""
import os
import sys
from sgdrfe import SGDRFE
######################### USER PARAMS #################################
SUBJECT_BASE_PATH = '/scratch/PI/knutson/cuesvm/' #where we find subj folder
SUBJECT_FILE = '/scratch/PI/knutson/cuesvm/cue_patients_subjects.txt'
NIFTII = 'pp_cue_tlrc_afni.nii.gz'
BEHAVIORAL = 'drugs_vs_neutral_trial_onsets.1D'
NIFTII_OUT_NAME = 'cue_drug_trial_vs_neutral.nii.gz'
TRS = [1, 2, 3, 4]
LAG = 2 # so really we're looking at trs 2+trs = [3, 4] of every trial (1-indexed)
CLASSIFIERS = ['linearsvc', 'elasticnet']
CUT = .05 # throw out the bottom cut % of features every iteration
STOP_THRESHOLD = .025 # stop at this % of features out of what we start with
# TEST=True runs a quick smoke test on 3 subjects (see the __main__ block).
TEST = False
######################################################################
class Subject(object):
    """One study participant whose files live under SUBJECT_BASE_PATH/<name>."""

    def __init__(self, name):
        self.name = name
        self.path = os.path.join(SUBJECT_BASE_PATH, name)

    def file_path(self, filename):
        """Absolute path of *filename* inside this subject's folder."""
        return os.path.join(self.path, filename)

    def has_file(self, filename):
        """True when *filename* exists in this subject's folder."""
        return os.path.exists(os.path.join(self.path, filename))
class Project(object):
    """Thin container holding one Subject per name in *subs*."""

    def __init__(self, subs):
        self.subjects = list(map(Subject, subs))
if __name__=="__main__":
if not TEST:
try:
test_subject = sys.argv[1]
except IndexError:
test_subject = None
with open(SUBJECT_FILE, 'r') as f:
subjects = [x for x in f.read().split('\n') if len(x) == 8]
if not TEST and test_subject not in subjects:
print("No test subject found, using all subjects...")
test_subject = None
if TEST:
test_subject = subjects[2]
subjects = subjects[:3]
for clf in CLASSIFIERS:
for cval in [100.]:
#[.0001,.001,.01,.1,1.,10.,100.,1000.]:
project = Project(subjects)
rfe = SGDRFE(project, NIFTII, BEHAVIORAL, TRS,
test_subj=test_subject, lag=LAG, clftype=clf, cut=CUT,
C=cval, stop_threshold=STOP_THRESHOLD)
rfe.run()
test_sub_name = test_subject if test_subject is not None else 'all_subjects'
niftii_name = '_'.join([test_sub_name, str(cval), clf, NIFTII_OUT_NAME ])
rfe.save_nii(savename=niftii_name)
if TEST:
break
|
[
"kelhennigan@gmail.com"
] |
kelhennigan@gmail.com
|
15c33705404a528dcb3a06706e16d33f5ccdb127
|
bcd8f5a7352a1e0b2e1a83fea9ae2e4182eda805
|
/algobook/Py2Adt/src/ch6/ListNode3.py
|
40ee447a973587684ac20f5f8322cc147d3772f7
|
[] |
no_license
|
JediChou/jedichou-study-algo
|
455c7709250d424cad1b326efd99f0be7a000edc
|
4f0ce79556d7543870dfc13399c7ae85ba773508
|
refs/heads/master
| 2020-06-24T23:21:34.310122
| 2020-04-01T05:53:33
| 2020-04-01T05:53:33
| 199,122,658
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
class ListNode:
    """Singly linked list node: a payload plus a pointer to the next node."""

    def __init__(self, data):
        self.data, self.next = data, None
def iterateSingleList(node):
    """Print the data of every node from *node* to the end of the list.

    *node* must not be None (matches the original recursive behavior).
    """
    print(node.data)
    current = node.next
    while current is not None:
        print(current.data)
        current = current.next
if __name__ == "__main__":
# Define node.
n1 = ListNode(78)
n2 = ListNode(83)
n3 = ListNode(6)
n4 = ListNode(73)
n5 = ListNode(68)
# Create linked list.
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
# Iterate single linked list.
iterateSingleList(n1)
|
[
"jedigit@163.com"
] |
jedigit@163.com
|
c91fa793eaaf0fd3f563d34be77f1ff4359be204
|
fd4510e0bf959de7527bd0c62d3b4fb3f78cee5e
|
/detection/CD1/std.py
|
3be0d5c0f1b57f11db53818081eabe2ad0add0f4
|
[] |
no_license
|
RuoAndo/nii-cyber-security-admin
|
8dde8ab68b0f7fa882adbe8e828546aa1739e685
|
e77b9d581e124f9fd5f721e18cd77d3bccecad19
|
refs/heads/master
| 2022-12-13T21:40:46.330389
| 2022-12-07T14:01:00
| 2022-12-07T14:01:00
| 71,614,880
| 5
| 1
| null | 2020-10-13T08:40:46
| 2016-10-22T03:41:30
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
import numpy as np
import sys

# Command line: std.py <values_csv> <label-with-instance-id> <instid_tsv>
argvs = sys.argv
argc = len(argvs)

# Load the tab-separated instance-id -> instance-name mapping (argv[3]).
f = open(argvs[3])
line = f.readline()
instid = []
instname = []
while line:
    tmp = line.split("\t")
    instid.append(tmp[0])
    instname.append(tmp[1])
    line = f.readline()
f.close()

# Load the comma-separated value file (argv[1]); column 1 as floats.
f = open(argvs[1])
line = f.readline()
l = np.array([])
while line:
    tmp = line.split(",")
    l = np.append( l, float(tmp[1]) )
    line = f.readline()
def main():
    """Print the L2-normalised value vector tagged with the instance name."""
    # Standard deviation is computed but never used; kept as-is.
    std = np.std(l)
    # Normalise the value vector to unit L2 norm.
    lr = l / np.linalg.norm(l)
    #print(lr)
    # argv[2] is "<something>-<instance id>"; look up the matching name.
    tmp = str(argvs[2]).split("-")
    counter = 0
    for i in instid:
        if int(i) == int(tmp[1]):
            uname = instname[counter]
        counter = counter + 1
    counter = 0
    for i in lr:
        # NOTE(review): under Python 2 this print STATEMENT emits the whole
        # concatenation "(counter,value)+,id,name"; under Python 3 it parses
        # but raises TypeError at runtime (None + str). Python 2 only.
        print(str(counter)+","+str(i))+","+str(tmp[1])+","+uname.strip()
        counter = counter + 1
|
[
"ando.ruo@gmail.com"
] |
ando.ruo@gmail.com
|
c30fa16878570e0c12a069b540ddadb291b6b66a
|
3012e5a0f34dd54fbac568c70506826973192ce1
|
/pylib/points.py
|
23464ed8064625df58f47a9559f4b6586991896f
|
[] |
no_license
|
metatab-packages/civicknowledge.com-osm-demosearch
|
89999227bda7bae91259c10bd651f220ae35c52f
|
d4ecb7775662a50413c848c3ae5a901b147ef532
|
refs/heads/master
| 2023-05-14T12:39:25.328559
| 2021-06-08T13:52:39
| 2021-06-08T13:52:39
| 334,572,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,987
|
py
|
"""
"""
from itertools import chain
import geopandas as gpd
import libgeohash as gh
import numpy as np
import pandas as pd
import shapely
from shapely.geometry import Point
from shapely.wkt import loads as loads_wkt
from tqdm import tqdm
tqdm.pandas()
from pathlib import Path
from demosearch.util import run_mp
from .lines import open_cache
import logging
from metapack.appurl import SearchUrl
SearchUrl.initialize()  # This makes the 'index:" urls work

points_logger = logging.getLogger(__name__)

# OSM tag keys pulled out of each point's HSTORE-encoded other_tags column.
extract_tag_names = ['amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking']
def extract_tags(df, extract_tags):
    """Pull selected OSM tag values out of the HSTORE-encoded other_tags column.

    Args:
        df: points dataframe with 'osm_id', 'highway' and 'other_tags' columns.
        extract_tags: sequence of tag keys to extract (e.g. extract_tag_names).

    Returns:
        (rows, errors): *rows* is a list of [osm_id, tag_value, ...] lists in
        *extract_tags* order; *errors* collects (row, exception) pairs for
        rows whose other_tags could not be parsed.
    """
    from sqlalchemy.dialects.postgresql import HSTORE
    h = HSTORE()
    f = h.result_processor(None, None)  # parses an HSTORE string into a dict

    # Prune the dataset to just the records that have the tags we want,
    # before getting to the more expensive operation of extracting the tags.
    # This should reduce the dataset from 24M rows to less than 6M.
    t = df.dropna(subset=['other_tags'])
    t = t[t.highway.isnull()]
    flags = [t.other_tags.str.contains(e) for e in extract_tags]
    comb_flags = [any(e) for e in list(zip(*flags))]
    t = t[comb_flags]

    rows = []
    errors = []
    for idx, r in t.set_index('osm_id')[['other_tags']].iterrows():
        try:
            d = f(r.other_tags)
            rows.append([idx] + [d.get(e) for e in extract_tags])
        except TypeError as e:
            # Bug fix: list.append takes a single argument; the original
            # errors.append(r, e) itself raised TypeError when reached.
            errors.append((r, e))
    return (rows, errors)
def make_tags_df(pkg):
    """Create the tags dataframe"""
    cache = open_cache(pkg)
    points_logger.info('Make tags dataframe')
    try:
        # Fast path: reuse the result cached by a previous run.
        tags_df = cache.get_df('points/tags_df')
    except KeyError:
        points_df = pkg.reference('points').read_csv(low_memory=False)
        # Split the file and extract tags in multiprocessing
        N_task = 200
        tasks = [(e, extract_tag_names) for e in np.array_split(points_df, N_task)]
        results = run_mp(extract_tags, tasks, 'Split OSM other_tags')
        # Re-join the per-chunk (rows, errors) pairs from extract_tags.
        tags = list(chain(*[e[0] for e in results]))
        errors = list(chain(*[e[1] for e in results]))
        tags_df = pd.DataFrame(tags, columns=['osm_id'] + extract_tag_names)
        # 1/2 the entries, 2.7M are trees and rocks
        tags_df = tags_df[~tags_df.natural.isin(['tree', 'rock'])]
        tags_df = pd.merge(tags_df, points_df[['osm_id', 'geometry']], on='osm_id')

        def encode(v):
            # v is WKT like "POINT (lon lat)"; slice off "POINT (" and ")" and
            # reverse the pair — gh.encode presumably takes (lat, lon).
            return gh.encode(*list(map(float, v[7:-1].split()))[::-1])

        tags_df['geohash'] = tags_df.geometry.progress_apply(encode)
        tags_df['geometry'] = tags_df.geometry.progress_apply(shapely.wkt.loads)
        tags_df = gpd.GeoDataFrame(tags_df, geometry='geometry', crs=4326)
        cache.put_df('points/tags_df', tags_df)
    return tags_df
def extract_class_columns(tags_df):
    """Collapse the per-tag columns into a single 'class' per point and count
    the retained classes per 8-character geohash cell.
    """
    # First non-null value across the tag columns becomes the point's class.
    tags_df['class'] = tags_df.loc[:, ('amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking')].fillna(
        method='ffill', axis=1).fillna(method='bfill', axis=1).iloc[:, 0]
    # Fold near-duplicate labels together.
    replace = {'parking': 'parking_space',
               'pub': 'bar',
               }
    # Only these classes are kept for the per-cell counts.
    cls = ['restaurant', 'bar', 'cafe', 'fast_food', 'supermarket', 'grave_yard', 'playground',
           'bicycle_parking', 'park', 'fuel', 'bank', 'hotel', 'fitness_centre',
           'laundry', 'clothes', 'convenience', 'parking', 'parking_space']
    t = tags_df[['geohash', 'class']].replace(replace)
    t = t[t['class'].isin(cls)]
    # Count points per (8-char geohash, class) and pivot classes to columns.
    cls_df = t.groupby([t.geohash.str.slice(0, 8), 'class']).count().unstack().fillna(0).droplevel(0, axis=1)
    return cls_df
def make_geotags_df(pkg, tags_df, cls_df):
    """Join tag counts and class counts per geohash cell, attach CBSA geoids,
    and write the result to data/point_tags.csv (skipped if it exists).
    """
    # At 8 digits, geohashes are, on average 4m by 20M over the US
    # At 6, 146m x 610m
    # At 4, 4Km x 20Km
    # Clip to 8 because it's really unlikely that there are actually more than 10
    # amenities in a cell.
    pkg_root = Path(pkg.path).parent
    f = pkg_root.joinpath('data', 'point_tags.csv')
    if f.exists():
        points_logger.info(f'Geotags dataframe {f} already exists')
        return
    points_logger.info('Make geotags dataframe')
    group_counts = tags_df.groupby(tags_df.geohash.str.slice(0, 8)) \
        [['amenity', 'tourism', 'shop', 'leisure', 'natural', 'parking']].count().clip(0, 10)
    t = group_counts.join(cls_df, how='outer').fillna(0).astype(int)
    # Cell centroid point for each geohash (decode returns lat/lon; reversed
    # for Point's (x=lon, y=lat) order).
    t['geometry'] = [Point(gh.decode(e)[::-1]) for e in t.index]
    t = gpd.GeoDataFrame(t, geometry='geometry', crs=4326).reset_index()
    # Spatially tag each cell with its CBSA geoid.
    cbsa = pkg.reference('cbsa').geoframe().to_crs(4326)
    geohash_tags = gpd.sjoin(t, cbsa[['geometry', 'geoid']], how='left')
    cols = ['geohash', 'geoid'] + list(geohash_tags.loc[:, 'amenity':'supermarket'].columns) + ['geometry']
    geohash_tags = geohash_tags[cols]
    geohash_tags.to_csv(f, index=False)
    return geohash_tags
def build_points(pkg):
    """Full pipeline: extract tags, derive classes, write data/point_tags.csv."""
    tags_df = make_tags_df(pkg)
    points_logger.info('Extract class Columns')
    cls_df = extract_class_columns(tags_df)
    make_geotags_df(pkg, tags_df, cls_df)
|
[
"eric@civicknowledge.com"
] |
eric@civicknowledge.com
|
613776148909d6748ac5dcc5401258ee19f74b31
|
4519b4b24f3907da1dde513f72d432fd9b4391f4
|
/crds/jwst/specs/niriss_gain.spec
|
00ecc62e7c0cb6112bf6dbeadffff39454d1d588
|
[
"BSD-2-Clause"
] |
permissive
|
spacetelescope/crds
|
0bd712b7c7c6864c274987e7ba94a051e19d1e48
|
08da10721c0e979877dc9579b4092c79f4ceee27
|
refs/heads/master
| 2023-07-23T17:07:33.889579
| 2023-06-29T20:04:56
| 2023-06-29T20:04:56
| 52,045,957
| 9
| 29
|
NOASSERTION
| 2023-09-14T17:42:28
| 2016-02-18T23:15:38
|
Python
|
UTF-8
|
Python
| false
| false
| 468
|
spec
|
# CRDS reference-file specification (Python dict literal) for the JWST
# NIRISS GAIN filekind.
{
    'derived_from' : 'cloning tool 0.05b (2013-04-12) used on 2013-10-04',
    'file_ext' : '.fits',
    'filekind' : 'GAIN',
    'filetype' : 'GAIN',
    'instrument' : 'NIRISS',
    'mapping' : 'REFERENCE',
    'name' : 'jwst_niriss_gain_0000.rmap',
    'observatory' : 'JWST',
    'parkey' : (('META.INSTRUMENT.DETECTOR', 'META.INSTRUMENT.FILTER'),),
    'sha1sum' : 'fee4fbd0950196f5211a6badfb7b51067489072b',
    'suffix' : 'gain',
    'text_descr' : 'Gain',
}
|
[
"jmiller@stsci.edu@stsci.edu"
] |
jmiller@stsci.edu@stsci.edu
|
a150e33292f2b4df7313dc18e92ceae92ca16a81
|
803bab6f782099d995bcdb99d163486f4fff8c50
|
/habitat-lab/habitat/sims/habitat_simulator/actions.py
|
6252636c3399b2da8795019b46dbe220986fa63d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-SA-3.0"
] |
permissive
|
facebookresearch/habitat-lab
|
7088506509f64da6d682f5dc69427589f71a58a9
|
f5b29e62df0788d70ba3618fc738fa4e947428ba
|
refs/heads/main
| 2023-08-24T14:00:02.707343
| 2023-08-23T04:53:48
| 2023-08-23T04:53:48
| 169,164,391
| 792
| 298
|
MIT
| 2023-09-14T15:20:03
| 2019-02-04T23:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,517
|
py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Dict
import attr
from habitat.core.utils import Singleton
class _DefaultHabitatSimActions(Enum):
    # Built-in action name -> integer id pairs pre-registered at startup.
    stop = 0
    move_forward = 1
    turn_left = 2
    turn_right = 3
    look_up = 4
    look_down = 5
@attr.s(auto_attribs=True, slots=True)
class HabitatSimActionsSingleton(metaclass=Singleton):
    r"""Implements an extendable Enum for the mapping of action names
    to their integer values.

    This means that new action names can be added, but old action names cannot
    be removed nor can their mapping be altered. This also ensures that all
    actions are always contiguously mapped in :py:`[0, len(HabitatSimActions) - 1]`

    This is accessible as the global singleton :ref:`HabitatSimActions`
    """

    _known_actions: Dict[str, int] = attr.ib(init=False, factory=dict)

    def __attrs_post_init__(self):
        # Pre-register the built-in actions with their fixed ids.
        for action in _DefaultHabitatSimActions:
            self._known_actions[action.name] = action.value

    def extend_action_space(self, name: str) -> int:
        r"""Extends the action space to accommodate a new action with
        the name :p:`name`

        :param name: The name of the new action
        :return: The number the action is registered on

        Usage:

        .. code:: py

            from habitat.sims.habitat_simulator.actions import HabitatSimActions
            HabitatSimActions.extend_action_space("MY_ACTION")
            print(HabitatSimActions.MY_ACTION)
        """
        assert (
            name not in self._known_actions
        ), "Cannot register an action name twice"
        # Next free id equals the current count, keeping ids contiguous.
        self._known_actions[name] = len(self._known_actions)

        return self._known_actions[name]

    def has_action(self, name: str) -> bool:
        r"""Checks to see if action :p:`name` is already registered

        :param name: The name to check
        :return: Whether or not :p:`name` already exists
        """
        return name in self._known_actions

    def __getattr__(self, name):
        # Enables attribute-style lookup, e.g. HabitatSimActions.move_forward.
        return self._known_actions[name]

    def __getitem__(self, name):
        return self._known_actions[name]

    def __len__(self):
        return len(self._known_actions)

    def __iter__(self):
        return iter(self._known_actions)
HabitatSimActions: HabitatSimActionsSingleton = HabitatSimActionsSingleton()
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
90bee35d3c81017c7b8558351920c8d208c8605e
|
ed843fd5c2f6693e3ee682cf579e49dbd9957375
|
/savu/test/data_test.py
|
dd5a1001c6042ce98a794f9095a4d42a5158e006
|
[
"CC-BY-2.0",
"Apache-2.0"
] |
permissive
|
mjn19172/Savu
|
98707cd0f89b1b6626341b67211f7890d969ac97
|
b9b456928387eaf81d4d0f314394f6d337bbb90b
|
refs/heads/master
| 2020-12-30T22:56:49.605584
| 2015-09-18T12:32:07
| 2015-09-18T12:32:07
| 36,367,826
| 0
| 0
| null | 2015-09-18T12:32:08
| 2015-05-27T13:16:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: data_test
:platform: Unix
:synopsis: unittest test class data structures
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import unittest
import savu.test.test_utils as tu
import savu.plugins.utils as pu
class Test(unittest.TestCase):
    """Unit tests for Savu data structures (plugin output shapes)."""

    @unittest.skip("This test should be updated to use the new setup system")
    def test_create_smaller_data_block(self):
        # Run the NXtomo test data through the downsample filter plugin
        # and check that the output data block has the reduced shape.
        data = tu.get_nx_tomo_test_data()
        plugin = pu.load_plugin("savu.plugins.downsample_filter")
        output = tu.get_appropriate_output_data(plugin, data)[0]
        self.assertEqual(output.get_data_shape(), (111, 68, 80))
|
[
"mark.basham@diamond.ac.uk"
] |
mark.basham@diamond.ac.uk
|
c35da0bd6fcac14b0c3221c0d46ff93bb1b5cb86
|
c7262339c9436af9e71f9a9beb3408d92016d04c
|
/CVServer/basic_view.py
|
47bb4e4248a0cf4279f1256a08171ec062db6030
|
[] |
no_license
|
Zachary4biz/dm_server
|
3edbe8ee2072c53dabfb36036565fd9b700ff926
|
8172fae7ae8c5146bb7bbe61defefdabc34f256c
|
refs/heads/master
| 2022-08-23T07:46:51.363195
| 2020-05-25T04:01:10
| 2020-05-25T04:01:10
| 195,803,576
| 2
| 1
| null | 2022-06-21T23:03:40
| 2019-07-08T12:02:43
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
# encoding=utf-8
from django.http import HttpResponse
# 返回渲染过的html页面
from django.shortcuts import render
outside_value="abc"
def hello(request):
    """Render basic_view.html with a demo parameter and the optional
    ?section= query argument (empty string when absent)."""
    context = {
        'param1': outside_value,
        'section': request.GET['section'] if 'section' in request.GET else '',
    }
    return render(request, 'basic_view.html', context)
def hello_post(request):
    """Render basic_view.html from a POSTed 'section' field (empty if absent)."""
    section = request.POST['section'] if 'section' in request.POST else ''
    context = {"param1": "First Param", "section": section}
    return render(request, 'basic_view.html', context)
import json
def test(request):
    """Echo img_url and id back as JSON.

    Returns HTTP 200 with the echoed values when both query parameters
    are present, HTTP 400 otherwise.
    """
    params = request.GET
    if 'img_url' in params and 'id' in params:
        json_str = json.dumps({"img_url":params["img_url"], "id":params["id"]})
        return HttpResponse(json_str, status=200, content_type="application/json,charset=utf-8")
    # BUG FIX: the original fell through and returned None, which makes
    # Django raise "The view didn't return an HttpResponse". Return an
    # explicit 400 for missing parameters instead.
    return HttpResponse(json.dumps({"error": "img_url and id are required"}),
                        status=400, content_type="application/json,charset=utf-8")
|
[
"zhoutong@apusapps.com"
] |
zhoutong@apusapps.com
|
9ef8b78b6ef1f97f7c36da6e1bf048580bb6b4ee
|
28def0cefc61b38723a393ea76610e7a3fbc27c0
|
/LipSDP/examples/mnist_example.py
|
5179207e908a8e2c9bf11ee432fb85f414d3a6de
|
[
"MIT"
] |
permissive
|
arobey1/LipSDP
|
1d9d1645cf9b11680113a65979669e0fd90a15b3
|
bf01cafe97195330f4eb516d7b3a57247b31c5b3
|
refs/heads/master
| 2022-03-16T06:54:46.580493
| 2022-03-07T15:44:14
| 2022-03-07T15:44:14
| 217,307,826
| 49
| 16
|
MIT
| 2019-12-22T04:21:15
| 2019-10-24T13:36:41
|
MATLAB
|
UTF-8
|
Python
| false
| false
| 5,145
|
py
|
import torch
import torch.nn as nn
from torchvision import datasets, transforms
import torch.optim as optim
from torchsummary import summary
from MNIST_Net import Network
from scipy.io import savemat
import numpy as np
import os
INPUT_SIZE = 784
OUTPUT_SIZE = 10
BATCH_SIZE = 100
NUM_EPOCHS = 10
LEARNING_RATE = 1e-3
def main():
    """Train a small MLP on MNIST and save its weight matrices to
    saved_weights/mnist_weights.mat."""
    train_loader, test_loader = create_data_loaders()
    fname = os.path.join(os.getcwd(), 'saved_weights/mnist_weights.mat')

    # define neural network model and print summary
    net_dims = [INPUT_SIZE, 50, OUTPUT_SIZE]
    model = Network(net_dims, activation=nn.ReLU).net
    summary(model, (1, INPUT_SIZE))

    # train model (final test accuracy is printed by test_model)
    train_network(model, train_loader, test_loader)

    # save data to saved_weights/ directory
    # BUG FIX: `np.object` was deprecated in NumPy 1.20 and removed in
    # 1.24 — the builtin `object` dtype stores the ragged list of
    # differently-shaped weight matrices identically.
    weights = extract_weights(model)
    data = {'weights': np.array(weights, dtype=object)}
    savemat(fname, data)
def extract_weights(net):
    """Extract weights of trained neural network model

    params:
        * net: torch.nn instance - trained neural network model

    returns:
        * weights: list of arrays - weights of neural network
            (biases are skipped; only parameters whose state-dict key
            contains 'weight' are collected, in state-dict order)
    """
    state = net.state_dict()
    return [
        state[key].detach().numpy().astype(np.float64)
        for key in state
        if 'weight' in key
    ]
def train_network(model, train_loader, test_loader):
    """Train a neural network with Adam optimizer

    params:
        * model: torch.nn instance - neural network model
        * train_loader: DataLoader instance - train dataset loader
        * test_loader: DataLoader instance - test dataset loader

    returns:
        * accuracy: float - accuracy of trained neural network
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    for epoch_num in range(1, NUM_EPOCHS + 1):
        train_model(model, train_loader, optimizer, criterion, epoch_num)

    # Evaluate once after all epochs (fixed misspelled local 'accurary').
    accuracy = test_model(model, test_loader)
    return accuracy
def create_data_loaders():
    """Create DataLoader instances for training and testing neural networks

    returns:
        * train_loader: DataLoader instance - loader for training set
        * test_loader: DataLoader instance - loader for test set
    """
    # Downloads MNIST into /tmp on first use; both loaders use the
    # module-level BATCH_SIZE.
    # NOTE(review): shuffle=False on the *training* loader is unusual for
    # SGD-style training — confirm this is intentional.
    train_set = datasets.MNIST('/tmp', train=True, download=True, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    test_set = datasets.MNIST('/tmp', train=False, download=True, transform=transforms.ToTensor())
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)

    return train_loader, test_loader
def train_model(model, train_loader, optimizer, criterion, epoch_num, log_interval=200):
    """Train neural network model with Adam optimizer for a single epoch

    params:
        * model: nn.Sequential instance - NN model to be tested
        * train_loader: DataLoader instance - Training data for NN
        * optimizer: torch.optim instance - Optimizer for NN
        * criterion: torch.nn.CrossEntropyLoss instance - Loss function
        * epoch_num: int - Number of current epoch
        * log_interval: int - interval to print output

    modifies:
        weights of neural network model instance
    """
    model.train() # Set model to training mode

    for batch_idx, (data, target) in enumerate(train_loader):
        # Flatten (B, 1, 28, 28) images to (B, 784) vectors.
        # NOTE(review): view(BATCH_SIZE, -1) assumes every batch has
        # exactly BATCH_SIZE samples (drop_last is not set) — confirm.
        data = data.view(BATCH_SIZE, -1)
        optimizer.zero_grad() # Zero gradient buffers
        output = model(data) # Pass data through the network
        loss = criterion(output, target) # Calculate loss
        loss.backward() # Backpropagate
        optimizer.step() # Update weights

        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tCross-Entropy Loss: {:.6f}'.format(
                epoch_num, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data.item()))
def test_model(model, test_loader):
    """Test neural network model using argmax classification

    params:
        * model: nn.Sequential instance - torch NN model to be tested
        * test_loader: - Test data for NN

    returns:
        * test_accuracy: float - testing classification accuracy (percent)
    """
    model.eval()
    total, correct = 0, 0

    with torch.no_grad():
        for data, labels in test_loader:
            # NOTE(review): assumes each batch has exactly BATCH_SIZE
            # samples, like train_model — confirm drop behavior.
            data = data.view(BATCH_SIZE, -1)
            output = model(data)
            # Predicted class is the argmax over the output logits.
            _, predicted = torch.max(output.data, 1)
            total += labels.size(0)                    # Increment the total count
            correct += (predicted == labels).sum()     # Increment the correct count

    test_accuracy = 100 * correct.numpy() / float(total)
    print('Test Accuracy: %.3f %%\n' % test_accuracy)

    return test_accuracy
if __name__ == '__main__':
main()
|
[
"arobey1@seas.upenn.edu"
] |
arobey1@seas.upenn.edu
|
aa24bd9aa8388674cc963220cfe43d39a2a3bb60
|
d78ed7a8a1eef3e386d0ac88b9a96647d004e540
|
/pitchmyob/apps/candidacy/api/serializers.py
|
cbe8d5b1d97c309abb62bf5ff81cdc1beee77930
|
[] |
no_license
|
yannistannier/django-pitchmyjob
|
d4cdcb39da72c28dc2867af4e3271a5d44fcf053
|
7b78feac01bb4b8b2ad8da8e82323f22fba478c4
|
refs/heads/master
| 2021-05-09T01:48:29.967322
| 2018-01-27T17:36:44
| 2018-01-27T17:36:44
| 119,186,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,142
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from rest_framework import serializers
from django.utils import timezone
from django.utils.translation import ugettext as _
from apps.applicant.api.serializers import ApplicantFullSerializer
from apps.authentication.api.serializers import UserSerializer
from apps.job.api.serializers import JobFullSerializer, ValidateJobSerializer
from ..models import Candidacy, CandidacyComment
class CandidacyProReadSerializer(serializers.ModelSerializer):
    """Read-only candidacy representation embedding the full applicant."""

    applicant = ApplicantFullSerializer()

    class Meta:
        model = Candidacy
        fields = ('id', 'applicant', 'status', 'date_matching', 'date_like', 'date_request', 'date_video',
                  'date_decision')
class CandidacyProResumeSerializer(serializers.ModelSerializer):
    """Compact candidacy representation (ids and status only)."""

    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
class CandidacyProRequestSerializer(ValidateJobSerializer, serializers.ModelSerializer):
    """Create/update a candidacy as a pro-side request.

    On save, the requesting user is recorded as the collaborator, the
    status is forced to REQUEST and date_request is stamped — clients
    cannot set any of these (id/status are read-only).
    """

    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
        read_only_fields = ('id', 'status',)

    def get_validated_data(self, validated_data):
        # Inject the server-controlled fields before delegating to DRF.
        validated_data.update({
            'collaborator': self.context.get('request').user,
            'status': Candidacy.REQUEST,
            'date_request': timezone.now(),
        })
        return validated_data

    def create(self, validated_data):
        return super(CandidacyProRequestSerializer, self).create(self.get_validated_data(validated_data))

    def update(self, instance, validated_data):
        return super(CandidacyProRequestSerializer, self).update(instance, self.get_validated_data(validated_data))
class CandidacyProActionSerializer(serializers.ModelSerializer):
    """Base serializer for pro-side decisions on a candidacy.

    Subclasses must define ``status_value``; update() discards client
    input and only sets that status plus date_decision.
    """

    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'applicant', 'status')
        read_only_fields = ('job', 'applicant', 'status')

    def update(self, instance, validated_data):
        return super(CandidacyProActionSerializer, self).update(instance, {
            'status': self.status_value,
            'date_decision': timezone.now()
        })
class CandidacyProApproveSerializer(CandidacyProActionSerializer):
    """Pro decision: mark the candidacy as SELECTED."""

    status_value = Candidacy.SELECTED
class CandidacyProDisapproveSerializer(CandidacyProActionSerializer):
    """Pro decision: mark the candidacy as NOT_SELECTED."""

    status_value = Candidacy.NOT_SELECTED
class CandidacyApplicantReadSerializer(serializers.ModelSerializer):
    """Read-only candidacy representation embedding the full job."""

    job = JobFullSerializer()

    class Meta:
        model = Candidacy
        fields = ('id', 'job', 'status', 'date_matching', 'date_like', 'date_request', 'date_video', 'date_decision',
                  'matching_score')
class CandidacyApplicantActionSerializer(CandidacyProActionSerializer):
    """Base serializer for applicant-side actions.

    Subclasses define ``status_value`` and ``date_field``; update()
    sets that status and stamps the corresponding date column.
    """

    class Meta:
        model = Candidacy
        fields = ('job', 'applicant', 'status')
        read_only_fields = ('job', 'applicant', 'status')

    def update(self, instance, validated_data):
        return super(CandidacyApplicantActionSerializer, self).update(instance, {
            'status': self.status_value,
            self.date_field: timezone.now()
        })
class CandidacyApplicantLikeSerializer(CandidacyApplicantActionSerializer):
    """Applicant action: set status LIKE and stamp date_like."""

    status_value = Candidacy.LIKE
    date_field = 'date_like'
class CandidacyApplicantVideoSerializer(CandidacyApplicantActionSerializer):
    """Applicant action: set status VIDEO and stamp date_video."""

    status_value = Candidacy.VIDEO
    date_field = 'date_video'
class CandidacyProCommentSerializer(serializers.ModelSerializer):
    """Comments left by collaborators on a candidacy.

    The collaborator is always the authenticated user (client input is
    ignored); validate_candidacy rejects candidacies whose job belongs
    to another pro structure.
    """

    collaborator = serializers.PrimaryKeyRelatedField(read_only=True, default=serializers.CurrentUserDefault())
    collaborator_extra = UserSerializer(source='collaborator', read_only=True)

    class Meta:
        model = CandidacyComment
        fields = ('id', 'candidacy', 'collaborator', 'collaborator_extra', 'message', 'created')
        read_only_fields = ('id',)

    def validate_candidacy(self, value):
        request = self.context.get('request')

        # Only allow commenting on candidacies of the requester's own pro.
        if value.job.pro != request.user.pro:
            raise serializers.ValidationError(_('La candidature ne correspond pas à une offre de votre structure'))

        return value
|
[
"tannier.yannis@gmail.com"
] |
tannier.yannis@gmail.com
|
783268fec98dfa0e163bb100237680e80bc7922d
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/exp-big-1129.py
|
a58276df6085978dee05b5aa8fd7af4fdbd39de1
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
# Compute x**y
def exp(x: int, y: int) -> int:
    """Return x ** y for y >= 0; returns 1 for y <= 0 (as the original did).

    Replaces the original recursive helper (RecursionError for y around
    1000) and drops the dead duplicate accumulators a2..a5 that were
    computed but never read.
    """
    result = 1
    for _ in range(max(y, 0)):
        result *= x
    return result
def exp2(x: int, y: int, x2: int, y2: int) -> int:
    """Return x ** max(y, 0); x2/y2 are accepted but unused (kept for
    interface compatibility with the original synthetic benchmark).

    Iterative replacement for the original deep recursion with dead
    duplicate accumulators.
    """
    result = 1
    for _ in range(max(y, 0)):
        result *= x
    return result
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
    """Return x ** max(y, 0); the extra parameters are accepted but unused
    (kept for interface compatibility with the original synthetic benchmark).

    Iterative replacement for the original deep recursion with dead
    duplicate accumulators.
    """
    result = 1
    for _ in range(max(y, 0)):
        result *= x
    return result
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
    """Return x ** max(y, 0); the extra parameters are accepted but unused
    (kept for interface compatibility with the original synthetic benchmark).

    Iterative replacement for the original deep recursion with dead
    duplicate accumulators.
    """
    result = 1
    for _ in range(max(y, 0)):
        result *= x
    return result
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
    """Return x ** max(y, 0); the extra parameters are accepted but unused
    (kept for interface compatibility with the original synthetic benchmark).

    BUG FIX: the original contained a corrupted initializer
    (``a4: int = $Literal``) which is a syntax error; every sibling
    function initializes it to 0. Also replaces the deep recursion and
    dead duplicate accumulators with a simple loop.
    """
    result = 1
    for _ in range(max(y, 0)):
        result *= x
    return result
# Input parameter
n:int = 42
# n2..n5 and i2..i5 are unused duplicates (synthetic benchmark padding).
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42

# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0

# Crunch
# Prints 2 ** (i % 31) for each i in 0..n; only exp() is exercised.
while i <= n:
    print(exp(2, i % 31))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
34cab7fee83819bf804417cf82f7a6de1598ece0
|
0141361f7c4d276f471ac278580479fa15bc4296
|
/Stack/nextGreaterElement.py
|
984c8067d73140262d8e6272db8305856be9046a
|
[] |
no_license
|
tr1503/LeetCode
|
a7f2f1801c9424aa96d3cde497290ac1f7992f58
|
6d361cad2821248350f1d8432fdfef86895ca281
|
refs/heads/master
| 2021-06-24T19:03:08.681432
| 2020-10-09T23:53:22
| 2020-10-09T23:53:22
| 146,689,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
class Solution:
    def nextGreaterElement(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]

        For each value in nums1, return the first greater element to its
        right in nums2, or -1 if none exists (monotonic-stack scan).
        """
        next_greater = {}
        pending = []
        for value in nums2:
            # Every pending value smaller than `value` has found its
            # next greater element.
            while pending and pending[-1] < value:
                next_greater[pending.pop()] = value
            pending.append(value)
        return [next_greater.get(value, -1) for value in nums1]
|
[
"noreply@github.com"
] |
tr1503.noreply@github.com
|
b9ecce42b1202cc56ef15d028af8b46a2235c8d4
|
28badfbfa1e1325ffb9da62e92e0b524e747f8e1
|
/1678. Goal Parser Interpretation/1678.py
|
e581adf94fa949417a99e5c3b317f360b14b2f86
|
[] |
no_license
|
saransappa/My-leetcode-solutions
|
b53fab3fc9bcd96ac0bc4bb03eb916820d17584c
|
3c5c7a813489877021109b152b190456cdc34de6
|
refs/heads/master
| 2021-08-16T13:52:33.230832
| 2021-07-30T11:54:06
| 2021-07-30T11:54:06
| 217,449,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
class Solution:
    def interpret(self, command: str) -> str:
        """Decode a Goal Parser command: 'G' -> 'G', '()' -> 'o',
        '(al)' -> 'al'."""
        decoded = command.replace("()", 'o')
        decoded = decoded.replace("(al)", "al")
        return decoded
|
[
"saran.sappa@gmail.com"
] |
saran.sappa@gmail.com
|
e2a0f865427c8f0fdbd23b1ba4230258a10d1af1
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/graphics/py-ggplot/patches/patch-ggplot_utils.py
|
b76148150a1804a15c7f31595981c388dcbc5a11
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
$NetBSD: patch-ggplot_utils.py,v 1.1 2019/06/17 20:27:16 adam Exp $
Fix for newer Pandas.
--- ggplot/utils.py.orig 2019-06-17 20:04:20.000000000 +0000
+++ ggplot/utils.py
@@ -78,7 +78,7 @@ def is_iterable(obj):
return False
date_types = (
- pd.tslib.Timestamp,
+ pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
|
[
"adam@pkgsrc.org"
] |
adam@pkgsrc.org
|
a1d79cca2c5104fb9206731a66b2537a7d285f19
|
b92c39c8498e0c6579a65430e63b7db927d01aea
|
/python/zookeeper/t.py
|
8e6891b9cc06ac9523ab041255547bfa5f4cfd5d
|
[] |
no_license
|
szqh97/test
|
6ac15ad54f6d36e1d0efd50cbef3b622d374bb29
|
ba76c6ad082e2763554bdce3f1b33fea150865dc
|
refs/heads/master
| 2020-04-06T05:40:55.776424
| 2019-01-14T06:37:38
| 2019-01-14T06:37:38
| 14,772,703
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!/usr/bin/env python
import os
import logging
import sys

# Demo: route one log record to both a file ('test.log') and stderr.
log = logging.getLogger('test-log')
# BUG FIX: without an explicit level the logger inherits the root
# default (WARNING), so the INFO record below was dropped before ever
# reaching the handlers.
log.setLevel(logging.INFO)

formatter = logging.Formatter('%(threadName)s %(asctime)s %(name)-15s %(levelname)-8s: %(message)s\n')

file_handler = logging.FileHandler('test.log')
file_handler.setFormatter(formatter)

# The stderr handler intentionally has no formatter (message only).
stream_handler = logging.StreamHandler(sys.stderr)

log.addHandler(file_handler)
log.addHandler(stream_handler)
log.info('sss')
|
[
"szqh97@163.com"
] |
szqh97@163.com
|
1cd5da762984aa9eec883c2279dafda8f823ba8d
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/p38a_input/L2N/2N-3FN_MD_NVT_rerun/set.py
|
f0fb68d8e34600e77687fd0bb497d33d7acb1086
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
import os

# Root of this thermodynamic-integration run and its input templates.
# NOTE: 'dir' shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/p38a/L2N/MD/ti_one-step/2N_3FN/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'

# Lambda windows; one working directory is (re)created per value.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]

for j in lambd:
    # Recreate a clean directory named after the lambda value and enter it.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'

    #equiin
    # Copy the equilibration template and substitute XXX with lambda.
    eqin = workdir + "%6.5f_equi.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))

    #prodin
    # Same substitution for the production input.
    prodin = workdir + "%6.5f_prod.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))

    #PBS
    # Same substitution for the batch-submission script.
    pbs = workdir + "%6.5f.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))

    #top
    # Topology and restart coordinates shared by every window.
    os.system("cp ../2N-3FN_merged.prmtop .")
    os.system("cp ../0.5_equi_0.rst .")

    #submit pbs
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
adb5db04bf882b17a899d77604e756bf0d9d820e
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_206/run_cfg.py
|
b9f6e4a67763432bc61a0361d65f5f9d9897e0c4
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_983.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_984.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_985.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_986.root',
'/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_987.root')
)
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
e5ef76fee0173c91abd8ae221d0dbccf96b57853
|
5608a9cd3bec8cab1c3f9d7f42896107b78593cc
|
/tests/unit/cfngin/hooks/test_iam.py
|
1a5cfdf027e579f17585b5d71d8e6463801586c8
|
[
"Apache-2.0"
] |
permissive
|
troyready/runway
|
cdee6d94f42173c8aa0bd414620b68be36a510aa
|
4fd299961a4b73df39e14f4f19a7236f7be17dd8
|
refs/heads/master
| 2021-06-18T16:05:30.712211
| 2021-01-14T01:44:32
| 2021-01-14T01:44:32
| 151,314,626
| 0
| 0
|
Apache-2.0
| 2018-10-02T19:55:09
| 2018-10-02T19:55:08
| null |
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
"""Tests for runway.cfngin.hooks.iam."""
import unittest
import boto3
from awacs.helpers.trust import get_ecs_assumerole_policy
from botocore.exceptions import ClientError
from moto import mock_iam
from runway.cfngin.hooks.iam import _get_cert_arn_from_response, create_ecs_service_role
from ..factories import mock_context, mock_provider
REGION = "us-east-1"
# No test for stacker.hooks.iam.ensure_server_cert_exists until
# updated version of moto is imported
# (https://github.com/spulec/moto/pull/679) merged
class TestIAMHooks(unittest.TestCase):
    """Tests for runway.cfngin.hooks.iam."""

    def setUp(self):
        """Run before tests."""
        self.context = mock_context(namespace="fake")
        self.provider = mock_provider(region=REGION)

    def test_get_cert_arn_from_response(self):
        """Test get cert arn from response."""
        arn = "fake-arn"
        # Creation response
        response = {"ServerCertificateMetadata": {"Arn": arn}}
        self.assertEqual(_get_cert_arn_from_response(response), arn)
        # Existing cert response (same payload nested one level deeper)
        response = {"ServerCertificate": response}
        self.assertEqual(_get_cert_arn_from_response(response), arn)

    def test_create_service_role(self):
        """Test create service role."""
        with mock_iam():
            client = boto3.client("iam", region_name=REGION)
            role_name = "ecsServiceRole"
            # Sanity check: role must not exist before the hook runs.
            with self.assertRaises(ClientError):
                client.get_role(RoleName=role_name)

            self.assertTrue(
                create_ecs_service_role(context=self.context, provider=self.provider,)
            )

            # Hook must have created the role and attached the policy.
            role = client.get_role(RoleName=role_name)
            self.assertIn("Role", role)
            self.assertEqual(role_name, role["Role"]["RoleName"])
            policy_name = "AmazonEC2ContainerServiceRolePolicy"
            client.get_role_policy(RoleName=role_name, PolicyName=policy_name)

    def test_create_service_role_already_exists(self):
        """Test create service role already exists."""
        with mock_iam():
            client = boto3.client("iam", region_name=REGION)
            role_name = "ecsServiceRole"
            # Pre-create the role; the hook must still succeed (idempotent).
            client.create_role(
                RoleName=role_name,
                AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json(),
            )

            self.assertTrue(
                create_ecs_service_role(context=self.context, provider=self.provider,)
            )

            role = client.get_role(RoleName=role_name)
            self.assertIn("Role", role)
            self.assertEqual(role_name, role["Role"]["RoleName"])
            policy_name = "AmazonEC2ContainerServiceRolePolicy"
            client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
|
[
"noreply@github.com"
] |
troyready.noreply@github.com
|
29e29394f428b36553cae90bdd87ff1634203acf
|
1dacbf90eeb384455ab84a8cf63d16e2c9680a90
|
/pkgs/numexpr-2.6.1-np111py27_nomkl_0/lib/python2.7/site-packages/numexpr/version.py
|
489f86851cdbbbb05f7542c3461194c22d116ca2
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
wangyum/Anaconda
|
ac7229b21815dd92b0bd1c8b7ec4e85c013b8994
|
2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6
|
refs/heads/master
| 2022-10-21T15:14:23.464126
| 2022-10-05T12:10:31
| 2022-10-05T12:10:31
| 76,526,728
| 11
| 10
|
Apache-2.0
| 2022-10-05T12:10:32
| 2016-12-15T05:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 366
|
py
|
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
version = '2.6.1'
|
[
"wgyumg@mgail.com"
] |
wgyumg@mgail.com
|
2d2d2b7570f4b96dd7bebf921993bfae435fa0e2
|
b1d1797241a12fb318c0f841fceebece4e34078b
|
/{{cookiecutter.project_slug}}/src/{{cookiecutter.package_name}}/application_config.py
|
b7f5033eacd9881a72f8b9712e4aaac0cb2767e7
|
[
"MIT"
] |
permissive
|
ITISFoundation/cookiecutter-simcore-pyservice
|
407a3012a20787bf48b0360498c8c45e5b8425d0
|
8e7cb6729f3ec663b6715cca54396075f1e4593e
|
refs/heads/master
| 2022-08-24T04:50:58.561916
| 2022-06-10T14:28:53
| 2022-06-10T14:28:53
| 151,701,223
| 0
| 3
|
MIT
| 2022-06-10T14:29:52
| 2018-10-05T09:52:29
|
Python
|
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
""" app's configuration
This module loads the schema defined by every subsystem and injects it in the
application's configuration scheams
It was designed in a similar fashion to the setup protocol of the application
where every subsystem is imported and queried in a specific order. The application
depends on the subsystem and not the other way around.
The app configuration is created before the application instance exists.
{# TODO: can this be done using declarative programming?? #}
{# TODO: add more strict checks with re #}
{# TODO: add support for versioning.
- check shema fits version
- parse/format version in schema
#}
"""
import logging
import trafaret as T
from servicelib import application_keys # pylint:disable=unused-import
from servicelib.application_keys import APP_CONFIG_KEY
from .resources import resources
from . import rest_config
logger = logging.getLogger(__name__)
def create_schema():
    """
    Build schema for the configuration's file
    by aggregating all the subsystem configurations
    """
    schema = T.Dict({
        "version": T.String(),
        "main": T.Dict({
            "host": T.IP,
            "port": T.Int(),
            # Accept any level name the logging module knows about.
            "log_level": T.Enum(*logging._nameToLevel.keys()), # pylint: disable=protected-access
            "enabled_development_mode": T.Bool(),
        }),
        rest_config.CONFIG_SECTION_NAME: rest_config.schema,
        ## Add here more configurations
    })

    # Guard against two subsystems registering the same section name.
    section_names = [k.name for k in schema.keys]
    assert len(section_names) == len(set(section_names)), "Found repeated section names in %s" % section_names

    return schema
# app[APP_CONFIG_KEY] = key for config object
APP_CONFIG_KEY = APP_CONFIG_KEY # pylint: disable=self-assigning-variable,bad-option-value
# config/${CLI_DEFAULT_CONFIGFILE}
CLI_DEFAULT_CONFIGFILE = 'config-container-prod.yml'
# schema for app config's startup file
app_schema = create_schema()
assert resources.exists( 'config/' + CLI_DEFAULT_CONFIGFILE ), \
"'config/%s' does not exist" % CLI_DEFAULT_CONFIGFILE
|
[
"noreply@github.com"
] |
ITISFoundation.noreply@github.com
|
2dad42052ebb90a57b78dfa90f03fb1439d28cc0
|
7427e92cc5205276a1f4b7d6244b67fc8b31a976
|
/reverseInParentheses.py
|
74eb9eb7934a9289f6b2c8a924171b95d9b34192
|
[] |
no_license
|
newfull5/CodeSignal
|
1d10837d33fa4126d6a59b76ee2b99b003043887
|
ad24711a30b3ccd252247d0eee6cf4a0b4d96e1e
|
refs/heads/master
| 2021-07-09T09:37:19.648588
| 2020-12-03T07:25:55
| 2020-12-03T07:25:55
| 216,835,128
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
def reverseInParentheses(inputString):
    """Return inputString with every parenthesized substring reversed
    (innermost first) and the parentheses removed.

    e.g. "foo(bar(baz))blim" -> "foobazrabblim".

    BUG FIX: the original's first condition (`inputString[0] == '(' or
    inputString[-1]`) was truthy for any non-empty string, so it always
    stripped and reversed the whole input; the fallback slicing logic
    was broken as well. Rebuilt with a standard stack of string
    fragments: '(' opens a fragment, ')' reverses it into its parent.
    """
    stack = ['']
    for ch in inputString:
        if ch == '(':
            stack.append('')
        elif ch == ')':
            inner = stack.pop()
            stack[-1] += inner[::-1]
        else:
            stack[-1] += ch
    return stack[0]
|
[
"noreply@github.com"
] |
newfull5.noreply@github.com
|
61812c0f57e61aa84dc418b44729db8b264f680a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/uWW8cZymSkrREdDpQ_16.py
|
95cfdd8b10b76c690ed082e36332618b4dcb9bf5
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
def sums_up(lst):
    """Find every pair of elements of lst summing to 8.

    Pairs are formed between each element and every earlier element,
    reported in encounter order with the smaller value first, as
    {'pairs': [[a, b], ...]}.
    """
    pairs = []
    seen = []
    for current in lst:
        for earlier in seen:
            if earlier + current == 8:
                pairs.append(sorted([earlier, current]))
        seen.append(current)
    return {'pairs': pairs}
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
fc0e88abd089496570a23940186a2fd152baac22
|
82be39549f5d90b1ca1bb65407ae7695e1686ed8
|
/code_challenges/147/hundred_days.py
|
fa433e7ef84669a91dd1d805173af3281a0475b4
|
[] |
no_license
|
dcribb19/bitesofpy
|
827adc9a8984d01c0580f1c03855c939f286507f
|
a1eb0a5553e50e88d3568a36b275138d84d9fb46
|
refs/heads/master
| 2023-03-02T02:04:46.865409
| 2021-02-12T01:20:30
| 2021-02-12T01:20:30
| 259,764,008
| 1
| 0
| null | 2020-10-06T13:48:16
| 2020-04-28T22:16:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
from datetime import date
from dateutil.rrule import rrule, DAILY, MO, TU, WE, TH, FR
TODAY = date(year=2018, month=11, day=29)
def get_hundred_weekdays(start_date=TODAY):
    """Return a list of hundred date objects starting from
    start_date up till 100 weekdays later, so +100 days
    skipping Saturdays and Sundays"""
    # rrule yields datetime objects; convert each to a plain date.
    weekdays = list(rrule(DAILY, count=100, byweekday=(MO, TU, WE, TH, FR), dtstart=start_date))
    return [weekday.date() for weekday in weekdays]
|
[
"daniel.cribb.10@gmail.com"
] |
daniel.cribb.10@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.