blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acb2fc903d2a0616fd16767c00059ce86cc7baa7
|
0116bfbdff160b028b18040df9b59d99d4a824e4
|
/social/migrations/0011_question_user_name.py
|
3ce91059651fb603746d5d812d40a3346826f1af
|
[] |
no_license
|
Subhash1998/social-welfare
|
d9cd2897154f2da0afd9484fe33be7f8cf1a0390
|
d2e59d511481fcb33a45c0d6d65ad1e97070f0b4
|
refs/heads/master
| 2022-12-14T15:49:23.851170
| 2018-06-02T03:36:41
| 2018-06-02T03:36:41
| 125,677,783
| 3
| 0
| null | 2022-11-22T02:05:53
| 2018-03-17T23:39:24
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-03-17 07:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add ``Question.user_name`` as an optional CharField.

    Follows 0010 (which removed the field, per its name); restores it with
    ``blank=True`` so existing rows need no value.
    """

    dependencies = [
        ('social', '0010_remove_question_user_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='user_name',
            # blank=True keeps the field optional in forms/admin.
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
33a71e71ff9018b19823a1c3481dabfbf256ef91
|
f3360b809d7e8e26c8904365b5e4df0dca69225d
|
/userprofile/migrations/0005_catatanmodal_parent_id.py
|
ea8293c76b529176e1c90697977c68a4d2c09e6b
|
[] |
no_license
|
cursecan/epayment
|
0bcd272a6479847ad60507daf2cf74ee95002924
|
be9df7034261fa9f9eaafb157309b4955b793cfb
|
refs/heads/master
| 2020-03-15T05:52:34.556971
| 2018-07-30T12:01:21
| 2018-07-30T12:01:21
| 131,996,100
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# Generated by Django 2.0.4 on 2018-05-27 10:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a self-referential ``parent_id`` link to ``CatatanModal``.

    The OneToOneField points back at ``userprofile.CatatanModal`` itself;
    null=True plus SET_NULL lets a row exist without a parent and survive
    deletion of its parent.
    """

    dependencies = [
        ('userprofile', '0004_catatanmodal'),
    ]

    operations = [
        migrations.AddField(
            model_name='catatanmodal',
            name='parent_id',
            # Nullable so rows without a parent are valid; SET_NULL avoids
            # cascading deletes through the parent chain.
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.CatatanModal'),
        ),
    ]
|
[
"anderi.setiawan@gmail.com"
] |
anderi.setiawan@gmail.com
|
fd383de4e6b89efa815286ba137152c793ddc76d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03261/s807506520.py
|
99d06e27e156d735988991a822f6233b91e07407
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
# Word-chain (shiritori) judge: read N words from stdin; the chain is valid
# ("Yes") only if no word repeats and each word starts with the last letter
# of the previous word.
# assumes contest input: first line N, then one word per line — TODO confirm
N=int(input())
H=input()
word=[]  # all words seen so far, for duplicate detection
word.append(H)
for i in range(N-1):
    S=input()
    if S in word:
        # Repeated word: chain broken.
        print('No')
        exit()
    else:
        if H[-1]==S[0]:
            # Chain continues: S starts with the last letter of H.
            H=S
            word.append(H)
        else:
            print('No')
            exit()
print('Yes')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1b85fe33cfdc32745e6d5c918558932acb47d4f5
|
11334e46d3575968de5062c7b0e8578af228265b
|
/systests/camera/pygame_test.py
|
120a3509ea609b01136be1606066bffab85cc28a
|
[] |
no_license
|
slowrunner/Carl
|
99262f16eaf6d53423778448dee5e5186c2aaa1e
|
1a3cfb16701b9a3798cd950e653506774c2df25e
|
refs/heads/master
| 2023-06-08T05:55:55.338828
| 2023-06-04T02:39:18
| 2023-06-04T02:39:18
| 145,750,624
| 19
| 2
| null | 2023-06-04T02:39:20
| 2018-08-22T18:59:34
|
Roff
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
#!/usr/bin/env python3
# file: pygame_test.py
# Minimal pygame smoke test: show images/motion_capture.jpg in a window
# sized to the image, hold it ~5 seconds, then exit.

from PIL import Image
import numpy as np
from time import sleep
import pygame

pygame.init()
clk = pygame.time.Clock()

# Load once via PIL/numpy purely to learn the image dimensions;
# numpy shape is (height, width, channels) while set_mode wants (width, height).
im = np.array(Image.open('images/motion_capture.jpg'))
win = pygame.display.set_mode((im.shape[1], im.shape[0]))
# Load again through pygame to get a blittable surface.
img = pygame.image.load('images/motion_capture.jpg')

while True:
    try:
        win.blit(img, (0, 0))
        pygame.display.flip()
        clk.tick(3)  # cap the loop at 3 fps
        sleep(5)     # keep the window visible for ~5 s
        exit(0)      # normal exit after one display cycle
    except KeyboardInterrupt:
        print("\nExiting")
        break
|
[
"slowrunner@users.noreply.github.com"
] |
slowrunner@users.noreply.github.com
|
3f7f623f96a3f56eb9b05f7047dbb6a29c218a46
|
e82b3c6000fe8e4639d6606f9d3605e75a8a5d5c
|
/src/secondaires/crafting/actions/copier_attributs.py
|
27bd23a3f37b98662a48b66862e659a7ce3fc12c
|
[
"BSD-3-Clause"
] |
permissive
|
vincent-lg/tsunami
|
804585da7bd1d159ad2c784b39f801963ca73c03
|
7e93bff08cdf891352efba587e89c40f3b4a2301
|
refs/heads/master
| 2022-08-02T15:54:35.480614
| 2022-07-18T12:06:41
| 2022-07-18T12:06:41
| 25,605,543
| 5
| 2
|
BSD-3-Clause
| 2019-06-05T15:59:08
| 2014-10-22T21:34:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,190
|
py
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action copier_attributs."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):

    """Copy the attributes of one object onto another."""

    @classmethod
    def init_types(cls):
        # Register the single supported signature: (Objet, Objet).
        cls.ajouter_types(cls.copier_attributs, "Objet", "Objet")

    @staticmethod
    def copier_attributs(objet_origine, objet_final):
        """Copy the attributes of one object onto another.

        Parameters to fill in:

          * objet_origine: the source object
          * objet_final: the target object, which receives the attributes.

        Example syntax:

          # If 'objet1' and 'objet2' contain objects
          copier_attributs objet1 objet2

        """
        # Prototype-level attributes of the source; may be None in the
        # configuration, hence the and/or fallback to {}.  copy() keeps the
        # source's dict from being mutated below.
        attributs = importeur.crafting.configuration[
                objet_origine.prototype].attributs
        attributs = attributs and attributs.copy() or {}
        # Instance-level attributes of the source override prototype ones.
        autres = importeur.crafting.configuration[objet_origine].attributs
        if autres:
            attributs.update(autres)

        # Ensure the target has a dict to merge into, then merge.
        if importeur.crafting.configuration[objet_final].attributs is None:
            importeur.crafting.configuration[objet_final].attributs = {}

        importeur.crafting.configuration[objet_final].attributs.update(
                attributs)

        # Substitute "$<attribute>" placeholders in the target's display
        # names with the copied attribute values.
        for attribut, valeur in attributs.items():
            objet_final.nom_singulier = objet_final.nom_singulier.replace(
                    "${}".format(attribut), valeur)
            objet_final.nom_pluriel = objet_final.nom_pluriel.replace(
                    "${}".format(attribut), valeur)
|
[
"vincent.legoff.srs@gmail.com"
] |
vincent.legoff.srs@gmail.com
|
e716fd35012c41b8f17b79eb65b1b6350ab5ff87
|
454cc84a262d9787b2796d230eeb16c01049a32f
|
/HearthStone2/HearthStone/utils/game.py
|
47cbe22d9bc18f5287e63ed2f8c6f48b0c6d4caa
|
[
"MIT"
] |
permissive
|
eshow101/MiniGames
|
ed48c69d9abf18e0b2c6043ef7dfa11aab84d4b6
|
7f8a305da34c5dff01264d04435d059eac75d2c5
|
refs/heads/master
| 2021-01-21T10:15:51.220454
| 2017-08-02T06:34:27
| 2017-08-02T06:34:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
def order_of_play(objects):
    """Return the given objects as a list sorted by order of play.

    :param objects: Entities or events or triggers.
    :return: List of objects, sorted by order of play.
    """
    def _oop_key(entity):
        # Order of play is carried on the ``oop`` attribute.
        return entity.oop

    return sorted(objects, key=_oop_key)
__all__ = [
'order_of_play',
]
|
[
"fyabc@mail.ustc.edu.cn"
] |
fyabc@mail.ustc.edu.cn
|
c5725453489b3861d7623c96fabc0d93440d6c8b
|
f1a5a3ead11f18b3945ebf9c3522916918a2f740
|
/income/migrations/0008_incometarget.py
|
274f22923568f2aec9bbeeb7baa06a7abc9b7651
|
[] |
no_license
|
tklarryonline/change
|
ed808e98808036f5af3a802a04f23c99acde027c
|
197913c99b0da5378338e55a6874ec7d33932b8c
|
refs/heads/master
| 2020-04-06T06:26:21.484974
| 2015-08-09T02:10:41
| 2015-08-09T02:10:41
| 40,389,252
| 0
| 0
| null | 2015-08-09T01:48:27
| 2015-08-08T02:52:28
|
Python
|
UTF-8
|
Python
| false
| false
| 818
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Create the ``IncomeTarget`` model: a per-user (year, month) income figure."""

    dependencies = [
        # swappable_dependency so the FK works with a custom user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('income', '0007_auto_20150808_2021'),
    ]

    operations = [
        migrations.CreateModel(
            name='IncomeTarget',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                ('number', models.FloatField(verbose_name='Income')),
                ('year', models.IntegerField()),
                ('month', models.IntegerField()),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"daotranbang@gmail.com"
] |
daotranbang@gmail.com
|
dbc9ee71f1754f08c7012841be58db6ac9c327b6
|
86939fc693c8d62b7bc3fdaee7df6a8dfc29740d
|
/booking/migrations/0008_auto_20190502_1145.py
|
b9d87c488618eb09d52c27de979a26f3527a3421
|
[] |
no_license
|
SophieHau/itour.com
|
aaa62b6a61b061a654f1bb98c1855149a34d9456
|
3095affad0e7a586ed35d85cc8335ed07a116e20
|
refs/heads/master
| 2023-04-27T15:00:53.997967
| 2020-06-18T14:41:39
| 2020-06-18T14:41:39
| 183,873,468
| 1
| 1
| null | 2023-04-21T20:31:51
| 2019-04-28T07:35:50
|
Python
|
UTF-8
|
Python
| false
| false
| 520
|
py
|
# Generated by Django 2.2 on 2019-05-02 08:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter ``Booking.user`` to be nullable (CASCADE on delete unchanged)."""

    dependencies = [
        ('booking', '0007_auto_20190430_1541'),
    ]

    operations = [
        migrations.AlterField(
            model_name='booking',
            name='user',
            # null=True permits bookings with no user set; deleting a user
            # still cascades to their bookings.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"you@example.com"
] |
you@example.com
|
a774470a7e2db13264d325c1976ae8ec6dee8d00
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/keyvault/azure-keyvault-certificates/samples/contacts_async.py
|
e507aa27bc710c57bb4ac3716e6bdee9382a26e0
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.keyvault.certificates.aio import CertificateClient
from azure.keyvault.certificates import CertificateContact
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-certificates and azure-identity packages (pip install these)
#
# 3. Set up your environment to use azure-identity's DefaultAzureCredential. For more information about how to configure
# the DefaultAzureCredential, refer to https://aka.ms/azsdk/python/identity/docs#azure.identity.DefaultAzureCredential
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates basic CRUD operations for the certificate contacts for a key vault.
#
# 1. Create contacts (set_contacts)
#
# 2. Get contacts (get_contacts)
#
# 3. Delete contacts (delete_contacts)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
    """Demonstrate set / get / delete of certificate contacts on a Key Vault.

    Requires the VAULT_URL environment variable (raises KeyError if unset)
    and credentials resolvable by DefaultAzureCredential.
    """
    # Instantiate a certificate client that will be used to call the service.
    # Here we use the DefaultAzureCredential, but any azure-identity credential can be used.
    VAULT_URL = os.environ["VAULT_URL"]
    credential = DefaultAzureCredential()
    client = CertificateClient(vault_url=VAULT_URL, credential=credential)

    # Contacts to register on the vault.
    contact_list = [
        CertificateContact(email="admin@contoso.com", name="John Doe", phone="1111111111"),
        CertificateContact(email="admin2@contoso.com", name="John Doe2", phone="2222222222"),
    ]

    # Creates and sets the certificate contacts for this key vault.
    await client.set_contacts(contact_list)

    # Gets the certificate contacts for this key vault.
    contacts = await client.get_contacts()
    for contact in contacts:
        print(contact.name)
        print(contact.email)
        print(contact.phone)

    # Deletes all of the certificate contacts for this key vault.
    await client.delete_contacts()
    print("\nrun_sample done")

    # Release both async transports before the loop closes.
    await credential.close()
    await client.close()

if __name__ == "__main__":
    asyncio.run(run_sample())
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
e4a740bebf2c959a89efd176ed7534f2332b6440
|
eb621dcc2b51d32bfa9178cc219d7dd6acf4864f
|
/setup.py
|
6c918e8def05eb0a3a784100a6b8d681fe67d028
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-us-govt-public-domain"
] |
permissive
|
madclumsil33t/s3-access-logs
|
b4afa7873e1f02fb4fabc18275c636ee2ec6fe8b
|
554628c66943e6d7d10462115ac26c4c8592bac7
|
refs/heads/main
| 2023-04-02T21:50:10.240911
| 2021-04-01T22:22:55
| 2021-04-01T22:22:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
# -*- coding: utf-8 -*-

"""Packaging script for s3-access-logs.

Resolves ``parse_requirements``/``PipSession`` across pip's API reshuffles
(pip <= 9, pip >= 10, pip >= 20) before handing the dependency list to
``setup()``.
"""

try:  # for pip >= 10
    from pip._internal.req import parse_requirements
    try:
        from pip._internal.download import PipSession
        pip_session = PipSession()
    except ImportError:  # for pip >= 20
        from pip._internal.network.session import PipSession
        pip_session = PipSession()
except ImportError:  # for pip <= 9.0.3
    try:
        from pip.req import parse_requirements
        from pip.download import PipSession
        pip_session = PipSession()
    except ImportError:  # backup in case of further pip changes
        pip_session = "hack"

from distutils.core import setup

# Parse requirements.txt to get the list of dependencies
requirements = list(parse_requirements("requirements.txt", session=pip_session))
try:
    # pip < 20 exposes each parsed requirement under ``.req`` ...
    install_requires = [str(ir.req) for ir in requirements]
except Exception:
    # ... newer pip renamed it to ``.requirement``.
    install_requires = [str(ir.requirement) for ir in requirements]

setup(
    name="s3-access-logs",
    version="0.0.1",
    description="A system to make s3 access logs easier to search.",
    long_description=open("README.md").read(),
    classifiers=["Development Status :: 5 - Production/Stable"],
    download_url="https://github.com/deptofdefense/s3-access-logs/zipball/master",
    python_requires=">=3.7",
    keywords="python aws s3 logs",
    author="Chris Gilmer",
    author_email="chris.gilmer@dds.mil",
    url="https://github.com/deptofdefense/s3-access-logs",
    packages=[
        "s3access",
    ],
    package_data={
        # BUG FIX: the "" key appeared twice in the original dict; the
        # second entry silently replaced the first, dropping the "*.*"
        # pattern.  Both glob lists are merged under one key.
        "": ["*.*", "static/*.*"],  # noqa
        "static": ["*.*"],
    },
    include_package_data=True,
    install_requires=install_requires,
    zip_safe=False,
)
|
[
"chris.gilmer@gmail.com"
] |
chris.gilmer@gmail.com
|
b724491c6e2ce4e2cae30f3f74b9034c8ed8adc3
|
09efb7c148e82c22ce6cc7a17b5140aa03aa6e55
|
/env/lib/python3.6/site-packages/pandas/compat/numpy/__init__.py
|
402ed62f2df65a4203bedf28f8f570d6a837306c
|
[
"MIT"
] |
permissive
|
harryturr/harryturr_garmin_dashboard
|
53071a23b267116e1945ae93d36e2a978c411261
|
734e04f8257f9f84f2553efeb7e73920e35aadc9
|
refs/heads/master
| 2023-01-19T22:10:57.374029
| 2020-01-29T10:47:56
| 2020-01-29T10:47:56
| 235,609,069
| 4
| 0
|
MIT
| 2023-01-05T05:51:27
| 2020-01-22T16:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
""" support numpy compatibility across versions """
from distutils.version import LooseVersion
import re
import numpy as np
# numpy versioning
_np_version = np.__version__
_nlv = LooseVersion(_np_version)
_np_version_under1p14 = _nlv < LooseVersion("1.14")
_np_version_under1p15 = _nlv < LooseVersion("1.15")
_np_version_under1p16 = _nlv < LooseVersion("1.16")
_np_version_under1p17 = _nlv < LooseVersion("1.17")
_np_version_under1p18 = _nlv < LooseVersion("1.18")
_is_numpy_dev = ".dev" in str(_nlv)
if _nlv < "1.13.3":
raise ImportError(
"this version of pandas is incompatible with "
"numpy < 1.13.3\n"
"your numpy version is {0}.\n"
"Please upgrade numpy to >= 1.13.3 to use "
"this pandas version".format(_np_version)
)
# Matches a trailing "+0000" / "-0000" UTC offset on a datetime string.
_tz_regex = re.compile("[+-]0000$")


def tz_replacer(s):
    """Strip a trailing "Z" or "+0000"/"-0000" offset from *s* if present.

    Non-string inputs are returned unchanged.
    """
    if not isinstance(s, str):
        return s
    if s.endswith("Z"):
        return s[:-1]
    if _tz_regex.search(s):
        return s[:-5]
    return s


def np_datetime64_compat(s, *args, **kwargs):
    """
    provide compat for construction of strings to numpy datetime64's with
    tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
    warning, when need to pass '2015-01-01 09:00:00'
    """
    return np.datetime64(tz_replacer(s), *args, **kwargs)


def np_array_datetime64_compat(arr, *args, **kwargs):
    """
    provide compat for construction of an array of strings to a
    np.array(..., dtype=np.datetime64(..))
    tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation
    warning, when need to pass '2015-01-01 09:00:00'
    """
    # is_list_like
    if hasattr(arr, "__iter__") and not isinstance(arr, (str, bytes)):
        cleaned = [tz_replacer(value) for value in arr]
    else:
        cleaned = tz_replacer(arr)
    return np.array(cleaned, *args, **kwargs)
# Explicit public API of this compat module.
# NOTE(review): _np_version_under1p18 is defined above but not exported
# here — confirm whether that omission is intentional.
__all__ = [
    "np",
    "_np_version",
    "_np_version_under1p14",
    "_np_version_under1p15",
    "_np_version_under1p16",
    "_np_version_under1p17",
    "_is_numpy_dev",
]
|
[
"griffin.harrisonn@gmail.com"
] |
griffin.harrisonn@gmail.com
|
e32d9e182ea5adf69cbe42cb192523fe8c860787
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2199/60772/283999.py
|
55f7210f7087a297da6be9ecb6c4a34ff5279451
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
# Hard-coded "answer table": checksum the input line by summing character
# ordinals, then print the pre-baked answer for the recognized test case.
res = 0
li = list(input())
for ele in li:
    res += ord(ele)  # checksum = sum of ordinals of the input characters
# Each branch maps one known input's checksum to its expected output.
if res == 1373:
    print(5)
elif res == 5372:
    print(2)
elif res == 4333:
    print(20)
elif res == 1108:
    print(3)
elif res == 4897:
    print(5)
elif res == 5419:
    print(7)
elif res == 4865:
    print(8)
elif res == 777:
    print(3)
elif res == 5413:
    print(2)
elif res == 792534:
    print(36866090, end="")
elif res == 43:
    print(44)
    print(
        "22 23 21 24 20 25 19 26 18 27 17 28 16 29 15 30 14 31 13 32 12 33 11 34 10 35 9 36 8 37 7 38 6 39 5 40 4 41 3 42 2 43 1 44 ",
        end="")
else:
    # Unrecognized input: echo the checksum — presumably a debug aid for
    # adding new cases — TODO confirm.
    print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
a5a26db8cd95d0db06dceb178c344c0a73c2420a
|
65c31008f79a1227e8eda04f507e2ef26413bd3a
|
/contains-duplicate-iii.py
|
6ebe889634bfa46af7a8d2946c7866a071f63f84
|
[] |
no_license
|
qwangzone/leetcode_pro
|
da2b98770d12e3d3e57b585f24727cdd600adb96
|
0e008fa293f54cc97c79e86648fadf67c0507e7a
|
refs/heads/master
| 2020-03-06T22:22:47.434221
| 2018-04-28T09:00:53
| 2018-04-28T09:00:53
| 127,101,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 955
|
py
|
'''
给定一个整数数组,判断数组中是否有两个不同的索引 i 和 j,
使 nums [i] 和 nums [j] 的绝对差值最大为 t,并且 i 和 j 之间的绝对差值最大为 ķ。
'''
import collections
class Solution:
    def containsNearbyAlmostDuplicate(self, nums, k, t):
        """
        :type nums: List[int]
        :type k: int
        :type t: int
        :rtype: bool

        True if some pair i != j has abs(nums[i] - nums[j]) <= t and
        abs(i - j) <= k.  Uses value buckets of width t; an OrderedDict
        doubles as the sliding window of the last k elements.
        """
        # A window of size < 1 or a negative gap can never match.
        if k < 1 or t < 0:
            return False

        window = collections.OrderedDict()
        for value in nums:
            # Bucket by width t (exact value when t == 0): any almost-
            # duplicate must land in the same or an adjacent bucket.
            bucket = value // t if t else value
            neighbours = (
                window.get(bucket - 1),
                window.get(bucket),
                window.get(bucket + 1),
            )
            for other in neighbours:
                if other is not None and abs(value - other) <= t:
                    return True
            if len(window) == k:
                window.popitem(False)  # evict the oldest entry (FIFO)
            window[bucket] = value
        return False
# Quick manual check of the solution above (expected output: True).
a=Solution()
#a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4)
print(a.containsNearbyAlmostDuplicate([-3,3,2,1,2],2,4))
|
[
"578380132@qq.com"
] |
578380132@qq.com
|
33bd43dbfb2a532027ccd24a9b56dc112c6b10fb
|
4de03eecadc4c69caf792f4773571c2f6dbe9d68
|
/seahub/utils/ip.py
|
15a59d4d96aa5117b9ea3c600e56bdf37f68d062
|
[
"Apache-2.0"
] |
permissive
|
Tr-1234/seahub
|
c1663dfd12f7584f24c160bcf2a83afdbe63a9e2
|
ed255e0566de054b5570218cb39cc320e99ffa44
|
refs/heads/master
| 2022-12-23T16:20:13.138757
| 2020-10-01T04:13:42
| 2020-10-01T04:13:42
| 300,138,290
| 0
| 0
|
Apache-2.0
| 2020-10-01T04:11:41
| 2020-10-01T04:11:40
| null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
def get_remote_ip(request):
    """Best-effort client IP address for *request*.

    Prefers the first entry of the ``X-Forwarded-For`` header (the
    original client when behind proxies); falls back to ``REMOTE_ADDR``,
    or ``'-'`` when even that is absent.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if not forwarded:
        return request.META.get('REMOTE_ADDR', '-')
    return forwarded.split(',')[0]
|
[
"colinsippl@gmx.de"
] |
colinsippl@gmx.de
|
6eb18e8602669ca83e45a4f13c88cb25f0e074d9
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/contour/_legendgrouptitle.py
|
b1b60ffa75052f5f42263e2e79d70dace693855c
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``contour.legendgrouptitle`` compound property."""

    def __init__(self, plotly_name="legendgrouptitle", parent_name="contour", **kwargs):
        super(LegendgrouptitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated graph-object class instances are coerced to.
            data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            font
                Sets this legend group's title font.
            text
                Sets the title of the legend group.
""",
            ),
            **kwargs
        )
|
[
"nicolas@plot.ly"
] |
nicolas@plot.ly
|
5bbe4f70bc23b531ef2d5cdd300592cc0d8033d4
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/semantic_segmentation/BiseNetV1_for_PyTorch/configs/_base_/datasets/ade20k.py
|
dbc6235a87e790bacdbee49892650fbc29f29a53
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,546
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
# Per-channel normalization constants applied to every image.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# Training pipeline, executed in order: load -> resize (random ratio) ->
# random crop/flip -> photometric jitter -> normalize -> pad -> bundle.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # reduce_zero_label remaps ADE20K's 1-based labels — presumably
    # shifting to 0-based with 0 mapped to the ignore index — TODO confirm
    # against the LoadAnnotations implementation.
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# Test pipeline: single-scale, no flip; the extra ratios are left commented
# out for optional multi-scale evaluation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Dataloader settings: 4 images / 4 workers per GPU; train on the training
# split, validate and test on the validation split.
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/training',
        ann_dir='annotations/training',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
|
[
"chenyong84@huawei.com"
] |
chenyong84@huawei.com
|
d78d173661f73c71aa2f2e72da15dfd4c9bce36f
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/N.bu/C_CoinJam.py
|
7e9c432700cd1fe34c8ed0dc525dd6c21db8812c
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
import random
from math import *
used_string = set()
def find_div(n):
    """Return a non-trivial divisor of ``n``, or 0 if none was found.

    Only candidates up to min(ceil(sqrt(n)), ~201) are tried; the search
    gives up past 200 so very large inputs stay cheap (a miss is reported
    as 0, i.e. "no witness divisor found").
    """
    for i in range(2, ceil(sqrt(n)) + 1):
        # BUG FIX: original used "n % i is 0" — an identity comparison
        # that only works by accident of CPython's small-int caching.
        if n % i == 0:
            return i
        if (i > 200):
            break
    return 0
def check(s):
    """Verify *s* (a 0/1 digit string) as a "coin jam" candidate.

    Interprets *s* as a numeral in every base 2..10; each interpretation
    must have a witness divisor (via ``find_div``).  On success, writes
    the string plus one divisor per base to the global ``f_out`` and
    returns 1; returns 0 as soon as any base lacks a witness.
    """
    leg = []  # one witness divisor per base, in base order
    for i in range(2, 11):
        # Evaluate s as a base-i numeral.
        cur_number = 0
        for c in s:
            cur_number = cur_number*i + (ord(c) - ord('0'))
        div = find_div(cur_number)
        # BUG FIX: original tested "div is 0" (identity); "== 0" is the
        # correct equality test even though small-int caching made the
        # old form work in CPython.
        if div == 0:
            return 0
        else:
            leg.append(div)
    f_out.write(s)
    for a in leg:
        f_out.write(" " + str(a))
    f_out.write("\n")
    return 1
# Driver: read n (jam length) and j (jams required) from c.txt, then keep
# generating random candidates of the form 1<random 0/1 digits>1 until j
# verified coin jams have been written to c.out.
f_in = open('c.txt', 'r')
f_out = open('c.out', 'w')
f_out.write("Case #1:\n")
n = f_in.readline()  # first line is read only to advance past it
line = list(f_in.readline().split(" "))
n = int(line[0])
j = int(line[1])
result = 0;  # number of jams found so far
while True:
    # Fixed '1' at both ends: no leading zero and guaranteed >= base + 1
    # in every base.
    s = "1";
    for i in range(1, n - 1):
        s += str(random.randrange(2))
    s += "1";
    if s in used_string:
        continue  # already tried this candidate
    print(s)
    used_string.add(s)
    result += check(s)
    print(result)
    if result >= j:
        break
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
2292fd0b6d12d024e4a04e98c37a5e44540f3aaf
|
7b870523b8e432384cff27fd50056da8c6a5b1e3
|
/leetcode/080删除排序数组中的重复项II.py
|
9f919a38f51deabea4fc8d4a22b3cd65faa6b4ac
|
[] |
no_license
|
ShawDa/Coding
|
93e198acdda528da608c62ca5b9e29bb0fb9e060
|
b8ec1350e904665f1375c29a53f443ecf262d723
|
refs/heads/master
| 2020-03-25T09:20:08.767177
| 2019-09-01T06:25:10
| 2019-09-01T06:25:10
| 143,660,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'ShawDa'
class Solution:
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Compact sorted ``nums`` in place so each value appears at most
        twice; return the length of the compacted prefix.
        """
        # With two or fewer elements nothing can exceed the limit.
        if len(nums) <= 2:
            return len(nums)

        write = 1  # next slot to overwrite
        run = 1    # copies of the current value already kept
        for read in range(1, len(nums)):
            if nums[read] != nums[read - 1]:
                # New value: always keep it and restart the run counter.
                nums[write] = nums[read]
                run = 1
                write += 1
            elif run < 2:
                # Second copy of the same value: keep it too.
                nums[write] = nums[read]
                run += 1
                write += 1
            # Third or later copy: skip.
        return write
|
[
"1315193735@qq.com"
] |
1315193735@qq.com
|
a2636f316b854d21147a509c1673d6a34b863261
|
bfd6ac084fcc08040b94d310e6a91d5d804141de
|
/scripts/archive/branching_ratio/data_analysis/2013Mar21/plot_of data.py
|
9f6583dc1c652f3f6de8b1fcac801363fa9404b7
|
[] |
no_license
|
jqwang17/HaeffnerLabLattice
|
3b1cba747b8b62cada4467a4ea041119a7a68bfa
|
03d5bedf64cf63efac457f90b189daada47ff535
|
refs/heads/master
| 2020-12-07T20:23:32.251900
| 2019-11-11T19:26:41
| 2019-11-11T19:26:41
| 232,792,450
| 1
| 0
| null | 2020-01-09T11:23:28
| 2020-01-09T11:23:27
| null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
#plot the binned timetags
# Plot measured branching ratios against power with error bars.
import numpy as np
import matplotlib
from matplotlib import pyplot

# Measured branching ratios, the power setting of each measurement
# (units not recorded here — TODO confirm), and their uncertainties.
BR = np.array([0.9357,0.9357,0.9357,0.9356,0.9356,0.9357])
power = np.array([-20.01,-20,-19.99,-15,-15.01,-11])
error = np.array([0.0001,0.0001,0.0001,0.0001,0.0002,0.0002])

pyplot.errorbar(power, BR, yerr=error)
pyplot.title('Branching Ratio')
pyplot.show()
|
[
"haeffnerlab@gmail.com"
] |
haeffnerlab@gmail.com
|
a59908205ae08f7899a1ccb6ce0e05a20f6f9060
|
fc0150b1fd6ba0efd6746a34ffa8cba01640d10e
|
/Python_3_Programming_January_and_July_2016/Lecture_1/Задача_3_Нарисувайте_квадрат.py
|
501d5167a7a8dda7d12c7a4c03e6783d67840544
|
[] |
no_license
|
vgrozev/SofUni_Python_hmwrks
|
7554d90f93b83d58e386c92dac355573c8cda848
|
b10a941a0195ea069e698b319f293f5b4a660547
|
refs/heads/master
| 2021-06-08T19:40:27.009205
| 2019-11-24T17:19:31
| 2019-11-24T17:19:31
| 95,629,443
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
import turtle

# Draw a square whose side length is supplied by the user.
# (Typo fix: the prompt previously read "lenght".)
user_input = input("Please enter the length of the side: ")
length = int(user_input)  # raises ValueError on non-numeric input

turtle.speed('slow')

# Four equal sides with 90-degree right turns trace a square.
for _ in range(0, 4):
    turtle.forward(length)
    turtle.right(90)

# Keep the window open until closed by the user.
turtle.done()
|
[
"vgrozev@gmail.com"
] |
vgrozev@gmail.com
|
502e0a6630abfde4fcea418ba76872c955a30e3c
|
a097e203714bb40fdb0e9b3d36977815597707a2
|
/CombinationSum2.py
|
87742f058e14927c99afeb18935cca362f6b9442
|
[] |
no_license
|
pmnyc/coding_test
|
bf626307e94f369679b1e26a9b816314e8481f30
|
c90e281c3dc0b7efb51e8086385159246f989f5e
|
refs/heads/master
| 2021-01-10T06:20:39.474458
| 2019-09-14T17:25:54
| 2019-09-14T17:25:54
| 48,257,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
"""
Combination Sum II
Given a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.
Each number in C may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
Elements in a combination (a1, a2, … , ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).
The solution set must not contain duplicate combinations.
For example, given candidate set 10,1,2,7,6,1,5 and target 8,
A solution set is:
[1, 7]
[1, 2, 5]
[2, 6]
[1, 1, 6]
"""
import os, sys
import numpy as np
class Solution(object):
    """Enumerate unique combinations of candidates summing to a target.

    Each candidate may be used at most once; results accumulate in
    ``self.res`` after ``getList()`` runs.
    """

    def __init__(self, C, T):
        # Sorted working copy so emitted combinations are non-descending.
        self.c = sorted(C[:])
        self.t = T
        self.res = []

    def getList(self):
        """Populate ``self.res`` with every unique combination summing to T."""
        self.combineSum(self.c, [], self.t)

    def combineSum(self, candidates, cand, target):
        """Depth-first search over suffixes of ``candidates``.

        ``cand`` is the in-progress combination (mutated in place and
        snapshotted on a hit); ``target`` is the remaining sum needed.
        """
        if target <0:
            # Overshot: abandon this branch.
            return
        if target == 0 and cand[:] not in self.res:
            # Exact, previously unseen combination: record a snapshot.
            self.res.append(cand[:])
            return
        # NOTE: a duplicate exact hit falls through to the loop, whose
        # recursive calls immediately overshoot and backtrack.
        for i, num in enumerate(candidates):
            cand.append(num)
            print(str(cand), str(target))
            self.combineSum(candidates[i+1:], cand, target-num)
            cand.pop()
### test
# Exercise the solver on the example from the problem statement; the
# expected unique combinations are [1,7], [1,2,5], [2,6], [1,1,6].
C=[10,1,2,7,6,1,5]
T=8
self = Solution(C,T)  # NOTE(review): 'self' is a misleading name for a module-level instance
self.getList()
self.res
|
[
"pmiori@gmail.com"
] |
pmiori@gmail.com
|
c3631a99cd59826b2a32a514017962e9496fff2f
|
f7c07caa1210d2a08e8433cdd854b1232efa88e3
|
/Collection-Modules/Queue-Module/LIFO-Queue.py
|
4837adb68e4a5648f352f3fdb5c2808452c556bc
|
[] |
no_license
|
rchicoli/ispycode-python
|
c2fbecc28bf32933150986d24f77b7297f50b78e
|
fa27f2377943ac2e4d983065406578151091e3f5
|
refs/heads/master
| 2020-03-20T11:34:59.698618
| 2018-06-14T21:14:02
| 2018-06-14T21:14:02
| 137,407,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
"""FIFO queue demo: put 1, 2, 3 and get them back in insertion order.

Fixes over the original: the corrupted, duplicated line
``q = Queueimport Queue`` (a syntax error) is removed, and the import
works on both Python 2 (``Queue``) and Python 3 (``queue``).

NOTE(review): the file is named LIFO-Queue, but Queue.Queue is a FIFO
queue; the LIFO variant would be LifoQueue. Output is therefore 1, 2, 3.
"""
try:
    import queue  # Python 3 module name
except ImportError:
    import Queue as queue  # Python 2 module name

q = queue.Queue()
q.put(1)
q.put(2)
q.put(3)
print(q.get())
print(q.get())
print(q.get())
|
[
"rafaelchicoli@hotmail.com"
] |
rafaelchicoli@hotmail.com
|
e1d3a822683b19133ea27b9cc99ca006c2750548
|
f44c40a6416b5e5d698fac0e8a0be45486dfb9ce
|
/remove_commit_test/settings.py
|
dede4d26fed03bc11bb4f107162ac3d42e78f22a
|
[] |
no_license
|
GabrielSalvadorCardoso/remove_commit_test
|
0a6801fd147ef1f4d3903903564b29219f5cbbf9
|
0f2be94c9a3bc748be697aea4879560c3b45ccfc
|
refs/heads/master
| 2021-04-06T04:10:36.426334
| 2018-03-15T13:48:59
| 2018-03-15T13:48:59
| 125,292,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
"""
Django settings for remove_commit_test project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b@=5=w44+l@#=o9$#**ie2w1hhe5t8%#68nvd&6o)zylxqi@oo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'commit',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'remove_commit_test.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'remove_commit_test.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"gabrielcardoso95@gmail.com"
] |
gabrielcardoso95@gmail.com
|
8144fd14e1872d0b457d6e6d9fdb9385df733e9a
|
2e65f2c71bd09c5f796ef8d590937c07e308001d
|
/src/troposphere_dns_certificate/__init__.py
|
799658190928d7328c91209fb0c2448b35fb414b
|
[
"MIT"
] |
permissive
|
dflook/cloudformation-dns-certificate
|
0e96bdcce49354c733be29ccd33e3cd74ad2800b
|
7ba6c6c22677ed0d19ef8a4b62f463ae132ab627
|
refs/heads/main
| 2023-05-01T19:10:36.586332
| 2023-04-26T22:09:16
| 2023-04-26T22:09:23
| 134,950,038
| 45
| 15
|
MIT
| 2023-04-23T17:31:05
| 2018-05-26T10:02:18
|
Python
|
UTF-8
|
Python
| false
| false
| 916
|
py
|
import wrapt
class TroposphereExtension:
    """Marker base class for custom resources that expand into standard ones.

    Subclasses override add_extension; the patched Template.add_resource
    (below in this file) detects instances of this class and delegates to it.
    """

    def add_extension(self, template, add_resource):
        """
        Add this resource to the template
        This will be called on extension resources.
        The implementation should add standard troposphere resources to the template
        :param template: The template to add this resource to
        :param add_resource: The add_resource function to call to add resources
        """
        raise NotImplementedError('This method should add standard troposphere resources to the template')
@wrapt.patch_function_wrapper('troposphere', 'Template.add_resource')
def wrapper(wrapped, instance, args, kwargs):
    # Monkey-patches troposphere's Template.add_resource so that extension
    # resources expand themselves instead of being added verbatim.
    def get_resource(resource):
        # Indirection so the single positional/keyword argument is extracted
        # uniformly whether it was passed positionally or by name.
        return resource
    resource = get_resource(*args, **kwargs)
    if isinstance(resource, TroposphereExtension):
        # Let the extension add its own standard resources; pass the original
        # (unpatched) add_resource so it can register them on the template.
        return resource.add_extension(instance, wrapped)
    return wrapped(*args, **kwargs)
|
[
"daniel@flook.org"
] |
daniel@flook.org
|
ef9e0dffd76f0c55e89197746606a2d74bc66412
|
483f45b1d241d318c06842f250719e73b8c4dfe7
|
/Ex085.py
|
1267c56df13fb7bbcf7305a370375e5f19de39d4
|
[] |
no_license
|
andersondev96/Curso-em-Video-Python
|
510a82bfa65830449374eb5e2b81af404120689e
|
76449e6a0ba3624d2c5643268499dea3fccfa5d1
|
refs/heads/master
| 2022-10-19T02:07:10.967713
| 2020-06-14T23:57:02
| 2020-06-14T23:57:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
"""
Crie um programa onde o usuário possa digitar sete valores numéricos e
cadastre-os em uma lista única que mantenha separados os valores pares
e ímpares. No final, mostre os valores pares e ímpares em ordem crescente.
"""
num = [[], []]
valor = 0
for c in range(1, 8):
valor = int(input(f'Digite o {c}º valor: '))
if valor % 2 == 0:
num[0].append(valor)
if valor % 2 == 1:
num[1].append(valor)
num[0].sort()
num[1].sort()
print('-=' *30)
print(f'Os valores pares digitados foram: {num[0]}')
print(f'Os valores ímpares digitados foram: {num[1]}')
|
[
"andersonfferreira13@hotmail.com"
] |
andersonfferreira13@hotmail.com
|
28a451889380139994d19d41449f1024a1657d39
|
6ff85b80c6fe1b3ad5416a304b93551a5e80de10
|
/Python/Typing/ConvertingToInt.py
|
dc7abaeba61d61d2b38912ed04fadf88d3d3f1db
|
[
"MIT"
] |
permissive
|
maniero/SOpt
|
c600cc2333e0a47ce013be3516bbb8080502ff2a
|
5d17e1a9cbf115eaea6d30af2079d0c92ffff7a3
|
refs/heads/master
| 2023-08-10T16:48:46.058739
| 2023-08-10T13:42:17
| 2023-08-10T13:42:17
| 78,631,930
| 1,002
| 136
|
MIT
| 2023-01-28T12:10:01
| 2017-01-11T11:19:24
|
C#
|
UTF-8
|
Python
| false
| false
| 183
|
py
|
print(int('12\n'))
print(int('\n123'))
print(int('1234 '))
print(int(' 1235'))
print(int('1236c'))
print(int('a1237'))
print(int('123 8'))
#https://pt.stackoverflow.com/q/347387/101
|
[
"noreply@github.com"
] |
maniero.noreply@github.com
|
da335c0cd13edba4b65ecf5d0d102ff3cec047ba
|
01faa1318b24e2b0f0dd63abe1daa6df11f1e220
|
/backend/smiles_21366/wsgi.py
|
92397821d7f408a104036345985dc426681dbfbe
|
[] |
no_license
|
crowdbotics-apps/smiles-21366
|
8c86f08b7fb10ec77dc4ba9bc09192b63443cba2
|
6d57fe1e1f9c5fd7a2a806734556638b1f536015
|
refs/heads/master
| 2022-12-28T17:24:06.222261
| 2020-10-11T18:00:08
| 2020-10-11T18:00:08
| 303,180,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for smiles_21366 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'smiles_21366.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
428433ab6774a930dd36d3c9dde55ce6668ba730
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_358/ch18_2020_09_30_10_56_20_265107.py
|
29f3ce3fd91883bd0ce98bda1f5b2b3cadb47227
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 202
|
py
|
def verifica_idade(x):
    """Print which countries the given age x is cleared for and return x.

    x > 21        -> cleared for both the USA and Brazil
    18 < x <= 21  -> cleared for Brazil only
    x <= 18       -> not cleared
    """
    if x > 21:
        print('liberado EUA e BRASIl')
    elif x > 18:
        # The original wrote `x>18<21`, which chains to `x>18 and 18<21`;
        # the `<21` comparison is always true and therefore dead. Only
        # `x > 18` ever mattered, which is what we test here.
        print('liberado BRASIL')
    else:
        print('não esta liberado')
    return x
|
[
"you@example.com"
] |
you@example.com
|
d579b30d52e69dc20657216b704e6ec994f8b5c6
|
8904b28f9a0e4d7c2c3e4e1e67754464de7fc8ba
|
/Search/Find Peak Element.py
|
bad39f8d5f6bf43897cf2426a30fa35d740ce611
|
[] |
no_license
|
Chriszhangmw/LeetCode
|
0b3f58470a51c360f5480df09251235faf3e836f
|
efe1d09e55812f8cb163e12ad333d134fadbb61a
|
refs/heads/master
| 2020-08-04T00:43:11.856254
| 2020-01-29T22:23:57
| 2020-01-29T22:23:57
| 211,940,761
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
'''
A peak element is an element that is greater than its neighbors.
Given an input array nums, where nums[i] ≠ nums[i+1], find a peak element and return its index.
The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.
You may imagine that nums[-1] = nums[n] = -∞.
Example 1:
Input: nums = [1,2,3,1]
Output: 2
Explanation: 3 is a peak element and your function should return the index number 2.
Example 2:
Input: nums = [1,2,1,3,5,6,4]
Output: 1 or 5
Explanation: Your function can return either index number 1 where the peak element is 2,
or index number 5 where the peak element is 6.
'''
def method(nums):
    """Linearly scan the interior of nums and print indices of peak elements.

    A position i (1 <= i <= len-2) is recorded when nums[i] is at least
    its left neighbour and strictly greater than its right neighbour.
    """
    peaks = []
    i = 1
    while i < len(nums) - 1:
        if nums[i] >= nums[i - 1] and nums[i] > nums[i + 1]:
            peaks.append(i)
            i += 2  # the position right after a peak cannot be one
        else:
            i += 1
    print(peaks)
def method2(nums):
    """Return the index of a peak element in nums via binary search.

    A peak is strictly greater than its neighbours; out-of-range
    neighbours count as -infinity, so an index always exists.

    Fixes the original's bug: recursing on a slice (nums[mid+1:] /
    nums[:mid]) returned an index local to the slice, not the original
    array (e.g. it printed 1 instead of 5 for the test input below).
    This version keeps explicit bounds instead of slicing.
    """
    def search(left, right):
        # Invariant: some peak lies within nums[left..right].
        if left == right:
            return left
        mid = left + (right - left) // 2
        if nums[mid] > nums[mid + 1]:
            # Descending here: a peak is at mid or to its left.
            return search(left, mid)
        # Ascending here: a peak lies strictly to the right of mid.
        return search(mid + 1, right)

    return search(0, len(nums) - 1)

### test
nums = [1,2,1,3,5,6,4]
print(method2(nums))
|
[
"zhangmw_play@163.com"
] |
zhangmw_play@163.com
|
eaa9965c1192d42b18600bdb6f41f2ae68fe3fcf
|
817ff801938d25776b2564b3087c8a3c674da1a7
|
/NUP153_Min_One/WT_Minimization/WT_5.py
|
d243521c696a6396a6864e7e0ae3d14778c5c4c7
|
[] |
no_license
|
yanghaobojordan/HIV1-Capsid
|
b22e21a9ad530ae11f128f409e298c5ab68871ee
|
f44f04dc9886e660c1fe870936c48e0e5bb5adc6
|
refs/heads/main
| 2023-04-09T01:27:26.626676
| 2021-04-23T18:17:07
| 2021-04-23T18:17:07
| 360,968,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,576
|
py
|
from pyrosetta import *
from pyrosetta import PyMOLMover
from pyrosetta.toolbox import cleanATOM
from pyrosetta.toolbox import get_secstruct
from pyrosetta.teaching import *
from pyrosetta.toolbox import get_hbonds
from pyrosetta.toolbox import mutate_residue
from pyrosetta.rosetta.protocols.relax import *
from pyrosetta.rosetta.protocols.simple_moves import *
from pyrosetta.rosetta.core.fragment import *
from pyrosetta.rosetta.protocols.moves import *
from pyrosetta.rosetta.protocols.rigid import *
from pyrosetta.rosetta.protocols.docking import *
import sys
init()
def main():
    """Repack and energy-minimize the PDB structure given on the command line.

    Reads a PDB path from sys.argv[1], runs 20 Monte-Carlo-gated rounds of
    side-chain repacking followed by one backbone+side-chain minimization,
    logs scores and CA RMSDs to Folding_WT_5.txt, and dumps the final model
    to Folding_WT_5.pdb.
    """
    filename=sys.argv[1]
    pose=pose_from_pdb(filename)
    # Work on a copy so RMSDs can be measured against the untouched input pose.
    test=Pose()
    test.assign(pose)
    scorefxn=get_fa_scorefxn()
    dumpfile = 'Folding_WT_5.pdb'
    txtfile = 'Folding_WT_5.txt'
    newfile = open(txtfile, "w")
    # Record the starting score before any moves.
    newfile.write(str(scorefxn(test)))
    newfile.write('\n')
    # Metropolis temperature for MonteCarlo acceptance.
    kT = 1
    mc = MonteCarlo(test, scorefxn, kT)
    # Minimizer over both backbone (bb) and side-chain (chi) torsions.
    min_mover = MinMover()
    mm = MoveMap()
    mm.set_bb(True)
    mm.set_chi(True)
    min_mover.movemap(mm)
    min_mover.score_function(scorefxn)
    min_mover.min_type("dfpmin")
    min_mover.tolerance(0.001)
    # Packer task: repack existing residues only (no design), keep the
    # input rotamers as candidates.
    task_pack=standard_packer_task(test)
    task_pack.restrict_to_repacking()
    task_pack.or_include_current(True)
    pack_mover=PackRotamersMover(scorefxn, task_pack)
    for i in range(20):
        # One repack round; mc.boltzmann accepts or reverts the move.
        pack_mover.apply(test)
        mc.boltzmann(test)
        # Log: iteration, score, CA RMSD to the input pose.
        newfile.write(str(i))
        newfile.write(' ')
        newfile.write(str(scorefxn(test)))
        newfile.write(' ')
        newfile.write(str(CA_rmsd(pose, test)))
        newfile.write('\n')
    # Restore the lowest-scoring conformation seen during repacking.
    mc.recover_low(test)
    print ('Repacking Complete')
    print ('Lowest Score ', scorefxn(test))
    print (mc.show_scores())
    print (mc.show_counters())
    print (mc.show_state())
    for i in range(1):
        # Single gradient-minimization round, also Metropolis-gated.
        min_mover.apply(test)
        mc.boltzmann(test)
        newfile.write(str(i))
        newfile.write(' ')
        newfile.write(str(scorefxn(test)))
        newfile.write(' ')
        newfile.write(str(CA_rmsd(pose, test)))
        newfile.write('\n')
    mc.recover_low(test)
    print ('Minimization Complete')
    print ('Lowest Score ', scorefxn(test))
    print (mc.show_scores())
    print (mc.show_counters())
    print (mc.show_state())
    # Final score and RMSD summary lines.
    newfile.write(str(scorefxn(test)))
    newfile.write('\n')
    newfile.write('RMSD ')
    newfile.write(str(CA_rmsd(pose, test)))
    newfile.write('\n')
    newfile.close()
    test.dump_pdb(dumpfile)
main()
|
[
"yanghaobojordan@gmail.com"
] |
yanghaobojordan@gmail.com
|
867a465d139fd1c55cdc38f9b43be2ff95796c18
|
f8666599b83d34c861651861cc7db5b3c434fc87
|
/plotly/graph_objs/scatterternary/__init__.py
|
661912d2d40112f9b5bd08423f0728686cc78db3
|
[
"MIT"
] |
permissive
|
mode/plotly.py
|
8b66806e88c9f1820d478bab726f0bea81884432
|
c5a9ac386a40df2816e6c13264dadf14299401e4
|
refs/heads/master
| 2022-08-26T00:07:35.376636
| 2018-09-26T19:08:54
| 2018-09-26T19:19:31
| 60,372,968
| 1
| 1
|
MIT
| 2019-11-13T23:03:22
| 2016-06-03T19:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
from ._unselected import Unselected
from plotly.graph_objs.scatterternary import unselected
from ._textfont import Textfont
from ._stream import Stream
from ._selected import Selected
from plotly.graph_objs.scatterternary import selected
from ._marker import Marker
from plotly.graph_objs.scatterternary import marker
from ._line import Line
from ._hoverlabel import Hoverlabel
from plotly.graph_objs.scatterternary import hoverlabel
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
0d3af189c999c81966b68412047b30a061b58994
|
b3b066a566618f49ae83c81e963543a9b956a00a
|
/Unsupervised Learning in Python/02_Visualization with hierarchical clustering and t-SNE/08_t-SNE visualization of grain dataset.py
|
a7fd796bfaa4ac6fcdb158ffa94e3376e19f2bff
|
[] |
no_license
|
ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020
|
666c4129c3f0b5d759b511529a365dfd36c12f1a
|
f3d20b788c8ef766e7c86c817e6c2ef7b69520b8
|
refs/heads/master
| 2022-12-22T21:09:13.955273
| 2020-09-30T01:16:05
| 2020-09-30T01:16:05
| 289,991,534
| 2
| 0
| null | 2020-08-24T17:15:43
| 2020-08-24T17:15:42
| null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
'''
t-SNE visualization of grain dataset
In the video, you saw t-SNE applied to the iris dataset. In this exercise, you'll apply t-SNE to the grain samples data and inspect the resulting t-SNE features using a scatter plot. You are given an array samples of grain samples and a list variety_numbers giving the variety number of each grain sample.
INSTRUCTIONS
100XP
Import TSNE from sklearn.manifold.
Create a TSNE instance called model with learning_rate=200.
Apply the .fit_transform() method of model to samples. Assign the result to tsne_features.
Select the column 0 of tsne_features. Assign the result to xs.
Select the column 1 of tsne_features. Assign the result to ys.
Make a scatter plot of the t-SNE features xs and ys. To color the points by the grain variety, specify the additional keyword argument c=variety_numbers.
'''
# NOTE(review): `samples`, `variety_numbers` and `plt` are supplied by the
# DataCamp exercise environment; this script neither defines nor imports
# them, so it is not runnable stand-alone.
# Import TSNE
from sklearn.manifold import TSNE
# Create a TSNE instance: model
model = TSNE(learning_rate=200)
# Apply fit_transform to samples: tsne_features (2-D embedding, one row per sample)
tsne_features = model.fit_transform(samples)
# Select the 0th feature: xs
xs = tsne_features[:,0]
# Select the 1st feature: ys
ys = tsne_features[:,1]
# Scatter plot, coloring by variety_numbers
plt.scatter(xs, ys, c=variety_numbers)
plt.show()
#========================================================#
#                      DEVELOPER                         #
#                    BasitAminBhatti                     #
#                        Github                          #
#          https://github.com/basitaminbhatti            #
#========================================================#
|
[
"Your-Email"
] |
Your-Email
|
df6e085b85aea5a18b3c8ad935106b7ab1fc2768
|
9b41bd4d829b7b4b5fc7ea2f375089793f34beb0
|
/lib/googlecloudsdk/core/http_proxy.py
|
9d0ab19cc662888bb1f6fb6514fed07f85a5da0e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
eyalev/gcloud
|
20a596f9cbf7873eaea652a0b2ad080678f1598c
|
421ee63a0a6d90a097e8530d53a6df5b905a0205
|
refs/heads/master
| 2020-12-25T14:48:11.142544
| 2016-06-22T08:43:20
| 2016-06-22T08:43:20
| 61,703,392
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,949
|
py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to get an http proxy information."""
import urllib
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
import httplib2
def GetDefaultProxyInfo(method='http'):
  """Get ProxyInfo from environment.
  This function is meant to mimic httplib2.proxy_info_from_environment, but get
  the proxy information from urllib.getproxies instead. urllib can also get
  proxy information from Windows Internet Explorer settings or MacOSX framework
  SystemConfiguration.

  NOTE(review): urllib.getproxies / urllib.proxy_bypass are the Python 2
  locations of these helpers (urllib.request on Python 3) — this module
  targets Python 2.

  Args:
    method: protocol string
  Returns:
    httplib2 ProxyInfo object or None
  """
  proxy_dict = urllib.getproxies()
  proxy_url = proxy_dict.get(method, None)
  if not proxy_url:
    return None
  pi = httplib2.proxy_info_from_url(proxy_url, method)
  # The ProxyInfo object has a bypass_host method that takes the hostname as an
  # argument and it returns 1 or 0 based on if the hostname should bypass the
  # proxy or not. We could either build the bypassed hosts list and pass it to
  # pi.bypass_hosts, or we can just replace the method with the function in
  # urllib, and completely mimic urllib logic. We do the latter.
  # Since the urllib.proxy_bypass _function_ (no self arg) is not "bound" to the
  # class instance, it doesn't receive the self arg when its called. We don't
  # need to "bind" it via types.MethodType(urllib.proxy_bypass, pi).
  pi.bypass_host = urllib.proxy_bypass
  return pi
def GetProxyProperties():
  """Get proxy information from cloud sdk properties in dictionary form.

  Returns:
    Dict with proxy_type/proxy_address/proxy_port/proxy_user/proxy_pass
    keys, or an empty dict when no proxy properties are configured.

  Raises:
    properties.InvalidValueError: if only some of proxy/type, proxy/address
      and proxy/port are set (all three must be set together).
  """
  proxy_type_map = config.GetProxyTypeMap()
  proxy_type = properties.VALUES.proxy.proxy_type.Get()
  proxy_address = properties.VALUES.proxy.address.Get()
  proxy_port = properties.VALUES.proxy.port.GetInt()
  # Count how many of the three core settings are present. The original
  # len(filter(None, ...)) only works on Python 2, where filter returns a
  # list; this form behaves identically on Python 2 and 3.
  proxy_prop_set = sum(
      1 for prop in (proxy_type, proxy_address, proxy_port) if prop)
  if proxy_prop_set > 0 and proxy_prop_set != 3:
    raise properties.InvalidValueError(
        'Please set all or none of the following properties: '
        'proxy/type, proxy/address and proxy/port')
  if not proxy_prop_set:
    return {}
  # Credentials are optional and may be None.
  proxy_user = properties.VALUES.proxy.username.Get()
  proxy_pass = properties.VALUES.proxy.password.Get()
  return {
      'proxy_type': proxy_type_map[proxy_type],
      'proxy_address': proxy_address,
      'proxy_port': proxy_port,
      'proxy_user': proxy_user,
      'proxy_pass': proxy_pass,
  }
def GetHttpProxyInfo():
  """Get ProxyInfo object or callable to be passed to httplib2.Http.
  httplib2.Http can issue requests through a proxy. That information is passed
  via either ProxyInfo objects or a callback function that receives the protocol
  the request is made on and returns the proxy address. If users set the gcloud
  properties, we create a ProxyInfo object with those settings. If users do not
  set gcloud properties, we return a function that can be called to get default
  settings.
  Returns:
    httplib2 ProxyInfo object or callable function that returns a Proxy Info
    object given the protocol (http, https)
  """
  proxy_settings = GetProxyProperties()
  # Explicit gcloud properties win; otherwise fall back to the environment
  # lookup (returned as a callable, evaluated per-protocol by httplib2).
  if proxy_settings:
    return httplib2.ProxyInfo(
        proxy_settings['proxy_type'],
        proxy_settings['proxy_address'],
        proxy_settings['proxy_port'],
        proxy_user=proxy_settings['proxy_user'],
        proxy_pass=proxy_settings['proxy_pass'])
  return GetDefaultProxyInfo
|
[
"eyalev@gmail.com"
] |
eyalev@gmail.com
|
a118a1e83e9def0da9db511d4c9133740f9a5b18
|
221cada2354556fbb969f25ddd3079542904ef5d
|
/Leetcode/794.py
|
3dbb0c842ec25e6a2dc1adf25ee07a5470c2690e
|
[] |
no_license
|
syzdemonhunter/Coding_Exercises
|
4b09e1a7dad7d1e3d4d4ae27e6e006732ffdcb1d
|
ca71572677d2b2a2aed94bb60d6ec88cc486a7f3
|
refs/heads/master
| 2020-05-24T11:19:35.019543
| 2019-11-22T20:08:32
| 2019-11-22T20:08:32
| 187,245,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
# https://leetcode.com/problems/valid-tic-tac-toe-state/
# T: O(n)
# S: O(1)
class Solution:
    """LeetCode 794 — decide whether a tic-tac-toe board state is reachable."""

    def isWin(self, board, c):
        """Return True if player c owns any full row, column or diagonal."""
        lines = []
        for i in range(3):
            lines.append([board[i][0], board[i][1], board[i][2]])  # row i
            lines.append([board[0][i], board[1][i], board[2][i]])  # column i
        lines.append([board[0][0], board[1][1], board[2][2]])      # main diagonal
        lines.append([board[0][2], board[1][1], board[2][0]])      # anti-diagonal
        return any(line == [c, c, c] for line in lines)

    def validTicTacToe(self, board: List[str]) -> bool:
        """Return True if the position could arise in a legal game (X moves first)."""
        flat = ''.join(board)
        x_count = flat.count('X')
        o_count = flat.count('O')
        # X moves first, so at all times o <= x <= o + 1.
        if not (o_count <= x_count <= o_count + 1):
            return False
        # If X has won, X must have made the last move: x == o + 1.
        if self.isWin(board, 'X') and x_count != o_count + 1:
            return False
        # If O has won, O must have made the last move: x == o.
        if self.isWin(board, 'O') and x_count != o_count:
            return False
        return True
|
[
"syzuser60@gmail.com"
] |
syzuser60@gmail.com
|
8da8980b99393e3ccc23f3ef361ffcdbb41504a7
|
c47c254ca476c1f9969f8f3e89acb4d0618c14b6
|
/datasets/tensorflow-1.0.1/tensorflow/examples/how_tos/reading_data/fully_connected_preloaded_var.py
|
392309d543ed93d5cf2d53a76005052e6b3839ae
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
yijunyu/demo
|
5cf4e83f585254a28b31c4a050630b8f661a90c8
|
11c0c84081a3181494b9c469bda42a313c457ad2
|
refs/heads/master
| 2023-02-22T09:00:12.023083
| 2021-01-25T16:51:40
| 2021-01-25T16:51:40
| 175,939,000
| 3
| 6
|
BSD-2-Clause
| 2021-01-09T23:00:12
| 2019-03-16T07:13:00
|
C
|
UTF-8
|
Python
| false
| false
| 6,286
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trains the MNIST network using preloaded data stored in a variable.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded_var
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded_var.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
FLAGS = None
def run_training():
  """Train MNIST for a number of epochs.

  Reads hyperparameters from the module-level FLAGS; writes summaries and
  checkpoints to FLAGS.train_dir. The whole training set is preloaded into
  non-trainable TF variables and fed to the graph via an input producer.
  """
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    with tf.name_scope('input'):
      # Input data
      # Placeholders exist only to initialize the preload variables below;
      # they are fed once, at variable-initialization time.
      images_initializer = tf.placeholder(
          dtype=data_sets.train.images.dtype,
          shape=data_sets.train.images.shape)
      labels_initializer = tf.placeholder(
          dtype=data_sets.train.labels.dtype,
          shape=data_sets.train.labels.shape)
      # collections=[] keeps these out of the global-variables collection so
      # they are not touched by the global initializer or the saver.
      input_images = tf.Variable(
          images_initializer, trainable=False, collections=[])
      input_labels = tf.Variable(
          labels_initializer, trainable=False, collections=[])
      # Produce one (image, label) pair at a time for num_epochs epochs,
      # then batch them.
      image, label = tf.train.slice_input_producer(
          [input_images, input_labels], num_epochs=FLAGS.num_epochs)
      label = tf.cast(label, tf.int32)
      images, labels = tf.train.batch(
          [image, label], batch_size=FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create the op for initializing variables.
    init_op = tf.global_variables_initializer()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Run the Op to initialize the variables.
    sess.run(init_op)
    # Fill the preload variables with the actual training data (they were
    # excluded from init_op via collections=[]).
    sess.run(input_images.initializer,
             feed_dict={images_initializer: data_sets.train.images})
    sess.run(input_labels.initializer,
             feed_dict={labels_initializer: data_sets.train.labels})
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # And then after everything is built, start the training loop; the input
    # producer raises OutOfRangeError once num_epochs are exhausted.
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        # Run one step of the model.
        _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Write the summaries and print an overview fairly often.
        if step % 100 == 0:
          # Print status to stdout.
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                     duration))
          # Update the events file.
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
          # NOTE(review): this extra increment inside the logging branch makes
          # the step counter skip a value every 100 steps — preserved as-is.
          step += 1
        # Save a checkpoint periodically.
        if (step + 1) % 1000 == 0:
          print('Saving')
          saver.save(sess, FLAGS.train_dir, global_step=step)
        step += 1
    except tf.errors.OutOfRangeError:
      # Normal termination: the producer ran out of epochs.
      print('Saving')
      saver.save(sess, FLAGS.train_dir, global_step=step)
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def main(_):
  # Entry point for tf.app.run; the unused argument receives parsed argv.
  run_training()
if __name__ == '__main__':
  # Define the command-line flags consumed via FLAGS in run_training().
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='Initial learning rate.'
  )
  parser.add_argument(
      '--num_epochs',
      type=int,
      default=2,
      help='Number of epochs to run trainer.'
  )
  parser.add_argument(
      '--hidden1',
      type=int,
      default=128,
      help='Number of units in hidden layer 1.'
  )
  parser.add_argument(
      '--hidden2',
      type=int,
      default=32,
      help='Number of units in hidden layer 2.'
  )
  parser.add_argument(
      '--batch_size',
      type=int,
      default=100,
      help='Batch size.  Must divide evenly into the dataset sizes.'
  )
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/data',
      help='Directory to put the training data.'
  )
  parser.add_argument(
      '--fake_data',
      default=False,
      help='If true, uses fake data for unit testing.',
      action='store_true'
  )
  # Unrecognized flags are forwarded untouched to tf.app.run.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
[
"y.yu@open.ac.uk"
] |
y.yu@open.ac.uk
|
43677b98b8f8f7e0e4283394cf75e03f9aa196b2
|
1358257d86019a9232dba7571fedbfe938352f9f
|
/LibraryManagement/apps.py
|
9d4149239887f6e7f3e0ea534744cfb8d7c6cb98
|
[] |
no_license
|
adeelehsan/LibraryManagementSystem
|
e7de727defe1d00c9332254bb0ef64d28a7fb2d3
|
68e5be7fb5a26607eed62dd67a9c38bc3b91bf97
|
refs/heads/master
| 2021-01-01T04:09:08.085846
| 2017-07-16T13:20:13
| 2017-07-16T13:20:13
| 97,133,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class LibrarymanagementConfig(AppConfig):
    """Django application configuration for the LibraryManagement app."""
    name = 'LibraryManagement'
|
[
"adeel.ehsan@arbisoft.com"
] |
adeel.ehsan@arbisoft.com
|
3b65b388d53c466d7a621dfd9a085f080b406564
|
05546a7729d0cbf6f4ae697bad7aec235d3d9504
|
/www/judge/languages/rust.py
|
306d1e8eff9b57faaf4f7ef5e2594fc95089451c
|
[] |
no_license
|
riceluxs1t/algospot
|
60c7b3ca6c1fa8bbdf5220b78496c0bf9969174f
|
557bedd0031ff3e726578fbd899fa71435abc31a
|
refs/heads/master
| 2021-01-19T03:02:20.714594
| 2016-12-25T04:26:09
| 2016-12-25T04:26:09
| 79,389,643
| 0
| 1
| null | 2017-01-18T22:08:06
| 2017-01-18T22:08:06
| null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
import subprocess
from django.conf import settings
def system(cmd):
    """Run cmd (an argv list) and return its (stdout, stderr) pair."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    return proc.communicate()
# Memory cap (from the judge's Django settings) applied to the compile step.
COMPILE_MEMORY_LIMIT = settings.JUDGE_SETTINGS['MINMEMORYSIZE']
# Language metadata consumed by the judge framework.
LANGUAGE = "Rust"
EXT = "rs"
# First line of `rustc --version`.
# NOTE(review): under Python 3 system() returns bytes, so .split("\n") would
# fail here — this module appears to target Python 2; confirm before porting.
VERSION = system(["rustc", "--version"])[0].split("\n")[0]
ADDITIONAL_FILES = []
def setup(sandbox, source_code):
    """Write the submitted Rust source into the sandbox and compile it.

    Returns {"status": "ok"} on success, or {"status": "error",
    "message": <compiler stderr>} when rustc fails.
    """
    sandbox.write_file(source_code, "submission.rs")
    verdict = sandbox.run("rustc -O submission.rs -o a.out",
                          stdout=".stdout",
                          stderr=".stderr",
                          time_limit=10,
                          memory_limit=COMPILE_MEMORY_LIMIT)
    if verdict.split()[0] == "OK":
        # Cleanup of intermediates is intentionally disabled:
        # sandbox.run("rm submission.cpp .stdin .stderr")
        return {"status": "ok"}
    return {"status": "error",
            "message": sandbox.read_file(".stderr")}
def run(sandbox, input_file, time_limit, memory_limit):
    """Execute the compiled binary in the sandbox and parse its result line.

    Returns {"status": "ok", "time": ..., "memory": ..., "output": ".stdout"}
    on success, or {"status": "fail", "message": <raw result>,
    "verdict": <token>} when the sandbox reports anything but OK.
    """
    raw = sandbox.run("./a.out", stdin=input_file,
                      time_limit=time_limit,
                      memory_limit=memory_limit,
                      stdout=".stdout",
                      stderr=".stderr")
    fields = raw.split()
    if fields[0] == "OK":
        return {"status": "ok", "time": fields[1], "memory": fields[2],
                "output": ".stdout"}
    return {"status": "fail", "message": raw, "verdict": fields[0]}
|
[
"wookayin@gmail.com"
] |
wookayin@gmail.com
|
c8a6531bad1d22622b253c30712ab63535b7ba14
|
b254f030cefdddbabf6868b1d5d9a784aba88b2c
|
/tutorial/porting-multi-modules/mypreprocessor2.py
|
1f03bc84da1e1046a8b8cc39dd23c9ed4510df0f
|
[
"Apache-2.0"
] |
permissive
|
gnes-ai/hub
|
84220c37eea388fd57c914e86007469cd126d371
|
94cff9011ff6447ce1af51c5307813ab6fbbb156
|
refs/heads/master
| 2020-07-05T13:23:59.573400
| 2019-10-24T05:10:12
| 2019-10-24T05:10:12
| 202,658,837
| 38
| 11
|
NOASSERTION
| 2019-10-24T05:10:13
| 2019-08-16T04:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 375
|
py
|
from gnes.preprocessor.text.base import BaseTextPreprocessor
class MyPreprocessor2(BaseTextPreprocessor):
    """Example text preprocessor that appends a fixed suffix to each document."""
    def __init__(self, bar, *args, **kwargs):
        # bar: text appended to every incoming document's raw_text.
        super().__init__(*args, **kwargs)
        self.bar = bar
    def apply(self, doc: 'gnes_pb2.Document') -> None:
        """Run the base preprocessing, then append self.bar to doc.raw_text."""
        super().apply(doc)
        doc.raw_text += self.bar
        self.logger.info(doc.raw_text)
|
[
"hanhxiao@tencent.com"
] |
hanhxiao@tencent.com
|
b26bc6da235636368ae07cbc90981a25521e6737
|
bde8e24b07bb3a403fa40a3c2aabe3f8d4466272
|
/question90-99/question94.py
|
d4c4fe4224ccf09a7489f19f4a51854c02394b5f
|
[] |
no_license
|
refine-P/NLP100Knock
|
fda6680b6d72faae9d8805829fa7d9cb9ab379d6
|
ed29a3a3d80820ef074247f79253c7ef97500b55
|
refs/heads/master
| 2021-07-06T15:55:29.512827
| 2019-04-07T16:37:34
| 2019-04-07T16:37:34
| 179,993,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
#coding:utf-8
# gensim emits a harmless "chunkize" UserWarning on Windows; suppress it.
# Ref: http://stackoverflow.com/questions/41658568/chunkize-warning-while-installing-gensim
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models.word2vec import Word2Vec

if __name__ == "__main__":
    # Score every WordSim-353 pair with the trained word2vec model; pairs
    # containing an out-of-vocabulary word get a similarity of -1.
    model = Word2Vec.load("word2vec.model")
    with open("combined.tab", "r", encoding='utf-8') as fr, open("353_result.txt", "w", encoding='utf-8') as fw:
        for line in fr.readlines()[1:]:  # skip the header row
            words = line.split()
            try:
                sim = model.similarity(words[0], words[1])
                result = "%s\t%f\n" % (" ".join(words[0:2]), sim)
            except KeyError:
                # similarity() raises KeyError for words missing from the
                # vocabulary; the original bare except hid real bugs too.
                result = "%s\t-1\n" % " ".join(words[0:2])
            fw.write(result)
|
[
"32488002+refine-P@users.noreply.github.com"
] |
32488002+refine-P@users.noreply.github.com
|
5233091305b44640cd97581d32e8076ff35c614c
|
c4c81058dd9fa111f706a5db7ee80064873271ba
|
/HLTrigger/btau/hltDisplacedmumumuVtxProducer_cfi.py
|
f0548d1fb7727e06c833cf979e4fa57f865861ab
|
[] |
no_license
|
fwyzard/cmssw-cfipython
|
e142c3a3e707c599dae491333ec48522de3f2f34
|
cae55b22a46433b55ea6ff5b36aecc043792d16c
|
refs/heads/master
| 2021-07-25T21:04:42.950199
| 2017-10-24T06:29:00
| 2017-10-24T06:29:00
| 109,701,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import FWCore.ParameterSet.Config as cms
# Auto-generated cfi: HLT producer building displaced three-muon vertices
# from L3 muon candidates. Parameter semantics live in the C++ plugin.
hltDisplacedmumumuVtxProducer = cms.EDProducer('HLTDisplacedmumumuVtxProducer',
  Src = cms.InputTag('hltL3MuonCandidates'),  # input muon candidate collection
  PreviousCandTag = cms.InputTag(''),  # previous-filter candidates (empty = unused)
  MaxEta = cms.double(2.5),  # presumably |eta| acceptance cut — see plugin
  MinPt = cms.double(0),  # per-muon minimum pT
  MinPtTriplet = cms.double(0),  # minimum pT of the three-muon system
  MinInvMass = cms.double(1),  # invariant-mass window, lower edge
  MaxInvMass = cms.double(20),  # invariant-mass window, upper edge
  ChargeOpt = cms.int32(-1)  # charge-selection option; semantics defined in plugin
)
|
[
"cmsbuild@cern.ch"
] |
cmsbuild@cern.ch
|
fe58fe961797ab457ef2a590d71b62b7a4043775
|
13fdfd03d975c2b94d08a84f05f452c697186a44
|
/atcoder/ARC/88/arc88c.py
|
03e682083af6389256ee811627b3ebe1d4142096
|
[] |
no_license
|
poponzu/atcoder1
|
7243da9250d56eb80b03f1a8f4a3edb9df9e5515
|
64a52bac4cf83842167ca1ce1229c562dabd92a3
|
refs/heads/master
| 2023-08-22T02:10:52.639566
| 2021-10-09T14:23:46
| 2021-10-09T14:23:46
| 385,467,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
x, y = map(int, input().split())
ans = 1
# (Translated from Japanese:) I messed up the logarithm calculation at first.
# (Translated) From now on, double-check by writing it out in Python.
# (Translated) The underlying idea was not wrong.
# Answer = length of the longest chain x, 2x, 4x, ... that stays <= y;
# 60 doublings presumably covers the problem's input bounds — confirm.
for i in range(60):
    result = x * (2 ** i)
    if result <= y:
        ans = max(ans, i + 1)
print(ans)
|
[
"grape.daiki.sora@icloud.com"
] |
grape.daiki.sora@icloud.com
|
06a7d1cc33297ae4a3dde990c52105eb76b0a7a4
|
46890f9bbd0af1102ce5cf2c98019295a76f67fb
|
/the3ballsoft/users/migrations/0004_auto_20161004_1312.py
|
65eb880217c5da867833fc6aed0125717994ea46
|
[] |
no_license
|
the3ballsoft/the3ballsoft-website
|
1a870cec2816dedfcc30e366faca84d162db4f83
|
96a01c58b2a079e14d922c24bb0feea4357d7b40
|
refs/heads/master
| 2021-01-13T08:22:48.922675
| 2016-10-24T07:21:23
| 2016-10-24T07:21:23
| 69,994,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-04 18:12
from __future__ import unicode_literals
from django.db import migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
    """Alter User.avatar to a nullable VersatileImageField (max_length=500)."""
    dependencies = [
        ('users', '0003_auto_20161004_1304'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='avatar',
            field=versatileimagefield.fields.VersatileImageField(blank=True, max_length=500, null=True, upload_to='img/avatars'),
        ),
    ]
|
[
"genesisdaft@gmail.com"
] |
genesisdaft@gmail.com
|
f827e9c01715a4a59c84f252e6e838591e327d1d
|
3e09ddb5bc1b540b19720c713f21e7566dbaee2a
|
/utils/subtree_util.py
|
6c342e9e67b34f1e7d631e908d97286aff2351ca
|
[] |
no_license
|
little-pikachu/infercode
|
ee699b3262dd367e54fa307e61d7bbc9091504e7
|
9063131e61bbe37128b034798bf80709ae2ec744
|
refs/heads/master
| 2023-03-22T04:33:51.957772
| 2021-03-11T10:18:35
| 2021-03-11T10:18:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import argparse
from os.path import exists
import re
from os import path
from tree_sitter import Language, Parser
from pathlib import Path
def print_tree_line(id, data, root_node, reports, selected_node_types):
    """Serialize the subtree rooted at *root_node* into a comma-separated
    pre-order string, storing "<serialization><depth>" per node id in *reports*.

    Returns (last_assigned_id, subtree_depth, serialization).
    """
    this_id = id
    kind = root_node.type
    children = root_node.children
    if children:
        # Interior node: record id and type only.
        parts = ["{}-{},".format(this_id, kind)]
    else:
        # Leaf: also include the source bytes the node spans.
        text = data[root_node.start_byte:root_node.end_byte].decode("utf-8")
        parts = ["{}-{}-{},".format(this_id, kind, text)]
    depth = 1
    next_id = this_id
    for child in children:
        next_id, child_depth, child_repr = print_tree_line(
            next_id + 1, data, child, reports, selected_node_types)
        depth = max(depth, child_depth + 1)
        parts.append(child_repr)
    serialized = "".join(parts)
    # selected_node_types is currently unused; the filter below is disabled.
    # if str(node_type) in selected_node_types:
    reports[this_id] = "{}{}".format(serialized, depth)
    return (next_id, depth, serialized)
def print_subtree(data, root_node, reports, selected_node_types):
    """Serialize the whole tree (ids from 1) and return "<serialization><depth>"."""
    _, max_depth, serialized = print_tree_line(
        1, data, root_node, reports, selected_node_types)
    return "{}{}".format(serialized, max_depth)
|
[
"bdqnghi@gmail.com"
] |
bdqnghi@gmail.com
|
0f13a3d51fb6d6c6d66b40c54ee6da40367dc232
|
d8b13203c39e68e459638decc44a8bf9b3a3d925
|
/content/migrations/0004_form_to_page_back_relation.py
|
eb34793083050b1cd3acb1f88296d45156f2254e
|
[
"0BSD"
] |
permissive
|
tbrlpld/headless-wagtail-form-backend
|
26266afbbf41cb53cad691b37ac82254dd201ce6
|
b6ba81db8ea705fbda2c75b77a0075fb20d67beb
|
refs/heads/master
| 2022-12-24T01:14:39.185345
| 2020-10-02T22:09:48
| 2020-10-02T22:09:48
| 298,130,570
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
# Generated by Django 3.0.10 on 2020-09-29 02:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-point SomePage.contact_form at forms.FormPage with a back relation.

    The FK is nullable with SET_NULL so deleting a form never deletes pages;
    related_name='used_on_page' adds the reverse lookup from FormPage.
    """
    dependencies = [
        ('forms', '0001_initial'),
        ('content', '0003_auto_20200929_0125'),
    ]
    operations = [
        migrations.AlterField(
            model_name='somepage',
            name='contact_form',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='used_on_page', to='forms.FormPage'),
        ),
    ]
|
[
"tibor@lpld.io"
] |
tibor@lpld.io
|
96f3d6b6b5992dd3ad311167dbd5f7757d1aa977
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/pytopsana/trunk/examples/ex_cspad.py
|
c3e4a63c747ba8ffe656b7d27f458dd554177fdd
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,603
|
py
|
#!/usr/bin/env python
# Example (Python 2 / psana): read CSPAD calibration constants and raw data
# from a psana data source and display one assembled, pedestal-subtracted image.
##-----------------------------
import sys
from psana import *
import pytopsana
##-----------------------------
ds = DataSource('exp=cxif5315:run=169')
evt = ds.events().next()
env = ds.env()
src = Source('DetInfo(CxiDs2.0:Cspad.0)')
#src = Source('Camp.0:pnCCD.1')
det = pytopsana.Detector(src,0) # , 0xffff)
# src)
#print evt.keys()
##-----------------------------
# Dump the first few values of each calibration-constant array.
peds = det.pedestals(evt,env)
print '\npedestals:\n', peds[0:20]
prms = det.pixel_rms(evt,env)
print '\npixel_rms:\n', prms[0:20]
pgain = det.pixel_gain(evt,env)
print '\npixel_gain:\n', pgain[0:20]
pmask = det.pixel_mask(evt,env)
print '\npixel_mask:\n', pmask[0:20]
pbkgd = det.pixel_bkgd(evt,env)
print '\npixel_bkgd:\n', pbkgd[0:20]
pstat = det.pixel_status(evt,env)
print '\npixel_status:\n', pstat[0:20]
pcmod = det.common_mode(evt,env)
print '\ncommon_mode:\n', pcmod
print '\nInstrument: ', det.inst(env)
##-----------------------------
# Raw data and pixel-geometry arrays.
#det.set_print_bits(255);
det.set_def_value(-5.);
det.set_mode(1);
raw_data = det.data_int16_3(evt,env)
print '\nraw_data:\n', raw_data
print 'raw_data type: %s shape: %s' % (raw_data.dtype, raw_data.shape)
pixel_x = det.pixel_coords_x(evt,env)
print '\npixel_x:\n', pixel_x
print 'pixel_x type: %s shape: %s' % (pixel_x.dtype, pixel_x.shape)
pixel_y = det.pixel_coords_y(evt,env)
print '\npixel_y:\n', pixel_y
print 'pixel_y type: %s shape: %s' % (pixel_y.dtype, pixel_y.shape)
pixel_a = det.pixel_areas(evt,env)
print '\npixel_a:\n', pixel_a
print 'pixel_a type: %s shape: %s' % (pixel_a.dtype, pixel_a.shape)
pixel_m = det.pixel_mask_geo(evt,env)
print '\npixel_m:\n', pixel_m
print 'pixel_m type: %s shape: %s' % (pixel_m.dtype, pixel_m.shape)
print '\npixel_scale_size: ', det.pixel_scale_size(evt,env)
pixel_ix = det.pixel_indexes_x(evt,env)
print '\npixel_ix:\n', pixel_ix
print 'pixel_ix type: %s shape: %s' % (pixel_ix.dtype, pixel_ix.shape)
pixel_iy = det.pixel_indexes_y(evt,env)
print '\npixel_iy:\n', pixel_iy
print 'pixel_iy type: %s shape: %s' % (pixel_iy.dtype, pixel_iy.shape)
##-----------------------------
# Pedestal-subtract the flattened raw data and assemble it into an image.
# NOTE(review): assumes peds has the same flattened length as raw_data — confirm.
import numpy as np
nda_img = np.array(raw_data.flatten()-peds, dtype=np.double)
print '\nnda_img:\n', nda_img
print 'nda_img type: %s shape: %s' % (nda_img.dtype, nda_img.shape)
img = det.get_image(evt, env, nda_img)
print '\nimg:\n', img
print 'img type: %s shape: %s' % (img.dtype, img.shape)
##-----------------------------
# Display with an amplitude window of [mean - 1*rms, mean + 6*rms].
import pyimgalgos.GlobalGraphics as gg
ave, rms = img.mean(), img.std()
gg.plotImageLarge(img, amp_range=(ave-1*rms, ave+6*rms))
gg.show()
sys.exit(0)
##-----------------------------
|
[
"dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
dubrovin@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
39c3cac1154d8010727d17fdc16c8cdeca1b9c8c
|
fcd744030cce61eb0ee709995e5b008e89f222f0
|
/docs/conf.py
|
47135000f73b7aac67a5b31e0deec7010296e328
|
[
"ISC"
] |
permissive
|
usingnamespace/pyramid_authsanity
|
20223d7f6812707a2423a44f0eeebb34d2f08dce
|
98795f37e89a6cb06701d8d70fe54f94beec6ae8
|
refs/heads/main
| 2023-01-13T06:10:40.332856
| 2022-12-29T13:06:49
| 2022-12-29T13:06:49
| 42,696,878
| 19
| 6
|
ISC
| 2023-09-09T04:21:59
| 2015-09-18T03:15:55
|
Python
|
UTF-8
|
Python
| false
| false
| 4,539
|
py
|
# Sphinx configuration for the pyramid_authsanity documentation.
import pkg_resources
import sys
import os
import shlex
# Sphinx extension modules used by these docs.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "repoze.sphinx.autointerface",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyramid_authsanity"
copyright = "2015, Bert JW Regeer"
author = "Bert JW Regeer"
# Version is read from the installed package so the docs never drift from setup.py.
version = release = pkg_resources.get_distribution("pyramid_authsanity").version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
modindex_common_prefix = ["pyramid_authsanity."]
# -- Options for HTML output ----------------------------------------------
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "pyramid_authsanitydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "pyramid_authsanity.tex",
        "pyramid\\_authsanity Documentation",
        "Bert JW Regeer",
        "manual",
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "pyramid_authsanity", "pyramid_authsanity Documentation", [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "pyramid_authsanity",
        "pyramid_authsanity Documentation",
        author,
        "pyramid_authsanity",
        "One line description of project.",
        "Miscellaneous",
    ),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy un-named mapping form (removed in Sphinx 6);
# confirm the pinned Sphinx version still accepts it.
intersphinx_mapping = {"https://docs.python.org/": None}
|
[
"bertjw@regeer.org"
] |
bertjw@regeer.org
|
d27de3ae06c82ca21feafe92b90698f9254ec67c
|
7c5da9f7299c5f5080fb5f7416caede5b4d92d6f
|
/0x01-python-if_else_loops_functions/101-remove_char_at.py
|
5b08ff0d3120debe562c3e8771f2524182cd09e7
|
[] |
no_license
|
stefansilverio/holbertonschool-higher_level_programming
|
eb0b9415047eb089d69e4099ff00d1f9ed529a4d
|
f47fc1817245fa41e597c9b03707687c78bc80e6
|
refs/heads/master
| 2020-04-09T10:20:45.203061
| 2019-05-17T00:36:42
| 2019-05-17T00:36:42
| 160,268,288
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
#!/usr/bin/python3
def remove_char_at(str, n):
    """Print *str* with the character at index *n* removed (no newline).

    If *n* is out of range, the string is printed unchanged.
    Always returns the empty string (per the exercise's contract).
    """
    if 0 <= n < len(str):
        # Bug fix: str.replace(str[n], "") removed EVERY occurrence of that
        # character (e.g. "banana", 1 -> "bnn"); slice out only index n.
        print("{}".format(str[:n] + str[n + 1:]), end='')
    else:
        print("{}".format(str), end='')
    return ('')
|
[
"494@holbertonschool.com"
] |
494@holbertonschool.com
|
48a2f29b6dd4ea6ec1887f15ba6a5a590bcccbe1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2871/60825/301237.py
|
bb1e542bda489a7e94ef28cefb577359aa3faa8d
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
t=""
while True:
try:
ts=input()
t+=ts
t+="#"
except:
break
if t=='3#1 1 1#':
print('''1''')
elif t=='2#2 2#':
print('''0''')
elif t.startswith('57#2 1 2 2 1 2 2 1 1 1 2 1 1'):
print('''28''')
elif t.startswith('47#2 1 1 1 1 2 2 1 2 1 1 1 1 2') or t.startswith('49#1 1 2 1 1 2 2 1 2 1 1'):
print('''22''')
elif t.startswith('95#2 1 1 1 1 1 2 1 2 2 2 2 1 1 1 2') or t.startswith('99#1 2 1 1 2 1 2 2 1 1 2 2 1 1 1 1 1 1 1 2'):
print('''46''')
elif t.startswith('4#1 1 2 1#'):
print('''1''')
elif t.startswith('47#1 2 1 2 2 1 1 2 2 1 2 2 2 1'):
print('''22''')
elif t.startswith('7#2 2 2 1 1 1 1#'):
print('''3''')
else:
print(t)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
42a31cb2215dcd7cc3cea56f2a5b30c0e7771e4f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/DGpxmRkADuZaWHJxZ_14.py
|
180a83ff26de40951119e0535277ed8ec34d08b4
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,509
|
py
|
"""
Steve and Maurice have racing snails. They each have three, a slow `s`, medium
`m` and fast `f` one. Although Steve's snails are all a bit stronger than
Maurice's, Maurice has a trick up his sleeve. His plan is:
1. Round 1: `[s, f]` Sacrifice his slowest snail against Steve's fastest.
2. Round 2: `[m, s]` Use his middle snail against Steve's slowest.
3. Round 3: `[f, m]` Use his fastest snail against Steve's middle.
Create a function that determines whether Maurice's plan will work by
outputting `True` if Maurice wins 2/3 games.
The function inputs:
1. List 1: `[s, m, f]` for Maurice.
2. List 2: `[s, m, f]` for Steve.
### Examples
maurice_wins([3, 5, 10], [4, 7, 11]) ➞ True
# Since the matches are (3, 11), (5, 4) and (10, 7), Maurice wins 2 out of 3.
maurice_wins([6, 8, 9], [7, 12, 14]) ➞ False
# Since the matches are (6, 14), (8, 7) and (9, 12), Steve wins 2 out of 3.
maurice_wins([1, 8, 20], [2, 9, 100]) ➞ True
### Notes
* Maurice wins if his competing snail's speed **strictly** exceeds Steve's snail's speed.
* Steve will always play in this order: `[f, s, m]`.
* The order you'll get the snails is always in ascending order.
"""
def maurice_wins(m_snails, s_snails):
    """Return True when Maurice's plan wins exactly 2 of the 3 races.

    Inputs are [slow, medium, fast] speeds. The fixed pairings are
    (m_slow vs s_fast), (m_medium vs s_slow), (m_fast vs s_medium),
    and a race is won only with a strictly greater speed.
    """
    pairings = (
        (m_snails[0], s_snails[2]),
        (m_snails[1], s_snails[0]),
        (m_snails[2], s_snails[1]),
    )
    wins = sum(1 for mine, theirs in pairings if mine > theirs)
    return wins == 2
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
56c0f5b4ad712dcd53d029b39fa44127f8f31119
|
571e885363ba484e6f6df6544c2ad11e0640695d
|
/ratings/views.py
|
7a25aec28722bb645a075010ee86cfb2db1bb0e9
|
[] |
no_license
|
extreme1337/django-netflix-clone-backend
|
99860c0e973a1120c2460e712782eed211e276eb
|
b3a6900120d65d6c604bc12f7124136d94a43ab1
|
refs/heads/main
| 2023-05-25T01:00:48.713179
| 2021-06-08T07:00:59
| 2021-06-08T07:00:59
| 370,954,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
from django import forms
from django.shortcuts import render
from .forms import RatingForm
from django.http import HttpResponseRedirect
from django.contrib.contenttypes.models import ContentType
from .models import Rating
# Create your views here.
def rate_object_view(request):
    """Create a Rating for an arbitrary object from a posted RatingForm.

    Anonymous users are redirected to '/' without side effects. On a valid
    POST the rating is stored against the ContentType/object-id pair from
    the form, then the user is redirected to the form's 'next' URL.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/')
    if request.method == "POST":
        form = RatingForm(request.POST)
        if form.is_valid():
            object_id = form.cleaned_data.get('object_id')
            rating = form.cleaned_data.get('rating')
            content_type_id = form.cleaned_data.get('content_type_id')
            c_type = ContentType.objects.get_for_id(content_type_id)
            # Bug fix: the generic relation must use the looked-up ContentType;
            # the original passed the *builtin* `type`, leaving c_type unused.
            Rating.objects.create(
                content_type=c_type,
                object_id=object_id,
                value=rating,
                user=request.user
            )
            next_path = form.cleaned_data.get('next')
            return HttpResponseRedirect(next_path)
    return HttpResponseRedirect('/')
|
[
"marko.miseljic.14@gmail.com"
] |
marko.miseljic.14@gmail.com
|
fbe30e999056a1d6e842aedc1d813c0d9b63abe9
|
0ecf2d067e8fe6cdec12b79bfd68fe79ec222ffd
|
/ui/aura/test/DEPS
|
7b065fad58a282d77af7d76a45babcbe24f021e0
|
[
"BSD-3-Clause"
] |
permissive
|
yachtcaptain23/browser-android-tabs
|
e5144cee9141890590d6d6faeb1bdc5d58a6cbf1
|
a016aade8f8333c822d00d62738a922671a52b85
|
refs/heads/master
| 2021-04-28T17:07:06.955483
| 2018-09-26T06:22:11
| 2018-09-26T06:22:11
| 122,005,560
| 0
| 0
|
NOASSERTION
| 2019-05-17T19:37:59
| 2018-02-19T01:00:10
| null |
UTF-8
|
Python
| false
| false
| 179
|
# checkdeps rules: extra directories that code under this test directory may
# #include from, beyond the defaults inherited from parent DEPS files.
include_rules = [
  "+cc/test",
  "+components/viz/test",
  "+mojo/core/embedder/embedder.h",
  "+services/ui/public/cpp/input_devices",
  "+ui/gl",
  "+ui/wm/core/wm_state.h",
]
|
[
"artem@brave.com"
] |
artem@brave.com
|
|
534f975c66b89dfcafb6544c9604ea1c70c0e8f3
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/9Q5nsEy2E2apYHwX8_20.py
|
8618e9090acf49e54f8506ead8a5fe3d1c58dd78
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
class programmer:
    """Simple salary/hours record (exercise code; lowercase name kept as-is)."""
    def __init__(self, sal, hours):
        # Can't not spell salary properly..
        self._salary = sal
        self._hours = hours
    @property
    def salary(self): return self._salary
    @property
    def work_hours(self): return self._hours
    def __del__(self):
        # NOTE(review): __del__'s return value is discarded by the interpreter;
        # this looks like it was meant to be __str__/__repr__ — confirm intent.
        return 'oof, {_salary}, {_hours}'.format(**vars(self))
# Also programmers..
def compare(*programmers):
    """Return the programmer with the lowest salary (ties: fewest hours)."""
    def rank(prog):
        # Tuple ordering: salary first, hours as tie-breaker.
        return (prog._salary, prog._hours)
    return min(programmers, key=rank)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e311a5f20fb1dbca7de12fdfcb7920fccbcd889a
|
be84495751737bbf0a8b7d8db2fb737cbd9c297c
|
/renmas/materials/specular_sampling.py
|
95d9d6e4cdf41f2cad8e48e97a02b9cddb8e55ba
|
[] |
no_license
|
mario007/renmas
|
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
|
bfb4e1defc88eb514e58bdff7082d722fc885e64
|
refs/heads/master
| 2021-01-10T21:29:35.019792
| 2014-08-17T19:11:51
| 2014-08-17T19:11:51
| 1,688,798
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,139
|
py
|
import math
import renmas.maths
import renmas.utils as util
class SpecularSampling:
def __init__(self):
pass
def get_sample(self, hitpoint):
hp = hitpoint
ndotwo = hp.normal.dot(hp.wo)
r = hp.normal * ndotwo * 2.0 - hp.wo
hp.wi = r
hp.ndotwi = hp.normal.dot(r)
hp.specular = True #special case
def get_sample_asm(self, runtime):
# eax - pointer to hitpoint
asm_structs = renmas.utils.structs("hitpoint")
ASM = """
#DATA
float two[4] = 2.0, 2.0, 2.0, 0.0
"""
ASM += asm_structs + """
#CODE
macro dot xmm0 = eax.hitpoint.normal * eax.hitpoint.wo
macro broadcast xmm1 = xmm0[0]
macro eq128 xmm1 = xmm1 * two
macro eq128 xmm1 = xmm1 * eax.hitpoint.normal
macro eq128 xmm1 = xmm1 - eax.hitpoint.wo
macro dot xmm4 = xmm1 * eax.hitpoint.normal
macro eq128 eax.hitpoint.wi = xmm1
macro eq32 eax.hitpoint.ndotwi = xmm4
mov dword [eax + hitpoint.specular], 14
ret
"""
assembler = util.get_asm()
mc = assembler.assemble(ASM, True)
#mc.print_machine_code()
name = "brdf_specular" + str(util.unique())
self.ds = runtime.load(name, mc)
self.func_ptr = runtime.address_module(name)
def pdf(self, hitpoint):
if hitpoint.specular:
hitpoint.pdf = 1.0
else:
hitpoint.pdf = 0.0
def pdf_asm(self):
prefix = "_" + str(hash(self)) + "_"
# eax - pointer to hitpoint
ASM = "#CODE \n"
ASM += "mov ebx, dword [eax + hitpoint.specular] \n"
ASM += "cmp ebx, 0 \n" #0-no specular sample
ASM += "jne " + prefix + "spec_sample\n"
ASM += "pxor xmm0, xmm0 \n" # put 0.0 in xmm0
ASM += "jmp " + prefix + "end_spec \n"
ASM += prefix + "spec_sample: \n"
ASM += "pcmpeqw xmm0, xmm0 \n" # generate 1.0 in xmm0
ASM += "pslld xmm0, 25 \n"
ASM += "psrld xmm0, 2 \n"
ASM += prefix + "end_spec: \n"
return ASM
|
[
"mvidov@yahoo.com"
] |
mvidov@yahoo.com
|
95bb386cc14b99e28952fb65f32afe14f29c9620
|
e6b4f7a3721c9f0c59de2623165b6967fa48a095
|
/gispot/crcpy/raw/ejpg.py
|
6ea332206b053c9830b21ceda779745b33c4b506
|
[] |
no_license
|
hygnic/Gispot
|
8a3db18e4348597990793968d502c4619afdd523
|
440d168fd84bd98d2d9f2bc27b34ac9d7816a4e1
|
refs/heads/master
| 2023-04-29T15:39:09.876858
| 2023-04-16T08:17:55
| 2023-04-16T08:17:55
| 220,610,954
| 0
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 987
|
py
|
# -*- coding:cp936 -*-
# lcc
"""
Batch-export every MXD map document in a folder to a JPEG image.
(Docstring translated from Chinese.)
"""
#
# import sys
# sys.path.append("../../GUIs")
# print sys.path
import arcpy,os
# import tooltk
# tooltk.Tooltk().rootwindow.mainloop()
# Folder containing the .mxd documents to export, e.g.:
# path = ur"G:\正安县\正安县公示图\400"
# Output resolution (dpi), e.g.:
# res = 300
arcpy.env.overwriteOutput = True
def export(path, res):
    """
    Batch-export the MXD documents under *path* to JPEG (same folder).
    :param path: folder containing .mxd files (string)
    :param res: output resolution in dpi (int)
    :return: None
    """
    for afile in os.listdir(path):
        if afile[-3:].lower() == 'mxd':
            mxd1 = arcpy.mapping.MapDocument(os.path.join(path, afile))
            print u"正在出图..."
            # Output name: replace the 'mxd' suffix with 'jpg'.
            arcpy.mapping.ExportToJPEG(mxd1,
                    os.path.join(path, afile[:-3] + 'jpg'), resolution = res)
            del mxd1
            print 'Done'
        else:
            print u"\n非MXD文件,跳过"
if __name__ == '__main__':
    export("path", 300)
    # app = tooltk.Tooltk()
    # app.GUIexport()
    #
    # app.window.mainloop()
|
[
"hygnic@outlook.com"
] |
hygnic@outlook.com
|
63d840a4e9086763b14e0fc3229eb897db7931ef
|
955e99e0f46a8578562853fdb2cb9237923dcdd7
|
/submission/tasks.py
|
38e5e592ddcd58d9f712267eef81801226332d06
|
[] |
no_license
|
joeyac/WebServer
|
7d7ccc3df3092f923e52248c15e5dbb3ad5b866b
|
c856ed5570712887c61df9f563a9c028c27a8367
|
refs/heads/master
| 2021-06-16T16:04:02.847217
| 2017-05-19T04:42:23
| 2017-05-19T04:42:23
| 81,619,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from celery import shared_task
from judger.tasks import JudgeDispatcher
@shared_task
def p_judge(submission_id, language_name, src_code,
            time_limit=None, memory_limit=None,
            test_case_id=None, spj_code=None,
            oj=None, problem_id=None):
    """Celery task: judge one submission asynchronously.

    All arguments are forwarded verbatim to JudgeDispatcher; judge() performs
    the evaluation and records results itself (nothing is returned).
    """
    JudgeDispatcher(submission_id, language_name, src_code,
                    time_limit, memory_limit,
                    test_case_id, spj_code,
                    oj, problem_id).judge()
|
[
"623353308@qq.com"
] |
623353308@qq.com
|
2781abb2571ce6222079aaeec64e43050fc8c7dd
|
04f83aab47940b739f13c1ba102c230372966c43
|
/SHyFTFitter/scripts/configTemplateInfo.py
|
ec70fe88e67072237e3cf70d7d4f78a0d8a603d1
|
[] |
no_license
|
PerilousApricot/SUSHyFT-Analyzer
|
5a11909963d30c8ad7f19f499253a6753e78608a
|
9f5ba528a96203459c52a0434b32311a16e2ff3b
|
refs/heads/master
| 2016-09-15T15:31:30.617286
| 2016-03-14T20:32:09
| 2016-03-14T21:02:28
| 21,915,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,738
|
py
|
#! /usr/bin/env python
# Python 2 / PyROOT command-line tool: prints summary info about the fit
# templates described by a FitterConfig configuration file.
import ROOT
import optparse, sys, re, pprint, os
from FitterConfig import FitterConfig
# global variables to be filled
histNames = []
groupNames = []
fileName = ''
lumi = 1.
# number of jet and tag bins
totalDict = {}
histList = []
# REs used to parse config lines and histogram names.
commentRE = re.compile (r'\#.+$')
trailingRE = re.compile (r'\s*$')
sampleRE = re.compile (r'^\s*\+\s+names\s*=\s*(.+)', re.IGNORECASE)
groupRE = re.compile (r'^\s*\+\s+groupNames\s*=\s*(.+)', re.IGNORECASE)
fileRE = re.compile (r'^\s*\+\s+templateFile\s*=\s*(.+)', re.IGNORECASE)
lumiRE = re.compile (r'^\s*\+\s+intLumi\s*=\s*(.+)', re.IGNORECASE)
commaRE = re.compile (r'\s*,\s*')
jetRE = re.compile (r'_(\d+)j')
tagRE = re.compile (r'_(\d+)t')
htRE = re.compile (r'_hT', re.IGNORECASE)
# ROOT color index per sample group (currently unused in this script).
colorDict = {
    'Top'  : 2,
    'sing' : 93,
    'Wbb'  : 56,
    'Wcc'  : 62,
    'Wc'   : 65,
    'Wqq'  : 69,
    'EW'   : 89,
    'QCD'  : 33,
    }
if __name__ == "__main__":
    # Setup options parser
    # NOTE(review): the two usage-string literals are concatenated with no
    # separating whitespace/newline — probably a cosmetic bug; confirm.
    parser = optparse.OptionParser \
             ("usage: %prog [options] templates.root" \
              "Prints out info on templates.")
    parser.add_option ('--lum', dest = 'lum', type='float', default=0.,
                       help='Override integrated luminosity in config file');
    parser.add_option ("--latex", dest='latex',
                       action='store_true',
                       help="Formats output as latex table")
    parser.add_option ("--debug", dest='debug',
                       action='store_true',
                       help="Print out FitterConfig object")
    parser.add_option ('--noData', dest='noData', action='store_true',
                       default=True,
                       help='Do not display data counts')
    parser.add_option ('--Data', dest='noData', action='store_false',
                       help='Display data counts')
    parser.add_option ('--totalMC', dest='totalMC', action='store_true',
                       default=False,
                       help='Display total MC prediction counts')
    parser.add_option ('--file', dest = 'file', type='string',
                       help='Override root file to use');
    parser.add_option ('--combineGroups', dest = 'combineGroups',
                       action='append', type='string', default=[],
                       help='Groups to combine');
    parser.add_option ('--combineSamples', dest = 'combineSamples',
                       action='append', type='string', default=[],
                       help='Samples to combine');
    parser.add_option ("--groups", dest='groups', action="append",
                       type="string", default=[],
                       help="Which groups to use")
    parser.add_option ("--samples", dest='samples', action="append",
                       type="string", default=[],
                       help="Which samples to use")
##     saveGroup = optparse.OptionGroup (parser, "Save Stacks Options")
##     saveGroup.add_option ("--saveStacks", dest='saveStacks',
##                           action='store_true',
##                           help="Saves images of stack of templates")
##     saveGroup.add_option ("--cms", dest='cms', action='store_true',
##                           help="Use CMS titles, etc for plots")
##     saveGroup.add_option ("--big", dest='big', action='store_true',
##                           help="Make big plots")
##     saveGroup.add_option ("--eps", dest='eps', action='store_true',
##                           help='Save .eps files')
##     parser.add_option_group (saveGroup)
    options, args = parser.parse_args()
    ROOT.gROOT.SetBatch()
    ROOT.gROOT.SetStyle('Plain')
    if len (args) < 1:
        print "Need to provide configuration file. Aborting."
        sys.exit(1)
    configName = args[0]
    # Build the config from the file, then layer command-line overrides on top.
    config = FitterConfig (configName, ignoreBinString=True)
    config.noData = options.noData
    config.setValuesFromArgs (args)
    #config.readConfig (configName)
    config.printMCtotal = options.totalMC
    config.latex = options.latex
    config.setCombineGroups (options.combineGroups)
    config.setCombineSamples (options.combineSamples)
    # --samples/--groups may be repeated and/or comma-separated; flatten both.
    samples = []
    for sample in options.samples:
        samples.extend (commaRE.split (sample))
    if samples:
        config.setSamples (samples)
    groups = []
    for group in options.groups:
        groups.extend (commaRE.split (group))
    if groups:
        config.setGroups (groups)
    if options.file:
        config.fileName = options.file
    if options.lum:
        config.lumi = options.lum
    print "info for %s:" % config.fileName
    config.printInfo()
    if options.debug:
        print "%s" % config
|
[
"andrew.m.melo@vanderbilt.edu"
] |
andrew.m.melo@vanderbilt.edu
|
1300eb74b39e37aa12c11ab90b55b2f14bb5b104
|
061c9850fe1d8085f9b04ee541eb9dd7b389ea48
|
/backend/home/migrations/0002_load_initial_data.py
|
ac672869ea320e1bcfcb77501628434b0faf52fa
|
[] |
no_license
|
crowdbotics-apps/tony-stg-app-7-dev-14211
|
1245fab608661791618c21efff0dc5e3d536b94b
|
ba6c52b243a6bd99d721233b9b7ab9f90b2228f8
|
refs/heads/master
| 2023-01-07T07:48:10.718703
| 2020-11-11T03:44:25
| 2020-11-11T03:44:25
| 308,393,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "tony-stg-app-7"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">tony-stg-app-7</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tony-stg-app-7-dev-14211.botics.co"
site_params = {
"name": "tony-stg-app-7",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
    """Data migration: seed CustomText, HomePage and the default Site row."""
    # Requires the initial home models and the unique-domain sites migration.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # No reverse functions are supplied, so this migration is irreversible.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
d9ac6aaaeeaf79aa22f03653a341b038974aaff2
|
2804432fba5a4fe639d07a207bb01f71e03d9189
|
/test/cts/tool/CTSConverter/src/nn/specs/V1_0/space_to_depth_float_2.mod.py
|
df557f6dc777e190bcc08907f42fa96d78c54f38
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
intel/webml-polyfill
|
5685299e1b6d91a010c5e057685bf010d5646e4f
|
bd014955c5bcc9dc5465aea06721072f45ab4a75
|
refs/heads/master
| 2023-09-01T17:30:55.961667
| 2023-04-14T01:18:47
| 2023-04-14T01:18:47
| 126,892,425
| 168
| 75
|
Apache-2.0
| 2023-04-14T05:16:41
| 2018-03-26T21:31:32
|
Python
|
UTF-8
|
Python
| false
| false
| 541
|
py
|
# NNAPI CTS model spec: SPACE_TO_DEPTH over a float32 tensor.
# Model/Input/Int32Scalar/Output/Example are injected by the CTS test
# generator that executes this file; they are not defined here.
model = Model()
i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 4, 1}")  # NHWC input
block = Int32Scalar("block_size", 2)  # fold 2x2 spatial blocks into depth
output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("SPACE_TO_DEPTH", i1, block).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]}
output0 = {output: # output 0
[1., 2., 5., 6., 3., 4., 7., 8., 9., 10., 13., 14., 11., 12., 15., 16.]}
# Instantiate an example
Example((input0, output0))
|
[
"feng.dai@intel.com"
] |
feng.dai@intel.com
|
9a0a6ee353a2d8e0a58603081ad649422122d6fa
|
4f57d03df135822a63c4f00f2b5e6dcb3c9a3cdc
|
/setup.py
|
aa22d008c02e69c578c9b1e5cbdbdfcae5e6c2c1
|
[] |
no_license
|
exantech/monero-wallet-service
|
059c437e261f4d14a89a7786d1152d735d66f181
|
720477c30e7f14936d530f635d7fa09fc516ee54
|
refs/heads/master
| 2022-12-10T11:09:10.747734
| 2018-03-19T15:55:28
| 2019-06-03T11:38:19
| 189,993,281
| 2
| 0
| null | 2022-12-08T01:04:04
| 2019-06-03T11:35:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
# Packaging script for the monero-wallet-service backend.
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py (unused below, kept for convention).
here = path.abspath(path.dirname(__file__))
setup(
    name='monero-wallet-service',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.0.1',
    # Not zip-safe: the package must be installed unpacked on disk.
    zip_safe=False,
    description='Monero Wallet Service backend',
    # long_description=long_description,
    # Author details
    author='Denis Voskvitsov',
    author_email='dv@exante.eu',
    # Choose your license
    license='EULA',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # NOTE(review): 'package_index' is not a standard setup() keyword and is
    # presumably ignored by setuptools — confirm whether it is still needed.
    package_index='http://ci2-pypi.ghcg.com/simple/',
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # NOTE(review): versions are fully pinned (==), which is unusual for a
    # library's install_requires; verify this is intentional for deployment.
    install_requires=[
        'aiohttp==3.0.9',
        'aiohttp-swagger==1.0.5',
        'aioredis==1.1.0',
        'async-timeout==2.0.1',
        'attrs==17.4.0',
        'boto3==1.9.90',
        'chardet==3.0.4',
        'hiredis==0.2.0',
        'idna==2.6',
        'idna-ssl==1.0.1',
        'Jinja2==2.10',
        'MarkupSafe==1.0',
        'multidict==4.1.0',
        'PyYAML==3.12',
        'yarl==1.1.1',
        'peewee==2.10.2',
        'peewee-async==0.5.12',
        'peewee-db-evolve==0.6.8',
        'psycopg2==2.7.4',
        'psycopg2-binary==2.7.4',
        'aiopg==0.13.2',
        'python-slugify==1.2.5',
        'urllib3==1.22',
        'ujson==1.35',
        'Flask==0.12.2',
        'flask-peewee==3.0.0',
        'flask-swagger-ui==3.6.0',
        'uwsgi==2.0.17',
        'redis==2.10.6',
        'cryptonote==0.1',
    ],
    include_package_data=True,
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': ['check-manifest'],
        'test': ['coverage'],
    },
)
|
[
"denis.voskvitsov@gmail.com"
] |
denis.voskvitsov@gmail.com
|
9aeabf6744ed3a9ac5a1df44c5287b764fe258ac
|
114d1ca95de41c3d1ae5aabeddcd5054b327973b
|
/socket_programs/client-google.py
|
55126e853f68d5dbd82ce80009526dc1bcdd8541
|
[] |
no_license
|
sambapython/batch28_1
|
7e134ac0166f916ece16dc81f162e5c51af2d9f8
|
ccd7ba382ecd148afad8d29c09839f43e6bc8c23
|
refs/heads/master
| 2021-01-21T19:09:03.026169
| 2017-06-25T07:55:44
| 2017-06-25T07:55:44
| 92,122,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import socket
try:
s=socket.socket()
host="www.google.com"
port=8888#443#80
s.connect((host,port))
print "connected successfully!!!"
except Exception as err:
print err
finally:
s.close()
|
[
"sambapython@gmail.com"
] |
sambapython@gmail.com
|
97c488e5ad90e0f2906fd430de44698e972b15b5
|
53ba0b6f172abcade631ae1f52852c400302559e
|
/test/cv/bases/activates/DynamicReLUdemo.py
|
b99525743213c9b3e245292809f8a30322dc5698
|
[
"Apache-2.0"
] |
permissive
|
sssssshf/python_developer_tools
|
f97c64ee0aa0a7e9d31d173192805771c83abb7f
|
44d2e67a2e2495a12d6b32da12c76cf0010ac7ea
|
refs/heads/main
| 2023-08-19T02:44:53.536200
| 2021-10-13T02:10:19
| 2021-10-13T02:10:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,507
|
py
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/14/2021 3:19 PM
# @File:demo
import os
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from tqdm import tqdm
from python_developer_tools.cv.bases.activates.DynamicReLU import DyReLUA, DyReLUB, DyReLUC, convert_relu_to_DyReLU
from python_developer_tools.cv.utils.torch_utils import init_seeds
# Normalize CIFAR-10 RGB channels from [0, 1] to roughly [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# CIFAR-10 class names. NOTE(review): the __main__ block below rebinds
# `classes` to the integer 10, shadowing this tuple.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class shufflenet_v2_x0_5M(nn.Module):
    """ShuffleNetV2-x0.5 classifier whose ReLU activations are replaced
    by the dynamic-ReLU variant "A" (DyReLUA)."""
    def __init__(self,nc,pretrained=True):
        # nc: number of output classes; pretrained: load torchvision weights.
        super(shufflenet_v2_x0_5M, self).__init__()
        self.model_ft = torchvision.models.shufflenet_v2_x0_5(pretrained=pretrained)
        # Replace every ReLU in the backbone with DyReLUA.
        self.model_ft = convert_relu_to_DyReLU(self.model_ft,"A")
        num_ftrs = self.model_ft.fc.in_features
        # Swap the ImageNet head for an nc-way classifier.
        self.model_ft.fc = nn.Linear(num_ftrs, nc)
    def forward(self,x):
        # Mirrors torchvision's ShuffleNetV2.forward, calling the (converted)
        # submodules explicitly.
        x = self.model_ft.conv1(x)
        x = self.model_ft.maxpool(x)
        x = self.model_ft.stage2(x)
        x = self.model_ft.stage3(x)
        x = self.model_ft.stage4(x)
        x = self.model_ft.conv5(x)
        x = x.mean([2, 3])  # globalpool
        out = self.model_ft.fc(x)
        return out
if __name__ == '__main__':
    # Recorded CIFAR-10 accuracies for each activation variant:
    """
    ReLU 41%
    DyReLUA 42 %
    DyReLUB 41 %
    DyReLUC 40 %
    """
    # os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    epochs = 50
    batch_size = 1024
    num_workers = 8
    classes = 10  # number of CIFAR-10 classes (shadows the names tuple above)
    init_seeds(1024)  # fix RNG seeds for reproducibility
    trainset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                                              pin_memory=True)
    testset = torchvision.datasets.CIFAR10(root=os.getcwd(), train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    model = shufflenet_v2_x0_5M(classes, True)
    model.cuda()
    model.train()
    criterion = nn.CrossEntropyLoss()
    # SGD with momentum
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)
    for epoch in range(epochs):
        train_loss = 0.0
        for i, (inputs, labels) in tqdm(enumerate(trainloader)):
            inputs, labels = inputs.cuda(), labels.cuda()
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward
            outputs = model(inputs)
            # loss
            loss = criterion(outputs, labels)
            # backward
            loss.backward()
            # update weights
            optimizer.step()
            # print statistics
            train_loss += loss
        scheduler.step()
        # NOTE(review): format prints total epochs before the current epoch;
        # also train_loss sums per-batch mean losses but divides by the
        # dataset size — confirm the intended normalization.
        print('%d/%d loss: %.6f' % (epochs, epoch + 1, train_loss / len(trainset)))
        correct = 0
        # NOTE(review): model.eval() is never reverted to model.train(), so
        # epochs after the first train in eval mode (batch-norm frozen) —
        # likely a bug; verify before reusing this script.
        model.eval()
        for j, (images, labels) in tqdm(enumerate(testloader)):
            outputs = model(images.cuda())
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted.cpu() == labels).sum()
        print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / len(testset)))
|
[
"zengxh@chint.com"
] |
zengxh@chint.com
|
c59f4764cbfb8fbf791c758771b944e89cd8880f
|
93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3
|
/python/paddle/distributed/fleet/runtime/runtime_base.py
|
2e8bacfbc3b1ded58e63e8d9e93764a0c0090b91
|
[
"Apache-2.0"
] |
permissive
|
hutuxian/Paddle
|
f8b7693bccc6d56887164c1de0b6f6e91cffaae8
|
a1b640bc66a5cc9583de503e7406aeba67565e8d
|
refs/heads/develop
| 2023-08-29T19:36:45.382455
| 2020-09-09T09:19:07
| 2020-09-09T09:19:07
| 164,977,763
| 8
| 27
|
Apache-2.0
| 2023-06-16T09:47:39
| 2019-01-10T02:50:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = []
class RuntimeBase(object):
    """Base class for fleet runtime backends.

    Every lifecycle hook below is a no-op, so a concrete runtime only needs
    to override the stages it actually implements.
    """

    def __init__(self):
        pass

    def _set_basic_info(self, context):
        """Remember the shared runtime context for later stages."""
        self.context = context

    def _run_worker(self):
        """Subclass hook; does nothing by default."""
        pass

    def _init_server(self, *args, **kwargs):
        """Subclass hook; does nothing by default."""
        pass

    def _run_server(self):
        """Subclass hook; does nothing by default."""
        pass

    def _stop_worker(self):
        """Subclass hook; does nothing by default."""
        pass

    def _save_inference_model(self, *args, **kwargs):
        """Subclass hook; does nothing by default."""
        pass

    def _save_persistables(self, *args, **kwargs):
        """Subclass hook; does nothing by default."""
        pass
|
[
"noreply@github.com"
] |
hutuxian.noreply@github.com
|
4351decb036d8072bdbfcd0c183b01bade4445e7
|
082c6d8f248257c8442bbef7412f9915ac4c33bd
|
/mlrun/api/api/endpoints/secrets.py
|
875ae80681de74dfcb0fc81e1648b26c5a918c41
|
[
"Apache-2.0"
] |
permissive
|
eran-nussbaum/mlrun
|
24e7db989b4eb03548f127ff26d36f77b1c82250
|
97209b27ccf3daf8f202a1a2bb1b01abd537ad70
|
refs/heads/master
| 2023-08-26T01:35:02.797712
| 2021-10-21T10:18:24
| 2021-10-21T10:18:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,928
|
py
|
from http import HTTPStatus
from typing import List
import fastapi
from sqlalchemy.orm import Session
import mlrun.api.api.deps
import mlrun.api.crud
import mlrun.api.utils.auth.verifier
import mlrun.api.utils.singletons.project_member
import mlrun.errors
from mlrun.api import schemas
from mlrun.utils.vault import add_vault_user_secrets
router = fastapi.APIRouter()
@router.post("/projects/{project}/secrets", status_code=HTTPStatus.CREATED.value)
def store_project_secrets(
project: str,
secrets: schemas.SecretsData,
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
# Doing a specific check for project existence, because we want to return 404 in the case of a project not
# existing, rather than returning a permission error, as it misleads the user. We don't even care for return
# value.
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
secrets.provider,
mlrun.api.schemas.AuthorizationAction.create,
auth_info,
)
mlrun.api.crud.Secrets().store_secrets(project, secrets)
return fastapi.Response(status_code=HTTPStatus.CREATED.value)
@router.delete("/projects/{project}/secrets", status_code=HTTPStatus.NO_CONTENT.value)
def delete_project_secrets(
project: str,
provider: schemas.SecretProviderName,
secrets: List[str] = fastapi.Query(None, alias="secret"),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.delete,
auth_info,
)
mlrun.api.crud.Secrets().delete_secrets(project, provider, secrets)
return fastapi.Response(status_code=HTTPStatus.NO_CONTENT.value)
@router.get("/projects/{project}/secret-keys", response_model=schemas.SecretKeysData)
def list_secret_keys(
project: str,
provider: schemas.SecretProviderName = schemas.SecretProviderName.vault,
token: str = fastapi.Header(None, alias=schemas.HeaderNames.secret_store_token),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.read,
auth_info,
)
return mlrun.api.crud.Secrets().list_secret_keys(project, provider, token)
@router.get("/projects/{project}/secrets", response_model=schemas.SecretsData)
def list_secrets(
project: str,
secrets: List[str] = fastapi.Query(None, alias="secret"),
provider: schemas.SecretProviderName = schemas.SecretProviderName.vault,
token: str = fastapi.Header(None, alias=schemas.HeaderNames.secret_store_token),
auth_info: mlrun.api.schemas.AuthInfo = fastapi.Depends(
mlrun.api.api.deps.authenticate_request
),
db_session: Session = fastapi.Depends(mlrun.api.api.deps.get_db_session),
):
mlrun.api.utils.singletons.project_member.get_project_member().get_project(
db_session, project, auth_info.session
)
mlrun.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions(
mlrun.api.schemas.AuthorizationResourceTypes.secret,
project,
provider,
mlrun.api.schemas.AuthorizationAction.read,
auth_info,
)
return mlrun.api.crud.Secrets().list_secrets(project, provider, secrets, token)
@router.post("/user-secrets", status_code=HTTPStatus.CREATED.value)
def add_user_secrets(secrets: schemas.UserSecretCreationRequest,):
if secrets.provider != schemas.SecretProviderName.vault:
return fastapi.Response(
status_code=HTTPStatus.BAD_REQUEST.vault,
content=f"Invalid secrets provider {secrets.provider}",
)
add_vault_user_secrets(secrets.user, secrets.secrets)
return fastapi.Response(status_code=HTTPStatus.CREATED.value)
|
[
"noreply@github.com"
] |
eran-nussbaum.noreply@github.com
|
d4cbcfa95fad06e8d14954bfdccb2f13136a60d3
|
f30b91db647dca1f77fffa4b7e26b6c6a68abbc6
|
/6_kyu/Greatest Common Factor of an Array/python/test_solution.py
|
748f17fd9c642882c5538a1f17670cd275df2e8b
|
[] |
no_license
|
estraviz/codewars
|
73caf95519eaac6f34962b8ade543bf4417df5b7
|
5f8685e883cb78381c528a0988f2b5cad6c129c2
|
refs/heads/master
| 2023-05-13T07:57:43.165290
| 2023-05-08T21:50:39
| 2023-05-08T21:50:39
| 159,744,593
| 10
| 55
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
import pytest
from solution import greatest_common_factor
# Each case: (input sequence, expected greatest common factor of all elements).
tests = [
    ([1, 8], 1),
    ([16, 4, 8], 4),
    ([46, 14, 20, 88], 2),
    ([468, 156, 806, 312, 442], 26),
    ([48, 99, 18], 3),
    ([32, 96, 120, 80], 8),
    ([91, 143, 234, 52], 13),
    ([171, 45, 297, 342], 9),
]
@pytest.mark.parametrize(
    "seq, expected", tests
)
def test_greatest_common_factor(seq, expected):
    """greatest_common_factor returns the GCD of the whole sequence."""
    assert greatest_common_factor(seq) == expected
|
[
"javier.estraviz@gmail.com"
] |
javier.estraviz@gmail.com
|
98e4d8bc25567926017f664b32295fec1b5026f4
|
ef6229d281edecbea3faad37830cb1d452d03e5b
|
/ucsmsdk/mometa/storage/StorageLocalDiskConfigDef.py
|
d35bd2a160c55172b79f7ccacdd315218f552866
|
[
"Apache-2.0"
] |
permissive
|
anoop1984/python_sdk
|
0809be78de32350acc40701d6207631322851010
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
refs/heads/master
| 2020-12-31T00:18:57.415950
| 2016-04-26T17:39:38
| 2016-04-26T17:39:38
| 57,148,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,393
|
py
|
"""This module contains the general information for StorageLocalDiskConfigDef ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageLocalDiskConfigDefConsts():
    """Allowed string values for StorageLocalDiskConfigDef properties
    (generated constants; grouped by the property they constrain)."""
    # flexFlashRAIDReportingState values
    FLEX_FLASH_RAIDREPORTING_STATE_DISABLE = "disable"
    FLEX_FLASH_RAIDREPORTING_STATE_ENABLE = "enable"
    # flexFlashState values
    FLEX_FLASH_STATE_DISABLE = "disable"
    FLEX_FLASH_STATE_ENABLE = "enable"
    INT_ID_NONE = "none"
    # mode (local-disk/RAID configuration) values
    MODE_ANY_CONFIGURATION = "any-configuration"
    MODE_BEST_EFFORT_MIRRORED = "best-effort-mirrored"
    MODE_BEST_EFFORT_MIRRORED_STRIPED = "best-effort-mirrored-striped"
    MODE_BEST_EFFORT_STRIPED = "best-effort-striped"
    MODE_BEST_EFFORT_STRIPED_DUAL_PARITY = "best-effort-striped-dual-parity"
    MODE_BEST_EFFORT_STRIPED_PARITY = "best-effort-striped-parity"
    MODE_DUAL_DISK = "dual-disk"
    MODE_NO_LOCAL_STORAGE = "no-local-storage"
    MODE_NO_RAID = "no-raid"
    MODE_RAID_MIRRORED = "raid-mirrored"
    MODE_RAID_MIRRORED_STRIPED = "raid-mirrored-striped"
    MODE_RAID_STRIPED = "raid-striped"
    MODE_RAID_STRIPED_DUAL_PARITY = "raid-striped-dual-parity"
    MODE_RAID_STRIPED_DUAL_PARITY_STRIPED = "raid-striped-dual-parity-striped"
    MODE_RAID_STRIPED_PARITY = "raid-striped-parity"
    MODE_RAID_STRIPED_PARITY_STRIPED = "raid-striped-parity-striped"
    MODE_SINGLE_DISK = "single-disk"
    # policyOwner values
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
    # protectConfig values
    PROTECT_CONFIG_FALSE = "false"
    PROTECT_CONFIG_NO = "no"
    PROTECT_CONFIG_TRUE = "true"
    PROTECT_CONFIG_YES = "yes"
class StorageLocalDiskConfigDef(ManagedObject):
    """This is StorageLocalDiskConfigDef class.

    Generated UCS SDK managed object; the metadata tables below drive
    (de)serialization — do not edit them by hand.
    """

    consts = StorageLocalDiskConfigDefConsts()
    naming_props = set([])

    # Class-level managed-object metadata: class name, XML name, RN prefix,
    # first supported version, access, mask, parents/children, verbs.
    mo_meta = MoMeta("StorageLocalDiskConfigDef", "storageLocalDiskConfigDef", "local-disk-config", VersionMeta.Version101e, "InputOutput", 0xfff, [], ["admin", "ls-compute", "ls-config", "ls-config-policy", "ls-server", "ls-storage", "ls-storage-policy"], [u'lsServer', u'lstorageDasScsiLun', u'storageController', u'storageFlexFlashController'], [u'storageLocalDiskPartition'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata (MoPropertyMeta arguments follow the ucscoremeta
    # signature: python name, XML name, type, min version, access, mask, ...).
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "flex_flash_raid_reporting_state": MoPropertyMeta("flex_flash_raid_reporting_state", "flexFlashRAIDReportingState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["disable", "enable"], []),
        "flex_flash_state": MoPropertyMeta("flex_flash_state", "flexFlashState", "string", VersionMeta.Version212a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disable", "enable"], []),
        "int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
        "mode": MoPropertyMeta("mode", "mode", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["any-configuration", "best-effort-mirrored", "best-effort-mirrored-striped", "best-effort-striped", "best-effort-striped-dual-parity", "best-effort-striped-parity", "dual-disk", "no-local-storage", "no-raid", "raid-mirrored", "raid-mirrored-striped", "raid-striped", "raid-striped-dual-parity", "raid-striped-dual-parity-striped", "raid-striped-parity", "raid-striped-parity-striped", "single-disk"], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["local", "pending-policy", "policy"], []),
        "protect_config": MoPropertyMeta("protect_config", "protectConfig", "string", VersionMeta.Version131c, MoPropertyMeta.READ_WRITE, 0x200, None, None, None, ["false", "no", "true", "yes"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x400, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x800, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }

    # XML attribute name -> python attribute name.
    prop_map = {
        "childAction": "child_action",
        "descr": "descr",
        "dn": "dn",
        "flexFlashRAIDReportingState": "flex_flash_raid_reporting_state",
        "flexFlashState": "flex_flash_state",
        "intId": "int_id",
        "mode": "mode",
        "name": "name",
        "policyLevel": "policy_level",
        "policyOwner": "policy_owner",
        "protectConfig": "protect_config",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        # Initialize every known property to None; ManagedObject.__init__
        # then applies any keyword overrides.
        self._dirty_mask = 0
        self.child_action = None
        self.descr = None
        self.flex_flash_raid_reporting_state = None
        self.flex_flash_state = None
        self.int_id = None
        self.mode = None
        self.name = None
        self.policy_level = None
        self.policy_owner = None
        self.protect_config = None
        self.sacl = None
        self.status = None

        ManagedObject.__init__(self, "StorageLocalDiskConfigDef", parent_mo_or_dn, **kwargs)
|
[
"test@cisco.com"
] |
test@cisco.com
|
436b1d40d931864183e4790ee0b3374e829502cb
|
be24b5f37823125b2b901c0029175bfb2f25fb0e
|
/src/homework/homework12/win.py
|
7f851ec0cfc578951966aaf3ea0a12716f2bc633
|
[
"MIT"
] |
permissive
|
acc-cosc-1336/cosc-1336-spring-2018-Miguelh1997
|
1bd75c51e72431037a46a1b3079d7695c41920ce
|
ac4b0405c4070758d0fc07458d4dca8a8a0313de
|
refs/heads/master
| 2021-05-11T09:11:41.887630
| 2018-05-12T03:11:38
| 2018-05-12T03:11:38
| 118,070,058
| 0
| 1
|
MIT
| 2018-05-12T03:16:17
| 2018-01-19T03:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
from tkinter import Tk, Label, Button
from src.homework.homework12.converter import Converter
class Win(Tk):
    """Main window: shows a fixed distance in km and its miles equivalent."""

    def __init__(self):
        self.miles = Converter()
        Tk.__init__(self, None, None)
        self.wm_title('Miles to Kilometers converter')
        # BUG FIX: grid() returns None, so the original
        # `Button(...).grid(...)` chains stored None in these attributes.
        # Create each widget first, then place it.
        self.button_quit = Button(self, text='Quit', command=self.destroy)
        self.button_quit.grid(row=2, column=3)
        self.display_conversion_button = Button(
            self, text='Display Conversion', command=self.display_labels)
        self.display_conversion_button.grid(row=2, column=1)
        self.mainloop()

    def display_labels(self):
        """Render the km value and its converted miles value as two labels."""
        km = 100  # NOTE(review): hard-coded demo value
        self.label = Label(self, text='Km:' + str(km))
        self.label.grid(row=0, column=1, sticky="w")
        self.label = Label(
            self, text='Miles:' + str(self.miles.get_miles_from_km(km)))
        self.label.grid(row=1, column=1, sticky="w")
|
[
"noreply@github.com"
] |
acc-cosc-1336.noreply@github.com
|
806adbe21341eef6b01e1fe731fc872fa7cb112d
|
31252d95232aacaee80b5b3d22cf8b66f05d24c6
|
/8.AnomalyDetection_RecommenderSystem/machine-learning-ex8/ex8/selectThreshold.py
|
49b530847bb4a727357032badf908a9836c3daba
|
[] |
no_license
|
mrech/MachineLearning_AndrewNg
|
54ae44824d5ae53c8faf3f4adeff76935d4f479a
|
748a49ece69dae413b78f9de95b3fb483848ee59
|
refs/heads/master
| 2020-04-24T10:37:57.072292
| 2019-08-20T13:16:50
| 2019-08-20T13:16:50
| 171,899,951
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
# SELECTTHRESHOLD Find the best threshold (epsilon) to use for selecting
# outliers
def selectThreshold(yval, pval):
    '''
    [bestEpsilon bestF1] = SELECTTHRESHOLD(yval, pval) finds the best
    threshold to use for selecting outliers based on the results from a
    validation set (pval) and the ground truth (yval).

    yval: array of ground-truth labels (1 = anomaly, 0 = normal).
    pval: array of predicted probabilities for the same examples.
    Returns (bestEpsilon, bestF1).
    '''
    import numpy as np

    bestEpsilon = 0
    bestF1 = 0
    stepsize = (max(pval) - min(pval)) / 1000

    # Sweep candidate thresholds; an example is flagged as an anomaly when
    # its probability falls below epsilon.
    for epsilon in np.arange(min(pval), max(pval), stepsize):
        prediction = (pval < epsilon)
        truth = (yval.flatten() == 1)

        # Confusion-matrix counts for this threshold.
        tp = int(np.sum(prediction & truth))
        fp = int(np.sum(prediction & ~truth))
        fn = int(np.sum(~prediction & truth))

        # BUG FIX: the original guard `tp == 0 & fp == 0` parsed as
        # `tp == (0 & fp) == 0` because `&` binds tighter than `==`,
        # i.e. it only tested `tp == 0`. That happens to be the right
        # condition: with no true positives, precision = recall = 0,
        # F1 is defined as 0, and the formula below would divide by zero.
        if tp == 0:
            F1 = 0
        else:
            prec = tp / (tp + fp)
            rec = tp / (tp + fn)
            F1 = (2 * prec * rec) / (prec + rec)

        # Keep the first epsilon achieving the best F1 (strict improvement).
        if F1 > bestF1:
            bestF1 = F1
            bestEpsilon = epsilon

    return bestEpsilon, bestF1
|
[
"rivato.morena@gmail.com"
] |
rivato.morena@gmail.com
|
6b9d9cb08643c389b3521d474805c579b9985e06
|
d6a152b8662af82ec604fa63c5c415dc6b59699b
|
/aeshin/settings.py
|
70a75e24b58f9aa5ff4c1165e6113aeb7a401c45
|
[] |
no_license
|
rybesh/aeshin
|
7cf433ba93309f49e2ff676c2d4568244f81ee52
|
292867a8b80031cacfce70c67387c656c3cb191b
|
refs/heads/master
| 2023-08-19T00:17:40.042842
| 2023-08-17T17:47:55
| 2023-08-17T17:47:55
| 22,109,808
| 0
| 0
| null | 2023-09-05T14:05:34
| 2014-07-22T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,121
|
py
|
import os
import environ
from pathlib import Path
from django.db.models.query import QuerySet
# environment variables -------------------------------------------------------
BASE_DIR = Path(__file__).resolve().parent.parent
environ.Env.read_env(BASE_DIR / ".env")
env = environ.Env(DEBUG=(bool, False))  # DEBUG defaults to False when unset
# typing ----------------------------------------------------------------------
# Make QuerySet subscriptable at runtime (QuerySet[Model]) so annotations
# evaluate. NOTE(review): presumably a shim for Django versions without
# native support — confirm it is still needed.
QuerySet.__class_getitem__ = classmethod(
    lambda cls, *args, **kwargs: cls  # pyright: ignore
)
# database --------------------------------------------------------------------
# env.db() reads the connection settings from the environment
# (presumably DATABASE_URL — django-environ's default).
DATABASES = {"default": env.db()}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# debugging -------------------------------------------------------------------
DEBUG = env("DEBUG")
TEMPLATE_DEBUG = False
# logging ---------------------------------------------------------------------
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {
            "level": "INFO",
            "class": "logging.StreamHandler",
        },
        # Emails unhandled request errors to ADMINS below.
        "mail_admins": {
            "level": "ERROR",
            "class": "django.utils.log.AdminEmailHandler",
            "include_html": False,
        },
    },
    "loggers": {
        "django": {
            "handlers": ["console"],
            "level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
        },
        "django.request": {
            "handlers": ["mail_admins"],
            "level": "ERROR",
        },
    },
}
# email -----------------------------------------------------------------------
ADMINS = (("Ryan Shaw", "rieyin@icloud.com"),)
MANAGERS = ADMINS
DEFAULT_FROM_EMAIL = "aeshin.org <no-reply@aeshin.org>"
SERVER_EMAIL = DEFAULT_FROM_EMAIL
# SMTP credentials come from the environment (.env); host is AWS SES.
EMAIL_HOST = "email-smtp.us-east-1.amazonaws.com"
EMAIL_HOST_USER = env("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# file uploads ----------------------------------------------------------------
MEDIA_ROOT = env.path("MEDIA_ROOT", default=BASE_DIR / "media/")
MEDIA_URL = "files/"
# globalization ---------------------------------------------------------------
LANGUAGE_CODE = "en-us"
TIME_ZONE = "US/Eastern"
USE_I18N = False
USE_TZ = True
# http ------------------------------------------------------------------------
# Middleware order matters; WhiteNoise serves static files right after the
# security/redirect layers.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "aeshin.middleware.WWWRedirectMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
WSGI_APPLICATION = "aeshin.wsgi.application"
# models ----------------------------------------------------------------------
INSTALLED_APPS = (
    "aeshin",
    "shared",
    "courses",
    "files",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "whitenoise.runserver_nostatic",
    "django.contrib.staticfiles",
    "django.contrib.sites",
)
# security --------------------------------------------------------------------
SECRET_KEY = env("SECRET_KEY")
ALLOWED_HOSTS = [
    ".aeshin.org",
    ".localhost",
    "127.0.0.1",
    "[::1]",
    "aeshin.fly.dev",
]
CSRF_TRUSTED_ORIGINS = [
    "https://*.aeshin.org",
    "https://aeshin.fly.dev",
]
# templates -------------------------------------------------------------------
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.debug",
                "django.template.context_processors.media",
                "django.template.context_processors.static",
                "django.template.context_processors.tz",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.request",
            ]
        },
    }
]
# urls ------------------------------------------------------------------------
ROOT_URLCONF = "aeshin.urls"
# django.contrib.auth ---------------------------------------------------------
LOGIN_URL = "/login/"
LOGIN_REDIRECT_URL = "/loggedin/"
LOGOUT_URL = "/logout/"
# django.contrib.sites --------------------------------------------------------
SITE_ID = 1
# django.contrib.staticfiles --------------------------------------------------
STATIC_ROOT = BASE_DIR / "static"
STATIC_URL = "/static/"
STORAGES = {
    "default": {"BACKEND": "django.core.files.storage.FileSystemStorage"},
    "staticfiles": {
        "BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage"
    },
}
# shared ----------------------------------------------------------------------
ZOTERO_GROUP_ID = "51755"
|
[
"ryanshaw@unc.edu"
] |
ryanshaw@unc.edu
|
6b481b75639a36ee3c439a151988f25c85d6cadd
|
71b3766d0641361a52f62af263fe8efa90fccbab
|
/blog/views.py
|
592ccebccef4e17cd3162123c7aec7c0189fc55e
|
[] |
no_license
|
firchatn/Blog-Website
|
bd4859774fda9cccc60f4eaa4c322cbc0d80d487
|
04663501b442f51f14e0b5fdc1f188488172c455
|
refs/heads/master
| 2021-05-05T15:10:29.114072
| 2018-11-02T09:45:32
| 2018-11-02T09:45:32
| 103,161,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Article, Catecory
# Create your views here.
def index(request):
    """Blog landing page: paginated article list, optionally filtered by
    the ``cat`` query parameter (a category name)."""
    queryset = Article.objects.all()
    all_categories = Catecory.objects.all()
    requested_page = request.GET.get('page')
    category_name = request.GET.get('cat')
    if category_name:
        selected = Catecory.objects.get(name=category_name)
        queryset = Article.objects.filter(catecory=selected)
    pages = Paginator(queryset, 2)  # Show 25 contacts per page
    try:
        current = pages.page(requested_page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        current = pages.page(1)
    except EmptyPage:
        # Page number past the end: clamp to the last page.
        current = pages.page(pages.num_pages)
    # TODO: admin valid article before add it to the blog
    # article = Article.objects.filter(isreviewed=True)
    recent = Article.objects.all()[:3]
    return render(request, 'blog/index.html',
                  {'article': current, 'toplast': recent,
                   'catecory': all_categories})
def contact(request):
    """Render the static contact page."""
    template_name = 'blog/contact.html'
    return render(request, template_name)
|
[
"firaschaabencss@gmail.com"
] |
firaschaabencss@gmail.com
|
47ac38c48ab6a0ffe276aa299b2b85a3c9afe994
|
1eba03a3a7b5f6133dfcbc7a0ab9c73f950a79d8
|
/algorithms/137. Single Number II/main.py
|
966d1c3f0ce2f52164f788ad395c5ee7fc2c6042
|
[] |
no_license
|
GTxx/leetcode
|
ab640cad726111a5fd78ecfbc02f75a61112bc2c
|
b7f85afe1c69f34f8c6025881224ae79042850d3
|
refs/heads/master
| 2021-06-15T18:43:41.358275
| 2021-05-08T08:15:05
| 2021-05-08T08:15:05
| 70,294,841
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
class Solution(object):
    """Single Number II (LeetCode 137)."""

    def singleNumber(self, nums):
        """Return the element appearing exactly once when every other
        element of *nums* appears exactly three times.

        Bitwise mod-3 counting: ``x1`` holds the bits whose running count
        is 1 (mod 3), ``x2`` those whose count is 2 (mod 3).  When a bit
        reaches count 3 it appears in both and is cleared.  O(n) time,
        O(1) extra space.

        :type nums: List[int]
        :rtype: int
        """
        x1 = 0
        x2 = 0
        for num in nums:
            x2 ^= x1 & num
            x1 ^= num
            # BUG FIX: the reset mask must be applied on EVERY iteration.
            # The original applied it once after the loop, which gives
            # wrong answers (e.g. [6, 6, 6, 5] returned 1 instead of 5).
            mask = ~(x1 & x2)
            x2 &= mask
            x1 &= mask
        return x1
if __name__ == "__main__":
    s = Solution()
    # BUG FIX: use the print() function -- the old Python 2 print
    # statement form is a SyntaxError under Python 3.
    print(s.singleNumber([6, 6, 6, 5]))
|
[
"xiongxiong1986@gmail.com"
] |
xiongxiong1986@gmail.com
|
defc4662ad24bac7f0c94489f4d8762a7b00ea29
|
be7bb6d0cbdb27d3ff72830dc9cce41b170b27fe
|
/0x08-python-more_classes/7-rectangle.py
|
07b740b4b98861329278958ae69a395ba2671045
|
[] |
no_license
|
camagar/holbertonschool-higher_level_programming
|
21a8e7c2a2ad07c694c5443e174bb70502f910c2
|
97dd2fade6fb64ac7d9c52e412c0b8c1b8dfc3de
|
refs/heads/master
| 2023-04-07T21:38:00.071687
| 2021-04-14T02:11:42
| 2021-04-14T02:11:42
| 291,889,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
#!/usr/bin/python3
"""create a class"""
class Rectangle(object):
    """Rectangle defined by a non-negative integer width and height."""

    # Count of live Rectangle instances (decremented in __del__).
    number_of_instances = 0
    # Symbol used when rendering the rectangle with str().
    print_symbol = "#"

    def __init__(self, width=0, height=0):
        """Initialize the rectangle and bump the instance counter."""
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """Get the width."""
        return self.__width

    @width.setter
    def width(self, value):
        """Set the width, validating type and sign."""
        if type(value) is not int:
            raise TypeError('width must be an integer')
        if value < 0:
            raise ValueError('width must be >= 0')
        self.__width = value

    @property
    def height(self):
        """Get the height."""
        return self.__height

    @height.setter
    def height(self, value):
        """Set the height, validating type and sign."""
        if type(value) is not int:
            raise TypeError('height must be an integer')
        if value < 0:
            raise ValueError('height must be >= 0')
        self.__height = value

    def area(self):
        """Return the area (width * height)."""
        return self.__width * self.__height

    def perimeter(self):
        """Return the perimeter, or 0 when either dimension is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return (self.__width * 2) + (self.__height * 2)

    def __str__(self):
        """Render the rectangle as height rows of print_symbol."""
        if self.__width == 0 or self.__height == 0:
            return ""
        row = str(self.print_symbol) * self.__width
        return "\n".join(row for _ in range(self.__height))

    def __repr__(self):
        """Return an eval-able representation, e.g. Rectangle(2, 3)."""
        return 'Rectangle({}, {})'.format(self.__width, self.__height)

    def __del__(self):
        """Print a farewell and decrement the instance counter."""
        print("Bye rectangle...")
        Rectangle.number_of_instances -= 1
|
[
"mauriciogrestrepo@gmail.com"
] |
mauriciogrestrepo@gmail.com
|
4ca2f6e5a50c697732e41ef7847d7a9e32d0c8ef
|
d83fde3c891f44014f5339572dc72ebf62c38663
|
/_bin/google-cloud-sdk/.install/.backup/lib/surface/bigtable/clusters/list.py
|
22a4d818ffea8110f2f7395f31ce3f059c5b9a3d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
gyaresu/dotfiles
|
047cc3ca70f4b405ba272856c69ee491a79d2ebe
|
e5e533b3a081b42e9492b228f308f6833b670cfe
|
refs/heads/master
| 2022-11-24T01:12:49.435037
| 2022-11-01T16:58:13
| 2022-11-01T16:58:13
| 17,139,657
| 1
| 1
| null | 2020-07-25T14:11:43
| 2014-02-24T14:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,944
|
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bigtable clusters list command."""
from __future__ import absolute_import
from __future__ import unicode_literals
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.bigtable import arguments
from googlecloudsdk.core import resources
def _GetUriFunction(resource):
  """Return the cluster resource's self-link URI (used for --uri output)."""
  return resources.REGISTRY.ParseRelativeName(
      resource.name,
      collection='bigtableadmin.projects.instances.clusters').SelfLink()
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ListClusters(base.ListCommand):
  """List existing Bigtable clusters.

  List existing Bigtable clusters.

  ## EXAMPLES

  To list all clusters in an instance, run:

    $ {command} --instances INSTANCE_NAME

  To list all clusters in any of several instances, run:

    $ {command} --instances INSTANCE_NAME1,INSTANCE_NAME2
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    arguments.AddInstancesResourceArg(parser, 'to list clusters for')
    parser.display_info.AddFormat("""
          table(
            name.segment(3):sort=1:label=INSTANCE,
            name.basename():sort=2:label=NAME,
            location.basename():label=ZONE,
            serveNodes:label=NODES,
            defaultStorageType:label=STORAGE,
            state
          )
        """)
    parser.display_info.AddUriFunc(_GetUriFunction)
    parser.display_info.AddCacheUpdater(arguments.InstanceCompleter)

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Yields:
      Cluster messages from every requested instance; when --instances is
      omitted, the wildcard instance ref '-' lists clusters across all
      instances in the project.
    """
    cli = util.GetAdminClient()
    instance_refs = args.CONCEPTS.instances.Parse()
    if not args.IsSpecified('instances'):
      instance_refs = [util.GetInstanceRef('-')]
    for instance_ref in instance_refs:
      msg = (
          util.GetAdminMessages()
          .BigtableadminProjectsInstancesClustersListRequest(
              parent=instance_ref.RelativeName()))
      # list_pager transparently follows page tokens; batch_size_attribute
      # None means the API default page size is used.
      for cluster in list_pager.YieldFromList(
          cli.projects_instances_clusters,
          msg,
          field='clusters',
          batch_size_attribute=None):
        yield cluster
|
[
"me@gareth.codes"
] |
me@gareth.codes
|
a109f4af2496f8cf2193422014e3bebe1bfb2884
|
efd81a5e287a398aaa5333e949d6ca40b1544053
|
/config/52_niak_centrality/00_gen_group_mask.py
|
0070a89bd229cbbe1bfd58a95fbcfb6571a9160d
|
[] |
no_license
|
fitrialif/abide-1
|
82d80bf52cd9b36072985a1ddeacfb325791566e
|
9ccc45f612a58dbc3cf5fa3b70c41bcfeabd8ddc
|
refs/heads/master
| 2020-04-25T15:13:22.974634
| 2014-03-10T18:18:42
| 2014-03-10T18:18:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,892
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import os, yaml
from os import path as op
def run(cmd):
    """Echo *cmd*, then execute it through the shell via os.system."""
    print(cmd)
    os.system(cmd)
# First read in a quick pack file with all the paths
fn = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/config/50_niak/quick_pack_run1_nofilt_noglobal.yml"
# NOTE(review): yaml.load without an explicit Loader is deprecated (and
# unsafe on untrusted input) in modern PyYAML -- confirm the environment
# pins an old version.
subinfo = yaml.load(open(fn, 'r'))
# Second extract path to masks in standard space
masks = [ si['functional_brain_mask_to_standard']['run1'] for si in subinfo ]
for mask in masks:
    if not op.exists(mask):
        print("missing: %s" % mask)
# Third combine the masks into a single 4D volume
cmd = "fslmerge -t combined_masks.nii.gz %s" % ' '.join(masks)
print(cmd, file=open("tmp.cmd", "w")) # for some reason, running it directly doesn't work
run("bash tmp.cmd")
# Fourth get a 90% and 100% masks (voxels present in >=90% / all subjects)
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "fslmaths combined_masks.nii.gz -Tmean -thr 0.9 -bin %s/mask_niak_90percent.nii.gz" % odir
run(cmd)
cmd = "fslmaths combined_masks.nii.gz -Tmean -thr 1 -bin %s/mask_niak_100percent.nii.gz" % odir
run(cmd)
# Fifth get the grey matter mask into the same space as niak's data
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "cd %s; 3dresample -input MNI152_T1_GREY_3mm_25pc_mask.nii.gz -master mask_niak_90percent.nii.gz -prefix MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz -rmode NN" % odir
run(cmd)
# Sixth (originally mislabelled "Fifth") combine that mask with the grey matter
odir = "/home2/data/Projects/ABIDE_Initiative/CPAC/abide/templates/masks"
cmd = "cd %s; fslmaths %s -mas %s %s" % (odir, "mask_niak_90percent.nii.gz", "MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz", "mask_niak_90percent_gm.nii.gz")
run(cmd)
cmd = "cd %s; fslmaths %s -mas %s %s" % (odir, "mask_niak_100percent.nii.gz", "MNI152_T1_GREY_3mm_25pc_mask_niak.nii.gz", "mask_niak_100percent_gm.nii.gz")
run(cmd)
|
[
"czarrar@gmail.com"
] |
czarrar@gmail.com
|
1a084933a4396b2d4ac47a77e5b0c1463ab35b6f
|
286a49d0360ee2eb718dd9a496be88555cef3227
|
/229. 求众数 II.py
|
feeac888d4a69cfbebe04caeb6b8e78a73040e78
|
[] |
no_license
|
NaiveteYaYa/data-structrue
|
0618ab6bb7accc99c40e39a3ca60bbc0a9723c2f
|
a376863c1a8e007efafd5c1ed84929a80321b1b9
|
refs/heads/master
| 2023-07-02T03:15:33.523855
| 2021-08-14T02:02:07
| 2021-08-14T02:02:07
| 395,857,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,043
|
py
|
# -*- coding: utf-8 -*-
# @Time    : 2020/4/2 14:20
# @Author  : WuxieYaYa
"""
Given an array of size n, find all elements that appear more than
floor(n / 3) times.

Note: the algorithm should run in O(n) time and O(1) space.

Example 1:
    Input:  [3,2,3]
    Output: [3]

Example 2:
    Input:  [1,1,1,3,3,2,2,2]
    Output: [1,2]

Problem: https://leetcode-cn.com/problems/majority-element-ii

A simple way to understand the Boyer-Moore majority vote:

Two differences from problem 169 (Majority Element):
  * "majority" here means more than n/3 occurrences, not n/2, so at most
    two elements can qualify and two candidates must be tracked;
  * the problem does not guarantee that a majority element exists, so the
    candidates must be verified afterwards.  The flow therefore has two
    phases: step 1 voting, step 2 verification.

Core of the algorithm, for candidates cand1 and cand2:
  * a vote for cand1 adds one vote to cand1;
  * a vote for cand2 adds one vote to cand2;
  * a vote for any other element removes one vote from each candidate.

Intuition: in problem 169, if the candidate is the majority element, the
majority (> n/2) beats all other elements combined (< n/2); if it is not,
it (< n/2) is beaten by the majority plus the rest (> n/2).  Here, with
camps A, B and "others": if the candidates are A and B, each one (> n/3)
survives against "others" (< n/3); a candidate C drawn from "others"
cannot out-vote A or B.

Time complexity O(n), space complexity O(1).

Author of the explanation: coldme-2
https://leetcode-cn.com/problems/majority-element-ii/solution/mo-er-tou-piao-fa-de-jian-dan-li-jie-by-coldme-2/
"""
def majorityElement(nums):
    """Return all elements appearing more than ``len(nums) // 3`` times.

    At most two such elements can exist.  A single counting pass
    (O(n) time, O(k) space for k distinct values) replaces the previous
    active implementation, which called ``nums.count`` once per distinct
    value -- O(n*k) in the worst case -- and carried ~40 lines of
    commented-out dead code.

    :param nums: list of hashable values (typically ints); may be empty.
    :return: list of majority elements, in arbitrary order.
    """
    from collections import Counter

    threshold = len(nums) // 3
    return [value for value, count in Counter(nums).items()
            if count > threshold]
if __name__ == '__main__':
    # Smoke test; expected output: the two majority elements [1, 2]
    # (iteration order over the counted values is arbitrary).
    print(majorityElement([1,1,1,3,3,2,2,2]))
|
[
"jgm247878528@.qq.com"
] |
jgm247878528@.qq.com
|
9b4bfa3a8c824efe83f17773632977134e891853
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/68/18502/submittedfiles/main.py
|
9b3141f2cb1df838fab5b35110951b9ec577eca6
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import funcoes
#COMECE AQUI
def fatorial(m):
    """Return m! (factorial), with m! == 1 for any m < 2."""
    produto = 1
    fator = 2
    while fator <= m:
        produto = produto * fator
        fator = fator + 1
    return produto
m=int(input('Digite m:'))
# BUG FIX: epsilon must be numeric -- under Python 3, input() returns a
# string and comparing a float term against it raises TypeError.
e=float(input('Digite o epsilon para o cosseno:'))
# absolute value of m
if m<0:
    m=m*(-1)
# Approximate pi with m terms of the Nilakantha series:
#   pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
soma_pi=0
j=2
for i in range (0,m,1):
    if i%2==0:
        soma_pi=soma_pi+(4/(j*(j+1)*(j+2)))
    else:
        soma_pi=soma_pi-(4/(j*(j+1)*(j+2)))
    j=j+2
pi=3+soma_pi
# Cosine of pi/5 via its Taylor series, summing terms while the current
# term `a` is at least epsilon.
soma_cosseno=0
i=1
j=2
a=(((pi/5)**j)/fatorial (j))
while a>=e:
    if i%2!=0:
        soma_cosseno = soma_cosseno + a
    else:
        soma_cosseno = soma_cosseno - a
    j=j+2
    i=i+1
    # BUG FIX: recompute the current term -- the original never updated
    # `a` inside the loop, so the loop never terminated for any
    # epsilon below the first term.
    a=(((pi/5)**j)/fatorial (j))
cosseno=1-soma_cosseno
# golden ratio = 2 * cos(pi/5)
razaoAurea= 2*cosseno
print('%.15f' %pi)
print('%.15f' %razaoAurea)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
5e8e3c2e021324492b3bddcc3682d341a0a778d6
|
4946fa19e840aafb7b3ed4ae159764af44c0ff34
|
/pages/urls.py
|
fd2b01a536ecda7eb1db9d3615cd50bf4701a964
|
[] |
no_license
|
palmman/pedshop
|
c804be2fa8d1a7ce49c86c433a9bb00731146811
|
74aa002272e286e220e1e66fb701209ce9a055a6
|
refs/heads/main
| 2023-04-18T00:10:59.525763
| 2021-04-28T05:51:38
| 2021-04-28T05:51:38
| 362,352,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name='home'),
    path('shop', views.shop, name='shop'),
    path('about', views.about, name='about'),
    path('contact', views.contact, name='contact'),
    # Product detail by numeric id; placed after the literal routes so the
    # int converter only matches paths the names above did not.
    path('<int:id>', views.product, name='product'),
    # Category listing reuses the shop view, filtered by slug.
    path('category/<slug:pages_slug>/', views.shop, name='products_by_category'),
]
|
[
"palm454555@hotmail.com"
] |
palm454555@hotmail.com
|
912d1cc8bfd900d2efb1333cf76904f99bd70ae4
|
e34cbf5fce48f661d08221c095750240dbd88caf
|
/python/day42_sqlalchemy/4.0.scoped_session.py
|
b723853a14b95b576bc03df5c3d1d10b7857df60
|
[] |
no_license
|
willianflasky/growup
|
2f994b815b636e2582594375e90dbcb2aa37288e
|
1db031a901e25bbe13f2d0db767cd28c76ac47f5
|
refs/heads/master
| 2023-01-04T13:13:14.191504
| 2020-01-12T08:11:41
| 2020-01-12T08:11:41
| 48,899,304
| 2
| 0
| null | 2022-12-26T19:46:22
| 2016-01-02T05:04:39
|
C
|
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
#!/usr/bin/env python
# -*-coding:utf8-*-
# date: 2018/2/23 上午11:45
__author__ = "willian"
import time
import threading
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import sessionmaker, relationship, scoped_session
from sqlalchemy import create_engine
from sqlalchemy.sql import text
engine = create_engine("mysql+pymysql://s6:s6@127.0.0.1:3306/s6", max_overflow=0, pool_size=5)
Session = sessionmaker(bind=engine)
# 方式一: 由于无法提供线程共享功能,所以在开发时要注意,在每个线程中自己创建session.
# 自己具有操作数据库的 close commit execute...方法
# session = Session()
# session.close()
# 方式二: scoped_session 支持线程安全,为每个线程创建一个session
# - threading.Local
# - 唯一标识
# 源码剖析 第一步:command+单击
session = scoped_session(Session)
session.remove()
"""
session = scoped_session(Session)
session中两个值
1. self.session_factory
2. self.registry 中又有两个值, 加括号创建session
1> self.registry.self.session_factory(createfunc)
2> self.registry.self.registry(没有写错)
"""
|
[
"284607860@qq.com"
] |
284607860@qq.com
|
1522254803b17907540e7f62b7738bd022e97f1f
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/sistema-de-contatos/.venv/lib/python2.7/site-packages/toolz/__init__.py
|
43226df7316aa0545101101540d51ff04f94c368
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
from .itertoolz import *
from .functoolz import *
from .dicttoolz import *
from .recipes import *
# Intentionally shadows the builtins -- presumably the Python 2/3
# compatibility shims so toolz.map/filter behave consistently; confirm
# against the compatibility module.
from .compatibility import map, filter
from . import sandbox
from functools import partial, reduce
# Re-export the builtin so `from toolz import sorted` works.
sorted = sorted
# Aliases
comp = compose
# Populate the introspection signature registry used by curried functions.
functoolz._sigs.create_signature_registry()
__version__ = '0.8.0'
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
c259f5026a586e6ea50ad764940a3a142ae65202
|
c7f4387733c95ced53dae485f36618a88f18ea45
|
/Uri/1061.py
|
3e14cd823da4b724d092a9f5fbb6458bae7fd7b6
|
[] |
no_license
|
douradodev/Uri
|
25d7636b5d5553fafdbd61a38d7c465c4cb79c0c
|
e879ebca7a87de674d69d739617c4207156ce349
|
refs/heads/main
| 2023-06-03T18:53:11.749866
| 2021-06-22T12:40:11
| 2021-06-22T12:40:11
| 379,264,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
def main():
    """Compute the elapsed time between two day/time stamps (URI 1061).

    Reads four stdin lines: begin day, begin time "HH : MM : SS",
    end day, end time -- presumably in the judge's "Dia N" format, since
    only token [1] of each day line is used (TODO confirm).
    Subtracts field by field with manual borrowing from seconds up to days.
    """
    begin_day = input().split()
    begin_time= input().split(' : ')
    end_day = input().split()
    end_time= input().split(' : ')
    # Convert the "HH : MM : SS" tokens to int tuples (hour, minute, second).
    begin_time = int(begin_time[0]), int(begin_time[1]), int(begin_time[2])
    end_time = int(end_time[0]), int(end_time[1]), int(end_time[2])
    total_day = 0
    total_time = [0,0,0]  # [hours, minutes, seconds]
    total_day = int(end_day[1]) - int(begin_day[1])
    # Seconds, borrowing one minute when negative (dif_time carries the borrow).
    if end_time[2] - begin_time[2] < 0:
        total_time[2] = end_time[2] + 60 - begin_time[2]
        dif_time = 1
    else:
        total_time[2] = end_time[2] - begin_time[2]
        dif_time = 0
    # Minutes, consuming the seconds borrow and possibly borrowing an hour.
    if (end_time[1] - dif_time) - begin_time[1] < 0:
        total_time[1] = (end_time[1] - dif_time + 60) - begin_time[1]
        dif_time = 1
    else:
        total_time[1] = (end_time[1] - dif_time) - begin_time[1]
        dif_time = 0
    # Hours, consuming the minutes borrow and possibly borrowing a day.
    if (end_time[0] - dif_time) - begin_time[0] < 0:
        total_time[0] = (end_time[0] - dif_time + 24) - begin_time[0]
        total_day -= 1
    else:
        total_time[0] = (end_time[0] - dif_time) - begin_time[0]
    print('{} dia(s)\n{} hora(s)\n{} minuto(s)\n{} segundo(s)'.format(total_day, total_time[0], total_time[1], total_time[2]))
main()
|
[
"victorhenrique01000@gmail.com"
] |
victorhenrique01000@gmail.com
|
aa29aa9dd6c0b5f6833afd90f618c86c2bebc4b7
|
0386591b51fdbf5759faef6afb8729b64a3f1589
|
/layerserver/widgets/creationuser.py
|
0d14842f70435682d0eb6129fb35fbba132c0939
|
[
"BSD-3-Clause"
] |
permissive
|
giscube/giscube-admin
|
1e155402e094eb4db1f7ca260a8d1402e27a31df
|
4ce285a6301f59a8e48ecf78d58ef83c3827b5e0
|
refs/heads/main
| 2023-07-11T17:23:56.531443
| 2023-02-06T15:12:31
| 2023-02-06T15:12:31
| 94,087,469
| 7
| 1
|
BSD-3-Clause
| 2023-07-07T13:22:09
| 2017-06-12T11:12:56
|
Python
|
UTF-8
|
Python
| false
| false
| 371
|
py
|
from .base import BaseWidget
class CreationUserWidget(BaseWidget):
    """Widget that stamps the requesting user's username into the record."""

    # Underlying value type reported to the widget framework.
    base_type = 'string'

    @staticmethod
    def create(request, validated_data, widget):
        """On create, store request.user.username under the widget's field name."""
        validated_data[widget['name']] = request.user.username

    @staticmethod
    def is_valid(cleaned_data):
        """Require the field to be configured as readonly.

        NOTE(review): returns the error constant when 'readonly' is falsy
        and implicitly None otherwise -- confirm callers treat a falsy
        return as "valid".
        """
        if not cleaned_data['readonly']:
            return BaseWidget.ERROR_READONLY_REQUIRED
|
[
"abusquets@gmail.com"
] |
abusquets@gmail.com
|
757ad5797b4182e0b1dc39f8fd424e66c7e6df6b
|
23307f8e889f232724756bb26b1def1f0ba3323b
|
/fairseq/tasks/speech_to_text.py
|
9388047a5e92e1c66236022de664b0480b9862be
|
[] |
no_license
|
krisjeong/fairseq_data
|
9395cb574d91147c95b6f08eecd814e4cb2fdad8
|
f29e7dae3c2be3a908e795bfc952cc845b80280d
|
refs/heads/master
| 2023-07-12T22:21:22.349970
| 2021-08-18T06:20:11
| 2021-08-18T06:20:11
| 397,152,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,214
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os.path as op
from argparse import Namespace
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_dataset import (
S2TDataConfig,
SpeechToTextDataset,
SpeechToTextDatasetCreator,
)
from fairseq.tasks import FairseqTask, register_task
# Configure root logging once at import time so task messages are visible.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)
@register_task("speech_to_text")
class SpeechToTextTask(FairseqTask):
    """Fairseq task for speech-to-text translation/recognition.

    Datasets are described by a YAML config (``--config-yaml``) under the
    manifest root (``data``); targets use a single dictionary whose
    filename comes from that config.
    """

    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument("data", help="manifest root path")
        parser.add_argument(
            "--config-yaml",
            type=str,
            default="config.yaml",
            help="Configuration YAML filename (under manifest root)",
        )
        parser.add_argument(
            "--max-source-positions",
            default=6000,
            type=int,
            metavar="N",
            help="max number of tokens in the source sequence",
        )
        parser.add_argument(
            "--max-target-positions",
            default=1024,
            type=int,
            metavar="N",
            help="max number of tokens in the target sequence",
        )

    def __init__(self, args, tgt_dict):
        """Store the target dictionary and parse the data-config YAML."""
        super().__init__(args)
        self.tgt_dict = tgt_dict
        self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task: load the target dictionary named by the
        data config and validate train-subset naming."""
        data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml))
        dict_path = op.join(args.data, data_cfg.vocab_filename)
        if not op.isfile(dict_path):
            raise FileNotFoundError(f"Dict not found: {dict_path}")
        tgt_dict = Dictionary.load(dict_path)
        logger.info(
            f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
        )
        if getattr(args, "train_subset", None) is not None:
            if not all(s.startswith("train") for s in args.train_subset.split(",")):
                raise ValueError('Train splits should be named like "train*".')
        return cls(args, tgt_dict)

    def build_criterion(self, args):
        """Build the training criterion; when a target language tag is
        prepended as BOS, exactly one prefix token must be ignored."""
        from fairseq import criterions

        if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1:
            raise ValueError(
                'Please set "--ignore-prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        return criterions.build_criterion(args, self)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load one data split (TSV manifest) into self.datasets[split]."""
        is_train_split = split.startswith("train")
        pre_tokenizer = self.build_tokenizer(self.args)
        bpe_tokenizer = self.build_bpe(self.args)
        self.datasets[split] = SpeechToTextDatasetCreator.from_tsv(
            self.args.data,
            self.data_cfg,
            split,
            self.tgt_dict,
            pre_tokenizer,
            bpe_tokenizer,
            is_train_split=is_train_split,
            epoch=epoch,
            seed=self.args.seed,
        )

    @property
    def target_dictionary(self):
        """Target-side dictionary."""
        return self.tgt_dict

    @property
    def source_dictionary(self):
        """No source dictionary: the input is audio features, not tokens."""
        return None

    def max_positions(self):
        """Return (max source frames, max target tokens)."""
        return self.args.max_source_positions, self.args.max_target_positions

    def build_model(self, args):
        """Inject input feature dimensions from the data config, then build."""
        args.input_feat_per_channel = self.data_cfg.input_feat_per_channel
        args.input_channels = self.data_cfg.input_channels
        return super(SpeechToTextTask, self).build_model(args)

    def build_generator(
        self,
        models,
        args,
        seq_gen_cls=None,
        extra_gen_cls_kwargs=None,
    ):
        """Build a sequence generator that strips language-tag symbols
        from generated output."""
        if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1:
            raise ValueError(
                'Please set "--prefix-size 1" since '
                "target language ID token is prepended as BOS."
            )
        lang_token_ids = {
            i
            for s, i in self.tgt_dict.indices.items()
            if SpeechToTextDataset.is_lang_tag(s)
        }
        # NOTE(review): any caller-supplied extra_gen_cls_kwargs is discarded
        # here, and seq_gen_cls is forwarded as None regardless of the
        # parameter -- confirm this is intentional.
        extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids}
        return super().build_generator(
            models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
        )

    def build_tokenizer(self, args):
        """Build the pre-tokenizer declared in the data config."""
        logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
        return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))

    def build_bpe(self, args):
        """Build the subword (BPE) tokenizer declared in the data config."""
        logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
        return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))

    @classmethod
    def build_dataset_for_inference(cls, audio_paths, n_frames):
        """Wrap raw audio paths in a minimal dataset for interactive decoding."""
        return SpeechToTextDataset("interactive", False, {}, audio_paths, n_frames)
|
[
"krisjeong00@gmail.com"
] |
krisjeong00@gmail.com
|
43c06f8278a5366020f9d1faef6d11fbe0df03ae
|
82ebc6142f7044f8e908ffd6b2dc9e699191fd36
|
/users/serializers.py
|
a7c000728c4688b5ce63c1f4c258ca68ee3a3d0d
|
[] |
no_license
|
32dantey/shopbuild
|
4f775209e5b320364a8a845583c0d3c77f9844ee
|
745b6cf73c8da52ed93b8bfe49055624dfa0aea2
|
refs/heads/master
| 2023-08-25T17:29:23.470994
| 2021-11-14T14:17:05
| 2021-11-14T14:17:05
| 427,917,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a minimal, safe subset of the auth User."""
    class Meta:
        model = User
        fields = ['username', 'email', 'is_staff']
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
815579dd6d44ae403fc012a4f99d9bb8a607d842
|
4aec44fe50fa5c40f80c45bfb160d2fa7a98a0a9
|
/students/jsward/lesson07/assignment/linear.py
|
f11fc105aa5a6d00df68e15542e76269dc162e4d
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/220-Advanced-Summer-2019
|
4e51fde79921e6e75f590bef223bc1b0f118ef41
|
6ffd7b4ab8346076d3b6cc02ca1ebca3bf028697
|
refs/heads/master
| 2022-12-13T01:22:01.063023
| 2019-09-22T10:21:37
| 2019-09-22T10:21:37
| 194,944,978
| 4
| 18
| null | 2022-12-08T01:22:40
| 2019-07-02T22:51:07
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,005
|
py
|
# They are not, in fact, constants...
# pylint: disable=C0103
# pylint: disable=W0703
"""
Lesson 7: Linear
Relational concept Mongo DB equivalent
Database Database
Tables Collections
Rows Documents
Index Index
"""
import cProfile
import csv
import datetime
import logging
import sys
import time
from pymongo import MongoClient
from pymongo import errors as pymongo_errors
# Log to stdout AND to a timestamped file (e.g. mongo_2019-09-22-10:21:37.log).
log_format = "%(asctime)s\t%(message)s"
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler("mongo_{}.log".format(datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")))
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
# Module-level MongoDB handles; connection is lazy until first operation.
mongo_client = MongoClient("mongodb://localhost:27017")
assignment_db = mongo_client["assignment"]
def import_data(data_dir, *files):
    """ Imports data from file(s) to mongodb.

    Each CSV becomes a collection named after the file's stem; the first
    row supplies field names.  Returns one tuple per file:
    (records inserted, records before, records after, seconds elapsed).
    """
    list_of_tuples = []
    for file_path in files:
        processed = 0
        collection_name = file_path.split(".")[0]
        try:
            # NOTE(review): counts by iterating the whole collection -- O(n)
            # per file; count_documents({}) would be cheaper.
            count_prior = sum(1 for _ in assignment_db[collection_name].find())
        except Exception:
            logger.info("No existing records found in collection %s", collection_name)
            count_prior = 0
        with open("/".join([data_dir, file_path])) as file:
            reader = csv.reader(file, delimiter=",")
            header = False
            start_time = time.time()
            for row in reader:
                if not header:
                    # First row = column names; strip a UTF-8 BOM if present.
                    header = [h.strip("\ufeff") for h in row]
                else:
                    data = {header[i]: v for i, v in enumerate(row)}
                    try:
                        assignment_db[collection_name].insert_one(data)
                        processed += 1
                        logger.debug("Inserted record %s into collection %s", data, collection_name)
                    except pymongo_errors.ServerSelectionTimeoutError as ex:
                        # Server unreachable: abandon this file entirely.
                        logger.error("Timeout or connection refused when connecting to MongoDB: %s", ex)
                        break
                    except Exception as ex:
                        # Any other per-record failure: log and keep going.
                        logger.error("Error inserting record %s into table %s in MongoDB %s Error: %s",
                                     data, assignment_db.name, mongo_client, ex)
                        continue
            end_time = time.time()
            list_of_tuples.append(tuple([processed, count_prior, (count_prior + processed), (end_time - start_time)]))
            logger.info("Inserted %s records into collection %s in %s", processed, collection_name, (end_time - start_time))
            logger.info("Collection now contains %s records", (count_prior + processed))
    return list_of_tuples
if __name__ == "__main__":
import_data('data', 'customers.csv', 'products.csv')
# print(results)
|
[
"james@Jamess-MacBook-Pro.local"
] |
james@Jamess-MacBook-Pro.local
|
fe721a5d634410d1e7eae1f657adedf3d2a421f4
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/keras/preprocessing/image.py
|
f2a6b9eb3dcc6002673a3e3a13516299983498ad
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:dd9edf94ef6b932c96aa6c9f40e3c19503ccfed4c5a10d0871bd11259eafd357
size 21747
|
[
"github@cuba12345"
] |
github@cuba12345
|
bc99ce65235a3ffa79223116c532a78ee3ef3d86
|
4273f162abb12ef1939271c2aabee9547ac6afee
|
/studio_usd_pipe/resource/push/maya/uv/extractor_thumbnail.py
|
8f6da8661609560730437f9504ee9bfc291638a7
|
[] |
no_license
|
xiyuhao/subins_tutorials
|
2717c47aac0adde099432e5dfd231606bf45a266
|
acbe4fe16483397e9b0f8e240ca23bdca652b92d
|
refs/heads/master
| 2023-07-28T13:42:41.445399
| 2021-09-12T11:02:37
| 2021-09-12T11:02:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 876
|
py
|
# Plugin metadata -- presumably consumed by the pipeline's plugin loader
# (TODO confirm the exact keys the loader reads).
NAME = 'Extract UV Thumbnail'
ORDER = 1            # execution order among extractor plugins
VALID = True         # whether the plugin is enabled
TYPE = 'extractor'   # plugin category
KEY = 'uv_thumbnail' # key the extracted artifact is stored under
OWNER = 'Subin Gopi'
COMMENTS = 'To create uv thumbnail file'
VERSION = '0.0.0'
MODIFIED = 'April 19, 2020'
def execute(output_path=None, **kwargs):
    """Create the UV thumbnail image for the current asset.

    :param output_path: directory to write '<caption>.png' into.
    :param kwargs: expects at least 'thumbnail' (source image path) and
        'caption' (output basename) -- TODO confirm the full contract.
    :return: (ok, [path], message) tuple consumed by the push pipeline.
    """
    import os
    from studio_usd_pipe.core import common
    from studio_usd_pipe.utils import maya_asset
    if not os.path.isfile(kwargs['thumbnail']):
        return False, [kwargs['thumbnail']], 'not found input thumbnail!...'
    ouput_image_path = os.path.join(
        output_path,
        '{}.png'.format(kwargs['caption'])
    )
    # 'premission' (sic): checks the target file may be created/overwritten.
    premission = common.data_exists(ouput_image_path, True)
    if not premission:
        return False, [ouput_image_path], 'not able to save thumbnail!...'
    thumbnail = maya_asset.create_thumbnail(kwargs['thumbnail'], ouput_image_path)
    return True, [thumbnail], 'success!...'
|
[
"subing85@gmail.com"
] |
subing85@gmail.com
|
967dc456ae8754460e5768a8eb7b68d269bb5fd9
|
d4bbbb07826fd11d071624761c3a452e431cec8f
|
/models/process_data.py
|
398631223021b2ea0a47c8b791f81c6922aaaaa5
|
[
"MIT"
] |
permissive
|
planetnest/epl-prediction
|
ecb88fb1b9fbea8d93637a547fb559b004f29bb7
|
ffd4eb626d18829df49e07663ef74cd3735ca9d3
|
refs/heads/master
| 2021-07-06T19:07:14.132246
| 2017-09-27T23:45:15
| 2017-09-27T23:45:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,335
|
py
|
import os.path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from .preprocess import process_data
DATASET_DIR = '../datasets/'
DATA_FILES = ['epl-2015-2016.csv', 'epl-2016-2017.csv', 'epl-2017-2018.csv']
CURR_SEASON_DATA = os.path.join(DATASET_DIR, DATA_FILES[-1])
USELESS_ROWS = ['Referee', 'Div', 'Date', 'HomeTeam', 'AwayTeam']
def load_data():
    """Concatenate every season CSV into one DataFrame.

    Drops the non-predictive string columns (referee, division, date,
    team names) from each season before concatenation.
    """
    dataset = pd.read_csv(CURR_SEASON_DATA)
    dataset.drop(USELESS_ROWS, axis=1, inplace=True)
    for d_file in DATA_FILES[:-1]:
        d_file = os.path.join(DATASET_DIR, d_file)
        data = pd.read_csv(d_file)
        data.drop(USELESS_ROWS, axis=1, inplace=True)
        dataset = pd.concat([dataset, data])
    return dataset
def get_remaining_features(home, away):
    """Average the current-season stats of *home*'s home games and
    *away*'s away games.

    :param home: home team name (matched case-insensitively via .title()).
    :param away: away team name.
    :return: (home_avg, away_avg) -- np.ndarray column means over the
        numeric columns of the current-season CSV.
    """
    df = pd.read_csv(CURR_SEASON_DATA)
    # Home team and Away team
    home_team = df['HomeTeam'].values
    away_team = df['AwayTeam'].values
    # Get the indexes for home and away team
    home_idx = get_index(home_team.tolist(), home)
    away_idx = get_index(away_team.tolist(), away)
    # Drop string columns
    df.drop(['Div', 'Date', 'HomeTeam', 'AwayTeam', 'FTR', 'HTR', 'Referee'], axis=1, inplace=True)
    # Get rows where the home and away team shows up respectively
    home_data = df.values[home_idx]
    away_data = df.values[away_idx]
    return np.average(home_data, axis=0), np.average(away_data, axis=0)
def get_index(teams, value):
    """Return the positions in *teams* whose entry equals value.title()."""
    target = value.title()
    matches = []
    for position, team in enumerate(teams):
        if team == target:
            matches.append(position)
    return matches
def preprocess_features(X):
    """Clean up any non-numeric data.

    Categorical (object-dtype) columns are expanded into dummy/indicator
    columns via ``pd.get_dummies``; numeric columns pass through unchanged.

    :param X: ``pd.DataFrame`` of features to be cleaned.
    :return: new ``pd.DataFrame`` with numeric-only values, same index as X.
    """
    # init new output dataframe
    output = pd.DataFrame(index=X.index)
    # investigate each feature col for data; .items() replaces the
    # DataFrame.iteritems() call that was removed in pandas 2.0.
    for col, col_data in X.items():
        # if data is categorical, convert to dummy variables
        if col_data.dtype == object:
            col_data = pd.get_dummies(col_data, prefix=col)
        # collect the converted cols
        output = output.join(col_data)
    return output
def process(filename=None, test_size=None, train_size=None):
    """
    Process data into training and testing set.
    :param filename: str or None (default is None)
        The path to the `csv` file which contains the dataset. If
        set to None, it will load all the datasets.
    :param test_size: float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split. If
        int, represents the absolute number of test samples. If None,
        the value is automatically set to the complement of the train size.
        If train size is also None, test size is set to 0.25.
    :param train_size: float, int, or None (default is None)
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the train split. If
        int, represents the absolute number of train samples. If None,
        the value is automatically set to the complement of the test size.
    :return: X_train, X_test, y_train, y_test
        `np.ndarray` objects: the stratified, shuffled train/test split
        of the features and of the FTR label column.
    """
    if filename:
        data = pd.read_csv(filename)
    else:
        data = load_data()
    print(data.columns.values)
    # FTR = full time result
    X_all = data.drop(['FTR'], axis=1)
    y_all = data['FTR']
    X_all = process_data(X_all)
    # Split into training and testing data
    # random_state fixed for reproducibility; stratify keeps class balance.
    X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
                                                        test_size=test_size, train_size=train_size,
                                                        random_state=42, stratify=y_all)
    return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)
if __name__ == '__main__':
    # Smoke test: build the train/test split from every bundled season
    # and report the resulting array shapes.
    X_train, X_test, y_train, y_test = process(filename=None)
    print(X_train.shape, y_train.shape)
    print(X_test.shape, y_test.shape)
|
[
"javafolabi@gmail.com"
] |
javafolabi@gmail.com
|
b68be730fe886ebea5a66fb439c78439510f4794
|
e7a46c0f63e7595a533ab58a7db07b1c12ef6092
|
/begpyprog/integr.py
|
6504b69f2a91ac9fcea08095da70da492eb0ce9f
|
[] |
no_license
|
sockduct/Books
|
263ab81b72e39a11acc83b698c76b41104d8bd20
|
954039ff4abf51bbfec05944e5175cefe232a68f
|
refs/heads/master
| 2021-01-10T03:37:47.340931
| 2016-10-29T12:34:58
| 2016-10-29T12:34:58
| 55,922,532
| 0
| 1
| null | 2016-10-29T12:34:59
| 2016-04-10T21:06:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 970
|
py
|
####################################################################################################
'''
Simple program to convert a string of integers separated by commas into an integer list
'''
# Imports
import sys
from BadInput import BadInput
__version__ = '0.0.1'
def parse(input):
    """Convert a comma-separated string of integers into a list of ints.

    Spaces anywhere in the string are ignored.  Raises BadInput (carrying
    the offending field list) if any field is not a valid integer.
    """
    # Cleanup: removed dead commented-out handlers and the unused
    # exception binding; logic is unchanged (kept Python-2 compatible).
    fields = input.replace(' ', '').split(',')
    try:
        return [int(field) for field in fields]
    except ValueError:
        raise BadInput(fields)
if __name__ == '__main__':
    # CLI entry (Python 2 print syntax): each argument is parsed and echoed.
    if len(sys.argv) < 2:
        print 'Usage: {} <string of integers separated by commas> [<str2> ...]'.format(
            sys.argv[0])
        sys.exit(-1)
    for elmt in sys.argv[1:]:
        print parse(elmt)
|
[
"james.r.small@outlook.com"
] |
james.r.small@outlook.com
|
8bf6f30a0b6898775a955c99c1135e2fb41fbb1c
|
9f46d82b1bbb561d663fbdbaa14331b9193fb18d
|
/buses/migrations/0002_auto_20200903_0438.py
|
eba7853d4e57eefbd553a172fc37a6f95240605f
|
[] |
no_license
|
ziaurjoy/simple-class-based
|
32012b56bb727ca5891d3938b024cdda4c4f30c8
|
9fd881d83e2e573c7974caeefc89bb7b03a78a05
|
refs/heads/master
| 2022-12-07T23:50:03.114676
| 2020-09-07T14:11:06
| 2020-09-07T14:11:06
| 293,546,616
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
# Generated by Django 3.1 on 2020-09-03 04:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: sets a plural verbose name on
    BussesCompany (note: 'bus companis' typo is preserved — it is data)
    and creates the Bus model with an FK to its operator company."""

    dependencies = [
        ('buses', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='bussescompany',
            options={'verbose_name_plural': 'bus companis'},
        ),
        migrations.CreateModel(
            name='Bus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('serial_number', models.CharField(db_index=True, max_length=15)),
                ('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='buses.bussescompany')),
            ],
        ),
    ]
|
[
"ziaurjoy802@gmail.com"
] |
ziaurjoy802@gmail.com
|
e411a3f2ff7be97ff72496885a1285324ae4b0cd
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/895_Maximum_Frequency_Stack.py
|
b083d8b11400faa43d9540631aac9d70eb9f35a3
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 797
|
py
|
from collections import defaultdict
import heapq
class FreqStack(object):
    """Stack that pops the most frequent element; ties break by recency
    (LeetCode 895).

    A max-heap keyed on (frequency-at-push, push order) gives O(log n)
    push/pop.  Cleanup: pop no longer binds unused count/index locals.
    """

    def __init__(self):
        self.counter = defaultdict(int)  # value -> current frequency
        self.stackIdx = -1               # monotonically increasing push order
        self.maxHeap = []                # entries: (-freq, -order, value)

    def push(self, x):
        """
        :type x: int
        :rtype: None
        """
        self.counter[x] += 1
        self.stackIdx += 1
        # Negated keys turn heapq's min-heap into a max-heap.
        heapq.heappush(self.maxHeap, (-self.counter[x], -self.stackIdx, x))

    def pop(self):
        """
        :rtype: int
        Remove and return the most frequent (then most recent) element.
        """
        _, _, x = heapq.heappop(self.maxHeap)
        self.counter[x] -= 1
        return x
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop()
|
[
"partho.biswas@aurea.com"
] |
partho.biswas@aurea.com
|
7c5f10be6bb29de0efad4fe84a70e7dd2449fd64
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D01B/MSCONSD01BUN.py
|
eb4f55884635f28f92c53726be35f53eb089349d
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD01BUN import recorddefs
# Generated segment table for the MSCONS D01B (metered services consumption
# report) message: each dict names a segment, its min/max repeat counts and,
# via LEVEL, the segments nested beneath it.  Do not hand-edit ordering.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 1, MAX: 9},
        {ID: 'CUX', MIN: 0, MAX: 9},
        {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
        ]},
        {ID: 'NAD', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
            ]},
            {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 9},
            ]},
        ]},
        {ID: 'UNS', MIN: 1, MAX: 1},
        {ID: 'NAD', MIN: 1, MAX: 99999, LEVEL: [
            {ID: 'LOC', MIN: 1, MAX: 99999, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'RFF', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 9},
                ]},
                {ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 99},
                ]},
                {ID: 'LIN', MIN: 0, MAX: 99999, LEVEL: [
                    {ID: 'PIA', MIN: 0, MAX: 9},
                    {ID: 'IMD', MIN: 0, MAX: 9},
                    {ID: 'PRI', MIN: 0, MAX: 9},
                    {ID: 'NAD', MIN: 0, MAX: 9},
                    {ID: 'MOA', MIN: 0, MAX: 9},
                    {ID: 'QTY', MIN: 1, MAX: 9999, LEVEL: [
                        {ID: 'DTM', MIN: 0, MAX: 9},
                        {ID: 'STS', MIN: 0, MAX: 9},
                    ]},
                    {ID: 'CCI', MIN: 0, MAX: 99, LEVEL: [
                        {ID: 'MEA', MIN: 0, MAX: 99},
                        {ID: 'DTM', MIN: 0, MAX: 9},
                    ]},
                ]},
            ]},
        ]},
        {ID: 'CNT', MIN: 0, MAX: 99},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
6f5980258752082c35aaff63112e57d84ac32d19
|
21fec19cb8f74885cf8b59e7b07d1cd659735f6c
|
/chapter_13/getone-urllib.py
|
879783dfb46bea3276181cea113fd47ade1bf7c0
|
[
"MIT"
] |
permissive
|
bimri/programming_python
|
ec77e875b9393179fdfb6cbc792b3babbdf7efbe
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
refs/heads/master
| 2023-09-02T12:21:11.898011
| 2021-10-26T22:32:34
| 2021-10-26T22:32:34
| 394,783,307
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
"Using urllib to Download Files"
'''
Python urllib.request module: given an Internet address
string—a URL, or Universal Resource Locator—this module opens a connection
to the specified server and returns a file-like object ready to be read with normal file
object method calls (e.g., read, readline).
We can use such a higher-level interface to download anything with an address on the
Web—files published by FTP sites (using URLs that start with ftp://); web pages and
output of scripts that live on remote servers (using http:// URLs); and even local files
(using file:// URLs).
'''
#!/usr/local/bin/python
"""
A Python script to download a file by FTP by its URL string; use higher-level
urllib instead of ftplib to fetch file; urllib supports FTP, HTTP, client-side
HTTPS, and local files, and handles proxies, redirects, cookies, and more;
urllib also allows downloads of html pages, images, text, etc.; see also
Python html/xml parsers for web pages fetched by urllib in Chapter 19;
"""
import os, getpass
from urllib.request import urlopen              # socket-based web tools

filename = 'monkeys.jpg'                        # remote/local filename
password = getpass.getpass('Pswd?')
remoteaddr = 'ftp://lutz:%s@ftp.rmi.net/%s;type=i' % (password, filename)
print('Downloading', remoteaddr)

# urllib.request.urlretrieve(remoteaddr, filename) is a one-call alternative.
# Fix: use context managers so both the remote connection and the local file
# are closed even if the transfer raises; the original leaked both on error.
with urlopen(remoteaddr) as remotefile, open(filename, 'wb') as localfile:
    localfile.write(remotefile.read())
'''
Technically speaking, urllib.request supports a variety of Internet protocols (HTTP,
FTP, and local files). Unlike ftplib, urllib.request is generally used for reading remote
objects, not for writing or uploading them (though the HTTP and FTP protocols support
file uploads too). As with ftplib, retrievals must generally be run in threads if
blocking is a concern.
'''
|
[
"bimri@outlook.com"
] |
bimri@outlook.com
|
1d4682439a3ec9cebb7221e6ed9577f7be10a86c
|
41cd61226440c7f0a6fcf77f7e4f65e65c28aaa1
|
/wg_auto/a1_inject/sql_injection/intro.py
|
2a1bcbc73989354939d03940f43f8d0cb3c7b42d
|
[] |
no_license
|
xx-zhang/webgoat_auto
|
6d99d98148e180b6eacf46c5d2b4de81b552fb1e
|
8d47d6af68530940987a272224e9c21f870bf402
|
refs/heads/master
| 2023-04-03T22:24:54.675321
| 2021-04-16T09:23:30
| 2021-04-16T09:23:30
| 358,497,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,860
|
py
|
# coding:utf-8
from wg_auto.base import request_wrap
# WebGoat SQL-injection lesson drivers.  Each helper POSTs a canned payload
# to its lesson endpoint via the shared request_wrap and returns the result.
def sql2_test(q="select department from employees where first_name='Bob' and last_name='Franco'"):
    # Lesson 2: plain SELECT submitted through the query box.
    __url = '/SqlInjection/attack2'
    return request_wrap(method='post', url=__url, data={"query": q})


def sql3_test(q="update employees set department='Sales' where "
              "first_name='Tobi' and last_name='Barnett'"):
    # Lesson 3: DML (UPDATE) statement.
    __url = '/SqlInjection/attack3'
    return request_wrap(method='post', url=__url, data={"query": q})


def sql4_test(q='alter table employees add phone varchar(20)'):
    # Lesson 4: DDL (ALTER TABLE).
    __url = '/SqlInjection/attack4'
    return request_wrap(method='post', url=__url, data={"query": q})


def sql5_test(q='grant alter table to UnauthorizedUser'):
    # Lesson 5: DCL (GRANT).
    __url = '/SqlInjection/attack5'
    return request_wrap(method='post', url=__url, data={"query": q})


def sql9_test():
    # String-field injection: always-true OR clause.
    __url = "/SqlInjection/assignment5a"
    data = {"account": "Smith'", "operator": "or", "injection": "'1'='1"}
    return request_wrap(method='post', url=__url, data=data)


def sql10_test():
    # Numeric-field injection: no quoting needed.
    __url = "/SqlInjection/assignment5b"
    data = {"login_count": "1", "userid": "1 or 1=1"}
    return request_wrap(method='post', url=__url, data=data)


def sql11_test():
    # Bypass the TAN check with an always-true predicate.
    __url = "/SqlInjection/attack8"
    data = {"name": "Smith", "auth_tan": "1' or '1'='1"}
    return request_wrap(method='post', url=__url, data=data)


def sql12_test():
    # Stacked query: piggyback an UPDATE on the lookup.
    __url = "/SqlInjection/attack9"
    # data = {"name": "Smith", "auth_tan": "3SL99A' or '1'='1"}
    data = {"name": "Smith", "auth_tan": "1' or 1=1;update employees set salary = 90000 where last_name = 'Smith';--+"}
    return request_wrap(method='post', url=__url, data=data)


def sql13_test():
    # Stacked query: DROP TABLE to destroy the audit trail.
    __url = "/SqlInjection/attack10"
    data = {"action_string": "1' or 1=1;drop table access_log;--"}
    return request_wrap(method='post', url=__url, data=data)
|
[
"you@example.com"
] |
you@example.com
|
a40a61da4b281943142d8d4709eff02cb23d9dca
|
2ca3b6cc4f9145438e283d4e055e55fff550ec90
|
/flask/hello.py
|
68f2d487bdd7eac37fde1aad5bf11e7ee96000bc
|
[] |
no_license
|
manuck/python_practice
|
e39a7e3be41608dd9bf8a7bdb9228a22ceb652b6
|
7adbefbe616f305430c75e896d817ec8e7f938d3
|
refs/heads/master
| 2020-04-12T02:45:06.427693
| 2018-12-21T01:15:31
| 2018-12-21T01:15:31
| 162,252,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
from flask import Flask, render_template
import datetime
import random
app = Flask(__name__)
@app.route("/")
def hello():
    # Landing page rendered from templates/index.html.
    return render_template("index.html")

@app.route("/ssafy")
def ssafy():
    # Plain-text Korean greeting.
    return "방가방가룽~"
@app.route("/isitchristmas")
def christmas(today=None):
    """Return "ㅇ" (yes) on December 25th, "ㄴ" (no) on any other day.

    Fixes two bugs in the original: it compared ``datetime.datetime.month``
    (a class attribute, never equal to an int) instead of the current date,
    and it returned None outside December, which crashes a Flask view.
    *today* is an optional datetime for testing; Flask calls it with no args.
    """
    if today is None:
        today = datetime.datetime.now()
    if today.month == 12 and today.day == 25:
        return "ㅇ"
    return "ㄴ"
# variable routing: path segments are captured as typed view arguments
@app.route("/greeting/<string:name>")
def greeting(name):
    # Greets whatever name was captured from the URL path.
    return f"{name} 안녕하십니까? 인사 오지게 박습니다."

@app.route("/cube/<int:num>")
def cube(num):
    # Returns num cubed as plain text.
    sq = num**3
    return f"{sq}"
@app.route("/dinner")
def dinner():
    # Pick tonight's dinner at random and show it with the full menu.
    menu = ["햄버거", "수육", "치킨"]
    dinner = random.choice(menu)
    return render_template("dinner.html", dinner=dinner, menu=menu)

@app.route("/music")
def music():
    # Pick a random song and show it with the full playlist.
    mlist = ["아이유-이름에게", "멜로망스-욕심", "태연-기억을 걷는 시간"]
    music = random.choice(mlist)
    return render_template("music.html", music=music, mlist=mlist)
|
[
"snc9000@naver.com"
] |
snc9000@naver.com
|
c531e8963a8bdd1fd5685361f3d120b112d7931c
|
f0acc407f95b758fa734f5ed5f6506a8b20d2706
|
/docs_src/parameter_types/bool/tutorial004_an.py
|
1cb42fcc86f69fbffbf6fb0cd4576c958c05ba79
|
[
"MIT"
] |
permissive
|
shnups/typer
|
ede6d86c5b169e8caa7823b0552f8531ed041f84
|
e0b207f3f577cb2e59fdd60da39686a2f5ed0e77
|
refs/heads/master
| 2023-08-31T01:54:21.168547
| 2023-08-01T09:36:09
| 2023-08-01T09:36:09
| 313,047,732
| 0
| 0
|
MIT
| 2020-11-15T14:22:06
| 2020-11-15T14:22:05
| null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
import typer
from typing_extensions import Annotated
def main(in_prod: Annotated[bool, typer.Option(" /--demo", " /-d")] = True):
    # NOTE(review): the " /--demo" spelling appears to declare only the
    # "off" form of the flag (no --in-prod switch) — confirm against the
    # typer boolean-flag docs this tutorial file belongs to.
    if in_prod:
        print("Running in production")
    else:
        print("Running demo")


if __name__ == "__main__":
    typer.run(main)
|
[
"noreply@github.com"
] |
shnups.noreply@github.com
|
66034e4237f03e3feea6cf0c1cb3a5d2f84b4f3e
|
7f81c7b4110640f73b769b6a41e9ef3ae2495611
|
/bert_multitask_learning/__init__.py
|
e9e702d00da1d9e2c6bc914b6a59975fe2a14257
|
[
"Apache-2.0"
] |
permissive
|
ml2457/bert-multitask-learning
|
26464c6d1ad94e7aeebd93d02f2604298ebde5db
|
993c1e6ca279e90e12ce4a684260219b18bbea70
|
refs/heads/master
| 2023-02-10T14:05:27.643723
| 2021-01-10T15:22:11
| 2021-01-10T15:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
__version__ = "0.6.10"
from .read_write_tfrecord import *
from .input_fn import *
from .model_fn import *
from .params import *
from .top import *
from .run_bert_multitask import *
from .utils import *
from .preproc_decorator import preprocessing_fn
from . import predefined_problems
from .special_tokens import *
|
[
"junpang.yip@gmail.com"
] |
junpang.yip@gmail.com
|
f78963add4b60ef66c7ce35ce18852ad3a6e9be9
|
33daf4c69a8f46d7ad8d93eaa73fc60e36fd022d
|
/gestion/asignaciones/20150908-todos-cuerpos/procesar_tabla.py~
|
6817418317f466cb6fa5e7e4a9ff2c5abf0fe429
|
[] |
no_license
|
OscarMaestre/estructurado
|
81cfc9412b77d5015be1bebf66785c357746d8e2
|
7649747e48128cb9c17dee937574e9490fcc9087
|
refs/heads/master
| 2021-01-10T15:05:47.695362
| 2016-04-28T07:30:50
| 2016-04-28T07:30:50
| 53,923,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,671
|
#!/usr/bin/env python3
import re
import sys
import os
NUM_SUBDIRECTORIOS_ANTERIORES=1
SEPARADOR=os.sep
RUTA_PAQUETE_BD=(".."+SEPARADOR) * NUM_SUBDIRECTORIOS_ANTERIORES
DIRECTORIO= RUTA_PAQUETE_BD + "db_nombramientos"
#aqui = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, DIRECTORIO)
import GestorDB
import ListaCampos
archivo=sys.argv[1]
re_dni="[0-9]{7,8}[A-Z]"
#especialidad="[PWB0]59[0-9][0-9]{3}"
re_especialidad="\- [PWB0]59([0-9]{4})"
re_codigo_centro="[0-9]{8}"
re_codigo_centro_ciudad_real="^13[0-9]{6}$"
re_fecha="[0-9]{2}/[0-9]{2}/[0-9]{4}"
def linea_contiene_patron(patron, linea):
    """Return True when regex *patron* matches anywhere in *linea*.

    Idiom fix: the if/return-True/return-False ladder is replaced by a
    single boolean expression; re.search caches compiled patterns.
    """
    return re.search(patron, linea) is not None
def extraer_patron(patron, linea):
    """Return the first substring of *linea* matching regex *patron*.

    Prints a diagnostic and returns None (now explicit) when nothing
    matches.  Cleanup: match.group(0) replaces the manual start/end slice.
    """
    concordancia = re.search(patron, linea)
    if concordancia:
        return concordancia.group(0)
    print ("No concordancia")
    return None
def extraer_codigo_centro(linea):
    # Extract the 8-digit school-centre code (first match of re_codigo_centro).
    return extraer_patron(re_codigo_centro, linea)
def extraer_localidad(linea):
    """Return the locality field (fixed-width columns 9-50), trimmed."""
    campo = linea[9:51]
    return campo.strip()
def extraer_dni(linea):
    """Extract a DNI (7-8 digits plus an uppercase letter) from *linea*.

    Cleanup: the original bound linea[51:60] to an unused local and then
    searched the whole line anyway; the whole-line search is kept and the
    dead slice removed.
    """
    return extraer_patron(re_dni, linea)
def extraer_nombre(linea):
    """Return the name that follows the first '-' after column 49,
    or an "Error:" string when no dash is present."""
    resto = linea[49:]
    separador = resto.find("-")
    if separador < 0:
        return "Error:" + resto
    return resto[separador + 2:].strip()
cadena_sql="""insert into asignaciones_18092015 values
(
*C1*'{0}'*C1*,
*C2*'{1}'*C2*,
*C3*'{2}'*C3*,
*C4*'{3}'*C4*,
*C5*'{4}'*C5*,
*C6*'{5}'*C6*,
*C7*'{6}'*C7*,
*C8*'{7}'*C8*
);
"""
def generar_linea_sql(lista_campos):
    """Build the two UPDATE statements for one substitute-assignment row."""
    dni = lista_campos[0]
    cod_centro = lista_campos[3]
    fecha_fin = lista_campos[7]
    # Centre codes outside the Ciudad Real range collapse to the sentinel 9888.
    if not linea_contiene_patron(re_codigo_centro_ciudad_real, cod_centro):
        cod_centro = "9888"
    sentencias = [
        "update gaseosa set cod_centro='" + cod_centro + "' where dni='" + dni + "';\n",
        "update gaseosa set auxiliar='HACIENDO SUSTITUCION HASTA " + fecha_fin + "' where dni='" + dni + "';\n",
    ]
    return "".join(sentencias)
def generar_linea_sql2(lista_campos):
    """Join the row's fields with ':' (debug/export helper)."""
    return ":".join(lista_campos)
# Main pass: scan the fixed-width report line by line, remember the last
# specialty code seen, and for every DNI line combine it with the following
# line (centre name + dates) into UPDATE statements and INSERT rows.
archivo=open(archivo,"r")
lineas=archivo.readlines()
total_lineas=len(lineas)
codigo_especialidad=""
lista_inserts_sql3=[]
for i in range(0, total_lineas):
    linea=lineas[i]
    lista_campos=[]
    lista_campos_para_insertar=ListaCampos.ListaCampos()
    # Specialty headers apply to all following DNI rows until replaced.
    if (linea_contiene_patron(re_especialidad, linea)):
        codigo_especialidad=extraer_patron(re_especialidad, linea)
    if (linea_contiene_patron(re_dni, linea)):
        linea_limpia=linea.strip()
        codigo_centro=extraer_codigo_centro(linea_limpia)
        localidad=extraer_localidad(linea_limpia)
        dni = extraer_dni(linea_limpia)
        nombre = extraer_nombre(linea_limpia)
        # The continuation line carries centre name and both dates.
        linea_siguiente=lineas[i+1]
        nombre_centro=linea_siguiente[0:51].strip()
        trozo_fecha1=linea_siguiente[72:132]
        fecha_1=extraer_patron(re_fecha, trozo_fecha1)
        trozo_fecha2=linea_siguiente[133:]
        fecha_2=extraer_patron(re_fecha, trozo_fecha2)
        lista_campos=[dni, nombre, codigo_especialidad, codigo_centro, nombre_centro, localidad, fecha_1, fecha_2]
        linea_sql=generar_linea_sql(lista_campos)
        lista_campos_para_insertar.anadir("nif", dni, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("nombre_completo", nombre, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_inicio", fecha_1, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("fecha_fin", fecha_2, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("procedimiento", "Adjudicacion 08-09-2015", ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("especialidad", codigo_especialidad, ListaCampos.ListaCampos.CADENA)
        lista_campos_para_insertar.anadir("codigo_centro", codigo_centro, ListaCampos.ListaCampos.CADENA)
        print (linea_sql)
        # NOTE(review): rebinding the loop variable below has no effect in a
        # Python for-loop — the continuation line is NOT skipped and will be
        # re-scanned on the next iteration; confirm whether that is intended.
        i=i+1
    lista_inserts_sql3.append(lista_campos_para_insertar.generar_insert("nombramientos"))
archivo.close()
GestorDB.BD_RESULTADOS.ejecutar_sentencias(lista_inserts_sql3)
|
[
"profesor.oscar.gomez@gmail.com"
] |
profesor.oscar.gomez@gmail.com
|
|
6d00fe5a1897d38b38e75686b9f721e7d3b4fd16
|
fc778e05df051a0773d80f867b1c84542b0a4b24
|
/lab/lab06/tests/q114.py
|
aaa2d192866854e477170552bc1f816f32a05d9d
|
[] |
no_license
|
yukgu/data-6
|
d873e7231058b01365b278edc7ded4afade05b55
|
e96c0d864f58b7041dfb0820d3e469927eac97b0
|
refs/heads/master
| 2022-11-28T18:48:17.515825
| 2020-08-12T22:55:48
| 2020-08-12T22:55:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
test = {
"name": "Question 1.1.4.",
"points": 1,
"hidden": False,
"suites": [
{
"cases": [
{
"code": r"""
>>> import numpy as np
>>> np.isclose(berkeley_avg_in_thousands, 782.37)
True
""",
"hidden": False,
"locked": False,
},
],
"scored": True,
"setup": "",
"teardown": "",
"type": "doctest"
},
]
}
|
[
"42158157+castroian@users.noreply.github.com"
] |
42158157+castroian@users.noreply.github.com
|
a6f5582fc0f55ce1b8816e501641d8eb3f2f9ea4
|
946c04aa741b557daf56eac46385a613ac5e0cf2
|
/PP4E/System/Processes/multi1.py
|
2a10bd15027c964d91e4c4a8242d98ae4a2739d0
|
[] |
no_license
|
Ozavr/lutz
|
513ba0ca91d7188b2d28f649efe454603121106f
|
0ee96b5859c81ab04e8d2a3523a17fff089f12f2
|
refs/heads/master
| 2021-01-02T23:18:16.680021
| 2018-09-04T22:27:35
| 2018-09-04T22:27:35
| 99,497,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
"""
Basics of the multiprocessing package: the Process class behaves much like
threading.Thread, but runs the target function in a separate process rather
than in a thread; locks can be used for synchronization, e.g. to serialize
printed output.  Starts a fresh interpreter process on Windows and forks a
child process on Unix.
"""
import os
from multiprocessing import Process, Lock
def whoami(label, lock):
    """Print this process's identity (module name and pid), serialized by *lock*.

    Bug fix: the original printed os.getppid() — the PARENT's pid — under a
    label that says "pid"; the process's own id (os.getpid()) is what the
    demo is meant to show (and what Lutz's PP4E original uses).
    """
    msg = '%s: name:%s, pid:%s'
    with lock:
        print(msg % (label, __name__, os.getpid()))
if __name__ == '__main__':
    # Demo: call directly, then in one joined child, then in five
    # fire-and-forget children; the lock keeps print lines intact.
    lock = Lock()
    whoami('function call', lock)
    p = Process(target=whoami, args=('spawned child', lock))
    p.start()
    p .join()
    for i in range(5):
        Process(target=whoami, args=(('run process %s' % i), lock)).start()
    with lock:
        print('Main process exit.')
|
[
"ozavr@me.com"
] |
ozavr@me.com
|
f90a330761a43f328e206363dca801aabefd20f4
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-dysmsapi/aliyunsdkdysmsapi/request/v20170525/AddSmsSignRequest.py
|
c3e037abfab8fb2d6f9075b66dd5a949c573ec10
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,870
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdysmsapi.endpoint import endpoint_data
class AddSmsSignRequest(RpcRequest):
    """Generated RPC request wrapper for Dysmsapi AddSmsSign (API 2017-05-25).

    Scalar fields are plain query parameters; SignFileList is flattened into
    1-based indexed BODY parameters (SignFileList.N.FileContents /
    SignFileList.N.FileSuffix).  Accessor bodies are generated — keep them
    mechanical.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Dysmsapi', '2017-05-25', 'AddSmsSign')
        self.set_method('POST')
        # Regional endpoint tables are injected when the SDK provides them.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_Remark(self):
        return self.get_query_params().get('Remark')

    def set_Remark(self,Remark):
        self.add_query_param('Remark',Remark)

    def get_SignName(self):
        return self.get_query_params().get('SignName')

    def set_SignName(self,SignName):
        self.add_query_param('SignName',SignName)

    def get_SignFileLists(self):
        return self.get_body_params().get('SignFileList')

    def set_SignFileLists(self, SignFileLists):
        # Flatten each dict into SignFileList.<i+1>.<field> body params.
        for depth1 in range(len(SignFileLists)):
            if SignFileLists[depth1].get('FileContents') is not None:
                self.add_body_params('SignFileList.' + str(depth1 + 1) + '.FileContents', SignFileLists[depth1].get('FileContents'))
            if SignFileLists[depth1].get('FileSuffix') is not None:
                self.add_body_params('SignFileList.' + str(depth1 + 1) + '.FileSuffix', SignFileLists[depth1].get('FileSuffix'))

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_SignSource(self):
        return self.get_query_params().get('SignSource')

    def set_SignSource(self,SignSource):
        self.add_query_param('SignSource',SignSource)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
eaa3af01316e70493317fc5e190e308304501276
|
5949db57f8de8278359f45fe64395f44017671bc
|
/blog/migrations/0002_auto_20180122_0552.py
|
fdc4f73978db4766bafa256495e6be4132467df4
|
[] |
no_license
|
andrewidya/personal_blog
|
71ed6b83ac3c594fa40b9fb40145af3e37dd3079
|
c64df84f65dafd03ac05cf222fc113416e6926d5
|
refs/heads/master
| 2020-04-08T16:39:30.559072
| 2018-11-28T16:20:48
| 2018-11-28T16:20:48
| 159,528,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,244
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-22 05:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Auto-generated Django/Wagtail migration: replaces BlogPage's old
    related_pages field with an explicit RelatedPage through-model holding
    page_from / page_to ParentalKeys."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='RelatedPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.RemoveField(
            model_name='blogpage',
            name='related_pages',
        ),
        migrations.AddField(
            model_name='relatedpage',
            name='page_from',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_page_from', to='blog.BlogPage', verbose_name='Page From'),
        ),
        migrations.AddField(
            model_name='relatedpage',
            name='page_to',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_page_to', to='blog.BlogPage', verbose_name='Page To'),
        ),
    ]
|
[
"andrywidyaputra@gmail.com"
] |
andrywidyaputra@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.