blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
44d9017c2f7d723b7a212c2c0c90c9ecd07f3814 | 1a87ac9522591f25b03e6912ba3af3cca115abae | /inventory/migrations/0008_auto_20210323_1552.py | 9f861fbe025a9c3a0b6f37ed307a6f83f8ec55b3 | [
"MIT"
] | permissive | jyywong/InventoryMS | c67fdb0a051be5d136d9509e63b7fc0aeadcc324 | 9aac1324742730ce980e638f2156ece9eb44a593 | refs/heads/master | 2023-04-01T15:38:44.448813 | 2021-04-05T19:59:45 | 2021-04-05T19:59:45 | 350,162,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.2.10 on 2021-03-23 19:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen Item.bar_code to a 64-bit integer and make it optional."""

    dependencies = [
        ('inventory', '0007_auto_20210323_1528'),
    ]

    operations = [
        # Bar codes may exceed the 32-bit range, and items may lack one.
        migrations.AlterField(
            model_name='item',
            name='bar_code',
            field=models.BigIntegerField(blank=True, null=True),
        ),
    ]
| [
"wong.jonathan1@gmail.com"
] | wong.jonathan1@gmail.com |
ccbe1eb7db398e37dcf02cb0576aa88a28663115 | 45185a2c65924ed01cdc222ccc42e71391e5a1f4 | /tt/tests/utils.py | 4baa12a29ca5658108cc95656aef682479cb6851 | [
"MIT"
] | permissive | parsamz/tt | 5cb0db124fd9ed5ec3fe24e0e807c72d33f9aebb | 0d2a286d46cfe1ca01b340d710ba5a1921a9b66e | refs/heads/master | 2020-04-01T13:47:37.380990 | 2016-05-07T01:41:42 | 2016-05-07T01:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | """Utility methods/classes used in the testing pipeline.
"""
import sys
import unittest
from contextlib import contextmanager
from tt.core import main
if sys.version_info < (3, 0):
from io import BytesIO as StringIO
else:
from io import StringIO
# === stdout/stderr interaction ===============================================
@contextmanager
def redirected_stream(stream_name):
    """Temporarily replace ``sys.<stream_name>`` with an in-memory buffer.

    Yields the buffer so the caller can inspect what was written; the
    original stream object is restored on exit even if the body raises.
    """
    saved = getattr(sys, stream_name)
    buffer = StringIO()
    setattr(sys, stream_name, buffer)
    try:
        yield buffer
    finally:
        setattr(sys, stream_name, saved)
# === Generalized test cases ==================================================
class FunctionalTestAssertions(object):
    """Placeholder for shared functional-test assertion helpers (currently empty)."""
    pass
class FunctionalTestCase(unittest.TestCase):

    """Base class for end-to-end tests that drive tt's command line."""

    def functional_test_helper(self, cl_args=None,
                               expected_stdout='', expected_stderr=''):
        """Run ``main`` with *cl_args* and assert the captured streams match.

        ``cl_args`` defaults to ``None`` rather than ``[]`` so a single
        mutable list is not shared across calls (the classic mutable
        default-argument pitfall).
        """
        if cl_args is None:
            cl_args = []
        with redirected_stream('stdout') as _stdout:
            with redirected_stream('stderr') as _stderr:
                main(args=cl_args)
        self.assertEqual(expected_stdout, _stdout.getvalue())
        self.assertEqual(expected_stderr, _stderr.getvalue())
| [
"welch18@vt.edu"
] | welch18@vt.edu |
003c4f1f23e0854df92932089c015e72820a1d9e | 62248ce4ce8f11d24c089b54d4c02bdec51df565 | /Stars_Web/settings.py | e85e844b2bf965f39af337d25c11917f7278db24 | [] | no_license | challeger/Starts_Web | 0b7231bbdf0e6f6350c928e13f9b67c6f1c4af84 | 1b372013706f8d082e9feab5c73fd690a10b7286 | refs/heads/master | 2022-12-24T21:32:31.231838 | 2020-09-30T05:04:21 | 2020-09-30T05:04:21 | 299,798,716 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,993 | py | """
Django settings for Stars_Web project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from the environment before deploying.
SECRET_KEY = '%a^a4*!h^d*_%9&bspa(vu^^uawil9uzm62c0zu_19otx0+02g'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header; restrict to real hostnames in production.
ALLOWED_HOSTS = ['*']

# Application definition
INSTALLED_APPS = [
    'Users',
    'rest_framework',
    'rest_framework_jwt',
    'django_filters',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',  # cross-origin (CORS) request support
]

# CORS: allow credentials and any origin (development-friendly; overly
# permissive for production).
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_ALLOW_ALL = True
# Allow every request header
CORS_ALLOW_HEADERS = ('*', )

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'Stars_Web.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'Stars_Web.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): database credentials are hard-coded here -- move them to
# environment variables or a secrets store.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'stars_web',
        'USER': 'root',
        'PASSWORD': 'T85568397',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'CONN_MAX_AGE': 5 * 60,
        'OPTIONS': {
            'charset': 'utf8mb4'
        }
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False  # datetimes are stored naive in local (Shanghai) time

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL)
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'), )

# Outgoing mail (QQ SMTP), used to send verification codes.
# NOTE(review): the SMTP app password is committed here -- revoke it and
# read it from the environment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 465
EMAIL_USE_SSL = True
EMAIL_HOST_USER = '799613500@qq.com'
EMAIL_HOST_PASSWORD = 'pqtssodfmxysbcdh'
EMAIL_FROM = '群星小说网<799613500@qq.com>'
# Verification-code lifetime: ten minutes (in seconds)
EMAIL_EXP_DELTA = 600
| [
"799613500@qq.com"
] | 799613500@qq.com |
f3ba2d5ebe159e659d05d9840939a38c42042d12 | 70e81f00b600057464fdccaef2d82f238c8f08dc | /apps/utils/yunpian.py | 89e5b0098e2c0e410c73f6bff53474e77df75b25 | [] | no_license | wujialaoer/shop | 1bbd905369260ce1df9822027649655b7a909657 | 5fc8b02ba63cea96172f30520b553dab3ec5fe8a | refs/heads/master | 2020-05-07T09:06:49.935942 | 2019-04-16T09:03:38 | 2019-04-16T09:03:38 | 180,359,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import json
import requests
class YunPian(object):
    """Minimal client for the Yunpian single-send SMS HTTP API."""

    def __init__(self, api_key):
        self.api_key = api_key
        self.single_send_url = "https://sms.yunpian.com/v2/sms/single_send.json"

    def send_sms(self, code, mobile):
        """POST a verification-code text to *mobile*; return the decoded
        JSON response as a dict."""
        payload = {
            "apikey": self.api_key,
            "mobile": mobile,
            "text": "您的验证码是{code}。如非本人操作,请忽略本短信".format(code=code)
        }
        response = requests.post(self.single_send_url, data=payload)
        return json.loads(response.text)
if __name__ == "__main__":
    # Manual smoke test -- supply a real API key and mobile number first;
    # as written the request will be rejected by the service.
    yun_pian = YunPian("")
    yun_pian.send_sms("2017", "")
| [
"624334922@qq.com"
] | 624334922@qq.com |
76eb91fe6bfb0a872f54e9b9920fc6ef2255dffa | 43b72b9e4d81ffab6e4d79f324034cbb4b7413a3 | /challenge/NOTDONEsmallestWindowHavingOneInAnother.py | 07d1a5408b509bd0143be6df6ca2f1db94677de3 | [] | no_license | devathul/prepCode | bfe0ad44f68c8c9d4a48f76dde9a1bb8af165373 | eb0751bda3066ac2f1a2890cf63b28ee63a6dd89 | refs/heads/master | 2023-01-08T20:59:52.214333 | 2020-11-01T00:13:55 | 2020-11-01T00:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | """
smallestWindowHavingOneInAnother
Given a string S and a text T, output the smallest window of S containing all characters of T. Both S and T contain only lowercase English letters.
"""
no_of_chars = 256  # size of the extended-ASCII alphabet (kept for compatibility; unused below)


def findSubString(string, pat):
    """Return the smallest window of *string* containing every character of
    *pat* (counted with multiplicity).

    Prints "No such window" and returns None when no such window exists.
    Runs in O(len(string) + len(pat)) using the classic two-pointer
    sliding-window technique.
    """
    len1 = len(string)
    len2 = len(pat)
    if len1 < len2:
        # A shorter string can never cover pat.
        print("No such window")
        return None
    if len2 == 0:
        # The empty pattern is trivially covered by the empty window.
        return ""

    # Multiset of characters still required by the current window.
    need = {}
    for ch in pat:
        need[ch] = need.get(ch, 0) + 1
    missing = len2           # count of required characters not yet covered
    best_len = len1 + 1      # sentinel: longer than any real window
    best_start = 0
    left = 0

    for right, ch in enumerate(string, 1):
        if need.get(ch, 0) > 0:
            missing -= 1
        need[ch] = need.get(ch, 0) - 1
        if missing == 0:
            # Shrink from the left while the window remains valid.
            while need[string[left]] < 0:
                need[string[left]] += 1
                left += 1
            if right - left < best_len:
                best_len = right - left
                best_start = left
            # Drop the leftmost required char and continue scanning.
            need[string[left]] += 1
            missing += 1
            left += 1

    if best_len > len1:
        print("No such window")
        return None
    return string[best_start:best_start + best_len]
| [
"42695433+bsofcs@users.noreply.github.com"
] | 42695433+bsofcs@users.noreply.github.com |
3a5fa4638e3ee7de74813129b2b4c3231d061a33 | b4fdd022b45751cfaf2a8770152bf7ca6aeb2b9a | /putcall/formulas/interest_rate_options/__init__.py | 6a8b9a40e055ef017c195e8815cf99e64e93500c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fagan2888/putcall | 7547727b48e52f8a5ef325f02952778a93d8acb4 | a5984b52cb7bae33cfd48490439acea4844af0f9 | refs/heads/master | 2021-03-16T21:22:16.024168 | 2019-09-18T13:03:41 | 2019-09-18T13:03:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # -*- coding: utf-8 -*-
# putcall
# -------
# Collection of classical option pricing formulas.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.2, copyright Wednesday, 18 September 2019
# Website: https://github.com/sonntagsgesicht/putcall
# License: Apache License 2.0 (see LICENSE file)
from .black76 import *
from .bachelier import *
from .hullwhite import *
from .sabr import *
| [
"sonntagsgesicht@icloud.com"
] | sonntagsgesicht@icloud.com |
2ed766c1a90f661f262b30118b23197f0eafba1e | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467578/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_32/run_cfg.py | d66375515b236b61c9c96404622b8782b49cb4d5 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,181 | py | import FWCore.ParameterSet.Config as cms
import os,sys
# Make the job-specific configuration package importable.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467578/HTT_24Jul_newTES_manzoni_Up_Jobs')
# Pull in the shared cmsRun configuration (defines `process`).
from base_cfg import *
# Override the input source with this job's slice of input files.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_95_1_MM2.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_96_1_vAc.root',
        '/store/cmst3/group/cmgtools/CMG/SUSYGluGluToHToTauTau_M-160_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_97_1_PJw.root')
    )
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
bc8690891dbddd0b15fa87a22253b93d5d9e3b63 | c1e13dabefcfa873b136f36d464f2bf5094ee5ba | /manubot/cite/tests/test_csl_item.py | 70e974fa9fcef5cb621a11f879e591ef1f7b6126 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | epogrebnyak/manubot | 6001c3271d2c6bc059e09dfcb786c74efcb03df7 | 77920eca091995184684e7b6b0d12266917c3aa4 | refs/heads/master | 2020-07-27T00:11:36.221293 | 2019-10-09T06:35:23 | 2019-10-09T06:35:23 | 208,805,899 | 1 | 0 | NOASSERTION | 2019-10-30T09:20:17 | 2019-09-16T13:24:26 | Python | UTF-8 | Python | false | false | 1,959 | py | import copy
import pytest
from manubot.cite.csl_item import (
csl_item_set_standard_id)
@pytest.mark.parametrize(
    ['csl_item', 'standard_citation'],
    [
        # An explicit standard_citation field wins over the id.
        (
            {'id': 'my-id', 'standard_citation': 'doi:10.7554/elife.32822'},
            'doi:10.7554/elife.32822',
        ),
        # A DOI-prefixed id is already standard.
        (
            {'id': 'doi:10.7554/elife.32822'},
            'doi:10.7554/elife.32822',
        ),
        # DOIs are lower-cased during standardization.
        (
            {'id': 'doi:10.7554/ELIFE.32822'},
            'doi:10.7554/elife.32822',
        ),
        # An unprefixed id falls back to the raw: scheme.
        (
            {'id': 'my-id'},
            'raw:my-id',
        ),
    ],
    ids=[
        'from_standard_citation',
        'from_doi_id',
        'from_doi_id_standardize',
        'from_raw_id',
    ]
)
def test_csl_item_set_standard_id(csl_item, standard_citation):
    """csl_item_set_standard_id mutates the item in place, returns the same
    object, and replaces its 'id' with the standardized citation."""
    output = csl_item_set_standard_id(csl_item)
    assert output is csl_item
    assert output['id'] == standard_citation
def test_csl_item_set_standard_id_repeated():
    """Applying csl_item_set_standard_id twice must be idempotent and must
    not leak a 'standard_citation' key into the item."""
    csl_item = {
        'id': 'pmid:1',
        'type': 'article-journal',
    }
    csl_item_1 = copy.deepcopy(csl_item_set_standard_id(csl_item))
    # BUG FIX: the original asserted `'standard_citation' not in 'csl_item'`,
    # a substring test against a string literal that is vacuously true; the
    # intent is to check the dict itself.
    assert 'standard_citation' not in csl_item
    csl_item_2 = copy.deepcopy(csl_item_set_standard_id(csl_item))
    assert csl_item_1 == csl_item_2
def test_csl_item_set_standard_id_note():
    """
    Extracting standard_id from a note should standardize the item id and
    record the original identifiers as additional note fields.
    """
    from manubot.cite.citeproc import parse_csl_item_note

    item = {
        'id': 'original-id',
        'type': 'article-journal',
        'note': 'standard_id: doi:10.1371/journal.PPAT.1006256',
    }
    csl_item_set_standard_id(item)
    assert item['id'] == 'doi:10.1371/journal.ppat.1006256'
    parsed = parse_csl_item_note(item['note'])
    assert parsed['original_id'] == 'original-id'
    assert parsed['original_standard_id'] == 'doi:10.1371/journal.PPAT.1006256'
| [
"daniel.himmelstein@gmail.com"
] | daniel.himmelstein@gmail.com |
17b3fb728b6f6d680a80c7565e7a37f431aa7e6d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_dithers.py | eef9fd87e5c87980635cc5c430ce7f137bbb0de8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _DITHERS():
def __init__(self,):
self.name = "DITHERS"
self.definitions = dither
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['dither']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0647d51b728b8cfd6cb1d6995238a196bb8a110a | 9f91cc7389d212720ab16ab9d0a60d62e5cf7088 | /astropy/io/votable/tests/table_test.py | 2d6564672e783912b47978ef48505dd4910c5021 | [] | no_license | MQQ/astropy | 47d6889e54dfa714bcbe9a6a572bb6c4c56427fd | 67c8ce053d075399d22b4a674bf1df3e441ad125 | refs/heads/master | 2021-01-18T07:48:04.107638 | 2012-11-04T22:04:33 | 2012-11-04T22:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | """
Test the conversion to/from astropy.table
"""
import os
import shutil
import tempfile
from ....config import get_data_filename
from ..table import parse, writeto
from .. import tree
# Scratch directory shared by this module's tests; created in setup_module()
# and removed again in teardown_module().
TMP_DIR = None

def setup_module():
    """Create the temporary output directory for this test module."""
    global TMP_DIR
    TMP_DIR = tempfile.mkdtemp()

def teardown_module():
    """Remove the temporary directory and everything written into it."""
    shutil.rmtree(TMP_DIR)
def test_table():
    """Round-trip a VOTable through an astropy Table and verify that every
    field keeps its ID, datatype and arraysize."""
    # Read the VOTABLE
    votable = parse(
        get_data_filename('data/regression.xml'),
        pedantic=False)
    table = votable.get_first_table()
    astropy_table = table.to_table()

    # Convert back and compare the reconstructed field definitions.
    votable2 = tree.VOTableFile.from_table(astropy_table)
    t = votable2.get_first_table()

    field_types = [
        ('string_test', {'datatype': 'char', 'arraysize': '*'}),
        ('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
        ('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
        ('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
        ('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
        ('unsignedByte', {'datatype': 'unsignedByte'}),
        ('short', {'datatype': 'short'}),
        ('int', {'datatype': 'int'}),
        ('long', {'datatype': 'long'}),
        ('double', {'datatype': 'double'}),
        ('float', {'datatype': 'float'}),
        ('array', {'datatype': 'long', 'arraysize': '2*'}),
        ('bit', {'datatype': 'bit'}),
        ('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
        ('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
        ('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
        ('floatComplex', {'datatype': 'floatComplex'}),
        ('doubleComplex', {'datatype': 'doubleComplex'}),
        ('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
        ('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
        ('boolean', {'datatype': 'bit'}),
        ('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
        ('nulls', {'datatype': 'int'}),
        ('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
        ('precision1', {'datatype': 'double'}),
        ('precision2', {'datatype': 'double'}),
        ('doublearray', {'datatype': 'double', 'arraysize': '*'}),
        ('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]

    # `expected` was previously named `type`, shadowing the builtin.
    for field, expected in zip(t.fields, field_types):
        name, d = expected
        assert field.ID == name
        assert field.datatype == d['datatype']
        if 'arraysize' in d:
            assert field.arraysize == d['arraysize']

    writeto(votable2, os.path.join(TMP_DIR, "through_table.xml"))
| [
"mdboom@gmail.com"
] | mdboom@gmail.com |
e39117bcbd65c3e815a82d6f118abff791f48236 | 7275226803632a73466214cf14ad37908d9cc5db | /blog/migrations/0016_auto_20201129_1253.py | eeba89141db5baf1444c5141c4394109d8dd3bb9 | [] | no_license | GBrachetta/new-blog | dc01a7b6289cce4106f03954d6bfe49e04bca740 | cf25dbbcd54e5c309664cd7eec2488e344c0d41d | refs/heads/master | 2023-01-24T11:06:43.473885 | 2020-11-30T20:21:08 | 2020-11-30T20:21:08 | 316,278,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # Generated by Django 3.1.3 on 2020-11-29 12:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Point Comment.user at the configured user model with a
    'user_profile' reverse accessor."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0015_auto_20201129_0206'),
    ]

    operations = [
        # Deleting a user cascades to their comments.
        migrations.AlterField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"brachetta@me.com"
] | brachetta@me.com |
5dacf768ff89c1dc776f4f6b3e3d1a4da857d88c | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /n5Ar5F2CJMpGRXz3o_9.py | 025d1f238f707bb2767ad5f637fa17a022f0f106 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py |
def mineral_formation(cave):
    """Classify the formations in a cave grid.

    An all-zero top row means nothing touches the ceiling (stalagmites);
    an all-zero bottom row means nothing touches the floor (stalactites);
    otherwise both kinds are present.
    """
    top_total = sum(cave[0])
    bottom_total = sum(cave[-1])
    if top_total == 0:
        return 'stalagmites'
    if bottom_total == 0:
        return 'stalactites'
    return 'both'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
511a293ebbc8e556034f32c06027531a837e841f | e21599d08d2df9dac2dee21643001c0f7c73b24f | /practice/lib/tenacity_sample/try_with_exception.py | c78f7317fc738abd2475f9d6e07562ac6938c80a | [] | no_license | herolibra/PyCodeComplete | c7bf2fb4ce395737f8c67749148de98a36a71035 | 4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b | refs/heads/master | 2022-07-17T05:39:03.554760 | 2020-05-03T07:00:14 | 2020-05-03T07:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #!/usr/bin/env python
# coding=utf-8
import random
def do_something_unreliable(retry=10):
    """Simulate an unstable operation: each attempt fails with ~9/11
    probability; retries up to *retry* times and returns None either way.

    Args:
        retry: maximum number of attempts.
    """
    for attempt in range(retry):
        try:
            if random.randint(0, 10) > 1:
                raise IOError("Unstable status, try again")
            else:
                print("Get stable result")
                return
        except Exception as e:
            # BUG FIX: Exception.message was removed in Python 3; printing
            # the exception itself yields its message text.
            print(e)
if __name__ == "__main__":
do_something_unreliable(3) | [
"zengyuetian@cloutropy.com"
] | zengyuetian@cloutropy.com |
faa141d3b4652d9d6c57baca5a76f24a41679132 | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /web前端/day54/day54/00今日面试题.py | 6c84dbb775fe3b3bec0b220ebe9472d71968acd0 | [
"MIT"
] | permissive | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 420 | py | """
问:执行完下面的代码后, l,m的内容分别是什么?
"""
def func(m):
    """Adds shifted copies of every entry to *m* while iterating over it.

    Growing a dict during iteration raises RuntimeError ("dictionary
    changed size during iteration") in Python 3 -- which is the point of
    this quiz.
    """
    for k,v in m.items():
        m[k+2] = v+2
m = {1: 2, 3: 4}
l = m  # shallow reference: l and m are the SAME dict object
from copy import deepcopy
l2 = deepcopy(m)  # independent copy; later writes to l/m do not affect it
l[9] = 10   # visible through m as well (same object)
l2[90] = 100  # only in the deep copy
# func(l)  # would raise RuntimeError: dict changed size during iteration
m[7] = 8    # visible through l as well
# 1. Can you perform key-adding operations on a dict while iterating it in Python? (No.)
# 2. Understand the difference between deep and shallow copies.
print("l:", l)
print("l2:", l2)
print("m:", m)
| [
"13269469526@163.com"
] | 13269469526@163.com |
62d659bd368096cf43711563ed604f8fd3f7a6bc | f62ed4c130e6ecad19c606bac2e5aa561e18a6d5 | /week2/integer_ex.py | d5b87cfd33608f37b644ca1513287cab3ade1731 | [] | no_license | jimyeong22/2021ddangil2 | d17c7d9fd6c7c3f369a01a20317ccb6a4ea05678 | d2016a33e4ceba3ffd12fef9cace4cdceb6b1bcb | refs/heads/master | 2023-07-23T20:05:22.321927 | 2021-08-19T11:12:40 | 2021-08-19T11:12:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | # 정수형
a=154
print(type(a))  # <class 'int'>
a = 0
print(type(a))
a = -25
print(type(a))
# Floating-point numbers
a= 181.34
print(type(a))
b=-22.22
print(type(b))
# Complex numbers
c = 1 + 4j
print(type(c))
print(c.real)        # real part
print(c.imag)        # imaginary part
print(c.conjugate()) # complex conjugate
print(abs(c))        # magnitude
# Exercise: verify these yourself using the four basic arithmetic operations
a = 5
b = 3.14
c = 3 + 4j
18.28 + 16j  # NOTE(review): bare expression -- its value is discarded; probably meant to be printed
print(2b + 4c) | [
"choikm3847@gmail.com"
] | choikm3847@gmail.com |
f3b0b5741ace2b644e0178e50b90dcfaeb5ec3fd | b4ecc9c5a74f11958e7a49999d0299e7bb883d2e | /train.py | 5ebc5af6153bd0e810cbb34969e6e53072f76913 | [] | no_license | teja0508/AcronymLookup | 6edea8ab9bc27824b961563f5bf968b499490094 | ea5b812c41f138b5dccabbe2c474e2da0f85ce9e | refs/heads/main | 2022-12-20T08:00:30.161858 | 2020-10-18T06:01:32 | 2020-10-18T06:01:32 | 305,030,809 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,327 | py | # Machine-Learning Approach for Cross-Domain Acronym Definition Identification
# Maya Varma and Rachel Gardner
# Autumn 2017
# Train Machine Learning Classifier
import sys
sys.path.append('postgres-database/')
from urllib.request import urlopen
import re
import csv
import os
from collections import defaultdict, Counter
import operator
import random
from dbFunctions import AcronymDatabase
from sklearn.feature_extraction import text, DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import tree, metrics, svm
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.externals import joblib
#import sklearn.externals.joblib as extjoblib
#import joblib
import pickle
# Load in csv data (contains list of HTML urls)
def loadHTMLData():
    """Load the article URL list from data/data.csv.

    Each row is expected to look like "<label>,<url>"; rows with an empty
    URL field are skipped.

    Returns:
        list[str]: the URLs in file order.
    """
    urls = []
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11);
    # universal-newline handling is the default in Python 3 text mode.
    with open('data/data.csv', 'r') as data:
        reader = csv.reader(data, dialect=csv.excel_tab)
        for row in reader:
            # Split once instead of twice per row.
            fields = row[0].split(',')
            if len(fields[1]) > 0:
                urls.append(fields[1])
    return urls
def loadDuplicateData():
    """Load data/duplicatedata.csv and split its definitions between the
    training and testing sets.

    For each row "<id>,<flag>,<item>,<definition>" with a non-empty second
    field, the third field always joins the training set; the fourth field
    alternates between training (even row index) and testing (odd row
    index).  Row parity is counted over all rows, matching the original
    behaviour.

    Returns:
        tuple[list, list]: (train, test)
    """
    train = []
    test = []
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11).
    with open('data/duplicatedata.csv', 'r') as data:
        reader = csv.reader(data, dialect=csv.excel_tab)
        # enumerate replaces the hand-maintained `count` variable.
        for count, row in enumerate(reader):
            fields = row[0].split(',')
            if len(fields[1]) > 0:
                train.append(fields[2])
                if count % 2 == 0:
                    train.append(fields[3])
                else:
                    test.append(fields[3])
    return (train, test)
# Build the ~70/30 train/test split of article URLs.
urls = loadHTMLData()
# Read the duplicate data once (previously two separate calls re-parsed
# the CSV twice).
trainingUrlsDuplicates, testingUrlsDuplicates = loadDuplicateData()
trainingUrls = trainingUrlsDuplicates + urls[:int(0.7*len(urls))]
testingUrls = testingUrlsDuplicates + urls[int(0.7*len(urls)):]
print ('Size of Training Dataset: ', len(trainingUrls))
print ('Size of Testing Dataset: ', len(testingUrls))
#Adapted from NLTK package. Removes HTML markup from given string.
def clean_html(html):
    """Strip scripts/styles, comments and tags from *html* and return the
    remaining text split into a list of words.

    NOTE(review): the three trailing substitutions look garbled in this
    copy (the patterns were probably "&nbsp;" and runs of spaces in the
    NLTK original) -- confirm before relying on them.
    """
    # First we remove inline JavaScript/CSS:
    cleaned = re.sub(r"(?is)<(script|style).*?>.*?(</\1>)", "", html.strip())
    # Then we remove html comments. This has to be done before removing regular
    # tags since comments can contain '>' characters.
    cleaned = re.sub(r"(?s)<!--(.*?)-->[\n]?", "", cleaned)
    # Next we can remove the remaining tags:
    cleaned = re.sub(r"(?s)<.*?>", " ", cleaned)
    # Finally, we deal with whitespace
    cleaned = re.sub(r" ", " ", cleaned)
    cleaned = re.sub(r" ", " ", cleaned)
    cleaned = re.sub(r" ", " ", cleaned)
    return (cleaned.strip()).split()
# Takes a token list as input. Returns list of all acronym candidates with
# their positions.
def identifyAcronyms(rawText):
    """Scan a list of words and return ``[(acronym, index), ...]``.

    A token counts as an acronym when, after stripping punctuation, it is
    longer than 2 characters, upper-case ignoring the final character (so
    plurals like "CPUs" match), purely alphabetic, not blacklisted, and not
    part of an all-caps header (neither neighbouring token is upper-case).
    The first and last tokens are never candidates because both neighbours
    are required.
    """
    acronyms = []
    # Words commonly misidentified as acronyms are manually blacklisted.
    blacklist = ['ABSTRACT', 'INTRODUCTION', 'CONCLUSION', 'CONCLUSIONS', 'ACKNOWLEDGEMENTS', 'RESULTS']
    for i in range(1, len(rawText) - 1):
        # Strip punctuation before testing.
        word = re.sub(r'[^\w\s]', '', rawText[i])
        # (removed unused nextIndex/prevIndex locals from the original)
        if (len(word) > 2 and word[:-1].isupper() and word.isalpha()
                and word not in blacklist
                and not rawText[i - 1].isupper()
                and not rawText[i + 1].isupper()):
            acronyms.append((word, i))
    return acronyms
# === Extracting Features ===
# Open the acronym/context database used to build the training matrix.
db = AcronymDatabase()
# Convert training data to sparse vectors
tokenize = CountVectorizer().build_tokenizer()
# Filled as a side effect by features(); stays aligned with the feature
# rows fed to the vectorizer.
true_defs = []
def features(cad):
    """Build a bag-of-words feature dict for one (acronym, context[, true_def]) tuple.

    Side effect: when a third element (the true definition) is present it is
    appended to the module-level ``true_defs`` list, keeping labels aligned
    with the rows fed to the vectorizer.
    """
    acronym = cad[0]
    context = cad[1]
    if(len(cad)==3): true_defs.append(cad[2])
    terms = tokenize(context)
    # Weight the acronym itself heavily relative to its context words.
    d = {acronym: 10}
    for t in terms:
        if(t not in text.ENGLISH_STOP_WORDS):
            d[t] = d.get(t, 0) + 1
    return d
# Vectorize the training contexts.
cadList = db.getContextAcronymList()
vect = DictVectorizer()
X_train = vect.fit_transform(features(d) for d in cadList)
# BUG FIX: `joblib` is no longer imported (its imports are commented out
# above), so joblib.dump raised NameError; persist the vectorizer with
# pickle like the classifiers below.
with open('trained-models/vectorizer.pkl', "wb") as f:
    pickle.dump(vect, f)
print (X_train.toarray())

# Train Machine Learning Classifier
clf1 = MultinomialNB(alpha=0.09).fit(X_train, true_defs)
print ('Trained Model 1')
clf2 = svm.LinearSVC(C=1).fit(X_train, true_defs)
print ('Trained Model 2')
clf3 = tree.DecisionTreeClassifier(min_samples_leaf=1).fit(X_train, true_defs)
print ('Trained Model 3')
clf4 = RandomForestClassifier().fit(X_train, true_defs)
print ('Trained Model 4')

# Persist the trained models. Context managers close the files promptly
# (the original bare open() calls leaked the handles).
with open('trained-models/naivebayes.pkl', "wb") as f:
    pickle.dump(clf1, f)
with open('trained-models/svc.pkl', "wb") as f:
    pickle.dump(clf2, f)
with open('trained-models/decisiontree.pkl', "wb") as f:
    pickle.dump(clf3, f)
with open('trained-models/randomforest.pkl', "wb") as f:
    pickle.dump(clf4, f)
db.close()
| [
"lchandratejareddy@gmail.com"
] | lchandratejareddy@gmail.com |
26a6d470417d511695f873a58b8d5db99dc91e16 | 6351221d588668804e2df01936732eede4d96ed0 | /leetcode-cn/Python/783.二叉搜索树节点最小距离.py | 56c0fda33433e32c094361d70f8113486693b1ec | [] | no_license | LogicJake/code-for-interview | 8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f | 5990b09866696c2f3e845047c755fa72553dd421 | refs/heads/master | 2021-09-20T20:19:17.118333 | 2021-09-14T13:46:30 | 2021-09-14T13:46:30 | 102,202,212 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | #
# @lc app=leetcode.cn id=783 lang=python3
#
# [783] 二叉搜索树节点最小距离
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node (LeetCode scaffold)."""
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def minDiffInBST(self, root: TreeNode) -> int:
        """Return the minimum difference between the values of any two
        nodes in the BST.

        An in-order walk visits values in sorted order, so the minimum gap
        is always between consecutively visited nodes.  9999 acts as the
        "no previous value yet" sentinel, as in the original.
        """
        prev_val = 9999
        best = 9999

        def visit(node):
            nonlocal best, prev_val
            if node is None:
                return
            visit(node.left)
            best = min(best, abs(node.val - prev_val))
            prev_val = node.val
            visit(node.right)

        visit(root)
        return best
# @lc code=end
| [
"835410808@qq.com"
] | 835410808@qq.com |
f5b172c6b6c06efd0d3b35027922b0150ee7ce06 | a990bd26d3a69d1ea6699c85efa2cea99452c3df | /problems/leetcode/minCostClimbingStairs746.py | 87821e07472154af738e6ceddc4526aa197158b3 | [] | no_license | abecus/DS-and-Algorithms | 5f1a948a085465ae165090ec957a9d5307ce729d | 3259e8183382265a27cf8c91e37d0086175a5703 | refs/heads/master | 2022-05-05T07:07:08.194243 | 2022-04-05T16:23:39 | 2022-04-05T16:23:39 | 193,111,610 | 11 | 6 | null | 2020-11-18T16:19:18 | 2019-06-21T14:27:25 | Python | UTF-8 | Python | false | false | 1,357 | py | """
_________________________746. Min Cost Climbing Stairs_________________________
Difficulty: Easy Likes: 1564 Dislikes: 354 Solution: Available
Total Accepted: 124.5K Total Submission: 254K Acceptance Rate: 49.0%
Tags: Dynamic Programming, Array
On a staircase, the i-th step has some non-negative cost cost[i] assigned
(0 indexed).
Once you pay the cost, you can either climb one or two
steps. You need to find minimum cost to reach the top of the floor, and you
can either start from the step with index 0, or the step with index 1.
Example 1:
Input: cost = [10, 15, 20]
Output: 15
Example 2:
Input: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
Output: 6
Note:
cost will have a length in the range [2, 1000].Every cost[i] will be an integer in the range [0, 999].
"""
from functools import lru_cache
def minCostClimbingStairs(cost):
    """Minimum total cost to climb past the top of the staircase.

    You may start on step 0 or step 1 and advance one or two steps after
    paying the cost of the step you stand on.

    Bottom-up DP in O(n) time / O(1) space: ``a`` and ``b`` hold the
    cheapest cost of a path whose last paid step is i-2 / i-1.
    (The commented-out memoized-recursion draft from the original has been
    removed as dead code.)

    Args:
        cost: list of non-negative step costs, len(cost) >= 2.

    Returns:
        The minimal total cost.
    """
    a = cost[0]
    b = cost[1]
    for i in range(2, len(cost)):
        # Stepping onto stair i costs cost[i] after leaving stair i-1 or i-2.
        b, a = cost[i] + min(a, b), b
    return min(a, b)
if __name__ == "__main__":
    # Ad-hoc check; expected output for the active list: 0
    cost = [0,0,0,0]
    # cost = [10, 15, 20]
    # cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
    print(minCostClimbingStairs(cost,))
"""
similarQuestions::
Climbing Stairs: Easy
"""
| [
"insaaone@gmail.com"
] | insaaone@gmail.com |
aacae8a6adbe5fe07565b2aa5b5d61ddacca7f29 | 755f1fa3d56340d64b72c261bf6c738d9fa5f1b5 | /httpInterface/get_series.py | 013e9595dab6efa8a28ed166f5bdd2f2f10c0ea1 | [] | no_license | piotrmaslanka/morda | ad3e78b3ab94129b551b4205b5a77367734d2ea6 | 4fbd1024b6e75c62c79bb15cc72bf111c53ce5a2 | refs/heads/master | 2018-12-31T21:56:59.237613 | 2014-06-01T16:31:21 | 2014-06-01T16:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | from yzero import Zero
import struct
from morda.settings import ZERO_CONNECT
def get_series(sername, from_, to, callback, zero=None):
    """Asynchronously fetch samples of series *sername* in [from_, to] and
    invoke *callback* with a list of (timestamp, float_value) tuples.

    A Zero connection is created on demand when *zero* is not supplied.
    """
    if zero is None:  # was `== None`; identity comparison is the idiom
        zero = Zero(ZERO_CONNECT)

    class Receiver(object):
        """Accumulates raw records and decodes them once the read ends."""

        def __init__(self, zero, from_, to, callback):
            self.zero = zero
            self.serdef = None   # series definition, filled by on_got_serdef
            self.data = []       # raw (timestamp, packed_bytes) pairs
            self.from_ = from_
            self.to = to
            self.fin_callback = callback

        def on_got_serdef(self, serdef):
            # Definition lookup finished -- start streaming the records.
            self.serdef = serdef
            zero.readSeries(serdef, self.from_, self.to, self.on_got_data, self.on_end)

        def on_got_data(self, dat):
            self.data.extend(dat)

        def on_end(self, suc):
            # Decode packed values in place: 8-byte records are doubles,
            # anything else is treated as a 4-byte float.
            deco = lambda x: struct.unpack('d' if self.serdef.recordsize == 8 else 'f', x)[0]
            for i in range(0, len(self.data)):
                ts, dat = self.data[i]
                self.data[i] = (ts, deco(dat))
            self.fin_callback(self.data)

    rec = Receiver(zero, from_, to, callback)
    zero.getDefinition(sername, rec.on_got_serdef)
| [
"piotr.maslanka@henrietta.com.pl"
] | piotr.maslanka@henrietta.com.pl |
2790e613abe27c5ba178b8e93d4e74818149712a | b3c93ef42b9ee529218f086cbf32535ac2e75f0b | /tests/test_version.py | c9560061ff1acad2224516f939120252a1cb0b54 | [
"MIT"
] | permissive | thumby/smartoptim | c34f85a9b83ee9194232e037ca2906b5db7fa221 | e65839dbb1fbcd985552a9a23e3a73e1cfc58d1a | refs/heads/master | 2021-01-10T02:38:03.011856 | 2015-11-23T16:35:16 | 2015-11-23T16:35:16 | 46,456,075 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of smartoptim.
# https://github.com/thumby/smartoptim
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, Thumby <dev@thumby.io>
from preggy import expect
from smartoptim import __version__
from tests.base import TestCase
class VersionTestCase(TestCase):
    """Guards the package version string."""
    def test_has_proper_version(self):
        # Keep in sync with the version declared in setup.py when releasing.
        expect(__version__).to_equal('0.1.0')
| [
"heynemann@gmail.com"
] | heynemann@gmail.com |
58ee8fa2946ceeab6382b00f21d4c439fc798613 | b31ff20af39eb96f5c78a3e41d4a7727a32bc309 | /collection/list/examples/list/list_comprehension/exercise2.py | 16501aaabe64db47d5a4e04fa83ac2ab25aa876f | [] | no_license | abhi15sep/Python-Course | 42b74c2f3f016c960edcc091808066f7d1411054 | 482bd7fdb32df54d97d1e6dd76fc807bcab70e9a | refs/heads/master | 2020-04-27T20:28:25.448692 | 2019-08-04T07:00:12 | 2019-08-04T07:00:12 | 174,659,260 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | #1. Given two lists [1,2,3,4] and [3,4,5,6], create a variable called answer, which is a new list that is the intersection of the two. Your output should be [3,4] . Hint: use the in operator to test whether an element is in a list. For example: 5 in [1,5,2] is True. 3 in [1,5,2] is False.
#2. Given a list of words ["Elie", "Tim", "Matt"] answer2, which is a new list with each word reversed and in lower case (use a slice to do the reversal!) Your output should be ['eile', 'mit', 'ttam']
# Pythonic solutions: one comprehension per exercise.
answer = [item for item in [1, 2, 3, 4] if item in [3, 4, 5, 6]]
# The [::-1] slice reverses the string before lower-casing it.
answer2 = [word[::-1].lower() for word in ["Elie", "Tim", "Matt"]]
# Explicit-loop equivalents: they rebuild the same names with the same
# final values, just more verbosely.
answer = []
for item in [1, 2, 3, 4]:
    if item in [3, 4, 5, 6]:
        answer.append(item)
answer2 = []
for word in ["Elie", "Tim", "Matt"]:
    answer2.append(word[::-1].lower())
| [
"abhaypratap3536@gmail.com"
] | abhaypratap3536@gmail.com |
cc920b16c95ac819b236fa84be0b4223fe58683a | 96602eeaa034e3e7b36df4ed10fba9bc9c9ed5c8 | /01-15/day08-2/文件操作.py | 57723e900a692f4a8c9c3817e4781677663b0e4e | [] | no_license | microease/Old-boy-Python-knight-project-1 | f4b12fe6f46bd159c6dc8151b1d28c6520042441 | dc32749e29cc63b44849d40af345d4bb7817d624 | refs/heads/master | 2020-09-20T18:00:34.821769 | 2019-12-11T14:47:44 | 2019-12-11T14:47:44 | 224,553,833 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # coding:utf-8
# File Name: 文件操作
# Description :
# Author : micro
# Date: 2019/12/3
# NOTE(review): handle is opened for writing but never written to or closed
# here — prefer a `with open(...)` block so the file is released deterministically.
f = open("./测试.txt", encoding="utf-8", mode="w")
| [
"microease@163.com"
] | microease@163.com |
2c4f87b94aa0d96dd697d8229a9c6e151d976104 | 7a09af404f29389504742a3d5f1727bfbe562750 | /TrekBot2_WS/build/tf2_eigen/catkin_generated/pkg.develspace.context.pc.py | f5c5a1a6d5db48c50693d36ed16e7a9aa2ab7b74 | [
"MIT"
] | permissive | Rafcin/TrekBot | 4baa2ed93b90920b36adba0b72384ac320d2de01 | d3dc63e6c16a040b16170f143556ef358018b7da | refs/heads/master | 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
# Include dirs come ';'-separated; an empty string must become [], not [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot2_WS/src/geometry2/tf2_eigen/include;/usr/include/eigen3".split(';') if "/xavier_ssd/TrekBot/TrekBot2_WS/src/geometry2/tf2_eigen/include;/usr/include/eigen3" != "" else []
# Catkin dependencies are stored ';'-separated; pkg-config wants spaces.
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_eigen"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/tf2_eigen"
PROJECT_VERSION = "0.6.3"
| [
"Rafcin.s@gmail.com"
] | Rafcin.s@gmail.com |
1ec1f56bd18f8a82568356fe621e4593be8a368a | 2565970a2461fec97c0b0972eed161d9bd9e268f | /test_finetuning.py | 74bcbec628e6f36adb5ccd96b187b105430735d5 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sskram/nn-toolbox | ee40e2d0f8792444a6d46bd477ffc69b144691a1 | b998d61800311d788bf3c4c5f517f1fd6d9c2e66 | refs/heads/master | 2020-07-01T02:55:22.318592 | 2019-07-21T06:46:07 | 2019-07-21T06:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,678 | py | import torchvision
from torch.nn import *
from torchvision.datasets import ImageFolder, CIFAR10
from torchvision.models import resnet18
from torchvision.transforms import *
from torch.optim import *
from torch.optim.lr_scheduler import CosineAnnealingLR
from nntoolbox.optim import AdamW
from torch.utils.data import random_split
# from adabound import AdaBound
from nntoolbox.vision.components import *
from nntoolbox.vision.learner import SupervisedImageLearner
from nntoolbox.utils import load_model, LRFinder, get_first_batch, get_device
from nntoolbox.callbacks import *
from nntoolbox.metrics import Accuracy, Loss
from nntoolbox.vision.transforms import Cutout
from nntoolbox.vision.models import ImageClassifier, EnsembleImageClassifier
from nntoolbox.losses import SmoothedCrossEntropy
from nntoolbox.init import lsuv_init
import math
torch.backends.cudnn.benchmark=True
pretrained_model = resnet18()
# print(modules)
from nntoolbox.utils import cut_model, get_trainable_parameters
feature, head = cut_model(pretrained_model)
for param in feature.parameters():
param.requires_grad = False
model = nn.Sequential(
feature,
FeedforwardBlock(
in_channels=512,
out_features=10,
pool_output_size=2,
hidden_layer_sizes=(256, 128)
)
)
# print(model._modules['0']._modules[str(0)])
from typing import List
def unfreeze(module: Sequential, optimizer: Optimizer, unfreeze_from: int, unfreeze_to: int):
    """Freeze the children of ``module`` below ``unfreeze_from`` and thaw the
    slice ``[unfreeze_from, unfreeze_to)``.

    Thawed children get ``requires_grad=True`` and their parameters are
    registered with ``optimizer`` as a fresh param group; children at index
    ``unfreeze_to`` and above are left untouched.

    :param module: sequential container whose children are (un)frozen
    :param optimizer: optimizer that should start updating thawed parameters
    :param unfreeze_from: first child index to thaw
    :param unfreeze_to: one past the last child index to thaw
    """
    for index, child in enumerate(module):
        if index >= unfreeze_to:
            # Already handled by a previous unfreeze stage — leave as-is.
            continue
        trainable = index >= unfreeze_from
        for param in child.parameters():
            param.requires_grad = trainable
        if trainable:
            optimizer.add_param_group({'params': child.parameters()})
class GradualUnfreezing(Callback):
    """Callback that progressively unfreezes the feature extractor.

    ``freeze_inds`` lists slice boundaries from the top of the feature
    module downwards; every ``unfreeze_every`` epochs one more slice of
    children becomes trainable (and is added to the optimizer).
    """

    def __init__(self, freeze_inds: List[int], unfreeze_every: int):
        self._freeze_inds = freeze_inds
        self._unfreeze_every = unfreeze_every

    def on_epoch_end(self, logs: Dict[str, Any]) -> bool:
        epoch = logs['epoch']
        stage = epoch // self._unfreeze_every
        # Fire only on multiples of unfreeze_every (skipping epoch 0) while
        # there are still boundaries left to process.
        if epoch > 0 and epoch % self._unfreeze_every == 0 and stage < len(self._freeze_inds):
            start = self._freeze_inds[stage]
            stop = self._freeze_inds[stage - 1]
            unfreeze(self.learner._model._modules['0'], self.learner._optimizer, start, stop)
            print("Unfreeze feature after " + str(start))
        # Never request early stopping.
        return False
# Unfreeze one more backbone slice every 10 epochs (boundaries 6, 4, 2, 0).
unfreezer = GradualUnfreezing([6, 4, 2, 0], 10)
# data = CIFAR10('data/', train=True, download=True, transform=ToTensor())
# train_size = int(0.8 * len(data))
# val_size = len(data) - train_size
# train_dataset, val_dataset = torch.utils.data.random_split(data, [train_size, val_size])
# train_dataset.dataset.transform = Compose(
#     [
#         RandomHorizontalFlip(),
#         RandomResizedCrop(size=32, scale=(0.95, 1.0)),
#         # Cutout(length=16, n_holes=1),
#         ToTensor()
#     ]
# )
#
# test_dataset = torchvision.datasets.CIFAR10('data/', train=False, download=True, transform=ToTensor())
train_val_dataset = ImageFolder(
    'data/imagenette-160/train',
    transform=Compose([
        Resize((128, 128)),
        ToTensor()
    ])
)
test_dataset = ImageFolder(
    'data/imagenette-160/val',
    transform=Compose([
        Resize((128, 128)),
        ToTensor()
    ])
)
# 80/20 train/validation split of the training folder.
train_size = int(0.8 * len(train_val_dataset))
val_size = len(train_val_dataset) - train_size
train_dataset, val_dataset = random_split(train_val_dataset, [train_size, val_size])
# NOTE(review): this mutates the shared underlying dataset, so the
# augmentation below also applies to val_dataset — confirm intended.
train_dataset.dataset.transform = Compose(
    [
        RandomHorizontalFlip(),
        RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
        # Cutout(length=16, n_holes=1),
        ToTensor()
    ]
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=128, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)
# print(count_trainable_parameters(model)) # 14437816 3075928
# Only parameters with requires_grad=True (the head) are optimized.
optimizer = SGD(get_trainable_parameters(model), weight_decay=0.0001, lr=0.30, momentum=0.9)
learner = SupervisedImageLearner(
    train_data=train_loader,
    val_data=val_loader,
    model=model,
    criterion=SmoothedCrossEntropy().to(get_device()),
    optimizer=optimizer,
    mixup=True
)
# lr_finder = LRFinder(
#     model=model,
#     train_data=train_loader,
#     criterion=SmoothedCrossEntropy(),
#     optimizer=partial(SGD, lr=0.074, weight_decay=0.0001, momentum=0.9),
#     device=get_device()
# )
# lr_finder.find_lr(warmup=100, callbacks=[ToDeviceCallback()])
# Stochastic weight averaging: start after 5025 and snapshot every 670
# (presumably iterations — confirm against nntoolbox docs).
swa = StochasticWeightAveraging(learner, average_after=5025, update_every=670)
callbacks = [
    # ManifoldMixupCallback(learner=learner, modules=[layer_1, block_1]),
    ToDeviceCallback(),
    # MixedPrecisionV2(),
    # InputProgressiveResizing(initial_size=80, max_size=160, upscale_every=10, upscale_factor=math.sqrt(2)),
    # unfreezer,
    Tensorboard(),
    # ReduceLROnPlateauCB(optimizer, monitor='accuracy', mode='max', patience=10),
    LRSchedulerCB(CosineAnnealingLR(optimizer, eta_min=0.10, T_max=335)),
    swa,
    LossLogger(),
    ModelCheckpoint(learner=learner, filepath="weights/model.pt", monitor='accuracy', mode='max'),
]
metrics = {
    "accuracy": Accuracy(),
    "loss": Loss()
}
final = learner.learn(
    n_epoch=500,
    callbacks=callbacks,
    metrics=metrics,
    final_metric='accuracy'
)
print(final)
# Evaluate the best checkpoint with test-time augmentation.
load_model(model=model, path="weights/model.pt")
classifier = ImageClassifier(model, tta_transform=Compose([
    ToPILImage(),
    RandomHorizontalFlip(),
    RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
    ToTensor()
]))
print(classifier.evaluate(test_loader))
print("Test SWA:")
# Same evaluation on the SWA-averaged weights.
model = swa.get_averaged_model()
classifier = ImageClassifier(model, tta_transform=Compose([
    ToPILImage(),
    RandomHorizontalFlip(),
    RandomResizedCrop(size=(128, 128), scale=(0.95, 1.0)),
    ToTensor()
]))
print(classifier.evaluate(test_loader))
"nhatsmrt@uw.edu"
] | nhatsmrt@uw.edu |
c302bd0f7915622567d722cecc72a0fa8d7a454e | 7f57c12349eb4046c40c48acb35b0f0a51a344f6 | /2015/AddTwoNumbers_v2.py | d543f46cb1cd4d03c07c46e63c81d2e789227d84 | [] | no_license | everbird/leetcode-py | 0a1135952a93b93c02dcb9766a45e481337f1131 | b093920748012cddb77258b1900c6c177579bff8 | refs/heads/master | 2022-12-13T07:53:31.895212 | 2022-12-10T00:48:39 | 2022-12-10T00:48:39 | 11,116,752 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | #!/usr/bin/env python
# encoding: utf-8
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node (LeetCode scaffolding)."""
    def __init__(self, x):
        self.val = x  # payload value stored at this node
        self.next = None  # successor node; None terminates the list
class Solution:
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as linked lists of digits in
        least-significant-first order; return the sum in the same format.

        :param l1: {ListNode} head of the first number (may be None)
        :param l2: {ListNode} head of the second number (may be None)
        :return: {ListNode} head of the digit-wise sum
        """
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        head = node = ListNode(0)
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            # Split the digit sum into next carry and current digit.
            carry, node.val = divmod(total, 10)
            # Allocate the next node only if more digits remain, so no
            # trailing zero node is produced.
            if l1 or l2 or carry:
                node.next = ListNode(0)
                node = node.next
        return head
def print_list(list_head):
    # Print all node values on one line, then an empty line (Python 2 syntax).
    print_l(list_head)
    print '\n'
def print_l(list_head):
    # Recursively print node values; the trailing comma keeps them on one
    # line (Python 2 print statement).
    if list_head:
        print list_head.val,
        print_l(list_head.next)
if __name__ == '__main__':
    # Smoke test: 5 + 5 = 10, stored least-significant-digit first,
    # so the sum list should print as "0 1".
    l1a = ListNode(5)
    l1 = l1a
    l2a = ListNode(5)
    l2 = l2a
    s = Solution()
    lr = s.addTwoNumbers(l1, l2)
    print_list(l1)
    print_list(l2)
    print_list(lr)
| [
"stephen.zhuang@gmail.com"
] | stephen.zhuang@gmail.com |
91a8cc0846d15cb77f1dac39e86a83ba81da4c66 | 9b162310e5db0f714dbd6019894eb5b04192b6aa | /src/windows-gam.spec | 658f6450028749ee0c70daae3de86bc5c86028b0 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant",
"LicenseRef-scancode-zeusbench",
"NTP",
"metamail",
"Beerware",
"LicenseRef-scancode-rsa-1990",
"RSA-MD",
"Spencer-94",
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | xbl3/GAMADV-XTD | c1d68911f4116157173838856f49151e05cd5658 | a09efb7a10074dc052968ef82c1044f2a0b664b3 | refs/heads/master | 2022-04-08T22:30:39.715172 | 2020-02-22T18:05:00 | 2020-02-22T18:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | spec | # -*- mode: python -*-
# Data files bundled next to the executable (certificates + API discovery docs).
ssl_json_files = [
  ('cacerts.pem', '.'),
  ('cros-aue-dates.json', '.'),
  ('cloudprint-v2.json', '.'),
  ('contacts-v3.json', '.'),
  ('email-audit-v1.json', '.'),
  ('email-settings-v2.json', '.'),
  ('sites-v1.json', '.')
  ]
a = Analysis(['gam.py'],
             pathex=['C:\\GAMADV-XTD'],
             datas=ssl_json_files,
             hiddenimports=[],
             hookspath=None,
             excludes=['_tkinter'],
             runtime_hooks=None)
# Drop pyconfig from the collected data files; only the first match is removed.
for d in a.datas:
  if 'pyconfig' in d[0]:
    a.datas.remove(d)
    break
pyz = PYZ(a.pure)
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          name='gam.exe',
          debug=False,
          strip=None,
          upx=True,
          console=True )
| [
"ross.scroggs@gmail.com"
] | ross.scroggs@gmail.com |
80ceed4ac066ee786c77744bdc31d0acfd1fd6e0 | ca28f1535bb9a4b6504d5f6a5c5abf1a4569037f | /pos_umbrella/pos_umbrella/report/eod_report/eod_report.py | fc9a6b62af7844ad6a75cb96ea3541872cb50a0a | [
"MIT"
] | permissive | worldkingpradeep/pos_umbrella | 8b8f83cb7d638f15a1808e779656e250549c5e26 | 6fa7a51a9c019b533befcf85955fdd5e165c6a5c | refs/heads/master | 2023-04-20T06:30:37.054666 | 2021-05-14T17:19:59 | 2021-05-14T17:19:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,285 | py | # Copyright (c) 2013, jan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
    """Build the EOD (end-of-day) sales report.

    One row per submitted Sales Invoice posted in ``[from_date, to_date]``;
    when the ``with_details`` filter is set, one row per invoice item instead
    (POS invoices only, ``is_pos=1``).  A column is appended dynamically for
    every Mode of Payment encountered via :func:`check_mop`.

    :param filters: dict-like report filters: ``from_date``, ``to_date``,
        optional ``pos_profile`` and ``with_details``.
    :return: ``(columns, data)`` in the standard Frappe script-report format.
    :raises frappe.ValidationError: via ``frappe.throw`` when the date range
        is inverted.
    """
    columns, data = [], []
    from_date = filters.get("from_date")
    to_date = filters.get("to_date")
    pos_profile = filters.get("pos_profile")
    with_details = filters.get("with_details")

    # frappe.throw raises, so everything below only runs for a valid range.
    if from_date > to_date:
        frappe.throw("From Date should be before To Date")

    columns.append({"fieldname": "store_name", "label": "Store Name", "fieldtype": "Data", "width": 150})
    if with_details:
        # Per-item columns only exist in detail mode.
        columns.append({"fieldname": "invoice_number", "label": "Invoice Number", "fieldtype": "Link", "options": "Sales Invoice", "width": 150})
        columns.append({"fieldname": "item_code", "label": "Item_code", "fieldtype": "Data", "width": 120})
        columns.append({"fieldname": "item_name", "label": "Item Name", "fieldtype": "Data", "width": 230})
        columns.append({"fieldname": "quantity", "label": "Quantity", "fieldtype": "Data", "width": 100})
        columns.append({"fieldname": "rate", "label": "Rate", "fieldtype": "Data", "width": 100})
        columns.append({"fieldname": "amount", "label": "Amount", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "discount", "label": "Discount", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "write_off", "label": "Write Off", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "loyalty", "label": "Loyalty", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "net_sale", "label": "Net Sale", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "vat", "label": "VAT", "fieldtype": "Data", "width": 100})
    columns.append({"fieldname": "gross_sale", "label": "Gross Sale", "fieldtype": "Data", "width": 100})

    condition = ""
    if pos_profile:
        # NOTE(review): value is interpolated straight into SQL.  pos_profile
        # is a Link-filter value, but a parameterized query would be safer.
        condition += " and pos_profile='{0}' ".format(pos_profile)
    if with_details:
        condition += " and is_pos=1"
    condition += " ORDER By pos_profile ASC"
    query = """ SELECT * FROM `tabSales Invoice`
    WHERE docstatus=1 and posting_date BETWEEN '{0}' and '{1}' {2}""".format(from_date, to_date, condition)
    sales_invoices = frappe.db.sql(query, as_dict=True)

    for invoice in sales_invoices:
        if not with_details:
            # One summary row per invoice.  "invoice_number" has no matching
            # column in this mode and is simply ignored by the report view.
            row = {
                "invoice_number": invoice.name,
                "store_name": invoice.pos_profile,
                "discount": invoice.discount_amount,
                "write_off": invoice.write_off_amount,
                "loyalty": invoice.loyalty_amount,
                "net_sale": invoice.total,
                "gross_sale": invoice.grand_total,
                "vat": invoice.total_taxes_and_charges,
            }
            payments = frappe.db.sql(
                """ SELECT * FROM `tabSales Invoice Payment` WHERE parent=%s """,
                invoice.name, as_dict=True)
            for payment in payments:
                check_mop(columns, payment)  # make sure the MOP column exists
                row[payment.mode_of_payment] = payment.amount
            data.append(row)
        else:
            # One row per invoice item; the first row also carries the
            # invoice-level totals and payment amounts.  Invoices with no
            # items contribute no rows at all.
            row = {"invoice_number": invoice.name, "store_name": invoice.pos_profile}
            items = frappe.db.sql(
                """ SELECT * FROM `tabSales Invoice Item` WHERE parent=%s""",
                invoice.name, as_dict=1)
            for item_idx, item in enumerate(items):
                if item_idx == 0:
                    row["item_code"] = item.item_code
                    row["item_name"] = item.item_name
                    row["quantity"] = item.qty
                    row["rate"] = item.rate
                    row["amount"] = item.amount
                    row["discount"] = invoice.discount_amount
                    row["write_off"] = invoice.write_off_amount
                    row["loyalty"] = invoice.loyalty_amount
                    row["net_sale"] = invoice.total
                    row["gross_sale"] = invoice.grand_total
                    row["vat"] = invoice.total_taxes_and_charges
                    payments = frappe.db.sql(
                        """ SELECT * FROM `tabSales Invoice Payment` WHERE parent=%s """,
                        invoice.name, as_dict=True)
                    for payment in payments:
                        check_mop(columns, payment)
                        row[payment.mode_of_payment] = payment.amount
                else:
                    row = {
                        "item_code": item.item_code,
                        "item_name": item.item_name,
                        "quantity": item.qty,
                        "rate": item.rate,
                        "amount": item.amount,
                    }
                data.append(row)
    return columns, data
def check_mop(columns, ii):
    """Append a report column for ``ii.mode_of_payment`` to ``columns``
    unless a column with that label already exists (mutates in place)."""
    already_present = any(col.get("label") == ii.mode_of_payment for col in columns)
    if not already_present:
        columns.append({
            "fieldname": ii.mode_of_payment,
            "label": ii.mode_of_payment,
            "fieldtype": "Data",
            "width": 150
        })
}) | [
"jangeles@bai.ph"
] | jangeles@bai.ph |
fabdcb016bb2945ce5a4420e58c20a8cc2070765 | 46afba4407a98ac564ed7a2e08aebfcec4fa1ba3 | /Project Euler/problem_20.py | 83947b08ce6336236edc3cd968c9bdea337af690 | [] | no_license | areebbeigh/CompetitiveProgramming | b28ffe99ac15cadfa3b54f9974beb77c280b2309 | 04044674ad0663181326649d0c14da94108e90da | refs/heads/master | 2021-07-15T07:48:42.338241 | 2021-07-13T10:36:11 | 2021-07-13T10:36:11 | 199,145,494 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | #!/usr/bin/python3.6
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative, so large n cannot hit Python's recursion limit (the original
    recursive version could).

    :raises ValueError: if n is negative (the recursion would never
        terminate for such input).
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
# Project Euler 20: print the digit sum of 100! (expected: 648).
# int() maps digits directly — the redundant lambda wrapper is gone.
print(sum(int(digit) for digit in str(factorial(100))))
| [
"areebbeigh@gmail.com"
] | areebbeigh@gmail.com |
ba292c32cf83dce0c1b1f6d90d73548a523ad68b | 98e4dc41e3d994dfb55a2553c79d1b61590ecca6 | /LeetCode/Medium/Subarray Sum Equals K/sol.py | aa9bc85d1765a4c74100a8bfe8caf9119a4376d8 | [] | no_license | krohak/Project_Euler | b753c4f3bbf26a5eff3203e27482599d1e089fc6 | 1d8a2326543d69457f1971af9435b3e93ab32f52 | refs/heads/master | 2022-09-02T10:48:59.472111 | 2022-08-18T11:11:16 | 2022-08-18T11:11:16 | 111,204,162 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | class Solution:
def subarraySum(self, nums, target_sum):
cumulative_sum = {0:1}
counter = 0
summ = 0
for num in nums:
summ+=num
if (summ-target_sum) in cumulative_sum:
counter+=cumulative_sum[(summ-target_sum)]
cumulative_sum[summ] = cumulative_sum.get(summ, 0)+1
return counter
nums = [1,1,1,1,2,2,1,1]
sol = Solution().subarraySum(nums, 2)
print(sol)  # expected: 6
"rohaksinghal14@gmail.com"
] | rohaksinghal14@gmail.com |
4eff3ee86176474b0f5ada0af11864b69625c3c0 | ec551303265c269bf1855fe1a30fdffe9bc894b6 | /topic10_queue/T933_RecentCounter/interview.py | 8bd0e8689824bc5d8875b7a6fa5e0244cd77e011 | [] | no_license | GongFuXiong/leetcode | 27dbda7a5ced630ae2ae65e19d418ebbc65ae167 | f831fd9603592ae5bee3679924f962a3ebce381c | refs/heads/master | 2023-06-25T01:05:45.683510 | 2021-07-26T10:05:25 | 2021-07-26T10:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,331 | py | '''
933. 最近的请求次数
写一个 RecentCounter 类来计算最近的请求。
它只有一个方法:ping(int t),其中 t 代表以毫秒为单位的某个时间。
返回从 3000 毫秒前到现在的 ping 数。
任何处于 [t - 3000, t] 时间范围之内的 ping 都将会被计算在内,包括当前(指 t 时刻)的 ping。
保证每次对 ping 的调用都使用比之前更大的 t 值。
示例:
输入:inputs = ["RecentCounter","ping","ping","ping","ping"], inputs = [[],[1],[100],[3001],[3002]]
输出:[null,1,2,3,3]
提示:
每个测试用例最多调用 10000 次 ping。
每个测试用例会使用严格递增的 t 值来调用 ping。
每次调用 ping 都有 1 <= t <= 10^9。
'''
import collections
class RecentCounter:
    """Counts pings falling in the sliding window [t - 3000, t] ms.

    Callers are guaranteed to use strictly increasing values of t, so a
    deque of timestamps pruned from the left stays sorted.
    """

    def __init__(self):
        # Timestamps of pings still inside the window, oldest first.
        self.deque = collections.deque()

    def ping(self, t):
        """Record a ping at time ``t`` (ms) and return the number of pings
        in the last 3000 ms, inclusive of ``t`` itself."""
        self.deque.append(t)
        cutoff = t - 3000
        # Drop everything older than the window start.
        while self.deque and self.deque[0] < cutoff:
            self.deque.popleft()
        return len(self.deque)
return len(self.deque)
if __name__ == "__main__":
    # Interactive driver: read increasing timestamps from stdin until an
    # empty line, printing the window count after each ping.
    solution = RecentCounter()
    while 1:
        str1 = input()
        if str1 != "":
            num1 = int(str1)
            res = solution.ping(num1)
            print(res)
        else:
            break
| [
"958747457@qq.com"
] | 958747457@qq.com |
bfcc4f82ae5fd44b4414bb887094046c13bb3e10 | c0fad90611a6e943277c3d79eeb48ccd5f0d0a88 | /29divide.py | 6cee6413834a3e4bbc05b6458fb1114fdad5b765 | [] | no_license | lmb633/leetcode | e2da31984af07b9e16787f4d57f82dab2dcb551a | d91568d245dd8fb66f46ff73737cbad974f490a6 | refs/heads/master | 2021-07-19T16:07:40.864854 | 2021-02-24T10:57:40 | 2021-02-24T10:57:40 | 243,146,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | class Solution(object):
def divide(self, dividend, divisor):
if dividend == 0:
return 0
if dividend == -2 ** 31 and divisor == -1:
return 2 ** 31 - 1
flag = 1
if dividend ^ divisor < 0:
flag = -1
dividend = abs(dividend)
divisor = abs(divisor)
result = 0
for i in range(31, -1, -1):
if (dividend >> i) >= divisor:
result += (1 << i)
dividend -= divisor << i
return result if flag > 0 else -result
| [
"limingbo@focusmedia.cn"
] | limingbo@focusmedia.cn |
315e0a44b6c237ed7a6e9ed6807d3222de0857a3 | 7837cd1bee1a9abd623600cf30c2f462da48d558 | /aaa.py | 1123a79ae464804cd597a4e45e9d9c4e5f526712 | [] | no_license | hal1932/astor_test | 8285b3b8c1fa187b7cd3c8d147c8a75d8e4ba207 | e14c7de55bb6e947e41387d33fff5286bbea4570 | refs/heads/master | 2021-08-30T09:37:33.083995 | 2017-12-17T08:36:12 | 2017-12-17T08:36:12 | 114,521,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # encoding: utf-8
import functools
def deco1(func):
    # Bare decorator: wraps func with start/end log prints (Python 2).
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print 'deco1 start'
        func(*args, **kwargs)
        print 'deco1 end'
    return wrapper
def deco2(*arg, **kwarg):
    # Decorator factory: accepts (and ignores) arbitrary configuration
    # arguments, returning the actual decorator.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print 'deco2 start'
            func(*args, **kwargs)
            print 'deco2 end'
        return wrapper
    return decorator
def func1(arg1):
    # Undecorated baseline: prints its argument and a local value.
    print arg1
    x = 1
    print x
# Decorated with the plain (argument-less) decorator.
@deco1
def func2(arg):
    print arg
# Decorated with the parameterized decorator (its args are ignored).
@deco2('hoge', 1, a=2.0)
def func3(arg):
    print arg
def main():
    # Exercise all three decoration styles.
    func1('aaa')
    func2('bbb')
    func3('ccc')
if __name__ == '__main__':
    main()
| [
"yu.arai.19@gmail.com"
] | yu.arai.19@gmail.com |
760c46b1f182472c11a3cb0026781c521fddb142 | a943cb6da95ec1e06cb480887ba1062a5783527f | /2012-aqs/figures/plot-smh-norris.py | 262432f7ea056293f457ca60b89fe0a54119ed39 | [] | no_license | andycasey/papers | 1b2c882c20b0c65b5899d70dc95825ec53cc9fe2 | 3d585ad4b6b1c3b40227185fd7b22ea9bdeb8e02 | refs/heads/master | 2021-01-19T17:24:48.788580 | 2013-08-13T08:51:02 | 2013-08-13T08:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Equivalent widths measured by this work (smh) vs Norris et al. 1996.
wavelength, smh_ew, norris_ew = np.loadtxt('SMH-Norris-comparison.data', usecols=(0, 1, 2, ), unpack=True)
fig = plt.figure(figsize=(6,7))
fig.subplots_adjust(hspace=0.0, wspace=0.0)
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 2])
# Top panel: residuals (smh - Norris) with a least-squares trend line.
ax1 = fig.add_subplot(gs[0])
#ax1 = plt.subplot2grid((3, 1), (0, 0))
ax1.scatter(smh_ew, smh_ew - norris_ew, facecolor='none', edgecolor='k', marker='+')
ax1.plot([0, 200], [0, 0], 'k-', zorder=-1)
A = np.vstack([smh_ew, np.ones(len(norris_ew))]).T
m, c = np.linalg.lstsq(A, smh_ew - norris_ew)[0]
x = np.array([np.min(smh_ew), np.max(smh_ew)])
ax1.plot(x, m * x + c, 'k:')
ylim = np.max(np.abs(np.array(ax1.get_ylim())))
ax1.set_ylim(-15, 15)
ax1.xaxis.set_visible(False)
ax1.set_ylabel('$\Delta{}W_\lambda$ [m$\AA{}$]')
# Bottom panel: direct comparison with the one-to-one line and linear fit.
ax2 = fig.add_subplot(gs[1], sharex=ax1)
#ax2 = plt.subplot2grid((3, 1), (1, 0), rowspan=2)
ax2.scatter(smh_ew, norris_ew, facecolor='none', edgecolor='k', marker='+')
A = np.vstack([norris_ew, np.ones(len(norris_ew))]).T
m, c = np.linalg.lstsq(A, smh_ew)[0]
x = np.array([0, 200])
ax2.plot(x, x, 'k-', zorder=-1)
x = np.array([np.min(smh_ew), np.max(smh_ew)])
ax2.plot(x, m * x + c, 'k:')
# Plot an error cone
error = 10 # percent
bounds = np.array([0, 160])
#ax2.plot(bounds, bounds * (1 + error/100.), '-', c='#aaaaaa', zorder=-5)
#ax2.plot(bounds, bounds * (1 - error/100.), '-', c='#aaaaaa', zorder=-5)
ax1.set_xlim(bounds)
ax2.set_xlim(bounds)
ax2.set_ylim(bounds)
ax2.set_xlabel('$W_\lambda$ (This work, automatic) [m$\AA{}$]')
ax2.set_ylabel('$W_\lambda$ (Norris et al. 1996) [m$\AA{}$]')
# Hide overlapping tick labels where the panels meet.
ax2.get_yticklabels()[-1].set_visible(False)
ax1.get_yticklabels()[0].set_visible(False)
ax1.get_yticklabels()[-1].set_visible(False)
ax1.text(5, 10, '$\langle{}\Delta{}W_\lambda\\rangle{}\,=\,-0.64\,\pm\,2.78\,$m${\AA}$', color='k', verticalalignment='center')
ax2.text(5, 150, "$a_0\,=\,%1.2f$\n$a_1\,=\,%1.2f$\n$N\,=\,%i$" % (c, m, len(smh_ew)), verticalalignment='top')
#ax1.set_title('%i lines in HD 140283' % (len(smh_ew), ))
plt.savefig('smh-norris.pdf')
plt.savefig('smh-norris.eps')
"andycasey@gmail.com"
] | andycasey@gmail.com |
6298875d8e11878aa23517f122c8a75e9d106d46 | 38fff7bdefd8d62a740d51329b50d0e1e49258bb | /projects/smart_open/fuzz_zip.py | 3a7f08c09ad89f312bad1a8251882d276259b866 | [
"Apache-2.0"
] | permissive | google/oss-fuzz | 026384c2ada61ef68b147548e830f60730c5e738 | f0275421f84b8f80ee767fb9230134ac97cb687b | refs/heads/master | 2023-08-31T23:30:28.157702 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 63,809,205 | 9,438 | 2,315 | Apache-2.0 | 2023-09-14T20:32:19 | 2016-07-20T19:39:50 | Shell | UTF-8 | Python | false | false | 1,605 | py | #!/usr/bin/python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atheris
import sys
import os
with atheris.instrument_imports():
from smart_open import open
import zipfile
import tempfile
def TestInput(data):
    """Fuzz entry point: round-trip a fuzzer-derived string through a zip
    archive using smart_open's `open` (shadowing the builtin) and assert the
    payload survives intact."""
    if len(data) < 10:
        return
    fdp = atheris.FuzzedDataProvider(data)
    # delete=False because the file is re-opened by name below; it is
    # removed manually at the end.
    tmp = tempfile.NamedTemporaryFile(prefix=fdp.ConsumeString(10), suffix=fdp.ConsumeString(4), delete=False)
    filestr = fdp.ConsumeString(100)
    with open(tmp.name, 'wb') as f:
        with zipfile.ZipFile(f, 'w') as zip:
            # Write the same payload under two fuzzed member names.
            zip.writestr(fdp.ConsumeString(10), filestr)
            zip.writestr(fdp.ConsumeString(10), filestr)
    with open(tmp.name, 'rb') as f:
        with zipfile.ZipFile(f) as zip:
            for info in zip.infolist():
                file_bytes = zip.read(info.filename)
                assert filestr == file_bytes.decode('utf-8')
    os.unlink(tmp.name)
os.unlink(tmp.name)
def main():
    # Register the fuzz target with atheris and start fuzzing.
    atheris.Setup(sys.argv, TestInput, enable_python_coverage=True)
    atheris.instrument_all()
    atheris.Fuzz()
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | google.noreply@github.com |
66b1007d1dabe0428cbe0ba4c2f82d9ad8aa4dec | cb20ef5b4048457a2e6dca4a4cb45c53c9843744 | /tests/RESTful/testcases/system/test01_usermanager.py | c8459e97ec6e5c06d18b505403753911f74efb0c | [] | no_license | rudecs/openvcloud | 5001b77e8d943427c1bed563f3dcc6b9467936e2 | 12ccce2a54034f5bf5842e000c2cc3d7e22836d8 | refs/heads/master | 2020-03-24T00:00:10.422677 | 2018-11-22T13:41:17 | 2018-11-22T13:41:17 | 142,267,808 | 2 | 1 | null | 2018-07-25T08:02:37 | 2018-07-25T08:02:36 | null | UTF-8 | Python | false | false | 4,341 | py | import time, random, unittest
from testcases import *
from nose_parameterized import parameterized
class UsersTests(TestcasesBase):
    """API tests for /system/usermanager user endpoints
    (create / get / exists / edit / delete)."""
    def setUp(self):
        # Every test starts from a freshly created user, registered for cleanup.
        super().setUp()
        self.data, self.response = self.api.system.usermanager.create(provider=None)
        self.assertEqual(self.response.status_code, 200, self.response.content)
        self.username = self.data['username']
        self.CLEANUP['users'].append(self.username)
    @parameterized.expand([('exists', 200, 'true'), ('non-exist', 404, 'false')])
    def test01_userget_userexists(self, case, response_code, userexists):
        """ OVC-001
        #. Create user (U1), should succeed.
        #. Get user (U1), should succeed.
        #. Check if user (U1) exists, should return true.
        #. Get not existing user, should fail.
        #. Check if non-existing user exists, should return false.
        """
        if case == 'exists':
            username = self.username
        else:
            username = self.utils.random_string()
        response = self.api.system.usermanager.userget(name=username)
        self.assertEqual(response.status_code, response_code, response.content)
        # userexists always returns 200; existence is encoded in the body text.
        response = self.api.system.usermanager.userexists(name=username)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response.text, userexists)
    @parameterized.expand([('exists', 200), ('non-exist', 404)])
    def test02_edit_user(self, case, response_code):
        """ OVC-002
        #. Create user (U1), should succeed.
        #. Edit user (U1), should succeed.
        #. Edit non-existing user, should fail.
        """
        if case == 'exists':
            username = self.username
        else:
            username = self.utils.random_string()
        data, response = self.api.system.usermanager.editUser(username=username)
        self.assertEqual(response.status_code, response_code, response.content)
    @parameterized.expand([('exists', 200), ('non-exist', 404)])
    def test03_delete_user(self, case, response_code):
        """ OVC-003
        #. Create user (U1), should succeed.
        #. Delete user (U1), should succeed.
        #. Delete none existing user, should fail.
        """
        if case == 'exists':
            username = self.username
        else:
            username = self.utils.random_string()
        response = self.api.system.usermanager.delete(username=username)
        self.assertEqual(response.status_code, response_code, response.content)
        # After deletion the user must no longer exist, whichever case ran.
        response = self.api.system.usermanager.userexists(name=username)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response.text, 'false')
class GroupsTests(TestcasesBase):
    """API tests for /system/usermanager group endpoints (edit / delete)."""
    def setUp(self):
        # Each test operates on a freshly created group.
        super().setUp()
        self.data, self.response = self.api.system.usermanager.createGroup()
        self.assertEqual(self.response.status_code, 200, self.response.content)
        self.name = self.data['name']
    def tearDown(self):
        # Best-effort cleanup of the group created in setUp.
        self.api.system.usermanager.deleteGroup(id=self.name)
        super().tearDown()
    @parameterized.expand([('exists', 200), ('non-exist', 404)])
    def test01_edit_group(self, case, response_code):
        """ OVC-001
        #. Create group (G1), should succeed.
        #. Edit group (G1), should succeed.
        #. Edit non-existing group, should fail.
        """
        if case == 'exists':
            name = self.name
        else:
            name = self.utils.random_string()
        data, response = self.api.system.usermanager.editGroup(name=name)
        self.assertEqual(response.status_code, response_code, response.content)
    @parameterized.expand([('exists', 200), ('non-exist', 404)])
    @unittest.skip('https://github.com/0-complexity/openvcloud/issues/1367')
    def test02_delete_group(self, case, response_code):
        """ OVC-002
        #. Create group (G1), should succeed.
        #. Delete group (G1), should succeed.
        #. Delete non-existing group, should fail.
        """
        if case == 'exists':
            name = self.name
        else:
            name = self.utils.random_string()
        response = self.api.system.usermanager.deleteGroup(id=name)
        self.assertEqual(response.status_code, response_code, response.content)
"deboeck.jo@gmail.com"
] | deboeck.jo@gmail.com |
0a58a94a0291c9eee74ec90033a491790733ec6e | 55e28e35db5bf6a844df3fb47080500b115a893e | /day10/select/select_server.py | 009bb5773e7561fb2d56689d463ea451aefcc9ee | [] | no_license | pylarva/Python | 5743ffa4a69db42b642d51b62f9e9b69ddbc1a72 | 71b484950e6dbdcf708726a68a3386d0d6ddc07f | refs/heads/master | 2020-04-19T09:11:11.195393 | 2017-11-16T07:32:59 | 2017-11-16T07:32:59 | 67,507,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,258 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:pylarva
# bolg:www.lichengbing.com
__author__ = 'Alex Li'
import select
import socket
import sys
import queue
server = socket.socket()
server.setblocking(0)
server_addr = ('localhost', 10000)
print('starting up on %s port %s' % server_addr)
server.bind(server_addr)
server.listen(5)
inputs = [server, ] #自己也要监测呀,因为server本身也是个fd
outputs = []
message_queues = {}
while True:
print("waiting for next event...")
readable, writeable, exeptional = select.select(inputs,outputs,inputs) #如果没有任何fd就绪,那程序就会一直阻塞在这里
for s in readable: #每个s就是一个socket
if s is server: #别忘记,上面我们server自己也当做一个fd放在了inputs列表里,传给了select,如果这个s是server,代表server这个fd就绪了,
#就是有活动了, 什么情况下它才有活动? 当然 是有新连接进来的时候 呀
#新连接进来了,接受这个连接
conn, client_addr = s.accept()
print("new connection from",client_addr)
conn.setblocking(0)
inputs.append(conn) #为了不阻塞整个程序,我们不会立刻在这里开始接收客户端发来的数据, 把它放到inputs里, 下一次loop时,这个新连接
#就会被交给select去监听,如果这个连接的客户端发来了数据 ,那这个连接的fd在server端就会变成就续的,select就会把这个连接返回,返回到
#readable 列表里,然后你就可以loop readable列表,取出这个连接,开始接收数据了, 下面就是这么干 的
message_queues[conn] = queue.Queue() #接收到客户端的数据后,不立刻返回 ,暂存在队列里,以后发送
else: #s不是server的话,那就只能是一个 与客户端建立的连接的fd了
#客户端的数据过来了,在这接收
data = s.recv(1024)
if data:
print("收到来自[%s]的数据:" % s.getpeername()[0], data)
message_queues[s].put(data) #收到的数据先放到queue里,一会返回给客户端
if s not in outputs:
outputs.append(s) #为了不影响处理与其它客户端的连接 , 这里不立刻返回数据给客户端
else:#如果收不到data代表什么呢? 代表客户端断开了呀
print("客户端断开了",s)
if s in outputs:
outputs.remove(s) #清理已断开的连接
inputs.remove(s) #清理已断开的连接
del message_queues[s] ##清理已断开的连接
for s in writeable:
try :
next_msg = message_queues[s].get_nowait()
except queue.Empty:
print("client [%s]" %s.getpeername()[0], "queue is empty..")
outputs.remove(s)
else:
print("sending msg to [%s]"%s.getpeername()[0], next_msg)
s.send(next_msg.upper())
for s in exeptional:
print("handling exception for ", s.getpeername())
inputs.remove(s)
if s in outputs:
outputs.remove(s)
s.close()
del message_queues[s] | [
"1326126359@qq.com"
] | 1326126359@qq.com |
6d71558f72f56b692f826f2c54b03347759f5030 | 66b220a4c8c0bfde435f29e3a18cf79f6e7a4c67 | /src/exemplos/01_Dados/02_Operadores/01-subtracao.py | 77f2c292956d5c74e2524a563e94f8fc4d5a83cb | [] | no_license | gnramos/CIC-APC | 089b6d0110394b4db97c23e032394eaefce0aeef | b94fe2dc4840064f1613d24e5d1447d49b9bb8bd | refs/heads/master | 2023-04-15T18:11:27.919896 | 2023-04-05T21:31:03 | 2023-04-05T21:31:03 | 31,514,265 | 42 | 30 | null | 2018-11-20T18:09:10 | 2015-03-01T22:57:39 | C | UTF-8 | Python | false | false | 964 | py | # -*- coding: utf-8 -*-
# @package: 01-subtracao.py
# @author: Guilherme N. Ramos (gnramos@unb.br)
# @disciplina: Algoritmos e Programação de Computadores
#
# Exemplos de utilização do operador de subtração. Em Python, só é possível
# subtrair valores numéricos.
print('Subtração (numéricos):')
# Escreva o resultado da operação 2 - 1. A subtração de valores inteiros também
# é um valor inteiro.
print(' 2 - 1 =', 2 - 1)
# Escreva o resultado da operação 1 - 2.
print(' 1 - 2 =', 1 - 2)
# Escreva o resultado da operação 2 - 1.0. A subtração de valores reais de
# inteiros é um valor real.
print(' 2 - 1.0 =', 2 - 1.0)
# Escreva o resultado da operação 2.0 - 1. A subtração de valores inteiros de
# reais é um valor real.
print(' 2.0 - 1 =', 2.0 - 1)
# Escreva o resultado da operação 2.0 - 1.0. A subtração de valores reais
# também é um valor real.
print(' 2.0 - 1.0 =', 2.0 - 1.0) | [
"ramos@gnramos.com"
] | ramos@gnramos.com |
2810d657e2aa3272c2d799f7b7ea8f265d83dd92 | 321afe9ca4a30ff655483901bdb6368cce1bd58b | /catalog/migrations/0019_biditems_time.py | f8acacc12ce34e72ef8a1a024598b0d27ff127b5 | [] | no_license | moses-mugoya/Auction-System | 75456a475a0a76a9c7143f2f039e059f841d204f | 42de3e68fd7a99bdb0598f820b5f8ae6359e972d | refs/heads/main | 2023-02-04T22:58:22.793934 | 2020-12-24T18:05:51 | 2020-12-24T18:05:51 | 324,211,000 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # Generated by Django 2.1.4 on 2019-04-07 17:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0018_auto_20190407_1343'),
]
operations = [
migrations.AddField(
model_name='biditems',
name='time',
field=models.BooleanField(default=False),
),
]
| [
"mosesmugoya31@gmail.com"
] | mosesmugoya31@gmail.com |
efbe5cae3f768724158b26af2d52232b3009deaf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02889/s800108978.py | f16d83d80d9585a9e51d77414e46a2135a05fdac | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | import sys
import math
import heapq
sys.setrecursionlimit(10**7)
INTMAX = 9323372036854775807
INTMIN = -9223372036854775808
DVSR = 1000000007
def POW(x, y): return pow(x, y, DVSR)
def INV(x, m=DVSR): return pow(x, m - 2, m)
def DIV(x, y, m=DVSR): return (x * INV(y, m)) % m
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def II(): return int(sys.stdin.readline())
def FLIST(n):
res = [1]
for i in range(1, n+1): res.append(res[i-1]*i%DVSR)
return res
N,M,L=LI()
LG=10**15
DIST=[[LG for _ in range(N+1)] for _ in range(N+1)]
for i in range(M):
a,b,c = LI()
if c <= L:
DIST[a][b] = c
DIST[b][a] = c
for k in range(1, N+1):
for i in range(1, N+1):
for j in range(1, N+1):
if DIST[i][j] > DIST[i][k] + DIST[k][j]:
DIST[i][j] = DIST[i][k] + DIST[k][j]
for i in range(1, N+1):
for j in range(1, N+1):
DIST[i][j] = 1 if DIST[i][j] <= L else LG
for k in range(1, N+1):
for i in range(1, N+1):
for j in range(1, N+1):
if DIST[i][j] > DIST[i][k] + DIST[k][j]:
DIST[i][j] = DIST[i][k] + DIST[k][j]
for i in range(II()):
st, en = LI()
if DIST[st][en] >= LG:
print(-1)
else:
print(DIST[st][en] - 1)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
403050852dd2e8392f1e8610f4911bf3608ab119 | 9ee751382146d280c0105981e2e54fa900cb04de | /djblets/util/tests/test_compressed_tags.py | 1d1d87775890230056755ea9767bb66c60caefae | [] | no_license | lmyfzx/djblets | 25c3d3fb2478047eede05238b60b6d16598f9131 | 33b4475cfabe24644335093a028d7d2aabc4ab84 | refs/heads/master | 2023-02-03T18:20:46.873799 | 2020-12-22T10:58:35 | 2020-12-22T10:58:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | """Unit tests for djblets.util.templatetags.djblets_forms."""
from __future__ import unicode_literals
import os
from django.conf import settings
from django.template import Context, Template
from pipeline.conf import settings as pipeline_settings
from djblets.testing.testcases import TestCase
class CompressedTagsTests(TestCase):
"""Unit tests for the {% compressed_* %} template tags."""
def test_compressed_css_tag(self):
"""Testing {% compressed_css %}"""
self._touch_files(['test.css', 'test.d41d8cd98f00.css'])
pipeline_settings.STYLESHEETS = {
'test': {
'source_filenames': [],
'output_filename': 'test.css',
}
}
t = Template('{% load compressed %}'
'{% compressed_css "test" %}')
self.assertHTMLEqual(
t.render(Context({'test': 'test'})),
'<link href="/test.d41d8cd98f00.css" rel="stylesheet"'
' type="text/css" />')
def test_compressed_js_tag(self):
"""Testing {% compressed_js %}"""
self._touch_files(['test.js', 'test.d41d8cd98f00.js'])
pipeline_settings.JAVASCRIPT = {
'test': {
'source_filenames': [],
'output_filename': 'test.js',
}
}
t = Template('{% load compressed %}'
'{% compressed_js "test" %}')
self.assertHTMLEqual(
t.render(Context({'test': 'test'})),
'<script type="text/javascript" src="/test.d41d8cd98f00.js"'
' charset="utf-8"></script>')
def _touch_files(self, filenames):
"""Create one or more empty static media files.
Args:
filenames (list of unicode):
The list of static media files to create.
"""
for filename in filenames:
with open(os.path.join(settings.STATIC_ROOT, filename), 'w'):
pass
| [
"christian@beanbaginc.com"
] | christian@beanbaginc.com |
edf6ec9094282214c247789c19af30388e1fb891 | cf5b2850dc9794eb0fc11826da4fd3ea6c22e9b1 | /xlsxwriter/test/styles/test_styles06.py | ecba383c9b2848fa80c25090f9d3c53d0e528278 | [
"BSD-2-Clause"
] | permissive | glasah/XlsxWriter | bcf74b43b9c114e45e1a3dd679b5ab49ee20a0ec | 1e8aaeb03000dc2f294ccb89b33806ac40dabc13 | refs/heads/main | 2023-09-05T03:03:53.857387 | 2021-11-01T07:35:46 | 2021-11-01T07:35:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,183 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...styles import Styles
from ...workbook import Workbook
class TestAssembleStyles(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for border colour styles."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format({
'left': 1,
'right': 1,
'top': 1,
'bottom': 1,
'diag_border': 1,
'diag_type': 3,
'left_color': 'red',
'right_color': 'red',
'top_color': 'red',
'bottom_color': 'red',
'diag_color': 'red'})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties([
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_format_count,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
])
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="2">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
</fills>
<borders count="2">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<border diagonalUp="1" diagonalDown="1">
<left style="thin">
<color rgb="FFFF0000"/>
</left>
<right style="thin">
<color rgb="FFFF0000"/>
</right>
<top style="thin">
<color rgb="FFFF0000"/>
</top>
<bottom style="thin">
<color rgb="FFFF0000"/>
</bottom>
<diagonal style="thin">
<color rgb="FFFF0000"/>
</diagonal>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="2">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="0" fillId="0" borderId="1" xfId="0" applyBorder="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
816a9237dc7938a0b5c52aa4309713b2228816f7 | adf3076bd40e37f4a422e79f6efb938f13def6c6 | /objectModel/Python/cdm/storage/local.py | e8a47c16802f5603d8c1d96895365a7d49ac8a07 | [
"MIT",
"CC-BY-4.0"
] | permissive | assetdatasystems/CDM | 445d1b22f0071620f1eb2fd8d1b5f7d6152ec388 | 576ccfd07fc718b3d0911112e5041729a3ba8088 | refs/heads/master | 2020-09-29T00:54:18.350717 | 2019-12-06T22:49:02 | 2019-12-06T22:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,521 | py | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
# All rights reserved.
# ----------------------------------------------------------------------
from datetime import datetime
import json
import os
from typing import Any, List, Optional
from .base import StorageAdapterBase
class LocalAdapter(StorageAdapterBase):
"""Local file system storage adapter"""
def __init__(self, root: str = '') -> None:
self._root = os.path.abspath(root) # type: str
def can_read(self) -> bool:
return True
def can_write(self) -> bool:
return True
async def read_async(self, corpus_path: str) -> str:
adapter_path = self.create_adapter_path(corpus_path)
with open(adapter_path, 'r', encoding='utf-8') as file:
return file.read()
async def write_async(self, corpus_path: str, data: str) -> None:
adapter_path = self.create_adapter_path(corpus_path)
parent_dir = os.path.abspath(os.path.join(adapter_path, os.pardir))
os.makedirs(parent_dir, exist_ok=True)
with open(adapter_path, 'w', encoding='utf-8') as file:
file.write(data)
def create_adapter_path(self, corpus_path: str) -> str:
corpus_path = corpus_path[(corpus_path.find(':') + 1):].lstrip('\\/')
return os.path.normpath(os.path.join(self._root, corpus_path))
def create_corpus_path(self, adapter_path: str) -> Optional[str]:
if not adapter_path.startswith("http"):
normalized_adapter_path = os.path.abspath(adapter_path).replace('\\', '/')
normalized_root = self._root.replace('\\', '/')
if normalized_adapter_path.startswith(normalized_root):
return normalized_adapter_path[len(normalized_root):]
# Signal that we did not recognize path as one for this adapter.
return None
def clear_cache(self) -> None:
pass
async def compute_last_modified_time_async(self, adapter_path: str) -> Optional[datetime]:
if os.path.exists(adapter_path):
return datetime.fromtimestamp(os.path.getmtime(adapter_path))
return None
async def fetch_all_files_async(self, folder_corpus_path: str) -> List[str]:
adapter_folder = self.create_adapter_path(folder_corpus_path)
adapter_files = [os.path.join(dp, fn) for dp, dn, fns in os.walk(adapter_folder) for fn in fns]
return [self.create_corpus_path(file) for file in adapter_files]
| [
"nebanfic@microsoft.com"
] | nebanfic@microsoft.com |
15ac7a012158192d1c75ea2adf14451862b089f5 | c475cd8531a94ffae69cc92371d41531dbbddb6c | /Projects/bullet3-2.89/examples/pybullet/gym/pybullet_utils/util.py | 5a014c8ed07a8b530a0fc58ca5cf708b435bd654 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib"
] | permissive | WolfireGames/overgrowth | 72d3dd29cbd7254337265c29f8de3e5c32400114 | 594a2a4f9da0855304ee8cd5335d042f8e954ce1 | refs/heads/main | 2023-08-15T19:36:56.156578 | 2023-05-17T08:17:53 | 2023-05-17T08:20:36 | 467,448,492 | 2,264 | 245 | Apache-2.0 | 2023-05-09T07:29:58 | 2022-03-08T09:38:54 | C++ | UTF-8 | Python | false | false | 218 | py | import random
import numpy as np
def set_global_seeds(seed):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
return
| [
"max@autious.net"
] | max@autious.net |
b0dcca8c35d97cf96c6a426d4cd4e0c4f1757ab5 | b7125b27e564d2cc80a2ce8d0a6f934aa22c8445 | /.history/sudoku_20201031012809.py | 1e1bd31e61f1cca07054e01ff4e65dab7e7033db | [] | no_license | JensVL96/Puzzle-solver-for-fun | 4c15dcd570c3705b7ac555efb56b52913e81083c | 6d8a4378a480372213a596a336a4deca727a00fc | refs/heads/master | 2021-07-15T05:19:42.185495 | 2020-11-08T13:59:49 | 2020-11-08T13:59:49 | 224,855,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,215 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from config import *
from create_board import *
from solve_bloard import *
from display_board import *
from string import *
import pygame as pg
import numpy as np
# For error highlighting
row_index = (0, 0)
col_index = (0, 0)
blk_index = (0, 0)
input_lock = 0
def reset_errors():
global input_lock
input_lock = 1
global row_index
row_index = (0, 0)
global col_index
col_index = (0, 0)
global blk_index
blk_index = (0, 0)
def get_cord(pos):
global box_index_x
box_index_x = (pos[0] - TOP_LX)//BLOCK_SIZE
global box_index_y
box_index_y = (pos[1] - TOP_LY)//BLOCK_SIZE
def valid(grid, x, y, val, increase):
global input_lock
for index in range(9):
# Check if value in column
if grid[x][index] == val:
print("in the same column")
global col_index
col_index = (x, index)
input_lock = 1
# Check if value in row
if grid[index][y] == val:
print("in the same row")
global row_index
row_index = (index, y)
input_lock = 1
# Finds the block
index_x = x // 3 # integer division
index_y = y // 3
# Check if value in block
for i in range(index_x * 3, index_x * 3 + 3):
for j in range (index_y * 3, index_y * 3 + 3):
if grid[i][j] == val:
print("in the same block")
global blk_index
blk_index = (i, j)
input_lock = 1
if input_lock == 1:
return False
return True
class Main():
def __init__(self):
self.board = []
self.run()
def run(self):
pg.init()
self.screen = pg.display.set_mode(SCREEN_RES)
pg.display.set_caption('Sudoku solver')
display = Display_board(self.screen)
flag1 = 0
val = 0
global input_lock
board = create_board().board
while 1:
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
exit()
if event.type == pg.MOUSEBUTTONDOWN:
flag1 = 1
pos = pg.mouse.get_pos()
get_cord(pos)
display.glow(pos)
if event.type == pg.KEYDOWN and input_lock != 1:
if event.key == pg.K_1:
val = 1
if event.key == pg.K_2:
val = 2
if event.key == pg.K_3:
val = 3
if event.key == pg.K_4:
val = 4
if event.key == pg.K_5:
val = 5
if event.key == pg.K_6:
val = 6
if event.key == pg.K_7:
val = 7
if event.key == pg.K_8:
val = 8
if event.key == pg.K_9:
val = 9
elif event.type == pg.KEYDOWN and input_lock == 1:
if event.key == pg.K_BACKSPACE:
val = 0
input_lock = 0
reset_errors()
if val != 0:
display.draw_val(val, box_index_x, box_index_y)
if valid(board, int(box_index_x), int(box_index_y), val, display):
board[int(box_index_x)][int(box_index_y)] = val
else:
board[int(box_index_x)][int(box_index_y)] = 0
val = 0
pg.draw.rect(self.screen, BLACK, (0, 0, self.screen.get_width(), self.screen.get_height()))
self.screen.fill(BEIGE)
display.draw(board)
if input_lock == 1:
display.update(board, row_index, col_index, blk_index)
# display.draw_box()
pg.display.update()
self.solution = solve_board(board)
self.solution.assign_flags(board)
if __name__ == '__main__':
Main()
| [
"jle040@uit.no"
] | jle040@uit.no |
b7723239c4a46c561258470ad64b96116357489b | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.5_rd=1_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=61/sched.py | d192899a28b503972848e0f3b23908f61c81e5b3 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | -X FMLP -Q 0 -L 3 124 400
-X FMLP -Q 0 -L 3 86 400
-X FMLP -Q 0 -L 3 52 200
-X FMLP -Q 1 -L 2 51 175
-X FMLP -Q 1 -L 2 47 150
-X FMLP -Q 1 -L 2 46 150
-X FMLP -Q 2 -L 1 27 200
-X FMLP -Q 2 -L 1 26 100
-X FMLP -Q 2 -L 1 18 125
-X FMLP -Q 3 -L 1 16 150
-X FMLP -Q 3 -L 1 14 100
-X FMLP -Q 3 -L 1 3 175
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
120f170cf018653194adb4e24abad7bac1b97950 | 5cbbeb11fb1400019690d10db24b4579f97e8896 | /mlkernels/kernels/derivative.py | a0d74a40a20655d7822642c6d104d4e38e97fc0c | [
"MIT"
] | permissive | darsh8200/mlkernels | b735c213f5cf590cabebcee166e3b4aea95c4e1e | cad223c422a32bc10375358fda076645efca62f1 | refs/heads/main | 2023-06-16T19:48:37.056247 | 2021-07-09T06:18:39 | 2021-07-09T06:18:39 | 384,340,711 | 0 | 0 | MIT | 2021-07-09T06:16:48 | 2021-07-09T06:16:47 | null | UTF-8 | Python | false | false | 6,853 | py | import lab as B
import numpy as np
from algebra import DerivativeFunction
from algebra.util import identical
from matrix import Dense
from plum import convert
from . import _dispatch
from .. import Kernel
from ..util import num_elements, uprank, expand
__all__ = ["perturb", "DerivativeKernel"]
def dkx(k_elwise, i):
"""Construct the derivative of a kernel with respect to its first
argument.
Args:
k_elwise (function): Function that performs element-wise computation
of the kernel.
i (int): Dimension with respect to which to compute the derivative.
Returns:
function: Derivative of the kernel with respect to its first argument.
"""
@uprank
def _dkx(x, y):
import tensorflow as tf
with tf.GradientTape() as t:
# Get the numbers of inputs.
nx = num_elements(x)
ny = num_elements(y)
# Copy the input `ny` times to efficiently compute many derivatives.
xis = tf.identity_n([x[:, i : i + 1]] * ny)
t.watch(xis)
# Tile inputs for batched computation.
x = B.tile(x, ny, 1)
y = B.reshape(B.tile(y, 1, nx), ny * nx, -1)
# Insert tracked dimension, which is different for every tile.
xi = B.concat(*xis, axis=0)
x = B.concat(x[:, :i], xi, x[:, i + 1 :], axis=1)
# Perform the derivative computation.
out = B.dense(k_elwise(x, y))
grads = t.gradient(out, xis, unconnected_gradients="zero")
return B.concat(*grads, axis=1)
return _dkx
def dkx_elwise(k_elwise, i):
"""Construct the element-wise derivative of a kernel with respect to
its first argument.
Args:
k_elwise (function): Function that performs element-wise computation
of the kernel.
i (int): Dimension with respect to which to compute the derivative.
Returns:
function: Element-wise derivative of the kernel with respect to its
first argument.
"""
@uprank
def _dkx_elwise(x, y):
import tensorflow as tf
with tf.GradientTape() as t:
xi = x[:, i : i + 1]
t.watch(xi)
x = B.concat(x[:, :i], xi, x[:, i + 1 :], axis=1)
out = B.dense(k_elwise(x, y))
return t.gradient(out, xi, unconnected_gradients="zero")
return _dkx_elwise
def dky(k_elwise, i):
"""Construct the derivative of a kernel with respect to its second
argument.
Args:
k_elwise (function): Function that performs element-wise computation
of the kernel.
i (int): Dimension with respect to which to compute the derivative.
Returns:
function: Derivative of the kernel with respect to its second argument.
"""
@uprank
def _dky(x, y):
import tensorflow as tf
with tf.GradientTape() as t:
# Get the numbers of inputs.
nx = num_elements(x)
ny = num_elements(y)
# Copy the input `nx` times to efficiently compute many derivatives.
yis = tf.identity_n([y[:, i : i + 1]] * nx)
t.watch(yis)
# Tile inputs for batched computation.
x = B.reshape(B.tile(x, 1, ny), nx * ny, -1)
y = B.tile(y, nx, 1)
# Insert tracked dimension, which is different for every tile.
yi = B.concat(*yis, axis=0)
y = B.concat(y[:, :i], yi, y[:, i + 1 :], axis=1)
# Perform the derivative computation.
out = B.dense(k_elwise(x, y))
grads = t.gradient(out, yis, unconnected_gradients="zero")
return B.transpose(B.concat(*grads, axis=1))
return _dky
def dky_elwise(k_elwise, i):
"""Construct the element-wise derivative of a kernel with respect to
its second argument.
Args:
k_elwise (function): Function that performs element-wise computation
of the kernel.
i (int): Dimension with respect to which to compute the derivative.
Returns:
function: Element-wise derivative of the kernel with respect to its
second argument.
"""
@uprank
def _dky_elwise(x, y):
import tensorflow as tf
with tf.GradientTape() as t:
yi = y[:, i : i + 1]
t.watch(yi)
y = B.concat(y[:, :i], yi, y[:, i + 1 :], axis=1)
out = B.dense(k_elwise(x, y))
return t.gradient(out, yi, unconnected_gradients="zero")
return _dky_elwise
def perturb(x):
"""Slightly perturb a tensor.
Args:
x (tensor): Tensor to perturb.
Returns:
tensor: `x`, but perturbed.
"""
dtype = convert(B.dtype(x), B.NPDType)
if dtype == np.float64:
return 1e-20 + x * (1 + 1e-14)
elif dtype == np.float32:
return 1e-20 + x * (1 + 1e-7)
else:
raise ValueError(f"Cannot perturb a tensor of data type {B.dtype(x)}.")
class DerivativeKernel(Kernel, DerivativeFunction):
"""Derivative of kernel."""
@property
def _stationary(self):
# NOTE: In the one-dimensional case, if derivatives with respect to both
# arguments are taken, then the result is in fact stationary.
return False
@_dispatch
def __eq__(self, other: "DerivativeKernel"):
identical_derivs = identical(expand(self.derivs), expand(other.derivs))
return self[0] == other[0] and identical_derivs
@_dispatch
def pairwise(k: DerivativeKernel, x: B.Numeric, y: B.Numeric):
i, j = expand(k.derivs)
k = k[0]
# Prevent that `x` equals `y` to stabilise nested gradients.
y = perturb(y)
if i is not None and j is not None:
# Derivative with respect to both `x` and `y`.
return Dense(dky(dkx_elwise(elwise(k), i), j)(x, y))
elif i is not None and j is None:
# Derivative with respect to `x`.
return Dense(dkx(elwise(k), i)(x, y))
elif i is None and j is not None:
# Derivative with respect to `y`.
return Dense(dky(elwise(k), j)(x, y))
else:
raise RuntimeError("No derivative specified.")
@_dispatch
def elwise(k: DerivativeKernel, x: B.Numeric, y: B.Numeric):
i, j = expand(k.derivs)
k = k[0]
# Prevent that `x` equals `y` to stabilise nested gradients.
y = perturb(y)
if i is not None and j is not None:
# Derivative with respect to both `x` and `y`.
return dky_elwise(dkx_elwise(elwise(k), i), j)(x, y)
elif i is not None and j is None:
# Derivative with respect to `x`.
return dkx_elwise(elwise(k), i)(x, y)
elif i is None and j is not None:
# Derivative with respect to `y`.
return dky_elwise(elwise(k), j)(x, y)
else:
raise RuntimeError("No derivative specified.")
| [
"wessel.p.bruinsma@gmail.com"
] | wessel.p.bruinsma@gmail.com |
cbd0426653d0bdcaf34cbdaf86cd071eb58163b8 | a6ab2735ff2f89adc64a4afcbfe013c1039198a1 | /scrapers/liverpool.py | 417b56688c24c6978ebc90919fea9056e20e3935 | [] | no_license | rehmanali1337/innvictus_scraper | 72e5049dd2c3d391f47d37e145edb2bf7c6a371d | bcb4e986c1922b20d61baca88e6ff03909bca518 | refs/heads/master | 2023-05-06T21:43:27.163117 | 2021-05-26T05:04:29 | 2021-05-26T05:04:29 | 341,820,871 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,929 | py | from selenium import webdriver
import asyncio
import json
from models.cache import ListCache
from models.products import LiverPoolProduct
from configs import global_vars
import logging
class LiverPoolNewProdsScraper:
def __init__(self, queue):
self.config = json.load(open(global_vars.MAIN_CONFIG_FILE_LOCATION))
self.queue = queue
print = logging.getLogger(' LiverpoolMonitor ').info
self.options = webdriver.ChromeOptions()
self.options.add_argument('--no-sandbox')
# self.options.add_argument('--headless')
self.options.add_argument('--disable-dev-shm-usage')
self.options.add_argument('start-maximized')
self.options.add_argument('disable-infobars')
self.webdriver_path = self.config.get("WEBDRIVER_PATH")
self.loop = asyncio.new_event_loop()
self.driver = None
self.URLs = [
'https://www.liverpool.com.mx/tienda/zapatos/catst1105210',
'https://www.liverpool.com.mx/tienda/zapatos/catst1010801',
'https://www.liverpool.com.mx/tienda/zapatos/catst1011086'
]
self.itter_time = 10
def start(self):
self.cache = ListCache('LiverPoolCache')
self.loop.run_until_complete(self.main())
async def main(self):
self.driver = webdriver.Chrome(
executable_path=self.webdriver_path, options=self.options)
self.driver.implicitly_wait(10)
# await self.create_cache()
while True:
try:
all_links = await self.get_all_prod_links()
print(f'[+] Got {len(all_links)} prod links!')
for link in all_links:
if not self.cache.has_item(link):
prod = await self.get_prod_details(link)
self.queue.put(prod)
self.cache.add_item(link)
await asyncio.sleep(self.itter_time)
except Exception as e:
print(e)
async def create_cache(self):
print('[+] Creating cache ..')
links = await self.get_all_prod_links()
self.cache.replace_cache(links)
print('[+] Created cache for prods')
async def get_all_prod_links(self):
links = []
for url in self.URLs:
self.driver.get(url)
prods_list = self.driver.find_elements_by_xpath(
'//li[@class="m-product__card card-masonry"]')
for prod in prods_list:
link = prod.find_element_by_tag_name('a').get_attribute('href')
links.append(link)
return links
async def get_prod_details(self, link):
self.driver.get(link)
prod = LiverPoolProduct()
prod.name = self.driver.find_element_by_xpath(
'//h1[@class="a-product__information--title"]').text
prod.link = link
out_of_stock_sizes = self.driver.find_elements_by_xpath(
'//button[@class="a-btn a-btn--actionpdp -disabled"]')
for size in out_of_stock_sizes:
prod.out_of_stock_sizes.append(size.text)
in_stock_sizes = self.driver.find_elements_by_xpath(
'//button[@class="a-btn a-btn--actionpdp"]')
for size in in_stock_sizes:
prod.in_stock_sizes.append(size.text)
prod.img_link = self.driver.find_element_by_xpath(
'//img[@id="image-real"]').get_attribute('src')
prod.color = self.driver.find_element_by_xpath(
'//p[@class="a-product__paragraphColor m-0 mt-2 mb-1"]').text.split(':')[-1].strip()
prod.price = self.driver.find_element_by_xpath(
'//p[@class="a-product__paragraphDiscountPrice m-0 d-inline "]').text.split('\n')[0].replace(',', '').replace('$', '')
return prod
# def quit_browser(self):
# if self.driver is not None:
# self.driver.quit()
# self.driver = None
| [
"rehmanali.9442289@gmail.com"
] | rehmanali.9442289@gmail.com |
fdef72e6ed2b89d6e3312ca8d0abab76e55416d7 | 4f4d47d60e17f0e3b7120ebb26f3d83e0a1f8e66 | /tf_agents/bandits/environments/random_bandit_environment.py | 735af739c1d680f16bcb6a4df8ef9ba29e2bd8e5 | [
"Apache-2.0"
] | permissive | tfboyd/agents | 644ff1ee3961ac629671110c45f6c90234bd0ad1 | 858ee36aaaea6fbcf0e5ab1c12929c77bd17abae | refs/heads/master | 2020-11-28T15:46:31.635917 | 2020-06-26T06:05:57 | 2020-06-26T06:05:57 | 229,859,259 | 2 | 0 | Apache-2.0 | 2020-06-26T15:34:23 | 2019-12-24T02:56:28 | Python | UTF-8 | Python | false | false | 5,146 | py | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bandit environment that returns random observations and rewards."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.environments import bandit_tf_environment as bte
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
__all__ = ['RandomBanditEnvironment']
def _raise_batch_shape_error(distribution_name, batch_shape):
raise ValueError('`{distribution_name}` must have batch shape with length 1; '
'got {batch_shape}. Consider using '
'`tensorflow_probability.distributions.Independent` '
'to manipulate batch and event shapes.'.format(
distribution_name=distribution_name,
batch_shape=batch_shape))
class RandomBanditEnvironment(bte.BanditTFEnvironment):
  """Bandit environment that returns random observations and rewards.

  Observations and rewards are drawn independently from the two supplied
  distributions; actions are accepted but ignored.
  """

  def __init__(self,
               observation_distribution,
               reward_distribution,
               action_spec=None):
    """Initializes an environment that returns random observations and rewards.

    Note that `observation_distribution` and `reward_distribution` are expected
    to have batch rank 1. That is, `observation_distribution.batch_shape` should
    have length exactly 1. `tensorflow_probability.distributions.Independent` is
    useful for manipulating batch and event shapes. For example,

    ```python
    observation_distribution = tfd.Independent(tfd.Normal(tf.zeros([12, 3, 4]),
                                                          tf.ones([12, 3, 4])))
    env = RandomBanditEnvironment(observation_distribution, ...)
    env.observation_spec  # tensor_spec.TensorSpec(shape=[3, 4], ...)
    env.batch_size  # 12
    ```

    Args:
      observation_distribution: a `tensorflow_probability.Distribution`.
        Batches of observations will be drawn from this distribution. The
        `batch_shape` of this distribution must have length 1 and be the same as
        the `batch_shape` of `reward_distribution`.
      reward_distribution: a `tensorflow_probability.Distribution`.
        Batches of rewards will be drawn from this distribution. The
        `batch_shape` of this distribution must have length 1 and be the same as
        the `batch_shape` of `observation_distribution`.
      action_spec: a `TensorSpec` describing the expected action. Note that
        actions are ignored and do not affect rewards.

    Raises:
      ValueError: if either distribution's batch shape does not have rank 1,
        if the reward event shape is not scalar, if the reward dtype is not
        float32, or if the two batch sizes disagree.
    """
    observation_batch_shape = observation_distribution.batch_shape
    reward_batch_shape = reward_distribution.batch_shape
    reward_event_shape = reward_distribution.event_shape

    if observation_batch_shape.rank != 1:
      _raise_batch_shape_error(
          'observation_distribution', observation_batch_shape)

    if reward_batch_shape.rank != 1:
      # Bug fix: this previously reported `observation_batch_shape`, which
      # produced a misleading error message for a bad reward distribution.
      _raise_batch_shape_error(
          'reward_distribution', reward_batch_shape)

    if reward_event_shape.rank != 0:
      raise ValueError('`reward_distribution` must have event_shape (); '
                       'got {}'.format(reward_event_shape))

    if reward_distribution.dtype != tf.float32:
      # Bug fix: this previously formatted `reward_distribution.float32`,
      # a non-existent attribute, so callers got an AttributeError instead
      # of the intended ValueError.
      raise ValueError('`reward_distribution` must have dtype float32; '
                       'got {}'.format(reward_distribution.dtype))

    if observation_batch_shape[0] != reward_batch_shape[0]:
      raise ValueError(
          '`reward_distribution` and `observation_distribution` must have the '
          'same batch shape; got {} and {}'.format(
              reward_batch_shape, observation_batch_shape))

    batch_size = tf.compat.dimension_value(observation_batch_shape[0])
    self._observation_distribution = observation_distribution
    self._reward_distribution = reward_distribution

    # The observation spec mirrors the event shape/dtype of a single draw.
    observation_spec = tensor_spec.TensorSpec(
        shape=self._observation_distribution.event_shape,
        dtype=self._observation_distribution.dtype,
        name='observation_spec')
    time_step_spec = time_step.time_step_spec(observation_spec)
    super(RandomBanditEnvironment, self).__init__(time_step_spec=time_step_spec,
                                                  action_spec=action_spec,
                                                  batch_size=batch_size)

  def _apply_action(self, action):
    """Returns a fresh batch of random rewards; the action is ignored."""
    del action  # unused
    return self._reward_distribution.sample()

  def _observe(self):
    """Returns a fresh batch of random observations."""
    return self._observation_distribution.sample()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
96254c047f5ab42412198c47ef93c0af1d2d97ba | e537b9b866c6533ef4c488b0104070a3f865d40e | /joerd/store/s3.py | d0d448ec3dabf9fd4d2a674e36689e3fb00c1ac6 | [
"MIT"
] | permissive | mohammadrezabk/joerd | 0c3a65ddb746578f9c06574601dc91ea6af2de2e | 0b86765156d0612d837548c2cf70376c43b3405c | refs/heads/master | 2023-02-14T16:08:59.103192 | 2017-11-21T17:22:22 | 2017-11-21T17:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,965 | py | import boto3
from boto3.s3.transfer import TransferConfig
from botocore.exceptions import ClientError
from os import walk
import os.path
from contextlib2 import contextmanager
from joerd.tmpdir import tmpdir
import traceback
import sys
import time
import logging
# extension to mime type mappings to help with serving the S3 bucket as
# a web site. if we add the content-type header on upload, then S3 will
# repeat it back when the tiles are accessed.
_MIME_TYPES = {
'.png': 'image/png',
'.tif': 'image/tif',
'.xml': 'application/xml',
'.gz': 'application/x-gzip',
}
# Stores files in S3
class S3Store(object):
    """File store backed by an S3 bucket.

    Uploads retry with exponential back-off, and well-known file extensions
    get a Content-Type header so the bucket can be served directly as a
    web site.
    """

    def __init__(self, cfg):
        # `bucket_name` is required; `upload_config` is forwarded verbatim
        # to boto3's TransferConfig at upload time.
        self.bucket_name = cfg.get('bucket_name')
        self.upload_config = cfg.get('upload_config')
        assert self.bucket_name is not None, \
            "Bucket name not configured for S3 store, but it must be."

        # cache the boto resource and s3 bucket - we don't know what this
        # contains, so it seems safe to assume we can't pass it across a
        # multiprocessing boundary. They are created lazily in _get_bucket.
        self.s3 = None
        self.bucket = None

    # This object is likely to get pickled to send it to other processes
    # for multiprocessing. However, the s3/boto objects are probably not
    # safe to be pickled, so we'll just set them to None and regenerate
    # them on the other side.
    def __getstate__(self):
        odict = self.__dict__.copy()
        del odict['s3']
        del odict['bucket']
        return odict

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.s3 = None
        self.bucket = None

    def _get_bucket(self):
        """Lazily create (or re-create after unpickling) the boto3 bucket."""
        if self.s3 is None or self.bucket is None:
            self.s3 = boto3.resource('s3')
            self.bucket = self.s3.Bucket(self.bucket_name)
        return self.bucket

    def upload_all(self, d):
        """Upload every file under directory `d`, keyed by its path
        relative to `d`."""
        # ensure exactly one trailing slash so that stripping `d` as a
        # prefix below never leaves an S3 key starting with '/'.
        if not d.endswith('/'):
            d = d + "/"

        transfer_config = TransferConfig(**self.upload_config)
        for dirpath, dirs, files in walk(d):
            if dirpath.startswith(d):
                suffix = dirpath[len(d):]
                self._upload_files(dirpath, suffix, files, transfer_config)

    def _upload_files(self, dirpath, suffix, files, transfer_config):
        """Upload one directory's files, attaching a Content-Type when the
        extension is known."""
        for f in files:
            src_name = os.path.join(dirpath, f)
            s3_key = os.path.join(suffix, f)
            ext = os.path.splitext(f)[1]
            mime = _MIME_TYPES.get(ext)

            extra_args = {}
            if mime:
                extra_args['ContentType'] = mime

            # retry up to 6 times, waiting 32 (=2^5) seconds before the final
            # attempt.
            tries = 6
            self.retry_upload_file(src_name, s3_key, transfer_config,
                                   extra_args, tries)

    def retry_upload_file(self, src_name, s3_key, transfer_config,
                          extra_args, tries, backoff=1):
        """Upload a single file, retrying with exponential back-off.

        Raises the last upload error after `tries` failed retries.
        """
        logger = logging.getLogger('s3')

        bucket = self._get_bucket()
        try_num = 0
        while True:
            try:
                bucket.upload_file(src_name, s3_key,
                                   Config=transfer_config,
                                   ExtraArgs=extra_args)
                break
            # Bug fix: this was `except StandardError`, which only exists on
            # Python 2 and raises NameError on Python 3.
            except Exception as e:
                try_num += 1
                logger.warning("Try %d of %d: Failed to upload %s due to: %s" \
                               % (try_num, tries, s3_key,
                                  "".join(traceback.format_exception(
                                      *sys.exc_info()))))
                if try_num > tries:
                    raise
                time.sleep(backoff)
                backoff *= 2

    @contextmanager
    def upload_dir(self):
        """Yield a temporary directory whose contents are uploaded (and the
        directory removed) when the block exits without error."""
        with tmpdir() as t:
            yield t
            self.upload_all(t)

    def exists(self, filename):
        """Return True if `filename` exists as a key in the bucket."""
        bucket = self._get_bucket()
        exists = False
        try:
            obj = bucket.Object(filename)
            obj.load()
        except ClientError as e:
            code = e.response['Error']['Code']
            # 403 is returned instead of 404 when the bucket doesn't allow
            # LIST operations, so treat that as missing as well.
            if code == "404" or code == "403":
                exists = False
            else:
                raise e
        else:
            exists = True

        return exists

    def get(self, source, dest):
        """Download key `source` to local path `dest`, wrapping any failure
        in a RuntimeError that includes the traceback."""
        try:
            bucket = self._get_bucket()
            obj = bucket.Object(source)
            obj.download_file(dest)
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            raise RuntimeError("Failed to download %r, due to: %s"
                               % (source, "".join(traceback.format_exception(
                                   *sys.exc_info()))))
def create(cfg):
    """Factory entry point: build an S3Store from the given config dict."""
    store = S3Store(cfg)
    return store
| [
"zerebubuth@gmail.com"
] | zerebubuth@gmail.com |
a233d4e8b9afc6d98a3d8ee9809d4b0450623742 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Ff84aGq6e7gjKYh8H_6.py | 44234b3c35513f59dda8ddfcdac696049dd11660 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 145 | py |
def minutes_to_seconds(time):
    """Convert an "M:SS" time string into a total number of seconds.

    Returns False when the seconds component is 60 or more (not a valid
    clock time).
    """
    seconds = int(time[-2:])
    if seconds >= 60:
        return False
    minutes = int(time[:time.index(':')])
    return minutes * 60 + seconds
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5a70b3dd55fd3d9c45bbbf134b407923549c6c38 | 50edd95cf9ea295b4216e10361a3dfc7e029a660 | /anipose/train_autoencoder.py | e5534bcc6a314c1726f5d863fc601e59da0b5da8 | [
"BSD-2-Clause"
] | permissive | goyallon/anipose | 5fc03b66b5a362d8ea151c6df4cc6049bccabb15 | 2239cd04f1e6d1f21ff62aab005ebfe6fed351c8 | refs/heads/master | 2022-11-05T06:59:14.077907 | 2020-06-15T23:39:10 | 2020-06-15T23:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | #!/usr/bin/env python3
from sklearn.neural_network import MLPRegressor, MLPClassifier
import pandas as pd
import os.path
import numpy as np
from glob import glob
from ruamel.yaml import YAML
import pickle
def get_dataset_location(model_folder):
    """Locate the labelled-data HDF5 file for the model's current iteration.

    Reads `config.yaml` inside *model_folder* to find the iteration number,
    then globs for the matching `CollectedData_*.h5` file and returns the
    first hit (raises IndexError if none exists).
    """
    config_path = os.path.join(model_folder, 'config.yaml')
    parser = YAML(typ='rt')
    with open(config_path, 'r') as config_file:
        dlc_config = parser.load(config_file)
    iteration = dlc_config['iteration']

    pattern = os.path.join(
        model_folder, 'training-datasets',
        'iteration-' + str(iteration),
        '*', 'CollectedData_*.h5')
    matches = glob(pattern)
    return matches[0]
def load_pose_2d_training(fname):
    """Load 2D pose training labels from a DeepLabCut-style HDF5 file.

    Args:
        fname: path to a `CollectedData_*.h5` file whose columns are a
            MultiIndex of (scorer, bodyparts, coords) — assumed DLC layout;
            TODO confirm against the producing pipeline.

    Returns:
        (coords, metadata): `coords` is a float array of shape
        (n_frames, n_joints, 2) with any row containing a non-finite value
        replaced entirely by NaN; `metadata` carries the bodypart names,
        scorer and the original frame index.
    """
    data_orig = pd.read_hdf(fname)
    # single scorer assumed: take the first level-0 label
    scorer = data_orig.columns.levels[0][0]
    data = data_orig.loc[:, scorer]

    bp_index = data.columns.names.index('bodyparts')
    # NOTE: a previous revision also computed the 'coords' level index here,
    # but it was never used, so it has been removed.
    bodyparts = list(data.columns.get_level_values(bp_index).unique())

    n_frames = len(data)
    n_joints = len(bodyparts)
    coords = np.array(data).reshape(n_frames, n_joints, 2)

    # mark a joint as missing (both coordinates NaN) if either is non-finite
    bad = np.any(~np.isfinite(coords), axis=2)
    coords[bad] = np.nan

    metadata = {
        'bodyparts': bodyparts,
        'scorer': scorer,
        'index': data.index
    }

    return coords, metadata
def generate_training_data(scores, n_iters=10):
    """Build a noisy-input / clean-target dataset for the autoencoder.

    Each of the `n_iters` passes produces one perturbed copy of `scores`
    (assumed to be a 0/1 matrix — confirm with the caller): ones are jittered
    around 1, zeros around 0, ~5% of entries are flipped, and values are
    clipped to [0, 1] before flipping.

    Returns:
        (X, y): vertically stacked noisy inputs and repeated clean targets.
    """
    def _perturb(clean):
        noisy = clean.copy()
        present = noisy == 1
        noisy[present] = np.random.normal(1, 0.3, size=np.sum(present))
        noisy[~present] = np.random.normal(0, 0.3, size=np.sum(~present))
        flip_mask = np.random.uniform(size=present.shape) < 0.05
        noisy = np.clip(noisy, 0, 1)
        noisy[flip_mask] = 1 - noisy[flip_mask]
        return noisy

    X = np.vstack([_perturb(scores) for _ in range(n_iters)])
    y = np.vstack([scores] * n_iters)
    return X, y
def train_mlp_classifier(X, y):
    """Fit a single-hidden-layer MLP (hidden width = feature count) on X/y
    and return the fitted classifier."""
    n_features = X.shape[1]
    classifier = MLPClassifier(
        hidden_layer_sizes=(n_features),
        verbose=2,
        max_iter=2000,
        activation='tanh',
        learning_rate='adaptive',
        solver='adam',
        early_stopping=True,
    )
    classifier.fit(X, y)
    return classifier
def save_mlp_classifier(mlp, fname):
    """Pickle the trained autoencoder model to *fname* and report the path."""
    with open(fname, 'wb') as out_file:
        pickle.dump(mlp, out_file)
    print('autoencoder saved at:\n {}'.format(fname))
def train_autoencoder(config):
    """End-to-end training: load labels, synthesize noisy data, fit the MLP,
    and save it as `autoencoder.pickle` under config['path']."""
    labels_file = get_dataset_location(config['model_folder'])
    data, _metadata = load_pose_2d_training(labels_file)

    n_frames, n_joints, _ = data.shape

    # a joint scores 1 when labelled, 0 when its coordinates are missing
    scores = np.ones((n_frames, n_joints), dtype='float64')
    missing = np.any(~np.isfinite(data), axis=2)
    scores[missing] = 0

    X, y = generate_training_data(scores)
    mlp = train_mlp_classifier(X, y)

    out_fname = os.path.join(config['path'], 'autoencoder.pickle')
    save_mlp_classifier(mlp, out_fname)
# model_folder = '/jellyfish/research/tuthill/hand-demo-dlc-TuthillLab-2019-08-05'
# config = {'model_folder': model_folder, 'path': model_folder}
# train_autoencoder(config)
# get dataset from deeplabcut folder
# generate augmented dataset to train autoencoder
# train MLP classifier
# save result
| [
"krchtchk@gmail.com"
] | krchtchk@gmail.com |
30e78d2b6cb33880f8469deab8d18521ad8705d3 | ef76f8bcea6cc5331b4c8873704426f1aacfd60d | /tests/test_likenumpy.py | 33b1e97bd9a7e2c4fce6a68e09a09b1832715d35 | [
"BSD-3-Clause"
] | permissive | DumbMachine/awkward-array | 10a51c8ac471839e435bb471f45b6624c4f982cb | 8f54cc5d4de3bc56628676243bfe63c683667f16 | refs/heads/master | 2020-04-15T17:43:42.684480 | 2019-01-18T18:39:46 | 2019-01-18T18:39:46 | 164,884,686 | 1 | 0 | BSD-3-Clause | 2019-01-18T18:33:23 | 2019-01-09T15:06:24 | Python | UTF-8 | Python | false | false | 6,027 | py | #!/usr/bin/env python
# Copyright (c) 2018, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import numpy
import awkward
class Test(unittest.TestCase):
    """Checks that awkward arrays reproduce numpy's slicing semantics."""

    def runTest(self):
        pass

    @staticmethod
    def _assert_same_slices(np_array, aw_array, indexes):
        # every indexing expression must yield identical nested lists
        for index in indexes:
            assert np_array[index].tolist() == aw_array[index].tolist()

    def test_likenumpy_slices(self):
        print()

        first_two = slice(None, 2)
        mask = [True, False, True]
        # every two-axis indexing combination the original exercised,
        # expressed once as data instead of ~60 repeated assert statements
        two_axis = [
            (first_two, first_two), (first_two, 2), (2, first_two),
            (first_two, [0, 1]), ([0, 1], first_two),
            (first_two, [0, 1, 2]), ([0, 1, 2], first_two),
            ([0, 1], [0, 1]), ([0, 1, 2], [0, 1, 2]),
            (first_two, mask), (mask, first_two), (mask, mask),
        ]

        # 2D case
        np = numpy.array([[1, 10, 100], [2, 20, 200], [3, 30, 300]])
        aw = awkward.fromiter(np)
        assert np.tolist() == aw.tolist()
        assert np[:2].tolist() == aw[:2].tolist()
        self._assert_same_slices(np, aw, two_axis)

        # 3D case: the same combinations, plus a trailing scalar index
        np = numpy.array([[[1, 10, 100], [2, 20, 200], [3, 30, 300]],
                          [[4, 40, 400], [5, 50, 500], [6, 60, 600]],
                          [[7, 70, 700], [8, 80, 800], [9, 90, 900]]])
        aw = awkward.fromiter(np)
        assert np.tolist() == aw.tolist()
        assert np[:2].tolist() == aw[:2].tolist()
        self._assert_same_slices(np, aw, two_axis)
        for last in (0, 1):
            self._assert_same_slices(
                np, aw, [index + (last,) for index in two_axis])
| [
"jpivarski@gmail.com"
] | jpivarski@gmail.com |
d6e06778da1716fbaaf68b4e91319ac1c219ef43 | daaf133cc4146ecd3b0df5ceafea84daa6bac2ce | /project/notes/serializers.py | 085cf9d4da8d8d79ed810d541f550edae69f4dcb | [] | no_license | core-api/heroku-app | 8c29452c609e4ff2344542e1e952a343f29953f6 | 7f03a36dc34baddcdf4cda8534ab800a98e079c9 | refs/heads/master | 2023-07-20T05:34:25.707890 | 2016-01-20T12:32:12 | 2016-01-20T12:32:12 | 32,865,301 | 1 | 0 | null | 2016-01-20T12:23:01 | 2015-03-25T13:11:06 | Python | UTF-8 | Python | false | false | 326 | py | from rest_framework import serializers
class AddNoteSerializer(serializers.Serializer):
    """Input payload for creating a note: a short description only."""
    description = serializers.CharField(max_length=100)
class EditNoteSerializer(serializers.Serializer):
    """Input payload for editing a note; both fields are optional so a
    partial update may change either the text or the completion flag."""
    description = serializers.CharField(max_length=100, required=False)
    complete = serializers.BooleanField(required=False)
| [
"tom@tomchristie.com"
] | tom@tomchristie.com |
4b95149358f6dfefe0687c5d6e8ae4f54758fb4a | b74320ad439e37dfa48cd8db38dab3b7a20a36ff | /src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py | b2d46c6f90f142635dec50da02b00fe63b3e40c2 | [
"Apache-2.0"
] | permissive | huggingface/diffusers | c82beba1ec5f0aba01b6744040a5accc41ec2493 | 5eeedd9e3336882d598091e191559f67433b6427 | refs/heads/main | 2023-08-29T01:22:52.237910 | 2023-08-28T18:16:27 | 2023-08-28T18:16:27 | 498,011,141 | 17,308 | 3,158 | Apache-2.0 | 2023-09-14T20:57:44 | 2022-05-30T16:04:02 | Python | UTF-8 | Python | false | false | 6,254 | py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for audio generation.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        unet ([`UNet1DModel`]):
            A `UNet1DModel` to denoise the encoded audio.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
            [`IPNDMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        r"""
        The call function to the pipeline for generation.

        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of audio samples to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at
                the expense of slower inference.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`):
                The length of the generated audio sample in seconds.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.

        Example:

        ```py
        from diffusers import DiffusionPipeline
        from scipy.io.wavfile import write

        model_id = "harmonai/maestro-150k"
        pipe = DiffusionPipeline.from_pretrained(model_id)
        pipe = pipe.to("cuda")

        audios = pipe(audio_length_in_s=4.0).audios

        # To save locally
        for i, audio in enumerate(audios):
            write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose())

        # To dislay in google colab
        import IPython.display as ipd

        for audio in audios:
            display(ipd.Audio(audio, rate=pipe.unet.sample_rate))
        ```

        Returns:
            [`~pipelines.AudioPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated audio.
        """
        # default: the model's native sample length, expressed in seconds
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # the UNet halves the sample length once per up-block, so the input
        # length must be a (sufficiently large) multiple of this factor
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        # remember the requested length; the output is trimmed back to it below
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # round UP to the next multiple of down_scale_factor
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        # match the model's parameter dtype for the initial noise
        dtype = next(self.unet.parameters()).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        # move to CPU float32 numpy and trim padding added by the rounding above
        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
4fe98793df58d5e1bf85fc96af28a813a0e52817 | 906e8d5711f64b45db1541ea15ab5de50c73fafa | /src/api/listeners/console.py | 9ab2fd7769322fa1b97d3a3048b9ab91dc515ed7 | [
"MIT"
] | permissive | yagrxu/infrabox | 079cb2f04f13dc31811698fe94354e32e8ea91e1 | 1d8789db1968897fd471d4dbc1480395d365ff85 | refs/heads/master | 2021-04-15T10:31:54.697521 | 2018-03-21T21:48:12 | 2018-03-21T21:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | import json
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from eventlet.hubs import trampoline
from pyinfraboxutils.db import connect_db
from pyinfraboxutils import dbpool
from pyinfraboxutils import get_logger
logger = get_logger('console_listener')
def __handle_event(event, socketio, client_manager):
    """Fetch the console output for an updated job and push it to all
    websocket clients subscribed to that job's room.

    Args:
        event: decoded notification payload carrying 'job_id' and 'id'.
        socketio: the socket.io server used to emit the update.
        client_manager: tracks which jobs currently have connected clients.
    """
    job_id = event['job_id']
    console_id = event['id']

    # nobody is watching this job: skip the database round-trip entirely
    if not client_manager.has_clients(job_id):
        return

    logger.info('start console %s', console_id)
    conn = dbpool.get()
    try:
        r = conn.execute_one('''
            SELECT output FROM console WHERE id = %s
        ''', [console_id])

        # fixed log-message typo: was 'retrived'
        logger.info('retrieved console %s', console_id)

        if not r:
            return

        r = r[0]

        socketio.emit('notify:console', {
            'data': r,
            'job_id': job_id
        }, room=job_id)
    finally:
        # always hand the connection back, even on the early return above
        dbpool.put(conn)

    logger.info('stop console %s', console_id)
def listen(socketio, client_manager):
    """Run the LISTEN loop forever, restarting it after any failure."""
    while True:
        try:
            __listen(socketio, client_manager)
        except Exception as exc:
            # log and loop again so a transient DB error doesn't kill us
            logger.exception(exc)
def __listen(socketio, client_manager):
    """Block on postgres `console_update` notifications and fan each one
    out to __handle_event on a background task."""
    conn = connect_db()
    # autocommit so LISTEN takes effect immediately, outside a transaction
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cursor = conn.cursor()
    cursor.execute("LISTEN console_update")

    while True:
        # cooperatively wait (eventlet) until the socket is readable
        trampoline(conn, read=True)
        conn.poll()
        while conn.notifies:
            notification = conn.notifies.pop()
            socketio.start_background_task(
                __handle_event,
                json.loads(notification.payload),
                socketio,
                client_manager)
| [
"steffen@infrabox.net"
] | steffen@infrabox.net |
630e3b59bc97ae65efd9cdf123fa18dc17a216c8 | 69c81130633ba4d41b1ec938f0fc586f777e95ba | /setup.py | 7e3762cd896e38a132a848717fe69bc6b7b3c13b | [
"ISC"
] | permissive | pregiotek/drf-tracking | d8ff934e884e7908f997f524d4e363914c2f11b2 | f40c87a7e392009cdffa7b893e964b51f2faeb5b | refs/heads/master | 2021-01-18T07:51:57.961574 | 2016-09-09T14:34:44 | 2016-09-09T14:34:44 | 67,803,102 | 1 | 0 | null | 2016-09-09T13:54:10 | 2016-09-09T13:54:08 | Python | UTF-8 | Python | false | false | 2,771 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
from setuptools import setup
# Distribution metadata consumed by setup() at the bottom of this file.
name = 'drf-tracking'
package = 'rest_framework_tracking'
description = 'Utils to log Django Rest Framework requests to the database'
url = 'https://github.com/aschn/drf-tracking'
author = 'Anna Schneider'
author_email = 'anna@WattTime.org'
license = 'BSD'
def get_version(package):
    """Return package version as listed in `__version__` in `__init__.py`.

    Reads the package's `__init__.py` as text so the package never has to
    be imported at build time.
    """
    # Fix: the file handle was previously opened without ever being closed;
    # use a context manager so it is released deterministically.
    init_path = os.path.join(package, '__init__.py')
    with open(init_path) as init_file:
        init_py = init_file.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
def get_packages(package):
    """Return the root package and every sub-package beneath it.

    A directory counts as a package when it contains an `__init__.py`.
    """
    found = []
    for dirpath, dirnames, filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath)
    return found
def get_package_data(package):
    """Return all files under the root package that are not themselves in a
    package (i.e. live in directories without `__init__.py`).

    Paths in the result are relative to the package root, in the mapping
    shape setuptools expects for `package_data`.
    """
    filepaths = []
    for dirpath, dirnames, filenames in os.walk(package):
        # directories containing __init__.py are packages, not data
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, filename)
                         for filename in filenames)
    return {package: filepaths}
version = get_version(package)


# Convenience release flow: `python setup.py publish` builds and uploads the
# sdist and wheel, then reminds the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    if os.system("pip freeze | grep wheel"):
        print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
        sys.exit()
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(" git push --tags")
    sys.exit()


# Standard setuptools entry point; metadata comes from the constants above.
setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    install_requires=[],
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
| [
"annarschneider@gmail.com"
] | annarschneider@gmail.com |
970297db1b672f47016ec7d408bacef3cc4da9e3 | eec9299fd80ed057585e84e0f0e5b4d82b1ed9a7 | /user/admin.py | 0b8e5495afbf1e7b37d5a28a0f436f6a35c69dc8 | [] | no_license | aimiliya/mysite | f51967f35c0297be7051d9f485dd0e59b8bb60c2 | b8e3b639de6c89fb8e6af7ee0092ee744a75be41 | refs/heads/master | 2020-04-08T19:06:36.539404 | 2018-12-01T08:05:18 | 2018-12-01T08:05:18 | 159,640,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.admin import User
from .models import Profile
class ProfileInline(admin.StackedInline):
    """Edit the one-to-one Profile inline on the User change page."""
    model = Profile
    can_delete = False
class UserAdmin(BaseUserAdmin):
    """Django's User admin extended with the Profile inline and a nickname
    column sourced from the related profile."""
    inlines = (ProfileInline,)
    list_display = ('username', 'nickname', 'email', 'is_staff', 'is_active', 'is_superuser')

    def nickname(self, obj):
        # pull the nickname off the user's related Profile row
        return obj.profile.nickname
    nickname.short_description = '昵称'
# Re-register UserAdmin: swap Django's default User admin for the extended
# one above so the profile inline and nickname column appear.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Standalone admin for Profile rows (user + nickname columns)."""
    list_display = ('user', 'nickname')
"951416267@qq.com"
] | 951416267@qq.com |
99ed850db9f54ab4480a94c40c385368950b6d31 | 58f8ba80b7288aa762e114b9d6476ef911a64044 | /tests/level4-1/test_app2_responses.py | d6a6c8f99ee51d3aff9052118e8cb493c59dc8bf | [
"MIT"
] | permissive | hansroh/skitai | a3cc80b1b0ef152ee22926b40a5c22a872c4235d | c54990839a634544ae26ec2d2c2d755e2b34f99c | refs/heads/master | 2023-01-22T23:49:42.868422 | 2023-01-04T10:14:38 | 2023-01-04T10:14:38 | 77,034,278 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,067 | py | import requests
import platform
def test_app (launch):
    """End-to-end checks against the example app's endpoints via the
    `launch` fixture, which boots ./examples/app2.py."""
    with launch ("./examples/app2.py") as engine:
        # threaded producer streams 100000 bytes total
        resp = engine.get ('/threaproducer')
        assert resp.status_code == 200
        assert len (resp.data) == 100000

        # mapped-in-thread endpoint returns a JSON body
        resp = engine.get ('/map_in_thread')
        assert resp.status_code == 200
        assert resp.data == {'media': 'Hello'}

        # static image served with etag + correct type/length headers
        resp = engine.get ('/reindeer')
        assert resp.headers.get ('etag')
        assert resp.headers.get ('content-type') == 'image/jpeg'
        assert resp.headers.get ('content-length') == '32772'

        # same payload served as a generic download
        resp = engine.get ('/file')
        assert resp.headers.get ('content-type') == 'application/octet-stream'
        assert resp.headers.get ('content-length') == '32772'

        # chunked stream: custom 210 status, 100 chunk markers in the body
        resp = engine.get ('/stream')
        assert resp.status_code == 210
        assert resp.headers.get ('content-type') == 'text/plain'
        assert resp.data.count (b'<CHUNK>') == 100

        # future resolved on a worker thread
        resp = engine.get ('/thread_future')
        assert resp.status_code == 200
        assert resp.data == b'Hello'
"hansroh@gmail.com"
] | hansroh@gmail.com |
acf0b8131f06d50afc7b3cb0f11b74a4e2b1a554 | 5d61565651b7ba5fa8fade3313a5e82fca8b6686 | /interface/migrations/0002_prjcheck_fpic.py | 546e9fd2488175f24de03e710e76989c914de9ef | [] | no_license | lonelyxmas/ISMS | d597b00072bfa77907875f575b866fbb1fb53295 | 08c5e2f3518fc639cf1a1f2869f4b2f3ae58e306 | refs/heads/master | 2023-08-14T12:02:59.001215 | 2021-03-22T03:34:58 | 2021-03-22T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.1.4 on 2019-12-27 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional ``FPic`` image field to ``PrjCheck``."""
    dependencies = [
        ('interface', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='prjcheck',
            name='FPic',
            # verbose_name is the Chinese admin label "inspection picture"
            field=models.ImageField(blank=True, default='', null=True, upload_to='itemcheckpic/', verbose_name='检查图片'),
        ),
    ]
| [
"11325818@qq.com"
] | 11325818@qq.com |
91ff036f6c97b605a72a9b5abf1bbfc31a53e774 | c18ba3fe85ae03f084bd2fef2eb4abf83ca41fd1 | /ptp/wikidata.py | 5c413f80db2e9600537f1a8c0a17f598a1d6d0f5 | [
"Apache-2.0"
] | permissive | MusaabKh/ProceedingsTitleParser | 9f68d11d02193f817a0db830f124de2a68a721e1 | b48832e9032e41785f61338f6ff2f5cac91aba0e | refs/heads/master | 2023-02-24T20:08:23.096893 | 2021-01-31T09:36:26 | 2021-01-31T09:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | '''
Created on 2020-07-11
@author: wf
'''
import os
from ptp.titleparser import TitleParser
from ptp.event import EventManager, Event
class WikiData(object):
    '''
    WikiData proceedings titles event source
    '''
    # public Wikidata SPARQL endpoint; NOTE(review): not read by any method
    # below — confirm whether it is used elsewhere or dead.
    defaultEndpoint="https://query.wikidata.org/sparql"
    def __init__(self, config=None):
        '''
        Constructor
        Args:
            config(StorageConfig): the storage configuration to use
        '''
        self.em=EventManager('wikidata',url='https://www.wikidata.org/wiki/Wikidata:Main_Page',title='Wikidata',config=config)
        self.debug=self.em.config.debug
        self.profile=self.em.config.profile
        # sample data ships beside the package: <package>/../sampledata/
        path=os.path.dirname(__file__)
        self.sampledir=path+"/../sampledata/"
        self.sampleFilePath=self.sampledir+"proceedings-wikidata.txt"
    def cacheEvents(self,limit=1000000,batchSize=500):
        '''
        initialize me from my sample file
        Args:
            limit(int): the maximum number of events to cache
            batchSize(int): the batchSize to use
        '''
        tp=TitleParser.getDefault(self.em.name)
        tp.fromFile(self.sampleFilePath, "wikidata")
        # parseAll yields (title counter, parse errors, parsed titles)
        tc,errs,result=tp.parseAll()
        if self.debug:
            print(tc)
            print("%d errs %d titles" % (len(errs),len(result)))
        for title in result:
            # debug-dump titles for which an acronym was recognized
            if 'acronym' in title.metadata():
                if self.debug:
                    print(title.metadata())
            if 'eventId' in title.info:
                event=Event()
                event.fromTitle(title,self.debug)
                # strip the entity URL prefix so eventId is the bare Q-identifier
                event.eventId=event.eventId.replace("http://www.wikidata.org/entity/","")
                event.url="%s" % (title.info['eventId'])
                self.em.add(event)
        self.em.store(limit=limit,batchSize=batchSize)
    def initEventManager(self):
        ''' init my event manager '''
        # populate the cache on first use, otherwise load from the store
        if not self.em.isCached():
            self.cacheEvents()
        else:
            self.em.fromStore()
        self.em.extractCheckedAcronyms()
| [
"wf@bitplan.com"
] | wf@bitplan.com |
3d9b3e3a8e24455f1d17ac7a5e2af516ed052473 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOpenMiniWidgetDataSyncModel.py | 7686d4a08b1e579200406d13612fb354366b681e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,131 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.WidgetActivityInfo import WidgetActivityInfo
from alipay.aop.api.domain.WidgetGoodsInfo import WidgetGoodsInfo
class AlipayOpenMiniWidgetDataSyncModel(object):
    """Request model for syncing mini-program widget data (activities/goods).

    Serialization follows the generated Alipay SDK conventions: falsy
    (unset/empty) fields are omitted and nested models are converted through
    their own ``to_alipay_dict``.  The original generated per-field
    copy/paste in ``to_alipay_dict``/``from_alipay_dict`` is replaced by a
    table-driven loop over ``_FIELDS``; behavior is unchanged and new fields
    only need to be registered once.
    """

    # Public attribute names, in the order the generated SDK serialized them.
    _FIELDS = ('activity_list', 'data_type', 'goods_list', 'mini_app_id', 'pid')

    def __init__(self):
        self._activity_list = None
        self._data_type = None
        self._goods_list = None
        self._mini_app_id = None
        self._pid = None

    @property
    def activity_list(self):
        return self._activity_list

    @activity_list.setter
    def activity_list(self, value):
        # Coerce plain dicts into WidgetActivityInfo instances.
        if isinstance(value, list):
            self._activity_list = list()
            for i in value:
                if isinstance(i, WidgetActivityInfo):
                    self._activity_list.append(i)
                else:
                    self._activity_list.append(WidgetActivityInfo.from_alipay_dict(i))

    @property
    def data_type(self):
        return self._data_type

    @data_type.setter
    def data_type(self, value):
        self._data_type = value

    @property
    def goods_list(self):
        return self._goods_list

    @goods_list.setter
    def goods_list(self, value):
        # Coerce plain dicts into WidgetGoodsInfo instances.
        if isinstance(value, list):
            self._goods_list = list()
            for i in value:
                if isinstance(i, WidgetGoodsInfo):
                    self._goods_list.append(i)
                else:
                    self._goods_list.append(WidgetGoodsInfo.from_alipay_dict(i))

    @property
    def mini_app_id(self):
        return self._mini_app_id

    @mini_app_id.setter
    def mini_app_id(self, value):
        self._mini_app_id = value

    @property
    def pid(self):
        return self._pid

    @pid.setter
    def pid(self, value):
        self._pid = value

    def to_alipay_dict(self):
        """Serialize every set (truthy) field into a plain dict for the gateway."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Unset/empty fields are omitted, matching the generated SDK.
                continue
            if isinstance(value, list):
                # Convert nested models in place, as the generated code did.
                for i in range(0, len(value)):
                    element = value[i]
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayOpenMiniWidgetDataSyncModel()
        for name in AlipayOpenMiniWidgetDataSyncModel._FIELDS:
            if name in d:
                # Assign through the property so nested dicts are coerced.
                setattr(o, name, d[name])
        return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
757fa10fa71cf83c4ddce1e7fe97f57729e28263 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02258/s174082799.py | ec4c268e53e6fd615b4b7d4c2cbd623cf2fc2291 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | n = int(input())
# Maximum single-buy/single-sell "profit" max(a[j]-a[i], j>i) over the price
# sequence; ``n`` is read from stdin on the (metadata-fused) line above, and
# the result is printed on the line below this block.
a = []
for i in range(n):
    a.append(int(input()))
# One pass: track the running minimum and the best difference so far.
maxv = -(10**9)  # answer can be negative when values only decrease
mina = a[0]      # smallest value seen so far
for i in range(1,n):
    maxv = max(maxv, a[i]-mina)
    mina = min(mina, a[i])
print(maxv) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fce5b5f2ed42e0117e781fb4e217c76333570e72 | 8bda8911512f1c454f5e75ef36f3d828661b1479 | /math_03/test04.py | 825c4353a53aea3c85d7334e46178e3e6b4ea99a | [] | no_license | choijaehoon1/backjoon | 0f5909a1e1d416f8f431d6b986754af7eb6a3396 | a0411dba08c057a312733e38683246162256e61d | refs/heads/master | 2023-02-26T12:28:33.733297 | 2021-02-05T13:28:33 | 2021-02-05T13:28:33 | 280,430,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import math
# Print the GCD and LCM of two integers read from stdin (``math`` is
# imported on the metadata-fused line above this block).
a,b = map(int,input().split())
gcd = math.gcd(a,b)
# lcm(a, b) == a*b // gcd(a, b); multiplying the reduced co-prime factors
# by the gcd keeps the intermediate products small.
tmp_a = a // gcd
tmp_b = b // gcd
result = tmp_a * tmp_b * gcd
print(gcd)
print(result)
| [
"wogns_20@naver.com"
] | wogns_20@naver.com |
a4965ebdbf2345026b3cf7f906d6d34263e5a778 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/modem_callerid/config_flow.py | 2bc857a16f43a79d68aca1bd9f922d9893d43e62 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 4,450 | py | """Config flow for Modem Caller ID integration."""
from __future__ import annotations
from typing import Any
from phone_modem import PhoneModem
import serial.tools.list_ports
from serial.tools.list_ports_common import ListPortInfo
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import usb
from homeassistant.const import CONF_DEVICE, CONF_NAME
from homeassistant.data_entry_flow import FlowResult
from .const import DEFAULT_NAME, DOMAIN, EXCEPTIONS
DATA_SCHEMA = vol.Schema({"name": str, "device": str})
def _generate_unique_id(port: ListPortInfo) -> str:
    """Generate unique id from usb attributes."""
    return "%s:%s_%s_%s_%s" % (
        port.vid,
        port.pid,
        port.serial_number,
        port.manufacturer,
        port.description,
    )
class PhoneModemFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Phone Modem."""
    def __init__(self) -> None:
        """Set up flow instance."""
        # Device path found during USB discovery, carried over to usb_confirm.
        self._device: str | None = None
    async def async_step_usb(self, discovery_info: usb.UsbServiceInfo) -> FlowResult:
        """Handle USB Discovery."""
        device = discovery_info.device
        # Resolve to the stable /dev/serial/by-id path; blocking, so run in executor.
        dev_path = await self.hass.async_add_executor_job(usb.get_serial_by_id, device)
        # Same layout as _generate_unique_id, but built from the UsbServiceInfo
        # discovery payload instead of a serial ListPortInfo.
        unique_id = f"{discovery_info.vid}:{discovery_info.pid}_{discovery_info.serial_number}_{discovery_info.manufacturer}_{discovery_info.description}"
        if (
            await self.validate_device_errors(dev_path=dev_path, unique_id=unique_id)
            is None
        ):
            self._device = dev_path
            return await self.async_step_usb_confirm()
        return self.async_abort(reason="cannot_connect")
    async def async_step_usb_confirm(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle USB Discovery confirmation."""
        if user_input is not None:
            # User confirmed: create the entry for the previously probed device.
            return self.async_create_entry(
                title=user_input.get(CONF_NAME, DEFAULT_NAME),
                data={CONF_DEVICE: self._device},
            )
        self._set_confirm_only()
        return self.async_show_form(step_id="usb_confirm")
    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a flow initiated by the user."""
        errors: dict[str, str] | None = {}
        if self._async_in_progress():
            return self.async_abort(reason="already_in_progress")
        # Enumerate serial ports (blocking) and hide already-configured devices.
        ports = await self.hass.async_add_executor_job(serial.tools.list_ports.comports)
        existing_devices = [
            entry.data[CONF_DEVICE] for entry in self._async_current_entries()
        ]
        unused_ports = [
            usb.human_readable_device_name(
                port.device,
                port.serial_number,
                port.manufacturer,
                port.description,
                port.vid,
                port.pid,
            )
            for port in ports
            if port.device not in existing_devices
        ]
        if not unused_ports:
            return self.async_abort(reason="no_devices_found")
        if user_input is not None:
            # Map the selected human-readable label back to its port entry.
            port = ports[unused_ports.index(str(user_input.get(CONF_DEVICE)))]
            dev_path = await self.hass.async_add_executor_job(
                usb.get_serial_by_id, port.device
            )
            errors = await self.validate_device_errors(
                dev_path=dev_path, unique_id=_generate_unique_id(port)
            )
            if errors is None:
                return self.async_create_entry(
                    title=user_input.get(CONF_NAME, DEFAULT_NAME),
                    data={CONF_DEVICE: dev_path},
                )
        # First pass, or validation failed: (re)render the selection form.
        user_input = user_input or {}
        schema = vol.Schema({vol.Required(CONF_DEVICE): vol.In(unused_ports)})
        return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
    async def validate_device_errors(
        self, dev_path: str, unique_id: str
    ) -> dict[str, str] | None:
        """Handle common flow input validation.

        Returns None on success or an ``errors`` dict suitable for the form.
        May abort the flow if the device/unique id is already configured.
        """
        self._async_abort_entries_match({CONF_DEVICE: dev_path})
        await self.async_set_unique_id(unique_id)
        self._abort_if_unique_id_configured(updates={CONF_DEVICE: dev_path})
        try:
            # Probe the modem to confirm the device actually answers.
            api = PhoneModem()
            await api.test(dev_path)
        except EXCEPTIONS:
            return {"base": "cannot_connect"}
        else:
            return None
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
9701179c2d1d86e8d46e81423ada9305f0e75887 | 657bad752fd4603b5c5e44a59aa6a2210d343bf0 | /huxiu/huxiu/test.py | bf7fcfcd9022935eef9a3881692284db3b55938d | [] | no_license | PurpleBreeze0322/web-scraping-cases | 0ed1a6f611289898004d07ef409d783c5ca25898 | 8b8c8c15671f2c0e7283d2e6428d44786478fede | refs/heads/master | 2021-05-22T13:57:40.849489 | 2018-09-30T14:25:15 | 2018-09-30T14:25:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
# Request headers for img.titan007.com; NOTE(review): defined but never
# passed to the driver below — PhantomJS ignores this dict. Confirm intent.
headers = {
    "Accept-Encoding":"gzip, deflate",
    "Accept-Language":"zh-CN, zh;q=0.8",
    "Connection":"keep-alive",
    "Host":"img.titan007.com",
    "Referer":"http://zq.win007.com/cn/CupMatch/2018/75.html",
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"
}
# Headless PhantomJS with a hard-coded local binary path (Windows layout).
driver = webdriver.PhantomJS(executable_path=r'D:\03-CS\plantomJS\phantomjs-2.1.1-windows\bin\phantomjs')
driver.get('http://zq.win007.com/cn/CupMatch/75.html' )
try:
    # Wait up to 30s for the JS-rendered score table to become visible.
    element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.XPATH, '//*[@id="ScoreGroupDiv"][@style="display: block;"]')))
finally:
    # Dump whatever was rendered, and always release the browser process
    # (driver.close() follows on the next line, fused with dataset metadata).
    print(driver.page_source)
driver.close() | [
"wuqili1025@gmail.com"
] | wuqili1025@gmail.com |
591f1ad12a3d97363911cc7e996a2fa9768a008f | 61a5e9adfcd292dcf06fceda993c0fbeb0b469cc | /alg/leetcode/b40combinenum2.py | 5361ab60cff911ae885e89c6bedebf9e12615f9b | [] | no_license | godspysonyou/everything | f76e0ade065601990ff5b449f8c5955ba3c07374 | 03c7ec9d9e5a802ffbc854a9f929ca770ae7add8 | refs/heads/master | 2020-03-20T23:34:50.812645 | 2019-04-21T13:16:58 | 2019-04-21T13:16:58 | 137,853,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | class Solution:
    def __init__(self):
        # Accumulates result combinations across DFS calls; the object is
        # intended for a single combinationSum2 invocation.
        self.ret=[]
    def DFS(self, candidates, target, start, valueslist):
        """Depth-first search collecting combinations that sum to ``target``.

        ``candidates`` must be sorted ascending; ``valueslist`` is the path
        chosen so far.  Complete combinations accumulate in ``self.ret``.
        """
        length = len(candidates)
        if target==0:
            # list.append returns None, which doubles as the early exit value
            return self.ret.append(valueslist)
        before = -1 # skip equal values at the same level (e.g. "1 1") so no duplicate combinations are produced; `if candidates[i] in seen` would read better
        for i in range(start,length):
            if target<candidates[i]:
                # input is sorted, so every later candidate overshoots too
                return
            if candidates[i]==before:
                continue
            before = candidates[i]
            # recurse on a copy with this element consumed (each number used once);
            # remove() drops the first equal value, which is equivalent in a sorted list
            cantemp = candidates.copy()
            cantemp.remove(candidates[i])
            self.DFS(cantemp,target-candidates[i],i,valueslist+[candidates[i]])
    def combinationSum2(self, candidates, target):
        """Return all unique combinations of candidates summing to target,
        each candidate used at most once (LeetCode 40).

        Note: sorts ``candidates`` in place.
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        candidates.sort()
        self.DFS(candidates, target, 0, [])
        return self.ret
if __name__ == '__main__':
    # Minimal smoke test: the only combination of [1] summing to 1 is [1].
    l = [1]
    t = 1
    s = Solution()
print(s.combinationSum2(l, t)) | [
"1344094556@qq.com"
] | 1344094556@qq.com |
809de22e39392cd22203c2ab663bdb9e6901856d | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/jupyterhub_jupyterhub/jupyterhub-master/docs/source/conf.py | 83b19bab19ab399e13108106e184f1424fe934bd | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,011 | py | # -*- coding: utf-8 -*-
#
import sys
import os
import shlex
# For conversion from markdown to html
import recommonmark.parser
# Set paths
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Minimal Sphinx version
needs_sphinx = '1.4'
# Sphinx extension modules
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'autodoc_traits',
]
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JupyterHub'
copyright = u'2016, Project Jupyter team'
author = u'Project Jupyter team'
# Autopopulate version
from os.path import dirname
docs = dirname(dirname(__file__))
root = dirname(docs)
sys.path.insert(0, root)
sys.path.insert(0, os.path.join(docs, 'sphinxext'))
import jupyterhub
# The short X.Y version.
version = '%i.%i' % jupyterhub.version_info[:2]
# The full version, including alpha/beta/rc tags.
release = jupyterhub.__version__
language = None
exclude_patterns = []
pygments_style = 'sphinx'
todo_include_todos = False
# Set the default role so we can use `foo` instead of ``foo``
default_role = 'literal'
# -- Source -------------------------------------------------------------
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_encoding = 'utf-8-sig'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
#html_theme_path = []
#html_title = None
#html_short_title = None
#html_logo = None
#html_favicon = None
# Paths that contain custom static files (such as style sheets)
html_static_path = ['_static']
#html_extra_path = []
#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_sidebars = {}
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None
#html_search_language = 'en'
#html_search_options = {'type': 'default'}
#html_search_scorer = 'scorer.js'
htmlhelp_basename = 'JupyterHubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
#'papersize': 'letterpaper',
#'pointsize': '10pt',
#'preamble': '',
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JupyterHub.tex', u'JupyterHub Documentation',
u'Project Jupyter team', 'manual'),
]
#latex_logo = None
#latex_use_parts = False
#latex_show_pagerefs = False
#latex_show_urls = False
#latex_appendices = []
#latex_domain_indices = True
# -- manual page output -------------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jupyterhub', u'JupyterHub Documentation',
[author], 1)
]
#man_show_urls = False
# -- Texinfo output -----------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JupyterHub', u'JupyterHub Documentation',
author, 'JupyterHub', 'One line description of project.',
'Miscellaneous'),
]
#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False
# -- Epub output --------------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Intersphinx ----------------------------------------------------------
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Read The Docs --------------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    # Local build: import and activate the RTD theme explicitly.
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    # readthedocs.org applies its theme by default, so no need to specify it.
    # RTD does not run make, so generate the rest-api docs here instead.
    from subprocess import check_call as sh
    sh(['make', 'rest-api'], cwd=docs)
# -- Spell checking -------------------------------------------------------
# Optional dependency: enable the spelling builder only when installed.
try:
    import sphinxcontrib.spelling
except ImportError:
    pass
else:
    extensions.append("sphinxcontrib.spelling")
    spelling_word_list_filename='spelling_wordlist.txt'
| [
"659338505@qq.com"
] | 659338505@qq.com |
85550704eeeeb7250fd6c0029ac51c44c29bfa89 | 3d0f61f8bf2ad1ce503022c1c4c9ebe566d6040b | /detectron/utils/io.py | 0cb5e22c3c889728b88623f82840936dc5562504 | [
"MIT",
"Apache-2.0"
] | permissive | programmerjide/Detectron | 3410a812de62fc905860b3afde00f62c68a11c94 | da2441cd3a3d08f9aa1e51b0d05370bdc94bf98e | refs/heads/master | 2020-03-30T18:54:25.149074 | 2019-01-29T08:47:37 | 2019-01-29T08:47:37 | 151,519,747 | 0 | 0 | Apache-2.0 | 2018-10-04T04:45:54 | 2018-10-04T04:45:54 | null | UTF-8 | Python | false | false | 5,864 | py | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""IO utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import logging
import os
import re
import six
import sys
from six.moves import cPickle as pickle
from six.moves import urllib
logger = logging.getLogger(__name__)
_DETECTRON_S3_BASE_URL = 'https://s3-us-west-2.amazonaws.com/detectron'
def save_object(obj, file_name, pickle_format=2):
    """Pickle ``obj`` to ``file_name``.

    Protocol 2 is the default so Python 2 executables can still read the
    result; bump it only once Python 2 compatibility is dropped entirely.
    Avoid pickle.HIGHEST_PROTOCOL whenever the file is manifested or
    consumed outside this system.
    """
    target_path = os.path.abspath(file_name)
    with open(target_path, 'wb') as out_file:
        pickle.dump(obj, out_file, pickle_format)
def load_object(file_name):
    """Load a Python object pickled by :func:`save_object`.

    The default unpickling encoding on Python 3 is 7-bit ASCII, but the
    blobs contain arbitrary 8-bit bytes.  The ideal fix is
    ``encoding="bytes"`` with blob names interpreted as utf-8; a reasonable
    one is latin1, whose first 256 code points agree with Unicode anyway.

    The Python-2 branch now checks ``sys.version_info`` directly instead of
    depending on the third-party ``six`` module (``sys`` is already imported
    at module level).
    """
    with open(file_name, 'rb') as f:
        if sys.version_info[0] == 2:
            return pickle.load(f)
        return pickle.load(f, encoding='latin1')
def cache_url(url_or_file, cache_dir):
    """Download the file specified by the URL to the cache_dir and return the
    path to the cached file. If the argument is not a URL, simply return it as
    is.
    """
    # Anything that does not look like http(s):// is treated as a local path.
    is_url = re.match(
        r'^(?:http)s?://', url_or_file, re.IGNORECASE
    ) is not None
    if not is_url:
        return url_or_file
    url = url_or_file
    # Only the official Detectron S3 bucket is auto-cached; the local cache
    # path is derived by substituting the bucket prefix with cache_dir.
    assert url.startswith(_DETECTRON_S3_BASE_URL), \
        ('Detectron only automatically caches URLs in the Detectron S3 '
         'bucket: {}').format(_DETECTRON_S3_BASE_URL)
    cache_file_path = url.replace(_DETECTRON_S3_BASE_URL, cache_dir)
    if os.path.exists(cache_file_path):
        # Already downloaded: verify integrity before handing the path out.
        assert_cache_file_is_ok(url, cache_file_path)
        return cache_file_path
    cache_file_dir = os.path.dirname(cache_file_path)
    if not os.path.exists(cache_file_dir):
        os.makedirs(cache_file_dir)
    logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
    download_url(url, cache_file_path)
    # Guard against corrupted or partial downloads.
    assert_cache_file_is_ok(url, cache_file_path)
    return cache_file_path
def assert_cache_file_is_ok(url, file_path):
    """Check that cache file has the correct hash.

    Compares the md5 of the local cached file against the reference checksum
    published next to the remote file (url + '.md5sum').  NOTE: implemented
    with ``assert``, so the check disappears under ``python -O``.
    """
    # File is already in the cache; verify that the md5sum matches before use.
    cache_file_md5sum = _get_file_md5sum(file_path)
    ref_md5sum = _get_reference_md5sum(url)
    assert cache_file_md5sum == ref_md5sum, \
        ('Target URL {} appears to be downloaded to the local cache file '
         '{}, but the md5 hash of the local file does not match the '
         'reference (actual: {} vs. expected: {}). You may wish to delete '
         'the cached file and try again to trigger automatic '
         'download.').format(url, file_path, cache_file_md5sum, ref_md5sum)
def _progress_bar(count, total):
    """Report download progress.

    Credit:
    https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
    """
    bar_len = 60
    filled_len = int(round(bar_len * count / float(total)))
    percents = round(100.0 * count / float(total), 1)
    bar = '=' * filled_len + '-' * (bar_len - filled_len)
    sys.stdout.write(
        ' [{}] {}% of {:.1f}MB file \r'.
        format(bar, percents, total / 1024 / 1024)
    )
    sys.stdout.flush()
    if count >= total:
        sys.stdout.write('\n')


def download_url(
    url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
    """Download url and write it to dst_file_path; return bytes written.

    ``progress_hook(bytes_so_far, total_size)`` is called after each chunk
    when provided (pass None to disable).

    Fix: the original read the length via ``response.info().getheader(...)``,
    a Python 2-only API — under Python 3 ``info()`` returns an
    ``email.message.Message`` with no ``getheader``, so every download raised
    AttributeError.  ``info().get(...)`` exists (case-insensitively) on both
    Python versions.

    Credit:
    https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
    """
    response = urllib.request.urlopen(url)
    # Works on py2 (rfc822.Message.get) and py3 (email.message.Message.get).
    total_size = int(response.info().get('Content-Length').strip())
    bytes_so_far = 0
    with open(dst_file_path, 'wb') as f:
        while 1:
            chunk = response.read(chunk_size)
            bytes_so_far += len(chunk)
            if not chunk:
                break
            if progress_hook:
                progress_hook(bytes_so_far, total_size)
            f.write(chunk)
    return bytes_so_far
def _get_file_md5sum(file_name):
    """Compute the md5 hash of a file.

    The file is opened in binary mode: ``hashlib`` requires bytes, so the
    original text-mode read raised TypeError on Python 3 and would corrupt
    binary content via newline translation on Windows.  Chunked reads keep
    memory bounded for large model files.
    """
    hash_obj = hashlib.md5()
    with open(file_name, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            hash_obj.update(chunk)
    return hash_obj.hexdigest()
def _get_reference_md5sum(url):
    """By convention the md5 hash for url is stored in url + '.md5sum'."""
    url_md5sum = url + '.md5sum'
    md5sum = urllib.request.urlopen(url_md5sum).read().strip()
    if isinstance(md5sum, bytes):
        # urlopen().read() returns bytes on Python 3; decode so the value
        # compares equal to hashlib's str hexdigest in assert_cache_file_is_ok.
        md5sum = md5sum.decode('ascii')
    return md5sum
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
65e9bfc51a6d4ab7499c2b0d6f71f5725e351419 | 8fe440deb4eb66d2fcb222a7c43680dc516394c1 | /src/api/bkuser_core/tests/categories/plugins/custom/test_client.py | 4c5bf6bce48cd7f37c39efcb6734fed68794bd8c | [
"MIT"
] | permissive | robert871126/bk-user | 780e163db76a8a997ed94a1a83389fa4f81ad6a4 | 8c633e0a3821beb839ed120c4514c5733e675862 | refs/heads/master | 2023-08-20T11:05:46.317044 | 2021-10-22T08:44:06 | 2021-10-22T08:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,217 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
from unittest.mock import patch
import pytest
from bkuser_core.categories.plugins.custom.client import CustomDataClient
from bkuser_core.categories.plugins.custom.exceptions import CustomAPIRequestFailed
from bkuser_core.categories.plugins.custom.models import CustomTypeList
from requests import Request, Response
pytestmark = pytest.mark.django_db
class TestClient:
    """Unit tests for CustomDataClient against a mocked ``requests.get``."""
    @staticmethod
    def make_resp(content: list, status_code: int = 200) -> Response:
        """Build a fake requests.Response wrapping ``content`` in the
        {"count": ..., "results": [...]} envelope the API returns."""
        response = Response()
        response._content = str.encode(json.dumps({"count": len(content), "results": content}))  # type: ignore
        response.status_code = status_code
        # Attach a minimal request object so error paths can log/inspect it.
        fake_req = Request(method="GET", json={}, url="")
        fake_req.body = None  # type: ignore
        response.request = fake_req  # type: ignore
        return response
    @pytest.fixture
    def client(self, test_custom_category):
        # Client under test, bound to the category created by the
        # test_custom_category fixture.
        c = CustomDataClient(
            api_host="test.com",
            category_id=test_custom_category.id,
            paths={"profile": "some-path", "department": "some-path"},
        )
        return c
    @pytest.mark.parametrize(
        "fake_profiles,expected",
        [
            (
                [
                    {
                        "username": "fake-user",
                        "email": "fake@test.com",
                        "code": "code-1",
                        "display_name": "fakeman",
                        "telephone": "13111123445",
                        "leaders": [],
                        "departments": [],
                        "extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
                        "position": 0,
                    },
                    {
                        "username": "fake-user-2",
                        "email": "fake2@test.com",
                        "code": "code-2",
                        "display_name": "fakeman2",
                        "telephone": "13111123445",
                        "leaders": ["code-1"],
                        "departments": [],
                        "extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
                        "position": 0,
                    },
                ],
                {"code-1", "code-2"},
            ),
        ],
    )
    def test_fetch_profiles(self, client, fake_profiles, expected):
        """Well-formed profile payloads parse into a CustomTypeList keyed by code."""
        with patch("requests.get") as mocked_get:
            mocked_get.return_value = self.make_resp(fake_profiles)
            r = client.fetch_profiles()
            assert isinstance(r, CustomTypeList)
            assert len(r.values) == len(fake_profiles)
            assert set(r.items_map.keys()) == expected
    @pytest.mark.parametrize(
        "fake_profiles,expected",
        [
            (
                [
                    {
                        "username": "fake-user",
                        "email": "fake@test.com",
                        "code": "code-1",
                        "extras": {"aaa": "xxxx", "bbb": "qqqq", "uniquetest": "vvvv"},
                        "position": 0,
                    }
                ],
                TypeError,
            ),
        ],
    )
    def test_fetch_wrong_profiles(self, client, fake_profiles, expected):
        """Payloads missing required profile fields raise (TypeError)."""
        with patch("requests.get") as mocked_get:
            mocked_get.return_value = self.make_resp(fake_profiles)
            with pytest.raises(expected):
                client.fetch_profiles()
    @pytest.mark.parametrize(
        "fake_departments,expected",
        [
            (
                [
                    {"name": "测试部门", "code": "dep1", "parent": None},
                    {"name": "测试部门2", "code": "dep2", "parent": "dep1"},
                ],
                {"dep1", "dep2"},
            ),
        ],
    )
    def test_fetch_departments(self, client, fake_departments, expected):
        """Department payloads parse into a CustomTypeList keyed by code."""
        with patch("requests.get") as mocked_get:
            mocked_get.return_value = self.make_resp(fake_departments)
            r = client.fetch_departments()
            assert isinstance(r, CustomTypeList)
            assert len(r.values) == len(fake_departments)
            assert set(r.items_map.keys()) == expected
    def test_fetch_exception(self, client):
        """Non-2xx responses surface as CustomAPIRequestFailed."""
        with patch("requests.get") as mocked_get:
            mocked_get.return_value = self.make_resp([], 400)
            with pytest.raises(CustomAPIRequestFailed):
                client.fetch_departments()
| [
"bluesedenyu@gmail.com"
] | bluesedenyu@gmail.com |
50a1c197aa182e2136aec1c04b8b9ee483b8ca09 | f9a8ee37334771f37edda863db08a7dcccc9522f | /AtCoder/Practice/茶緑埋め/ARC016B.py | 751d89f85482bc23091e3ca78e6dbd24952fce1a | [] | no_license | shimmee/competitive-programming | 25b008ee225858b7b208c3f3ca7681e33f6c0190 | 894f0b7d557d6997789af3fcf91fe65a33619080 | refs/heads/master | 2023-06-07T13:07:17.850769 | 2021-07-05T17:20:47 | 2021-07-05T17:20:47 | 331,076,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | # ARC016B - 音楽ゲーム
# URL: https://atcoder.jp/contests/arc016/tasks/arc016_2
# Date: 2021/02/18
# ---------- Ideas ----------
# 90度回転させてgroupbyする
# key=='x'であれば連続する長さをインクリメント
# key == 'o'であれば+1インクリメント
# ------------------- Answer --------------------
#code:python
from itertools import groupby
def rotated(array_2d):
list_of_tuples = zip(*array_2d[::-1])
return [list(elem) for elem in list_of_tuples]
n = int(input())
a = [input() for _ in range(n)]
a = rotated(a)
ans = 0
for l in a:
gr = groupby(l)
for key, group in gr:
if key == 'x':
ans += len(list(group))
elif key == 'o':
ans += 1
print(ans)
# ACしたけど解説が違う解き方してた!
# 列数と同じ長さの配列を用意して,直前(上)がoだったらTrueにして,カウントしない
n = int(input())
X = [input() for _ in range(n)]
flag = [False]*9
ans = 0
for i in range(n):
for j in range(9):
s = X[i][j]
if s == 'o':
if not flag[j]:
ans += 1
flag[j] = True
else:
flag[j] = False
if s == 'x': ans += 1
print(ans)
# もっと簡単にかける。直前(上)がoかどうかを毎回調べればいい
n = int(input())
S = ['.'*9] + [input() for _ in range(n)]
ans = 0
for y in range(1, n+1):
for x in range(9):
if S[y][x] == 'x':
ans += 1
elif S[y][x] == 'o' and S[y-1][x] != 'o':
ans += 1
print(ans)
# ------------------ Sample Input -------------------
6
..o..x.o.
..o..x.o.
..x..o.o.
..o..o.o.
..o..x.o.
..o..x.o.
15
.........
.x.......
.........
...x.....
.........
.......o.
.......o.
.......o.
.........
..x.....o
........o
........o
....x...o
.x......o
........o
# ----------------- Length of time ------------------
# 7分
# -------------- Editorial / my impression -------------
# https://www.slideshare.net/chokudai/atcoder-regular-contest-016
# 解説にある方法2つとも試してみた。
# 結局普通に全探索するのが一番早い
# ----------------- Category ------------------
#AtCoder
#全探索
#ARC-B | [
"shinmeikeita@gmail.com"
] | shinmeikeita@gmail.com |
87cbbb361b99a7e07ef4e3ae17d6ca347ce8425f | 5e277a32c166ae45bea28310074dc459a0d99cf6 | /.metadata/.plugins/org.eclipse.core.resources/.history/ba/50143292859b00161299af2c090fd9cc | 9158d155646d3e761ac70b1bf19b0f6db5aa03ea | [] | no_license | vgvcode/pos | 4d7172d7905f60157fcae445c650475d17a9a390 | a9dba2c5c3fc8c4529c6619a3dc92c9608a4c70d | refs/heads/master | 2021-01-13T13:12:37.833510 | 2016-11-02T22:28:42 | 2016-11-02T22:28:42 | 72,686,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,974 | #!/usr/bin/python
from __future__ import print_function # Python 2/3 compatibility
import json
from decimal import *
import time
import uuid
import boto3
from copy import deepcopy
from boto3.dynamodb.conditions import Key
import commonmodule
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders DynamoDB Decimal values as int or float."""

    def default(self, o):
        if not isinstance(o, Decimal):
            # not a Decimal: defer to the base class (raises TypeError)
            return super(DecimalEncoder, self).default(o)
        if o % 1 > 0:
            return float(o)   # has a fractional part
        return int(o)         # whole number
class CatalogSchema:
    """DDL operations (create/delete table) for the 'catalog' DynamoDB table."""

    tableName = 'catalog'

    def __init__(self, endpoint="http://localhost:8000"):
        """Connect to DynamoDB at *endpoint* (defaults to DynamoDB Local).

        BUG FIX: this method was named ``__init`` (missing trailing
        underscores), so it never ran as the constructor and
        ``self.__dynamodb`` was never set; createTable() would then fail
        with AttributeError.
        """
        self.__endpoint = endpoint
        self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)

    def createTable(self):
        """Create the catalog table; return True on success, False on error."""
        result = True
        try:
            self.__table = self.__dynamodb.create_table(
                TableName=CatalogSchema.tableName,
                KeySchema=[
                    {
                        'AttributeName': 'CatalogID',
                        'KeyType': 'HASH'  # Partition key
                    },
                    {
                        'AttributeName': 'ItemID',
                        'KeyType': 'RANGE'  # Sort key
                    }
                ],
                AttributeDefinitions=[
                    {
                        'AttributeName': 'CatalogID',
                        'AttributeType': 'S'
                    },
                    {
                        'AttributeName': 'ItemID',
                        'AttributeType': 'S'
                    },
                ],
                ProvisionedThroughput={
                    'ReadCapacityUnits': 10,
                    'WriteCapacityUnits': 10
                }
            )
        except Exception as e:
            # NOTE(review): e.response only exists on botocore ClientError;
            # any other exception type would raise AttributeError here.
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def deleteTable(self):
        """Drop the catalog table created by createTable(); True on success."""
        result = True
        try:
            self.__table.delete()
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result
class Catalog:
    """CRUD access to one catalog (a DynamoDB partition) plus an in-memory
    mirror of its items, kept in sync with every successful write.

    NOTE(review): the except blocks assume botocore ClientError (they read
    ``e.response``); any other exception type would raise AttributeError there.
    """

    tableName = 'catalog'

    def __init__(self, catalogId, endpoint="http://localhost:8000"):
        """Connect to DynamoDB and populate the in-memory item mirror."""
        self.__endpoint = endpoint
        self.__dynamodb = boto3.resource('dynamodb', endpoint_url=endpoint)
        self.__table = self.__dynamodb.Table(Catalog.tableName)
        self.__catalogId = catalogId
        self.__items = {}
        self.fetchFromDB()

    def get(self):
        """Return a plain-dict view of this catalog's config and items."""
        return {
            "catalogId": self.__catalogId,
            "endpoint": self.__endpoint,
            "tableName": Catalog.tableName,
            "items": self.__items
        }

    def load(self, fileName):
        """Bulk-insert items from a JSON file; return True on success.

        Floats are parsed as Decimal because DynamoDB rejects binary floats.
        """
        result = True
        try:
            with open(fileName) as json_file:
                catalog = json.load(json_file, parse_float=Decimal)
            for catItem in catalog:
                CatalogID = catItem['CatalogID']
                ItemID = catItem['ItemID']
                Info = catItem['Info']
                print("Adding item:", CatalogID, ItemID, Info)
                self.__table.put_item(
                    Item={
                        'CatalogID': CatalogID,
                        'ItemID': ItemID,
                        'Info': Info
                    }
                )
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def fetchFromDB(self):
        """Re-read every item of this catalog from DynamoDB into memory."""
        result = True
        self.__items = {}  # drop the stale mirror first
        try:
            response = self.__table.query(
                KeyConditionExpression=Key('CatalogID').eq(self.__catalogId))
            for r in response['Items']:
                self.__items[r['ItemID']] = r['Info']
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def addItem(self, idy, name, price):
        """Insert a new item (id, name, price); return True on success."""
        result = True
        print('Adding to DB: {}, {}, {}'.format(idy, name, price))
        createdTicks = time.time()
        createdTime = time.asctime(time.localtime(createdTicks))
        info = {
            'ItemId': idy,
            'CreatedTime': createdTime,
            'CreatedTicks': Decimal(createdTicks),
            'UpdatedTime': "0",
            'UpdatedTicks': Decimal(0),
            'Name': name,
            'Price': commonmodule.money(price)
        }
        try:
            self.__table.put_item(
                Item={
                    'CatalogID': self.__catalogId,
                    'ItemID': idy,
                    'Info': info
                }
            )
            # keep the in-memory mirror in sync with the DB write
            self.__items[idy] = info
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def getItems(self):
        """Return the in-memory item mapping (ItemID -> Info)."""
        return self.__items

    def updatePrice(self, itemId, price):
        """Update one item's price and updated-at stamps; True on success."""
        result = True
        updatedTicks = time.time()
        updatedTime = time.asctime(time.localtime(updatedTicks))
        try:
            self.__table.update_item(
                Key={
                    'CatalogID': self.__catalogId,
                    'ItemID': itemId,
                },
                UpdateExpression="set Info.Price = :p, Info.UpdatedTime = :u, Info.UpdatedTicks = :t",
                ExpressionAttributeValues={
                    ':p': commonmodule.money(price),
                    ':u': updatedTime,
                    ':t': Decimal(updatedTicks),
                },
                ReturnValues="UPDATED_NEW"
            )
            print("Item updated")
            # Mirror the DB change.  CONSISTENCY FIX: store Decimal ticks,
            # matching what addItem stores (the original kept a raw float here).
            self.__items[itemId]['Price'] = commonmodule.money(price)
            self.__items[itemId]['UpdatedTime'] = updatedTime
            self.__items[itemId]['UpdatedTicks'] = Decimal(updatedTicks)
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def removeItem(self, itemId):
        """Delete an item from the table and the mirror; True on success.

        BUG FIX: ``result`` was initialized to False and never set to True,
        so successful deletes still reported failure.
        """
        result = True
        try:
            self.__table.delete_item(
                Key={
                    'CatalogID': self.__catalogId,
                    'ItemID': itemId,
                },
            )
            # remove the item from the in-memory mirror too
            del self.__items[itemId]
        except Exception as e:
            print(e.response['Error']['Message'])
            result = False
        finally:
            return result

    def print(self):
        """Dump the in-memory catalog to stdout (intentionally named print)."""
        for itm in self.__items:
            print('{}: {}'.format(itm, self.__items[itm]))
        print('There are {} items in the catalog'.format(len(self.__items)))
| [
"vgvcode@gmail.com"
] | vgvcode@gmail.com | |
ead246f8b3bc4b82a6e3c6fcbc15adda7e8c9394 | 2148b047f6b0e8c3182aae438d745cf35234220c | /config/eval.py | 6b77931a633704949ff5f9f810166b62f52199b4 | [] | no_license | xzk-seu/xzk_thesis_code | d12845fdbed38893ac66aec876ed8933c5a7a2a1 | abfebca5315725d28346ddc362abe1fef73097b9 | refs/heads/master | 2023-04-11T19:36:16.556585 | 2021-05-07T12:18:18 | 2021-05-07T12:18:18 | 363,370,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,078 | py | from typing import List
from common import Instance
import torch
import numpy as np
class Span:
"""
A class of `Span` where we use it during evaluation.
We construct spans for the convenience of evaluation.
"""
def __init__(self, left: int, right: int, type: str):
"""
A span compose of left, right (inclusive) and its entity label.
:param left:
:param right: inclusive.
:param type:
"""
self.left = left
self.right = right
self.type = type
def __eq__(self, other):
return self.left == other.left and self.right == other.right and self.type == other.type
def __hash__(self):
return hash((self.left, self.right, self.type))
def evaluate_batch_insts(batch_insts: List[Instance],
batch_pred_ids: torch.LongTensor,
batch_gold_ids: torch.LongTensor,
word_seq_lens: torch.LongTensor,
idx2label: List[str],
use_crf_layer: bool = True) -> np.ndarray:
"""
:return: numpy array containing (number of true positive, number of all positive, number of true positive + number of false negative)
You can also refer as (number of correctly predicted entities, number of entities predicted, number of entities in the dataset)
"""
p = 0
total_entity = 0
total_predict = 0
word_seq_lens = word_seq_lens.tolist()
for idx in range(len(batch_pred_ids)):
length = word_seq_lens[idx]
output = batch_gold_ids[idx][:length].tolist()
prediction = batch_pred_ids[idx][:length].tolist()
prediction = prediction[::-1] if use_crf_layer else prediction
output = [idx2label[l] for l in output]
prediction =[idx2label[l] for l in prediction]
batch_insts[idx].prediction = prediction
#convert to span
output_spans = set()
start = -1
# for i in range(len(output)):
# if output[i].startswith("B-"):
# start = i
# if output[i].startswith("E-"):
# end = i
# output_spans.add(Span(start, end, output[i][2:]))
# if output[i].startswith("S-"):
# output_spans.add(Span(i, i, output[i][2:]))
# predict_spans = set()
# for i in range(len(prediction)):
# if prediction[i].startswith("B-"):
# start = i
# if prediction[i].startswith("E-"):
# end = i
# predict_spans.add(Span(start, end, prediction[i][2:]))
# if prediction[i].startswith("S-"):
# predict_spans.add(Span(i, i, prediction[i][2:]))
for i in range(len(output)):
if output[i].startswith("B-"):
start = i
if output[i].startswith("E-"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("S-"):
output_spans.add(Span(i, i, output[i][2:]))
predict_spans = set()
for i in range(len(prediction)):
if prediction[i].startswith("B-"):
start = i
if prediction[i].startswith("E-"):
end = i
predict_spans.add(Span(start, end, prediction[i][2:]))
if prediction[i].startswith("S-"):
predict_spans.add(Span(i, i, prediction[i][2:]))
total_entity += len(output_spans)
total_predict += len(predict_spans)
p += len(predict_spans.intersection(output_spans))
# In case you need the following code for calculating the p/r/f in a batch.
# (When your batch is the complete dataset)
# precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
# recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
# fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
return np.asarray([p, total_predict, total_entity], dtype=int)
| [
"1399350807@qq.com"
] | 1399350807@qq.com |
02d5599e8cefba6aba67e00d744ef474a2d137de | b15a9d9c7374c4a1fa5ec3ef63603a8c57e8681f | /Design-Patterns-Python/facade/game_api.py | c049520bdfdac8376257014495a620024530efab | [] | no_license | gohils/zemr_notebook | 3f7490ef7a2559655746c3e2e0dbfb835a83891e | 00d53cea9970df44160c51e6ad2bdeadfae2c91f | refs/heads/master | 2023-08-04T14:32:35.428016 | 2023-07-20T11:51:08 | 2023-07-20T11:51:08 | 222,027,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | "The Game API facade"
from decimal import Decimal
from users import Users
from wallets import Wallets
from game_engine import GameEngine
from reports import Reports
class GameAPI():
"The Game API facade"
@staticmethod
def get_balance(user_id: str) -> Decimal:
"Get a players balance"
return Wallets.get_balance(user_id)
@staticmethod
def game_state() -> dict:
"Get the current game state"
return GameEngine().get_game_state()
@staticmethod
def get_history() -> dict:
"get the game history"
return Reports.get_history()
@staticmethod
def change_pwd(user_id: str, password: str) -> bool:
"change users password"
return Users.change_pwd(user_id, password)
@staticmethod
def submit_entry(user_id: str, entry: Decimal) -> bool:
"submit a bet"
return GameEngine().submit_entry(user_id, entry)
@staticmethod
def register_user(value: dict[str, str]) -> str: # Python 3.9
# def register_user(value) -> str: # Python 3.8 and earlier
"register a new user and returns the new id"
return Users.register_user(value)
| [
"noreply@github.com"
] | gohils.noreply@github.com |
996b44812bd5d0977998f519cc46389a487cbfff | f54fdbb8301f54dda8551bb811e864d3a81da6de | /Python/심화_클래스활용_실습코드/Advanced_OOP_6.py | 92141b2e98259d343ecd114a4d1b92c725d51320 | [] | no_license | Jerrykim91/Daily | 0533afe1956ca5cc88f7d69f7810b489240e70e6 | 30f738fc9728b7501bf5601e17189f47c13aaec9 | refs/heads/master | 2021-07-24T23:18:09.686269 | 2020-10-17T12:07:57 | 2020-10-17T12:07:57 | 226,638,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,781 | py |
# 출처 : README.md 참조
"""
클래스 메소드의 사용법 +
# 리턴하는 기능을 가진 스태틱 메소드
"""
class Person(object):
    """A person built from a Korean resident registration number (SSN)."""

    def __init__(self, year, month, day, sex):
        self.year = year
        self.month = month
        self.day = day
        self.sex = sex

    def __str__(self):
        return '{}년 {}월 {}일생 {}'.format(self.year, self.month, self.day, self.sex)

    @classmethod
    def ssnConstructor(cls, ssn):
        """Alternate constructor: parse 'YYMMDD-GNNNNNN' into a Person.

        The first digit after the dash encodes century and sex:
        1/2 -> born 19xx, 3/4 -> born 20xx; odd -> male, even -> female.
        """
        front, back = ssn.split('-')
        sex = back[0]
        century = '19' if sex in ('1', '2') else '20'
        year = century + front[:2]
        sex = '여성' if int(sex) % 2 == 0 else '남성'
        return cls(year, front[2:4], front[4:6], sex)

    @staticmethod
    def isWorkDay(day):
        """Return True on weekdays (Mon..Fri), False on Sat/Sun.

        weekday(): Mon=0 .. Sat=5, Sun=6.
        """
        return day.weekday() not in (5, 6)
# Demo: build Person objects from SSNs and exercise the workday check.
ssn_1 = '900829-1000006'
ssn_2 = '951224-2000069'
ssn_3 = '201214-4000069'
Jun = Person.ssnConstructor(ssn_1)
Jain = Person.ssnConstructor(ssn_2)
Rose = Person.ssnConstructor(ssn_3)
print(Jun)
print(Jain)
print(Rose)
import datetime
myDate = datetime.date(2020, 6, 21)    # a Sunday
yourDate = datetime.date(2020, 6, 22)  # a Monday
print(Person.isWorkDay(myDate))   # calling the static method via the class
print(Rose.isWorkDay(myDate))     # calling the static method via an instance
print('='*25)
print(Person.isWorkDay(yourDate))
print(Rose.isWorkDay(yourDate))
"sun4131@gmail.com"
] | sun4131@gmail.com |
ddc15468630f01ae1dab3138722888d249e76ae0 | 7548c8efccb43b1d8daec719bd7d8ad4a4d03630 | /Wildcard Matching/Leetcode_44.py | 7999629e5a083344bd36c21f952567b1a3033e3d | [] | no_license | arw2019/AlgorithmsDataStructures | fdb2d462ded327857d72245721d3c9677ba1617b | 9164c21ab011c90944f844e3c359093ce6180223 | refs/heads/master | 2023-02-17T11:50:07.418705 | 2021-01-19T19:37:17 | 2021-01-19T19:37:17 | 204,222,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | # dynamic program
# O(m*n) runtime, O(n) space
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Wildcard match ('?' = any single char, '*' = any sequence).

        Rolling 1-D DP over s, one pass per pattern char:
        O(m*n) time, O(m) space.

        BUG FIX: removed a leftover debug ``print(cur)`` that dumped the
        DP row to stdout on every call.
        """
        m, n = len(s), len(p)
        # cur[i]: does s[:i] match the pattern prefix processed so far?
        cur = [True] + [False] * m
        for j in range(1, n + 1):
            pre = cur[0]  # dp value for (i-1, j-1) from the previous column
            # empty s matches only an all-'*' pattern prefix
            cur[0] = cur[0] and (p[j - 1] == '*')
            for i in range(1, m + 1):
                if p[j - 1] != '*':
                    pre, cur[i] = cur[i], pre and (s[i - 1] == p[j - 1] or p[j - 1] == '?')
                else:
                    # '*' matches empty (cur[i]) or one more char (cur[i-1])
                    pre, cur[i] = cur[i], cur[i - 1] or cur[i]
        return cur[m]
# dynamic program
# o(m*n) runtime, o(m*n) space
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Wildcard matching via a full (m+1) x (n+1) DP table.

        table[i][j] is True when s[:i] matches p[:j].
        """
        rows, cols = len(s) + 1, len(p) + 1
        table = [[False] * cols for _ in range(rows)]
        table[0][0] = True  # empty pattern matches empty string
        # the empty string matches only a pure-'*' pattern prefix
        for col in range(1, cols):
            table[0][col] = p[col - 1] == '*' and table[0][col - 1]
        for col in range(1, cols):
            for row in range(1, rows):
                token = p[col - 1]
                if token == '*':
                    # '*' absorbs nothing (left) or one more char (above)
                    table[row][col] = table[row][col - 1] or table[row - 1][col]
                elif token == '?' or token == s[row - 1]:
                    table[row][col] = table[row - 1][col - 1]
        return table[-1][-1]
# finite-state machine solution
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        """Wildcard matching as an NFA simulation.

        One state per non-'*' pattern char; '*' becomes a self-loop on the
        current state.  The set of live states is advanced over s.

        BUG FIX: removed a leftover debug ``print(transfer)`` that dumped
        the transition table to stdout on every call.
        """
        transfer = {}
        state = 0
        for char in p:
            if char == '*':
                transfer[state, char] = state      # '*': loop, consuming any char
            else:
                transfer[state, char] = state + 1  # literal or '?': advance
                state += 1
        accept = state
        states = {0}
        for char in s:
            # Follow exact-char, '*', and '?' edges from every live state.
            # Dead transitions map to None and are harmlessly carried along.
            states = {transfer.get((at, token)) for at in states for token in (char, '*', '?')}
        return accept in states
| [
"noreply@github.com"
] | arw2019.noreply@github.com |
820a5e2454cddcd8cb18cb9f6630a71d464f709e | f55e27646398ccf20b99d4bf1afdb4cdf1b2f30d | /app/role/models.py | 3501c6f3594509c1dd44d4e2227a3b3da7cd12a1 | [] | no_license | sartim/flask_shop_api | 1f8b96ccdf6bae5b080fa0ff29116f2cbec14c4b | 07b6b4460593ce736866ead6e5f3682b16fc5316 | refs/heads/master | 2023-08-02T23:40:25.897464 | 2023-05-20T06:12:38 | 2023-05-20T06:12:38 | 176,507,271 | 2 | 4 | null | 2023-07-25T22:18:45 | 2019-03-19T12:28:44 | Python | UTF-8 | Python | false | false | 1,473 | py | from app import db
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import UUID
from app.core.base_model import BaseModel, AbstractBaseModel
class Role(BaseModel):
    """An RBAC role: a uniquely named group that carries a set of permissions."""

    __tablename__ = 'role'

    # Postgres-generated UUID primary key (requires the uuid-ossp extension
    # for uuid_generate_v4()).
    id = db.Column(
        UUID(as_uuid=True), primary_key=True,
        server_default=text("uuid_generate_v4()")
    )
    name = db.Column(db.String(255), unique=True, nullable=False)
    description = db.Column(db.Text)
    # soft-delete flag; rows are flagged rather than removed
    deleted = db.Column(db.Boolean, default=False)
    # association rows linking this role to its permissions (eager-loaded)
    permissions = db.relationship("RolePermission", lazy=False)

    def __init__(self, id=None, name=None, description=None, deleted=None):
        """All-optional constructor mirroring the column set."""
        self.id = id
        self.name = name
        self.description = description
        self.deleted = deleted

    def __repr__(self):
        return "<%r (%r)>" % (self.__class__.__name__, self.name)
class RolePermission(AbstractBaseModel):
    """Association table linking a Role to a Permission (composite PK)."""

    __tablename__ = 'role_permission'

    role_id = db.Column(UUID(as_uuid=True), db.ForeignKey('role.id'), primary_key=True)
    permission_id = db.Column(
        UUID(as_uuid=True), db.ForeignKey('permission.id'), primary_key=True
    )
    # eager-loaded ends of the association
    role = db.relationship('Role', lazy=False, overlaps="permissions")
    permission = db.relationship('Permission', lazy=False)

    def __init__(self, role_id=None, permission_id=None):
        self.role_id = role_id
        self.permission_id = permission_id

    def __repr__(self):
        # BUG FIX: the original formatted self.name (copied from Role), which
        # RolePermission does not define, so repr() raised AttributeError.
        return "<%r (%r, %r)>" % (self.__class__.__name__, self.role_id,
                                  self.permission_id)
| [
"sarrtim@gmail.com"
] | sarrtim@gmail.com |
932330f52d59db3f156a0cd226118ccfa8de456a | f3f42cd16c7c39f54dcb6891d37369542cf7bc00 | /server.py | e2803cd828506afe19777eb49fcf7cdcd4d4f16b | [] | no_license | imhardikj/inception | 3cfb1d492669c30fdedc65d26c91bc6ee9846bc0 | 7585afa0586a73f4a5553b384b791e68383f0c29 | refs/heads/master | 2020-11-25T02:11:32.171346 | 2019-12-16T18:10:14 | 2019-12-16T18:10:14 | 228,446,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | import os
from flask import Flask, request, redirect,jsonify
from werkzeug.utils import secure_filename
from try_retrain import predict_image_class
from bs4 import BeautifulSoup
import requests
UPLOAD_FOLDER = 'D:/'
ALLOWED_EXTENSIONS = set([ 'png', 'jpg', 'jpeg'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def _fetch_google_news(query):
    """Scrape the first 10 Google News results for *query*.

    Returns a list of dicts with 'url', 'title', 'source' and 'time' keys.
    NOTE(review): relies on Google News markup (article > a, div > a, time)
    and will break if the page structure changes.
    """
    response = requests.get("https://news.google.com/search?q=" + query)
    soup = BeautifulSoup(response.content, 'html5lib')
    articles = soup.findAll('article')[:10]
    news_list = []
    for article in articles:
        news_list.append({
            'url': article.contents[1].a['href'],
            'title': article.contents[1].a.text,
            'source': article.contents[3].div.a.text,
            'time': article.contents[3].div.time.text,
        })
    return news_list


@app.route('/alzheimer')
def alzheimer():
    """Return the latest Google News headlines about Alzheimer's as JSON.

    CONSISTENCY FIX: this route and /cancer were byte-for-byte duplicates
    except for the query; both now share _fetch_google_news, and the
    leftover debug prints / commented-out code were removed.
    """
    return jsonify(_fetch_google_news("alzheimer"))


@app.route('/cancer')
def cancer():
    """Return the latest Google News headlines about cancer as JSON."""
    return jsonify(_fetch_google_news("cancer"))
@app.route('/', methods=['GET', 'POST'])
def upload_file():
    """Accept an image upload, run the classifier on it, and return JSON.

    BUG FIX: two paths previously fell through and returned None, which
    makes Flask raise a 500 — a POST with a disallowed file type, and any
    GET request.  Both now return a real response.
    """
    print("START")
    if request.method == 'POST':
        # the multipart form must contain a 'file' part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # browsers submit an empty filename when no file was selected
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # consistent path building with the save above
            testres = predict_image_class(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            print(testres)
            return jsonify(testres)
        # disallowed extension: bounce back instead of returning None
        print('File type not allowed')
        return redirect(request.url)
    # plain GET: explain how to use the endpoint instead of erroring
    return "Send a POST request with a png/jpg/jpeg file in the 'file' field."


if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
| [
"noreply@github.com"
] | imhardikj.noreply@github.com |
60dce4a3326a9bdffb8321222519728bd4e53b3e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_exempts.py | ba3ae516ff8f24200a01a7f19517c7cf4bb06803 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py |
from xai.brain.wordbase.adjectives._exempt import _EXEMPT
#class header
class _EXEMPTS(_EXEMPT, ):
    """Plural word-form entry: 'exempts' specializing the base _EXEMPT word."""
    def __init__(self,):
        _EXEMPT.__init__(self)
        self.name = "EXEMPTS"       # canonical (uppercased) surface form
        self.specie = 'adjectives'  # part-of-speech bucket this entry lives in
        self.basic = "exempt"       # lemma / base form
        self.jsondata = {}          # extra lexical data, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
9f8a8dd6b836f1686c9e46fb4cba274438d27581 | 7b67ea903cc08e31c6156fa4bb7b40b64950b474 | /note32/test_context.py | 7692a0cb759e99eaecbd3a04658721c84423417d | [
"MIT"
] | permissive | icexmoon/python-learning-notes | 62596c2d6a439f30c8c0637eca1af36d88a3bea6 | 838c91d896404290b89992b6517be1b6a79df41f | refs/heads/main | 2023-05-27T11:21:25.575286 | 2021-06-08T07:33:22 | 2021-06-08T07:33:22 | 365,742,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | def test_yield():
print('start')
yield 1
print('after')
ty = test_yield()  # creating the generator runs no body code yet
next(ty)           # prints 'start' and yields 1
next(ty)           # prints 'after', then raises StopIteration (shown below)
# start
# after
# Traceback (most recent call last):
# File "D:\workspace\python\python-learning-notes\note32\test.py", line 8, in <module>
# next(ty)
# StopIteration | [
"icexmoon@qq.com"
] | icexmoon@qq.com |
f56cd91fa9140e05b12a527a040d556bb0d9b957 | 4f8ddd9808535ee8aa900393c3a429f480324574 | /Manipulação_Arquivo/io_1.py | 3b10d40c98aceb024f65e4886c0c612f6187de6c | [] | no_license | kamibarreto/Cod3r | 1de8bb5288c16f90e1060089e7fda8216b6cb7cf | cea86f3984e3d43c0726b9ea809505d00679a314 | refs/heads/master | 2022-12-22T23:13:59.176534 | 2020-09-05T11:01:10 | 2020-09-05T11:01:10 | 284,170,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #!/usr/bin/env python3
# IDIOM FIX: open the file with a context manager so it is closed even if
# reading fails (the original paired open()/close() manually).
with open('/home/kami/Projetos/Cod3r/Manipulação_Arquivo/pessoas.csv') as arquivo:
    dados = arquivo.read()  # slurp the whole CSV; it is small

# each line is "name,age"; unpack the two fields into the format string
for registro in dados.splitlines():
    print('Nome: {}\n Idade: {}'.format(*registro.split(',')))
| [
"fabinhobarreto9928@gmail.com"
] | fabinhobarreto9928@gmail.com |
5e5c71a02a8be32bfa37c518c777781b96b690f6 | 2dbaf18e92d31a2b8a5ffbd213c62a94a3256076 | /taobei/tbbuy/models/cart_product.py | f264206ebfa05d7f7c20112b4d64781658dac435 | [] | no_license | ruoshengyuan/guanfang | 17c289677f44a50fdd4ae7a1d9858608228496e0 | 251f514636c4828f28aa65b2bd6fc1fe46f4437a | refs/heads/master | 2022-12-13T18:05:50.268182 | 2019-09-24T13:42:09 | 2019-09-24T13:42:09 | 201,073,359 | 3 | 0 | null | 2022-12-07T23:53:57 | 2019-08-07T15:04:07 | Python | UTF-8 | Python | false | false | 839 | py | from sqlalchemy import Column, Integer, String, ForeignKey, UniqueConstraint, Index
from sqlalchemy.orm import relationship
from marshmallow import Schema, fields, post_load
from .base import Base
class CartProduct(Base):
    """A shopping-cart line: one (user, product) pair with a quantity."""

    __tablename__ = 'cart_product'
    __table_args__ = (
        UniqueConstraint('user_id', 'product_id'),  # one row per user/product
        Index('idx_product_id', 'product_id'),      # supports lookups by product
    )

    # NOTE(review): user_id/product_id are plain integers rather than
    # ForeignKey columns even though ForeignKey is imported — presumably
    # they reference rows owned by other services; confirm before changing.
    user_id = Column(Integer, nullable=False)
    product_id = Column(Integer, nullable=False)
    amount = Column(Integer, nullable=False, default=1)  # quantity in cart
class CartProductSchema(Schema):
    """Marshmallow (de)serialization schema for CartProduct rows."""

    id = fields.Int()
    user_id = fields.Int()
    product_id = fields.Int()
    amount = fields.Int()
    created_at = fields.DateTime()
    updated_at = fields.DateTime()

    @post_load
    def make_cart_product(self, data):
        """After a successful load, materialize the dict into a model object."""
        return CartProduct(**data)
| [
"jaggerwang@gmail.com"
] | jaggerwang@gmail.com |
49ac9d28cdc33d372bad35111a0dada73d3cf5c4 | c4e3a57511eb7a39425847bdcd38a2207e560a13 | /Algorithm/909_Snakes_Or_Ladders.py | 4a704f1a03ea4df560bf51751413b205e395c053 | [] | no_license | Gi1ia/TechNoteBook | 57af562b78278b7f937b906d1154b19f2c077ebd | 1a3c1f4d6e9d3444039f087763b93241f4ba7892 | refs/heads/master | 2021-06-03T02:31:24.986063 | 2020-07-16T22:25:56 | 2020-07-16T22:25:56 | 141,761,958 | 7 | 1 | null | 2018-11-05T01:09:46 | 2018-07-20T22:06:12 | HTML | UTF-8 | Python | false | false | 1,299 | py | import collections
class Solution:
    def snakesAndLadders(self, board):
        """LeetCode 909: fewest dice rolls to reach the last square (BFS).

        :type board: List[List[int]]
        :rtype: int  (-1 when the last square is unreachable)

        Cleanup: removed the dead, never-used ``index = []`` local.
        """
        if not board or not board[0] or len(board[0]) == 1:
            return 0
        n = len(board)
        # Flatten the boustrophedon board: bottom row left-to-right, the row
        # above right-to-left, and so on.  straight[k-1] is square k's value.
        straight = []
        left_to_right = True
        for i in reversed(range(n)):
            row = board[i] if left_to_right else list(reversed(board[i]))
            straight.extend(row)
            left_to_right = not left_to_right
        # BFS over squares; seen maps square -> fewest rolls to reach it.
        seen = {1: 0}
        queue = collections.deque([1])
        while queue:
            cur = queue.popleft()
            if cur == n * n:
                return seen[cur]
            for nxt in range(cur + 1, cur + 7):   # one die roll: +1 .. +6
                if nxt > n * n:
                    continue
                if straight[nxt - 1] != -1:       # forced snake/ladder jump
                    nxt = straight[nxt - 1]
                if nxt not in seen:
                    seen[nxt] = seen[cur] + 1
                    queue.append(nxt)
        return -1
| [
"41492334+Gi1ia@users.noreply.github.com"
] | 41492334+Gi1ia@users.noreply.github.com |
ffb6607401c22caf600ff9a032495a22b1808ea7 | 665455c521cc7cf76c5436337ed545de90976af4 | /cohesity_management_sdk/models/mongoDB_additional_params.py | 7224cbcb0a345aae191387f6feddcbb4add9c4ad | [
"Apache-2.0"
] | permissive | hsantoyo2/management-sdk-python | d226273bc8eedcf9220ea4999a6f0b9a1a30d99c | 0093194d125fc6746f55b8499da1270c64f473fc | refs/heads/master | 2023-03-01T06:09:39.644085 | 2021-01-15T08:23:16 | 2021-01-15T08:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Cohesity Inc.
class MongoDBAdditionalParams(object):
    """Implementation of the 'MongoDBAdditionalParams' model.

    Extra parameters required for taking backups from this Mongo cluster.

    Attributes:
        secondary_node_tag (list of string): tags of the secondary nodes
            backups should be performed from.
        use_secondary_for_backup (bool): True when this cluster uses
            secondary nodes for backup.
    """

    # Model attribute name -> wire (API) property name
    _names = {
        "secondary_node_tag": 'secondaryNodeTag',
        "use_secondary_for_backup": 'useSecondaryForBackup'
    }

    def __init__(self, secondary_node_tag=None, use_secondary_for_backup=None):
        """Store the backup parameters on the instance."""
        self.secondary_node_tag = secondary_node_tag
        self.use_secondary_for_backup = use_secondary_for_backup

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance from a deserialized API response dict.

        Args:
            dictionary (dictionary): keys matching the API property names.

        Returns:
            object: a new instance, or None when *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('secondaryNodeTag'),
                   dictionary.get('useSecondaryForBackup'))
"naveena.maplelabs@cohesity.com"
] | naveena.maplelabs@cohesity.com |
d8135d350a5efd8400b5d8049c2655c6b35e83eb | 522d50be4c7c4f289706eaf9b07e6a17b0d46199 | /src/djangopycsw/migrations/0011_auto_20150710_1157.py | d2bdf148b01958799a29b410b0777b4899bf8c14 | [] | no_license | ricardogsilva/django-pycsw | f7f4655ccc1d686074c4a0cded198290767fd788 | 0b51dcf0456e7d9f366874ac3f066e295f533876 | refs/heads/master | 2021-01-18T14:19:16.313364 | 2017-11-08T22:50:41 | 2017-11-08T22:50:41 | 38,635,333 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('djangopycsw', '0010_auto_20150710_1155'),
]
operations = [
migrations.AlterField(
model_name='collaborator',
name='fax',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='collaborator',
name='phone',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='organization',
name='city',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='organization',
name='country',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='organization',
name='postal_code',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='organization',
name='state_or_province',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='pycswconfig',
name='inspire_default_language',
field=models.CharField(max_length=100, blank=True),
),
migrations.AlterField(
model_name='record',
name='coupling_type',
field=models.CharField(help_text=b'Maps to pycsw:CouplingType', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='distance_uom',
field=models.CharField(help_text=b'Maps to pycsw:DistanceUOM', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='distance_value',
field=models.CharField(help_text=b'Maps to pycsw:DistanceValue', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='geographic_description_code',
field=models.CharField(help_text=b'Maps to pycsw:GeographicDescriptionCode', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='operation',
field=models.CharField(help_text=b'Maps to pycsw:Operation', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='resource_language',
field=models.CharField(help_text=b'Maps to pycsw:ResourceLanguage', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='service_type',
field=models.CharField(help_text=b'Maps to pycsw:ServiceType', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='service_type_version',
field=models.CharField(help_text=b'Maps to pycsw:ServiceTypeVersion', max_length=100, null=True, blank=True),
),
migrations.AlterField(
model_name='record',
name='specification_date_type',
field=models.CharField(help_text=b'Maps to pycsw:SpecificationDateType', max_length=100, null=True, blank=True),
),
]
| [
"ricardo.garcia.silva@gmail.com"
] | ricardo.garcia.silva@gmail.com |
cd1f3ab6eb02464ada9c3e0bdbb15065d1bff148 | b2755ce7a643ae5c55c4b0c8689d09ad51819e6b | /anuvaad-etl/anuvaad-extractor/document-processor/ocr/google/src/utilities/utils.py | e05f8b91eff7a7fc61d6bf08994e9e6865583968 | [
"MIT"
] | permissive | project-anuvaad/anuvaad | 96df31170b27467d296cee43440b6dade7b1247c | 2bfcf6b9779bf1abd41e1bc42c27007127ddbefb | refs/heads/master | 2023-08-17T01:18:25.587918 | 2023-08-14T09:53:16 | 2023-08-14T09:53:16 | 265,545,286 | 41 | 39 | MIT | 2023-09-14T05:58:27 | 2020-05-20T11:34:37 | Jupyter Notebook | UTF-8 | Python | false | false | 3,688 | py | import os
from pathlib import Path
import time
import json
from anuvaad_auditor.errorhandler import post_error
from anuvaad_auditor.errorhandler import post_error_wf
class FileOperation(object):
    """File-system helpers for the document-processor service: download
    directories, request-JSON parsing, response formatting and error
    reporting."""

    def __init__(self):
        # set later by create_file_download_dir
        self.download_folder = None

    # creating directory if it is not existed before.
    def create_file_download_dir(self, downloading_folder):
        """Create <cwd>/<downloading_folder> if missing; return its path."""
        self.download_folder = downloading_folder
        download_dir = Path(os.path.join(os.getcwd(), self.download_folder))
        if download_dir.exists() is False:
            os.makedirs(download_dir)
        return str(download_dir)

    def accessing_files(self, files):
        """Extract (filepath, file_type, identifier) from one input descriptor.

        NOTE(review): `log_exception` and `LOG_WITHOUT_CONTEXT` are neither
        defined nor imported in this module, so the except branch itself
        raises NameError, and the final return would then raise
        UnboundLocalError.  Presumably
        `from anuvaad_auditor.loghandler import log_exception` was intended
        -- TODO confirm against the other services.
        """
        try:
            filepath = files['name']
            file_type = files['type']
            identifier = files['identifier']
        except Exception as e:
            log_exception("accessing_files, keys not found ", LOG_WITHOUT_CONTEXT, e)
        return filepath, file_type, identifier

    # generating input filepath for input filename
    def input_path(self, input_filename):
        """Return the relative path of an uploaded file under 'upload/'."""
        input_filepath = os.path.join('upload', input_filename)
        return input_filepath

    # extracting data from received json input
    def json_input_format(self, json_data):
        """Unpack the workflow request JSON into its individual fields.

        NOTE(review): same undefined `log_exception` issue as
        accessing_files above.
        """
        try:
            input_data = json_data['input']['inputs']
            workflow_id = json_data['workflowCode']
            jobid = json_data['jobID']
            tool_name = json_data['tool']
            step_order = json_data['stepOrder']
        except Exception as e:
            log_exception("json_input_format, keys not found or mismatch in json inputs ", LOG_WITHOUT_CONTEXT, e)
        return input_data, workflow_id, jobid, tool_name, step_order

    # output format for individual pdf file
    def one_filename_response(self,output_json_file):
        """Wrap one output file name in the standard response shape."""
        file_res = {
            "outputFile" : output_json_file,
            "outputType" : "json"
        }
        return file_res

    # checking file extension of received file type
    def check_file_extension(self, file_type):
        """Return True only for 'pdf' (the sole supported input type)."""
        allowed_extensions = ['pdf']
        if file_type in allowed_extensions:
            return True
        else:
            return False

    # checking directory exists or not
    def check_path_exists(self, dir):
        """Return True when `dir` is a non-None path that exists on disk."""
        if dir is not None and os.path.exists(dir) is True:
            return True
        else:
            return False

    # generating output filepath for output filename
    def output_path(self,index, DOWNLOAD_FOLDER):
        """Build a unique '<index>-<timestamp>.json' path in DOWNLOAD_FOLDER."""
        output_filename = '%d-'%index + str(time.time()).replace('.', '') + '.json'
        output_filepath = os.path.join(DOWNLOAD_FOLDER, output_filename)
        return output_filepath , output_filename

    # writing json file of service response
    def writing_json_file(self, index, json_data, DOWNLOAD_FOLDER):
        """Serialize json_data into a fresh output file; return its name."""
        output_filepath , output_filename = self.output_path(index, DOWNLOAD_FOLDER)
        with open(output_filepath, 'w') as f:
            json_object = json.dumps(json_data)
            f.write(json_object)
        return output_filename

    # error manager integration
    def error_handler(self, object_in, code, iswf):
        """Report an error via anuvaad_auditor; workflow vs plain variant.

        NOTE(review): in the iswf branch job_id/task_id/state/status are read
        but never used and `code = code` is a no-op; in the non-wf branch the
        `code` parameter is ignored and code/message are taken from
        object_in['error'] instead -- verify callers pass that shape.
        """
        if iswf:
            job_id = object_in["jobID"]
            task_id = object_in["taskID"]
            state = object_in['state']
            status = object_in['status']
            code = code
            message = object_in['message']
            error = post_error_wf(code, message, object_in , None)
            return error
        else:
            code = object_in['error']['code']
            message = object_in['error']['message']
            error = post_error(code, message, None)
            return error
| [
"srihari.nagaraj@tarento.com"
] | srihari.nagaraj@tarento.com |
3c258612b6a06b131dd33dde99f1d222ad80f67e | 685f4474699d769dae88537c69f5517ac13a8431 | /EL37.py | 5132902ccc1db99462685de28e9318eee8d0eb4f | [] | no_license | Pumafied/Project-Euler | 7466f48e449b7314598c106398c0be0424ae72d5 | 0c3e80a956893ce1881a9694131d52b156b9d3d8 | refs/heads/master | 2016-09-05T22:45:09.733696 | 2013-04-20T04:46:48 | 2013-04-20T04:46:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
| [
"pumafied@gmail.com"
] | pumafied@gmail.com |
bd9fcd49f98ccc6899ff0965b7a991dca19906f9 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/spaCy/2017/8/morph_rules.py | 2875eb3c8d6f3e52d7c95b8b56099ed57ab2a886 | [
"MIT"
] | permissive | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 5,937 | py | # coding: utf8
from __future__ import unicode_literals
from ...symbols import LEMMA
from ...deprecated import PRON_LEMMA
# Used the table of pronouns at https://sv.wiktionary.org/wiki/deras
MORPH_RULES = {
"PRP": {
"jag": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Nom"},
"mig": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"mej": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Case": "Acc"},
"du": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Case": "Nom"},
"han": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Nom"},
"honom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Masc", "Case": "Acc"},
"hon": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Nom"},
"henne": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Fem", "Case": "Acc"},
"det": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Sing", "Gender": "Neut"},
"vi": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Nom"},
"oss": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Case": "Acc"},
"ni": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Nom"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Case": "Acc"},
"de": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Nom"},
"dem": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"dom": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Case": "Acc"},
"min": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mitt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"mina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"din": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"ditt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dina": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hans": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Masc", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"hennes": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Gender": "Fem", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Sing", "Poss": "Yes", "Reflex": "Yes"},
"dess": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vår": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårt": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"vårat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"våra": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "One", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"er": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"eran": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"ert": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"erat": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"era": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Two", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"},
"deras": {LEMMA: PRON_LEMMA, "PronType": "Prs", "Person": "Three", "Number": "Plur", "Poss": "Yes", "Reflex": "Yes"}
},
"VBZ": {
"är": {"VerbForm": "Fin", "Person": "One", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Two", "Tense": "Pres", "Mood": "Ind"},
"är": {"VerbForm": "Fin", "Person": "Three", "Tense": "Pres", "Mood": "Ind"},
},
"VBP": {
"är": {"VerbForm": "Fin", "Tense": "Pres", "Mood": "Ind"}
},
"VBD": {
"var": {"VerbForm": "Fin", "Tense": "Past", "Number": "Sing"},
"vart": {"VerbForm": "Fin", "Tense": "Past", "Number": "Plur"}
}
}
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
585504f0c092f229fe513fe44804077ba55e94bf | ed54290846b5c7f9556aacca09675550f0af4c48 | /python/scrapy/shiyanlougithub/shiyanlougithub/spiders/repositories.py | 1ba29b6d6b0ed95cf7b9e5c39a7ee14505332f44 | [
"Apache-2.0"
] | permissive | smallyear/linuxLearn | 87226ccd8745cd36955c7e40cafd741d47a04a6f | 342e5020bf24b5fac732c4275a512087b47e578d | refs/heads/master | 2022-03-20T06:02:25.329126 | 2019-08-01T08:39:59 | 2019-08-01T08:39:59 | 103,765,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | # -*- coding: utf-8 -*-
import scrapy
from shiyanlougithub.items import ShiyanlougithubItem
class RepositoriesSpider(scrapy.Spider):
name = 'repositories'
@property
def start_urls(self):
url = 'https://github.com/shiyanlou?page={}&tab=repositories'
return (url.format(i) for i in range(1,5))
def parse(self, response):
for res in response.css('li.public'):
item = ShiyanlougithubItem({
'name' : res.xpath('.//a[@itemprop="name codeRepository"]/text()').re_first("\n\s*(.*)"),
'update_time' : res.xpath('.//relative-time/@datetime').extract_first()
})
yield item
| [
"5931263123@163.com"
] | 5931263123@163.com |
fcb28620711e0de02486fea670581b9b5545cc98 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/L/LisaTalia/jensonbutton_twitter_followers_2.py | bf8959f7d0ada25a0ceeb119eca07c6ab3c8838c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,246 | py | import scraperwiki
import simplejson
import urllib2
import sys
# Needs to be in lower case
SCREENNAME = 'iCod'
# API help: https://dev.twitter.com/docs/api/1/get/followers/ids
url = 'http://api.twitter.com/1/followers/ids.json?screen_name=%s' % (urllib2.quote(SCREENNAME))
print url
followers_json = simplejson.loads(scraperwiki.scrape(url))
# print "Found %d followers of %s" % (len(followers_json), SCREENNAME)
followers_json = followers_json['ids'] # get earliest followers first for batching
followers_json.reverse()
# Groups a list in chunks of a given size
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
# Where to start? Overlap one batch to increase hit rate if people unfollow etc.
batchdone = scraperwiki.sqlite.get_var('batchdone', 1)
batchstart = batchdone - 1
if batchstart < 1:
batchstart = 1
# Take 100 at a time, and do one lookup call for each batch
c = 0
for follower_list in group(followers_json, 100):
c = c + 1
if c < batchstart:
continue
# print "number", c, "out of", len(followers_json) / 100
# print 'batch of ids:', follower_list
url = 'http://api.twitter.com/1/users/lookup.json?user_id=%s' % (urllib2.quote(','.join(map(str, follower_list))))
# print 'getting url:', url
details_json = simplejson.loads(scraperwiki.scrape(url))
for detail in details_json:
data = {'screen_name': detail['screen_name'],
'id': detail['id'],'location': detail['location'],
'bio': detail['description'],
'followers_count': detail['followers_count'],
'friends_count': detail['friends_count'],
'statuses_count': detail['statuses_count'],
'listed_count': detail['listed_count'],
'url': detail['url'],
'verified': detail['verified'],
'time_zone': detail['time_zone']}
# print "Found person", data
scraperwiki.sqlite.save(unique_keys=['id'], data = data)
scraperwiki.sqlite.save_var('batchdone', c)
import scraperwiki
import simplejson
import urllib2
import sys
# Needs to be in lower case
SCREENNAME = 'iCod'
# API help: https://dev.twitter.com/docs/api/1/get/followers/ids
url = 'http://api.twitter.com/1/followers/ids.json?screen_name=%s' % (urllib2.quote(SCREENNAME))
print url
followers_json = simplejson.loads(scraperwiki.scrape(url))
# print "Found %d followers of %s" % (len(followers_json), SCREENNAME)
followers_json = followers_json['ids'] # get earliest followers first for batching
followers_json.reverse()
# Groups a list in chunks of a given size
def group(lst, n):
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
# Where to start? Overlap one batch to increase hit rate if people unfollow etc.
batchdone = scraperwiki.sqlite.get_var('batchdone', 1)
batchstart = batchdone - 1
if batchstart < 1:
batchstart = 1
# Take 100 at a time, and do one lookup call for each batch
c = 0
for follower_list in group(followers_json, 100):
c = c + 1
if c < batchstart:
continue
# print "number", c, "out of", len(followers_json) / 100
# print 'batch of ids:', follower_list
url = 'http://api.twitter.com/1/users/lookup.json?user_id=%s' % (urllib2.quote(','.join(map(str, follower_list))))
# print 'getting url:', url
details_json = simplejson.loads(scraperwiki.scrape(url))
for detail in details_json:
data = {'screen_name': detail['screen_name'],
'id': detail['id'],'location': detail['location'],
'bio': detail['description'],
'followers_count': detail['followers_count'],
'friends_count': detail['friends_count'],
'statuses_count': detail['statuses_count'],
'listed_count': detail['listed_count'],
'url': detail['url'],
'verified': detail['verified'],
'time_zone': detail['time_zone']}
# print "Found person", data
scraperwiki.sqlite.save(unique_keys=['id'], data = data)
scraperwiki.sqlite.save_var('batchdone', c)
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
b019fb5ae46827e1a3740f5c118317c74fa8262c | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/services/services/account_budget_proposal_service/transports/base.py | f1aa4845ec900732bfa03bdd6987bbf629ad7d73 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 4,520 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v9.resources.types import account_budget_proposal
from google.ads.googleads.v9.services.types import (
account_budget_proposal_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AccountBudgetProposalServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AccountBudgetProposalService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_account_budget_proposal: gapic_v1.method.wrap_method(
self.get_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
self.mutate_account_budget_proposal: gapic_v1.method.wrap_method(
self.mutate_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def get_account_budget_proposal(
self,
) -> typing.Callable[
[account_budget_proposal_service.GetAccountBudgetProposalRequest],
account_budget_proposal.AccountBudgetProposal,
]:
raise NotImplementedError
@property
def mutate_account_budget_proposal(
self,
) -> typing.Callable[
[account_budget_proposal_service.MutateAccountBudgetProposalRequest],
account_budget_proposal_service.MutateAccountBudgetProposalResponse,
]:
raise NotImplementedError
__all__ = ("AccountBudgetProposalServiceTransport",)
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
f1caa1d51454453d66b6f77be2fd0f473f85a711 | 9d1ef7993bf0df9967b1e7a79d5913fbc3e3a7e1 | /util.py | 685621eda555f1de7649a2fbe36382c343b6b420 | [
"BSD-2-Clause"
] | permissive | mitmedialab/WhatWeWatch-Analysis | f6f4fbd8fba4ef6a58f4961c7f3d9b9519dae3a4 | cc01dee4e77155c8aec7638e4275172053db3247 | refs/heads/master | 2021-05-28T05:40:36.678808 | 2014-11-03T01:22:26 | 2014-11-03T01:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,798 | py | import csv
import datetime
import json
import math
import os
import random
import numpy as np
dirname = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dirname, 'country_info.json'), 'rb') as f:
country_list = json.loads(f.read())
country_info = dict()
for country in country_list:
alpha3 = country['alpha-3'].lower()
country_info[alpha3] = {
'name': country['name']
, 'alpha3': alpha3
}
def country_name(alpha3):
return country_info[alpha3]['name']
def write_results_csv(experiment, run, filename, data, headers):
create_result_dir(experiment, run)
path = 'results/%s/%s/%s.csv' % (experiment, run, filename)
with open(path, 'wb') as f:
f.write(','.join(headers))
f.write("\n")
for row in data:
f.write(','.join([str(x) for x in row]))
f.write("\n")
def create_result_dir(experiment, run):
try:
os.stat('results')
except OSError:
os.mkdir('results')
try:
os.stat('results/%s' % experiment)
except OSError:
os.mkdir('results/%s' % experiment)
try:
os.stat('results/%s/%s' % (experiment, run))
except OSError:
os.mkdir('results/%s/%s' % (experiment, run))
class VideoData(object):
@classmethod
def from_csv(cls, filename, filter_single=False):
# Read data file
with open(filename, 'rb') as f:
reader = csv.reader(f)
# Skip header and read rows
reader.next()
rows = []
for row in reader:
date = row[0].strip()
loc = row[1].strip().lower()
vid_id = row[2].strip()
rows.append((date, loc, vid_id))
return VideoData(rows, filter_single=filter_single)
def __init__(self, rows, proto=None, filter_single=False):
'''Load data from a csv and create useful representations'''
# Load basic data
if proto is None:
self.countries = set()
self.videos = set()
self.dates = set()
self.pairs = list()
self.dates_vid_cid = {}
self.vids_by_cid = {}
self.rows_by_date = {}
self.cids_by_vid = {}
else:
self.countries = proto.countries
self.videos = proto.videos
self.dates = set()
self.pairs = list()
self.dates_vid_cid = {}
self.vids_by_cid = {}
self.rows_by_date = {}
self.cids_by_vid = {}
# Process rows
for row in rows:
date = row[0]
loc = row[1]
vid_id = row[2]
if proto is None:
self.countries.add(loc)
self.videos.add(vid_id)
self.rows_by_date[date] = self.rows_by_date.get(date,[]) + [(date, loc, vid_id)]
self.dates.add(date)
self.pairs.append((loc, vid_id))
self.vids_by_cid[loc] = self.vids_by_cid.get(loc, set()).union(set([vid_id]))
self.cids_by_vid[vid_id] = self.cids_by_vid.get(vid_id, set()).union(set([loc]))
# Store video dates by location by video id
self.dates_vid_cid[vid_id] = self.dates_vid_cid.get(vid_id, dict())
self.dates_vid_cid[vid_id][loc] = self.dates_vid_cid[vid_id].get(loc, list())
y,m,d = date.split('-')
self.dates_vid_cid[vid_id][loc].append(datetime.date(int(y), int(m), int(d)))
exclude = set()
if proto is None and filter_single:
for vid, cids in self.cids_by_vid.iteritems():
if len(cids) < 2:
exclude.add(vid)
self.videos = [x for x in self.videos if not x in exclude]
# Country and video lookups
if proto is None:
self.country_lookup = Lookup(sorted(self.countries))
self.video_lookup = Lookup(sorted(self.videos))
else:
self.country_lookup = proto.country_lookup
self.video_lookup = proto.video_lookup
# Calculate counts
num_countries = len(self.countries)
num_videos = len(self.videos)
print 'Creating data with %d countries and %d videos' % (num_countries, num_videos)
counts = np.zeros((num_countries, num_videos))
for loc, vid_id in self.pairs:
try:
vid_index = self.video_lookup.get_id(vid_id)
loc_index = self.country_lookup.get_id(loc)
counts[loc_index][vid_index] += 1
except KeyError:
pass
self.counts = counts
def cross_validation_sets(self, num_folds=10):
'''Return a list of (training, test) pairs from this data set.'''
dates = self.rows_by_date.keys()
random.shuffle(dates)
per_fold = int(math.floor(len(dates) / num_folds))
folds = []
for k in range(num_folds):
fold = []
for i in range(per_fold):
date_rows = self.rows_by_date[dates.pop()]
for row in date_rows:
if row[2] in self.videos:
fold.append(row)
folds.append(fold)
cv = CrossValidation()
for k in range(num_folds):
training = sum(folds[:k] + folds[k+1:], [])
test = folds[k]
cv.add_fold(training, test)
return cv
def rows_to_counts(self, rows):
counts = np.zeros(self.counts.shape)
for date, loc, vid_id in rows:
v = self.video_lookup.tok2id[vid_id]
c = self.country_lookup.tok2id[loc]
counts[c,v] += 1
return counts
class CrossValidation(object):
def __init__(self):
self.folds = []
def add_fold(self, training, test):
self.folds.append((training, test))
def get_fold_training(self, i):
return self.folds[i][0]
def get_fold_test(self, i):
return self.folds[i][1]
def __len__(self):
return len(self.folds)
class Lookup(object):
def __init__(self, tokens):
'''Create a two-way lookup between tokens and unique integer ids.'''
self.tok2id = dict()
self.id2tok = dict()
next_id = 0
for t in tokens:
if not t in self.tok2id:
self.tok2id[t] = next_id
self.id2tok[next_id] = t
next_id += 1
def get_token(self, id):
'''Get a named token from an integer id.'''
return self.id2tok[id]
def get_id(self, tok):
'''Get an integer id for the named token.'''
return self.tok2id[tok]
def __len__(self):
return len(self.id2tok)
| [
"ed@elplatt.com"
] | ed@elplatt.com |
1652ad9daf15db0ca2e2d44b9f59139dd7a652db | 4142b8c513d87361da196631f7edd82f11465abb | /python/630/630I.py | 6cde5d177d6668156063078b87be1d83a647c71e | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import math
n = int(input())
space = 2 * n - 2
rs = space - n
res = 2 * 4 * 3 * pow(4, rs - 1)
if n > 3:
res += 3 * 4 * 3 * pow(4, rs - 2) * (rs - 1)
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
fff590cd8a782b6dfd6b9e833ebce514704de1a4 | a0dbc48f31cf3fbddd3cc7672cf3db415cb391c4 | /compiler/drc/__init__.py | 40e3a45cdbd01353b65a987887e550f8c1438711 | [
"BSD-3-Clause"
] | permissive | wangyaobsz/OpenRAM | 4178ef93816b233bac0aaecc580e2cbd235ac39d | 0d616ae072e6c42a0d8a006eebc681408502e956 | refs/heads/master | 2022-08-23T07:51:39.745708 | 2022-07-21T16:37:24 | 2022-07-21T16:37:24 | 113,813,373 | 1 | 0 | null | 2017-12-11T04:47:53 | 2017-12-11T04:47:52 | null | UTF-8 | Python | false | false | 180 | py | from .custom_cell_properties import *
from .custom_layer_properties import *
from .design_rules import *
from .module_type import *
from .drc_lut import *
from .drc_value import *
| [
"mrg@ucsc.edu"
] | mrg@ucsc.edu |
e638a1bce25a680fb36e833c918588d159a522d5 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1039-Minimum-Score-Triangulation-of-Polygon/1039.py | f87bf6ee51d479a31c606ff6a795632caf3e38a6 | [] | no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 350 | py | class Solution:
def minScoreTriangulation(self, A: List[int]) -> int:
n = len(A)
dp = [[0] * n for i in range(n)]
for d in range(2, n):
for i in range(n - d):
j = i + d
dp[i][j] = min(dp[i][k] + dp[k][j] + A[i] * A[j] * A[k] for k in range(i + 1, j))
return dp[0][n - 1] | [
"luliyucoordinate@outlook.com"
] | luliyucoordinate@outlook.com |
443f9c31e65875c862b8d0538956fdb4badc4e8f | 313e29c735deecfe75ae603ff774f32a6574d159 | /home/migrations/0002_load_initial_data.py | eb9af5fcf0569de1f1b3fad3aab461a66bef3afb | [] | no_license | crowdbotics-apps/web-24-dev-3400 | 15513166c4316512671bb25ce7c048a2b8322b86 | 702f7cd25e31447f87d12625dcc86eb8a5bfd865 | refs/heads/master | 2023-05-30T19:28:40.871181 | 2020-04-24T13:46:18 | 2020-04-24T13:46:18 | 258,422,254 | 0 | 0 | null | 2021-06-12T18:05:52 | 2020-04-24T06:09:54 | Python | UTF-8 | Python | false | false | 1,281 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "web 24"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">web 24</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "web-24-dev-3400.botics.co"
site_params = {
"name": "web 24",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1c8040e3a1f1d9305eaa363c6c7d055c98c090ac | 42ffb5262adaaaba6477444cbc922c7e119ddb32 | /pycatia/tps_interfaces/particular_tol_elem.py | 14cc5542c7ebbd868c3ac0325b3425b85b072c07 | [
"MIT"
] | permissive | joaoantfig/pycatia | 0bdd03c489c87b982d45617f783b04ce167fd56a | 2d087d9861c76dbcdc4b19d99528d14649d1c45f | refs/heads/master | 2023-09-01T00:00:09.727160 | 2021-09-29T16:17:54 | 2021-09-29T16:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-09-25 14:34:21.593357
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.any_object import AnyObject
class ParticularTolElem(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| ParticularTolElem
|
| Interface for accessing particular geometry of the toleranced
| element.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.particular_tol_elem = com_object
@property
def particular_geometry(self) -> str:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| o Property ParticularGeometry() As CATBSTR (Read Only)
|
| Retrieves particular geometry of the toleranced element.
|
| Parameters:
|
| oPartGeom
| : Not Defined CenterElement Surface Unsupported
:return: str
:rtype: str
"""
return self.particular_tol_elem.ParticularGeometry
def __repr__(self):
return f'ParticularTolElem(name="{ self.name }")'
| [
"evereux@gmail.com"
] | evereux@gmail.com |
26e4da7286661b314fb390dae31d023f75fe2622 | ad6f20ca36dc65e34b43c69db66f383554718fed | /OOPs/templates.py | 70b0ac5f07ef7984309df506657eb224cfe06582 | [] | no_license | atulanandnitt/questionsBank | 3df734c7389959801ab6447c0959c85f1013dfb8 | 477accc02366b5c4507e14d2d54850a56947c91b | refs/heads/master | 2021-06-11T21:39:24.682159 | 2021-05-06T17:54:18 | 2021-05-06T17:54:18 | 175,861,522 | 0 | 1 | null | 2020-05-02T09:26:25 | 2019-03-15T17:10:06 | Python | UTF-8 | Python | false | false | 873 | py | from string import Template
class MyTemplate(Template):
delimiter = '#'
def Main():
cart = []
cart.append(dict(item='Coke', price=8, qty=1))
cart.append(dict(item='Cake', price=12, qty=2))
cart.append(dict(item='Fish', price=22, qty=4))
t = MyTemplate("#price * #qty = #price")
total = 0
print(cart)
for data in cart:
print(t.substitute(data))
total += data["price"]
print("total " + str(total))
def summation(a,b,*args,**kwargs):
result = a+b
for item in args:
result += int(item)
for key1, val1 in kwargs.items():
result += int(val1)
print("p is ", kwargs['p'])
print("kwargs",kwargs, type(kwargs))
print("args", args, type(args))
return result
if __name__ == "__main__":
Main()
print(summation(1,2,3,4,5,p=1,q=2,r=4))
| [
"atul.anand.nitt@gmail.com"
] | atul.anand.nitt@gmail.com |
69bb140daaed69ba98843a48a802fa8cf3e5a5e5 | a34f722efe6b7b4c102464daebf61450d1fcb774 | /devel/test_series/gridded_noiseless_nofg.py | 41d51d04fafaf91d914ecf980e2b6f806ee8af0c | [
"MIT"
] | permissive | BellaNasirudin/py21cmmc_fg | 4eae59fc5c3647f48a0de5f2963473e92409241a | 928822d07760c481c7673c83c1b7bf4421310b31 | refs/heads/master | 2021-10-10T11:57:57.278899 | 2019-07-26T05:05:45 | 2019-07-26T05:05:45 | 131,949,549 | 1 | 3 | MIT | 2019-02-20T05:14:16 | 2018-05-03T06:10:13 | Jupyter Notebook | UTF-8 | Python | false | false | 876 | py | """
The first test in a series of tests to prove that this code works.
Here are the tests:
1. ** Gridded baselines, no thermal noise, no foregrounds
2. Gridded baselines, thermal noise, no foregrounds
3. MWA baselines, thermal noise, no foregrounds
4. Gridded baselines, thermal noise, point-source foregrounds
5. MWA baselines, thermal noise, point-source foregrounds
"""
from base_definitions import CustomCoreInstrument, CustomLikelihood, core_eor, run_mcmc, DEBUG
# Identifier for this test case; used for both the cached data file below
# and the MCMC chain output name in run_mcmc.
model_name = "InstrumentalGridTest"

# Instrument core for test 1: gridded baselines with no thermal noise
# (Tsys = 0) and no foregrounds.
core_instr = CustomCoreInstrument(
    antenna_posfile = 'grid_centres', # use a special grid of *baselines*.
    Tsys = 0,
)

# Likelihood reads/writes its mock data under data/<model_name>.npz.
likelihood = CustomLikelihood(
    datafile=[f'data/{model_name}.npz']
)
if __name__ == "__main__":
    # Launch the sampler with the EoR core plus the instrument core defined
    # above; the main chain output is written under `model_name`.
    chain = run_mcmc(
        [core_eor, core_instr],
        likelihood,
        model_name=model_name,
    )
| [
"steven.murray@curtin.edu.au"
] | steven.murray@curtin.edu.au |
571e9c49ac89ab60c4939efda633234705c25958 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_mcp230xx/mcp230xx.py | 23b64f8c9d08030a7bc42a7c1083c134cdb63a14 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | # SPDX-FileCopyrightText: 2017 Tony DiCola for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Carter Nelson
#
# SPDX-License-Identifier: MIT
"""
`mcp230xx`
====================================================
CircuitPython module for the MCP23017 and MCP23008 I2C I/O extenders.
* Author(s): Tony DiCola
"""
from adafruit_bus_device import i2c_device
# Release metadata for this library distribution.
__version__ = "2.4.5"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MCP230xx.git"
# Scratch buffer shared by every MCP23008/MCP23017 instance for register
# reads and writes.  Sharing a single module-level bytearray avoids a heap
# allocation per bus transaction, at the deliberate cost of being neither
# thread-safe nor re-entrant.
_BUFFER = bytearray(3)


# pylint: disable=too-few-public-methods
class MCP230XX:
    """Base class for MCP230xx devices."""

    def __init__(self, i2c, address):
        self._device = i2c_device.I2CDevice(i2c, address)

    def _read_u16le(self, register):
        """Read an unsigned little-endian 16-bit value from the given
        8-bit register address."""
        with self._device as bus:
            _BUFFER[0] = register & 0xFF
            # One transaction: write the register address, read two bytes back.
            bus.write_then_readinto(_BUFFER, _BUFFER, out_end=1, in_start=1, in_end=3)
            return _BUFFER[1] | (_BUFFER[2] << 8)

    def _write_u16le(self, register, val):
        """Write an unsigned little-endian 16-bit value to the given
        8-bit register address."""
        with self._device as bus:
            _BUFFER[0] = register & 0xFF
            _BUFFER[1] = val & 0xFF       # low byte first (little endian)
            _BUFFER[2] = (val >> 8) & 0xFF
            bus.write(_BUFFER, end=3)

    def _read_u8(self, register):
        """Read an unsigned 8-bit value from the given 8-bit register address."""
        with self._device as bus:
            _BUFFER[0] = register & 0xFF
            bus.write_then_readinto(_BUFFER, _BUFFER, out_end=1, in_start=1, in_end=2)
            return _BUFFER[1]

    def _write_u8(self, register, val):
        """Write an unsigned 8-bit value to the given 8-bit register address."""
        with self._device as bus:
            _BUFFER[0] = register & 0xFF
            _BUFFER[1] = val & 0xFF
            bus.write(_BUFFER, end=2)
| [
"55570902+apalileo@users.noreply.github.com"
] | 55570902+apalileo@users.noreply.github.com |
a4e594df686f039ef10ff36ac5c9d74f148dde7e | 7cd760f1a570155ad001e53dd34cf7b5451bc099 | /mkt/installs/api.py | 7c584778313ea7bd8232dd6f95312c706f5fa19c | [] | no_license | l-hedgehog/zamboni | 6bab963b334a32bfc9b2e986dc657510f3d10602 | ef1f1849f42023bc684866879c854cdb84eef2f6 | refs/heads/master | 2020-12-25T00:28:19.982297 | 2013-08-21T23:34:35 | 2013-08-21T23:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | from django.core.exceptions import PermissionDenied
import commonware.log
from rest_framework.decorators import (api_view, authentication_classes,
parser_classes, permission_classes)
from rest_framework.parsers import FormParser, JSONParser
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.constants.apps import INSTALL_TYPE_USER
from mkt.installs.forms import InstallForm
from mkt.installs.utils import install_type, record
from mkt.webapps.models import Installed
log = commonware.log.getLogger('z.api')
@api_view(['POST'])
@authentication_classes([RestOAuthAuthentication,
                         RestSharedSecretAuthentication])
@parser_classes([JSONParser, FormParser])
@permission_classes([AllowAny])
def install(request):
    """Record an app install, either anonymously or for the current user.

    Responses: 201 (new install recorded), 202 (user already had it
    installed), 400 (invalid form).  Raises PermissionDenied when a
    regular user tries to install a non-public app.
    """
    request._request.CORS = ['POST']
    form = InstallForm(request.DATA, request=request)
    if not form.is_valid():
        return Response(status=400)

    app = form.cleaned_data['app']
    type_ = install_type(request, app)
    # Regular users may only install public apps; other install types
    # (e.g. developers) are allowed through for non-public apps.
    if not app.is_public() and type_ == INSTALL_TYPE_USER:
        log.info('App not public: {0}'.format(app.pk))
        raise PermissionDenied

    if request.amo_user:
        _, created = Installed.objects.get_or_create(
            addon=app, user=request.amo_user, install_type=type_)
        record(request, app)
        if not created:
            # Already installed for this user.
            return Response(status=202)
    else:
        record(request, app)
    return Response(status=201)
| [
"amckay@mozilla.com"
] | amckay@mozilla.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.