| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 – 288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 – 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1 – 132 |
dd6bcf1ef05674887d1083e99174ba463f169eb5
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/mcl1_input/L28/28-47_MD_NVT_rerun/set_1ns_equi_1_m.py
|
af38002c6274aeca880c96a0e6b883ada530dcf5
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/mcl1/L28/MD_NVT_rerun/ti_one-step/28_47/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_1_m.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1_m.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1_m.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../28-47_merged.prmtop .")
os.system("cp ../0.5_equi_0_3.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
77fc26163f61919df4efdfcbb251803d49603c9d
|
ec8fef96af2a6b6610d298637f05bcdfe67cba2b
|
/experiments/cremi/utils/align_test_samples_part_3.py
|
f1a04694e55134c663b6542a64a388ae5f5fa0df
|
[] |
no_license
|
abailoni/longRangeAgglo
|
8b98aca75b17d177cb5e408460f95ff20f411aeb
|
260b452e106125722ae3824755584ce7bfd5b81c
|
refs/heads/master
| 2021-06-25T14:14:57.150233
| 2020-11-06T11:14:52
| 2020-11-06T11:14:52
| 150,707,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,272
|
py
|
import long_range_compare # Add missing package-paths
from long_range_compare.data_paths import get_trendytukan_drive_path, get_hci_home_path
"""
This is a modified version of part 2 to downscale the whole aligned data without cropping it at all
"""
downscale = True
include_affs = False
from scipy.ndimage import zoom
import vigra
import numpy as np
import os
import h5py
import sys
sys.path += [
os.path.join(get_hci_home_path(), "python_libraries/cremi_tools"),]
def get_gt_bounding_box(gt):
    # no-label ids are <0, i.e. the highest numbers in uint64
    fg_indices = np.where(gt == 1)
    return tuple(
        slice(np.min(fg_indices[d]),np.max(fg_indices[d])+1)
        for d in range(3)
    )
POSTFIX = "_no_crop"
# Weird defects to be blacked out in the uncropped version:
blacked_out = {"A+": [11, 25, 37, 70],
"B+": [18],
"C+": [123]}
# original_pad = ((37, 38), (911, 911), (911, 911))
# FOUND CROP SLICES:
# A+ (slice(36, 163, None), slice(1154, 2753, None), slice(934, 2335, None))
# B+ (slice(36, 163, None), slice(1061, 2802, None), slice(1254, 4009, None))
# C+ (slice(36, 163, None), slice(980, 2443, None), slice(1138, 2569, None))
for sample in ["B+", "C+"]:
    # Load GT mask:
    print("Loading")
    mask_inner_path = "volumes/labels/mask"
    source_path_big_pad = os.path.join(get_trendytukan_drive_path(),
                        "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned_plus_big_pad.hdf".format(sample))
    source_path = os.path.join(get_trendytukan_drive_path(),
                        "datasets/CREMI/official_test_samples/full_aligned_samples/sample_{}_aligned.hdf".format(sample))
    from segmfriends.utils.various import readHDF5, writeHDF5
    print("Reading...")
    mask_big_pad = readHDF5(source_path_big_pad, mask_inner_path)
    print("Max big pad: ", mask_big_pad.max())
    mask_border = mask_big_pad > 10
    mask_big_pad = np.logical_not(mask_border).astype('uint16')
    # print(mask_GT.shape)
    print("Find crop")
    # crop_slice = get_gt_bounding_box(mask_big_pad)
    # Write crop_slice to file:
    import csv
    csv_file_path = os.path.join(get_hci_home_path(),
                        "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.csv".format(sample, POSTFIX))
    with open(csv_file_path, mode='w') as f:
        employee_writer = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for i in range(3):
            employee_writer.writerow([0, str(mask_big_pad.shape[i])])
    # print(crop_slice)
    # Write affs and mask in target file:
    print("Saving...")
    target_path_old = os.path.join(get_hci_home_path(),
                        "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(
                            sample, POSTFIX))
    target_path = os.path.join(get_hci_home_path(),
                        "datasets/CREMI/official_test_samples/cropped_aligned_samples/sample{}_cropped{}.h5".format(sample, POSTFIX))
    # if include_affs:
    # affs_path = os.path.join(get_trendytukan_drive_path(), "datasets/CREMI/constantin_affs/test_samples/sample{}.h5".format(sample))
    # affs_inner_path = "affinities"
    # affs = readHDF5(affs_path, affs_inner_path, crop_slice=(slice(None), ) + crop_slice)
    # writeHDF5(affs, target_path, "volumes/affinities")
    raw = readHDF5(source_path, "volumes/raw")
    # raw = readHDF5(target_path_old, "volumes/raw_2x")
    if sample in blacked_out:
        for blk in blacked_out[sample]:
            print("blacking out ", blk)
            raw[blk] = 0
    # mask_gt = readHDF5(source_path, mask_inner_path, dtype="uint16", crop_slice=crop_slice)
    # writeHDF5(raw, target_path, "volumes/raw")
    # writeHDF5(mask_big_pad.astype('uint16'), target_path, "volumes/labels/mask_gt")
    # writeHDF5(mask_gt, target_path, "volumes/labels/mask_gt")
    if downscale:
        writeHDF5(zoom(mask_big_pad, (1, 0.5, 0.5), order=0), target_path, "volumes/labels/mask_raw_2x")
        writeHDF5(zoom(raw, (1, 0.5, 0.5), order=3), target_path, "volumes/raw_2x")
        # writeHDF5(raw, target_path, "volumes/raw_2x")
|
[
"bailoni.alberto@gmail.com"
] |
bailoni.alberto@gmail.com
|
73b494f266e34eb2deabf1b569f57c9d6a30555d
|
0db6e82011087bc31b2edfd3ac2d5757c08116b8
|
/my_library/models/res_config_settings.py
|
71cc7770897e3ef56c78265398b82ee46e3e046c
|
[] |
no_license
|
NumanIbnMazid/odoo-my-library
|
60617551f3e968a4cf42670785347284901aa4b0
|
f000fea813a5f246e58617e09a5420739569169c
|
refs/heads/master
| 2023-07-29T05:33:41.697740
| 2021-09-13T11:27:58
| 2021-09-13T11:27:58
| 405,944,906
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
# -*- coding: utf-8 -*-
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
    _inherit = 'res.config.settings'
    group_self_borrow = fields.Boolean(
        string="Self borrow", implied_group='my_library.group_self_borrow')
|
[
"numanibnmazid@gmail.com"
] |
numanibnmazid@gmail.com
|
2eeb9ae2d804446a3ffa2341c35a49419b89a47d
|
0ad8fc76aebe7ce22abe771fbeadf227e5b471cb
|
/app/productdb/tests/test_celery_task_creation.py
|
86ea5651b0e4aaaf77df4f63d6dd2a1ae313ba00
|
[
"MIT"
] |
permissive
|
ppavlu/product-database
|
354c6a1a3e9ebfdc931f2aacf8751ed0f149401c
|
09610c09600c63eb91106c0b5a2fa995b134dbf4
|
refs/heads/master
| 2021-01-17T22:51:43.247027
| 2015-10-11T11:37:12
| 2015-10-11T11:37:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from app.productdb.models import Settings
class TestCeleryTaskCreation(TestCase):
"""
This test verifies that a celery task is created in celery when calling certain URLs with a specific parameter
"""
fixtures = ['default_vendors.yaml', 'default_users.yaml']
def test_trigger_manual_cisco_eox_synchronization(self):
"""
Test if the manual Cisco EoX synchronization can be scheduled manually
:return:
"""
print("--> remember to start a redis server when executing this test")
s, created = Settings.objects.get_or_create(id=0)
s.cisco_api_enabled = True
s.cisco_eox_api_auto_sync_enabled = True
s.save()
# schedule Cisco EoX API update
url = reverse('productdb:schedule_cisco_eox_api_sync_now')
self.client.login(username="admin", password="admin")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 302)
# verify that task ID is saved in settings (set by the schedule call)
s = Settings.objects.get(id=0)
self.assertNotEqual(s.eox_api_sync_task_id, "")
|
[
"henry@codingnetworker.com"
] |
henry@codingnetworker.com
|
364125728030ee90a6148fc48a1cf9ab5ed80027
|
03898aa9b248360c16164adb50a40da418cdcb45
|
/src/settings/common.py
|
545ccdd9db091ede491583504513fa6161c3b57b
|
[] |
no_license
|
asamolion/jobi
|
d0f9184de3db6fdee22934270e36c3c469f75ccb
|
d0ad0165c9d55b430d545d7c68d10cd7757e3766
|
refs/heads/master
| 2021-05-15T05:47:23.194551
| 2017-06-13T20:15:10
| 2017-06-13T20:15:10
| 115,773,416
| 0
| 0
| null | 2017-12-30T04:25:17
| 2017-12-30T04:25:17
| null |
UTF-8
|
Python
| false
| false
| 4,115
|
py
|
"""
Django settings for src project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'tinymce',
'django_extensions',
'sorl.thumbnail',
'newsletter',
'django_celery_beat',
'django_celery_results',
'user_custom',
'admin_custom',
'data',
'essentials',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'src.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'src.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_dev", "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_dev", "media_root")
SITE_ID = 1
TINYMCE_JS_URL = os.path.join(STATIC_URL, "/tiny_mce/tiny_mce.js")
TINYMCE_JS_ROOT = os.path.join(STATIC_URL, 'tiny_mce')
TINYMCE_DEFAULT_CONFIG = {
'plugins': "table,spellchecker,paste,searchreplace",
'theme': "advanced",
'cleanup_on_startup': True,
'custom_undo_redo_levels': 10,
}
TINYMCE_SPELLCHECKER = True
TINYMCE_COMPRESSOR = True
# FIXME : Complete installation of django newsletter
# django-newsletter
# https://django-newsletter.readthedocs.io/en/latest/index.html
# Using django-tinymce as editor
NEWSLETTER_RICHTEXT_WIDGET = "tinymce.widgets.TinyMCE"
NEWSLETTER_CONFIRM_EMAIL = True
# Used by Celery and RabbitMq
# Set interval for the Master to check for scrapper status [seconds]
RMQ_REFRESH_RATE = 3600.00
|
[
"rushil0195@gmail.com"
] |
rushil0195@gmail.com
|
431d32716f21e9eef507696baec83c2625141591
|
251d6d11e807fa47fd1bad1f070b727500b17fd5
|
/shares/migrations/0009_shareitemdividend_percent.py
|
102eb77b42c8eeb128d242542ab0ea2694dc1656
|
[] |
no_license
|
khokhlov/dinv
|
a0964403a930f479fb744d90c4dbad887ba9810c
|
7943b533808c913ec3564aa28ada485f857609ee
|
refs/heads/master
| 2020-05-26T13:06:42.975971
| 2017-03-10T12:02:30
| 2017-03-10T12:02:30
| 82,479,422
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('shares', '0008_auto_20170218_2106'),
    ]
    operations = [
        migrations.AddField(
            model_name='shareitemdividend',
            name='percent',
            field=models.DecimalField(blank=True, decimal_places=15, help_text='\u0414\u043e\u0445\u043e\u0434\u043d\u043e\u0441\u0442\u044c \u043d\u0430 \u0434\u0430\u0442\u0443 \u0432\u044b\u043f\u043b\u0430\u0442\u044b, %', max_digits=35, null=True, verbose_name='\u0414\u043e\u0445\u043e\u0434\u043d\u043e\u0441\u0442\u044c, %'),
        ),
    ]
|
[
"kolya.khokhlov@gmail.com"
] |
kolya.khokhlov@gmail.com
|
566f7fe7adac4f3e60a1df3d58f77ea9f53eda7d
|
f22d31484a12d001826c1775a6f2d245a720fce8
|
/Introdução à Programação com Python/Do autor/Códigi fonte e listagem/listagem/capitulo 05/05.10 - Contagem de questões corretas.py
|
94220dbdaf11aa24f9b075ebf42e8fc3747daf6f
|
[] |
no_license
|
eduardoprograma/linguagem_Python
|
9eb55f0a5a432a986e047b091eb7ed7152b7da67
|
942aba9146800fc33bbea98778467f837396cb93
|
refs/heads/master
| 2021-07-07T20:48:37.673101
| 2020-07-31T21:24:17
| 2020-07-31T21:24:17
| 159,852,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,238
|
py
|
##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2017
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Primeira reimpressão - Segunda edição - Maio/2015
# Segunda reimpressão - Segunda edição - Janeiro/2016
# Terceira reimpressão - Segunda edição - Junho/2016
# Quarta reimpressão - Segunda edição - Março/2017
#
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem\capitulo 05\05.10 - Contagem de questões corretas.py
##############################################################################
pontos = 0
questão = 1
while questão <= 3:
    resposta = input("Resposta da questão %d: " % questão)
    if questão == 1 and resposta == "b":
        pontos = pontos + 1
    if questão == 2 and resposta == "a":
        pontos = pontos + 1
    if questão == 3 and resposta == "d":
        pontos = pontos + 1
    questão += 1
print("O aluno fez %d ponto(s)" % pontos)
|
[
"eduardo.candido@fatec.sp.gov.br"
] |
eduardo.candido@fatec.sp.gov.br
|
461eeec321d529c29878dcb64812490da5702fda
|
7938413839bb664b97769c2d7a72664b7ab80a64
|
/tests/test_forms.py
|
bef9c1a8488fedd534e226e87159d4a5497f46ce
|
[
"BSD-3-Clause"
] |
permissive
|
yurkobb/django-contact-form
|
a9268bdf357f7746ca38f34adf2d9a05deed5c8b
|
3a160183458f4a782fc8d23f88807689d1335d10
|
refs/heads/master
| 2020-09-24T09:33:24.313968
| 2019-10-04T09:24:10
| 2019-10-04T09:24:10
| 225,729,641
| 0
| 0
|
BSD-3-Clause
| 2019-12-03T22:27:53
| 2019-12-03T22:27:52
| null |
UTF-8
|
Python
| false
| false
| 6,323
|
py
|
import os
import unittest
from django.conf import settings
from django.core import mail
from django.test import RequestFactory, TestCase
from django.utils.six import text_type
import mock
from contact_form.forms import AkismetContactForm, ContactForm
class ContactFormTests(TestCase):
"""
Tests the base ContactForm.
"""
valid_data = {'name': 'Test',
'email': 'test@example.com',
'body': 'Test message'}
def request(self):
return RequestFactory().request()
def test_request_required(self):
"""
Can't instantiate without an HttpRequest.
"""
self.assertRaises(TypeError, ContactForm)
def test_valid_data_required(self):
"""
Can't try to build the message dict unless data is valid.
"""
data = {'name': 'Test',
'body': 'Test message'}
form = ContactForm(request=self.request(), data=data)
self.assertRaises(ValueError, form.get_message_dict)
self.assertRaises(ValueError, form.get_context)
def test_send(self):
"""
Valid form can and does in fact send email.
"""
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue(self.valid_data['body'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
self.assertEqual(form.recipient_list,
message.recipients())
def test_no_sites(self):
"""
Sites integration works with or without installed
contrib.sites.
"""
with self.modify_settings(
INSTALLED_APPS={
'remove': ['django.contrib.sites'],
}):
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
def test_recipient_list(self):
"""
Passing recipient_list when instantiating ContactForm properly
overrides the list of recipients.
"""
recipient_list = ['recipient_list@example.com']
form = ContactForm(request=self.request(),
data=self.valid_data,
recipient_list=recipient_list)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual(recipient_list,
message.recipients())
def test_callable_template_name(self):
"""
When a template_name() method is defined, it is used and
preferred over a 'template_name' attribute.
"""
class CallableTemplateName(ContactForm):
def template_name(self):
return 'contact_form/test_callable_template_name.html'
form = CallableTemplateName(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue('Callable template_name used.' in
message.body)
def test_callable_message_parts(self):
"""
Message parts implemented as methods are called and preferred
over attributes.
"""
overridden_data = {
'from_email': 'override@example.com',
'message': 'Overridden message.',
'recipient_list': ['override_recpt@example.com'],
'subject': 'Overridden subject',
}
class CallableMessageParts(ContactForm):
def from_email(self):
return overridden_data['from_email']
def message(self):
return overridden_data['message']
def recipient_list(self):
return overridden_data['recipient_list']
def subject(self):
return overridden_data['subject']
form = CallableMessageParts(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
self.assertEqual(overridden_data,
form.get_message_dict())
@unittest.skipUnless(
getattr(
settings,
'AKISMET_API_KEY',
os.getenv('PYTHON_AKISMET_API_KEY')
) is not None,
"AkismetContactForm requires Akismet configuration"
)
class AkismetContactFormTests(TestCase):
"""
Tests the Akismet contact form.
"""
def request(self):
return RequestFactory().request()
def test_akismet_form_spam(self):
"""
The Akismet contact form correctly rejects spam.
"""
data = {'name': 'viagra-test-123',
'email': 'email@example.com',
'body': 'This is spam.'}
with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
instance = akismet_mock.return_value
instance.verify_key.return_value = True
instance.comment_check.return_value = True
form = AkismetContactForm(
request=self.request(),
data=data
)
self.assertFalse(form.is_valid())
self.assertTrue(
text_type(form.SPAM_MESSAGE) in
form.errors['body']
)
def test_akismet_form_ham(self):
"""
The Akismet contact form correctly accepts non-spam.
"""
data = {'name': 'Test',
'email': 'email@example.com',
'body': 'Test message.'}
with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
instance = akismet_mock.return_value
instance.verify_key.return_value = True
instance.comment_check.return_value = False
form = AkismetContactForm(
request=self.request(),
data=data
)
self.assertTrue(form.is_valid())
|
[
"james@b-list.org"
] |
james@b-list.org
|
1d39e93c056e4c8546343a22db34e45dab76f66d
|
56dff287f055067b3c08dcbbad0c5df09377cab5
|
/myshop/urls.py
|
75a244fd38b3cf95124769c5247992ace63fdd8e
|
[] |
no_license
|
antonmazun/kursach_project
|
971b37435a5fe231afe24a57a497bba6f417b0d0
|
04ee570b1975791e1a1c22ac76bbdc55f5accbd8
|
refs/heads/master
| 2021-01-13T14:40:30.541635
| 2016-12-17T07:16:45
| 2016-12-17T07:16:45
| 76,683,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,615
|
py
|
"""myshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
from . import settings
urlpatterns = [
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^basicview/', include('blog.urls')),
url(r'^auth/', include('loginsys.urls') ),
url(r'^api/' , include('blog.api_urls')),
#url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^', include('blog.urls')),
# url(r'^ckeditor/', include('ckeditor_uploader.urls')),
# url(r'', include('user.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL , document_root = settings.MEDIA_ROOT)
|
[
"kpi.study1@gmail.com"
] |
kpi.study1@gmail.com
|
ef0abb1939ca922caa44de6b6f2b04e213bf49d4
|
0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af
|
/futval_graph_2.py
|
59a74b1035887410d2c7ae4724dc960ecaacca5f
|
[] |
no_license
|
EngrDevDom/Everyday-Coding-in-Python
|
61b0e4fcbc6c7f399587deab2fa55763c9d519b5
|
93329ad485a25e7c6afa81d7229147044344736c
|
refs/heads/master
| 2023-02-25T05:04:50.051111
| 2021-01-30T02:43:40
| 2021-01-30T02:43:40
| 274,971,215
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,216
|
py
|
# File : futval_graph_2.py
# Desc : Future Value Graph Version 2.0
from graphics import *
def main():
    # Introduction
    print("This program plots the growth of a 10-year investment.")
    # Get principal and interest rate
    principal = float(input("Enter the initial principal: "))
    apr = float(input("Enter the annualized interest rate: "))
    # Create a graphics window with labels on left edge
    win = GraphWin("Investment Growth Chart", 320, 240)
    win.setBackground("white")
    win.setCoords(-1.75, -200, 11.5, 10400)
    Text(Point(-1, 0), ' 0.0K').draw(win)
    Text(Point(-1, 2500), ' 2.5K').draw(win)
    Text(Point(-1, 5000), ' 5.0K').draw(win)
    Text(Point(-1, 7500), ' 7.5K').draw(win)
    Text(Point(-1, 10000), ' 10.0K').draw(win)
    # Draw bar for initial principal
    bar = Rectangle(Point(0, 0), Point(1, principal))
    bar.setFill("green")
    bar.setWidth(2)
    bar.draw(win)
    # Draw a bar for each subsequent year
    for year in range(1, 11):
        principal = principal * (1 + apr)
        bar = Rectangle(Point(year, 0), Point(year+1, principal))
        bar.setWidth(2)
        bar.draw(win)
    input("Press <Enter> to quit.")
    win.close()
main()
|
[
"60880034+EngrDevDom@users.noreply.github.com"
] |
60880034+EngrDevDom@users.noreply.github.com
|
ca14417a5fd678ba5eb1b37a8aa1ddfa86d0688c
|
82149a84b47fb37238452a658d5f3a8f23342658
|
/pushbase/message_box_component.py
|
563dab44d1e184f89d5ad92aec6a767a6d607eb8
|
[] |
no_license
|
maratbakirov/AbletonLive11_MIDIRemoteScripts
|
408c90522d8f54b878e595b0d8af28ad5008a4a1
|
2b25ba9351764c49f7fd1f99875e28e67b002f30
|
refs/heads/master
| 2023-04-12T04:44:16.715220
| 2021-05-09T11:48:24
| 2021-05-09T11:48:24
| 365,708,395
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,551
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/pushbase/message_box_component.py
from __future__ import absolute_import, print_function, unicode_literals
from builtins import map
from builtins import object
import re
from future.moves.itertools import zip_longest
from ableton.v2.base import forward_property, const, nop, listens, listenable_property
from ableton.v2.base.dependency import dependency
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.elements import DisplayDataSource
from ableton.v2.control_surface.components import BackgroundComponent
from .consts import DISPLAY_LENGTH, MessageBoxText
FORMAT_SPECIFIER_WITH_MARKUP_PATTERN = re.compile(u'[%](len=([0-9]+),)?([^%]*?[diouxXeEfFgGcrs])')
def strip_restriction_markup_and_format(text_or_text_spec):
    if isinstance(text_or_text_spec, tuple):
        format_string = text_or_text_spec[0]
        stripped_format_string = re.sub(FORMAT_SPECIFIER_WITH_MARKUP_PATTERN, u'%\\g<3>', format_string)
        arguments = text_or_text_spec[1:]
        return stripped_format_string % arguments
    else:
        return text_or_text_spec
class Notification(object):
    def __init__(self, parent, *a, **k):
        super(Notification, self).__init__(*a, **k)
        self.hide = parent.hide_notification
class Messenger(object):
u"""
Externally provided interface for those components that provide
global Push feedback.
"""
expect_dialog = dependency(expect_dialog=const(nop))
show_notification = dependency(show_notification=const(nop))
class MessageBoxComponent(BackgroundComponent):
u"""
Component showing a temporary message in the display
"""
__events__ = (u'cancel',)
num_lines = 4
def __init__(self, *a, **k):
super(MessageBoxComponent, self).__init__(*a, **k)
self._current_text = None
self._can_cancel = False
self.data_sources = list(map(DisplayDataSource, (u'',) * self.num_lines))
self._notification_display = None
def _set_display_line(self, n, display_line):
if display_line:
display_line.set_data_sources((self.data_sources[n],))
def set_display_line1(self, display_line):
self._set_display_line(0, display_line)
def set_display_line2(self, display_line):
self._set_display_line(1, display_line)
def set_display_line3(self, display_line):
self._set_display_line(2, display_line)
def set_display_line4(self, display_line):
self._set_display_line(3, display_line)
def set_cancel_button(self, button):
self._on_cancel_button_value.subject = button
self._update_cancel_button()
def _update_cancel_button(self):
if self.is_enabled():
button = self._on_cancel_button_value.subject
if button is not None:
button.reset()
if self._can_cancel and button:
button.set_light(u'MessageBox.Cancel')
def _update_display(self):
if self._current_text != None:
lines = self._current_text.split(u'\n')
for source_line, line in zip_longest(self.data_sources, lines):
if source_line:
source_line.set_display_string(line or u'')
if self._can_cancel:
self.data_sources[-1].set_display_string(u'[ Ok ]'.rjust(DISPLAY_LENGTH - 1))
@listens(u'value')
def _on_cancel_button_value(self, value):
if self.is_enabled() and self._can_cancel and value:
self.notify_cancel()
@listenable_property
def text(self):
return self._current_text
@text.setter
def text(self, text):
if self._current_text != text:
self._current_text = text
self._update_display()
self.notify_text()
@listenable_property
def can_cancel(self):
return self._can_cancel
@can_cancel.setter
def can_cancel(self, can_cancel):
if self._can_cancel != can_cancel:
self._can_cancel = can_cancel
self._update_cancel_button()
self._update_display()
self.notify_can_cancel()
def update(self):
super(MessageBoxComponent, self).update()
self._update_cancel_button()
self._update_display()
class DialogComponent(Component):
u"""
Handles representing modal dialogs from the application. The
script can also request dialogs.
"""
def __init__(self, *a, **k):
super(DialogComponent, self).__init__(*a, **k)
self._message_box = MessageBoxComponent(parent=self, is_enabled=False)
self._next_message = None
self._on_open_dialog_count.subject = self.application
self._on_message_cancel.subject = self._message_box
message_box_layer = forward_property(u'_message_box')(u'layer')
def expect_dialog(self, message):
u"""
Expects a dialog from Live to appear soon. The dialog will be
shown on the controller with the given message regardless of
wether a dialog actually appears. This dialog can be
cancelled.
"""
self._next_message = message
self._update_dialog()
@listens(u'open_dialog_count')
def _on_open_dialog_count(self):
self._update_dialog(open_dialog_changed=True)
self._next_message = None
@listens(u'cancel')
def _on_message_cancel(self):
self._next_message = None
try:
self.application.press_current_dialog_button(0)
except RuntimeError:
pass
self._update_dialog()
def _update_dialog(self, open_dialog_changed = False):
message = self._next_message or MessageBoxText.LIVE_DIALOG
can_cancel = self._next_message != None
self._message_box.text = message
self._message_box.can_cancel = can_cancel
self._message_box.set_enabled(self.application.open_dialog_count > 0 or not open_dialog_changed and self._next_message)
class InfoComponent(BackgroundComponent):
u"""
Component that will show an info text and grab all components that should be unusable.
"""
def __init__(self, info_text = u'', *a, **k):
super(InfoComponent, self).__init__(*a, **k)
self._data_source = DisplayDataSource()
self._data_source.set_display_string(info_text)
def set_display(self, display):
if display:
display.set_data_sources([self._data_source])
|
[
"mbakirov@hotmail.com"
] |
mbakirov@hotmail.com
|
c8c7fc85d28ffc62c00ad9995fef8973968eb8be
|
b63142e8540cb30bb0c663332e29a4112721073e
|
/901_closest_binary_search_tree_value_II.py
|
7c734dae6192625113de43b849bed05bdabc682b
|
[] |
no_license
|
HaydenInEdinburgh/LintCode
|
025bb2f0d75686097061de324c0fd292536dbb14
|
dbeae2bf631e57667d1415164d452d5ca2df7447
|
refs/heads/master
| 2023-08-18T19:52:54.561623
| 2021-10-06T21:46:50
| 2021-10-06T21:46:50
| 370,733,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: the given BST
@param target: the given target
@param k: the given k
@return: k values in the BST that are closest to the target
"""
def closestKValues(self, root, target, k):
# write your code here
if not root:
return
lower_stack, upper_stack = [], []
res = []
# upper_stack => upper bound of the target [desc]
# lower_stack => lower bound of the target [asc]
cur = root
while cur:
upper_stack.append(cur)
cur = cur.left
cur = root
while cur:
lower_stack.append(cur)
cur = cur.right
while len(upper_stack) >0 and upper_stack[-1].val < target:
self.move_upper(upper_stack)
while len(lower_stack) >0 and lower_stack[-1].val >= target:
self.move_lower(lower_stack)
for i in range(k):
if not lower_stack:
res.append(upper_stack[-1].val)
self.move_upper(upper_stack)
elif not upper_stack:
res.append(lower_stack[-1].val)
self.move_lower(lower_stack)
else:
upper, lower = upper_stack[-1].val, lower_stack[-1].val
if abs(upper - target) < abs(lower - target):
res.append(upper)
self.move_upper(upper_stack)
else:
res.append(lower)
self.move_lower(lower_stack)
return res
def move_upper(self, stack):
cur = stack.pop()
if cur.right:
cur = cur.right
while cur:
stack.append(cur)
cur = cur.left
def move_lower(self, stack):
cur = stack.pop()
if cur.left:
cur = cur.left
while cur:
stack.append(cur)
cur = cur.right
|
[
"bony960323@gmail.com"
] |
bony960323@gmail.com
|
662c08229760c0a9763d7d467b8797d9e6268021
|
f68ec37ae975d3aaff2ab3d6a0bae11a2cc432fa
|
/iot_message/tests/test_plain_cryptor.py
|
7909cbc06e25c672eddf4706f64f472276ea30a7
|
[
"MIT"
] |
permissive
|
bkosciow/python_iot-1
|
3f78a0b2ec18949e579a75132a838a7793d6bbe8
|
f3cd2bdbb75cb9afe13fecb603b5b8c026d23500
|
refs/heads/master
| 2021-04-29T01:00:10.595553
| 2019-10-27T19:09:46
| 2019-10-27T19:09:46
| 77,786,513
| 0
| 0
|
MIT
| 2019-10-27T19:09:47
| 2017-01-01T16:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#pylint: skip-file
from nose.tools import assert_equal
from iot_message.cryptor.plain import Cryptor
from iot_message.message import Message
__author__ = 'Bartosz Kościów'
import iot_message.factory as factory
class TestCryptorPlain(object):
    def setUp(self):
        Message.chip_id = 'pc'
        Message.node_name = 'Turkusik'
        Message.drop_unencrypted = False
        Message.encoders = []
        Message.decoders = {}
    def test_encode_message(self):
        Message.add_encoder(Cryptor())
        msg = factory.MessageFactory.create()
        inp = {"event": "channel.on", "parameters": {"channel": 0}, "response": "", "targets": ["node-north"]}
        msg.set(inp)
        msg.encrypt()
        assert_equal(inp["event"], msg.data["event"])
        assert_equal(inp["parameters"], msg.data["parameters"])
        assert_equal(inp["targets"], msg.data["targets"])
    def test_decrypt_message(self):
        Message.add_decoder(Cryptor())
        inp = """{"protocol": "iot:1", "node": "Turkusik", "chip_id": "pc", "event": "message.plain", "parameters": ["a"], "response": "", "targets": ["Turkusik"]}"""
        msg = factory.MessageFactory.create(inp)
        assert_equal(msg.data["event"], "message.plain")
        assert_equal(msg.data["parameters"], ["a"])
        assert_equal(msg.data["targets"], ['Turkusik'])
|
[
"kosci1@gmail.com"
] |
kosci1@gmail.com
|
d7a51028837d657019f9ce5cb1f457861af7fb4f
|
e9757274ddb8484e27590ff0cc3f24550776c6cc
|
/Solved/0090/0090.py
|
2e9cf4d9d2394d5db0791134951f59d920d1fab6
|
[] |
no_license
|
Jinmin-Goh/LeetCode
|
948a9b3e77eb03507aad6f3c78640aa7f00e6ad5
|
d6e80b968032b08506c5b185f66d35c6ff1f8bb9
|
refs/heads/master
| 2020-09-22T10:22:18.443352
| 2020-09-06T06:34:12
| 2020-09-06T06:34:12
| 225,153,497
| 1
| 1
| null | 2020-01-29T15:16:53
| 2019-12-01T11:55:25
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Problem No.: 90
# Solver: Jinmin Goh
# Date: 20200105
# URL: https://leetcode.com/problems/subsets-ii/
import sys
class Solution:
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        ans = [[]]
        nums.sort()
        for i in range(len(nums)):
            temp = []
            temp_ans = []
            for j in range(len(ans)):
                temp.append(ans[j][:])
            for j in range(len(temp)):
                if temp[j] + [nums[i]] not in temp or temp[j] + [nums[i]] not in ans:
                    temp_ans.append(temp[j] + [nums[i]])
            ans += temp_ans[:]
        return ans
|
[
"eric970901@gmail.com"
] |
eric970901@gmail.com
|
1a16421fb2838a45d9e6547212bb8e2eb6d37eeb
|
23b686feb2d0ab9082a7ce622fc055946ed99c55
|
/.history/atkd/views_20190410194907.py
|
7a0a09bfdb29f76eeeb07e0da6fa0ffd18751db4
|
[] |
no_license
|
jasvr/atkd
|
a18b9840bf9948a7560684cd5eb0d5e22f6c52c7
|
daf61f7aa11cfc812171298894b1d0019641c4bd
|
refs/heads/master
| 2020-05-07T09:35:56.343837
| 2019-04-12T16:17:09
| 2019-04-12T16:17:09
| 180,383,260
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
from django.shortcuts import render
from .models import Parent, Student
def parent_list(request):
    parents = Parent.objects.all()
    return render(request, 'atkd/parent_list.html', {'parents': parents})
def parent_detail(request, pk):
    parent = Parent.objects.get(id=pk)
    return render(request, 'atkd/parent_detail.html', {'parent': parent})
def student_list(request):
    students = Student.objects.all()
    return render(request,'atkd/student_list.html',{'students':students})
def student_detail(request, pk):
|
[
"jas.vrgs@gmail.com"
] |
jas.vrgs@gmail.com
|
b08f9607a37ab94a68b28bf4a97f2e2c7d373c85
|
0a0b75df10cb4643a2a9124750239f85bb0abadf
|
/dataset/arxiv_ordering.py
|
8c95621558da411473c4112a7be5e95129c8ec1c
|
[
"MIT"
] |
permissive
|
baotg080599/passage-ordering
|
1d8eaac939c08f21c398425af819f49544af809d
|
f63b993dfd5b7e6475e7fb8950c23c3f22951979
|
refs/heads/main
| 2023-09-05T21:04:05.172285
| 2021-11-22T11:18:26
| 2021-11-22T11:18:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace NLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""arXiv ordering dataset."""
from __future__ import absolute_import, division, print_function
import json
import os
import datasets
import numpy as np
import pathlib
_CITATION = """
@misc{chen2016neural,
title={Neural Sentence Ordering},
author={Xinchi Chen and Xipeng Qiu and Xuanjing Huang},
year={2016},
eprint={1607.06952},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
Dataset for sentence ordering using text from arXiv."""
_PATH = "dataset/arxiv/"
_SENTENCES = "sentences"
_SHUFFLED_SENTENCES = "shuffled_sentences"
_LABEL = "label"
class ArXivOrdering(datasets.GeneratorBasedBuilder):
"""arXiv ordering dataset."""
VERSION = datasets.Version("1.0.0")
def _info(self):
info = datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
_SENTENCES: datasets.Sequence(datasets.Value("string")),
_SHUFFLED_SENTENCES: datasets.Sequence(datasets.Value("string")),
_LABEL: datasets.Sequence(datasets.Value("int8")),
}
),
supervised_keys=None,
homepage="https://github.com/FudanNLP/NeuralSentenceOrdering",
citation=_CITATION,
)
return info
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
data_path = os.path.join(pathlib.Path().absolute(), _PATH)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"path": os.path.join(data_path, "train.txt")},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"path": os.path.join(data_path, "valid.txt")},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"path": os.path.join(data_path, "test.txt")},
),
]
def _generate_examples(self, path=None):
"""Yields examples."""
with open(path, "r") as f:
data = f.read()
examples = data.split("\n\n")
for i, example in enumerate(examples):
lines = example.split("\n")
sentences = lines[2:]
if sentences == []:
continue
shuffled_sentences, label = self.shuffle_sentences(sentences)
yield i, {
_SENTENCES: sentences,
_SHUFFLED_SENTENCES: shuffled_sentences,
_LABEL: label,
}
def shuffle_sentences(self, sentences):
sentences = np.array(sentences)
permutation = np.random.permutation(len(sentences))
return sentences[permutation].tolist(), np.argsort(permutation).tolist()
|
[
"remi.calizzano@gmail.com"
] |
remi.calizzano@gmail.com
|
cb33f0219b3459387def27f934269bb9559ff9cf
|
3f60b999ea7bda83c9586f75f52463dc20337f24
|
/sensitive_user_portrait/influence_application/user_list.py
|
123e10e3ce502ea62b35a1d374c6cac5452c08ff
|
[] |
no_license
|
jianjian0dandan/sensitive_user_portrait
|
629e49ce71db92b50634bac9c828811cdb5381e9
|
cacc30267ebc0e621b1d48d4f1206277a0f48123
|
refs/heads/master
| 2021-01-20T23:18:07.138057
| 2016-05-22T12:09:40
| 2016-05-22T12:09:40
| 42,869,287
| 0
| 0
| null | 2015-09-21T13:55:12
| 2015-09-21T13:55:11
| null |
UTF-8
|
Python
| false
| false
| 1,754
|
py
|
# -*- coding:utf-8 -*-
writer = ['1311967407', '1195347197', '1142648704', '1889213710', '1706987705']
expert = ['1827652007', '1265965213', '1596329427', '1908195982', '2248789552']
grassroot = ['1220291284','1677126410', '1635764393', '2682546440', '3188186580', '1665808371', '1751124681', '2721237781', '2971141411', '3188186580', '2540187460', '3689493785']
religion = ['1218353337', '1761179351', '3482911112', '1220291284', '2504433601']
attorney = ['1840604224', '2752172261', '1707314224', '3268947881', '1935084477', '3568404205', '1510017057', '3306911303', '1006235863', '3194187021', '3575186384', '1762325394', '1628679672', '2338225900']
public_intellectual = ['1197161814','1182391231','1182389073', '1989660417', '1494720324', '1189591617', '1971861621', '3270699555', '1093622153', '2892376557']
non_public_owner = ['1182391231', '1640571365', '1197161814', '1191220232', '1749127163']
independent_media = ['1189729754', '1497882593', '1742566624', '1661598840', '2283406765']
public_media = ['2803301701', '1974576991', '1639498782', '2105426467', '1644648333']
civil_officer = ['1419517335', '1098736570', '1729736051', '2419062270', '1369714780']
star = ['1687429295', '1195300800', '1997641692', '1746274673', '1223178222']
commonweal = ['3299094722', '1342829361', '1946798710', '1894477791', '1958321657']
domain_dict = {'作家写手': writer, '专家学者': expert, '草根红人': grassroot, '宗教人士': religion, \
'维权律师': attorney, '公知分子': public_intellectual, '非公企业主': non_public_owner, \
'独立媒体人': independent_media, '官方媒体': public_media, '公职人员': civil_officer, \
'文体明星': star, '社会公益': commonweal}
|
[
"1257819385@qq.com"
] |
1257819385@qq.com
|
ad5ccba0f6cdf7b7f5ac9268a639d3d4f3c37905
|
22b64b8157f8a1daa55f4508ca0a28e356329d94
|
/809.column_density.py
|
dc6c3b8c71b55fed067bd24764fbd01b93a2ad6d
|
[] |
no_license
|
GiantMolecularCloud/NGC253-turbulent-gas-structure
|
2edc96a8b6e6b407217f8d34459a1d6a3b155b8c
|
d2c47e62f412bfb3665bcde5754221e40214ff7b
|
refs/heads/main
| 2023-03-02T13:47:53.191569
| 2021-02-05T16:32:15
| 2021-02-05T16:32:15
| 336,327,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,098
|
py
|
##############
# DENDROGRAM #
##############
###################################################################################################
# load data
###################################################################################################
execfile(join(scriptdir, '800.info.py'))
data = fnunpickle(join(datadir, 'data.pickle'))
dendrograms = load_dendrograms()
###################################################################################################
# get column density
###################################################################################################
# integrated flux
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        from astropy.table.column import Column
        catalog = dendrograms[co][gal]['catalog']
        dendrogram = dendrograms[co][gal]['dendrogram']
        try:
            del catalog['integrated Tb']
        except:
            pass
        chanwidth = CO['resolution']['spectral']
        idx_list = [i.idx for i in dendrogram.all_structures]
        all_structs_ordered = [x for _,x in sorted(zip(idx_list,list(dendrogram.all_structures)))]
        flux = []
        for struct in tqdm(all_structs_ordered):
            flux.append( struct.values().sum()*chanwidth.value )
        # add column to catalog table
        catalog.add_column( Column(name='integrated Tb', data=flux, dtype=np.float64, unit='K km/s') )
# structure area
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        from astropy.table.column import Column
        catalog = dendrograms[co][gal]['catalog']
        dendrogram = dendrograms[co][gal]['dendrogram']
        Apix = ((CO['resolution']['spatial']/5.)**2)
        npix = []
        idx_list = [i.idx for i in dendrogram.all_structures]
        all_structs_ordered = [x for _,x in sorted(zip(idx_list,list(dendrogram.all_structures)))]
        for struct in tqdm(all_structs_ordered):
            mask = struct.get_mask()
            n = np.sum(mask, axis=0)
            n[n>0] = 1.
            n = np.sum(n)
            npix.append(n)
        area_projected = npix*Apix
        catalog.add_column( Column(name='npix (projected)', data=npix, dtype=np.int64) )
        catalog.add_column( Column(name='area (projected)', data=area_projected, dtype=np.float64, unit='pc^2') )
# effective pixel area
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        from astropy.table.column import Column
        catalog = dendrograms[co][gal]['catalog']
        dendrogram = dendrograms[co][gal]['dendrogram']
        Apix = ((CO['resolution']['spatial']/5.)**2)
        npix_exact = catalog['area (exact)']/Apix
        npix_ellipse = catalog['area (ellipse)']/Apix
        npix_effective = catalog['area (effective)']/Apix
        catalog.add_column( Column(name='npix (exact)', data=npix_exact, dtype=np.float64) )
        catalog.add_column( Column(name='npix (ellipse)', data=npix_ellipse, dtype=np.float64) )
        catalog.add_column( Column(name='npix (effective)', data=npix_effective, dtype=np.float64) )
for i in ['projected','exact','ellipse','effective']:
    print(np.percentile(dendrograms['CO(1-0)']['NGC253']['catalog']['npix ('+i+')'], (1,50,99)))
# colunm density
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        from astropy.table.column import Column
        catalog = dendrograms[co][gal]['catalog']
        dendrogram = dendrograms[co][gal]['dendrogram']
        Xco = GAL['Xco'] *u.cm**-2 /(u.K*u.km/u.s)
        catalog['integrated Tb']*Xco/CO['excitation correction']
        # factor 1.36 due to Helium contribution
        coldens_projected = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (projected)']
        coldens_exact = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (exact)']
        coldens_ellipse = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (ellipse)']
        coldens_effective = catalog['integrated Tb']*1.36*Xco/CO['excitation correction'] /catalog['npix (effective)']
        for col in ['projected','exact','ellipse','effective']:
            try:
                del catalog['column density ('+col+')']
            except:
                pass
        catalog.add_column( Column(name='column density (projected)', data=coldens_projected, dtype=np.float64, unit='cm^-2') )
        catalog.add_column( Column(name='column density (exact)', data=coldens_exact, dtype=np.float64, unit='cm^-2') )
        catalog.add_column( Column(name='column density (ellipse)', data=coldens_ellipse, dtype=np.float64, unit='cm^-2') )
        catalog.add_column( Column(name='column density (effective)', data=coldens_effective, dtype=np.float64, unit='cm^-2') )
for i in ['projected','exact','ellipse','effective']:
    print(np.percentile(dendrograms['CO(1-0)']['NGC253']['catalog']['column density ('+i+')'], (1,50,99)))
for i in ['projected','exact','ellipse','effective']:
    print(np.percentile(dendrograms['CO(3-2)']['NGC253']['catalog']['column density ('+i+')'], (1,50,99)))
# mass
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        from astropy.table.column import Column
        catalog = dendrograms[co][gal]['catalog']
        dendrogram = dendrograms[co][gal]['dendrogram']
        Apix = ((CO['resolution']['spatial']/5.)**2).to(u.cm**2)
        Xco = GAL['Xco'] *u.cm**-2 /(u.K*u.km/u.s)
        # atomic weight of H2: 2; 1.36 to account for helium
        mass = ((catalog['integrated Tb']*Xco/CO['excitation correction'] *Apix *1.36*2.0*u.u).to(u.Msun)).value
        catalog.add_column( Column(name='mass', data=mass, dtype=np.float64, unit='Msun') )
        # log mass
        mass[(mass<1e0) & ~np.isfinite(mass)] = np.nan
        log_mass = np.log10(mass)
        catalog.add_column( Column(name='log mass', data=log_mass, dtype=np.float64, unit='log Msun') )
###################################################################################################
# save catalog
###################################################################################################
for co,CO in lines.items():
    for gal,GAL in galaxies.items():
        fnpickle(dendrograms[co][gal]['catalog'], join(compdir,gal+'.'+co+'.catalog.pickle'))
###################################################################################################
#
###################################################################################################
|
[
"26749617+GiantMolecularCloud@users.noreply.github.com"
] |
26749617+GiantMolecularCloud@users.noreply.github.com
|
b46d4b24129fca46f968e5c52580c6c91b8043f9
|
979ee8dcf0ca0c4c249809bbac86c7a781c2e98d
|
/tensorflow_datasets/image/mnist.py
|
265e602b62665c4d4d327b3bdd0e63be1f6e7108
|
[
"Apache-2.0"
] |
permissive
|
MODU-FTNC/tensorflow-datasets
|
c315602bcb50830e05d6a4c8968d20e7f1d5a3f5
|
1565c40e3d8a9d6ecf186cb53117d4bd998b4437
|
refs/heads/master
| 2020-04-02T21:51:33.243567
| 2018-10-24T19:04:41
| 2018-10-24T19:05:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,017
|
py
|
# coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MNIST and Fashion MNIST."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import random
import numpy as np
import six.moves.urllib as urllib
import tensorflow as tf
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import file_format_adapter
from tensorflow_datasets.image import image_utils
# MNIST constants
_MNIST_URL = "http://yann.lecun.com/exdb/mnist/"
_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz"
_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz"
_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz"
_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz"
_MNIST_IMAGE_SIZE = 28
_TRAIN_EXAMPLES = 60000
_TEST_EXAMPLES = 10000
# URL for Fashion MNIST data.
_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1"
".amazonaws.com/")
class MNIST(dataset_builder.GeneratorBasedDatasetBuilder):
"""MNIST."""
URL = _MNIST_URL
def _dataset_split_generators(self, dl_manager):
# Download the full MNist Database
filenames = {
"train_data": _MNIST_TRAIN_DATA_FILENAME,
"train_labels": _MNIST_TRAIN_LABELS_FILENAME,
"test_data": _MNIST_TEST_DATA_FILENAME,
"test_labels": _MNIST_TEST_LABELS_FILENAME,
}
mnist_files = dl_manager.download_and_extract({
k: urllib.parse.urljoin(self.URL, v) for k, v in filenames.items()
})
# MNIST provides TRAIN and TEST splits, not a VALIDATION split, so we only
# write the TRAIN and TEST splits to disk.
train_gen = functools.partial(
_generate_mnist_examples,
nb_examples=_TRAIN_EXAMPLES,
data_path=mnist_files["train_data"],
label_path=mnist_files["train_labels"],
)
test_gen = functools.partial(
_generate_mnist_examples,
nb_examples=_TEST_EXAMPLES,
data_path=mnist_files["test_data"],
label_path=mnist_files["test_labels"],
)
train_splits = [
self._split_files(split=dataset_builder.Split.TRAIN, num_shards=10)
]
test_splits = [
self._split_files(split=dataset_builder.Split.TEST, num_shards=1)
]
return [
dataset_builder.SplitGenerator(generator_fn=train_gen,
split_files=train_splits),
dataset_builder.SplitGenerator(generator_fn=test_gen,
split_files=test_splits),
]
@property
def _file_format_adapter(self):
example_spec = {
"input/encoded": tf.FixedLenFeature(tuple(), tf.string),
"target": tf.FixedLenFeature(tuple(), tf.int64),
}
return file_format_adapter.TFRecordExampleAdapter(example_spec)
def _preprocess(self, record):
record["input"] = image_utils.decode_png(
record.pop("input/encoded"),
[_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1])
return record
def _generate_mnist_examples(nb_examples, data_path, label_path):
"""Generate MNIST examples as dicts.
Args:
nb_examples (int): The number of example.
data_path (str): Path to the data files
label_path (str): Path to the labels
Returns:
Generator yielding:
Feature dictionaries `dict<str feature_name, feature_value>` containing:
* `image/encoded`: png-encoded image
* `image/shape`: image shape
* `image/format`: "png"
* `target`: label
"""
images = _extract_mnist_images(data_path, nb_examples)
labels = _extract_mnist_labels(label_path, nb_examples)
# Shuffle the data to make sure classes are well distributed.
data = list(zip(images, labels))
random.shuffle(data)
return image_utils.image_classification_generator(data)
class FashionMNIST(MNIST):
    URL = _FASHION_MNIST_URL
def _extract_mnist_images(image_filepath, num_images):
    with tf.gfile.Open(image_filepath, "rb") as f:
        f.read(16) # header
        buf = f.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images)
        data = np.frombuffer(
            buf, dtype=np.uint8).reshape(num_images, _MNIST_IMAGE_SIZE,
                                         _MNIST_IMAGE_SIZE, 1)
        return data
def _extract_mnist_labels(labels_filepath, num_labels):
    with tf.gfile.Open(labels_filepath, "rb") as f:
        f.read(8) # header
        buf = f.read(num_labels)
        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
        return labels
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
ae8418d13bffc926264d2ff5a2082318766d25f5
|
57cf3fca43dd108f0d1626ab411177dc0604385c
|
/mozbuilddata/exporter.py
|
4504541d5dab43bb4675af50992d752b6121348f
|
[] |
no_license
|
indygreg/mozilla-build-analyzer
|
d5b8ec2a49a23b244fb214c52a60ce7a8f6bf659
|
108c81610e6b9f5cd8ebcf322d15158cea8a8c70
|
refs/heads/master
| 2021-01-11T08:31:07.125777
| 2013-08-21T22:07:14
| 2013-08-21T22:07:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import errno
import json
import os
class JSONExporter(object):
    def __init__(self, conn, path):
        self.c = conn
        self.root = path
    def export(self):
        self._mkdir('.')
        for f in ['totals', 'builder_counters']:
            func = getattr(self, '_export_%s' % f)
            for msg in func():
                yield msg
    def _mkdir(self, p):
        p = os.path.join(self.root, p)
        try:
            os.makedirs(p)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    def _active_dates(self):
        today = datetime.date.today()
        for i in range(30, 0, -1):
            yield today - datetime.timedelta(i)
    def _write_obj(self, path, obj):
        path = os.path.join(self.root, path)
        with open(path, 'wb') as fh:
            json.dump(obj, fh)
    def _export_totals(self):
        yield 'Writing totals.json'
        o = {}
        #o['file_counts'] = len(list(self.c.filenames()))
        self._write_obj('totals.json', o)
    def _export_builder_counters(self):
        yield 'Writing builder counts files.'
        self._mkdir('builder/job_counts/by-day')
        self._mkdir('builder/job_durations/by-day')
        counts = dict(self.c.builder_counts())
        self._write_obj('builder/job_counts/all.json', counts)
        for date in self._active_dates():
            df = date.isoformat()
            counts = dict(self.c.builder_counts_in_day(df))
            if not counts:
                continue
            self._write_obj('builder/job_counts/by-day/%s.json' % df, counts)
        yield 'Writing builder duration files.'
        durations = dict(self.c.builder_durations())
        self._write_obj('builder/job_durations/all.json', durations)
        for date in self._active_dates():
            df = date.isoformat()
            durations = dict(self.c.builder_durations_in_day(df))
            if not durations:
                continue
            self._write_obj('builder/job_durations/by-day/%s.json' % df,
                            durations)
        yield 'Writing per-category builder files.'
        for cat in sorted(self.c.builder_categories()):
            p = 'builder/by-category/%s' % cat
            self._mkdir(p)
            counts = dict(self.c.builder_counts_in_category(cat))
            total = sum(counts.values())
            self._write_obj('%s/job-counts.json' % p, counts)
|
[
"gregory.szorc@gmail.com"
] |
gregory.szorc@gmail.com
|
286ae249433950b017b9abd83b42271e3242490c
|
7029073a12cf93e066e2b2e51134160d8c5c2b6d
|
/tango_with_django_project/rango/migrations/0002_auto_20190606_1734.py
|
c924bc8c3da1075575b89d9c75c256e34b2979ca
|
[] |
no_license
|
eflipe/tango-with-django-and-polls
|
c85898808e2103a8b95aa290caceb5a8ce5b3bf9
|
cc91f7cf5f219ec945035e98628363ad4c56b023
|
refs/heads/master
| 2020-06-04T02:16:34.155465
| 2019-06-26T14:45:20
| 2019-06-26T14:45:20
| 191,830,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
# Generated by Django 2.1.5 on 2019-06-06 20:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='category',
name='views',
field=models.IntegerField(default=0),
),
]
|
[
"felipecabaleiro@gmail.com"
] |
felipecabaleiro@gmail.com
|
b6a3316fb8d4683775360c8b0ba25f6e49ae3e68
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03852/s966510800.py
|
2b804b35488391e38ae1b2a96d024bb448a9d5da
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
import sys
input = lambda: sys.stdin.readline().rstrip()
def main():
c = input()
str = ['a', 'i', 'u', 'e', 'o']
if c in str:
print('vowel')
else:
print('consonant')
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9cf28886a88e6788f61893d95e128594c5d472c1
|
37cde98734ebe6cc99a390c8ae2f049346ffbe88
|
/sort_for_forecast.py
|
ecec9b034a219c5940ec96d52cd2d383d1c90ced
|
[] |
no_license
|
Dimon0014/Zapis_chtenie_03_12
|
4d36efaf2667263e163cec1dd5d64c9c8f3f0422
|
d32e9eeb968ff9f6c9339a8e33219f166f757687
|
refs/heads/master
| 2021-09-09T16:40:39.751513
| 2018-03-18T05:21:33
| 2018-03-18T05:21:33
| 113,576,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
from operator import itemgetter
from collections import OrderedDict
d = {(37,36):[ 1,[1, 2], 33],(37,35):[ 11,[101, 102], 31]}
x = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}
sorted_x = (sorted(d.items(), key=lambda t: t[1][2]))  # sorts by the last number in each value list
# OrderedDict([(0, 0), (2, 1), (1, 2), (4, 3), (3, 4)])
print(sorted_x)
sorted_x = (sorted(d.items(),reverse=True, key=lambda t: t[1][2]))  # sorts by the last number in each value list, then yields the items from the end (descending)
print(sorted_x)
#print(sorted_x[0][0]) # extract the first key
#print(sorted_x[1][0]) # extract the second key
|
[
"toropov0014@mail.ru"
] |
toropov0014@mail.ru
|
20bb6ca5b8cb7ce87db17974d93530e6b68b9bf4
|
7c2dcfefcf9eec3f12095b304541048f4e23cb6f
|
/butn-eg1.py
|
123a91a0706998fec8708fb2180707c671586d07
|
[] |
no_license
|
mamaker/eupy
|
b042e6f43601235afc290c25c62e58f8622643ee
|
8700ae7f22cda3be7170cf153205e8b41a943d4b
|
refs/heads/master
| 2020-05-23T11:02:50.835477
| 2019-05-15T01:57:56
| 2019-05-15T01:57:56
| 186,731,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
butn-eg1.py
Created on Tue May 14 14:12:58 2019
@author: madhu
"""
import tkinter as tk
def write_slogan():
print("Tkinter is easy to use!")
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
button = tk.Button(frame,
text="QUIT",
fg="red",
font="Verdana 26 bold",
command=root.destroy)
button.pack(side=tk.LEFT)
slogan = tk.Button(frame,
text="Hello",
font="Verdana 26 bold",
command=write_slogan)
slogan.pack(side=tk.LEFT)
root.mainloop()
|
[
"madhuvasudevan@yahoo.com"
] |
madhuvasudevan@yahoo.com
|
a88577bd3025afd548ec507773aec881739a8541
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02402/s285849568.py
|
4fafd999b2f74b08bae833e0e846df311a76b6ef
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
#coding:utf-8
n = input()
a = []
a_input = raw_input().split()
for i in a_input:
a.append(int(i))
print min(a), max(a), sum(a)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6a72b8899c4d9357de5c92967a680fdad3f299a6
|
434b6556038ad326ffaa8584a8a91edf8ad5c037
|
/GenericTrees-1/NumNodes.py
|
fdc36e9ff4df334a4381f17d45707aef4923454a
|
[] |
no_license
|
Pranav016/DS-Algo-in-Python
|
60702460ad6639dd3e8a1fdc3caf0821b8e0b4c2
|
5557e371ccdf801d78ba123ca83c0dd47b3bdb3b
|
refs/heads/master
| 2023-01-23T08:29:32.186861
| 2020-11-01T17:14:12
| 2020-11-01T17:14:12
| 284,651,382
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
class TreeNode:
def __init__(self,data):
self.data=data
self.children=list()
def printTree(root):
if root is None:
return
print(root.data)
for child in root.children:
printTree(child)
def printTreeDetailed(root):
if root is None:
return
print(root.data,end=":")
for child in root.children:
print(child.data,end=",")
print()
for child in root.children:
printTreeDetailed(child)
def treeInput():
print("Enter root data: ")
rootData=int(input())
if rootData == -1:
return None
root=TreeNode(rootData)
print("Enter the no of children of ", rootData," :")
numChild=int(input())
for i in range(numChild):
child=treeInput()
root.children.append(child)
i+=1
return root
# Method 1
def numNodes1(root):
if root is None:
return 0
count=1
for child in root.children:
count=count+numNodes1(child)
return count
# Method 2
count=0
def numNodes2(root):
global count
if root is None:
return 0
count+=1
for child in root.children:
numNodes2(child)
return count
# main
root=treeInput()
# printTreeDetailed(root)
print(numNodes2(root))
|
[
"pranavmendi@gmail.com"
] |
pranavmendi@gmail.com
|
878eb2b6b84778f9c3205b98ff27172ba0646c63
|
f539db814fce098f71192e6b4922be53687d140e
|
/azure-cognitiveservices-vision-contentmoderator/azure/cognitiveservices/vision/contentmoderator/models/phone_properties.py
|
3899065bcfe7573352ca53101bcb53f4b00b8028
|
[
"MIT"
] |
permissive
|
marki555/azure-sdk-for-python
|
1eb7abe1de5f13db7bd6654f2eb517544e4950dc
|
adc3a3bf4ddb06ab8207bbabf8910577e96d2512
|
refs/heads/master
| 2021-08-28T08:32:50.974402
| 2017-12-11T17:36:06
| 2017-12-11T17:36:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,202
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PhoneProperties(Model):
"""Phone Property details.
:param country_code: CountryCode of the detected Phone number.
:type country_code: str
:param text: Detected Phone number.
:type text: str
:param index: Index(Location) of the Phone number in the input text
content.
:type index: float
"""
_attribute_map = {
'country_code': {'key': 'countryCode', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
'index': {'key': 'index', 'type': 'float'},
}
def __init__(self, country_code=None, text=None, index=None):
self.country_code = country_code
self.text = text
self.index = index
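# Editor addition (illustrative sketch, not part of the original SDK file):
# constructing the model defined above with made-up values.
if __name__ == "__main__":
    phone = PhoneProperties(country_code="US", text="(555) 010-0199", index=12.0)
    print(phone.country_code, phone.text, phone.index)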
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
409819f0c59ef097758fd6cba4b76ab3e963f738
|
4a240d06679e464f885228b3a08c91644b4af65d
|
/0118. Pascal's Triangle.py
|
21ea6a88fad0487d4d9331b9b87abc494ee31cd7
|
[] |
no_license
|
CaizhiXu/LeetCode-Solutions-Python-Weimin
|
f46a75797c32eb3ff8c884eea2c6e31e18a47c1e
|
48d63c98beac4260b2b2b4dff26139d19752d125
|
refs/heads/master
| 2022-02-24T06:22:36.085101
| 2019-09-18T04:03:19
| 2019-09-18T04:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# dynamic programming, space O(n), time O(n^2)
# bottom up
# https://leetcode.com/problems/pascals-triangle/
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
if numRows < 1:
return []
elif numRows == 1:
return [[1]]
res = [[1]]
for i in range(1, numRows):
vals = [1]
for j in range(i-1): # not range(i)
vals.append(res[-1][j] + res[-1][j+1])
vals.append(1)
res.append(vals)
return res
|
[
"noreply@github.com"
] |
CaizhiXu.noreply@github.com
|
ed322316c06fedeff412830f9458b976b3b6a53f
|
c828f5c86e8ae7a157bd3f212c4bd754ee04e5e7
|
/exercise_coding_test_1.py
|
e4f3de6db1535257feb85c9bf02006381dbf19d7
|
[] |
no_license
|
SeungHune/beakjun
|
c319e33f10a3dfd3acb090a7872b900ed92c5419
|
5d4610557aa56efc41053954299924ab890812f2
|
refs/heads/master
| 2020-12-03T04:37:48.054739
| 2020-11-10T10:50:17
| 2020-11-10T10:50:17
| 231,204,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
# Hash - the participant who did not finish (marathon completion problem)
def solution(participant, completion):
participant.sort()
# print(participant)
completion.sort()
# print(completion)
for i in range(len(completion)):
if participant[i] != completion[i]:
return participant[i]
return participant[-1]
print(solution(["leo", "kiki", "eden"], ["eden", "kiki"]))
print(solution(["marina", "josipa", "nikola", "vinko", "filipa"],
["josipa", "filipa", "marina", "nikola"]))
print(solution(["mislav", "stanko", "mislav", "ana"],["stanko", "ana", "mislav"]))
|
[
"xyz9080@naver.com"
] |
xyz9080@naver.com
|
9fe0d8ccd1b7e3c64db5f15e1b6b6322e84cb7b0
|
f54d702c1289b2b78f423850d7fedba6c9378126
|
/Python/Sets/set-discard-remove-pop.py
|
c14b54b2998a1f2696af860af61e098f00d6a952
|
[
"MIT"
] |
permissive
|
ekant1999/HackerRank
|
81e6ac5bec8307bca2bd1debb169f2acdf239b66
|
084d4550b4eaf130837ab26a4efdbcaf8b667cdc
|
refs/heads/master
| 2020-05-02T09:19:10.102144
| 2016-10-27T04:10:28
| 2016-10-27T04:10:28
| 177,868,424
| 0
| 0
|
MIT
| 2019-03-26T21:04:17
| 2019-03-26T21:04:17
| null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
n = input()
s = set(map(int, raw_input().split()))
c = int(raw_input())
for i in range(c):
command = raw_input().split()
if command[0] == 'pop':
s.pop()
elif command[0] == 'remove':
s.remove(int(command[1]))
elif command[0] == 'discard':
s.discard(int(command[1]))
total = 0
for x in s:
total += x
print total
|
[
"ugali_guy@hotmail.com"
] |
ugali_guy@hotmail.com
|
275d4cf639767b26b5534e0e960c0fbfebb14e65
|
1e50d8c2217ffd5dca17bf0da539ff98e22be990
|
/tests/test_utils.py
|
a3597ccb51723e9b482e201120174823b64037e1
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
matslindh/dtedifier
|
2c90ae69ed6e20559839d4ac58c8b7a8a099480c
|
daad614f4d3ee4e0b88e832f5b07128eba2f0602
|
refs/heads/master
| 2022-09-28T18:15:29.056421
| 2020-05-29T13:40:21
| 2020-05-29T13:40:21
| 267,865,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
from dtedifier.utils import (
degrees_string_to_float,
na_or_int,
empty_or_int,
convert_signed_16bfixed,
)
from pytest import (
approx
)
def test_degrees_string_to_float():
assert degrees_string_to_float('1001010E') == approx(100.16944444)
assert degrees_string_to_float('0505959E') == approx(50.99972222)
assert degrees_string_to_float('1001010W') == approx(-100.16944444)
assert degrees_string_to_float('0505959W') == approx(-50.99972222)
assert degrees_string_to_float('0505959N') == approx(50.99972222)
assert degrees_string_to_float('0505959S') == approx(-50.99972222)
assert degrees_string_to_float('424242.42') == approx(42.71178333)
def test_na_or_int():
assert na_or_int('NA ') is None
assert na_or_int('341') == 341
assert na_or_int('341 ') == 341
def test_empty_or_int():
assert empty_or_int(' ') is None
assert empty_or_int('1231 ') == 1231
def test_convert_signed_16bfixed():
assert convert_signed_16bfixed(-1) is None
assert convert_signed_16bfixed(-2) == -32766
assert convert_signed_16bfixed(2) == 2
assert convert_signed_16bfixed(32767) == 32767
|
[
"mats@lindh.no"
] |
mats@lindh.no
|
32374aebf85565af27a3add23cec07466aaa07aa
|
e121dcc5d23e225891420e730549b9cc7ebe8e88
|
/python/lib/direct/extensions_native/VBase3.py
|
f54e3e8a89de985505c6438f24eb3c6957a91610
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
PlumpMath/panda3d-3
|
4f4cf7627eddae9b7f30795e0a0657b01fdf670d
|
5c0be0e1cd46b422d28d5b81ffb1e8b28c3ac914
|
refs/heads/master
| 2021-01-25T06:55:36.209044
| 2014-09-29T14:24:53
| 2014-09-29T14:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
"""
Methods to extend functionality of the VBase3 class
"""
from panda3d.direct.extensions_native.Helpers import *
Dtool_PreloadDLL("panda")
from panda import *
def pPrintValues(self):
"""
Pretty print
"""
return "% 10.4f, % 10.4f, % 10.4f" % (self[0], self[1], self[2])
Dtool_funcToMethod(pPrintValues, VBase3)
del pPrintValues
def asTuple(self):
"""
Returns the vector as a tuple.
"""
print "Warning: VBase3.asTuple() is no longer needed and deprecated. Use the vector directly instead."
return tuple(self)
Dtool_funcToMethod(asTuple, VBase3)
del asTuple
|
[
"ralf.kaestner@gmail.com"
] |
ralf.kaestner@gmail.com
|
f0b7b2b37d848ed15bfc888c0eba84a61cbd2f51
|
5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba
|
/algorithm-study/leetcode/climbing-stairs.py
|
ca3dca47f2b76b821331f45202cfeefc9033a5d6
|
[] |
no_license
|
namujinju/study-note
|
4271b4248b3c4ac1b96ef1da484d86569a030762
|
790b21e5318a326e434dc836f5f678a608037a8c
|
refs/heads/master
| 2023-02-04T13:25:55.418896
| 2020-12-26T10:47:11
| 2020-12-26T10:47:11
| 275,279,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
def climbStairs(n):
if n <= 2:
return n
arr = [1, 2]
for i in range(n-2):
arr.append(arr[-1]+arr[-2])
return arr[n-1]
n = 4
print(climbStairs(n))
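# Editor addition: climbStairs follows the Fibonacci-style recurrence
# f(n) = f(n-1) + f(n-2) with f(1) = 1 and f(2) = 2. Worked trace for n = 4:
# arr = [1, 2] -> append 1+2=3 -> append 2+3=5 -> arr = [1, 2, 3, 5], so
# climbStairs(4) returns arr[3] == 5 (matching the print above).
assert climbStairs(2) == 2
assert climbStairs(4) == 5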
|
[
"59328810+namujinju@users.noreply.github.com"
] |
59328810+namujinju@users.noreply.github.com
|
2bbf4d931135df1477a8e41fcae63595118ead1e
|
8076124f4087781e0513dbe09c0f43dc6a861ab0
|
/src/sentry/mediators/external_requests/util.py
|
f3775d3ac28f7b2ab5638ec645a128b03c78f206
|
[
"BSD-2-Clause"
] |
permissive
|
sharmapacific/sentry
|
75e3356f87cb5a1e812e0974b081fd47852dfe33
|
fceabe7cb84de587fe05b2c36edc013058e7e55a
|
refs/heads/master
| 2020-08-19T00:13:48.748983
| 2019-10-17T17:09:06
| 2019-10-17T17:09:06
| 215,851,537
| 1
| 0
|
BSD-3-Clause
| 2019-10-17T17:43:49
| 2019-10-17T17:43:49
| null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
from __future__ import absolute_import
from jsonschema import Draft4Validator
SELECT_OPTIONS_SCHEMA = {
"type": "array",
"definitions": {
"select-option": {
"type": "object",
"properties": {"label": {"type": "string"}, "value": {"type": "string"}},
"required": ["label", "value"],
}
},
"properties": {"type": "array", "items": {"$ref": "#definitions/select-option"}},
}
ISSUE_LINKER_SCHEMA = {
"type": "object",
"properties": {
"webUrl": {"type": "string"},
"identifier": {"type": "string"},
"project": {"type": "string"},
},
"required": ["webUrl", "identifier", "project"],
}
SCHEMA_LIST = {"select": SELECT_OPTIONS_SCHEMA, "issue_link": ISSUE_LINKER_SCHEMA}
def validate(instance, schema_type):
schema = SCHEMA_LIST[schema_type]
v = Draft4Validator(schema)
if not v.is_valid(instance):
return False
return True
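# Editor addition (illustrative sketch, not part of the original Sentry module):
# validating an instance against ISSUE_LINKER_SCHEMA defined above. The URL and
# identifiers are placeholders.
if __name__ == "__main__":
    instance = {
        "webUrl": "https://example.com/issues/1",
        "identifier": "ISSUE-1",
        "project": "demo-project",
    }
    print(validate(instance, "issue_link"))  # True: all required string fields are present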
|
[
"noreply@github.com"
] |
sharmapacific.noreply@github.com
|
f20e2a525e425c4f6bb3fa847e3b214c7e761cdc
|
cceda0ed268253be60c549ee957804d367bc3ca3
|
/from_cpython/Lib/test/test_dictcomps.py
|
60b2dc0cb3a8e085690d52cff40c7cb34ac48b7d
|
[
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
mcanthony/pyston
|
3bc6a45e5c118fb6860427c9b0cc885dec8f5b6e
|
eed1d41307b578ff8d873b92b8b4db24775d5daf
|
refs/heads/master
| 2020-12-29T00:55:11.099535
| 2015-10-23T22:28:07
| 2015-10-23T22:28:07
| 44,902,270
| 2
| 0
| null | 2015-10-25T08:34:09
| 2015-10-25T08:34:08
| null |
UTF-8
|
Python
| false
| false
| 3,866
|
py
|
# expected: fail
import unittest
from test import test_support as support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
[
"daekharel@gmail.com"
] |
daekharel@gmail.com
|
8c8f9ea9bae907e335aa983e57ae69c1969f4aca
|
177d3f04c566e9de05a2fd651a4d91d24dfdb4d2
|
/exifcleaner.py
|
c6bd3078cdd1dbab5c09cd5d4f3ecd6c4b9344c5
|
[] |
no_license
|
luisgf/exifcleaner
|
5c7ebe941f6930f2173b6eb9f5a9ba73d91cf74f
|
de1954e7514929acba8cf02096cd06023adf770d
|
refs/heads/master
| 2021-01-10T19:43:31.690766
| 2015-12-28T13:21:12
| 2015-12-28T13:21:12
| 35,278,798
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
#!/usr/bin/env python3
"""
EXIF Cleaner. A tool to clean EXIF metadata from files
Luis González Fernández (c) 2015
luisgf at luisgf . es
"""
import os
import sys
from subprocess import check_call, CalledProcessError, DEVNULL
class ExifCleaner():
def __init__(self, folder_list=None, verbose=False):
self.folders = folder_list
self.verbose = verbose
self.errors = []
def clean_exif(self, path):
""" Clean EXIF metadata using exiv2 """
try:
args = ['exiv2', 'rm', path]
check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL)
if self.verbose:
print('File %s cleaned' % path)
except FileNotFoundError:
print('exiv2 not found. Please install it!')
sys.exit(-1)
except CalledProcessError as e:
if self.verbose:
print('Error cleaning EXIF in %s' % path)
if path not in self.errors:
self.errors.append(path)
def check_exif_presence(self, path):
""" Check the EXIF metadata presence in a given file """
rc = False
try:
args = ['exiv2', 'pr', path]
check_call(args, shell=False, stdout=DEVNULL, stderr=DEVNULL)
rc = True # File has exif, rc=0 running exiv2
        except CalledProcessError as e:
            if e.returncode == 253:
pass # File hasn't exif
else:
raise
finally:
return rc
def Start(self):
wiped = 0 # Num of wiped files
for folder in self.folders:
if self.verbose:
print('Cleaning: %s' % folder)
for path in os.listdir(folder):
file_path = os.path.join(folder, path)
if self.check_exif_presence(file_path):
self.clean_exif(file_path)
wiped += 1
print('EXIF data cleaned in %d Files. Errors %d' % (wiped,len(self.errors)))
def has_errors(self):
""" Return True if some file has errors """
return True if len(self.errors) > 0 else False
def show_errors(self):
""" Show the errors after execution """
if self.errors:
print('Clean error in:')
for file in self.errors:
print(' %s' % file)
def set_verbose(self, value):
self.verbose = bool(value)
def set_folders(self, folders):
""" Set the folder list to check """
self.folders = folders
if __name__ == '__main__':
params = [param for param in sys.argv]
params.pop(0)
exif = ExifCleaner()
if '-v' in params:
exif.set_verbose(True)
params.pop(params.index('-v'))
    if len(params) == 0:
print('Please, pass a list of folders to check as parameter')
        print('Example: %s /folder1 [/folder2 /folder3]' % sys.argv[0])
sys.exit(-1)
else:
exif.set_folders(params)
exif.Start()
if exif.has_errors():
            exif.show_errors()
|
[
"luisgf@luisgf.es"
] |
luisgf@luisgf.es
|
1d1b804b1e7e0fe0ee563309b987e0186f53edc9
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/mindspore/ops/_op_impl/tbe/layer_norm.py
|
2414b9bcb3d9ac3edd5a9d50cc364f6a9c0e3a43
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 1,921
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LayerNorm op"""
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
layer_norm_op_info = TBERegOp("LayerNorm") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("layer_norm.so") \
.compute_cost(10) \
.kernel_name("layer_norm") \
.partial_flag(True) \
.attr("begin_norm_axis", "required", "int", "all") \
.attr("begin_params_axis", "required", "int", "all") \
.attr("epsilon", "optional", "float", "all") \
.input(0, "x", False, "required", "all") \
.input(1, "gamma", False, "required", "all") \
.input(2, "beta", False, "required", "all") \
.output(0, "y", False, "required", "all") \
.output(1, "mean", False, "required", "all") \
.output(2, "variance", False, "required", "all") \
.op_pattern("dynamicFormat") \
.dtype_format(DataType.F16_None, DataType.F16_None, DataType.F16_None, DataType.F16_None,
DataType.F16_None, DataType.F16_None) \
.dtype_format(DataType.F32_None, DataType.F32_None, DataType.F32_None, DataType.F32_None,
DataType.F32_None, DataType.F32_None) \
.get_op_info()
@op_info_register(layer_norm_op_info)
def _layer_norm_tbe():
"""LayerNorm TBE register"""
return
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
1d3d42f917cfbf6d11fe41892de5d8a866f19c15
|
dad45eb1fb0505a7b515c68eda08f004ca0d6c0e
|
/algorithms/ellipticCurves.py
|
e654bc34b8fecf2d1f8416ad8ddb5592f79a7c73
|
[
"MIT"
] |
permissive
|
jaanos/kirv
|
8132785436a834460725d46ee9089c685f1cedda
|
8ea0a106a1eee1f22d46c6613f09f533678d2ed1
|
refs/heads/master
| 2023-01-06T05:26:19.075970
| 2022-12-27T17:50:54
| 2022-12-27T17:50:54
| 112,191,005
| 7
| 3
|
MIT
| 2020-10-21T17:59:26
| 2017-11-27T12:01:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,819
|
py
|
from .euclidean import inverse
from .util import xxrange
def points(params):
"""
Find all points on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
a, b, p = params
sqrt = {x: [] for x in xxrange(p)}
sqrt[0].append(0)
for x in xxrange(1, (p+1)//2):
sqrt[x*x % p].append(x)
sqrt[x*x % p].append(p-x)
return [()] + sum([[(x, y) for y in sqrt[(x**3 + a*x + b) % p]]
for x in xxrange(p)], [])
def pointSum(P, Q, params):
"""
Compute the sum of the points P and Q
on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
if P == ():
return Q
elif Q == ():
return P
a, b, p = params
Px, Py = P
Qx, Qy = Q
if Px == Qx:
if Py == Qy:
lm = (3*Px*Px + a) * inverse(2*Py, p) % p
else:
return ()
else:
lm = (Qy-Py) * inverse(Qx-Px, p) % p
x = (lm*lm - Px - Qx) % p
y = (lm*(Px - x) - Py) % p
return (x, y)
def pointMultiply(k, P, params, trace = False):
"""
Compute the multiple of the point P by the scalar k
on an elliptic curve y^2 = x^3 + ax + b
over a field with p elements with characteristic greater than 3.
"""
a, b, p = params
if k == 0:
return ()
elif k < 0:
k = -k
x, y = P
P = (x, p-y)
Q = ()
if trace:
r, s = 0, 1
while k > 0:
if k % 2 == 1:
Q = pointSum(P, Q, (a, b, p))
if trace:
r += s
print("%dP = %s" % (r, Q))
P = pointSum(P, P, (a, b, p))
k //= 2
if trace:
s *= 2
print("%dP = %s" % (s, P))
return Q
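# Editor addition (illustrative sketch, not part of the original module): exercising
# the helpers above on the small curve y^2 = x^3 + 2x + 3 over GF(97). The point
# (3, 6) lies on the curve since 3^3 + 2*3 + 3 = 36 = 6^2 (mod 97); the curve and
# point are chosen only for demonstration. Because of the relative imports, run
# this as part of the package (e.g. `python -m <package>.ellipticCurves`).
if __name__ == "__main__":
    params = (2, 3, 97)
    P = (3, 6)
    print(len(points(params)))          # number of points, including the identity ()
    print(pointSum(P, P, params))       # 2P via the tangent (doubling) formula
    print(pointMultiply(5, P, params))  # 5P via double-and-add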
|
[
"janos.vidali@fmf.uni-lj.si"
] |
janos.vidali@fmf.uni-lj.si
|
d0ee3b1ff7c46b38cc69612bc693a3c46b9c79fa
|
dff65cb5c1c68a452830650e2df0efb9351b715a
|
/2-MAY-2016_Assignment/python/removeat.py
|
3491e9ac84b0ba1c847c06fdfb6194964876b298
|
[] |
no_license
|
vishnusak/DojoAssignments
|
194f2ff5f200d431110d89a81be1341d2f565055
|
fb6b5384016e58490fbe6999a4651743fcde8692
|
refs/heads/master
| 2020-04-06T09:20:44.291863
| 2016-10-26T13:30:05
| 2016-10-26T13:30:05
| 55,562,579
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,073
|
py
|
# Remove At
# Given array and an index into array, remove and return the array value at that index. Do this without using built-in array methods except pop() . Think of PopFront(arr) as equivalent to RemoveAt(arr,0) .
# steps:
# 1 - assign value of required index to the return variable
# 2 - move all elements from that index to last 1 spot to the left
# 3 - remove the last element of the array
# can be done using the built-in function remove(element)
def removeAt(arr, idx):
val = arr[idx]
for i in range(idx, (len(arr) - 1)):
arr[i] = arr[i+1]
arr.remove(arr[-1])
return val
my_array = [1,2,3,4,5,6,7,8,9]
index = 4
print("\nThe existing array is {}").format(my_array)
print("The length of existing array is {}").format(len(my_array))
print("The index at which value should be removed is {}\n").format(index)
removed_val = removeAt(my_array, index)
print("The removed value is {}").format(removed_val)
print("The array after removing the value is {}").format(my_array)
print("The length of array now is {}").format(len(my_array))
|
[
"vishnusak@gmail.com"
] |
vishnusak@gmail.com
|
64ac7d923348dc3b2424ff077f7bbb9981d7d925
|
31014bf4464a5fae77ff86241ae15cfdd71ccb9e
|
/gnomics/objects/anatomical_structure_files/caro.py
|
585227e09243676728f164cf37246900f31a4e24
|
[
"BSD-3-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
izhangcd/Gnomics
|
14de8f90960f88d3eb2f2a49c94fa3a0f8048a2d
|
bd0fb4e7be009b2afe1c2667f2890c712ae0ad9d
|
refs/heads/master
| 2021-09-09T03:42:40.953105
| 2018-03-13T16:05:17
| 2018-03-13T16:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,021
|
py
|
#!/usr/bin/env python
#
#
#
#
#
#
# IMPORT SOURCES:
#
#
#
# Common Anatomy Reference Ontology (CARO).
#
# PRE-CODE
import faulthandler
faulthandler.enable()
# IMPORTS
# Imports for recognizing modules.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
# Import modules.
from gnomics.objects.user import User
import gnomics.objects.anatomical_structure
import gnomics.objects.auxiliary_files.identifier
# Other imports.
import json
import requests
import timeit
# MAIN
def main():
caro_unit_tests()
# Return CARO ID.
def get_caro_id(anat, user=None):
caro_array = []
for iden in gnomics.objects.auxiliary_files.identifier.filter_identifiers(anat.identifiers, ["caro", "caro id", "caro identifier"]):
if iden["identifier"] not in caro_array:
caro_array.append(iden["identifier"])
return caro_array
# UNIT TESTS
def caro_unit_tests():
print("NOT FUNCTIONAL.")
# MAIN
if __name__ == "__main__": main()
|
[
"charles.kronk@gmail.com"
] |
charles.kronk@gmail.com
|
eb703ff151fe9213cc4a4e74f1ae4110553b205c
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/third_party/llvm/expand_cmake_vars.py
|
2197698837f1d209a363f796d8db6886d686d70c
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 2,767
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
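# Editor addition (illustrative only, not part of the original TensorFlow file):
# how the expansion helpers behave for a hypothetical variable mapping. Shown as
# comments so the script's stdin-driven main() is left untouched.
#
#   cmake_vars = {"HAVE_FOO": "1", "VERSION": "1.2"}
#   _expand_cmakedefines("#cmakedefine HAVE_FOO ${VERSION}\n", cmake_vars)
#   #   -> '#define HAVE_FOO 1.2\n'
#   _expand_cmakedefines("#cmakedefine01 HAVE_BAR\n", cmake_vars)
#   #   -> '#define HAVE_BAR 0\n'  (HAVE_BAR is absent, so it defaults to "0")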
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
d211cd9a32864330bf938f5f881ee0e2753c328a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2737/60593/254924.py
|
559e2d68a0fc4a972e5eecb3b97eaa9b07420a80
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
a=eval(input())
cm=0
cn=0
m=0
n=0
for i in a:
if(i==m):
cm+=1
elif(i==n):
cn+=1
elif(cm==0):
m=i
cm=1
elif(cn==0):
n=i
cn=1
else:
cm-=1
cn-=1
cm=0
cn=0
for i in a:
if(i==m):
cm+=1
elif(i==n):
cn+=1
res=[]
if(cm>len(a)//3):
res.append(cm)
if(cn>len(a)//3):
res.append(cn)
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
fe566ec84f6894c4e4ceed91b8fb7202418afba3
|
8952afe242c836b516c6236cf0987676cfb7abf7
|
/TaobaoSdk/Request/SellercenterUserPermissionsGetRequest.py
|
c9f37ef7d916d816fc56b7ede1d4da4f103f2062
|
[] |
no_license
|
xieguanfu/TaobaoOpenPythonSDK
|
2fc20df983811990a2d981379c9da6c1117f9f21
|
88cdab41ba19a2326aa4085c92455697bd37d8d7
|
refs/heads/master
| 2021-01-18T14:38:51.465614
| 2014-08-21T05:44:42
| 2014-08-21T05:44:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Get the permission set of the specified user, without assembling it into a tree. For a primary account, the full permission list is returned; for a sub-account, all granted permissions are returned. Only account information belonging to yourself can be queried (for a primary account, the primary account and its sub-accounts; for a sub-account, the corresponding primary account and its sub-accounts).
# @author wuliang@maimiaotech.com
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Get the permission set of the specified user, without assembling it into a tree. For a primary account, the full permission list is returned; for a sub-account, all granted permissions are returned. Only account information belonging to yourself can be queried (for a primary account, the primary account and its sub-accounts; for a sub-account, the corresponding primary account and its sub-accounts).</SPAN>
# <UL>
# </UL>
class SellercenterUserPermissionsGetRequest(object):
def __init__(self):
super(self.__class__, self).__init__()
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Get the API name</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">str</SPAN>
# </LI>
# </UL>
self.method = "taobao.sellercenter.user.permissions.get"
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Timestamp; if not set, the time at which the request is sent will be used</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">int</SPAN>
# </LI>
# </UL>
self.timestamp = int(time.time())
        ## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">User identifier; this parameter must be a sub-account, e.g. zhangsan:cool. Passing only the primary account zhangsan will raise an error.</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Required</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">required</SPAN>
# </LI>
# </UL>
self.nick = None
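# Editor addition (illustrative sketch, not part of the original SDK file): building
# the request object defined above. The sub-account nick is a placeholder value.
#
#   req = SellercenterUserPermissionsGetRequest()
#   req.nick = "zhangsan:cool"
#   # req would then be passed to the SDK's request executor (not shown in this file).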
|
[
"liyangmin@maimiaotech.com"
] |
liyangmin@maimiaotech.com
|
a0256ae799eee5f88ab513b7a53e1e61b8802ff8
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_304/ch30_2019_03_22_13_13_50_872840.py
|
2dc4f4c0e8660f0b4ce64b93dc3369535be2c7b0
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
import math
v=int(input('qual é a velocidade? '))
a=int(input('qual é o angulo? '))
r=(a*math.pi)/180
d=(v**2 * (math.sin(2*r)))/9.8
if d<=98:
print('Muito perto')
elif d>=102:
print('Muito longe')
else:
print ('Acertou!')
|
[
"you@example.com"
] |
you@example.com
|
32aeab6cc7b86ecb32e647dae4e1b1f7b2d6b2b7
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/uugzpwJXKdiESZbjM_12.py
|
0b959d83852aa9daf41c4758204d29c248f28006
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
"""
Create a function that determines whether or not a player is holding a **Full
House** in their hand. A hand is represented as a list of 5 cards. A full
house is defined as a pair of cards and a three-of-a-kind.
To illustrate: `["A", "A", "A", "K", "K"]` would be a **Full House** , since
the player holds 3 aces and 2 kings.
### Examples
is_full_house(["A", "A", "A", "K", "K"]) ➞ True
is_full_house(["3", "J", "J", "3", "3"]) ➞ True
is_full_house(["10", "J", "10", "10", "10"]) ➞ False
is_full_house(["7", "J", "3", "4", "2"]) ➞ False
### Notes
N/A
"""
def is_full_house(hand):
return sorted(hand.count(card) for card in set(hand)) == [2, 3]
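# Editor addition: quick sanity checks taken from the docstring examples above.
# Sorting the per-rank counts works because a full house is exactly the count
# multiset {2, 3}, regardless of which ranks form the pair and the triple.
assert is_full_house(["A", "A", "A", "K", "K"])
assert is_full_house(["3", "J", "J", "3", "3"])
assert not is_full_house(["10", "J", "10", "10", "10"])
assert not is_full_house(["7", "J", "3", "4", "2"])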
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
496c48ec6b7c11f47ad5fbcf46c65d16d7712f7e
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2011.1/network/connection/openconnect/actions.py
|
5b670dd034baf8d5d77b5d8818ec40931144eed4
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def build():
autotools.make("OPT_FLAGS='%s' openconnect" % get.CFLAGS())
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
autotools.rawInstall("DESTDIR=%s LIBDIR=/usr/lib" % get.installDIR(), "install-lib")
pisitools.doman("openconnect.8")
pisitools.dodoc("AUTHORS", "COPYING*", "README*")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
06c6745e6392d7d569d51badf6a793b8f4123198
|
d552a3c92155d82ad146cd99ea9b8b4a3b65eab7
|
/openstack/cloud/_accelerator.py
|
b28ac1f6b37ddd125b92ebcabd89945e5482d8e2
|
[
"Apache-2.0"
] |
permissive
|
jlyheden/openstacksdk
|
600201d4fbf23fd8a4fa9a53b398b29811446051
|
7e0dcaaa4a69b17b97e746ce8de104689c60becc
|
refs/heads/master
| 2022-11-30T19:15:16.113961
| 2020-06-07T18:02:22
| 2020-06-07T18:02:23
| 270,694,856
| 0
| 0
|
Apache-2.0
| 2020-06-08T14:15:36
| 2020-06-08T14:15:35
| null |
UTF-8
|
Python
| false
| false
| 5,707
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import types so that we can reference ListType in sphinx param declarations.
# We can't just use list, because sphinx gets confused by
# openstack.resource.Resource.list and openstack.resource2.Resource.list
from openstack.cloud import _normalize
class AcceleratorCloudMixin(_normalize.Normalizer):
def list_deployables(self, filters=None):
"""List all available deployables.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of deployable info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.deployables(**filters))
def list_devices(self, filters=None):
"""List all devices.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of device info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.devices(**filters))
def list_device_profiles(self, filters=None):
"""List all device_profiles.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of device profile info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.device_profiles(**filters))
def create_device_profile(self, attrs):
"""Create a device_profile.
:param attrs: The info of device_profile to be created.
:returns: A ``munch.Munch`` of the created device_profile.
"""
return self.accelerator.create_device_profile(**attrs)
def delete_device_profile(self, name_or_id, filters):
"""Delete a device_profile.
:param name_or_id: The Name(or uuid) of device_profile to be deleted.
:returns: True if delete succeeded, False otherwise.
"""
device_profile = self.accelerator.get_device_profile(
name_or_id,
filters
)
if device_profile is None:
self.log.debug(
"device_profile %s not found for deleting",
name_or_id
)
return False
self.accelerator.delete_device_profile(name_or_id=name_or_id)
return True
def list_accelerator_requests(self, filters=None):
"""List all accelerator_requests.
:param filters: (optional) dict of filter conditions to push down
:returns: A list of accelerator request info.
"""
# Translate None from search interface to empty {} for kwargs below
if not filters:
filters = {}
return list(self.accelerator.accelerator_requests(**filters))
def delete_accelerator_request(self, name_or_id, filters):
"""Delete a accelerator_request.
:param name_or_id: The Name(or uuid) of accelerator_request.
:returns: True if delete succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(
name_or_id,
filters
)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for deleting",
name_or_id
)
return False
self.accelerator.delete_accelerator_request(name_or_id=name_or_id)
return True
def create_accelerator_request(self, attrs):
"""Create an accelerator_request.
:param attrs: The info of accelerator_request to be created.
:returns: A ``munch.Munch`` of the created accelerator_request.
"""
return self.accelerator.create_accelerator_request(**attrs)
def bind_accelerator_request(self, uuid, properties):
"""Bind an accelerator to VM.
:param uuid: The uuid of the accelerator_request to be binded.
:param properties: The info of VM that will bind the accelerator.
:returns: True if bind succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(uuid)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for unbinding", uuid
)
return False
return self.accelerator.update_accelerator_request(uuid, properties)
def unbind_accelerator_request(self, uuid, properties):
"""Unbind an accelerator from VM.
:param uuid: The uuid of the accelerator_request to be unbinded.
:param properties: The info of VM that will unbind the accelerator.
:returns:True if unbind succeeded, False otherwise.
"""
accelerator_request = self.accelerator.get_accelerator_request(uuid)
if accelerator_request is None:
self.log.debug(
"accelerator_request %s not found for unbinding", uuid
)
return False
return self.accelerator.update_accelerator_request(uuid, properties)
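# Editor addition (illustrative sketch, not part of the original SDK file): these
# mixin methods are reached through an openstacksdk connection object. The cloud
# name and profile attributes below are placeholders, and the snippet is left as
# comments because it needs a configured cloud to run.
#
#   import openstack
#   conn = openstack.connect(cloud="example-cloud")
#   for device in conn.list_devices():
#       print(device)
#   conn.create_device_profile({"name": "demo-profile", "groups": []})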
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
4941e8f118771aa5bc373cb3e248e556ca14e33a
|
c088967f6fcd2cfbae48ad9eb757935ba8783b8b
|
/nikola/data/themes/base/messages/messages_ja.py
|
4a238ccc164b4b5bf3d58505c1e34be7a882403d
|
[
"MIT"
] |
permissive
|
verbalshadow/nikola
|
281bafcf52b725bc0e54f99691d17f2a7fd9d95c
|
1e12b9525227ac223d80beb3e537a7b9eb637fc5
|
refs/heads/master
| 2021-01-18T08:26:36.867146
| 2013-09-18T15:41:39
| 2013-09-18T15:41:39
| 12,930,300
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"Also available in": "他の言語で読む",
"Archive": "過去の記事",
"Categories": "",
"LANGUAGE": "日本語",
"More posts about": "タグ",
"Newer posts": "新しい記事",
"Next post": "次の記事",
"Older posts": "過去の記事",
"Original site": "元のサイト",
"Posted": "投稿日時",
"Posts about %s": "%sについての記事",
"Posts for year %s": "%s年の記事",
"Posts for {month} {year}": "{year}年{month}月の記事",
"Previous post": "前の記事",
"Read in English": "日本語で読む",
"Read more": "続きを読む",
"Source": "ソース",
"Tags and Categories": "",
"Tags": "タグ",
"old posts page %d": "前の記事 %dページ目",
}
|
[
"ralsina@netmanagers.com.ar"
] |
ralsina@netmanagers.com.ar
|
abcf3cb3d023f19268c08f7a3479b54c66c941e0
|
83762584d226f2c9ccbf42d5f745cf95baa71247
|
/Practice/Word palindrome.py
|
9ddf33de50fdce2ac8d3557ad0f48c486b28adae
|
[] |
no_license
|
anmolpanwar/python-practice
|
85f420e57b8b3e4295b5759b451e6b2673731b6f
|
831d8c6eeed8ff08a5d282bdac8c897f39dd4c6f
|
refs/heads/master
| 2020-12-18T22:52:14.226202
| 2020-02-05T06:22:14
| 2020-02-05T06:22:14
| 235,538,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 315
|
py
|
def is_palindrome(word):
newword = word.lower()
list1 = list(newword.strip())
strn = ''
list1.reverse()
for i in list1:
strn+=i
pal = newword+strn
if strn == newword:
print True
else:
print "Palindrome of this will be: " + pal
is_palindrome('dfsbj')
|
[
"anmolpanwar8@gmail.com"
] |
anmolpanwar8@gmail.com
|
c79adfc9e9b5025797457c31a03c1ee87cd7922f
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/96/usersdata/164/53660/submittedfiles/estatistica.py
|
50b28060dc86c4a798dc697d40a8e01a37e12e8e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
# -*- coding: utf-8 -*-
n=int(input('Digite n: '))
a=[]
b=[]
somaA=0
difquadA=0
resultadoA=0
mediaA=0
for z in range (1, n+1, 1):
valorA=float(input('Valor da lista A: '))
a.append(valorA)
for i in range(0, len(a), 1):
somaA=somaA+a[i]
resultadoA=somaA/len(a)
for j in range (0, len(a), 1):
difquadA=(a[j]-resultadoA)**2
desvioA=difquadA/len(a)
print(resultadoA)
print(desvioA)
#Based on the function above, write a function to compute the standard deviation of a list
#Finally, write the main program, which reads the input and calls the functions you created.
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e74dde33ebc5b1089f1e3842a3e27d7443ec4650
|
7680dbfce22b31835107403514f1489a8afcf3df
|
/Exercícios_parte_1/exercício__017.py
|
7f8da7cc45144d76b466704e28a2b16706f28d93
|
[] |
no_license
|
EstephanoBartenski/Aprendendo_Python
|
c0022d545af00c14e6778f6a80f666de31a7659e
|
69b4c2e07511a0bd91ac19df59aa9dafdf28fda3
|
refs/heads/master
| 2022-11-27T17:14:00.949163
| 2020-08-03T22:11:19
| 2020-08-03T22:11:19
| 284,564,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
# legs and hypotenuse of a right triangle
import math
co = float(input('Comprimento do cateto oposto:'))
ca = float(input('Comprimento do cateto adjacente:'))
h = math.sqrt(co**2+ca**2)
print('A hipotenusa deste triângulo retângulo irá medir {:.2f}'.format(h))
# the math module also provides a built-in function for the hypotenuse
print('A hipotenusa deste triângulo retângulo irá medir {:.2f}'.format(math.hypot(co, ca)))
|
[
"noreply@github.com"
] |
EstephanoBartenski.noreply@github.com
|
e6920e519350bed9d9f0b08101a7efa981861c19
|
efb32799a616432b9cf90113d042bd45d889ef99
|
/jel/test/test_ast.py
|
4f9eb086fabaadcdc8389f4e4dde503ecf7900f0
|
[] |
no_license
|
cstawarz/jel
|
859216fd0dc63a9a85810263d42b662967c00788
|
b01ca2127c0c317aaf9142dcd6c9441154e8830e
|
refs/heads/master
| 2020-04-29T14:58:58.448508
| 2014-03-31T18:18:41
| 2014-03-31T18:18:41
| 3,422,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
from __future__ import division, print_function, unicode_literals
import unittest
from .. import ast
class Node(ast.AST):
_fields = ('foo', 'bar')
class DerivedNode(Node):
pass
class OtherNode(ast.AST):
pass
class TestAST(unittest.TestCase):
def test_lineno_and_lexpos(self):
n = OtherNode()
self.assertEqual(-1, n.lineno)
self.assertEqual(-1, n.lexpos)
n = OtherNode(lineno=12, lexpos=34)
self.assertEqual(12, n.lineno)
self.assertEqual(34, n.lexpos)
def test_missing_field(self):
with self.assertRaises(KeyError) as cm:
Node(foo=1)
self.assertEqual('bar', cm.exception.args[0])
def test_invalid_field(self):
with self.assertRaises(TypeError) as cm:
DerivedNode(foo=1, bar=2, blah=3)
self.assertEqual("DerivedNode has no field 'blah'",
cm.exception.args[0])
def test_repr(self):
self.assertEqual('Node(foo=1, bar=2)', repr(Node(foo=1, bar=2)))
self.assertEqual('DerivedNode(foo=3, bar=4)',
repr(DerivedNode(foo=3, bar=4)))
self.assertEqual('OtherNode()', repr(OtherNode()))
def test_equality(self):
n1 = Node(foo=1, bar=2)
n2 = Node(foo=1, bar=2)
n3 = Node(foo=1, bar=3)
self.assertTrue(n1 == n1)
self.assertTrue(n1 == n2)
self.assertFalse(n1 == n3)
self.assertFalse(n1 != n1)
self.assertFalse(n1 != n2)
self.assertTrue(n1 != n3)
d = DerivedNode(foo=n1.foo, bar=n1.bar)
self.assertEqual(n1.foo, d.foo)
self.assertEqual(n1.bar, d.bar)
self.assertFalse(n1 == d)
self.assertFalse(d == n1)
self.assertTrue(n1 != d)
self.assertTrue(d != n1)
o1 = OtherNode()
o2 = OtherNode()
self.assertTrue(o1 == o1)
self.assertTrue(o1 == o2)
self.assertFalse(o1 == n1)
self.assertFalse(o1 != o1)
self.assertFalse(o1 != o2)
self.assertTrue(o1 != n1)
|
[
"cstawarz@mit.edu"
] |
cstawarz@mit.edu
|
22c25bd8ee4081e8a410ce1a2069675d52ed0986
|
bf397e60bba27b649084966aee686869c7df595d
|
/PythonNet/day04/code/fork_getpid_son.py
|
fcbdab4d16a7bc28a60e6498331d6ef65489308e
|
[] |
no_license
|
demo112/1807
|
3783e37f7dab3945a3fc857ff8f77f4690012fbe
|
9b921c90b3003226d919017d521a32da47e546ad
|
refs/heads/master
| 2022-12-01T10:50:24.086828
| 2018-12-06T09:48:14
| 2018-12-06T09:48:14
| 150,758,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
import os
from time import sleep
pid = os.fork()
if pid < 0:
print('创建进程失败')
elif pid == 0:
sleep(0.1)
print('这是新的进程')
print('Child Get PID:', os.getpid())
print('Child Get Parent PID:', os.getppid())
else:
print("这是原有进程")
print('Parent get PID:', os.getpid())
print('Parent get Child PID:', pid)
print('演示完毕')
|
[
"huafengdongji@hotmail.com"
] |
huafengdongji@hotmail.com
|
c52935dd962d4d9ac6a315d564c60e248c2169ad
|
d3cc9db967b05c740db85ed31358701434900aaa
|
/code/python/caffe/draw.py
|
ae7ec76eb5f7e3484ec332370e47be1f48cc80ab
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
minar09/LIP-SSL-Caffe
|
371134b4aaadae3371dbbe450fc9a44afa368b53
|
089d437844a7d15352199b55bf80e429f1d63e4a
|
refs/heads/master
| 2020-04-13T10:43:27.677741
| 2019-01-26T14:10:23
| 2019-01-26T14:10:23
| 163,150,964
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,663
|
py
|
"""
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of the
Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
layer : ?
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(
layer.convolution_param.kernel_size._values) else 1,
separator,
layer.convolution_param.stride[0] if len(
layer.convolution_param.stride._values) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name,
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png'):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
Returns
-------
string :
Postscript representation of the graph.
"""
return get_pydot_graph(caffe_net, rankdir).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR'):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext))
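# Illustrative usage (a sketch, not part of the original module). The prototxt
# path below is a placeholder for your own network definition:
#
#   from google.protobuf import text_format
#   net = caffe_pb2.NetParameter()
#   with open('train_val.prototxt') as fid:
#       text_format.Merge(fid.read(), net)
#   draw_net_to_file(net, 'net.png', rankdir='LR')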
|
[
"minar09.bd@gmail.com"
] |
minar09.bd@gmail.com
|
c2b29616b93803713a5bd6a6203584e68b80e826
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02623/s631394286.py
|
e65dea6135c3f5910ed9740aa8f22592b2ef34ca
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
import sys
input = sys.stdin.readline
n, m, k = list(map(int, input().split()))
a = list(map(int, input().split()))
b = list(map(int, input().split()))
# max number of books that fit within k when reading only from pile A
s = 0
j = 0
for i in range(n):
if s + a[i] > k:
break
else:
s += a[i]
j += 1
ans = [j]
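# Then take books from pile B one at a time; whenever the total reading time
# exceeds k, drop books from the tail of the chosen A-prefix (two-pointer sweep).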
for l in range(m):
flag = True
s += b[l]
while s > k:
s -= a[j-1]
j -= 1
if j < 0:
flag = False
break
if not flag:
break
else:
ans.append(l + 1 + j)
#print(ans)
print(max(ans))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d8f15f849c53f1756f8f4388019e73e0e3cbef1d
|
87fbed6f08a01437ecfc31eec3eb8a6558721678
|
/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py
|
62d6a55942f885c15da7aebb7a6ff622aeaafbe3
|
[
"Apache-2.0",
"FreeBSD-DOC",
"MIT",
"DOC"
] |
permissive
|
hdinsight/bigtop
|
4427324380b3375741f816e9249f7fc910f80037
|
568252ea8fe5bd2c1bc50833501fef4d5a48bf0e
|
refs/heads/master
| 2020-05-26T14:21:28.808326
| 2017-03-23T03:33:12
| 2017-03-23T03:33:12
| 82,482,997
| 0
| 1
| null | 2017-03-23T03:33:13
| 2017-02-19T19:15:23
|
Java
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import unittest
class TestDeploy(unittest.TestCase):
"""
Trivial deployment test for Apache Bigtop Kafka.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('kafka', charm='kafka')
cls.d.add('zookeeper', charm='cs:xenial/zookeeper')
cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')
cls.d.setup(timeout=1800)
cls.d.sentry.wait_for_messages({'kafka': 'ready'}, timeout=1800)
cls.kafka = cls.d.sentry['kafka'][0]
def test_deploy(self):
"""
Simple test to make sure the Kafka java process is running.
"""
output, retcode = self.kafka.run("pgrep -a java")
assert 'Kafka' in output, "Kafka daemon is not started"
if __name__ == '__main__':
unittest.main()
|
[
"kevin.monroe@canonical.com"
] |
kevin.monroe@canonical.com
|
aff7f9a5f0b21762a135c562ae38e61d20efde11
|
90bf2ffa7ee75ff266238bffd1b3edc6f83a2bbe
|
/WebApp_DataSupport/Pharmacy_store_database/RiteAids/step1_taskplan.py
|
7a3f98b103ddd1e26984a095aa745ab90d3cc6ff
|
[] |
no_license
|
MacHu-GWU/EFA-finished-projects
|
f7cf5e0f765aba78db2c1dd8729accff443aa6ee
|
88c93b0e1c5880b710c11ef93254a732573c92ee
|
refs/heads/master
| 2021-03-13T00:11:15.580259
| 2014-10-06T15:20:15
| 2014-10-06T15:20:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,081
|
py
|
##coding=utf8
''' THIS IS THE SCRIPT TO CRAWL RITEAID STORE LOCATION AND DETAIL INFORMATION
'''
from LinearSpider.crawler import Crawler, Taskplanner
from LinearSpider.jt import *
import bs4
import re, pprint
import jsontree
import itertools
'''
Level 1: the entry page, which lists the URLs of all Rite Aid stores
https://www.riteaid.com/store-site-map
Level 2: an individual Rite Aid store page, which holds the detailed information
https://www.riteaid.com/store-details?storeNumber=01140
'''
def step1_taskplan():
    '''Set up the constants used inside this function'''
spider = Crawler()
TP = Taskplanner()
base_url = 'https://www.riteaid.com'
entrance_url = 'https://www.riteaid.com/store-site-map'
    TP.todo.setdefault(entrance_url, {'data': None} )  # pre-allocate a slot for the next step (filled in once the current page has been crawled)
    html = spider.html(entrance_url)  # start crawling
if html:
soup = bs4.BeautifulSoup(html)
for a in soup.findAll(href = re.compile(r'https://www.riteaid.com/store-details\?storeNumber=\d*')):
TP.todo[entrance_url].setdefault( a['href'],
{'data': a.text} )
TP._dump_todo('riteaid-task.json', replace = True)
def validate(phone, hours, additional_info, detail):
    # phone must be 14 characters long, e.g. (202)-001-1234; hours must contain
    # the four entries Mon-Thur, Fri, Sat and Sun
    if len(hours) == 4:
# if (len(phone) == 14) & (len(hours) == 4):
return True
else:
return False
def step2_download():
spider = Crawler()
TP = Taskplanner()
    TP._load_todo('riteaid-task.json')  # same file name as written by step1_taskplan
base_url = 'https://www.riteaid.com'
entrance_url = 'https://www.riteaid.com/store-site-map'
riteaid = load_jt('riteaid_data.json')
counter = itertools.count(0)
for store_url in ignore_iterkeys(TP.todo[entrance_url] ):
        ## first handle the reference data that was passed along with the url
text = TP.todo[entrance_url][store_url]['data']
storeID, address = text.split(',', 1)
storeID, address = storeID.strip(), address.strip()
        ## then process each url page
        if storeID not in riteaid:  # only crawl stores we have not seen yet
html = spider.html(store_url)
if html:
try:
soup = bs4.BeautifulSoup(html)
''' phone number '''
phone = ''
for p in soup.findAll('p', attrs = {'class', 'padding-phone'}):
phone = p.text.replace(p.strong.text, '').strip().replace(' ', '-') # process Phone
''' hour '''
hours = list()
for ul in soup.findAll('ul', attrs = {'class', 'days'}):
hours.append( ul.text.split() ) # process Office Hour
''' additional information '''
additional_info = list()
for div in soup.findAll('div', attrs = {'id': 'eventListId'}):
for li in div.findAll('li'):
additional_info.append( li.text ) # process Additional Information
''' store detail '''
detail = {}
for div in soup.findAll('div', attrs = {'class': 'storeDetailsAttributeCategory'}):
storeDetailsAttributeCategory = div.strong.text.strip()
detail.setdefault(storeDetailsAttributeCategory, list())
for subdiv in div.findAll('div', attrs = {'class': 'storeDetailsAttribute'}):
detail[storeDetailsAttributeCategory].append(subdiv.text.strip()) # process Store Detail
## validate the information I crawled
if validate(phone, hours, additional_info, detail): # <=== validate, sometime error
print "CORRECT"
riteaid.setdefault(storeID,
{'address': address,
'phone': phone,
'hours': hours,
'additional_info': additional_info,
'detail': detail} )
dump_jt(riteaid, 'riteaid_data.json', replace = True)
                        print storeID, counter.next()  ## only count the valid records
else:
print "ERROR!", (phone, hours, additional_info, detail)
print "\t%s" % store_url
print '%s.html' % (store_url[-5:],)
with open('%s.html' % store_url[-5:], 'wb') as f:
f.write(html)
except:
pass
def unit_test():
pass
if __name__ == '__main__':
    # step1_taskplan()  # run the task plan first
step2_download()
# unit_test()
|
[
"husanhe@gmail.com"
] |
husanhe@gmail.com
|
68bbdbbe3be2d7e90ddbed3f4526c3b1b1d5c6b1
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit2900.py
|
86361f453a3ce3e1037cbed649c72e635d83d62a
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,269
|
py
|
# qubit number=4
# total number=44
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
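# Note on structure: build_oracle marks every n-bit basis state on which f
# evaluates to "1" by applying a multi-controlled X onto a target qubit, and
# make_circuit sandwiches that oracle between Hadamard layers and extra gates
# before measuring all qubits.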
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[3],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.z(input_qubit[3]) # number=39
prog.cx(input_qubit[3],input_qubit[0]) # number=40
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.y(input_qubit[3]) # number=37
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.cx(input_qubit[0],input_qubit[2]) # number=20
prog.x(input_qubit[2]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2900.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
b0beeccbba6eae48ddb4620af81cb6474b233249
|
d94b6845aeeb412aac6850b70e22628bc84d1d6d
|
/abstract_nas/train/utils.py
|
081b0270fe1231712222e899b511aca62b6b905e
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
ishine/google-research
|
541aea114a68ced68736340e037fc0f8257d1ea2
|
c1ae273841592fce4c993bf35cdd0a6424e73da4
|
refs/heads/master
| 2023-06-08T23:02:25.502203
| 2023-05-31T01:00:56
| 2023-05-31T01:06:45
| 242,478,569
| 0
| 0
|
Apache-2.0
| 2020-06-23T01:55:11
| 2020-02-23T07:59:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,379
|
py
|
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utils."""
import math
from typing import Any, Callable, Optional
from big_vision import utils as bv_utils
from big_vision.utils import create_learning_rate_schedule as bv_create_learning_rate_schedule
import flax
from flax import struct
import jax
import jax.numpy as jnp
# pytype:disable=attribute-error
@struct.dataclass
class ExponentialMovingAverage:
"""Exponential Moving Average as implemented in Tensorflow."""
# Moving average of the parameters.
state: Any
# Decay to use for the update (typical values are 0.999, 0.9999, etc...).
decay: float
# For how many steps we should just keep the new parameters instead of an
# average (useful if we don't want the initial weights to be included in the
# average).
warmup_steps: int
def update_moving_average(self, new_target,
step):
"""Updates the moving average of the target.
Args:
new_target: New values of the target (example: weights of a network
after gradient step).
step: Current step (used only for warmup).
Returns:
The updated ExponentialMovingAverage.
"""
factor = jnp.float32(step >= self.warmup_steps)
delta = step - self.warmup_steps
decay = jnp.minimum(self.decay, (1. + delta) / (10. + delta))
decay *= factor
new_target = flax.core.FrozenDict(new_target)
state = flax.core.FrozenDict(self.state)
weight_ema = jax.tree_map(lambda a, b: (1 - decay) * a + decay * b,
new_target, state)
return self.replace(state=weight_ema)
# pytype:enable=attribute-error
def create_exponential_rate_schedule(global_batch_size,
total_steps,
steps_per_epoch = None,
base = 0.0,
scale_with_batchsize = False,
warmup_steps = 0,
cooldown_steps = 0,
warmup_epochs = 0,
cooldown_epochs = 0,
**kw):
"""Creates exponential learning rate schedule.
Args:
global_batch_size: The global batch-size optionally used for scaling.
total_steps: The total number of steps to run.
steps_per_epoch: How many steps form an epoch. Needed only if anything is
passed in terms of epochs.
base: The starting learning-rate (without warmup).
scale_with_batchsize: Whether or not to scale lr automatically.
warmup_steps: how many steps to warm up for.
cooldown_steps: how many steps to cool down for.
warmup_epochs: how many epochs to warm up for.
cooldown_epochs: how many epochs to cool down for.
**kw: extra arguments specific to individual decay_types.
Returns:
A function learning_rate(step): float -> {"learning_rate": float}.
"""
# For convenience, convert {warmup,cooldown}_epochs to _steps.
assert bool(warmup_epochs) + bool(warmup_steps) < 2, "Only one!"
assert bool(cooldown_epochs) + bool(cooldown_steps) < 2, "Only one!"
if warmup_epochs:
warmup_steps = warmup_epochs * steps_per_epoch
assert warmup_steps < total_steps, "warmup_steps is >= total_steps"
if cooldown_epochs:
cooldown_steps = cooldown_epochs * steps_per_epoch
def step_fn(step):
"""Step to learning rate function."""
lr = base
# This implements the linear scaling rule following
# Goyal et al. at arxiv.org/abs/1706.02677.
# The reference batch size in literature is 256, so we scale the lr to
    # adjust to the literature lr when batch_size changes.
if scale_with_batchsize:
lr = lr * global_batch_size / 256.0
progress = (step - warmup_steps) / float(total_steps - warmup_steps)
progress = jnp.clip(progress, 0.0, 1.0)
# At the end of the training, lr should be 1.2% of original value.
# This mimic the behavior from the efficientnet paper.
end_lr_ratio = kw.get("end_lr_ratio", 0.012)
lr = lr * jnp.exp(progress * math.log(end_lr_ratio))
if warmup_steps:
lr = lr * jnp.minimum(1., step / warmup_steps)
if cooldown_steps:
lr = lr * jnp.minimum(1., (total_steps - step) / cooldown_steps)
return jnp.asarray(lr, dtype=jnp.float32)
return step_fn
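# Illustrative usage (a sketch, not part of the original module): with no warmup
# or cooldown the schedule simply decays from `base` towards `base * end_lr_ratio`.
#
#   sched = create_exponential_rate_schedule(
#       global_batch_size=256, total_steps=1000, base=0.1)
#   sched(0)    # ~0.1
#   sched(999)  # ~0.1 * 0.012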
def create_learning_rate_schedule(*args,
decay_type = "stair",
**kwargs):
if decay_type != "exponential":
return bv_create_learning_rate_schedule(*args, decay_type=decay_type,
**kwargs)
else:
return create_exponential_rate_schedule(*args, **kwargs)
bv_utils.create_learning_rate_schedule = create_learning_rate_schedule
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
a431c38e40d5c992bad2de38414efcdd2faf009f
|
9fdee128812956e1e1919a58c7f64561543abf56
|
/test/lesson7.py
|
0a2d77f3ac3db358fa074a6c257dcca8f8fe1555
|
[] |
no_license
|
OleksandrMyshko/python
|
38139b72a75d52ca0a6a5787c5e6357432ec6799
|
1caed3c05d513c0dd62d6ff77910e9596c50969f
|
refs/heads/master
| 2021-07-03T03:31:11.198438
| 2017-09-25T17:43:59
| 2017-09-25T17:43:59
| 104,762,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
class Quadrangle:
def __init__(self, line1, line2, line3, angle1, angle2):
self.line1 = line1
self.line2 = line2
self.line3 = line3
        self.angle1 = angle1
        self.angle2 = angle2
class Paralelogram(Quadrangle):
def __init__(self, line1, line2, angle):
Quadrangle.__init__(self, line1, line2, line1, angle, 180 - angle)
class Rectangle(Paralelogram):
def __init__(self, line1, line2):
Paralelogram.__init__(self, line1, line2, 90)
class Romb(Paralelogram):
def __init__(self, line, angle):
        Paralelogram.__init__(self, line, line, angle)  # a rhombus has all sides equal, so the side length is passed twice
class Squere(Rectangle, Romb):
def __init__(self, line):
Rectangle.__init__(self, line, line)
Romb.__init__(self, line, 90)
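# Quick sanity check (illustrative addition, not part of the original lesson):
if __name__ == "__main__":
    sq = Squere(4)
    print(vars(sq))  # the sides and angles stored on a 4x4 square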
|
[
"sashamushko@gmail.com"
] |
sashamushko@gmail.com
|
bd56a0f44ae46f5794196f471684b9ae4761f9dc
|
76e6d4f93078327fef8672133fc75a6f12abc240
|
/ABC173/B.py
|
6e91788c46fdff0b91536ff5bd3d7c1f1f6f90f1
|
[] |
no_license
|
adusa1019/atcoder
|
1e8f33253f6f80a91d069b2f3b568ce7a2964940
|
f7dbdfc021425160a072f4ce4e324953a376133a
|
refs/heads/master
| 2021-08-08T04:41:36.098678
| 2021-02-01T07:34:34
| 2021-02-01T07:34:34
| 89,038,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
from collections import Counter
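# Tallies how many submissions received each verdict and prints the counts in
# the fixed order AC, WA, TLE, RE.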
def solve(string):
n, *s = string.split()
c = Counter(s)
return "\n".join(f"{k} x {c[k]}" for k in "AC WA TLE RE".split())
if __name__ == '__main__':
import sys
print(solve(sys.stdin.read().strip()))
|
[
"symphony20030829@yahoo.co.jp"
] |
symphony20030829@yahoo.co.jp
|
ff8f9fb12fc8a07674151d36db80f85f4f5c9b1a
|
53c1de76b7959da4689b2be1c6508fc0d39f0e88
|
/lv1_rf.py
|
67ceb779a06ca6154fa363950114cf867d5aff64
|
[
"MIT"
] |
permissive
|
ak110/kaggle-otto
|
d1ada166f65752435ebe50ad292306eb00f91106
|
03d0bf045beeeed9754d872824c4b9649a3782a7
|
refs/heads/master
| 2021-04-12T22:55:18.201278
| 2020-06-17T02:40:54
| 2020-06-17T02:40:54
| 249,114,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,514
|
py
|
#!/usr/bin/env python3
"""
acc: 0.809
"""
# region imports
# pylint: disable=unused-import
import functools # noqa: F401
import pathlib # noqa: F401
import random # noqa: F401
import albumentations as A # noqa: F401
import numpy as np # noqa: F401
import pandas as pd # noqa: F401
import sklearn.datasets # noqa: F401
import sklearn.ensemble # noqa: F401
import sklearn.linear_model # noqa: F401
import sklearn.metrics # noqa: F401
import sklearn.model_selection # noqa: F401
import sklearn.neighbors # noqa: F401
import tensorflow as tf # noqa: F401
import tensorflow_addons as tfa # noqa: F401
import _data
import pytoolkit as tk
# endregion
num_classes = 9
nfold = 5
split_seed = 1
models_dir = pathlib.Path(f"models/{pathlib.Path(__file__).stem}")
app = tk.cli.App(output_dir=models_dir)
logger = tk.log.get(__name__)
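# Level-1 random forest baseline: `train` fits one model per CV fold, `validate`
# scores the out-of-fold predictions, and `predict` averages the per-fold test
# predictions before saving them via _data.save_prediction.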
def create_model():
return tk.pipeline.SKLearnModel(
estimator=sklearn.ensemble.RandomForestClassifier(n_jobs=-1),
nfold=nfold,
models_dir=models_dir,
score_fn=score,
predict_method="predict_proba",
)
# region data/score
def load_train_data():
dataset = _data.load_train_data()
return dataset
def load_test_data():
dataset = _data.load_test_data()
return dataset
def score(
y_true: tk.data.LabelsType, y_pred: tk.models.ModelIOType
) -> tk.evaluations.EvalsType:
return tk.evaluations.evaluate_classification(y_true, y_pred)
# endregion
# region commands
@app.command(then="validate")
def train():
train_set = load_train_data()
folds = tk.validation.split(train_set, nfold, stratify=True, split_seed=split_seed)
model = create_model()
model.cv(train_set, folds)
@app.command(then="predict")
def validate():
train_set = load_train_data()
folds = tk.validation.split(train_set, nfold, stratify=True, split_seed=split_seed)
model = create_model().load()
pred = model.predict_oof(train_set, folds)
if tk.hvd.is_master():
tk.utils.dump(pred, models_dir / "pred_train.pkl")
tk.notifications.post_evals(score(train_set.labels, pred))
@app.command()
def predict():
test_set = load_test_data()
model = create_model().load()
pred_list = model.predict_all(test_set)
pred = np.mean(pred_list, axis=0)
if tk.hvd.is_master():
tk.utils.dump(pred_list, models_dir / "pred_test.pkl")
_data.save_prediction(models_dir, test_set, pred)
# endregion
if __name__ == "__main__":
app.run(default="train")
|
[
"mark@aur.ll.to"
] |
mark@aur.ll.to
|
0d36cabbeaf2eaf5b5c22ef55030ad917ec5946f
|
85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b
|
/api/tacticalrmm/checks/migrations/0029_alter_checkresult_alert_severity.py
|
79d9fcd1af50910fe38897c0668156f57f23fe85
|
[
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sadnub/tacticalrmm
|
a4ecaf994abe39244a6d75ed2166222abb00d4f4
|
0af95aa9b1084973642da80e9b01a18dcacec74a
|
refs/heads/develop
| 2023-08-30T16:48:33.504137
| 2023-04-10T22:57:44
| 2023-04-10T22:57:44
| 243,405,684
| 0
| 2
|
MIT
| 2020-09-08T13:03:30
| 2020-02-27T01:43:56
|
Python
|
UTF-8
|
Python
| false
| false
| 502
|
py
|
# Generated by Django 4.0.3 on 2022-04-15 21:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checks', '0028_auto_20220401_2301'),
]
operations = [
migrations.AlterField(
model_name='checkresult',
name='alert_severity',
field=models.CharField(blank=True, choices=[('info', 'Informational'), ('warning', 'Warning'), ('error', 'Error')], max_length=15, null=True),
),
]
|
[
"josh@torchlake.com"
] |
josh@torchlake.com
|
73ff21278e42e7835eb250eb0db2d9850fa185fd
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Basic Programming/Implementation/Basics of Implementation/Strings/test.py
|
8b64720b4744115d68bb975ca590626995608952
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch(
"builtins.input",
side_effect=[
"3",
"1 1",
"1 3",
"100 500",
],
)
def test_case_0(self, input_mock=None):
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
self.assertEqual(text_trap.getvalue(), "YES\n" + "NO\n" + "NO\n")
if __name__ == "__main__":
unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
a538b99d09fb6b9ae2bcc408777ddcae02867ce4
|
6db7b8f3bba1fa4e9aec470373f86ef5077ed169
|
/degroofpetercam/settings.py
|
e72b50dfff81424384fd874e148b1c442854b600
|
[] |
no_license
|
hristo-grudev/degroofpetercam
|
810dbf26d36e08c890fc03bd2fdf3e77f791133d
|
f8cd6cbe665fa1817887965117f154ede9f6376d
|
refs/heads/main
| 2023-03-04T13:28:04.152094
| 2021-02-19T10:19:27
| 2021-02-19T10:19:27
| 340,332,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
BOT_NAME = 'degroofpetercam'
SPIDER_MODULES = ['degroofpetercam.spiders']
NEWSPIDER_MODULE = 'degroofpetercam.spiders'
FEED_EXPORT_ENCODING = 'utf-8'
LOG_LEVEL = 'ERROR'
DOWNLOAD_DELAY = 0
ROBOTSTXT_OBEY = True
ITEM_PIPELINES = {
'degroofpetercam.pipelines.DegroofpetercamPipeline': 100,
}
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
|
[
"hr.grudev@gmail.com"
] |
hr.grudev@gmail.com
|
fb0a573ec2a5f68e2ae5aa4aca36759cefd5d86f
|
473035074bd546694d5e3dbe6decb900ba79e034
|
/traffic fluid simulator/backend/env_3/matrices.py
|
730beac8f975054a3ac9fc8b35d692736de800ac
|
[] |
no_license
|
johny1614/magazyn
|
35424203036191fb255c410412c195c8f41f0ba5
|
a170fea3aceb20f59716a7b5088ccdcb6eea472f
|
refs/heads/master
| 2022-03-26T01:10:04.472374
| 2019-09-19T16:34:22
| 2019-09-19T16:34:22
| 171,033,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
import numpy as np
# This will need to be changed here - for now it is set up for a single env
# On the frontend this is net3
def hash_(action):
return tuple([tuple(a) for a in action])
x0 = np.array([1, 2, 3, 4, 5, 6]).transpose()
#
# -A-B-
# -E-F
# -c_D-
T = np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 1, 0, 1, 0, 0], # E
[0, 0, 0, 0, 1, 0]]) # F
# A B C D E F
A_ORANGE = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 0, 0, 0, 0, 0], # E
[0, 0, 0, 0, 1, 0]]))# F
# A B C D E F
UP_A_green = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 0, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 1, 0, 0], # D
[0, 1, 0, 0, 0, 0], # E
[0, 0, 0, 0, 1, 0]])) # F
# A B C D E F
DOWN_A_green = hash_(np.array([[0, 0, 0, 0, 0, 0], # A
[1, 1, 0, 0, 0, 0], # B
[0, 0, 0, 0, 0, 0], # C
[0, 0, 1, 0, 0, 0], # D
[0, 0, 0, 1, 0, 0], # E
[0, 0, 0, 0, 1, 0]])) # F
u = np.array([[2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6, 8, 10, 2, 4, 6,
8, 10, 2, 4, 6, 8, 10],
[1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9, 1, 3, 5, 7, 9,
1, 3, 5, 7, 9]]).transpose()
# turns = [["", "", ""],
# ["", "", ""],
# ["right_down_slightly_", "right_up_slightly_", ""], ]
|
[
"johny1614@gmail.com"
] |
johny1614@gmail.com
|
411c0adba6ce69bb6c765f142c29d111fdd4a152
|
71a91cac814ec167c4194d8446fe4f94a222a10c
|
/cems/src/main/python/usecase02_avhrr_n07_n06.py
|
c06214382200e84880e94ffefae90335d20d6f4b
|
[] |
no_license
|
bcdev/fiduceo
|
dab0b3ae6d708d7b74d4c9f17c7dedf2e68472a6
|
aea0d74c38e0f503dfe10ddc392f9e36ad420b94
|
refs/heads/master
| 2021-01-23T15:51:06.518930
| 2017-01-19T13:47:55
| 2017-01-19T13:47:55
| 40,123,794
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
from workflow import Workflow
w = Workflow('usecase02_avhrr_n07_n06', 7, '/group_workspaces/cems2/fiduceo/Software/mms/config')
w.add_primary_sensor('avhrr-n07', '1981-09-01', '1982-03-17', 'v01.2')
w.add_secondary_sensor('avhrr-n06', '1981-09-01', '1982-03-17', 'v01.2')
w.set_usecase_config('usecase-02.xml')
w.run_matchup(hosts=[('localhost', 24)])
|
[
"tom.block@brockmann-consult.de"
] |
tom.block@brockmann-consult.de
|
a87d65508e1eaf7f08e8b04b37df8871dfdf944f
|
fa148881657508f485936dd93ac9ca36072a6e87
|
/setup.py
|
35bf288f8dd73fffc4c28f395373c36f43b01f69
|
[] |
no_license
|
mwang87/qtp-mass-spec
|
c63eb04bee6581570cb4028fd34b178988ef5115
|
0e55d6187602ffaf074517a0a3672590b29365c6
|
refs/heads/master
| 2020-06-03T15:34:57.010394
| 2016-08-01T20:14:38
| 2016-08-01T20:14:38
| 64,163,383
| 0
| 0
| null | 2016-07-25T19:50:39
| 2016-07-25T19:50:38
| null |
UTF-8
|
Python
| false
| false
| 1,709
|
py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Ming Wang.
#
# Distributed under the terms of the BSD 3-clause License License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from setuptools import setup
from glob import glob
__version__ = "0.1.0-dev"
classes = """
Development Status :: 3 - Alpha
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
with open('README.rst') as f:
long_description = f.read()
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='mass-spec Qiita Type Plugin',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita Type Plugin: mass-spec',
author="Ming Wang",
author_email="miw023@ucsd.edu",
url='https://github.com/qiita-spots/qtp-mass-spec',
test_suite='nose.collector',
packages=['qtp_mass_spec'],
package_data={'qtp_mass_spec': ['support_files/config_file.cfg']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8"]},
install_requires=['click >= 3.3', 'qiita_client'],
classifiers=classifiers
)
|
[
"josenavasmolina@gmail.com"
] |
josenavasmolina@gmail.com
|
983d970ccf1b28206a311df1cc69e69010b1b3a4
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_papered.py
|
2e58967760d55c4fb726221badf00dc0b2145964
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
from xai.brain.wordbase.verbs._paper import _PAPER
# class header
class _PAPERED(_PAPER):
def __init__(self,):
_PAPER.__init__(self)
self.name = "PAPERED"
self.specie = 'verbs'
self.basic = "paper"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
8c69df3edbd1c4b38ffab45797db6c04d37e8b3e
|
0386591b51fdbf5759faef6afb8729b64a3f1589
|
/imageserver/urls.py
|
1088fcd312dee73113cfe3bcc60a5b36990fc8d2
|
[
"BSD-3-Clause"
] |
permissive
|
giscube/giscube-admin
|
1e155402e094eb4db1f7ca260a8d1402e27a31df
|
4ce285a6301f59a8e48ecf78d58ef83c3827b5e0
|
refs/heads/main
| 2023-07-11T17:23:56.531443
| 2023-02-06T15:12:31
| 2023-02-06T15:12:31
| 94,087,469
| 7
| 1
|
BSD-3-Clause
| 2023-07-07T13:22:09
| 2017-06-12T11:12:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
from django.conf import settings
from django.urls import path, re_path
from django.views.decorators.csrf import csrf_exempt
from .admin_views import RasterOptimizerView
from .views import (ImageServerMapViewerView, ImageServerTileCacheTilesView, ImageServerTileCacheView,
ImageServerWMSView)
if not settings.GISCUBE_IMAGE_SERVER_DISABLED:
urlpatterns = [
path('services/<str:service_name>/map/',
ImageServerMapViewerView.as_view(), name='imageserver-map-view'),
path('services/<str:service_name>/tilecache/',
ImageServerTileCacheView.as_view(), name='imageserver-tilecache'),
path('services/<str:service_name>/tilecache/<int:z>/<int:x>/<int:y>.<str:image_format>',
ImageServerTileCacheTilesView.as_view(), name='imageserver-tilecache-tiles'),
re_path(r'^services/(?P<service_name>[^/]+)(.*)',
csrf_exempt(ImageServerWMSView.as_view()), name='imageserver'),
path('raster_optimizer/', RasterOptimizerView.as_view(), name='raster_optimizer'),
]
|
[
"abusquets@gmail.com"
] |
abusquets@gmail.com
|
e3370fef915aee2f02a498d78b11c74e92d1404e
|
1780cb2ba112f05f94d725b6cdab5ada09d89259
|
/backend/home/migrations/0002_load_initial_data.py
|
50373280982e1b13110eecb9c509a6ec7ac3961d
|
[] |
no_license
|
crowdbotics-apps/medical-o-19581
|
257cf66392f01c760414ce0e25e12d41d831b7b0
|
84bd5e97699d2320f516627b0e950a2e6775b468
|
refs/heads/master
| 2022-11-28T16:03:58.515158
| 2020-08-15T16:58:09
| 2020-08-15T16:58:09
| 287,788,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "medical O"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">medical O</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "medical-o-19581.botics.co"
site_params = {
"name": "medical O",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
9a92432ffbb0a50fb5c847a399426c5b4fc5c0bb
|
ae87b11560c543cb678c52a28916ea2252d7aa52
|
/tests/frontend/preg.py
|
a8e833e7f06543ee5209828ad3b7b34f0f1a4468
|
[
"Apache-2.0"
] |
permissive
|
CNR-ITTIG/plasodfaxp
|
19ccf77d0be62cfa8a9b246eb6797cf64a480d80
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
refs/heads/master
| 2016-09-13T11:14:08.877399
| 2016-04-11T15:01:42
| 2016-04-11T15:01:42
| 55,975,921
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,673
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the preg front-end."""
import unittest
from dfvfs.helpers import source_scanner
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.engine import knowledge_base
from plaso.frontend import preg
from tests.frontend import test_lib
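# These tests exercise the preg front-end against two fixtures: a standalone
# SYSTEM Registry hive and a storage media image (registry_test.dd).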
class PregFrontendTest(test_lib.FrontendTestCase):
"""Tests for the preg front-end."""
def _ConfigureSingleFileTest(self, knowledge_base_values=None):
"""Configure a single file test.
Args:
knowledge_base_values: optional dict containing the knowledge base
values.
"""
self._front_end = preg.PregFrontend()
self._front_end.SetSingleFile(True)
registry_file_path = self._GetTestFilePath([u'SYSTEM'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=registry_file_path)
self._front_end.SetSourcePath(registry_file_path)
self._front_end.SetSourcePathSpecs([path_spec])
self._knowledge_base_object = knowledge_base.KnowledgeBase()
if knowledge_base_values:
for identifier, value in knowledge_base_values.iteritems():
self._knowledge_base_object.SetValue(identifier, value)
self._front_end.SetKnowledgeBase(self._knowledge_base_object)
def _ConfigureStorageMediaFileTest(self):
"""Configure a test against a storage media file."""
self._front_end = preg.PregFrontend()
self._front_end.SetSingleFile(False)
self._knowledge_base_object = knowledge_base.KnowledgeBase()
self._front_end.SetKnowledgeBase(self._knowledge_base_object)
storage_media_path = self._GetTestFilePath([u'registry_test.dd'])
test_source_scanner = source_scanner.SourceScanner()
scan_context = source_scanner.SourceScannerContext()
scan_context.OpenSourcePath(storage_media_path)
test_source_scanner.Scan(scan_context)
# Getting the most upper node.
scan_node = scan_context.GetRootScanNode()
while scan_node.sub_nodes:
scan_node = scan_node.sub_nodes[0]
self._front_end.SetSourcePath(storage_media_path)
self._front_end.SetSourcePathSpecs([scan_node.path_spec])
def testExpandKeysRedirect(self):
"""Tests the ExpandKeysRedirect function."""
self._ConfigureSingleFileTest()
registry_key_paths = [
u'\\Software\\Foobar',
u'\\Software\\Key\\SubKey\\MagicalKey',
u'\\Canons\\Blast\\Night',
u'\\EvilCorp\\World Plans\\Takeover']
self._front_end.ExpandKeysRedirect(registry_key_paths)
added_key_paths = [
u'\\Software\\Wow6432Node\\Foobar',
u'\\Software\\Wow6432Node\\Key\\SubKey\\MagicalKey']
for added_key_path in added_key_paths:
self.assertIn(added_key_path, registry_key_paths)
def testGetRegistryFilePaths(self):
"""Tests the GetRegistryFilePaths function."""
self._ConfigureSingleFileTest()
expected_paths = [u'%UserProfile%\\NTUSER.DAT']
registry_file_types = [u'NTUSER']
paths = self._front_end.GetRegistryFilePaths(registry_file_types)
self.assertEqual(sorted(paths), sorted(expected_paths))
expected_paths = [u'%SystemRoot%\\System32\\config\\SOFTWARE']
registry_file_types = [u'SOFTWARE']
paths = self._front_end.GetRegistryFilePaths(registry_file_types)
self.assertEqual(sorted(paths), sorted(expected_paths))
def testGetRegistryHelpers(self):
"""Test the GetRegistryHelpers function."""
self._ConfigureSingleFileTest()
with self.assertRaises(ValueError):
_ = self._front_end.GetRegistryHelpers()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SYSTEM'])
self.assertEquals(len(registry_helpers), 1)
registry_helper = registry_helpers[0]
file_path = self._GetTestFilePath([u'SYSTEM'])
self.assertEquals(registry_helper.path, file_path)
self._ConfigureStorageMediaFileTest()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'NTUSER'])
self.assertEquals(len(registry_helpers), 3)
registry_helper = registry_helpers[0]
registry_helper.Open()
expected_file_type = preg.REGISTRY_FILE_TYPE_NTUSER
self.assertEquals(registry_helper.file_type, expected_file_type)
self.assertEquals(registry_helper.name, u'NTUSER.DAT')
self.assertEquals(registry_helper.collector_name, u'TSK')
registry_helper.Close()
registry_helpers = self._front_end.GetRegistryHelpers(
plugin_names=[u'userassist'])
self.assertEquals(len(registry_helpers), 3)
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SAM'])
self.assertEquals(len(registry_helpers), 1)
# TODO: Add a test for getting Registry helpers from a storage media file
# that contains VSS stores.
def testGetRegistryPlugins(self):
"""Test the GetRegistryPlugin function."""
self._ConfigureSingleFileTest()
usb_plugins = self._front_end.GetRegistryPlugins(u'usb')
self.assertIsNotNone(usb_plugins)
usb_plugin_names = [plugin.NAME for plugin in usb_plugins]
self.assertIn(u'windows_usb_devices', usb_plugin_names)
self.assertIn(u'windows_usbstor_devices', usb_plugin_names)
other_plugins = self._front_end.GetRegistryPlugins(u'user')
self.assertIsNotNone(other_plugins)
other_plugin_names = [plugin.NAME for plugin in other_plugins]
self.assertIn(u'userassist', other_plugin_names)
def testParseRegistry(self):
"""Test the ParseRegistryFile and ParseRegistryKey functions."""
self._ConfigureSingleFileTest()
registry_helpers = self._front_end.GetRegistryHelpers(
registry_file_types=[u'SYSTEM'])
registry_helper = registry_helpers[0]
plugins = self._front_end.GetRegistryPluginsFromRegistryType(u'SYSTEM')
key_list = []
plugin_list = []
for plugin in plugins:
plugin_list.append(plugin.NAME)
key_list.extend(plugin.GetKeyPaths())
self._front_end.ExpandKeysRedirect(key_list)
parsed_data = self._front_end.ParseRegistryFile(
registry_helper, key_paths=key_list, use_plugins=plugin_list)
for key_parsed in parsed_data:
self.assertIn(key_parsed, key_list)
usb_parsed_data = parsed_data.get(
u'HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\Enum\\USBSTOR', None)
self.assertIsNotNone(usb_parsed_data)
usb_key = usb_parsed_data.get(u'key', None)
self.assertIsNotNone(usb_key)
expected_key_path = (
u'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Enum\\USBSTOR')
self.assertEquals(usb_key.path, expected_key_path)
data = usb_parsed_data.get(u'data', None)
self.assertIsNotNone(data)
plugin_names = [plugin.NAME for plugin in data.keys()]
self.assertIn(u'windows_usbstor_devices', plugin_names)
usb_plugin = None
for plugin in data.keys():
if plugin.NAME == u'windows_usbstor_devices':
usb_plugin = plugin
break
event_objects = data.get(usb_plugin, [])
self.assertEquals(len(event_objects), 3)
event_object = event_objects[2]
self.assertEquals(event_object.data_type, u'windows:registry:key_value')
parse_key_data = self._front_end.ParseRegistryKey(
usb_key, registry_helper, use_plugins=u'windows_usbstor_devices')
self.assertEquals(len(parse_key_data.keys()), 1)
parsed_key_value = parse_key_data.values()[0]
for index, event_object in enumerate(event_objects):
parsed_key_event = parsed_key_value[index]
self.assertEquals(
event_object.EqualityString(), parsed_key_event.EqualityString())
if __name__ == '__main__':
unittest.main()
|
[
"dfirfpi@gmail.com"
] |
dfirfpi@gmail.com
|
b35fe4f83cad4155a1a9b795961c33cce5c14570
|
7a527060afabd2e0867d5dcf4b75592b43ef5005
|
/Leetcode/二叉树/226. 翻转二叉树.py
|
dabbaea2e39fd8bfd026b47ad19dbeb63fd57c87
|
[] |
no_license
|
Stevenzzz1996/MLLCV
|
ff01a276cf40142c1b28612cb5b43e563ad3a24a
|
314953b759212db5ad07dcb18854bf6d120ba172
|
refs/heads/master
| 2023-02-10T18:11:30.399042
| 2021-01-05T12:05:21
| 2021-01-05T12:05:21
| 267,804,954
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/5/7 20:47
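# The LeetCode judge normally supplies TreeNode; the minimal stand-in below is an
# assumption added so the snippet can also run locally, not part of the original
# submission.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right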
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
|
[
"2499143041@qq.com"
] |
2499143041@qq.com
|
47421e349ecc14e6f4f3ea1699804de3ccc0655a
|
46b086b8cd119f9067e6ab50ba0038e4703d6728
|
/nlp/hotel_review_enc_dec_rnn.py
|
5f0e1658e8244cd814034527718d56be30048751
|
[] |
no_license
|
wulfebw/rnn
|
874ec8d1d53efe25ff7ab36c1cdc53019123f83a
|
7cdba1ad581c61d08d5e8c4b22f7b952da3a64e1
|
refs/heads/master
| 2021-01-21T13:11:38.211329
| 2015-09-17T21:05:47
| 2015-09-17T21:05:47
| 42,426,369
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,212
|
py
|
"""
:description: This file contains 3 classes:
(1) An encoder-decoder recurrent neural net that contains an encoder and a deocder and orchestrates forward and backward propagation through them
(2) An encoder, which takes a input sequence and generates a hidden state aka representation of that sequence
(3) A decoder, which takes a hidden state as input and generates output
"""
import numpy as np
import theano
from theano import scan
import theano.tensor as T
from pylearn2.expr.nnet import arg_of_softmax
from pylearn2.utils import sharedX
class EncoderDecoderRNN(object):
    def __init__(self,
                 encoder,
                 decoder,
                 cost=None,
                 return_indices=None):
"""
:description: A model that contains an encoder and decoder and orchestrates their combined usage and training
"""
self.encoder = encoder
self.decoder = decoder
self.cost = cost
self.return_indices = return_indices
def fprop(self, input, mask):
return self.decoder.fprop(self.encoder.fprop(input, mask))
def get_cost_updates(self, inputs, targets, mask, learning_rate=0.001, momentum=0.2):
predictions = self.fprop(inputs, mask)
if self.cost is not None:
cost = self.cost(predictions, targets)
else:
cost = T.mean(T.sqr(targets - predictions))
params = self.get_params()
# this does not work
try:
self.gparams = momentum * self.gparams + (1 - momentum) * T.grad(cost, params)
except:
self.gparams = T.grad(cost, params)
updates = [(param, param - learning_rate * gparam) for param, gparam in zip(params, self.gparams)]
return (cost, updates)
def get_params(self):
return self.encoder.params + self.decoder.params
class DecoderLSTM(object):
"""
:description: A decoder class. Takes a hidden state and generates an output sequence.
"""
def __init__(self,
n_hid,
n_classes,
layer_name,
rng=None,
return_indices=None,
param_init_range=0.02,
forget_gate_init_bias=0.05,
input_gate_init_bias=0.,
output_gate_init_bias=0.,
dropout_prob=0.0):
self.n_hid = n_hid
self.n_classes = n_classes
self.layer_name = layer_name
self.param_init_range = param_init_range
self.return_indices = return_indices
self.forget_gate_init_bias = forget_gate_init_bias
self.input_gate_init_bias = input_gate_init_bias
        self.output_gate_init_bias = output_gate_init_bias
        # default RNG when none is supplied; self.rng is used below for weight init
        self.rng = rng if rng is not None else np.random.RandomState()
        # only create random arrays once and reuse via copy()
        irange = self.param_init_range
# input-to-hidden array, used for incorporating the generated output (conditioned on output)
init_Wxh = self.rng.uniform(-irange, irange, (self.n_classes, self.n_hid))
# hidden-to-hidden array
init_Whh = self.rng.uniform(-irange, irange, (self.n_hid, self.n_hid))
# hidden-to-output array, used only by the 'softmax' portion of the decoder
init_Whx = self.rng.uniform(-irange, irange, (self.n_hid, self.n_classes))
# input-to-hidden array, used for incorporating the generated output
self.Wxh = theano.shared(value=init_Wxh, name=self.layer_name + '_Wxh', borrow=True)
self.bxh = theano.shared(value=np.zeros(self.n_hid), name='bhx', borrow=True)
# hidden-to-hidden (rows, cols) = (n_hidden, n_hidden)
self.Whh = theano.shared(value=init_Whh, name=self.layer_name + '_Whh', borrow=True)
# hidden-to-output (rows, cols) = (n_hidden, n_classes)
self.Whx = theano.shared(value=init_Whx, name=self.layer_name + '_Whx', borrow=True)
self.bhx = theano.shared(value=np.zeros(self.n_classes), name='bhx', borrow=True)
# lstm parameters
# Output gate switch
self.O_b = sharedX(np.zeros((self.n_hid,)) + self.output_gate_init_bias, name=(self.layer_name + '_O_b'))
self.O_x = sharedX(init_Wxh, name=(self.layer_name + '_O_x'))
self.O_h = sharedX(init_Whh, name=(self.layer_name + '_O_h'))
self.O_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_O_c'))
# Input gate switch
self.I_b = sharedX(np.zeros((self.n_hid,)) + self.input_gate_init_bias, name=(self.layer_name + '_I_b'))
self.I_x = sharedX(init_Wxh.copy(), name=(self.layer_name + '_I_x'))
self.I_h = sharedX(init_Whh.copy(), name=(self.layer_name + '_I_h'))
self.I_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_I_c'))
# Forget gate switch
self.F_b = sharedX(np.zeros((self.n_hid,)) + self.forget_gate_init_bias, name=(self.layer_name + '_F_b'))
self.F_x = sharedX(init_Wxh.copy(), name=(self.layer_name + '_F_x'))
self.F_h = sharedX(init_Whh.copy(), name=(self.layer_name + '_F_h'))
self.F_c = sharedX(init_Whh.copy(), name=(self.layer_name + '_F_c'))
self.params = [self.Wxh, self.bxh, self.Whh, self.Whx, self.bhx, self.O_b, self.O_x, self.O_h, self.O_c, self.I_b, self.I_x, self.I_h, self.I_c, self.F_b, self.F_x, self.F_h, self.F_c]
def fprop(self, encoding):
"""
:description: calls decode function. Just here for some consistency.
"""
return self.decode(encoding)
def decode(self, encoding):
"""
:description: decodes an encoding into an output sequence.
:type encoding: tensor3
:param encoding: a batch of encodings with the shape (n_time_steps, n_batches, n_hidden). The reason n_time_steps takes the first dimension spot is that this allows for processing with the theano.scan function.
"""
pass
def decode_step(self, ):
pass
|
[
"blake.w.wulfe@gmail.com"
] |
blake.w.wulfe@gmail.com
|
125b74638bf1b8a47bbe87218c2f4e240f9982c0
|
b6a84594f8c29d968014faaddd49abeb7537a5fc
|
/python/529.minesweeper.py
|
45ee8e1b8e473f48e0b9a9c63955466d1e6c4ffa
|
[] |
no_license
|
nickyfoto/lc
|
8a6af3df114e693e265d0ede03f4d4e1283e010e
|
3633b4df3e24968057c7d684689b931c5a8032d3
|
refs/heads/master
| 2020-09-16T19:23:07.765917
| 2020-06-07T17:18:06
| 2020-06-07T17:18:06
| 223,866,098
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,137
|
py
|
#
# @lc app=leetcode id=529 lang=python3
#
# [529] Minesweeper
#
# https://leetcode.com/problems/minesweeper/description/
#
# algorithms
# Medium (54.13%)
# Total Accepted: 38.5K
# Total Submissions: 71.1K
# Testcase Example: '[["E","E","E","E","E"],["E","E","M","E","E"],["E","E","E","E","E"],["E","E","E","E","E"]]\n[3,0]'
#
# Let's play the minesweeper game (Wikipedia, online game)!
#
# You are given a 2D char matrix representing the game board. 'M' represents an
# unrevealed mine, 'E' represents an unrevealed empty square, 'B' represents a
# revealed blank square that has no adjacent (above, below, left, right, and
# all 4 diagonals) mines, digit ('1' to '8') represents how many mines are
# adjacent to this revealed square, and finally 'X' represents a revealed
# mine.
#
# Now given the next click position (row and column indices) among all the
# unrevealed squares ('M' or 'E'), return the board after revealing this
# position according to the following rules:
#
#
# If a mine ('M') is revealed, then the game is over - change it to 'X'.
# If an empty square ('E') with no adjacent mines is revealed, then change it
# to revealed blank ('B') and all of its adjacent unrevealed squares should be
# revealed recursively.
# If an empty square ('E') with at least one adjacent mine is revealed, then
# change it to a digit ('1' to '8') representing the number of adjacent
# mines.
# Return the board when no more squares will be revealed.
#
#
#
#
# Example 1:
#
#
# Input:
#
# [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
#
# Click : [3,0]
#
# Output:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Explanation:
#
#
#
# Example 2:
#
#
# Input:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'M', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Click : [1,2]
#
# Output:
#
# [['B', '1', 'E', '1', 'B'],
# ['B', '1', 'X', '1', 'B'],
# ['B', '1', '1', '1', 'B'],
# ['B', 'B', 'B', 'B', 'B']]
#
# Explanation:
#
#
#
#
#
# Note:
#
#
# The range of the input matrix's height and width is [1,50].
# The click position will only be an unrevealed square ('M' or 'E'), which also
# means the input board contains at least one clickable square.
# The input board won't be a stage when game is over (some mines have been
# revealed).
# For simplicity, not mentioned rules should be ignored in this problem. For
# example, you don't need to reveal all the unrevealed mines when the game is
# over, consider any cases that you will win the game or flag any squares.
#
#
#
from collections import defaultdict
from pprint import pprint
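# How the solution works: each board cell is wrapped in a small Node helper.
# Clicking a mine turns it into 'X'; clicking an empty cell counts its adjacent
# mines, and cells that come out blank ('B') reveal their neighbours recursively
# via DFS.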
class Solution:
# def updateBoard(self, board: List[List[str]], click: List[int]) -> List[List[str]]:
def updateBoard(self, board, click):
class Node:
def __init__(self, r,c):
self.r = r
self.c = c
mine = self.count_mine_around()
if not mine:
board[self.r][self.c] = 'B'
else:
board[self.r][self.c] = str(mine)
def mine(self, r,c):
if board[r][c] == 'M':
return 1
else:
return 0
def border_valid(self, r,c):
return r >= 0 and r < n_rows and c >= 0 and c < n_cols
def check(self, r,c, func):
if not self.border_valid(r,c):
return 0
return func(r,c)
def count_mine_around(self):
res = 0
res += self.check(*self.up_left(),self.mine)
res += self.check(*self.up(),self.mine)
res += self.check(*self.up_right(),self.mine)
res += self.check(*self.left(),self.mine)
res += self.check(*self.right(),self.mine)
res += self.check(*self.down_left(),self.mine)
res += self.check(*self.down(),self.mine)
res += self.check(*self.down_right(),self.mine)
return res
def valid(self, r,c):
if not explored[(r,c)] and board[r][c] == 'E':
return Node(r,c)
else:
return None
def up_left(self):
return self.r - 1, self.c - 1
def up_right(self):
return self.r - 1, self.c + 1
def up(self):
return self.r - 1, self.c
def left(self):
return self.r, self.c - 1
def right(self):
return self.r, self.c + 1
def down_left(self):
return self.r + 1, self.c -1
def down(self):
return self.r + 1, self.c
def down_right(self):
return self.r + 1, self.c + 1
def explore(self, r,c):
node = self.check(r,c, self.valid)
if node:
node.dfs()
def dfs(self):
if board[self.r][self.c] == 'B':
# up_left = self.check(*self.up_left(), self.valid)
# if up_left:
# up_left.dfs()
self.explore(*self.up_left())
self.explore(*self.up())
self.explore(*self.up_right())
self.explore(*self.left())
self.explore(*self.right())
self.explore(*self.down())
self.explore(*self.down_left())
self.explore(*self.down_right())
n_rows = len(board)
n_cols = len(board[0])
r, c = tuple(click)
if board[r][c] == 'M':
board[r][c] = 'X'
return board
else:
explored = defaultdict(lambda: False)
explored[(r,c)] = True
node = Node(r,c)
node.dfs()
# pprint(board)
return board
# return board
# s = Solution()
# board = [['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'M', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E'],
# ['E', 'E', 'E', 'E', 'E']]
# click = [3,0]
# print(s.updateBoard(board, click))
# board = [["E"]]
# click = [0,0]
# print(s.updateBoard(board, click))
# board = [["E","E","E","E","E","E","E","E"],
# ["E","E","E","E","E","E","E","M"],
# ["E","E","M","E","E","E","E","E"],
# ["M","E","E","E","E","E","E","E"],
# ["E","E","E","E","E","E","E","E"],["E","E","E","E","E","E","E","E"],["E","E","E","E","E","E","E","E"],["E","E","M","M","E","E","E","E"]]
# click = [0,0]
# print(s.updateBoard(board, click) == [["B","B","B","B","B","B","1","E"],
# ["B","1","1","1","B","B","1","M"],
# ["1","2","M","1","B","B","1","1"],
# ["M","2","1","1","B","B","B","B"],
# ["1","1","B","B","B","B","B","B"],
# ["B","B","B","B","B","B","B","B"],
# ["B","1","2","2","1","B","B","B"],
# ["B","1","M","M","1","B","B","B"]])
|
[
"nickyfoto@gmail.com"
] |
nickyfoto@gmail.com
|
d9f7024ed4779cb28010988aab7423be9857cb89
|
b6c09a1b87074d6e58884211ce24df8ec354da5c
|
/344. 反转字符串.py
|
8b92858d6a5fe0caaf3151c087ae5c3b43bb1d38
|
[] |
no_license
|
fengxiaolong886/leetcode
|
a0ee12d67c4a10fb12d6ca4369762ab5b090cab1
|
4c0897bc06a297fa9225a0c46d8ec9217d876db8
|
refs/heads/master
| 2023-03-18T22:16:29.212016
| 2021-03-07T03:48:16
| 2021-03-07T03:48:16
| 339,604,263
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
"""
Write a function that reverses the input string. The input is given as a character array char[].
Do not allocate extra space for another array: you must modify the input array in place using O(1) extra space.
You may assume every character in the array is a printable ASCII character.
Example 1:
Input: ["h","e","l","l","o"]
Output: ["o","l","l","e","h"]
Example 2:
Input: ["H","a","n","n","a","h"]
Output: ["h","a","n","n","a","H"]
"""
def reverseString(s):
n = len(s)
for i in range(n//2):
# print(i)
s[i], s[n-i -1] = s[n-i-1], s[i]
return s
print(reverseString(["h","e","l","l","o"]))
print(reverseString(["H","a","n","n","a","h"]))
|
[
"xlfeng886@163.com"
] |
xlfeng886@163.com
|
d77e8c94bb919365da58341726e79fd06355fd80
|
6dc716bbaf2e63da9153ff72e3c43364a1fcb5ff
|
/src/pyWebdriverAPI/19get_attribute.py
|
9ff4350ae107f31f7565d6a64690d2bb49deb222
|
[] |
no_license
|
Fangziqiang/PythonAutoTest
|
bfa1d583a21768bcce45ac2348cd4913934b1703
|
c9084004b6964fc0d59b98586d2986d0d7f938b1
|
refs/heads/master
| 2020-04-29T03:01:36.450658
| 2019-03-29T10:10:01
| 2019-03-29T10:10:01
| 175,793,069
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#coding= utf-8
#<input type="checkbox" data-node="594434499" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434498" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434493" data-convert="1" data-type="file">
#<input type="checkbox" data-node="594434497" data-convert="1" data-type="file">
from selenium import webdriver
driver = webdriver.Firefox()
#选择页面上所有的tag_name为input的元素
inputs =driver.find_element_by_tag_name('input')
#循环遍历出data-node为594434493的元素,单击勾选
for input in inputs:
if input.get_attribute('data-node')=='594434493':
input.click()
|
[
"286330540@qq.com"
] |
286330540@qq.com
|
6dc108d5aa1cddf54041d834cab22c1668506c85
|
ca39938bcc1c04476bd33a52fcfeadd45a21192d
|
/classes1.py
|
2def90a7910b1f31d712cb44baa9c7520f501713
|
[] |
no_license
|
ImayaDismas/python-programs
|
57c2f2e633e3e10e42cfbfb873af60892041978d
|
06102f505603220b5411d5777ceb2dd1f38c3f5d
|
refs/heads/master
| 2021-01-10T16:29:42.530118
| 2016-02-16T17:01:33
| 2016-02-16T17:01:33
| 50,907,511
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
#!/usr/bin/python3
class Duck:
def __init__(self, value):
# self._v is attached to the object and not class.
# the value is part of the object(encapsulation)
self._v = value
def quack(self):
print('Quaaack!', self._v)
def walki(self):
print('Walks like a duck', self._v)
def main():
donald = Duck(52)
frank = Duck(151)
donald.quack()
donald.walki()
frank.quack()
frank.walki()
if __name__ == "__main__": main()
|
[
"imayadismas@gmail.com"
] |
imayadismas@gmail.com
|
1d19dfcaa94d515c7b8e7eb32d38bfffef735c0b
|
ac0894b411507bfd027696b6bf11b5e384ed68fc
|
/need-to-do/python3------download-problem--of--leetcode/216.combination-sum-iii.py
|
94404265e1d831a8428bfc30325a91714f84809d
|
[] |
no_license
|
mkzpd/leetcode-solution
|
1d19554628c34c74012fa52582c225e6dccb345c
|
60c9b218683bcdee86477a910c58ec702185c726
|
refs/heads/master
| 2020-05-31T05:56:48.985529
| 2019-09-20T09:10:49
| 2019-09-20T09:10:49
| 190,128,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
#
# @lc app=leetcode id=216 lang=python3
#
# [216] Combination Sum III
#
# https://leetcode.com/problems/combination-sum-iii/description/
#
# algorithms
# Medium (53.03%)
# Total Accepted: 134.8K
# Total Submissions: 254.1K
# Testcase Example: '3\n7'
#
#
# Find all possible combinations of k numbers that add up to a number n, given
# that only numbers from 1 to 9 can be used and each combination should be a
# unique set of numbers.
#
# Note:
#
#
# All numbers will be positive integers.
# The solution set must not contain duplicate combinations.
#
#
# Example 1:
#
#
# Input: k = 3, n = 7
# Output: [[1,2,4]]
#
#
# Example 2:
#
#
# Input: k = 3, n = 9
# Output: [[1,2,6], [1,3,5], [2,3,4]]
#
#
#
from typing import List

class Solution:
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        # Standard backtracking over the digits 1..9.
        res = []

        def backtrack(start, path, remaining):
            if len(path) == k:
                if remaining == 0:
                    res.append(path[:])
                return
            for digit in range(start, 10):
                if digit > remaining:
                    break
                path.append(digit)
                backtrack(digit + 1, path, remaining - digit)
                path.pop()

        backtrack(1, [], n)
        return res
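# Usage sketch matching the examples above:
# print(Solution().combinationSum3(3, 7))   # [[1, 2, 4]]
# print(Solution().combinationSum3(3, 9))   # [[1, 2, 6], [1, 3, 5], [2, 3, 4]]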
|
[
"sodgso262@gmail.com"
] |
sodgso262@gmail.com
|
29838aa991d0e07bfc6098581bf37e44233a71f9
|
2368797b51548c0f6393d63bf4973898ac99d528
|
/stack/hard/q772.py
|
1bb2df7a709fdba2408d3fadf6b0799535b2bf3b
|
[] |
no_license
|
pengzhefu/LeetCodePython
|
595887d1625666962e7e959ffa148580f9b89ada
|
59eff778a5fd5cff3b5b6b88c6c7e76dd213dfb0
|
refs/heads/master
| 2021-06-08T19:44:52.487031
| 2021-06-01T15:44:29
| 2021-06-01T15:44:29
| 175,763,155
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 00:17:51 2019
@author: pengz
"""
'''
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open ( and closing parentheses ), the plus + or minus sign -,
non-negative integers and empty spaces .
The expression string contains only non-negative integers, +, -, *, / operators , open ( and
closing parentheses ) and empty spaces . The integer division should truncate toward zero.
You may assume that the given expression is always valid. All intermediate results will be in the
range of [-2147483648, 2147483647].
Some examples:
"1 + 1" = 2
" 6-4 / 2 " = 4
"2*(5+5*2)/3+(6/2+8)" = 21
"(2+6* 3+5- (3*14/7+2)*5)+3"=-12
'''
## https://medium.com/@CalvinChankf/solving-basic-calculator-i-ii-iii-on-leetcode-74d926732437
def calculate3(s): ## other's solution, time is O(n^2),
if len(s) ==0:
return 0
stack = []
sign = '+' ## previous sign
num =0 ## tmp result
i =0 ## the index of s
while i <len(s):
c = s[i]
if c.isdigit():
num = 10*num + int(c)
        if c == '(': ## find the matching ")"
# find the corresponding ")"
pCnt = 0
end = 0
clone = s[i:]
while end < len(clone):
if clone[end] == '(':
pCnt += 1
elif clone[end] == ')':
pCnt -= 1
if pCnt == 0:
break
end += 1
# do recursion to calculate the sum within the next (...)
            num = calculate3(s[i+1:i+end])
i += end
        if i == len(s)-1 or (c == '+' or c == '-' or c == '*' or c == '/'): ## this part is the same as Basic Calculator II
if sign == '+':
stack.append(num)
elif sign == '-':
stack.append(-num)
elif sign == '*':
stack[-1] = stack[-1]*num
elif sign == '/':
stack[-1] = int(stack[-1]/float(num))
sign = c
num = 0
i +=1
return sum(stack)
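# Quick check of the examples from the docstring above (added as a usage sketch).
if __name__ == '__main__':
    print(calculate3("1 + 1"))                       # 2
    print(calculate3(" 6-4 / 2 "))                   # 4
    print(calculate3("2*(5+5*2)/3+(6/2+8)"))         # 21
    print(calculate3("(2+6* 3+5- (3*14/7+2)*5)+3"))  # -12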
|
[
"32311379+pengzhefu@users.noreply.github.com"
] |
32311379+pengzhefu@users.noreply.github.com
|
b0df441e56adb0a05833822a484bf3b07dc5ab63
|
f3af143bada7f79db1e15b4386e5107bc99eb212
|
/ProjectBaseTest1/工具练习/01-微信消息撤回/01-微信撤回消息.py
|
5acc6172948fb9993db5ff078b58c9afe7ab8de5
|
[] |
no_license
|
xxxfly/PythonStudy
|
a5ceae1d2b16cfdba19871507458154fc292bca0
|
478d89ccefc91a84f935aebdca796c9d4c23ef61
|
refs/heads/master
| 2022-04-08T13:45:03.405768
| 2020-03-09T03:47:50
| 2020-03-09T03:47:50
| 45,187,749
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,497
|
py
|
#-*-encoding:utf-8-*-
import os
import re
import shutil
import time
import itchat
from itchat.content import *
# Related resources
# Install itchat: pip3 install itchat
# itchat: https://github.com/liduanwei/ItChat
# Chinese API docs: http://itchat.readthedocs.io/zh/latest/
# Note: recallable message types are text, voice, video, picture, location, contact card, share and attachment
# {msg_id:(msg_from,msg_to,msg_time,msg_time_rec,msg_type,msg_content,msg_share_url)}
msg_dict={}
# Temporary directory for storing received files
rev_tmp_dir="RevDir/"
if not os.path.exists(rev_tmp_dir):
    os.mkdir(rev_tmp_dir)
# Emoji quirk: the msg_id of a received emoji differs from the one in the recall note; this is a workaround
face_bug=None
# Store received messages in a dict and clean up expired entries when new messages arrive | skip message types that cannot be recalled
# [TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO, FRIENDS, NOTE]
@itchat.msg_register([TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO])
def handler_receive_msg(msg):
global face_bug
    # Get the local timestamp and format it, e.g. 2018-04-21 13:08:21
msg_time_rec=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
    # Message id
    msg_id=msg['MsgId']
    # Message time
    msg_time=msg['CreateTime']
    # Sender nickname | RemarkName could also be used here, but it is None for yourself or friends without a remark
    msg_from=(itchat.search_friends(userName=msg['FromUserName']))['NickName']
    # Message content
    msg_content=None
    # Shared link
msg_share_url=None
print("-->"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+"--接收消息:"+str(msg))
if msg['Type'] == 'Text' or msg['Type'] == 'Friends':
msg_content = msg['Text']
elif msg['Type']=='Recording' or msg['Type']=='Attachment' or msg['Type']=='Video' or msg['Type']=='Picture':
msg_content=r''+msg['FileName']
        # Save the attached file locally
msg['Text'](rev_tmp_dir+msg['FileName'])
elif msg['Type'] == 'Card':
msg_content=msg['RecommendInfo']['NickName']+r"的名片"
elif msg['Type'] == 'Map':
x,y,location=re.search('<location x="(.*?)" y="(.*?)".*label="(.*?)".*', msg['OriContent']).group(1, 2, 3)
if location is None:
msg_content=r"维度->"+x.__str__()+"经度->"+y.__str__()
else:
msg_content=r""+location
elif msg['Type']=='Sharing':
msg_content=msg['Text']
msg_share_url = msg['Url']
face_bug=msg_content
    # Update the dict
msg_dict.update({
msg_id:{
"msg_from":msg_from,"msg_time":msg_time,"msg_time_rec":msg_time_rec,
"msg_type": msg["Type"],
"msg_content": msg_content, "msg_share_url": msg_share_url
}
})
@itchat.msg_register([NOTE])
def send_msg_helper(msg):
global face_bug
print("-->"+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+"--撤回消息:"+str(msg))
if re.search(r"<!\[CDATA\[.*撤回了一条消息\]\]>",msg["Content"]) is not None:
        # Get the id of the recalled message
old_msg_id=re.search("<msgid>(.*?)</msgid>",msg['Content']).group(1)
old_msg=msg_dict.get(old_msg_id,{})
if len(old_msg_id)<11:
itchat.send_file(rev_tmp_dir+face_bug,toUserName='filehelper')
os.remove(rev_tmp_dir+face_bug)
else:
msg_body="告诉你一个秘密~"+""+old_msg.get("msg_from")+" 撤回了 "+old_msg.get("msg_type")+" 消息 "+" "+old_msg.get("msg_time_rec")+" "+"撤回了什么 ⇣"+" "+r""+old_msg.get("msg_content")
            # If it was a share, include the link
if old_msg['msg_type']=='Sharing':
msg_body+= " 就是这个链接➣ " + old_msg.get('msg_share_url')
            # Forward the recalled message to the file helper
itchat.send(msg_body,toUserName='filehelper')
            # If there was a file, send the file back as well
if old_msg['msg_type']=='Picture' or old_msg['msg_type']=='Recording' or old_msg['msg_type']=='Video' or old_msg['msg_type']=='Attachment':
file='@file@%s'%(rev_tmp_dir+old_msg['msg_content'])
itchat.send(msg=file,toUserName='filehelper')
os.remove(rev_tmp_dir+old_msg['msg_content'])
            # Remove the old message from the dict
msg_dict.pop(old_msg_id)
if __name__ == '__main__':
itchat.auto_login(hotReload=True,enableCmdQR=2)
itchat.run()
# itchat.send('hello',toUserName='filehelper')
|
[
"970832396@qq.com"
] |
970832396@qq.com
|
758943cd58a34d2d241f98b466204942032ae6d2
|
9f26975c02f7a10ce23c5f6217fc1f4a80c5134c
|
/crawling_2.py
|
9ec35f60cfda4ecb628605bbfbb835441996677f
|
[] |
no_license
|
ravi4all/PythonReg2_30_2020
|
d0ae837da4c19f1bff9a938e5a91c561c9288f36
|
ccb85325e0efce60c771697f8d07dc5e63cfaa5b
|
refs/heads/master
| 2020-12-18T04:16:27.399221
| 2020-03-20T15:05:30
| 2020-03-20T15:05:30
| 235,315,428
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
import bs4
import urllib.request as url
path = "https://www.flipkart.com/search?q=tv&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off&as-pos=0&as-type=HISTORY&as-backfill=on"
response = url.urlopen(path)
page = bs4.BeautifulSoup(response,'lxml')
div = page.findAll('div',class_='_3wU53n')
price = page.findAll('div',class_='_1vC4OE _2rQ-NK')
rating = page.findAll('div',class_='hGSR34')
for i in range(len(div)):
print(div[i].text)
print("Price :",price[i].text)
print("Rating :",rating[i].text)
print("="*20)
|
[
"noreply@github.com"
] |
ravi4all.noreply@github.com
|
5c1f9344d0bff5044ed0e8a71e5cabace9acb666
|
4166821e5d4cff87a3f178a0e3047ddd5d62bccf
|
/brigitte/accounts/migrations/0004_auto__add_field_sshpublickey_key_parsed.py
|
6b9d6bc6e6a005d42af354722feb1eae262ef973
|
[
"BSD-3-Clause"
] |
permissive
|
stephrdev/brigitte
|
0df208c797c4e26832fd30f0fdd6ec2db5212b4f
|
473d3c30af728292693f4e94b3c9b34d2d784b41
|
refs/heads/master
| 2021-06-04T05:16:59.683660
| 2013-07-08T08:17:05
| 2013-07-08T08:17:05
| 1,267,927
| 12
| 4
|
BSD-3-Clause
| 2021-03-29T16:44:29
| 2011-01-18T18:00:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,847
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SshPublicKey.key_parsed'
db.add_column('accounts_sshpublickey', 'key_parsed',
self.gf('django.db.models.fields.TextField')(default='', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SshPublicKey.key_parsed'
db.delete_column('accounts_sshpublickey', 'key_parsed')
models = {
'accounts.profile': {
'Meta': {'object_name': 'Profile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'short_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'accounts.sshpublickey': {
'Meta': {'object_name': 'SshPublicKey'},
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'key_parsed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
|
[
"steph@rdev.info"
] |
steph@rdev.info
|
226f93b97333e8f4e387f28b0bc99298333d003a
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/accessanalyzer_write_2/resource_tag.py
|
ef443db399fe96ce9f2ed16a0e7b0822b37cb0c7
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 931
|
py
|
#!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/accessanalyzer/tag-resource.html
if __name__ == '__main__':
"""
untag-resource : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/accessanalyzer/untag-resource.html
"""
parameter_display_string = """
# resource-arn : The ARN of the resource to add the tag to.
# tags : The tags to add to the resource.
key -> (string)
value -> (string)
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("accessanalyzer", "tag-resource", "resource-arn", "tags", add_option_dict)
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
459255c5aacaa2dc24b3cef00468477a0911bbdb
|
80a7cd7958eae28af24c4cf2be167e425c7edaca
|
/utils/authors.py
|
8030b5d1d40950b5b36ed33eec87a4b001f56ed7
|
[
"MIT"
] |
permissive
|
prophile/srweb-jekyll
|
18918ea82bdf6a8850a20ef9be5e78a876d078b4
|
faa1727e343d01fe4fa7b75c39a106ca895c8f2b
|
refs/heads/master
| 2021-01-01T05:38:40.764190
| 2015-09-13T16:57:07
| 2015-09-13T16:57:07
| 27,548,340
| 0
| 2
|
MIT
| 2022-06-11T13:03:12
| 2014-12-04T16:09:02
|
CSS
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
import subprocess
import re
import json
from collections import namedtuple
def is_valid(author):
if 'fail@studentrobotics.org' in author.email:
return False
if 'BRIAN' in author.name:
return False
return True
DATA_REGEX = re.compile('^\s*(\d+)\t(.*) <(.*)>$')
Author = namedtuple('Author', 'commits name email')
authors = subprocess.check_output(('git', 'shortlog', '-sne'),
universal_newlines=True).splitlines()
authors = [DATA_REGEX.match(line).groups() for line in authors]
authors = [Author(int(commits), name, email)
for commits, name, email in authors]
authors = [author for author in authors if is_valid(author)]
with open('AUTHORS', 'w') as f:
for author in authors:
f.write("{} <{}>\n".format(author.name, author.email))
with open('_data/authors.yml', 'w') as f:
json.dump([dict(author._asdict()) for author in authors],
f)
|
[
"arplynn@gmail.com"
] |
arplynn@gmail.com
|
d0bc43a4647f0dfc4d749e43af45a248efb2f22d
|
f6d1ed1a5369a5810429756fbdc07a8d293a4b3d
|
/conans/client/run_environment.py
|
5f570cb8a0e111565aa4f5d270ca72669944acfd
|
[
"MIT"
] |
permissive
|
lasote/conan
|
fc4e0b561e736b447b57999026ffe5291b7cab50
|
8f6978290d095778eff6a9a84ea3f06f723fcfea
|
refs/heads/develop
| 2023-05-26T03:10:52.211304
| 2017-11-23T12:44:56
| 2017-11-23T12:44:56
| 47,204,017
| 3
| 3
|
MIT
| 2019-01-22T09:36:12
| 2015-12-01T16:56:42
|
Python
|
UTF-8
|
Python
| false
| false
| 782
|
py
|
class RunEnvironment(object):
"""
- PATH: pointing to the bin/ directories of the requires
- LD_LIBRARY_PATH: requires lib_paths for Linux
- DYLD_LIBRARY_PATH: requires lib_paths for OSx
"""
def __init__(self, conanfile):
"""
:param conanfile: ConanFile instance
"""
self.conanfile = conanfile
@property
def vars(self):
lib_paths = []
bin_paths = []
for dep in self.conanfile.deps_cpp_info.deps:
lib_paths.extend(self.conanfile.deps_cpp_info[dep].lib_paths)
bin_paths.extend(self.conanfile.deps_cpp_info[dep].bin_paths)
ret = {"DYLD_LIBRARY_PATH": lib_paths,
"LD_LIBRARY_PATH": lib_paths,
"PATH": bin_paths}
return ret
|
[
"james@conan.io"
] |
james@conan.io
|
86d2dd1ccdf978c2fd6ced71b93ebce47e3063f6
|
0ad7f553df6b210b5ac004fbf490ed651a21d55e
|
/algos/discrete_esay_control_lib_03.py
|
8f73937bb21a1a30f95dcd2cf3acc4546e94c645
|
[] |
no_license
|
MarianoDel/spyder_python
|
fa00987eb9aa1ef61d7224679a84c05a217c6c35
|
5f5896df68f95eb860bc08c21ae2b19516432cdc
|
refs/heads/master
| 2020-05-23T06:14:57.329478
| 2020-04-23T14:58:16
| 2020-04-23T14:58:16
| 84,753,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,993
|
py
|
# -*- coding: utf-8 -*-
# using the control library
#http://python-control.readthedocs.org/en/latest/index.html
import numpy as np
import control as ct
import matplotlib.pyplot as plt
#from scipy import signal
# the coefficients are taken from the digital system created in Tfilter_sympy_02.py
#numd1
#array([ 0. , 1.48941694, 0.9759379 , -0.00450648])
#dend
#array([ 1.00000000e+00, 9.21237959e-01, 5.39610404e-01, 1.33103857e-18])
b = [ 0. , 1.48941694, 0.9759379 , -0.00450648]
a = [ 1.00000000e+00, 9.21237959e-01, 5.39610404e-01, 1.33103857e-18]
dt = 1.0/25000
plt.figure(1)
dsys1 = ct.tf(b, a, dt)
omega = np.arange(100, 3.1415 / dt, 1)
mag, phase, omega = ct.bode_plot(dsys1, omega)
plt.show()
plt.draw()
G1 = dsys1
G2 = 350./3.3
Gt = ct.series(G1, G2)
plt.figure(2)
mag, phase, omega = ct.bode_plot(Gt, omega)
plt.show()
plt.draw()
# PID LOOP
# working backwards from the algorithm
#uk = uk-1 + k1 ek + k2 ek-1 + k3 ek-2
#Uz/Ez = (b0 + b1 z-1 + b2 z-2) / (1 - z-1)
#b0 = kp + kd + ki
#b1 = -kp - 2kd
#b2 = kd
#a0 = 1
#a1 = -1
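# Minimal helper sketch of the same PID as the time-domain recurrence described above,
# u[k] = u[k-1] + b0*e[k] + b1*e[k-1] + b2*e[k-2]; handy for sanity-checking bpid/apid.
def pid_step(e_k, e_k1, e_k2, u_k1, b0, b1, b2):
    # one update of the discrete PID difference equation
    return u_k1 + b0 * e_k + b1 * e_k1 + b2 * e_k2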
fs = 25000
kp = 0.0015/fs
#kd = 0.80
kd = 0.0
ki = 58.0/fs
#ki = 0.0
bpid = [kp + kd + ki, -kp - 2*kd, kd] # from the spice model
apid = [1, -1]
print ("bpid vale")
print (bpid)
print ("apid vale")
print (apid)
plt.figure(3)
Gpid = ct.tf(bpid, apid, dt)
mag, phase, omega = ct.bode_plot(Gpid, omega)
plt.show()
plt.draw()
#open loop
GH = ct.series(Gpid, Gt)
plt.figure(4)
mag, phase, omega = ct.bode_plot(GH, omega)
plt.show()
plt.draw()
#feedback
Gfeed = ct.feedback(GH, sys2=1, sign=-1)
plt.figure(5)
mag, phase, omega = ct.bode_plot(Gfeed, omega)
plt.show()
plt.draw()
plt.figure(6)
tin = np.arange(0.0, 0.005, 0.0001)
Tout, yout2 = ct.step_response(Gfeed, T=tin, X0=0.0, input=None, output=None, transpose=False,)
yout1 = np.transpose(yout2)
yout0 = yout1[0]
#yout = yout0[:50]
yout = yout0[:Tout.size]
plt.plot(Tout, yout)
plt.show()
plt.draw()
|
[
"marianodeleu@yahoo.com.ar"
] |
marianodeleu@yahoo.com.ar
|
63470244c2c7805bd495020c5dd5e40fef97303e
|
7606f5755a83ad6670b64d2acf381a54cf635697
|
/exercises/1901090010/1001S02E05_array.py
|
8f0f333861e9e35aa5184ad7678148535e3985a2
|
[] |
no_license
|
imlzg/selfteaching-python-camp
|
6c107e53740dbc6721008ea36f66c1b3d0c9ee72
|
b620553ef4fb86d8528c54947d98c2f1c2df996e
|
refs/heads/master
| 2022-01-09T04:09:14.849146
| 2019-05-20T15:17:07
| 2019-05-20T15:29:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
array=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(array)
array.reverse()
ls1=[str(i) for i in array]
print(ls1)
ls2=''.join(ls1)
print(ls2)
ls3=ls2[2:8]
print(ls3)
ls4=ls3[::-1]
print(ls4)
ls5=int(ls4)
print("转换为二进制为:", bin(ls5))
print("转换为八进制为:", oct(ls5))
print("转换为十六进制为:", hex(ls5))
|
[
"46160162+EthanYan6@users.noreply.github.com"
] |
46160162+EthanYan6@users.noreply.github.com
|
5b37e01087be00d24c666fb7c11df28e7282eda9
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/seem_case/work_great_world/come_early_company/great_world/know_little_place_above_public_company.py
|
95daadfe99dac6c7ffb401779f2a1dd58b021e23
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
#! /usr/bin/env python
def give_company_at_able_number(str_arg):
last_life(str_arg)
print('try_day')
def last_life(str_arg):
print(str_arg)
if __name__ == '__main__':
give_company_at_able_number('old_man_or_child')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
73a44163086c3f3bc57dae1906bd48ffdc92761c
|
f0ee987789f5a6fe8f104890e95ee56e53f5b9b2
|
/pythia-0.8/packages/pyre/pyre/inventory/odb/Registry.py
|
c72724141667dc8e6e35621c08aee1ae78d01a96
|
[] |
no_license
|
echoi/Coupling_SNAC_CHILD
|
457c01adc439e6beb257ac8a33915d5db9a5591b
|
b888c668084a3172ffccdcc5c4b8e7fff7c503f2
|
refs/heads/master
| 2021-01-01T18:34:00.403660
| 2015-10-26T13:48:18
| 2015-10-26T13:48:18
| 19,891,618
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
class Registry(object):
def identify(self, inspector):
return inspector.onRegistry(self)
def getFacility(self, name, default=None):
return self.facilities.get(name, default)
def getProperty(self, name, default=''):
try:
return self.properties[name].value
except KeyError:
return default
# UNREACHABLE
import journal
journal.firewall("inventory").log("UNREACHABLE")
return
def setProperty(self, name, value, locator):
self.properties[name] = self._createDescriptor(value, locator)
return
def deleteProperty(self, name):
"""remove the named property"""
try:
del self.properties[name]
except KeyError:
pass
return
def update(self, registry):
if not registry:
return self
for name, descriptor in registry.properties.iteritems():
self.setProperty(name, descriptor.value, descriptor.locator)
for name, node in registry.facilities.iteritems():
self.getNode(name).update(node)
return self
def getNode(self, name):
try:
node = self.facilities[name]
except KeyError:
node = Registry(name)
self.facilities[name] = node
return node
def attachNode(self, node):
self.facilities[node.name] = node
return
def extractNode(self, facility):
try:
node = self.facilities[facility]
except KeyError:
return None
del self.facilities[facility]
return node
def render(self):
listing = [
("%s.%s" % (self.name, name), descriptor.value, "%s" % descriptor.locator)
for name, descriptor in self.properties.iteritems()
]
listing += [
("%s.%s" % (self.name, name), value, "%s" % locator)
for facility in self.facilities.itervalues()
for name, value, locator in facility.render()
]
return listing
def __init__(self, name):
self.name = name
self.properties = {}
self.facilities = {}
return
def _createDescriptor(self, value, locator):
from Descriptor import Descriptor
return Descriptor(value, locator)
# version
__id__ = "$Id: Registry.py,v 1.1.1.1 2005/03/08 16:13:43 aivazis Exp $"
# End of file
|
[
"echoi2@memphis.edu"
] |
echoi2@memphis.edu
|
cfd0938addfe5e354eb030032a683f83195c8112
|
312a8fde11293cb142334a3860966ec1f75ac401
|
/timesketch/views/spa.py
|
c7401a6e3a270c6062b441b881b34d8e5e754333
|
[
"Apache-2.0"
] |
permissive
|
google/timesketch
|
f0fd09062a8a24bac581d2d4286d095d667d2f10
|
24f471b58ca4a87cb053961b5f05c07a544ca7b8
|
refs/heads/master
| 2023-08-31T21:48:19.602686
| 2023-08-31T11:24:17
| 2023-08-31T11:24:17
| 21,009,909
| 2,263
| 647
|
Apache-2.0
| 2023-09-14T14:08:07
| 2014-06-19T17:49:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements HTTP request handler."""
from __future__ import unicode_literals
from flask import Blueprint
from flask import redirect
from flask import render_template
from flask_login import login_required
# Register flask blueprint
spa_views = Blueprint("spa_views", __name__)
@spa_views.route("/sketch/<int:sketch_id>/explore/view/<int:view_id>/", methods=["GET"])
@login_required
# pylint: disable=unused-argument
def redirect_view(sketch_id, view_id):
"""Redirect old (deprecated) view URLs to scheme.
Returns:
Redirect to new URL scheme.
"""
return redirect("/sketch/{0:d}/explore?view={1:d}".format(sketch_id, view_id))
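# For example, a request to /sketch/1/explore/view/2/ is redirected to /sketch/1/explore?view=2.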
@spa_views.route("/", defaults={"path": ""})
@spa_views.route("/<path:path>")
@login_required
# pylint: disable=unused-argument
def overview(path):
"""Generates the template.
Returns:
Template with context.
"""
return render_template("index.html")
|
[
"noreply@github.com"
] |
google.noreply@github.com
|
5a6822dc37ab0181188d67e82b773b3e8e59d089
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit2487.py
|
641b39dc24dfafe2c36913591797358aa26dea60
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
# qubit number=4
# total number=43
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[3]) # number=21
prog.cz(input_qubit[0],input_qubit[3]) # number=22
prog.h(input_qubit[3]) # number=23
prog.h(input_qubit[3]) # number=27
prog.cz(input_qubit[0],input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=29
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[0],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.x(input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[0],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
prog.cx(input_qubit[0],input_qubit[3]) # number=18
prog.rx(-0.364424747816416,input_qubit[3]) # number=36
prog.y(input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=40
prog.cz(input_qubit[0],input_qubit[3]) # number=41
prog.h(input_qubit[3]) # number=42
prog.cx(input_qubit[0],input_qubit[3]) # number=12
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=19
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2487.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
3b636769f9391a37b518ae880c23423c3395cc7a
|
09a6d8dbad5b92f93791948b5bf9b75f5cb2e5ce
|
/pennylane/data/attributes/operator/_wires.py
|
f953c9b8f78d3b7681b90320cb92d2c2f394b24d
|
[
"Apache-2.0"
] |
permissive
|
PennyLaneAI/pennylane
|
458efd5d9457e90ada31ca2ef0fb6bb96a24e9a7
|
0843183ff15a013c2622af5e61fea431d18076d3
|
refs/heads/master
| 2023-09-03T17:00:43.105784
| 2023-09-01T16:15:07
| 2023-09-01T16:15:07
| 129,936,360
| 1,431
| 410
|
Apache-2.0
| 2023-09-14T21:30:56
| 2018-04-17T16:45:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
# Copyright 2018-2023 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains utility function for converting ``Wires`` objects to JSON."""
import json
import numbers
from typing import Any
from pennylane.wires import Wires
class UnserializableWireError(TypeError):
"""Raised if a wire label is not JSON-serializable."""
def __init__(self, wire: Any) -> None:
super().__init__(
f"Cannot serialize wire label '{wire}': Type '{type(wire)}' is not json-serializable."
)
_JSON_TYPES = {int, str, float, type(None), bool}
def wires_to_json(wires: Wires) -> str:
"""Converts ``wires`` to a JSON list, with wire labels in
order of their index.
Returns:
JSON list of wires
Raises:
UnserializableWireError: if any of the wires are not JSON-serializable.
"""
jsonable_wires = []
for w in wires:
if type(w) in _JSON_TYPES:
jsonable_wires.append(w)
elif isinstance(w, numbers.Integral):
w_converted = int(w)
if hash(w_converted) != hash(w):
raise UnserializableWireError(w)
jsonable_wires.append(w_converted)
else:
raise UnserializableWireError(w)
return json.dumps(jsonable_wires)
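# Usage sketch (the wire labels here are illustrative):
# wires_to_json(Wires([0, 1, "aux"])) returns the JSON string '[0, 1, "aux"]'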
|
[
"noreply@github.com"
] |
PennyLaneAI.noreply@github.com
|
2ece2111f21d161cc9f0a108b71ef2c8149855fc
|
99dcb18a9e3ea367272f740b8cbf3c34285a0c08
|
/samples/snippets/endpoint_service/deploy_model_sample_test.py
|
b12b234b8bddc0e5080ee113729a18515957f0d0
|
[
"Apache-2.0"
] |
permissive
|
googleapis/python-aiplatform
|
926a4873f35dbea15b2fd86c0e16b5e6556d803e
|
76b95b92c1d3b87c72d754d8c02b1bca652b9a27
|
refs/heads/main
| 2023-08-19T23:49:02.180075
| 2023-08-19T13:25:59
| 2023-08-19T13:27:27
| 298,017,988
| 418
| 240
|
Apache-2.0
| 2023-09-14T21:08:33
| 2020-09-23T15:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from uuid import uuid4
import deploy_model_sample
import pytest
import helpers
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
# Resource Name of "permanent_50_flowers_new_model"
MODEL_NAME = "projects/580378083368/locations/us-central1/models/4190810559500779520"
@pytest.fixture(scope="function", autouse=True)
def setup(create_endpoint):
create_endpoint(PROJECT_ID, LOCATION)
yield
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_endpoint):
yield
def test_ucaip_generated_deploy_model_sample(capsys, shared_state):
assert shared_state["endpoint_name"] is not None
# Deploy existing image classification model to endpoint
deploy_model_sample.deploy_model_sample(
project=PROJECT_ID,
model_name=MODEL_NAME,
deployed_model_display_name=f"temp_deploy_model_test_{uuid4()}",
endpoint_id=shared_state["endpoint_name"].split("/")[-1],
)
# Store deployed model ID for undeploying
out, _ = capsys.readouterr()
assert "deploy_model_response" in out
shared_state["deployed_model_id"] = helpers.get_name(out=out, key="id")
|
[
"noreply@github.com"
] |
googleapis.noreply@github.com
|
23daf8e24dd73325c2f0ec16a12b272eeab45c76
|
320bd873b6cf5db2fc9194cc4ad782a49373d6ee
|
/temp/1/11/conftest.py
|
576834bb0e0714ce25adf6d1af3e28750d5c2993
|
[] |
no_license
|
donniezhanggit/AppiumDemo8_Android
|
7b0aed903969e2101330b5da4e89c39e3d591723
|
7a2ed3be27ed6cb27bd4e30e13d48cc8f34aa654
|
refs/heads/master
| 2020-09-13T17:35:33.749237
| 2019-03-10T10:04:46
| 2019-03-10T10:04:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
import pytest
import logging
logging.basicConfig(level=logging.DEBUG)
@pytest.fixture(scope="class")
def username11():
print("username 11 module 11")
return "module username 11"
|
[
"seveniruby@gmail.com"
] |
seveniruby@gmail.com
|
4c235f4e1cb7df6459e5a27dc2db0e9523efbd8d
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j18372-3125/sdB_GALEX_J18372-3125_coadd.py
|
034c74a4d5e3f32e646d3ef889df15b3efc39fd6
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[279.319917,-31.420586], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_GALEX_J18372-3125/sdB_GALEX_J18372-3125_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_GALEX_J18372-3125/sdB_GALEX_J18372-3125_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
7a8a0495f011281a56470d70c04e8e2dbe1b09db
|
f92385943346eccca8cc4d7caca66d2d5455caa2
|
/2020.8/老虎-1.py
|
4944f91312e7c445e82228e8f02965aa40b7fbe9
|
[] |
no_license
|
IamWilliamWang/Leetcode-practice
|
83861c5f8672a716141dc6ec9f61f21dc5041535
|
c13c0380a3ae9fef201ae53d7004b9f4224f1620
|
refs/heads/master
| 2023-04-01T12:15:19.335312
| 2020-10-15T14:49:36
| 2020-10-15T14:49:36
| 281,846,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
#
#
# @param HP long integer HP
# @param ACK long integer ACK
# @param HP2 long integer HP2
# @param ACK2 long integer ACK2
# @return long integer
#
import math
class Solution:
def Pokemonfight(self, HP, ACK, HP2, ACK2):
hp, ack, hpEnemy, ackEnemy = HP2, ACK2, HP, ACK
        if ackEnemy >= hp: # killed in a single round
return -1
eatMadicine = False
for round in range(1, 100000):
            hp -= ackEnemy # the enemy attacks
if hp <= 0:
return -1
            if hp <= ackEnemy and hpEnemy > ack: # time to consider using a potion
                if eatMadicine: # kept having to use potions with no chance to attack
return -1
hp = HP2
eatMadicine = True
else:
hpEnemy -= ack
eatMadicine = False
if hpEnemy <= 0:
return round
return -1
print(Solution().Pokemonfight(8, 3, 8, 1))
|
[
"iamjerichoholic@hotmail.com"
] |
iamjerichoholic@hotmail.com
|
35d505a038c7b205e90b3d1f3f3ee044a8950306
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Types of burgers/solution.py
|
94cc0b583a01253b636b13ccbe80d38732c353cd
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 873
|
py
|
from heapq import heappop, heappush
t = int(input())
moves = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
for _ in range(t):
x, y, z = map(int, input().strip().split())
k = int(input())
b1 = sorted(map(int, input().strip().split()), reverse=True)
b2 = sorted(map(int, input().strip().split()), reverse=True)
b3 = sorted(map(int, input().strip().split()), reverse=True)
heap = [(-(b1[0] + b2[0] + b3[0]), 0, 0, 0)]
visited = {(0, 0, 0)}
ans = 0
while k:
total, bx, by, bz = heappop(heap)
ans -= total
for mx, my, mz in moves:
nx = bx + mx
ny = by + my
nz = bz + mz
if nx < x and ny < y and nz < z and (nx, ny, nz) not in visited:
heappush(heap, (-(b1[nx] + b2[ny] + b3[nz]), nx, ny, nz))
visited.add((nx, ny, nz))
k -= 1
print(ans)
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
4a70fedc08979200d7ed54db5e4a6469a9d88d01
|
53d203e73331d2ee0f1d644946bf6650f5716edd
|
/quantifiedcode/backend/tasks/email.py
|
95859928af4da04b416a6a77b273f397ad3bc141
|
[
"BSD-3-Clause"
] |
permissive
|
martynbristow/quantifiedcode
|
3f6a6c935aeb8d78e90086cefaee6cd3b6f4558d
|
a7485ceaeffc5cf5894654dde35ed9a501657a07
|
refs/heads/master
| 2022-01-25T20:42:24.124289
| 2021-07-07T10:44:54
| 2021-07-07T10:44:54
| 239,041,449
| 0
| 0
|
BSD-3-Clause
| 2022-01-06T22:47:22
| 2020-02-07T23:37:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,998
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Contains tasks and helper functions to send notifications.
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import logging
import datetime
import traceback
import re
from six import string_types
from quantifiedcode.settings import settings, backend
from quantifiedcode.backend.settings.jinja import jinja_env
from quantifiedcode.backend.worker import celery
from quantifiedcode.backend.models import User
logger = logging.getLogger(__name__)
def send_mail(*args, **kwargs):
if settings.get('debug'):
send_mail_async(*args, **kwargs)
else:
send_mail_async.delay(*args, **kwargs)
@celery.task(time_limit=120, queue="email", ignore_result=False)
def send_mail_async(email_to,
template,
template_context=None,
email_from=None,
name_from=None,
email_reply_to=None,
attachments=None):
""" Sends an email based on the specified template.
:param email_to: address or a list of email addresses
:param template: name of the template to use for the email
:param template_context: dict with template context, ie `template_context = {"diffs": aggregated_diffs}`
:param email_from: sender of the email
:param name_from: name of the sender
:param email_reply_to: email address to set as the reply-to address
:param attachments: list of attachments
:return:
"""
if isinstance(email_to, string_types):
email_to = [email_to]
if email_to is None or not isinstance(email_to, (list, tuple)):
raise ValueError("email_to is None or incompatible type!")
if template_context is None:
template_context = {}
email_from = email_from if email_from is not None else settings.get('email.from_email')
name_from = name_from if name_from is not None else settings.get('email.from_name')
email_reply_to = email_reply_to if email_reply_to is not None else email_from
if attachments is None:
attachments = []
# render mail content
template_context.update(settings.get('render_context', {}))
template_path = "email/{0}.multipart".format(template)
template = jinja_env.get_template(template_path)
#we generate the module, which allows us the extract individual blocks from it
#we capture those blocks of interest using the {% set ... %} syntax
module = template.make_module(template_context)
logger.info("Sending an email to: {}\ntemplate: {}\ntemplate_context: {}\nsubject: {}"
.format("".join(email_to), template, template_context, module.subject))
message = {
'from_email': email_from,
'from_name': name_from,
'reply_to' : email_reply_to,
'subject': module.subject,
'html': module.html,
'text': module.text if module.text else None,
'to': email_to,
'attachments': attachments,
}
if not settings.providers['email.send']:
logger.warning("No e-mail providers defined, aborting...")
return
for params in settings.providers['email.send']:
params['provider'](message)
break
def send_mail_to_user(user,
template,
template_context=None,
delay=False,
**kwargs):
""" Sends an email message if the user has a verified email and enabled email notifications
:param user: user to send the email message to
:param template: template for the message to send
:param template_context: `template_context = {"diffs": aggregated_diffs}`
:param delay: if True the send_mail function will be run asynchronously
:return: None
"""
function = send_mail.delay if delay is True else send_mail
if user.email and user.email_validated:
return function(user.email, template, template_context=template_context, **kwargs)
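# Usage sketch (the template name and context below are illustrative placeholders):
# send_mail_to_user(user, "analysis_finished", template_context={"project": project}, delay=True)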
|
[
"andreas.dewes@gmail.com"
] |
andreas.dewes@gmail.com
|
de912f9609f0a22e4153cd9d3c53bdd16fe3d6c1
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_040/ch25_2020_03_09_19_44_46_012241.py
|
65c6d90914c5b631ee3669e31ddbbd97f3da12dc
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
import math
v=float(input("Velocidade de lançamento: "))
angulo=float(input("Ângulo de lançamento: "))
def d(v,angulo):
d = (v**2*math.sin(math.radians(2*angulo))/9.8)
if (d>=102):
return ("Acertou!")
elif (d<=98):
return ("Muito perto")
else:
return ("Muito longe")
|
[
"you@example.com"
] |
you@example.com
|