blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a22320d5ad8c6a27fe4569472cbc5867d672629 | 07b249d8b26fc49f1268798b3bd6bdcfd0b86447 | /0x07-python-test_driven_development/testmod_.py | 49bd0c640293d84da2e7e2965fdca8e0dc1c19a2 | [] | no_license | leocjj/holbertonschool-higher_level_programming | 544d6c40632fbcf721b1f39d2453ba3d033007d6 | 50cf2308d2c9eeca8b25c01728815d91e0a9b784 | refs/heads/master | 2020-09-28T23:21:13.378060 | 2020-08-30T23:45:11 | 2020-08-30T23:45:11 | 226,889,413 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | #!/usr/bin/python3
"""
This is the "example" module.
The example module supplies one function, factorial(). For example,
>>> factorial(6)
720
"""
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.
    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
    ...
    ValueError: n must be >= 0
    Factorials of floats are OK, but the float must be an exact integer:
    >>> factorial(30.1)
    Traceback (most recent call last):
    ...
    ValueError: n must be exact integer
    >>> factorial(30.0)
    265252859812191058636308480000000
    It must also not be ridiculously large:
    >>> factorial(1e100)
    Traceback (most recent call last):
    ...
    OverflowError: n too large
    """
    import math
    # Negative (and NaN) inputs fail this comparison.
    if not n >= 0:
        raise ValueError("n must be >= 0")
    # Floats are accepted only when they hold an exact integral value.
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    # A float so large that n + 1 == n could never be counted up to.
    if n + 1 == n:
        raise OverflowError("n too large")
    total = 1
    for k in range(2, int(n) + 1):
        total *= k
    return total
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    #print(factorial(3))
    import doctest
    doctest.testmod()
    # Pass verbose=True to print every example as it runs:
    #doctest.testmod(verbose=True)
| [
"leocj@hotmail.com"
] | leocj@hotmail.com |
5b8c78a5752cf5cdaa9f3f64037d7faeab7cad3f | 659d41f0c737dffc2a6ebd5e773a6513da32e5ba | /scripts_OLD/PulseSequences/tests/turn_on_auto.py | e2444cebfe26f3413c0cf7aa2ce2c234bd956621 | [] | no_license | HaeffnerLab/sqip | b3d4d570becb1022083ea01fea9472115a183ace | 5d18f167bd9a5344dcae3c13cc5a84213fb7c199 | refs/heads/master | 2020-05-21T23:11:10.448549 | 2019-11-21T02:00:58 | 2019-11-21T02:00:58 | 19,164,232 | 0 | 0 | null | 2019-11-04T04:39:37 | 2014-04-25T23:54:47 | Python | UTF-8 | Python | false | false | 226 | py |
def main():
    """Connect to LabRAD, reset the DAC queue, and switch the 'adv' and
    'rst' pulser channels into automatic mode.
    """
    import labrad
    cxn = labrad.connect()  # connects to the local LabRAD manager by default
    cxn.dac_server.reset_queue()
    # NOTE(review): True presumably enables auto mode -- confirm against
    # the pulser server's switch_auto API.
    cxn.pulser.switch_auto('adv',True)
    cxn.pulser.switch_auto('rst',True)
if __name__ == '__main__':
main() | [
"haeffnerlab@gmail.com"
] | haeffnerlab@gmail.com |
cefeca51da9d665c2fa87a15a7a909aa2b76ceb4 | 71596c8aec5ea7eb44b0f86736bc5acdccd55ac1 | /Graphs/dfs_adv.py | 95d92150b5ca2fd58e2045eea1bb44e2b2db9ca6 | [] | no_license | karthikeyansa/Data_Structures_python | dbab61f67d1bc33995dd7ff86989aa56b6f11a5c | b64618a4cff2b1d29ce8c129cb1f8ec35dcddf6f | refs/heads/master | 2023-01-16T05:21:10.308318 | 2020-11-22T06:28:08 | 2020-11-22T06:28:08 | 264,231,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #undirected_cyclic_graph
def dfs(g, s):
    """Iterative depth-first traversal of adjacency-list graph *g*.

    Returns the set of vertices reachable from *s*, including *s* itself.
    """
    seen = {s}
    pending = [s]
    while pending:
        node = pending.pop()
        for neighbour in g[node]:
            if neighbour not in seen:
                seen.add(neighbour)
                pending.append(neighbour)
    return seen
# Read vertex count n and edge count m, then m undirected edges.
n,m = map(int,input().split())
g = {}
# Vertices are 1-based.
for i in range(n):
    g[i+1] = []
# Each edge is stored in both directions (undirected graph).
for _ in range(m):
    x,y = map(int,input().split())
    g[x].append(y)
    g[y].append(x)
print(dfs(g,1)) | [
"karthikeyansa39@gmail.com"
] | karthikeyansa39@gmail.com |
8a0cae9036743c46a8fba91fa5ef68a7cc72396c | 17c371020e9d5f163246092dc2ba405a4ec19900 | /posts/migrations/0001_initial.py | ab0b028560dcd5c70b2611f8b510221a5776e1d9 | [] | no_license | avs8/My-Blog | 2820386c8af8ceba448e45566c0cad01b832a2a6 | 636a48cf91d55c5688707295b0c0d78b47a17f7d | refs/heads/master | 2021-01-10T23:19:32.467922 | 2016-10-12T18:39:00 | 2016-10-12T18:39:00 | 70,621,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-10 18:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Post table (title, content, and
    two timestamps -- 'updated' refreshes on every save via auto_now,
    'timestamp' is set once at creation via auto_now_add).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256)),
                ('content', models.TextField()),
                ('updated', models.DateTimeField(auto_now=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"ajitavsingh_8@yahoo.com"
] | ajitavsingh_8@yahoo.com |
b5d2e8ce15ead6d7ef987071845d4c21c1689de8 | 7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd | /Geeky Shows/Advance Pyhton/203.Passing_Member_Of_One_Class_To_Another_Class[17].py | 503cf407b9664e539e7ad2bd8bdf02bdd184c17a | [] | no_license | satyam-seth-learnings/python_learning | 5a7f75bb613dcd7fedc31a1567a434039b9417f8 | 7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da | refs/heads/main | 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | # Passing Members Of One Class To Another Class
class Student:
    """Simple record holding a student's name and roll number."""

    def __init__(self, n, r):
        # Store both fields in one unpacking assignment.
        self.name, self.roll = n, r

    def disp(self):
        """Print this student's name and roll number."""
        print('Student Name:', self.name)
        print('Student Roll:', self.roll)
class User:
    """Demonstrates passing an object of another class to a static method."""

    @staticmethod
    def show(s):
        """Print *s* as a user, then delegate to its own disp() method."""
        name, roll = s.name, s.roll
        print('User Name:', name)
        print('User Roll:', roll)
        s.disp()
# Creating Object Of Student Class
stu=Student('Satyam',101)
User.show(stu) | [
"satyam1998.1998@gmail.com"
] | satyam1998.1998@gmail.com |
2cad8786efb1b0659b1f3bf2c217c5e0e997bd99 | 70c532c46847329d09757455721f4dc15bc16a77 | /morsite/settings.py | 982da5827335efc59b68cc8dd085fa929c876566 | [] | no_license | yaronsamuel-zz/morsite | 31a8f8b25c76f33819bc4eb72ad23c1ca258b7f7 | 4a609bc8cfa49ab8798c1bb87c43cd224a635f1b | refs/heads/master | 2023-01-29T13:26:08.915327 | 2014-04-17T14:20:01 | 2014-04-17T14:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,551 | py | import os
# Django settings for morsite project.
LOCAL_DIR = r"c:\morsite"
IS_LOCAL = os.path.isdir(LOCAL_DIR)
if IS_LOCAL:
PROJECT_DIR = LOCAL_DIR
# BASE_URL = "http://127.0.0.1:8000/"
else:
PROJECT_DIR = r"/home/ordercak/public_html/sweetsamuel.co.il/"
# BASE_URL = "http://www.morsite.ordercakeinhaifa.com/"
def relToAbs(path):
    """Anchor *path* under PROJECT_DIR and normalise separators to '/'."""
    joined = os.path.join(PROJECT_DIR, path)
    return joined.replace('\\', '/')
def dec(st):
    """XOR-decode *st* against the fixed repeating 5-byte key.

    XOR is self-inverse, so dec(dec(s)) == s; the same routine both
    obfuscates and deobfuscates the credentials stored in this file
    (see assign()).

    Fixes: the original used xrange (Python 2 only) and built the
    result with quadratic += concatenation, and it raised IndexError
    for inputs longer than 50 characters because the key was expanded
    with '* 10'.  The key now repeats indefinitely (backward
    compatible: outputs are identical for all inputs <= 50 chars).
    """
    key = '\xab\x67\xa4\x5c\xbb'
    return ''.join(chr(ord(ch) ^ ord(key[i % len(key)]))
                   for i, ch in enumerate(st))
def assign(name, value):
    """Decode an obfuscated (name, value) pair and bind it as a module global."""
    globals()[dec(name)] = dec(value)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = [
('Mor' , 'SamuelCakes@gmail.com') ,
# ('Your Name', 'your_emai@example.com'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'morsite.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Tel_Aviv'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'he'#'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
MEDIA_ROOT = relToAbs('media')
MEDIA_URL = '/media/'
STATIC_ROOT = relToAbs('static')
STATIC_URL = '/static/'
MY_STATIC_ROOT = relToAbs('static_files')
# Additional locations of static files
STATICFILES_DIRS = (
MY_STATIC_ROOT,
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'f3oda#81rs%yu+*-bc%_5@*nmmf0!yiyw23d(!34awfexfc+j-'
# List of callables that know how to import templates from various sources.
if IS_LOCAL:
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
else:
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'morsite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'morsite.wsgi.application'
TEMPLATE_DIRS = (
relToAbs('templates') ,
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'grappelli',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'Prices' ,
'orderedmodel',
'django.contrib.comments',
'tagging',
'mptt',
'zinnia',
'menu' ,
'Gallery',
'contact_form',
'my_comment_app',
'tinymce',
)
COMMENTS_APP = 'my_comment_app'
#Zinnia stuff
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'zinnia.context_processors.version',
"django.core.context_processors.debug",
"django.contrib.messages.context_processors.messages",
) # Optional
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TINYMCE_DEFAULT_CONFIG = {
'theme_advanced_buttons1' : "save,newdocument,|,bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,styleselect,formatselect,fontselect,fontsizeselect",
}
# assign('\xee*\xe5\x15\xf7\xf4/\xeb\x0f\xef\xf47\xe5\x0f\xe8\xfc(\xf6\x18' , '\x9aU\x97h\x8e\x9dP\xdd')
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'cakesnmore1010@gmail.com'
assign('\xee*\xe5\x15\xf7\xf4/\xeb\x0f\xef\xf47\xe5\x0f\xe8\xfc(\xf6\x18' , '\xd1\x00\xce2\xd5\xc7\x08\xd1-\xcc\xde\x11\xd32\xcf\xc1')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# EMAIL_RECIPIAENTS_LIST = [EMAIL_HOST_USER ]
EMAIL_RECIPIAENTS_LIST = ['cakesnmore1010@gmail.com' , 'SamuelCakes@gmail.com'] | [
"samuel.yaron@gmail.com"
] | samuel.yaron@gmail.com |
72d7294ff5ad1f06d357da58ada0fb115ed59c6e | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /ABC/ABC151-200/ABC190/A.py | 2dc4094c45ab83aaac5f6866e3c4f197b3e157a5 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 502 | py | def solve(a, b, c):
if c == 0:
if b >= a:
return 'Aoki'
else:
return 'Takahashi'
else:
if a >= b:
return 'Takahashi'
else:
return 'Aoki'
def main():
    """Read three space-separated integers from stdin and print the winner."""
    a, b, c = (int(tok) for tok in input().split())
    print(solve(a, b, c))
def test():
    """Sanity-check solve() against known cases."""
    cases = (
        ((2, 1, 0), 'Takahashi'),
        ((2, 2, 0), 'Aoki'),
        ((2, 2, 1), 'Takahashi'),
    )
    for args, expected in cases:
        assert solve(*args) == expected
if __name__ == "__main__":
    # Run the sanity checks first, then handle the real input.
    test()
    main()
| [
"cashfeg@gmail.com"
] | cashfeg@gmail.com |
bc18007a717e92dad6c5f793ceb31f91ad7bb8a8 | eff0422ed21d7b1b6a870efbc1b969e30b9d2897 | /fabtools/tests/test_vagrant_version.py | 8fec58e230befa79ac31805d7300e4b42022080d | [
"BSD-2-Clause"
] | permissive | fabtools/fabtools | 561ecec02227f48d84e0ff9c5e659d32819e6413 | 5fdc7174c3fae5e93a16d677d0466f41dc2be175 | refs/heads/master | 2023-08-01T15:55:56.871793 | 2019-09-16T09:19:00 | 2019-09-16T09:19:00 | 2,325,793 | 308 | 55 | BSD-2-Clause | 2021-06-04T01:02:55 | 2011-09-05T01:44:24 | Python | UTF-8 | Python | false | false | 1,268 | py | import unittest
from mock import patch
class _Success(str):
@property
def failed(self):
return False
class TestVagrantVersion(unittest.TestCase):
    """Checks that fabtools.vagrant.version() parses the different
    "vagrant --version" banner formats into version tuples.

    Each test patches fabtools.vagrant.local to return a canned banner
    (wrapped in _Success so .failed is False) and imports version()
    after the patch is in place.
    """

    def test_vagrant_version_1_3_0(self):
        # Older banner format: "Vagrant version X.Y.Z"
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = _Success("Vagrant version 1.3.0\n")
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 3, 0))

    def test_vagrant_version_1_3_1(self):
        # "Vagrant vX.Y.Z" variant
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = _Success("Vagrant v1.3.1\n")
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 3, 1))

    def test_vagrant_version_1_4_3(self):
        # Modern "Vagrant X.Y.Z" format
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = _Success("Vagrant 1.4.3\n")
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 4, 3))

    def test_vagrant_version_1_5_0_dev(self):
        # Pre-release suffix becomes an extra string component
        with patch('fabtools.vagrant.local') as mock_local:
            mock_local.return_value = _Success("Vagrant 1.5.0.dev\n")
            from fabtools.vagrant import version
            self.assertEqual(version(), (1, 5, 0, 'dev'))
| [
"ronan.amicel@gmail.com"
] | ronan.amicel@gmail.com |
bba4fd67e94ae71583cf7f433709f6cad7bacfc7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_luxury.py | e4dee89c8349b6b4fdb26c26725c56453c0f4872 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py |
#class header
class _LUXURY():
def __init__(self,):
self.name = "LUXURY"
self.definitions = [u'great comfort, especially as provided by expensive and beautiful things: ', u'something expensive that is pleasant to have but is not necessary: ', u'something that gives you a lot of pleasure but cannot be done often: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7f45c773c3dd9aefea08cb9a9711902e9d32e7e8 | 3adf1035314c70514e7acefb13c5489e41fab30e | /stock/migrations/0001_initial.py | a3d8387afb1f6899b25333d2103a84a4be428dba | [
"Apache-2.0"
] | permissive | nowanys/GreaterWMS | 9597bcb2eee25e5c803355d9e7373b62c03af909 | 51baefe3a10016575411133bbc6eb4625d794d82 | refs/heads/master | 2023-02-24T06:14:04.318108 | 2021-01-26T06:57:45 | 2021-01-26T06:57:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,077 | py | # Generated by Django 3.1.4 on 2021-01-18 02:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the stock app: creates the StockBinModel
    ('stockbin') and StockListModel ('stocklist') tables.

    Both models carry an 'openid' owner field and auto-managed
    create/update timestamps.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='StockBinModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bin_name', models.CharField(max_length=255, verbose_name='Bin Name')),
                ('goods_code', models.CharField(max_length=255, verbose_name='Goods Code')),
                ('goods_desc', models.CharField(max_length=255, verbose_name='Goods Description')),
                ('goods_qty', models.IntegerField(default=0, verbose_name='Binstock Qty')),
                ('pick_qty', models.IntegerField(default=0, verbose_name='BinPick Qty')),
                ('picked_qty', models.IntegerField(default=0, verbose_name='BinPicked Qty')),
                ('bin_size', models.CharField(max_length=255, verbose_name='Bin size')),
                ('bin_property', models.CharField(max_length=255, verbose_name='Bin Property')),
                ('openid', models.CharField(max_length=255, verbose_name='Openid')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
            ],
            options={
                'verbose_name': 'data id',
                'verbose_name_plural': 'data id',
                'db_table': 'stockbin',
                'ordering': ['-id'],
            },
        ),
        migrations.CreateModel(
            name='StockListModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('goods_code', models.CharField(max_length=32, verbose_name='Goods Code')),
                ('goods_desc', models.CharField(max_length=255, verbose_name='Goods Description')),
                ('goods_qty', models.BigIntegerField(default=0, verbose_name='Total Qty')),
                ('onhand_stock', models.BigIntegerField(default=0, verbose_name='On Hand Stock')),
                ('can_order_stock', models.BigIntegerField(default=0, verbose_name='Can Order Stock')),
                ('ordered_stock', models.BigIntegerField(default=0, verbose_name='Ordered Stock')),
                ('inspect_stock', models.BigIntegerField(default=0, verbose_name='Inspect Stock')),
                ('hold_stock', models.BigIntegerField(default=0, verbose_name='Holding Stock')),
                ('damage_stock', models.BigIntegerField(default=0, verbose_name='Damage Stock')),
                ('asn_stock', models.BigIntegerField(default=0, verbose_name='ASN Stock')),
                ('dn_stock', models.BigIntegerField(default=0, verbose_name='DN Stock')),
                ('pre_load_stock', models.BigIntegerField(default=0, verbose_name='Pre Load Stock')),
                ('pre_sort_stock', models.BigIntegerField(default=0, verbose_name='Pre Sort Stock')),
                ('sorted_stock', models.BigIntegerField(default=0, verbose_name='Sorted Stock')),
                ('pick_stock', models.BigIntegerField(default=0, verbose_name='Pick Stock')),
                ('picked_stock', models.BigIntegerField(default=0, verbose_name='Picked Stock')),
                ('back_order_stock', models.BigIntegerField(default=0, verbose_name='Back Order Stock')),
                ('openid', models.CharField(max_length=255, verbose_name='Openid')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Create Time')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='Update Time')),
            ],
            options={
                'verbose_name': 'data id',
                'verbose_name_plural': 'data id',
                'db_table': 'stocklist',
                'ordering': ['-id'],
            },
        ),
    ]
| [
"singosgu@gmail.com"
] | singosgu@gmail.com |
2b4b4aa93d84385b2cdcabc1169aa211a0ecf359 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/12_Logging/a_builtin_logging/16_logging_json.py | e96163fa788ef205f6df9a6c57247576641c0168 | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,015 | py | import json
import logging
import logging.config
class JsonFormatter:
    """Minimal logging formatter that renders a LogRecord as indented JSON."""

    # LogRecord attributes copied verbatim into the JSON payload.
    ATTR_TO_JSON = [
        "created",
        "filename",
        "funcName",
        "levelname",
        "lineno",
        "module",
        "msecs",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "thread",
        "threadName",
    ]

    def format(self, record):
        """Serialise the whitelisted attributes of *record* to a JSON string."""
        payload = {}
        for attr in self.ATTR_TO_JSON:
            payload[attr] = getattr(record, attr)
        return json.dumps(payload, indent=4)
# Attach the JSON formatter to both a console handler and a file handler.
console_handler = logging.StreamHandler()
console_handler.formatter = JsonFormatter()  # direct attribute set instead of setFormatter()
# NOTE(review): assumes the logs/ directory already exists -- FileHandler raises otherwise.
file_handler = logging.FileHandler(filename="logs/17_logging_json.json", mode="w")
file_handler.formatter = JsonFormatter()
logger = logging.getLogger(__name__)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
# No level is set on this logger, so it inherits the root default (WARNING):
# the debug/info calls below are filtered out; warning and above are emitted.
logger.debug("Debug message")
logger.info("info message")
logger.warning("warning message")
logger.error("error message")
logger.critical("critical message")
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
99a7a7f6f37e668b259ef80193a305341646509f | 0818a9020adc6e25b86060a8e84171d0b4958625 | /test_demo/learn_mxnet/main.py | 32fe35b55545b1f1f14cb4d73be8713c45578381 | [] | no_license | wgwangang/mycodes | 2107becb6c457ed88b46426974a8f1fa07ed37dd | 9fa48ca071eacf480034d1f69d3c05171d8a97d2 | refs/heads/master | 2020-03-28T07:58:45.017910 | 2018-03-14T07:21:14 | 2018-03-14T07:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | import mxnet as mx
from mxnet import sym
from mxnet import symbol
net = mx.sym.Variable('data')
# NOTE(review): weight=net wires each layer's weight to the 'data' symbol
# itself -- unusual; presumably an experiment with symbol sharing, confirm intent.
net = mx.sym.FullyConnected(data=net, weight=net,name='fc1', num_hidden=128)
net2 = symbol.FullyConnected(data=net,weight=net, name='fc1', num_hidden=128)
# mx.sym and mx.symbol refer to the same module; the prints show that.
print(sym)
print(symbol)
# Render the computation graph to a file.
mx.viz.plot_network(symbol=net).render()
| [
"yinpenghhz@hotmail.com"
] | yinpenghhz@hotmail.com |
699c42fa3bf5aa3e7036983a7ba52eef403977dd | 2f638d47a9681cbb2caab865702ddca39a0456d3 | /djangocms_misc/basic/app_template/views.py | 34e07fb3adb9556429404f527e27f54219269bc9 | [
"MIT"
] | permissive | bnzk/djangocms-misc | b0d1a1950b3d8c7752ea661c74bc08bfbd0360a6 | 8869384305ef7ff8538af986f4854bcfde7257de | refs/heads/develop | 2023-06-08T10:12:11.275012 | 2023-05-30T13:00:34 | 2023-05-30T13:00:34 | 66,085,267 | 1 | 1 | MIT | 2023-02-04T07:49:28 | 2016-08-19T13:43:34 | Python | UTF-8 | Python | false | false | 410 | py | from django.views.generic import ListView, DetailView
from .models import AppTemplate
from .views_utils import PublishedViewMixin, AutoSlugMixin, LanguageChooserEnhancerMixin
class AppTemplateListView(PublishedViewMixin, ListView):
    """List view for AppTemplate objects.

    NOTE(review): PublishedViewMixin presumably restricts the queryset
    to published entries (defined in views_utils) -- confirm there.
    """
    model = AppTemplate
class AppTemplateDetailView(
    AutoSlugMixin,
    PublishedViewMixin,
    LanguageChooserEnhancerMixin,
    DetailView,
):
    """Detail view for a single AppTemplate.

    Mixin order determines the MRO; AutoSlugMixin, PublishedViewMixin and
    LanguageChooserEnhancerMixin come from views_utils -- their exact
    behaviour (presumably slug-based lookup, published filtering, and
    language-chooser support) is defined there.
    """
    model = AppTemplate
| [
"bnzk@bnzk.ch"
] | bnzk@bnzk.ch |
d1ddbd8e8f4dfdb9b410d931a174d498c0ea422f | 84f1fea102aeb2d324e8ad3908e1765d04a0a730 | /manage.py | 61ffa52ca3f12cdb094726d77de51bf55c8f649f | [
"Apache-2.0"
] | permissive | Natsoye/explorer | c205f8eb8d08705c2c4ee4ee45c28f7d0a534b10 | 638c70204d6001d9c5c56701917a6273a02c90cf | refs/heads/master | 2021-08-30T10:42:56.371192 | 2021-08-17T15:43:04 | 2021-08-17T15:43:04 | 181,131,891 | 2 | 0 | Apache-2.0 | 2021-08-17T15:43:05 | 2019-04-13T06:43:15 | Python | UTF-8 | Python | false | false | 256 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before any config is read.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blockexplorer.settings")
    from django.core.management import execute_from_command_line
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"mflaxman@gmail.com"
] | mflaxman@gmail.com |
f5ac3e553479901461c99d71fd770afbce1fe15f | 45f93a9d47204d76b8bf25a71dfb79403e75c33c | /Trees_and_Graphs/Bellmen-Ford-Algorithm.py | 147fdc8e118ce15efcd43136f824d40cbe359e4f | [] | no_license | tahmid-tanzim/problem-solving | 0173bce1973ac3e95441a76c10324c0e1b0a57c3 | 6ddb51de6772130f209474e76f39ca2938f444f0 | refs/heads/master | 2023-06-25T02:18:03.690263 | 2023-06-20T06:58:46 | 2023-06-20T06:58:46 | 137,173,850 | 4 | 1 | null | 2022-03-30T08:28:41 | 2018-06-13T06:44:25 | Python | UTF-8 | Python | false | false | 1,960 | py | #!/usr/bin/python3
"""
Bellman Ford Algorithm
Single-Source - Shortest Path (SSSP)
Dynamic Programming
Time complexity - O(n^2)
"""
class Graph:
    """Adjacency-list graph skeleton for the Bellman-Ford implementation.

    Fix: the constructor was misspelled __int__, so Python never called
    it and instances were created without the adjacencyList attribute.
    """

    def __init__(self):
        # Edge list / adjacency entries are appended here by callers.
        self.adjacencyList = []
if __name__ == "__main__":
# inputs = (
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F"),
# "edges": [
# ("A", "B", 2),
# ("A", "C", 4),
# ("B", "C", 1),
# ("B", "D", 7),
# ("C", "E", 3),
# ("D", "F", 1),
# ("E", "F", 5),
# ("E", "D", 2),
# ],
# "type": "Directed Graph"
# },
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F"),
# "edges": [
# ("A", "B", 50),
# ("A", "D", 10),
# ("A", "C", 45),
# ("B", "C", 10),
# ("B", "D", 15),
# ("C", "E", 30),
# ("D", "A", 10),
# ("D", "E", 15),
# ("E", "B", 20),
# ("E", "C", 35),
# ("F", "E", 3),
# ],
# "type": "Directed Graph"
# },
# {
# "start": "A",
# "vertices": ("A", "B", "C", "D", "E", "F", "G", "H", "I"),
# "edges": [
# ("A", "B", 4),
# ("A", "H", 8),
# ("H", "B", 11),
# ("B", "C", 8),
# ("H", "I", 7),
# ("H", "G", 1),
# ("I", "G", 6),
# ("I", "C", 2),
# ("D", "C", 7),
# ("D", "E", 9),
# ("D", "F", 14),
# ("E", "F", 10),
# ("C", "F", 4),
# ("G", "F", 2),
# ],
# "type": "Undirected Graph"
# },
# )
pass
| [
"tahmid.tanzim@gmail.com"
] | tahmid.tanzim@gmail.com |
e58d118d8660ed80dce0203b57e55c19fe6d55fb | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /test/collective/test_communication_stream_allreduce_api.py | 60386a6262ff257ec272578a19e1b583f84a3960 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 1,685 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import test_communication_api_base as test_base
class TestCommunicationStreamAllreduceAPI(test_base.CommunicationTestDistBase):
    """Runs the allreduce-stream dygraph script on 2 devices across the
    product of sync_op / use_calc_stream settings (NCCL backend).
    """

    def setUp(self):
        # 2 devices, 120 s per case; env values are passed as strings and
        # eval'd below when filtering combinations.
        super().setUp(num_of_devices=2, timeout=120)
        self._default_envs = {
            "backend": "nccl",
            "shape": "(100, 200)",
            "dtype": "float32",
            "seeds": str(self._seeds),
        }
        self._changeable_envs = {
            "sync_op": ["True", "False"],
            "use_calc_stream": ["True", "False"],
        }

    def test_allreduce_stream(self):
        envs_list = test_base.gen_product_envs_list(
            self._default_envs, self._changeable_envs
        )
        for envs in envs_list:
            # Skip the combination use_calc_stream=True with sync_op=False.
            if eval(envs["use_calc_stream"]) and not eval(envs["sync_op"]):
                continue
            self.run_test_case(
                "communication_stream_allreduce_api_dygraph.py",
                user_defined_envs=envs,
            )

    def tearDown(self):
        super().tearDown()
if __name__ == '__main__':
unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
f8bc55e2cd75960e261d70dec8161f5a44faaa63 | 494af1db6ac6a72b738d79053a8084c3afd1dbd2 | /smartapp/urls.py | ea82d10e5d8b824dba8bfc8c28204ba5502ada1c | [
"MIT"
] | permissive | ae200/SmartApp | b21f0e031a558341b18ea7ca5787f726e1d09fb5 | b24bb7139e65976428ceec1e9d082f2eac52fd24 | refs/heads/master | 2023-01-15T05:26:31.005542 | 2020-07-16T21:52:31 | 2020-07-16T21:52:31 | 132,465,331 | 1 | 1 | MIT | 2023-01-06T05:24:56 | 2018-05-07T13:34:30 | Python | UTF-8 | Python | false | false | 3,510 | py | """smartapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path, re_path, include
from django.contrib import admin
from django.views.generic.base import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/', admin.site.urls),
#path('register/', register),
# url (r'^$', TemplateView.as_view(template_name="ang_home.html"), name='home'),
path('api/movies/', include('movies.api.urls')),
# path('streamapi/streammovies/', include('streammovies.streamapi.urls')),
path('actionapi/actionmovies/', include('actionmovies.actionapi.urls')),
path('actionthrillerapi/actionthriller/', include('actionthriller.actionthrillerapi.urls')),
path('actionrealapi/actionreal/', include('actionreal.actionrealapi.urls')),
path('adventureapi/adventuremovies/', include('adventuremovies.adventureapi.urls')),
path('adventurethrillerapi/adventurethriller/', include('adventurethriller.adventurethrillerapi.urls')),
path('adventurerealapi/adventurereal/', include('adventurereal.adventurerealapi.urls')),
path('comedyapi/comedymovies/', include('comedymovies.comedyapi.urls')),
path('comedythrillerapi/comedythriller/', include('comedythriller.comedythrillerapi.urls')),
path('comedyrealapi/comedyreal/', include('comedyreal.comedyrealapi.urls')),
path('dramathrillerapi/dramathriller/', include('dramathriller.dramathrillerapi.urls')),
path('dramarealapi/dramareal/', include('dramareal.dramarealapi.urls')),
path('dramaapi/dramamovies/', include('dramamovies.dramaapi.urls')),
path('fictionthrillerapi/fictionthriller/', include('fictionthriller.fictionthrillerapi.urls')),
path('fictionrealapi/fictionreal/', include('fictionreal.fictionrealapi.urls')),
path('fictionapi/fictionmovies/', include('fictionmovies.fictionapi.urls')),
path('historicalapi/historicalmovies/', include('historicalmovies.historicalapi.urls')),
path('historicalthrillerapi/historicalthriller/', include('historicalthriller.historicalthrillerapi.urls')),
path('historicalrealapi/historicalreal/', include('historicalreal.historicalrealapi.urls')),
url(r'^rest-auth/', include('rest_auth.urls')),
url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
url(r'^users/', include('users.urls')),
url(r'^account/', include('allauth.urls')),
url(r'^accounts-rest/registration/account-confirm-email/(?P<key>.+)/', confirm_email, name='account_confirm_email')
]
urlpatterns += [
re_path(r'^(?P<path>.*)', TemplateView.as_view(template_name="ang_movies.html"), name='movies'),
re_path(r'^(?P<path>.*)', TemplateView.as_view(template_name="ang_home.html"), name='home'),
] | [
"dandaoluks@gmail.com"
] | dandaoluks@gmail.com |
b4b9f787181c6c79a53b128b8e22d735c4638e6c | afa52cfab070818eb08fb9a456b0defcf2df5ebd | /tools/upgrade/errors.py | 26d6437bea9c810823d82964c742119199b1b3a1 | [
"MIT"
] | permissive | vkoukoutsas/pyre-check | a128d77a6d56b50639496025cc458873db7b21c5 | 73fa0dda836c413a86879eb9ef8ba0538e29d615 | refs/heads/master | 2020-07-22T11:09:00.988050 | 2019-09-08T19:56:23 | 2019-09-08T19:57:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import itertools
import json
import sys
from typing import Any, Dict, List, Optional, Tuple
from .postprocess import LOG
def json_to_errors(json_string: Optional[str]) -> List[Dict[str, Any]]:
    """Parse a JSON string of pyre errors into a list of error dicts.

    Logs an error and returns [] when the input is missing or not valid JSON.
    """
    if json_string:
        try:
            return json.loads(json_string)
        # json.JSONDecodeError is the public alias of json.decoder.JSONDecodeError,
        # which also removes the need for the pyre-fixme suppression.
        except json.JSONDecodeError:
            LOG.error(
                "Received invalid JSON as input. "  # fixed typo + missing space
                "If piping from `pyre check` be sure to use `--output=json`."
            )
    else:
        LOG.error(
            "Received no input. "  # fixed typo + missing space
            "If piping from `pyre check` be sure to use `--output=json`."
        )
    return []
def sort_errors(errors: List[Dict[str, Any]]) -> List[Tuple[str, List[Any]]]:
    """Group errors by file path, sorted by path.

    The groups are materialized into lists so the result matches the annotated
    return type: itertools.groupby yields lazy sub-iterators that are
    invalidated as soon as the outer iterator advances, which silently dropped
    data for callers that held on to a group.
    """

    def error_path(error):
        return error["path"]

    return [
        (path, list(group))
        for path, group in itertools.groupby(sorted(errors, key=error_path), error_path)
    ]
def filter_errors(arguments, errors) -> List[Dict[str, Any]]:
    """Keep only errors matching --only-fix-error-code, if one was given."""
    target_code = arguments.only_fix_error_code
    if target_code:
        return [error for error in errors if error["code"] == target_code]
    return errors
def errors_from_stdin(_arguments) -> List[Dict[str, Any]]:
    """Read error JSON from stdin and apply the command-line error filter."""
    raw = sys.stdin.read()
    return filter_errors(_arguments, json_to_errors(raw))
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1eb27ae6c2701ba03d0d277c21d91755f6868825 | 48c62c3693e419257d1e26fd065bf92801f4ef4d | /django_lets_go/custom_xml_emitter.py | 16d19cabf07e9e99990e9f976ae2d4747bf3a91a | [
"MIT"
] | permissive | callhub/django-lets-go | 0da70777331adb9c0637f7b03154bdff7dd8a026 | 70e9016a91b1db06685a7d0cf9ee414e49375fe8 | refs/heads/master | 2021-01-25T13:11:21.140612 | 2014-09-23T16:17:29 | 2014-09-23T16:17:29 | 123,537,598 | 0 | 0 | null | 2018-03-02T06:03:26 | 2018-03-02T06:03:25 | null | UTF-8 | Python | false | false | 2,153 | py | #
# Django-Lets-go License
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from django.utils.encoding import smart_unicode
from django.utils.xmlutils import SimplerXMLGenerator
from piston.emitters import Emitter
from piston.utils import Mimer
from django.contrib.auth import authenticate
from django.http import HttpResponse
from django.conf import settings
class CustomXmlEmitter(Emitter):
    """Piston emitter that serializes data as XML under a <Response> root.

    NOTE: this is Python 2 code (dict.iteritems, cStringIO).
    """

    def _to_xml(self, xml, data):
        # Recursive walk: sequences flatten, dicts become nested elements,
        # everything else is emitted as character data.
        if isinstance(data, (list, tuple)):
            for element in data:
                self._to_xml(xml, element)
        elif isinstance(data, dict):
            for key, value in data.iteritems():
                xml.startElement(key, {})
                self._to_xml(xml, value)
                # Close with only the first whitespace-separated token of the
                # key (keys may carry attribute-like suffixes).
                xml.endElement(key.split()[0])
        else:
            xml.characters(smart_unicode(data))

    def render(self, request):
        """Serialize self.construct() into a UTF-8 XML document string."""
        buffer = StringIO.StringIO()
        writer = SimplerXMLGenerator(buffer, "utf-8")
        writer.startDocument()
        writer.startElement("Response", {})
        self._to_xml(writer, self.construct())
        writer.endElement("Response")
        writer.endDocument()
        return buffer.getvalue()
# Register the emitter under the 'custom_xml' format, and register a no-op
# deserializer so text/xml request bodies are accepted without parsing.
Emitter.register('custom_xml', CustomXmlEmitter, 'text/xml; charset=utf-8')
Mimer.register(lambda *a: None, ('text/xml',))
class IpAuthentication(object):
    """IP Authentication handler.

    Piston authentication backend that accepts a request only when its
    remote address is listed in ``settings.API_ALLOWED_IP``.
    """

    def __init__(self, auth_func=authenticate, realm='API'):
        self.auth_func = auth_func
        self.realm = realm

    def is_authenticated(self, request):
        """Return True when the client's REMOTE_ADDR is whitelisted."""
        try:
            settings.API_ALLOWED_IP.index(request.META['REMOTE_ADDR'])
            return True
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any lookup failure (missing header,
            # address not in the whitelist, missing setting) means "deny".
            return False

    def challenge(self):
        """Return the 401 response sent to unauthenticated clients."""
        resp = HttpResponse("Not Authorized")
        resp.status_code = 401
        return resp
| [
"areski@gmail.com"
] | areski@gmail.com |
d382ac9e7cbdb1e20a269d0296a00a0cd13c0279 | b96f1bad8a74d31d8ff79bc955813bfcd17d7b26 | /Longest Valid Parentheses3.py | 96a38c4d67c9185d78a04d0b65099873753c94b3 | [] | no_license | brianhu0716/LeetCode-Solution | e7177af15e84e833ce8ab05027683ed4ac489643 | 158a4359c90b723545b22c4898047274cc1b80a6 | refs/heads/main | 2023-07-11T05:29:56.783795 | 2021-08-28T12:53:14 | 2021-08-28T12:53:14 | 374,991,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 21:37:21 2021
@author: Brian
"""
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 21:37:21 2021
@author: Brian
"""
"""
Created on Sat Feb 6 19:30:57 2021
@author: Brian
"""
import numpy as np
class Solution:
    """Length of the longest valid (well-formed) parentheses substring."""

    def longestValidParentheses(self, s) -> int:
        """Return the length of the longest valid parentheses substring of s.

        Standard O(n) stack algorithm: the stack holds the index just before
        the current candidate substring; an unmatched ')' index becomes the
        new base. This replaces the previous ad-hoc scan, which killed the
        whole process (print(-1) + sys.exit) on inputs such as ')(' and
        mis-handled several nesting patterns.
        """
        self.s = s
        base_stack = [-1]  # index preceding the current valid run
        best = 0
        for i, ch in enumerate(s):
            if ch == '(':
                base_stack.append(i)
            else:
                base_stack.pop()
                if base_stack:
                    best = max(best, i - base_stack[-1])
                else:
                    base_stack.append(i)  # unmatched ')': new base index
        # Kept for backward compatibility: the driver below reads
        # `max(self.pairs) * 2` after each call (pairs are half-lengths).
        self.pairs = [best // 2]
        return best
# Ad-hoc driver: each inline number is the expected longest-valid length.
s = ['()((())))', # 8
'()()', # 4
'()(()', # 2
'())()', # 2
')(', # 0
'())((()))', # 6
'()(((()))', # 6
'(()())', # 6
")()())", # 4
"(()()", # 4
"((()))())"]
test = Solution()
for i in range(len(s)):
    test.longestValidParentheses(s[i])
    # The result is read back through the `pairs` attribute (half-lengths).
    print(max(test.pairs) * 2)
| [
"85205343+brianhu0716@users.noreply.github.com"
] | 85205343+brianhu0716@users.noreply.github.com |
3032790ef87d1235ba183234e61a2382394daf55 | 7bcec8a9c6a240ec0888bec4179f536046464005 | /moviesys/moviesys/.history/library/views_20210325010105.py | 41322bcdbacdcc67f0951a400f57f5fc6504fbbb | [] | no_license | yifanzhang13/MovieManagementSystem_group5 | c64e5810914c3d33ae6cd94e8eed5dc5a3962181 | 4cca1a4299311681d69b2347ca8d7b02e0846ebc | refs/heads/main | 2023-03-29T08:30:26.655108 | 2021-04-01T15:42:52 | 2021-04-01T15:42:52 | 344,417,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,720 | py | from django.shortcuts import render
from .models import Movies, Users, Ratings, Links, Tags
from django.db import connection
from django.views import generic
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from library.forms import SearchMovieForm
# Create your views here.
def index(request):
    """Render the home page with the movie total and per-score rating counts."""
    cursor = connection.cursor()
    try:
        # Use explicit COUNT(*) queries. The previous code used the return
        # value of cursor.execute() as a row count, which DB-API 2.0 leaves
        # unspecified (it only happens to work with the MySQLdb backend).
        def count(query, params=()):
            # Helper: run a COUNT query and return its scalar result.
            cursor.execute(query, params)
            return cursor.fetchone()[0]

        num_movies = count('SELECT COUNT(*) FROM library_movies')
        rating_counts = {
            score: count(
                'SELECT COUNT(*) FROM library_ratings WHERE RatingScore = %s',
                [score])
            for score in (5, 4, 3, 2, 1)
        }
    finally:
        cursor.close()
    context = {
        'num_movies': num_movies,
        'rating_5': rating_counts[5],
        'rating_4': rating_counts[4],
        'rating_3': rating_counts[3],
        'rating_2': rating_counts[2],
        'rating_1': rating_counts[1],
    }
    return render(request, 'index.html', context=context)
def MoviesView(request):
    """Render the movie list page from a raw query over library_movies."""
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies')
        rows = cursor.fetchall()
    finally:
        cursor.close()
    # Rows are (MovieID, MovieTitle, MovieGenres, ...).
    movie_dicts = [
        {'MovieID': row[0], 'MovieTitle': row[1], 'MovieGenres': row[2]}
        for row in rows
    ]
    return render(request, 'Movies.html', context={'movies': movie_dicts})
class MovieDetailView(generic.DetailView):
    # Generic detail view over the Movies model. NOTE(review): a function-based
    # MovieDetail view also exists below — confirm which one the URLconf routes.
    model = Movies
def MovieDetail(request, pk):
    """Render the detail page for one movie, looked up by primary key."""
    form = SearchMovieForm()
    if request.method == 'POST':
        form = SearchMovieForm(request.POST)
        if form.is_valid():
            # TODO(review): hardcoded host and movie id — should build the URL
            # with reverse('movie_detail', args=[...]) instead.
            return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/' + str(2))
    movie = get_object_or_404(Movies, pk=pk)  # 404 before touching the cursor
    cursor = connection.cursor()
    try:
        # Parameterized query: pk comes from the URL, so it must never be
        # interpolated into the SQL string (the old code concatenated str(pk),
        # an SQL-injection pattern).
        cursor.execute('SELECT * FROM library_movies WHERE MovieID = %s', [pk])
        results = cursor.fetchall()
    finally:
        cursor.close()
    movies = [
        {'MovieID': row[0], 'MovieTitle': row[1], 'MovieGenres': row[2]}
        for row in results
    ]
    return render(request, 'library/movies_detail.html', context={'movies': movies})
def search(request):
    """Render the (empty) movie-search page."""
    return render(request, "library/search.html", context={})
def handle(request):
    """Handle a movie-search POST: exact-match report + partial-match list."""
    text = request.POST["search_content"] # user input text
    movie_report = {}
    po_list = []
    cursor = connection.cursor()
    try:
        cursor.execute('SELECT * FROM library_movies')
        results = cursor.fetchall()
        for row in results:
            # row layout: (id, title, genres)
            if text == row[1]: # exact title match found
                movie_report['MovieTitle'] = row[1]
                movie_report['MovieID'] = row[0]
                movie_report['MovieGenres'] = str(row[2]).replace("|"," & ")
                # NOTE(review): 'test11' looks like a debugging leftover.
                movie_report['test'] = 'test11'
            if text in row[1]: # partial match: title contains the keyword
                # Plain dicts instead of tuples: JS on the template side
                # does not support tuples.
                dic = {
                    'MovieID':row[0],
                    'MovieTitle':row[1],
                    'MovieGenres':row[2],
                }
                po_list.append(dic)
        if movie_report:
            # Number of users who rated this movie.
            cursor.execute('SELECT count(*) FROM library_ratings WHERE MovieID_id = %s', [movie_report['MovieID']])
            results = cursor.fetchall()
            for row in results:
                movie_report['number_of_ratings'] = row[0]
            # The movie's average rating — NOTE(review): the scores are only
            # fetched and printed; the average is never computed or stored.
            cursor.execute('SELECT RatingScore FROM library_ratings WHERE MovieID_id = %s', [movie_report['MovieID']])
            results = cursor.fetchall()
            print(results)
    finally:
        cursor.close()
    context = {
        'resp':po_list,
        'report':movie_report,
    }
    return render(request, "library/resp.html", context=context)
def handle_backup(request):
    """ORM-based variant of handle().

    NOTE(review): this view builds `context` but never returns a response, so
    Django would raise on a None return — presumably dead/backup code.
    """
    text = request.POST["search_content"] # user input text
    db = Movies.objects.all()
    movie_report = []
    po_list = []
    for i in db:
        if text == i.MovieTitle:
            # Exact title match: keep the model instance itself.
            movie_report = i
        if text in i.MovieTitle:
            po_list.append(i)
    context = {
        'resp':po_list,
        'report':movie_report,
    }
# def MovieDetail(request, pk):
# movie = get_object_or_404(Movies, pk=pk)
# print(pk) # pk等于14 http://127.0.0.1:8000/library/movies/14
# # form = SearchMovieForm()
# # if request.method == 'POST':
# # form = SearchMovieForm(request.POST)
# # if form.is_valid():
# # return HttpResponseRedirect('http://127.0.0.1:8000/library/movies/'+str(2))
# context = {
# 'movie': movie,
# }
# return render(request, 'library/movies_detail.html', context)
class MoviesListView(generic.ListView):
# The generic view will query the database to get all records for the specified model
# (Movies) then render a template located
# at /locallibrary/catalog/templates/catalog/Movies_list.html (which we will create below).
# Within the template you can access the list of books with the
# template variable named object_list OR book_list (i.e. generically "the_model_name_list").
model = Movies | [
"yifancheung13@gmail.com"
] | yifancheung13@gmail.com |
c87233b316d1cde9beb727f774ac0a744257d918 | e1ffebca6a0f185663c779462e3ca27866f557b8 | /week1/Project1/api/urls.py | 6d299dbdab8ff4ebcaebb95a5661e680c6a213b5 | [] | no_license | asselyer/Backend2019 | d8d85d7850261880fe4aeef9092b0a8c7b1b6767 | ec5931e2bd22ec62e68592a4199c00184f4dacc3 | refs/heads/master | 2020-07-24T13:38:21.246351 | 2019-12-04T03:16:27 | 2019-12-04T03:16:27 | 207,944,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from django.urls import path, include
from api import views
# API routes: list + detail endpoints for projects and tasks.
urlpatterns = [
    path('projects/', views.ProjectList.as_view(), name='project_list'),
    path('projects/<int:pk>', views.ProjectDetail.as_view(), name='project_detail'),
    path('tasks/', views.TaskList.as_view(), name='task_list'),
    path('tasks/<int:pk>', views.TaskDetail.as_view(), name='task_detail'),
    # NOTE(review): unlike the routes above this one has no `name=` — add one
    # if it ever needs to be reversed.
    path('block/', views.BlockList.as_view())
] | [
"asel.yer98@gmail.com"
] | asel.yer98@gmail.com |
49ffa16b8391b7ab4ce364bdc4d7a5b5e37759e8 | d73e73bad3d797bbfa82971ac44e424e7e163fe6 | /mimic_learner/comparsion_learners/cart_scratch.py | 257f6261c6c76b2d76a50064ea52f7e72e36c284 | [] | no_license | Guiliang/statistical-DRL-interpreter | 80bdd885c1d3029a48654118667c0071f3e4a8d8 | 033d79e45579fb7ddd824c8e04d245d0285741f3 | refs/heads/master | 2023-07-24T21:50:56.323738 | 2021-01-04T05:28:20 | 2021-01-04T05:28:20 | 241,288,171 | 1 | 0 | null | 2023-07-23T06:00:02 | 2020-02-18T06:22:52 | Python | UTF-8 | Python | false | false | 6,793 | py | import numpy as np
class CART(object):
    """CART decision tree for classification (tree='cls') or regression.

    Each CART instance doubles as a tree node: fit() grows the tree under
    self.root, and the private helpers grow/prune/traverse the node graph
    recursively.
    """

    def __init__(self, tree='cls', criterion='gini', prune='depth', max_depth=4, min_criterion=0.05):
        self.feature = None      # split feature index; None marks a leaf
        self.label = None        # majority class (cls) or mean target (reg)
        self.n_samples = None
        self.gain = None         # impurity gain of the chosen split
        self.left = None
        self.right = None
        self.threshold = None
        self.depth = 0
        self.root = None
        self.criterion = criterion
        self.prune = prune
        self.max_depth = max_depth
        self.min_criterion = min_criterion
        self.tree = tree

    def fit(self, features, target):
        """Grow the tree on (features, target), then prune it."""
        self.root = CART()
        if (self.tree == 'cls'):
            self.root._grow_tree(features, target, self.criterion)
        else:
            # Regression always uses mean-squared-error impurity.
            self.root._grow_tree(features, target, 'mse')
        self.root._prune(self.prune, self.max_depth, self.min_criterion, self.root.n_samples)

    def predict(self, features):
        """Return per-row predictions as a numpy array."""
        return np.array([self.root._predict(f) for f in features])

    def print_tree(self):
        """Pretty-print the fitted tree to stdout."""
        self.root._show_tree(0, ' ')

    def _grow_tree(self, features, target, criterion='gini'):
        # Recursively pick the best (feature, threshold) split and build children.
        self.n_samples = features.shape[0]
        if len(np.unique(target)) == 1:
            self.label = target[0]  # pure node: stop
            return
        best_gain = 0.0
        best_feature = None
        best_threshold = None
        if criterion in {'gini', 'entropy'}:
            self.label = max([(c, len(target[target == c])) for c in np.unique(target)], key=lambda x: x[1])[0]
        else:
            self.label = np.mean(target)
        impurity_node = self._calc_impurity(criterion, target)
        for col in range(features.shape[1]):
            feature_level = np.unique(features[:, col])
            # Candidate thresholds: midpoints between consecutive feature values.
            thresholds = (feature_level[:-1] + feature_level[1:]) / 2.0
            for threshold in thresholds:
                target_l = target[features[:, col] <= threshold]
                impurity_l = self._calc_impurity(criterion, target_l)
                n_l = float(target_l.shape[0]) / self.n_samples
                target_r = target[features[:, col] > threshold]
                impurity_r = self._calc_impurity(criterion, target_r)
                n_r = float(target_r.shape[0]) / self.n_samples
                impurity_gain = impurity_node - (n_l * impurity_l + n_r * impurity_r)
                if impurity_gain > best_gain:
                    best_gain = impurity_gain
                    best_feature = col
                    best_threshold = threshold
        self.feature = best_feature
        self.gain = best_gain
        self.threshold = best_threshold
        self._split_tree(features, target, criterion)

    def _split_tree(self, features, target, criterion):
        # Partition rows by the chosen threshold and grow the two subtrees.
        features_l = features[features[:, self.feature] <= self.threshold]
        target_l = target[features[:, self.feature] <= self.threshold]
        self.left = CART()
        self.left.depth = self.depth + 1
        self.left._grow_tree(features_l, target_l, criterion)
        features_r = features[features[:, self.feature] > self.threshold]
        target_r = target[features[:, self.feature] > self.threshold]
        self.right = CART()
        self.right.depth = self.depth + 1
        self.right._grow_tree(features_r, target_r, criterion)

    def _calc_impurity(self, criterion, target):
        # Impurity of a node: gini, mse, or (default) entropy.
        if criterion == 'gini':
            return 1.0 - sum(
                [(float(len(target[target == c])) / float(target.shape[0])) ** 2.0 for c in np.unique(target)])
        elif criterion == 'mse':
            return np.mean((target - np.mean(target)) ** 2.0)
        else:
            entropy = 0.0
            for c in np.unique(target):
                p = float(len(target[target == c])) / target.shape[0]
                if p > 0.0:
                    entropy -= p * np.log2(p)
            return entropy

    def _prune(self, method, max_depth, min_criterion, n_samples):
        # Bottom-up pruning, either by weighted gain ('impurity') or depth.
        if self.feature is None:
            return
        self.left._prune(method, max_depth, min_criterion, n_samples)
        self.right._prune(method, max_depth, min_criterion, n_samples)
        pruning = False
        if method == 'impurity' and self.left.feature is None and self.right.feature is None:
            if (self.gain * float(self.n_samples) / n_samples) < min_criterion:
                pruning = True
        elif method == 'depth' and self.depth >= max_depth:
            pruning = True
        if pruning is True:
            self.left = None
            self.right = None
            self.feature = None

    def _predict(self, d):
        # Walk the tree for a single row d; `is not None` replaces the
        # non-idiomatic `!= None` identity-vs-equality comparison.
        if self.feature is not None:
            if d[self.feature] <= self.threshold:
                return self.left._predict(d)
            else:
                return self.right._predict(d)
        else:
            return self.label

    def _show_tree(self, depth, cond):
        base = '    ' * depth + cond
        if self.feature is not None:
            print(base + 'if X[' + str(self.feature) + '] <= ' + str(self.threshold))
            self.left._show_tree(depth + 1, 'then ')
            self.right._show_tree(depth + 1, 'else ')
        else:
            print(base + '{value: ' + str(self.label) + ', samples: ' + str(self.n_samples) + '}')
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn import tree as sktree
def classification_example():
    """Fit the scratch classifier on iris and compare against sklearn."""
    print('\n\nClassification Tree')
    iris = load_iris()
    X, y = iris.data, iris.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    scratch_tree = CART(tree='cls', criterion='entropy', prune='depth', max_depth=3)
    scratch_tree.fit(X_train, y_train)
    scratch_tree.print_tree()
    pred = scratch_tree.predict(X_test)
    print("This Classification Tree Prediction Accuracy: {}".format(sum(pred == y_test) / len(pred)))
    baseline = sktree.DecisionTreeClassifier(criterion='entropy')
    baseline = baseline.fit(X_train, y_train)
    sk_pred = baseline.predict(X_test)
    print("Sklearn Library Tree Prediction Accuracy: {}".format(sum(sk_pred == y_test) / len(pred)))
def regression_example():
    """Fit the scratch regressor on noisy sine data and compare to sklearn."""
    print('\n\nRegression Tree')
    rng = np.random.RandomState(1)
    # RNG call order is significant: keep it identical to reproduce the data.
    X = np.sort(5 * rng.rand(80, 1), axis=0)
    y = np.sin(X).ravel()
    y[::5] += 3 * (0.5 - rng.rand(16))  # add noise to every 5th sample
    # Fit regression model
    scratch_reg = CART(tree='reg', criterion='mse', prune='depth', max_depth=2)
    scratch_reg.fit(X, y)
    scratch_reg.print_tree()
    pred = scratch_reg.predict(np.sort(5 * rng.rand(1, 1), axis=0))
    print('This Regression Tree Prediction: {}'.format(pred))
    baseline = sktree.DecisionTreeRegressor(max_depth=3)
    baseline.fit(X, y)
    sk_pred = baseline.predict(np.sort(5 * rng.rand(1, 1), axis=0))
    print('Sklearn Library Regression Tree Prediction: {}'.format(sk_pred))
# classification_example()
regression_example()
| [
"gla68@sfu.ca"
] | gla68@sfu.ca |
c1a9d7614dee9add2fb785fc83ec7bedfbff2655 | 1419418226b6ba0f510649daaf62b71554cc2284 | /amatrice/make_gps.py | f3e7202422146a69bb69df5e38665b74a0eaa03f | [] | no_license | shineusn/mylife | 2ef48a777e39be2ef746c3dad16ea963d5b23e5e | 61dfa72d9047551746d26b7fe01fb5c2f1f0657a | refs/heads/master | 2020-03-22T13:44:42.422127 | 2018-02-13T18:09:43 | 2018-02-13T18:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,978 | py | from numpy import genfromtxt,sqrt
# Convert a coseismic GPS offset table (mm) into per-station .neu files (m),
# keeping only stations whose horizontal offset exceeds `threshold`.
out='/Users/dmelgar/Amatrice2016/GPS/neu_Oct26th/'
stafile='/Users/dmelgar/Amatrice2016/GPS/gps_Oct26th.sta'
threshold=0.005 #In m
# Earlier input files kept for reference:
#sta=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/offsets.txt',usecols=0,dtype='S')
#lonlat=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/offsets.txt',usecols=[1,2])
#e=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/offsets.txt',usecols=4)
#n=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/offsets.txt',usecols=6)
#u=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/offsets.txt',usecols=8)
#sta=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_30Oct2016_GPS_GdL_V1.dat',usecols=0,dtype='S')
#lonlat=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_30Oct2016_GPS_GdL_V1.dat',usecols=[1,2])
#e=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_30Oct2016_GPS_GdL_V1.dat',usecols=4)
#n=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_30Oct2016_GPS_GdL_V1.dat',usecols=6)
#u=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_30Oct2016_GPS_GdL_V1.dat',usecols=8)
# Columns: 0=station, 1-2=lon/lat, 4/6/8 presumably east/north/up offsets in
# mm (hence the /1000. below) — confirm against the .dat header.
sta=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat',usecols=0,dtype='S')
lonlat=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat',usecols=[1,2])
e=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat',usecols=4)
n=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat',usecols=6)
u=genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat',usecols=8)
#Make station file
f=open(stafile,'w')
for k in range(len(sta)):
    line='%s\t%.4f\t%.4f\n' %(sta[k],lonlat[k,0],lonlat[k,1])
    f.write(line)
f.close()
#Make neu files
# NOTE: the threshold test uses only the horizontal (n, e) components; the
# vertical offset u is written but not used for selection.
for k in range(len(sta)):
    offset=sqrt((n[k]/1000.)**2+(e[k]/1000.)**2)
    if offset>=threshold:
        f=open(out+sta[k]+'.neu','w')
        f.write('%.6f\n' % (n[k]/1000.))
        f.write('%.6f\n' % (e[k]/1000.))
        f.write('%.6f' % (u[k]/1000.))
        f.close()
"dmelgar@berkeley.edu"
] | dmelgar@berkeley.edu |
1463098d36cf7bdefe89981ed0bf1c123c701674 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03569/s302751603.py | 4c25973ce80b184d1269be33c4a741981577c714 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import sys
from collections import Counter
# Presumably: minimum insertions of 'x' needed to turn s into a palindrome,
# or -1 when impossible (the non-'x' characters must already be palindromic).
s = input()
n = len(s)
ns = []   # the non-'x' characters, in order
no = []   # their indices in s
for i in range(n):
    if s[i] != "x":
        ns.append(s[i])
        no.append(i)
# Sentinel indices so gaps before the first and after the last char count too.
no = [-1] + no + [n]
m = len(no)
sa = []   # sa[i] = number of 'x' between consecutive non-'x' characters
for i in range(m-1):
    sa.append(no[i+1] - no[i] - 1)
# If the non-'x' skeleton is not a palindrome, no insertion of 'x' can help.
if ns != ns[::-1]:
    print(-1)
    sys.exit()
ans = 0
# Sum |gap difference| over mirrored gap pairs (the middle gap, if any,
# needs no balancing). NOTE(review): `te` is computed but never used.
if m%2 == 1:
    mm = m//2
    te = no[mm]
    ans = 0
    for i in range(mm):
        ans += abs(sa[i]-sa[m-2-i])
else:
    mm = m//2
    te = no[mm]
    ans = 0
    for i in range(mm-1):
        ans += abs(sa[i]-sa[m-2-i])
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0a1bbb4c953fe1a11da5f00cdc77fb7901b5af27 | 75452de12ec9eea346e3b9c7789ac0abf3eb1d73 | /build/zircon/populate_zircon_public.py | 7c7d07c57d69742b00f01e9cb616b92c007143f3 | [
"BSD-3-Clause"
] | permissive | oshunter/fuchsia | c9285cc8c14be067b80246e701434bbef4d606d1 | 2196fc8c176d01969466b97bba3f31ec55f7767b | refs/heads/master | 2022-12-22T11:30:15.486382 | 2020-08-16T03:41:23 | 2020-08-16T03:41:23 | 287,920,017 | 2 | 2 | BSD-3-Clause | 2022-12-16T03:30:27 | 2020-08-16T10:18:30 | C++ | UTF-8 | Python | false | false | 4,949 | py | #!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
FUCHSIA_ROOT = os.path.dirname( # $root
os.path.dirname( # build
SCRIPT_DIR)) # zircon
ZIRCON_PUBLIC = os.path.join(FUCHSIA_ROOT, 'zircon', 'public')
EXPORT_TEMPLATE_FILE = os.path.join(SCRIPT_DIR, 'lib_template.gn')
TOOL_TEMPLATE_FILE = os.path.join(SCRIPT_DIR, 'tool_template.gn')
UNIFICATION_DIR = os.path.join(FUCHSIA_ROOT, 'build', 'unification')
MAPPINGS_FILE = os.path.join(UNIFICATION_DIR, 'zircon_library_mappings.json')
FORWARD_TEMPLATE_FILE = os.path.join(UNIFICATION_DIR,
'zircon_library_forward.gn')
DIRS = {
'lib': True,
'tool': False,
}
PUBLIC_DIRS = set(DIRS.keys())
MARKER = 'ONLY EDIT IT BY THAT NAME!'
def is_template(build_file):
    """Return True when build_file carries the generated-template MARKER."""
    with open(build_file, 'r') as handle:
        contents = handle.read()
    return MARKER in contents
def has_sources(top_dir):
    # Per the DIRS table above: 'lib' entries may have sources, 'tool' may not.
    return DIRS[top_dir]
def main():
    """Regenerate //zircon/public/{lib,tool} from the legacy and mapping JSON.

    Reads the legacy directory list from sys.argv[1] and the unification
    mappings from MAPPINGS_FILE, then hard-links the appropriate BUILD.gn
    template into each generated subdirectory, pruning stale generated
    directories. Returns 0 on success, 1 on a naming conflict.
    """
    with open(sys.argv[1]) as f:
        legacy_dirs = json.load(f)
    with open(MAPPINGS_FILE, 'r') as f:
        content = json.load(f)
    # Forwarding targets live under lib/<name>.
    mapped_lib_dirs = dict([('lib/' + i['name'], i['label'])
                            for i in content])
    # Verify that we're not trying to create a forwarding target and an exported
    # library under the same alias.
    common_dirs = set(mapped_lib_dirs) & set(legacy_dirs)
    if common_dirs:
        print('The following paths cannot be both exports from Zircon and '
              'forwarding targets:')
        for dir in common_dirs:
            print('//zircon/public/' + dir)
        return 1
    # Create a data structure holding all generated paths.
    # Each path maps to the BUILD.gn template that should be linked into it.
    all_dirs = {}
    for dir in legacy_dirs:
        top_dir = os.path.dirname(dir)
        if top_dir == 'tool':
            all_dirs[dir] = TOOL_TEMPLATE_FILE
        else:
            all_dirs[dir] = EXPORT_TEMPLATE_FILE
    for dir in mapped_lib_dirs:
        all_dirs[dir] = FORWARD_TEMPLATE_FILE
    # Regroup as: top dir -> (list of subdir names, name -> template).
    dirs = {}
    for dir, template in all_dirs.items():
        top_dir = os.path.dirname(dir)
        name = os.path.basename(dir)
        subdirs, templates = dirs.setdefault(top_dir, ([], {}))
        templates[name] = template
        dirs[top_dir] = (subdirs + [name], templates)
    assert set(dirs.keys()).issubset(PUBLIC_DIRS), (
        "%r from JSON should be a subset of %r" %
        (set(dirs.keys()), PUBLIC_DIRS))
    # lstat of each template, used below to detect stale hard links.
    stats = dict([(f, os.lstat(f))
                  for f in [EXPORT_TEMPLATE_FILE, FORWARD_TEMPLATE_FILE,
                            TOOL_TEMPLATE_FILE]])
    for top_dir in dirs:
        subdirs, templates = dirs[top_dir]
        top_dir_name = top_dir
        top_dir = os.path.join(ZIRCON_PUBLIC, top_dir)
        subdirs = set(subdirs)
        if not os.path.exists(top_dir):
            os.mkdir(top_dir)
        else:
            # Go over the existing contents of the directory.
            for existing in os.listdir(top_dir):
                existing_dir = os.path.join(top_dir, existing)
                if not os.path.isdir(existing_dir):
                    # Disregard files (e.g. .gitignore).
                    continue
                build_file = os.path.join(existing_dir, 'BUILD.gn')
                # A "source" dir is hand-written: it has a BUILD.gn that is
                # not one of our hard-linked templates.
                is_source = (has_sources(top_dir_name) and
                             os.path.exists(build_file) and
                             not is_template(build_file))
                if existing in subdirs:
                    if is_source:
                        print('%s cannot be both a source and generated' %
                              existing_dir)
                        return 1
                    # An existing directory might already have the link.
                    # If the link doesn't exist or doesn't match, make it.
                    template = templates[existing]
                    if not os.path.exists(build_file):
                        os.link(template, build_file)
                    elif not os.path.samestat(os.lstat(build_file),
                                              stats[template]):
                        os.remove(build_file)
                        os.link(template, build_file)
                    subdirs.remove(existing)
                else:
                    if not is_source:
                        # A stale directory that shouldn't exist any more.
                        shutil.rmtree(existing_dir)
        # Make and populate any directories that don't exist yet.
        for subdir in subdirs:
            template = templates[subdir]
            subdir = os.path.join(top_dir, subdir)
            os.mkdir(subdir)
            os.link(template, os.path.join(subdir, 'BUILD.gn'))
    return 0
if __name__ == '__main__':
    # Propagate main()'s status code to the shell.
    sys.exit(main())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
de63cc8f0fc0605459e386f4e4681be42967ca58 | 93f47ba04fc18c4e537f0a48fe6232e2a89a4d30 | /examples/adspygoogle/adwords/v201406/campaign_management/add_experiment.py | 0016c83287c0eebaeadcbcc187dafc3aba38b067 | [
"Apache-2.0"
] | permissive | jasonshih/googleads-python-legacy-lib | c56dc52a1dab28b9de461fd5db0fcd6020b84a04 | 510fad41ecf986fe15258af64b90f99a96dc5548 | refs/heads/master | 2021-04-30T22:12:12.900275 | 2015-03-06T15:35:21 | 2015-03-06T15:35:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an experiment using a query percentage of 10, which
defines what fraction of auctions should go to the control split (90%) vs. the
experiment split (10%), then adds experimental bid changes for an ad group, and
adds an experiment-only keyword. To get campaigns, run get_campaigns.py. To
get ad groups, run get_ad_groups.py. To get keywords, run get_keywords.py.
Tags: ExperimentService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import datetime
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
from adspygoogle.common import Utils
campaign_id = 'INSERT_CAMPAIGN_ID_HERE'
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, campaign_id, ad_group_id):
# Initialize appropriate service.
experiment_service = client.GetExperimentService(version='v201406')
ad_group_service = client.GetAdGroupService(version='v201406')
ad_group_criterion_service = client.GetAdGroupCriterionService(
version='v201406')
# Construct operations and add experiment.
tomorrow = datetime.datetime.now() + datetime.timedelta(1)
thirty_days = datetime.datetime.now() + datetime.timedelta(30)
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Interplanetary Experiment #%s' % Utils.GetUniqueName(),
'queryPercentage': '10',
'startDateTime': tomorrow.strftime('%Y%m%d %H%M%S'),
# Optional fields.
'status': 'ENABLED',
'endDateTime': thirty_days.strftime('%Y%m%d %H%M%S')
}
}]
result = experiment_service.Mutate(operations)[0]
# Display results.
for experiment in result['value']:
print ('Experiment with name \'%s\' and id \'%s\' was added.'
% (experiment['name'], experiment['id']))
# Construct operations and update ad group.
operations = [{
'operator': 'SET',
'operand': {
'id': ad_group_id,
'experimentData': {
'xsi_type': 'AdGroupExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'MODIFIED',
'experimentBidMultipliers': {
'xsi_type': 'ManualCPCAdGroupExperimentBidMultipliers',
'maxCpcMultiplier': {
'multiplier': '0.5'
}
}
}
}
}]
result = ad_group_service.Mutate(operations)[0]
# Display results.
for ad_group in result['value']:
print ('Ad group with name \'%s\' and id \'%s\' was updated in the '
'experiment.' % (ad_group['name'], ad_group['id']))
# Construct operations and add ad group crierion.
operations = [{
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group['id'],
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
},
'experimentData': {
'xsi_type': 'BiddableAdGroupCriterionExperimentData',
'experimentId': experiment['id'],
'experimentDeltaStatus': 'EXPERIMENT_ONLY'
}
}
}]
result = ad_group_criterion_service.Mutate(operations)[0]
# Display results.
for criterion in result['value']:
print ('Ad group criterion with ad group id \'%s\' and criterion '
'id \'%s\' was added to the experiment.'
% (criterion['adGroupId'], criterion['criterion']['id']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, campaign_id, ad_group_id)
| [
"msaniscalchi@google.com"
] | msaniscalchi@google.com |
19c43377525c88e3507724542a581786aad55373 | 6188f8ef474da80c9e407e8040de877273f6ce20 | /python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/test_asset_defs.py | b7b03f9b2586e8537bfca4c70fd41ec34123525b | [
"Apache-2.0"
] | permissive | iKintosh/dagster | 99f2a1211de1f3b52f8bcf895dafaf832b999de2 | 932a5ba35263deb7d223750f211c2ddfa71e6f48 | refs/heads/master | 2023-01-24T15:58:28.497042 | 2023-01-20T21:51:35 | 2023-01-20T21:51:35 | 276,410,978 | 1 | 0 | Apache-2.0 | 2020-07-01T15:19:47 | 2020-07-01T15:13:56 | null | UTF-8 | Python | false | false | 5,327 | py | import pytest
import responses
from dagster import AssetKey, DagsterStepOutputNotFoundError
from dagster._legacy import build_assets_job
from dagster_fivetran import fivetran_resource
from dagster_fivetran.asset_defs import build_fivetran_assets
from dagster_fivetran.resources import (
FIVETRAN_API_BASE,
FIVETRAN_API_VERSION_PATH,
FIVETRAN_CONNECTOR_PATH,
)
from .utils import (
DEFAULT_CONNECTOR_ID,
get_sample_connector_response,
get_sample_connector_schema_config,
get_sample_sync_response,
get_sample_update_response,
)
def test_fivetran_asset_keys():
ft_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID, destination_tables=["x.foo", "y.bar"]
)
assert ft_assets[0].keys == {AssetKey(["x", "foo"]), AssetKey(["y", "bar"])}
@pytest.mark.parametrize(
"group_name,expected_group_name",
[
(None, "default"),
("my_group_name", "my_group_name"),
],
)
def test_fivetran_group_label(group_name, expected_group_name):
ft_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID,
destination_tables=["x.foo", "y.bar"],
group_name=group_name,
)
group_names = set(ft_assets[0].group_names_by_key.values())
assert len(group_names) == 1
assert list(group_names)[0] == expected_group_name
@pytest.mark.parametrize("schema_prefix", ["", "the_prefix"])
@pytest.mark.parametrize(
"tables,should_error",
[
([], False),
(["schema1.tracked"], False),
(["schema1.tracked", "schema2.tracked"], False),
(["does.not_exist"], True),
(["schema1.tracked", "does.not_exist"], True),
],
)
def test_fivetran_asset_run(tables, should_error, schema_prefix):
ft_resource = fivetran_resource.configured({"api_key": "foo", "api_secret": "bar"})
final_data = {"succeeded_at": "2021-01-01T02:00:00.0Z"}
api_prefix = f"{FIVETRAN_API_BASE}/{FIVETRAN_API_VERSION_PATH}{FIVETRAN_CONNECTOR_PATH}{DEFAULT_CONNECTOR_ID}"
if schema_prefix:
tables = [f"{schema_prefix}_{t}" for t in tables]
fivetran_assets = build_fivetran_assets(
connector_id=DEFAULT_CONNECTOR_ID,
destination_tables=tables,
poll_interval=0.1,
poll_timeout=10,
)
# expect the multi asset to have one asset key and one output for each specified asset key
assert fivetran_assets[0].keys == {AssetKey(table.split(".")) for table in tables}
assert len(fivetran_assets[0].op.output_defs) == len(tables)
fivetran_assets_job = build_assets_job(
name="fivetran_assets_job",
assets=fivetran_assets,
resource_defs={"fivetran": ft_resource},
)
with responses.RequestsMock() as rsps:
rsps.add(rsps.PATCH, api_prefix, json=get_sample_update_response())
rsps.add(rsps.POST, f"{api_prefix}/force", json=get_sample_sync_response())
# connector schema
rsps.add(
rsps.GET,
f"{api_prefix}/schemas",
json=get_sample_connector_schema_config(
tables=[
("schema1", "tracked"),
("schema1", "untracked"),
("schema2", "tracked"),
]
),
)
# initial state
rsps.add(
rsps.GET,
api_prefix,
json=get_sample_connector_response(),
)
final_json = get_sample_connector_response(data=final_data)
if schema_prefix:
final_json["data"]["config"]["schema_prefix"] = schema_prefix
# final state will be updated
rsps.add(rsps.GET, api_prefix, json=final_json)
if should_error:
with pytest.raises(DagsterStepOutputNotFoundError):
fivetran_assets_job.execute_in_process()
else:
result = fivetran_assets_job.execute_in_process()
assert result.success
# make sure we only have outputs for the explicit asset keys
outputs = [
event
for event in result.events_for_node(f"fivetran_sync_{DEFAULT_CONNECTOR_ID}")
if event.event_type_value == "STEP_OUTPUT"
]
assert len(outputs) == len(tables)
# make sure we have asset materializations for all the schemas/tables that were actually sync'd
asset_materializations = [
event
for event in result.events_for_node(f"fivetran_sync_{DEFAULT_CONNECTOR_ID}")
if event.event_type_value == "ASSET_MATERIALIZATION"
]
assert len(asset_materializations) == 3
found_asset_keys = set(
mat.event_specific_data.materialization.asset_key for mat in asset_materializations
)
if schema_prefix:
assert found_asset_keys == {
AssetKey(["the_prefix_schema1", "tracked"]),
AssetKey(["the_prefix_schema1", "untracked"]),
AssetKey(["the_prefix_schema2", "tracked"]),
}
else:
assert found_asset_keys == {
AssetKey(["schema1", "tracked"]),
AssetKey(["schema1", "untracked"]),
AssetKey(["schema2", "tracked"]),
}
| [
"noreply@github.com"
] | iKintosh.noreply@github.com |
67dbe2876b4fdc551b2fc9988b78fa0932c92363 | 8067ca3d96d00080df5d54aa5bc2ec33b7fc3035 | /Hexagonal Grid.py | bae5ce1e331b29f864651624619e26579cdf6bc1 | [
"Apache-2.0"
] | permissive | Ashutosh-gupt/HackerRankAlgorithms | 9620bd12c66a9f26f08949a292b3baad79387227 | 439bf2e31fd395d19d40f79e969153e50e5358b5 | refs/heads/master | 2022-12-24T18:56:53.263797 | 2016-10-05T03:14:07 | 2016-10-05T03:14:07 | 376,810,303 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | # -*- coding: utf-8 -*-
"""
You are given a hexagonal grid of size 2xN. Your task is to construct the grid with 2x1 dominoes. The dominoes can be
arranged in any of the three orientations shown below. To add to the woes, certain cells of the hexogonal grid are
blackened i.e., no domino can occupy that cell. Can you construct such a hexagonal grid?
"""
__author__ = 'Danyang'
class Solution(object):
def __init__(self):
self.delta = [(0, 1), (1, 0), (1, -1)] # dominoes delta, coordinate: x downward, y rightward,
# need consistent directions
def solve(self, cipher):
"""
recursive solution, brute force, starting from top left
:param cipher: the cipher
"""
ret = self.rec(cipher)
if ret:
return "YES"
else:
return "NO"
def rec(self, grid):
changed = False
m = len(grid)
n = len(grid[0])
for i in xrange(m):
for j in xrange(n):
if not changed: # control the start from top, left
if grid[i][j] == 0:
changed = True
grid[i][j] = 1
for d in self.delta:
i2 = i + d[0]
j2 = j + d[1]
if 0 <= i2 < m and 0 <= j2 < n and grid[i2][j2] == 0:
grid[i2][j2] = 1
if self.rec(grid):
return True
grid[i2][j2] = 0
grid[i][j] = 0
if not changed:
return True
if __name__ == "__main__":
import sys
f = open("1.in", "r")
# f = sys.stdin
solution = Solution()
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
int(f.readline().strip())
cipher = []
for _ in xrange(2):
cipher.append(map(int, list(f.readline().strip())))
# solve
s = "%s\n" % (solution.solve(cipher))
print s,
| [
"zhangdanyangg@gmail.com"
] | zhangdanyangg@gmail.com |
059e84c2f1ff6af24c13aa2c403890209360ddbc | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /WLKF79mxKnhjtrFRB_13.py | 19738ce84620bcbc808363c11292586b2032ab08 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py |
def is_good_match(lst):
if len(lst) % 2 != 0:
return "bad match"
lst1 = [x for x in lst[0::2]]
lst2 = [x for x in lst[1::2]]
return [x + y for x,y in zip(lst1, lst2)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
065966e1fbabe141ee422c8cc29b6acefaf67a49 | 3545c3a5ede04aeb229c3da9792f1430959bbb0e | /BLOGGER/users/forms.py | f0c26fca518965f8952077f9858abb3d3c76ff61 | [] | no_license | Gourav2000/DJ3 | 6050315e4a65501b3f59617ad8bf174fbdaa8074 | bab01fa5fda0f8c274ed9e75d32306ff8d087355 | refs/heads/master | 2022-12-01T02:47:06.222790 | 2020-07-13T09:00:56 | 2020-07-13T09:00:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
email=forms.EmailField();
class Meta:
model=User
fields=["username","email","password1",'password2'];
| [
"parajbhattacharjee123@gmail.com"
] | parajbhattacharjee123@gmail.com |
fef662ef6fd908b5b68e87d622023d04aa854e13 | 13f55c8fc102c64a8924d83579aeb0bd563daeb9 | /src/aria2p/cli/commands/add_magnet.py | d5e2d71da5711321591a2f8000b059a88e037584 | [
"ISC"
] | permissive | wqcsim/aria2p | e67b8b7d5c5afffc22d3728d3f8c89e5b24bfc29 | 6cdc9a1ef5ed0413fffa3be4885f4b5325177660 | refs/heads/master | 2023-07-26T20:02:05.134854 | 2021-08-26T20:47:20 | 2021-08-26T20:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | """Command to add magnets."""
import sys
from typing import List
from aria2p.api import API
from aria2p.utils import read_lines
def add_magnets(
api: API,
uris: List[str] = None,
from_file: str = None,
options: dict = None,
position: int = None,
) -> int:
"""
Add magnet subcommand.
Arguments:
api: The API instance to use.
uris: The URIs of the magnets.
from_file: Path to the file to read uris from.
options: String of aria2c options to add to download.
position: Position to add new download in the queue.
Returns:
int: Always 0.
"""
ok = True
if not uris:
uris = []
if from_file:
try:
uris.extend(read_lines(from_file))
except OSError:
print(f"Cannot open file: {from_file}", file=sys.stderr)
ok = False
for uri in uris:
new_download = api.add_magnet(uri, options=options, position=position)
print(f"Created download {new_download.gid}")
return 0 if ok else 1
| [
"pawamoy@pm.me"
] | pawamoy@pm.me |
9f8044eff2a26ebffb2ef7e386df954dd5e218e3 | d0a2ff39d48dbcf6b019c7c7530bcda1a398e2be | /python_for_data_analysis/Chapter_2/chapter2.py | a60592d6087c79b204769f0779d3261e7e4605b2 | [] | no_license | foxcodenine/books | 2711fd8be596bc7fcbd4c00d6a0573acb29dee3f | 100624b41484f853ab3e850fb33d99d0dd65d4f3 | refs/heads/master | 2023-03-08T05:54:18.270243 | 2022-06-21T19:22:40 | 2022-06-21T19:22:40 | 205,091,163 | 0 | 0 | null | 2023-03-05T12:57:12 | 2019-08-29T05:57:41 | JavaScript | UTF-8 | Python | false | false | 1,689 | py | # IPYTHON $ JUPYTER NOTEBOOK
# from ipython shell you can do #run to run it:
# %run hello_world.py
# ______________________________________________________________________
# ipython use pretty-print.
from numpy.random import randn
data = {i: randn() for i in range(7)}
# ______________________________________________________________________
# to open jupyter notebook enter j.. n.. while in conda base env.
# $ jupyter notebook
# ______________________________________________________________________
# Tab Completion
_secret_key = 'xjfjhsdbfjvhbsdjbfv'
# _<Tab> <- auto comlete variables
# _secret_ket.<Tab> <- auto comlete function
# path = '/home/foxcodenine/git/'<Tab> <- auto comlete path
# %run ch<Tab> <- combined with %run
# ______________________________________________________________________
# Introspection (using ?)
b = [1, 2, 3]
# b?
# print?
# ------------------------------
def add_numbers(a, b):
'''
Add two numbers together.
Returns
_______
the sum : tye of arguments
'''
return a + b
# add_numbers? <- shows docstring
# add_numbers?? <- shows docstring & source code
# ------------------------------
# also combined with * it will show all names matching the wildcard:
# >> import numpy as np
# >> np.*load*?
# np.__loader__
# np.load
# np.loads
# np.loadtxt
# ______________________________________________________________________
# The %run Command
# %run script_test.py
# %run -i script_test.py <-scripy file can assess variables aleady defined in ipython
print(a) | [
"foxcode9@gmail.com"
] | foxcode9@gmail.com |
d3911e4c9f0cb924b32844dc531ca096d2def61c | 06c2bc496f9e285f06e4c3c71f14d5716f411d89 | /source/webapp/migrations/0007_auto_20210504_1243.py | 10b149d9e4237d16899b45386bb911ca394c7078 | [] | no_license | Beknasar/Coin_collection | 37a9e77cc00270dfcb9d0cb5916f985cec4c591d | 091860f98e7dc81d460ab0cbcb6ca1d7fdeffda8 | refs/heads/master | 2023-06-09T16:25:30.473134 | 2021-06-25T09:31:13 | 2021-06-25T09:31:13 | 365,229,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | # Generated by Django 2.2 on 2021-05-04 12:43
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('webapp', '0006_auto_20210503_1327'),
]
operations = [
migrations.AlterField(
model_name='coin',
name='size',
field=models.FloatField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Размер'),
),
migrations.AlterField(
model_name='coin',
name='weight',
field=models.FloatField(validators=[django.core.validators.MinValueValidator(0)], verbose_name='Вес'),
),
]
| [
"680633@gmail.com"
] | 680633@gmail.com |
2726900ca710ad9b236b6180dcd2909b84e4d9e7 | 2b2af3a4924f74d0be10370f25121c015f37aba0 | /EVLA_pipe_statwt.py | 0def7573ebcbc6c607a05ab4529f67a8883704f1 | [] | no_license | tomr-stargazer/VLA_pipeline_custom | 98ba3cec311ccc8fa37e0d3424a3c97da1816669 | cf5720588ded8c7dd88cf5ecda4df82824183078 | refs/heads/master | 2020-04-02T08:01:25.574938 | 2018-10-22T23:54:22 | 2018-10-22T23:54:22 | 154,225,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | ######################################################################
#
# Copyright (C) 2013
# Associated Universities, Inc. Washington DC, USA,
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Library General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
# License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning VLA Pipelines should be addressed as follows:
# Please register and submit helpdesk tickets via: https://help.nrao.edu
# Postal address:
# National Radio Astronomy Observatory
# VLA Pipeline Support Office
# PO Box O
# Socorro, NM, USA
#
######################################################################
# CALCULATE DATA WEIGHTS BASED ON ST. DEV. WITHIN EACH SPW
# use statwt
logprint ("Starting EVLA_pipe_statwt.py", logfileout='logs/statwt.log')
time_list=runtiming('checkflag', 'start')
QA2_statwt='Pass'
logprint ("Calculate data weights per spw using statwt", logfileout='logs/statwt.log')
# Run on all calibrators
default(statwt)
vis=ms_active
dorms=False
fitspw=''
fitcorr=''
combine=''
minsamp=2
field=''
spw='7~38'
intent='*CALIBRATE*'
datacolumn='corrected'
statwt()
# Run on all targets
# set spw to exclude strong science spectral lines
default(statwt)
vis=ms_active
dorms=False
fitspw=''
fitcorr=''
combine=''
minsamp=2
field=''
spw='7~38'
intent='*TARGET*'
datacolumn='corrected'
statwt()
# Until we understand better the failure modes of this task, leave QA2
# score set to "Pass".
logprint ("QA2 score: "+QA2_statwt, logfileout='logs/statwt.log')
logprint ("Finished EVLA_pipe_statwt.py", logfileout='logs/statwt.log')
time_list=runtiming('targetflag', 'end')
pipeline_save()
######################################################################
| [
"t.rice90@gmail.com"
] | t.rice90@gmail.com |
5f80c5d519dbfcbcd70511459f0737348f4fd5b2 | 1566f14c336e67c77001b620df55f68f14b4e2c5 | /tests/bench.py | aeb901e3a963ea48ce486f3ec163f0a3bbe2d442 | [
"BSD-3-Clause"
] | permissive | wolfmetr/django-cacheops | 9645088c2f20f12aad955fc5ec7aaa2742ab4f41 | ce56df88f341c3a4c22a58d0cd0557e92838d89a | refs/heads/master | 2021-01-18T08:46:22.312893 | 2013-12-03T12:58:06 | 2013-12-03T12:58:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | from cacheops import invalidate_obj
from cacheops.conf import redis_client
from .models import Category, Post
count_key = Category.objects.all()._cache_key(extra='count')
def invalidate_count():
redis_client.delete(count_key)
def do_count():
Category.objects.cache().count()
def do_count_no_cache():
Category.objects.nocache().count()
fetch_key = Category.objects.all()._cache_key()
def invalidate_fetch():
redis_client.delete(fetch_key)
def do_fetch():
list(Category.objects.cache().all())
def do_fetch_no_cache():
list(Category.objects.nocache().all())
def do_fetch_construct():
Category.objects.all()
def prepare_obj():
return Category.objects.cache().get(pk=1)
def do_invalidate_obj(obj):
invalidate_obj(obj)
def do_save_obj(obj):
obj.save()
TESTS = [
('count_no_cache', {'run': do_count_no_cache}),
('count_hit', {'prepare_once': do_count, 'run': do_count}),
('count_miss', {'prepare': invalidate_count, 'run': do_count}),
('fetch_construct', {'run': do_fetch_construct}),
('fetch_no_cache', {'run': do_fetch_no_cache}),
('fetch_hit', {'prepare_once': do_fetch, 'run': do_fetch}),
('fetch_miss', {'prepare': invalidate_fetch, 'run': do_fetch}),
('invalidate_obj', {'prepare': prepare_obj, 'run': do_invalidate_obj}),
('save_obj', {'prepare': prepare_obj, 'run': do_save_obj}),
]
| [
"suor.web@gmail.com"
] | suor.web@gmail.com |
065fe50031eb0f2ee2bd0f6c17af9744ac523539 | f3b233e5053e28fa95c549017bd75a30456eb50c | /ptp1b_input/L82/82-bs_wat_20Abox/set_1ns_equi.py | 28657cb7707bdfb592da52d7aa486e0d2ea88321 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L82/wat_20Abox/ti_one-step/82_bs/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_pbs = filesdir + 'temp_1ns_equi.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../82-bs_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
f41f0aeac0150a5016682fef4713dad35084986b | 0b40458397355319e74f421b5903b6bdbdb5ee9c | /accounts/migrations/0007_remove_userprofile_friends.py | 1acf280c35ce724833ff78fa83c547be53db7390 | [] | no_license | OllyDorvelus/visumic | bae61c7768ed1fa0b76134dbd715e2f1ece3143d | 884a7c89bd562ef7e2e33a01a3239a48b038ac40 | refs/heads/master | 2022-12-09T10:02:06.921966 | 2018-03-01T03:12:43 | 2018-03-01T03:12:43 | 122,236,678 | 1 | 0 | null | 2022-12-08T00:39:38 | 2018-02-20T18:17:26 | HTML | UTF-8 | Python | false | false | 398 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-29 23:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20170401_2124'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='friends',
),
]
| [
"ollydorvelus@gmail.com"
] | ollydorvelus@gmail.com |
968ecbcdd00c75509c462e5effc5495acb927ec4 | e41b0bb4f8f835082f8c559101b94dc5f64976ae | /exp/exp34.py | ce1607b45bd2258286f1bebbbc233ab9b7fc5b4e | [] | no_license | voytekresearch/pacological | 5e6f5aba0ede883594863a56b4702f907d458a90 | 306f953f456e87298322065308ad4e2fbbe6d7f7 | refs/heads/master | 2021-01-21T15:34:35.003245 | 2018-12-03T01:00:26 | 2018-12-03T01:00:26 | 37,220,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""PAC as selective amplification and information transmission."""
import numpy as np
import matplotlib.pyplot as plt; plt.ion()
if __name__ == "__main__":
from pacological.exp.exp6 import run
import sys
import pandas as pd
import os
from itertools import product
from collections import defaultdict
path = sys.argv[1]
# -- USER SETTINGS --------------------------------------------------------
n = 1000
t = 5
dt = 0.001
f = 10
Sstim = .05
# This ratio of k to excitability gives mean rates
# equivilant to Poisson
k_base = 1
excitability_base = 0.0001
bin_multipliers = range(2, 32, 2)
# Drives and iteration counter
Ioscs = [5, 30]
Istims = [5, 30]
iterations = range(200)
params = product(Ioscs, Istims, bin_multipliers)
for Iosc, Istim, b_mult in params:
# Create basename for the data
basename = "Iosc-{0}_Istim-{1}_k{2}".format(
Iosc, Istim, b_mult * k_base)
print(basename)
basepath = os.path.join(path, basename)
# Tmp dicts for each param set
d_H = defaultdict(list)
d_MI = defaultdict(list)
d_PAC = defaultdict(list)
d_rate = defaultdict(list)
# -- Run
k = k_base * b_mult
excitability = excitability_base / b_mult
for i in iterations:
print(i)
res = run(n, t, Iosc, f, Istim, Sstim * Istim, dt, k, excitability)
# Process the result
hys = {}
for b in res['H'].keys():
hys[b] = res['H'][b]['HY']
for b in hys.keys():
d_H[b].append(hys[b])
for b in res['MI'].keys():
d_MI[b].append(res['MI'][b])
for b in res['PAC'].keys():
d_PAC[b].append(res['PAC'][b])
for b in res['spikes'].keys():
mrate = np.mean(res['spikes'][b].sum(0) / float(t))
d_rate[b].append(mrate)
# -- Save
# H
df_H = pd.DataFrame(d_H)
df_H.to_csv(basepath + "_H.csv", index=False)
sum_H = df_H.describe(percentiles=[.05, .25, .75, .95]).T
sum_H.to_csv(basepath + "_H_summary.csv")
# MI
df_MI = pd.DataFrame(d_MI)
df_MI.to_csv(basepath + "_MI.csv", index=False)
sum_MI = df_MI.describe(percentiles=[.05, .25, .75, .95]).T
sum_MI.to_csv(basepath + "_MI_summary.csv")
# PAC
df_PAC = pd.DataFrame(d_PAC)
df_PAC.to_csv(basepath + "_PAC.csv", index=False)
sum_PAC = df_PAC.describe(percentiles=[.05, .25, .75, .95]).T
sum_PAC.to_csv(basepath + "_PAC_summary.csv")
# rate
df_rate = pd.DataFrame(d_rate)
df_rate.to_csv(basepath + "_rate.csv", index=False)
sum_rate = df_rate.describe(percentiles=[.05, .25, .75, .95]).T
sum_rate.to_csv(basepath + "_rate_summary.csv")
| [
"Erik.Exists@gmail.com"
] | Erik.Exists@gmail.com |
c30f6710203806ad57e0d9cfcaad2b3e8c7ed1fb | 2990b0841b63f300a722107933c01c7237a7976b | /all_xuef/程序员练级+Never/xuef code/xuef_code_python/python_cookbook_code/1. 数据结构和算法/1.6 字典中的键映射多个值.py | 735e481d02cb5f19c48205d6c10befff0a5121b4 | [] | no_license | xuefengCrown/Files_01_xuef | 8ede04751689e0495e3691fc5d8682da4d382b4d | 677329b0189149cb07e7ba934612ad2b3e38ae35 | refs/heads/master | 2021-05-15T04:34:49.936001 | 2019-01-23T11:50:54 | 2019-01-23T11:50:54 | 118,802,861 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | """
问题
怎样实现一个键对应多个值的字典(也叫 multidict)?
解决方案
一个字典就是一个键对应一个单值的映射。
如果你想要一个键映射多个值,那么你就需要将这多个值放到另外的容器中, 比如列表或者集合里面。
比如,你可以像下面这样构造这样的字典:
d = {
'a' : [1, 2, 3],
'b' : [4, 5]
}
e = {
'a' : {1, 2, 3},
'b' : {4, 5}
}
"""
"""
选择使用列表还是集合取决于你的实际需求。
如果你想保持元素的插入顺序就应该使用列表,
如果想去掉重复元素就使用集合(并且不关心元素的顺序问题)。
"""
"""
你可以很方便的使用 collections 模块中的 defaultdict 来构造这样的字典。
defaultdict 的一个特征是它会自动初始化每个 key 刚开始对应的值,
所以你只需要关注添加元素操作了。比如:
"""
from collections import defaultdict
d = defaultdict(list) # list 表示值类型
d['a'].append(1)
d['a'].append(2)
d['b'].append(4)
d = defaultdict(set)
d['a'].add(1)
d['a'].add(2)
d['b'].add(4)
##d = defaultdict(list)
##for key, value in pairs:
## d[key].append(value)
| [
"643472092@qq.com"
] | 643472092@qq.com |
8e000e5070188bf89462da25f306d558f36ec373 | 4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422 | /_1551_Minimum_Operations_to_Make_Array_Equal.py | c1ce0ef2b3b5d7928801d2d186042277770336b7 | [] | no_license | mingweihe/leetcode | a2cfee0e004627b817a3c0321bb9c74128f8c1a7 | edff905f63ab95cdd40447b27a9c449c9cefec37 | refs/heads/master | 2021-06-19T07:46:46.897952 | 2021-05-02T05:13:17 | 2021-05-02T05:13:17 | 205,740,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | class Solution(object):
def minOperations(self, n):
"""
:type n: int
:rtype: int
"""
last = 2 * (n - 1) + 1
avg = (last + 1) / 2
res = 0
for i in xrange(n/2):
res += avg - (i*2 + 1)
return res
| [
"hemingwei2017@gmail.com"
] | hemingwei2017@gmail.com |
031545508eb0cde90949a94355460d23108404d2 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/Other/1018_체스판 다시 칠하기.py | 15e56f6ea95ea6eb5167092167e29d02dcf8c5b5 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | n, m = map(int, input().split()) # m*n
arr = [list(input()) for i in range(n)]
# 왼쪽 맨 위가 W일때, B일때를 나누어 구함.
# 최대 row = (n - 8) index까지 가능.
max_r = n - 8 # 2
max_c = m - 8
s = ['W', 'B']
count = []
for char in s:
for r in range(0, max_r + 1):
for c in range(0, max_c + 1):
test = [row[c:c+8] for row in arr[r:r+8]]
cnt = 0
for i in range(8):
for j in range(8):
if r % 2 == c % 2:
if i % 2 == j % 2 and test[i][j] != char:
cnt += 1
elif i % 2 != j % 2 and test[i][j] == char:
cnt += 1
elif r % 2 != c % 2:
if i % 2 == j % 2 and test[i][j] == char:
cnt += 1
elif i % 2 != j % 2 and test[i][j] != char:
cnt += 1
count.append(cnt)
print(min(count)) | [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
5662acc9dfbb5eccbc2f255113862a6a02a9523d | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/e2e/jit/test_Pad2D_25.py | 43af560c2452752f47a1cc5b0ee2f2d14862faf8 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 608 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test jit cases
"""
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "utils"))
from utils.yaml_loader import YamlLoader
from jittrans import JitTrans
yaml_path = os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "yaml", "nn.yml")
yml = YamlLoader(yaml_path)
def test_Pad2D_25():
"""test Pad2D_25"""
jit_case = JitTrans(case=yml.get_case_info("Pad2D_25"))
jit_case.jit_run()
| [
"825276847@qq.com"
] | 825276847@qq.com |
ae24899f5d7f723d07cb58f1c053b40315c20c77 | d5c67ac21a5210d36c74bfd0a4d45c91ab3c1879 | /Spyder/python机器学习应用/学生上网分析/学生上网分析之上网时长聚类.py | 39c78009f51ebda23804e3ee7586778083623f24 | [] | no_license | HanKin2015/ACM | 93036222eb5e382e5a1269c0208c58bba4ad5af7 | 040779ce4a3e88c40c7beb9cba6a33aa3695bf50 | refs/heads/master | 2022-03-03T05:15:51.053240 | 2021-12-20T14:18:14 | 2021-12-20T14:21:11 | 57,268,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 16:03:02 2017
@author: HanKin
"""
# -*- coding: utf-8 -*-
'''
对上网时长时间进行聚类
'''
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics#计算方法
import matplotlib.pyplot as plt
mac2id=dict()
onlinetimes=[]
f=open('TestData.txt',encoding='utf-8')
for line in f:
items = line.strip().split(",")
#mac地址
mac=items[2]
#上网时长
onlinetime=int(items[6])
#时间格式举例:2014-07-20 22:44:18.540000000
starttime=int(items[4].split(' ')[1].split(':')[0])#只保留时间的小时位
#保证onlinetime中对应一个mac地址有一个唯一的记录
if mac not in mac2id:
mac2id[mac]=len(onlinetimes)
onlinetimes.append((starttime,onlinetime))
else:
onlinetimes[mac2id[mac]]=(starttime,onlinetime)
real_X=np.array(onlinetimes).reshape((-1,2)) #-1代表行数由程序自行根据列数和总数据信息推算出
X=np.log(1+real_X[:,1:])#只得到上网时长,这里+1是为了防止为0的情况
#调用DBSCAN方法进行训练,labels为每个数据的簇标签
db=DBSCAN(eps=0.14,min_samples=10).fit(X)
labels = db.labels_#返回的数据的簇标签,噪声数据标签为-1
print('Labels:\n',labels)
#计算标签为-1的数据(即噪声数据)的比例
raito=len(labels[labels[:] == -1]) / len(labels)
print('Noise raito:',format(raito, '.2%'))
#计算簇的个数
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
#评价聚类效果:轮廓系数si,原理可参考:http://blog.csdn.net/xueyingxue001/article/details/51966932
'''
si接近1,则说明样本i聚类合理;
si接近-1,则说明样本i更应该分类到另外的簇;
若si 近似为0,则说明样本i在两个簇的边界上。
'''
print("Silhouette Coefficient: %0.3f"% metrics.silhouette_score(X, labels))#聚类效果评价指标
#打印各簇标号以及各簇内数据
for i in range(n_clusters_):
print('number of data in Cluster %s is : %s'%(i,len(X[labels==i])))
#print(list(X[labels == i].flatten()))
#绘制直方图分析
plt.hist(X,24) | [
"1058198502@qq.com"
] | 1058198502@qq.com |
5154a4e9ad8557de0e8b54229abfaae3972c2128 | 6c9b8812e1f5e1f6bc881265ce9de9efeb22869d | /model_zoo/obj_detection/rpn.py | 63b8ffae98cd091045ca49ae3bcdde701b367885 | [] | no_license | maxme1/model_zoo | a194582362ab07e2b40dfd43fc5e0aa60f7289cf | ef34432cbd7eb912ba976cd1448427f3cc3ff6f5 | refs/heads/master | 2022-07-12T10:08:43.279906 | 2022-06-23T18:37:57 | 2022-06-23T18:37:57 | 90,151,979 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import numpy as np
import torch.nn as nn
from torch.nn import functional
class RPN(nn.Module):
    """Region Proposal Network head.

    Maps a backbone feature map to per-anchor objectness scores and
    4-value box regressions.
    """

    def __init__(self, in_channels, mid_channels, kernel_size, num_anchors):
        super().__init__()
        # Shared conv that conditions the backbone features for both heads.
        self.standardise = nn.Conv2d(in_channels, mid_channels, kernel_size, padding=kernel_size // 2)
        # 1x1 heads: one score per anchor, four box offsets per anchor.
        self.classifier = nn.Conv2d(mid_channels, num_anchors, 1)
        self.regressor = nn.Conv2d(mid_channels, num_anchors * 4, 1)

    def forward(self, x):
        features = self.standardise(x)
        return self.classifier(features), self.regressor(features)
class ROIPooling(nn.Module):
    """Pools a feature map down to a fixed spatial output shape.

    Kernel and stride are derived from the ratio between the input's
    spatial shape and the requested output shape.
    """

    def __init__(self, output_shape, pooling=functional.max_pool2d):
        super().__init__()
        self.pooling = pooling
        self.output_shape = output_shape

    def forward(self, x):
        spatial = np.array(x.shape[2:])
        ratio = spatial / self.output_shape
        # floor -> stride, ceil -> kernel, so the windows span the input.
        stride = tuple(int(v) for v in np.floor(ratio))
        kernel = tuple(int(v) for v in np.ceil(ratio))
        return self.pooling(x, kernel_size=kernel, stride=stride)
class ResNetC4(nn.Module):
    """Wraps a torchvision-style ResNet, keeping only its convolutional trunk.

    The classification head (avgpool + fc) is deleted, so forward() yields
    the final feature map rather than logits.
    """

    def __init__(self, model):
        super().__init__()
        del model.avgpool, model.fc
        self.model = model

    def forward(self, x):
        out = x
        # Run the stages in the standard ResNet order.
        for stage in ('conv1', 'bn1', 'relu', 'maxpool',
                      'layer1', 'layer2', 'layer3', 'layer4'):
            out = getattr(self.model, stage)(out)
        return out
| [
"maxs987@gmail.com"
] | maxs987@gmail.com |
cf0cb9b68aefd4e7d4cc1752125b10ee68486cd7 | c9ffc4b4d2bec921d7f7acbdcd3b2dda85c62a07 | /example_taxi/example/serializers.py | e707a860f0c63fb6bb37fda6952de28f402e2544 | [] | no_license | gridl/taxi-app-channels2 | 1c07f7e6832b743e4593653c633c377579756313 | 7958a071a37b0b90c2847a521f4196d4443318a2 | refs/heads/master | 2020-03-30T19:48:05.692197 | 2018-04-19T13:19:49 | 2018-04-19T13:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | from django.contrib.auth import get_user_model
from rest_framework import serializers
from .models import Trip
class UserSerializer(serializers.ModelSerializer):
    """Sign-up serializer: takes a password plus confirmation, never echoes them."""

    password1 = serializers.CharField(write_only=True)
    password2 = serializers.CharField(write_only=True)

    def validate(self, data):
        """Reject payloads whose two password fields disagree."""
        if data['password1'] == data['password2']:
            return data
        raise serializers.ValidationError('Passwords must match.')

    def create(self, validated_data):
        """Create the user via the manager, collapsing password1/2 into one."""
        confirmation_fields = ('password1', 'password2')
        data = {
            key: value for key, value in validated_data.items()
            if key not in confirmation_fields
        }
        data['password'] = validated_data['password1']
        return self.Meta.model.objects.create_user(**data)

    class Meta:
        model = get_user_model()
        fields = (
            'id', 'username', 'password1', 'password2',
            'first_name', 'last_name',
        )
        read_only_fields = ('id',)
class TripSerializer(serializers.ModelSerializer):
    """Exposes every Trip field; server-managed fields are read-only."""
    class Meta:
        model = Trip
        fields = '__all__'
        read_only_fields = ('id', 'nk', 'created', 'updated',)
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
1be012a64757e907d83446fe8297da8be07d1ca7 | 879ac03dd910d152170d6d1e3ff4d5e522b14d79 | /Tutorial/02. 30 Days of Code/025. Day 24; More Linked Lists.py | d5beb71d16b8a7bc1ee33d88692932b22a6858e9 | [] | no_license | dispe1/Hackerrank-Solutions | ae47920d7761546fd2ef753c1b4f9ae087aaed2a | 67b792dc2cb2933eb1f1565100ea13b0c9783fba | refs/heads/master | 2020-07-11T21:25:39.824667 | 2019-12-10T12:00:12 | 2019-12-10T12:00:12 | 204,646,756 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | # Problem: https://www.hackerrank.com/challenges/30-linked-list-deletion/problem
# Difficulty : Easy
# Score : 30
class Node:
    """A node in a singly linked list."""
    def __init__(self,data):
        self.data = data
        self.next = None  # successor node; None while this is the tail
class Solution:
    """Tail insertion, display and duplicate removal for a sorted linked list."""

    def insert(self, head, data):
        """Append a node holding *data* at the tail and return the head."""
        node = Node(data)
        if head is None:
            return node
        tail = head
        while tail.next is not None:
            tail = tail.next
        tail.next = node
        return head

    def display(self, head):
        """Print the node values separated by single spaces."""
        node = head
        while node:
            print(node.data, end=' ')
            node = node.next

    def removeDuplicates(self, head):
        """Collapse runs of equal adjacent values (list assumed sorted)."""
        node = head
        while head != None and node.next != None:
            if node.data == node.next.data:
                node.next = node.next.next
            else:
                node = node.next
        return head
# Driver: read T integers from stdin (HackerRank supplies them sorted),
# build the list, drop adjacent duplicates, print the result.
mylist= Solution()
T=int(input())
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)  # collapse repeated adjacent values
mylist.display(head);
| [
"lkjim0757@naver.com"
] | lkjim0757@naver.com |
dc343080a3e6d41edc7e4dd758414b296f10372e | 0a1742760b617db58d13bec3d715d83d4f552bdb | /scripts/delnopm.py | e6d88e6657644f1097cd0eee3bc9096c03d5e1e6 | [
"MIT"
] | permissive | rezvorck/vkbot | 1a97709e4bf3ec51e02af17ecc88bc0ceac59058 | f0e3a9ce4c6384bca3939960996e449d98d6ae46 | refs/heads/master | 2021-01-01T16:53:12.262398 | 2017-07-20T15:48:15 | 2017-07-20T15:48:15 | 97,942,346 | 1 | 0 | null | 2017-07-21T11:48:01 | 2017-07-21T11:48:00 | null | UTF-8 | Python | false | false | 436 | py | import logging
import scriptlib
# noinspection PyUnusedLocal
def main(a, args):
friends = scriptlib.getFriends(a, fields='can_write_private_message')
to_del = []
for j in friends:
if not j['can_write_private_message']:
to_del.append(str(j['id']))
logging.info('Found id{} ({} {})'.format(j['id'], j['first_name'], j['last_name']))
scriptlib.createFriendController().appendNoadd(to_del)
| [
"kalinochkind@gmail.com"
] | kalinochkind@gmail.com |
c54de06cec40dc1412ffe45eae83c0d130d2c7ec | e7efae2b83216d9621bd93390959d652de779c3d | /hyperv/datadog_checks/hyperv/check.py | aa4d44f2a269738f8b3d15872dcfd9dd47ce87f9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] | permissive | DataDog/integrations-core | ee1886cc7655972b2791e6ab8a1c62ab35afdb47 | 406072e4294edff5b46b513f0cdf7c2c00fac9d2 | refs/heads/master | 2023-08-31T04:08:06.243593 | 2023-08-30T18:22:10 | 2023-08-30T18:22:10 | 47,203,045 | 852 | 1,548 | BSD-3-Clause | 2023-09-14T16:39:54 | 2015-12-01T16:41:45 | Python | UTF-8 | Python | false | false | 427 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.base.checks.windows.perf_counters.base import PerfCountersBaseCheckWithLegacySupport
from .metrics import METRICS_CONFIG
class HypervCheckV2(PerfCountersBaseCheckWithLegacySupport):
    """Hyper-V check built on Windows performance counters."""
    __NAMESPACE__ = 'hyperv'
    def get_default_config(self):
        # Metric definitions are static; see metrics.METRICS_CONFIG.
        return {'metrics': METRICS_CONFIG}
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
c5de719c57b09e8105dec4d270d58fd962cd0482 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/frameratedistribution/frameratedistribution.py | 9e82189eba96293027ad24081a8cab44a50a6e6d | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,774 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class FrameRateDistribution(Base):
    """This object provides the options for Frame Rate distribution.
    The FrameRateDistribution class encapsulates a required frameRateDistribution resource which will be retrieved from the server every time the property is accessed.
    """
    # Auto-generated swagger model: do not hand-edit.
    __slots__ = ()
    _SDM_NAME = 'frameRateDistribution'
    # Maps Python property names to their REST attribute names.
    _SDM_ATT_MAP = {
        'PortDistribution': 'portDistribution',
        'StreamDistribution': 'streamDistribution',
    }
    # Allowed enum values for each REST attribute.
    _SDM_ENUM_MAP = {
        'portDistribution': ['applyRateToAll', 'splitRateEvenly'],
        'streamDistribution': ['applyRateToAll', 'splitRateEvenly'],
    }
    def __init__(self, parent, list_op=False):
        super(FrameRateDistribution, self).__init__(parent, list_op)
    @property
    def PortDistribution(self):
        # type: () -> str
        """
        Returns
        -------
        - str(applyRateToAll | splitRateEvenly): At the port level, apply the target configuration transmission rate for each encapsulation.
        """
        return self._get_attribute(self._SDM_ATT_MAP['PortDistribution'])
    @PortDistribution.setter
    def PortDistribution(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['PortDistribution'], value)
    @property
    def StreamDistribution(self):
        # type: () -> str
        """
        Returns
        -------
        - str(applyRateToAll | splitRateEvenly): At the flow group level, apply the target rate of each port.
        """
        return self._get_attribute(self._SDM_ATT_MAP['StreamDistribution'])
    @StreamDistribution.setter
    def StreamDistribution(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['StreamDistribution'], value)
    def update(self, PortDistribution=None, StreamDistribution=None):
        # type: (str, str) -> FrameRateDistribution
        """Updates frameRateDistribution resource on the server.
        Args
        ----
        - PortDistribution (str(applyRateToAll | splitRateEvenly)): At the port level, apply the target configuration transmission rate for each encapsulation.
        - StreamDistribution (str(applyRateToAll | splitRateEvenly)): At the flow group level, apply the target rate of each port.
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Only the locals present in _SDM_ATT_MAP are sent to the server.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
ab5a2cf7646ee4c873d6f8e1b936ef25d22e2781 | 39f9cdff9eca95b1018d2b869cb08c1b71905ead | /Lesson03/re_lang.py | 994273c73af1c078d431f1a4978dca42dcde6449 | [] | no_license | alexbaryzhikov/design-of-computer-programs | a900ec246a1d174da7fba4f209471aa44dfa7486 | 7b4b212b528a0164cbd283110426bb7e0a0f46ce | refs/heads/master | 2020-03-23T03:57:44.165186 | 2018-07-15T21:06:28 | 2018-07-15T21:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,107 | py | # Specifications
def test_search():
    """Self-test covering the combinators, match() and search()."""
    a, b, c = lit('a'), lit('b'), lit('c')
    abcstars = seq(star(a), seq(star(b), star(c)))  # a*b*c*
    dotstar = star(dot)                             # .*
    assert search(lit('def'), 'abcdefg') == 'def'
    assert search(seq(lit('def'), eol), 'abcdefg') == None
    assert search(a, 'not the start') == 'a'
    assert match(a, 'not the start') == None
    assert match(abcstars, 'aaabbbccccccccccdef') == 'aaabbbcccccccccc'
    assert match(abcstars, 'junk') == ''
    assert all(match(seq(abcstars, eol), s) == s
               for s in 'abc aaabbccc aaaabcccc'.split())
    assert all(match(seq(abcstars, eol), s) == None
               for s in 'cab aaabbcccd aaaa-b-cccc'.split())
    r = seq(lit('ab'), seq(dotstar, seq(lit('aca'), seq(dotstar, seq(a, eol)))))
    assert all(search(r, s) is not None
               for s in 'abracadabra abacaa about-acacia-flora'.split())
    assert all(match(seq(c, seq(dotstar, b)), s) is not None
               for s in 'cab cob carob cb carbuncle'.split())
    assert not any(match(seq(c, seq(dot, b)), s)
                   for s in 'crab cb across scab'.split())
    return 'test_search passes'
# Implementation -- Interpreter
def search(pattern, text):
    """Match pattern anywhere in text; return the longest earliest match or None.

    Iterates over every start position *including* len(text), so patterns
    that match the empty string (e.g. eol) are found at the very end and on
    empty text. The original range(len(text)) skipped that final position,
    so e.g. search(eol, 'abc') wrongly returned None.
    """
    for i in range(len(text) + 1):
        m = match(pattern, text[i:])
        if m is not None:
            return m
def match(pattern, text):
    """Match pattern against the start of text; return the longest match or None."""
    remainders = matchset(pattern, text)
    if not remainders:
        return None
    # The shortest remainder corresponds to the longest consumed prefix.
    shortest = min(remainders, key=len)
    return text[:len(text) - len(shortest)]
def components(pattern):
    """Return the (op, x, y) parts of a pattern tuple; x and y default to None."""
    op = pattern[0]
    x = pattern[1] if len(pattern) > 1 else None
    y = pattern[2] if len(pattern) > 2 else None
    return op, x, y
def matchset(pattern, text):
    """Match pattern at the start of text; return the set of text remainders.

    An empty set means no match; each remainder is what is left of text
    after one possible way of matching the pattern.
    """
    op, x, y = components(pattern)
    if op == 'lit':
        return {text[len(x):]} if text.startswith(x) else null
    if op == 'seq':
        return {t2 for t1 in matchset(x, text) for t2 in matchset(y, t1)}
    if op == 'alt':
        return matchset(x, text) | matchset(y, text)
    if op == 'dot':
        return {text[1:]} if text else null
    if op == 'oneof':
        # x is a tuple of single characters; startswith accepts a tuple.
        return {text[1:]} if text.startswith(x) else null
    if op == 'eol':
        return {''} if text == '' else null
    if op == 'star':
        # Zero repetitions (text itself) plus one-or-more; the t1 != text
        # guard prevents infinite recursion on empty-string matches.
        return ({text} |
                {t2 for t1 in matchset(x, text) if t1 != text
                 for t2 in matchset(pattern, t1)})
    raise ValueError('unknown pattern: %s' % pattern)
null = frozenset()  # the empty match-set: no way to match

def lit(string):
    """Literal: match exactly *string*."""
    return ('lit', string)

def seq(x, y):
    """Sequence: match x followed by y."""
    return ('seq', x, y)

def alt(x, y):
    """Alternation: match x or y."""
    return ('alt', x, y)

def star(x):
    """Kleene star: match zero or more x."""
    return ('star', x)

def plus(x):
    """Match one or more x (x followed by x*)."""
    return seq(x, star(x))

def opt(x):
    """Optional: match x or the empty string."""
    return alt(lit(''), x)

def oneof(chars):
    """Match any single character in *chars*."""
    return ('oneof', tuple(chars))

dot = ('dot',)  # match any single character
eol = ('eol',)  # match the end of text
print(test_search())  # run the self-test when the module is executed
| [
"aleksiarts@gmail.com"
] | aleksiarts@gmail.com |
7b31f1cabe8e6e6e069065a085ffe735af6feec8 | 17575d8276d36cf5b32d0b6645fb5dd1b5c0962a | /algorithm/elements/height_balanced.py | 8ea07d761515626c6b9a234ae01c1f9b9e141ba4 | [] | no_license | upul/WhiteBoard | 2f720acc1b1c1e0002f8e0d7842c23707c58debe | e81feb8172add6b893fb4496a590c43f863a0346 | refs/heads/master | 2022-09-26T21:07:25.271461 | 2021-05-13T13:31:27 | 2021-05-13T13:31:27 | 47,049,709 | 8 | 20 | null | 2022-09-23T22:34:42 | 2015-11-29T04:20:21 | Jupyter Notebook | UTF-8 | Python | false | false | 1,267 | py |
from collections import namedtuple
class BinaryTreeNode:
    """A binary tree node holding a value and optional children."""
    def __init__(self, data=None, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right
def is_balanced_binary_tree(tree):
    """Return True if every node's subtrees differ in height by at most 1.

    Post-order walk returning (balanced, height) per subtree; it
    short-circuits as soon as any unbalanced node is found.
    """
    BalancedStatusWithHeight = namedtuple(
        'BalancedStatusWithHeight', ('balanced', 'height'))

    def check_balance(tree):
        if not tree:
            # Empty subtree: balanced, height -1 so a leaf gets height 0.
            return BalancedStatusWithHeight(True, -1)
        left_result = check_balance(tree.left)
        if not left_result.balanced:
            return BalancedStatusWithHeight(False, 0)
        right_result = check_balance(tree.right)
        if not right_result.balanced:
            return BalancedStatusWithHeight(False, 0)
        is_balanced = abs(left_result.height - right_result.height) <= 1
        # Bug fix: a node is one level taller than its tallest child. The
        # original omitted the +1, so every height stayed -1 and the
        # function reported every tree as balanced.
        height = max(left_result.height, right_result.height) + 1
        return BalancedStatusWithHeight(is_balanced, height)

    return check_balance(tree).balanced
if __name__ == '__main__':
    # Build a symmetric three-level tree:
    #          314
    #         /   \
    #        6     6
    #       / \   / \
    #     271 561 2 271
    L21 = BinaryTreeNode(271)
    L22 = BinaryTreeNode(561)
    L23 = BinaryTreeNode(2)
    L24 = BinaryTreeNode(271)
    L11 = BinaryTreeNode(6, L21, L22)
    L12 = BinaryTreeNode(6, L23, L24)
    root = BinaryTreeNode(314, L11, L12)
    print(is_balanced_binary_tree(root))
"upulbandara@gmail.com"
] | upulbandara@gmail.com |
545e9c52abe5d51cda54bc57e2d7a1abfa427f33 | 6db97ab761d59452c05611354637dfb2ce693c96 | /setup.py | 373a4c95fc18def50d4a44b1db4c7823a36f17d9 | [
"MIT"
] | permissive | Mahdi-Soheyli/compas_fab | e885efbdd5531ae5f245bf02b2f1acce0a308680 | 0e7d426903a5d9a1bca947cd7a1251031c4c71b4 | refs/heads/master | 2020-05-02T16:53:13.265526 | 2019-03-20T13:37:37 | 2019-03-20T13:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import abspath, basename, dirname, join, splitext
from setuptools import find_packages, setup
# Runtime dependencies installed alongside the package.
requirements = [
    'compas==0.4.10',
    'roslibpy>=0.4.0',
    'pyserial',
]
# PyPI search keywords.
keywords_list = ['robotic fabrication', 'digital fabrication', 'architecture', 'robotics', 'ros']
here = abspath(dirname(__file__))  # directory containing this setup.py
def read(*names, **kwargs):
    """Read a text file located relative to this setup.py and return its content.

    *names* are path segments joined onto the package root directory;
    *encoding* defaults to UTF-8.
    """
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection (the original never closed it).
    with io.open(join(here, *names),
                 encoding=kwargs.get('encoding', 'utf8')) as f:
        return f.read()
# Load package metadata (title, version, author, ...) from __version__.py.
about = {}
exec(read('src', 'compas_fab', '__version__.py'), about)
setup(
    name=about['__title__'],
    version=about['__version__'],
    license=about['__license__'],
    description=about['__description__'],
    author=about['__author__'],
    author_email=about['__author_email__'],
    url=about['__url__'],
    # README (badge block stripped) followed by the CHANGELOG with
    # sphinx-style roles reduced to plain double-backtick literals.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M |
                   re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: IronPython',
        'Topic :: Scientific/Engineering',
    ],
    keywords=keywords_list,
    install_requires=requirements,
    extras_require={},
    entry_points={},
)
| [
"casas@arch.ethz.ch"
] | casas@arch.ethz.ch |
58f5a3c0db2393623589440672e2123e1cb50dd6 | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/get_all_companies.py | f8f7073f8e259464c0e996abd799d0c872d58852 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all companies. To create companies, run
create_companies.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp import DfpUtils
from adspygoogle.dfp.DfpClient import DfpClient
# NOTE: Python 2 example (bare print statements below).
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
company_service = client.GetCompanyService(
    'https://sandbox.google.com', 'v201103')
# Get companies by statement.
companies = DfpUtils.GetAllEntitiesByStatement(client, 'Company')
# Display results.
for company in companies:
  print ('Company with id \'%s\', name \'%s\', and type \'%s\' was found.'
         % (company['id'], company['name'], company['type']))
print
print 'Number of results found: %s' % len(companies)
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
070406b840ca91f2dc2ea9342d6a45aed96919b4 | abf4bfa1db4b9bacac3ffb7ab6aeee2e85b8667d | /minerva/controllers/system.py | 5562535ac1c0260f71bec732ad9ab09bb101cf6a | [
"MIT"
] | permissive | gitter-badger/minerva-1 | 0fac217c37992329fe83b1e4b366696ccc97a6aa | 9825c5494e83051afcfdec20771b64475fa35c84 | refs/heads/master | 2022-12-13T05:53:01.959423 | 2020-09-09T02:33:18 | 2020-09-09T02:33:18 | 293,982,383 | 1 | 0 | null | 2020-09-09T02:35:10 | 2020-09-09T02:35:10 | null | UTF-8 | Python | false | false | 1,252 | py | # pylint: disable=unidiomatic-typecheck
import json
from d3rlpy.gpu import get_gpu_count
from flask import Blueprint, jsonify
from ..models.experiment import Experiment, ExperimentSchema
from .project import _process_metrics
system_route = Blueprint('system', __name__)
@system_route.route('/status', methods=['GET'])
def get_system_status():
n_gpus = get_gpu_count()
# get all active experiments
experiments = Experiment.create_query().filter(Experiment.is_active).all()
gpu_jobs = {}
cpu_jobs = []
for experiment in experiments:
# identify device
config = json.loads(experiment.config)
device_id = config['use_gpu'] if 'use_gpu' in config else None
# make response data
data = ExperimentSchema().dump(experiment)
# update status
_process_metrics(experiment, data)
if type(device_id) == int:
if device_id not in gpu_jobs:
gpu_jobs[device_id] = []
gpu_jobs[device_id].append(data)
else:
cpu_jobs.append(data)
res = {
'gpu': {
'total': n_gpus,
'jobs': gpu_jobs
},
'cpu': {
'jobs': cpu_jobs
}
}
return jsonify(res)
| [
"takuma.seno@gmail.com"
] | takuma.seno@gmail.com |
6ebe68c85e467dcf90b2f16afae750ba710e29da | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02900/s183018985.py | 26a0dfad96b01c45de7664128068ededb63e7367 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from fractions import gcd
A,B=map(int,input().split())
p=set([1])
g=gcd(A,B)
for d in range(2,g):
while g%d==0:
p.add(d)
g//=d
if (d*d>=g): break
if g>=2:
p.add(g)
print(len(p))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6889e85cc3123edb30ca517b5957ce75bd2bac61 | 95495baeb47fd40b9a7ecb372b79d3847aa7a139 | /swagger_client/models/i_console_user.py | f159b1559b5b8777fa48a6f57cad912d863bbf0f | [] | no_license | pt1988/fmc-api | b1d8ff110e12c13aa94d737f3fae9174578b019c | 075f229585fcf9bd9486600200ff9efea5371912 | refs/heads/main | 2023-01-07T09:22:07.685524 | 2020-10-30T03:21:24 | 2020-10-30T03:21:24 | 308,226,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,000 | py | # coding: utf-8
"""
Cisco Firepower Management Center Open API Specification
**Specifies the REST URLs and methods supported in the Cisco Firepower Management Center API. Refer to the version specific [REST API Quick Start Guide](https://www.cisco.com/c/en/us/support/security/defense-center/products-programming-reference-guides-list.html) for additional information.** # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tac@cisco.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class IConsoleUser(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # Plain data model mirroring the API's IConsoleUser resource: four
    # optional string-ish fields (name, links, id, type) with getters/setters
    # plus dict/str/equality helpers, all driven by swagger_types below.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'links': 'ILinks',
        'id': 'str',
        'type': 'str'
    }
    attribute_map = {
        'name': 'name',
        'links': 'links',
        'id': 'id',
        'type': 'type'
    }
    def __init__(self, name=None, links=None, id=None, type=None):  # noqa: E501
        """IConsoleUser - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._links = None
        self._id = None
        self._type = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if links is not None:
            self.links = links
        if id is not None:
            self.id = id
        if type is not None:
            self.type = type
    @property
    def name(self):
        """Gets the name of this IConsoleUser.  # noqa: E501
        :return: The name of this IConsoleUser.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this IConsoleUser.
        :param name: The name of this IConsoleUser.  # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def links(self):
        """Gets the links of this IConsoleUser.  # noqa: E501
        :return: The links of this IConsoleUser.  # noqa: E501
        :rtype: ILinks
        """
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this IConsoleUser.
        :param links: The links of this IConsoleUser.  # noqa: E501
        :type: ILinks
        """
        self._links = links
    @property
    def id(self):
        """Gets the id of this IConsoleUser.  # noqa: E501
        :return: The id of this IConsoleUser.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this IConsoleUser.
        :param id: The id of this IConsoleUser.  # noqa: E501
        :type: str
        """
        self._id = id
    @property
    def type(self):
        """Gets the type of this IConsoleUser.  # noqa: E501
        :return: The type of this IConsoleUser.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this IConsoleUser.
        :param type: The type of this IConsoleUser.  # noqa: E501
        :type: str
        """
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(IConsoleUser, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, IConsoleUser):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"pt1988@gmail.com"
] | pt1988@gmail.com |
461981baa6d5e7e3e901b71df7640e0723b32d40 | 719853613b5b96f02072be1fde736d883e799f02 | /server/accounts/urls.py | 106d5275a4cf4eb733a549e205bf7f2113d2753e | [
"MIT"
] | permissive | anmolkabra/opensurfaces | 5ba442123586533a93eb29890fa1694e3efdbfe8 | a42420083a777d7e1906506cc218f681c5cd145b | refs/heads/master | 2020-03-20T01:11:05.182880 | 2018-06-13T14:55:45 | 2018-06-13T14:55:45 | 137,068,945 | 0 | 0 | MIT | 2018-06-12T12:32:53 | 2018-06-12T12:32:52 | null | UTF-8 | Python | false | false | 178 | py | from django.conf.urls import patterns, url
from accounts.views import admin_shell
urlpatterns = patterns(
    '',
    # Admin-only interactive shell view.
    url(r'^admin-shell/$', admin_shell, name='admin-shell'),
)
| [
"sbell@cs.cornell.edu"
] | sbell@cs.cornell.edu |
c7cfcaf109b00f593c5d0e7c165d89cac1516f38 | b289a2c1b42e17a2338c5414b6831f9cd44cb2dd | /valarie/executor/system.py | 496768e72ee769b6de032fc723323797ee8f093f | [
"MIT"
] | permissive | phnomcobra/valarie | fa2d3136092c80aeaca5474afe0ce726e36ade25 | 83bedeb50be5ab385c0851bf53044ee583e1adfd | refs/heads/master | 2022-10-27T21:38:23.669304 | 2022-10-20T12:44:03 | 2022-10-20T12:44:03 | 163,562,318 | 0 | 0 | MIT | 2022-10-09T03:30:39 | 2018-12-30T04:49:11 | JavaScript | UTF-8 | Python | false | false | 898 | py | #!/usr/bin/python3
"""This module implements system for the executor module."""
from subprocess import Popen, PIPE
from valarie.controller import logging
def system(command: str) -> int:
    """This function executes a command on the system.

    Standard out and standard error emit into the logger.

    Args:
        command:
            The command to execute.

    Returns:
        Returns the return code as an integer.
    """
    logging.info(command)

    # NOTE: shell=True runs the string through the shell; `command` must be trusted.
    process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out_bytes, err_bytes = process.communicate()

    stdout = out_bytes.decode().strip()
    stderr = err_bytes.decode().strip()

    if stdout:
        logging.debug(stdout)
    if stderr:
        logging.error(stderr)

    logging.info(f'returned {process.returncode}')
    return process.returncode
| [
"phnomcobra@gmail.com"
] | phnomcobra@gmail.com |
15c9115c35dcba355e97459f453a66e4f5821cd0 | cc9cf69b1534dc0d9530b4ff485084162a404e34 | /leetcode/without/leetcode_70.py | 1f25e342333eabed2950b304d24f79fb7ef1c40e | [] | no_license | NASA2333/study | 99a58b2c9979201e9a4fae0c797391a538de6f45 | ba63bc18f3c788090e43406315497329b00ec0a5 | refs/heads/master | 2021-05-03T22:26:52.541760 | 2018-02-07T02:24:55 | 2018-02-07T02:24:55 | 104,988,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | '''
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?
Note: Given n will be a positive integer.
Example 1:
Input: 2
Output: 2
Explanation: There are two ways to climb to the top.
1. 1 step + 1 step
2. 2 steps
Example 2:
Input: 3
Output: 3
Explanation: There are three ways to climb to the top.
1. 1 step + 1 step + 1 step
2. 1 step + 2 steps
3. 2 steps + 1 step
'''
class Solution(object):
    def climbStairs(self, n):
        """Count distinct ways to climb n steps taking 1 or 2 at a time.

        Fibonacci recurrence computed with a rolling pair of values,
        so only O(1) extra space is used.

        :type n: int
        :rtype: int
        """
        if n <= 2:
            return n
        two_back, one_back = 1, 2  # ways(1), ways(2)
        for _ in range(3, n + 1):
            two_back, one_back = one_back, two_back + one_back
        return one_back
if __name__ == "__main__":
    # Smoke test: 6 steps can be climbed in 13 distinct ways.
    print(Solution().climbStairs(6))
    assert Solution().climbStairs(6) == 13
"422282539@qq.com"
] | 422282539@qq.com |
430b5945a07208c04556532b109363bdb908ea52 | 25b914aecd6b0cb49294fdc4f2efcfdf5803cc36 | /homeassistant/components/balboa/binary_sensor.py | b73872b664710068013ef6e8b5b0aeb2da9c9692 | [
"Apache-2.0"
] | permissive | jason0x43/home-assistant | 9114decaa8f7c2f1582f84e79dc06736b402b008 | 8bf6aba1cf44ee841de063755c935ea78040f399 | refs/heads/dev | 2023-03-04T01:14:10.257593 | 2022-01-01T12:11:56 | 2022-01-01T12:11:56 | 230,622,861 | 1 | 1 | Apache-2.0 | 2023-02-22T06:15:07 | 2019-12-28T14:45:43 | Python | UTF-8 | Python | false | false | 1,768 | py | """Support for Balboa Spa binary sensors."""
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from .const import CIRC_PUMP, DOMAIN, FILTER
from .entity import BalboaEntity
# Index = spa filter mode; value = [filter 1 running, filter 2 running].
FILTER_STATES = [
    [False, False],  # self.FILTER_OFF
    [True, False],  # self.FILTER_1
    [False, True],  # self.FILTER_2
    [True, True],  # self.FILTER_1_2
]
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the spa's binary sensors."""
    spa = hass.data[DOMAIN][entry.entry_id]
    # One sensor per filter cycle (1 and 2).
    entities = [BalboaSpaFilter(entry, spa, FILTER, idx) for idx in (1, 2)]
    if spa.have_circ_pump():
        entities.append(BalboaSpaCircPump(entry, spa, CIRC_PUMP))
    async_add_entities(entities)
class BalboaSpaBinarySensor(BalboaEntity, BinarySensorEntity):
    """Representation of a Balboa Spa binary sensor entity."""
    # Both concrete sensors (circulation pump, filter cycles) report movement.
    _attr_device_class = BinarySensorDeviceClass.MOVING
class BalboaSpaCircPump(BalboaSpaBinarySensor):
    """Representation of a Balboa Spa circulation pump."""

    @property
    def is_on(self) -> bool:
        """Return true if the circulation pump is running."""
        return self._client.get_circ_pump()

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        if self.is_on:
            return "mdi:water-pump"
        return "mdi:water-pump-off"
class BalboaSpaFilter(BalboaSpaBinarySensor):
    """Representation of a Balboa Spa Filter."""

    @property
    def is_on(self) -> bool:
        """Return true if this filter cycle is running."""
        # FILTER_STATES maps the spa filter mode to per-filter booleans;
        # self._num selects filter 1 or 2.
        return FILTER_STATES[self._client.get_filtermode()][self._num - 1]

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        if self.is_on:
            return "mdi:sync"
        return "mdi:sync-off"
| [
"noreply@github.com"
] | jason0x43.noreply@github.com |
cea2ec05a0670b294d79a17e8de73449644a3eb6 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/authorization/v20190901/get_policy_definition_at_management_group.py | 287a80a7cda013c3317067e3b4fba4c14062bbe9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPolicyDefinitionAtManagementGroupResult',
'AwaitableGetPolicyDefinitionAtManagementGroupResult',
'get_policy_definition_at_management_group',
'get_policy_definition_at_management_group_output',
]
# NOTE: this class is generated by the Pulumi SDK Generator (see the file
# header) -- keep hand edits to comments only so regeneration stays clean.
@pulumi.output_type
class GetPolicyDefinitionAtManagementGroupResult:
    """
    The policy definition.
    """
    def __init__(__self__, description=None, display_name=None, id=None, metadata=None, mode=None, name=None, parameters=None, policy_rule=None, policy_type=None, type=None):
        # Each engine-supplied value is type-checked (when present) and then
        # stored on the output object via pulumi.set; @pulumi.output_type
        # exposes them through the @pulumi.getter properties below.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if metadata and not isinstance(metadata, dict):
            raise TypeError("Expected argument 'metadata' to be a dict")
        pulumi.set(__self__, "metadata", metadata)
        if mode and not isinstance(mode, str):
            raise TypeError("Expected argument 'mode' to be a str")
        pulumi.set(__self__, "mode", mode)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if parameters and not isinstance(parameters, dict):
            raise TypeError("Expected argument 'parameters' to be a dict")
        pulumi.set(__self__, "parameters", parameters)
        if policy_rule and not isinstance(policy_rule, dict):
            raise TypeError("Expected argument 'policy_rule' to be a dict")
        pulumi.set(__self__, "policy_rule", policy_rule)
        if policy_type and not isinstance(policy_type, str):
            raise TypeError("Expected argument 'policy_type' to be a str")
        pulumi.set(__self__, "policy_type", policy_type)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The policy definition description.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[str]:
        """
        The display name of the policy definition.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the policy definition.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def metadata(self) -> Optional[Any]:
        """
        The policy definition metadata. Metadata is an open ended object and is typically a collection of key value pairs.
        """
        return pulumi.get(self, "metadata")

    @property
    @pulumi.getter
    def mode(self) -> Optional[str]:
        """
        The policy definition mode. Some examples are All, Indexed, Microsoft.KeyVault.Data.
        """
        return pulumi.get(self, "mode")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the policy definition.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def parameters(self) -> Optional[Mapping[str, 'outputs.ParameterDefinitionsValueResponse']]:
        """
        The parameter definitions for parameters used in the policy rule. The keys are the parameter names.
        """
        return pulumi.get(self, "parameters")

    @property
    @pulumi.getter(name="policyRule")
    def policy_rule(self) -> Optional[Any]:
        """
        The policy rule.
        """
        return pulumi.get(self, "policy_rule")

    @property
    @pulumi.getter(name="policyType")
    def policy_type(self) -> Optional[str]:
        """
        The type of policy definition. Possible values are NotSpecified, BuiltIn, Custom, and Static.
        """
        return pulumi.get(self, "policy_type")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource (Microsoft.Authorization/policyDefinitions).
        """
        return pulumi.get(self, "type")
class AwaitableGetPolicyDefinitionAtManagementGroupResult(GetPolicyDefinitionAtManagementGroupResult):
    # Generated awaitable wrapper: the `if False: yield self` makes __await__
    # a generator that yields nothing and immediately returns a copy of the
    # result, so the object can be used with or without `await`.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPolicyDefinitionAtManagementGroupResult(
            description=self.description,
            display_name=self.display_name,
            id=self.id,
            metadata=self.metadata,
            mode=self.mode,
            name=self.name,
            parameters=self.parameters,
            policy_rule=self.policy_rule,
            policy_type=self.policy_type,
            type=self.type)
def get_policy_definition_at_management_group(management_group_id: Optional[str] = None,
                                              policy_definition_name: Optional[str] = None,
                                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyDefinitionAtManagementGroupResult:
    """
    The policy definition.


    :param str management_group_id: The ID of the management group.
    :param str policy_definition_name: The name of the policy definition to get.
    """
    # Generated invoke: marshal the arguments, fill in default InvokeOptions,
    # then call the typed provider function and re-wrap the result as the
    # awaitable variant.
    __args__ = dict()
    __args__['managementGroupId'] = management_group_id
    __args__['policyDefinitionName'] = policy_definition_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:authorization/v20190901:getPolicyDefinitionAtManagementGroup', __args__, opts=opts, typ=GetPolicyDefinitionAtManagementGroupResult).value

    return AwaitableGetPolicyDefinitionAtManagementGroupResult(
        description=__ret__.description,
        display_name=__ret__.display_name,
        id=__ret__.id,
        metadata=__ret__.metadata,
        mode=__ret__.mode,
        name=__ret__.name,
        parameters=__ret__.parameters,
        policy_rule=__ret__.policy_rule,
        policy_type=__ret__.policy_type,
        type=__ret__.type)
# Output-typed variant: lift_output_func wraps the plain invoke above so it
# accepts pulumi Inputs and returns an Output; the `...` body is intentional,
# the decorator supplies the implementation.
@_utilities.lift_output_func(get_policy_definition_at_management_group)
def get_policy_definition_at_management_group_output(management_group_id: Optional[pulumi.Input[str]] = None,
                                                     policy_definition_name: Optional[pulumi.Input[str]] = None,
                                                     opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPolicyDefinitionAtManagementGroupResult]:
    """
    The policy definition.


    :param str management_group_id: The ID of the management group.
    :param str policy_definition_name: The name of the policy definition to get.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
d140cf5c10646c09460c5fb577904cce75daf311 | cf3e9398e4a1a8b41aa12e3ef42aa2a73bff2507 | /src/compiler/frontend_test.py | bf75f5b52ccb47538806e8ac975c5e988e14b86b | [
"Apache-2.0",
"MIT"
] | permissive | fritzo/pomagma | fb207e8bfd77c7ac592ddb27d5fd3213da50a532 | ad2bf9c12eb58190f2761608c053ac89d3ddf305 | refs/heads/master | 2023-02-24T16:54:31.981623 | 2023-02-10T23:17:42 | 2023-02-10T23:17:42 | 4,943,857 | 12 | 0 | NOASSERTION | 2023-02-10T23:17:43 | 2012-07-08T05:22:16 | C++ | UTF-8 | Python | false | false | 465 | py | import pomagma.util
from pomagma.compiler import __main__ as main
from pomagma.compiler.util import find_theories
from pomagma.util.testing import for_each
# Parametrized over every theory file found in the repository.
@for_each(find_theories())
def test_compile(filename):
    """Smoke-test the compiler: compile one theory file inside a scratch
    directory, writing all four artifacts (symbols, facts, programs,
    optimized programs) to temp files that vanish with the directory."""
    with pomagma.util.in_temp_dir():
        main.compile(
            filename,
            symbols_out='temp.symbols',
            facts_out='temp.facts',
            programs_out='temp.programs',
            optimized_out='temp.optimized.programs')
| [
"fritz.obermeyer@gmail.com"
] | fritz.obermeyer@gmail.com |
44564d43c1934ab6625863de0fc90ab17d11dada | 9afbcb367de9bf055d531d285bc299a9ca3040fe | /next_partial.py | 6290584281b1e87c10e787a4fd4eb350d01744a1 | [] | no_license | mysqlplus163/aboutPython | a41a5bc2efd43b53d4acf96e7477e80c022cf657 | fa7c3e6f123158011d8726b28bfcd0dee02fa853 | refs/heads/master | 2020-03-21T05:06:19.949902 | 2018-03-14T16:04:54 | 2018-03-14T16:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Q1mi"
# Email: master@liwenzhou.com
"""
Python Web编程实战(小绿书)里面学到的知识点
"""
# 1. Use next() to grab the first value in a loop that matches a condition.
#    Baseline: an explicit search loop with a sentinel default of -1.
a1 = -1
for candidate in range(1, 10):
    if candidate % 4 == 0:
        a1 = candidate
        break
print(a1)

# Same result as a one-liner: next() over a generator, with the default
# returned when nothing matches.
a2 = next((n for n in range(1, 10) if n % 4 == 0), -1)
print(a2)

# 2. Call something repeatedly until a sentinel value ends the loop.
"""
blocks = []
while True:
    block = f.read(32)
    if block == "":
        break
    blocks.append(block)
"""

"""
from functools import partial
blocks = []
for block in iter(partial(f.read, 32), ""):
    blocks.append(block)
"""
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
bf4ed8e20cfbcef2a691f202e56011fc09f79742 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/nlp/mass/src/transformer/self_attention.py | 5a21c5aaf31b296c7e83ef88ba3d0b095f8fca39 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 3,168 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Self-Attention block."""
import mindspore.common.dtype as mstype
from mindspore import nn
from .multi_head_attention import MultiHeadAttention
from .residual_conn import ResidualConnection
from .components import LayerNorm
class SelfAttention(nn.Cell):
    """
    Self-Attention.

    Layer norm -> Multi-Head Self-Attention -> Add & Dropout.

    Args:
        attn_embed_dim (int): Dimensions of attention weight, e.g. Q, K, V.
        num_attn_heads (int): Attention heads number. Default: 1.
        attn_dropout_prob (float): Dropout rate in attention. Default: 0.1.
        initializer_range (float): Initial range.
        dropout_prob (float): Dropout rate.
        has_attention_mask (bool): Whether has attention mask.
        compute_type (mstype): Mindspore data type. Default: mstype.float32.

    Returns:
        Tensor, shape (N, T, D).
    """

    def __init__(self,
                 attn_embed_dim,
                 num_attn_heads,
                 attn_dropout_prob=0.1,
                 initializer_range=0.02,
                 dropout_prob=0.1,
                 has_attention_mask=True,
                 compute_type=mstype.float32):
        super(SelfAttention, self).__init__()
        # Self-attention: source and target dims are both attn_embed_dim,
        # and the output keeps its (N, T, D) shape (do_return_2d_tensor=False).
        self.multi_head_self_attention = MultiHeadAttention(
            src_dim=attn_embed_dim,
            tgt_dim=attn_embed_dim,
            attn_embed_dim=attn_embed_dim,
            num_attn_heads=num_attn_heads,
            attention_dropout_prob=attn_dropout_prob,
            initializer_range=initializer_range,
            has_attention_mask=has_attention_mask,
            do_return_2d_tensor=False,
            compute_type=compute_type)
        self.layer_norm = LayerNorm(in_channels=attn_embed_dim)
        # Residual connection applies dropout to the attention output before
        # adding the skip branch (see construct).
        self.residual = ResidualConnection(dropout_prob=dropout_prob)

    def construct(self, queries, keys, values, attention_mask):
        """
        Construct self-attention block.

        Layer norm -> Multi-Head Self-Attention -> Add & Dropout.

        Args:
            queries (Tensor): Shape (N, T, D).
            keys (Tensor): Shape (N, T', D).
            values (Tensor): Shape (N, T', D).
            attention_mask (Tensor): Shape (N, T, T').

        Returns:
            Tensor, shape (N, T, D).
        """
        # Pre-norm variant: layer norm is applied to the queries only;
        # keys and values are passed through unchanged.
        q = self.layer_norm(queries)  # (N, T, D)
        attention_output = self.multi_head_self_attention(
            q, keys, values, attention_mask
        )  # (N, T, D)
        # Skip connection adds the *original* (un-normalized) queries.
        q = self.residual(attention_output, queries)
        return q
| [
"chenhaozhe1@huawei.com"
] | chenhaozhe1@huawei.com |
4b92a27efd3b3a79589583a27bfec2e8f52dbb20 | 888e79392cb660be5799cc5bd25d76bcfa9e2e2c | /doctorus/doctorus/doctype/estados_de_evaluacion/estados_de_evaluacion.py | 3ce28a77294e67d9b00cbd7cdc44cba53e700881 | [
"MIT"
] | permissive | Nirchains/doctorus | 269eadee5754612c521d1c6193d5fe7bbfdb3b8a | 38d39270742dfdae6597a06713952df01a2c3e9d | refs/heads/master | 2020-03-17T07:09:30.046005 | 2019-05-08T06:51:50 | 2019-05-08T06:51:50 | 133,386,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, HISPALIS DIGITAL and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class EstadosdeEvaluacion(Document):
    """Frappe DocType controller for "Estados de Evaluacion".

    No custom behaviour is needed yet; everything is inherited from
    ``frappe.model.document.Document``.
    """

    pass
| [
"nirchains@gmail.com"
] | nirchains@gmail.com |
7443f4fff300825d8d672d4aa3125b64f6155161 | 5254c3a7e94666264120f26c87734ad053c54541 | /Entregas/Entrega Semana N°7/vida.py | e9cdc579900b67a214dad9a18b6c5ae8437809ea | [] | no_license | ccollado7/UNSAM---Python | 425eb29a2df8777e9f892b08cc250bce9b2b0b8c | f2d0e7b3f64efa8d03f9aa4707c90e992683672d | refs/heads/master | 2023-03-21T17:42:27.210599 | 2021-03-09T13:06:45 | 2021-03-09T13:06:45 | 286,613,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 14:23:33 2020
@author: Claudio Collado
"""
#Ejercicio 7.1
from datetime import datetime
def segundos_nacimiento(fecha, ahora=None):
    """Return the whole number of seconds elapsed since *fecha*.

    Parameters
    ----------
    fecha : str
        Birth date in ``dd/mm/AAAA`` format (day, month, 4-digit year,
        separated by forward slashes), e.g. ``'02/04/1985'``.
    ahora : datetime.datetime, optional
        Reference "now". Defaults to ``datetime.now()``; exposed as a
        parameter so the computation is deterministic and testable.
        (Backward compatible: existing one-argument calls are unchanged.)

    Returns
    -------
    int
        Seconds between *fecha* and *ahora*, truncated toward zero.

    Raises
    ------
    ValueError
        If *fecha* does not match the ``dd/mm/AAAA`` format.
    """
    fecha_inicio = datetime.strptime(fecha, '%d/%m/%Y')  # parsed birth date
    fecha_actual = ahora if ahora is not None else datetime.now()
    diferencia_fechas = fecha_actual - fecha_inicio
    # total_seconds() keeps the fractional part; truncate to an int as before.
    return int(diferencia_fechas.total_seconds())
# Quick manual checks: print elapsed seconds for three sample birth dates.
fecha_1 = segundos_nacimiento('02/04/1985')
print(fecha_1)
fecha_2 = segundos_nacimiento('01/01/2020')
print(fecha_2)
fecha_3 = segundos_nacimiento('01/01/2010')
print(fecha_3)
"46108725+ccollado7@users.noreply.github.com"
] | 46108725+ccollado7@users.noreply.github.com |
f72f1f8975ef40e6fe9352bd48103f7ec16d903e | 12362aa3c315e2b72ed29193ee24e3fd7f1a57db | /LeetCode/0232-Implement Queue using Stacks/main.py | 2261e5e4e7a767227fb8f251fe8dd92dd750aff4 | [] | no_license | PRKKILLER/Algorithm_Practice | f2f4662352516965777605ccf116dd7945c4b94a | 73654b6567fdb282af84a868608929be234075c5 | refs/heads/master | 2023-07-03T23:24:15.081892 | 2021-08-09T03:55:12 | 2021-08-09T03:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | """
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Example:
MyQueue queue = new MyQueue();
queue.push(1);
queue.push(2);
queue.peek(); // returns 1
queue.pop(); // returns 1
queue.empty(); // returns false
"""
class MyQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per operation).

    Pushes land on the "incoming" stack.  Pops and peeks are served from
    the "outgoing" stack, which is refilled by reversing the incoming
    stack only when it runs dry -- so each element is moved at most once.
    """

    def __init__(self):
        """Initialize your data structure here."""
        self._incoming = []
        self._outgoing = []

    def push(self, x: int) -> None:
        """Push element x to the back of queue."""
        self._incoming.append(x)

    def pop(self) -> int:
        """Removes the element from in front of queue and returns that element."""
        self.shuffle()
        return self._outgoing.pop()

    def peek(self) -> int:
        """Get the front element."""
        self.shuffle()
        return self._outgoing[-1]

    def empty(self) -> bool:
        """Returns whether the queue is empty."""
        return len(self._incoming) == 0 and len(self._outgoing) == 0

    def shuffle(self):
        """Reverse the incoming stack onto the outgoing stack, but only
        when the outgoing stack is empty (preserves FIFO order)."""
        if self._outgoing:
            return
        while self._incoming:
            self._outgoing.append(self._incoming.pop())
| [
"dw6000@163.com"
] | dw6000@163.com |
8bf3648d4e9800f14bf2caa630487f844621f7e0 | ec21d4397a1939ac140c22eca12491c258ed6a92 | /instances/sapl23/Products/ILSAPL/skins/sk_sapl/pysc/.svn/text-base/votacao_restaurar_situacao_inicial_pysc.py.svn-base | 3bea46fc54a0e53dd862c413918976db43459968 | [] | no_license | wpjunior/proled | dc9120eaa6067821c983b67836026602bbb3a211 | 1c81471295a831b0970085c44e66172a63c3a2b0 | refs/heads/master | 2016-08-08T11:59:09.748402 | 2012-04-17T07:37:43 | 2012-04-17T07:37:43 | 3,573,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | ## Script (Python) "votacao_restaurar_situacao_inicial_pysc"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=cod_materia
##title=
##
try:
context.zsql.votacao_restaurar_parlamentar_zsql(cod_materia=cod_materia)
context.zsql.votacao_restaurar_zsql(cod_materia=cod_materia)
except:
pass
return 1
| [
"root@cpro5106.publiccloud.com.br"
] | root@cpro5106.publiccloud.com.br | |
80cef7f808746a77dbe20e6a66357fe24b83d06b | 6819a924ee1cff66f508e85e26f826c1f0b08267 | /feeds.py | bc0b5e9d1d4101041835ec6fa1683aea521cf0da | [
"MIT"
] | permissive | joskid/snippify | 94245539a96a0327b8f431c51598673ef951d2ba | b692a941a7a46959df9aff064b7ad056d0125484 | refs/heads/master | 2021-01-18T05:25:51.818175 | 2011-06-16T20:13:05 | 2011-06-16T20:13:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,079 | py | """ For now there are """
from django.contrib.syndication.feeds import Feed, FeedDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from taggit.models import Tag
from snippets.models import Snippet
class LatestSnippets(Feed):
    """Feed of the ten most recently created snippets, site-wide."""

    title = u"Latest snippets"
    link = "/snippets/"
    description = "Updates on changes and additions to snippify"

    def items(self):
        newest_first = Snippet.objects.order_by('-created_date')
        return newest_first[:10]
class LatestTag(Feed):
    """Feed of the latest snippets carrying a specific tag."""

    def get_object(self, bits):
        """Resolve the URL bits to a Tag, raising for malformed URLs."""
        if len(bits) != 2:
            raise ObjectDoesNotExist
        tag = Tag.objects.get(name=bits[0])
        if tag is None:
            raise FeedDoesNotExist
        return tag

    def title(self, obj):
        return u"Latest snippets in %s" % obj.name

    def link(self, obj):
        if not obj:
            raise FeedDoesNotExist
        return '/tag/' + str(obj.name) + '/'

    def description(self, obj):
        # BUG FIX: the original `return` ended at the first string literal,
        # leaving `"%s tag" % obj.name` on the next line as unreachable dead
        # code -- the tag name was silently dropped from the description.
        # Implicit string-literal concatenation only spans lines inside a
        # single expression, so parenthesize it.
        return (u"Updates on changes and additions to snippify in "
                u"%s tag" % obj.name)

    def items(self, obj):
        return Snippet.objects.filter(tags__name__in=[obj.name]).\
            order_by('-created_date')[:10]
class LatestUser(Feed):
    """Feed of the latest snippets authored by a specific user."""

    def get_object(self, bits):
        """Resolve the URL bits to a User, raising for malformed URLs."""
        if len(bits) != 2:
            raise ObjectDoesNotExist
        user = User.objects.get(username=bits[0])
        if user is None:
            raise FeedDoesNotExist
        return user

    def title(self, obj):
        return "Latest snippets in %s" % obj.username

    def link(self, obj):
        if not obj:
            raise FeedDoesNotExist
        return '/account/' + str(obj.username) + '/'

    def description(self, obj):
        return "Updates on changes and additions to snippify.me in %s tag" % obj.username

    def items(self, obj):
        newest_first = Snippet.objects.filter(author=obj.id)
        return newest_first.order_by('-created_date')[:10]
| [
"alexandru.plugaru@gmail.com"
] | alexandru.plugaru@gmail.com |
4197891e02e3bd1e8943643b9c545370a44a5c14 | 73e7f93353ff6fa706ec644ac24d87de970b7115 | /src/keyboard_handler/__init__.py | 74f3084f3ed6eb49189a7a608a115df20629d888 | [
"MIT"
] | permissive | wafiqtaher/TheQube | 59c873bf7554088a8d436b58c2f0b6e72e6660d9 | fcfd8a68b15948e0740642d635db24adef8cc314 | refs/heads/master | 2022-04-07T10:52:50.469554 | 2020-01-27T14:07:33 | 2020-01-27T14:07:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | # -*- coding: utf-8 -*-
from main import KeyboardHandler, KeyboardHandlerError

# Public API of the package.
# NOTE(review): the WX* names are exported here but not imported in this
# module -- presumably provided elsewhere at runtime; confirm before relying
# on `from keyboard_handler import *`.
__all__ = ["KeyboardHandler", "KeyboardHandlerError", "WXKeyboardHandler", "WXPanelKeyboardHandler"]
| [
"andre@oire.org"
] | andre@oire.org |
4f3ccf728dfe75aae7e2984b5f59f776001750f0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_372/ch22_2020_03_31_23_32_56_982191.py | ce25e75a074e8b3a8f3aa789366088cc04c78302 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | cigarros = int(input('Quantos cigarros você fuma por dia? ')
anos = int(input('Há quantos anos você fuma? ')
a=(365*cigarros)*anos | [
"you@example.com"
] | you@example.com |
b02c815984671705a915af7da9c06659b7232120 | 48d232cc6dcf57abf6fca9cbbef8943e189acb04 | /cake-thief.py | 4544306cd668b4d93270e1585e8aca5e3bb9d888 | [] | no_license | csusb-005411285/CodeBreakersCode | dae796ba4262770e0a568e9c27597a041db0775c | 8f218164e1b9e42c1a928d22ef5a76328abb66a2 | refs/heads/master | 2022-01-12T09:11:33.668338 | 2021-12-27T04:45:13 | 2021-12-27T04:45:13 | 232,490,141 | 1 | 1 | null | 2021-01-29T23:09:14 | 2020-01-08T06:02:11 | Python | UTF-8 | Python | false | false | 789 | py | def max_duffel_bag_value(cake_tuples, weight_capacity):
if weight_capacity == 0:
return 0
cache = {}
for i in range(weight_capacity + 1):
cache[i] = 0
for weight in range(weight_capacity + 1): # 4
max_value_at_weight = 0
for cake in cake_tuples: # (2, 1)
max_value_cake = 0
if cake[0] == 0 and cake[1] != 0:
return float('inf')
if cake[0] <= weight: # 2 <= 4
max_value_cake = cake[1] # 1
remaining_weight = weight - cake[0] # 2
max_value_at_weight = max(max_value_at_weight, max_value_cake + cache[remaining_weight]) # 2
cache[weight] = max_value_at_weight # {0: 0, 1: 0, 2: 1, 3: 1, 4: 2}
return cache[weight_capacity]
| [
"noreply@github.com"
] | csusb-005411285.noreply@github.com |
1d90f7404d9cf3abc78760441a556f49047fbccc | b0eddf070767a08ea41a474eb424c51b81b856a9 | /week-5/monday/character.py | e7a02b28a926a8b461f86f3774307c3cced10f91 | [] | no_license | green-fox-academy/florimaros | fdd645c8ed6620e2b5021a2feca056049438a951 | 39741ea40e18441877c61a7fdf20b832cccf247a | refs/heads/master | 2021-05-30T14:35:57.456067 | 2016-02-25T19:53:44 | 2016-02-25T19:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | class Character():
def __init__(self, name, hp, damage):
self.name = name
self.hp = hp
self.damage = damage
def drink_potion(self):
self.hp += 10
def strike(self, opponent):
opponent.hp -= self.damage
def get_status(self):
life_status = "dead"
if self.hp > 0:
life_status = "HP: " + str(self.hp)
return self.name + life_status
| [
"flori.maros.adw@gmail.com"
] | flori.maros.adw@gmail.com |
55366f7b56526ec007a05e15bef779b09f10e3bd | c175c4e3560c6c66ec2b0c4b439cd586878b44a5 | /prplatform/submissions/migrations/0016_answer_uploaded_file.py | aede969729e418c53d85a3ea901af39719f4650a | [
"MIT"
] | permissive | piehei/prplatform | fd30e2e388597583b9ef0e59462ea9643f7244ba | f3248b66019f207bb06a4681a62057e175408b3e | refs/heads/master | 2020-03-09T17:09:47.893706 | 2019-09-18T15:24:58 | 2019-09-18T15:24:58 | 128,902,940 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # Generated by Django 2.1.2 on 2018-11-13 13:12
from django.db import migrations, models
import prplatform.submissions.models
class Migration(migrations.Migration):
dependencies = [
('submissions', '0015_auto_20181109_1251'),
]
operations = [
migrations.AddField(
model_name='answer',
name='uploaded_file',
field=models.FileField(blank=True, upload_to=prplatform.submissions.models.answer_upload_fp),
),
]
| [
"ph@extreg.com"
] | ph@extreg.com |
966ea3d3546ffdf38dbbf81b30f804d781107f46 | 16076240644897ad0529a4cb7543e19dd5fc539a | /etl.py | ebf228080aabfd1519962b4734fac2b7a61292a0 | [] | no_license | MZ195/DEND-Data-Warehouse_AWS | a2eef32a6d372cd556868549c9422316dfeb2be0 | 47f96f91444a15e81386a9a4698dd5263fdb9f6e | refs/heads/master | 2022-10-14T18:12:34.309750 | 2020-06-10T09:33:30 | 2020-06-10T09:33:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | import configparser
import psycopg2
from logging import getLogger
from sql_queries import copy_table_queries, insert_table_queries
log = getLogger(__name__)
def load_staging_tables(cur, conn):
    """Run every COPY statement, pulling the raw S3 data into Redshift's
    staging tables, committing after each one.

    Keyword arguments:
    cur -- the curser of the database
    conn -- the connection to the database
    """
    log.info("Loading staging tables...")
    print("Loading staging tables...\n")
    for copy_statement in copy_table_queries:
        cur.execute(copy_statement)
        conn.commit()
def insert_tables(cur, conn):
    """Run every INSERT statement, populating the fact and dimension tables
    from the staging tables, committing after each one.

    Keyword arguments:
    cur -- the curser of the database
    conn -- the connection to the database
    """
    log.info("inserting into dimensional and facts tables...")
    print("inserting into dimensional and facts tables...\n")
    for insert_statement in insert_table_queries:
        cur.execute(insert_statement)
        conn.commit()
def main():
    """Read cluster credentials from dwh.cfg, run the full ETL (staging
    loads, then fact/dimension inserts), and close the connection."""
    config = configparser.ConfigParser()
    config.read('dwh.cfg')

    connection = psycopg2.connect(
        "host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values())
    )
    cursor = connection.cursor()
    log.info("Connection established")
    print("Connection established\n")

    load_staging_tables(cursor, connection)
    insert_tables(cursor, connection)
    connection.close()
main() | [
"40984264+MZ195@users.noreply.github.com"
] | 40984264+MZ195@users.noreply.github.com |
be99e2028248897425d63985f3a164926c163f06 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatterpolargl/__init__.py | ca20fdc223e891433052b3060c7dc3fe90e4a0e9 | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 1,266 | py | from ._visible import VisibleValidator
from ._unselected import UnselectedValidator
from ._uid import UidValidator
from ._thetaunit import ThetaunitValidator
from ._thetasrc import ThetasrcValidator
from ._theta import ThetaValidator
from ._textsrc import TextsrcValidator
from ._text import TextValidator
from ._subplot import SubplotValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._selected import SelectedValidator
from ._rsrc import RsrcValidator
from ._r import RValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._mode import ModeValidator
from ._marker import MarkerValidator
from ._line import LineValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoveron import HoveronValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._fillcolor import FillcolorValidator
from ._fill import FillValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._connectgaps import ConnectgapsValidator
| [
"adam.kulidjian@gmail.com"
] | adam.kulidjian@gmail.com |
e08ed72e686789ad0ea484cf266e08a34d771429 | b33f1afe9f30c99f83ce2fe7ec1556b6dad8e0a6 | /03_roc.py | b090cd08264f082a535380984a2f8dfe4ce96087 | [] | no_license | Digital-Biobank/covid_variant_severity | e93845045adfc580c2cebbe6ecc5ee03aa02e9ba | cc0449a2429140352a1d6b97083321ae2002581f | refs/heads/master | 2023-04-01T16:09:00.206137 | 2021-04-26T19:09:12 | 2021-04-26T19:09:12 | 361,855,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,260 | py | import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import auc, roc_curve, roc_auc_score
# %% Plot ROC curves
df = pd.read_parquet("03_77142-vcf_2-component-pca_3-cluster-kmeans_outcomes_dropna.pickle")
df_random = pd.read_parquet("03_77142-vcf_2-component-pca_3-cluster-kmeans_outcomes_dropna_random.pickle")
lr_master = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_logistic-regression-model.pickle")
lr_random = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_logistic-regression-model_random.pickle")
logreg = joblib.load("models/02_77142-vcf_sklearn-logistic-regression-model.pickle")
plot_roc_curve(logreg, X=X, y=y)
plt.show()
cv = StratifiedKFold(n_splits=5)
clfs = lr_master, lr_random
dfs = df, df_random
labs = ["Logistic Regression", "Logistic Regression (random)"]
for clf, df, lab in zip(clfs, dfs, labs):
X = df.drop("is_red", axis=1)
y = df["is_red"]
pred = clf.predict_proba(X)[::, 1]
fpr, tpr, _ = roc_curve(y, pred)
auc = roc_auc_score(y, pred)
plt.plot(fpr, tpr, label=f"{lab}, AUC={auc:.3f}")
plt.legend(loc=4)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
plt.savefig(
"02_77142-vcf_2-component-pca-transformed_"
"mortality_3-cluster-kmeans_"
"logistic-regression_roc-curve.png"
)
plt.show()
knn_master = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_knn.pickle")
knn_random = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_knn_random.pickle")
clfs = [knn_master, knn_random]
labs = ["K nearest neighbors", "K nearest neighbors (random)"]
for clf, df, lab in zip(clfs, dfs, labs):
X = df.drop("is_red", axis=1)
y = df["is_red"]
pred = clf.predict_proba(X)[::, 1]
fpr, tpr, _ = roc_curve(y, pred)
auc = roc_auc_score(y, pred)
plt.plot(fpr, tpr, label=f"{lab}, AUC={auc:.3f}")
plt.legend(loc=4)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.savefig(
"02_77142-vcf_2-component-pca-transformed_"
"mortality_3-cluster-kmeans_"
"knn_roc-curve.png"
)
plt.show()
dt_master = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_dt.pickle")
dt_random = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_dt_random.pickle")
clfs = [dt_master, dt_random]
labs = ["Decision Tree", "Decision Tree (random)"]
for clf, df, lab in zip(clfs, dfs, labs):
X = df.drop("is_red", axis=1)
y = df["is_red"]
pred = clf.predict_proba(X)[::, 1]
fpr, tpr, _ = roc_curve(y, pred)
auc = roc_auc_score(y, pred)
plt.plot(fpr, tpr, label=f"{lab}, AUC={auc:.3f}")
plt.legend(loc=4)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='chance', alpha=.8)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.savefig(
"02_77142-vcf_2-component-pca-transformed_"
"mortality_3-cluster-kmeans_"
"decision-tree_roc-curve.png"
)
plt.show()
rf_master = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_rf.pickle")
rf_random = joblib.load("03_77142-vcf_2-component-pca_3-cluster-kmeans_rf_random.pickle")
clfs = [rf_master, rf_random]
labs = ["Random Forest", "Random Forest (random)"]
for clf, df, lab in zip(clfs, dfs, labs):
X = df.drop("is_red", axis=1)
y = df["is_red"]
pred = clf.predict_proba(X)[::, 1]
fpr, tpr, _ = roc_curve(y, pred)
auc = roc_auc_score(y, pred)
plt.plot(fpr, tpr, label=f"{lab}, AUC={auc:.3f}")
plt.legend(loc=4)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='chance', alpha=.8)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.savefig(
"02_77142-vcf_2-component-pca-transformed_"
"mortality_3-cluster-kmeans_"
"random-forest_roc-curve.png"
)
plt.show()
# classifier = dt_master
#
# tprs = []
# aucs = []
# mean_fpr = np.linspace(0, 1, 100)
#
# fig, ax = plt.subplots()
# for i, (train, test) in enumerate(cv.split(X, y)):
# classifier.fit(X[train], y[train])
# viz = plot_roc_curve(classifier, X[test], y[test],
# name='ROC fold {}'.format(i),
# alpha=0.3, lw=1, ax=ax)
# interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
# interp_tpr[0] = 0.0
# tprs.append(interp_tpr)
# aucs.append(viz.roc_auc)
#
# ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
# label='Chance', alpha=.8)
#
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
# mean_auc = auc(mean_fpr, mean_tpr)
# std_auc = np.std(aucs)
# ax.plot(mean_fpr, mean_tpr, color='b',
# label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
# lw=2, alpha=.8)
#
# std_tpr = np.std(tprs, axis=0)
# tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
# tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
# label=r'$\pm$ 1 std. dev.')
#
# ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
# title="Receiver operating characteristic example")
# ax.legend(loc="lower right")
# plt.show() | [
"marskar@gmail.com"
] | marskar@gmail.com |
809baad515b0e0b836349fe344facf3fa45083de | debea7714c997912089fde6a0971989f363c72e8 | /lista4/f.py | 1054d6968108dbc32d6d25460721e442edf9ad97 | [] | no_license | ezequiasOR/aa-iniciante | 42bc0f9f4df5bd9a68fcc1ba2d6558bcffff6c90 | 7a4e3882f74eb3941b3658e82abbbd9a3ecd3776 | refs/heads/master | 2023-01-29T04:28:55.678450 | 2020-12-08T23:57:17 | 2020-12-08T23:57:17 | 293,347,924 | 1 | 0 | null | 2020-10-30T15:08:53 | 2020-09-06T19:28:48 | Python | UTF-8 | Python | false | false | 194 | py | resp = [0]*100001
n = int(raw_input())
count = 1
for i in range(2, n+1):
if resp[i] == 0:
for j in range(i, n+1, i):
resp[j] = count
count += 1
for i in range(2, n+1):
print resp[i],
| [
"ezequias.rocha@ccc.ufcg.edu.br"
] | ezequias.rocha@ccc.ufcg.edu.br |
bc1543ebdb3cc210318ada2fd80370218e3ac405 | f4cc5a888d0dd1a5975e0467e21da0bedf48c921 | /runtime/test_hq.py | 2e0d0ba8ede7cc804c35e035673420e2ea6254bf | [] | no_license | wenjiandu/QUANTAXISRUNTIME | 88dbca3e65ed2510e001392997b97577f8e1214c | 2cf28abe1f56d4219f1f89980d7b64460e65856c | refs/heads/master | 2020-04-10T04:01:04.097325 | 2017-11-21T04:45:24 | 2017-11-21T04:45:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from hqservice.fetcher import quotation
import QUANTAXIS as QA
# Codes of all stock blocks/sectors fetched through QUANTAXIS.
stock_list=QA.QA_fetch_stock_block_adv().code
# Pull quotations for the whole list 100 times in a row
# (presumably a smoke/load check of the quotation service).
for i in range(100):
print(len(quotation(stock_list))) | [
"yutiansut@qq.com"
] | yutiansut@qq.com |
2df3e21491d807fa232e644142ee27c142df344e | f32421e59d1b42ff42ef56a529e365dd094160d9 | /configs/gcnet/nl_stage/mask_rcnn_nl_eg_c3_r50_fpn_1x.py | 86ab75cf3b4ce555de68d70c4531b16eeb29fbb8 | [
"Apache-2.0"
] | permissive | li-haoran/DNL-Object-Detection | 634d867c2c8126c333884de678c3d9c16a78a1ba | 6ae88842d6237a465559c420c610444bcb2d9405 | refs/heads/master | 2023-01-28T05:35:24.033494 | 2020-12-07T02:48:57 | 2020-12-07T02:48:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,880 | py | # model settings
# Model: Mask R-CNN (boxes + instance masks) built from a ResNet-50 backbone
# carrying non-local blocks (nlb), an FPN neck and RPN/bbox/mask heads.
model = dict(
    type='MaskRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # feed all four stage outputs to the neck
        frozen_stages=1,
        style='pytorch',
        # Embedded-Gaussian non-local block with channel reduction 4;
        # stage_with_nlb gives one insertion list per stage: here a single
        # block at offset -2 of the second stage.
        nlb=dict(mode='embedded_gaussian',
                 reduction=4),
        stage_with_nlb=[[], [-2], [], []],
    ),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_scales=[8],
        anchor_ratios=[0.5, 1.0, 2.0],
        anchor_strides=[4, 8, 16, 32, 64],  # one stride per FPN level
        target_means=[.0, .0, .0, .0],
        target_stds=[1.0, 1.0, 1.0, 1.0],
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    bbox_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    bbox_head=dict(
        type='SharedFCBBoxHead',
        num_fcs=2,
        in_channels=256,
        fc_out_channels=1024,
        roi_feat_size=7,
        num_classes=81,  # 80 COCO classes + background
        target_means=[0., 0., 0., 0.],
        target_stds=[0.1, 0.1, 0.2, 0.2],
        reg_class_agnostic=False,
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
    mask_roi_extractor=dict(
        type='SingleRoIExtractor',
        roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
        out_channels=256,
        featmap_strides=[4, 8, 16, 32]),
    mask_head=dict(
        type='FCNMaskHead',
        num_convs=4,
        in_channels=256,
        conv_out_channels=256,
        num_classes=81,
        loss_mask=dict(
            type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))
# model training and testing settings
# Training-time assignment/sampling settings for the RPN and RCNN stages.
train_cfg = dict(
    rpn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.7,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False),
        allowed_border=0,
        pos_weight=-1,
        debug=False),
    # Proposal generation used to feed the RCNN stage during training.
    rpn_proposal=dict(
        nms_across_levels=False,
        nms_pre=2000,
        nms_post=2000,
        max_num=2000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.5,
            ignore_iof_thr=-1),
        sampler=dict(
            type='RandomSampler',
            num=512,
            pos_fraction=0.25,
            neg_pos_ub=-1,
            add_gt_as_proposals=True),
        mask_size=28,
        pos_weight=-1,
        debug=False))
# Inference settings: proposal counts are halved w.r.t. training (2000 -> 1000).
test_cfg = dict(
    rpn=dict(
        nms_across_levels=False,
        nms_pre=1000,
        nms_post=1000,
        max_num=1000,
        nms_thr=0.7,
        min_bbox_size=0),
    rcnn=dict(
        score_thr=0.05,
        nms=dict(type='nms', iou_thr=0.5),
        max_per_img=100,
        mask_thr_binary=0.5))
# dataset settings
# COCO 2017 instance-segmentation data, standard (1333, 800) training scale.
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Per-channel normalization (the usual ImageNet statistics, RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
# Evaluation pipeline: single scale, no flipping.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# val and test both run on the val2017 split.
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy: linear warmup, then step decay at epochs 8 and 11
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12  # the "1x" schedule referenced in the file name
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/mask_rcnn_nl_eg_c3_r50_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"yaozhuliang13@gmail.com"
] | yaozhuliang13@gmail.com |
6c08fe9ceaf7eedae3d6d1ff8b2e4e8906ba1ac7 | e8708b79f22859c2623ea59d4e32193270d9c760 | /Caminata/frames.py | aae8b1cb69f226b651364da4fb6d3678b69e47bb | [] | no_license | vinsmokemau/PDI | 23c927ae36e37a6296ef6f1eb5576e9a800b8e20 | cd08cd02fbd81fee82c85673257912fc87e457d7 | refs/heads/master | 2023-07-19T08:37:38.036967 | 2020-10-21T01:33:46 | 2020-10-21T01:33:46 | 176,957,931 | 0 | 0 | null | 2023-07-06T21:34:03 | 2019-03-21T14:09:17 | Python | UTF-8 | Python | false | false | 2,529 | py | """Extracting and Saving Video Frames using OpenCV-PythonPython."""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def rgb2gray(image):
    """Transform a color image to a grayscale image.

    The first three channels are collapsed with the classic luminance
    weights 0.299 / 0.587 / 0.114 (red / green / blue).
    """
    luma_weights = np.asarray([0.299, 0.587, 0.114])
    return image[..., :3] @ luma_weights
# --- Frame extraction -------------------------------------------------------
# Read the video, save every frame as frame<NN>.jpg (index zero-padded so the
# names sort lexicographically) and keep a grayscale copy in memory.
cap = cv2.VideoCapture('caminata_lenta.mp4')
i = 0
frames = []
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of stream
        break
    name = 'frame' + str(i) + '.jpg' if len(str(i)) > 1 else 'frame0' + str(i) + '.jpg'
    cv2.imwrite(name, frame)
    frames.append(rgb2gray(mpimg.imread(name)))
    i += 1
cap.release()
cv2.destroyAllWindows()

# --- Marker calibration -----------------------------------------------------
# Grayscale intensity range covered by each colored reference patch.
blue_spot = rgb2gray(mpimg.imread('spot_blue.jpg'))
green_spot = rgb2gray(mpimg.imread('spot_green.jpg'))
red_spot = rgb2gray(mpimg.imread('spot_red.jpg'))
blue_spot_min = np.amin(blue_spot)
green_spot_min = np.amin(green_spot)
red_spot_min = np.amin(red_spot)
blue_spot_max = np.amax(blue_spot)
green_spot_max = np.amax(green_spot)
red_spot_max = np.amax(red_spot)

# --- Segmentation -----------------------------------------------------------
# Mark (255) the pixels whose intensity falls strictly inside a marker's
# range.  Vectorized replacement for the original per-pixel loops; the
# one-pixel image border stays 0 exactly as before.
red_images = []
green_images = []
blue_images = []
for gray_frame in frames:
    red_image = np.zeros(gray_frame.shape)
    green_image = np.zeros(gray_frame.shape)
    blue_image = np.zeros(gray_frame.shape)
    interior = gray_frame[1:-1, 1:-1]
    blue_image[1:-1, 1:-1] = np.where(
        (blue_spot_min < interior) & (interior < blue_spot_max), 255, 0)
    green_image[1:-1, 1:-1] = np.where(
        (green_spot_min < interior) & (interior < green_spot_max), 255, 0)
    red_image[1:-1, 1:-1] = np.where(
        (red_spot_min < interior) & (interior < red_spot_max), 255, 0)
    red_images.append(red_image)
    green_images.append(green_image)
    blue_images.append(blue_image)

# --- Display ----------------------------------------------------------------
fig = plt.gcf()
fig.show()
# BUG FIX: the original iterated over the *tuple* (red_images, green_images,
# blue_images) and unpacked each whole list into three names, which only
# "works" when there happen to be exactly three frames.  zip() walks the
# three mask lists frame by frame, which is what the plotting code expects.
for red_gray_image, green_gray_image, blue_gray_image in zip(red_images, green_images, blue_images):
    plt.subplot(1, 3, 1)
    plt.imshow(red_gray_image, cmap=plt.get_cmap('gray'))
    plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.imshow(green_gray_image, cmap=plt.get_cmap('gray'))
    plt.axis('off')
    plt.subplot(1, 3, 3)
    plt.imshow(blue_gray_image, cmap=plt.get_cmap('gray'))
    plt.axis('off')
    fig.canvas.draw()
"""
fig = plt.gcf()
fig.show()
for gray_frame in frames:
    plt.imshow(gray_frame, cmap=plt.get_cmap('gray'))
    fig.canvas.draw()
"""
| [
"maumg1196@gmail.com"
] | maumg1196@gmail.com |
b43b265ab5b8ec038ca93e7960531a9ce7c8c0a4 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/cv2/cv2/face_FacemarkKazemi.py | 73b898f65569c4285d3408241b623d11da0c7620 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | # encoding: utf-8
# module cv2.cv2
# from C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
# by generator 1.147
""" Python wrapper for OpenCV. """
# imports
import cv2.cv2 as # C:\Users\Doly\Anaconda3\lib\site-packages\cv2\cv2.cp37-win_amd64.pyd
import cv2.Error as Error # <module 'cv2.Error'>
import cv2.aruco as aruco # <module 'cv2.aruco'>
import cv2.bgsegm as bgsegm # <module 'cv2.bgsegm'>
import cv2.bioinspired as bioinspired # <module 'cv2.bioinspired'>
import cv2.cuda as cuda # <module 'cv2.cuda'>
import cv2.datasets as datasets # <module 'cv2.datasets'>
import cv2.detail as detail # <module 'cv2.detail'>
import cv2.dnn as dnn # <module 'cv2.dnn'>
import cv2.face as face # <module 'cv2.face'>
import cv2.fisheye as fisheye # <module 'cv2.fisheye'>
import cv2.flann as flann # <module 'cv2.flann'>
import cv2.ft as ft # <module 'cv2.ft'>
import cv2.hfs as hfs # <module 'cv2.hfs'>
import cv2.img_hash as img_hash # <module 'cv2.img_hash'>
import cv2.instr as instr # <module 'cv2.instr'>
import cv2.ipp as ipp # <module 'cv2.ipp'>
import cv2.kinfu as kinfu # <module 'cv2.kinfu'>
import cv2.line_descriptor as line_descriptor # <module 'cv2.line_descriptor'>
import cv2.linemod as linemod # <module 'cv2.linemod'>
import cv2.ml as ml # <module 'cv2.ml'>
import cv2.motempl as motempl # <module 'cv2.motempl'>
import cv2.multicalib as multicalib # <module 'cv2.multicalib'>
import cv2.ocl as ocl # <module 'cv2.ocl'>
import cv2.ogl as ogl # <module 'cv2.ogl'>
import cv2.omnidir as omnidir # <module 'cv2.omnidir'>
import cv2.optflow as optflow # <module 'cv2.optflow'>
import cv2.plot as plot # <module 'cv2.plot'>
import cv2.ppf_match_3d as ppf_match_3d # <module 'cv2.ppf_match_3d'>
import cv2.quality as quality # <module 'cv2.quality'>
import cv2.reg as reg # <module 'cv2.reg'>
import cv2.rgbd as rgbd # <module 'cv2.rgbd'>
import cv2.saliency as saliency # <module 'cv2.saliency'>
import cv2.samples as samples # <module 'cv2.samples'>
import cv2.structured_light as structured_light # <module 'cv2.structured_light'>
import cv2.text as text # <module 'cv2.text'>
import cv2.utils as utils # <module 'cv2.utils'>
import cv2.videoio_registry as videoio_registry # <module 'cv2.videoio_registry'>
import cv2.videostab as videostab # <module 'cv2.videostab'>
import cv2.xfeatures2d as xfeatures2d # <module 'cv2.xfeatures2d'>
import cv2.ximgproc as ximgproc # <module 'cv2.ximgproc'>
import cv2.xphoto as xphoto # <module 'cv2.xphoto'>
import cv2 as __cv2
class face_FacemarkKazemi(__cv2.face_Facemark):
    # Auto-generated IDE stub for cv2.face_FacemarkKazemi; the real
    # implementation lives in the compiled OpenCV extension (.pyd, see the
    # header above), so every body here is an empty placeholder.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
ac4251061dbf04a4b211f6dc6b24aac16bd3c392 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_misbegotten.py | 0a2f1ba98baecb49da1a281a971696f07f881ffd | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py |
#calss header
class _MISBEGOTTEN():
def __init__(self,):
self.name = "MISBEGOTTEN"
self.definitions = [u'badly or stupidly planned or designed: ', u'not deserving to be respected or thought valuable: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
17ada15b2b7a85d902eab53dcc44a0434d73b4ab | c7169415ae8abedd29ab83cddbcccb6768663062 | /image_tagging/dataset_a/twitter/predict_twitter_background.py | cc690218544bfe7ee85106a43155e431a16a7061 | [] | no_license | chrisWWU/cross_platform_feature_analysis | 26c33dd2adc00b7d8fbc24bfef45d6757b81ae1a | 572732554c73bdcb22f31bce5718fdf8beb77bd8 | refs/heads/master | 2022-12-29T11:11:18.910805 | 2020-10-16T11:38:04 | 2020-10-16T11:38:04 | 296,666,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | from imageai.Prediction import ImagePrediction
import os
import pandas as pd
from PIL import Image
def clear_valid_path(path):
    """Return the user id encoded in an image path.

    The id is the file name stripped of directories and extension, e.g.
    '.../twitter_profilepics/user123.jpg' -> 'user123'.

    The previous implementation hard-coded the 8th path segment
    (``path.split('/')[7]``), which breaks whenever the pictures directory
    sits at a different nesting depth (the __main__ paths have 9 segments),
    and only stripped '.jpg' although '.png' files are accepted too.
    Taking the final component and splitting off the extension handles both.
    """
    filename = os.path.basename(path)
    return os.path.splitext(filename)[0]
def get_image_tags(path_from, path_to, csv):
    """Classify every profile picture in *path_from* with the ResNet model.

    Builds a DataFrame with one row per (image, prediction) pair holding the
    predicted label, its confidence and the owning user id (derived from the
    file name).  When *csv* is truthy the frame is also written to *path_to*.
    Reads the module-level global ``path_model`` (set in the __main__ block)
    for the weight file.  Returns the DataFrame.
    """
    # set up the prediction model (top-5 labels per image, see below)
    multiple_prediction = ImagePrediction()
    multiple_prediction.setModelTypeAsResNet()
    multiple_prediction.setModelPath(os.path.join(path_model))
    multiple_prediction.loadModel()
    # keep only actual photo files
    all_images_array = [each_file for each_file in os.listdir(path_from)
                        if each_file.endswith(".jpg") or each_file.endswith(".png")]
    # full path for each picture
    path_pics = [f'{path_from + pic}' for pic in all_images_array]
    # drop images PIL cannot open (broken/partial downloads); the context
    # manager also closes the file handle the original version leaked
    valid_paths = []
    for path in path_pics:
        try:
            with Image.open(path):
                pass
            valid_paths.append(path)
        except IOError:
            print(f'{path}: image is broken')
    # user id for every valid picture
    valid_ids = [clear_valid_path(x) for x in valid_paths]
    res = multiple_prediction.predictMultipleImages(valid_paths, result_count_per_image=5)
    # one partial frame per image, concatenated in a single pass at the end
    # (DataFrame.append was removed in pandas 2.0 and was quadratic anyway);
    # the loop variable no longer shadows the builtin `dict`
    df = pd.DataFrame(columns=['prediction', 'percentage', 'twitterusername'])
    parts = [
        pd.DataFrame(
            {'prediction': result['predictions'],
             'percentage': result['percentage_probabilities'],
             'twitterusername': valid_ids[c]}
        )
        for c, result in enumerate(res)
    ]
    df = pd.concat([df] + parts)
    df = df.reset_index(drop=True)
    if csv:
        df.to_csv(path_to)
    return df
if __name__ == '__main__':
    # Paths are relative to this script's location in the repository layout.
    dataset = 'dataset_a'
    tw_standard_pic = f'../../../../data/{dataset}/twitter/twitter_profilepics/shardproducton.jpg'  # not referenced below
    path_from = f'../../../../data/{dataset}/twitter/twitter_profilepics/'
    path_to = 'twitter_background_prediction/'
    # NOTE: path_model is read as a module-level global inside get_image_tags.
    path_model = '../../image_pred_models/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    csv = False  # set True to write the predictions to path_to
get_image_tags(path_from, path_to, csv) | [
"christian28bewerbung@gmail.com"
] | christian28bewerbung@gmail.com |
f2b444d8bf9b6798f4553ad51384063997abeeb3 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/rot.py | bde30a20fd61ad84406edc6c861f513cad32bc25 | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint grouping this module's routes under the /rot URL prefix
# (presumably registered on the Flask app elsewhere in the project).
controller = Blueprint('rot', __name__, url_prefix='/rot')
# Route skeleton left disabled by the author (exercise TODO 2):
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic':  # TODO 2
#         return render_template('republic.html')  # TODO 2
#     else:
#         pass
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
e4a8db30865c1f641e1659dc8663a83b73f24ba0 | e2468c60810764971f2dae2b959650b553042810 | /32_longParentheses.py | 424c2c33dda1d69ea95600848da000f3d81f0741 | [] | no_license | awesome-liuxiao/leetcodesolution | 9a01b6f36266149ae7fe00625785d1ada41f190a | 3637cd1347b5153daeeb855ebc44cfea5649fc90 | refs/heads/master | 2023-06-08T13:42:14.653688 | 2023-06-01T08:39:35 | 2023-06-01T08:39:35 | 213,380,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | class Solution:
def longestValidParentheses(self, s: str) -> int:
res = 0
sLen = len(s)
stack = []
start = 0
for i in range(sLen):
if s[i] == '(':
stack.append(i)
else:
if stack == []:
start = i+1
else:
stack.pop()
if stack == []:
res = max(res, i-start+1)
else:
res = max(res, i - stack[len(stack)-1])
# print(res)
return res
# Ad-hoc manual checks; the expected answer for each case is noted inline.
x = Solution()
data1 = "(()" # 2
# data2 = ")()())" # 4
data3 = "()(()" # 2
data4 = "(((())))" # 8
# x.longestValidParentheses(data1)
# x.longestValidParentheses(data2)
x.longestValidParentheses(data3)  # result is computed but never printed
# x.longestValidParentheses(data4)
| [
"lio4072@hotmail.com"
] | lio4072@hotmail.com |
beb00c8795fb8fcbdafe263d42cb4a3b1821cc54 | 6bf4e54f8ae95582b73bb969ba44069c64e87651 | /kdhi/main_site/migrations/0022_article_update_date.py | 52c02679337d35350bcf54c1168d52433162fa30 | [] | no_license | speedycowenator/kdhi_migration | 4bc983c4656a2a87cb056461bfb4219e38da1a85 | 422b2e3f142a30c81f428fb8eaa813e4a71d56fc | refs/heads/master | 2022-11-14T13:27:51.520697 | 2020-07-02T19:31:12 | 2020-07-02T19:31:12 | 246,138,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.2.5 on 2020-03-23 22:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_site', '0021_auto_20200323_1821'),
]
operations = [
migrations.AddField(
model_name='article',
name='update_date',
field=models.DateField(auto_now=True),
),
]
| [
"54556114+speedycowenator@users.noreply.github.com"
] | 54556114+speedycowenator@users.noreply.github.com |
9d01f2e31bb38d3ccda4e090566d0ce168341b29 | 9b422078f4ae22fe16610f2ebc54b8c7d905ccad | /xlsxwriter/test/comparison/test_optimize06.py | 70a303d77c74404718a1c41d06dcd3a44592ecf3 | [
"BSD-2-Clause-Views"
] | permissive | projectsmahendra/XlsxWriter | 73d8c73ea648a911deea63cb46b9069fb4116b60 | 9b9d6fb283c89af8b6c89ad20f72b8208c2aeb45 | refs/heads/master | 2023-07-21T19:40:41.103336 | 2023-07-08T16:54:37 | 2023-07-08T16:54:37 | 353,636,960 | 0 | 0 | NOASSERTION | 2021-04-01T08:57:21 | 2021-04-01T08:57:20 | null | UTF-8 | Python | false | false | 1,138 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('optimize06.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename, {'constant_memory': True, 'in_memory': False})
        worksheet = workbook.add_worksheet()

        # Every single-byte character must round-trip through the
        # SharedStrings module.  Codepoint 34 (") is skipped because Excel
        # encodes it as &quot; rather than the literal character.
        codepoints = [cp for cp in range(128) if cp != 34]
        for cp in codepoints:
            worksheet.write_string(cp, 0, chr(cp))

        workbook.close()
        self.assertExcelEqual()
| [
"jmcnamara@cpan.org"
] | jmcnamara@cpan.org |
af725df8bc8c67caf2efdca7c84665f967cc3fd5 | 463716b1e2dacba48802b3a58272de732c3e3382 | /scripts/earth_capture/OCP_moon_moon_leg.py | b375653c7091984a75bd58a87a6f76a5796cb94b | [] | no_license | TomSemblanet/Asteroid-Retrieval-Mission | e6afa5446ee27268faa8a56d72028d8649a24646 | 9d4b1809e868aec674d6bf3c48958b23418290e7 | refs/heads/main | 2023-06-12T20:45:21.493228 | 2021-07-01T14:03:38 | 2021-07-01T14:03:38 | 348,095,525 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,891 | py | import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
import cppad_py
from scipy.interpolate import interp1d
from collocation.GL_V.src.problem import Problem
from collocation.GL_V.src.optimization import Optimization
class MoonMoonLeg(Problem):
    """ CR3BP : Moon-Moon Leg optimal control problem.

    Propellant-optimal transfer tracking a reference Moon-to-Moon trajectory;
    positions/velocities are adimensioned in the synodic frame of `cr3bp`.
    """

    def __init__(self, cr3bp, mass0, Tmax, trajectory, time):
        """ Initialization of the `MoonMoonLeg` class.

        cr3bp      : CR3BP model; provides the length unit L and time unit T
        mass0      : initial spacecraft mass [kg]
        Tmax       : maximum thrust [kN]
        trajectory : reference states, rows = x, y, z, vx, vy, vz
        time       : time grid associated with `trajectory` [T]
        """
        # Problem dimensions: 7 states (r, v, m), 4 controls (T, ux, uy, uz),
        # 1 control path constraint, 13 event constraints, 200 nodes.
        n_states = 7
        n_controls = 4
        n_st_path_con = 0
        n_ct_path_con = 1
        n_event_con = 13
        n_f_par = 0
        n_nodes = 200
        Problem.__init__(self, n_states, n_controls, n_st_path_con, n_ct_path_con,
                         n_event_con, n_f_par, n_nodes)

        # Set some attributs
        self.cr3bp = cr3bp
        self.mass0 = mass0  # [kg]
        self.Tmax = Tmax  # [kN]
        self.trajectory = trajectory  # [L] | [L/T]
        self.time = time  # [T]

    def set_constants(self):
        """ Setting of the problem constants: Tmax, g0 and Isp are rescaled
        by L/T^2 (resp. T) so they are consistent with the adimensioned
        CR3BP states.  9.80665e-3 is standard gravity, presumably in km/s^2
        to match a thrust given in kN -- TODO confirm the unit convention. """
        self.Tmax /= self.cr3bp.L / self.cr3bp.T**2  # Thrusts dimensioning
        self.g0 = 9.80665e-3 / (self.cr3bp.L / self.cr3bp.T**2)
        self.Isp = 3000 / self.cr3bp.T

    def set_boundaries(self):
        """ Setting of the states, controls, free-parameters, initial and final times
        boundaries """
        # States boundaries
        # X [-]
        self.low_bnd.states[0] = -2
        self.upp_bnd.states[0] = 2
        # Y [-]
        self.low_bnd.states[1] = -2
        self.upp_bnd.states[1] = 2
        # Z [-]
        self.low_bnd.states[2] = -2
        self.upp_bnd.states[2] = 2
        # Vx [-]
        self.low_bnd.states[3] = -10
        self.upp_bnd.states[3] = 10
        # Vy [-]
        self.low_bnd.states[4] = -10
        self.upp_bnd.states[4] = 10
        # Vz [-]
        self.low_bnd.states[5] = -10
        self.upp_bnd.states[5] = 10
        # m [kg]: strictly positive, never above the initial mass
        self.low_bnd.states[6] = 1e-6
        self.upp_bnd.states[6] = self.mass0
        # T [-]: thrust magnitude, kept strictly positive
        self.low_bnd.controls[0] = 1e-6
        self.upp_bnd.controls[0] = self.Tmax
        # Tx [-]
        self.low_bnd.controls[1] = - 1
        self.upp_bnd.controls[1] = 1
        # Ty [-]
        self.low_bnd.controls[2] = - 1
        self.upp_bnd.controls[2] = 1
        # Tz [-]
        self.low_bnd.controls[3] = - 1
        self.upp_bnd.controls[3] = 1
        # Initial time fixed; final time free within [0.5, 2.5] x reference
        self.low_bnd.ti = self.upp_bnd.ti = self.time[0]
        self.low_bnd.tf = 0.5 * self.time[-1]
        self.upp_bnd.tf = 2.5 * self.time[-1]

    def event_constraints(self, xi, ui, xf, uf, ti, tf, f_prm):
        """ Computation of the events constraints: pins initial and final
        position/velocity to the first/last columns of the reference
        trajectory, and the initial mass to mass0 (13 equality residuals). """
        events = np.ndarray((self.prm['n_event_con'], 1),
                            dtype=cppad_py.a_double)

        x_i, y_i, z_i, vx_i, vy_i, vz_i, m_i = xi
        x_f, y_f, z_f, vx_f, vy_f, vz_f, _ = xf

        events[0] = x_i - self.trajectory[0, 0]
        events[1] = y_i - self.trajectory[1, 0]
        events[2] = z_i - self.trajectory[2, 0]
        events[3] = vx_i - self.trajectory[3, 0]
        events[4] = vy_i - self.trajectory[4, 0]
        events[5] = vz_i - self.trajectory[5, 0]
        events[6] = x_f - self.trajectory[0, -1]
        events[7] = y_f - self.trajectory[1, -1]
        events[8] = z_f - self.trajectory[2, -1]
        events[9] = vx_f - self.trajectory[3, -1]
        events[10] = vy_f - self.trajectory[4, -1]
        events[11] = vz_f - self.trajectory[5, -1]
        events[12] = m_i - self.mass0

        return events

    def set_events_constraints_boundaries(self):
        """ Setting of the events constraints boundaries (all equalities = 0) """
        self.low_bnd.event[0] = self.upp_bnd.event[0] = 0
        self.low_bnd.event[1] = self.upp_bnd.event[1] = 0
        self.low_bnd.event[2] = self.upp_bnd.event[2] = 0
        self.low_bnd.event[3] = self.upp_bnd.event[3] = 0
        self.low_bnd.event[4] = self.upp_bnd.event[4] = 0
        self.low_bnd.event[5] = self.upp_bnd.event[5] = 0
        self.low_bnd.event[6] = self.upp_bnd.event[6] = 0
        self.low_bnd.event[7] = self.upp_bnd.event[7] = 0
        self.low_bnd.event[8] = self.upp_bnd.event[8] = 0
        self.low_bnd.event[9] = self.upp_bnd.event[9] = 0
        self.low_bnd.event[10] = self.upp_bnd.event[10] = 0
        self.low_bnd.event[11] = self.upp_bnd.event[11] = 0
        self.low_bnd.event[12] = self.upp_bnd.event[12] = 0

    def path_constraints(self, states, controls, states_add, controls_add, controls_col, f_par):
        """ Computation of the path constraints: the thrust direction
        (ux, uy, uz) must have unit norm at every evaluation point. """
        st_path = np.ndarray((self.prm['n_st_path_con'],
                              2*self.prm['n_nodes']-1), dtype=cppad_py.a_double)
        ct_path = np.ndarray((self.prm['n_ct_path_con'],
                              4*self.prm['n_nodes']-3), dtype=cppad_py.a_double)

        # Thrust magnitude in x, y and z directions in the synodic frame [-]
        ux = np.concatenate((controls[1], controls_add[1], controls_col[1]))
        uy = np.concatenate((controls[2], controls_add[2], controls_col[2]))
        uz = np.concatenate((controls[3], controls_add[3], controls_col[3]))

        u2 = ux*ux + uy*uy + uz*uz
        ct_path[0] = u2 - 1

        return st_path, ct_path

    def set_path_constraints_boundaries(self):
        """ Setting of the path constraints boundaries """
        # ||u||^2 - 1 == 0 -> the direction vector is pinned to norm 1
        self.low_bnd.ct_path[0] = self.upp_bnd.ct_path[0] = 0

    def dynamics(self, states, controls, f_prm, expl_int=False):
        """ Computation of the states derivatives.

        CR3BP natural dynamics plus the thrust acceleration (T/m) along
        (ux, uy, uz); mass flow is -T / (Isp * g0).  With expl_int=True a
        plain float array is returned (explicit integration), otherwise a
        cppad_py array usable for automatic differentiation.
        """
        if expl_int == False:
            dynamics = np.ndarray(
                (states.shape[0], states.shape[1]), dtype=cppad_py.a_double)
        else:
            dynamics = np.zeros(len(states))

        # Mass [kg]
        m = states[6]

        # Extraction of controls
        T = controls[0]
        ux, uy, uz = controls[1:]

        x_dot, y_dot, z_dot, vx_dot, vy_dot, vz_dot = self.cr3bp.states_derivatives(0, states[:-1])

        dynamics[0] = x_dot
        dynamics[1] = y_dot
        dynamics[2] = z_dot
        dynamics[3] = vx_dot + T / m * ux
        dynamics[4] = vy_dot + T / m * uy
        dynamics[5] = vz_dot + T / m * uz
        dynamics[6] = - T / self.Isp / self.g0

        return dynamics

    def end_point_cost(self, ti, xi, tf, xf, f_prm):
        """ Computation of the end point cost (Mayer term): minus the final
        mass ratio, so minimizing the cost maximizes the remaining mass. """
        mf = xf[-1]
        return - mf / self.mass0

    def set_initial_guess(self):
        """ Setting of the initial guess for the states, controls, free-parameters
        and time grid """
        # Interpolation of the states along the reference trajectory
        f_x = interp1d(self.time, self.trajectory[0])
        f_y = interp1d(self.time, self.trajectory[1])
        f_z = interp1d(self.time, self.trajectory[2])
        f_vx = interp1d(self.time, self.trajectory[3])
        f_vy = interp1d(self.time, self.trajectory[4])
        f_vz = interp1d(self.time, self.trajectory[5])

        # Time: uniform grid over the reference span
        self.initial_guess.time = np.linspace(self.time[0], self.time[-1], self.prm['n_nodes'])

        # States: reference trajectory resampled on the node grid; the mass
        # guess is constant at mass0
        self.initial_guess.states = np.ndarray(
            shape=(self.prm['n_states'], self.prm['n_nodes']))
        self.initial_guess.states[0] = f_x(self.initial_guess.time)
        self.initial_guess.states[1] = f_y(self.initial_guess.time)
        self.initial_guess.states[2] = f_z(self.initial_guess.time)
        self.initial_guess.states[3] = f_vx(self.initial_guess.time)
        self.initial_guess.states[4] = f_vy(self.initial_guess.time)
        self.initial_guess.states[5] = f_vz(self.initial_guess.time)
        self.initial_guess.states[6] = self.mass0 * np.ones(self.prm['n_nodes'])

        # Controls: all-zero guess.  NOTE(review): the np.ndarray allocation
        # below is immediately overwritten by np.zeros -- dead code.
        self.initial_guess.controls = np.ndarray(
            shape=(self.prm['n_controls'], self.prm['n_nodes']))
        self.initial_guess.controls = np.zeros((4, self.prm['n_nodes']))
| [
"tomsemblanet@hotmail.fr"
] | tomsemblanet@hotmail.fr |
c609fe49aaa8f7ba0b62b3f030bf3d3de0ce70a8 | 3ead569228d28e173868dc307acb78f3e41947a1 | /greedy/thisisCT_Q04.py | 0ae4ce3ac5ed748b4bb35bc57fb27828fe4a4363 | [] | no_license | pjhq2/Algorithm | 96863d8343fb30fda9fe64b4d0f4abd4a1d7a81b | dd12ed47472f68d3a979d604aa32ca82e1c656b4 | refs/heads/main | 2023-08-11T11:14:10.720161 | 2021-10-07T14:01:58 | 2021-10-07T14:01:58 | 386,916,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # 04. 만들 수 없는 금액
# Greedy "smallest un-makeable amount": scan coins in ascending order while
# `target` stays the smallest amount the coins seen so far cannot form.
N = int(input())  # number of coins (consumed, not referenced again)
money = list(map(int, input().split()))
money.sort()
# While every amount up to x-1 can be made, can x be made as well?
target = 1
for x in money:
    if target < x:
        # Gap found: no subset can ever sum to `target`.
        break
    target += x
print(target)
"pkonu7@gmail.com"
] | pkonu7@gmail.com |
97847a90e19953edb59b4fc72b64ea864c749fe6 | 67377e04b769338d6370b20126aa09af26ffee66 | /tests/test_plotting_multiple_keras_histories.py | afc1400e47d9dd91a10fb279393b766308a6e093 | [
"MIT"
] | permissive | LucaCappelletti94/plot_keras_history | a9fe2eee28f8021897a7e937937163d99e1c704d | 1383a30e5659298d749678b75ea1d02f3bd73275 | refs/heads/master | 2023-05-23T18:34:57.090240 | 2022-11-19T12:00:40 | 2022-11-19T12:00:40 | 185,058,056 | 16 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | """Test to check if multiple histories plots look ok."""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import numpy as np
from plot_keras_history import plot_history
from extra_keras_metrics import get_minimal_multiclass_metrics
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("Agg")  # non-interactive backend: lets the test run headless
def test_plotting_keras_history_object():
    """Train five tiny models and render all their histories in one figure."""
    histories = []
    for _ in range(5):
        # Single-neuron logistic-regression model.
        model = Sequential([
            Dense(1, activation="sigmoid")
        ])
        model.compile(
            optimizer="nadam",
            loss="binary_crossentropy",
            metrics=get_minimal_multiclass_metrics()
        )
        size = 1000
        # Synthetic task: the label is whether the row mean is positive.
        X = np.random.uniform(
            low=-1,
            high=+1,
            size=(size, 100)
        )
        y = np.mean(X, axis=1) > 0
        # First half trains, second half validates; full-batch updates.
        histories.append(model.fit(
            X[:size//2], y[:size//2],
            batch_size=size//2,
            validation_data=(X[size//2:], y[size//2:]),
            validation_batch_size=size//2,
            epochs=200,
            verbose=False
        ))
    # Smoke check: plotting several histories into one PNG must not raise.
    plot_history(histories, path="./plots/multiple_histories.png")
| [
"cappelletti.luca94@gmail.com"
] | cappelletti.luca94@gmail.com |
f9a9d3d90bc48ff33ac94a9bdc430acb9e17d70f | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/bar/marker/colorbar/_ypad.py | fad1f6fcec651e9cb3a069e1030d2a0ba6a7a075 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 442 | py | import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated validator for the `bar.marker.colorbar.ypad` property:
    # a non-negative number (min=0), edited together with the colorbars.
    def __init__(self, plotly_name="ypad", parent_name="bar.marker.colorbar", **kwargs):
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
1cb2d663dd85e185e94758e265d09d2776abc213 | 89ee7302d7a6c53a8370315c15c136322766fb66 | /ch13/dframe_def2.py | 6da23eb51ded62a876d48b8ced783873bdc8d8de | [] | no_license | nbvc1003/python | 34424ce577335faf180160a82c1ba59b3233030b | bae2f2a066fbde9107e6a3cd26b82de47e71759e | refs/heads/master | 2020-09-04T20:18:50.102871 | 2020-01-29T06:21:47 | 2020-01-29T06:21:47 | 219,880,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | import pandas as pd
# Column-oriented source data; each column of a DataFrame holds one dtype.
data = {
    "2015": [9904312, 3448737, 2890451, 2466052],
    "2010": [9631482, 3393191, 2632035, 2431774],
    "2005": [9762546, 3512547, 2517680, 2456016],
    "2000": [9853972, 3655437, 2466338, 2473990],
    "지역": ["수도권", "경상권", "수도권", "경상권"],
    "2010-2015 증가율": [0.0283, 0.0163, 0.0982, 0.0141]
}
column = ["지역", "2015", "2010", "2005", "2000", "2010-2015 증가율"]
index = ["서울", "부산", "인천", "대구"]
df = pd.DataFrame(data, index=index, columns=column)
print(df)

# Drop the Incheon row.
df = df.drop(index='인천')
print(df)

# Drop the Seoul and Daegu rows in one call.
df = df.drop(index=['서울', '대구'])
print(df)

# Drop the year-2010 column.
df = df.drop(columns='2010')
print(df)
"nbvc@nate.com"
] | nbvc@nate.com |
76582253feea01bffeb2c528387633c0af0ee6aa | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/pandas/tests/util/test_assert_index_equal.py | d49e5ff9c4a7acb872d67258a15a3bba1db6dc40 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a7a930c51ee077a3cf9db15abd6751d1d7e0fc22968c380545f80e6c3f58a838
size 5664
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
bf3b8438d8ee625611afccaa3eb3e39f83b7f91c | 272ae95716e530d538937ded59ec5b6e0b6d4db8 | /섹션 4/10. 역수열/AA.py | ac30788d08d5d305fed5b5beddba53a6d2fd1329 | [] | no_license | gogoheejun/algorithm | 83a1cb30bff5c349f53be16764e517a46e99cf1c | 39e999abf7170f434a7ac6e1f698f066e55aca03 | refs/heads/main | 2023-06-22T13:06:32.135917 | 2021-07-25T15:46:19 | 2021-07-25T15:46:19 | 383,379,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import sys
# sys.stdin = open("input.txt", "r")  # uncomment to feed input from a file while testing
# Reconstruct the permutation of 1..n from its inversion sequence:
# a[i] = how many values greater than i stand in front of i.
n = int(input())
a = list(map(int, input().split()))
a.insert(0, 0)  # shift to 1-based indexing so a[i] belongs to value i
seq = [0] * n
# Bug fix: the original iterated range(1, n) and never placed the value n,
# leaving one slot of seq as 0; all values 1..n must be placed.
for i in range(1, n + 1):
    for j in range(n):
        if a[i] == 0 and seq[j] == 0:
            seq[j] = i  # count exhausted: put i into the first free slot
            break
        elif seq[j] == 0:
            a[i] -= 1  # each free slot skipped will later hold a larger value
for x in seq:
    print(x, end=" ")
| [
"heejjuunn@gmail.com"
] | heejjuunn@gmail.com |
9bc46ea84932af7397f0c23c585801421a479073 | 3e23aaf1d482843e3640dc2721ab887082063b51 | /num201_300/num291_300/num300.py | 5981af0f9bb0efe30350182a50370d4ece08e4b8 | [] | no_license | guozhaoxin/leetcode | b19be28c0dc82fa7a5126edafa7c77ae2c77f22e | 807ba32ed7802b756e93dfe44264dac5bb9317a0 | refs/heads/master | 2020-04-01T10:29:49.375239 | 2019-02-24T03:27:18 | 2019-02-24T03:27:18 | 153,120,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,887 | py | #encoding:utf8
__author__ = 'gold'
'''
300. Longest Increasing Subsequence
Given an unsorted array of integers, find the length of longest increasing subsequence.
Example:
Input: [10,9,2,5,3,7,101,18]
Output: 4
Explanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4.
Note:
There may be more than one LIS combination, it is only necessary for you to return the length.
Your algorithm should run in O(n2) complexity.
Follow up: Could you improve it to O(n log n) time complexity?
Accepted
167,626
Submissions
424,735
'''
class Solution:
    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence.

        Classic O(n^2) dynamic programming: best[i] is the LIS length
        ending exactly at index i.

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 1:
            return len(nums)
        best = [1] * len(nums)
        for i, current in enumerate(nums):
            # LIS lengths of every earlier element we could extend.
            extendable = [best[j] for j in range(i) if nums[j] < current]
            if extendable:
                best[i] = max(extendable) + 1
        return max(best)
class Solution(object):
    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence.

        O(n log n): scan nums right-to-left while maintaining `compare`, a
        strictly decreasing list of candidate subsequence heads; its final
        length equals the LIS length.

        Fix: removed a leftover debug `print(compare)` that polluted stdout
        on every call.

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) < 2: return len(nums)
        compare = []
        for i in range(len(nums) - 1, -1, -1):
            if not compare:
                compare.append(nums[i])
            else:
                if nums[i] < compare[-1]:
                    # Smaller than every stored head: extend the decreasing list.
                    compare.append(nums[i])
                elif nums[i] == compare[-1]:
                    # Equal elements cannot extend a *strictly* increasing run.
                    continue
                else:
                    # Replace the first stored head that nums[i] beats.
                    pos = self.find(compare, 0, len(compare) - 1, nums[i])
                    compare[pos] = nums[i]
        return len(compare)

    def find(self, compare, start, end, num):
        """Binary-search `compare` (strictly decreasing) for the index whose
        entry should be overwritten by `num`; returns that index."""
        if compare[start] <= num: return start
        if compare[end] >= num: return end
        if start + 1 == end or start == end:
            return end
        m = (start + end) // 2
        if num == compare[m]:
            return m
        elif num < compare[m]:
            return self.find(compare, m, end, num)
        else:
            return self.find(compare, start, m, num)
class Solution(object):
    def lengthOfLIS(self, nums):
        """Length of the longest strictly increasing subsequence, O(n log n).

        Scans nums right-to-left, keeping subStack as a strictly decreasing
        stack of candidate values; its final length is the answer.

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 1:
            return len(nums)
        subStack = [nums[-1]] # seed with the last element so the loop below never sees an empty stack
        for index in range(len((nums)) - 2 ,-1,-1):
            if nums[index] < subStack[-1]:
                subStack.append(nums[index])
            elif nums[index] == subStack[-1]:
                # Equal values cannot extend a strictly increasing subsequence.
                continue
            else:
                position = self.findPosition(nums,subStack,0,len(subStack) - 1,nums[index])
                subStack[position] = nums[index]
        return len(subStack)
    def findPosition(self,nums,subStack,start,end,num):
        '''
        Binary-search subStack (kept in decreasing order) for the index where
        num should overwrite an existing entry, and return that index.
        :param nums: [int,], the original sequence (not used by this method)
        :param subStack: [int,], the decreasing stack built so far
        :param start: int, lower bound of the search range
        :param end: int, upper bound of the search range
        :param num: int, the new number being placed
        :return: int, the index at which num should be stored
        '''
        if subStack[start] <= num:
            return start
        if subStack[end] >= num:
            return end
        if start == end or start + 1 == end:
            return end
        mid = (start + end) // 2
        if num == subStack[mid]:
            return mid
        elif num < subStack[mid]:
            return self.findPosition(nums,subStack,mid,end,num)
        else:
            return self.findPosition(nums,subStack,start,mid,num)
if __name__ == '__main__':
print(Solution().lengthOfLIS([10,9,2,5,3,7,101,18])) | [
"1345616978@qq.com"
] | 1345616978@qq.com |
06dd454f3cc6627b6107b4bad371fd543ba0df59 | ef32b87973a8dc08ba46bf03c5601548675de649 | /pytglib/api/types/option_value_boolean.py | c8645e9adc81b47b314846938b95c884dc4e4c7b | [
"MIT"
] | permissive | iTeam-co/pytglib | 1a7580f0e0c9e317fbb0de1d3259c8c4cb90e721 | d3b52d7c74ee5d82f4c3e15e4aa8c9caa007b4b5 | refs/heads/master | 2022-07-26T09:17:08.622398 | 2022-07-14T11:24:22 | 2022-07-14T11:24:22 | 178,060,880 | 10 | 9 | null | null | null | null | UTF-8 | Python | false | false | 595 | py |
from ..utils import Object
class OptionValueBoolean(Object):
    """A boolean-valued option.

    Attributes:
        ID (:obj:`str`): ``OptionValueBoolean``

    Args:
        value (:obj:`bool`):
            The value of the option

    Returns:
        OptionValue

    Raises:
        :class:`telegram.Error`

    """
    ID = "optionValueBoolean"

    def __init__(self, value, **kwargs):
        # Stored boolean value of the option.
        self.value = value

    @staticmethod
    def read(q: dict, *args) -> "OptionValueBoolean":
        # Build an instance from a result dict; any extra args are ignored.
        return OptionValueBoolean(q.get('value'))
| [
"me@amirh.co"
] | me@amirh.co |
f42c817db89647881254bc96d1a90ddcfc7de826 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /logs_write_1/retention-policy_delete.py | 4ce4469c1a441eb5b15a8dad07b0d26f4ffd024f | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
# Make the repository root importable so the `common` package below resolves.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/logs/delete-retention-policy.html
# Thin wrapper around `aws logs delete-retention-policy`; delegates to
# write_one_parameter with the service, command, and required parameter name.
if __name__ == '__main__':
    """
    put-retention-policy : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/logs/put-retention-policy.html
    """
    # Descriptive text for the single required parameter (passed through
    # to write_one_parameter below).
    parameter_display_string = """
    # log-group-name : The name of the log group.
    """
    add_option_dict = {}
    #######################################################################
    # parameter display string
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    write_one_parameter("logs", "delete-retention-policy", "log-group-name", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
50681b82fb23f55dca02a53dd784ad540e419c21 | baed2c2da1f776c0968d3cacd2fa45bdbe5482d6 | /S4cam/groupedCameras/TMP/legacy_designs/TMP_baseline_rev_multicam_test4_circular_elliptical_stop_leaders_8_39/elliptical_stop/polarization_analysis/plot_polarization_histograms.py | 63e6df9875b18b829138fcd042bd9ea942941e2f | [] | no_license | patogallardo/zemax_tools | 5ae2fe9a1e8b032684b8cf57457ee4f3239d9141 | 90d309c2f96c94469963eb905844d76fa2137bf9 | refs/heads/master | 2023-01-08T22:52:16.865852 | 2022-12-20T21:36:28 | 2022-12-20T21:36:28 | 234,634,525 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | '''Opens results from distorted mirrors and plots histograms of polarization
leakage'''
# Load cross-polarization results from two analysis runs and overlay their
# histograms in a single figure saved as a PDF.
import matplotlib.pyplot as plt
import numpy as np
import os
fname1 = os.path.abspath("./crosspol/crosspol_output.npz")
fname2 = os.path.abspath("../gravitational_thermal_deformations/polarization/crosspol/crosspol_output.npz") # noqa
pols1 = np.load(fname1)["T_db"]  # cross-pol values; "_db" suggests decibels -- confirm against producer
pols2 = np.load(fname2)["T_db"]
plt.hist(pols1, histtype='step')  # outline-only bars so both distributions stay visible
plt.hist(pols2, histtype='step')
plt.savefig("polarization_histograms.pdf")
| [
"26889221+patogallardo@users.noreply.github.com"
] | 26889221+patogallardo@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.