| blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d55b8c361a5e9a259b03d03acde5dbaabcbf9062
|
f9f698e577ac561e34f6e7851dea807e3429bc57
|
/DevTree/models/admin.py
|
a605ba4f05ca31238bcdd933e0a4ebbbe4c495f3
|
[] |
no_license
|
zjd2035/DevTree
|
22d5a628ad7f2fd03f4065243e5f1f21b6a3e249
|
99a7142aa5ef1f6d4991a2de2516419f83a13dbe
|
refs/heads/master
| 2021-08-26T08:16:05.837792
| 2017-11-22T13:39:09
| 2017-11-22T13:39:09
| 111,687,133
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 836
|
py
|
from django.contrib import admin
from DevSky.models.models import *
class DeveloperAdmin(admin.ModelAdmin):
#Fields for creating Developer object
fields = ['user', 'about', 'rating']
#Fields to display for developer
#list_display = ('username',)
class ProjectAdmin(admin.ModelAdmin):
#Fields for creating a Project
fields = ['title', 'languages', 'quick_description', 'full_description', 'owner', 'developers', 'rating' ]
#Fields to display for project
list_display = ('title', 'owner', 'quick_description')
#Search fields
search_fields = ['title', 'owner']
admin.site.register(Developer, DeveloperAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(DeveloperRating)
admin.site.register(ProjectRating)
admin.site.register(DeveloperComment)
admin.site.register(ProjectComment)
|
[
"zjd2035@gmail.com"
] |
zjd2035@gmail.com
|
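Editor's aside on the admin module above: the same registrations can equivalently be written with Django's @admin.register decorator (available since Django 1.7). A minimal sketch using the same models:

# Editor's sketch, not part of the dataset row above.
from django.contrib import admin
from DevSky.models.models import Developer, Project

@admin.register(Developer)
class DeveloperAdmin(admin.ModelAdmin):
    fields = ['user', 'about', 'rating']

@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    fields = ['title', 'languages', 'quick_description', 'full_description',
              'owner', 'developers', 'rating']
    list_display = ('title', 'owner', 'quick_description')
    search_fields = ['title', 'owner']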
1698c854294c33913a617563ebf95c58f4f0da0e
|
30082a1920ed9082864c24f1d3dbaff9bd2cb9a7
|
/nexusdatabroker/TLSNXAPITool1.0/TLSNXAPITool.py
|
a60df0a92657345aa569590e55ee3e85d5b153e5
|
[] |
no_license
|
NARESH9286/nexus9000
|
7885ade015f83d86b96e10a7cc3fbc262d157686
|
b6536915d1d22b087b0c93f775641d4be0bb2307
|
refs/heads/master
| 2021-06-24T22:05:47.759161
| 2017-09-12T06:52:38
| 2017-09-12T06:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,547
|
py
|
import os
import sys
import yaml
import requests
import subprocess
import logging
import paramiko
# pylint: disable-msg=E0611
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import SNIMissingWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(SNIMissingWarning)
class Server:
def __init__(self):
self.conn_type = "https"
with open(INPUTFILE) as file_ptr:
confi = yaml.safe_load(file_ptr)
self.server_ip = confi['ServerIP']['ServerIP1']['ip']
self.username = confi['ServerIP']['ServerIP1']['user']
self.password = confi['ServerIP']['ServerIP1']['password']
self.port = '8443'
self.web_url = ""
self.login_url = ""
self.add_device_url = ""
self.device_response = 0
self.xnc_pwd = str(confi['xnc_password'])
self.xnc_usr = str(confi['xnc_username'])
    def ndb_server_login(self):
try:
self.web_url = self.conn_type+"://"+self.server_ip+":"\
+self.port+"/monitor/"
self.login_url = self.conn_type+"://"+self.server_ip+":"\
+self.port+"/monitor/j_security_check"
login_payload = {"j_username" : self.xnc_usr, "j_password" : self.xnc_pwd}
with open(INPUTFILE) as file_ptr:
dev_info = yaml.safe_load(file_ptr)
for dic in sorted(dev_info['IP'].keys()):
add_device_payload = dev_info['IP'][dic]
add_device_payload['connectiontype'] = 'NXAPI'
add_device_payload['auxnode'] = 'false'
for key in add_device_payload:
add_device_payload[key] = str(add_device_payload[key])
self.add_device_url = str(self.conn_type+"://"+\
str(self.server_ip)+":"+str(self.port)+\
"/controller/web/devices/extended//element/add")
#pylint: disable=maybe-no-member
with requests.session() as ses:
ses.get(self.web_url, verify=False)
ses.post(self.login_url, data=login_payload, verify=False)
ses.post(self.add_device_url, data=add_device_payload, verify=False)
LOGGER.info("Device - "+add_device_payload['address']+\
" Device added successfully")
except paramiko.SSHException:
LOGGER.error("Device - "+add_device_payload['address']+\
" Failed to add device in NDB")
if __name__ == "__main__":
FILE1 = '/etc/ssh/ssh_config'
DIR = os.path.dirname(__file__)
#sys.stdout = os.devnull
if not os.path.isdir('./Utilities/Log'):
os.mkdir("./Utilities/Log")
#sys.stdout = open(os.devnull, "w")
if len(sys.argv) == 1:
FILENAME = os.path.join(DIR, './Utilities/Log/Logfile.log')
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
CON_LOG_HANDLER = logging.StreamHandler()
FILE_LOG_HANDLER = logging.FileHandler(FILENAME)
FILE_LOG_HANDLER.setLevel(logging.DEBUG)
CON_LOG_HANDLER.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
FILE_LOG_HANDLER.setFormatter(FORMATTER)
CON_LOG_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_LOG_HANDLER)
LOGGER.addHandler(CON_LOG_HANDLER)
elif len(sys.argv) == 2:
if '--quiet' in sys.argv:
FILENAME = os.path.join(DIR, './Utilities/Log/Logfile.log')
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
FILE_LOG_HANDLER = logging.FileHandler(FILENAME)
FILE_LOG_HANDLER.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
FILE_LOG_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_LOG_HANDLER)
else:
FILENAME = os.path.join(DIR, './Utilities/Log/Logfile.log')
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
CON_LOG_HANDLER = logging.StreamHandler()
FILE_LOG_HANDLER = logging.FileHandler(FILENAME)
FILE_LOG_HANDLER.setLevel(logging.DEBUG)
CON_LOG_HANDLER.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
FILE_LOG_HANDLER.setFormatter(FORMATTER)
CON_LOG_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_LOG_HANDLER)
LOGGER.addHandler(CON_LOG_HANDLER)
LOGGER.error(" Run python script without arguments or along "+\
"with --quiet argument")
sys.exit(0)
else:
LOGGER.error(" Run python script without arguments or along "+\
"with --quiet argument")
sys.exit(0)
if '--quiet' in sys.argv:
subprocess.call(" python TLSScript.py --quiet", shell=True)
subprocess.call(" python OpenSSL.py --quiet", shell=True)
else:
subprocess.call(" python TLSScript.py 1", shell=True)
subprocess.call(" python OpenSSL.py 1", shell=True)
INPUTFILE = os.path.join(DIR, './Utilities/Input/inputfile.yaml')
DEV = Server()
DEV.ndb_server_login()
os.system("rm -rf ./Utilities/TlsCerts/temp")
os.system("rm -rf ./Utilities/TlsCerts/xnc.log")
|
[
"stumbala@cisco.com"
] |
stumbala@cisco.com
|
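For readability: the TLS tool above reads its settings from ./Utilities/Input/inputfile.yaml. Reconstructed from the keys the code accesses (values here are placeholders, and device entries may carry more fields than shown), the dict returned by yaml.safe_load() looks roughly like:

# Editor's reconstruction; only the key names are taken from the code above.
expected_config = {
    "ServerIP": {
        "ServerIP1": {"ip": "192.0.2.10", "user": "admin", "password": "secret"},
    },
    "xnc_username": "admin",
    "xnc_password": "secret",
    "IP": {
        # one entry per device; 'connectiontype' and 'auxnode' are filled in by the script
        "device1": {"address": "192.0.2.20"},
    },
}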
1a5dd463ef4a3efe08755888c769206aba42c6a0
|
70f58b1dba311ea632257c7262c143f410378051
|
/corona/index.py
|
cc46ba839a4966150b4805cb64995c8d68071581
|
[] |
no_license
|
azizyilmaz/data-analysis-with-python
|
e295488ae7f0d938a4d0422d06801c1241794763
|
2ee39d3bf3d72e5b43a785ba4690cb21e1666966
|
refs/heads/master
| 2022-11-18T09:21:14.390509
| 2020-07-12T21:45:14
| 2020-07-12T21:45:14
| 277,184,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
import pandas
dataset = pandas.read_csv("corona/coronavirusdata/covid_19_data.csv")
print(f'Covid19 data list:\n{dataset}')
print(f'Row and column counts:\n{dataset.shape}')
print(f'Columns:\n{dataset.columns}')
print(f'Data types:\n{dataset.dtypes}')
print(f'First 10 rows:\n{dataset.head(10)}')
print(f'Last 10 rows:\n{dataset.tail(10)}')
print('Data list info:')
dataset.info()  # info() prints directly and returns None, so don't embed it in an f-string
print(f'Data list summary:\n{dataset.describe()}')
# The 20 rows with the most deaths
print(dataset.sort_values(by="Deaths", ascending=False).head(20))
# Results for Turkey
print(dataset[dataset['Country/Region'] == 'Turkey']
      .sort_values(by="Deaths", ascending=False).head(20))
|
[
"azizxyilmaz@outlook.com"
] |
azizxyilmaz@outlook.com
|
c5fa5fda6af41850d5525c2fbd4e5caab4f6ccee
|
73000d852404cc1fd3accfbfc8c83a0b88f6b4cb
|
/tests/test_xtbparser.py
|
17d521260fae33c10b5b68e03bef388a5d5191f7
|
[
"Apache-2.0"
] |
permissive
|
nomad-coe/nomad-parser-xtb
|
1580f90a2d391c63c3d291bc6a698779d599be10
|
09894dc15511fe6a9e9938027a0c48786dcdc084
|
refs/heads/master
| 2023-06-20T16:41:18.147131
| 2021-07-14T21:55:22
| 2021-07-14T21:55:22
| 386,077,099
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD. See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from nomad.datamodel import EntryArchive
from xtbparser import XTBParser
def approx(value, abs=0, rel=1e-6):
return pytest.approx(value, abs=abs, rel=rel)
@pytest.fixture(scope='module')
def parser():
return XTBParser()
def test_scf(parser):
archive = EntryArchive()
parser.parse('tests/data/gfn1/out', archive, None)
sec_run = archive.section_run[0]
assert sec_run.program_name == '6.4.1'
|
[
"ladinesalvinnoe@gmail.com"
] |
ladinesalvinnoe@gmail.com
|
cade017587a1fddf10ba53e82b78b74261e5d548
|
c5bf6fd200e76dc388770c86b365ba3a0721416a
|
/bcassessment/bcassessment/items.py
|
33fc306e236f3bb27d15c9d521ef9650033b0228
|
[] |
no_license
|
b15h4/ScrapingProjects
|
48a98ee8c23794cbd3ca0e62aeabf2866fdd5851
|
3906091811409ccb9ce12a5099a511e3e6771d50
|
refs/heads/master
| 2020-05-09T11:41:47.420389
| 2020-01-28T22:30:48
| 2020-01-28T22:30:48
| 181,089,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class BcassessmentItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
PID = scrapy.Field()
Address = scrapy.Field()
Description = scrapy.Field()
Bedrooms = scrapy.Field()
Bathrooms = scrapy.Field()
Carports = scrapy.Field()
Garages = scrapy.Field()
Land_Size = scrapy.Field()
    First_Floor_Area = scrapy.Field()
    Second_Floor_Area = scrapy.Field()
Basement_Finish_Area = scrapy.Field()
Strata_Area = scrapy.Field()
Building_Storeys = scrapy.Field()
Gross_Leasable_Area = scrapy.Field()
Net_Leasable_Area = scrapy.Field()
    No_Of_Apartment_Units = scrapy.Field()
Sold_Date = scrapy.Field()
Sold_Price = scrapy.Field()
Total_Value = scrapy.Field()
Land = scrapy.Field()
Buildings = scrapy.Field()
Previous_Year_Value = scrapy.Field()
Previous_Land = scrapy.Field()
Previous_Buildings = scrapy.Field()
Year_Built = scrapy.Field()
Area_Jurisdiction_Roll = scrapy.Field()
Legal_Description_and_Parcel_ID = scrapy.Field()
|
[
"noreply@github.com"
] |
noreply@github.com
|
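A minimal usage sketch for the item class above (scrapy items behave like dicts with a fixed, declared key set; the values here are made up):

# Editor's illustration of the BcassessmentItem declared above.
item = BcassessmentItem()
item['PID'] = '012-345-678'
item['Address'] = '123 Example St'
item['Bedrooms'] = 3
print(dict(item))  # {'PID': '012-345-678', 'Address': '123 Example St', 'Bedrooms': 3}
# Assigning to an undeclared key raises KeyError:
# item['Unknown'] = 1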
651296c899c3fc4e8987c32de2d56bef52ab2d79
|
57d51b3ec0db9a91054b81c5e33f6926da3cdb09
|
/cesiumpy/util/html.py
|
151be315b75ccce9ba288ba917f54c426e5511de
|
[
"Apache-2.0"
] |
permissive
|
cksammons7/cesiumpy
|
f75aa73f2b836bc16e148d9c66d4056942fa6501
|
0ffa7509fdac03644f0e2fb91385106c40284aa1
|
refs/heads/master
| 2022-05-28T09:45:56.385989
| 2020-04-20T17:20:55
| 2020-04-20T17:20:55
| 257,345,562
| 1
| 0
|
Apache-2.0
| 2020-04-20T16:47:33
| 2020-04-20T16:47:31
| null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import os
import six
import warnings
def _check_uri(sourceUri):
if not os.path.exists(sourceUri):
msg = "Unable to read specified path, be sure to the output HTML can read the path: {0}"
warnings.warn(msg.format(sourceUri))
return True
def _wrap_uri(sourceUri):
if sourceUri.endswith('.js'):
return '<script src="{0}"></script>'.format(sourceUri)
elif sourceUri.endswith('.css'):
return '<link rel="stylesheet" href="{0}" type="text/css">'.format(sourceUri)
else:
raise ValueError(sourceUri)
def _wrap_script(script):
if not isinstance(script, list):
script = [script]
# filter None and empty str
script = [s for s in script if s is not None and len(s) > 0]
script = _add_indent(script)
return ["""<script type="text/javascript">"""] + script + ["""</script>"""]
def _add_indent(script, indent=2):
""" Indent list of script with specfied number of spaces """
if not isinstance(script, list):
script = [script]
indent = ' ' * indent
return [indent + s for s in script]
def _build_html(*args):
results = []
for a in args:
if isinstance(a, list):
results.extend(a)
elif isinstance(a, six.string_types):
results.append(a)
else:
raise ValueError(type(a))
return os.linesep.join(results)
|
[
"noreply@github.com"
] |
noreply@github.com
|
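The helpers above compose HTML fragments from strings and lists; a short usage sketch of what they produce:

# Editor's illustration of the helpers defined above.
print(_wrap_uri("https://example.com/app.js"))
# <script src="https://example.com/app.js"></script>
print(_wrap_uri("https://example.com/style.css"))
# <link rel="stylesheet" href="https://example.com/style.css" type="text/css">
html = _build_html(_wrap_script('console.log("hi");'))
print(html)
# <script type="text/javascript">
#   console.log("hi");
# </script>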
b3096c51d5f0148b23157700f003b048c28d4cb6
|
efd6c1d24b0a392a177679429d53dd2f515d0d95
|
/bi_auth/migrations/0001_initial.py
|
fd577d05d960ad8b413cd4b9a52c1be60fe0f81b
|
[] |
no_license
|
mitshel/TakedaAnalitic
|
5ccfb4aa83a056cbeaebce03df41819c7ece7985
|
b04b08fb053bff238a1ce68df423f99314827b48
|
refs/heads/master
| 2021-07-02T21:27:10.023816
| 2019-02-14T14:32:18
| 2019-02-14T14:32:42
| 153,908,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
# Generated by Django 2.1.2 on 2018-12-15 11:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_orgadmin', models.BooleanField(verbose_name='Администратор организации')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль',
'verbose_name_plural': 'Профили',
},
),
]
|
[
"mitshel@mail.ru"
] |
mitshel@mail.ru
|
7ba0a06d9808d2f9a578e5b737327d2a711235eb
|
04a6f0914b128251bbd032bf13fbbfddb3c4eaa7
|
/pythonbind/src/head.py
|
8f67231978aa717ed41f4eb826b0187e5779b55a
|
[] |
no_license
|
YulinWu/qos_public
|
a8d6eb4cba65a17e292f7d072bfb6728e95c7815
|
58cb67f62c285d70f86cb8a79d147610abb7ff3f
|
refs/heads/master
| 2022-12-30T22:17:09.310242
| 2020-10-16T05:38:02
| 2020-10-16T05:38:02
| 304,203,187
| 0
| 1
| null | 2020-10-16T05:38:04
| 2020-10-15T03:48:59
|
Java
|
UTF-8
|
Python
| false
| false
| 3,284
|
py
|
import jpype
import jpype.imports
from jpype.types import *
import os
def matchArgument(patterns, args):
if len(patterns) != len(args):
return False
for p, v in zip(patterns, args):
        if p.startswith('@NonNull ') and v is None:
return False
if p.startswith('@NonNull '):
p = p[9:]
vtype = p.split(' ')[-2]
if vtype[-2:] == '[]' and not isinstance(v, list):
return False
if vtype[-4:] == '[][]' and not isinstance(v[0], list):
return False
return True
def convertArgument(patterns, args):
typemap = {
'boolean': JBoolean,
'byte': JByte,
'char': JChar,
'short': JShort,
'int': JInt,
'long': JLong,
'float': JFloat,
'double': JDouble,
'String': JString,
'boolean[]': JArray(JBoolean),
'byte[]': JArray(JByte),
'char[]': JArray(JChar),
'short[]': JArray(JShort),
'int[]': JArray(JInt),
'long[]': JArray(JLong),
'float[]': JArray(JFloat),
'double[]': JArray(JDouble),
'String[]': JArray(JString),
'boolean[][]': JArray(JArray(JBoolean)),
'byte[][]': JArray(JArray(JByte)),
'char[][]': JArray(JArray(JChar)),
'short[][]': JArray(JArray(JShort)),
'int[][]': JArray(JArray(JInt)),
'long[][]': JArray(JArray(JLong)),
'float[][]': JArray(JArray(JFloat)),
'double[][]': JArray(JArray(JDouble)),
'String[][]': JArray(JArray(JString)),
'boolean[][][]': JArray(JArray(JArray(JBoolean))),
'byte[][][]': JArray(JArray(JArray(JByte))),
'char[][][]': JArray(JArray(JArray(JChar))),
'short[][][]': JArray(JArray(JArray(JShort))),
'int[][][]': JArray(JArray(JArray(JInt))),
'long[][][]': JArray(JArray(JArray(JLong))),
'float[][][]': JArray(JArray(JArray(JFloat))),
'double[][][]': JArray(JArray(JArray(JDouble))),
'String[][][]': JArray(JArray(JArray(JString))),
}
args2 = []
for p, v in zip(patterns, args):
vtype = p.split(' ')[-2]
args2.append(typemap[vtype](v))
return args2
jpype.startJVM(classpath=[os.path.abspath(os.path.dirname(
__file__))+'/../../rpcapi/target/qos-rpcapi-1.0.jar'])
# jpype.startJVM(classpath=[os.path.abspath(
# os.path.dirname(__file__))+'/../../rpcapi/target/classes'])
QOSServerApi = jpype.JClass('qc.qos.rpcapi.qpx1701.QOSServerApi')
class g:
Instance = None
def getInstance(*args):
patterns = ['@NonNull String userName',
'String password',
'@NonNull String host',
'int port']
if matchArgument(patterns, args):
args = convertArgument(patterns, args)
g.Instance = QOSServerApi(*args)
return
patterns = ['@NonNull String userName',
'String password',
'@NonNull String host',
'int port',
'boolean useSSL',
'String keyFile',
'char[] keyStorePassword']
if matchArgument(patterns, args):
args = convertArgument(patterns, args)
g.Instance = QOSServerApi(*args)
return
raise TypeError('no input pattern match')
|
[
"zhao.uv@gmail.com"
] |
zhao.uv@gmail.com
|
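matchArgument above does only coarse checking (arity, @NonNull, and list-ness for array types); a small illustration that runs without starting the JVM (convertArgument, by contrast, needs jpype.startJVM() because it builds Java objects):

# Editor's illustration of matchArgument with made-up values.
patterns = ['@NonNull String userName', 'String password',
            '@NonNull String host', 'int port']
print(matchArgument(patterns, ['alice', None, 'localhost', 8080]))  # True
print(matchArgument(patterns, [None, 'pw', 'localhost', 8080]))     # False (@NonNull violated)
print(matchArgument(patterns, ['alice', 'pw', 'localhost']))        # False (arity mismatch)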
b1c19b8460916ca3e33a02b99ead0145444c29b0
|
e04c445698d94b7d98dfae2c92b481ce42647e9f
|
/main/views.py
|
e3aa66c2105bd9f85b7b155086bbfa7aa9931ba7
|
[] |
no_license
|
sersergious/taskmanager
|
48475bcbc94d737b5d80c7e7d58d3ab19a009348
|
7005712dfc7e755b89dd176df92ccefa3334b7f1
|
refs/heads/master
| 2023-03-11T07:22:46.016276
| 2021-02-24T22:03:05
| 2021-02-24T22:03:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from django.shortcuts import render, redirect
from .models import Task
from .forms import TaskForm
def index(request):
tasks = Task.objects.all()
return render(request, 'main/index.html', {'title': 'Main page', 'tasks': tasks})
def about(request):
return render(request, 'main/about.html')
def create(request):
    error = ''
    form = TaskForm()
    if request.method == 'POST':
        form = TaskForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('home')
        else:
            # keep the bound form so the user's input and field errors survive re-rendering
            error = 'The form was not correct'
    context = {
        'form': form,
        'error': error
    }
    return render(request, 'main/create.html', context)
|
[
"sergeykuzmin495@gmail.com"
] |
sergeykuzmin495@gmail.com
|
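The create() view above redirects to a URL named 'home'; a hypothetical main/urls.py wiring these views (the name 'home' is required by the redirect, the other route names are assumptions):

# Hypothetical main/urls.py for the views above.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.index, name='home'),
    path('about/', views.about, name='about'),
    path('create/', views.create, name='create'),
]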
ef57aa3b2a686f3f147faa1556323040b18f9c63
|
662aef24b143f21b44b74b1db3b7c0a5e6c4e202
|
/python/python标准库urllib.py
|
accaec446456e4a5a2031d688ff317af71f84573
|
[] |
no_license
|
Chan0619/FIS03
|
aa8724bbde0af19389288ecd605c4276a8658195
|
969823101daf1e761c63a959afaaebf56c28e79c
|
refs/heads/master
| 2023-06-23T06:00:22.896841
| 2021-07-20T04:56:31
| 2021-07-20T04:56:31
| 316,534,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
import urllib.request
response = urllib.request.urlopen('http://www.baidu.com')
print(response)
print(response.status)
print(response.read())
print(response.headers)  # header information
import math
print(math.ceil(1.5))
print(math.floor(1.5))
print(math.sqrt(25))
|
[
"phoebe.yanxi@gmail.com"
] |
phoebe.yanxi@gmail.com
|
9004a5e73ad5c6897f0e9b223cb0cbfb05dd568f
|
5b301fb18d62953cbf48e284594abe9e14ae90d9
|
/hyperbolic/euclid/origin_line.py
|
7b6431e6d2b143972ab8adfd1ab5dd4fd364c572
|
[
"MIT"
] |
permissive
|
cduck/hyperbolic
|
70ba6bec20aae2e7d63382ffb25ed5b491c61496
|
bebe9b546e12f28d76ed07d0a2d9954d9d143571
|
refs/heads/master
| 2023-08-21T22:36:13.504014
| 2023-02-27T02:42:04
| 2023-02-27T05:01:31
| 115,620,902
| 119
| 30
|
MIT
| 2023-09-01T13:58:35
| 2017-12-28T12:24:28
|
Python
|
UTF-8
|
Python
| false
| false
| 893
|
py
|
from . import line
class OriginLine(line.Line):
def __init__(self, px, py):
self.px = px
self.py = py
def to_line(self):
return line.Line(0, 0, self.px, self.py)
@property
def x1(self): return 0
@property
def y1(self): return 0
@property
def x2(self): return self.px
@property
def y2(self): return self.py
def __repr__(self):
return '{}({}, {})'.format(type(self).__name__,
round(self.px, 3), round(self.py, 3))
def reverse(self):
self.px, self.py = -self.px, -self.py
def reversed(self):
return OriginLine(-self.px, -self.py)
@staticmethod
def from_points(x1, y1, **kwargs):
return OriginLine(x1, y1, **kwargs)
def to_drawables(self, **kwargs):
import drawsvg as draw
return (draw.Line(self.x1, self.y1, self.x2, self.y2, **kwargs),)
|
[
"cduckering@gmail.com"
] |
cduckering@gmail.com
|
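A short usage sketch of the class above:

# Editor's illustration of OriginLine.
ln = OriginLine(3.0, 4.0)
print(ln)             # OriginLine(3.0, 4.0)
full = ln.to_line()   # a line.Line from (0, 0) to (3, 4)
print(ln.reversed())  # OriginLine(-3.0, -4.0), original untouched
ln.reverse()          # in-place flip
print(ln.px, ln.py)   # -3.0 -4.0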
4ffd5dae6f79f999c134250acdaedc6bc61e49ee
|
2e6a926805852fb3d4db9d8d80602aa5cb9d86d3
|
/7.tests/stress_test/locustfile.py
|
7f7db16a514f10de1e0fade7f1f574c0abe82a91
|
[
"Apache-2.0"
] |
permissive
|
Sunao-Yoshii/WelcomePythonExamples
|
4db495ed70ba802e5837344c302a9179b8e6b96b
|
0f929aff7d72075e659e025bca3ce272b952e0a1
|
refs/heads/master
| 2020-08-02T16:10:43.762925
| 2019-11-24T02:03:32
| 2019-11-24T02:03:32
| 211,423,930
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
from locust import HttpLocust, TaskSet, task
import re
class UserBehavior(TaskSet):
def on_start(self):
self.login()
def on_stop(self):
self.logout()
def login(self):
        # fetch the login page first to receive cookies and related state
self.client.get("/wp-login.php")
parameters = {
'log': 'TestAccount',
'pwd': 'Fire!!1192',
'redirect_to': 'http://localhost:9000/wp-admin/',
'testcookie': '1'
}
self.client.post("/wp-login.php", parameters)
def logout(self):
response = self.client.get("/wp-admin/")
        # extract the logout URL from the admin page
logout_url = re.search(
r'<a class="screen-reader-shortcut" href="http:\/\/localhost\:9000(.+)">ログアウト</a>',
response.text).group(1)
self.client.get(logout_url)
@task
def top(self):
self.client.get("/")
@task(2)
def mypage(self):
with self.client.get("/wp-admin/customize.php", catch_response=True) as response:
if response.status_code != 200:
response.failure("not authenticated???")
@task
def projects(self):
self.client.get("/wp-admin/")
class Wordpress(HttpLocust):
task_set = UserBehavior
min_wait = 500
max_wait = 1000
|
[
"2810066+Sunao-Yoshii@users.noreply.github.com"
] |
2810066+Sunao-Yoshii@users.noreply.github.com
|
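Editor's note: the HttpLocust / TaskSet / min_wait style above is the pre-1.0 Locust API; with such a version installed, the file is typically launched as `locust -f locustfile.py --host=http://localhost:9000` and controlled from the web UI. (Locust 1.0 renamed HttpLocust to HttpUser and replaced min_wait/max_wait with wait_time = between(0.5, 1).)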
5fe19acc4de946e7408e43a378612cded89edc88
|
aa91f6e8d59286f65e7f6ed065823c80b7694439
|
/scripts/analysis/baostock/yangxian/yangxian.py
|
adf483a1883a24304215af71aef322817a97af98
|
[
"MIT"
] |
permissive
|
davidyuqiwei/davidyu_stock
|
7f93bcc2c50a0e2c356e3b517dbf7e2e7467093f
|
c234911f49d5980f2dff651333f8ca957333e094
|
refs/heads/master
| 2023-01-07T02:08:04.330559
| 2023-01-02T16:31:37
| 2023-01-02T16:31:37
| 163,694,812
| 13
| 2
| null | 2022-12-18T03:55:10
| 2018-12-31T20:07:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
from davidyu_cfg import *
from functions.data_dir import *
from functions.get_datetime import *
from functions.run_combine_all_csv import *
from functions.colNames import *
from functions.day_history.kLines import klineDate
from functions.LinearReg import *
from functions.common.dfProcess import *
from functions.common.loadModule.load_module_kdj import *
from scipy.stats import linregress
def stock_data(stock_index,start_date,end_date):
df_dir = os.path.join(data_path,"history_data","baostock","2020-12-17")
df1 = pd.read_csv(os.path.join(df_dir,stock_index+".csv"))
df1 = df1[(df1["dt"]>=start_date)&(df1["dt"]<=end_date)]
df1 = df1.drop_duplicates()
df1 = df1.sort_values("date")
df1["stock_index"] = [ x[3:9] for x in df1["code"]]
return df1
def get_3_pos_line(df1):
df1["line"] = df1["close"] - df1["open"]
df1["line"][df1["line"]>0]=1
df1["line"][df1["line"]<=0]=0
df1['mv_close'] = df1.close.rolling(window=3).mean()
df1['mv_close_120'] = df1.close.rolling(window=120).mean()
df1['mv_close_250'] = df1.close.rolling(window=250).mean()
df1['line_check_5'] = df1.line.rolling(window=5).sum()
df1['line_check_3'] = df1.line.rolling(window=3).sum()
df2 = df1[(df1["line_check_3"]==3)&(df1["close"]<df1['mv_close_250'])]
return df2
if __name__ =='__main__':
stock_index = sys.argv[1]
start_date = '2017-01-01'
end_date = '2020-12-17'
try:
df1 = stock_data(stock_index,start_date,end_date)
df2 = get_3_pos_line(df1)
#df3 = df2.tail(1)
#print("{},{}".format(df2['date'].values,df2['code'].values))
print(df2[["date","code"]].to_string(index=False,header=None))
except:
pass
|
[
"davidyuqiwei@outlook.com"
] |
davidyuqiwei@outlook.com
|
20222593e4e4c86708abb70c46aac55bb19e07aa
|
61361e75dda44c69b5c152ea932380fdd70c1397
|
/Lecture code/MongoDB 1-3.py
|
bc337c619de0522010d6bdb009168503f3fcecde
|
[] |
no_license
|
dsj529/DataWrangling
|
34b5d5d544e26491dbbe27aa264d1b11075052dd
|
446ec3a41f7b1eb8fa0ca9536d255b7c17dcdaee
|
refs/heads/master
| 2020-04-02T06:54:22.430939
| 2016-07-13T04:32:54
| 2016-07-13T04:32:54
| 63,209,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,650
|
py
|
# To experiment with this code freely you will have to run this code locally.
# We have provided an example json output here for you to look at,
# but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
    if isinstance(data, dict):
        print(json.dumps(data, indent=indent, sort_keys=True))
    else:
        print(data)
def main():
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7696736a1e831bf9068ea45731dc549e00c9e8c7
|
aa25165d891ae591decc0c214761411bd62681a9
|
/utils/__init__.py
|
1575d6c975074a1397ca2ff39b320a6b8131795f
|
[] |
no_license
|
RaduSoro/MetinBot
|
8a94fa4fac0be2b30eb98049d795859cf1694dd5
|
8936eba0f53cc2e620a80ffabb6b93ad31ea296e
|
refs/heads/master
| 2023-08-26T22:37:07.233612
| 2021-11-02T08:18:55
| 2021-11-02T08:18:55
| 398,293,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
from .utils import *
from .vision import Vision, SnowManFilter, SnowManFilterRedForest
|
[
"radu.sorostinean@ausy.com"
] |
radu.sorostinean@ausy.com
|
f09be31c2f91323210f68ba6571fd449ab30ea4b
|
33360ff5f52c1cfbe01bcc001f0533900cee38a0
|
/bme280.py
|
1097289a031a069a2dc516eed3fae10908a2ca0a
|
[] |
no_license
|
diyron/uP_WifiNode
|
49d4090e402618a8b2373c95ea1db834f9ccac45
|
724e82663efab7c67a972b73cc2a8e6789b1020a
|
refs/heads/master
| 2021-01-05T03:14:07.349548
| 2020-02-23T11:32:54
| 2020-02-23T11:32:54
| 240,858,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,575
|
py
|
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address.
BME280_I2CADDR = 0x76
# Operating Modes
BME280_OSAMPLE_1 = 1
BME280_OSAMPLE_2 = 2
BME280_OSAMPLE_4 = 3
BME280_OSAMPLE_8 = 4
BME280_OSAMPLE_16 = 5
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
class BME280:
def __init__(self,
mode=BME280_OSAMPLE_1,
address=BME280_I2CADDR,
i2c=None,
**kwargs):
# Check that mode is valid.
if mode not in [BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4,
BME280_OSAMPLE_8, BME280_OSAMPLE_16]:
raise ValueError(
'Unexpected mode value {0}. Set mode to one of '
'BME280_ULTRALOWPOWER, BME280_STANDARD, BME280_HIGHRES, or '
'BME280_ULTRAHIGHRES'.format(mode))
self._mode = mode
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
# load calibration data
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
bytearray([0x3F]))
self.t_fine = 0
# temporary data holders which stay allocated
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
def read_raw_data(self, result):
""" Reads the raw (uncompensated) data from the sensor.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order
Returns:
None
"""
self._l1_barray[0] = self._mode
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
self._l1_barray[0] = self._mode << 5 | self._mode << 2 | 1
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
self._l1_barray)
sleep_time = 1250 + 2300 * (1 << self._mode)
sleep_time = sleep_time + 2300 * (1 << self._mode) + 575
sleep_time = sleep_time + 2300 * (1 << self._mode) + 575
time.sleep_us(sleep_time) # Wait the required time
# burst readout from 0xF7 to 0xFE, recommended by datasheet
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
# pressure(0xF7): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
# temperature(0xFA): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
# humidity(0xFD): (msb << 8) | lsb
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Reads the data from the sensor and returns the compensated data.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order. You may use
this to read out the sensor without allocating heap memory
Returns:
array with temperature, pressure, humidity. Will be the one from
the result parameter if not None
"""
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
# temperature
var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
var2 = (((((raw_temp >> 4) - self.dig_T1) *
((raw_temp >> 4) - self.dig_T1)) >> 12) * self.dig_T3) >> 14
self.t_fine = var1 + var2
temp = (self.t_fine * 5 + 128) >> 8
# pressure
var1 = self.t_fine - 128000
var2 = var1 * var1 * self.dig_P6
var2 = var2 + ((var1 * self.dig_P5) << 17)
var2 = var2 + (self.dig_P4 << 35)
var1 = (((var1 * var1 * self.dig_P3) >> 8) +
((var1 * self.dig_P2) << 12))
var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
if var1 == 0:
pressure = 0
else:
p = 1048576 - raw_press
p = (((p << 31) - var2) * 3125) // var1
var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
var2 = (self.dig_P8 * p) >> 19
pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)
# humidity
h = self.t_fine - 76800
h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
(self.dig_H5 * h)) + 16384)
>> 15) * (((((((h * self.dig_H6) >> 10) *
(((h * self.dig_H3) >> 11) + 32768)) >> 10) +
2097152) * self.dig_H2 + 8192) >> 14))
h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
h = 0 if h < 0 else h
h = 419430400 if h > 419430400 else h
humidity = h >> 12
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("i", (temp, pressure, humidity))
@property
def values(self):
""" human readable values """
t, p, h = self.read_compensated_data()
p = p // 256
pi = p // 100
#pd = p - pi * 100
hi = h // 1024
#hd = h * 100 // 1024 - hi * 100
t = ((t - (t % 10)) / 10) / 10
p = pi
h = hi
return {"Temperatur": t, "Luftfeuchte": h, "Luftdruck": p}
# original:
# return ("{ '{}".format(t / 100), "{}.{:02d} hPa".format(pi, pd),
# "{}.{:02d} %".format(hi, hd))
|
[
"andre.lange.84@googlemail.com"
] |
andre.lange.84@googlemail.com
|
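A minimal usage sketch for the MicroPython driver above (pin numbers are board-specific assumptions, ESP8266-style shown; on newer MicroPython builds you may need machine.SoftI2C or a bus id):

# Editor's sketch; adjust pins and I2C construction for your board/port.
from machine import I2C, Pin

i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
bme = BME280(i2c=i2c)  # default address 0x76
print(bme.values)      # e.g. {'Temperatur': 21.5, 'Luftfeuchte': 40, 'Luftdruck': 1013}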
6ece01cf76bcfb1cd3fda00de8835a03e0b68a78
|
d1ba73dd406f680342a453439c6105bf5161e470
|
/src/show_results.py
|
f697a4b7df0af5c91195d6ccb3e1fc5add8be696
|
[
"MIT"
] |
permissive
|
yixchen/holistic_scene_human
|
737b5546529bb7aeed4eda7e4f0d61a43c8f1364
|
338b1cea172864b03451fd2f8e347ab751f5e70a
|
refs/heads/master
| 2021-07-10T14:40:00.210618
| 2020-11-24T03:44:24
| 2020-11-24T03:44:24
| 209,427,745
| 46
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
import os
import pickle
from visulization import vis_all
ROOT_DIR = '../data'
IMAGE_PATH = ROOT_DIR + '/image/'
RESULT_PATH = ROOT_DIR + '/result/'
all_image_path = os.listdir(IMAGE_PATH)
for im_name in all_image_path:
image_name = im_name.split('.')[0]
result_path = os.path.join(RESULT_PATH, image_name)
with open(os.path.join(result_path, 'sampler.pickle'), 'rb') as f:
sampler = pickle.load(f, encoding='latin1')
vis_all(sampler, IMAGE_PATH + im_name, result_path, save_image=False)
pass
|
[
"ethanchen@g.ucla.edu"
] |
ethanchen@g.ucla.edu
|
acc3729c60b5a5df550c04e809b00cbb02b4549b
|
de8081ddbfb02b6a3a0a79456e334a0704fea4f2
|
/OHTServer.py
|
b43a9da353347d8143c874132de2f73d21236760
|
[] |
no_license
|
Cking616/NCDStudio
|
01fb5d99577dac8a06ff5003f45b83374ddf3189
|
f09b306f37554032192f22ae675f8a9b0348b7a3
|
refs/heads/master
| 2021-09-14T07:00:01.709782
| 2018-05-09T06:15:41
| 2018-05-09T06:15:41
| 113,954,526
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,611
|
py
|
import time
import asyncore
import socket
import threading
import ctypes
import inspect
recCMD = ""
writeCMDBuffer = []
revCondition = threading.Condition()
isConnected = False
zEncoder = 0
wheelEncoder = 0
motorStatus = False
cmdError = False
gFlag = 0
isEndTimer = False
rampState = 0
def parser_receive(receive):
global gFlag
global cmdError
if receive[:9] == 'ERR Flags':
gFlag = 0
return True
elif receive[:6] == 'Flags:':
cmd = receive.split()
dn = cmd[2]
gFlag = int(dn[-2])
return True
elif receive[:6] == 'ERROR-':
cmdError = True
return True
elif receive[-1] == '.':
tmp_state = receive.split(',')[1]
global rampState
rampState = int(tmp_state)
return True
else:
return False
class OhtHandler(asyncore.dispatcher_with_send):
def handle_read(self):
global recCMD
global cmdError
global gFlag
revCondition.acquire()
data = self.recv(1024)
if data:
recCMD = ""
tmp_rec = data.decode('utf-8')
if not parser_receive(tmp_rec):
recCMD = tmp_rec
revCondition.notify()
revCondition.release()
def handle_close(self):
global isConnected
isConnected = False
def writable(self):
return True
def handle_write(self):
global writeCMDBuffer
if not writeCMDBuffer:
return
cmd = writeCMDBuffer.pop(0)
self.send(cmd.encode('utf-8'))
while writeCMDBuffer:
cmd = writeCMDBuffer.pop(0)
self.send(cmd.encode('utf-8'))
class OhtServer(asyncore.dispatcher):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
self.handler = None
def handle_accept(self):
conn, address = self.accept()
print('Incoming connection from %s' % repr(address))
self.handler = OhtHandler(conn)
global isConnected
isConnected = True
class OhtServerThread(threading.Thread):
def __init__(self, address, port):
threading.Thread.__init__(self)
self.address = address
self.port = port
def run(self):
server = OhtServer(self.address, self.port)
asyncore.loop()
class _Timer(threading.Thread):
    def __init__(self, interval, func, args=None, kwargs=None):
        threading.Thread.__init__(self)
        self.interval = interval
        self.func = func
        # avoid mutable default arguments shared across instances
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = threading.Event()
def cancel(self):
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.func(*self.args, **self.kwargs)
self.finished.set()
class LoopTimer(_Timer):
    def __init__(self, interval, func, args=None, kwargs=None):
        _Timer.__init__(self, interval, func, args, kwargs)
def run(self):
while True:
if not self.finished.is_set():
self.finished.wait(self.interval)
self.func(*self.args, **self.kwargs)
else:
break
def timer_thread():
global isConnected
global isEndTimer
if isEndTimer:
return
if not isConnected:
return
global writeCMDBuffer
global zEncoder
global wheelEncoder
global motorStatus
global recCMD
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait(5)
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[0]
if wheelEncoder[1] == ':':
wheelEncoder = wheelEncoder[2:]
wheelEncoder = int(wheelEncoder)
revCondition.release()
# print(wheelEncoder)
# print('Wheel Encoder: %s' % wheel_encoder)
writeCMDBuffer.append('P2G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P2G6064')
ret = revCondition.wait(5)
if ret:
if len(recCMD) < 2 or recCMD[1] != ':':
z_encoder = recCMD
zEncoder = int(z_encoder)
revCondition.release()
# print("z:%d" % zEncoder)
# print('Z Encoder: %s' % z_encoder)
writeCMDBuffer.append('D')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('D')
ret = revCondition.wait(5)
if ret:
if recCMD[3] == ',':
motor_status = recCMD
if motor_status[:3] == '3ii':
motorStatus = True
elif motor_status[:3] == '3di':
motorStatus = True
else:
motorStatus = False
revCondition.release()
# print(motorStatus)
# print('Motor Status: %s' % motor_status)
def init_controller():
global isConnected
global motorStatus
global writeCMDBuffer
while not isConnected:
print("Wait for controller connect")
time.sleep(1.5)
writeCMDBuffer.append('P41')
time.sleep(0.2)
writeCMDBuffer.append('P4P460FE65537')
time.sleep(0.2)
writeCMDBuffer.append('P21')
while not motorStatus:
time.sleep(1)
writeCMDBuffer.append('D')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('D')
ret = revCondition.wait()
if ret:
if recCMD[3] == ',':
motor_status = recCMD
if motor_status[:3] == '3ii':
motorStatus = True
elif motor_status[:3] == '3di':
motorStatus = True
else:
motorStatus = False
revCondition.release()
print("Wait for Motor init")
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.5)
writeCMDBuffer.append('P22')
time.sleep(0.5)
def scan_flags():
global writeCMDBuffer
global wheelEncoder
global motorStatus
global recCMD
global gFlag
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait()
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[4]
if wheelEncoder[-2] == '1':
gFlag = 1
else:
gFlag = 0
revCondition.release()
while not gFlag:
writeCMDBuffer.append('m9fg601500')
time.sleep(0.3)
writeCMDBuffer.append('E9')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('E9')
ret = revCondition.wait()
if ret:
wheel_encoder = recCMD
wheelEncoder = wheel_encoder.split()[4]
if wheelEncoder[-2] == '1':
gFlag = 1
else:
gFlag = 0
revCondition.release()
time.sleep(0.3)
print("Scanning")
def go_wheel_location(speed, flag, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
global rampState
if speed > 70:
speed = 70
if speed < 10:
speed = 10
cmd = 'r9lf%02d%d%d' % (speed, flag, encoder)
writeCMDBuffer.append(cmd)
rampState = 1
time.sleep(1)
while rampState:
time.sleep(1)
print("Doing")
return True
def go_y_location(speed, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
cmd = 'P4A%03d%d' % (speed, encoder)
writeCMDBuffer.append(cmd)
time.sleep(0.2)
while True:
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
err = encoder - cur_encoder
if -300 < err < 300:
break
writeCMDBuffer.append(cmd)
time.sleep(1.2)
def out_expand(speed, mm):
global writeCMDBuffer
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
    num = mm * 100
    encoder = cur_encoder - num  # computed but never used: the call below always moves to position 0
    go_y_location(speed, 0)
time.sleep(0.5)
def in_expand(speed, mm):
global writeCMDBuffer
cur_encoder = 0
writeCMDBuffer.append('P4G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P4G6064')
revCondition.wait()
y_encoder = recCMD
cur_encoder = int(y_encoder)
revCondition.release()
    num = mm * 100
    encoder = cur_encoder + num  # computed but never used: the call below always moves to position 97000
    go_y_location(speed, 97000)
time.sleep(0.5)
def grip():
cmd = 'm630t3700'
global writeCMDBuffer
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(4)
def release():
cmd = 'm631t3700'
global writeCMDBuffer
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(4)
def go_z_location(speed, encoder):
global gFlag
if gFlag == 0:
print("Flags Error, Reset Flag")
return False
global writeCMDBuffer
cmd = 'P2A%03d%d' % (speed, encoder)
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.5)
writeCMDBuffer.append(cmd)
time.sleep(0.2)
while True:
cur_encoder = 0
writeCMDBuffer.append('P2G6064')
revCondition.acquire()
ret = revCondition.wait(5)
if not ret:
writeCMDBuffer.append('P2G6064')
revCondition.wait()
z_encoder = recCMD
cur_encoder = int(z_encoder)
revCondition.release()
err = encoder - cur_encoder
if -500 < err < 500:
# writeCMDBuffer.append('P2P460FE1')
# time.sleep(0.2)
break
print("Doing, Err:%d" % err)
writeCMDBuffer.append(cmd)
time.sleep(0.5)
writeCMDBuffer.append('P2P460FE196609')
time.sleep(0.2)
def stop_wheel():
writeCMDBuffer.append('r9tf000')
time.sleep(0.3)
writeCMDBuffer.append('m9fb72000')
time.sleep(0.2)
def stop_z():
writeCMDBuffer.append('P2P460FE1')
time.sleep(0.2)
writeCMDBuffer.append('P2P260407')
time.sleep(0.3)
def _async_raise(tid, exc_type):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exc_type):
exc_type = type(exc_type)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exc_type))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
_async_raise(thread.ident, SystemExit)
|
[
"cking616@mail.ustc.edu.cn"
] |
cking616@mail.ustc.edu.cn
|
82d4bb5adde44d0c3f5d9dcf5fd38838388ed335
|
d1c4189ebec01e2e603618418537039fe88b6572
|
/RNA_1/bls_main.py
|
cee12d48be639efa530ce3a2155dcdaed3b691ae
|
[] |
no_license
|
arashk7/BLS_RNA_Classifier
|
0ee32a19dd8a12f72564a0289df0359aa6b222bf
|
6b320b2e538f54cc4960af338e368468be63ff18
|
refs/heads/master
| 2022-12-24T20:41:49.983691
| 2020-09-25T03:30:10
| 2020-09-25T03:30:10
| 298,180,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import seaborn as sn
import matplotlib.pyplot as plt
import numpy as np
from RNA_1.BroadLearningSystem import BLS, BLS_AddEnhanceNodes
features1 = pd.read_csv('../Dataset/Sample1.csv')
features1.head()
features2 = pd.read_csv('../Dataset/Sample2.csv')
features2.head()
features = pd.concat([features1, features2])
features.head()
# print(features)
features = features.replace('mod', 0)
features = features.replace('unm', 1)
features = features.replace(np.nan, 0, regex=True)
# print(features)
X = features[['q1', 'q2', 'q3', 'q4', 'q5', 'mis1', 'mis2', 'mis3', 'mis4', 'mis5']].astype(float)
Y = features['sample'].astype(int)
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
X = sc.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, shuffle=True)
N1 = 10 # # of nodes belong to each window 10
N2 = 10 # # of windows -------Feature mapping layer 10
N3 = 500 # # of enhancement nodes -----Enhance layer 500
L = 5 # # of incremental steps
M1 = 50 # # of adding enhance nodes 50
s = 0.8 # shrink coefficient
C = 2 ** -30 # Regularization coefficient
y_train=y_train.values
y_test = y_test.values
y_train=np.array([y_train]).transpose()
y_test =np.array([y_test]).transpose()
# X_train=X_train[:2]
# y_train=y_train[:2]
print('-------------------BLS_BASE---------------------------')
BLS(X_train, y_train, X_test, y_test, s, C, N1, N2, N3)
print('-------------------BLS_ENHANCE------------------------')
BLS_AddEnhanceNodes(X_train, y_train, X_test, y_test,s,C,N1,N2,N3,L,M1)
|
[
"ahmad.karambakhsh@gmail.com"
] |
ahmad.karambakhsh@gmail.com
|
96df10c1e180c5db30031a1555f1186f51078ddd
|
7de48c4ffee8f6ea59c3398a66fc90a27cf563ef
|
/Algorithms/leetcode/[820]单词的压缩编码.py
|
07f6dfe733617bb2c26efaefa608da3f3d3cc325
|
[] |
no_license
|
XiaoqiMa/StudyNotes
|
9a936e119387b65257b08c2b534fb76a95687a34
|
b4f0d27e647f7a25c93b18d148caf143b91337ce
|
refs/heads/master
| 2022-12-03T16:39:19.206183
| 2020-08-25T14:22:40
| 2020-08-25T14:22:40
| 258,927,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,934
|
py
|
# Given a list of words, we encode it as one index string S plus a list of
# indexes A.
#
# For example, if the list is ["time", "me", "bell"], it can be represented as
# S = "time#bell#" and indexes = [0, 2, 5].
#
# For each index, the original word is recovered by reading characters from
# that position in S until the "#" terminator.
#
# What is the length of the shortest index string S that successfully encodes
# the given word list?
#
# Example:
#
# Input: words = ["time", "me", "bell"]
# Output: 10
# Explanation: S = "time#bell#", indexes = [0, 2, 5].
#
# Constraints:
#
# 1 <= words.length <= 2000
# 1 <= words[i].length <= 7
# Each word consists of lowercase letters only.
#
# leetcode submit region begin(Prohibit modification and deletion)
from collections import defaultdict
class TrieNode(object):
def __init__(self):
self.children = defaultdict(TrieNode)
self.word_end = False
class Trie(object):
def __init__(self):
self.root = TrieNode()
def insert(self, word):
node = self.root
for w in word:
node = node.children[w]
node.word_end = True
def start_with(self, prefix):
node = self.root
for p in prefix:
if p not in node.children:
return False
node = node.children[p]
return True
class Solution(object):
def minimumLengthEncoding(self, words):
"""
:type words: List[str]
:rtype: int
"""
words = sorted(words, reverse=True, key=len)
count = 0
trie = Trie()
for word in words:
if not trie.start_with(word[::-1]):
count += len(word) + 1
trie.insert(word[::-1])
return count
# leetcode submit region end(Prohibit modification and deletion)
|
[
"xiaoqima2013@gmail.com"
] |
xiaoqima2013@gmail.com
|
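Checking the solution above against the example from the problem statement (words are inserted reversed, so a word that is a suffix of an already-inserted word contributes nothing):

# Editor's check using the example given in the comments above.
sol = Solution()
print(sol.minimumLengthEncoding(["time", "me", "bell"]))  # 10  (S = "time#bell#")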
c49070446f9c6e5c95466fbae9f419d4d29905da
|
8d87bf5b2c7125fdaf470b065f90f39a9fadc15c
|
/other/subarray.py
|
c70f60225023c6212fbc9f4eedfec13d8bbd360b
|
[] |
no_license
|
mujavidb/algorithm_solutions
|
50ddb65fa89803fbd1d216f0bf09f18552853b33
|
8c5352d9c2381dd0aeae82c77ae61fbbb3583262
|
refs/heads/master
| 2021-01-10T06:50:07.906460
| 2017-05-13T13:48:21
| 2017-05-13T13:48:21
| 46,529,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
# Length of the longest subarray with sum less than or equal to k
def max_length(s, k):
    # start and end mark the boundaries of the current sliding window
    start = end = total = 0
    maxLength = -1  # only stays -1 for empty input; an over-budget element still yields a length-0 window
for i in s:
total += i
end += 1
while total > k: # Shrink the array from the left, until the sum is <= k.
total -= s[start]
start += 1
maxLength = max(maxLength, end - start)
return maxLength
|
[
"sirmuj@gmail.com"
] |
sirmuj@gmail.com
|
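A worked check of the sliding-window function above:

# Editor's check: for s = [1, 2, 3] and k = 3, the longest window with
# sum <= 3 is [1, 2], so the answer is 2.
print(max_length([1, 2, 3], 3))  # 2
print(max_length([5, 6], 4))     # 0 (every window shrinks to empty), not -1
print(max_length([], 4))         # -1 (loop never runs)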
6f46f4705ff8b0afcec45c239afd53eecb646c90
|
304b0dfb4ae0261d7769963260e99e3b37f588b9
|
/crm1/urls.py
|
f324ade9fceedf306cd1c15467abfdd72c11b5cc
|
[] |
no_license
|
mohdfazlie/crm1
|
bc634af4ae2307e87f3956ec5cd39b9f1c72dc05
|
4c29fa466b6e32f2390bed401464d8621aa497d5
|
refs/heads/main
| 2023-07-13T05:07:21.542412
| 2021-08-26T14:53:21
| 2021-08-26T14:53:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""crm1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('accounts.urls')),
path('admin/', admin.site.urls),
]
|
[
"59987681+mohdfazlie@users.noreply.github.com"
] |
59987681+mohdfazlie@users.noreply.github.com
|
1710dc2802c6eab6b4e818684ead157e2a79fa5c
|
9661db98a656462bd1b666b153f4727d54b436de
|
/sql_queries.py
|
f1b959bd3bccd723530a076000588aab98516936
|
[] |
no_license
|
gingersmog/Udacity-Data-Engineer
|
0f26703e6ba2bfa65b704414a22b6be9971fd940
|
a091acf2bf30d87e3e76ecb40975969d9acc5da5
|
refs/heads/master
| 2020-11-30T07:24:46.081833
| 2019-12-27T00:43:48
| 2019-12-27T00:43:48
| 230,346,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,681
|
py
|
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
songplay_table_create = ("CREATE TABLE IF NOT EXISTS songplays (songplay_id varchar, start_time int, user_id int, level varchar, song_id varchar, artist_id varchar, session_id int, location varchar, user_agent varchar)")
user_table_create = ("CREATE TABLE IF NOT EXISTS users (user_id int, first_name varchar, last_name varchar, gender varchar, level varchar)")
song_table_create = ("CREATE TABLE IF NOT EXISTS songs (song_id varchar, title varchar, artist_id varchar, year int, duration numeric)")
artist_table_create = ("CREATE TABLE IF NOT EXISTS artists (artist_id varchar, name varchar, location varchar, latitude numeric, longitude numeric)")
time_table_create = ("CREATE TABLE IF NOT EXISTS time (start_time time, hour int, day int, week int, month int, year int, weekday int)")
# INSERT RECORDS
songplay_table_insert = ("""
""")
user_table_insert = ("""
""")
song_table_insert = ("INSERT INTO songs (song_id, title, artist_id, year, duration) VALUES (%s, %s, %s, %s, %s)", song_data)
artist_table_insert = ("""
""")
time_table_insert = ("""
""")
# FIND SONGS
song_select = ("""
""")
# QUERY LISTS
create_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
|
[
"noreply@github.com"
] |
noreply@github.com
|
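These query strings are meant to be handed to a DB-API cursor, with the row values (e.g. song_data) supplied at execute time rather than baked into the module constant. A hypothetical sketch, assuming psycopg2 and placeholder connection parameters:

# Hypothetical usage; database name, credentials, and song_data are placeholders.
import psycopg2

conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
cur.execute(song_table_create)
song_data = ('SOMEID12345', 'Some Title', 'ARTISTID123', 2019, 215.5)
cur.execute(song_table_insert, song_data)
conn.commit()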
5a36493f79cd6e0eef01ad3fd5e7a886f6864e24
|
642a6fe8873e651c64a1f62ce1411dbb12cebd98
|
/Python/Python Basic/w3resources/basic_practice_part1/Q26.py
|
157cfd670068ff4c0d280ab3de36bb7def77b268
|
[] |
no_license
|
lydia0423/Machine_Learning
|
4dc9cdb8049c0e26e527ebd35e100eab60186d7a
|
f4b241efe9522dbcb9dfa923f80d028d64a6054d
|
refs/heads/main
| 2023-03-26T18:17:37.029260
| 2021-03-29T04:17:01
| 2021-03-29T04:17:01
| 336,692,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# 26. Write a Python program to create a histogram from a given list of integers.
def histogram(items):
for i in items:
output = ""
times = i
while(times > 0):
output += "*"
times -= 1
print(output)
histogram([1, 2, 3, 4, 5])  # histogram() prints its output and returns None, so don't wrap it in print()
|
[
"lydia08248@yahoo.com"
] |
lydia08248@yahoo.com
|
10e10f48b83650d04791cae5f7b32d3932dabb2d
|
aaf21aa2d251f52e6276bb71f4749a3d1b9e49e9
|
/service/urls.py
|
7e21f058963443c57bce091dd52ec82676bbef18
|
[] |
no_license
|
ClonedOne/parkado_server
|
e6ce7347219cfcbff612c1a1cbef2bbfac63c8c6
|
038f91717765c95173e2cfc28a727cb6d30b37c8
|
refs/heads/master
| 2021-08-31T20:17:30.456706
| 2017-02-08T23:54:47
| 2017-02-08T23:54:47
| 115,138,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from rest_framework.urlpatterns import format_suffix_patterns
from django.conf.urls import url
from service import views
urlpatterns = [
url(r'^parkings/$', views.ParkingList.as_view()),
url(r'^parkings/(?P<pk>[0-9]+)/$', views.ParkingDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"severi.giorgio@gmail.com"
] |
severi.giorgio@gmail.com
|
cb57c6ba8f92e88518798d39afee69873a5fab85
|
77bd1f8e9f9a8e790007db9ddbd346ed851b6880
|
/client/verta/verta/_internal_utils/_artifact_utils.py
|
d2d328ebbbb4d39f3fffa902c1d7d37ead4aa958
|
[
"Apache-2.0"
] |
permissive
|
eyadsibai/modeldb
|
3f5d3fa54c67c3e53e8fa971fc9e659e310d4665
|
b172bd7324d26edb3245f6671ec8351ea03b5832
|
refs/heads/master
| 2023-02-18T08:34:13.269578
| 2023-01-24T21:02:53
| 2023-01-24T21:02:53
| 89,958,297
| 1
| 0
| null | 2017-05-01T20:03:21
| 2017-05-01T20:03:21
| null |
UTF-8
|
Python
| false
| false
| 17,776
|
py
|
# -*- coding: utf-8 -*-
import hashlib
import os
import pickle
import shutil
import tempfile
import zipfile
import cloudpickle
from ..external import six
from .. import __about__
from .importer import maybe_dependency, get_tensorflow_major_version
# default chunk sizes
# these values were all chosen arbitrarily at different times
_64MB = 64 * (10**6) # used for artifact uploads
_32MB = 32 * (10**6) # used in _request_utils
_5MB = 5 * (10**6) # used in this module
# for zip_dir()
# dirs zipped by client need an identifiable extension to unzip during d/l
ZIP_EXTENSION = "dir.zip"
# NOTE: keep up-to-date with Deployment API
CUSTOM_MODULES_KEY = "custom_modules"
REGISTRY_MODEL_KEY = "model"
MODEL_KEY = "model.pkl" # currently used by experiment run
MODEL_API_KEY = "model_api.json"
# TODO: maybe bind constants for other keys used throughout client
# NOTE: if blocklisting more keys, update the docstrings of
# - RegisteredModel.create_standard_model()
# - RegisteredModelVersion.log_artifact()
# - ExperimentRun.log_artifact()
BLOCKLISTED_KEYS = {
CUSTOM_MODULES_KEY,
MODEL_KEY,
MODEL_API_KEY,
"requirements.txt",
"train_data",
"tf_saved_model",
"setup_script",
}
KERAS_H5PY_ERROR = RuntimeError( # https://github.com/h5py/h5py/issues/1732
"Keras encountered an error saving/loading the model due to a bug in h5py v3.0.0;"
' consider downgrading with `pip install "h5py<3.0.0"`'
)
def validate_key(key):
"""
Validates user-specified artifact key.
Parameters
----------
key : str
Name of artifact.
Raises
------
ValueError
If `key` is blocklisted.
"""
if key in BLOCKLISTED_KEYS:
msg = '"{}" is reserved for internal use; please use a different key'.format(
key
)
raise ValueError(msg)
def get_file_ext(file):
"""
Obtain the filename extension of `file`.
This method assumes `file` is accessible on the user's filesystem.
Parameters
----------
file : str or file handle
Filepath or on-disk file stream.
Returns
-------
str
Filename extension without the leading period.
Raises
------
TypeError
If a filepath cannot be obtained from the argument.
ValueError
If the filepath lacks an extension.
"""
if isinstance(file, six.string_types) and not os.path.isdir(file):
filepath = file
elif hasattr(file, "read") and hasattr(file, "name"): # `open()` object
filepath = file.name
else:
raise TypeError(
"unable to obtain filepath from object of type {}".format(type(file))
)
filename = os.path.basename(filepath).lstrip(".")
try:
_, extension = filename.split(os.extsep, 1)
except ValueError:
six.raise_from(ValueError('no extension found in "{}"'.format(filepath)), None)
else:
return extension
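# A minimal usage sketch for get_file_ext(); the filepaths below are
# hypothetical examples and do not need to exist on disk:
#
#   get_file_ext("weights/model.tar.gz")  # -> "tar.gz"
#   get_file_ext("weights/model")         # raises ValueError (no extension)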
def ext_from_method(method):
"""
Returns an appropriate file extension for a given model serialization method.
Parameters
----------
method : str
The return value of `method` from ``serialize_model()``.
Returns
-------
str or None
Filename extension without the leading period.
"""
if method == "keras":
return "hdf5"
elif method in ("joblib", "cloudpickle", "pickle"):
return "pkl"
elif method == "zip":
return "zip"
elif method == ZIP_EXTENSION: # zipped by client
return ZIP_EXTENSION
elif method is None:
return None
else:
raise ValueError("unrecognized method value: {}".format(method))
def reset_stream(stream):
"""
Resets the cursor of a stream to the beginning.
This is implemented with a try-except because not all file-like objects are guaranteed to have
a ``seek()`` method, so we carry on if we cannot reset the pointer.
Parameters
----------
stream : file-like
A stream that may or may not implement ``seek()``.
"""
try:
stream.seek(0)
except AttributeError:
pass
def ensure_bytestream(obj):
"""
Converts an object into a bytestream.
If `obj` is file-like, its contents will be read into memory and then wrapped in a bytestream.
This has a performance cost, but checking beforehand whether an arbitrary file-like object
returns bytes rather than encoded characters is an implementation nightmare.
If `obj` is not file-like, it will be serialized and then wrapped in a bytestream.
Parameters
----------
obj : file-like or object
Object to convert into a bytestream.
Returns
-------
bytestream : file-like
Buffered bytestream of the serialized artifacts.
method : {"joblib", "cloudpickle", "pickle", None}
Serialization method used to produce the bytestream.
Raises
------
pickle.PicklingError
If `obj` cannot be serialized.
ValueError
If `obj` contains no data.
"""
if hasattr(obj, "read"): # if `obj` is file-like
reset_stream(obj) # reset cursor to beginning in case user forgot
# read first element to check if bytes
try:
chunk = obj.read(1)
except TypeError: # read() doesn't take an argument
pass # fall through to read & cast full stream
else:
if chunk and isinstance(chunk, bytes): # contents are indeed bytes
reset_stream(obj)
return obj, None
else:
pass # fall through to read & cast full stream
# read full stream and cast to bytes
reset_stream(obj)
contents = obj.read() # read to cast into binary
reset_stream(obj) # reset cursor to beginning as a courtesy
if not len(contents):
# S3 raises unhelpful error on empty upload, so catch here
raise ValueError("object contains no data")
bytestring = six.ensure_binary(contents)
bytestream = six.BytesIO(bytestring)
bytestream.seek(0)
return bytestream, None
else: # `obj` is not file-like
bytestream = six.BytesIO()
try:
cloudpickle.dump(obj, bytestream)
except pickle.PicklingError: # can't be handled by cloudpickle
pass
else:
bytestream.seek(0)
return bytestream, "cloudpickle"
if maybe_dependency("joblib"):
try:
maybe_dependency("joblib").dump(obj, bytestream)
except (
NameError, # joblib not installed
pickle.PicklingError,
): # can't be handled by joblib
pass
else:
bytestream.seek(0)
return bytestream, "joblib"
try:
pickle.dump(obj, bytestream)
except pickle.PicklingError: # can't be handled by pickle
six.raise_from(pickle.PicklingError("unable to serialize artifact"), None)
else:
bytestream.seek(0)
return bytestream, "pickle"
def serialize_model(model):
"""
Serializes a model into a bytestream, attempting various methods.
Parameters
----------
model : object or file-like
Model to convert into a bytestream.
Returns
-------
bytestream : file-like
Buffered bytestream of the serialized model.
method : {"joblib", "cloudpickle", "pickle", "keras", None}
Serialization method used to produce the bytestream.
model_type : {"torch", "sklearn", "xgboost", "tensorflow", "custom", "callable"}
Framework with which the model was built.
"""
# if `model` is filesystem path
if isinstance(model, six.string_types):
if os.path.isdir(model):
return zip_dir(model), ZIP_EXTENSION, None
else: # filepath
# open and continue
model = open(model, "rb")
# if `model` is file-like
if hasattr(model, "read"):
try: # attempt to deserialize
reset_stream(model) # reset cursor to beginning in case user forgot
model = deserialize_model(model.read())
except (TypeError, pickle.UnpicklingError):
# unrecognized serialization method and model type
return model, None, None # return bytestream
finally:
reset_stream(model) # reset cursor to beginning as a courtesy
# if `model` is a class
if isinstance(model, six.class_types):
model_type = "class"
bytestream, method = ensure_bytestream(model)
return bytestream, method, model_type
    # if `model` is an instance
pyspark_ml_base = maybe_dependency("pyspark.ml.base")
if pyspark_ml_base:
# https://spark.apache.org/docs/latest/api/python/_modules/pyspark/ml/base.html
pyspark_base_classes = (
pyspark_ml_base.Estimator,
pyspark_ml_base.Model,
pyspark_ml_base.Transformer,
)
if isinstance(model, pyspark_base_classes):
temp_dir = tempfile.mkdtemp()
try:
spark_model_dir = os.path.join(temp_dir, "spark-model")
model.save(spark_model_dir)
bytestream = zip_dir(spark_model_dir)
finally:
shutil.rmtree(temp_dir)
# TODO: see if more info would be needed to deserialize in model service
return bytestream, "zip", "pyspark"
for class_obj in model.__class__.__mro__:
module_name = class_obj.__module__
if not module_name:
continue
elif module_name.startswith("torch"):
model_type = "torch"
bytestream, method = ensure_bytestream(model)
break
elif module_name.startswith("sklearn"):
model_type = "sklearn"
bytestream, method = ensure_bytestream(model)
break
elif module_name.startswith("xgboost"):
model_type = "xgboost"
bytestream, method = ensure_bytestream(model)
break
elif module_name.startswith(
"tensorflow.python.keras"
) or module_name.startswith("keras"):
model_type = "tensorflow"
tempf = tempfile.NamedTemporaryFile()
try:
if (
get_tensorflow_major_version() == 2
): # save_format param may not exist in TF 1.X
model.save(
tempf.name, save_format="h5"
) # TF 2.X uses SavedModel by default
else:
model.save(tempf.name)
except TypeError as e:
h5py = maybe_dependency("h5py")
if (
str(e) == "a bytes-like object is required, not 'str'"
and h5py is not None
and h5py.__version__ == "3.0.0"
):
# h5py v3.0.0 improperly checks if a `bytes` contains a `str`.
# Encountering this generic error message here plus the fact
# that h5py==3.0.0 suggests that this is the problem.
six.raise_from(KERAS_H5PY_ERROR, e)
else:
six.raise_from(e, None)
tempf.seek(0)
bytestream = tempf
method = "keras"
break
else:
if hasattr(model, "predict"):
model_type = "custom"
elif callable(model):
model_type = "callable"
else:
model_type = None
bytestream, method = ensure_bytestream(model)
return bytestream, method, model_type
def deserialize_model(bytestring, error_ok=False):
"""
Deserializes a model from a bytestring, attempting various methods.
If the model is unable to be deserialized, the bytes will be returned as a buffered bytestream.
Parameters
----------
bytestring : bytes
Bytes representing the model.
error_ok : bool, default False
Whether to return the serialized bytes if the model cannot be
deserialized. If False, an ``UnpicklingError`` is raised instead.
Returns
-------
model : obj or file-like
Model or buffered bytestream representing the model.
Raises
------
pickle.UnpicklingError
If `bytestring` cannot be deserialized into an object, and `error_ok`
is False.
"""
keras = maybe_dependency("tensorflow.keras")
if keras is not None:
# try deserializing with Keras (HDF5)
with tempfile.NamedTemporaryFile() as tempf:
tempf.write(bytestring)
tempf.seek(0)
try:
return keras.models.load_model(tempf.name)
except AttributeError as e:
h5py = maybe_dependency("h5py")
if (
str(e) == "'str' object has no attribute 'decode'"
and h5py is not None
and h5py.__version__ == "3.0.0"
):
# h5py v3.0.0 returns a `str` instead of a `bytes` to Keras.
# Encountering this generic error message here plus the fact
# that h5py==3.0.0 suggests that this is the problem.
six.raise_from(KERAS_H5PY_ERROR, e)
else:
six.raise_from(e, None)
except (
NameError, # Tensorflow not installed
IOError,
OSError,
): # not a Keras model
pass
bytestream = six.BytesIO(bytestring)
torch = maybe_dependency("torch")
if torch is not None:
try:
return torch.load(bytestream)
except: # not something torch can deserialize
bytestream.seek(0)
# try deserializing with cloudpickle
try:
return cloudpickle.load(bytestream)
except: # not a pickled object
bytestream.seek(0)
if error_ok:
return bytestream
else:
raise pickle.UnpicklingError("unable to deserialize model")
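# Round-trip sketch pairing serialize_model() with deserialize_model();
# `my_model` is a hypothetical in-memory model object:
#
#   bytestream, method, model_type = serialize_model(my_model)
#   restored = deserialize_model(bytestream.read())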
def get_stream_length(stream, chunk_size=_5MB):
"""
Get the length of the contents of a stream.
Parameters
----------
stream : file-like
Stream.
chunk_size : int, default 5 MB
Number of bytes (or whatever `stream` contains) to read into memory at a time.
Returns
-------
length : int
Length of `stream`.
"""
# if it's file handle, get file size without reading stream
filename = getattr(stream, "name", None)
if filename is not None:
try:
return os.path.getsize(filename)
except OSError: # can't access file
pass
# read stream in chunks to get length
length = 0
try:
part_lengths = iter(lambda: len(stream.read(chunk_size)), 0)
        # could be sum() but not sure GC runs during builtin one-liner
        for part_length in part_lengths:
            length += part_length
finally:
reset_stream(stream) # reset cursor to beginning as a courtesy
return length
def calc_sha256(bytestream, chunk_size=_5MB):
"""
Calculates the SHA-256 checksum of a bytestream.
Parameters
----------
bytestream : file-like opened in binary mode
Bytestream.
chunk_size : int, default 5 MB
Number of bytes to read into memory at a time.
Returns
-------
checksum : str
SHA-256 hash of `bytestream`'s contents.
Raises
------
TypeError
If `bytestream` is opened in text mode instead of binary mode.
"""
checksum = hashlib.sha256()
try:
parts = iter(lambda: bytestream.read(chunk_size), b"")
for part in parts:
checksum.update(part)
finally:
reset_stream(bytestream) # reset cursor to beginning as a courtesy
return checksum.hexdigest()
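# Example for calc_sha256(); the expected value is the well-known
# SHA-256 digest of the empty byte string:
#
#   calc_sha256(six.BytesIO(b""))
#   # -> "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"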
def zip_dir(dirpath, followlinks=True):
"""
ZIPs a directory.
Parameters
----------
dirpath : str
Directory path.
Returns
-------
tempf : :class:`tempfile.NamedTemporaryFile`
ZIP file handle.
"""
e_msg = "{} is not a directory".format(str(dirpath))
if not isinstance(dirpath, six.string_types):
raise TypeError(e_msg)
if not os.path.isdir(dirpath):
raise ValueError(e_msg)
os.path.expanduser(dirpath)
tempf = tempfile.NamedTemporaryFile(suffix="." + ZIP_EXTENSION)
with zipfile.ZipFile(tempf, "w") as zipf:
for root, _, files in os.walk(dirpath, followlinks=followlinks):
for filename in files:
filepath = os.path.join(root, filename)
zipf.write(filepath, os.path.relpath(filepath, dirpath))
tempf.seek(0)
return tempf
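# Usage sketch for zip_dir(); the directory path is hypothetical:
#
#   tempf = zip_dir("./my_model_dir")
#   # tempf is an open NamedTemporaryFile whose name ends in ".dir.zip";
#   # the file is removed automatically once the handle is closed.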
def global_read_zipinfo(filename):
"""
Returns a :class:`zipfile.ZipInfo` with ``644`` permissions.
:meth:`zipfile.ZipFile.writestr` creates files with ``600`` [1]_ [2]_,
which means non-owners are unable to read the file, which can be
problematic for custom modules in deployment.
Parameters
----------
filename : str
Name to assign to the file in the ZIP archive.
Returns
-------
zip_info : :class:`zipfile.ZipInfo`
File metadata; the first arg to :meth:`zipfile.ZipFile.writestr`.
References
----------
.. [1] https://github.com/python/cpython/blob/3.9/Lib/zipfile.py#L1791
.. [2] https://bugs.python.org/msg69937
"""
zip_info = zipfile.ZipInfo(filename)
zip_info.external_attr = 0o644 << 16 # ?rw-r--r--
return zip_info
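# Usage sketch for global_read_zipinfo(); the archive and member names
# are hypothetical:
#
#   with zipfile.ZipFile("custom_modules.zip", "w") as zipf:
#       zipf.writestr(global_read_zipinfo("module.py"), b"print('hi')")
#   # "module.py" is stored with 644 permissions instead of 600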
|
[
"noreply@github.com"
] |
noreply@github.com
|
446132c9623e63a2d548c95694992161a693fcff
|
99241f023f2f256c7e6cbce6c6fe3de48627c9f8
|
/build/art-pathplanning-2014/catkin_generated/pkg.develspace.context.pc.py
|
7e3242213bfa7985b084d241f402bb7169f0c3cb
|
[] |
no_license
|
mohsenpour/art-meta
|
02acf40cc00468798113fccf18da59fb82aec698
|
3cb95fc16ae780362d2db52903c818d60e3bfa66
|
refs/heads/master
| 2021-07-25T01:04:38.540640
| 2017-11-04T20:26:31
| 2017-11-04T20:26:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "path_planner"
PROJECT_SPACE_DIR = "/home/art/art-meta/devel"
PROJECT_VERSION = "0.0.0"
|
[
"mo@Mohammads-MacBook-Pro.local"
] |
mo@Mohammads-MacBook-Pro.local
|
b468d83e6f86299cc5a6da5cc3813594132a55dc
|
30b232051b10753e9103a70d88a387dfa1aca63f
|
/164.py
|
2f3c183e1682f60665db64f6c436ac296f2bf23b
|
[] |
no_license
|
samrithasudhagar/guvi2
|
fe6d7af8a73cef515991524d7abad754c3700dc5
|
f7eb8a8b2cd701c2708c414939cc139414d3310d
|
refs/heads/master
| 2020-04-20T12:27:47.748859
| 2019-05-26T09:45:42
| 2019-05-26T09:45:42
| 168,843,977
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# read n (the count) and k (the target), then the n integers
n, k = map(int, input().split())
l = list(map(int, input().split()))
m = 0
if k in l:
    # k itself occurs in the list
    print(k)
else:
    # otherwise report the largest element strictly below k
    for i in l:
        if m < i < k:
            m = i
    print(m)
|
[
"noreply@github.com"
] |
noreply@github.com
|
11e6f191c6447181e1e8bbb7d69eb8cc0edac896
|
65b37165306dff35942a3ce5ba7bb586b85357d9
|
/manage.py
|
890442822014016635d68790f0dddd489a4c63aa
|
[] |
no_license
|
iankigen/drf-pet-store
|
81b803f671c58038da939324a4c28bc21c0b3d25
|
2c87fb5ecde30deac69bfb910d68d495ad932293
|
refs/heads/master
| 2022-05-01T15:01:26.667589
| 2021-03-01T08:40:59
| 2021-03-01T08:40:59
| 197,615,754
| 0
| 0
| null | 2022-04-22T21:50:09
| 2019-07-18T15:45:35
|
Python
|
UTF-8
|
Python
| false
| false
| 811
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pet_store_app.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"ian.Gabe.ian@gmail.com"
] |
ian.Gabe.ian@gmail.com
|
8fb33330b1462f23987648fc31eb06140b7e5caa
|
1e03cd80d27d35ffdc8f68f70a36a461eaae4b9d
|
/apps/common/views.py
|
9da3528729470edad1fb1663ca5e9291ee3c0179
|
[] |
no_license
|
paddy375691/flask_zlbbs
|
bee8f15497c58bd5f1f614d6a686b93301f93d0a
|
90fb142b33aecca6ff66013953ecf6e3e39b6139
|
refs/heads/master
| 2023-02-04T07:55:00.798789
| 2020-12-25T08:51:13
| 2020-12-25T08:51:13
| 324,265,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
from flask import Blueprint
bp = Blueprint('common', __name__, url_prefix='/common')
@bp.route('/')
def index():
return 'common index'
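# Registration sketch (illustrative only; the application below is an
# assumption, not part of this module):
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(bp)  # index() is then served at /common/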
|
[
"you@example.com"
] |
you@example.com
|
8c86c0ef0e47e5a09d544c09f18d6faa5632dd4e
|
f6f5d82eacd022235b3a85892554fbd9415e3bee
|
/src/saltext/vmware/modules/vmware_vsan.py
|
743ab40200464a7dddb1562166121ce43b7d7fe0
|
[
"Apache-2.0"
] |
permissive
|
garethgreenaway/saltext-vmware
|
a3dd81d3df90ae711178c2232fcdc16b9142d69f
|
f0cd63fc3454be43229658fc4ff825455fc9e691
|
refs/heads/main
| 2023-03-29T05:15:08.409668
| 2021-04-01T00:33:54
| 2021-04-01T00:33:54
| 348,489,162
| 0
| 0
|
Apache-2.0
| 2021-03-25T18:45:29
| 2021-03-16T20:55:30
|
Python
|
UTF-8
|
Python
| false
| false
| 21,601
|
py
|
import logging
import sys
import saltext.vmware.utils.vmware
from salt.utils.decorators import depends, ignores_kwargs
log = logging.getLogger(__name__)
try:
# pylint: disable=no-name-in-module
from pyVmomi import (
vim,
vmodl,
pbm,
VmomiSupport,
)
# pylint: enable=no-name-in-module
# We check the supported vim versions to infer the pyVmomi version
if (
"vim25/6.0" in VmomiSupport.versionMap
and sys.version_info > (2, 7)
and sys.version_info < (2, 7, 9)
):
log.debug(
"pyVmomi not loaded: Incompatible versions " "of Python. See Issue #29537."
)
raise ImportError()
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
__virtualname__ = "vmware_vsan"
def __virtual__():
return __virtualname__
def _get_vsan_eligible_disks(service_instance, host, host_names):
"""
Helper function that returns a dictionary of host_name keys with either a list of eligible
disks that can be added to VSAN or either an 'Error' message or a message saying no
eligible disks were found. Possible keys/values look like:
return = {'host_1': {'Error': 'VSAN System Config Manager is unset ...'},
'host_2': {'Eligible': 'The host xxx does not have any VSAN eligible disks.'},
              'host_3': {'Eligible': [disk1, disk2, disk3, disk4]},
'host_4': {'Eligible': []}}
"""
ret = {}
for host_name in host_names:
# Get VSAN System Config Manager, if available.
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
if vsan_system is None:
msg = (
"VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name)
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
continue
# Get all VSAN suitable disks for this host.
suitable_disks = []
query = vsan_system.QueryDisksForVsan()
for item in query:
if item.state == "eligible":
suitable_disks.append(item)
# No suitable disks were found to add. Warn and move on.
# This isn't an error as the state may run repeatedly after all eligible disks are added.
if not suitable_disks:
msg = "The host '{}' does not have any VSAN eligible disks.".format(host_name)
log.warning(msg)
ret.update({host_name: {"Eligible": msg}})
continue
# Get disks for host and combine into one list of Disk Objects
disks = _get_host_ssds(host_ref) + _get_host_non_ssds(host_ref)
# Get disks that are in both the disks list and suitable_disks lists.
matching = []
for disk in disks:
for suitable_disk in suitable_disks:
if disk.canonicalName == suitable_disk.disk.canonicalName:
matching.append(disk)
ret.update({host_name: {"Eligible": matching}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs("credstore")
def vsan_add_disks(
host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True
):
"""
Add any VSAN-eligible disks to the VSAN System for the given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts need to add any VSAN-eligible disks to the host's
VSAN system.
If host_names is not provided, VSAN-eligible disks will be added to the hosts's
VSAN system for the ``host`` location instead. This is useful for when service
instance connection information is used for a single ESXi host.
verify_ssl
Verify the SSL certificate. Default: True
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_add_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_add_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
"""
service_instance = saltext.vmware.utils.vmware.get_service_instance(
host=host,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {}
for host_name, value in response.items():
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = (
"VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name)
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
else:
eligible = value.get("Eligible")
error = value.get("Error")
if eligible and isinstance(eligible, list):
# If we have eligible, matching disks, add them to VSAN.
try:
task = vsan_system.AddDisks(eligible)
saltext.vmware.utils.vmware.wait_for_task(
task, host_name, "Adding disks to VSAN", sleep_seconds=3
)
except vim.fault.InsufficientDisks as err:
log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}})
continue
except Exception as err: # pylint: disable=broad-except
msg = "'vsphere.vsan_add_disks' failed for host {}: {}".format(
host_name, err
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
continue
log.debug(
"Successfully added disks to the VSAN system for host '{}'.".format(
host_name
)
)
# We need to return ONLY the disk names, otherwise Message Pack can't deserialize the disk objects.
disk_names = []
for disk in eligible:
disk_names.append(disk.canonicalName)
ret.update({host_name: {"Disks Added": disk_names}})
elif eligible and isinstance(eligible, str):
# If we have a string type in the eligible value, we don't
# have any VSAN-eligible disks. Pull the message through.
ret.update({host_name: {"Disks Added": eligible}})
elif error:
# If we hit an error, populate the Error return dict for state functions.
ret.update({host_name: {"Error": error}})
else:
# If we made it this far, we somehow have eligible disks, but they didn't
# match the disk list and just got an empty list of matching disks.
ret.update(
{
host_name: {
"Disks Added": "No new VSAN-eligible disks were found to add."
}
}
)
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs("credstore")
def vsan_disable(
host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True
):
"""
Disable VSAN for a given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should disable VSAN.
If host_names is not provided, VSAN will be disabled for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
verify_ssl
Verify the SSL certificate. Default: True
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_disable my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_disable my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
"""
service_instance = saltext.vmware.utils.vmware.get_service_instance(
host=host,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
    # Create a VSAN Configuration Object and set the enabled attribute to False
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = False
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = (
"VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name)
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
else:
try:
# Disable vsan on the host
task = vsan_system.UpdateVsan_Task(vsan_config)
saltext.vmware.utils.vmware.wait_for_task(
task, host_name, "Disabling VSAN", sleep_seconds=3
)
except vmodl.fault.SystemError as err:
log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}})
continue
except Exception as err: # pylint: disable=broad-except
msg = "'vsphere.vsan_disable' failed for host {}: {}".format(
host_name, err
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
continue
ret.update({host_name: {"VSAN Disabled": True}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs("credstore")
def vsan_enable(
host, username, password, protocol=None, port=None, host_names=None, verify_ssl=True
):
"""
Enable VSAN for a given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts should enable VSAN.
If host_names is not provided, VSAN will be enabled for the ``host``
location instead. This is useful for when service instance connection
information is used for a single ESXi host.
verify_ssl
Verify the SSL certificate. Default: True
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.vsan_enable my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.vsan_enable my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
"""
service_instance = saltext.vmware.utils.vmware.get_service_instance(
host=host,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
# Create a VSAN Configuration Object and set the enabled attribute to True
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_system = host_ref.configManager.vsanSystem
# We must have a VSAN Config in place before we can manipulate it.
if vsan_system is None:
msg = (
"VSAN System Config Manager is unset for host '{}'. "
"VSAN configuration cannot be changed without a configured "
"VSAN System.".format(host_name)
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
else:
try:
# Enable vsan on the host
task = vsan_system.UpdateVsan_Task(vsan_config)
saltext.vmware.utils.vmware.wait_for_task(
task, host_name, "Enabling VSAN", sleep_seconds=3
)
except vmodl.fault.SystemError as err:
log.debug(err.msg)
ret.update({host_name: {"Error": err.msg}})
continue
except vim.fault.VsanFault as err:
msg = "'vsphere.vsan_enable' failed for host {}: {}".format(
host_name, err
)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
continue
ret.update({host_name: {"VSAN Enabled": True}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs("credstore")
def get_vsan_enabled(
host,
username,
password,
protocol=None,
port=None,
host_names=None,
verify_ssl=True,
):
"""
Get the VSAN enabled status for a given host or a list of host_names. Returns ``True``
if VSAN is enabled, ``False`` if it is not enabled, and ``None`` if a VSAN Host Config
is unset, per host.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
        tell vCenter which hosts to check whether VSAN is enabled.
If host_names is not provided, the VSAN status will be retrieved for the
``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
verify_ssl
Verify the SSL certificate. Default: True
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_vsan_enabled my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.get_vsan_enabled my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
"""
service_instance = saltext.vmware.utils.vmware.get_service_instance(
host=host,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
host_names = _check_hosts(service_instance, host, host_names)
ret = {}
for host_name in host_names:
host_ref = _get_host_ref(service_instance, host, host_name=host_name)
vsan_config = host_ref.config.vsanHostConfig
        # We must have a VSAN Config in place to get information about VSAN state.
if vsan_config is None:
msg = "VSAN System Config Manager is unset for host '{}'.".format(host_name)
log.debug(msg)
ret.update({host_name: {"Error": msg}})
else:
ret.update({host_name: {"VSAN Enabled": vsan_config.enabled}})
return ret
@depends(HAS_PYVMOMI)
@ignores_kwargs("credstore")
def get_vsan_eligible_disks(
host,
username,
password,
protocol=None,
port=None,
host_names=None,
verify_ssl=True,
):
"""
Returns a list of VSAN-eligible disks for a given host or list of host_names.
host
The location of the host.
username
The username used to login to the host, such as ``root``.
password
The password used to login to the host.
protocol
Optionally set to alternate protocol if the host is not using the default
protocol. Default protocol is ``https``.
port
Optionally set to alternate port if the host is not using the default
port. Default port is ``443``.
host_names
List of ESXi host names. When the host, username, and password credentials
are provided for a vCenter Server, the host_names argument is required to
tell vCenter which hosts to check if any VSAN-eligible disks are available.
If host_names is not provided, the VSAN-eligible disks will be retrieved
for the ``host`` location instead. This is useful for when service instance
connection information is used for a single ESXi host.
verify_ssl
Verify the SSL certificate. Default: True
CLI Example:
.. code-block:: bash
# Used for single ESXi host connection information
salt '*' vsphere.get_vsan_eligible_disks my.esxi.host root bad-password
# Used for connecting to a vCenter Server
salt '*' vsphere.get_vsan_eligible_disks my.vcenter.location root bad-password \
host_names='[esxi-1.host.com, esxi-2.host.com]'
"""
service_instance = saltext.vmware.utils.vmware.get_service_instance(
host=host,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
host_names = _check_hosts(service_instance, host, host_names)
response = _get_vsan_eligible_disks(service_instance, host, host_names)
ret = {}
for host_name, value in response.items():
error = value.get("Error")
if error:
ret.update({host_name: {"Error": error}})
continue
disks = value.get("Eligible")
# If we have eligible disks, it will be a list of disk objects
if disks and isinstance(disks, list):
disk_names = []
# We need to return ONLY the disk names, otherwise
# MessagePack can't deserialize the disk objects.
for disk in disks:
disk_names.append(disk.canonicalName)
ret.update({host_name: {"Eligible": disk_names}})
else:
# If we have disks, but it's not a list, it's actually a
# string message that we're passing along.
ret.update({host_name: {"Eligible": disks}})
return ret
@depends(HAS_PYVMOMI)
@_supports_proxies("esxdatacenter", "vcenter")
@_gets_service_instance_via_proxy
def list_default_vsan_policy(service_instance=None):
"""
Returns the default vsan storage policy.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
        salt '*' vsphere.list_default_vsan_policy
"""
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policies = salt.utils.pbm.get_storage_policies(profile_manager, get_all_policies=True)
def_policies = [p for p in policies if p.systemCreatedProfileType == "VsanDefaultProfile"]
if not def_policies:
raise VMwareObjectRetrievalError("Default VSAN policy was not " "retrieved")
return _get_policy_dict(def_policies[0])
|
[
"gareth@saltstack.com"
] |
gareth@saltstack.com
|
88ac908df79f1845c722a579324a1598126bc0dd
|
644984edc7bd12c395f20b204e0201aca068c541
|
/Basic Data Structures/array/leet_154_FindMinimuminRotatedSortedArrayII.py
|
0fae83c285503ec1c802172d4ef714d78f74bca3
|
[
"MIT"
] |
permissive
|
rush2catch/algorithms-leetcode
|
3bd7ad0fc00d303b520ab6b1f0b3ba84cf449115
|
38a5e6aa33d48fa14fe09c50c28a2eaabd736e55
|
refs/heads/master
| 2021-09-06T17:45:20.295678
| 2018-02-09T08:07:54
| 2018-02-09T08:07:54
| 91,420,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
# Problem: Find Minimum in Rotated Sorted Array II
# Difficulty: Medium
# Category: Array
# Leetcode 154: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array-ii/description/
"""
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element.
The array may contain duplicates.
"""
class Solution(object):
def find_mini(self, nums):
if len(nums) == 0:
return None
start = 0
end = len(nums) - 1
while start < end:
mid = (start + end) // 2
if nums[mid] < nums[end]:
end = mid
elif nums[mid] > nums[end]:
start = mid + 1
else:
end -= 1
return nums[start]
obj = Solution()
arr1 = [4, 5, 6, 7, 0, 1, 2]
arr2 = [3, 4, 5, 0, 1, 1, 1, 2]
arr3 = [1]
print(obj.find_mini(arr1), obj.find_mini(arr2), obj.find_mini(arr3))
|
[
"jiangyuan1988@gmail.com"
] |
jiangyuan1988@gmail.com
|
93cfdd6f6d45a191d083e9d52f5f9dbc9fb63fdc
|
bb459bb1496ba48b2e4a640c97de28541735a337
|
/Pfeature_scripts/bin_at_wp.py
|
051afaab0ad35c6467e41877c19eb861bc8a55f9
|
[] |
no_license
|
Raman1121/AlgPred
|
43f5f38bdc61b887221c23ef7872f2b8f6942c73
|
c437cc930d76e3a123eafac82cae7e656c91eb03
|
refs/heads/master
| 2020-05-30T15:54:57.319206
| 2019-06-23T06:03:52
| 2019-06-23T06:03:52
| 189,832,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,532
|
py
|
import sys
import os
import pandas as pd
import getopt
def bin_at_wp(file,outt) :
filename, file_extension = os.path.splitext(file)
df=pd.read_csv(file,header=None)
############binary matrix for atoms
f = open('matrix_atom.out', 'w')
sys.stdout = f
print("C,H,N,O,S,")
x = []
for i in range(0,5) :
x.append([])
for j in range(0,5) :
if i == j :
x[i].append(1)
else :
x[i].append(0)
print(x[i][j], end=",")
print("")
f.close()
##############associate binary values to atoms
mat = pd.read_csv("matrix_atom.out")
mat1 = mat.iloc[:,:-1]
mat2 = mat1.transpose()
    df1 = pd.read_csv("Data/atom.csv", header=None)
    zz = []
for i in range(0,len(df1)) :
zz.append([])
for j in range(0,len(df1[1][i])) :
temp = df1[1][i][j]
zz[i].append(mat2.loc[temp])
f1 = open('bin_atom', 'w')
sys.stdout = f1
for i in range(0,len(zz)) :
for row in zz[i]:
print(",".join(map(str,row)), end=",")
print("")
f1.close()
with open('bin_atom', 'r') as f:
g = list(f)
for i in range(0,len(g)) :
g[i] = g[i].replace(",\n","")
df1["bin"]=g
#########binary atom values for given file
xx=[]
jj = 0
for i in range(0,len(df)) :
xx.append([])
while jj < len(df[0][i]) :
temp=df[0][i][jj]
for k in range(0,len(df1)) :
if temp == df1[0][k][0] :
xx[i].append(df1.iloc[k,2])
jj += 1
jj = 0
    f2 = open(outt, 'w')
    sys.stdout = f2
    for i in range(0, len(xx)):
        for row in xx[i]:
            print("".join(map(str, row)), end=",")
        print("")
    f2.close()
    sys.stdout = sys.__stdout__  # restore stdout so later output reaches the console
    os.remove("matrix_atom.out")
    os.remove("bin_atom")
def main(argv):
global inputfile
global outputfile
inputfile = ''
outputfile = ''
#option = 1
if len(argv[1:]) == 0:
print ("\nUsage: bin_at_wp.py -i inputfile -o outputfile\n")
print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\n')
print('outputfile : is the file of feature vectors\n')
sys.exit()
try:
opts, args = getopt.getopt(argv,"i:o:",["ifile=","ofile="])
except getopt.GetoptError:
print ("\nUsage: bin_at_wp.py -i inputfile -o outputfile\n")
print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\n')
print('outputfile : is the file of feature vectors\n')
sys.exit(2)
for opt, arg in opts:
if opt == '--help' or opt == '--h':
print ('\nbin_at_wp.py -i inputfile -o outputfile\n')
print('inputfile : file of peptide/protein sequences for which descriptors need to be generated\n')
print('outputfile : is the file of feature vectors\n')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
    bin_at_wp(inputfile, outputfile)
if __name__ == '__main__':
#print(sys.argv)
main(sys.argv[1:])
|
[
"happy07.dutt@gmail.com"
] |
happy07.dutt@gmail.com
|
3ad52e8c095b3ad50975940c78e30707311ab01e
|
32ba9f1c35ae916d33b121daeeea8e1910a447d7
|
/utils/tag.py
|
99083b27f2ee3987fa2f8e96f2665c4f0533c66f
|
[
"MIT"
] |
permissive
|
rituparna/glTools
|
8b02fa2751e1b997f7a202c7df8a3dd3d3032722
|
c512a96c20ba7a4ee93a123690b626bb408a8fcd
|
refs/heads/master
| 2020-03-19T19:23:47.684580
| 2018-06-10T23:53:58
| 2018-06-10T23:53:58
| 136,853,456
| 0
| 0
| null | 2018-06-10T23:46:54
| 2018-06-10T23:46:53
| null |
UTF-8
|
Python
| false
| false
| 2,893
|
py
|
import maya.cmds as mc
import glTools.tools.namingConvention
class Tag( object ):
def __init__(self):
		'''
		Initialize the name tag attribute used on controls.
		'''
self.nameTagAttr = 'nameTag'
def addNameTag(self,control,tag):
'''
Set the name tag value for the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
# Add Tag attribute
		if not mc.objExists(control+'.'+self.nameTagAttr):
mc.addAttr(control,ln=self.nameTagAttr,dt='string')
mc.setAttr(control+'.'+self.nameTagAttr,tag,type='string')
def getNameTag(self,control):
'''
Return the name tag value of the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
# Check tag attribute
if not mc.objExists(control+'.'+self.nameTagAttr): raise Exception('Object '+control+' does not have a "'+self.nameTagAttr+'" attribute!')
# Return tag string value
return mc.getAttr(control+'.'+self.nameTagAttr)
def guessNameTag(self,control,side=True,part=True,optSide=True,subPart=True,node=False):
'''
Return a best guess name tag based on a controls current name.
Uses name element comparison to our naming convention module.
'''
tag = ''
# Get naming convention dictionaries
nameConvention = glTools.tools.namingConvention.NamingConvention()
sideDict = dict((value, key) for key, value in nameConvention.side.iteritems())
partDict = dict((value, key) for key, value in nameConvention.part.iteritems())
subPartDict = dict((value, key) for key, value in nameConvention.subPart.iteritems())
nodeDict = dict((value, key) for key, value in nameConvention.node.iteritems())
# Get name elements
controlElem = control.split(nameConvention.delineator)
controlElemCnt = len(controlElem)
controlElemInd = 0
# Check number of elements
if controlElemCnt < 3: print 'Warning: Name pattern does not match naming convention'
# Get side
if side and sideDict.has_key(controlElem[controlElemInd]):
if controlElem[controlElemInd] != nameConvention.side['center']:
tag += sideDict[controlElem[controlElemInd]].capitalize()
controlElemInd += 1
else: return
# Get part
if part and partDict.has_key(controlElem[controlElemInd][0:-2]):
tag += partDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
else: return
# Get optional side
if optSide and sideDict.has_key(controlElem[controlElemInd][0:-2]):
tag += sideDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get sub-part
if subPart and subPartDict.has_key(controlElem[controlElemInd][0:-2]):
tag += subPartDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get type
if node and nodeDict.has_key(controlElem[controlElemInd]):
tag += nodeDict[controlElem[controlElemInd]].capitalize()
return tag
|
[
"grant@bungnoid.com"
] |
grant@bungnoid.com
|
eb74add9cc0461ba97666dc459e71a6df598d5c1
|
3f7bc519f83609c7aac1ca24615710019f35e20a
|
/pomodoro.py
|
a18ad7133fbabc883a36accbfbf26a447f080c79
|
[
"MIT"
] |
permissive
|
blipml/bliptimer
|
2077f18cb0db7209a27a5fb62aeb7fd746037961
|
f0ec6b1de4ac797b4f3ee98240719851a23b9f81
|
refs/heads/main
| 2023-03-29T12:52:08.841306
| 2021-04-12T06:50:47
| 2021-04-12T06:50:47
| 356,160,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,666
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datetime import datetime as dt, timedelta
from time import time, sleep
import sys
import asyncio
SLEEP_TIME=1
class TimeParseException(Exception):
pass
class Timer(object):
def __init__(
            self, target="0", sleep_time=SLEEP_TIME):
        # parse the target argument itself (format [HH]:[mm]:ss) rather than
        # reading sys.argv, so the timer classes are usable as a library
        self.target = Timer.parse_time(target)
self.elapsed = 0
print(self.target)
self.sleep_time = sleep_time
@staticmethod
def parse_time(string):
mul = [60*60, 60, 1][::-1]
components = string.split(':')[::-1]
if len(components) > len(mul):
raise TimeParseException()
elif len(components) != len(mul):
mul = mul[:len(components)-len(mul)]
_sum = 0
for m, c in zip(mul, components):
_sum += m*int(c)
return _sum
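    # Hedged examples of parse_time() with the [HH]:[mm]:ss format:
    #
    #   Timer.parse_time("25")       # -> 25
    #   Timer.parse_time("1:30")     # -> 90
    #   Timer.parse_time("1:00:00")  # -> 3600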
def start_timer(self):
last = now = time()
while(self.elapsed < self.target):
self.elapsed = now-last
sleep(self.sleep_time)
now = time()
self.tick_callback()
self.end_callback()
def tick_callback(self):
pass
def end_callback(self):
pass
class AsyncTimer(Timer):
# implement coroutines for callbacks?
def __init__(self, *args, **kwargs):
super(AsyncTimer, self).__init__(*args, **kwargs)
self.task = None # ???
class PreemptAsyncTimer(AsyncTimer):
# do we want to be able to somehow pre-empt timers?
def __init__(self, *args, **kwargs):
super(PreemptAsyncTimer, self).__init__(*args, **kwargs)
def cancel(self):
# maybe we don't want this here :)
# self.task.cancel()
pass
class Pomodowo(Timer):
def __init__(self, *args, **kwargs):
super(Pomodowo, self).__init__(*args, **kwargs)
def tick_callback(self):
"""
we can access tick level information here
"""
outfile = './discordtimer'
header = 'pomod🍅w🍅'
output = "{}\n\r◇{}◇".format(header, str(
timedelta(seconds=round(
self.target-self.elapsed))))
with open(outfile, 'w') as f:
f.write(output)
print(output)
class WaterTimer(Timer):
def __init__(self,*args, **kwargs):
super(WaterTimer, self).__init__(*args, **kwargs)
def end_callback(self):
# control OBS scene?
pass
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Pass me a target for the timer format [HH]:[mm]:ss")
exit()
t = Pomodowo(sys.argv[1])
t.start_timer()
|
[
"ocean.kuzuri@gmail.com"
] |
ocean.kuzuri@gmail.com
|
2102290e12b10e2260838df9b63b6bbc4466e206
|
83d5439a809aaf029cfbab22d776a99c8ea85bd2
|
/vista1/migrations/0002_auto_20210412_1851.py
|
2b48628f1e6af25952df7855c37cd8adb70bcf23
|
[] |
no_license
|
lapcpc/exame2
|
cca8933ecc7049737d0c327cddf0cc4e2113ddbb
|
9695dcda5c1668c5bc92073c4c4fb1c2d98e43e5
|
refs/heads/main
| 2023-04-10T10:07:03.448832
| 2021-04-13T00:21:52
| 2021-04-13T00:21:52
| 357,371,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
# Generated by Django 2.2.6 on 2021-04-12 23:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vista1', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='registro',
name='oxigenacion',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='registro',
name='temperatura',
field=models.IntegerField(blank=True, null=True),
),
]
|
[
"lapcpc@hotmail.com"
] |
lapcpc@hotmail.com
|
e037ce0f746846b6294b60c884db7209be1e7464
|
efbc8c73e9ac5cbcb9321518ab06b3965369a5f0
|
/SWEA/D2/1974_스도쿠 검증.py
|
5c70e3a29c30f7e85436a3f0b4edb86ade4466a6
|
[] |
no_license
|
AshOil/APS
|
56b9395dcbb8eeec87a047407d4326b879481612
|
fe5a2cd63448fcc4b11b5e5bc060976234ed8eea
|
refs/heads/master
| 2023-07-15T17:32:20.684742
| 2021-08-23T13:04:05
| 2021-08-23T13:04:05
| 283,709,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
import sys
sys.stdin = open('input_data/1974.txt',"r")
num_dict = {}
T = int(input())
for t in range(1, T+1):
for tt in range(1,10):
num_dict[tt] = list(map(int, input() .split()))
result = True
    # check the rows first
for hori in num_dict.values():
if sorted(hori) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
    # check the columns
    for num in range(9):
        verti_check = []
        for verti in num_dict.values():
            verti_check.append(verti[num])
        if sorted(verti_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
            result = False
    # check the 3x3 blocks
line_start = 0
line_end = 3
block_list = list(num_dict.values())
for __ in range(3):
turn_block_list = block_list[line_start:line_end]
block_start = 0
block_end = 3
for _ in range(3):
block_check = []
for turn in range(3):
for block in turn_block_list[turn][block_start:block_end]:
block_check.append(block)
block_start += 3
block_end += 3
if sorted(block_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
line_start += 3
line_end += 3
if result:
print('#{} 1'.format(t))
else:
print('#{} 0'.format(t))
|
[
"ka0@kakao.com"
] |
ka0@kakao.com
|
9be5e10a4b35c6bcf8efca2e17740a47f51645d7
|
1eba9645eb6922a415cf7c08cd76e3b9f489513b
|
/QuoteEngine/QuoteModel.py
|
89d9de7723398ad7a91577c858d42470df3af006
|
[] |
no_license
|
t4ngojuli3tt/UdacityMeme
|
96741e35162730c675af833302a95501bf5fbbef
|
9b710f3421653c1aee61a040d6da3909b28bdd88
|
refs/heads/main
| 2023-03-04T20:52:44.622380
| 2021-02-12T20:52:35
| 2021-02-12T20:52:35
| 338,428,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
class QuoteModel:
def __init__(self, body, author):
self.author = author
self.body = body
def __repr__(self):
return f'"{self.body}" - {self.author}'
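# A minimal usage sketch; the quote is made up:
#
#   q = QuoteModel("To be or not to be", "Shakespeare")
#   repr(q)  # -> '"To be or not to be" - Shakespeare'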
|
[
"tomasz.janiczko@gmail.com"
] |
tomasz.janiczko@gmail.com
|
38e2ca7ad8bd0105e127dde9200cf8a028208f9b
|
de998d0170e81d61982cef9c3181c41fc926bac3
|
/Hardware/PCB/KiCadScript/KiAuto/KiPcb.py
|
3b58c4d5c530bf82be8c8e8d499bea6348ffbf05
|
[
"BSD-3-Clause"
] |
permissive
|
ymei/TMSPlane
|
f100294e039bb73e77964cf9c38930160d5f86d9
|
3e30749440b1a8a0fd81a848b368cfbce10dfb86
|
refs/heads/master
| 2021-01-23T07:44:23.156705
| 2018-03-25T03:16:43
| 2018-03-25T03:16:43
| 86,438,489
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,380
|
py
|
from __future__ import print_function
import pcbnew
from .util import *
## Class operating on an already established board
#
class KiPcbOp(object):
## @param[in] board Already open and established board object.
def __init__(self, board):
self._board = board
self._io = pcbnew.PCB_IO()
self.layerI = {pcbnew.BOARD_GetStandardLayerName(n):n for n in range(pcbnew.PCB_LAYER_ID_COUNT)}
self.layerN = {s:n for n, s in self.layerI.iteritems()}
# generate a LUT with shape integers to a string
self.padShapes = {
pcbnew.PAD_SHAPE_CIRCLE: "PAD_SHAPE_CIRCLE",
pcbnew.PAD_SHAPE_OVAL: "PAD_SHAPE_OVAL",
pcbnew.PAD_SHAPE_RECT: "PAD_SHAPE_RECT",
pcbnew.PAD_SHAPE_TRAPEZOID: "PAD_SHAPE_TRAPEZOID"
}
if hasattr(pcbnew, 'PAD_SHAPE_ROUNDRECT'):
self.padShapes[pcbnew.PAD_SHAPE_ROUNDRECT] = "PAD_SHAPE_ROUNDRECT"
# returns a dictionary netcode:netinfo_item
self.netCodes = self._board.GetNetsByNetcode()
    ## Build a lookup table mapping layer id to layer name.
def compute_layer_table(self):
self.layerTable = {}
for i in range(pcbnew.PCB_LAYER_ID_COUNT):
self.layerTable[i] = self._board.GetLayerName(i)
return self.layerTable
def layer_id_to_name(self, id):
return self.layerN[id]
def layer_name_to_id(self, name):
return self.layerI[name]
def print_list_of_nets(self):
for netcode, net in self.netCodes.items():
print("netcode: {}, name: {}".format(netcode, net.GetNetname()))
def get_board_boundary(self):
rect = None
for d in self._board.GetDrawings():
if (d.GetLayerName() != "Edge.Cuts"):
continue
if (rect == None):
rect = d.GetBoundingBox()
else:
rect.Merge(d.GetBoundingBox())
if rect:
return [pcbnew.wxPoint(rect.Centre().x-rect.GetWidth()/2, rect.Centre().y-rect.GetHeight()/2),
pcbnew.wxPoint(rect.Centre().x-rect.GetWidth()/2, rect.Centre().y+rect.GetHeight()/2),
pcbnew.wxPoint(rect.Centre().x+rect.GetWidth()/2, rect.Centre().y+rect.GetHeight()/2),
pcbnew.wxPoint(rect.Centre().x+rect.GetWidth()/2, rect.Centre().y-rect.GetHeight()/2)]
else:
return None
## @param[in] layerId consult layerTable, usually 0: F.Cu, 31: B.Cu
def place_footprint(self, lib, name, ref="", loc=(0,0), layerId=0):
mod = self._io.FootprintLoad(lib, name)
p = pcbnew.wxPoint(loc[0], loc[1]) if type(loc) == tuple or type(loc) == list else loc
mod.SetPosition(p)
mod.SetLayer(layerId)
mod.SetReference(ref)
self._board.Add(mod)
def move_footprint(self, ref, loc, rot=0.0, layerId=None, flip=False):
mod = self._board.FindModuleByReference(ref)
if mod == None:
return None
p = pcbnew.wxPoint(loc[0], loc[1]) if type(loc) == tuple or type(loc) == list else loc
mod.SetPosition(p)
if layerId != None:
mod.SetLayer(layerId)
if flip:
mod.Flip(p)
mod.SetOrientation(rot)
def set_footprint_nets(self, ref="", pinNet={1:'/VDD', 2:'/GND'}):
mod = self._board.FindModuleByReference(ref)
if mod == None:
return None
for p in mod.Pads():
netname = pinNet[int(p.GetPadName())]
netcode = self._board.GetNetcodeFromNetname(netname)
print(netname, netcode)
p.SetNetCode(netcode)
return True
def get_fp_pad_pos_netname(self, ref):
mod = self._board.FindModuleByReference(ref)
if mod == None:
return None
        ppn = {}
        for p in mod.Pads():
            pn = int(p.GetPadName())
            if pn in ppn:
                ppn[pn][0].append(p.GetCenter())
            else:
                ppn[pn] = ([p.GetCenter()], p.GetNetname())
return ppn
def add_track(self, posList=[[0,0], [1,1]], width=None, layerId=0, netName="/GND"):
netcode = self._board.GetNetcodeFromNetname(netName)
for i in xrange(len(posList)-1):
t = posList[i]
p1 = pcbnew.wxPoint(t[0], t[1]) if type(t) == tuple or type(t) == list else t
t = posList[i+1]
p2 = pcbnew.wxPoint(t[0], t[1]) if type(t) == tuple or type(t) == list else t
if width == None:
width = self._board.GetDesignSettings().GetCurrentTrackWidth()
track = pcbnew.TRACK(self._board)
track.SetStart(p1)
track.SetEnd(p2)
track.SetWidth(width)
track.SetLayer(layerId)
self._board.Add(track)
track.SetNetCode(netcode)
def add_via(self, pos=[0,0], layerIdPair=(0, 31), netName="/GND", size=None, drill=None, vType=pcbnew.VIA_THROUGH):
netcode = self._board.GetNetcodeFromNetname(netName)
if size == None:
size = self._board.GetDesignSettings().GetCurrentViaSize()
if drill == None:
drill = self._board.GetDesignSettings().GetCurrentViaDrill()
if vType == pcbnew.VIA_THROUGH:
via = pcbnew.VIA(self._board)
via.SetWidth(size)
p1 = pcbnew.wxPoint(pos[0], pos[1]) if type(pos) == tuple or type(pos) == list else pos
via.SetStart(p1)
via.SetEnd(p1)
via.SetLayerPair(layerIdPair[0], layerIdPair[1])
via.SetDrill(drill)
via.SetViaType(pcbnew.VIA_THROUGH)
self._board.Add(via)
via.SetNetCode(netcode)
def add_zone(self, corners=None, layerId=0, netName="/GND",
clearance=mil(4), minWidth=mil(4), padConn=pcbnew.PAD_ZONE_CONN_FULL):
netcode = self._board.GetNetcodeFromNetname(netName)
area = self._board.InsertArea(netcode, 0, layerId, int(corners[0][0]), int(corners[0][1]),
pcbnew.CPolyLine.DIAGONAL_EDGE)
area.SetZoneClearance(clearance)
area.SetMinThickness(minWidth)
area.SetPadConnection(padConn)
for p in corners[1:]:
pw = pcbnew.wxPoint(p[0], p[1]) if type(p) == tuple or type(p) == list else p
area.AppendCorner(pw, -1)
area.Hatch()
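# Illustrative usage sketch; the board filename and net name are
# hypothetical, and mil() comes from .util above:
#
#   board = pcbnew.LoadBoard("example.kicad_pcb")
#   op = KiPcbOp(board)
#   op.add_via(pos=(mil(100), mil(100)), netName="/GND")
#   pcbnew.SaveBoard("example.kicad_pcb", board)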
|
[
"yuan.mei@gmail.com"
] |
yuan.mei@gmail.com
|
74a8b60c7bd5c37aa81e3159a4faeb777038fa95
|
d099ed0f4f34387d80a10490e5d621cbe8944320
|
/tests/decoder_test1.py
|
095efc613dc505ca5cca76fd24ec39c60e198d41
|
[] |
no_license
|
unfallible/jrpg_engine_experiment
|
c8c3da285cec68e4101b022c591325f7764a6efa
|
7709fb197aada8e991220d2174fac457d314d887
|
refs/heads/master
| 2023-08-28T02:47:53.186911
| 2021-10-13T03:21:07
| 2021-10-13T03:21:07
| 411,520,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
from pprint import PrettyPrinter
from JrpgBattle.Attack import VanillaAttack, AttackType
from JrpgBattle.Character import CharacterTemplate
from JrpgBattle.JsonProcessing.JrpgEncoder import decode_jrpg_data, encode_jrpg_data
from fractions import Fraction
import json
def main():
pp = PrettyPrinter(indent=2)
atk1 = VanillaAttack('chocolate', damage=3)
atk2 = VanillaAttack('strawberry', damage=5, stamina_point_cost=150)
atk1_json = json.dumps(atk1, default=encode_jrpg_data)
atk2_json = json.dumps(atk2, default=encode_jrpg_data)
chocolate = json.loads(atk1_json, object_hook=decode_jrpg_data)
strawberry = json.loads(atk2_json, object_hook=decode_jrpg_data)
dude = CharacterTemplate(name='tyler', max_hp=10, offensive_type_affinities={}, defensive_type_affinities={},
attack_list={chocolate, strawberry}, parry_effectiveness=Fraction(3, 4))
encoded_char = json.dumps(dude, default=encode_jrpg_data, indent=2)
print('encoded:')
print(encoded_char)
print('decoded:')
reencoded_char = json.dumps(json.loads(encoded_char, object_hook=decode_jrpg_data), default=encode_jrpg_data, indent=2)
print(reencoded_char)
assert encoded_char == reencoded_char
if __name__ == '__main__':
main()
|
[
"ryn292@mocs.utc.edu"
] |
ryn292@mocs.utc.edu
|
34ea96fab7aa2f5a03931e3a87d652ab5f3e629e
|
f6b5f0d72f3e5deb8a913d0a6d541ef3ad5445cb
|
/braintree/transaction.py
|
5c5a315ac43e7831732e7dec0f4ba96100d19ceb
|
[
"MIT"
] |
permissive
|
hathawsh/braintree_python
|
bf056a4d2b8c8b8094f2c876cea4782dc92c715a
|
4ec0f3696438b8c2117f5917834e67ddbf3ebdc7
|
refs/heads/master
| 2021-01-16T00:04:08.883102
| 2013-07-23T22:11:32
| 2013-07-23T22:11:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,913
|
py
|
import braintree
import urllib
import warnings
from decimal import Decimal
from braintree.add_on import AddOn
from braintree.disbursement_detail import DisbursementDetail
from braintree.discount import Discount
from braintree.successful_result import SuccessfulResult
from braintree.status_event import StatusEvent
from braintree.error_result import ErrorResult
from braintree.resource import Resource
from braintree.address import Address
from braintree.configuration import Configuration
from braintree.credit_card import CreditCard
from braintree.customer import Customer
from braintree.subscription_details import SubscriptionDetails
from braintree.resource_collection import ResourceCollection
from braintree.transparent_redirect import TransparentRedirect
from braintree.exceptions.not_found_error import NotFoundError
from braintree.descriptor import Descriptor
class Transaction(Resource):
"""
A class representing Braintree Transaction objects.
    An example of creating a sale transaction with all available fields::
result = Transaction.sale({
"amount": "100.00",
"order_id": "123",
"channel": "MyShoppingCartProvider",
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2011",
"cvv": "123"
},
"customer": {
"first_name": "Dan",
"last_name": "Smith",
"company": "Braintree Payment Solutions",
"email": "dan@example.com",
"phone": "419-555-1234",
"fax": "419-555-1235",
"website": "https://www.braintreepayments.com"
},
"billing": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America"
},
"shipping": {
"first_name": "Andrew",
"last_name": "Mason",
"company": "Braintree",
"street_address": "456 W Main St",
"extended_address": "Apt 2F",
"locality": "Bartlett",
"region": "IL",
"postal_code": "60103",
"country_name": "United States of America"
}
})
print(result.transaction.amount)
print(result.transaction.order_id)
For more information on Transactions, see https://www.braintreepayments.com/docs/python/transactions/create
"""
def __repr__(self):
detail_list = ["amount", "credit_card", "payment_method_token", "customer_id"]
return super(Transaction, self).__repr__(detail_list)
class CreatedUsing(object):
"""
Constants representing how the transaction was created. Available types are:
* braintree.Transaction.CreatedUsing.FullInformation
* braintree.Transaction.CreatedUsing.Token
"""
FullInformation = "full_information"
Token = "token"
class GatewayRejectionReason(object):
"""
Constants representing gateway rejection reasons. Available types are:
* braintree.Transaction.GatewayRejectionReason.Avs
* braintree.Transaction.GatewayRejectionReason.AvsAndCvv
* braintree.Transaction.GatewayRejectionReason.Cvv
* braintree.Transaction.GatewayRejectionReason.Duplicate
"""
Avs = "avs"
AvsAndCvv = "avs_and_cvv"
Cvv = "cvv"
Duplicate = "duplicate"
class Source(object):
Api = "api"
ControlPanel = "control_panel"
Recurring = "recurring"
class Status(object):
"""
Constants representing transaction statuses. Available statuses are:
* braintree.Transaction.Status.Authorized
* braintree.Transaction.Status.Authorizing
* braintree.Transaction.Status.Failed
* braintree.Transaction.Status.GatewayRejected
* braintree.Transaction.Status.ProcessorDeclined
* braintree.Transaction.Status.Settled
* braintree.Transaction.Status.SettlementFailed
* braintree.Transaction.Status.Settling
* braintree.Transaction.Status.SubmittedForSettlement
* braintree.Transaction.Status.Void
"""
AuthorizationExpired = "authorization_expired"
Authorized = "authorized"
Authorizing = "authorizing"
Failed = "failed"
GatewayRejected = "gateway_rejected"
ProcessorDeclined = "processor_declined"
Settled = "settled"
SettlementFailed = "settlement_failed"
Settling = "settling"
SubmittedForSettlement = "submitted_for_settlement"
Voided = "voided"
class Type(object):
"""
Constants representing transaction types. Available types are:
* braintree.Transaction.Type.Credit
* braintree.Transaction.Type.Sale
"""
Credit = "credit"
Sale = "sale"
@staticmethod
def clone_transaction(transaction_id, params):
return Configuration.gateway().transaction.clone_transaction(transaction_id, params)
@staticmethod
def confirm_transparent_redirect(query_string):
"""
Confirms a transparent redirect request. It expects the query string from the
redirect request. The query string should _not_ include the leading "?" character. ::
result = braintree.Transaction.confirm_transparent_redirect_request("foo=bar&id=12345")
"""
warnings.warn("Please use TransparentRedirect.confirm instead", DeprecationWarning)
return Configuration.gateway().transaction.confirm_transparent_redirect(query_string)
@staticmethod
def credit(params={}):
"""
Creates a transaction of type Credit.
Amount is required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.credit({
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.credit({
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.credit({
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
params["type"] = Transaction.Type.Credit
return Transaction.create(params)
@staticmethod
def find(transaction_id):
"""
Find a transaction, given a transaction_id. This does not return
a result object. This will raise a :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>` if the provided
transaction_id is not found. ::
transaction = braintree.Transaction.find("my_transaction_id")
"""
return Configuration.gateway().transaction.find(transaction_id)
@staticmethod
def refund(transaction_id, amount=None):
"""
Refunds an existing transaction.
It expects a transaction_id.::
result = braintree.Transaction.refund("my_transaction_id")
"""
return Configuration.gateway().transaction.refund(transaction_id, amount)
@staticmethod
def sale(params={}):
"""
Creates a transaction of type Sale. Amount is required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.sale({
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.sale({
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.sale({
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
params["type"] = Transaction.Type.Sale
return Transaction.create(params)
@staticmethod
def search(*query):
return Configuration.gateway().transaction.search(*query)
@staticmethod
def submit_for_settlement(transaction_id, amount=None):
"""
Submits an authorized transaction for settlement.
Requires the transaction id::
result = braintree.Transaction.submit_for_settlement("my_transaction_id")
"""
return Configuration.gateway().transaction.submit_for_settlement(transaction_id, amount)
@staticmethod
def tr_data_for_credit(tr_data, redirect_url):
"""
Builds tr_data for a Transaction of type Credit
"""
return Configuration.gateway().transaction.tr_data_for_credit(tr_data, redirect_url)
@staticmethod
def tr_data_for_sale(tr_data, redirect_url):
"""
Builds tr_data for a Transaction of type Sale
"""
return Configuration.gateway().transaction.tr_data_for_sale(tr_data, redirect_url)
@staticmethod
def transparent_redirect_create_url():
"""
Returns the url to be used for creating Transactions through transparent redirect.
"""
warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
return Configuration.gateway().transaction.transparent_redirect_create_url()
@staticmethod
def void(transaction_id):
"""
Voids an existing transaction.
It expects a transaction_id.::
result = braintree.Transaction.void("my_transaction_id")
"""
return Configuration.gateway().transaction.void(transaction_id)
@staticmethod
def create(params):
"""
Creates a transaction. Amount and type are required. Also, a credit card,
customer_id or payment_method_token is required. ::
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"payment_method_token": "my_token"
})
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"credit_card": {
"number": "4111111111111111",
"expiration_date": "12/2012"
}
})
result = braintree.Transaction.sale({
"type": braintree.Transaction.Type.Sale,
"amount": "100.00",
"customer_id": "my_customer_id"
})
"""
return Configuration.gateway().transaction.create(params)
@staticmethod
def clone_signature():
return ["amount", "channel", {"options": ["submit_for_settlement"]}]
@staticmethod
def create_signature():
return [
"amount", "customer_id", "device_session_id", "merchant_account_id", "order_id", "channel",
"payment_method_token", "purchase_order_number", "recurring", "shipping_address_id",
"tax_amount", "tax_exempt", "type", "venmo_sdk_payment_method_code",
"device_data",
{
"credit_card": [
"token", "cardholder_name", "cvv", "expiration_date", "expiration_month", "expiration_year", "number"
]
},
{
"customer": [
"id", "company", "email", "fax", "first_name", "last_name", "phone", "website"
]
},
{
"billing": [
"first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
"country_code_numeric", "country_name", "extended_address", "locality",
"postal_code", "region", "street_address"
]
},
{
"shipping": [
"first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
"country_code_numeric", "country_name", "extended_address", "locality",
"postal_code", "region", "street_address"
]
},
{
"options": [
"store_in_vault", "store_in_vault_on_success", "submit_for_settlement",
"add_billing_address_to_payment_method", "store_shipping_address_in_vault",
"venmo_sdk_session"
]
},
{"custom_fields": ["__any_key__"]},
{"descriptor": ["name", "phone"]}
]
def __init__(self, gateway, attributes):
if "refund_id" in attributes.keys():
self._refund_id = attributes["refund_id"]
del(attributes["refund_id"])
else:
self._refund_id = None
Resource.__init__(self, gateway, attributes)
self.amount = Decimal(self.amount)
if self.tax_amount:
self.tax_amount = Decimal(self.tax_amount)
if "billing" in attributes:
self.billing_details = Address(gateway, attributes.pop("billing"))
if "credit_card" in attributes:
self.credit_card_details = CreditCard(gateway, attributes.pop("credit_card"))
if "customer" in attributes:
self.customer_details = Customer(gateway, attributes.pop("customer"))
if "shipping" in attributes:
self.shipping_details = Address(gateway, attributes.pop("shipping"))
if "add_ons" in attributes:
self.add_ons = [AddOn(gateway, add_on) for add_on in self.add_ons]
if "discounts" in attributes:
self.discounts = [Discount(gateway, discount) for discount in self.discounts]
if "status_history" in attributes:
self.status_history = [StatusEvent(gateway, status_event) for status_event in self.status_history]
if "subscription" in attributes:
self.subscription_details = SubscriptionDetails(attributes.pop("subscription"))
if "descriptor" in attributes:
self.descriptor = Descriptor(gateway, attributes.pop("descriptor"))
if "disbursement_details" in attributes:
self.disbursement_details = DisbursementDetail(attributes.pop("disbursement_details"))
@property
def refund_id(self):
warnings.warn("Please use Transaction.refund_ids instead", DeprecationWarning)
return self._refund_id
@property
def vault_billing_address(self):
"""
The vault billing address associated with this transaction
"""
return self.gateway.address.find(self.customer_details.id, self.billing_details.id)
@property
def vault_credit_card(self):
"""
The vault credit card associated with this transaction
"""
if self.credit_card_details.token is None:
return None
return self.gateway.credit_card.find(self.credit_card_details.token)
@property
def vault_customer(self):
"""
The vault customer associated with this transaction
"""
if self.customer_details.id is None:
return None
return self.gateway.customer.find(self.customer_details.id)
@property
def is_disbursed(self):
return self.disbursement_details.is_valid
|
[
"code@getbraintree.com"
] |
code@getbraintree.com
|
3f1fa779ebc4ad9bbf4068ec2d29801aa363b470
|
5061a6089dbf1997e2d25ffc47d1dac9761228a0
|
/bellamira/views.py
|
e26206ef766461ebfc5d62f816d97bf238347a6a
|
[] |
no_license
|
DmitriDergalov/Bellamira
|
49ccc903940ac7de82996e069d0032cafff19b8f
|
2b271ded6c5abdc8b9f39efac7682a4c20a5e5b8
|
refs/heads/master
| 2021-08-19T14:45:57.561253
| 2017-11-26T18:10:41
| 2017-11-26T18:10:41
| 112,104,831
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
from django.shortcuts import render
from django.views import generic
from .models import Suit
class SuitsView(generic.ListView):
template_name = 'bellamira/suits.html'
context_object_name = 'suits_list'
def get_queryset(self):
return Suit.objects.all()
|
[
"dergalov.mitya2011@yandex.ru"
] |
dergalov.mitya2011@yandex.ru
|
f2edf4b8d81e383e300f59c74dc202894f3b6498
|
18c0c008de1f1cb4c6d194fd95e114f0c06476fc
|
/listcomprehension/startso.py
|
cab91ee67e82cbce8ffbb284609b470dc5f3934e
|
[] |
no_license
|
betillogalvanfbc/pythoncourse
|
6e18065e463d8b3fde85a205b3a473703fb9a690
|
8e49ec581b1e4f5f477699cf3f3bc030f6478bbd
|
refs/heads/master
| 2022-05-14T12:25:03.342382
| 2020-04-23T23:07:34
| 2020-04-23T23:07:34
| 258,351,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
friends = ["Beto", "Bernanrdo", "Benito"]
starts_s = [friend for friend in friends if friend.startswith("B")]
print(starts_s)
print(friends)
print(friends is starts_s)
print("friends:", id(friends), "starts_s:", id(starts_s))
|
[
"alberto.galvanfbc@gmail.com"
] |
alberto.galvanfbc@gmail.com
|
49adb1a0d02abd33be4f5345e463f2839479722a
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/short/max_pwr/job18.py
|
8d86f952590173e5884246a93a2efab702b53071
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700
| 2021-11-11T21:57:54
| 2021-11-11T21:57:54
| 198,494,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,216
|
py
|
"""
#Trains a DenseNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# creates a file if the job qualified for checkpointing
open('ckpt_qual/' + job_name + '.txt', 'a').close()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
try:
os.rename('finish.json', 'finish_lock.json')
break
except Exception:
pass
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
|
[
"baolin.li1994@gmail.com"
] |
baolin.li1994@gmail.com
|
b74265a2d8ce076cbf5577d14a301cccf86b39ff
|
0c25cc9150add4645d83cad35989ff9c8858c58e
|
/image_processing/classify/ex_svm.py
|
2582c596858bfc54be183cecad8d66c959242cc2
|
[] |
no_license
|
SelinaJing/python_proj
|
be0e7e6c1f5da233a87d764df8f5bb5ec40cf44e
|
8c0e4df00a877464a64548e8d3a655ee2c0879f3
|
refs/heads/master
| 2020-09-09T06:59:09.188166
| 2014-05-25T22:18:23
| 2014-05-25T22:18:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here:
D:\bao\WinPython-64bit-2.7.5.3\settings\.spyder2\.temp.py
"""
import pickle
from svmutil import *
import imtools
import numpy as np
from matplotlib.pyplot import show  # show() is called at the end of the script
# load 2D example points using Pickle
with open('points_normal.pkl','r') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
#convert to lists for libsvm
class_1 = map(list,class_1)
class_2 = map(list,class_2)
labels = list(labels)
samples = class_1+class_2
#create SVM
prob = svm_problem(labels,samples)
param = svm_parameter('-t 2')
# train SVM on data
m = svm_train(prob,param)
res = svm_predict(labels,samples,m)
with open('points_normal_test.pkl','r') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
#convert to lists for libsvm
class_1 = map(list,class_1)
class_2 = map(list,class_2)
# define function for plotting
def predict(x,y,model=m):
return np.array(svm_predict([0]*len(x),zip(x,y),model)[0])
# plot the classification boundary
imtools.plot_2D_boundary([-6,6,-6,6],[np.array(class_1),np.array(class_2)],predict,[-1,1])
show()
|
[
"baoruihan@gmail.com"
] |
baoruihan@gmail.com
|
88b507bb6359e5f3d65c82163e8547cfab8d6b21
|
2c35db3ea04538d4d08a2d62aa4a7dfd295a2173
|
/code_review_plugins/js.py
|
159e9d7f76e3219dd90de4e317229a836fc36933
|
[] |
no_license
|
HugoDelval/pseudo-automated-code-review
|
2ecff25063594cfef6af20aeeaad3724d847599e
|
b04fd7fa0be87a2d88f1609d0167aa7ca1ca55b4
|
refs/heads/master
| 2021-01-12T15:07:06.222743
| 2018-10-01T14:22:05
| 2018-10-01T14:22:05
| 71,704,830
| 1
| 1
| null | 2018-10-01T13:32:09
| 2016-10-23T13:32:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,433
|
py
|
from code_review import CodeReview
class Js(CodeReview):
def __init__(self, directory_path):
super().__init__(directory_path)
self.exclude += []
self.language = "JS"
def launch_code_review(self):
print("Launching JS review for : " + self.directory_path)
"""
Assuming nodeJS :
- new run_cmd( ...
- exec( ...
- spawn( ...
"""
output_run_cmd = self.launch_command(
'egrep "run_cmd[ |\(]+" ' + self.directory_path + ' -R 2> /dev/null |' +
'grep -v ' + '| grep -v '.join(self.exclude) + " | grep -x '^.\{3,300\}'"
)
output_exec = self.launch_command(
'egrep "exec[ |\(]+" ' + self.directory_path + ' -R 2> /dev/null |' +
'grep -v ' + '| grep -v '.join(self.exclude) + " | grep -x '^.\{3,300\}'"
)
output_spawn = self.launch_command(
'egrep "spawn[ |\(]+" ' + self.directory_path + ' -R 2> /dev/null |' +
'grep -v ' + '| grep -v '.join(self.exclude) + " | grep -x '^.\{3,300\}'"
)
self.audit_results.update({
"outputs": {
"output_run_cmd": output_run_cmd,
"output_exec": output_exec,
"output_spawn": output_spawn,
},
"nb_outputs": len(output_spawn) + len(output_run_cmd) + len(output_exec)
})
return self.audit_results
|
[
"hugodelval@gmail.com"
] |
hugodelval@gmail.com
|
78f015df88e7a0ff2590c9378ea411fd33cbebbf
|
83ff6674ad1fc4ac1d9523216149d5dae82f8bbd
|
/trainer/migrations/0009_trainer_image.py
|
50deec7ba1e9345665e98a31775ed109aae6c5ba
|
[] |
no_license
|
4802852/Act-Agora
|
8a75c4622b0e306a514096b0a4aaa3e3360ec26e
|
7603444e32cebd6d5ae2d3a6f8f3a349373120d2
|
refs/heads/master
| 2023-04-28T19:49:18.498810
| 2021-05-10T13:44:03
| 2021-05-10T13:44:03
| 351,899,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
# Generated by Django 3.1.7 on 2021-02-28 20:04
from django.db import migrations
import imagekit.models.fields
class Migration(migrations.Migration):
dependencies = [
('trainer', '0008_auto_20210228_1822'),
]
operations = [
migrations.AddField(
model_name='trainer',
name='image',
field=imagekit.models.fields.ProcessedImageField(blank=True, null=True, upload_to='profiles/'),
),
]
|
[
"4802852@gmail.com"
] |
4802852@gmail.com
|
e80c6918df2886ea94e0f9f355d21492dedf1d0e
|
2b3e08faaa4edb548ef9bd386247f35f50a06766
|
/djangoProject7/migrations/0001_initial.py
|
1d639b122724c8bee5e28d6bbede30f6f3f745c0
|
[] |
no_license
|
aliciawill/pythonproject
|
e495d3a9e5d65768a69ea6ac01ff559e7112dd75
|
f6d166c366522f0e3c5c74fdd11ca7a7b5489ee1
|
refs/heads/master
| 2023-07-08T04:39:59.597131
| 2021-08-22T05:15:08
| 2021-08-22T05:15:08
| 360,455,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Generated by Django 3.2.6 on 2021-08-22 04:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Test',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('tel', models.CharField(max_length=200)),
('addr', models.CharField(max_length=200)),
],
),
]
|
[
"noreply@github.com"
] |
noreply@github.com
|
8085959284cded2def530eb0f5a6b7c2e6f21c18
|
68cc98398c537d783a0e30dab1d09d81f0f15f01
|
/grammar/description_parser.py
|
8949960f8cc9b7dec6557b7c2b5a2290471c481d
|
[] |
no_license
|
Ulitochka/medicine
|
9bef0ed66dae1dd6bff2c821667a20cbc36fd9c3
|
49e55e73db0868592b33a715201e8a5f5ea6926d
|
refs/heads/master
| 2020-04-16T11:34:19.674655
| 2019-01-18T16:14:53
| 2019-01-18T16:14:53
| 165,541,659
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,357
|
py
|
from tools.utils import Utils
class DescriptionParser:
"""
A class implementing a rule-based approach for detecting symptoms in condition descriptions written by the patients themselves.
"""
def __init__(self, *, config_data):
self.config_data = config_data
self.utils = Utils()
self.max_ngrams_size = self.config_data.get("max_ngrams_size")
def preprocessed(self, description):
"""
Method that preprocesses the data:
spelling correction
tokenization
lemmatization
:param description:
:return:
"""
correction = self.utils.spell_checking(description)
tokens = self.utils.tokenization(description)
if correction:
tokens = [correction.get(token, token) for token in tokens]
morpho_info = [self.utils.get_pos(tokens[index], index) for index in range(len(tokens))]
return morpho_info
def parse(self, description, symptoms_block, ngrams, pattern_type='keywords'):
"""
Method that takes n-grams generated from a description together with a list of symptom keywords
split into several categories: object - an object, feel - sensations, place - a location on the
human body, operators - function words (prepositions and conjunctions in our case).
The essence of the method is finding intersections between the words of an n-gram and the words
in the symptom categories. This yields a binary vector for each n-gram. If every component of
that vector (i.e. the intersections with particular key lemmas from the categories) matches the
kw_status variable (a symptom may have no keywords for some category), the symptom can be
assigned to the description.
Each symptoms_block set is therefore a rule for detecting one particular type of symptom.
Possible values of the pattern_type argument: there are two kinds of patterns. keywords are the
key words of the different categories; delimiters exist because the description markup contains
many contradictory cases, and they help resolve situations where the same words could indicate
different symptoms. They are essentially keywords and are used as follows: since the rules are
applied independently (although building a hierarchy would be interesting), a symptom is
assigned only when there is a match with keywords and no match with delimiters.
:param description:
:param symptoms_block:
:param ngrams:
:param pattern_type:
:return:
"""
key_word_ngrams = []
kw_status = {
"object": True if symptoms_block[pattern_type]['object'] else False,
"feel": True if symptoms_block[pattern_type]['feel'] else False,
"place": True if symptoms_block[pattern_type]['place'] else False,
"operators": True if symptoms_block[pattern_type]['operators'] else False
}
if [s for s in kw_status if kw_status[s]]:
if not ngrams:
ngrams = [self.utils.ngrams(description, n) for n in range(2, self.max_ngrams_size)]
n_grams = [
{"ngrams": [t['normal_form'] for t in ngrams],
"object": False,
"feel": False,
"place": False,
"operators": False
} for ngram_variant in ngrams for ngrams in ngram_variant]
for collocation in n_grams:
for kw in symptoms_block[pattern_type]:
for word in symptoms_block[pattern_type][kw]:
if word in collocation['ngrams']:
collocation[kw] = True
checking_status = [collocation[kw] == kw_status[kw] for kw in kw_status]
if all(checking_status):
key_word_ngrams.append(collocation['ngrams'])
return key_word_ngrams
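# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# Assumes the repo's tools.utils is importable and that utils.ngrams() accepts the
# token dicts produced by preprocessed(); the keyword lists below are hypothetical
# placeholders, not the project's real symptom configuration.
if __name__ == '__main__':
    parser = DescriptionParser(config_data={"max_ngrams_size": 4})
    symptoms_block = {"keywords": {"object": ["голова"], "feel": ["боль"],
                                   "place": [], "operators": []}}
    morpho_info = parser.preprocessed("болит голова")
    print(parser.parse(morpho_info, symptoms_block, ngrams=None))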
|
[
"m.domrachev.scientist@gmail.com"
] |
m.domrachev.scientist@gmail.com
|
7ece9319493f54ae20dca0009b5fd429c73c203f
|
6fdda8566a04114d506a09237f6f2d69a5ff9435
|
/ai/Codes/Project/first/pp.py
|
91dbc9f1d63440ab44f01705dc85d3c4d1dcdf84
|
[] |
no_license
|
vgswn/AI
|
d0cff1ae37b02b9359682e35bef5528be9775575
|
0c6d8b1918f53eda400552c440d24192ace0b284
|
refs/heads/master
| 2021-03-16T07:54:23.506696
| 2018-01-22T09:07:15
| 2018-01-22T09:07:15
| 107,943,151
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,881
|
py
|
import random
import queue as q
def RULE_MATCH(matrix, x, y, count, n, m, visited_cell):
d = q.PriorityQueue()
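# dirty neighbours receive a -5 bonus, so the priority queue prefers dirty cells over clean ones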
if x-1 >=0 and visited_cell[x-1][y]==0:
if matrix[x-1][y]=='Dirty':
d.put([1+(-5),'UP'])
else :
d.put([1,'UP'])
if x+1 <= n and visited_cell[x+1][y]==0 :  # n is the max row index, so x+1 may equal n
if matrix[x+1][y]=='Dirty':
d.put([2+(-5),'DOWN'])
else :
d.put([2,'DOWN'])
if y-1 >=0 and visited_cell[x][y-1]==0 :
if matrix[x][y-1]=='Dirty':
d.put([3+(-5),'LEFT'])
else :
d.put([3,'LEFT'])
if y+1 <= m and visited_cell[x][y+1]==0:  # m is the max column index
if matrix[x][y+1]=='Dirty':
d.put([4+(-5),'RIGHT'])
else :
d.put([4,'RIGHT'])
if d.empty() :
flag = 0
while flag == 0:
r = random.randrange(4)
if r == 0 and x - 1 >= 0:
flag = 1
return 'UP'
elif r == 1 and x + 1 <= n:
flag = 1
return 'DOWN'
elif r == 2 and y - 1 >= 0:
flag = 1
return 'LEFT'
elif r == 3 and y + 1 <= m:
flag = 1
return 'RIGHT'
x=d.get()
return x[1]
def SIMPLE_REFLEX_AGENT(matrix, x, y, count, n, m, visited_cell):
if matrix[x][y] == 'Dirty':
matrix[x][y] = 'Clean'
print('Sucked at ', x, y)
return RULE_MATCH(matrix, x, y, count, n, m, visited_cell)
def vacuum_cleaner(matrix, x, y, count, n, m, visited_cell):
if count == (m + 1) * (n + 1):
return
if visited_cell[x][y] == 0:
count = count + 1
visited_cell[x][y] = 1
action = SIMPLE_REFLEX_AGENT(matrix, x, y, count, n, m, visited_cell)
if action == 'UP':
print('going up from ', x, y, ' to ', x - 1, y)
vacuum_cleaner(matrix, x - 1, y, count, n, m, visited_cell)
elif action == 'DOWN':
print('going down from ', x, y, ' to ', x + 1, y)
vacuum_cleaner(matrix, x + 1, y, count, n, m, visited_cell)
elif action == 'LEFT':
print('going left from ', x, y, ' to ', x, y - 1)
vacuum_cleaner(matrix, x, y - 1, count, n, m, visited_cell)
elif action == 'RIGHT':
print('going right from ', x, y, ' to ', x, y + 1)
vacuum_cleaner(matrix, x, y + 1, count, n, m, visited_cell)
n, m = input().split(' ')
n, m = (int(n), int(m))
matrix = []
visited_cell = []
for i in range(n):
matrix.append([])
visited_cell.append([])
for j in range(m):
x = random.randrange(2)
visited_cell[i].append(0)
if x == 0:
matrix[i].append('Clean')
else:
matrix[i].append('Dirty')
print(matrix)
x = random.randrange(n)
y = random.randrange(m)
count = 0
vacuum_cleaner(matrix, x, y, count, n - 1, m - 1, visited_cell)
print(visited_cell)
|
[
"iit2015038@iiita.ac.in"
] |
iit2015038@iiita.ac.in
|
bdb8ce5f430ef2154c672fd3a39dab1855522a76
|
72dd2e55d8c52390940548587c032201f3976a8d
|
/src/train_test.py
|
dd6468ba58f797591279d999bb0d06ff027c9e69
|
[] |
no_license
|
phber/dd2404_bioinformatics
|
f2143266092663829b326ba6bfcb7326e3a4c05f
|
da39e5c8987e92ee8971b3fbfb1215399b0d5d98
|
refs/heads/master
| 2021-09-02T20:26:13.374377
| 2018-01-03T21:47:57
| 2018-01-03T21:47:57
| 115,660,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,203
|
py
|
import pandas as pd
from sklearn.model_selection import KFold
import numpy as np
from Bio.HMM import MarkovModel, Trainer
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
from sklearn.metrics import matthews_corrcoef
from read_write import seq_alphabet, state_alphabet_pos, state_alphabet_neg
from read_write import load_dir, load_signalp, fill_train_df, store_result
from Bio.Seq import Seq
"""Train a HMM using ML-estimation"""
def fit_model(train_df, positive = True):
training_seqs = []
if positive:
builder = MarkovModel.MarkovModelBuilder(state_alphabet_pos, seq_alphabet)
builder.allow_transition('n', 'n')
builder.allow_transition('n', 'h')
builder.allow_transition('h', 'h')
builder.allow_transition('h', 'c')
builder.allow_transition('c', 'c')
builder.allow_transition('c', 'C')
builder.allow_transition('C', 'O')
builder.allow_transition('O', 'O')
builder.allow_transition('C', 'o')
builder.allow_transition('o', 'o')
builder.allow_transition('o', 'M')
builder.allow_transition('M', 'M')
builder.allow_transition('M', 'i')
builder.allow_transition('i', 'i')
builder.set_random_probabilities()
builder.set_initial_probabilities({'n' : 1})
else:
builder = MarkovModel.MarkovModelBuilder(state_alphabet_neg, seq_alphabet)
builder.allow_all_transitions()
builder.destroy_transition('O', 'i')
builder.destroy_transition('O', 'M')
builder.destroy_transition('O', 'o')
builder.set_initial_probabilities({'o' : 0.1, 'i' : 0.4, 'O': 0.5})
training_seqs = []
for i, row in train_df.iterrows():
ann = row['ann']
ann.alphabet = state_alphabet_pos if positive else state_alphabet_neg
train_seq = Trainer.TrainingSequence(row['seq'], ann)
training_seqs.append(train_seq)
model = builder.get_markov_model()
trainer = Trainer.KnownStateTrainer(model)
return trainer.train(training_seqs)
"""Prediction scores"""
def perf_measure(y_true, y_pred):
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
rec = 1.0*tp/(tp+fn)
prec = 1.0*tp/(tp+fp)
f = 2*prec*rec/(prec+rec)
mcc = matthews_corrcoef(y_true, y_pred)
return (prec, rec, f, mcc)
"""Computes test scores for a df with known labels"""
def hmm_test(pos_model, neg_model, test_df):
predictions = []
for i, row in test_df.iterrows():
seq = str(row['seq'])
seq = seq.replace('U', '')
pos_seq, pos_score = pos_model.viterbi(seq, state_alphabet_pos)
neg_seq, neg_score = neg_model.viterbi(seq, state_alphabet_neg)
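# a log-likelihood margin below 2 is treated as a tie and classified as negative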
if abs(pos_score - neg_score) < 2:
predictions.append(0)
elif pos_score > neg_score:
predictions.append(1)
else:
predictions.append(0)
print np.sum(predictions)
return perf_measure(list(test_df['label']), predictions)
"""Helper function for splitting dataframe by binary group"""
def split_df(df, group='label'):
#Split training into negative and positive
gb = df.groupby(group)
groups = [gb.get_group(x) for x in gb.groups]
train_df_neg = groups[0]
train_df_pos = groups[1]
return train_df_neg, train_df_pos
"""Get cross validation scores for training data"""
def cross_validate(df, write = True, validations = 5):
if len(df) == 0:
raise ValueError('No training data was found.')
kf = KFold(n_splits = validations, shuffle = True, random_state = 13)
scores_all = []
scores_non_tm = []
scores_tm = []
for train_index, test_index in kf.split(df):
print 'Running k-fold...'
# Split into train and test
train_df = df.iloc[train_index]
test_df = df.iloc[test_index]
#Split training into negative and positive
train_df_neg, train_df_pos = split_df(train_df, 'label')
#Train models
pos_model = fit_model(train_df_pos, True)
neg_model = fit_model(train_df_neg, False)
#Run tests
test_df_non_tm, test_df_tm = split_df(test_df, 'tm')
scores_all.append(hmm_test(pos_model, neg_model, test_df))
scores_tm.append(hmm_test(pos_model, neg_model, test_df_tm))
scores_non_tm.append(hmm_test(pos_model, neg_model, test_df_non_tm))
if write:
store_result(scores_all, scores_tm, scores_non_tm, 'CROSS VALIDATION', validations)
else:
print 'ALL TESTDATA', scores_all
"""Stats for proteom df"""
def stats(df):
test_df_non_tm, test_df_tm = split_df(df, 'tm')
print 'Non-TM count, TM count:'
print len(test_df_non_tm), len(test_df_tm)
print 'Negative TM count, Positive TM count:'
t1, t2 = split_df(test_df_tm)
print len(t1), len(t2)
print 'Negative Non-TM count, Positive Non-TM count:'
t1, t2 = split_df(test_df_non_tm)
print len(t1), len(t2)
"""Test a model HMM on a proteom with signalP resulsts"""
def test_proteom(train_df, proteom, stopidx, startidx = 1, write = True):
#Load proteom
prot_df = load_signalp(proteom, stopidx, startidx)
if proteom != 'bacillus':
test_df_non_tm, test_df_tm = split_df(prot_df, 'tm')
stats(prot_df)
else:
print len(prot_df), np.sum(prot_df['label'])
scores_all = []
scores_non_tm = []
scores_tm = []
train_df_neg, train_df_pos = split_df(train_df)
pos_model = fit_model(train_df_pos, True)
neg_model = fit_model(train_df_neg, False)
scores_all.append(hmm_test(pos_model, neg_model, prot_df))
if proteom != 'bacillus':
scores_tm.append(hmm_test(pos_model, neg_model, test_df_tm))
scores_non_tm.append(hmm_test(pos_model, neg_model, test_df_non_tm))
if write:
print scores_all
store_result(scores_all, scores_tm, scores_non_tm, 'PROTEOM of ' + proteom, 1)
else:
print 'ALL TESTDATA', np.mean(scores_all)
print 'TM TESTDATA', np.mean(scores_tm)
print 'NON TM TESTDATA', np.mean(scores_non_tm)
df = fill_train_df()
#test_proteom(df, 'bacillus', stopidx = 3, write = True)
#cross_validate(df, validations = 2, write = True)
|
[
"philip@Philips-MacBook.local"
] |
philip@Philips-MacBook.local
|
08389cf04636acaaeff77c935cbdfb741525b0e5
|
5c8b1cb941467020ec52c7bea0a03dff696881b9
|
/simorgh/polls/forms.py
|
4edcd15dd5147c4f47f46c13bd5e269bc2bf07ef
|
[] |
no_license
|
amir1370/simorgh
|
43dfaa4d3c7f4a4489cc35e84f6e24a61408649f
|
83788712a8ef9f5001446a9370aa0e0611eb91fc
|
refs/heads/master
| 2020-05-20T02:22:53.473410
| 2019-07-06T20:03:08
| 2019-07-06T20:03:08
| 185,330,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
from django import forms
from .models import Choice, Question
from django.forms import ModelForm, formset_factory
from django.forms.widgets import RadioSelect
class ChoiceForm(ModelForm):
EXCELLENT, GOOD, MEDIUM, BAD = 'EX', 'GO', 'ME', 'BA'
question_choices = (
(EXCELLENT, 'عالی'),
(GOOD, 'خوب'),
(MEDIUM, 'متوسط'),
(BAD, 'ضعیف')
)
choice_text = forms.ChoiceField(choices=question_choices, widget=RadioSelect(attrs={"required":"required"}), required=True)
class Meta:
model = Choice
fields = ['choice_text']
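# Note: extra=Question.objects.count() is evaluated once at import time, so the number
# of extra forms stays fixed until the process restarts.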
ChoiceFormset = formset_factory(ChoiceForm, extra=Question.objects.count())
|
[
"amirhoseintaherijam@gmail.com"
] |
amirhoseintaherijam@gmail.com
|
f6afdb1bcdb602520eb1f466922a2c9ea015a1db
|
2da476c3ba893f471760edc3553c5d4a98067370
|
/swapcase.py
|
fb2e8fa940996ddd13131980a5a8088c83233d3b
|
[] |
no_license
|
Yashasvini18/EDUYEAR-PYTHON-20
|
4b0b4e37feca34ad76f7d11599f1243c0ae4083b
|
43900aa11a1c57c3834d21ef94fb383be00e6fa6
|
refs/heads/main
| 2023-04-20T04:42:05.263050
| 2021-05-15T12:59:14
| 2021-05-15T12:59:14
| 364,542,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
input_str = input("Enter the word whose case should be swapped: ")
print(input_str.swapcase())
|
[
"noreply@github.com"
] |
noreply@github.com
|
b332b14aa6f86a6013de1b87f5c0920504478890
|
4be56098894a95da5964622fc4102b69e4530ab6
|
/题库/870.矩阵中的幻方.py
|
00fdbac5b41f5978d6246140777b7c174e01c850
|
[] |
no_license
|
ACENDER/LeetCode
|
7c7c7ecc8d0cc52215272f47ec34638637fae7ac
|
3383b09ab1246651b1d7b56ab426a456f56a4ece
|
refs/heads/master
| 2023-03-13T19:19:07.084141
| 2021-03-15T09:29:21
| 2021-03-15T09:29:21
| 299,332,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 870.矩阵中的幻方.py
|
[
"1641429327@qq.com"
] |
1641429327@qq.com
|
f2fb7b797424d3ba6ad396d5b0583a42bb65355b
|
8f3c35b663f554d62d259ccbbd0c972b22dbb382
|
/one_cycle.py
|
5eef32e001aa5794757799e8de06188822047d52
|
[
"MIT"
] |
permissive
|
WenjinSun/kaggle-google-quest
|
29f3875b224dfa4f9748480afbf9c6a12998dcff
|
69f84bb2c4ef83b300b4c7f8378c43f7dd84f7c6
|
refs/heads/master
| 2021-01-09T12:50:03.775453
| 2020-02-13T12:55:02
| 2020-02-13T12:55:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
import math
import numpy as np
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from utils.torch import set_optimizer_mom
def cosine_annealing(it, n_iter, start_val, end_val):
cos_inner = math.pi * (it % n_iter) / n_iter
return ((start_val - end_val) * (math.cos(cos_inner) + 1) / 2) + end_val
def cosine_annealing_range(n_iter, start_val, end_val):
return [cosine_annealing(i, n_iter, start_val, end_val)
for i in range(n_iter)]
class OneCycleLR(LambdaLR):
def __init__(self, optimizer, lr_div_factor=25, warmup_frac=0.3,
mom_range=(0.95, 0.85), n_epochs=10, n_batches=None,
start_epoch=0):
n_batches = 1 if n_batches is None else n_batches
self.n_epochs, self.n_iter = n_epochs, (n_epochs * n_batches) + 1
self.start_it = -1 if start_epoch==0 else start_epoch * n_batches
self._build_schedules(lr_div_factor, mom_range, warmup_frac)
super().__init__(optimizer, self.lr_lambda, last_epoch=self.start_it)
def _build_schedules(self, lr_div_factor, mom_range, warmup_frac):
n_warmup = int(self.n_iter * warmup_frac)
n_decay = self.n_iter - n_warmup
self.lrs = cosine_annealing_range(n_warmup, 1/lr_div_factor, 1)
self.lrs += cosine_annealing_range(n_decay, 1, 1/lr_div_factor)
self.lr_lambda = lambda i: self.lrs[i]
self.moms = cosine_annealing_range(n_warmup, *mom_range)
self.moms += cosine_annealing_range(n_decay, *mom_range[::-1])
self.mom_lambda = lambda i: self.moms[i]
def get_mom(self):
return self.mom_lambda(self.last_epoch)
def step(self, epoch=None):
super().step(epoch)
set_optimizer_mom(self.optimizer, self.get_mom())
def plot_schedules(self):
x = np.linspace(0, self.n_epochs, self.n_iter)
_, ax = plt.subplots(1, 2, figsize=(15, 4))
ax[0].set_title('LR Schedule')
ax[0].set_ylabel('lr')
ax[0].set_xlabel('epoch')
ax[0].plot(x, self.lrs)
ax[1].set_title('Momentum Schedule')
ax[1].set_ylabel('momentum')
ax[1].set_xlabel('epoch')
ax[1].plot(x, self.moms)
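# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# Assumes the repo's utils.torch is on the path and a plain torch optimizer; the tiny
# linear model and the batch count are placeholders for a real training loop that
# steps the scheduler once per batch.
if __name__ == '__main__':
    import torch
    net = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.95)
    sched = OneCycleLR(opt, n_epochs=2, n_batches=5)
    for _ in range(2 * 5):
        opt.step()    # stand-in for a real forward/backward/step
        sched.step()  # also updates the momentum via set_optimizer_mom
    sched.plot_schedules()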
|
[
"48721401+robinniesert@users.noreply.github.com"
] |
48721401+robinniesert@users.noreply.github.com
|
d73f79ef6d4d5880eb4ee7c8c1972b1d25ee4cc0
|
594d059453f90a0b6d43009a630d162bf0ffaa46
|
/company_blog_flask/company_blog/models.py
|
a9b231749d0877d3a577c589061d55afb14b0315
|
[] |
no_license
|
sarfrajobject/Company_blog_post_flask
|
a707ba444b0205b07c69484d509d3a9c578c3100
|
3c0996178ad3eee35dde416bd1542c8b1560fe55
|
refs/heads/master
| 2020-09-08T23:22:43.423191
| 2019-11-12T17:43:01
| 2019-11-12T17:43:01
| 221,273,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,869
|
py
|
# models.py
from company_blog import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
return User.query.get(user_id)
class User(db.Model,UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
profile_image = db.Column(db.String(64),
nullable=False,
default='default_profile.png')
email = db.Column(db.String(),unique=True,
index=True)
username = db.Column(db.String(64),unique=True,
index=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('BlogPost', backref='author', lazy=True)
def __init__(self,email,username,password):
self.email = email
self.username = username
self.password_hash = generate_password_hash(password)
def check_password(self,password):
return check_password_hash(self.password_hash,password)
def __repr__(self):
return f'Username {self.username}'  # without the f prefix the placeholder was never interpolated
#########################################################
class BlogPost(db.Model,UserMixin):
users = db.relationship(User)
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'),
nullable=False)
date = db.Column(db.DateTime, nullable=False,
default=datetime.utcnow)
title = db.Column(db.String(140), nullable=False)
text = db.Column(db.Text, nullable=False)
def __init__(self,title,text,user_id):
self.title = title
self.text = text
self.user_id = user_id
def __repr__(self):
return f'POST_ID: {self.id} : DATE {self.date} : TITLE {self.title}'
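# --- Hedged usage sketch (illustrative addition; assumes an active Flask app context
# so the company_blog imports resolve). ---
# u = User(email='alice@example.com', username='alice', password='s3cret')
# u.check_password('s3cret')  # True: check_password_hash verifies the stored hash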
|
[
"sarfraj.object@gmail.com"
] |
sarfraj.object@gmail.com
|
74578cccd156b76ee3c9c58c8d8df9ba7a5fea79
|
d5e4c3f0d1ae6591d403804fc452a330a62d365d
|
/utils/analysis.py
|
be7fc4ea5d6a59545811ff213818a9c960112f00
|
[
"MIT"
] |
permissive
|
mtasende/Machine-Learning-Nanodegree-Capstone
|
e5ffb435b2c7a19e1e416c98d122f080a2a3ad0c
|
69c5ca499fa52122d51131a0607bc199ec054cfd
|
refs/heads/master
| 2021-05-15T16:12:20.252910
| 2017-10-22T23:42:42
| 2017-10-22T23:42:42
| 107,441,206
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,844
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
CLOSE_COL_NAME = 'Close'
def compute_portfolio_stats(portfolioValue, rfr=0.0, sf=252.0):
cumRet = (portfolioValue.iloc[-1]/portfolioValue.iloc[0]) - 1
dailyRets = get_daily(portfolioValue)
# Statistics below don't take into account the first day value
averageDReturn = dailyRets[1:].mean()
sdDReturn = dailyRets[1:].std()
# Sharpe ratio calculations
if rfr==0.0:
daily_rfr = 0.0
else:
daily_rfr = ((1.0+rfr)**(1.0/sf)) -1
sharpeRatio = np.sqrt(sf)*(dailyRets[1:] - daily_rfr).mean()/(dailyRets[1:]-daily_rfr).std()
return cumRet, averageDReturn, sdDReturn, sharpeRatio
def assess_portfolio(start_date,
end_date,
symbols,
allocations,
initial_capital=1000,
risk_free_rate=0.0,
sampling_frequency=252.0,
data=None,
gen_plot=False,
verbose=False):
"""
This function returns some statistics about a portfolio.
:param start_date: Starting Date; Type = dt.datetime
:param end_date: Ending Date; Type = dt.datetime
:param symbols: A list of ticker symbols; Type: list of strings
:param allocations: A list with the fraction of allocations to each symbol; Type: list of float
:param initial_capital: Starting Value of the portfolio; Type: float/int
:param risk_free_rate: Free rate of interest; Type: float
:param sampling_frequency: Sampling frequency per year (252.0 = daily)
:param data: A dataframe with the data of the s&p500 stocks
:param gen_plot: if True create a plot
:param verbose: if True, print the output data
:returns cumulative_ret: Cumulative Return; Type: float
:returns average_daily_ret: Average Daily Return. If 'sf' is different from 252,
then it is the average in the sampling period instead of "daily"; Type: float
:returns sd_daily_ret: Standard deviation of daily return; Type: float
:returns sharpe_ratio: Sharpe Ratio; Type: float
:returns end_value: End value of portfolio; Type: float/int
"""
adj_close = data.xs(CLOSE_COL_NAME, level='feature').loc[start_date:end_date,symbols]
adj_close /= adj_close.iloc[0] # Normalize to the first day
norm_value = adj_close.dot(allocations) # Get the normalized total value
portfolio_value = pd.DataFrame(norm_value * initial_capital)
# Compute statistics from the total portfolio value
cumulative_ret, \
average_daily_ret, \
sd_daily_ret, \
sharpe_ratio = compute_portfolio_stats(portfolio_value, risk_free_rate, sampling_frequency)
end_value = portfolio_value.iloc[-1]
if gen_plot:
adj_close_SPY = data.xs(CLOSE_COL_NAME, level='feature').loc[start_date:end_date,'SPY']
adj_close_SPY /= adj_close_SPY.iloc[0]
ax = adj_close_SPY.plot(color='g', label='SPY')
ax.plot(norm_value, color='b', label='Portfolio')
plt.title('Daily portfolio value and SPY')
plt.xlabel('Date')
plt.ylabel('Normalized price')
plt.legend(loc='upper left')
plt.show()
if verbose:
print('Start Date: ' + str(start_date))
print('End Date: ' + str(end_date))
print('Symbols: ' + str(symbols))
print('Allocations ' + str(allocations))
print('Sharpe Ratio: %.15f' % sharpe_ratio)
print('Volatility (stdev of daily returns): %.15f' % sd_daily_ret)
print('Average Daily Return: %.15f' % average_daily_ret)
print('Cumulative Return: %.15f' % cumulative_ret)
return float(cumulative_ret), float(average_daily_ret), float(sd_daily_ret), float(sharpe_ratio), float(end_value)
def value_eval(value_df,
risk_free_rate=0.0,
sampling_frequency=252.0,
verbose=False,
graph=False,
data_df=None):
""" This function takes a value of portfolio series, returns some statistics, and shows some plots"""
cumulative_ret = (value_df.iloc[-1]/value_df.iloc[0]) - 1
daily_rets = get_daily(value_df)
# Statistics below don't take into account the first day value
average_daily_ret = daily_rets[1:].mean()
sd_daily_ret = daily_rets[1:].std()
# Sharpe ratio calculations
if risk_free_rate == 0.0:
daily_rfr = 0.0
else:
daily_rfr = ((1.0 + risk_free_rate)**(1.0/sampling_frequency)) -1
sharpe_ratio = np.sqrt(sampling_frequency)*(daily_rets[1:] - daily_rfr).mean()/(daily_rets[1:]-daily_rfr).std()
if verbose:
print('sharpeRatio = %f' % sharpe_ratio)
print('cumRet = %f' % cumulative_ret)
print('sdDReturn = %f' % sd_daily_ret)
print('averageDReturn = %f' % average_daily_ret)
print('Final Value: %f' % value_df.iloc[-1])
if graph:
if data_df is not None:
value_df = value_df \
.join(data_df.xs('Close', level='feature').loc[:, 'SPY'], how='left')
value_df = value_df / value_df.iloc[0]
value_df.plot()
return sharpe_ratio.values[0], \
cumulative_ret.values[0], \
average_daily_ret.values[0], \
sd_daily_ret.values[0], \
value_df.iloc[-1].values[0]
# Returns the daily change of a variable
def get_daily(data):
daily = data.copy()
daily[1:] = (data[1:] / data[:-1].values)-1
daily.iloc[0, :] = np.zeros(len(data.columns))  # .ix was removed from pandas; iloc keeps the behaviour
return daily
# Printing some basic data about a ticker's attribute data
def basic_data(ticker,attr_data):
print('Ticker name: '+ticker)
print(' Mean: %f'% attr_data[ticker].mean())
print(' Std: %f'% attr_data[ticker].std())
print(' Kurtosis: %f'% attr_data[ticker].kurtosis())
print('')
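# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# Builds a synthetic one-column portfolio-value frame with noisy daily growth and
# prints the headline statistics; all numbers below are arbitrary.
if __name__ == '__main__':
    dates = pd.date_range('2017-01-02', periods=252, freq='B')
    rets = 0.0005 + 0.001 * np.random.randn(252)
    value = pd.DataFrame({'value': 1000.0 * np.cumprod(1.0 + rets)}, index=dates)
    cum_ret, avg_ret, sd_ret, sharpe = compute_portfolio_stats(value)
    print('cumRet=%.4f avg=%.6f sd=%.6f sharpe=%.2f'
          % (float(cum_ret), float(avg_ret), float(sd_ret), float(sharpe)))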
|
[
"mtasendebracco@antel.com.uy"
] |
mtasendebracco@antel.com.uy
|
c713a4ef7e8152c9c7d6aad296a20e9a6724c730
|
4909d98c871fbe71daf7538a2a074751333552b8
|
/file_io.py
|
2397248b04b3ebda2316861d725e553b189d0e68
|
[] |
no_license
|
jamathis77/Python-fundamentals
|
68653da4928b97a336888f2aac840ad26f6d4a6e
|
2e35a1ef384671b912dae81ba96827ec45576857
|
refs/heads/master
| 2020-08-14T09:40:09.375318
| 2019-10-14T20:39:04
| 2019-10-14T20:39:04
| 215,143,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
# # Create a Python file, from Python, that itself modifies the testing.txt file to say
# # I'm sorry, I cannot do that.
# python_file = open("./file_io_hub/change_text.py", "x")
# valid_code = """
# to_change = open("./testing.txt", "w")
# to_change.write("I'm sorry, I cannot do that")
# to_change.close()
# """
# python_file.write(valid_code)
# python_file.close()
with open("with_statement.py", "w") as f:
f.write("# Im a comment\nprint('Hello world')")
|
[
"jamathis77@gmail.com"
] |
jamathis77@gmail.com
|
0185767e3d02ccdf330cda6d1f5e806dad9a8b00
|
aa0ab3eaee3a04eb39f1819cb411ce9fa2062c14
|
/scripts/driver_messaging/list_difference.py
|
1a1e13d62c6c560bce2fa535225742ac83bce60e
|
[] |
no_license
|
gilkra/tweet_proofer
|
30fd99dd11306e805526044a155a9c34dffc0713
|
04ead63aeb2cb8f0e2a92cc39a731ba926d9b617
|
refs/heads/master
| 2021-01-12T06:19:58.967602
| 2016-12-25T21:11:27
| 2016-12-25T21:11:27
| 77,342,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
import pandas as pd
import numpy as np
uuids = pd.read_csv('calc_didnt_drive.csv')
all_id = uuids['All']
not_paid = uuids['Not_Paid']
paid = uuids['Paid']
new_list = list(set(all_id)-set(not_paid)-set(paid))
new_list = pd.DataFrame(new_list)
new_list.to_csv('didnt_drive.csv', index = False, header=False)
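# note: the set() round-trip removes duplicate uuids and does not preserve the
# original row order of the 'All' column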
|
[
"kazimirovg@gmail.com"
] |
kazimirovg@gmail.com
|
1bd246b511bcb25535f008e43dec5d7633a97690
|
2c112f781016f2022dc7ff1c616b1f57185fe8f8
|
/tests/conftest.py
|
34a2935ba0d6d69229c0b0455e16b60a8fcb1f85
|
[] |
no_license
|
dominicgs/Website
|
c15312a5b081b42db880b99df6811c8c04777824
|
fc3587daacff20ec3ab590df121c9f693f09a8ce
|
refs/heads/master
| 2020-03-21T16:49:07.492309
| 2018-06-26T21:41:50
| 2018-06-26T21:41:50
| 64,015,414
| 0
| 1
| null | 2016-07-23T12:33:16
| 2016-07-23T12:33:16
| null |
UTF-8
|
Python
| false
| false
| 2,265
|
py
|
" PyTest Config. This contains global-level pytest fixtures. "
import os
import os.path
import pytest
import shutil
from models.user import User
from main import create_app, db as db_obj, Mail
from utils import CreateBankAccounts, CreateTickets
@pytest.fixture(scope="module")
def app():
""" Fixture to provide an instance of the app.
This will also create a Flask app_context and tear it down.
This fixture is scoped to the module level to avoid too much
Postgres teardown/creation activity which is slow.
"""
if 'SETTINGS_FILE' not in os.environ:
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
os.environ['SETTINGS_FILE'] = os.path.join(root, 'config', 'test.cfg')
tmpdir = os.environ.get('TMPDIR', '/tmp')
prometheus_dir = os.path.join(tmpdir, 'emf_test_prometheus')
os.environ['prometheus_multiproc_dir'] = prometheus_dir
if os.path.exists(prometheus_dir):
shutil.rmtree(prometheus_dir)
if not os.path.exists(prometheus_dir):
os.mkdir(prometheus_dir)
app = create_app()
with app.app_context():
try:
db_obj.session.close()
except:
pass
db_obj.drop_all()
db_obj.create_all()
CreateBankAccounts().run()
CreateTickets().run()
yield app
db_obj.session.close()
db_obj.drop_all()
@pytest.fixture
def client(app):
" Yield a test HTTP client for the app "
yield app.test_client()
@pytest.fixture
def db(app):
" Yield the DB object "
yield db_obj
@pytest.fixture
def request_context(app):
" Run the test in an app request context "
with app.test_request_context('/') as c:
yield c
@pytest.fixture
def user(db):
" Yield a test user. Note that this user will be identical across all tests in a module. "
email = 'test_user@test.invalid'
user = User.query.filter(User.email == email).one_or_none()
if not user:
user = User(email, 'Test User')
db.session.add(user)
db.session.commit()
yield user
@pytest.fixture
def outbox(app):
" Capture mail and yield the outbox. "
mail_obj = Mail()
with mail_obj.record_messages() as outbox:
yield outbox
|
[
"russ@garrett.co.uk"
] |
russ@garrett.co.uk
|
fafb61cff6256896c850cd738ec16f4c6209143c
|
279ed6bd82f604f40fc7d13a189b99b9f6c70c8b
|
/subtask1.py
|
4d8ddcfba87cc5b42c8101e575ba336ba945a7b0
|
[] |
no_license
|
AtarioGitHub/Sub-task-8.1-and-8.2
|
bbf8044d5bfc7f2b1c0c99a743948247a8304151
|
27430455d049f2bb04b6cb8f45f23ad7869100e6
|
refs/heads/main
| 2023-09-03T21:37:37.945018
| 2021-11-14T22:19:14
| 2021-11-14T22:19:14
| 428,054,172
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
import math
n = input('Enter your number which we will call n = ')
print('n = ',n)
DecimalNumberofN = float(n) # We use float so that we can also use a decimal value
TheSquareRootofThatNumber = math.sqrt(DecimalNumberofN)
xyz = int(TheSquareRootofThatNumber) # Let xyz be an integer value
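# squaring the truncated root gives q, the largest perfect square not exceeding n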
q=pow(xyz,2)
print('q = ', q)
|
[
"noreply@github.com"
] |
noreply@github.com
|
38cb295c9d71a9429c4d97b4d12140241d2a185c
|
3efc463fb20d9f7532bd0da732f23f37ad425a54
|
/main.py
|
a43f499159f67c42b9990ea8ce9b90f694d445f8
|
[] |
no_license
|
workfunction/demosacing_py
|
0d44488a555e932fe7f8159d64571c7cdab1b141
|
b345c2f4e0db953ee581666989f8dbbe8f7579ba
|
refs/heads/master
| 2023-06-27T20:09:31.158870
| 2020-02-03T05:58:22
| 2020-02-03T05:58:22
| 245,328,695
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
from mepi import MEPI
from mepi_delta import MEPR
from mosaic import Mosaic
from MGBI5 import MGBI_5
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
import math
import tensorflow as tf
import sys
import os
def read_img(img):
return tf.convert_to_tensor(img, dtype=np.uint8)
@tf.function
def do_psnr(tf_img1, tf_img2):
return tf.image.psnr(tf_img1, tf_img2, max_val=255)
def main():
if len(sys.argv) != 3:
print("No input file!")
return -1
path = sys.argv[1]
if not os.path.isfile(path):
print("File '" + path + "' does not exist!")
return -1
oim = Image.open(path)
oimage = np.array(oim, dtype=np.uint8)
im = oim.resize((int(oim.width/2), int(oim.height/2)), Image.BICUBIC)
image = np.array(im, dtype=np.uint8)
mos = Mosaic(image, im.width, im.height)
mimage = mos.Algorithm()
epi = MEPR(mimage, 2)
out = epi.Algorithm()
#plt.imshow(out, cmap='gray', vmin=0, vmax=255)
#mos = MGBI_5(out)
#out = mos.Algorithm()
#out[:3, :, :] = oimage[:3, :, :]
#out[:, :3, :] = oimage[:, :3, :]
#out[-3:, :, :] = oimage[-3:, :, :]
#out[:, -3:, :] = oimage[:, -3:, :]
plt.imshow(out[...,1], cmap='gray', vmin=0, vmax=255)
#plt.imshow(out)
plt.show()
im = Image.fromarray(out)
filename, ext = os.path.splitext(path)
im.save("result/" + os.path.basename(filename) + "_2x" + ext)
p = do_psnr(read_img(oimage), read_img(out))
print(p)
#im.save("result/kodim01.png")
f = open("demofile2.txt", "a")
f.write(os.path.basename(filename) + ": " + str(p) + "\n")
f.close()
if __name__ == "__main__":
main()
|
[
"jason840507@gmail.com"
] |
jason840507@gmail.com
|
fe5e0fe2bf92a1ef8907a67d02628bc6953df529
|
c180719c80c8078e689d1add2e1ae0ab62095d67
|
/website/settings.py
|
44f8b0bb47975a133c01d60909fb24be87e73608
|
[] |
no_license
|
SeekerHub/Django_webapp
|
2ba1cf05e93f229cdd773eb9db870edf2389e805
|
342537921cb53e6f071aadf62f65b16d784a56f3
|
refs/heads/master
| 2020-06-05T20:01:28.880496
| 2019-09-27T14:48:35
| 2019-09-27T14:48:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,639
|
py
|
"""
Django settings for website project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f#kdl=s37%p=e-wex_tim2viw^qb_9=o%%1guy0^qk3-2!-=8g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'users.apps.UsersConfig',
'crispy_forms', #The new app created by us
'blog.apps.BlogConfig', #The new app created by us
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # where uploaded files live on disk; Django creates a 'media' folder and saves uploads into it
MEDIA_URL = '/media/' # the URL prefix under which uploaded images are accessed
CRISPY_TEMPLATE_PACK = 'bootstrap4' # tells crispy_forms to render with the Bootstrap 4 template pack
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
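# --- Illustrative note (not part of this settings module): with MEDIA_URL and
# MEDIA_ROOT set as above, uploads are typically served in development by
# extending the project urls.py with the standard Django helper:
#
#   from django.conf import settings
#   from django.conf.urls.static import static
#
#   urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)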
|
[
"bhaveshgandhi1999@gmail.com"
] |
bhaveshgandhi1999@gmail.com
|
569012129e554d8d356b12f5bb351e3f331f6621
|
d93529bee8c03312d376f7e0cc3d42aa538fc0d4
|
/travello/views.py
|
0ec5884194d205d78ff13645de4e84dbd03ba7f9
|
[] |
no_license
|
AL-NOMA/django-projects
|
962a9caaf4b75e090731a279f890c8b4bb0e7361
|
230e701b2890ac87995ec5b0dacfbf52badd788a
|
refs/heads/master
| 2023-06-03T11:51:20.187755
| 2021-06-16T12:10:00
| 2021-06-16T12:10:00
| 375,130,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Place
# Create your views here.
def index(request):
places = Place.objects.all()
context = {'places': places}
return render(request, 'travello/homepage.html', context)
def destination(request, place_id):
place = get_object_or_404(Place, pk=place_id)
context = {'place': place}
return render(request, 'travello/destination.html', context)
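# --- Illustrative sketch (not part of the original file): URLconf wiring these
# views assume; the route names below are hypothetical.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('<int:place_id>/', views.destination, name='destination'),
#   ]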
|
[
"alexisnomanyo@gmail.com"
] |
alexisnomanyo@gmail.com
|
1fc74891fa1324f804b07585e2b154d9b49afdf6
|
de681ebfa95a07c04fbb1280bf722847b06ee548
|
/migrations/versions/3fd0d7bc25ea_create_tag_model.py
|
c5297e733152e5145f95a89eca64b85173b984bb
|
[] |
no_license
|
Dzhoker/flask-lessons
|
156957ed29a674df474cfc6b8cdca12adae021d7
|
590e436516dbd8a3a9af4ad33aafbc854088a6aa
|
refs/heads/master
| 2023-03-18T09:44:19.431920
| 2021-02-18T02:52:07
| 2021-02-18T02:52:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 761
|
py
|
"""create Tag model
Revision ID: 3fd0d7bc25ea
Revises: 1b2fd89e61b5
Create Date: 2021-01-12 07:40:03.728879
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3fd0d7bc25ea'
down_revision = '1b2fd89e61b5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=32), server_default='', nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
# ### end Alembic commands ###
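# --- Illustrative note (not part of the generated file): this revision is
# applied and reverted with the standard Alembic CLI:
#
#   alembic upgrade 3fd0d7bc25ea    # or: alembic upgrade head
#   alembic downgrade 1b2fd89e61b5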
|
[
"surenkhorenyan@gmail.com"
] |
surenkhorenyan@gmail.com
|
933cecb82f03c9b88ce4e634c0b0e9c7bf32ba2a
|
1fc6d2baa958992adecd42d57e89f979972dc3c5
|
/random/py/05.level_order_bfs.py
|
89bcc6c4ae80c9181ea30c396c217251888dbe28
|
[] |
no_license
|
Ritapravo/cpp
|
b5c56d3b649b0fd2698482e960e39217d547e8fb
|
0510f41b5ff5c59126461f4c36b3d05c9b1a362e
|
refs/heads/master
| 2021-08-20T06:30:49.176592
| 2021-07-12T17:43:43
| 2021-07-12T17:43:43
| 216,426,587
| 5
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
#level node tree using bfs
#https://www.geeksforgeeks.org/level-node-tree-source-node-using-bfs/
from collections import deque as queue
def makegraph():
n = int(input())
graph = [[] for i in range(n)]
for i in range(n-1):
x,y = map(int, input().split())
graph[x].append(y)
graph[y].append(x)
return graph
def bfs(graph, u):
n = len(graph)
vis = [False]*n
vis[u] = True
levels = []
q = queue()
q.append(u)
k = 1
q_size = 1
    while q_size != 0:
        k -= 1
        if k == 0:
k = q_size
levels.append(list(q))
v = q.popleft()
q_size -= 1
for i in graph[v]:
if not vis[i]:
vis[i] = True
q.append(i)
q_size += 1
return levels
if __name__ == "__main__":
#graph = [[1, 2], [0, 3, 4, 5], [0, 6], [1], [1], [1], [2, 7], [6]]
graph = [[],[2,3],[1,4,5],[1,6,7],[2,8],[2,8],[3,8],[3,8],[4,5,6,7]]
levels = bfs(graph, 1)
print(levels)
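    # --- Illustrative note (not in the original file): for the sample graph
    # above, bfs(graph, 1) groups vertices by distance from the source:
    #   [[1], [2, 3], [4, 5, 6, 7], [8]]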
|
[
"ritopravosarker@gmail.com"
] |
ritopravosarker@gmail.com
|
5ebf743396b7da4369c2afb4a306a720519eb571
|
46645262901ac3117e8959b9a334617825def109
|
/python/couler/couler/pyfunc.py
|
c13509fdc5e68b39bf61472a6167618c496d987b
|
[
"Apache-2.0"
] |
permissive
|
ggaaooppeenngg/sqlflow
|
e8dc3c641db6aa3ba91f201f3f46d0d4bb11b83c
|
e2f04c6482d722aa952707c19f62670305a2bc3c
|
refs/heads/develop
| 2020-09-17T03:57:43.047158
| 2019-11-25T09:59:49
| 2019-11-25T09:59:49
| 223,980,975
| 0
| 0
|
Apache-2.0
| 2019-12-19T11:08:19
| 2019-11-25T15:28:07
| null |
UTF-8
|
Python
| false
| false
| 5,408
|
py
|
# Copyright 2019 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import importlib
import inspect
import os
import re
import textwrap
def _argo_safe_name(name):
"""Some names are to be used in the Argo YAML file. For example,
the generateName and template name in
https://github.com/argoproj/argo/blob/master/examples/hello-world.yaml. As
Argo is to use the YAML as part of Kubernetes job description
YAML, these names must follow Kubernetes's convention -- no
period or underscore. This function replaces these prohibited
characters into dashes.
"""
if name is None:
return None
# '_' and '.' are not allowed
return re.sub(r"_|\.", "-", name)
def invocation_location():
"""If a function A in file B calls function C, which in turn calls
invocation_location(), the call returns information about the invocation,
in particular, the caller's name "A" and the line number where A
calls C. Return (B + line_number) as function_name if A doesn't exist,
where users directly calls C in file B.
:return: a tuple of (function_name, invocation_line)
"""
stack = inspect.stack()
if len(stack) < 4:
line_number = stack[len(stack) - 1][2]
func_name = "%s-%d" % (
_argo_safe_name(workflow_filename()),
line_number,
)
else:
func_name = _argo_safe_name(stack[2][3])
line_number = stack[3][2]
return func_name, line_number
def body(func_obj):
"""If a function A calls body(), the call returns the Python source code of
the function definition body (not including the signature) of A.
"""
if func_obj is None:
return None
code = inspect.getsource(func_obj)
# Remove function signature
code = code[code.find(":") + 1 :] # noqa: E203
# Function might be defined in some indented scope
# (e.g. in another function).
# We need to handle this and properly dedent the function source code
return textwrap.dedent(code)
def workflow_filename():
"""Return the Python file that defines the workflow.
"""
stacks = inspect.stack()
frame = inspect.stack()[len(stacks) - 1]
full_path = frame[0].f_code.co_filename
filename, _ = os.path.splitext(os.path.basename(full_path))
filename = _argo_safe_name(filename)
return filename
def input_parameter(function_name, var_pos):
"""Generate parameter name for using as template input parameter names
in Argo YAML. For example, the parameter name "message" in the
container template print-message in
https://github.com/argoproj/argo/tree/master/examples#output-parameters.
"""
return "para-%s-%s" % (function_name, var_pos)
def container_output(function_name, caller_line, output_id):
"""Generate output name from an Argo container template. For example,
"{{steps.generate-parameter.outputs.parameters.hello-param}}" used in
https://github.com/argoproj/argo/tree/master/examples#output-parameters.
"""
function_id = invocation_name(function_name, caller_line)
return "couler.%s.%s.outputs.parameters.%s" % (
function_name,
function_id,
output_id,
)
def script_output(function_name, caller_line):
"""Generate output name from an Argo script template. For example,
"{{steps.generate.outputs.result}}" in
https://github.com/argoproj/argo/tree/master/examples#scripts--results
"""
function_id = invocation_name(function_name, caller_line)
return "couler.%s.%s.outputs.result" % (function_name, function_id)
def invocation_name(function_name, caller_line):
"""Argo YAML requires that each step, which is an invocation to a
template, has a name. For example, hello1, hello2a, and hello2b
in https://github.com/argoproj/argo/tree/master/examples#steps.
However, in Python programs, there are no names for function
invocations. So we hype a name by the callee and where it is
called.
"""
return "%s-%s" % (function_name, caller_line)
def load_cluster_config():
"""Load user provided cluster specification file. For example,
config file for Sigma EU95 cluster is placed at 'couler/clusters/eu95.py'.
"""
module_file = os.getenv("couler_cluster_config")
if module_file is None:
return None
spec = importlib.util.spec_from_file_location(module_file, module_file)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module.cluster
def encode_base64(s):
"""
Encode a string using base64 and return a binary string.
This function is used in Secret creation.
For example, the secrets for Argo YAML:
https://github.com/argoproj/argo/blob/master/examples/README.md#secrets
"""
bencode = base64.b64encode(s.encode("utf-8"))
return str(bencode, "utf-8")
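# --- Illustrative sketch (not part of the original module): expected behavior
# of the helpers above on sample inputs.
#   _argo_safe_name("my_func.v1")  -> "my-func-v1"
#   invocation_name("hello", 42)   -> "hello-42"
#   input_parameter("hello", 0)    -> "para-hello-0"
#   encode_base64("secret")        -> "c2VjcmV0"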
|
[
"noreply@github.com"
] |
noreply@github.com
|
c96137e00332c4e6faaaafb5e3bf3bf35180ad73
|
e9caa9eeff9ec8443e492e825997f94d2a3a80d3
|
/pythonProject/HRank/max_sum_con.py
|
0890e412988dfd51dede4143d1d7daa92482f64a
|
[] |
no_license
|
saurabhpiyush1187/webapp_api_framework
|
1fe7270d660f2bd59914d449d649686b7a2a0d17
|
8c5c60720c7481d238a0cdac9c5aace356829a20
|
refs/heads/master
| 2023-03-02T05:13:46.960326
| 2021-02-13T13:33:23
| 2021-02-13T13:33:23
| 334,638,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
lst_num = [2,3,4,1,90,4,98]
n=3
lst_max = []
i=0
while(i+n<=len(lst_num)):
Sum = sum(lst_num[i:i+n])
lst_max.append(Sum)
i+=1
print(lst_max)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
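# --- Illustrative note (not in the original file): lst_max above collects every
# window sum; the maximum contiguous sum of window size n (which the filename
# suggests) is then max(lst_max). For the sample list with n = 3 the window
# sums are [9, 8, 95, 95, 192], so the maximum is 192.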
|
[
"saurabhpiyush@spglobal.com"
] |
saurabhpiyush@spglobal.com
|
efaf672a163e3201ff01ff94b0c00aa263fe5a66
|
7896f35d698e4896be25eb879ac546ee9b46b69c
|
/main.py
|
a608e718e883d43b5c7a9cd940e98ac5e6dc179f
|
[] |
no_license
|
SajedNahian/GraphicsWork05
|
51297fe44e5072765af82ab7d6e837b40698089d
|
b2099746416a9470622bb496a788f4c9873e2467
|
refs/heads/master
| 2020-04-30T12:30:41.408764
| 2019-03-20T22:48:19
| 2019-03-20T22:48:19
| 176,828,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
from display import *
from draw import *
from scriptParser import *
from matrix import *
import math
screen = new_screen()
color = [ 0, 255, 0 ]
edges = []
transform = new_matrix()
# print_matrix( make_translate(3, 4, 5) )
# print
# print_matrix( make_scale(3, 4, 5) )
# print
# print_matrix( make_rotX(math.pi/4) )
# print
# print_matrix( make_rotY(math.pi/4) )
# print
# print_matrix( make_rotZ(math.pi/4) )
parse_file( 'script', edges, transform, screen, color )
|
[
"noreply@github.com"
] |
noreply@github.com
|
741cdf5634fbc3cb461a36e5e68dc1ac744f172f
|
68e0b34c7c749331d0d660c1b5721ea82617205f
|
/fortune_server_data_management/daily_call_for_vincent.py
|
096999fd8e80bb808e5915a94925932a5f4bfffb
|
[] |
no_license
|
TheIllusion/TheIllusionsLibraries
|
ffeebe3a6ddfd635b69d07c54058d3e8bf74c337
|
0adf5a0a5d4c2e4de41faac6fcc75700104c2b53
|
refs/heads/master
| 2022-05-08T18:16:00.883192
| 2022-04-14T01:08:26
| 2022-04-14T01:08:26
| 63,536,801
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
from subprocess import call
import datetime
import time
#Get the date of yesterday
timestamp = datetime.datetime.fromtimestamp(time.time() - 24*60*60).strftime('%Y_%m_%d')
print('date = ', timestamp)  # Python 3 print function (the original used the Python 2 statement form)
call(["/home/nhnent/H1/users/rklee/Data/server_data/server_hand_data/get_data_from_svc.sh", timestamp])
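# --- Illustrative note (not in the original file): the same "yesterday" stamp
# can be computed with timedelta instead of raw epoch arithmetic:
#   (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%Y_%m_%d')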
|
[
"gunboy83@gmail.com"
] |
gunboy83@gmail.com
|
b2b58656fafea5b3228a70403034f42c9a4cc4c7
|
7bb3a08e8628780d2bdc86ed84690100acbb8771
|
/sphere.py
|
82d278a2b81dc077f503c61ffe86f0b09e6b1064
|
[] |
no_license
|
javamak/python_raytrace_tut
|
2e84b2a757f429925b7af3f8542f96a1ffc857a9
|
4db3e0650d2cd414d8f74b1b25f18657a8e6f320
|
refs/heads/main
| 2023-04-26T15:56:22.477737
| 2021-05-03T14:36:26
| 2021-05-03T14:36:26
| 362,014,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 700
|
py
|
from math import sqrt
class Sphere:
def __init__(self, center, radius, material):
self.center = center
self.radius = radius
self.material = material
def intesects(self, ray):
sphere_to_ray = ray.origin - self.center
#a = 1
b = 2 * ray.direction.dot_product(sphere_to_ray)
c = sphere_to_ray.dot_product(sphere_to_ray) - self.radius * self.radius
discriminant = b * b - 4 * c
if discriminant >= 0:
dist = (-b - sqrt(discriminant)) / 2
if dist > 0:
return dist
return None
def normal(self, surface_point):
return (surface_point - self.center).normalize()
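    # --- Illustrative note (not in the original file): with a unit-length ray
    # direction, the intersection quadratic a*t^2 + b*t + c = 0 has a = 1,
    # which is why the discriminant above is b*b - 4*c and the nearer root is
    # (-b - sqrt(discriminant)) / 2.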
|
[
"javamak@gmail.com"
] |
javamak@gmail.com
|
566d49bca2e69081ec3642a0f489a87b16e2ca06
|
3da85bca3dcedda942a7143393f84dd0132620ee
|
/vip52/models.py
|
3d311f0ff15d2113a5e8ffd26c939e8f47184b0e
|
[] |
no_license
|
afsmythe/vip_feed_builder
|
ff9320b72de76f6b73b6fc6e62ab283ce629fc67
|
ee68a717284b72437435315c9a10510ab59b457e
|
refs/heads/main
| 2023-07-14T00:04:29.672211
| 2021-08-18T22:07:54
| 2021-08-18T22:07:54
| 397,379,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,265
|
py
|
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.forms import model_to_dict, fields_for_model
from lxml import etree
from lxml.etree import Element
import pprint
def xml_element(model):
sequence = fields_for_model(model, exclude = ['id']).keys()
dict = model_to_dict(model, exclude = ['id'])
element = etree.Element(model.__class__.__name__, attrib = {'id': str(model.id)})
e_dict = dict
#e_dict = self.dict()
for i in sequence:
key, val = i, e_dict[i]
#for key, val in e_dict.items():
if isinstance(val, list) and len(val) > 0:
#print(val)
child = Element(key)
l = []
for i in val:
#check if type is a sub-type.
if isinstance(i, InternationalizedText):
#todo add .xml() method to InternationalizedText model
it_child = Element('Text', attrib = {'language': i.Language})
it_child.text = i.LanguageString
child.append(it_child)
element.append(child)
                elif isinstance(i, ExternalIdentifier):
                    # append this identifier's XML to the shared <key> node
                    child.append(i.xml())
                    element.append(child)
elif isinstance(i, LatLng):
latlng = LatLng.objects.get(id = i.id)
child = latlng.xml()
element.append(child)
elif isinstance(i, Schedule):
print('Schedule found')
schedule = Schedule.objects.get(id = i.id)
child = schedule.xml()
element.append(child)
elif isinstance(i, SimpleAddressType):
print('Structured Address')
structuredaddress = SimpleAddressType.objects.get(id = i.id)
child = structuredaddress.xml()
element.append(child)
elif isinstance(i, ElectionNotice):
print('Election Notice')
notice = ElectionNotice.objects.get(id = i.id)
child = notice.xml()
element.append(child)
else:
#it is not a sub-type, is IDXREFS
l.append(str(i.id))
child.text = " ".join(l)
element.append(child)
else:
if key == 'Date':
date = Element('Date')
date.text = str(val)
element.append(date)
elif val is None or val == '' or len(val) == 0:
#elif val is None or val == '':
continue
elif key == 'Department':
print('Department')
dep = Department.objects.get(id = val)
child = dep.xml()
element.append(child)
                try:
                    vs = VoterService.objects.get(id = dep.VoterService_id)
                    print('VoterService')
                    child.append(vs.xml())
                except ObjectDoesNotExist:
                    pass
# elif key == 'VoterService':
# print('Voter Service')
# vs = VoterService.objects.get(id = val)
# child = vs.xml()
# element.append(child)
else:
#regular value
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
# Create your models here.
class Source(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Name = models.CharField(max_length = 500, blank = True, null = True)
VipId = models.CharField(max_length = 50, blank = True, null = True)
DateTime = models.CharField(max_length = 50, blank = True, null = True)
class InternationalizedText(models.Model):
Language = models.CharField(max_length = 2, default = 'en')
LanguageString = models.CharField(max_length = 5000)
def __str__(self):
return("{}: {}".format(self.Language, self.LanguageString))
def xml(self):
it_element = Element('Text', attrib = {'language': self.Language})
it_element.text = self.LanguageString
return(it_element)
class ExternalIdentifier(models.Model):
Type = models.CharField(max_length = 50, blank = True, null = True)
OtherType = models.CharField(max_length = 100, blank = True, null = True)
Value = models.CharField(max_length = 100, blank = True, null = True)
def __str__(self):
return("{}: {}".format(self.Type, self.Value))
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = Element('ExternalIdentifier')
for key, val in self.dict().items():
if val is not None:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Hours(models.Model):
StartTime = models.CharField(max_length = 50, blank = True, null = True)
EndTime = models.CharField(max_length = 50, blank = True, null = True)
def xml(self):
return(xml_element(self))
class Schedule(models.Model):
Hours = models.ManyToManyField(Hours, blank = True, max_length = 1000)
IsOnlyByAppointment = models.CharField(max_length = 50, blank = True, null = True)
IsOrByAppointment = models.CharField(max_length = 50, blank = True, null = True)
IsSubjectToChange = models.CharField(max_length = 50, blank = True, null = True)
StartDate = models.CharField(max_length = 50, blank = True, null = True)
EndDate = models.CharField(max_length = 50, blank = True, null = True)
def xml(self):
schedule = etree.Element('Schedule')
for key, val in model_to_dict(self, exclude = ['id']).items():
if isinstance(val, list):
for ho in val:
child = ho.xml()
schedule.append(child)
elif val != None:
child = Element(key)
child.text = str(val)
schedule.append(child)
return(schedule)
class HoursOpen(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Schedule = models.ManyToManyField(Schedule, blank = True, max_length = 1000)
def xml(self):
hoursopen = etree.Element('HoursOpen', attrib = {'id':self.id})
for key, val in model_to_dict(self, exclude = ['id']).items():
if isinstance(val, list):
for sch in val:
child = sch.xml()
hoursopen.append(child)
else:
child = Element(key)
child.text = str(val)
hoursopen.append(child)
return(hoursopen)
class LatLng(models.Model):
Latitude = models.CharField(max_length = 50)
Longitude = models.CharField(max_length = 50)
Source = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return("{}: {}, {}".format(self.Source, self.Latitude, self.Longitude))
def xml(self):
sequence = fields_for_model(self, exclude = ['id']).keys()
dict = model_to_dict(self, exclude = ['id'])
element = etree.Element('LatLng')
for key in sequence:
val = dict[key]
#for key, val in model_to_dict(self, exclude = ['id']).items():
#print(key, val)
            if val is not None and val != '':
                child = Element(key)
                child.text = str(val)
                element.append(child)
        return(element)
class ContactInformation(models.Model):
AddressLine = models.CharField(max_length = 1000, blank = True, null = True)
Directions = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
Email = models.CharField(max_length = 100, blank = True, null = True)
Fax = models.CharField(max_length = 100, blank = True, null = True)
HoursOpenId = models.CharField(max_length = 100, blank = True, null = True)
LatLng = models.CharField(max_length = 100, blank = True, null = True)
Name = models.CharField(max_length = 1000, blank = True, null = True)
Phone = models.CharField(max_length = 100, blank = True, null = True)
Uri = models.URLField(blank = True, null = True)
parent_id = models.CharField(max_length = 100, blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id','parent_id']))
def xml(self):
element = Element('ContactInformation')
for key, val in self.dict().items():
            if isinstance(val, list) and len(val) > 0:
                child = Element(key)
                for i in val:  # each entry is an InternationalizedText
                    it_child = Element('Text', attrib = {'language': i.Language})
                    it_child.text = i.LanguageString
                    child.append(it_child)
                element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Party(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Abbreviation = models.CharField(max_length = 50, blank = True, null = True)
Color = models.CharField(max_length = 10, blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsWriteIn = models.CharField(max_length = 10, blank = True, null = True)
LogoUri = models.URLField(blank = True, null = True)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
class Person(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
DateOfBirth = models.DateTimeField(blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FirstName = models.CharField(max_length = 50, blank = True, null = True)
FullName = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_full_name')
Gender = models.CharField(max_length = 50, blank = True, null = True)
LastName = models.CharField(max_length = 50, blank = True, null = True)
MiddleName = models.CharField(max_length = 50, blank = True, null = True)
Nickname = models.CharField(max_length = 50, blank = True, null = True)
PartyId = models.ForeignKey(Party, on_delete = models.CASCADE, db_column = 'PartyId', blank = True, null = True)
Prefix = models.CharField(max_length = 50, blank = True, null = True)
Profession = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_profession')
Suffix = models.CharField(max_length = 50, blank = True, null = True)
Title = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'person_title')
class Candidate(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
BallotName = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FileDate = models.CharField(max_length = 50, blank = True, null = True)
IsIncumbent = models.CharField(max_length = 50, blank = True, null = True)
IsTopTicket = models.CharField(max_length = 50, blank = True, null = True)
PartyId = models.ForeignKey(Party, on_delete = models.CASCADE, db_column = 'PartyId', blank = True, null = True)
PersonId = models.ForeignKey(Person, on_delete = models.CASCADE, db_column = 'PersonId', blank = True, null = True)
PostElectionStatus = models.CharField(max_length = 50, blank = True, null = True)
PreElectionStatus = models.CharField(max_length = 50, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
class BallotMeasureSelection(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Selection = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return(self.id)
class BallotMeasureContest(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Abbreviation = models.CharField(max_length = 100, blank = True, null = True)
BallotSelectionIds = models.ManyToManyField(BallotMeasureSelection, blank = True, max_length = 1000)
BallotSubTitle = models.ManyToManyField(InternationalizedText, related_name = 'bmc_ballot_sub_title',blank = True, max_length = 200)
BallotTitle = models.ManyToManyField(InternationalizedText, related_name = 'bmc_ballot_title',blank = True, max_length = 200)
ElectoralDistrictId = models.ForeignKey('ElectoralDistrict', blank = True, db_column = 'ElectoralDistrictId', on_delete = models.CASCADE)
ElectorateSpecification = models.ManyToManyField(InternationalizedText, related_name = 'bmc_electorate_specification',blank = True, max_length = 200)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 200)
HasRotation = models.CharField(max_length = 50, blank = True, null = True)
Name = models.CharField(max_length = 100, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 100, blank = True, null = True)
VoteVariation = models.CharField(max_length = 100, blank = True, null = True)
OtherVoteVariation = models.CharField(max_length = 100, blank = True, null = True)
ConStatement = models.ManyToManyField(InternationalizedText, related_name = 'bmc_con_statement', blank = True, max_length = 200)
EffectOfAbstain = models.ManyToManyField(InternationalizedText, related_name = 'bmc_effect_of_abstain',blank = True, max_length = 200)
FullText = models.ManyToManyField(InternationalizedText, related_name = 'bmc_full_text',blank = True, max_length = 200)
InfoUri = models.URLField(blank = True, null = True)
PassageThreshold = models.ManyToManyField(InternationalizedText, related_name = 'bmc_passage_threshold',blank = True, max_length = 200)
ProStatement = models.ManyToManyField(InternationalizedText, related_name = 'bmc_pro_statement',blank = True, max_length = 200)
SummaryText = models.ManyToManyField(InternationalizedText, related_name = 'bmc_summary_text',blank = True, max_length = 200)
Type = models.CharField(max_length = 50, blank = True, null = True)
OtherType = models.CharField(max_length = 50, blank = True, null = True)
class CandidateSelection(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
SequenceOrder = models.CharField(max_length = 50, blank = True, null = True)
CandidateIds = models.ManyToManyField(Candidate, blank = True, max_length = 200)
EndorsementPartyIds = models.ManyToManyField(Party, blank = True, max_length = 200)
IsWriteIn = models.CharField(max_length = 50, blank = True, null = True)
class CandidateContest(models.Model):
id = models.CharField(primary_key = True, max_length = 100)
Abbreviation = models.CharField(max_length = 100, blank = True, null = True)
BallotSelectionIds = models.ManyToManyField(CandidateSelection, blank = True, max_length = 1000)
BallotSubTitle = models.ManyToManyField(InternationalizedText, related_name = 'cc_ballot_sub_title',blank = True, max_length = 200)
BallotTitle = models.ManyToManyField(InternationalizedText, related_name = 'cc_ballot_title',blank = True, max_length = 200)
ElectoralDistrictId = models.ForeignKey('ElectoralDistrict',on_delete = models.CASCADE, db_column = 'ElectoralDistrictId', max_length = 1000)
ElectorateSpecification = models.ManyToManyField(InternationalizedText, related_name = 'cc_electorate_specification', blank = True, max_length = 200)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
HasRotation = models.CharField(max_length = 1000, blank = True, null = True)
Name = models.CharField(max_length = 2000, blank = True, null = True)
SequenceOrder = models.CharField(max_length = 100, blank = True, null = True)
VoteVariation = models.CharField(max_length = 100, blank = True, null = True)
OtherVoteVariation = models.CharField(max_length = 100, blank = True, null = True)
NumberElected = models.CharField(max_length = 100, blank = True, null = True)
OfficeIds = models.ManyToManyField('Office', blank = True, max_length = 1000)
PrimaryPartyIds = models.ManyToManyField('Party', blank = True, max_length = 1000)
VotesAllowed = models.CharField(max_length = 100, blank = True, null = True)
class Election(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Date = models.CharField(max_length = 50, blank = True, null = True)
HoursOpenId = models.CharField(max_length = 50, blank = True, null = True)
PollingHours = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_hours')
ElectionType = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_type')
StateId = models.ForeignKey('State', on_delete=models.CASCADE, db_column = 'StateId')
IsStatewide = models.CharField(max_length = 10, blank = True, null = True)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_name')
RegistrationInfo = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'election_registration_info')
AbsenteeBallotInfo = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'absentee_ballot_info')
ResultsUri = models.URLField(blank = True, null = True)
HasElectionDayRegistration = models.CharField(max_length = 10, blank = True, null = True)
RegistrationDeadline = models.CharField(max_length = 50, blank = True, null = True)
AbsenteeRequestDeadline = models.CharField(max_length = 50, blank = True, null = True)
class VoterService(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
Description = models.ForeignKey(InternationalizedText, blank = True, max_length = 2000, on_delete = models.CASCADE)
Type = models.CharField(max_length = 50, blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = etree.Element('VoterService')
for key, val in self.dict().items():
            if key == 'Description':
                child = Element('Description')
                it = InternationalizedText.objects.get(id = val)
                child.append(it.xml())
                element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class Department(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
ElectionOfficialPersonId = models.ForeignKey(Person, db_column = 'ElectionOfficialPersonId', on_delete = models.CASCADE, blank = True, null = True)
VoterService = models.ForeignKey(VoterService, on_delete = models.CASCADE, blank = True, null = True)
election_administration_id = models.CharField(max_length = 50, blank = True, null = True)
#def __str__(self):
# return(self.ContactInformation)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = etree.Element('Department')
for key, val in self.dict().items():
            if isinstance(val, list) and len(val) > 0:
                # the only list-valued field here is ContactInformation,
                # which knows how to serialize itself
                for i in val:
                    element.append(i.xml())
# elif key == 'VoterService':
# print('Voter Service')
# vs = VoterService.objects.get(id = val)
# child = vs.xml()
# element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class ElectionNotice(models.Model):
NoticeText = models.ManyToManyField(InternationalizedText, blank = True, max_length = 5000)
NoticeUri = models.URLField(blank = True, null = True)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
def xml(self):
element = Element('ElectionNotice')
for key, val in self.dict().items():
if isinstance(val, list) and len(val) > 0:
for i in val:
child = Element(key)
it_child = Element('Text', attrib = {'language': i.Language})
it_child.text = i.LanguageString
child.append(it_child)
element.append(child)
else:
if val is None or val == '' or len(val) == 0:
continue
else:
child = Element(key)
child.text = str(val)
element.append(child)
return(element)
class ElectionAdministration(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
AbsenteeUri = models.URLField(blank = True, null = True)
AmIRegisteredUri = models.URLField(blank = True, null = True)
BallotTrackingUri = models.URLField(blank = True, null = True)
BallotProvisionalTrackingUri = models.URLField(blank = True, null = True)
Department = models.ForeignKey(Department, on_delete = models.CASCADE, max_length = 2000, default = 'dep1')
ElectionNotice = models.ManyToManyField(ElectionNotice, blank = True, null = True)
ElectionsUri = models.URLField(max_length = 1000, blank = True, null = True)
RegistrationUri = models.URLField(blank = True, null = True)
RulesUri = models.URLField(blank = True, null = True)
WhatIsOnMyBallotUri = models.URLField(blank = True, null = True)
WhereDoIVoteUri = models.URLField(blank = True, null = True)
class ElectoralDistrict(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
Name = models.CharField(blank = True, null = True ,max_length = 100)
Number = models.CharField(blank = True, null = True ,max_length = 50)
Type = models.CharField(blank = True, null = True ,max_length = 50)
OtherType = models.CharField(blank = True, null = True ,max_length = 50)
def __str__(self):
return("Name: {}; Number: {}".format(self.Name, self.Number))
class Office(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContactInformation = models.ManyToManyField(ContactInformation, blank = True, max_length = 1000)
Description = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'office_description')
ElectoralDistrictId = models.ForeignKey(ElectoralDistrict, db_column = 'ElectoralDistrictId', on_delete = models.CASCADE, blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
FilingDeadline = models.CharField(blank = True, null = True, max_length = 100)
IsPartisan = models.CharField(blank = True, null = True, max_length = 100)
Name = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'office_name')
OfficeHolderPersonIds = models.ManyToManyField(Person, blank = True)
Term = models.CharField(blank = True, null = True, max_length = 100)
class Locality(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ElectionAdministrationId = models.ForeignKey('ElectionAdministration', on_delete = models.CASCADE, db_column = 'ElectionAdministrationId', blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsMailOnly = models.CharField(max_length = 50, blank = True, null = True)
Name = models.CharField(max_length = 100, blank = False, default = None)
PollingLocationIds = models.ManyToManyField('PollingLocation',max_length = 10000, blank = True)
StateId = models.ForeignKey('State', on_delete = models.CASCADE, db_column = 'StateId', blank = True, null = True)
Type = models.CharField(blank = True, null = True, max_length = 100)
OtherType = models.CharField(blank = True, null = True, max_length = 100)
# def __str__(self):
# return(self.Name)
class SimpleAddressType(models.Model):
Line1 = models.CharField(max_length = 100)
Line2 = models.CharField(blank = True, null = True, max_length = 100, default = '')
Line3 = models.CharField(blank = True, null = True, max_length = 100, default = '')
City = models.CharField(max_length = 100)
State = models.CharField(max_length = 100)
Zip = models.CharField(blank = True, null = True, max_length = 100, default = "")
    def dict(self):
        data = model_to_dict(self, exclude = ['id'])
        sequence = fields_for_model(self, exclude = ['id']).keys()
        # rebuild the mapping in declared field order
        return({field: data[field] for field in sequence})
def xml(self):
element = Element('AddressStructured')
dict = model_to_dict(self, exclude = ['id'])
sequence = fields_for_model(self, exclude = ['id']).keys()
for i in sequence:
val = dict[i]
if val is not None:
child = Element(i)
child.text = str(val)
element.append(child)
return(element)
class PollingLocation(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
AddressStructured = models.ManyToManyField(SimpleAddressType, blank = True, null = True)
AddressLine = models.CharField(blank = True, null = True, max_length = 1000)
Directions = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200, related_name = 'pl_directions')
Hours = models.ManyToManyField(InternationalizedText, blank = True, max_length = 200)
#HoursOpenId = models.ForeignKey('HoursOpen', on_delete=models.CASCADE, blank = True, null = True)
HoursOpenId = models.ForeignKey(HoursOpen, db_column = 'HoursOpenId', on_delete = models.SET_NULL, max_length = 50, blank = True, null = True)
IsDropBox = models.CharField(max_length = 50, blank = True, null = True, default = 'false')
IsEarlyVoting = models.CharField(max_length = 50, blank = True, null = True, default = 'false')
LatLng = models.ManyToManyField(LatLng, db_column = 'LatLng', max_length = 50, null = True, blank = True)
Name = models.CharField(blank = True, null = True, max_length = 1000)
PhotoUri = models.URLField(blank = True, null = True)
def __str__(self):
return(self.Name)
class OrderedContest(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ContestId = models.ForeignKey(CandidateContest, db_column = 'ContestId', on_delete = models.CASCADE, max_length = 50, blank = True, null = True)
OrderedBallotSelectionIds = models.ManyToManyField(CandidateSelection, blank = True, max_length = 1000)
class BallotStyle(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
ImageUri = models.URLField(blank = True, null = True)
OrderedContestIds = models.ManyToManyField(OrderedContest, blank = True, max_length = 1000)
PartyIds = models.ManyToManyField(Party, blank = True, max_length = 1000)
class Precinct(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
BallotStyleId = models.ForeignKey(BallotStyle, db_column = 'BallotStyleId', on_delete = models.CASCADE, max_length = 50, blank = True, null = True)
ElectoralDistrictIds = models.ManyToManyField(ElectoralDistrict, max_length = 1000, blank = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
IsMailOnly = models.CharField(max_length = 50, blank = True, null = True)
LocalityId = models.ForeignKey(Locality, db_column = 'LocalityId', on_delete = models.CASCADE, max_length = 50)
Name = models.CharField(max_length = 200, blank = True, null = True)
Number = models.CharField(max_length = 50, blank = True, null = True)
PollingLocationIds = models.ManyToManyField(PollingLocation,max_length = 1000, blank = True)
PrecinctSplitName = models.CharField(max_length = 50, blank = True, null = True)
Ward = models.CharField(max_length = 50, blank = True, null = True)
def __str__(self):
return("{} ({})".format(self.Name, self.LocalityId.Name))
class State(models.Model):
id = models.CharField(primary_key = True, max_length = 50, default = "st1")
ElectionAdministrationId = models.ForeignKey(ElectionAdministration, on_delete = models.CASCADE, db_column = 'ElectionAdministrationId', blank = True, null = True)
ExternalIdentifiers = models.ManyToManyField(ExternalIdentifier, blank = True, max_length = 1000)
Name = models.CharField(max_length = 50, blank = True, null = True)
PollingLocationIds = models.ManyToManyField(PollingLocation, max_length = 1000, blank = True)
def __str__(self):
return(self.Name)
def dict(self):
return(model_to_dict(self, exclude = ['id']))
class StreetSegment(models.Model):
id = models.CharField(primary_key = True, max_length = 50)
AddressDirection = models.CharField(max_length = 50, blank = True, null = True)
City = models.CharField(max_length = 50, blank = True, null = True)
IncludesAllAddresses = models.CharField(max_length = 50, blank = True, null = True)
IncludesAllStreets = models.CharField(max_length = 50, blank = True, null = True)
OddEvenBoth = models.CharField(max_length = 50, blank = True, null = True)
PrecinctId = models.ForeignKey(Precinct, on_delete = models.CASCADE, db_column = 'PrecinctId')
StartHouseNumber = models.CharField(max_length = 50, blank = True, null = True)
EndHouseNumber = models.CharField(max_length = 50, blank = True, null = True)
HouseNumberPrefix = models.CharField(max_length = 50, blank = True, null = True)
HouseNumberSuffix = models.CharField(max_length = 50, blank = True, null = True)
State = models.CharField(max_length = 50, blank = True, null = True)
StreetDirection = models.CharField(max_length = 50, blank = True, null = True)
StreetName = models.CharField(max_length = 50, blank = True, null = True)
StreetSuffix = models.CharField(max_length = 50, blank = True, null = True)
UnitNumber = models.CharField(max_length = 500, blank = True, null = True)
Zip = models.CharField(max_length = 50, blank = True, null = True)
class Error(models.Model):
id_error = models.CharField(max_length = 50, blank = True, null = True)
error_object = models.CharField(max_length = 50, blank = True, null = True)
error_message = models.CharField(max_length = 500, blank = True, null = True)
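# --- Illustrative sketch (not part of the original module): how the xml()
# helpers above serialize; shown as an interactive session with hypothetical
# field values.
#   >>> it = InternationalizedText(Language='en', LanguageString='Polling Place')
#   >>> etree.tostring(it.xml())
#   b'<Text language="en">Polling Place</Text>'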
|
[
"franklin@democracy.works"
] |
franklin@democracy.works
|
ca408825b28434521ba7bb95bb73b324467a56b2
|
c2c45ff6987a161e945b34d3b377ce11a9c9cf3b
|
/python-flask-server/swagger_server/models/expression.py
|
cf1a5cf4c0e286dca3231a5f5dbecd00572320c2
|
[] |
no_license
|
JeSuisArina/Python-CGIHTTPServer
|
089cabad6b6dcfa77ae6fef380b235711fa89e2d
|
60853297b9337f27286916de55abfc7c39ad19a5
|
refs/heads/master
| 2020-03-17T03:31:37.175275
| 2018-06-06T07:51:16
| 2018-06-06T07:51:16
| 133,239,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,294
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.expression_result import ExpressionResult # noqa: F401,E501
from swagger_server import util
class Expression(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, num: List[int]=None, result: ExpressionResult=None): # noqa: E501
"""Expression - a model defined in Swagger
:param num: The num of this Expression. # noqa: E501
:type num: List[int]
:param result: The result of this Expression. # noqa: E501
:type result: ExpressionResult
"""
self.swagger_types = {
'num': List[int],
'result': ExpressionResult
}
self.attribute_map = {
'num': 'num',
'result': 'result'
}
self._num = num
self._result = result
@classmethod
def from_dict(cls, dikt) -> 'Expression':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The Expression of this Expression. # noqa: E501
:rtype: Expression
"""
return util.deserialize_model(dikt, cls)
@property
def num(self) -> List[int]:
"""Gets the num of this Expression.
:return: The num of this Expression.
:rtype: List[int]
"""
return self._num
@num.setter
def num(self, num: List[int]):
"""Sets the num of this Expression.
:param num: The num of this Expression.
:type num: List[int]
"""
self._num = num
@property
def result(self) -> ExpressionResult:
"""Gets the result of this Expression.
:return: The result of this Expression.
:rtype: ExpressionResult
"""
return self._result
@result.setter
def result(self, result: ExpressionResult):
"""Sets the result of this Expression.
:param result: The result of this Expression.
:type result: ExpressionResult
"""
self._result = result
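# --- Illustrative sketch (not part of the generated file): basic use of the
# generated properties above.
#   >>> expr = Expression(num=[1, 2, 3])
#   >>> expr.num
#   [1, 2, 3]
#   >>> expr.num = [4, 5]      # the setter re-assigns the private attribute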
|
[
"noreply@github.com"
] |
noreply@github.com
|
855e3ca79856a91739da6ccf5b04275f51eadda3
|
f1d3bca01c6fd993aef2cc4d6b22f488cdc5e3b5
|
/cat_scraper.py
|
f58e21cbeaaedba8a34c1d4102e93013845bf8c8
|
[] |
no_license
|
HanKruiger/cat-scraper
|
cad1db39de6336321866a52a46f33e9b3153ee87
|
e4cef07e17e763f131a5fdc9aaec0db8ea55cc2b
|
refs/heads/main
| 2023-04-19T23:03:04.510215
| 2021-05-08T14:53:04
| 2021-05-08T14:53:04
| 365,496,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,432
|
py
|
import logging as log
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlsplit
class CatScraper:
def __init__(self, url):
self.url = url
split_url = urlsplit(url)
self.base_path = f'{split_url.scheme}://{split_url.netloc}'
self.cats = None
def scrape(self):
log.info('Scraping %s', self.url)
        try:
            response = requests.get(self.url)
            if not response.ok:
                log.warning('Response to %s not OK: %s', self.url, response)
                self.cats = []  # so get_cats() never returns None after a failed scrape
                return
        except requests.exceptions.ConnectionError:
            log.warning('Could not connect to %s', self.url)
            self.cats = []
            return
soup = BeautifulSoup(response.content, 'html.parser')
# Select all <div> elements with a 'dieren' class, but WITHOUT the
# 'header' class.
cat_divs = soup.select('div.dieren:not(.header)')
# Convert the <div> elements to cat dictionaries.
self.cats = [self.div_to_cat(cat_div) for cat_div in cat_divs]
def get_cats(self):
if self.cats is None:
self.scrape()
return self.cats
def div_to_cat(self, cat_div):
return {
'url': self.base_path + cat_div.find('a')['href'],
'name': cat_div.find('li', class_='naam').text.split(':')[1],
'status': cat_div.find('li', class_='status').text.split(':')[1],
}
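# --- Illustrative usage sketch (not part of the original file; the URL is
# hypothetical):
#   scraper = CatScraper('https://example.org/asiel/katten')
#   for cat in scraper.get_cats():
#       print(cat['name'].strip(), '-', cat['status'].strip())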
|
[
"489397-hankruiger@users.noreply.gitlab.com"
] |
489397-hankruiger@users.noreply.gitlab.com
|
12180bacf865e009ab34847ca2fc32c7d48d7f9b
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/operations/_ddos_custom_policies_operations.py
|
9bdb5268477680f052fb35e51b998db264bd5970
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 19,277
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations(object):
"""DdosCustomPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
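    # --- Illustrative usage sketch (not part of the generated module; client
    # construction is elided and the names below are hypothetical):
    #   poller = network_client.ddos_custom_policies.begin_delete('my-rg', 'my-policy')
    #   poller.result()  # block until the long-running delete completes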
def get(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.DdosCustomPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
ddos_custom_policy_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.DdosCustomPolicy"
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
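# --------------------------------------------------------------------------
# A minimal usage sketch for the operations above (not part of the generated
# module; it assumes an already-configured NetworkManagementClient named
# `client`, and the resource names are hypothetical):
#
#     poller = client.ddos_custom_policies.begin_create_or_update(
#         resource_group_name="example-rg",
#         ddos_custom_policy_name="example-policy",
#         parameters=models.DdosCustomPolicy(location="westus"),
#     )
#     policy = poller.result()   # blocks until the LRO completes
#     client.ddos_custom_policies.begin_delete(
#         "example-rg", "example-policy").result()
# --------------------------------------------------------------------------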
|
[
"noreply@github.com"
] |
noreply@github.com
|
a7a68c783777b2cd112ffd422219f4293e1aee15
|
ac329340f6376bdcf76a893e65641304faf85d86
|
/Des_Test.py
|
f8a0a13e2181550e3c1f2db84df317262b894fe8
|
[] |
no_license
|
JimShu716/Modified_MNIST_Classification
|
b9df06ba23eadc40f5958cea4024d6830e1785e3
|
ed17261d8579e1808a8839c7b883e7b38f9a3c5f
|
refs/heads/master
| 2020-08-30T14:00:34.464475
| 2020-02-11T00:22:38
| 2020-02-11T00:22:38
| 218,401,968
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
import torchvision.models as model
import matplotlib.pyplot as plt
# Hyperparameters
EPOCH = 6
BATCH_SIZE = 64
LR = 0.0001
SPLIT_RATIO = 1
# Edit the file paths below to match your own setup
train_img = pd.read_pickle('/Users/tylerliu/GitHub/Proj3_source/train_max_x')
test_img = pd.read_pickle('/Users/tylerliu/GitHub/Proj3_source/test_max_x')
train_out = pd.read_csv('./data/train_max_y.csv').to_numpy().astype('int32')[:, -1]
train_img = torch.Tensor(train_img)
train_out = torch.tensor(train_out, dtype=torch.int64)
test_img = torch.Tensor(test_img)
# Adjust the test subset size here (replace 50000 with another value if needed)
x = torch.unsqueeze(train_img, dim=1)[:SPLIT_RATIO*50000]/255.
y = train_out[:SPLIT_RATIO*50000]  # keep the x and y slices consistent
# mini-sample for testing
x_t = torch.unsqueeze(train_img, dim=1)[:300]/255.
y_t = train_out[:300]
my_dataset = Data.TensorDataset(x, y)
train_loader = Data.DataLoader(dataset=my_dataset, batch_size=BATCH_SIZE, shuffle=True)
# vgg16 = model.vgg16(pretrained=True)
des = model.DenseNet(num_init_features=32,num_classes=10)
cnn = nn.Sequential(
nn.Conv2d(1,3,5,1,2,bias=True),
des,
# nn.Conv2d(1,3,5,1,2,bias=True),
# vgg16,
# nn.Dropout(0.5),
# nn.Linear(1000,10),
# nn.Softmax()
)
# print(cnn)
# optimizer = torch.optim.Adam(cnn.parameters(), lr = LR)
# optimizer = torch.optim.Adam(cnn.parameters(), lr = 1e-4)
optimizer = torch.optim.Adam(cnn.parameters(), lr = 0.0001)
loss_fun = nn.CrossEntropyLoss()
def train():
for epoch in range(EPOCH):
for step, (x, y) in enumerate(train_loader):
output = cnn(x)
loss = loss_fun(output, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 10 == 0:
x_t_out = cnn(x_t)
y_pred = torch.max(x_t_out, dim=1)[1].data.numpy()
accuracy = float((y_pred == y_t.data.numpy()).astype(int).sum()) / float(y_t.size(0))
print('Epoch ', epoch, 'train loss: %.4f' % loss.data.numpy(), '| test accuracy: %.3f' % accuracy)
# if step % 30 == 0
#     adjust the learning rate (LR) here
train()
|
[
"tylerliu260@gmail.com"
] |
tylerliu260@gmail.com
|
aee404230ad4f577d5f611ed575b11337a8eb407
|
3ba478d3d52fba786c5421f59693fb094ad4364c
|
/week10/spiders/spiders/items.py
|
e08eb780639382d2863faee497f4ddd751171508
|
[] |
no_license
|
stingh711/Python001-class01
|
c5cfb91e3dcad4994cdf37251f06034e91748e50
|
51a320ada66b70ade4427bb352e28b84794d8503
|
refs/heads/master
| 2022-12-06T20:56:11.103688
| 2020-09-06T02:07:48
| 2020-09-06T02:07:48
| 273,141,919
| 0
| 0
| null | 2020-06-18T04:33:35
| 2020-06-18T04:33:34
| null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class GoodsItem(scrapy.Item):
name = scrapy.Field()
link = scrapy.Field()
comments = scrapy.Field()
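# A minimal usage sketch inside a spider callback (hypothetical spider and
# selectors, not part of this file):
#
#     def parse(self, response):
#         item = GoodsItem()
#         item['name'] = response.css('div.p-name a::text').get()
#         item['link'] = response.url
#         item['comments'] = response.css('div.p-commit a::text').get()
#         yield item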
|
[
"lihong.hu@siemens.com"
] |
lihong.hu@siemens.com
|
688072f226f2b49b242f3cc4a00b67dc6eae16bf
|
a393ccc763ee9cb78e7c6e2acd9ecb906bc006bd
|
/tests/conftest.py
|
411e2a0fe30f78b0827f2d421eea59b048341f54
|
[] |
no_license
|
Kilo59/GoT-Char_DB16
|
d2e548886d1c698439ad0b6b948e750f6da25149
|
122f81419b28590e138dd8fb082c7a0dbb9ae00a
|
refs/heads/master
| 2021-09-08T14:38:11.765705
| 2021-09-04T16:22:42
| 2021-09-04T16:22:42
| 65,403,337
| 1
| 0
| null | 2021-09-04T16:22:43
| 2016-08-10T17:37:48
|
Python
|
UTF-8
|
Python
| false
| false
| 657
|
py
|
"""
tests.conftest.py
~~~~~~~~~~~~~~~~~
Shared testing fixtures
"""
import pathlib
import pytest
ROOT = pathlib.Path(__file__).parents[1]
# TODO: don't rely on specific database path or name
SQLITE_DB = ROOT / "database.db"
@pytest.fixture(scope="session")
def monkey_session():
"""Session scoped monkeypatch"""
with pytest.MonkeyPatch.context() as monkeypatch:
yield monkeypatch
@pytest.fixture(scope="session", autouse=True)
def session_setup_teardown():
# setup
if SQLITE_DB.exists():
SQLITE_DB.unlink() # missing_ok py 3.8
# yield
yield
# teardown
if SQLITE_DB.exists():
SQLITE_DB.unlink()
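# Example of a test using the session-scoped patch above (a sketch; the
# environment variable name is hypothetical):
#
#     def test_uses_db_path(monkey_session):
#         monkey_session.setenv("GOT_DB_PATH", str(SQLITE_DB))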
|
[
"noreply@github.com"
] |
noreply@github.com
|
9c7ade8ee0b8e0940518d647d405439a209f33bc
|
3af9515966c7d1c074a0923ebab8e60787c8eafa
|
/oop5.py
|
9a39b6de0ef278a5b85ebb57262dfa4900b143e0
|
[] |
no_license
|
suzaana749/basicpython
|
8dbc4bcf903655143ed616f751a128d0ba8f1b67
|
035d67fe08c4bb5a4bb864c0b6da2d79bd21ff78
|
refs/heads/master
| 2020-04-13T11:15:40.938938
| 2018-12-31T11:45:49
| 2018-12-31T11:45:49
| 163,168,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from __future__ import print_function
import math
import random
class circle:
def calCircum(self):
return 2*math.pi*self.radius
def calDiameter(self):
return self.radius *2
def calArea(self):
return math.pi * (self.radius ** 2)
def __init__(self): # constructor function called
self.radius - random.uniform(1.1,10.5)
circles = []
for i in range(0,10):
c = circle()
#c.radius = random.uniform(1.1,10.5)
circles.append(c)
for c in circles:
print("radius",round(c.radius,2),"circum",round(c.calCircum(),2),"diameter",round(c.calDiameter(),2),"area",round(c.calArea(),2))
|
[
"mgrsuzaana@gmail.com"
] |
mgrsuzaana@gmail.com
|
8786f9605a9ca3d5d09b16b60f4fb0c77fcd8b81
|
e365e230ad8a889107264b39a5eda71d68172f6f
|
/archive/combine_rev_aws_transcripts.py
|
7cb4923f0956dd8ea9ec05a340c1f00c2773b29f
|
[] |
no_license
|
wkevwang/Intuition-scribe
|
b4efd0584703a76f208c1a7b099782e26bb17607
|
969973103f13bb5e89d35cfe316527db8cd59cb1
|
refs/heads/master
| 2023-06-23T20:18:39.999554
| 2021-07-28T22:23:58
| 2021-07-28T22:23:58
| 280,757,348
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,356
|
py
|
"""
Patch up the low-confidence parts of the Rev.ai transcript using the
AWS Transcribe transcript, and return an edited version of the Rev.ai
transcript.
"""
import argparse
import os
import json
from rev_diarization import parse_transcript_elements as parse_rev_transcript_elements
def find_elements_in_time_bound(elements, start, end, margin=0.1):
"""
Find the elements in "elements" that have start and end times within
"start" and "end", with an additional margin time of "margin"
If an element in "elements" is used, mark it as used so it is not
used again.
"""
elements_in_bound = []
start_bound = start - margin
end_bound = end + margin
for e in elements:
if "ts" not in e: # Skip punctuation since it does not have timestamps
continue
if (start_bound <= e["ts"]) and (e["end_ts"] <= end_bound) and (not e.get("used", False)):
elements_in_bound.append(e)
e["used"] = True
return elements_in_bound
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--audio_file", type=str, required=True) # Used for getting the original filename
parser.add_argument("--rev_transcript", type=str, required=True)
parser.add_argument("--aws_transcript", type=str, required=True)
parser.add_argument("--output_folder", type=str, required=True)
args = parser.parse_args()
with open(args.rev_transcript, 'r') as f:
rev_transcript = json.load(f)
with open(args.aws_transcript, 'r') as f:
aws_transcript = json.load(f)
# Form a single list of the elements for the transcript
# Each element has "value" and for non-punct elements, "ts" and "end_ts"
rev_transcript_elements = parse_rev_transcript_elements(rev_transcript)
aws_transcript_items = aws_transcript["results"]["items"]
aws_transcript_elements = []
for item in aws_transcript_items:
element = {
"value": item["alternatives"][0]["content"],
}
if "start_time" in item:
element["ts"] = float(item["start_time"])
element["end_ts"] = float(item["end_time"])
element["confidence"] = float(item["alternatives"][0]["confidence"])
aws_transcript_elements.append(element)
# Build list of final elements based on timestamps
# Use Rev element if confidence is high. Otherwise, use AWS Transcribe element
final_elements = []
for rev_e in rev_transcript_elements:
if (rev_e["type"] == "punct") or (rev_e["confidence"] > 0.95):
final_elements.append(rev_e)
else:
aws_elems = find_elements_in_time_bound(
aws_transcript_elements, rev_e["ts"], rev_e["end_ts"])
if len(aws_elems) > 0:
final_elements += aws_elems
else:
final_elements.append(rev_e)
filename_prefix = os.path.splitext(os.path.basename(args.audio_file))[0]
filename = filename_prefix + '_rev_aws_combined_transcript.json'
with open(os.path.join(args.output_folder, filename), 'w') as f:
# Write in Rev format (with a single speaker to meet the expected format)
json.dump({
"monologues": [{
"speaker": 0,
"elements": final_elements
}]
}, f, indent=4)
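# Example invocation (hypothetical file names):
#
#   python combine_rev_aws_transcripts.py \
#       --audio_file visit_01.mp3 \
#       --rev_transcript visit_01_rev.json \
#       --aws_transcript visit_01_aws.json \
#       --output_folder transcripts/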
|
[
"weixiang@ualberta.ca"
] |
weixiang@ualberta.ca
|
7fd6bd8c1c7690c327cccbd16ff1cfdb292ee885
|
478cf410ccf4b1511e7d31d99aec68a6c5499792
|
/17-CS-Python/01-Intro-Python-Overview/src/15_classes.py
|
e10767a5ae1aa5e9d3d93c51dce62a9fee5720c5
|
[
"MIT"
] |
permissive
|
felipe0216/data-science-journal
|
8f4bf9d572c194044c4c738fd39f4816ea542bdc
|
1ea6208e7abdd514ffbac3f53d2279aac2d4e493
|
refs/heads/master
| 2020-07-13T18:52:19.916794
| 2019-08-23T18:34:05
| 2019-08-23T18:34:05
| 205,134,360
| 1
| 0
| null | 2019-08-29T10:02:33
| 2019-08-29T10:02:33
| null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
# Make a class LatLon that can be passed parameters `lat` and `lon` to the
# constructor
# YOUR CODE HERE
class LatLon(object):
def __init__(self, lat, lon):
self.lat = lat
self.lon = lon
# Make a class Waypoint that can be passed parameters `name`, `lat`, and `lon` to the
# constructor. It should inherit from LatLon. Look up the `super` method.
# YOUR CODE HERE
class Waypoint(LatLon):
def __init__(self, name, lat, lon):
self.name = name
super().__init__(lat, lon)
def __str__(self):
return self.name
# Make a class Geocache that can be passed parameters `name`, `difficulty`,
# `size`, `lat`, and `lon` to the constructor. What should it inherit from?
# YOUR CODE HERE
class Geocache(Waypoint):
    def __init__(self, name, difficulty, size, lat, lon):
        self.difficulty = difficulty
        self.size = size
        super().__init__(name, lat, lon)

    def __str__(self):
        # answers the "make this print more nicely" prompt below
        return "%s (difficulty %s, size %s) at (%s, %s)" % (
            self.name, self.difficulty, self.size, self.lat, self.lon)
# Make a new waypoint and print it out: "Catacombs", 41.70505, -121.51521
# YOUR CODE HERE
waypoint = Waypoint("Catacombs", 41.70505, -121.51521)
# Without changing the following line, how can you make it print into something
# more human-readable? Hint: Look up the `object.__str__` method
print(waypoint)
# Make a new geocache "Newberry Views", diff 1.5, size 2, 44.052137, -121.41556
# YOUR CODE HERE
geocache = Geocache("Newberry Views", 1.5, 2, 44.052137, -121.41556)
# Print it--also make this print more nicely
print(geocache)
|
[
"chrisluedtke@gmail.com"
] |
chrisluedtke@gmail.com
|
72207f113df60216c97329d5cb3193a0ab4d1938
|
8edf69b3e354c09fb0950ea7be19e92b20b8d208
|
/pyzkaccess/ctypes.py
|
321cb0062313080fdf84b7beef9c96569ebc2934
|
[
"Apache-2.0"
] |
permissive
|
redjanym/pyzkaccess
|
41f57a42026b67da97f6ba4081f9fbdd0c30813d
|
051236992d5b3f6bdb01fe64b7a787b0c22369a9
|
refs/heads/master
| 2022-12-10T04:24:21.328624
| 2020-09-07T22:11:34
| 2020-09-07T22:11:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
"""This module is intended to safety import Windows-specific features
from `ctypes` stdlib module on non-windows platform -- they are
replaced by mock objects. Despite the code which uses that features
becomes partially unoperable in this case, we can import it and
generate documentation for instance
"""
import warnings
from ctypes import * # noqa
from unittest.mock import Mock
try:
from ctypes import WinDLL # noqa
except ImportError:
warnings.warn(
'ctypes.WinDLL is not available on non-Windows system. The code is not functional on '
'current platform, but in order to be able import it we mock WinDLL with '
'unittest.mock.Mock object'
)
WinDLL = Mock()
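# Net effect (a sketch of the intended behaviour): `from pyzkaccess.ctypes
# import WinDLL` now succeeds on Linux/macOS as well, with WinDLL being a
# Mock that absorbs calls instead of loading a real Windows DLL.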
|
[
"gosha753951@gmail.com"
] |
gosha753951@gmail.com
|
e1f046bf1125c305df03d5d353029498f0cbe882
|
56d41bbc6b5d831ba699ad4a44f5880ba3d195c8
|
/thread_sync.py
|
bcd0c1730eb098adb4c5bb1a0e3dc4d595662b6d
|
[] |
no_license
|
pigliangliang/2018-07-05-08
|
4635e4dc1926f3f17eae7f607a0b188f6aaf9f43
|
ba95331f4b0cc0316377a5c67f86d03e8cc257b8
|
refs/heads/master
| 2020-03-22T08:51:08.209718
| 2018-07-08T07:58:06
| 2018-07-08T07:58:06
| 139,795,833
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 606
|
py
|
#author_by zhuxiaoliang
# 2018-07-05 3:39 PM
"""
A. Semaphore
In multithreaded programming, to keep several threads from modifying a shared
resource (such as a global variable) at the same time, the number of concurrent
accesses (usually 1) must be limited. Semaphore synchronization is based on an
internal counter: each acquire() call decrements it by 1 and each release()
call increments it by 1; when the counter reaches 0, acquire() blocks.
"""
import time
from random import random
from threading import Thread,Semaphore,enumerate
sema = Semaphore(3)
def foo(tid):
    with sema:  # acquire on entry, release on exit; at most 3 threads inside
        print('thread %d is inside the semaphore' % tid)
        time.sleep(random())
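# A minimal driver sketch for foo (not in the original truncated file):
#
#     threads = [Thread(target=foo, args=(i,)) for i in range(10)]
#     for t in threads: t.start()
#     for t in threads: t.join()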
|
[
"836904717@qq.com"
] |
836904717@qq.com
|
c80abae38d1dabb5dfaa1cc1b9606faa528421bd
|
13b72e5c48f5f7213d9a569f699dc1554bc363dd
|
/demo/libdemo/list_git__repos.py
|
35f238aa8d2d69b1030b7d8cfefa92fded15d932
|
[] |
no_license
|
srikanthpragada/PYTHON_02_MAR_2021
|
6997fece4ad627bb767c0bca5a5e166369087e68
|
5dfd0c471378bd22379ac0d66f8785d4d315017b
|
refs/heads/master
| 2023-04-04T20:28:22.738042
| 2021-04-17T14:19:48
| 2021-04-17T14:19:48
| 344,498,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
import requests
user = "srikanthpragada"
response = requests.get(f"https://api.github.com/users/{user}/repos")
if response.status_code != 200:
print(f"Sorry! Could not get details for {user} from github!")
exit()
repos = response.json()  # parse the JSON response into a list of repo dicts
for repo in repos:
print(repo['name'])
print(repo['description'])
print('-' * 50)
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
40dbe20a67504c37c5be2abfab99add67569df21
|
585bac463cb1919ac697391ff130bbced73d6307
|
/36_ValidSudoku/solution1.py
|
91569e66f6dee678a57f5dd30000308804bcd22a
|
[] |
no_license
|
llgeek/leetcode
|
ce236cf3d3e3084933a7a4a5e8c7766f7f407285
|
4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c
|
refs/heads/master
| 2021-01-22T23:44:13.318127
| 2020-03-11T00:59:05
| 2020-03-11T00:59:05
| 85,667,214
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
seen = set()
for i in range(len(board)):
for j in range(len(board[0])):
val = board[i][j]
if val == '.': continue
if str(val) + 'in row ' + str(i) in seen:
return False
seen.add(str(val) + 'in row ' + str(i))
if str(val) + 'in column ' + str(j) in seen:
return False
seen.add(str(val) + 'in column ' + str(j))
if str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3) in seen:
return False
seen.add(str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3))
return True
|
[
"angleflycll@gmail.com"
] |
angleflycll@gmail.com
|
a2be10df8f5d1ea4680193e91049bd3d406fbf87
|
747ef767341c4e6ecd1cad733fc84781951f2424
|
/pq/2020/p7/C/solC1.py
|
0318815ab20422cb93a5a75f01f4b1e8317f0281
|
[] |
no_license
|
islami00/csc102
|
4cdfd761814bab0d7ef45f2270aa70bf411d233b
|
93c89f7b087ff90c34b508b12d654c42857adbd2
|
refs/heads/main
| 2023-06-19T06:27:47.620859
| 2021-07-15T16:25:10
| 2021-07-15T16:25:10
| 348,317,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
n = [12, 3, 3, 4, 4, 5]
ls = [x for x in n if x % 2 == 0]
|
[
"islam-machine@gmail.com"
] |
islam-machine@gmail.com
|
0d0b792b12ae89e4345a54d9f5615577cc7c649f
|
9c09818cd7eabfe56c4744ab879f9e460f49add0
|
/gui/adapt.py
|
2210583aac5ee7946f633f9123aabb12def3c1b5
|
[] |
no_license
|
shamjithkv/gdb-gui
|
5498d2041829925917498cb34c22cec1e68deb73
|
b21838a4ea1a68f9d14511f53f4c5c1196478a0c
|
refs/heads/master
| 2020-07-27T22:48:30.282103
| 2017-08-28T16:52:57
| 2017-09-06T12:29:00
| 209,237,135
| 1
| 0
| null | 2019-09-18T06:42:57
| 2019-09-18T06:42:57
| null |
UTF-8
|
Python
| false
| false
| 2,012
|
py
|
# Copyright (C) 2015, 2016 Tom Tromey <tom@tromey.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Adapt to gdb issues.
import gdb
import gui.params
# The rule for adding a new entry here is that the bug must have some
# notable user-visible effect.
bugs = {
15620: """Your gdb doesn't have a "new breakpoint" event.
This means that the source windows will not show you where
breakpoints have been set.""",
13598: """Your gdb doesn't have a "before prompt" event.
This means that various windows won't be able to react to
commands like "up" or "down".""",
18385: """Your gdb doesn't expose locations on a gdb.Breakpoint.
This can be worked around, but maybe not always reliably.
This means that sometimes breakpoints won't display in source windows.""",
18620: """Your gdb doesn't have a "breakpoint modified" event.
This means that when a pending breakpoint is resolved, the GUI won't
be able to update to reflect that fact."""
}
_warning = """See https://sourceware.org/bugzilla/show_bug.cgi?id=%s
for more information."""
_first_report = True
def notify_bug(bugno):
if not gui.params.warn_missing.value:
return
if not (bugno in bugs):
return
print("################")
print(bugs[bugno])
print(_warning % bugno)
print("")
print("You can use 'set gui mention-missing off' to disable this message.")
print("################")
del bugs[bugno]
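# Example: notify_bug(15620) prints the explanation above once; the entry is
# deleted afterwards, so repeated calls for the same bug stay silent.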
|
[
"tom@tromey.com"
] |
tom@tromey.com
|
4b1ecbe8bfc1dfb288e7e30b8ba859c26d6a53c9
|
b13ca274b4463c9900840ee6516094b7509b6041
|
/empower/lvapp/lvaphandler.py
|
b5c42204161a1e3cc0f451116cafa63d304a1803
|
[
"Apache-2.0"
] |
permissive
|
imec-idlab/sdn_wifi_manager
|
09d206f2f649aa715752d3c44e011d3f54faf592
|
eda52649f855722fdec1d02e25a28c61a8fbda06
|
refs/heads/master
| 2021-06-23T08:03:22.482931
| 2020-12-03T11:30:10
| 2020-12-03T11:30:10
| 162,106,793
| 0
| 0
|
Apache-2.0
| 2019-03-27T16:23:31
| 2018-12-17T09:33:47
|
Python
|
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LVAPs Handerler."""
import tornado.web
import tornado.httpserver
from empower.datatypes.etheraddress import EtherAddress
from empower.restserver.apihandlers import EmpowerAPIHandler
from empower.core.resourcepool import ResourceBlock
from empower.main import RUNTIME
class LVAPHandler(EmpowerAPIHandler):
"""LVAP handler. Used to view LVAPs (controller-wide)."""
HANDLERS = [r"/api/v1/lvaps/?",
r"/api/v1/lvaps/([a-zA-Z0-9:]*)/?"]
def get(self, *args, **kwargs):
""" Get all LVAPs or just the specified one.
Args:
lvap_id: the lvap address
Example URLs:
GET /api/v1/lvaps
GET /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) > 1:
raise ValueError("Invalid URL")
if not args:
self.write_as_json(RUNTIME.lvaps.values())
else:
lvap = EtherAddress(args[0])
self.write_as_json(RUNTIME.lvaps[lvap])
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(200, None)
def put(self, *args, **kwargs):
""" Set the WTP for a given LVAP, effectivelly hands-over the LVAP to
another WTP
Args:
lvap_id: the lvap address
Request:
version: the protocol version (1.0)
Example URLs:
PUT /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) != 1:
raise ValueError("Invalid URL")
request = tornado.escape.json_decode(self.request.body)
if "version" not in request:
raise ValueError("missing version element")
lvap_addr = EtherAddress(args[0])
lvap = RUNTIME.lvaps[lvap_addr]
if "wtp" in request:
wtp_addr = EtherAddress(request['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
lvap.wtp = wtp
elif "blocks" in request:
pool = []
for block in request["blocks"]:
wtp_addr = EtherAddress(block['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
hwaddr = EtherAddress(block['hwaddr'])
channel = int(block['channel'])
band = int(block['band'])
r_block = ResourceBlock(wtp, hwaddr, channel, band)
pool.append(r_block)
lvap.blocks = pool
if "encap" in request:
encap = EtherAddress(request["encap"])
lvap.encap = encap
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(204, None)
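# A hypothetical request body for the PUT handler above (a sketch; the field
# names follow the code, the addresses are made up):
#
#     {
#       "version": "1.0",
#       "wtp": "00:0d:b9:2f:56:64"
#     }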
|
[
"pedroisolani@gmail.com"
] |
pedroisolani@gmail.com
|
9e4155600c0214f59a5e96c16b9db5ae5e42af68
|
c4a0e8e5707f95c934a82e92a32cf749e35ab8d1
|
/Chapter3/numberprint.py
|
b96674777e5b1719dec3f306c7d998f7b31627a7
|
[] |
no_license
|
Blattarex/PublicFolder
|
2c42c5b760f03f53501c3a28834b91cf1eea721f
|
46b7d57fb92796fb0ac533b12125505c65b35889
|
refs/heads/master
| 2020-04-23T11:47:12.165028
| 2014-02-28T02:04:06
| 2014-02-28T02:04:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
file = open("binary.dat", "rb")
size = struct.calcsize("i")
bytes_read = file.read(size)
while bytes_read:
value = struct.unpack("i", bytes_read)
value = value[0]
print(value, end=" ")
bytes_read = file.read(size)
file.close()
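# A companion writer sketch (hypothetical; shows how binary.dat could be
# produced with the same "i" struct format):
#
#     with open("binary.dat", "wb") as out:
#         for value in (1, 2, 3):
#             out.write(struct.pack("i", value))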
|
[
"pidgetmidget@gmail.com"
] |
pidgetmidget@gmail.com
|
1e273a85868f0f6b461bfd41551779c6a908e717
|
eab72229ae04d1160704cbf90a08a582802a739c
|
/pipeline.py
|
951739aed5ac7ad0818e105dbff2397a48108344
|
[
"MIT"
] |
permissive
|
megatazm/Crowd-Counting
|
444d39b0e3d6e98995f53badf4c073829038b6b7
|
647a055baccee2c3b6b780f38930e2ffd14d1664
|
refs/heads/master
| 2022-04-01T04:49:16.409675
| 2020-01-31T21:24:02
| 2020-01-31T21:24:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
import os
# Crop area
#os.system("python3 crop.py")
## APPROACH 1 MCNN
os.system("python3 put_zero_image.py")
os.system("python3 test.py")
os.system("python3 put_zero_den.py")
os.system("python3 find_people.py")
os.system("python3 position.py")
## APPROACH 2 - RNN
#os.system("python3 tiny_face_eval.py --weight_file_path weight --prob_thresh 0.04 --nms_thresh 0.0")
## TRACKING
# Put heads into file
#os.system("python3 get_heads.py")
# Track heads among videos
#os.system("python3 track_video.py")
|
[
"gpsunicamp016@gmail.com"
] |
gpsunicamp016@gmail.com
|
bf439e9862b4ae08f44e047b1d51ff58c9ae6f67
|
c6666d0235d1d03ed9a5a2d1a3cfa9ccc9d9e88c
|
/webcore/migrations/0001_initial.py
|
bc167afd1bfb17d3738481c8cc02fc4ac5b3fcf0
|
[] |
no_license
|
boiyelove/savingsensei
|
67dc8a5690c7599dd126159837af6e567852aa73
|
8acd46ebd770b9e18f64e14ff08bfd2ddbcc0edc
|
refs/heads/master
| 2021-10-20T01:32:10.775234
| 2019-02-25T03:27:31
| 2019-02-25T03:27:31
| 172,426,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Banner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('desc', models.CharField(max_length=60)),
('btn_link', models.URLField()),
('btn_title', models.CharField(max_length=18)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=30)),
('content', models.TextField()),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"daahrmmieboiye@gmail.com"
] |
daahrmmieboiye@gmail.com
|
4ec929b17336e778d3d9066994a98845264bb1ae
|
e168e0dde760fc5fac8f96f8ac9f6d86ba471685
|
/ex41.py
|
84577b13ad6e3f8c529a9cbbc79c903904ce4212
|
[] |
no_license
|
Kalinitch/LPTHW
|
a9db1d61d3497c374670fc38f58123216aa32d01
|
a005cbd09a5bae1cd77a7f1c7777ad277d078510
|
refs/heads/master
| 2021-09-06T11:13:17.876476
| 2018-02-05T21:36:19
| 2018-02-05T21:36:19
| 111,433,628
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,419
|
py
|
import random
from urllib import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)" :
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
else:
PHRASE_FIRST = False
#load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(word.strip())
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
        # fake class names
        for word in class_names:
            result = result.replace("%%%", word, 1)

        # fake other names
        for word in other_names:
            result = result.replace("***", word, 1)

        # fake parameter lists (bug fix: these come from param_names, not class_names)
        for word in param_names:
            result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until they hit CTRL-D
try:
while True:
snippets = PHRASES.keys()
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
|
[
"31504128+Kalinitch@users.noreply.github.com"
] |
31504128+Kalinitch@users.noreply.github.com
|
32c853e551c807d8e0690e5eef0d6d8be54c3e94
|
da6bf15899bff60785b4659277a94c043c669f8f
|
/morse.py
|
198d3b555f073e1525d3fc5bef7806a92a91b463
|
[] |
no_license
|
sandraonne/Python2
|
97aabe49b16043e3420dda1f49a18680b7268b01
|
7a07a02282afa1bb14390edf9082ee7da7200c6e
|
refs/heads/master
| 2021-01-24T00:33:29.414648
| 2018-02-24T19:02:40
| 2018-02-24T19:02:40
| 122,771,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
# Write a program that, on startup, greets the user both in plain
# language and in Morse code, then lets the user enter words and
# converts them symbol by symbol into the Morse alphabet (adding a
# space after each symbol). The dictionary may not contain every
# possible character, so each symbol must first be checked for
# presence in the dictionary. Only lowercase letters are used for the
# check. Also, o, a and u are used in place of õ, ä, ü and ö.
tahestik = {"a":".-", "b":"-...", "c":"-.-.", "d":"-..", "e":".", "f":"..-.", "g":"--.", "h":"....", "i":"..",
"j":".---", "k":"-.-", "l":".-..", "m":"--", "n":"-.", "o":"---", "p":".--.", "q":"--.-", "r":".-.",
"s":"...", "t":"-", "u":"..-", "v":"...-", "w":".--", "x":"-..-", "y":"-.--", "z":"--..", " ":".......",
"ä":".-", "õ":"---", "ö":"---", "ü":"..-"}
tervitus = "Tervist"
print("Tervist")
for taht in tervitus:
for voti in tahestik:
if (taht.lower() == voti):
print(tahestik[voti])
sona = input("Enter a word or sentence: ")
for taht in sona:
for voti in tahestik:
if (taht.lower() == voti):
print(tahestik[voti])
# else:
#     print("This symbol does not appear in the Morse alphabet")
|
[
"sandra.onne@khk.ee"
] |
sandra.onne@khk.ee
|
f4c8214ad10722f296a55d35be50738047f802c1
|
0ffaf53637a5c2f776c8b65957007c2a59667ca4
|
/access_presentation_data/flow_logic.py
|
bd34acdb4b20e7c38c21d7946681777846884335
|
[] |
no_license
|
henriquekfmaia/LTSimApi
|
a4667de6c871be10c68fdfb1762c78e086ce3ec2
|
f77f5a8dfa44522496bb16cd613c888bdfe134e7
|
refs/heads/master
| 2020-03-20T06:08:29.875501
| 2018-10-23T12:09:02
| 2018-10-23T12:09:02
| 124,909,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
import math
import logic.comparer as co
def add_flows(flowA, flowB):
flowA.waterFlow.value = float(flowA.waterFlow.value) + float(flowB.waterFlow.value)
totalMass = float(flowA.massFlow.value) + float(flowB.massFlow.value)
if totalMass == 0:
totalMass = 1
wA = float(flowA.massFlow.value)/totalMass
wB = float(flowB.massFlow.value)/totalMass
flowA.massFlow.value = totalMass
arraySum = 0
for i in range(1, len(flowA.sizeDistribution.value.array)):
flowA.sizeDistribution.value.array[i].value = wA*float(flowA.sizeDistribution.value.array[i].value) + wB*float(flowB.sizeDistribution.value.array[i].value)
arraySum += flowA.sizeDistribution.value.array[i].value
flowA.sizeDistribution.value.array[0].value = 100 - arraySum
return flowA
def restart_flow(flow):
flow.waterFlow.value = 0
flow.massFlow.value = 0
for i in flow.sizeDistribution.value.array:
i.value = 0
flow.sizeDistribution.value.array[0].value = 100
return flow
def is_flow_known(flow):
return(hasattr(flow.waterFlow, 'value') and hasattr(flow.massFlow, 'value') and hasattr(flow.sizeDistribution, 'value'))
#return (flow.waterFlow.value == None or flow.massFlow.value == None or flow.sizeDistribution.value == None)
def get_flow_errors(flowA, flowB):
errors = []
errors.append(co.get_errors_number(flowA.waterFlow.value, flowB.waterFlow.value))
errors.append(co.get_errors_number(flowA.massFlow.value, flowB.massFlow.value))
errors.append(co.get_errors_distribution(flowA.sizeDistribution, flowB.sizeDistribution))
return max(errors)
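# Worked example for add_flows above (hypothetical numbers): mixing 2 t/h of
# flow A (60% in a size class) with 6 t/h of flow B (20% in that class) gives
# weights wA = 0.25, wB = 0.75 and a combined fraction of
# 0.25*60 + 0.75*20 = 30%.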
|
[
"henrique.maia@poli.ufrj.br"
] |
henrique.maia@poli.ufrj.br
|
1dce9e2b6a0d7482d47bab26f855b928218015ad
|
ed7f129761f6ef2feaa3808c52bb306522911950
|
/app/main/__init__.py
|
a7fafd9bbdc8a3505fd7bca1b7159365735f55a0
|
[] |
no_license
|
dpvitt/notes
|
de7f55ad782a727cbd684b6b79ca2a4e38cf642d
|
3d7171fdc401a5c38e757b27e2dfee2b857e8939
|
refs/heads/master
| 2020-12-25T11:15:29.331707
| 2016-07-02T21:54:05
| 2016-07-02T21:54:05
| 61,044,112
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
from flask import Blueprint
main_route = Blueprint('main_route', __name__)
from . import views, errors
|
[
"daniel@danielpavitt.com"
] |
daniel@danielpavitt.com
|
8ac537b07be9198d7e763729701e1d2f9534ce38
|
456f6b8e7a8df0ea3e7c80a183a665f4066ef11d
|
/mulTwoInt.py
|
5eeb78d1e241c012f8e9cd4b081829ed913f8d77
|
[] |
no_license
|
diksha002/TD3_Diksha_Dwijesh
|
441118500865aed4908032b99e04501f0717160b
|
36609fd73f4d7edaf2f0ba38825e3a77932812c5
|
refs/heads/master
| 2021-02-14T19:23:51.276272
| 2020-03-10T20:20:49
| 2020-03-10T20:20:49
| 244,827,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
py
|
import sys

def mul(a, b):
    multiply = a * b
    return multiply

if __name__ == "__main__":
    if len(sys.argv) > 3:
        print("Error! Enter only two values")
    elif len(sys.argv) == 1:
        print("Error! Enter two values")
        a = int(input("first value: "))
        b = int(input("second value: "))
        print(mul(a, b))
    elif len(sys.argv) == 2:
        print("Error! Enter one more value")
        x = int(sys.argv[1])
        y = int(input("enter the second value: "))
        print(mul(x, y))  # bug fix: was mul(a, b), which are undefined here
    else:
        x = int(sys.argv[1])
        y = int(sys.argv[2])
        print(mul(x, y))  # bug fix: was mul(a, b)
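# Example invocations (hypothetical):
#
#   python mulTwoInt.py 6 7    -> prints 42
#   python mulTwoInt.py        -> prompts for both values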
|
[
"dwi.rag123@gmail.com"
] |
dwi.rag123@gmail.com
|
f8711cdf3509fa00badbd49057ade5fedad53403
|
94b2834ded570173806e7c49b9bbf0e36d57d2cf
|
/weather_api/users/migrations/0001_initial.py
|
bc1d65974c906974e97b3ce328a099a8eb5de3a8
|
[
"MIT"
] |
permissive
|
dimka2014/weather-api
|
2bb2ef02357ab1f498f7469b3dfe4d46c9be6772
|
14456b87c2da2edf4350f822788e04287d62e744
|
refs/heads/master
| 2021-01-20T05:32:40.930109
| 2017-05-04T11:21:38
| 2017-05-04T11:21:38
| 89,790,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,360
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-28 10:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('confirmation_token', models.CharField(blank=True, default=None, max_length=32, null=True)),
('reset_password_token', models.CharField(blank=True, default=None, max_length=32, null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'swappable': 'AUTH_USER_MODEL',
},
),
]
|
[
"dimabelyaev27@gmail.com"
] |
dimabelyaev27@gmail.com
|
7dee759bea033347ff76f5b4a3103bb226993ca7
|
c5975130be45244b9e163b280cee80d76021be23
|
/Demo_exp_simulation.py
|
cc2a2077e2fac91902a18c599112a0448cc4c9c6
|
[] |
no_license
|
shaunak-desai/Laser-Tracker-simulation
|
ce3fed597b4603e030db2d6846ea5ab1066ab547
|
d3a3dea274c60ce94ccbf8e669383a550832cb2f
|
refs/heads/main
| 2023-03-11T15:21:48.358890
| 2021-02-18T12:28:48
| 2021-02-18T12:28:48
| 340,024,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,010
|
py
|
'''
Simulation for a demo experiment with 4 targets placed at an appropriate distance to simulate the 4 corners of the
ATHENA WFI CCD detector
'''
import numpy as np
import transformations as t
import transform
import matplotlib.pyplot as plt
import Uncertainities as un
import math as m
from Kabsch_best_fit import best_fit
'''
###############################################################################
'''
def demo_err(window_thickness, window_centring_theta, window_centring_phi, distance, mechanical_un, pressure, glass_index,iterations):
#Initilizing the 6-DOF arrays
X = []
Y = []
Z = []
Roll = []
Pitch = []
Yaw = []
scaling = False
for i in range(iterations):
        # Target coordinates in the demo_CCD coordinate frame
CCD_target_1 = np.matrix([[-0.070],[0.070],[0],[1]])
CCD_target_2 = np.matrix([[0.070],[0.070],[0],[1]])
CCD_target_3 = np.matrix([[-0.070],[-0.070],[0],[1]])
CCD_target_4 = np.matrix([[0.070],[-0.070],[0],[1]])
targets_demo_CCD_frame = np.concatenate((CCD_target_1, CCD_target_2, CCD_target_3, CCD_target_4), axis = 1)
#transformation matrix to convert target coordinates from demo_CCD frame to LT frame
demo_CCD_to_LT = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1,-distance],[0,0,0,1]])
        # Conversion of target coordinates to LT frame
Homogeneous_modelled_points = (demo_CCD_to_LT * targets_demo_CCD_frame).T
#removing the fourth dimension for ease of calculations
modelled_points = np.delete(Homogeneous_modelled_points, 3, 1)
#Various sources of errors
LT_err = un.LT_uncertainities(modelled_points, 1).T
Window_err = un.Window_uncertainities(modelled_points, window_thickness, window_centring_theta, window_centring_phi)
#print (Window_err)
#Vaccum_index_err = un.vaccum_pressure(modelled_points, window_centring, glass_index, pressure)
#print(Vaccum_index_err)
Mechanical_err = un.Mechanical_uncertainities(modelled_points, mechanical_un, mechanical_un, mechanical_un)
modelled_points_1 = modelled_points
        # Adding mechanical uncertainties of mounting the targets
modelled_points_1 = modelled_points + Mechanical_err
#Converting the modelled points into Laser tracker's spherical coordinate system
spherical_points = transform.cartesian_to_spherical(modelled_points_1)
        # Adding the uncertainties from the Laser Tracker
spherical_points = spherical_points + LT_err
        # Adding uncertainties from the window
spherical_points = spherical_points + Window_err
        # Adding angular shifts due to vacuum from Snell's law
#spherical_points = spherical_points + Vaccum_index_err
#Converting back to the cartesian coordinate system
cartesian_points = transform.spherical_to_cartesian(spherical_points)
Homogeneous_transform = t.superimposition_matrix(modelled_points, cartesian_points, usesvd=True)
Rotation = np.matrix(Homogeneous_transform[0:3, 0:3])
Translation = np.matrix(Homogeneous_transform[0:3,3]).T
#[Rotation, Translation] = best_fit(cartesian_points, modelled_points)
#calculating homogeneous transformation function
#Homogeneous_transform = np.zeros((4,4))
#Homogeneous_transform[:3,:3] = Rotation
#Homogeneous_transform[0,3] = Translation[0]
#Homogeneous_transform[1,3] = Translation[1]
#Homogeneous_transform[2,3] = Translation[2]
#Homogeneous_transform[3,3] = 1
#Finding the euler angles from the homogeneous transformation matrix
euler_angles = np.matrix(t.euler_from_matrix(Homogeneous_transform)).T
for i in range(3):
euler_angles[i,0] = m.degrees(euler_angles[i,0]) * 3600
        # Appending the 6-DOF values in each iteration
X.append((Translation[0,0])*1e6)
Y.append((Translation[1,0])*1e6)
Z.append((Translation[2,0])*1e6)
Roll.append(euler_angles[0,0])
Pitch.append(euler_angles[1,0])
Yaw.append(euler_angles[2,0])
#calculating the standard deviation for the 6-DOF values
X = np.std(X)
Y = np.std(Y)
Z = np.std(Z)
Roll = np.std(Roll)
Pitch = np.std(Pitch)
Yaw = np.std(Yaw)
return X, Y, Z, Roll, Pitch, Yaw
'''
###############################################################################
'''
X = []
Y = []
Z = []
Roll = []
Pitch = []
Yaw = []
# changing LT distance
for LT_distance in np.linspace(1.7, 2.9, num = 5):
[x, y, z, roll, pitch, yaw] = demo_err(4e-5, 135, 135, LT_distance, 0, 0.01, 1.51289,10000)
X.append(x)
Y.append(y)
Z.append(z)
Roll.append(roll)
Pitch.append(pitch)
Yaw.append(yaw)
'''
#changing pressure in vaccum
for pressure in np.linspace(1e-6, 0.01, num = 100):
[x, y, z, roll, pitch, yaw] = demo_err(0, 0.00016968, 2, 0, pressure, 1.51289, 100)
X.append(x)
Y.append(y)
Z.append(z)
Roll.append(roll)
Pitch.append(pitch)
Yaw.append(yaw)
'''
'''
###############################################################################
'''
distance = np.linspace(1.7, 2.9, num =5)
plt.figure()
plt.plot(distance, X, 'r')
plt.plot(distance, Y, 'b')
plt.plot(distance, Z, 'g')
plt.legend(['X-err', 'Y-err', 'Z-err'], loc = 2)
plt.xlabel('Distance of LT from the Target plane (meters)')
plt.ylabel('Errors ($\mu$m)')
plt.title('Translational error propagation with increasing distance')
plt.grid()
plt.show()
plt.figure()
plt.plot(distance, Roll, 'r')
plt.plot(distance, Pitch, 'b')
plt.plot(distance, Yaw, 'g')
plt.legend(['Roll', 'Pitch', 'Yaw'], loc =2)
plt.xlabel('Distance of LT from the Target plane (meters)')
plt.ylabel('Errors (arcsecond)')
plt.title('Rotational error propagation with increasing distance')
plt.grid()
plt.show()
'''
###############################################################################
'''
'''
pressure = np.linspace(1e-6, 0.01, num = 100)
plt.figure()
plt.plot(pressure, X, 'r')
plt.plot(pressure, Y, 'b')
plt.plot(pressure, Z, 'g')
plt.legend(['X-err', 'Y-err', 'Z-err'], loc = 2)
plt.xlabel('Pressure in vacuum chamber (mbar)')
plt.ylabel('Errors ($\mu$m)')
plt.title('Translational error')
plt.grid()
plt.show()
plt.figure()
plt.plot(pressure, Roll, 'r')
plt.plot(pressure, Pitch, 'b')
plt.plot(pressure, Yaw, 'g')
plt.legend(['Roll', 'Pitch', 'Yaw'], loc =2)
plt.xlabel('Pressure in vacuum chamber (mbar)')
plt.ylabel('Errors (arcsecond)')
plt.title('Rotational error')
plt.grid()
plt.show()
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
7050fce62d11317def18ad9ac77a6d3dfed8819d
|
b4ca970112c5eefeb5863535aefbea8b51ac4daa
|
/app.py
|
73706eb3be17717dc19fcbe4f946b3e5b618c363
|
[] |
no_license
|
ramitsharma94/financial_summarizer
|
59c84d2a670605059840a21c5bac36848a076e42
|
13a75ba5bc648c2ede9d0f8f9595ed7cf8e7c439
|
refs/heads/main
| 2023-07-06T09:22:40.173135
| 2021-08-09T21:56:34
| 2021-08-09T21:56:34
| 314,419,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,622
|
py
|
from flask import Flask, render_template, url_for, request, redirect, jsonify, Response
# from flask_sqlalchemy import SQLAlchemy
import flask_excel as excel
from datetime import datetime
import pandas as pd
from werkzeug.utils import secure_filename
from flask_uploads import UploadSet, configure_uploads, DOCUMENTS, IMAGES
import torch
import time
from models_summarizer import predict_summaries, scrape_web_data
from models_QnA import answer
app = Flask(__name__)
docs = UploadSet('datafiles', DOCUMENTS)
app.config['UPLOADED_DATAFILES_DEST'] = 'static/uploads'
configure_uploads(app, docs)
@app.route("/", methods = ['GET', "POST"])
def index():
if(request.method == "POST"):
option = request.form["links"]
if(option == "single_link"):
try:
link = request.form["Enter_Link"]
input_text = scrape_web_data(link)
df2 = pd.DataFrame()
df2["input_text"] = [input_text]
df2["Predicted_Summaries"] = [predict_summaries(input_text)]
df2.drop("input_text", axis =1, inplace = True)
sub_q = "Who is the subject?"
obj_q = "Who is the object?"
df2['Subject_Predicted'] = df2['Predicted_Summaries'].apply(lambda x: answer(x, sub_q))
df2['Object_Predicted'] = df2['Predicted_Summaries'].apply(lambda x: answer(x, obj_q))
html = df2.to_html()
text_file = open("./templates/results.html", "w", encoding = "utf8")
text_file.write(html)
text_file.close()
return render_template("results.html")
except:
return "Please enter the correct link!"
else:
try:
filename = request.files['file']
data = pd.read_excel(filename)
links = data["Link"]
dict_links = {}
for link in links:
if(link not in dict_links):
dict_links[link] = scrape_web_data(link)
else:
dict_links[link] = 0
df2 = pd.DataFrame()
df2["input_text"] = [v for k, v in dict_links.items()]
print("Web Scraping Done. Prediction Start!")
summ = []
for i, text in enumerate(df2["input_text"]):
summ.append(predict_summaries(text))
print("Done: {}".format(i))
df2["Predicted_Summaries"] = summ
df2.drop("input_text", axis =1, inplace = True)
sub_q = "Who is the subject?"
obj_q = "Who is the object?"
df2['Subject_Predicted'] = df2['Predicted_Summaries'].apply(lambda x: answer(x, sub_q))
df2['Object_Predicted'] = df2['Predicted_Summaries'].apply(lambda x: answer(x, obj_q))
                ## Everything will be written to an HTML file
html = df2.to_html()
text_file = open("./templates/results.html", "w", encoding = "utf8")
text_file.write(html)
text_file.close()
return render_template("results.html")
except:
return "Either the input link is incorrect or the column name is incorrect!"
else:
return render_template("index.html")
if __name__=="__main__":
app.run(debug = True)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6844ce56ffa18f4d971b348110a9f410a1502c7e
|
a3c662a5eda4e269a8c81c99e229879b946a76f6
|
/.venv/lib/python3.7/site-packages/pylint/test/input/func_noerror_yield_assign_py25.py
|
f40d8d96e837e9022fc2596b23ce8733990a450c
|
[
"MIT"
] |
permissive
|
ahmadreza-smdi/ms-shop
|
0c29da82c58b243507575672bbc94fb6e8068aeb
|
65ba3f3061e2ac5c63115b08dadfe7d67f645fb6
|
refs/heads/master
| 2023-04-27T19:51:34.858182
| 2019-11-24T20:57:59
| 2019-11-24T20:57:59
| 223,616,552
| 6
| 2
|
MIT
| 2023-04-21T20:51:21
| 2019-11-23T16:09:03
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""http://www.logilab.org/ticket/8771"""
from __future__ import print_function
def generator():
"""yield as assignment"""
yield 45
xxxx = yield 123
print(xxxx)
def generator_fp1(seq):
"""W0631 false positive"""
for val in seq:
pass
for val in seq:
yield val
def generator_fp2():
"""E0601 false positive"""
xxxx = 12
yield xxxx
|
[
"ahmadreza.smdi@gmail.com"
] |
ahmadreza.smdi@gmail.com
|