Dataset schema (column name: type, with observed lengths/ranges):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 2 to 616)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 69)
- license_type: string (2 classes)
- repo_name: string (length 5 to 118)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (length 4 to 63)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k to 686M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2 to 10.3M)
- extension: string (246 classes)
- content: string (length 2 to 10.3M)
- authors: list (length 1)
- author_id: string (length 0 to 212)

Each record below lists these fields in this order, separated by `|`, with the file content included inline.
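As a rough illustration, the snippet below sketches how records with this schema could be loaded and filtered. It assumes the dump is published as a Hugging Face dataset and that the `datasets` library is installed; the dataset path `example/python-files` is a placeholder, not the real name.

```python
# Minimal sketch (assumption: "example/python-files" is a placeholder dataset path).
from datasets import load_dataset

ds = load_dataset("example/python-files", split="train", streaming=True)

for record in ds:
    # Keep small, human-written, permissively licensed files.
    if (record["license_type"] == "permissive"
            and not record["is_vendor"]
            and not record["is_generated"]
            and record["length_bytes"] < 10_000):
        print(record["repo_name"], record["path"], record["length_bytes"])
```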
e0f5eb927f3f0f40a96e58cfbc60a74967d760a0
|
f7a4393711d5ce971ecb77f3516745bba5aa169f
|
/squitra/urls.py
|
eff9891ba9b4ccb9f58a402405500c6197a8b05f
|
[] |
no_license
|
shubh-scientist/squirrel
|
ac4da62212a54bbec0b8c371ca53e45844a83788
|
b7f0ecaa03b0ead5516e072481766e5beef7fa0d
|
refs/heads/master
| 2023-04-04T09:50:20.478109
| 2021-04-15T05:34:49
| 2021-04-15T05:34:49
| 356,728,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
from django.urls import path
from . import views

app_name = 'squap'

urlpatterns = [
    path('map/', views.view_map, name='map'),
    path('', views.view_main, name='main'),
    path('sightings/', views.sightings_view, name='sightings'),
    path('sightings/stats', views.stats_view, name='stats'),
    path('sightings/<str:Unique_Squirrel_ID>/', views.update_squirrel_view, name='edit_sighting'),
    path('sightings/add', views.add_squirrel_view, name='add_sighting'),
]
|
[
"sarthakarora@yahoo.com"
] |
sarthakarora@yahoo.com
|
6a6413464dd983edc595212f9cc4c86527804bdf
|
210a726450e43f347f11ecceb3fec3a0febc4f58
|
/virtualenv/bin/pytest
|
fb7fc79f46bad309f590d660a1932bd8e89f9a9c
|
[] |
no_license
|
ngonzo95/BarrenLandAnalysis
|
fd206e026641e7a9adb3c67bf708a30b40d40842
|
4356397042e8398786701aaee455cf9f71eae0cb
|
refs/heads/master
| 2022-07-06T12:07:44.949072
| 2019-11-15T23:49:00
| 2019-11-15T23:49:00
| 221,755,578
| 0
| 0
| null | 2022-06-21T23:26:26
| 2019-11-14T17:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
#!/Users/Nick/Documents/BarrenLandAnalysis/virtualenv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys

from pytest import main

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ngonzo95@gmail.com"
] |
ngonzo95@gmail.com
|
|
46d6615dcb4ee5611f606ad779a55bf8197e7458
|
9f02b9c470dcd24e82e0806cb71a64e2b930dc95
|
/node_modules/fibers/build/config.gypi
|
f8cda853d5fc10863cda6e475d746cf35687faa6
|
[
"MIT"
] |
permissive
|
MeliorAI/web-automated-tests
|
08e19720750cc29e51d7455afc149fe78bd62821
|
618991074de4a394b064d7ccf847e99c3ca1c60f
|
refs/heads/master
| 2020-03-28T09:37:58.770791
| 2018-09-10T09:16:47
| 2018-09-10T09:16:47
| 148,047,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,941
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"gas_version": "2.23",
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.57",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "false",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/jose/.node-gyp/8.9.1",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/5.5.1 node/v8.9.1 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "500",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/jose/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"tmp": "/tmp",
"depth": "Infinity",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr/lib/nodejs/node-v8.9.1",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/jose/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"prefer_offline": "",
"color": "true",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"auth_type": "legacy",
"node_version": "8.9.1",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/lib/nodejs/node-v8.9.1/etc/npmrc",
"init_module": "/home/jose/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/lib/nodejs/node-v8.9.1/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
|
[
"jose.marcos.rf@gmail.com"
] |
jose.marcos.rf@gmail.com
|
0153ff9a87faecc4f033e17915d1b292fc0049a8
|
a4f18871843878802f89ef3701055f9756cc425b
|
/src/ciudadfutura/apps/admin/urls.py
|
05233dc1e1be07ce3393423eeeeab665be29a9e4
|
[] |
no_license
|
CiudadFutura/ciudad-futura
|
01cc791e94e6aa24cbfcd961d960083132eb4850
|
091acb1c409ff2f73ca6a8f214e8272793782ac3
|
refs/heads/master
| 2016-09-15T12:43:45.355225
| 2015-11-11T22:35:04
| 2015-11-11T22:35:04
| 42,077,468
| 1
| 0
| null | 2016-05-06T01:39:18
| 2015-09-07T22:32:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,703
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
# Admin urls
url(r'^admin/$', views.admin_dashboard, name='dashboard'),
url(r'^admin/login/$', views.admin_login, name='login'),
url(r'^admin/logout/$', views.admin_logout, name='logout'),
# Tags urls
url(r'^admin/tag/$', views.admin_tag_list, name='tag-list'),
url(r'^admin/tag/create/$', views.admin_tag_form, name='tag-create'),
url(r'^admin/tag/(?P<tag_id>\d+)/edit/$', views.admin_tag_form, name='tag-edit'),
url(r'^admin/tag/(?P<tag_id>\d+)/delete/$', views.admin_tag_delete, name='tag-delete'),
# User urls
url(r'^admin/user/$', views.admin_user_list, name='user-list'),
url(r'^admin/user/create/$', views.admin_user_form, name='user-create'),
url(r'^admin/user/(?P<user_id>\d+)/$', views.admin_user_details, name='user-details'),
url(r'^admin/user/(?P<user_id>\d+)/edit/$', views.admin_user_form, name='user-edit'),
url(r'^admin/user/(?P<user_id>\d+)/delete/$', views.admin_user_delete, name='user-delete'),
# Product urls
url(r'^admin/product/$', views.admin_product_list, name='product-list'),
url(r'^admin/product/create/$', views.admin_product_form, name='product-create'),
url(r'^admin/product/(?P<product_id>\d+)/edit/$', views.admin_product_form, name='product-edit'),
url(r'^admin/product/(?P<product_id>\d+)/delete/$', views.admin_product_delete, name='product-delete'),
# Supplier urls
url(r'^admin/supplier/$', views.admin_supplier_list, name='supplier-list'),
url(r'^admin/supplier/create/$', views.admin_supplier_form, name='supplier-create'),
url(r'^admin/supplier/(?P<supplier_id>\d+)/edit/$', views.admin_supplier_form, name='supplier-edit'),
url(r'^admin/supplier/(?P<supplier_id>\d+)/delete/$', views.admin_supplier_delete, name='supplier-delete'),
# Circle urls
url(r'^admin/circle/$', views.admin_circle_list, name='circle-list'),
url(r'^admin/circle/create/$', views.admin_circle_form, name='circle-create'),
url(r'^admin/circle/(?P<circle_id>\d+)/edit/$', views.admin_circle_form, name='circle-edit'),
url(r'^admin/circle/(?P<circle_id>\d+)/delete/$', views.admin_circle_delete, name='circle-delete'),
# ShoppingCycle urls
url(r'^admin/shopping/$', views.admin_shopping_cycle_list, name='shopping-cycle-list'),
url(r'^admin/shopping/create/$', views.admin_shopping_cycle_form, name='shopping-cycle-create'),
url(r'^admin/shopping/(?P<shopping_cycle_id>\d+)/edit/$', views.admin_shopping_cycle_form, name='shopping-cycle-edit'),
url(r'^admin/shopping/(?P<shopping_cycle_id>\d+)/delete/$', views.admin_shopping_cycle_delete, name='shopping-cycle-delete'),
]
|
[
"nicofheredia@gmail.com"
] |
nicofheredia@gmail.com
|
188770a875c243b86626f2e6b801de11aaf3a3b4
|
fa641dc4133f1a8626fbbf7c41f11af0f3f1f2cb
|
/Exercises/MaxTemperatures.py
|
ef862ce03e9d7741273d7bc3265fbd0b4c25f3aa
|
[] |
no_license
|
Guttapercha/CEBD-1160-Korchagina-Evgeniya
|
cbcd54d9ec74bde4f30462cc207b9db04d08f07a
|
8a24ab4c8ad910afb2fc732e9943fc1273fb642a
|
refs/heads/master
| 2020-03-09T02:28:09.107726
| 2018-09-28T17:11:14
| 2018-09-28T17:11:14
| 128,540,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
from mrjob.job import MRJob

class MRMaxTemperature(MRJob):

    def mapper(self, _, line):
        (stationID, date, type, temp, a, b, c, v) = line.split(',')
        if (type == 'TMAX'):
            TmaxValue = temp
            yield stationID, TmaxValue

    def reducer(self, stationID, T):
        yield stationID, max(T)

if __name__ == '__main__':
    MRMaxTemperature.run()
|
[
"noreply@github.com"
] |
Guttapercha.noreply@github.com
|
d1995db9e238a1d26af8acb2f4866cd925eff8c4
|
0d6dcc6a1e7942b80f91d905e71ce00f58734a9a
|
/src/alphaorm/utilities/constants.py
|
a74b73405bffbf599ea18176a2dca30f73c4d011
|
[
"MIT"
] |
permissive
|
Losintech/python-alpha-orm
|
7256c0b2d3c2a13d09c06ec45f5bc0665f1cfe94
|
01e88c5cf21b881dc670d605b353df8ae52eb83c
|
refs/heads/master
| 2020-09-26T12:03:41.536618
| 2019-12-02T21:57:01
| 2019-12-02T21:57:01
| 226,251,290
| 1
| 0
|
MIT
| 2019-12-06T05:18:31
| 2019-12-06T05:18:31
| null |
UTF-8
|
Python
| false
| false
| 334
|
py
|
UNDERSCORE_NOT_SUPORRTED_ERROR = 'Column names cannot contain `_` symbol'
SPACE_NOT_SUPORRTED_ERROR = 'Column names should not have a space'

def SETUP_PARAMETER_MISSING(paremeter):
    return f"The '{paremeter}' is required!"

def DATA_TYPE_ERROR(method):
    return f"Parameter passed into method `{method}` must be of type `AlphaRecord`"
|
[
"devclareo@gmail.com"
] |
devclareo@gmail.com
|
02dbfae7172325447630a979a9ced2502b711481
|
3dac0b9e5e7f2ba0fd23825fd6d376985c22a1c4
|
/examples/eap-tls/eap-tls-auth.py
|
3f02d9f9c6cf074662ec8c18927c84f9b63123df
|
[
"LicenseRef-scancode-x11-stanford"
] |
permissive
|
hobama/mininet-iot
|
68dfd90e96bf058a4947efb0403f927eb9b4dad2
|
55ca339f293695c065aad5b3805160098e2872c9
|
refs/heads/master
| 2020-12-03T17:57:38.670559
| 2019-10-16T00:39:26
| 2019-10-16T00:39:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,078
|
py
|
#!/usr/bin/python
'This example shows how to work with authentication'
from mininet.log import setLogLevel, info
from mn_iot.mac80211.cli import CLI_wifi
from mn_iot.mac80211.net import Mininet_wifi
import os.path
import os
from os import path
def topology():
"Create a network."
cwd = os.getcwd()
net = Mininet_wifi()
info("*** Creating nodes\n")
sta1 = net.addStation('sta1',
wpasup_flags='-dd > /tmp/debug1.txt',
wpasup_globals='eapol_version=2',
encrypt='wpa2',
config='key_mgmt=WPA-EAP,'
'identity="mranga@nist.gov",'
'ssid="simplewifi",'
'eap=TLS,'
'scan_ssid=1,'
'ca_cert="{}/examples/eap-tls/CA/ca.crt",'
'client_cert="{}/examples/eap-tls/CA/client.crt",'
'private_key="{}/examples/eap-tls/CA/client.key"'
.format(cwd, cwd, cwd))
sta2 = net.addStation('sta2',
wpasup_flags='-dd > /tmp/debug2.txt',
wpasup_globals='eapol_version=2',
encrypt='wpa2',
config='key_mgmt=WPA-EAP,'
'scan_ssid=1,'
'identity="mranga@nist.gov",'
'eap=TLS,'
'ssid="simplewifi",'
'ca_cert="{}/examples/eap-tls/CA/ca.crt",'
'client_cert="{}/examples/eap-tls/CA/client.crt",'
'private_key="{}/examples/eap-tls/CA/client.key"'
.format(cwd, cwd, cwd))
ap1 = net.addAccessPoint('ap1',
ssid="simplewifi",
hostapd_flags='-dd > /tmp/hostapd.txt',
mode="g", channel="1",
failMode="standalone", datapath='user',
config='eap_server=1,'
'ieee8021x=1,'
'wpa=2,'
'eap_message=howdy,'
'eapol_version=2,'
'wpa_key_mgmt=WPA-EAP,'
'logger_syslog=-1,'
'logger_syslog_level=0,'
'ca_cert={}/examples/eap-tls/CA/ca.crt,'
'server_cert={}/examples/eap-tls/CA/server.crt,'
'private_key={}/examples/eap-tls/CA/server.key,'
'eap_user_file={}/examples/eap-tls/eap_users'
.format(cwd, cwd, cwd, cwd),
isolate_clients=True)
info("*** Configuring wifi nodes\n")
net.configureWifiNodes()
info("*** Associating Stations\n")
net.addLink(sta1, ap1)
net.addLink(sta2, ap1)
info("*** Starting network\n")
net.build()
ap1.start([])
info("*** Adding openflow wireless rule : ")
# For wireless isolation hack. Put a normal flow in there so stations
# can ping each other
ap1.cmd('ovs-ofctl add-flow ap1 "priority=10,actions=in_port,normal"')
info("\n*** Try the following at the CLI \n")
info("sta1 ping sta2 \n")
info("/tmp/debug*.txt and /tmp/hostapd.txt contain logs \n")
info("cat /var/log/syslog | grep hostapd shows you if the authentication succeeded\n")
CLI_wifi(net)
info("*** Stopping network\n")
net.stop()
if __name__ == '__main__':
if path.exists("/tmp/debug1.txt"):
os.remove("/tmp/debug1.txt")
if path.exists("/tmp/debug2.txt"):
os.remove("/tmp/debug2.txt")
if path.exists("/tmp/hostapd.txt"):
os.remove("/tmp/hostapd.txt")
setLogLevel('info')
topology()
|
[
"ramonreisfontes@gmail.com"
] |
ramonreisfontes@gmail.com
|
f73f75bfe2a79428ec9f853d12da4bb691f3e073
|
364f674a86e688366288aceda1c4c02526454c89
|
/multimodal_models/StackGAN_V2_PyTorch/train.py
|
aa0829388df092c65a5d51b57bb27bdacfead1cc
|
[
"MIT"
] |
permissive
|
Rajarshi1001/model-zoo
|
6a70d32dde4298ada8ccad3248e154f2a20ed627
|
e0fa2e1899274baba3e6e2a6bd92fd4f3a8a3b3d
|
refs/heads/master
| 2023-08-19T19:15:53.426509
| 2021-09-28T03:54:12
| 2021-09-28T03:54:12
| 370,114,964
| 0
| 0
|
MIT
| 2021-05-23T17:27:31
| 2021-05-23T17:27:30
| null |
UTF-8
|
Python
| false
| false
| 9,129
|
py
|
import torch
import numpy as np
from discriminator_model import D64, D512, D256, D128, D1024
import time, cv2, models
from postprocessing import postprocessing
from datasets import TextDataset
import helper_functions.config as cfg
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import warnings
warnings.filterwarnings('ignore')
# from helper_functions.losses import GenLoss
class StackGAN():
def __init__(self, eval_ = False,
inn_channels = cfg.channels,
generatorLR = cfg.generatorLR,
discriminatorLR = cfg.discriminatorLR,
StageNum = 4, beta1 = 0.5, beta2 = 0.999,
zDim = 100):
self.zDim = zDim
self.inn_channels = inn_channels
self.eval_ = eval_
self.StageNum = StageNum
if cfg.inception:
from losses import INCEPTION_V3, compute_inception_score
self.inception_model = INCEPTION_V3()
inception_model = inception_model.cuda()
inception_model.eval()
image_transform = transforms.Compose([
transforms.Scale(int((64 * 4) * 76 / 64)),
transforms.RandomCrop(64 * 4),
transforms.RandomHorizontalFlip()])
self.dataset = TextDataset('birds', 'train',
base_size=64,
transform=image_transform,
StageNum=StageNum)
self.generator = models.G_NET(StageNum=StageNum, zDim=zDim).cuda()
self.discriminator = []
if StageNum == 1:
self.discriminator.append(D_NET64(self.inn_channels).cuda())
elif StageNum == 2:
self.discriminator.append(D64(self.inn_channels).cuda())
self.discriminator.append(D128(self.inn_channels).cuda())
elif StageNum == 3:
self.discriminator.append(D64(self.inn_channels).cuda())
self.discriminator.append(D128(self.inn_channels).cuda())
self.discriminator.append(D256(self.inn_channels).cuda())
elif StageNum == 4:
self.discriminator.append(D64(self.inn_channels).cuda())
self.discriminator.append(D128(self.inn_channels).cuda())
self.discriminator.append(D256(self.inn_channels).cuda())
self.discriminator.append(D512(self.inn_channels).cuda())
elif StageNum == 5:
self.discriminator.append(D64(self.inn_channels).cuda())
self.discriminator.append(D128(self.inn_channels).cuda())
self.discriminator.append(D256(self.inn_channels).cuda())
self.discriminator.append(D512(self.inn_channels).cuda())
self.discriminator.append(D1024(self.inn_channels).cuda())
self.generator.apply(models.weights_init)
for i in range(len(self.discriminator)):
self.discriminator[i].apply(models.weights_init)
self.loss = torch.nn.BCELoss().cuda()
from torch.optim import Adam
self.gOptimizer = Adam(self.generator.parameters(), lr=generatorLR, betas=(beta1, beta2))
self.disOptimizer = []
for i in range(len(self.discriminator)):
opt = Adam(self.discriminator[i].parameters(), lr=discriminatorLR, betas=(beta1, beta2))
self.disOptimizer.append(opt)
def train(self, epochs, batchSize, saveInterval):
self.trainData = DataLoader(self.dataset,
batch_size=batchSize,
shuffle=True, drop_last=True,
num_workers=0)
rc = cfg.rowsColums
imgs = []
embs = []
nums = [1992, 1992, 1992, 1992,
5881, 5881, 5881, 5881,
7561, 7561, 7561, 7561,
1225, 1225, 1225, 1225]
for i in range(rc * rc):
imgs.append(self.dataset[nums[i]][0][2].reshape(1, 3, 256, 256))
embs.append(torch.Tensor(self.dataset[nums[i]][2]))
imgs = torch.Tensor(np.concatenate(imgs))
embs = torch.stack(embs).cuda()
embs = self.tile(embs[:rc * rc], rc)
fixedData = (imgs, embs)
noise = torch.Tensor(batchSize, self.zDim).cuda()
fixedNoise = torch.Tensor(cfg.rowsColums * cfg.rowsColums, self.zDim).normal_(0, 1).cuda()
real = torch.Tensor(batchSize).fill_(1).cuda()
fake = torch.Tensor(batchSize).fill_(0).cuda()
sizes = []
base = 64
for i in range(self.StageNum):
sizes.append(base)
base = base * 2
batches = self.trainData.__len__()
predictions = []
for epoch in range(epochs):
totalGenLoss = 0.0
totalKLloss = 0.0
totalDisLoss = 0.0
start = time.time()
for batch, data in enumerate(self.trainData):
images = [0, 0, 0, 0, 0]
for i in range(len(self.discriminator)):
images[i] = data[0][i].cuda()
embeddings = data[2].cuda()
noise.data.normal_(0, 1)
genImgs, mu, logvar = self.generator(noise, embeddings)
mean = mu.detach()
for i in range(len(self.discriminator)):
self.discriminator[i].zero_grad()
imgs = images[i]
logits, uncondLogits = self.discriminator[i](imgs, mean)
realLoss = self.loss(logits, real) + self.loss(uncondLogits, real)
logits, uncondLogits = self.discriminator[i](torch.roll(imgs, 1, 0), mean)
wrongLoss = self.loss(logits, fake) + self.loss(uncondLogits, real)
logits, uncondLogits = self.discriminator[i](genImgs[i].detach(), mean)
fakeLoss = self.loss(logits, fake) + self.loss(uncondLogits, fake)
disLoss = realLoss + wrongLoss + fakeLoss
totalDisLoss += disLoss
disLoss.backward()
self.disOptimizer[i].step()
self.generator.zero_grad()
gLoss = 0
for i in range(len(self.discriminator)):
logits = self.discriminator[i](genImgs[i], mean)
gLoss += self.loss(logits[0], real) + self.loss(logits[1], real)
totalGenLoss += gLoss
KLloss = models.KLloss(mu, logvar) * cfg.KL
totalKLloss += KLloss
gLoss = gLoss + KLloss
gLoss.backward()
self.gOptimizer.step()
if cfg.inception:
pred = self.inception_model(genImgs[-1].detach())
predictions.append(pred.data.cpu().numpy())
if len(predictions) > 100:
predictions = np.concatenate(predictions, 0)
mean, std = compute_inception_score(predictions, 10)
predictions = []
end = time.time()
duration = round(end - start, 1)
print (f"{epoch+1} / {epochs} epoch, Discriminator Loss: {totalDisLoss / batches}, Generator loss: {totalGenLoss / batches}, duration: {duration}s")
if self.eval_:
if epoch % saveInterval == 0:
self.sampleImages(epoch, fixedNoise, fixedData)
torch.save(self.generator.state_dict(), f"models/stackGAN-V2_Generator{epoch}.pyt")
def tile(self, x, n):
for i in range(n):
for j in range(1, n):
x[i * n + j] = x[i * n]
return x
def sampleImages(self, epoch, noise, data):
rc = cfg.rowsColums
genImgs, mu, logvar = self.generator(noise, data[1])
for i in range(self.StageNum):
genImgs[i] = genImgs[i].detach()
self.saveImages(genImgs, data[0], rc, "Train_", epoch)
def saveImages(self, genImgs, trainImgs, rc, name, epoch):
gap = 10
res = cfg.stage3Res
canvasSizeY = res * rc + (rc * gap)
canvasSizeX = canvasSizeY * 3 + (res + gap) + gap
canvas = torch.zeros((canvasSizeY + gap, canvasSizeX, 3), dtype=torch.uint8).cuda()
genImgs[0] = torch.nn.functional.interpolate(genImgs[0], scale_factor=4, mode="nearest")
genImgs[1] = torch.nn.functional.interpolate(genImgs[1], scale_factor=2, mode="nearest")
trainImgs = postprocessing(trainImgs)
for i in range(self.StageNum):
genImgs[i] = postprocessing(genImgs[i])
gapX = gap
gapY = gap
cnt = 0
for i in range(rc):
canvas[gapY:gapY+res, gapX:gapX+res] = trainImgs[i * rc]
for j in range(rc):
for l in range(self.StageNum):
gapX += res + gap
canvas[gapY:gapY+res, gapX:gapX+res] = genImgs[l][cnt]
cnt += 1
gapY += res + gap
gapX = gap
cv2.imwrite(f"images/{name}{epoch}.png", canvas.cpu().numpy())
|
[
"noreply@github.com"
] |
Rajarshi1001.noreply@github.com
|
e9e8fd9060bd56d887265e303395016527e6b814
|
1d0535c988756daad9468776e145baf0f50423d9
|
/FIXME_Get Full Name.py
|
962a0e780c687e6467af651dfa29b944b924676e
|
[] |
no_license
|
NandishRPatel/python-challenges
|
e41e5525ae78ef0f60905d692adf935ed4a9319a
|
d0d154c74005fbbcb064abae9778871afd7bdc70
|
refs/heads/master
| 2023-08-02T00:37:07.626547
| 2021-09-30T05:58:46
| 2021-09-30T05:58:46
| 194,492,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
class Dinglemouse(object):

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    def get_full_name(self):
        return (self.first_name + ' ' + self.last_name).strip()

print(Dinglemouse('', '').get_full_name())
|
[
"1418beit30031@gmail.com"
] |
1418beit30031@gmail.com
|
0114f7c68c4f2eb88ff9ed2a175e2ac1b9ed1ac0
|
8a85eae62f9c45c275679584bd6590c7890fcda1
|
/3/3-1.py
|
28d0e2a23a3ae7c93b2ca2c79f7a20073a721dc7
|
[] |
no_license
|
ryu2129/FE_Python
|
3828c028bca28ab1013895fb328114136bf32934
|
2abb3a0df4855a17880bd12e73380f70df55d471
|
refs/heads/main
| 2023-06-27T07:50:51.621043
| 2021-07-26T08:16:24
| 2021-07-26T08:16:24
| 382,666,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,273
|
py
|
# %%
def triangle_area(base, height):
area = base * height / 2
return area
# %%
area = triangle_area(5, 3)
print(area)
# %%
def sum_list(digitlist):
sum_digit = 0
for digit in digitlist:
if digit.isdigit():
sum_digit += int(digit)
return sum_digit
# %%
digitlist = ['1', '4', 'abc']
sum_digit = sum_list(digitlist)
print(sum_digit)
# %%
def triangle_area(base, height=1):
area = base * height / 2
return area
# %%
area = triangle_area(5)
print(area)
# %%
pow(10, -2)
# %%
drinklist = ['coffee', 'tea', 'water']
print(list(enumerate(drinklist)))
# %%
for i, drink in enumerate(drinklist):
print(i, drink)
# %%
numlist = [1, 2, 4]
def double(x):
return x * 2
list(map(double, numlist))
# %%
numlist = [1, 3, 6, 8]
def even_three_div(x):
return x % 3 == 0
list(filter(even_three_div, numlist))
# %%
meallist = ['steak', 'salad', 'dessert']
drinklist = ['coffee', 'tea', 'water']
list(zip(meallist, drinklist))
# %%
dict_a = dict(steak=1, salad=2, dessert=3)
dict_a
# %%
meallist = ['steak', 'salad', 'dessert']
numlist = [1, 2, 3]
dict_a = dict(zip(meallist, numlist))
dict_a
# %%
double = lambda x : x * 2
double(2)
# %%
month_name = [(1, 'January'),(2, 'February'),(3, 'March')]
month_name.sort(key = lambda x : x[1])
month_name
# %%
def variable_args(first, *args):
print(args)
variable_args(1, 2, 3)
# %%
def double(x):
x = x * 2
return x
x = 1
y = double(x)
print(x, y)
# %%
def list_mod(original):
original[1] = 'Apple'
vegetables = ['Carrot', 'Potato', 'Pampkin']
list_mod(vegetables)
vegetables
# %%
lista = [1,2,3]
listb = lista
listb[1] = 4
print(lista, listb)
# %%
import copy
lista = [1, 2, 3]
listb = copy.copy(lista)
listb[1] = 4
print(lista, listb)
# %%
lista = [[1,2,3], [4,5,6]]
listb = copy.deepcopy(lista)
listb[1][1] = 7
print(lista, listb)
# %%
def count_up():
n = 1
while True:
yield n
n += 1
# %%
generator = count_up()
for num in generator:
print(num, end=' ')
if num == 7:
break
# %%
for num in generator:
print(num, end=' ')
if num == 15:
break
# %%
def wrapping(contents):
print('---- start ----')
contents
print('---- end ----')
# %%
@wrapping
def contents():
print('This is detail')
# %%
def contents():
print('This is detail')
contents = wrapping(contents)
# %%
|
[
"ryu212920@gmail.com"
] |
ryu212920@gmail.com
|
06401faf2f49ed4671887a35a38b057a517f6587
|
bd0d0aa82909fc6a8b739a6ba2df77e3ff698765
|
/libs/pytorch-deeplab-xception/dataloaders/warp_mask.py
|
31cc3e8d764ca7e2709144edfa9242bb433999a3
|
[
"MIT"
] |
permissive
|
shuangjiexu/MHP-VOS
|
ca90608a969ecee6df110d700ffd5e45b2163605
|
9f5b123b6e5ed7b9d91a96ac2e3d87b1d3d999c4
|
refs/heads/master
| 2020-05-04T10:16:25.747979
| 2020-04-08T03:51:18
| 2020-04-08T03:51:18
| 179,085,014
| 70
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,518
|
py
|
import os
import numpy as np
import cv2
def read_flow(flowfile):
f = open(flowfile, 'rb')
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
return flow.astype(np.float32)
def warp_back(img, flow, residueimg, validflowmap01):
h, w = flow.shape[:2]
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_CUBIC, borderMode =cv2.BORDER_CONSTANT )
validflowmap01[np.isnan(res)] = 0 # interp might cause NaN when indexing out of range
validflowmap01[residueimg > 0.3] = 0 # add additional validmap using residueimg
# ONLY reset nan pixels, do not reset invalid pixels
res[np.isnan(res)] = 0
return res, validflowmap01
def checkflow(flow01, flow10, th=2):
h,w,c = flow01.shape
x,y = np.meshgrid(np.arange(w),np.arange(h))
xid01 = np.clip((x+flow01[:,:,0]).astype(np.int16),0,w-1)
yid01 = np.clip((y+flow01[:,:,1]).astype(np.int16),0,h-1)
xid10 = np.clip((x+flow10[:,:,0]).astype(np.int16),0,w-1)
yid10 = np.clip((y+flow10[:,:,1]).astype(np.int16),0,h-1)
outofmap01 = ((x+flow01[:,:,0]).astype(np.int16) < 0) | ((y+flow01[:,:,1]).astype(np.int16) <0) | ((x+flow01[:,:,0]).astype(np.int16) >= w) | ((y+flow01[:,:,1]).astype(np.int16) >= h)
outofmap10 = ((x+flow10[:,:,0]).astype(np.int16) < 0) | ((y+flow10[:,:,1]).astype(np.int16) <0) | ((x+flow10[:,:,0]).astype(np.int16) >= w) | ((y+flow10[:,:,1]).astype(np.int16) >= h)
flow01_u = flow01[:,:,0]
flow01_v = flow01[:,:,1]
flow10_u = flow10[:,:,0]
flow10_v = flow10[:,:,1]
idx01_outlier_x = abs(flow10_u[yid01,xid01]+flow01[:,:,0]) > th
idx01_outlier_y = abs(flow10_v[yid01,xid01]+flow01[:,:,1]) > th
idx01_outlier = idx01_outlier_x | idx01_outlier_y
idx10_outlier_x = abs(flow01_u[yid10,xid10]+flow10[:,:,0]) > th
idx10_outlier_y = abs(flow01_v[yid10,xid10]+flow10[:,:,1]) > th
idx10_outlier = idx10_outlier_x | idx10_outlier_y
validflowmap01 = np.ones((h,w))
validflowmap10 = np.ones((h,w))
validflowmap01[(idx01_outlier!=0) | (outofmap01!=0)] = 0
validflowmap10[(idx10_outlier!=0) | (outofmap10!=0)] = 0
return validflowmap01, validflowmap10
def warp_mask(mask, im1_id, im2_id, flow_dir, img_dir):
# image load
img1 = cv2.imread(os.path.join(img_dir, '%05d.jpg'%im1_id))
img2 = cv2.imread(os.path.join(img_dir, '%05d.jpg'%im2_id))
# flow and warp load
flow_01_file = os.path.join(flow_dir, 'flownet2_%05d_%05d.flo'%(im1_id, im2_id)) # flownet2_00070_00069.flo
flow_10_file = os.path.join(flow_dir, 'flownet2_%05d_%05d.flo'%(im2_id, im1_id))
warp_01_file = os.path.join(flow_dir, 'flownet2_%05d_%05d.png'%(im1_id, im2_id)) # flownet2_00070_00069.png
warp_10_file = os.path.join(flow_dir, 'flownet2_%05d_%05d.png'%(im2_id, im1_id))
flow01 = read_flow(flow_01_file)
flow10 = read_flow(flow_10_file)
warpI01 = cv2.imread(warp_01_file).astype(np.float32)
warpI10 = cv2.imread(warp_10_file).astype(np.float32)
residueimg21 = np.max(abs(warpI10 - img2), axis=2)/255.0 # maximum residue from rgb channels
validflowmap01, validflowmap10 = checkflow(flow01, flow10)
warped_mask, validflowmap10 = warp_back(mask.astype(np.float32), flow10, residueimg21, validflowmap10)
return warped_mask, validflowmap10, flow01, validflowmap01
def main():
img_dir = '../data/DAVIS/trainval/JPEGImages/480p/bear'
flow_dir = '../data/trainval_flow/bear'
mask_dir = '../data/DAVIS/trainval/Annotations/480p/bear'
im1_id = 13
im2_id = 14
obj_id = 1
import davis as io
mask ,_ = io.imread_indexed(os.path.join(mask_dir, '%05d.png'%im1_id))
mask_1 ,_ = io.imread_indexed(os.path.join(mask_dir, '%05d.png'%im2_id))
mask_tmp = mask.copy()
mask_tmp[mask_tmp != obj_id] = 0
mask_tmp[mask_tmp != 0] = 1
mask_write_1 = mask_tmp.copy()
mask_write_1[mask_write_1 > 0.3]=255
cv2.imwrite('mask.jpg', mask_write_1)
warped_mask, validflowmap01,_,_ = warp_mask(mask_tmp, im1_id, im2_id, flow_dir, img_dir)
print(warped_mask.shape)
mask_write_2 = warped_mask.copy()
mask_write_2[mask_write_2 > 0.3]=255
cv2.imwrite('mask_warped.jpg', mask_write_2)
if __name__ == '__main__':
main()
|
[
"shuangjiexu@foxmail.com"
] |
shuangjiexu@foxmail.com
|
e7bf1229c2639c69435057d43312c103e4083947
|
fc3c9d2143aecedce191bb91dbd01babe7f6d40b
|
/tensorpack/dataflow/dataset/mnist.py
|
6eee2370e427c7f89687f8888a55ef6a30b483ff
|
[
"Apache-2.0"
] |
permissive
|
rahulbprakash/tensorpack
|
0ee10de245f486d17a252354833c98dd713fd6e6
|
b2ec42a8d152760498aa911818d50b01e408bb43
|
refs/heads/master
| 2020-12-30T19:12:08.800662
| 2016-06-09T23:03:37
| 2016-06-09T23:03:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,927
|
py
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: mnist.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
import os
import gzip
import random
import numpy
from six.moves import urllib, range
from ...utils import logger
from ...utils.fs import download
from ..base import DataFlow
__all__ = ['Mnist']
""" This file is mostly copied from tensorflow example """
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
"""Download the data from Yann's website, unless it's already here."""
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
logger.info("Downloading mnist data to {}...".format(filepath))
download(SOURCE_URL + filename, work_directory)
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def extract_labels(filename):
"""Extract the labels into a 1D uint8 numpy array [index]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False):
"""Construct a DataSet. """
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
class Mnist(DataFlow):
"""
Return [image, label],
image is 28x28 in the range [0,1]
"""
def __init__(self, train_or_test, shuffle=True, dir=None):
"""
Args:
train_or_test: string either 'train' or 'test'
"""
if dir is None:
dir = os.path.join(os.path.dirname(__file__), 'mnist_data')
assert train_or_test in ['train', 'test']
self.train_or_test = train_or_test
self.shuffle = shuffle
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = maybe_download(TRAIN_IMAGES, dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, dir)
train_labels = extract_labels(local_file)
local_file = maybe_download(TEST_IMAGES, dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, dir)
test_labels = extract_labels(local_file)
self.train = DataSet(train_images, train_labels)
self.test = DataSet(test_images, test_labels)
def size(self):
ds = self.train if self.train_or_test == 'train' else self.test
return ds.num_examples
def get_data(self):
ds = self.train if self.train_or_test == 'train' else self.test
idxs = list(range(ds.num_examples))
if self.shuffle:
random.shuffle(idxs)
for k in idxs:
img = ds.images[k].reshape((28, 28))
label = ds.labels[k]
yield [img, label]
if __name__ == '__main__':
ds = Mnist('train')
for (img, label) in ds.get_data():
from IPython import embed; embed()
break
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
620a42f2de8387bd563da13297f293106ec6359a
|
af233cb4bfbc4d215aff9bece395162b181b948c
|
/samples/max.py
|
dd9b82c0d4a5c8fbc56dd84bf3d9aca819c1a15e
|
[] |
no_license
|
hujunalex1/python3
|
7108acdf1259910ddb3808175848db3874b85f34
|
8418ad1de83f7ebd8cf292ef5c4c0567ee2b7173
|
refs/heads/master
| 2020-12-25T09:08:52.962204
| 2016-11-25T03:51:39
| 2016-11-25T03:51:39
| 60,052,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
# The max() function accepts any number of arguments and returns the largest one.
x = max(-1, 2, 3, 19)
print(x)
|
[
"huj@lizi.com"
] |
huj@lizi.com
|
c9d38489e612ac0b5fff74eba18d3370c578fb8f
|
e0b6f5bd451aa8af3273fbc948799637681342e1
|
/scripts/wm_representation/functions/IEM/Decoding_target/trainD_testT.py
|
565bed64df165b7759d3c984ce464fc7735d1901
|
[] |
no_license
|
davidbestue/encoding
|
6b304f6e7429f94f97bd562c7544d1fdccf7bdc1
|
c27319aa3bb652b3bfc6b7340044c0fda057bc62
|
refs/heads/master
| 2022-05-05T23:41:42.419252
| 2022-04-27T08:34:52
| 2022-04-27T08:34:52
| 144,248,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,641
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
############# Add to sys path the path where the tools folder is
import sys, os
path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) ### same directory or one back options
sys.path.insert(1, path_tools)
from tools import *
############# Namefiles for the savings.
path_save_signal ='/home/david/Desktop/Reconstructions/IEM/IEM_trainD_testT.xlsx'
path_save_reconstructions = '/home/david/Desktop/Reconstructions/IEM/IEM_heatmap_trainD_testT.xlsx'
path_save_shuffle = '/home/david/Desktop/Reconstructions/IEM/shuff_IEM_trainD_testT.xlsx'
############# Testing options
decoding_thing = 'T_alone' #'dist_alone' 'T_alone'
############# Training options
training_item = 'dist_alone' #'dist_alone' 'T_alone'
cond_t = '2_7' #'1_7' '2_7'
Distance_to_use = 'mix' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
tr_st=4
tr_end=6
############# Options de training times, the TRs used for the training will be different
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='stim_p':
# tr_st=3
# tr_end=4
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='respo':
# if decoding_thing=='Target':
# tr_st=8
# tr_end=9
# elif decoding_thing=='Distractor':
# tr_st=11
# tr_end=12
############# Dictionary and List to save the files.
Reconstructions={}
Reconstructions_shuff=[]
############# Elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7']
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001']
brain_regions = ['visual', 'ips', 'pfc']
ref_angle=180
num_shuffles = 10 #100 #10
############# Analysis
#############
for Subject in Subjects:
for Brain_region in brain_regions:
for idx_c, Condition in enumerate(Conditions):
print(Subject, Brain_region, Condition )
#
if Condition == cond_t: ### Cross-validate if training and testing condition are the same (1_7 when training on target and 2_7 when training on distractor)
#############
############# Get the data
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
#############
###### Process wm files (I call them activity instead of training_ or testing_ as they come from the same condition)
activity, behaviour = preprocess_wm_data(wm_fmri_paths, masks, wm_beh_paths,
condition=Condition, distance=Distance_to_use, nscans_wm=nscans_wm)
#############
####### IEM cross-validating all the TRs
Reconstruction = IEM_cv_all(testing_activity=activity, testing_behaviour=behaviour,
decode_item=decoding_thing, training_item=training_item, tr_st=tr_st, tr_end=tr_end, n_slpits=10)
#
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
#############
# IEM shuffle cross-validating all the TRs
shuff = IEM_cv_all_shuff(testing_activity=activity, testing_behaviour=behaviour,
decode_item=decoding_thing, training_item=training_item, tr_st=tr_st, tr_end=tr_end,
condition=Condition, subject=Subject, region=Brain_region,
iterations=num_shuffles, n_slpits=10)
Reconstructions_shuff.append(shuff)
else:
#############
############# Get the data
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
##################
###### Process training data
training_activity, training_behaviour = preprocess_wm_data(wm_fmri_paths, masks, wm_beh_paths,
condition=cond_t, distance=Distance_to_use, nscans_wm=nscans_wm)
#
# Subset training activity (TRs of the time and column of beh for the training)
delay_TR_cond, training_thing = subset_training(training_activity=training_activity, training_behaviour=training_behaviour,
training_item=training_item , training_time=training_time, tr_st=tr_st, tr_end=tr_end)
##################
##### Train your weigths
WM, Inter = Weights_matrix_LM( delay_TR_cond, training_thing )
WM_t = WM.transpose()
##################
###### Process testing data
testing_activity, testing_behaviour = preprocess_wm_data(wm_fmri_paths, masks, wm_beh_paths,
condition=Condition, distance=Distance_to_use, nscans_wm=nscans_wm)
##################
###### IEM
Reconstruction = IEM(testing_activity=testing_activity, testing_behaviour=testing_behaviour, decode_item=decoding_thing,
WM=WM, WM_t=WM_t, Inter=Inter, tr_st=tr_st, tr_end=tr_end)
#
Reconstructions[Subject + '_' + Brain_region + '_' + Condition]=Reconstruction
##################
###### IEM shuffle
shuff = IEM_shuff(testing_activity=testing_activity,testing_behaviour=testing_behaviour, decode_item=decoding_thing,
WM=WM, WM_t=WM_t, Inter=Inter, tr_st=tr_st, tr_end=tr_end,
condition=Condition, subject=Subject, region=Brain_region,
iterations=num_shuffles)
#
Reconstructions_shuff.append(shuff)
###### Save reconstruction (heatmap)
### Get signal from the reconstructions (get the signal before; not done in the function in case you want to save the whole)
### If you want to save the whole recosntruction, uncomment the following lines
writer = pd.ExcelWriter(path_save_reconstructions)
for i in range(len(Reconstructions.keys())):
Reconstructions[Reconstructions.keys()[i]].to_excel(writer, sheet_name=Reconstructions.keys()[i]) #each dataframe in a excel sheet
writer.save() #save reconstructions (heatmaps)
###### Save decoding signal (around the reference angle)
Decoding_df =[]
for dataframes in Reconstructions.keys():
df = Reconstructions[dataframes]
a = pd.DataFrame(df.iloc[ref_angle*2,:]) ##*2 because there are 720
a = a.reset_index()
a.columns = ['times', 'decoding'] # column names
a['decoding'] = [sum(df.iloc[:,i] * f2(ref_angle)) for i in range(len(a))] #"population vector method" scalar product
a['times']=a['times'].astype(float)
a['region'] = dataframes.split('_')[1]
a['subject'] = dataframes.split('_')[0]
a['condition'] = dataframes.split('_')[-2] + '_' + dataframes.split('_')[-1]
Decoding_df.append(a)
Df = pd.concat(Decoding_df)
Df['label'] = 'signal' #add the label of signal (you will concatenate this df with the one of the shuffleing)
Df.to_excel( path_save_signal ) #save signal
###### Save Shuffle
### I do not need to do the "pop vector" step becuase it is done inside the function IEM_shuff
### I do it different because eventually I might be interested in saving the whole reconstruction of the signal (I am not interested in the shuffles)
Df_shuffs = pd.concat(Reconstructions_shuff)
Df_shuffs['label'] = 'shuffle' ## add the label of shuffle
Df_shuffs.to_excel(path_save_shuffle) #save shuffle
##################
|
[
"davidsanchezbestue@hotmail.com"
] |
davidsanchezbestue@hotmail.com
|
26c87afea606d27b70e9a74c563abbc45a842651
|
23e1d4c609a489093cde70d12bcf292dad49baf0
|
/create_index_2.py
|
60368a12b058004bd03965eb34690a1136a94c57
|
[] |
no_license
|
kuochuwon/PPT_Keyword_Finder
|
1b6d2d7d573c04a1b6abf67cbd07e9d4dd856db4
|
81df0784d90353410075d905692617e26cf0cfd1
|
refs/heads/master
| 2023-07-11T13:56:57.421939
| 2021-08-12T04:29:07
| 2021-08-12T04:29:07
| 386,241,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,689
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\create_index_2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
# import msvcrt
import json
import os
from pathlib import Path
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QFileDialog, QMessageBox
from ppt_functions.Powerpoint_find_keyword import PowerPoint_keyword_search
from configs.log_config import get_logger as logger
ppt_finder = PowerPoint_keyword_search()
class Ui_Form(object):
def get_powerpoint_files(self):
response = QFileDialog.getOpenFileNames(
caption="Select your file",
directory=os.getcwd()
)
print(response)
raw_file_list = response[0]
file_list = ppt_finder.filter(raw_file_list)
# Add the file name information to the blank area
self.listWidget_3.addItems(file_list)
def create_keyword_library(self, Form):
try:
all_item = []
count = self.listWidget_3.count()
for i in range(count):
all_item.append(Path(self.listWidget_3.item(i).text()))
ppt_library = ppt_finder.convert_ppt_into_dict(all_item)
with open("ppt_library.txt", "w") as f:
json.dump(ppt_library, f)
logger().info("Writing ppt_library file complete.")
# msvcrt.getch()  # would freeze the main window
QMessageBox.information(Form, "通知", "索引表已完成。")
Form.close()
except Exception as e:
QMessageBox.critical(Form, "錯誤", f"索引表建置失敗。\n(您輸入的檔案之中,可能有部分檔案損毀"
",請檢查每一個檔案是否都能正常開啟。)")
Form.close()
logger().error(f"create_keyword_library failed: {e}")
def message(self, Form):
QMessageBox.information(Form, "通知", "索引表已完成。")
def closeEvent(self, Form):
print("QWidget closed")
Form.close()
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(400, 300)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.label_2 = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(16)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(385, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 0, 1, 2)
self.listWidget_3 = QtWidgets.QListWidget(Form)
self.listWidget_3.setObjectName("listWidget_3")
self.gridLayout.addWidget(self.listWidget_3, 2, 0, 1, 1)
self.pushButton_2 = QtWidgets.QPushButton(Form)
font = QtGui.QFont()
font.setFamily("微軟正黑體")
font.setPointSize(18)
self.pushButton_2.setFont(font)
self.pushButton_2.setAutoDefault(False)
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.get_powerpoint_files)
self.gridLayout.addWidget(self.pushButton_2, 2, 1, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(385, 17, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 3, 0, 1, 2)
self.buttonBox = QtWidgets.QDialogButtonBox(Form)
font = QtGui.QFont()
font.setFamily("Calibri")
font.setPointSize(14)
self.buttonBox.setFont(font)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
# HINT for passing argument, using lambda expression
self.buttonBox.accepted.connect(lambda: self.create_keyword_library(Form))
self.buttonBox.rejected.connect(lambda: self.closeEvent(Form))
self.gridLayout.addWidget(self.buttonBox, 4, 0, 1, 2)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.pushButton_2.setText(_translate("Form", "瀏覽"))
self.label_2.setText(_translate("Form", "檔案已選取...."))
|
[
"kuochuwon@gmail.com"
] |
kuochuwon@gmail.com
|
236f34818c27d1d5d51117013e8b5ed82ffa7292
|
ddd77802beb77169ed311df4eb1c7e8ed2dc85a2
|
/apps/categories/views.py
|
d32a2bb737e944d665bbb11fd82e2bb35ec4cf6b
|
[] |
no_license
|
An931/words-learning
|
472f20063ff79f50f2fe03e2fd639014dd1ca3b9
|
a3bc257537c20a1d050ebb12e6b3e7f39ad0bbd6
|
refs/heads/master
| 2022-12-08T11:19:22.163746
| 2020-09-05T06:20:35
| 2020-09-05T06:20:35
| 291,753,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated

from permissions.api_key_permissions import APIKeyPermission

from .models import Category
from .serializers import CategorySerializer

class CategoryViewSet(viewsets.ModelViewSet):
    """API view set for Category model"""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    permission_classes = [APIKeyPermission | IsAuthenticated]
|
[
"anna.makarova@saritasa.com"
] |
anna.makarova@saritasa.com
|
e13a306985e467e8ce545e6f42c49c7df57a83ce
|
45d17fd269d29838df83673898273c9dbe0083eb
|
/Dzien01/008-petla-for.py
|
325973eacf03c43ecfbc6520177dc17cc351f2a0
|
[] |
no_license
|
elanco-python/szkolenie
|
15ffaaa7fb634200082b3fc30d6f9c0970d6e772
|
54897829e377e6fc0a6cad73532dcba430efb52b
|
refs/heads/master
| 2022-12-22T18:21:15.537184
| 2020-10-08T16:43:17
| 2020-10-08T16:43:17
| 301,567,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
# For loop
zakres = list(range(1, 11, 2))
print(zakres)

for x in range(1, 11):
    print(x**2)

print("=" * 80)

lista = ["A", 1, None, (1, "OK")]
counter = 1
for counter, x in enumerate(lista, 100):
    print(counter, x, sep=":")
|
[
"marian.witkowski2@orange.com"
] |
marian.witkowski2@orange.com
|
8ccb3a1d40d475060dbbf11cf67ffdd14167632a
|
17ef2ca91059e2ce9210de9c1c60508e7ddcf46d
|
/writershub/asgi.py
|
1b19b893afcc06c0500739b83cca4b3cb4fa1b23
|
[] |
no_license
|
osamwelian3/FreelanceWebApp
|
71bf6381de60130904a3d7021c1b598bede0f82f
|
6341d8332a39330561cf8c20e5b09170503d523f
|
refs/heads/master
| 2022-12-03T14:49:57.093439
| 2020-08-14T11:53:05
| 2020-08-14T11:53:05
| 287,521,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for writershub project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'writershub.settings')
application = get_asgi_application()
|
[
"samian@samian.samian.com"
] |
samian@samian.samian.com
|
59a5b59694320854440564f7714dbb7fb05bff93
|
a3375aeabc2782d92dc2c3208e5badb00daa3703
|
/BtsShell/application_lib/protocols/Gtp.py
|
437d2bd1cc87393b55f860c27654a02af210a4d4
|
[] |
no_license
|
jufei/BtsShell
|
f256ff573cbbb7a834ae608eb991eb337503f159
|
75487a40ac2cc5f24f70d011ad6cd3924908f783
|
refs/heads/master
| 2021-01-10T09:25:02.656231
| 2016-03-29T05:42:04
| 2016-03-29T05:42:04
| 54,948,974
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
import re
import traceback
from BtsShell.helper import CommonItem, ParserException
GTP_MESSAGE_TYPE = {'0x01': 'Echo Request',
'0x02': 'Echo Response',
'0xff': 'G-PDU'
}
class Gtp(CommonItem):
def __init__(self, udp_data):
"""
GPRS Tunneling Protocol
Flags: 0x32
001. .... = Version: GTP release 99 version (1)
...1 .... = Protocol type: GTP (1)
.... 0... = Reserved: 0
.... .0.. = Is Next Extension Header present?: no
.... ..1. = Is Sequence Number present?: yes
.... ...0 = Is N-PDU number present?: no
Message Type: Echo request (0x01)
Length: 4
TEID: 0x00000000
Sequence number: 0x0008
N-PDU Number: 0x00
Next extension header type: No more extension headers (0x00)
"""
self.flags = None
self.message_type = None
self.length = None
self.teid = None
self.sequence_number = None
self.npdu_number = None
self.parse_gtp_packet(udp_data)
def parse_gtp_packet(self, udp_data):
self.parse_gtp_header(udp_data.pop(0))
self.parse_gtp_data(udp_data)
def parse_gtp_header(self, gtp_header):
try:
self.flags = re.search('Flags:.*0x(\w+)', gtp_header, re.M).group(1)
message_id = re.search('Message\s*Type:.*(0x\w+)', gtp_header, re.M).group(1)
self.message_type = GTP_MESSAGE_TYPE[message_id]
self.length = re.search('Length:\s*(\d+)', gtp_header, re.M).group(1)
self.teid = re.search('TEID:\s*0x(\w+)', gtp_header, re.M).group(1)
except:
print 'gtp header:\n%s' % gtp_header
traceback.print_exc()
raise ParserException, 'parse_gtp_header failed'
try: # these two attributes are optional
self.sequence_number = re.search('Sequence\s*number:.*0x(\w+)', gtp_header, re.M).group(1)
self.npdu_number = re.search('N-PDU\s*Number:.*0x(\w+)', gtp_header, re.M).group(1)
except:
pass
def parse_gtp_data(self, gtp_data):
self.data = gtp_data
|
[
"fei.ju@nsn.com"
] |
fei.ju@nsn.com
|
f20f61dd084a86780430804aaf86bf356607c439
|
60aa3bcf5ace0282210685e74ee8ed31debe1769
|
/base/lib/encodings/unicode_internal.py
|
29e12df0a43d19180c4e495aec554a95337e1987
|
[] |
no_license
|
TheBreadGuy/sims4-ai-engine
|
42afc79b8c02527353cc084117a4b8da900ebdb4
|
865212e841c716dc4364e0dba286f02af8d716e8
|
refs/heads/master
| 2023-03-16T00:57:45.672706
| 2016-05-01T17:26:01
| 2016-05-01T17:26:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
import codecs

class Codec(codecs.Codec):
    __qualname__ = 'Codec'
    encode = codecs.unicode_internal_encode
    decode = codecs.unicode_internal_decode

class IncrementalEncoder(codecs.IncrementalEncoder):
    __qualname__ = 'IncrementalEncoder'

    def encode(self, input, final=False):
        return codecs.unicode_internal_encode(input, self.errors)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    __qualname__ = 'IncrementalDecoder'

    def decode(self, input, final=False):
        return codecs.unicode_internal_decode(input, self.errors)[0]

class StreamWriter(Codec, codecs.StreamWriter):
    __qualname__ = 'StreamWriter'

class StreamReader(Codec, codecs.StreamReader):
    __qualname__ = 'StreamReader'

def getregentry():
    return codecs.CodecInfo(name='unicode-internal', encode=Codec.encode, decode=Codec.decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader)
|
[
"jp@bellgeorge.com"
] |
jp@bellgeorge.com
|
4fa84f2deee1994f61caf98271432496ed94b1c2
|
e9c0a258943a13e09f4d71bf1172ca18bb8102b1
|
/macro-kit/customize/expander/expander_files/python.py
|
0ffbff7095b6c1dc130088dfb00635857c2d2ec8
|
[] |
no_license
|
mrbald/nedit-macro-kit
|
15fcc8cf073a0be178139a480d5a37c562de5b18
|
b1a40850ea91bb914cae125ef4c33ed0caa3b6de
|
refs/heads/master
| 2021-01-10T06:14:49.783400
| 2009-10-30T10:15:07
| 2009-10-30T10:15:07
| 46,522,047
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
#! /usr/bin/env python
import sys, pdb
from swk import *

g_logfile = LogFile()

def GetParam():
    from optparse import OptionParser
    usage = "usage: %prog [options] rootpath"
    parser = OptionParser(usage)
    parser.add_option("-b", "--boolean", action="store_true", dest="boolean", help="boolean example", default=False)
    parser.add_option("-s", "--string", dest="string", help="string example", default="defaultString")
    (options, args) = parser.parse_args()
    return (options, args)

def doSomething(args):
    pass

def main():
    (options, args) = GetParam()
    doSomething(args)
    print "hello world"

if __name__ == "__main__":
    main()
|
[
"frank.perbet@399c9662-ede6-11dd-ac92-6b84c442103f"
] |
frank.perbet@399c9662-ede6-11dd-ac92-6b84c442103f
|
86a0a5eccd25c785366923e08dbd0bcdc20006a6
|
50c520d9b8b0fe66b94a874fa016fd06962dacf0
|
/inlinks.py
|
8c1fcd99d214dfafc79849371b97a7e760100b2d
|
[] |
no_license
|
shugamoe/cs123_wiki_proj
|
dc373d82364f8c81b118ad7ddd23f0e445785d0d
|
066352d5fcce825df1989e8300401fe32381d267
|
refs/heads/master
| 2021-01-17T13:08:28.837435
| 2016-07-04T03:19:02
| 2016-07-04T03:19:02
| 57,915,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,739
|
py
|
import pandas as pd
import json
import urllib.parse
def wiki_homepages(pagename, json_file, titles_file):
'''
Generates a list of inlinks (pages that link to pagename) of a page.
Inputs:
pagename - name of the page of interest
json_file - file of the json containing the line numbers of
pages and their inlinks
titles_file - file containing pagenames of each number in json_file
Output:
list of inlinks
'''
# Generates a dataframe containing pagenames for each line number
titles = pd.read_csv(titles_file, delimiter = ' ', names = ['page title'])
# Finds the line index of pagename
line_index = titles[titles[[0]] == pagename].dropna().index.tolist()
if line_index == []:
print('pagename does not exist')
return None
page_line_num = line_index[0] + 1
# Opens json_file, extracts the list of inlinks of pagename, and closes
# json_file
f = open(json_file, 'r')
homepage_line_nums = json.load(f).get(str(page_line_num), [])
f.close()
homepage_titles = []
for homepage_line_num in homepage_line_nums:
title = titles.iloc[[int(homepage_line_num) - 1]].values[0][0]
# appends the title of each inlink to a list
homepage_titles.append(title)
return homepage_titles
def one_to_five_inlinks_dump(json_file):
'''
Generates a dict of pages that have one to five inlinks (pages that link to
them), and dumps it to a json file named 'one_to_five_inlinks'.
Inputs:
json_file - file of the json containing the line numbers of
pages and their inlinks
'''
one_to_five_dict = {}
with open(json_file, 'r') as f:
for key, val in json.load(f).items():
# if the pagename contains one to five inlinks, append to dict
if len(val) >= 1 and len(val) <= 5:
one_to_five_dict[key] = val
with open('one_to_five_inlinks', 'w') as f:
json.dump(one_to_five_dict, f)
def one_to_five_inlinks_sample_dump(json_file, titles_file, num_of_inlinks, \
num_of_pages):
'''
Generates a dict of num_of_pages pages with num_of_inlinks inlinks by
loading json_file (a dict containing all pages with one to five inlinks),
filtering through, and appending suitable items into the dict inlinks_sample.
inlinks_sample is then dumped into a sample directory, which is used for
the function one_to_five_inlinks_sample.
Inputs:
json_file - file of the json containing the line numbers of
pages and their inlinks
titles_file - file containing pagenames of each number in json_file
num_of_inlinks - parameter determining the number of inlinks for each
page
num_of_pages - parameter determining the number of pages needed
'''
titles = pd.read_csv(titles_file, delimiter = ' ', names = ['page title'])
inlinks_sample = {}
with open(json_file, 'r') as f:
count = 0
for key, val in json.load(f).items():
if len(val) == num_of_inlinks:
# convert key from line number (int) to pagename (str)
key_name = titles.iloc[[int(key) - 1]].values[0][0]
# parsing titles to remove unusual characters
title = urllib.parse.unquote_plus(key_name)
x = 0
while '%' in title:
if x == 10:
break
title = urllib.parse.unquote_plus(title)
x += 1
# convert each value from line number (int) to pagename (str)
for v in val:
val_name = titles.iloc[[int(v) - 1]].values[0][0]
# parsing title to remove unusual characters
title = urllib.parse.unquote_plus(val_name)
x = 0
while '%' in title:
if x == 10:
break
title = urllib.parse.unquote_plus(title)
x += 1
# appends val_name to key_name in inlink_sample dict
inlinks_sample[key_name] = inlinks_sample.get(key_name, \
[]) + [val_name]
count += 1
if count == num_of_pages:
# once num_of_pages pages with the specified attributes
# are obtained, dump the inlinks_sample dict and return None
with open('samples/sample_' + str(num_of_inlinks) + '_' + \
str(num_of_pages), 'w') as f:
json.dump(inlinks_sample, f)
return None
# if num_of_pages exceeds the number of pages that have num_of_inlinks
# inlinks, then all the pages with num_of_inlinks are dumped into the
# proper file
print('there are only {} pages'.format(count))
with open('samples/sample_' + str(num_of_inlinks) + '_' + \
str(num_of_pages), 'w') as f:
json.dump(inlinks_sample, f)
return None
def one_to_five_inlinks_sample(num_of_inlinks, num_of_pages):
'''
Loads the proper json file with num_of_inlinks inlinks and num_of_pages
pages, and returns the sample_dict.
Inputs:
num_of_inlinks - parameter determining the number of inlinks for each
page
num_of_pages - parameter determining the number of pages needed
Output:
sample_dict with num_of_pages pages with num_of_inlinks inlinks
'''
with open('samples/sample_' + str(num_of_inlinks) + '_' + \
str(num_of_pages), 'r') as f:
sample_dict = json.load(f)
return sample_dict
def two_inlinks_sample(json_file_two):
'''
Returns a sample dict with two inlinks each.
Input:
json_file_two - a json file with a dictionary sample of pages
with two inlinks each
Output:
a sample dict of pages with two inlinks as keys
'''
two_inlinks_sample = {}
with open(json_file_two, 'r') as f:
inlinks_dict = json.load(f)
two_inlinks_sample['Wrestling_Slang'] = inlinks_dict['Wrestling_Slang']
two_inlinks_sample['Concordia_University,_St._Paul'] = \
inlinks_dict['Concordia_University,_St._Paul']
two_inlinks_sample['A_Spaceman_Came_Travelling_(Christmas_Remix)'] = \
inlinks_dict['A_Spaceman_Came_Travelling_(Christmas_Remix)']
two_inlinks_sample['Transcendentals'] = inlinks_dict['Transcendentals']
two_inlinks_sample['Platinum_Card'] = inlinks_dict['Platinum_Card']
return two_inlinks_sample
|
[
"andyz422@cs.uchicago.edu"
] |
andyz422@cs.uchicago.edu
|
d0d3b67d53b8559c598eeabe2de449843ca4f522
|
4424642d85d750476e8549844b58636971a6515a
|
/training/level-4-creating-web-services/bfp-reference/bfp_friends_api/code-snapshots/exercise-8/api_helpers.py
|
2ccfe0a24ca77d6d12a12affecdc25673d0057d6
|
[] |
no_license
|
timmywilson/pandas-practical-python-primer
|
81b72b1d243a70b497fa0b17fbcca0403417dde2
|
0189c81a16dc3baf01b5d8fbcc7cd5eeba2cdbb9
|
refs/heads/master
| 2021-01-22T15:35:39.298247
| 2015-11-18T22:22:04
| 2015-11-18T22:22:04
| 43,707,883
| 0
| 0
| null | 2015-10-05T19:19:37
| 2015-10-05T19:19:37
| null |
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
"""
This module provides functions that are commonly used by various
members of the api.py module.
"""
from werkzeug.exceptions import BadRequest
def json_payload(request) -> dict:
"""
Verify that a flask.request object has a JSON payload and
that it does not contain syntax errors.
Args:
request (flask.request): A request object that you want to
verify has a valid JSON payload.
Raises:
ValueError: If the incoming request object is either missing
a JSON payload or has one with syntax errors.
"""
try:
request_payload = request.get_json()
except BadRequest:
raise ValueError("JSON payload contains syntax errors. Please "
"fix and try again.")
if request_payload is None:
raise ValueError("No JSON payload present. Make sure that "
"appropriate `content-type` header is "
"included in your request and that you've "
"specified a payload.")
return request_payload
def verify_required_data_present(request_payload: dict, required_elements: set):
"""
Verify that a request_payload has all the keys indicated
in required_elements.
Args:
request_payload (dict): A set of request_payload to evaluate.
required_elements (set): The names of keys that must be present
in request_payload.
Raises:
ValueError: If any of the names in required_elements is not a
member of request_payload.keys()
"""
if not required_elements.issubset(request_payload.keys()):
raise ValueError(
"Missing required payload elements. "
"The following elements are "
"required: {}".format(required_elements))
|
[
"timothyscottwilson@gmail.com"
] |
timothyscottwilson@gmail.com
|
dea85b07a363f1d51f9b610e3cc7cd7ea9841e37
|
9ec437cfecb7e91428deed98dd841786013d45b6
|
/kathisattic/apps/account/models.py
|
fd73f6b49f278b97e5b844e73c38da00ddb78e74
|
[
"MIT"
] |
permissive
|
ninapavlich/kathisattic
|
0fc5901bfa2b4b93f40dd2017be201a73d01c030
|
0585e63f43da6841f7fc0d69f090d7ab556dcec0
|
refs/heads/master
| 2021-04-15T16:11:03.159798
| 2016-07-06T23:11:48
| 2016-07-06T23:11:48
| 61,773,031
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,090
|
py
|
from django.db import models
from django.conf import settings
from carbon.compounds.account.models import User as BaseUser
from carbon.compounds.account.models import Address as BaseAddress
from carbon.compounds.account.models import UserGroup as BaseUserGroup
from carbon.compounds.account.models import UserGroupMember as BaseUserGroupMember
from carbon.compounds.account.models import Organization as BaseOrganization
from carbon.compounds.account.models import OrganizationMember as BaseOrganizationMember
from carbon.compounds.account.models import SocialContactLink as BaseSocialContactLink
class User(BaseUser):
pass
class Address(BaseAddress):
pass
class UserGroupMember(BaseUserGroupMember):
group = models.ForeignKey('account.UserGroup',
blank=True, null=True)
class UserGroup(BaseUserGroup):
member_class = UserGroupMember
pass
class Organization(BaseOrganization):
pass
class OrganizationMember(BaseOrganizationMember):
organization = models.ForeignKey('Organization', blank=True, null=True)
class SocialContactLink(BaseSocialContactLink):
pass
|
[
"nina@ninalp.com"
] |
nina@ninalp.com
|
23c5cc3e60e317ec7e1c0ecc5a633d6297f44aa1
|
e3e6fc037f47527e6bc43f1d1300f39ac8f0aabc
|
/google/devtools/build/v1/build_status_pb2.py
|
269dbcf3e6c8e73d9f1818cff102d36674645b26
|
[] |
no_license
|
msachtler/bazel-event-protocol-parser
|
62c136cb1f60f4ee3316bf15e1e5a5e727445536
|
d7424d21aa0dc121acc4d64b427ba365a3581a20
|
refs/heads/master
| 2021-07-05T15:13:19.502829
| 2017-09-24T04:15:16
| 2017-09-24T04:15:16
| 102,999,437
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 5,705
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/devtools/build/v1/build_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/devtools/build/v1/build_status.proto',
package='google.devtools.build.v1',
syntax='proto3',
serialized_pb=_b('\n+google/devtools/build/v1/build_status.proto\x12\x18google.devtools.build.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\"\xc6\x02\n\x0b\x42uildStatus\x12<\n\x06result\x18\x01 \x01(\x0e\x32,.google.devtools.build.v1.BuildStatus.Result\x12%\n\x07\x64\x65tails\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\"\xd1\x01\n\x06Result\x12\x12\n\x0eUNKNOWN_STATUS\x10\x00\x12\x15\n\x11\x43OMMAND_SUCCEEDED\x10\x01\x12\x12\n\x0e\x43OMMAND_FAILED\x10\x02\x12\x0e\n\nUSER_ERROR\x10\x03\x12\x10\n\x0cSYSTEM_ERROR\x10\x04\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x05\x12 \n\x1cINVOCATION_DEADLINE_EXCEEDED\x10\x06\x12\x1d\n\x19REQUEST_DEADLINE_EXCEEDED\x10\x08\x12\r\n\tCANCELLED\x10\x07\x42t\n\x1c\x63om.google.devtools.build.v1B\x10\x42uildStatusProtoP\x01Z=google.golang.org/genproto/googleapis/devtools/build/v1;build\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_BUILDSTATUS_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='google.devtools.build.v1.BuildStatus.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_STATUS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND_SUCCEEDED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMMAND_FAILED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='USER_ERROR', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYSTEM_ERROR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_EXHAUSTED', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVOCATION_DEADLINE_EXCEEDED', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='REQUEST_DEADLINE_EXCEEDED', index=7, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELLED', index=8, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=248,
serialized_end=457,
)
_sym_db.RegisterEnumDescriptor(_BUILDSTATUS_RESULT)
_BUILDSTATUS = _descriptor.Descriptor(
name='BuildStatus',
full_name='google.devtools.build.v1.BuildStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='google.devtools.build.v1.BuildStatus.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='details', full_name='google.devtools.build.v1.BuildStatus.details', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_BUILDSTATUS_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=131,
serialized_end=457,
)
_BUILDSTATUS.fields_by_name['result'].enum_type = _BUILDSTATUS_RESULT
_BUILDSTATUS.fields_by_name['details'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_BUILDSTATUS_RESULT.containing_type = _BUILDSTATUS
DESCRIPTOR.message_types_by_name['BuildStatus'] = _BUILDSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BuildStatus = _reflection.GeneratedProtocolMessageType('BuildStatus', (_message.Message,), dict(
DESCRIPTOR = _BUILDSTATUS,
__module__ = 'google.devtools.build.v1.build_status_pb2'
# @@protoc_insertion_point(class_scope:google.devtools.build.v1.BuildStatus)
))
_sym_db.RegisterMessage(BuildStatus)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.devtools.build.v1B\020BuildStatusProtoP\001Z=google.golang.org/genproto/googleapis/devtools/build/v1;build\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
[
"matt.sachtler@gmail.com"
] |
matt.sachtler@gmail.com
|
023e380d1f9820e0b7068df001c1720639b6585f
|
28ce9792348ab9766f925a411652dcf9da90c26e
|
/webApplication/settings.py
|
05635a18e01478e85f73137002c2db468aa978c5
|
[] |
no_license
|
RupaMistry/machine-scikit-learning
|
1444edab354347430f95f357c956bb458d6d3587
|
22af95e663cb27f1847ce69613a10dc79e0c8ce4
|
refs/heads/master
| 2020-07-27T01:27:29.110965
| 2019-09-16T15:21:30
| 2019-09-16T15:21:30
| 208,822,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
import os
import datetime
from mongoengine import connect
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '43t@n5)84pc*y9-=$yul2_2pbtbya2a!zikplk+t_wi(7@mggl'
DEBUG = True
ALLOWED_HOSTS = ['*']
connect(db='machine-scikit-learningDB')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
REST_FRAMEWORK = {
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=60*60*24),
}
ROOT_URLCONF = 'webApplication.appUrls'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "content"),
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ["uiTemplates"], #modify this line
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webApplication.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': '/home/mysql.cnf',
},
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/content/'
|
[
"mistryrupa7@gmail.com"
] |
mistryrupa7@gmail.com
|
d50295da822a4d302060f559849b059275fd3e75
|
8cf13446987a74e2cdcc6506b6137e104976e065
|
/init_work/conv_net/ker2.py
|
ad723c142884b1606d1f59320ca2ccfd51cf4db5
|
[] |
no_license
|
jurajmaslej/dipl
|
53f627ec24ff0da90c25d7ca959b7516b281b523
|
12c1c1e981eb8c459e176ab1344d60bcb6f61059
|
refs/heads/master
| 2020-03-17T18:39:58.107242
| 2018-05-18T08:28:16
| 2018-05-18T08:28:16
| 133,714,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,395
|
py
|
#C:> activate keras
#(keras) C:> set TF_CPP_MIN_LOG_LEVEL=2
#(keras) C:> python
# >>>
import numpy as np
import cv2
from keras.layers import Input
from keras.layers.convolutional import Conv2D
from keras.models import Model
def run_ker2(img_name):
inp = Input(shape=(None,None,1)) # object that takes in the input image
out = Conv2D(1, (3, 3), kernel_initializer='normal', use_bias=False, padding='same')(inp)
model = Model(inputs=inp, outputs=out) # 2D convolutional layer, the leading 1 = only one layer (filter), each layer has its own kernel, kernel of size 3*3, padding = same -> keep the edges
len(model.layers)
#print model.layers[0].get_weights()
#print model.layers[1].get_weights()
w = np.array([[ # weights set to the Sobel operator
[[[-1]],[[0]],[[1]]],
[[[-2]],[[0]],[[2]]],
[[[-1]],[[0]],[[1]]]
]]) # weights for the kernel
w_T = np.array([[ # weights set to the Sobel operator
[[[-1]],[[-2]],[[-1]]],
[[[0]],[[0]],[[0]]],
[[[1]],[[2]],[[1]]]
]])
#w = w_T
model.layers[1].set_weights(w)
model.layers[1].get_weights() # currently 2 layers
input_images = np.array([[
[[1],[1],[1],[1],[1]],
[[1],[1],[1],[1],[1]],
[[1],[1],[1],[1],[1]],
[[1],[1],[1],[1],[1]],
[[1],[1],[1],[1],[1]]
]])
output_images = model.predict(input_images)
output_images[0]
input_images = np.array([[
[[1],[1],[0],[0],[0]],
[[1],[1],[0],[0],[0]],
[[1],[1],[0],[0],[0]],
[[1],[1],[0],[0],[0]],
[[1],[1],[0],[0],[0]]
]])
output_images = model.predict(input_images)
output_images[0]
image = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
#cv2.imshow('lena',image)
#cv2.waitKey(0)
image.shape
rows,cols = image.shape
input_images.shape
input_image = np.array(image).reshape((rows,cols,1)) # reshape, add a third coordinate
input_images = np.array([input_image])
output_images = model.predict(input_images)
output_image = output_images[0].reshape((rows,cols))
np.amax(output_image)
np.amin(output_image)
txt_fname = img_name[:-5] + '.txt'
np.savetxt(txt_fname, output_image, fmt='%f')
output_image = abs(output_image);
output_image = cv2.normalize(output_image,None,0,255,cv2.NORM_MINMAX)
output_image = np.uint8(output_image)
cv2.imwrite('edge-horizon.jpg',output_image)
#cv2.imshow('lena',output_image)
#cv2.waitKey(0)
#cv2.destroyWindow('lena')
#quit()
|
[
"juraj.maslej@gmail.com"
] |
juraj.maslej@gmail.com
|
91e6b4bbcb02bff20d792b753c4b889d92fa0b85
|
2620b9177a3b3850582435345d333d95569c2063
|
/ticket_trackr/wsgi.py
|
5da5740aa73642d44bc3fcfc0d2cf3abc1857307
|
[] |
no_license
|
LoganLeopold/Ticket_Trackr_BackEnd
|
b5598a633cb848c8e0855c18331318cb7e9d2b0f
|
ede244a6cce4d42ae90bdc7f73a496d302d575d6
|
refs/heads/master
| 2022-12-09T09:39:25.990324
| 2021-06-03T17:17:38
| 2021-06-03T17:17:38
| 175,481,315
| 0
| 0
| null | 2022-12-08T04:52:14
| 2019-03-13T18:56:24
|
Python
|
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
WSGI config for ticket_trackr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ticket_trackr.settings')
application = get_wsgi_application()
|
[
"loganleopld@me.com"
] |
loganleopld@me.com
|
5d1edb413dcc5cc28dd3a3d0ce46e172e7f19377
|
40f40f5e649536db3aac4efb97f88efd4e6dd759
|
/src/facturplus/productes/apps.py
|
55442c59fd7686a74f6a48fe7ba5a2ac779fe49a
|
[
"MIT"
] |
permissive
|
ctrl-alt-d/learn-orm-django
|
658c618d8f59f483dc4d8e721271a6dc539dddae
|
78a847dc8e58bc69be1e4e6165a143f56ecf4608
|
refs/heads/master
| 2022-04-30T12:46:37.572547
| 2022-04-23T07:36:10
| 2022-04-23T07:36:10
| 213,195,337
| 1
| 0
|
MIT
| 2022-04-23T07:36:11
| 2019-10-06T15:41:19
|
Python
|
UTF-8
|
Python
| false
| false
| 93
|
py
|
from django.apps import AppConfig
class ProductesConfig(AppConfig):
name = 'productes'
|
[
"ctrl.alt.d@gmail.com"
] |
ctrl.alt.d@gmail.com
|
4132ca9438aecd2d20d233401f8a783b37c16556
|
7f8dd611e8a213b50161fc23fc209ad7c6eacd2f
|
/scripts/tests/testun.py
|
c27e932c1c629b96ff2e6172d269c745a65806c8
|
[
"MIT"
] |
permissive
|
hermetique/6502-basic
|
b8fe2a91be06000d3954afd847731f5857cf3f16
|
d4c360041bfa49427a506465e58bb0ef94beaa44
|
refs/heads/main
| 2023-05-05T08:51:20.007841
| 2021-05-13T20:07:04
| 2021-05-13T20:07:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,599
|
py
|
# *****************************************************************************
# *****************************************************************************
#
# Name: testun.py
# Author: Paul Robson (paul@robsons.org.uk)
# Date: 27th February 2021
# Purpose: Unary test classes.
#
# *****************************************************************************
# *****************************************************************************
import random,sys
from testcore import *
# *****************************************************************************
#
# Unary operator test
#
# len sgn abs min max asc chr$(
#
# *****************************************************************************
class UnaryOpTest(Test):
def getTest(self,n):
s = random.randint(0,10)
#
n = random.randint(-10000,10000)
if random.randint(0,10) == 0:
n = random.randint(-10,10)
#
if s == 0:
return "assert abs({0}) = {1}".format(n,abs(n))
#
if s == 1:
sg = -1 if n < 0 else 1
sg = 0 if n == 0 else sg
return "assert sgn({0}) = {1}".format(n,sg)
#
if s == 2:
n = random.randint(0,9)
st = "".join([chr(random.randint(97,107)) for x in range(0,n)])
return "assert len(\"{0}\") = {1}".format(st,len(st))
#
if s == 3 or s == 4:
# use a separate name for the operand list so the max/min choice on s is not clobbered
vals = [random.randint(-1000,1000) for n in range(0,random.randint(1,5))]
st = ",".join([str(n) for n in vals])
return "assert {0}({1}) = {2}".format("max" if s == 3 else "min",st,max(vals) if s == 3 else min(vals))
#
if s == 5:
n = random.randint(35,126)
return 'assert asc("{1}") = {0}'.format(n,chr(n))
#
if s == 6:
n = random.randint(35,126)
return 'assert chr$({0}) = "{1}"'.format(n,chr(n))
#
if s == 7:
s = self.getString()
c = random.randint(0,8)
return 'assert left$("{0}",{1}) = "{2}"'.format(s.strip(),c,s[:c].strip())
#
if s == 8:
s = self.getString().strip()
c = random.randint(0,8)
s1 = s[-c:] if c < len(s) else s
s1 = s1 if c != 0 else ""
return 'assert right$("{0}",{1}) = "{2}"'.format(s.strip(),c,s1)
#
if s == 9:
s = self.getString()
c1 = random.randint(1,6)
c2 = random.randint(0,6)
return 'assert mid$("{0}",{1},{2}) = "{3}"'.format(s.strip(),c1,c2,s[c1-1:][:c2].strip())
#
if s == 10:
s = self.getString()
c1 = random.randint(1,6)
return 'assert mid$("{0}",{1}) = "{2}"'.format(s.strip(),c1,s[c1-1:].strip())
#
assert False,str(s)
def getString(self):
return "".join([chr(random.randint(97,117)) for x in range(0,random.randint(1,6))])+(" "*128)
if __name__ == "__main__":
t = UnaryOpTest(50)
|
[
"paul@robsons.org.uk"
] |
paul@robsons.org.uk
|
4cc1233b02198b48234e5d8f58af689954841b47
|
c0ff9aea829261262cda457245742c28f38d6604
|
/stru_cls_gt_personal_kfold.py
|
c1c896a0829f69e428d40918ac3c82f6e3c1eaa9
|
[] |
no_license
|
zsb87/foodwatch
|
bb337e68bd5523da78bb8c82c06585f4b1f15a14
|
89abb183da21249ce99c3cc9525e87ad47d5d050
|
refs/heads/master
| 2020-06-27T08:50:09.578739
| 2016-12-05T17:40:11
| 2016-12-05T17:40:11
| 74,527,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,795
|
py
|
import os
import re
import csv
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import random
from sklearn import preprocessing
from sklearn import svm, neighbors, metrics, cross_validation, preprocessing
from sklearn.externals import joblib
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import auc, silhouette_score
from sklearn.cluster import KMeans, DBSCAN
from scipy import *
from scipy.stats import *
from scipy.signal import *
from collections import Counter
from sklearn.metrics import *
from sklearn.metrics import precision_recall_fscore_support as score
from stru_utils import *
from stru_settings import *
from scipy.stats import *
# this subject list is following the order in the subject ID map.
subjs = ['Rawan','Shibo','Dzung','Will', 'Gleb', 'JC','Matt','Jiapeng', 'Cao', 'Eric']#
Nfolds = 10
Ntrials = 5
# avgRes = pd.DataFrame(columns = ['subject','mean fscore','var fscore'], index = range(len(subj_list)))
clf = RandomForestClassifier(n_estimators = 100)
allResultFolder = "./subject/overall/result/personalized/10fCV/"
if not os.path.exists(allResultFolder):
os.makedirs(allResultFolder)
columns = ['Fold' + str(i + 1) + meas for i in range(10) for meas in ['Prec(pos)','F1(pos)','TPR','FPR','Specificity','MCC','CKappa','w-acc']]
columns = columns +['aveFoldPrec(pos)','aveFoldF1(pos)','aveFoldTPR', 'aveFoldFPR','aveFoldSpecificity','aveFoldMCC','aveFoldCKappa','aveFoldw-acc']
for active_participant_counter, subj in enumerate(subjs):
crossValRes = pd.DataFrame(columns = columns, index = range(Ntrials+1))
if (not (active_participant_counter == 3)) and (not (active_participant_counter == 4)) and (not (active_participant_counter == 6)) :
print(subj)
subjfolder = subj + '(8Hz)/'
folder = '../inlabStr/subject/'
featFolder = folder+subjfolder+"feature/"
datafile = folder+subjfolder+"testdata.csv"
segfolder = folder+subjfolder+"segmentation/"
clsfolder = folder+subjfolder+"classification/"
act_rootfolder = segfolder+'activity/'
allfeatFolder = featFolder+"all_features/"
detAllfeatFolder = allfeatFolder+"detection/"
if not os.path.exists(clsfolder):
os.makedirs(clsfolder)
if not os.path.exists(allfeatFolder):
os.makedirs(allfeatFolder)
gtFeatPath = featFolder + "gt_features.csv"
outfile = clsfolder + "cm_gt_cls.csv"
df = pd.read_csv(gtFeatPath)
print(len(df))
df = df.dropna()
print(len(df))
#
# note: duration should not be included in the features,
# because during the detection period this highly distinguishable feature follows a different distribution
#
X = df.iloc[:,:-2].as_matrix()
equiv = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0,7:0, 8:0, 9:1, 10:1, 11:1, 12:1, 13:1, 14:1, 15:1, 16:1, 17:1}
df["f-nf"] = df["activity"].map(equiv)
Y = df['f-nf'].as_matrix()
for i in range(Ntrials+1):
print(i)
if i == Ntrials:
for j in range(Nfolds):
crossValRes['Fold' + str(j + 1)+'Prec(pos)'][i] = crossValRes['Fold' + str(j + 1)+'Prec(pos)'].mean()
crossValRes['Fold' + str(j + 1)+'F1(pos)'][i] = crossValRes['Fold' + str(j + 1)+'F1(pos)'].mean()
crossValRes['Fold' + str(j + 1)+'TPR'][i] = crossValRes['Fold' + str(j + 1)+'TPR'].mean()
crossValRes['Fold' + str(j + 1)+'FPR'][i] = crossValRes['Fold' + str(j + 1)+'FPR'].mean()
crossValRes['Fold' + str(j + 1)+'Specificity'][i] = crossValRes['Fold' + str(j + 1)+'Specificity'].mean()
crossValRes['Fold' + str(j + 1)+'MCC'][i] = crossValRes['Fold' + str(j + 1)+'MCC'].mean()
crossValRes['Fold' + str(j + 1)+'CKappa'][i] = crossValRes['Fold' + str(j + 1)+'CKappa'].mean()
crossValRes['Fold' + str(j + 1)+'w-acc'][i] = crossValRes['Fold' + str(j + 1)+'w-acc'].mean()
break
for j in range(Nfolds):
X_train, X_test = k_fold_split(X, Nfolds, j)
y_train, y_test = k_fold_split(Y, Nfolds, j)
# cm_file = allResultFolder + "cm_trial"+str(i)+"_fold"+str(j)+".csv"
prec_pos, f1_pos, TPR, FPR, Specificity, MCC, CKappa, w_acc,_ = clf_cm(X_train, X_test, y_train, y_test)
crossValRes['Fold' + str(j + 1)+'Prec(pos)'][i] = prec_pos
crossValRes['Fold' + str(j + 1)+'F1(pos)'][i] = f1_pos
crossValRes['Fold' + str(j + 1)+'TPR'][i] = TPR
crossValRes['Fold' + str(j + 1)+'FPR'][i] = FPR
crossValRes['Fold' + str(j + 1)+'Specificity'][i] = Specificity
crossValRes['Fold' + str(j + 1)+'MCC'][i] = MCC
crossValRes['Fold' + str(j + 1)+'CKappa'][i] = CKappa
crossValRes['Fold' + str(j + 1)+'w-acc'][i] = w_acc
crossValHit = crossValRes[['Fold' + str(i + 1) + 'Prec(pos)' for i in range(10)]]
crossValRes['aveFoldPrec(pos)'] = crossValHit.mean(axis = 1)
crossValHit = crossValRes[['Fold' + str(i + 1) + 'F1(pos)' for i in range(10)]]
crossValRes['aveFoldF1(pos)'] = crossValHit.mean(axis = 1)
crossValTPR = crossValRes[['Fold' + str(i + 1) + 'TPR' for i in range(10)]]
crossValRes['aveFoldTPR'] = crossValTPR.mean(axis = 1)
crossValFPR = crossValRes[['Fold' + str(i + 1) + 'FPR' for i in range(10)]]
crossValRes['aveFoldFPR'] = crossValFPR.mean(axis = 1)
crossValSpe = crossValRes[['Fold' + str(i + 1) + 'Specificity' for i in range(10)]]
crossValRes['aveFoldSpecificity'] = crossValSpe.mean(axis = 1)
crossValMCC = crossValRes[['Fold' + str(i + 1) + 'MCC' for i in range(10)]]
crossValRes['aveFoldMCC'] = crossValMCC.mean(axis = 1)
crossValCKappa = crossValRes[['Fold' + str(i + 1) + 'CKappa' for i in range(10)]]
crossValRes['aveFoldCKappa'] = crossValCKappa.mean(axis = 1)
crossValCKappa = crossValRes[['Fold' + str(i + 1) + 'w-acc' for i in range(10)]]
crossValRes['aveFoldw-acc'] = crossValCKappa.mean(axis = 1)
crossValRes.to_csv( allResultFolder+"10fCV_subj"+str(active_participant_counter)+".csv", index = None)
|
[
"noreply@github.com"
] |
zsb87.noreply@github.com
|
9e22550e6f18a4d4a351e5ad2167d5cbf66811ca
|
dd7f446eb5d64ce069a9f9b6ade3f69fc2412960
|
/text_predictor.py
|
4db5526b52e61c0e4f10c0749b28b84bad89138e
|
[] |
no_license
|
fcgll520/Watermark-Text-Detection
|
3563c331502ff24ac7e1c8cd2807faebb4051df8
|
1eb807d8b56509de1b38ae9fa53f90e2ab0b14a2
|
refs/heads/master
| 2020-04-25T02:03:38.385070
| 2018-05-28T10:22:36
| 2018-05-28T10:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,224
|
py
|
import cv2
import numpy as np
import pytesseract
import os
from PIL import Image
# Path of working folder on Disk
#src_path = "E:/Lab/Python/Project/OCR/"
src_path = "TestImages"
def get_string(img_path):
# Read image with opencv
img = cv2.imread(img_path)
# Convert to gray
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Apply dilation and erosion to remove some noise
kernel = np.ones((1, 1), np.uint8)
img = cv2.dilate(img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=1)
# Write image after removed noise
cv2.imwrite("removed_noise.png", img)
# Apply threshold to get image with only black and white
#img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 2)
# Write the image after applying the opencv preprocessing
cv2.imwrite("thres.png", img)
# Recognize text with tesseract for python
result = pytesseract.image_to_string(Image.open("thres.png"))
# Remove template file
#os.remove(temp)
return result
print('--- Start recognize text from image ---')
print(get_string("TestImages/test2.jpeg"))
os.remove("removed_noise.png")
os.remove("thres.png")
print("------ Done -------")
|
[
"punit.kumar1661@gmail.com"
] |
punit.kumar1661@gmail.com
|
d23304d68276068928c0a8131eb48057ad7f20b5
|
814730d67d55751aca1b088e78a3be6491407fc7
|
/Python_Basics/setExample.py
|
dcf3cd993b666340520075d82681250f28b67b48
|
[] |
no_license
|
ghanshyam30/Code_snippets
|
e4410a8bb9758419a7786f28d8f6c2b6c39b88f0
|
1d75eb52c9c9f945e96c371a44df47cc498da72f
|
refs/heads/master
| 2021-12-14T14:38:16.656833
| 2021-12-12T10:11:39
| 2021-12-12T10:11:39
| 162,509,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
'''
Problem statement - You have 2 strings.
You need to find: 1) the characters of String1 that are not in String2, and 2) vice versa.
'''
string1 = "Geeks For Geeks"
string2 = "Geek"
set_1 = set(string1)
set_2 = set(string2)
print("Set1: ",set_1)
print("Set2: ",set_2)
print("======Set difference====")
# characters present in set1 but not in Set2
print("set1-set2: ","".join(set_1 - set_2))
# characters present in set2 but not in set1
print("set2-set1: ","".join(set_2 - set_1))
# OUTPUT
'''
Set1: {'o', 's', 'r', 'G', 'e', 'k', ' ', 'F'}
Set2: {'k', 'G', 'e'}
======Set difference====
set1-set2: osr F
set2-set1:
'''
|
[
"gbambale@avaya.com"
] |
gbambale@avaya.com
|
3a687fa00b9c704a4b36677a42ff1ffee5b42dbf
|
61d943fddf915e1483a3c2c5f89eb03464556386
|
/games/atomix/bat_belt.py
|
7716b29e21e3d52741f8f9a6cd3947cf53a6a4bb
|
[
"WTFPL"
] |
permissive
|
darkrecher/PuzzleSalad
|
5fe5a3e0d1620ca78a4e2ada47b94c56da937c0e
|
0c778b31db8b070bfb15c80501a976546a9496dc
|
refs/heads/master
| 2021-01-12T11:15:20.124896
| 2020-05-14T16:00:53
| 2020-05-14T16:00:53
| 72,884,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
# coding: utf-8
"""
Bat-belt. A pile of small utility functions that are useful everywhere.
"""
def enum(enumName, *listValueNames):
"""
My handy function for creating enum types (as in C++).
:Example:
CARROT_STATE = enum(
"CARROT_STATE", # il faut répéter le nom du type enum.
"GRAIN", # nom de l'état 1
"GROWING", # nom de l'état 2
"OK", # etc...
"ROTTEN",
)
cst = CARROT_STATE
current_state = cst.GROWING
For more detail, see my article:
http://sametmax.com/faire-des-enums-en-python/
"""
# A sequence of integers; we create as many
# as there are value names in the enum.
listValueNumbers = range(len(listValueNames))
# Build the dictionary of attributes.
# Initial fill with the mappings: enum value name -> integer
dictAttrib = dict( zip(listValueNames, listValueNumbers) )
# Build the reverse dictionary: integer -> enum value name
dictReverse = dict( zip(listValueNumbers, listValueNames) )
# Add the reverse dictionary to the attributes
dictAttrib["dictReverse"] = dictReverse
# Create and return the type.
# Note: the type() function expects an ASCII string as its first
# parameter, not a unicode string, hence the "str".
mainType = type(str(enumName), (), dictAttrib)
return mainType
""" orientations """
ORI = enum(
"ORI",
"RIGHT", "DOWN", "LEFT", "UP")
|
[
"recher_burp@yahoo.fr"
] |
recher_burp@yahoo.fr
|
67c797314b2ef6faf6bf816ba17bf37ff46adbcc
|
a2dd24c16fffdb86dc4b140e901a94eae74fab27
|
/pygcam/xmlEditor.py
|
491a7f58e395385c5f5832bc97745dde2a461c66
|
[
"MIT"
] |
permissive
|
danjtanner-EPA/pygcam
|
30eef701424f5ac0c225b6ac178efa2f223c31c4
|
41eeb452137a12711ed0d60784408610589e69a5
|
refs/heads/master
| 2020-04-25T04:44:50.975279
| 2019-02-22T22:01:52
| 2019-02-22T22:01:52
| 172,107,343
| 0
| 0
| null | 2019-02-22T17:31:50
| 2019-02-22T17:31:50
| null |
UTF-8
|
Python
| false
| false
| 71,173
|
py
|
'''
.. Copyright (c) 2016 Richard Plevin
See the https://opensource.org/licenses/MIT for license details.
'''
#
# Facilities setting up / customizing GCAM project's XML files.
#
# Common variables and functions for manipulating XML files.
# Basic approach is to create a directory for each defined scenario,
# in which modified files and a corresponding configuration XML file
# are stored.
#
# To allow functions to be called in any order or combination, each
# copies (if needed) the source file to the local scenario dir, then
# edits it in place. If was previously modified by another function,
# the copy is skipped, and the new edits are applied to the local,
# already modified file. Each function updates the local config file
# to refer to the modified file. (This may be done multiple times, to
# no ill effect.)
#
import glob
import os
import re
import shutil
import six
from lxml import etree as ET
from semver import VersionInfo
from .config import getParam, getParamAsBoolean, parse_version_info, unixPath, pathjoin
from .constants import LOCAL_XML_NAME, DYN_XML_NAME, GCAM_32_REGIONS
from .error import SetupException, PygcamException
from .log import getLogger
from .policy import (policyMarketXml, policyConstraintsXml, DEFAULT_MARKET_TYPE,
DEFAULT_POLICY_ELT, DEFAULT_POLICY_TYPE)
from .utils import (coercible, mkdirs, printSeries, symlinkOrCopyFile, removeTreeSafely)
# Names of key scenario components in reference GCAM 4.3 configuration.xml file
ENERGY_TRANSFORMATION_TAG = "energy_transformation"
SOLVER_TAG = "solver"
AttributePattern = re.compile('(.*)/@([-\w]*)$')
XmlDirPattern = re.compile('/[^/]*-xml/')
_logger = getLogger(__name__)
# methods callable from <function name="x">args</function> in
# XML scenario setup scripts.
CallableMethods = {}
# decorator to identify callable methods
def callableMethod(func):
CallableMethods[func.__name__] = func
return func
def getCallableMethod(name):
return CallableMethods.get(name)
def makeDirPath(elements, require=False, create=False, mode=0o775):
"""
Join the tuple of elements to create a path to a directory,
optionally checking that it exists or creating intermediate
directories as needed.
:param elements: a tuple of pathname elements to join
:param require: if True, raise an error if the path doesn't exist
:param create: if True, create the path if it doesn't exist
:param mode: file mode used when making directories
:return: the joined path
:raises: pygcam.error.SetupException
"""
path = pathjoin(*elements)
if (create or require) and not os.path.lexists(path):
if create:
os.makedirs(path, mode)
elif require:
raise SetupException("Required path '%s' does not exist." % path)
return path
#
# Copy src file to dst only if it doesn't already exist.
#
def copyIfMissing(src, dst, makedirs=False):
"""
Copy file `src` to `dst`, but only if `dst` doesn't already exist.
:param src: (str) pathname of the file to copy
:param dst: (str) pathname of the copy to create
:param makedirs: if True, make any missing directories
:return: none
"""
if not os.path.lexists(dst):
parentDir = os.path.dirname(dst)
if makedirs and not os.path.isdir(parentDir):
_logger.debug("mkdir %s" % parentDir)
os.makedirs(parentDir, 0o755)
_logger.info("Copy %s\n to %s" % (src, dst))
shutil.copy(src, dst)
os.chmod(dst, 0o644)
class CachedFile(object):
parser = ET.XMLParser(remove_blank_text=True)
# Store parsed XML trees here and use with xmlSel/xmlEdit if useCache is True
cache = {}
def __init__(self, filename):
self.filename = filename
self.edited = False
_logger.debug("Reading '%s'", filename)
self.tree = ET.parse(filename, self.parser)
self.cache[filename] = self
@classmethod
def getFile(cls, filename):
if filename in cls.cache:
#_logger.debug("Found '%s' in cache", filename)
item = cls.cache[filename]
else:
item = CachedFile(filename)
return item
def setEdited(self):
self.edited = True
def write(self):
_logger.info("Writing '%s'", self.filename)
self.tree.write(self.filename, xml_declaration=True, encoding='utf-8', pretty_print=True)
self.edited = False
def decache(self):
if self.edited:
self.write()
@classmethod
def decacheAll(cls):
for item in cls.cache.values():
item.decache()
def xmlSel(filename, xpath, asText=False):
"""
Return True if the XML component identified by the xpath argument
exists in `filename`. Useful for deciding whether to edit or
insert an XML element.
:param filename: (str) the file to edit
:param xpath: (str) the xml element(s) to search for
:param asText: (str) if True, return the text of the node, if found, else None
:return: (bool) True if found, False otherwise. (see asText)
"""
item = CachedFile.getFile(filename)
result = item.tree.find(xpath)
if asText:
return result.text if result is not None else None
return bool(result)
#
# xmlEdit can set a value, multiply a value in the XML by a constant,
# or add a constant to the value in the XML. These funcs handle each
# operation, allowing the logic to be outside the loop, which might
# iterate over thousands of elements.
#
def _set(elt, value):
elt.text = str(value)
def _multiply(elt, value):
elt.text = str(float(elt.text) * value)
def _add(elt, value):
elt.text = str(float(elt.text) + value)
_editFunc = {'set' : _set,
'multiply' : _multiply,
'add' : _add}
def xmlEdit(filename, pairs, op='set', useCache=True):
"""
Edit the XML file `filename` in place, applying the values to the given xpaths
in the list of pairs.
:param filename: the file to edit in-place.
:param pairs: (iterable of (xpath, value) pairs) In each pair, the xpath selects
elements or attributes to update with the given values.
:param op: (str) Operation to perform. Must be in ('set', 'multiply', 'add').
Note that 'multiply' and 'add' are *not* available for xpaths selecting
attributes rather than node values. For 'multiply' and 'add', the value
should be passed as a float. For 'set', it can be a float or a string.
:param useCache: (bool) if True, the etree is sought first in the XmlCache. This
avoids repeated parsing, but the file is always written (eventually) if updated
by this function.
:return: True on success, else False
"""
legalOps = _editFunc.keys()
if op not in legalOps:
raise PygcamException('xmlEdit: unknown operation "{}". Must be one of {}'.format(op, legalOps))
modFunc = _editFunc[op]
item = CachedFile.getFile(filename)
tree = item.tree
updated = False
# if at least one xpath is found, update and write file
for xpath, value in pairs:
attr = None
# If it's an attribute update, extract the attribute
# and use the rest of the xpath to select the elements.
match = re.match(AttributePattern, xpath)
if match:
attr = match.group(2)
xpath = match.group(1)
elts = tree.xpath(xpath)
if len(elts):
updated = True
if attr: # conditional outside loop since there may be many elements
value = str(value)
for elt in elts:
elt.set(attr, value)
else:
for elt in elts:
modFunc(elt, value)
if updated:
if useCache:
item.setEdited()
else:
item.write()
return updated
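# A minimal usage sketch (hypothetical file path and xpath, not taken from pygcam itself),
# showing the (xpath, value) pair form described in the docstring above:
#
#     xmlEdit('local-xml/my-scenario/transportation.xml',
#             [("//period[@year='2030']/share-weight", 0.5)],
#             op='multiply')
#
# With op='multiply' or op='add' the selected node text is parsed as a float;
# op='set' also accepts attribute selections ending in /@attr.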
def extractStubTechnology(region, srcFile, dstFile, sector, subsector, technology,
sectorElement='supplysector', fromRegion=False):
"""
Extract a definition from the global-technology-database based on `sector`, `subsector`,
and `technology`, defined in `srcFile` and create a new file, `dstFile` with the extracted
bit as a stub-technology definition for the given region. If `fromRegion` is True,
extract the stub-technology from the regional definition, rather than from the
global-technology-database.
:param region: (str) the name of the GCAM region for which to copy the technology
:param srcFile: (str) the pathname of a source XML file with a global-technology-database
:param dstFile: (str) the pathname of the file to create
:param sector: (str) the name of a GCAM sector
:param subsector: (str) the name of a GCAM subsector within `sector`
:param technology: (str) the name of a GCAM technology within `sector` and `subsector`
:param sectorElement: (str) the name of the XML element to create (or search for, if `fromRegion`
is True) between the ``<region>`` and ``<subsector>`` XML elements. Defaults to 'supplysector'.
:param fromRegion: (bool) if True, the definition is extracted from a regional definition
rather than from the global-technology-database.
:return: True on success, else False
"""
_logger.info("Extract stub-technology for %s (%s) to %s" % (technology, region if fromRegion else 'global', dstFile))
if fromRegion:
xpath = "//region[@name='%s']/%s[@name='%s']/subsector[@name='%s']/stub-technology[@name='%s']" % \
(region, sectorElement, sector, subsector, technology)
else:
xpath = "//global-technology-database/location-info[@sector-name='%s' and @subsector-name='%s']/technology[@name='%s']" % \
(sector, subsector, technology)
# Read the srcFile to extract the required elements
parser = ET.XMLParser(remove_blank_text=True)
tree = ET.parse(srcFile, parser)
# Rename technology => stub-technology (for global-tech-db case)
elts = tree.xpath(xpath)
if len(elts) != 1:
raise PygcamException('Xpath "%s" failed' % xpath)
technologyElt = elts[0]
technologyElt.tag = 'stub-technology' # no-op if fromRegion == True
# Surround the extracted XML with the necessary hierarchy
scenarioElt = ET.Element('scenario')
worldElt = ET.SubElement(scenarioElt, 'world')
regionElt = ET.SubElement(worldElt, 'region', attrib={'name' : region})
sectorElt = ET.SubElement(regionElt, sectorElement, attrib={'name' : sector})
subsectorElt = ET.SubElement(sectorElt, 'subsector', attrib={'name' : subsector})
subsectorElt.append(technologyElt)
# Workaround for parsing error: explicitly name shutdown deciders
elts = scenarioElt.xpath("//phased-shutdown-decider|profit-shutdown-decider")
for elt in elts:
parent = elt.getparent()
parent.remove(elt)
_logger.info("Writing '%s'", dstFile)
newTree = ET.ElementTree(scenarioElt)
newTree.write(dstFile, xml_declaration=True, pretty_print=True)
return True
def expandYearRanges(seq):
"""
Expand a sequence of (year, value) tuples, or a dict keyed by
year, where the year argument may be a string containing identifying
range of values with an optional "step" value indicated after a ":".
The default step is 5 years. For example, "2015-2030" expands to
(2015, 2020, 2025, 2030), and "2015-2020:1" expands to
(2015, 2016, 2017, 2018, 2019, 2020). When a range is given, the
tuple is replaced with a sequence of tuples naming each year explicitly.
Typical usage is ``for year, price in expandYearRanges(values): ...``.
:param seq:
The sequence of (year, value) tuples, or any object with an
items() method that returns (year, value) pairs.
:return:
A list of tuples with the expanded sequence.
"""
result = []
try:
seq = list(seq.items()) # convert dict or Series to list of pairs
except: # or quietly fail, and just use 'seq' as is
pass
for year, value in seq:
value = float(value)
if isinstance(year, six.string_types) and '-' in year:
m = re.search('^(\d{4})-(\d{4})(:(\d+))?$', year)
if not m:
raise SetupException('Unrecognized year range specification: %s' % year)
startYear = int(m.group(1))
endYear = int(m.group(2))
stepStr = m.group(4)
step = int(stepStr) if stepStr else 5
expanded = [[y, value] for y in range(startYear, endYear+step, step)]
result.extend(expanded)
else:
result.append((int(year), value))
return result
# TBD: maybe xmlSetup should be the only approach rather than supporting original setup subclasses.
# TBD: this way we can assume scenario definition exists in xml format and create an API to get the
# TBD: information about any scenario definition from xmlSetup.py.
#
# TBD: The question is whether command-line override capability is required, or if all should be in XML.
# TBD: Need to think through alternative use cases.
#
# TBD: should be no need to pass baseline since this can be inferred from scenario and scenarioGroup.
# TBD: also can tell if it's a baseline; if not, find and cache ref to baseline
class ScenarioInfo(object):
def __init__(self, scenarioGroup, scenarioName, scenarioSubdir,
xmlSourceDir, xmlGroupSubdir, sandboxRoot, sandboxGroupSubdir):
self.scenarioGroup = scenarioGroup
self.scenarioName = scenarioName
self.scenarioSubdir = scenarioSubdir or scenarioName
self.xmlSourceDir = xmlSourceDir
self.xmlGroupSubdir = xmlGroupSubdir or scenarioGroup
self.sandboxRoot = sandboxRoot
self.sandboxGroupSubdir = sandboxGroupSubdir or scenarioGroup
self.isBaseline = False # TBD
if not self.isBaseline:
self.baselineName = 'something'
self.baselineInfo = self.fromXmlSetup(scenarioGroup, self.baselineName)
# TBD: after setting self.x for all x:
self.configPath = pathjoin(self.scenarioDir(), 'config.xml', realPath=True)
@classmethod
def fromXmlSetup(cls, scenarioGroup, scenarioName):
# TBD: lookup the group and scenario, grab all data and
# TBD: return ScenarioInfo(...)
pass
def absPath(self, x):
pass
def relPath(self, y):
pass
def scenarioXmlSourceDir(self, xmlSubdir=True):
xmlDir = 'xml' if xmlSubdir else ''
return pathjoin(self.xmlSourceDir, self.xmlGroupSubdir, self.scenarioSubdir, xmlDir)
def scenarioXmlOutputDir(self):
return pathjoin(self.xmlOutputDir, self.scenarioGroup, self.scenarioName)
def scenarioXmlSourceFiles(self):
# These two versions handle legacy case with extra 'xml' subdir and new approach, without
files = glob.glob(self.scenarioXmlSourceDir(xmlSubdir=False) + '/*.xml')
files += glob.glob(self.scenarioXmlSourceDir(xmlSubdir=True) + '/*.xml')
return files
def cfgPath(self):
"""
Compute the name of the GCAM config file for the current scenario.
:return: (str) the pathname to the XML configuration file.
"""
if not self.configPath:
# compute the first time, then cache it
self.configPath = unixPath(os.path.realpath(pathjoin(self.scenario_dir_abs, 'config.xml')))
return self.configPath
class XMLEditor(object):
'''
Base class for scenario setup. Custom scenario processing classes must
subclass this. Represents the information required to setup a scenario, i.e.,
to generate and/or copy the required XML files into the XML output dir.
'''
# TBD: consider whether init should take an object describing the scenario
# TBD: that can be populated from a scenario instance from xmlSetup.py or something
# TBD: specific to the task. All these args are a pain, and there's no method API
# TBD: to perform common ops.
# TBD:
def __init__(self, baseline, scenario, xmlOutputRoot, xmlSourceDir, refWorkspace,
groupDir, srcGroupDir, subdir, parent=None):
self.name = name = scenario or baseline # if no scenario stated, assume baseline
self.baseline = baseline
self.scenario = scenario
self.xmlOutputRoot = xmlOutputRoot
self.refWorkspace = refWorkspace
self.xmlSourceDir = xmlSourceDir
self.sandboxExeDir = pathjoin(getParam('GCAM.SandboxRefWorkspace'), 'exe')
self.parent = parent
self.mcsMode = None
self.mcsValues = None
self.setupArgs = None
# TBD: this would be just ../local-xml "project/scenario" occurs once, above
# Allow scenario name to have arbitrary subdirs between "../local-xml" and
# the scenario name, e.g., "../local-xml/project/scenario"
self.subdir = subdir or ''
self.groupDir = groupDir
self.srcGroupDir = srcGroupDir or groupDir
self.configPath = None
# TBD: xmlOutputRoot is now just scenario dir, so this parameter can disappear
self.local_xml_abs = makeDirPath((xmlOutputRoot, LOCAL_XML_NAME), create=True)
self.dyn_xml_abs = makeDirPath((xmlOutputRoot, DYN_XML_NAME), create=True) # TBD eliminate
self.local_xml_rel = pathjoin("..", LOCAL_XML_NAME)
self.dyn_xml_rel = pathjoin("..", DYN_XML_NAME) # TBD eliminate
self.trial_xml_rel = self.trial_xml_abs = None # used by MCS only
# TBD: order changes using ScenarioInfo API
self.scenario_dir_abs = makeDirPath((self.local_xml_abs, groupDir, name), create=True)
self.scenario_dir_rel = pathjoin(self.local_xml_rel, groupDir, name)
# Get baseline from ScenarioGroup and use ScenarioInfo API to get this type of info
self.baseline_dir_rel = pathjoin(self.local_xml_rel, groupDir, self.parent.name) if self.parent else None
# TBD eliminate
self.scenario_dyn_dir_abs = makeDirPath((self.dyn_xml_abs, groupDir, name), create=True)
self.scenario_dyn_dir_rel = pathjoin(self.dyn_xml_rel, groupDir, name)
# Store commonly-used paths
gcam_xml = pathjoin('input', getParam('GCAM.DataDir'), 'xml')
self.gcam_prefix_abs = prefix_abs = pathjoin(refWorkspace, gcam_xml)
self.gcam_prefix_rel = prefix_rel = pathjoin('../', gcam_xml)
version = parse_version_info()
if version > VersionInfo(5, 1, 0):
# subdirs have been removed in v5.1
self.aglu_dir_abs = ''
self.emissions_dir_abs = ''
self.energy_dir_abs = ''
self.modeltime_dir_abs = ''
self.socioeconomics_dir_abs = ''
self.aglu_dir_rel = ''
self.emissions_dir_rel = ''
self.energy_dir_rel = ''
self.modeltime_dir_rel = ''
self.socioeconomics_dir_rel = ''
else:
# TBD: maybe no need to store these since computable from rel paths
self.aglu_dir_abs = pathjoin(prefix_abs, 'aglu-xml')
self.emissions_dir_abs = pathjoin(prefix_abs, 'emissions-xml')
self.energy_dir_abs = pathjoin(prefix_abs, 'energy-xml')
self.modeltime_dir_abs = pathjoin(prefix_abs, 'modeltime-xml')
self.socioeconomics_dir_abs = pathjoin(prefix_abs, 'socioeconomics-xml')
self.aglu_dir_rel = pathjoin(prefix_rel, 'aglu-xml')
self.emissions_dir_rel = pathjoin(prefix_rel, 'emissions-xml')
self.energy_dir_rel = pathjoin(prefix_rel, 'energy-xml')
self.modeltime_dir_rel = pathjoin(prefix_rel, 'modeltime-xml')
self.socioeconomics_dir_rel = pathjoin(prefix_rel, 'socioeconomics-xml')
# TBD: add climate and policy subdirs?
self.solution_prefix_abs = pathjoin(refWorkspace, "input", "solution")
self.solution_prefix_rel = pathjoin("..", "input", "solution")
def absPath(self, relPath):
"""
Convert `relPath` to an absolute path by treating it as relative to
the current scenario's "exe" directory.
:param relPath: (str) a path relative to the current "exe" directory
:return: (str) the absolute path corresponding to `relPath`.
"""
return pathjoin(self.xmlOutputRoot, 'exe', relPath, normpath=True)
@staticmethod
def recreateDir(path):
removeTreeSafely(path)
mkdirs(path)
def setupDynamic(self, args):
"""
Create dynamic XML files in dyn-xml. These files are generated for policy
scenarios when XML file contents must be computed from baseline results.
:param args: (argparse.Namespace) arguments passed from the top-level call
to setup sub-command
:return: none
"""
_logger.info("Generating dyn-xml for scenario %s" % self.name)
# Delete old generated scenario files
dynDir = self.scenario_dyn_dir_abs
self.recreateDir(dynDir)
scenDir = self.scenario_dir_abs
xmlFiles = glob.glob("%s/*.xml" % scenDir)
# TBD: no need to link or copy if all in one place. [But dyn are per-trial; local are not]
if xmlFiles:
mode = 'Copy' if getParamAsBoolean('GCAM.CopyAllFiles') else 'Link'
_logger.info("%s %d static XML files in %s to %s", mode, len(xmlFiles), scenDir, dynDir)
for xml in xmlFiles:
base = os.path.basename(xml)
dst = pathjoin(dynDir, base)
src = pathjoin(scenDir, base)
symlinkOrCopyFile(src, dst)
else:
_logger.info("No XML files to link in %s", unixPath(scenDir, abspath=True))
CachedFile.decacheAll()
def setupStatic(self, args):
"""
Create static XML files in local-xml. By "static", we mean files whose contents are
independent of baseline results. In comparison, policy scenarios may generate dynamic
XML files whose contents are computed from baseline results.
:param args: (argparse.Namespace) arguments passed from the top-level call to setup
sub-command.
:return: none
"""
_logger.info("Generating local-xml for scenario %s" % self.name)
scenDir = self.scenario_dir_abs
mkdirs(scenDir)
# TBD: there's nothing else now in these dirs, so "xml" subdir is not really needed
topDir = pathjoin(self.xmlSourceDir, self.srcGroupDir, self.subdir or self.name)
subDir = pathjoin(topDir, 'xml') # legacy only
xmlFiles = glob.glob("{}/*.xml".format(topDir)) + glob.glob("{}/*.xml".format(subDir))
if xmlFiles:
_logger.info("Copy {} static XML files from {} to {}".format(len(xmlFiles), topDir, scenDir))
for src in xmlFiles:
shutil.copy2(src, scenDir) # copy2 preserves metadata, e.g., timestamp
else:
_logger.info("No XML files to copy in %s", unixPath(topDir, abspath=True))
configPath = self.cfgPath()
parent = self.parent
parentConfigPath = parent.cfgPath() if parent else getParam('GCAM.RefConfigFile')
_logger.info("Copy %s\n to %s" % (parentConfigPath, configPath))
shutil.copy(parentConfigPath, configPath)
os.chmod(configPath, 0o664)
# set the scenario name
self.updateConfigComponent('Strings', 'scenarioName', self.name)
# This is inherited from baseline by policy scenarios; no need to redo this
if not self.parent:
self.makeScenarioComponentsUnique()
# For the following configuration file settings, no action is taken when value is None
if args.stopPeriod is not None:
self.setStopPeriod(args.stopPeriod)
# For the following boolean arguments, we first check if there is any value. If
# not, no change is made. If a value is given, the parameter is set accordingly.
if getParam('GCAM.WritePrices'):
self.updateConfigComponent('Bools', 'PrintPrices', int(getParamAsBoolean('GCAM.WritePrices')))
if getParam('GCAM.WriteDebugFile'):
self.updateConfigComponent('Files', 'xmlDebugFileName', value=None,
writeOutput=getParamAsBoolean('GCAM.WriteDebugFile'))
if getParam('GCAM.WriteXmlOutputFile'):
self.updateConfigComponent('Files', 'xmlOutputFileName', value=None,
writeOutput=getParamAsBoolean('GCAM.WriteXmlOutputFile'))
version = parse_version_info()
if version < VersionInfo(5, 1, 2):
# this option was removed in gcam-v5.1.2
if getParam('GCAM.WriteOutputCsv'):
self.updateConfigComponent('Files', 'outFileName', value=None,
writeOutput=getParamAsBoolean('GCAM.WriteOutputCsv'))
if version >= VersionInfo(5, 1, 2):
if getParam('GCAM.WriteRestartFiles'):
self.updateConfigComponent('Files', 'restart', value=None,
writeOutput=getParamAsBoolean('GCAM.WriteRestartFiles'))
CachedFile.decacheAll()
def setup(self, args):
"""
Calls setupStatic and/or setupDynamic, depending on flags set in args.
:param args: (argparse.Namespace) arguments passed from the top-level call
to setup
:return: none
"""
_logger.debug('Called XMLEditor.setup(%s)', args)
self.setupArgs = args # some subclasses/functions might want access to these
if not args.dynamicOnly:
self.setupStatic(args)
if not args.staticOnly:
self.setupDynamic(args)
CachedFile.decacheAll()
def makeScenarioComponentsUnique(self):
"""
Give all reference ScenarioComponents a unique "name" tag to facilitate
manipulation via XPath queries. This is a no-op in GCAM version >= 4.3.
:return: none
"""
version = parse_version_info()
# no longer necessary in 4.3. For 4.2, we reset names to those used in 4.3
if version < VersionInfo(4, 3, 0):
self.renameScenarioComponent("interest_rate", pathjoin(self.socioeconomics_dir_rel, "interest_rate.xml"))
self.renameScenarioComponent("socioeconomics", pathjoin(self.socioeconomics_dir_rel, "socioeconomics_GCAM3.xml"))
self.renameScenarioComponent("industry", pathjoin(self.energy_dir_rel, "industry.xml"))
self.renameScenarioComponent("industry_income_elas", pathjoin(self.energy_dir_rel, "industry_incelas_gcam3.xml"))
self.renameScenarioComponent("cement", pathjoin(self.energy_dir_rel, "cement.xml"))
self.renameScenarioComponent("cement_income_elas", pathjoin(self.energy_dir_rel, "cement_incelas_gcam3.xml"))
self.renameScenarioComponent("fertilizer_energy", pathjoin(self.energy_dir_rel, "en_Fert.xml"))
self.renameScenarioComponent("fertilizer_agriculture", pathjoin(self.aglu_dir_rel, "ag_Fert.xml"))
for i in (1,2,3):
tag = 'land%d' % i
filename = 'land_input_%d.xml' % i
self.renameScenarioComponent(tag, pathjoin(self.aglu_dir_rel, filename))
if i > 1:
tag = 'protected_' + tag
filename = 'protected_' + filename
self.renameScenarioComponent(tag, pathjoin(self.aglu_dir_rel, filename))
def cfgPath(self):
"""
Compute the name of the GCAM config file for the current scenario.
:return: (str) the pathname to the XML configuration file.
"""
if not self.configPath:
# compute the first time, then cache it
self.configPath = unixPath(os.path.realpath(pathjoin(self.scenario_dir_abs, 'config.xml')))
return self.configPath
def componentPath(self, tag, configPath=None):
configPath = configPath or self.cfgPath()
pathname = xmlSel(configPath, '//Value[@name="%s"]' % tag, asText=True)
if pathname is None:
raise PygcamException("Failed to find scenario component with tag '%s' in %s" % (tag, configPath))
return pathname
def getLocalCopy(self, configTag):
"""
Get the filename for the most local version (in terms of scenario hierarchy)
of the XML file identified in the configuration file with `configTag`, and
copy the file to our scenario dir if not already there.
:param configTag: (str) the configuration file tag (name="xxx") of an XML file
:return: (str, str) a tuple of the relative and absolute path of the
local (i.e., within the current scenario) copy of the file.
"""
# if configTag.endswith('.xml'):
# It's not a tag, but a filename
pathname = self.componentPath(configTag)
srcAbsPath = pathjoin(self.sandboxExeDir, pathname, abspath=True)
# TBD: test this
if not os.path.lexists(srcAbsPath):
_logger.debug("Didn't find %s; checking reference files" % srcAbsPath)
# look to sandbox workspace if not found locally
refWorkspace = getParam('GCAM.SandboxRefWorkspace')
refConfigFile = getParam('GCAM.RefConfigFile')
pathname = self.componentPath(configTag, configPath=refConfigFile)
srcAbsPath = pathjoin(refWorkspace, 'exe', pathname, abspath=True)
# If path includes /*-xml/* (e.g., '/energy-xml/', '/aglu-xml/'), retain
# this subdir in destination, else just use the basename of the path.
matches = list(re.finditer(XmlDirPattern, srcAbsPath))
if matches:
m = matches[-1]
suffix = os.path.basename(srcAbsPath) if m.group(0) == '/local-xml/' else srcAbsPath[m.start()+1:] # from after '/' to end
else:
suffix = os.path.basename(srcAbsPath)
dstAbsPath = pathjoin(self.scenario_dir_abs, suffix)
dstRelPath = pathjoin(self.scenario_dir_rel, suffix)
copyIfMissing(srcAbsPath, dstAbsPath, makedirs=True)
return dstRelPath, dstAbsPath
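# Editor's sketch (not part of the original source): assuming XmlDirPattern matches
# directory components such as '/energy-xml/' or '/aglu-xml/' (as the comment above
# suggests), a reference file like
#   .../input/gcamdata/xml/energy-xml/en_transformation.xml   (illustrative path)
# would be copied to '<scenario-dir>/energy-xml/en_transformation.xml', while a file
# already under '/local-xml/' is copied using just its basename.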
def updateConfigComponent(self, group, name, value=None, writeOutput=None, appendScenarioName=None):
"""
Update the value of an arbitrary element in GCAM's configuration.xml file, i.e.,
``<{group}><Value name="{name}>{value}</Value></{group}>``
Optional args are used only for ``<Files>`` group, which has entries like
``<Value write-output="1" append-scenario-name="0" name="outFileName">outFile.csv</Value>``
Values for the optional args can be passed as any of ``[0, 1, "0", "1", True, False]``.
:param group: (str) the name of a group of config elements in GCAM's configuration.xml
:param name: (str) the name of the element to be updated
:param value: (str) the value to set between the ``<Value></Value>`` elements
:param writeOutput: (coercible to int) for ``<Files>`` group, this sets the optional ``write-output``
attribute
:param appendScenarioName: (coercible to int) for ``<Files>`` group, this sets the optional
``append-scenario-name`` attribute.
:return: none
"""
textArgs = "name='%s'" % name
if writeOutput is not None:
textArgs += " write-output='%d'" % (int(writeOutput))
if appendScenarioName is not None:
textArgs += " append-scenario-name='%d'" % (int(appendScenarioName))
_logger.debug("Update <%s><Value %s>%s</Value>" % (group, textArgs, '...' if value is None else value))
cfg = self.cfgPath()
prefix = "//%s/Value[@name='%s']" % (group, name)
pairs = []
if value is not None:
pairs.append((prefix, value))
if writeOutput is not None:
pairs.append((prefix + "/@write-output", int(writeOutput)))
if appendScenarioName is not None:
pairs.append((prefix + "/@append-scenario-name", int(appendScenarioName)))
xmlEdit(cfg, pairs)
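# Editor's sketch (not part of the original source): a hypothetical call such as
#   self.updateConfigComponent('Files', 'outFileName', 'outFile.csv',
#                              writeOutput=1, appendScenarioName=0)
# builds the XPath/value pairs
#   //Files/Value[@name='outFileName']                        -> outFile.csv
#   //Files/Value[@name='outFileName']/@write-output          -> 1
#   //Files/Value[@name='outFileName']/@append-scenario-name  -> 0
# and hands them to xmlEdit() against the scenario's config.xml.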
@callableMethod
def setClimateOutputInterval(self, years):
"""
Sets the frequency at which climate-related outputs are
saved to the XML database to the given number of years,
e.g., ``<Value name="climateOutputInterval">1</Value>``.
**Callable from XML setup files.**
:param years: (coercible to int) the number of years to set as the climate (GHG)
output interval
:return: none
"""
self.updateConfigComponent('Ints', 'climateOutputInterval', coercible(years, int))
def addScenarioComponent(self, name, xmlfile):
"""
Add a new ``<ScenarioComponent>`` to the configuration file, at the end of the list
of components.
:param name: (str) the name to assign to the new scenario component
:param xmlfile: (str) the location of the XML file, relative to the `exe` directory
:return: none
"""
# Ensure no duplicate tags
self.deleteScenarioComponent(name)
xmlfile = unixPath(xmlfile)
_logger.info("Add ScenarioComponent name='%s', xmlfile='%s'" % (name, xmlfile))
cfg = self.cfgPath()
item = CachedFile.getFile(cfg)
item.setEdited()
elt = item.tree.find('//ScenarioComponents')
node = ET.SubElement(elt, 'Value')
node.set('name', name)
node.text = xmlfile
def insertScenarioComponent(self, name, xmlfile, after):
"""
Insert a ``<ScenarioComponent>`` to the configuration file, following the
entry named by ``after``.
:param name: (str) the name to assign to the new scenario component
:param xmlfile: (str) the location of the XML file, relative to the `exe` directory
:param after: (str) the name of the element after which to insert the new component
:return: none
"""
# Ensure no duplicate tags
self.deleteScenarioComponent(name)
xmlfile = unixPath(xmlfile)
_logger.info("Insert ScenarioComponent name='%s', xmlfile='%s' after value '%s'" % (name, xmlfile, after))
cfg = self.cfgPath()
item = CachedFile.getFile(cfg)
item.setEdited()
elt = item.tree.find('//ScenarioComponents')
afterNode = elt.find('Value[@name="%s"]' % after)
if afterNode is None:
raise SetupException("Can't insert %s after %s, as the latter doesn't exist" % (name, after))
index = elt.index(afterNode) + 1
node = ET.Element('Value')
node.set('name', name)
node.text = xmlfile
elt.insert(index, node)
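# Editor's sketch (not in the original source): addScenarioComponent() and
# insertScenarioComponent() both end up producing an entry of the form
#   <ScenarioComponents>
#     ...
#     <Value name="my-policy">local-xml/my-scenario/my-policy.xml</Value>
#   </ScenarioComponents>
# in the scenario's config.xml; the name and path above are purely illustrative.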
def updateScenarioComponent(self, name, xmlfile):
"""
Set a new filename for a ScenarioComponent identified by the ``<Value>`` element name.
:param name: (str) the name of the scenario component to update
:param xmlfile: (str) the location of the XML file, relative to the `exe` directory, that
should replace the existing value
:return: none
"""
xmlfile = unixPath(xmlfile)
self.updateConfigComponent('ScenarioComponents', name, xmlfile)
def deleteScenarioComponent(self, name, useCache=True):
"""
Delete a ``<ScenarioComponent>`` identified by the ``<Value>`` element name.
:param name: (str) the name of the ScenarioComponent to delete
:return: none
"""
_logger.info("Delete ScenarioComponent name='%s' for scenario" % name)
cfg = self.cfgPath()
item = CachedFile.getFile(cfg)
elt = item.tree.find("//ScenarioComponents")
valueNode = elt.find("Value[@name='%s']" % name)
if valueNode is not None:
elt.remove(valueNode)
item.setEdited()
def renameScenarioComponent(self, name, xmlfile):
"""
Modify the name of a ``ScenarioComponent``, located by the XML file path it holds.
This is used to create a local reference XML that has unique names
for all scenario components, which allows all further modifications to refer
only to the (now unique) names.
:param name: (str) the new name for the scenario component
:param xmlfile: (str) the XML file path used to locate the scenario component
:return: none
"""
xmlfile = unixPath(xmlfile)
_logger.debug("Rename ScenarioComponent name='%s', xmlfile='%s'" % (name, xmlfile))
cfg = self.cfgPath()
xmlEdit(cfg, [("//ScenarioComponents/Value[text()='%s']/@name" % xmlfile, name)])
@callableMethod
def multiply(self, tag, xpath, value):
"""
Run the `xpath` query on the XML file with `tag` in the config file, and
replace all values found with the result of multiplying them by `value`.
:param tag: (str) the tag identifying a scenario component
:param xpath: (str) an XPath query to run against the file indicated by `tag`
:param value: (float) a value to multiply results of the `xpath` query by.
:return: none
"""
_logger.info("multiply: tag='{}', xpath='{}', value={}".format(tag, xpath, value))
fileRel, fileAbs = self.getLocalCopy(tag)
xmlEdit(fileAbs, [(xpath, value)], op='multiply')
self.updateScenarioComponent(tag, fileRel)
@callableMethod
def add(self, tag, xpath, value):
"""
Run the `xpath` query on the XML file with `tag` in the config file, and
replace all values found with the result of adding `value` to them.
:param tag: (str) the tag identifying a scenario component
:param xpath: (str) an XPath query to run against the file indicated by `tag`
:param value: (float) a value to add to the results of the `xpath` query.
:return: none
"""
_logger.info("add: tag='{}', xpath='{}', value={}".format(tag, xpath, value))
fileRel, fileAbs = self.getLocalCopy(tag)
xmlEdit(fileAbs, [(xpath, value)], op='add')
self.updateScenarioComponent(tag, fileRel)
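# Editor's sketch (not in the original source): multiply() and add() are typically
# used to scale or offset numeric values in a scenario component. For example, a
# hypothetical call (the XPath below is illustrative, not from the original)
#   self.multiply('energy_transformation',
#                 '//technology[@name="coal"]//efficiency', 1.05)
# multiplies every matching value by 1.05 in a local copy of the file tagged
# 'energy_transformation' and points the config file at that copy.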
# TBD dynamic keyword might still be useful if subdir e.g. local-xml/dynamic but policy file would be in local-xml anyway
@callableMethod
def addMarketConstraint(self, target, policy, dynamic=False,
baselinePolicy=False): # TBD: should be able to eliminate this arg
"""
Adds references to a pair of files comprising a policy, i.e., a policy definition
file and a constraint file. References to the two files--assumed to be named ``XXX-{subsidy,tax}.xml``
and ``XXX-{subsidy,tax}-constraint.xml`` for policy `target` ``XXX``--are added to the configuration file.
**Callable from XML setup files.**
:param target: (str) the subject of the policy, e.g., corn-etoh, cell-etoh, ft-biofuel, biodiesel
:param policy: (str) one of ``subsidy`` or ``tax``
:param dynamic: (bool) True if the XML file was dynamically generated, and thus found in ``dyn-xml``
rather than ``local-xml``
:param baselinePolicy: (bool) if True, the policy file is linked to the baseline directory
rather than this scenario's own directory.
:return: none
"""
_logger.info("Add market constraint: %s %s for %s" % (target, policy, self.name))
cfg = self.cfgPath()
basename = "%s-%s" % (target, policy) # e.g., biodiesel-subsidy
policyTag = target + "-policy"
constraintTag = target + "-constraint"
reldir = self.scenario_dyn_dir_rel if dynamic else self.scenario_dir_rel
# TBD: Could look for file in scenario, but if not found, look in baseline, eliminating this flag
policyReldir = self.baseline_dir_rel if baselinePolicy else reldir
policyXML = pathjoin(policyReldir, basename + ".xml") # TBD: "-market.xml" for symmetry?
constraintXML = pathjoin(reldir, basename + "-constraint.xml")
# See if element exists in config file (-Q => quiet; just report exit status)
xpath = '//ScenarioComponents/Value[@name="%s"]' % policyTag
# If we've already added files for policy/constraint on this target,
# we replace the old values with new ones. Otherwise, we add them.
addOrUpdate = self.updateScenarioComponent if xmlSel(cfg, xpath) else self.addScenarioComponent
addOrUpdate(policyTag, policyXML)
addOrUpdate(constraintTag, constraintXML)
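# Editor's sketch (not in the original source): for target='biodiesel' and
# policy='subsidy', the code above references the two files
#   biodiesel-subsidy.xml and biodiesel-subsidy-constraint.xml
# and registers them in the config file under the tags 'biodiesel-policy' and
# 'biodiesel-constraint', adding new entries or updating existing ones.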
@callableMethod
def delMarketConstraint(self, target, policy):
"""
Delete the two elements defining a market constraint from the configuration file.
The filenames are constructed as indicated in the `addMarketConstraint` method.
**Callable from XML setup files.**
:param target: (str) the subject of the policy, e.g., corn-etoh, cell-etoh,
ft-biofuel, biodiesel
:param policy: (str) one of ``subsidy`` or ``tax``
:return: none
"""
_logger.info("Delete market constraint: %s %s for %s" % (target, policy, self.name))
cfg = self.cfgPath()
# if policy == "subsidy":
# policy = "subs" # use shorthand in filename
policyTag = target + "-" + policy
constraintTag = target + "-constraint"
# See if element exists in config file (-Q => quiet; just report exit status)
xpath = '//ScenarioComponents/Value[@name="%s"]' % policyTag
if xmlSel(cfg, xpath):
# found it; delete the elements
self.deleteScenarioComponent(policyTag)
self.deleteScenarioComponent(constraintTag)
@callableMethod
def setStopPeriod(self, yearOrPeriod):
"""
Sets the model stop period. If `yearOrPeriod` is less than 1000, it is used
directly as the stop period; if it is 1000 or greater, it is treated as a year
and converted to the corresponding stop period for the configuration file.
**Callable from XML setup files.**
:param yearOrPeriod: (coercible to int) this argument is treated as a literal
stop period if the value is < 1000. (N.B. 2015 = step 4, 2020 = step 5, and so
on.) If yearOrPeriod >= 1000, it is treated as a year and converted
to a stopPeriod for use in the GCAM configuration file.
:return: none
:raises: SetupException
"""
value = coercible(yearOrPeriod, int)
stopPeriod = value if 1 < value < 1000 else 1 + (value - 2000)//5
self.updateConfigComponent('Ints', 'stop-period', stopPeriod)
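# Editor's sketch (not in the original source): under the conversion above, a value
# below 1000 is used as-is, while a year is mapped via 1 + (year - 2000)//5, e.g.
#   setStopPeriod(2050)  ->  stop-period = 1 + (2050 - 2000)//5 = 11
#   setStopPeriod(4)     ->  stop-period = 4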
@callableMethod
def setInterpolationFunction(self, region, supplysector, subsector, fromYear, toYear,
funcName='linear', applyTo='share-weight', stubTechnology=None,
delete=False):
"""
Set the interpolation function for the share-weight of the `subsector`
of `supplysector` to `funcName` between years `fromYear` to `toYear`
in `region`. **Callable from XML setup files.**
:param region: (str) the GCAM region to operate on
:param supplysector: (str) the name of a supply sector
:param subsector: (str) the name of a sub-sector
:param fromYear: (str or int) the year to start interpolating
:param toYear: (str or int) the year to stop interpolating
:param funcName: (str) the name of an interpolation function
:param applyTo: (str) what the interpolation function is applied to
:param stubTechnology: (str) the name of a technology to apply function to
:param delete: (bool) if True, set delete="1", otherwise don't.
:return: none
"""
_logger.info("Set interpolation function for '%s' : '%s' to '%s'" % (supplysector, subsector, funcName))
enTransFileRel, enTransFileAbs = self.getLocalCopy(ENERGY_TRANSFORMATION_TAG)
# /scenario/world/region[@name='USA']/supplysector[@name='refining']/subsector[@name='biomass liquids']/interpolation-rule
prefix = '//region[@name="%s"]/supplysector[@name="%s"]/subsector[@name="%s"]%s/interpolation-rule[@apply-to="%s"]' % \
(region, supplysector, subsector,
'/stub-technology[@name="%s"]' % stubTechnology if stubTechnology else '',
applyTo)
args = [(prefix + '/@from-year', str(fromYear)),
(prefix + '/@to-year', str(toYear)),
(prefix + '/interpolation-function/@name', funcName)]
if delete:
args.append((prefix + '/@delete', "1"))
xmlEdit(enTransFileAbs, args)
self.updateScenarioComponent("energy_transformation", enTransFileRel)
@callableMethod
def setupSolver(self, solutionTolerance=None, broydenTolerance=None,
maxModelCalcs=None, maxIterations=None):
"""
Set the model solution tolerance to the given values for the solver
"driver" (`solutionTolerance`) and, optionally for the Broyden component
(`broydenTolerance`).
**Callable from XML setup files.**
:param solutionTolerance: (coercible to float, > 0.0) the value to set for the driver tolerance
:param broydenTolerance: (coercible to float, > 0.0) the value to set for the Broyden component
tolerance. (If both are provided, the function requires that
componentTolerance <= driverTolerance.)
:param maxModelCalcs: (coercible to int, > 0) maximum number of calculations to run in the driver
:param maxIterations: (coercible to int, > 0) maximum number of iterations to allow in the
Broyden component
:return: none
"""
def coercibleAndPositive(name, value, requiredType):
if value is None:
return None
value = coercible(value, requiredType)
if value <= 0:
raise SetupException(name + ' must be greater than zero')
_logger.info("Set %s to %s", name, value)
return value
solutionTol = coercibleAndPositive('Driver solution tolerance', solutionTolerance, float)
broydenTol = coercibleAndPositive('Broyden component tolerance', broydenTolerance, float)
if solutionTol and broydenTol:
if broydenTol > solutionTol:
raise SetupException('Broyden component tolerance cannot be greater than driver solution tolerance')
maxModelCalcs = coercibleAndPositive('maxModelCalcs', maxModelCalcs, int)
maxIterations = coercibleAndPositive('maxIterations', maxIterations, int)
solverFile = 'cal_broyden_config.xml'
solverFileRel, solverFileAbs = self.getLocalCopy(SOLVER_TAG)
prefix = "//scenario/user-configurable-solver[@year>=2010]/"
pairs = []
if solutionTolerance:
pairs.append((prefix + 'solution-tolerance', solutionTolerance))
if broydenTolerance:
pairs.append((prefix + 'broyden-solver-component/ftol', broydenTolerance))
if maxModelCalcs:
pairs.append((prefix + 'max-model-calcs', maxModelCalcs))
if maxIterations:
pairs.append((prefix + 'broyden-solver-component/max-iterations', maxIterations))
xmlEdit(solverFileAbs, pairs)
self.updateScenarioComponent("solver", solverFileRel)
@callableMethod
def dropLandProtection(self, dropEmissions=True):
self.deleteScenarioComponent("protected_land2")
self.deleteScenarioComponent("protected_land3")
if dropEmissions:
version = parse_version_info()
if version > VersionInfo(5, 0, 0):
# drop emissions for protected land
self.deleteScenarioComponent("nonco2_aglu_prot")
@callableMethod
def protectLand(self, fraction, landClasses=None, otherArable=False,
regions=None, unprotectFirst=False):
"""
Modify land_input files to protect a constant fraction of unmanaged
land of the given classes, in the given regions.
**Callable from XML setup files.**
:param fraction: (float) the fraction of land in the given land classes
to protect
:param landClasses: a string or a list of strings, or None. If None, all
"standard" unmanaged land classes are modified.
:param otherArable: (bool) if True, land class 'OtherArableLand' is
included in default land classes.
:param regions: a string or a list of strings, or None. If None, all
regions are modified.
:param unprotectFirst: (bool) if True, make all land in the given land classes
"unprotected" before applying the protection fraction.
:return: none
"""
from .landProtection import protectLand
_logger.info("Protecting %d%% of land globally", int(fraction * 100))
# NB: this code depends on these being the tags assigned to the land files
# as is currently the case in XmlEditor.makeScenarioComponentsUnique()
for num in [2, 3]:
fileTag = 'land%d' % num
landFileRel, landFileAbs = self.getLocalCopy(fileTag)
protectLand(landFileAbs, landFileAbs, fraction, landClasses=landClasses,
otherArable=otherArable, regions=regions, unprotectFirst=unprotectFirst)
self.updateScenarioComponent(fileTag, landFileRel)
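# Editor's sketch (not in the original source): for example, a hypothetical setup call
#   self.protectLand(0.9, regions=['USA', 'Canada'])
# rewrites the local copies of land_input_2.xml and land_input_3.xml so that 90% of
# the standard unmanaged land classes in those regions is marked as protected.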
# TBD: test
@callableMethod
def protectionScenario(self, scenarioName, unprotectFirst=True):
"""
Implement the protection scenario `scenarioName`, defined in the file given
by config variable `GCAM.LandProtectionXmlFile`.
**Callable from XML setup files.**
:param scenarioName: (str) the name of a scenario defined in the land
protection XML file.
:param unprotectFirst: (bool) if True, make all land "unprotected" before
protecting.
:return: none
"""
from .landProtection import runProtectionScenario
_logger.info("Using protection scenario %s", scenarioName)
landXmlFiles = []
# NB: this code depends on these being the tags assigned to the land files
# as is currently the case in XmlEditor.makeScenarioComponentsUnique()
for num in [2, 3]:
fileTag = 'land%d' % num
landFileRel, landFileAbs = self.getLocalCopy(fileTag)
landXmlFiles.append(landFileAbs)
self.updateScenarioComponent(fileTag, landFileRel)
# TBD: revisit this; it's a bit of a hack for Oct 16 deliverable
scenarioFile = pathname = getParam('GCAM.LandProtectionXmlFile')
if self.mcsMode == 'trial':
basename = os.path.basename(pathname)
scenario = self.scenario or self.baseline
scenarioFile = unixPath(pathjoin(self.trial_xml_abs, 'local-xml',
self.groupDir, scenario, basename))
runProtectionScenario(scenarioName, scenarioFile=scenarioFile, inPlace=True,
xmlFiles=landXmlFiles, unprotectFirst=unprotectFirst)
def getScenarioOrTrialDirs(self, subdir=''):
dirRel = pathjoin(self.trial_xml_rel, subdir) if self.mcsMode == 'trial' \
else self.scenario_dir_rel
dirAbs = pathjoin(self.trial_xml_abs, subdir) if self.mcsMode == 'trial' \
else self.scenario_dir_abs
return dirRel, dirAbs
@callableMethod
def taxCarbon(self, value, startYear=2020, endYear=2100, timestep=5,
rate=0.05, regions=GCAM_32_REGIONS, market='global'):
'''
Generate an XML file defining a global carbon tax starting
at `value` and increasing by `rate` annually. Values are generated from
`startYear` through `endYear` at the given `timestep`, with the tax starting
at `value` in `startYear`. The generated file
is named 'carbon-tax-{market}.xml' and is added to the configuration.
**Callable from XML setup files.**
:param value: (float) the initial value of the tax ($/tonne)
:param startYear: (int) the first year in which to set the tax. Default is 2020.
:param endYear: (int) the last year in which to set the tax. Default is 2100.
:param timestep: (int) the number of years between consecutive tax values. Default is 5.
:param rate: (float) annual rate of increase. Default is 0.05.
:param regions: (list(str)) the regions for which to create a C tax market.
Default is all 32 GCAM regions.
:param market: (str) the name of the market to create. Default is 'global'.
:return: none
'''
from .carbonTax import genCarbonTaxFile
tag = 'carbon-tax-' + market
filename = tag + '.xml'
# TBD: need to generalize this since any modification can be per-trial or universal
dirRel, dirAbs = self.getScenarioOrTrialDirs(subdir='local-xml')
fileRel = pathjoin(dirRel, filename)
fileAbs = pathjoin(dirAbs, filename)
genCarbonTaxFile(fileAbs, value, startYear=startYear, endYear=endYear,
timestep=timestep, rate=rate, regions=regions, market=market)
self.addScenarioComponent(tag, fileRel)
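# Editor's sketch (not in the original source): a hypothetical call
#   self.taxCarbon(10, startYear=2025, rate=0.03, market='global')
# writes carbon-tax-global.xml (a $10/tonne tax starting in 2025, growing 3% per
# year through 2100 at 5-year steps) into the scenario's XML directory and adds it
# to the configuration under the tag 'carbon-tax-global'.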
@callableMethod
def taxBioCarbon(self, market='global', regions=None, forTax=True, forCap=False):
"""
Create the XML for a linked policy to include LUC CO2 in a CO2 cap or tax policy (or both).
This function generates the equivalent of any of the 4 files in input/policy/:
global_ffict.xml (forTax=False, forCap=False)
global_ffict_in_constraint.xml (forTax=False, forCap=True)
global_uct.xml (forTax=True, forCap=False)
global_uct_in_constraint.xml (forTax=True, forCap=True)
However, unlike those files, the market need not be global, and the set of regions to
which to apply the policy can be specified.
:param market: (str) the name of the market for which to create the linked policy
:param regions: (list of str or None) the regions to apply the policy to, or None
to indicate all regions.
:param forTax: (bool) True if the linked policy should apply to a CO2 tax
:param forCap: (bool) True if the linked policy should apply to a CO2 cap
:return: none
"""
from .carbonTax import genLinkedBioCarbonPolicyFile
tag = 'bio-carbon-tax-' + market
filename = tag + '.xml'
# TBD: need to generalize this since any modification can be per-trial or universal
dirRel, dirAbs = self.getScenarioOrTrialDirs(subdir='local-xml')
fileRel = pathjoin(dirRel, filename)
fileAbs = pathjoin(dirAbs, filename)
genLinkedBioCarbonPolicyFile(fileAbs, market=market, regions=regions,
forTax=forTax, forCap=forCap)
self.addScenarioComponent(tag, fileRel)
# TBD: test
@callableMethod
def setRegionPopulation(self, region, values):
"""
Set the population for the given region to the values for the given years.
**Callable from XML setup files.**
:param region: (str) the name of one of GCAM's regions.
:param values: (dict-like or iterable of tuples of (year, pop)), specifying
the population to set for each year given.
:return: none
"""
# msg = "Set population for %s in %s to:" % (region, self.name)
# printSeries(values, region, header=msg, loglevel='INFO')
tag = 'socioeconomics'
#path = self.componentPath(tag)
# fileRel, fileAbs = self.getLocalCopy(path)
fileRel, fileAbs = self.getLocalCopy(tag)
prefix = '//region[@name="%s"]/demographics/populationMiniCAM' % region
pairs = []
for year, pop in expandYearRanges(values):
pairs.append((prefix + ('[@year="%s"]/totalPop' % year), int(round(pop))))
xmlEdit(fileAbs, pairs)
self.updateScenarioComponent(tag, fileRel)
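# Editor's sketch (not in the original source): `values` accepts the same year-range
# shorthand used throughout this class, so a hypothetical call
#   self.setRegionPopulation('USA', {'2020-2050:10': 350000})
# is presumably expanded by expandYearRanges() to the years 2020, 2030, 2040 and 2050,
# each receiving the population 350000 (the region and numbers are illustrative).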
# TBD: test
@callableMethod
def setGlobalTechNonEnergyCost(self, sector, subsector, technology, values):
"""
Set the non-energy cost of a technology in the global-technology-database,
given a list of values of (year, price). The price is applied to all years
indicated by the range.
**Callable from XML setup files.**
:param sector: (str) the name of a GCAM sector
:param subsector: (str) the name of a GCAM subsector within `sector`
:param technology: (str) the name of a GCAM technology in `subsector`
:param values: (dict-like or iterable of tuples of (year, price)) `year` can
be a single year (as string or int), or a string specifying a range of
years, of the form "xxxx-yyyy", which implies 5 year timestep, or "xxxx-yyyy:s",
which provides an alternative timestep. If `values` is dict-like (e.g. a
pandas Series) a list of tuples is created by calling values.items() after
which the rest of the explanation above applies. The `price` can be
anything coercible to float.
:return: none
"""
msg = "Set non-energy-cost of %s for %s to:" % (technology, self.name)
_logger.info(printSeries(values, technology, header=msg, asStr=True))
#_logger.info("Set non-energy-cost of %s for %s to %s" % (technology, self.name, values))
enTransFileRel, enTransFileAbs = self.getLocalCopy(ENERGY_TRANSFORMATION_TAG)
prefix = '//global-technology-database/location-info[@sector-name="%s" and @subsector-name="%s"]/technology[@name="%s"]' % \
(sector, subsector, technology)
suffix = '/minicam-non-energy-input[@name="non-energy"]/input-cost'
pairs = []
for year, price in expandYearRanges(values):
pairs.append((prefix + ('/period[@year="%s"]' % year) + suffix, price))
xmlEdit(enTransFileAbs, pairs)
self.updateScenarioComponent("energy_transformation", enTransFileRel)
# TBD: Test
@callableMethod
def setGlobalTechShutdownRate(self, sector, subsector, technology, values):
"""
Create a modified version of en_transformation.xml with the given shutdown
rates for `technology` in `sector` based on the data in `values`.
**Callable from XML setup files.**
:param sector: (str) the name of a GCAM sector
:param subsector: (str) the name of a GCAM subsector within `sector`
:param technology: (str) the name of a GCAM technology in `subsector`
:param values: (dict-like or iterable of tuples of (year, shutdownRate)) `year` can
be a single year (as string or int), or a string specifying a range of
years, of the form "xxxx-yyyy", which implies 5 year timestep, or "xxxx-yyyy:s",
which provides an alternative timestep. If `values` is dict-like (e.g. a
pandas Series) a list of tuples is created by calling values.items() after
which the rest of the explanation above applies. The `shutdownRate` can be
anything coercible to float.
:return: none
"""
_logger.info("Set shutdown rate for (%s, %s) to %s for %s" % (sector, technology, values, self.name))
enTransFileRel, enTransFileAbs = self.getLocalCopy(ENERGY_TRANSFORMATION_TAG)
prefix = "//global-technology-database/location-info[@sector-name='%s' and @subsector-name='%s']/technology[@name='%s']" % \
(sector, subsector, technology)
pairs = []
for year, value in expandYearRanges(values):
pairs.append((prefix + "/period[@year='%s']/phased-shutdown-decider/shutdown-rate" % year,
coercible(value, float)))
xmlEdit(enTransFileAbs, pairs)
self.updateScenarioComponent("energy_transformation", enTransFileRel)
#
# //region[@name=""]/energy-final-demand[@name=""]/price-elasticity[@year=""]
#
# names of energy-final-demand:
# 'aglu-xml/demand_input.xml': "Exports_Meat", "FoodDemand_Crops", "FoodDemand_Meat", "NonFoodDemand_Crops", "NonFoodDemand_Forest", "NonFoodDemand_Meat"
# 'energy-xml/transportation_UCD.xml': "trn_aviation_intl", "trn_freight", "trn_pass", "trn_shipping_intl"
# 'energy-xml/cement.xml: "cement"
# 'energy-xml/industry.xml: "industry"
#
@callableMethod
def setPriceElasticity(self, regions, sectors, configFileTag, values):
"""
Modify price-elasticity values for the given `regions` and `sectors` in `sector` based on the data in `values`.
**Callable from XML setup files.**
:param regions: (str or list of str) the name(s) of a GCAM region or regions, or "global"
to indicate that price elasticity should be set in all regions. (Or more precisely,
the change should not be restricted by region.)
:param sectors: (str or list of str) the name(s) of one or more GCAM (demand) sectors. In GCAM v4.3, each
should be one of {"cement", "industry", "trn_aviation_intl", "trn_freight", "trn_pass",
"trn_shipping_intl", "Exports_Meat", "FoodDemand_Crops", "FoodDemand_Meat",
"NonFoodDemand_Crops", "NonFoodDemand_Forest", "NonFoodDemand_Meat"}, however if input
files have been customized, other values can be used.
:param configFileTag: (str) the 'name' of a <File> element in the <ScenarioComponents>
section of a config file. This determines which file is edited, so it must correspond to
the indicated sector(s).
:param values: (dict-like or iterable of tuples of (year, elasticity)) `year` can
be a single year (as string or int), or a string specifying a range of
years, of the form "xxxx-yyyy", which implies 5 year timestep, or "xxxx-yyyy:s",
which provides an alternative timestep. If `values` is dict-like (e.g. a
pandas Series) a list of tuples is created by calling values.items() after
which the rest of the explanation above applies. The `elasticity` can be
anything coercible to float.
:return: none
"""
_logger.info("Set price-elasticity for (%s, %s) to %s for %s" % (regions, sectors, values, self.name))
filenameRel, filenameAbs = self.getLocalCopy(configFileTag)
def listifyString(value, aliasForNone=None):
if isinstance(value, six.string_types):
value = [value]
# Treat "global" as not restricting by region
if aliasForNone and len(value) == 1 and value[0] == aliasForNone:
return None
return value
def nameExpression(values):
'''
Turn ['a', 'b'] into '@name="a" or @name="b"'
'''
names = ['@name="%s"' % v for v in values]
return ' or '.join(names)
regions = listifyString(regions, aliasForNone='global')
nameExpr = '[' + nameExpression(regions) + ']' if regions else ''
regionExpr = '//region' + nameExpr
prefix = regionExpr + '/energy-final-demand[%s]' % nameExpression(sectors)
pairs = []
for year, value in expandYearRanges(values):
pairs.append((prefix + '/price-elasticity[@year="%s"]' % year, coercible(value, float)))
xmlEdit(filenameAbs, pairs)
self.updateScenarioComponent(configFileTag, filenameRel)
# TBD: test
@callableMethod
def setRegionalShareWeights(self, region, sector, subsector, values,
stubTechnology=None,
configFileTag=ENERGY_TRANSFORMATION_TAG):
"""
Create a modified version of en_transformation.xml with the given share-weights
for `technology` in `sector` based on the data in `values`.
**Callable from XML setup files.**
:param region: if not None, changes are made in a specific region, otherwise they're
made in the global-technology-database.
:param sector: (str) the name of a GCAM sector
:param subsector: (str) the name of a GCAM subsector
:param values: (dict-like or iterable of tuples of (year, shareWeight)) `year` can
be a single year (as string or int), or a string specifying a range of
years, of the form "xxxx-yyyy", which implies 5 year timestep, or "xxxx-yyyy:s",
which provides an alternative timestep. If `values` is dict-like (e.g. a
pandas Series) a list of tuples is created by calling values.items() after
which the rest of the explanation above applies. The `shareWeight` can be
anything coercible to float.
:param stubTechnology: (str) the name of a GCAM technology in the global technology database
:param configFileTag: (str) the 'name' of a <File> element in the <ScenarioComponents>
section of a config file; it determines which XML file is edited.
:return: none
"""
from .utils import printSeries
_logger.info("Set share-weights for (%r, %r, %r, %r) for %r",
region, sector, subsector, stubTechnology, self.name)
_logger.info(printSeries(values, 'share-weights', asStr=True))
enTransFileRel, enTransFileAbs = self.getLocalCopy(configFileTag)
prefix = "//region[@name='%s']/supplysector[@name='%s']/subsector[@name='%s']" % (region, sector, subsector)
shareWeight = '/stub-technology[@name="{technology}"]/period[@year="{year}"]/share-weight' \
if stubTechnology else '/share-weight[@year="{year}"]'
pairs = []
for year, value in expandYearRanges(values):
pairs.append((prefix + shareWeight.format(technology=stubTechnology, year=year),
coercible(value, float)))
xmlEdit(enTransFileAbs, pairs)
self.updateScenarioComponent(configFileTag, enTransFileRel)
# TBD: Test
@callableMethod
def setGlobalTechShareWeight(self, sector, subsector, technology, values,
configFileTag=ENERGY_TRANSFORMATION_TAG):
"""
Create a modified version of en_transformation.xml with the given share-weights
for `technology` in `sector` based on the data in `values`.
**Callable from XML setup files.**
:param sector: (str) the name of a GCAM sector
:param technology: (str) the name of a GCAM technology in `sector`
:param values: (dict-like or iterable of tuples of (year, shareWeight)) `year` can
be a single year (as string or int), or a string specifying a range of
years, of the form "xxxx-yyyy", which implies 5 year timestep, or "xxxx-yyyy:s",
which provides an alternative timestep. If `values` is dict-like (e.g. a
pandas Series) a list of tuples is created by calling values.items() after
which the rest of the explanation above applies. The `shareWeight` can be
anything coercible to float.
:param subsector: (str) the name of a GCAM subsector within `sector`
:param configFileTag: (str) the 'name' of a <File> element in the <ScenarioComponents>
section of a config file; it determines which XML file is edited.
:return: none
"""
_logger.info("Set share-weights for (%s, %s) to %s for %s" % (sector, technology, values, self.name))
enTransFileRel, enTransFileAbs = self.getLocalCopy(configFileTag)
prefix = "//global-technology-database/location-info[@sector-name='%s' and @subsector-name='%s']/technology[@name='%s']" % \
(sector, subsector, technology)
pairs = []
for year, value in expandYearRanges(values):
pairs.append((prefix + "/period[@year=%s]/share-weight" % year, coercible(value, float)))
xmlEdit(enTransFileAbs, pairs)
self.updateScenarioComponent(configFileTag, enTransFileRel)
# TBD: test
@callableMethod
def setEnergyTechnologyCoefficients(self, subsector, technology, energyInput, values):
'''
Set the coefficients in the global technology database for the given energy input
of the given technology in the given subsector.
**Callable from XML setup files.**
:param subsector: (str) the name of the subsector
:param technology: (str)
The name of the technology, e.g., 'cellulosic ethanol', 'FT biofuel', etc.
:param energyInput: (str) the name of the minicam-energy-input
:param values:
A sequence of tuples or object with ``items`` method returning
(year, coefficient). For example, to set
the coefficients for cellulosic ethanol for years 2020 and 2025 to 1.234,
the pairs would be ((2020, 1.234), (2025, 1.234)).
:return:
none
'''
_logger.info("Set coefficients for %s in global technology %s, subsector %s: %s" % \
(energyInput, technology, subsector, values))
enTransFileRel, enTransFileAbs = \
self.getLocalCopy(ENERGY_TRANSFORMATION_TAG)
prefix = "//global-technology-database/location-info[@subsector-name='%s']/technology[@name='%s']" % \
(subsector, technology)
suffix = "minicam-energy-input[@name='%s']/coefficient" % energyInput
pairs = []
for year, coef in expandYearRanges(values):
pairs.append(("%s/period[@year='%s']/%s" % (prefix, year, suffix), coef))
xmlEdit(enTransFileAbs, pairs)
self.updateScenarioComponent("energy_transformation", enTransFileRel)
@callableMethod
def writePolicyMarketFile(self, filename, policyName, region, sector, subsector, technology, years,
marketType=DEFAULT_MARKET_TYPE):
pathname = pathjoin(self.scenario_dir_abs, filename)
policyMarketXml(policyName, region, sector, subsector, technology, years,
marketType=marketType, pathname=pathname)
@callableMethod
def writePolicyConstraintFile(self, filename, policyName, region, targets, market=None, minPrice=None,
policyElement=DEFAULT_POLICY_ELT, policyType=DEFAULT_POLICY_TYPE):
pathname = pathjoin(self.scenario_dir_abs, filename)
policyConstraintsXml(policyName, region, expandYearRanges(targets), market=market, minPrice=minPrice,
policyElement=policyElement, policyType=policyType, pathname=pathname)
|
[
"rich@plevin.com"
] |
rich@plevin.com
|
77bac63e5e9ca787741c3fbe55de2d61b8ee5a61
|
d36d7215a624ff810fdaa40cba74d5e3f974bb92
|
/poc/profiling/fibo_profile.py
|
0f71e7cf90e88f9cd177361b1396deb89245d3e4
|
[] |
no_license
|
nboutin/PyEvol
|
05c43ce6e7885d50160029f3e1e1c6861ec0a7e5
|
17f178bb1a19e7ac16f0c836676be21a0fe2ffb1
|
refs/heads/master
| 2020-06-18T02:34:07.546771
| 2019-08-10T19:36:38
| 2019-08-10T19:36:38
| 196,138,380
| 0
| 0
| null | 2019-07-10T05:39:19
| 2019-07-10T05:39:18
| null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
'''
Created on 16 July 2019
@author: nboutin
'''
import pstats
from pstats import SortKey
if __name__ == '__main__':
p = pstats.Stats('fibo.cprof')
p.strip_dirs().sort_stats(-1).print_stats()
p.sort_stats(SortKey.NAME)
p.print_stats()
# what algorithms are taking time
p.sort_stats(SortKey.CUMULATIVE).print_stats(10)
# what functions were looping a lot
p.sort_stats(SortKey.TIME).print_stats(10)
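# Editor's note (not in the original script): the 'fibo.cprof' profile consumed above
# would typically be produced beforehand with something like
#   python -m cProfile -o fibo.cprof fibo.py
# where fibo.py is the (assumed) script being profiled.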
|
[
"boutwork@gmail.com"
] |
boutwork@gmail.com
|
62dcae19cb3492dc1e1d21ca702a9e415568041e
|
b98f824600c00343851917c646ed3430755e2eea
|
/inet/bin/inet_fingerprinttest
|
24e3bf14900e6044422485819b9298964b2009fc
|
[] |
no_license
|
ZitaoLi/tsn_omnetpp_nesting_rev
|
7be3e15957a16b9d3071d6526e2a4d19e236e6e6
|
23ab3a2e9cffa5d01a5297547e7e8a71a66b60c8
|
refs/heads/master
| 2020-05-07T22:23:45.523901
| 2019-04-12T10:49:40
| 2019-04-12T10:49:40
| 180,943,408
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 28,689
|
#!/usr/bin/env python
#
# Fingerprint-based regression test tool
#
# Accepts one or more CSV files with 6 columns: working directory,
# command to run, simulation time limit, expected fingerprint,
# expected result, tags.
# The program runs the simulations in the CSV files, and
# reports fingerprint mismatches as FAILed test cases. To facilitate
# test suite maintenance, the program also creates a new file (or files)
# with the updated fingerprints.
#
# Implementation is based on Python's unit testing library, so it can be
# integrated into larger test suites with minimal effort
#
# Authors: Andras Varga, Zoltan Bojthe
#
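# A hypothetical CSV line (added here for illustration; not part of the original
# header) with the six expected columns might look like:
#
#   examples/ethernet/lans, -f omnetpp.ini -c ARPTest, 500s, 1454-bb29/tplx, PASS, ethernet
#
# i.e. working directory, command/options, simulation time limit, expected
# fingerprint, expected result, and tags.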
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import copy
import csv
import glob
import multiprocessing
import os
import re
import subprocess
import sys
import threading
import time
import unittest
from distutils import spawn
from io import StringIO
# FIXME this is a hard coded path!!! must be specified from command line or discovered automatically
rootDir = os.path.abspath(".") # the working directory in the CSV file is relative to this dir
cpuTimeLimit = "300s"
logFile = "test.out"
extraOppRunArgs = ""
debug=False
release=False
exitCode = 0
class FingerprintTestCaseGenerator():
fileToSimulationsMap = {}
def generateFromCSV(self, csvFileList, filterRegexList, excludeFilterRegexList, repeat):
testcases = []
for csvFile in csvFileList:
simulations = self.parseSimulationsTable(csvFile)
self.fileToSimulationsMap[csvFile] = simulations
testcases.extend(self.generateFromDictList(simulations, filterRegexList, excludeFilterRegexList, repeat))
return testcases
def generateFromDictList(self, simulations, filterRegexList, excludeFilterRegexList, repeat):
class StoreFingerprintCallback:
def __init__(self, simulation):
self.simulation = simulation
def __call__(self, fingerprint):
self.simulation['computedFingerprint'] = fingerprint
class StoreExitcodeCallback:
def __init__(self, simulation):
self.simulation = simulation
def __call__(self, exitcode):
self.simulation['exitcode'] = exitcode
testcases = []
for simulation in simulations:
title = simulation['wd'] + " " + simulation['args'] + " " + simulation['tags']
if not filterRegexList or ['x' for regex in filterRegexList if re.search(regex, title)]: # if any regex matches title
if not excludeFilterRegexList or not ['x' for regex in excludeFilterRegexList if re.search(regex, title)]: # if NO exclude-regex matches title
testcases.append(FingerprintTestCase(title, simulation['file'], simulation['wd'], simulation['args'],
simulation['simtimelimit'], simulation['fingerprint'], simulation['expectedResult'], StoreFingerprintCallback(simulation), StoreExitcodeCallback(simulation), repeat))
return testcases
def commentRemover(self, csvData):
p = re.compile(' *#.*$')
for line in csvData:
yield p.sub('',line.decode('utf-8'))
# parse the CSV into a list of dicts
def parseSimulationsTable(self, csvFile):
simulations = []
f = open(csvFile, 'rb')
csvReader = csv.reader(self.commentRemover(f), delimiter=str(','), quotechar=str('"'), skipinitialspace=True)
for fields in csvReader:
if len(fields) == 0:
pass # empty line
elif len(fields) == 6:
if fields[4] in ['PASS', 'FAIL', 'ERROR']:
simulations.append({'file': csvFile, 'line' : csvReader.line_num,
'wd': fields[0], 'args': fields[1], 'simtimelimit': fields[2], 'fingerprint': fields[3], 'expectedResult': fields[4], 'tags': fields[5]})
else:
raise Exception(csvFile + " Line " + str(csvReader.line_num) + ": the 5th item must contain one of 'PASS', 'FAIL', 'ERROR'" + ": " + '"' + '", "'.join(fields) + '"')
else:
raise Exception(csvFile + " Line " + str(csvReader.line_num) + " must contain 6 items, but contains " + str(len(fields)) + ": " + '"' + '", "'.join(fields) + '"')
f.close()
return simulations
def writeUpdatedCSVFiles(self):
for csvFile, simulations in self.fileToSimulationsMap.items():
updatedContents = self.formatUpdatedSimulationsTable(csvFile, simulations)
if updatedContents:
updatedFile = csvFile + ".UPDATED"
ff = open(updatedFile, 'w')
ff.write(updatedContents)
ff.close()
print("Check " + updatedFile + " for updated fingerprints")
def writeFailedCSVFiles(self):
for csvFile, simulations in self.fileToSimulationsMap.items():
failedContents = self.formatFailedSimulationsTable(csvFile, simulations)
if failedContents:
failedFile = csvFile + ".FAILED"
ff = open(failedFile, 'w')
ff.write(failedContents)
ff.close()
print("Check " + failedFile + " for failed fingerprints")
def writeErrorCSVFiles(self):
for csvFile, simulations in self.fileToSimulationsMap.items():
errorContents = self.formatErrorSimulationsTable(csvFile, simulations)
if errorContents:
errorFile = csvFile + ".ERROR"
ff = open(errorFile, 'w')
ff.write(errorContents)
ff.close()
print("Check " + errorFile + " for errors")
def escape(self, str):
if re.search(r'[\r\n\",]', str):
str = '"' + re.sub('"','""',str) + '"'
return str
def formatUpdatedSimulationsTable(self, csvFile, simulations):
# if there is a computed fingerprint, print that instead of existing one
ff = open(csvFile, 'r')
lines = ff.readlines()
ff.close()
lines.insert(0, '') # csv line count is 1..n; insert an empty item --> lines[1] is the first line
containsComputedFingerprint = False
for simulation in simulations:
if 'computedFingerprint' in simulation:
oldFingerprint = simulation['fingerprint']
newFingerprint = simulation['computedFingerprint']
oldFpList = oldFingerprint.split(' ')
if '/' in newFingerprint:
# keep old omnetpp4 fp
keepFpList = [elem for elem in oldFpList if not '/' in elem]
if keepFpList:
newFingerprint = ' '.join(keepFpList) + ' ' + newFingerprint
else:
# keep all old omnetpp5 fp
keepFpList = [elem for elem in oldFpList if '/' in elem]
if keepFpList:
newFingerprint = newFingerprint + ' ' + ' '.join(keepFpList)
if ',' in newFingerprint:
newFingerprint = '"' + newFingerprint + '"'
containsComputedFingerprint = True
line = simulation['line']
pattern = "\\b" + oldFingerprint + "\\b"
(newLine, cnt) = re.subn(pattern, newFingerprint, lines[line])
if (cnt == 1):
lines[line] = newLine
else:
print("ERROR: Cannot replace fingerprint '%s' to '%s' at '%s' line %d:\n %s" % (oldFingerprint, newFingerprint, csvFile, line, lines[line]))
return ''.join(lines) if containsComputedFingerprint else None
def formatFailedSimulationsTable(self, csvFile, simulations):
ff = open(csvFile, 'r')
lines = ff.readlines()
ff.close()
lines.insert(0, '') # csv line count is 1..n; insert an empty item --> lines[1] is the first line
result = []
containsFailures = False
for simulation in simulations:
if 'computedFingerprint' in simulation:
oldFingerprint = simulation['fingerprint']
newFingerprint = simulation['computedFingerprint']
if oldFingerprint != newFingerprint:
if not containsFailures:
containsFailures = True
result.append("# Failures:\n")
result.append(lines[simulation['line']])
return ''.join(result) if containsFailures else None
def formatErrorSimulationsTable(self, csvFile, simulations):
ff = open(csvFile, 'r')
lines = ff.readlines()
ff.close()
lines.insert(0, '') # csv line count is 1..n; insert an empty item --> lines[1] is the first line
result = []
containsErrors = False
for simulation in simulations:
if 'exitcode' in simulation and simulation['exitcode'] != 0:
if not containsErrors:
containsErrors = True
result.append("# Errors:\n")
result.append(lines[simulation['line']])
return ''.join(result) if containsErrors else None
class SimulationResult:
def __init__(self, command, workingdir, exitcode, errorMsg=None, isFingerprintOK=None,
computedFingerprint=None, simulatedTime=None, numEvents=None, elapsedTime=None, cpuTimeLimitReached=None):
self.command = command
self.workingdir = workingdir
self.exitcode = exitcode
self.errorMsg = errorMsg
self.isFingerprintOK = isFingerprintOK
self.computedFingerprint = computedFingerprint
self.simulatedTime = simulatedTime
self.numEvents = numEvents
self.elapsedTime = elapsedTime
self.cpuTimeLimitReached = cpuTimeLimitReached
class SimulationTestCase(unittest.TestCase):
def runSimulation(self, title, command, workingdir, resultdir):
global logFile
ensure_dir(workingdir + "/results")
# run the program and log the output
t0 = time.time()
(exitcode, out) = self.runProgram(command, workingdir, resultdir)
elapsedTime = time.time() - t0
FILE = open(logFile, "a")
FILE.write("------------------------------------------------------\n"
+ "Running: " + title + "\n\n"
+ "$ cd " + workingdir + "\n"
+ "$ " + command + "\n\n"
+ out.strip() + "\n\n"
+ "Exit code: " + str(exitcode) + "\n"
+ "Elapsed time: " + str(round(elapsedTime,2)) + "s\n\n")
FILE.close()
FILE = open(resultdir + "/test.out", "w")
FILE.write("------------------------------------------------------\n"
+ "Running: " + title + "\n\n"
+ "$ cd " + workingdir + "\n"
+ "$ " + command + "\n\n"
+ out.strip() + "\n\n"
+ "Exit code: " + str(exitcode) + "\n"
+ "Elapsed time: " + str(round(elapsedTime,2)) + "s\n\n")
FILE.close()
result = SimulationResult(command, workingdir, exitcode, elapsedTime=elapsedTime)
# process error messages
errorLines = re.findall("<!>.*", out, re.M)
errorMsg = ""
for err in errorLines:
err = err.strip()
if re.search("Fingerprint", err):
if re.search("successfully", err):
result.isFingerprintOK = True
else:
m = re.search("(computed|calculated): ([-a-zA-Z0-9]+(/[a-z0]+)?)", err)
if m:
result.isFingerprintOK = False
result.computedFingerprint = m.group(2)
else:
raise Exception("Cannot parse fingerprint-related error message: " + err)
else:
errorMsg += "\n" + err
if re.search("CPU time limit reached", err):
result.cpuTimeLimitReached = True
m = re.search(r"at t=([0-9]*(\.[0-9]+)?)s, event #([0-9]+)", err)
if m:
result.simulatedTime = float(m.group(1))
result.numEvents = int(m.group(3))
result.errormsg = errorMsg.strip()
return result
def runProgram(self, command, workingdir, resultdir):
env = os.environ
# env['CPUPROFILE'] = resultdir+"/cpuprofile"
# env['CPUPROFILE_FREQUENCY'] = "1000"
process = subprocess.Popen(['sh','-c',command], shell=sys.platform.startswith('win'), cwd=workingdir, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
out = process.communicate()[0]
out = re.sub("\r", "", out.decode('utf-8'))
return (process.returncode, out)
class FingerprintTestCase(SimulationTestCase):
def __init__(self, title, csvFile, wd, cmdLine, simtimelimit, fingerprint, expectedResult, storeFingerprintCallback, storeExitcodeCallback, repeat):
SimulationTestCase.__init__(self)
self.title = title
self.csvFile = csvFile
self.wd = wd
self.cmdLine = cmdLine
self.simtimelimit = simtimelimit
self.fingerprint = fingerprint
self.expectedResult = expectedResult
self.storeFingerprintCallback = storeFingerprintCallback
self.storeExitcodeCallback = storeExitcodeCallback
self.repeat = repeat
def runTest(self):
# CPU time limit is a safety guard: fingerprint checks shouldn't take forever
global rootDir, executable, debug, release, cpuTimeLimit, extraOppRunArgs
# run the simulation
workingdir = _iif(self.wd.startswith('/'), rootDir + "/" + self.wd, self.wd)
wdname = '' + self.wd + ' ' + self.cmdLine
wdname = re.sub('/', '_', wdname)
wdname = re.sub('[\W]+', '_', wdname)
resultdir = os.path.abspath(".") + "/results/" + self.csvFile + "/" + wdname
ensure_dir(resultdir)
# If the command line starts with an option (i.e. a '-' character), it contains no
# executable name, so use the globally configured executable.
# Otherwise, treat its first word as the name of the executable.
(exeName, progArgs) = (executable, self.cmdLine) if (self.cmdLine.startswith("-")) else self.cmdLine.split(None, 1)
command = (exeName + "_dbg" if debug else exeName + "_release" if release else exeName) + " -u Cmdenv " + progArgs + \
_iif(self.simtimelimit != "", " --sim-time-limit=" + self.simtimelimit, "") + \
" \"--fingerprint=" + self.fingerprint + "\" --cpu-time-limit=" + cpuTimeLimit + \
" --vector-recording=false --scalar-recording=true" + \
" --result-dir=" + resultdir + \
" " + extraOppRunArgs
# print("COMMAND: " + command + '\n')
anyFingerprintBad = False
computedFingerprints = set()
for rep in range(self.repeat):
result = self.runSimulation(self.title, command, workingdir, resultdir)
# process the result
# note: fingerprint mismatch is technically NOT an error in 4.2 or before! (exitcode==0)
self.storeExitcodeCallback(result.exitcode)
if result.exitcode != 0:
raise Exception("runtime error with exitcode="+str(result.exitcode)+": " + result.errormsg)
elif result.cpuTimeLimitReached:
raise Exception("cpu time limit exceeded")
elif result.simulatedTime == 0 and self.simtimelimit != '0s':
raise Exception("zero time simulated")
elif result.isFingerprintOK is None:
raise Exception("other")
elif result.isFingerprintOK == False:
computedFingerprints.add(result.computedFingerprint)
anyFingerprintBad = True
else:
# fingerprint OK:
computedFingerprints.add(self.fingerprint)
# pass
if anyFingerprintBad:
self.storeFingerprintCallback(",".join(computedFingerprints))
assert False, "some fingerprint mismatch; actual " + " '" + ",".join(computedFingerprints) +"'"
def __str__(self):
return self.title
class ThreadSafeIter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return next(self.it)
next = __next__ # for python 2 compatibility
class ThreadedTestSuite(unittest.BaseTestSuite):
""" runs toplevel tests in n threads
"""
# How many test process at the time.
thread_count = multiprocessing.cpu_count()
def run(self, result):
it = ThreadSafeIter(self.__iter__())
result.buffered = True
threads = []
for i in range(self.thread_count):
# Create self.thread_count threads that together cooperatively consume
# every test case from the shared iterator. Each thread will do the
# job as fast as it can.
t = threading.Thread(target=self.runThread, args=(result, it))
t.daemon = True
t.start()
threads.append(t)
# Wait until all the threads are done. .join() is blocking.
#for t in threads:
# t.join()
runApp = True
while runApp and threading.active_count() > 1:
try:
time.sleep(0.1)
except KeyboardInterrupt:
runApp = False
return result
def runThread(self, result, it):
tresult = result.startThread()
for test in it:
if result.shouldStop:
break
test(tresult)
tresult.stopThread()
class ThreadedTestResult(unittest.TestResult):
"""TestResult with threads
"""
def __init__(self, stream=None, descriptions=None, verbosity=None):
super(ThreadedTestResult, self).__init__()
self.parent = None
self.lock = threading.Lock()
def startThread(self):
ret = copy.copy(self)
ret.parent = self
return ret
def stop(self):
super(ThreadedTestResult, self).stop()
if self.parent:
self.parent.stop()
def stopThread(self):
if self.parent == None:
return 0
self.parent.testsRun += self.testsRun
return 1
def startTest(self, test):
"Called when the given test is about to be run"
super(ThreadedTestResult, self).startTest(test)
self.oldstream = self.stream
self.stream = StringIO()
def stopTest(self, test):
"""Called when the given test has been run"""
super(ThreadedTestResult, self).stopTest(test)
out = self.stream.getvalue()
with self.lock:
self.stream = self.oldstream
self.stream.write(out)
#
# Copy/paste of TextTestResult, with minor modifications in the output:
# we want to print the error text after ERROR and FAIL, but we don't want
# to print stack traces.
#
class SimulationTextTestResult(ThreadedTestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(SimulationTextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
self.expectedErrors = []
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(SimulationTextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(""+self.getDescription(test)) # NOTE: the empty "" string is needed here for python2/3 compatibility (unicode vs. str) - can be removed if only python3 is used
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(SimulationTextTestResult, self).addSuccess(test)
if test.expectedResult == 'PASS':
if self.showAll:
self.stream.write(": PASS\n")
elif self.dots:
self.stream.write('.')
self.stream.flush()
else:
self.addUnexpectedSuccess(test)
def addError(self, test, err):
# modified
if test.expectedResult == 'ERROR':
self.addExpectedError(test, err)
else:
super(SimulationTextTestResult, self).addError(test, err)
errmsg = err[1]
self.errors[-1] = (test, errmsg) # super class method inserts stack trace; we don't need that, so overwrite it
if self.showAll:
self.stream.write(": ERROR (should be %s): %s\n" % (test.expectedResult, errmsg))
elif self.dots:
self.stream.write('E')
self.stream.flush()
global exitCode
exitCode = 1 # result is not the expected result
def addExpectedError(self, test, err):
self.expectedErrors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
self.expectedErrors[-1] = (test, err[1]) # super class method inserts stack trace; we don't need that, so overwrite it
if self.showAll:
self.stream.write(": ERROR (expected)\n")
elif self.dots:
self.stream.write('e')
self.stream.flush()
def addFailure(self, test, err):
# modified
if test.expectedResult == 'FAIL':
self.addExpectedFailure(test, err)
else:
super(SimulationTextTestResult, self).addFailure(test, err)
errmsg = err[1]
self.failures[-1] = (test, errmsg) # super class method inserts stack trace; we don't need that, so overwrite it
if self.showAll:
self.stream.write(": FAIL (should be %s): %s\n" % (test.expectedResult, errmsg))
elif self.dots:
self.stream.write('F')
self.stream.flush()
global exitCode
exitCode = 1 # result is not the expected result
def addSkip(self, test, reason):
super(SimulationTextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.write(": skipped {0!r}".format(reason))
self.stream.write("\n")
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(SimulationTextTestResult, self).addExpectedFailure(test, err)
self.expectedFailures[-1] = (test, err[1]) # super class method inserts stack trace; we don't need that, so overwrite it
if self.showAll:
            self.stream.write(": FAIL (expected)\n")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(SimulationTextTestResult, self).addUnexpectedSuccess(test)
self.unexpectedSuccesses[-1] = (test) # super class method inserts stack trace; we don't need that, so overwrite it
if self.showAll:
self.stream.write(": PASS (unexpected)\n")
elif self.dots:
self.stream.write("u")
self.stream.flush()
global exitCode
exitCode = 1 # result is not the expected result
def printErrors(self):
# modified
if self.dots or self.showAll:
self.stream.write("\n")
self.printErrorList('Errors', self.errors)
self.printErrorList('Failures', self.failures)
self.printUnexpectedSuccessList('Unexpected successes', self.unexpectedSuccesses)
self.printErrorList('Expected errors', self.expectedErrors)
self.printErrorList('Expected failures', self.expectedFailures)
def printErrorList(self, flavour, errors):
# modified
if errors:
self.stream.write("%s:\n" % flavour)
for test, err in errors:
self.stream.write(" %s (%s)\n" % (self.getDescription(test), err))
def printUnexpectedSuccessList(self, flavour, errors):
if errors:
self.stream.write("%s:\n" % flavour)
for test in errors:
self.stream.write(" %s\n" % (self.getDescription(test)))
def _iif(cond,t,f):
return t if cond else f
def ensure_dir(f):
try:
os.makedirs(f)
except:
pass # do nothing if already exist
if __name__ == "__main__":
defaultNumThreads = multiprocessing.cpu_count()
if defaultNumThreads >= 6:
defaultNumThreads = defaultNumThreads - 1
parser = argparse.ArgumentParser(description='Run the fingerprint tests specified in the input files.')
parser.add_argument('testspecfiles', nargs='*', metavar='testspecfile', help='CSV files that contain the tests to run (default: *.csv). Expected CSV file columns: working directory, command to run, simulation time limit, expected fingerprint, expected result, tags. The command column may contain only options without a program name (i.e. it starts with - ). In this case the --executable option can be used to specify a program name.')
parser.add_argument('-m', '--match', action='append', metavar='regex', help='Line filter: a line (more precisely, workingdir+SPACE+args) must match any of the regular expressions in order for that test case to be run')
parser.add_argument('-x', '--exclude', action='append', metavar='regex', help='Negative line filter: a line (more precisely, workingdir+SPACE+args) must NOT match any of the regular expressions in order for that test case to be run')
parser.add_argument('-t', '--threads', type=int, default=defaultNumThreads, help='number of parallel threads (default: number of CPUs, currently '+str(defaultNumThreads)+')')
parser.add_argument('-r', '--repeat', type=int, default=1, help='number of repeating each test (default: 1)')
parser.add_argument('-e', '--executable', help='Determines which binary to execute (e.g. opp_run_dbg, opp_run_release) if the command column in the CSV file does not specify one.')
parser.add_argument('-C', '--directory', help='Change to DIRECTORY before executing the tests. Working dirs in the CSV files are relative to this.')
parser.add_argument('-d', '--debug', action='store_true', help='Run debug executables: use the debug version of the executable (appends _dbg to the executable name)')
parser.add_argument('-s', '--release', action='store_true', help='Run release executables: use the release version of the executable (appends _release to the executable name)')
parser.add_argument('-a', '--oppargs', action='append', metavar='oppargs', nargs=argparse.REMAINDER, help='extra opp_run arguments until the end of the line')
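    # A purely illustrative sketch of one test-spec CSV row, assuming the column
    # order described in the --help text above (working directory, command to run,
    # simulation time limit, expected fingerprint, expected result, tags); none of
    # these values come from a real test suite:
    #   examples/tictoc, -f omnetpp.ini -c TicToc1, 10s, 53de-9021/tplx, PASS, examples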
args = parser.parse_args()
if os.path.isfile(logFile):
FILE = open(logFile, "w")
FILE.close()
if not args.testspecfiles:
args.testspecfiles = glob.glob('*.csv')
if args.oppargs:
for oppArgList in args.oppargs:
for oppArg in oppArgList:
extraOppRunArgs += " " + oppArg
if args.executable:
executable = args.executable
if args.directory:
rootDir = os.path.abspath(args.directory)
debug = args.debug
generator = FingerprintTestCaseGenerator()
testcases = generator.generateFromCSV(args.testspecfiles, args.match, args.exclude, args.repeat)
testSuite = ThreadedTestSuite()
testSuite.addTests(testcases)
testSuite.thread_count = args.threads
testSuite.repeat = args.repeat
testRunner = unittest.TextTestRunner(stream=sys.stdout, verbosity=9, resultclass=SimulationTextTestResult)
testRunner.run(testSuite)
print()
generator.writeUpdatedCSVFiles()
generator.writeErrorCSVFiles()
generator.writeFailedCSVFiles()
print("Log has been saved to %s" % logFile)
if exitCode == 0:
        print("Test results match the expected results")
else:
print("Test results differ from expected results")
exit(exitCode)
|
[
"494240799@qq.com"
] |
494240799@qq.com
|
|
a8334b8adc77f38c650e2979eff621348afb8ad7
|
1edd74c80fc97b8795cfe41fe69ec1f87af0148f
|
/src/eefig_learning/scripts/LPV_MPC_EEFIG.py
|
2f84515d1cdb333dd522797611d6eac4ab074c69
|
[] |
no_license
|
shivamchaubey/eefig_autonomous_vehicle
|
2c91d03579ce184a475fb302311306c23adfe42d
|
32d07f55f8ccc2a107abff686c792931d5fb2891
|
refs/heads/main
| 2023-08-14T10:16:45.423315
| 2021-09-25T12:06:14
| 2021-09-25T12:06:14
| 410,260,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,823
|
py
|
# Tools
import numpy as np
import warnings
# Imports
from lpv_mpc_eefig.common import EEFIG
from lpv_mpc_eefig.common import mdg
class LPV_MPC_EEFIG (EEFIG):
def __init__ (self, settings, configuration_file = None):
# super().__init__(settings) #python3
super(LPV_MPC_EEFIG, self).__init__(settings) #python2.7
self.ready = False # Ready to start predicting A & B (LPV Matrices)
# If an EEFIG configuration file is provided use it
if configuration_file is not None:
self.load(configuration_file)
# Update EEFIG
def update (self, xk):
# Detect & Update Counter
self.update_anomalies(xk)
self.K = self.update_K(xk)
self.P = self.update_P(xk)
if not self.update_last_samples(xk):
return
# If New Granule Create
if self.check_new_granule_creation(xk):
self.create_new_granule()
# NOTE: If we do not have a Granule yet just skip the rest
# of the process
if self.nEEFIG < 1:
return
self.ready = True
# Update Granule Distribution
gran_idx = self.data_evaluation(xk)
# Update/Create A and B in Granules
psik = self.last_samples[:-1, :]
# WLS
# NOTE: Used to create new Granules A's & B's
if self.EEFIG[gran_idx].A.size == 0:
xr = self.last_samples[1:, 0:self.nx] # xr contains the states x of the buffer (eq. 24)
self.create_using_WLS (gran_idx, xr, psik)
# RLS
# NOTE: Used to update existing A's & B's in Granules
else:
self.update_using_RLS (gran_idx, xk, psik)
# WARNING:
for i in range(self.nEEFIG):
if self.EEFIG[i].A.size == 0:
                warnings.warn("LPV_MPC_EEFIG.py: One granule had a zero-size A matrix. We used WLS to solve this issue.")
xr = self.last_samples[1:, 0:self.nx] # xr contains the states x of the buffer (eq. 24)
self.create_using_WLS (i, xr, psik)
    # Obtain the Linear Parameter Varying (LPV) matrices & update EEFIG
def get_LPV_matrices (self, xk):
# NOTE: Obtain for xk all the normalized weights for each granule
wk = np.zeros([self.nEEFIG, 1])
for i in range(self.nEEFIG):
wk[i, 0], _, _, _ = mdg(xk, self.EEFIG[i].m, self.EEFIG[i].a, self.EEFIG[i].b)
gsum = sum(wk)
g = wk / gsum
# LPV Model A & B
        # NOTE: We take a weighted mean to extract the A and B using all the granules
A = np.zeros([self.nx, self.nx])
B = np.zeros([self.nx, self.nu])
for i in range(self.nEEFIG):
A += g[i] * self.EEFIG[i].A
B += g[i] * self.EEFIG[i].B
return A, B
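# A rough usage sketch (illustrative only; `settings` and the shape of `xk` are
# assumptions about whatever the EEFIG base class expects, not taken from this repo):
#
#   model = LPV_MPC_EEFIG(settings)
#   for xk in samples:                          # stream of measured state/input samples
#       model.update(xk)
#       if model.ready:
#           A, B = model.get_LPV_matrices(xk)   # LPV matrices for the next MPC step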
|
[
"shivam.chaubey1006@gmail.com"
] |
shivam.chaubey1006@gmail.com
|
3e627782b13d008610964fe7079a03449bd5ade7
|
e11a8212fba5f6b4e5aa133653c28cf0799ab16f
|
/sum_double_warmup_problem.py
|
5be215a348e0f2086885c2ed191a892ec5a2c250
|
[] |
no_license
|
felipecook/practice_problems
|
dfa984e7425c6a007a7f4576fe70287647d32d81
|
2b5e647c5828ffa9ab314cb85913d2e361121159
|
refs/heads/master
| 2022-08-31T04:59:40.208501
| 2020-05-21T16:14:49
| 2020-05-21T16:14:49
| 264,021,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
# Given two int values, return their sum.
# Unless the two values are the same, then return double their sum.
def sum_double(a, b):
if a == b:
return (a+b) * 2
else:
return a + b
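# Illustrative usage (not part of the original kata solution):
#   sum_double(1, 2)  ->  3
#   sum_double(3, 3)  -> 12   (equal values, so the sum is doubled)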
|
[
"felipepcook@gmail.com"
] |
felipepcook@gmail.com
|
afd12ce0366e2fc375ab00fe60774a98f2e13464
|
71d578606b4010100c7c2130a2c663f988c42131
|
/faster_rcnn_3.5_cpu/network.py
|
00b96effc871d68360bbf701658d34dbba10cd4d
|
[] |
no_license
|
HK017/scripts
|
c8b07eb91a9d033a93efb4263ce3226f53f03c2b
|
c3df4d98c17e68d06f548ae39291e535eb00e7c6
|
refs/heads/master
| 2020-04-04T00:41:07.756766
| 2019-06-13T09:23:11
| 2019-06-13T09:23:11
| 155,659,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,173
|
py
|
import tensorflow as tf
from utils import Conv_layer,Max_pooling,Global_average_pooling,Fc_layers,Flatten
def ResNet50(inputs, class_num, end_point='Average_pool/average_pooling', scope='ResNet50'):
    end_points = {}
    """Each stage_layer contains 3, 4, 6 and 3 Blocks respectively"""
with tf.variable_scope(scope):
with tf.variable_scope('Head_layer'):
net = Conv_layer(inputs, filter=64, kernel=[7,7], stride=2, padding='valid', activation=tf.nn.relu, scope='conv2d_7x7_2')
net = Max_pooling(net, ksize=3, stride=2, scope='max_pooling_3x3_2')
end_points['Head_layer/max_pooling_3x3_2'] = net
if end_point == 'Head_layer/max_pooling_3x3_2':
return net, end_points
with tf.variable_scope('Stage_layer_1'):
with tf.variable_scope('Block1'):
net1 = Conv_layer(net, filter=64, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net1 = Conv_layer(net1, filter=64, kernel=[3, 3], stride=1, padding='same',activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net1 = Conv_layer(net1, filter=256, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_c_1x1_1')
identify_net = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_d_1x1_1')
net = tf.add(identify_net, net1, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_1/Block1/relu'] = net
if end_point == 'Stage_layer_1/Block1/relu':
return net, end_points
with tf.variable_scope('Block2'):
net2 = Conv_layer(net, filter=64, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net2 = Conv_layer(net2, filter=64, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net2 = Conv_layer(net2, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_c_1x1_1')
net = tf.add(net, net2, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_1/Block2/relu'] = net
if end_point == 'Stage_layer_1/Block2/relu':
return net, end_points
with tf.variable_scope('Block3'):
net3 = Conv_layer(net, filter=64, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net3 = Conv_layer(net3, filter=64, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net3 = Conv_layer(net3, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net3, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_1/Block3/relu'] = net
if end_point == 'Stage_layer_1/Block3/relu':
return net, end_points
with tf.variable_scope('Stage_layer_2'):
with tf.variable_scope('Block1'):
net1 = Conv_layer(net, filter=128, kernel=[1, 1], stride=2, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_2')
net1 = Conv_layer(net1, filter=128, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net1 = Conv_layer(net1, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_c_1x1_1')
identify_net = Conv_layer(net, filter=512, kernel=[1, 1], stride=2, padding='valid', activation=tf.nn.relu, scope='conv2d_d_1x1_1')
net = tf.add(identify_net, net1, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_2/Block1/relu'] = net
if end_point == 'Stage_layer_2/Block1/relu':
return net, end_points
with tf.variable_scope('Block2'):
net2 = Conv_layer(net, filter=128, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net2 = Conv_layer(net2, filter=128, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net2 = Conv_layer(net2, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_c_1x1_1')
net = tf.add(net, net2, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_2/Block2/relu'] = net
if end_point == 'Stage_layer_2/Block2/relu':
return net, end_points
with tf.variable_scope('Block3'):
net3 = Conv_layer(net, filter=128, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net3 = Conv_layer(net3, filter=128, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net3 = Conv_layer(net3, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net3, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_2/Block3/relu'] = net
if end_point == 'Stage_layer_2/Block3/relu':
return net, end_points
with tf.variable_scope('Block4'):
net4 = Conv_layer(net, filter=128, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu, scope='conv2d_a_1x1_1')
net4 = Conv_layer(net4, filter=128, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu, scope='conv2d_b_3x3_1')
net4 = Conv_layer(net4, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net4, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_2/Block4/relu'] = net
if end_point == 'Stage_layer_2/Block4/relu':
return net, end_points
with tf.variable_scope('Stage_layer_3'):
with tf.variable_scope('Block1'):
net1 = Conv_layer(net, filter=256, kernel=[1, 1], stride=2, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_2')
net1 = Conv_layer(net1, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net1 = Conv_layer(net1, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
identify_net = Conv_layer(net, filter=1024, kernel=[1, 1], stride=2, padding='valid',activation=tf.nn.relu, scope='conv2d_d_1x1_1')
net = tf.add(identify_net, net1, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block1/relu'] = net
if end_point == 'Stage_layer_3/Block1/relu':
return net, end_points
with tf.variable_scope('Block2'):
net2 = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net2 = Conv_layer(net2, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net2 = Conv_layer(net2, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net2, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block2/relu'] = net
if end_point == 'Stage_layer_3/Block2/relu':
return net, end_points
with tf.variable_scope('Block3'):
net3 = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net3 = Conv_layer(net3, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net3 = Conv_layer(net3, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net3, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block3/relu'] = net
if end_point == 'Stage_layer_3/Block3/relu':
return net, end_points
with tf.variable_scope('Block4'):
net4 = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net4 = Conv_layer(net4, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net4 = Conv_layer(net4, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net4, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block4/relu'] = net
if end_point == 'Stage_layer_3/Block4/relu':
return net, end_points
with tf.variable_scope('Block5'):
net5 = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net5 = Conv_layer(net5, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net5 = Conv_layer(net5, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net5, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block5/relu'] = net
if end_point == 'Stage_layer_3/Block5/relu':
return net, end_points
with tf.variable_scope('Block6'):
net6 = Conv_layer(net, filter=256, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net6 = Conv_layer(net6, filter=256, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net6 = Conv_layer(net6, filter=1024, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_c_1x1_1')
net = tf.add(net, net6, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_3/Block6/relu'] = net
if end_point == 'Stage_layer_3/Block6/relu':
return net, end_points
with tf.variable_scope('Stage_layer_4'):
with tf.variable_scope('Block1'):
net1 = Conv_layer(net, filter=512, kernel=[1, 1], stride=2, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_2')
net1 = Conv_layer(net1, filter=512, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net1 = Conv_layer(net1, filter=2048, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_c_1x1_1')
identify_net = Conv_layer(net, filter=2048, kernel=[1, 1], stride=2, padding='valid', activation=tf.nn.relu, scope='conv2d_d_1x1_1')
net = tf.add(identify_net, net1, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_4/Block1/relu'] = net
if end_point == 'Stage_layer_4/Block1/relu':
return net, end_points
with tf.variable_scope('Block2'):
net2 = Conv_layer(net, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net2 = Conv_layer(net2, filter=512, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net2 = Conv_layer(net2, filter=2048, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_c_1x1_1')
net = tf.add(net, net2, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_4/Block2/relu'] = net
if end_point == 'Stage_layer_4/Block2/relu':
return net, end_points
with tf.variable_scope('Block3'):
net3 = Conv_layer(net, filter=512, kernel=[1, 1], stride=1, padding='valid', activation=tf.nn.relu,scope='conv2d_a_1x1_1')
net3 = Conv_layer(net3, filter=512, kernel=[3, 3], stride=1, padding='same', activation=tf.nn.relu,scope='conv2d_b_3x3_1')
net3 = Conv_layer(net3, filter=2048, kernel=[1, 1], stride=1, padding='valid',activation=tf.nn.relu, scope='conv2d_c_1x1_1')
net = tf.add(net, net3, name='add')
net = tf.nn.relu(net, name='relu')
end_points['Stage_layer_4/Block3/relu'] = net
if end_point == 'Stage_layer_4/Block3/relu':
return net, end_points
with tf.variable_scope('Average_pool'):
net = Global_average_pooling(net,'average_pooling')
print(net.get_shape())
end_points['Average_pool/average_pooling'] = net
if end_point == 'Average_pool/average_pooling':
return net, end_points
with tf.variable_scope('Fc_layer'):
net = Flatten(net, 'flatten')
net = Fc_layers(net, class_num, 'fc_layer')
end_points['Fc_layer/fc_layer'] = net
if end_point == 'Fc_layer/fc_layer':
return net, end_points
return net, end_points
if __name__ == '__main__':
x = tf.placeholder(dtype=tf.float32, shape=[100, 229,229,3], name='x')
result,end_points = ResNet50(x, 1000)
for i in tf.trainable_variables():
print(i)
print(result.get_shape())
|
[
"kai.hou@yhouse.com"
] |
kai.hou@yhouse.com
|
57f3fb8e6a12c4000ca9f11756cad69aa5c084de
|
a29a37fc7f53ac0ac4c30663fc7cb742beb2286a
|
/pingdom_sync.py
|
5e76fa5f68fdc34151cd0d95c7a88e0e53a7c261
|
[] |
no_license
|
emmettbutler/pingfast
|
54748630a6af8331bbc0b624d3fd7e28b7c68260
|
45a0b22cc48ee41180d80cafeab8276c30a4763e
|
refs/heads/master
| 2022-03-20T17:54:42.477461
| 2012-04-10T14:34:18
| 2012-04-10T14:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,745
|
py
|
import pingdom
import settings
def primary_account_login():
return pingdom.Pingdom(
username=settings.PRIMARY_USERNAME,
password=settings.PRIMARY_PASSWORD,
appkey=settings.PRIMARY_APPKEY
)
def secondary_account_login():
return pingdom.Pingdom(
username=settings.SECONDARY_USERNAME,
password=settings.SECONDARY_PASSWORD,
appkey=settings.SECONDARY_APPKEY
)
def sync_pingdom_accounts():
"""synchronizes the two pingdom accounts by adding new checks to the 2nd
account"""
p = primary_account_login()
#get primary account checks and save their names/ids
main_checks = p.method('checks')
main_checks = {
check['name']: check['id'] for check in main_checks['checks']
}
#get primary account contacts
main_contacts = p.method('contacts')
main_contacts = {
contact['email']: {'name': contact['name'], 'id': contact['id']} \
for contact in main_contacts['contacts']
}
print "> Checks and contacts retrieved"
p = secondary_account_login()
#get 2nd account checks
secondary_checks = p.method('checks')
secondary_checks = {
check['name'][:len(check['name']) - 16]: check['id'] \
for check in secondary_checks['checks']
}
for check in main_checks:
if check not in secondary_checks:
#get the details
p = primary_account_login()
details = p.method('checks/%s' % (main_checks[check],))
details = {details[d]['name']: details[d] for d in details}
#put those details in the new check
p = secondary_account_login()
check_type = details[check]['type'].keys()[0]
new_check = p.method(
url='checks',
method="POST",
parameters={
'name': "%s [Response Time]" % (check,),
'type': 'httpcustom',
'host': settings.DEPLOY_SERVER,
'url': '/response/%s' % (details[check]['id'],),
'port': settings.DEPLOY_PORT,
'resolution': details[check]['resolution'],
'sendtoemail': details[check]['sendtoemail'],
'sendtosms': details[check]['sendtosms'],
'sendtotwitter': details[check]['sendtotwitter'],
'sendtoiphone': details[check]['sendtoiphone'],
'sendnotificationwhendown': details[check]['sendnotificationwhendown'],
'notifyagainevery': details[check]['notifyagainevery'],
'notifywhenbackup': details[check]['notifywhenbackup'],
#for some reason, contactids yields a bad request
#'contactids': ''.join([str(main_contacts[a]['id']) + ',' for a in main_contacts])
}
)
print "> Created new check '%s [Response Time]'" % (check,)
#get 2nd account contacts
secondary_contacts = p.method('contacts')
secondary_contacts = {
contact['email']: contact['name'] \
for contact in secondary_contacts['contacts']
}
for contact in main_contacts:
if contact not in secondary_contacts:
#add the contact to the 2nd account
new_contact = p.method(
url='contacts',
method="POST",
parameters={
'name': main_contacts[contact]['name'],
'email': contact,
}
)
print "> Created new contact '%s'" % (contact,)
print ("> All checks and contacts synchronized")
if __name__ == '__main__':
sync_pingdom_accounts()
|
[
"andrew@parsely.com"
] |
andrew@parsely.com
|
a30311f86fe86387605a24fd707f25f28eccb5ec
|
26321a44bbc322370bc62ac43b9c634162d9ac84
|
/django_hogun/songrequest/wsgi.py
|
644932795875be54380fcee87c15e7daa95d28e9
|
[
"MIT"
] |
permissive
|
jisoo1170/song-request-app
|
4e9be2d6d87ac39957b99725a1a659ae01b7eddd
|
b882d63beb907f0b04dcc2bab8a8225ddc1bcadf
|
refs/heads/master
| 2020-08-18T00:21:00.695982
| 2018-11-01T18:00:38
| 2018-11-01T18:00:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
"""
WSGI config for songrequest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "songrequest.settings")
application = get_wsgi_application()
|
[
"hogunhogun@naver.com"
] |
hogunhogun@naver.com
|
82eb72c1b5699a9cd0d87b1d913385f1c39fce3f
|
abad82a1f487c5ff2fb6a84059a665aa178275cb
|
/Codewars/8kyu/sum-without-highest-and-lowest-number/Python/test.py
|
b114137cb761a281ddf7f8f1076730201d46f413
|
[
"MIT"
] |
permissive
|
RevansChen/online-judge
|
8ae55f136739a54f9c9640a967ec931425379507
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
refs/heads/master
| 2021-01-19T23:02:58.273081
| 2019-07-05T09:42:40
| 2019-07-05T09:42:40
| 88,911,035
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
# Python - 3.6.0
Test.describe('Basic tests')
Test.it('None or Empty')
Test.assert_equals(sum_array(None), 0)
Test.assert_equals(sum_array([]), 0)
Test.it('Only one Element')
Test.assert_equals(sum_array([3]), 0)
Test.assert_equals(sum_array([-3]), 0)
Test.it('Only two Element')
Test.assert_equals(sum_array([3, 5]), 0)
Test.assert_equals(sum_array([-3, -5]), 0)
Test.it('Real Tests')
Test.assert_equals(sum_array([6, 2, 1, 8, 10]), 16)
Test.assert_equals(sum_array([6, 0, 1, 10, 10]), 17)
Test.assert_equals(sum_array([-6, -20, -1, -10, -12]), -28)
Test.assert_equals(sum_array([-6, 20, -1, 10, -12]), 3)
|
[
"d79523@hotmail.com"
] |
d79523@hotmail.com
|
8c9d1d675ca0aa584d90ba612d4c6d1eb88d8962
|
26d7a50b479e58d73f61c7bcf2e70f912bf11303
|
/src/utils/data.py
|
d7775866f226eefb60a30902af650fd87b5c8270
|
[] |
no_license
|
jakeparker/10-601-project
|
3aa2fcf95f0a926e05ea6812da9c7889c43cd9c1
|
25245396f8d512173ed050225bcf6bf6af551420
|
refs/heads/master
| 2020-03-07T19:05:58.239076
| 2017-05-04T03:55:40
| 2017-05-04T03:55:40
| 127,661,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
import os
import numpy as np
from scipy.io import loadmat
from keras.utils import np_utils
def get_filepath(dataset):
dirpath = os.path.join(dataset['io']['path'], dataset['io']['directory'])
train_filename = dataset['io']['train filename'] + dataset['io']['filetype']
test_filename = dataset['io']['test filename'] + dataset['io']['filetype']
train_filepath = os.path.join(dirpath, train_filename)
test_filepath = os.path.join(dirpath, test_filename)
return(train_filepath, test_filepath)
def format_data(data, shape):
(h, w, c) = shape
X = data['x']
X = X.reshape(X.shape[0], c, w, h)
X = np.transpose(X, (0, 2, 3, 1)) ## ?
data['x'] = X.astype('float32') / 255
return None
def split_data(dataset, data):
(w,h,c) = dataset['image']['shape']
n_classes = dataset['num classes']
n_valid = dataset['num train'] // dataset['validation split']
n_valid = int(n_valid)
n_train = dataset['num train'] - n_valid
cutoff = n_valid // n_classes
dataset['num train'] = n_train
dataset['num valid'] = n_valid
train = {}
train['x'] = np.zeros((n_train, h, w, c))
train['y'] = np.zeros((n_train, 1))
valid = {}
valid['x'] = np.zeros((n_valid, h, w, c))
valid['y'] = np.zeros((n_valid, 1))
label = 0
val_idx = 0
remove_list = []
for idx in range(n_train + n_valid):
if val_idx >= cutoff:
label = val_idx // cutoff
if label >= n_classes:
break
if data['y'][idx] == label:
valid['x'][val_idx] = data['x'][idx]
valid['y'][val_idx] = data['y'][idx]
remove_list.append(idx)
val_idx += 1
print(len(remove_list))
train['x'] = np.delete(data['x'], remove_list, 0)
train['y'] = np.delete(data['y'], remove_list, 0)
train['y'] = np_utils.to_categorical(train['y'])
valid['y'] = np_utils.to_categorical(valid['y'])
print(train['x'].shape)
print(train['y'].shape)
return (train, valid)
def load_data(dataset):
"""Load cifar-3 dataset,
Args:
dataset: dictionary defining
- `io`: a dictionary defining:
- `path`: the relative path to the data directory w.r.t. abs path of `utils.py`
- `train filename`: filename of train dataset
- `test filename`: filename of test dataset
- `directory`: the directory the data is stored in
- `filetype`: the file format of both the training and testing datasets
- `image`: a dictionary defining:
- `shape`: (height, width, channels)
- `unrolled`: height*width*channels
- `num classes`: number of classifications
- `num train`: number of training examples (later modified by validation split)
- 'num valid': number of validation examples (originally 0)
- `num test`: number of test examples (immutable)
- `validation split`: integer; `num valid` = `num train` // `validation split`
Returns: tuple of train, valid, and test data
train: dictionary of training data and labels
- `x`: shape = (num_train, unrolled)
- `y`: shape = (num_train, unrolled)
valid: dictionary of validation data and labels
- `x`: shape = (num_valid, unrolled)
- `y`: shape = (num_valid, unrolled)
test: dictionary of data and labels
- `x`: shape = (num_test, unrolled)
- `y`: defaults to `None`; can be set by user to `shape = (num_test, 1)`
- 'y_pred': defaults to `None`; set by model to `shape = (num_test, unrolled)`
"""
n_train = dataset['num train'] # 12000
n_test = dataset['num test'] # 3000
shape = dataset['image']['shape']
unrolled = dataset['image']['unrolled']
(train_filepath, test_filepath) = get_filepath(dataset)
train_data = {'x': {}, 'y': {}}
test_data = {'x': {}, 'y': None, 'y_pred': None}
if dataset['io']['filetype'] == '.mat':
train_mat = loadmat(train_filepath)
test_mat = loadmat(test_filepath)
train_data['x'] = train_mat.get('data')
train_data['y'] = train_mat.get('labels')
test_data['x'] = test_mat.get('data')
elif dataset['io']['filetype'] == '.bin':
with np.memmap(train_filepath, dtype='uint8', mode='c', shape=(n_train, unrolled+1)) as mm:
train_data['x'] = mm[np.repeat(np.arange(n_train), unrolled), np.tile(np.arange(1,unrolled+1), n_train)]
train_data['y'] = mm[np.arange(n_train), np.repeat(0, n_train)]
with np.memmap(test_filepath, dtype='uint8', mode='c', shape=(n_test, unrolled)) as mm:
test_data['x'] = np.reshape(mm, dataset['image']['shape'])
else:
raise ValueError, "unsupported filetype: %s \n" %(dataset['io']['filetype'])
format_data(train_data, shape)
format_data(test_data, shape)
(train, valid) = split_data(dataset, train_data)
test = test_data
return (train, valid, test)
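# A minimal, illustrative `dataset` configuration matching the docstring above;
# the paths and filenames are assumptions, not values taken from the project:
#
#   dataset = {
#       'io': {'path': '.', 'directory': 'data', 'train filename': 'train',
#              'test filename': 'test', 'filetype': '.mat'},
#       'image': {'shape': (32, 32, 3), 'unrolled': 32 * 32 * 3},
#       'num classes': 3, 'num train': 12000, 'num valid': 0,
#       'num test': 3000, 'validation split': 6,
#   }
#   (train, valid, test) = load_data(dataset)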
|
[
"jake.l.parker@gmail.com"
] |
jake.l.parker@gmail.com
|
106afed6936e5ba76a8b3f114a7d57fe8df2e595
|
0b953c73458679beeef3b95f366601c834cff9b4
|
/Code Kata/counting no of lines in a paragraph.py
|
15d30b5df189bf7784ea1c381021887b2eb7f202
|
[] |
no_license
|
Sravaniram/Python-Programming
|
41531de40e547f0f461e77b88e4c0d562faa041c
|
f6f2a4e3a6274ecab2795062af8899c2a06c9dc1
|
refs/heads/master
| 2020-04-11T12:49:18.677561
| 2018-06-04T18:04:13
| 2018-06-04T18:04:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
a=raw_input()
a=a.split(".")
print len(a)
|
[
"noreply@github.com"
] |
Sravaniram.noreply@github.com
|
bb5e0cd1e330a5c3fc527a836f672e1ff7f3e65e
|
7c246374fa3f252b7efea7d3b95970b082d341b3
|
/venv/Scripts/pip3-script.py
|
573881d4351470b9f90e48b9f6511f7d930d9486
|
[] |
no_license
|
juancarloss9812/Codigos-phyton
|
705a8713e32e1bb736b699f8f65f4ac767627008
|
74d27ed27ff6004f7ad9bbdc07ad2393fc1369c9
|
refs/heads/master
| 2020-08-05T15:59:28.008295
| 2019-10-03T14:53:42
| 2019-10-03T14:53:42
| 212,605,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
#!C:\Users\HP\Desktop\EjerciciosPhyton\ejercicio1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"juancarloss@unicauca.edu.co"
] |
juancarloss@unicauca.edu.co
|
5bd4beb9fdcf81f0050f1943605d5cc13f2d2120
|
888ee3eafc8a2490809e7bf3621560103e270bd9
|
/core/const.py
|
b54f6a29f2c7bd1f8ccb8443452efa9bae48e9f2
|
[] |
no_license
|
domodo2012/quandomo2020
|
0bde9fe5667b6b98a4b12b7c0c6f31751f51bd4b
|
a462cccfb9e86ac0ab6a7fedaeeb3592bfc5a1f6
|
refs/heads/master
| 2023-03-06T14:33:28.360350
| 2023-02-21T09:44:36
| 2023-02-21T09:44:36
| 309,451,082
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,479
|
py
|
# -*- coding: utf-8 -*-
"""
Commonly used constants
"""
from enum import Enum
# Strategy run mode
class RunMode(Enum):
BACKTESTING = "backtesting"
LIVE = "live"
# Bar interval / time resolution of market data
class Interval(Enum):
MIN = "1m"
HOUR = "1h"
DAILY = "d"
WEEKLY = "w"
# Price rights-adjustment method
class RightsAdjustment(Enum):
NONE = "none"
FROWARD = "forward"
BACKWARD = "backward"
# Event types
class Event(Enum):
    TIMER = "event_timer"  # timer event
    BAR = "event_bar"  # market bar data event
    ORDER = "event_order"  # order event
    PORTFOLIO = "event_portfolio"  # portfolio-level risk-control event
    TRADE = "event_trade"  # trade/fill event
    RECORD = "update_bar_info"  # data recording event
    LOG = "event_log"  # logging event
    ACCOUNT = "event_account"  # account event
    RIGHTS = "event_rights"  # stock dividend/split (rights distribution) event
    DELIVERY = "event_delivery"  # futures delivery event
    STRATEGY = "event_strategy"  # portfolio manager adjusting its managed strategies
    POOL = "event_pool"  # stock pool update event
    BLACK_LIST = "event_black_list"  # blacklist update event
# Empty/zero values
class Empty(Enum):
eSTRING = ""
eINT = 0
eFLOAT = 0.0
# Direction of an order/trade/position
class Direction(Enum):
    LONG = "long"  # go long
    SHORT = "short"  # go short
# Open/close position status
class Offset(Enum):
    OPEN = "open"
    CLOSE = "close"
    CLOSETODAY = "close_today"  # close today's position
    CLOSEYESTERDAY = "close_yesterday"  # close yesterday's position
# Order status
class Status(Enum):
    SUBMITTING = "submitting"  # awaiting submission
    WITHDRAW = "withdraw"  # withdrawn
    NOT_TRADED = "pending"  # not yet filled
    PART_TRADED = "partial filled"  # partially filled
    ALL_TRADED = "filled"  # fully filled
    CANCELLED = "cancelled"  # cancelled
    REJECTED = "rejected"  # rejected
    UNKNOWN = "unknown"  # unknown
# Order types
class OrderType(Enum):
    LIMIT = "limit"  # limit order
    MARKET = "market"  # market order
    STOP = "stop"  # stop order
    FAK = "FAK"  # limit order that fills immediately and cancels the remainder (fill-and-kill)
    FOK = "FOK"  # limit order that must fill entirely at once or is cancelled (fill-or-kill)
# Stop order status
class StopOrderStatus(Enum):
    WAITING = "等待中"  # waiting
    CANCELLED = "已撤销"  # cancelled
    TRIGGERED = "已触发"  # triggered
# Slippage types
class Slippage(Enum):
    FIX = "slippage_fix"  # fixed-value slippage
    PERCENT = "slippage_percent"  # percentage slippage
# Exchanges
class Exchange(Enum):
CFFEX = "CFFEX" # China Financial Futures Exchange
SHFE = "SHFE" # Shanghai Futures Exchange
CZCE = "CZCE" # Zhengzhou Commodity Exchange
DCE = "DCE" # Dalian Commodity Exchange
INE = "INE" # Shanghai International Energy Exchange
SSE = "SSE" # Shanghai Stock Exchange
SZSE = "SZSE" # Shenzhen Stock Exchange
SGE = "SGE" # Shanghai Gold Exchange
# Product types
class Product(Enum):
    STOCK = "stock"  # stock
    STOCK_SH = "stock_sh"  # Shanghai stock
    STOCK_SZ = "stock_sz"  # Shenzhen stock
    FUTURES = "futures"  # futures
    INDEX = "index"  # index
# MongoDB database names
class MongoDbName(Enum):
MARKET_DATA_DAILY = "market_data_daily"
FINANCIAL_DATA = "financial_data"
MARKET_DATA_1_MIN = "market_data_1min"
DAILY_DB_NAME = 'market_data_daily'
MINUTE_DB_NAME = 'Min_Db'
# SQLite database names
class SqliteDbName(Enum):
DB = "quandomo_data.db"
BASE = "base_data.db"
MARKET = "market_data.db"
FACTOR = "factor_data.db"
Futures_contracts = {
'SHFE': ['cu', 'al', 'zn', 'ni', 'sn', 'au', 'ag', 'rb', 'wr', 'hc', 'ss',
'fu', 'bu', 'ru', 'sp'],
'DCE': ['a', 'b', 'm', 'y', 'p', 'c', 'cs', 'jd', 'rr',
'l', 'v', 'pp', 'eb', 'j', 'jm', 'i', 'eg', 'pg'],
'CZCE': ['AP', 'CF', 'CJ', 'CY', 'FG', 'JR', 'LR', 'MA', 'OI', 'RM', 'SA', 'SF', 'SM', 'SR',
'TA', 'UR', 'ZC'],
'CFFEX': ['IC', 'IF', 'IH', 'TS', 'TF', 'T'],
'INE': ['sc', 'lu', 'nr']
}
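# Illustrative usage of these constants (not part of the original module):
#
#   RunMode.BACKTESTING.value                # 'backtesting'
#   Interval.DAILY.value                     # 'd'
#   Futures_contracts[Exchange.SHFE.value]   # ['cu', 'al', 'zn', ...]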
|
[
"1111938@qq.com"
] |
1111938@qq.com
|
b5f85c87ab85cc26d172997c4595f1d90bbe390d
|
e063e473a07ec4fdabfdf9e0674fd3d459b5367c
|
/plot-03-demo=interpret_hdphmm_params_and_run_viterbi.py
|
b29cb583e10cdb5c1bacf2e5e26a90189caf229a
|
[] |
no_license
|
nakulgopalan/change_point_detection
|
24a61e8093543dd8a31ef8bb5dc1bdb79cb3a114
|
edc52cfab3f82efec8fd604fa91eebb3d5dfb8c5
|
refs/heads/master
| 2022-09-27T00:36:29.598453
| 2020-06-04T01:01:24
| 2020-06-04T01:01:24
| 192,130,565
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,014
|
py
|
"""
================================================================
Visualizing learned state sequences and transition probabilities
================================================================
Train a sticky HDP-HMM model on small motion capture data, then visualize the MAP state sequences under the estimated model parameters by running Viterbi.
Also has some info on how to inspect the learned HMM parameters of a sticky HDP-HMM model trained on small motion capture data.
"""
# sphinx_gallery_thumbnail_number = 3
import bnpy
import numpy as np
import os
import matplotlib
from matplotlib import pylab
import seaborn as sns
np.set_printoptions(suppress=1, precision=3)
FIG_SIZE = (10, 5)
pylab.rcParams['figure.figsize'] = FIG_SIZE
###############################################################################
#
# Load dataset from file
dataset_path = os.path.join(bnpy.DATASET_PATH, 'mocap6')
dataset = bnpy.data.GroupXData.read_npz(
os.path.join(dataset_path, 'dataset.npz'))
###############################################################################
#
# Setup: Function to make a simple plot of the raw data
# -----------------------------------------------------
def show_single_sequence(
seq_id,
zhat_T=None,
z_img_cmap=None,
ylim=[-120, 120],
K=5,
left=0.2, bottom=0.2, right=0.8, top=0.95):
if z_img_cmap is None:
z_img_cmap = matplotlib.cm.get_cmap('Set1', K)
if zhat_T is None:
nrows = 1
else:
nrows = 2
fig_h, ax_handles = pylab.subplots(
nrows=nrows, ncols=1, sharex=True, sharey=False)
ax_handles = np.atleast_1d(ax_handles).flatten().tolist()
start = dataset.doc_range[seq_id]
stop = dataset.doc_range[seq_id + 1]
# Extract current sequence
# as a 2D array : T x D (n_timesteps x n_dims)
curX_TD = dataset.X[start:stop]
for dim in xrange(12):
ax_handles[0].plot(curX_TD[:, dim], '.-')
ax_handles[0].set_ylabel('angle')
ax_handles[0].set_ylim(ylim)
z_img_height = int(np.ceil(ylim[1] - ylim[0]))
pylab.subplots_adjust(
wspace=0.1,
hspace=0.1,
left=left, right=right,
bottom=bottom, top=top)
if zhat_T is not None:
img_TD = np.tile(zhat_T, (z_img_height, 1))
ax_handles[1].imshow(
img_TD,
interpolation='nearest',
vmin=-0.5, vmax=(K-1)+0.5,
cmap=z_img_cmap)
ax_handles[1].set_ylim(0, z_img_height)
ax_handles[1].set_yticks([])
bbox = ax_handles[1].get_position()
width = (1.0 - bbox.x1) / 3
height = bbox.y1 - bbox.y0
cax = fig_h.add_axes([right + 0.01, bottom, width, height])
cbax_h = fig_h.colorbar(
ax_handles[1].images[0], cax=cax, orientation='vertical')
cbax_h.set_ticks(np.arange(K))
cbax_h.set_ticklabels(np.arange(K))
cbax_h.ax.tick_params(labelsize=9)
ax_handles[-1].set_xlabel('time')
return ax_handles
###############################################################################
#
# Visualization of the first sequence (1 of 6)
# --------------------------------------------
show_single_sequence(0)
###############################################################################
#
# Setup: hyperparameters
# ----------------------------------------------------------
K = 10 # Number of clusters/states
# Allocation model (HDP)
gamma = 5.0 # top-level Dirichlet concentration parameter
transAlpha = 0.5 # trans-level Dirichlet concentration parameter
startAlpha = 10.0 # starting-state Dirichlet concentration parameter
hmmKappa = 50.0 # set sticky self-transition weight
# Observation model (1st-order Auto-regressive Gaussian)
sF = 1.0 # Set observation model prior so E[covariance] = identity
ECovMat = 'eye'
###############################################################################
#
# Train HDP-HMM with *AutoRegGauss* observation model
# ----------------------------------------------
#
# Train single model for all 6 sequences.
#
# Do a small number of clusters just to make visualization easy.
#
# Take the best of 5 random initializations (in terms of evidence lower bound).
#
hdphmm_trained_model, hmmar_info_dict = bnpy.run(
dataset, 'HDPHMM', 'AutoRegGauss', 'memoVB',
output_path=(
'/tmp/mocap6/showcase-K=%d-model=HDPHMM+AutoRegGauss-ECovMat=1*eye/'
% (K)),
nLap=100, nTask=5, nBatch=1, convergeThr=0.0001,
transAlpha=transAlpha, startAlpha=startAlpha, hmmKappa=hmmKappa,
gamma=gamma,
sF=sF, ECovMat=ECovMat,
K=K, initname='randexamples',
printEvery=25,
)
###############################################################################
#
# Visualize the starting-state probabilities
# ------------------------------------------
#
# start_prob_K : 1D array, size K
# start_prob_K[k] = exp( E[log Pr(start state = k)] )
start_prob_K = hdphmm_trained_model.allocModel.get_init_prob_vector()
print(start_prob_K)
###############################################################################
#
# Visualize the transition probabilities
# --------------------------------------
#
# trans_prob_KK : 2D array, K x K
# trans_prob_KK[j, k] = exp( E[log Pr(z_t = k | z_t-1 = j)] )
#
trans_prob_KK = hdphmm_trained_model.allocModel.get_trans_prob_matrix()
print(trans_prob_KK)
###############################################################################
#
# Compute log likelihood of each timestep for sequence 0
# ------------------------------------------------------
#
# log_lik_TK : 2D array, T x K
# log_lik_TK[t, k] = E[ log Pr( observed data at time t | z_t = k)]
log_lik_seq0_TK = hdphmm_trained_model.obsModel.calcLogSoftEvMatrix_FromPost(
dataset.make_subset([0])
)
print(log_lik_seq0_TK[:10, :])
###############################################################################
#
# Run Viterbi algorithm for sequence 0
# ------------------------------------
#
# zhat_T : 1D array, size T
# MAP state sequence
# zhat_T[t] = state assigned to timestep t, will be int value in {0, 1, ... K-1}
zhat_seq0_T = bnpy.allocmodel.hmm.HMMUtil.runViterbiAlg(
log_lik_seq0_TK, np.log(start_prob_K), np.log(trans_prob_KK))
print(zhat_seq0_T[:10])
###############################################################################
#
# Visualize the segmentation for sequence 0
# -----------------------------------------
#
show_single_sequence(0, zhat_T=zhat_seq0_T, K=K)
###############################################################################
#
# Visualize the segmentation for sequence 1
# -----------------------------------------
#
log_lik_seq1_TK = hdphmm_trained_model.obsModel.calcLogSoftEvMatrix_FromPost(
dataset.make_subset([1])
)
zhat_seq1_T = bnpy.allocmodel.hmm.HMMUtil.runViterbiAlg(
log_lik_seq1_TK, np.log(start_prob_K), np.log(trans_prob_KK))
show_single_sequence(1, zhat_T=zhat_seq1_T, K=K)
pylab.show()
|
[
"nakulgopalan@gmail.com"
] |
nakulgopalan@gmail.com
|
512d168899da08511faff0bfaf433e661bdc72ac
|
ec4e94c8d2e57e25f179f73d613f70d0b56fee15
|
/WorkFlow2/AB/AUGMENT/B.2/10000/qaz_train_10000_5/ocean_annealing.py
|
3a244d679e24bf1e77634e6a167b9374a1d6863e
|
[] |
no_license
|
whigg/susy_qa_interns_2020
|
a352eec40e615a01e8a70bd0aefb01108a1a0200
|
986f2037b59fb69dc4ee11b07456da3df72d0da8
|
refs/heads/master
| 2023-01-01T20:10:09.508435
| 2020-10-28T21:25:23
| 2020-10-28T21:25:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,999
|
py
|
############################################################################
#11/07/2020 - WORKING
#train_size20000
#cutoff = 93
#2 folds
#AUGMENT
#Fix_variable = True
############################################################################
#Python imports
import numpy as np
from scipy.optimize import basinhopping
from contextlib import closing
from multiprocessing import Pool
import os
import datetime
import time
#Dwave imports
from dwave.system.samplers import DWaveSampler
from dwave.cloud import Client
import dimod
import dwave_networkx as dnx
from dwave.system.composites import EmbeddingComposite
a_time = 5
nreads = 200
train_size = 10000
start_num = 0
end_num = 10
zoom_factor = 0.5
n_iterations = 8
flip_probs = np.array([0.16, 0.08, 0.04, 0.02] + [0.01]*(n_iterations - 4))
flip_others_probs = np.array([0.16, 0.08, 0.04, 0.02] + [0.01]*(n_iterations - 4))/2
flip_state = -1
AUGMENT_CUTOFF_PERCENTILE = 93
AUGMENT_SIZE = 7 # must be an odd number (since augmentation includes original value in middle)
AUGMENT_OFFSET = 0.007
AUGMENT = True
UPDATING_HAMILTONIAN = True
FIXING_VARIABLES = True
def total_hamiltonian(s, C_i, C_ij):
bits = len(s)
h = 0 - np.dot(s, C_i)
for i in range(bits):
h += s[i] * np.dot(s[i+1:], C_ij[i][i+1:])
return h
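# Tiny worked example (illustrative): with s = [1, -1], C_i = [0.2, 0.1] and
# C_ij = [[0.0, 0.3], [0.3, 0.0]] the energy is
#   -(1*0.2 + (-1)*0.1) + 1*(-1)*0.3 = -0.1 - 0.3 = -0.4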
def anneal(C_i, C_ij, mu, sigma, l, strength_scale, energy_fraction, ngauges, max_excited_states):
#Initialising h and J as dictionnaries
h = {}
J = {}
for i in range(len(C_i)):
h_i = -2*sigma[i]*C_i[i]
for j in range(len(C_ij[0])):
if j > i:
J[(i, j)] = float(2*C_ij[i][j]*sigma[i]*sigma[j])
h_i += 2*(sigma[i]*C_ij[i][j]*mu[j])
h[i] = h_i
#applying cutoff
print("Number of J before : "+str(len(J))) #J before cutoff
float_vals = []
for i in J.values():
float_vals.append(i)
cutoff = np.percentile(float_vals, AUGMENT_CUTOFF_PERCENTILE)
to_delete = []
for k, v in J.items():
if v < cutoff:
to_delete.append(k)
for k in to_delete:
del J[k]
    print("Number of J after : "+str(len(J))) # J after cutoff
new_Q = {}
isingpartial = {}
if FIXING_VARIABLES:
#Optimising heuristically the number of coupling terms
Q, _ = dimod.ising_to_qubo(h, J, offset = 0.0)
bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset = 0.0)
simple = dimod.fix_variables(bqm, sampling_mode = False)
if simple == {} :
new_Q = Q
else :
Q_indices = []
for i in Q :
if i in simple.keys() :
continue
else :
Q_indices.append(i)
new_Q = {key : Q[key] for key in Q_indices}
print('new length', len(new_Q))
isingpartial = simple
if (not FIXING_VARIABLES) or len(new_Q) > 0:
mapping = []
offset = 0
for i in range(len(C_i)):
if i in isingpartial:
mapping.append(None)
offset += 1
else:
mapping.append(i - offset)
if FIXING_VARIABLES:
new_Q_mapped = {}
for (first, second), val in new_Q.items():
new_Q_mapped[(mapping[first], mapping[second])] = val
h, J, _ = dimod.qubo_to_ising(new_Q_mapped)
#Run gauges
qaresults = []
print("Number of variables to anneal :"+str(len(h)))
for g in range(ngauges):
#Finding embedding
qaresult = []
embedded = False
for attempt in range(5):
a = np.sign(np.random.rand(len(h)) - 0.5)
float_h = []
for i in h.values():
float_h.append(i)
h_gauge = float_h*a
J_gauge = {}
for i in range(len(h)):
for j in range(len(h)):
if (i, j) in J:
J_gauge[(i, j)] = J[(i, j)]*a[i]*a[j]
try:
                print("Trying to find embedding")
sampler = EmbeddingComposite(DWaveSampler(token='secret_token') )
embedded = True
break
except ValueError: # no embedding found
print('no embedding found')
embedded = False
continue
if not embedded:
continue
        print("embedding found")
print("Quantum annealing")
try_again = True
while try_again:
try:
#Annealing, saving energy and sample list
sampleset = sampler.sample_ising(h_gauge, J_gauge, chain_strength = strength_scale, num_reads=200, annealing_time = 20)
try_again = False
except:
print('runtime or ioerror, trying again')
time.sleep(10)
try_again = True
print("Quantum done")
qaresult.append(sampleset.record[0][0].tolist())
qaresult = np.asarray(qaresult)
qaresult = qaresult * a
qaresults[g*nreads:(g+1)*nreads] = qaresult
full_strings= np.zeros((len(qaresults),len(C_i)))
full_strings = np.asarray(full_strings)
qaresults = np.asarray(qaresults)
if FIXING_VARIABLES:
j = 0
for i in range(len(C_i)):
if i in isingpartial:
full_strings[:, i] = 2*isingpartial[i] - 1
else:
full_strings[:, i] = qaresults[:, j]
j += 1
else:
full_strings = qaresults
s = np.asarray(full_strings)
energies = np.zeros(len(qaresults))
s[np.where(s > 1)] = 1.0
s[np.where(s < -1)] = -1.0
bits = len(s[0])
for i in range(bits):
energies += 2*s[:, i]*(-sigma[i]*C_i[i])
for j in range(bits):
if j > i:
energies += 2*s[:, i]*s[:, j]*sigma[i]*sigma[j]*C_ij[i][j]
energies += 2*s[:, i]*sigma[i]*C_ij[i][j] * mu[j]
unique_energies, unique_indices = np.unique(energies, return_index=True)
ground_energy = np.amin(unique_energies)
if ground_energy < 0:
threshold_energy = (1 - energy_fraction) * ground_energy
else:
threshold_energy = (1 + energy_fraction) * ground_energy
lowest = np.where(unique_energies < threshold_energy)
unique_indices = unique_indices[lowest]
if len(unique_indices) > max_excited_states:
sorted_indices = np.argsort(energies[unique_indices])[-max_excited_states:]
unique_indices = unique_indices[sorted_indices]
print("unique indices : ", unique_indices)
print(type(unique_indices[0]))
print(type(full_strings))
final_answers = full_strings[unique_indices]
print('number of selected excited states', len(final_answers))
return final_answers
else:
final_answer = []
        print("Everything resolved by FIXING_VARIABLES")
for i in range(len(C_i)):
if i in isingpartial:
final_answer.append(2*isingpartial[i] - 1)
final_answer = np.array(final_answer)
return np.array([final_answer])
def create_data(sig, bkg, sig_label, bkg_label):
n_classifiers = sig.shape[1]
predictions = np.concatenate((np.sign(sig), np.sign(bkg)))
predictions = np.transpose(predictions) / float(n_classifiers)
y = np.concatenate((np.ones(len(sig)), -np.ones(len(bkg))))
tag = np.concatenate((sig_label , bkg_label))
return predictions, y , tag
def create_augmented_data(sig, bkg, sig_label, bkg_label):
offset = AUGMENT_OFFSET
scale = AUGMENT_SIZE
n_samples = len(sig) + len(bkg)
n_classifiers = sig.shape[1]
predictions_raw = np.concatenate((sig, bkg))
predictions_raw = np.transpose(predictions_raw)
predictions = np.zeros((n_classifiers * scale, n_samples))
for i in range(n_classifiers):
for j in range(scale):
predictions[i*scale + j] = np.sign(predictions_raw[i] + (j-scale//2)*offset) / (n_classifiers * scale)
y = np.concatenate((np.ones(len(sig)), -np.ones(len(bkg))))
tag = np.concatenate((sig_label , bkg_label))
#print('predictions', predictions)
return predictions, y , tag
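# Illustrative shape check (assuming, say, 20 weak classifiers): with
# AUGMENT_SIZE = 7, create_augmented_data returns `predictions` of shape
# (140, n_samples), i.e. each classifier contributes 7 offset, sign-quantised
# copies of its output, each scaled by 1/(n_classifiers * AUGMENT_SIZE).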
def strong_classifier(predictions, weights):
return np.dot(predictions.T, weights)
print('loading data')
sig = np.loadtxt("/workspace/susy_qa_interns_2020/WorkFlow2/AB/CSV/stop_train_sig_wc_AB.csv",delimiter=",", usecols=(3,4,5,6,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24))
sig_tag = np.loadtxt("/workspace/susy_qa_interns_2020/WorkFlow2/AB/CSV/stop_train_sig_wc_AB.csv",delimiter=",",dtype="str", usecols=(15))
bkg = np.loadtxt("/workspace/susy_qa_interns_2020/WorkFlow2/AB/CSV/stop_train_bkg_wc_AB.csv",delimiter=",", usecols=(3,4,5,6,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24))
bkg_tag = np.loadtxt("/workspace/susy_qa_interns_2020/WorkFlow2/AB/CSV/stop_train_bkg_wc_AB.csv",delimiter=",",dtype="str", usecols=(15))
sig_pct = float(len(sig)) / (len(sig) + len(bkg))
bkg_pct = float(len(bkg)) / (len(sig) + len(bkg))
print('loaded data')
if not os.path.exists("./mus/") :
os.mkdir("./mus/")
print("created mus directory")
if not os.path.exists("./energies/") :
os.mkdir("./energies/")
print("created energies directory")
if not os.path.exists("./strong_train_predictions/") :
os.mkdir("./strong_train_predictions/")
print("created predictions directory")
n_folds = 2
num = 0
sig_indices = np.arange(len(sig))
bkg_indices = np.arange(len(bkg))
remaining_sig = sig_indices
remaining_bkg = bkg_indices
fold_generator = np.random.RandomState(0)
ground_energies=np.zeros((n_folds,n_iterations))
ground_energies_test=np.zeros((n_folds,n_iterations))
for f in range(n_folds):
if num >= end_num:
break
print('fold', f)
train_sig = fold_generator.choice(remaining_sig, size=int(train_size*sig_pct), replace=False)
train_bkg = fold_generator.choice(remaining_bkg, size=int(train_size*bkg_pct), replace=False)
remaining_sig = np.delete(remaining_sig, train_sig)
remaining_bkg = np.delete(remaining_bkg, train_bkg)
test_sig = np.delete(sig_indices, train_sig)
test_bkg = np.delete(bkg_indices, train_bkg)
if AUGMENT:
predictions_train, y_train, tag_train = create_augmented_data(sig[train_sig], bkg[train_bkg], sig_tag[train_sig], bkg_tag[train_bkg])
predictions_test, y_test , tag_test = create_augmented_data(sig[test_sig], bkg[test_bkg], sig_tag[test_sig], bkg_tag[test_bkg])
else:
predictions_train, y_train , tag_train= create_data(sig[train_sig], bkg[train_bkg], sig_tag[train_sig], bkg_tag[train_bkg])
predictions_test, y_test , tag_test = create_data(sig[test_sig], bkg[test_bkg], sig_tag[test_sig], bkg_tag[test_bkg])
    print('Data split into train ({} events) and test ({} events)'.format(train_size , len(y_test)))
if num < start_num:
num += 1
continue
# create C_ij and C_i matrices
n_classifiers = len(predictions_train)
test_size = len(y_test)
C_ij = np.zeros((n_classifiers, n_classifiers))
C_ij_test= np. zeros ((n_classifiers, n_classifiers))
C_i = np.dot(predictions_train, y_train)
C_i_test = np.dot(predictions_test, y_test)
for i in range(n_classifiers):
for j in range(n_classifiers):
C_ij[i][j] = np.dot(predictions_train[i], predictions_train[j])
C_ij_test[i][j] = np.dot(predictions_test[i], predictions_test[j])
print('created C_ij and C_i matrices')
mu0 = np.zeros(n_classifiers)
sigma0 = np.ones(n_classifiers)
mu = np.copy(mu0)
sigma = np.copy(sigma0)
reg = 0.0
l0 = reg*np.amax(np.diagonal(C_ij)*sigma*sigma - 2*sigma*C_i)
strengths = [3.0, 1.0, 0.5, 0.2] + [0.1]*(n_iterations - 4)
energy_fractions = [0.08, 0.04, 0.02] + [0.01]*(n_iterations - 3)
gauges = [50, 10] + [10]*(n_iterations - 2)
max_states = [1]*(n_iterations) # cap the number of excited states accepted per iteration
if UPDATING_HAMILTONIAN:
mus = [np.zeros(n_classifiers)]
iterations = n_iterations
for i in range(iterations):
print('iteration', i)
l = reg*np.amax(np.diagonal(C_ij)*sigma*sigma - 2*sigma*C_i)
new_mus = []
for mu in mus:
excited_states = anneal(C_i, C_ij, mu, sigma, l, strengths[i], energy_fractions[i], gauges[i], max_states[i])
for s in excited_states:
new_energy = total_hamiltonian(mu + s*sigma*zoom_factor, C_i, C_ij) / (train_size - 1)
flips = np.ones(len(s))
for a in range(len(s)):
temp_s = np.copy(s)
temp_s[a] = 0
old_energy = total_hamiltonian(mu + temp_s*sigma*zoom_factor, C_i, C_ij) / (train_size - 1)
energy_diff = new_energy - old_energy
if energy_diff > 0:
flip_prob = flip_probs[i]
flip = np.random.choice([1, flip_state], size=1, p=[1-flip_prob, flip_prob])[0]
flips[a] = flip
else:
flip_prob = flip_others_probs[i]
flip = np.random.choice([1, flip_state], size=1, p=[1-flip_prob, flip_prob])[0]
flips[a] = flip
flipped_s = s * flips
new_mus.append(mu + flipped_s*sigma*zoom_factor)
sigma *= zoom_factor
mus = new_mus
ground_energies[f,i]=total_hamiltonian(mus[0],C_i,C_ij)/(train_size-1)
ground_energies_test[f,i] = total_hamiltonian(mus[0],C_i_test,C_ij_test)/(test_size-1)
np.save('./mus/' +'mus' + str(train_size) + "_fold" + str(f) + '_iter' + str(i) + '.npy', np.array(mus))
final_predictions_train=[]
final_predictions_test=[]
strong_classifier_train = strong_classifier(predictions_train , mus[0])
strong_classifier_test = strong_classifier(predictions_test , mus[0])
for i in range(len(tag_train)) :
final_predictions_train.append([strong_classifier_train[i] , tag_train[i]])
for i in range(len(tag_test)) :
final_predictions_test.append([strong_classifier_test[i], tag_test[i]])
np.save("./strong_train_predictions/prediction_lables_train_f"+str(f)+".npy", final_predictions_train)
np.save("./strong_train_predictions/prediction_lables_test_f"+str(f)+".npy", final_predictions_test)
num += 1
np.save("./energies/ground_energies.npy",ground_energies)
np.save("./energies/ground_energies_test.npy",ground_energies_test)
|
[
"hessel.timothee@gmail.com"
] |
hessel.timothee@gmail.com
|
38c86aa214abc8f836da92f539d0ab1091f34500
|
7be9944771b967d0802fd92d4373d15ad3a53d92
|
/official/resnet/cifar10_download_and_extract.py
|
d9022117d042722ea0e6e29ed453da37a9ae5c56
|
[
"Apache-2.0"
] |
permissive
|
horsetmotiv/Single_Channel_object_detection_from_Google_API
|
ce85e14360aba0274047e5ec4fdabbfeff9dcd3b
|
de311f9607cf40e775c0fc837f22749a59c55800
|
refs/heads/master
| 2022-12-05T05:05:47.187961
| 2017-12-24T08:59:55
| 2017-12-24T08:59:55
| 111,869,378
| 1
| 2
|
Apache-2.0
| 2022-11-18T11:53:59
| 2017-11-24T02:52:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Downloads and extracts the binary version of the CIFAR-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tarfile
from six.moves import urllib
import tensorflow as tf
DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_dir', type=str, default='/tmp/cifar10_data',
help='Directory to download data and extract the tarball')
def main(unused_argv):
"""Download and extract the tarball from Alex's website."""
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(FLAGS.data_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, 100.0 * count * block_size / total_size))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(FLAGS.data_dir)
if __name__ == '__main__':
FLAGS = parser.parse_args()
tf.app.run()
|
[
"hd_chenwei@foxmail.com"
] |
hd_chenwei@foxmail.com
|
fe940ce2666cf609c7af074fa2aa6d565e20d8fe
|
756fd530e6de9706bc1cddad8d9199054cc44319
|
/app.py
|
63cb0aa2299b5dc42106312275979d6b3e5e2b3e
|
[] |
no_license
|
Rana-Tej-Singh/Dockerfile
|
7471df7f627d47f2dbd5797e6ad8d6cf23c559c8
|
71b3dcc551398bf684e33f248dfa60b03ca6bb20
|
refs/heads/master
| 2021-04-17T19:05:09.322917
| 2020-03-23T15:34:12
| 2020-03-23T15:34:12
| 249,468,182
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,729
|
py
|
#Part of a Dockerfile
#app.py && Dockerfile
from flask import Flask
from flask import render_template
import socket
import random
import os
import argparse
app = Flask(__name__)
color_codes = {
"red": "#e74c3c",
"green": "#16a085",
"blue": "#2980b9",
"blue2": "#30336b",
"pink": "#be2edd",
"darkblue": "#130f40"
}
SUPPORTED_COLORS = ",".join(color_codes.keys())
# Get color from Environment variable
COLOR_FROM_ENV = os.environ.get('APP_COLOR')
VERSION_FROM_ENV = os.environ.get('VERSION') or "v1"
# Generate a random color
COLOR = random.choice(["red", "green", "blue", "blue2", "darkblue", "pink"])
@app.route("/")
def main():
return render_template('hello.html', name=socket.gethostname(), color=color_codes[COLOR], version=VERSION_FROM_ENV)
@app.route("/color")
def color():
return COLOR
@app.route("/version")
def version():
return "Hello, Application Version: {}".format(VERSION_FROM_ENV)
@app.route("/info")
def info():
return "Hello, Application Version: {} ; Color: {}".format(VERSION_FROM_ENV, COLOR)
if __name__ == "__main__":
print(" This is a sample web application that displays a colored background. \n"
" A color can be specified in two ways. \n"
"\n"
" 1. As a command line argument with --color as the argument. Accepts one of " + SUPPORTED_COLORS + " \n"
" 2. As an Environment variable APP_COLOR. Accepts one of " + SUPPORTED_COLORS + " \n"
" 3. If none of the above then a random color is picked from the above list. \n"
" Note: Command line argument precedes over environment variable.\n"
"\n"
"")
# Check for Command Line Parameters for color
parser = argparse.ArgumentParser()
parser.add_argument('--color', required=False)
args = parser.parse_args()
if args.color:
print("Color from command line argument =" + args.color)
COLOR = args.color
if COLOR_FROM_ENV:
print("A color was set through environment variable -" + COLOR_FROM_ENV + ". However, color from command line argument takes precendence.")
elif COLOR_FROM_ENV:
print("No Command line argument. Color from environment variable =" + COLOR_FROM_ENV)
COLOR = COLOR_FROM_ENV
else:
print("No command line argument or environment variable. Picking a Random Color =" + COLOR)
# Check if input color is a supported one
if COLOR not in color_codes:
print("Color not supported. Received '" + COLOR + "' expected one of " + SUPPORTED_COLORS)
exit(1)
# Run Flask Application
app.run(host="0.0.0.0", port=8080)
|
[
"noreply@github.com"
] |
Rana-Tej-Singh.noreply@github.com
|
3c06eb6c67913b81335e806660b45c6df8246eff
|
294fbc88da56700109c8b723f4da079cd85f4376
|
/websockets/compliance/test_client.py
|
382d06a05f0edecbc0530d398e7c5096c46fa64b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
sirvan3tr/OmneePortalGOSH
|
2e190f8df117b8ce6d617c746bb078aea08b1f3d
|
fdd318e85158d9031f92336a6013c794cb02f6f3
|
refs/heads/master
| 2022-12-09T12:48:07.505324
| 2019-08-13T18:50:56
| 2019-08-13T18:50:56
| 143,923,598
| 0
| 0
|
MIT
| 2022-12-08T02:33:51
| 2018-08-07T20:28:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,530
|
py
|
import json
import logging
import urllib.parse
import asyncio
import websockets
logging.basicConfig(level=logging.WARNING)
# Uncomment this line to make only websockets more verbose.
# logging.getLogger('websockets').setLevel(logging.DEBUG)
SERVER = 'ws://127.0.0.1:8642'
AGENT = 'websockets'
@asyncio.coroutine
def get_case_count(server):
uri = server + '/getCaseCount'
ws = yield from websockets.connect(uri)
msg = yield from ws.recv()
yield from ws.close()
return json.loads(msg)
@asyncio.coroutine
def run_case(server, case, agent):
uri = server + '/runCase?case={}&agent={}'.format(case, agent)
ws = yield from websockets.connect(uri, max_size=2 ** 25, max_queue=1)
while True:
try:
msg = yield from ws.recv()
yield from ws.send(msg)
except websockets.ConnectionClosed:
break
@asyncio.coroutine
def update_reports(server, agent):
uri = server + '/updateReports?agent={}'.format(agent)
ws = yield from websockets.connect(uri)
yield from ws.close()
@asyncio.coroutine
def run_tests(server, agent):
cases = yield from get_case_count(server)
for case in range(1, cases + 1):
print("Running test case {} out of {}".format(case, cases), end="\r")
yield from run_case(server, case, agent)
print("Ran {} test cases ".format(cases))
yield from update_reports(server, agent)
main = run_tests(SERVER, urllib.parse.quote(AGENT))
asyncio.get_event_loop().run_until_complete(main)
|
[
"sirvan3tr@gmail.com"
] |
sirvan3tr@gmail.com
|
a756b56e890c28a5657fecb120bd476dc89666b8
|
064b8579d242dff116d5476d4fca00aa29623a39
|
/fbpic/particles/injection/ballistic_before_plane.py
|
19eb778b6ee50a3453ef1166b2d4af5493501299
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
soerenjalas/fbpic
|
3216f97fb1b8168135dbf0347ebd36c1aaaa28cb
|
fd8cc6f98e234bfc7ef4e60ad14d56dbca5436d6
|
refs/heads/dev
| 2021-07-18T20:14:00.255484
| 2019-07-15T17:35:20
| 2019-07-15T17:35:20
| 69,226,325
| 0
| 0
|
NOASSERTION
| 2019-02-01T09:51:59
| 2016-09-26T07:52:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,142
|
py
|
# Copyright 2017, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines a class for particle injection "through a plane".
"""
from scipy.constants import c
class BallisticBeforePlane( object ):
"""
    Class that defines particle injection "through a plane".
In practice, when using this injection method, particles
move ballistically before crossing a given plane.
This is useful when running boosted-frame simulation, whereby a
relativistic particle beam is initialized in vacuum and later enters the
plasma. In this case, the particle beam may feel its own space charge
force for a long distance (in the boosted-frame), which may alter its
properties. Imposing that particles move ballistically before a plane
(which corresponds to the entrance of the plasma) ensures that the
particles do not feel this space charge force.
"""
def __init__(self, z_plane_lab, boost):
"""
Initialize the parameters of the plane.
Parameters
----------
z_plane_lab: float (in meters)
The (fixed) position of the plane, in the lab frame
boost: a BoostConverter object, optional
Defines the Lorentz boost of the simulation.
"""
# Register the parameters of the plane
self.z_plane_lab = z_plane_lab
if boost is not None:
self.inv_gamma_boost = 1./boost.gamma0
self.beta_boost = boost.beta0
else:
            self.inv_gamma_boost = 1.
            self.beta_boost = 0.
def get_current_plane_position( self, t ):
"""
Get the current position of the plane, in the frame of the simulation
Parameters:
-----------
t: float (in seconds)
The time in the frame of the simulation
Returns:
--------
z_plane: float (in meters)
The position of the plane at t
"""
z_plane = self.inv_gamma_boost*self.z_plane_lab - self.beta_boost*c*t
return( z_plane )
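# Minimal usage sketch (an illustration added here, not part of the original module;
# it assumes the class is used on its own, outside a full fbpic simulation):
# with boost=None the plane does not move, so its position is simply z_plane_lab.
if __name__ == '__main__':
    plane = BallisticBeforePlane( z_plane_lab=100.e-6, boost=None )
    print( plane.get_current_plane_position( t=1.e-12 ) )  # expected: 0.0001 (meters)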
|
[
"remi.lehe@normalesup.org"
] |
remi.lehe@normalesup.org
|
733143b3f36644caa1b51f1a2d434c2d7a0f7319
|
e9c1f6da8e228a2f4d81e2ffc652a6ff582e9d7c
|
/Second_Live/读取列表信息创建字典.py
|
afa55f58118840e764ea2d8d59a9697db679438b
|
[] |
no_license
|
nickshaw0609/Luffycity_project
|
e8b8e32aaa1e9f13d6b291482f916e504e70a3ad
|
59c407babaa51fde279e06be580ebcc68e2f116e
|
refs/heads/main
| 2023-03-26T01:43:35.256440
| 2021-03-29T12:35:42
| 2021-03-29T12:35:42
| 347,565,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,441
|
py
|
"""
Original file format:
Enclosure Device ID: 32
Slot Number: 1
Drive's postion: DiskGroup: 0, Span: 0, Arm: 0
Enclosure position: 0
Device Id: 0
WWN: 5000C5007272C288
Sequence Number: 2
Media Error Count: 0
Other Error Count: 0
Predictive Failure Count: 0
Last Predictive Failure Event Seq Number: 0
PD Type: SAS
Raw Size: 279.396 GB
Non Coerced Size: 278.896 GB [0x22dcb25c Sectors]
Coerced Size: 278.875 GB [0x22dc0000 Sectors]
Firmware state: Online, Spun Up
Device Firmware Level: LS08
Shield Counter: 0
Successful diagnostics completion on : N/A
SAS Address(0): 0x5000c5007272c289
SAS Address(1): 0x0
Connected Port Number: 0(path0)
Inquiry Data: SEAGATE ST300MM0006 LS08S0K2B5NV
FDE Enable: Disable
Secured: Unsecured
Locked: Unlocked
Needs EKM Attention: No
Foreign State: None
Target format:
info = {
"slot":"1",
"capacity":"279.396 GB",
"model":"SEAGATE ST300MM0006 LS08S0K2B5NV",
"pd_type":"SAS"
}
"""
list = []
info = {}
target = ["Slot Number", "Raw Size", "Inquiry Data", "PD Type"]
res = ["slot", "capacity", "model", "pd_type"] # res中的元素与target中一一对应
f = open("计算机信息.txt", encoding='utf-8')
for line in f:
line = line.strip()
list.append(line)
for each in list:
    if ":" not in each:  # skip blank or malformed lines to avoid an IndexError
        continue
    target_name = each.split(":", 1)[0]
    value = each.split(":", 1)[1].strip()  # strip whitespace so values match the target format above
    if target_name in target:
        index = target.index(target_name)
        info[res[index]] = value
print(info)
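# Sanity check (assuming 计算机信息.txt contains the sample block shown in the
# module docstring): the script should then print something like
# {'slot': '1', 'pd_type': 'SAS', 'capacity': '279.396 GB', 'model': 'SEAGATE ST300MM0006 ...'}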
|
[
"1391323502@qq.com"
] |
1391323502@qq.com
|
dd26ec48b8847c767fec300c62b9f73b158b81b7
|
321881dc2b85e3e20d8ff463014e542ff07e4879
|
/setup.py
|
d182bfe2d033a9003df320d27b9a3877b31fac01
|
[
"MIT"
] |
permissive
|
mattmilten/TreeD
|
cc775191489d1e7c6ee2a15eb8eac19db85d50ed
|
1d8517c0559c848f8ca92c621a6965e00f4e4607
|
refs/heads/main
| 2023-05-23T18:48:30.891964
| 2022-04-26T10:27:14
| 2022-04-26T10:27:14
| 134,833,506
| 19
| 3
|
MIT
| 2022-01-23T11:07:02
| 2018-05-25T09:17:20
|
Python
|
UTF-8
|
Python
| false
| false
| 992
|
py
|
import re, os
from setuptools import setup, find_packages
with open(os.path.join("src", "treed", "__init__.py")) as initfile:
(version,) = re.findall('__version__ = "(.*)"', initfile.read())
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="treed",
version=version,
author="Matthias Miltenberger",
author_email="matthias.miltenberger@gmail.com",
description="3D Visualization of Branch-and-Cut Trees using PySCIPOpt",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mattmilten/TreeD",
package_dir={"": "src"},
packages=find_packages(where="src"),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=["pyscipopt", "scikit-learn", "pandas", "plotly", "networkx", "numpy"],
python_requires=">=3.6"
)
|
[
"matthias.miltenberger@gmail.com"
] |
matthias.miltenberger@gmail.com
|
527483c784518735c1ca7699d78a3b71f405c863
|
1283b27ecb31d6c95351e2560af1b9de35bade68
|
/Orientacao a Objetos/Classe Retangulo.py
|
97d1fbbfe94c32a70ab52db750c475d0e15b2e1f
|
[] |
no_license
|
Felipecard/Orientacao_a_Objetos_-POO
|
151d71410c2edf0dce732f52dff3b57e3947313a
|
abc5335d4b15d365a2f14924b74c32b2af106e43
|
refs/heads/master
| 2023-04-19T11:09:01.225415
| 2021-05-06T01:05:18
| 2021-05-06T01:05:18
| 272,838,815
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
class Retangulo:
def __init__(self, comp, larg):
self.comp = comp
self.larg = larg
def muda_lados(self, muda_comp, muda_larg):
self.comp = muda_comp
self.larg = muda_larg
def valor_lados(self):
print(f'Os lados do Retangulo são: {self.comp} cm de comp e {self.larg} cm de largura')
def area(self):
print(f'A area do retangulo é: {self.comp} x {self.larg} = {self.comp * self.larg}')
def perimetro(self):
p = (self.comp + self.larg) * 2
return f'A Perimetro do retangulo é: {self.comp} + {self.larg} + {self.comp} + {self.larg} = {p}'
# MAIN
# object:
retangulo1 = Retangulo(10, 5)
# running:
print('------------------------------------------------------------------------------')
muda_comp = int(input('O retangulo tem 10 cm de comprimento, deseja mudar pra quanto?'))
muda_larg = int(input('O retangulo tem 5 cm de largura, deseja mudar pra quanto?'))
retangulo1.muda_lados(muda_comp, muda_larg)
print('------------------------------------------------------------------------------')
retangulo1.valor_lados()
print('------------------------------------------------------------------------------')
retangulo1.area()
print('------------------------------------------------------------------------------')
print(retangulo1.perimetro())
|
[
"noreply@github.com"
] |
Felipecard.noreply@github.com
|
cd5433bc08728fb41dc6c435fa84de2143e5764b
|
6a51fcb9348b85b16867e0ea4e889f78f25b712a
|
/problems/FINAL EXAM/41.py
|
bd88a607da3ef3b3a8748f529833341b866cd1db
|
[] |
no_license
|
AudhootChavan/solved-problems-MITx-6.00.1x-edx
|
1e06b97de3bb97ea7610f614ceb67aa561a0da71
|
5d1ef059db972d70155bb8f303a615fd41d2f55f
|
refs/heads/master
| 2021-04-27T09:17:00.997150
| 2018-02-22T17:22:33
| 2018-02-22T17:22:33
| 122,510,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
class Person(object):
def __init__(self, name):
self.name = name
def say(self, stuff):
return self.name + ' says: ' + stuff
def __str__(self):
return self.name
class Lecturer(Person):
def lecture(self, stuff):
return 'I believe that ' + Person.say(self, stuff)
class Professor(Lecturer):
def say(self, stuff):
return self.name + ' says: ' + self.lecture(stuff)
class ArrogantProfessor(Professor):
def say(self, stuff):
return self.name + ' says: It is obvious that ' + Person.say(self, stuff)
def lecture(self, stuff):
return 'It is obvious that ' + Person.say(self, stuff)
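# Illustrative check (an addition, not part of the original exercise solution):
# shows how the overridden methods resolve for an ArrogantProfessor instance.
if __name__ == '__main__':
    ae = ArrogantProfessor('e')
    print(ae.say('the sky is blue'))      # e says: It is obvious that e says: the sky is blue
    print(ae.lecture('the sky is blue'))  # It is obvious that e says: the sky is blue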
|
[
"noreply@github.com"
] |
AudhootChavan.noreply@github.com
|
824fc7b60812d6d5df07974f9588de2ef2a84fb9
|
39e799aa09d31b4a3dcc48a3b2ec17699506eb63
|
/StackOverFlow Test.py
|
1b0a8489360ba1873a77d459cb30562038198c5d
|
[] |
no_license
|
raavcorp-intelligence/Raav-1.0
|
7d2d0109dfaae177ebaea92bf5493b8dfd25a647
|
31bbab8a468494dbb58420d77955d9d5fc79aa72
|
refs/heads/master
| 2020-05-14T20:17:16.208469
| 2019-10-09T13:47:17
| 2019-10-09T13:47:17
| 181,942,082
| 0
| 0
| null | 2019-10-09T13:47:18
| 2019-04-17T17:52:50
|
Python
|
UTF-8
|
Python
| false
| false
| 164
|
py
|
import os
import webbrowser
chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chrome_path).open("raavcorp.com")
|
[
"noreply@github.com"
] |
raavcorp-intelligence.noreply@github.com
|
aaf754f028f4426e8798cb218634726f7642940c
|
1798ba59a187a8868e32b4d4f5f54ec81efbf807
|
/devel/lib/python2.7/dist-packages/roborts_msgs/msg/_SupplierStatus.py
|
acd7808258d04ef98e87871718e3d2dd45bd1cd7
|
[] |
no_license
|
chalkchalk/fl1oth_ws
|
60d17ee4d9206c436a221b82e2f92d0eedd78eb0
|
4c53588c129ad206ebc1354cc55ff6d2d88863d4
|
refs/heads/master
| 2022-12-11T11:15:58.773602
| 2020-09-13T04:04:24
| 2020-09-13T04:04:24
| 294,903,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,660
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from roborts_msgs/SupplierStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SupplierStatus(genpy.Message):
_md5sum = "81f3d032e85b689acf259876e6f8d051"
_type = "roborts_msgs/SupplierStatus"
_has_header = False #flag to mark the presence of a Header object
_full_text = """#supplier status
uint8 CLOSE = 0
uint8 PREPARING = 1
uint8 SUPPLYING = 2
uint8 status
"""
# Pseudo-constants
CLOSE = 0
PREPARING = 1
SUPPLYING = 2
__slots__ = ['status']
_slot_types = ['uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SupplierStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = 0
else:
self.status = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.status,) = _get_struct_B().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.status,) = _get_struct_B().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
|
[
"1261677461@qq.com"
] |
1261677461@qq.com
|
abdc502d36c8b4623a5f79001810cd15d375cddc
|
eaa0ac20da4128ca48410d7e69da1aaca428fdd3
|
/Listes/SinglyLinkedList.py
|
1d74260244536783815487e4f8bb903389608191
|
[] |
no_license
|
seangrogan-archive/datastructures_class
|
2eab48cdd6dbb413366060ed29aca72d5f57d4d4
|
80970b4f2dc5d63759f28bcda64fe6e09aa21b00
|
refs/heads/master
| 2020-03-29T03:47:44.704872
| 2019-10-17T22:44:38
| 2019-10-17T22:44:38
| 149,501,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,167
|
py
|
from SinglyLinkedNode import SinglyLinkedNode
class SinglyLinkedList:
#implements the ADT List (List.py)
#uses the SinglyLinkedNode class (SinglyLinkedNode.py)
def __init__( self ):
self._head = None
self._last = None
self._size = 0
def __len__( self ):
return self._size
def __str__( self ):
if self.is_empty():
return "[](size = 0)"
else:
pp = "["
curr = self._head
while curr != self._last:
pp += str( curr.element ) + ", "
curr = curr.next
pp += str( curr.element ) + "]"
pp += "(size = " + str( self._size ) + ")"
return pp
def is_empty( self ):
return self._size == 0
def append( self, element ):
newNode = SinglyLinkedNode( element, None )
if self._last == None:
self._head = self._last = newNode
else:
self._last.next = newNode
self._last = newNode
self._size += 1
def insert( self, element ):
newNode = SinglyLinkedNode( element, self._head )
if self._head == None:
self._last = newNode
self._head = newNode
self._size += 1
def remove( self, k ):
if self.is_empty():
return False
else:
curr = self._head
prev = None
for i in range( k - 1 ):
prev = curr
curr = curr.next
if prev == None:
#remove the first element
self._head = curr.next
else:
                #unlink only the k-th node (previously the whole tail was dropped)
                prev.next = curr.next
                if curr == self._last:
                    self._last = prev
self._size -= 1
if self._size == 0:
self._last = None
return curr.element
def find( self, element ):
if self.is_empty():
return False
else:
curr = self._head
for i in range( self._size ):
if curr.element == element:
return i + 1
else:
curr = curr.next
def last( self ):
if self.is_empty():
return False
else:
return self._last.element
def first( self ):
if self.is_empty():
return False
else:
return self._head.element
"""unit testing
"""
if __name__ == '__main__':
data = SinglyLinkedList()
print( data )
data.append( 'titi' )
data.append( 'toto' )
data.append( 'tata' )
print( data )
idx = data.find( 'titi' )
if idx:
print( "found titi ranked", idx )
else:
print( "titi not found" )
idx = data.find( 'cece' )
if idx:
print( "found cece ranked", idx )
else:
print( "cece not found" )
print( "remove 1 =", data.remove( 1 ) )
print( "new size = ", str( len( data ) ) )
print( data )
print( "remove 2 = ", data.remove( 2 ) )
print( data )
print( "remove 1 = ", data.remove( 1 ) )
print( data )
print( "remove 1 = ", data.remove( 1 ) )
print( data )
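    # Extra check (added to exercise the middle-removal fix in remove()): deleting
    # a middle element must keep the rest of the list intact.
    data2 = SinglyLinkedList()
    data2.append( 'a' )
    data2.append( 'b' )
    data2.append( 'c' )
    print( "remove 2 =", data2.remove( 2 ) )   # removes 'b'
    print( data2 )                             # expect [a, c](size = 2)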
|
[
"noreply@github.com"
] |
seangrogan-archive.noreply@github.com
|
a1ca2e34f9cfa3283e566ce9afa85b9d85628a7c
|
f9d64555b85c5b9ca577a90f0a92ad258f23f91f
|
/amazon/makeTopEdges.py
|
50736ddb22befc2a0e9a6fcc07441056ded0dedc
|
[] |
no_license
|
wchickering/cs399
|
055530e401d12edcd902a0c952ccf00a4ef2ee7e
|
6f3325faa650d1af0e114d446f208a4ed58c33ca
|
refs/heads/master
| 2021-01-22T09:26:45.686385
| 2015-07-21T01:23:34
| 2015-07-21T01:23:34
| 16,038,545
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
#!/usr/local/bin/python
"""
Make csv containing product pairs for which we have the most information about
their similarity as per user-item collaborative filtering.
"""
from optparse import OptionParser
import sqlite3
import os
import csv
# params
outputFileTemplate = '%s.csv'
# db params
createSimilaritiesNumUsersIndexStmt =\
('CREATE INDEX IF NOT EXISTS Similarities_NumUsers_Idx ON '
'Similarities(NumUsers)')
createStoreProductsProductIdIndexStmt =\
('CREATE INDEX IF NOT EXISTS StoreProducts_ProductId_Idx ON '
'StoreProducts(ProductId)')
selectSimilaritiesStmt =\
('SELECT ProductId1, ProductId2, CosineSim, NumUsers '
'FROM Similarities '
'ORDER BY NumUsers DESC')
selectStoreProductsStmt =\
'SELECT StoreId FROM StoreProducts WHERE ProductId = :ProductId'
def getParser(usage=None):
parser = OptionParser(usage=usage)
parser.add_option('-d', '--database', dest='db_fname',
default='data/amazon.db', help='sqlite3 database file.', metavar='FILE')
parser.add_option('-o', '--output-dir', dest='outputDir', default='output',
help='Output directory.', metavar='DIR')
parser.add_option('-l', '--limit', dest='limit', type='int', default=100,
help='Limit to the number of edges made.', metavar='NUM')
parser.add_option('-s', '--storeId', dest='storeId', type='int', default=1,
help='StoreId from which to select edges.', metavar='ID')
return parser
def main():
# Parse options
usage = 'Usage: %prog [options]'
parser = getParser(usage=usage)
(options, args) = parser.parse_args()
# connect to db
print 'Connecting to %s. . .' % options.db_fname
db_conn = sqlite3.connect(options.db_fname)
with db_conn:
db_curs = db_conn.cursor()
# create indexes if not already exists
db_curs.execute(createSimilaritiesNumUsersIndexStmt)
db_curs.execute(createStoreProductsProductIdIndexStmt)
outputFileName = os.path.join(options.outputDir,
outputFileTemplate % os.path.splitext(os.path.basename(__file__))[0])
print 'Writing to %s . . .' % outputFileName
with open(outputFileName, 'wb') as csvfile:
writer = csv.writer(csvfile)
# fetch top similarity records
num_writes = 0
db_curs = db_conn.cursor()
db_curs.execute(selectSimilaritiesStmt)
for row in db_curs.fetchall():
productId1 = row[0]
productId2 = row[1]
cosineSim = row[2]
numUsers = row[3]
# skip edges where both products are not in our store
db_curs1 = db_conn.cursor()
db_curs1.execute(selectStoreProductsStmt, (productId1,))
if options.storeId not in [row[0] for row in db_curs1.fetchall()]:
continue
db_curs1.execute(selectStoreProductsStmt, (productId2,))
if options.storeId not in [row[0] for row in db_curs1.fetchall()]:
continue
# output edge
writer.writerow([productId1, productId2, cosineSim, numUsers])
num_writes += 1
if num_writes >= options.limit:
break
if __name__ == '__main__':
main()
|
[
"chickering@cs.stanford.edu"
] |
chickering@cs.stanford.edu
|
854fb1fa949f34817d2d1677281dc24c585cb6a5
|
51f1e1ba45fd9dd82873a99ae893f866c8e52ad2
|
/python/make_obs_index.py
|
435d42c226f7553ace53b0d1651e678c943dc5c6
|
[] |
no_license
|
rjleveque/tohoku2011-paper2
|
7cede95649096da9a55dbc002c55def7f09f14b0
|
44f6c6afe4a00a4ab0e0a265a273e70ee41e2ff7
|
refs/heads/master
| 2020-05-03T23:32:01.496240
| 2017-02-02T16:39:33
| 2017-02-02T16:39:33
| 19,281,330
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,643
|
py
|
"""
Script to create index html files for viewing all the observation data.
"""
import os,sys,glob
import velocities as V
from gaugedirs import set_gauges
os.chdir("../Observations")
gaugenos, HAIdirs, rundirs = set_gauges()
index_file = 'index.html'
html = open(index_file,'w')
html.write("""
<html>
<body>
<h1>Velocity plots</h1>
<p>
<a href="gauge_locations.png">Gauge locations</a>
<p>
<ul>
""")
detide_file = 'detide.html'
htmld = open(detide_file,'w')
htmld.write("""
<html>
<body>
<h1>Detiding results</h1>
<p>
Comparison of detiding using harmonic consituents vs. 15 degree
polynomial.
<ul>
""")
for gaugeno in gaugenos:
dir = HAIdirs[gaugeno]
html.write("""
<p>
<li><a href="%s/plots.html">%s</a><p>
<img src="%s/fig14.png" height=300>
<img src="%s/fig13.png" height=300>
""" % (dir,dir,dir,dir))
htmld.write("""
<p>
<li><a href="%s/plots.html">%s</a><p>
<img src="%s/u_detided.png" width=600>
<img src="%s/v_detided.png" width=600>
""" % (dir,dir,dir,dir))
subdir_index_file = os.path.join(dir,'plots.html')
html2 = open(subdir_index_file,'w')
html2.write("""
<html>
<body>
<h1>Plots for %s</h1>
<a href="../gauge_locations.png">Gauge locations</a>
<p>
<a href="%s_station_data.txt">Station data</a> ...
<a href=".">raw data files </a>
<hr>
<h2>Speed at different depths:</h2>
<p> <img src="fig10.png" width=900><p>
<hr>
<h2>Speed at different depths:</h2>
<p> <img src="fig11.png" width=900><p>
<hr>
<h2>Average speed at all depths:</h2>
<p> <img src="fig14.png" width=900><p>
<hr>
<h2> u, v at all depths:</h2>
<p> <img src="fig18.png" width=600>
<img src="fig12.png" width=600>
<hr>
<h2> u, v at all depths and average:</h2>
<p> <img src="fig17.png" width=600><p>
<hr>
<h2>Average u, v at all depths:</h2>
<p> <img src="fig16.png" width=600>
<img src="fig13.png" width=600>
<hr>
<h2>Result of de-tiding:</h2>
<p> <img src="u_detided.png" width=600>
<img src="v_detided.png" width=600><p>
""" % (dir,dir[:7]))
html2.close()
print "Created ",subdir_index_file
html.close()
htmld.close()
print "Created ",index_file
print "Created ",detide_file
|
[
"rjl@uw.edu"
] |
rjl@uw.edu
|
27070c33757f27d7344a9e80d15cdd5b1599fced
|
1e3dc39f7cd8b3a69d1530895ff91813c2e6a43a
|
/Game Arkan Python3/help.py
|
f47d0414ac86bb55274739fb58e7640b8da68027
|
[] |
no_license
|
georgy-n/python
|
7f17444f41070a5abc7ba2c9b95c964d26d8f56e
|
e38615be69fae0a5712310c1b7313512d1929302
|
refs/heads/master
| 2021-09-16T22:29:41.152026
| 2018-06-25T13:40:28
| 2018-06-25T13:40:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
from threading import Timer
def hello():
print ("hello, world")
t = Timer(1, hello)
t.start()
|
[
"gosha3548@gmail.com"
] |
gosha3548@gmail.com
|
05a9dff263d9ed7e935ae30988c9a04b8f108294
|
ab3e0ebc88754f03f9b28fa39055d7c3502a8a0e
|
/computer_guessing.py
|
fc40e8e349e8f8ed815e4d34b8b15fb67ebd16ba
|
[] |
no_license
|
apheyhys/Python
|
0dab047e8c318b75ccdf39c17ccd7aba51211a7a
|
8301d0d8bee63c8458aa1efaa3a983d9e6eb9043
|
refs/heads/master
| 2021-08-16T22:08:32.891249
| 2017-11-20T11:39:33
| 2017-11-20T11:39:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# The program tries to guess a number the user has in mind
print("Добрый день! Я отгадаю любое число, которое вы загадали в диапазоне от 1 до 100")
tries = 1
number = 50
low = 1
high = 100
guess = 0
while True:
print("Это число ", number, "?")
guess = (input("Больше, меньше, угадал?\n"))
if guess == "Больше" or guess == "больше":
low = number
number = number - (low-high)//2
elif guess == "Меньше" or guess == "меньше":
high = number
number = number + (low-high)//2
elif guess == "Угадал" or guess == "угадал":
break
else:
print("Введите команду еще раз. Я не понял.")
tries +=1
print("Мне удалось отгадать число! Это число ", number)
print("Для этого мне потребовалось ", tries, "попыток")
print("\n\nВведите Enter, для того чтобы выйти")
|
[
"apheyhys@gmail.com"
] |
apheyhys@gmail.com
|
5abb44b8a6cb1b7cb2955c03c67da586f2c5ab2d
|
ff2cff130a3ed0bb354cdb8b3f2fe0c85184585b
|
/sum_pi_window_CDS.py
|
e1be1b584481fefde3bb7271a1599008bf160b6f
|
[] |
no_license
|
bioCKO/Ostrich_Z_polymorphism
|
38e699cc806b530a1187e2f2af907a2500272c91
|
1234dd8d60f76359fd0dc189a6666be6ef710757
|
refs/heads/master
| 2020-07-30T08:32:40.612676
| 2019-06-14T11:34:12
| 2019-06-14T11:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,986
|
py
|
#!/usr/bin/python
from __future__ import division
import sys
f1 = open(sys.argv[1], "r")
scaffold_order = ["superscaffold26", "superscaffold54", "superscaffold35", "superscaffold36", "superscaffold62", "superscaffold67", "superscaffold69-1", "superscaffold93", "superscaffold63", "superscaffold88", "superscaffold83", "superscaffold92"]
f1_dict = {}
f1_list = []
for line in f1:
line = line.strip("\n").split("\t")
key = line[0]+"_"+line[12]+"_"+line[13]
value = line
if not key in f1_list:
f1_list.append(key)
if key in f1_dict.keys():
f1_dict[key].append(value)
else:
f1_dict[key] = [value]
#175224 77196 161059 161087 48809 48868 6147 2657 60070 66414 100986 46979 187155 187157 68091 27560
#print(f1_list)
#print(f1_dict)
header=["Scaffold", "Window_start", "Window_end", "pi_per_window"]
print("\t".join(header))
for scaffold in scaffold_order:
for key in f1_list:
if key.split("_")[0] == scaffold:
#print(key)
#for key in f1_dict.keys():
# Number of segregating sites per window
#snp_count = len(f1_dict[key])
pi_count = []
for element in f1_dict[key]:
pi_count.append(float(element[6]))
pi_count_key = sum(pi_count)
if key.split("_")[0] == "superscaffold54":
if int(key.split("_")[1]) > 16379243: # Removing the non-Z linked part of superscaffold54
pass
else:
print(key.split("_")[0]+"\t"+key.split("_")[1]+"\t"+key.split("_")[2]+"\t"+str(pi_count_key/int(element[22])))#+"\t"+str(int(element[14])/int(element[13]))+"\t"+str((int(element[16])-int(element[15]))/int(element[13]))+"\t"+str(int(element[19])/int(element[13]))) # repeat density# CDS density
else:
#print(element[11])
print(key.split("_")[0]+"\t"+key.split("_")[1]+"\t"+key.split("_")[2]+"\t"+str(pi_count_key/int(element[22])))#+"\t"+str(int(element[14])/int(element[13]))+"\t"+str((int(element[16])-int(element[15]))/int(element[13]))+"\t"+str(int(element[19])/int(element[13]))) # repeat density# CDS density
|
[
"homap@rackham3.uppmax.uu.se"
] |
homap@rackham3.uppmax.uu.se
|
501506f493746ede91b66db7cad78e8be0a2ba23
|
ab3cb316f9f10abfe7208b726670875de0b7d772
|
/rdmo/projects/filters.py
|
5478990d85dcad77f52ef30245de1099a68235dd
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] |
permissive
|
hkrock/rdmo
|
9e1e8222ec7368c9c1cca8924af079b6465e34de
|
80bbd3b5749f48a918e9aa4549a96479bf665b93
|
refs/heads/master
| 2020-09-04T14:33:33.136791
| 2020-02-24T09:04:08
| 2020-02-24T09:04:08
| 219,756,770
| 0
| 0
|
Apache-2.0
| 2019-11-05T14:36:51
| 2019-11-05T13:52:13
| null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
from rest_framework.filters import BaseFilterBackend
from rdmo.domain.models import Attribute
class ValueFilterBackend(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
set_attribute = request.GET.get('set_attribute')
if set_attribute:
try:
attribute = Attribute.objects.get(pk=set_attribute)
attributes = attribute.get_descendants(include_self=True).filter()
queryset = queryset.filter(attribute__in=attributes)
except Attribute.DoesNotExist:
queryset = queryset.none()
return queryset
|
[
"jklar@aip.de"
] |
jklar@aip.de
|
c090f5ff80dc662d91291d81c61d3fa00dd1ad64
|
62ba42e846b3ee9882ed5818402f9db99d881523
|
/training/training_helper.py
|
9232206ebe65e1f884c2050ab2a19a6db3b3d2c9
|
[
"BSD-3-Clause"
] |
permissive
|
agomez08/patrones_proyecto1
|
36cd43c3c32052625f96ce37521bd3a817275439
|
7d5fb626c76365acf8243ac4eb54338d9c429e94
|
refs/heads/main
| 2023-04-09T12:05:44.308389
| 2021-04-24T16:33:14
| 2021-04-24T16:33:14
| 349,795,598
| 0
| 0
| null | 2021-04-12T01:59:13
| 2021-03-20T17:44:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,620
|
py
|
"""This module implements helper functions to assist with the training process."""
import numpy as np
import torch
def update_total_corrects(target, predictions, class_correct_list, class_total_list):
"""Perform update on lists of total and correct predictions based on last run of model."""
# Compare predictions against target
correct_tensor = predictions.eq(target.data.view_as(predictions))
if torch.cuda.is_available():
# Bring back to CPU if GPU was being used
correct = np.squeeze(correct_tensor.cpu().numpy())
else:
correct = np.squeeze(correct_tensor.numpy())
# For each of the possible targets
for i in range(target.size(0)):
# Save the number of elements classified correctly and the total number of elements
label = target.data[i]
class_correct_list[label] += correct[i].item()
class_total_list[label] += 1
def train_batch(optimizer, model, criterion, inputs, target):
"""Perform one iteration of training over a batch of the dataset."""
# Start batch with zero gradients
optimizer.zero_grad()
# Forward pass inputs through model
logps = model.forward(inputs)
# Calculate loss for predictions
loss = criterion(logps, target)
# Do back propagation to update gradient
loss.backward()
# Let the optimizer update coefficients
optimizer.step()
# Use logps probabilities to determine the prediction (prediction is class with maximum logps)
_, predictions = torch.max(logps, 1)
# Calculate and return batch loss
batch_loss = loss.item() * inputs.size(0)
return batch_loss, predictions
def train_epoch(num_classes, optimizer, model, criterion, train_loader, device):
"""Perform one epoch iteration of training."""
# Switch model to training mode
epoch_loss = 0.0
model.train()
class_correct_list = list(0. for _ in range(num_classes))
class_total_list = list(0. for _ in range(num_classes))
# Run through each of the batches
for batch_idx, (inputs, target) in enumerate(train_loader):
# print("DEBUG: Starting TRAINING batch {}".format(batch_idx))
        # Move tensors to GPU if available
inputs, target = inputs.to(device), target.to(device)
# Perform training over batch and update training loss
batch_loss, predictions = train_batch(optimizer, model, criterion, inputs, target)
epoch_loss += batch_loss
# Update total count and correct predictions count from this last run
update_total_corrects(target, predictions, class_correct_list, class_total_list)
# Calculate global accuracy
global_accuracy = 100. * np.sum(class_correct_list) / np.sum(class_total_list)
# Return average epoch loss and accuracy for this epoch
return epoch_loss / len(train_loader.sampler), global_accuracy
def validate_batch(model, criterion, inputs, target):
"""Perform one iteration of training over a batch of the dataset."""
# Forward pass inputs through model
logps = model.forward(inputs)
# Calculate loss for predictions
loss = criterion(logps, target)
# Use logps probabilities to determine the prediction (prediction is class with maximum logps)
_, predictions = torch.max(logps, 1)
# Calculate and return batch loss
validation_loss = loss.item() * inputs.size(0)
return validation_loss, predictions
def validate_epoch(num_classes, model, criterion, valid_loader, device):
"""Perform one epoch iteration of validation."""
# Switch model to validation mode
epoch_loss = 0.0
model.eval()
class_correct_list = list(0. for _ in range(num_classes))
class_total_list = list(0. for _ in range(num_classes))
for batch_idx, (inputs, target) in enumerate(valid_loader):
# print("DEBUG: Starting VALIDATION batch {}".format(batch_idx))
        # Move tensors to GPU if available
inputs, target = inputs.to(device), target.to(device)
# Perform validation over batch and update validation loss
batch_loss, predictions = validate_batch(model, criterion, inputs, target)
epoch_loss += batch_loss
# Update total count and correct predictions count from this last run
update_total_corrects(target, predictions, class_correct_list, class_total_list)
# Calculate global accuracy
global_accuracy = 100. * np.sum(class_correct_list) / np.sum(class_total_list)
# Return average epoch loss and accuracy for this epoch
return epoch_loss / len(valid_loader.sampler), global_accuracy
def test_eval_batch(model, criterion, inputs, target):
"""Perform one iteration of testing over a batch of the dataset and report loss and predictions."""
# Forward pass inputs through model
logps = model.forward(inputs)
# Calculate loss for predictions
loss = criterion(logps, target)
# Use logps probabilities to determine the prediction (prediction is class with maximum logps)
_, predictions = torch.max(logps, 1)
# Calculate batch loss
validation_loss = loss.item() * inputs.size(0)
return validation_loss, predictions
def test_eval(num_classes, model, criterion, test_loader, device):
"""Perform evaluation of results for trained model over the testing portion of the dataset."""
# track test loss
test_loss = 0.0
class_correct_list = list(0. for _ in range(num_classes))
class_total_list = list(0. for _ in range(num_classes))
model.eval()
# iterate over test data
for batch_idx, (inputs, target) in enumerate(test_loader):
# print("DEBUG: Starting TESTING batch {}".format(batch_idx))
        # Move tensors to GPU if available
inputs, target = inputs.to(device), target.to(device)
# Perform evaluation over batch and update test loss
batch_loss, predictions = test_eval_batch(model, criterion, inputs, target)
test_loss += batch_loss
# Update total count and correct predictions count from this last run
update_total_corrects(target, predictions, class_correct_list, class_total_list)
# Compare predictions against target
# correct_tensor = predictions.eq(target.data.view_as(predictions))
# if torch.cuda.is_available():
# # Bring back to CPU if GPU was being used
# correct = np.squeeze(correct_tensor.cpu().numpy())
# else:
# correct = np.squeeze(correct_tensor.numpy())
# # For each of the possible targets
# for i in range(target.size(0)):
# # Save the number of elements classified correctly and the total number of elements
# label = target.data[i]
# class_correct_list[label] += correct[i].item()
# class_total_list[label] += 1
# Determine average testing loss
test_loss_avg = test_loss / len(test_loader.dataset)
# Determine results for each of the classes
classes_results = []
for i in range(num_classes):
class_correct = np.sum(class_correct_list[i])
class_total = np.sum(class_total_list[i])
class_accuracy = 100 * class_correct / class_total
class_results = {'class_accuracy': class_accuracy, 'class_correct': class_correct, 'class_total': class_total}
classes_results.append(class_results)
# Calculate global accuracy
global_accuracy = 100. * np.sum(class_correct_list) / np.sum(class_total_list)
# Put all results in dictionary and return it
results = {'test_loss_avg': test_loss_avg, 'classes_results': classes_results, 'global_accuracy': global_accuracy}
return results
|
[
"agomez10010@gmail.com"
] |
agomez10010@gmail.com
|
646a0acfffa0d6a2d2486824e8da708f5a3191f0
|
1d78266ba83052ed16c1a3a2401c38e617301e60
|
/inference.py
|
0878f6171e6e5f9c1170def1daca7473ea1ccad4
|
[] |
no_license
|
christiankiesewetter/tf2_chatbot
|
e00d45d91fa91ed27d0bc20097964f182e16757d
|
0b45abf4021b123a5821d0575017fc5729babd05
|
refs/heads/main
| 2023-04-15T01:36:47.085624
| 2021-04-29T08:51:47
| 2021-04-29T08:51:47
| 348,070,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
import pickle
from model import Encoder, Decoder
from preprocessing2 import preformat
import tensorflow_addons as tfa
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow_addons.seq2seq import BasicDecoder, AttentionWrapper, BahdanauAttention
from tensorflow_addons.seq2seq.sampler import TrainingSampler
from tensorflow.keras.optimizers import Adam
class InferenceModel:
def __init__(self,
checkpoint_dir,
embedding_dim,
units,
input_length,
output_length,
batch_size):
self.batch_size = batch_size
self.units = units
self.input_length = input_length
self.output_length = output_length
with open(os.path.join(checkpoint_dir, 'tokenizer.pickle'), 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.encoder = Encoder(
vocab_size = self.tokenizer.num_words,
embedding_dims = embedding_dim,
encoder_units = units,
batch_size = batch_size)
self.decoder = Decoder(
vocab_size = self.tokenizer.num_words,
embedding_dims = embedding_dim,
decoder_units = units,
batch_size = batch_size,
max_length_input = input_length,
max_length_output = output_length)
checkpoint = tf.train.Checkpoint(encoder = self.encoder,
decoder = self.decoder)
checkpoint.restore(
tf.train.latest_checkpoint(checkpoint_dir)).expect_partial()
greedy_sampler = tfa.seq2seq.GreedyEmbeddingSampler(
self.decoder.embedding)
# Instantiate BasicDecoder object
self.decoder_instance = tfa.seq2seq.BasicDecoder(
cell = self.decoder.rnn_cell,
sampler = greedy_sampler,
output_layer = self.decoder.fc,
maximum_iterations = 160)
def __call__(self, input_sequence):
input_sequence = preformat(input_sequence)
input_sequence = self.tokenizer.texts_to_sequences([input_sequence])
input_sequence = tf.constant(input_sequence)
enc_start_state = [tf.zeros((self.batch_size, self.units)), # _h
tf.zeros((self.batch_size, self.units)), # _c
tf.zeros((self.batch_size, self.units)), # _hbw
tf.zeros((self.batch_size, self.units))] # _cbw
enc_out, enc_h, enc_c, enc_hbw, enc_cbw = self.encoder(input_sequence, enc_start_state)
dec_h = enc_h
dec_c = enc_c
start_tokens = tf.constant([self.tokenizer.word_index['<sos>']])
end_token = self.tokenizer.word_index['<eos>']
# Setup Memory in decoder stack
self.decoder.attn.setup_memory(enc_out)
# set decoder_initial_state
decoder_initial_state = self.decoder.build_initial_state(
self.batch_size,
[dec_h, dec_c],
tf.float32)
outputs, state, lengths = self.decoder_instance(None,
start_tokens = start_tokens,
end_token = end_token,
initial_state = decoder_initial_state)
phrase = " ".join([self.tokenizer.index_word[o] for o in outputs.sample_id.numpy()[0] if o != 0 and self.tokenizer.index_word[o] not in ['<out>', '<eos>', '<sos>']])
return phrase, outputs, state, lengths
if __name__ == '__main__':
inference = InferenceModel(
checkpoint_dir = './training_checkpoints',
embedding_dim = 512,
units = 512,
input_length = 382,
output_length = 686,
batch_size = 1)
keep_talking = True
while keep_talking:
text = input("Your turn:")
phrase, _, _, _ = inference(text.lower())
print(phrase)
keep_talking = (text.lower() != 'enough')
|
[
"corkscrew.ki@gmail.com"
] |
corkscrew.ki@gmail.com
|
6f9d1391cf496c1d627ce0360a58a8f635112566
|
43b99b3c75b596583f37537ebf70a99bf13d640c
|
/rover_driver/nodes/rover_command.py
|
ea42baf9a2abcda93937e2d137a98f9c78d8b5f0
|
[] |
no_license
|
anliec/GTL_autonomiousRobotic
|
f7edddea9850058b024fb111fd0cc911a73b342a
|
d3770c73712d309f02d81443e7cf421a4bc518dd
|
refs/heads/master
| 2021-09-13T09:05:05.267456
| 2018-04-27T13:11:25
| 2018-04-27T13:11:25
| 117,143,457
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,085
|
py
|
#!/usr/bin/env python
import roslib;
roslib.load_manifest('rover_driver')
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist, Pose
from math import atan2, hypot, pi, cos, sin
import tf
import message_filters
import numpy
from numpy.linalg import pinv
from rover_driver.rover_kinematics import *
class RoverDriver:
def __init__(self, name):
self.name = name
rospy.init_node('rover_driver')
self.name = rospy.get_param("~rover_name", self.name)
self.skidsteer = rospy.get_param("~skidsteer", False)
self.check_timeout = rospy.get_param("~check_timeout", True)
rospy.loginfo("Starting rover driver for rover '%s'" % self.name)
self.last_cmd = rospy.Time.now()
self.listener = tf.TransformListener()
self.steering_pub = {}
self.drive_pub = {}
self.ready = False
self.kinematics = RoverKinematics()
self.twist_sub = rospy.Subscriber('~twistCommand', Twist, self.twist_cb)
# print "Initialising wheel data structure"
for k in prefix:
self.steering_pub[k] = rospy.Publisher("/vrep/%s/%sSteerCommand" % (self.name, k), Float64, queue_size=1)
self.drive_pub[k] = rospy.Publisher("/vrep/%s/%sDriveCommand" % (self.name, k), Float64, queue_size=1)
def twist_cb(self, twist):
if not self.ready:
return
# print "Got twist: " + str(twist)
self.last_cmd = rospy.Time.now()
# Get the pose of all drives
drive_cfg = {}
for k in prefix:
# try:
# self.listener.waitForTransform('/%s/ground'%(self.name),
# '/%s/%sDrive'%(self.name,k), self.last_cmd, rospy.Duration(1.0))
((x, y, z), rot) = self.listener.lookupTransform('/%s/ground' % (self.name),
'/%s/%sDrive' % (self.name, k), rospy.Time(0))
drive_cfg[k] = DriveConfiguration(self.radius[k], x, y, z)
# except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
# return
# Now compute for each drive, its rotation speed and steering angle
motors = self.kinematics.twist_to_motors(twist, drive_cfg, self.skidsteer)
self.publish(motors)
def publish(self, motor):
for k in prefix:
self.drive_pub[k].publish(Float64(motor.drive[k]))
self.steering_pub[k].publish(Float64(motor.steering[k]))
def run(self):
timeout = True
rate = rospy.Rate(10)
rospy.loginfo("Waiting for initial transforms")
rospy.sleep(1.0)
self.radius = {}
for k in prefix:
try:
self.listener.waitForTransform('/%s/ground' % (self.name),
'/%s/%sDrive' % (self.name, k), rospy.Time(0), rospy.Duration(5.0))
((x, y, z), rot) = self.listener.lookupTransform('/%s/ground' % (self.name),
'/%s/%sDrive' % (self.name, k), rospy.Time(0))
self.radius[k] = z
rospy.loginfo("Got transform for " + k)
except tf.Exception, e:
rospy.logerr("TF exception: " + repr(e))
self.ready = True
while not rospy.is_shutdown():
if self.check_timeout:
if (rospy.rostime.get_time() - self.last_cmd.to_sec()) < 0.5:
if timeout:
timeout = False
rospy.loginfo("Accepting joystick commands")
else:
if not timeout:
timeout = True
rospy.loginfo("Timeout: ignoring joystick commands")
motors = RoverMotors()
self.publish(motors)
rate.sleep()
if __name__ == '__main__':
try:
rd = RoverDriver("rover")
rd.run()
except rospy.ROSInterruptException:
pass
|
[
"nsix@georgiatech-metz.fr"
] |
nsix@georgiatech-metz.fr
|
64a1c1a296f4dcf13bad2e935f769f610f9a5e1d
|
9d102cd2d22c6bdad2ec455df0a85d9ffb9a0f5f
|
/clients/views.py
|
6f6c8699eae658aaf6d4c86b83a6c71d22f2e46e
|
[] |
no_license
|
dave-caputo/matmod
|
6a9664a02be8ecf92ff4112bde174b9063422fbb
|
441b947d669ae92c5f5f0d107deebd0fb26dcbef
|
refs/heads/master
| 2021-08-17T17:41:14.244217
| 2019-05-24T20:23:23
| 2019-05-24T20:23:23
| 139,707,479
| 0
| 0
| null | 2021-06-10T17:36:13
| 2018-07-04T10:32:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,609
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse, reverse_lazy
from django.views import generic
from .forms import ClientForm
from .models import Client
class ClientCreateView(LoginRequiredMixin, generic.CreateView):
form_class = ClientForm
model = Client
template_name = 'clients/create.html'
success_url = reverse_lazy('clients:create')
def get_initial(self):
initial = super().get_initial()
initial['org'] = self.request.user.org
return initial
class ClientListView(generic.ListView):
model = Client
template_name = 'clients/list.html'
def get_queryset(self):
qs = super().get_queryset()
if self.request.user.is_superuser:
return qs
else:
            return qs.filter(org=self.request.user.org, id=self.request.user.client.id)
class ClientDetailView(LoginRequiredMixin, generic.DetailView):
model = Client
template_name = 'clients/detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['assessment_list'] = self.object.assessments.all()
return context
class ClientUpdateView(LoginRequiredMixin, generic.UpdateView):
model = Client
template_name = 'clients/update.html'
fields = ['name']
def get_success_url(self):
return reverse('clients:update', kwargs={'pk': self.kwargs['pk']})
class ClientDeleteView(LoginRequiredMixin, generic.DeleteView):
model = Client
template_name = 'clients/delete.html'
success_url = reverse_lazy('dashboard:index')
|
[
"davecaputo@hotmail.com"
] |
davecaputo@hotmail.com
|
c5602665fc89b52a000ff68ae6a0d88bfac337d6
|
933d52b6f7e982de026d7fcabf51b6e043b8e0cb
|
/06/17/dictionary.py
|
1afa0a6e7c698a330b4266e8c5faa8284fbeb90d
|
[] |
no_license
|
niuyaning/PythonProctice
|
23733ec4c6255c681779c14c0b7572932f1107ca
|
7637b3ae20694afafab0207ec243a9fccff60b0e
|
refs/heads/master
| 2023-06-03T12:14:20.936682
| 2021-06-21T16:30:17
| 2021-06-21T16:30:17
| 279,471,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# Define a dictionary and read values from it
dict = {"color":'red','points':5}
# Get the keys
print(dict.keys())
# Get the values
print(dict.values())
# Access a value in the dictionary
print(dict['color'])
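# A further illustration (not in the original snippet): .get() is the safer lookup
# when a key might be missing, since it returns a default instead of raising KeyError.
print(dict.get('points'))        # 5
print(dict.get('size', 'n/a'))   # 'n/a' because 'size' is not a key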
|
[
"Niu#19930819"
] |
Niu#19930819
|
ee8e5076ebe154b1f70ce12971b35adfc2f95190
|
016f7c173e3de8c379786d3896b74961456ffa82
|
/Autocoders/Python/src/fprime_ac/generators/writers/ImplHWriter.py
|
0cb08ccd40bf0a90818fb292d95f061d046bdc0d
|
[
"Apache-2.0"
] |
permissive
|
nodcah/fprime
|
4299c14c7a9eff96868e99f8550986122ba2429b
|
d19cade2140231b4e0879b2f6ab4a62b25792dea
|
refs/heads/master
| 2020-12-03T20:18:49.244510
| 2020-02-22T00:59:49
| 2020-02-22T00:59:49
| 231,472,301
| 0
| 0
|
Apache-2.0
| 2020-02-22T00:59:51
| 2020-01-02T22:52:59
| null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
#!/bin/env python
#===============================================================================
# NAME: ImplHWriter.py
#
# DESCRIPTION: A writer class for generating component implementation
# header files.
#
# AUTHOR: Jordan Ishii
# EMAIL: jordan.ishii@jpl.nasa.gov
# DATE CREATED : August 8, 2019
#
# Copyright 2015, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
from fprime_ac.utils import ConfigManager
from fprime_ac.generators.templates.impl import hpp
from fprime_ac.generators.writers import ImplWriterBase
class ImplHWriter(ImplWriterBase.ImplWriterBase):
"""
A writer class for generating component implementation header files.
"""
__config = None
def __init__(self):
self.__config = ConfigManager.ConfigManager.getInstance()
self.initBase("ImplH")
def emitPortParams(self, params):
return self.emitPortParamsHpp(10, params)
def emitNonPortParams(self, params):
return self.emitNonPortParamsHpp(10, params)
def _startSourceFilesWrite(self, obj):
c = hpp.hpp()
self.init(obj, c)
self.initImpl(obj, c)
c.emit_port_params = self.emitPortParams
c.emit_non_port_params = self.emitNonPortParams
self._writeTmpl(c, "startSourceFilesVisit")
def write(self, obj):
"""
Calls all of the write methods so that full file is made
"""
self.setFileName(obj)
self.initFilesWrite(obj)
self._startSourceFilesWrite(obj)
self.includes1Write(obj)
self.includes2Write(obj)
self.namespaceWrite(obj)
self.publicWrite(obj)
self.protectedWrite(obj)
self.privateWrite(obj)
self.finishSourceFilesWrite(obj)
def setFileName(self, obj):
self.FILE_NAME = obj.get_name() + self.__config.get("component", "ImplH")
def toString(self):
return self.FILE_NAME
|
[
"jishii@jpl.nasa.gov"
] |
jishii@jpl.nasa.gov
|
068c05ed5f8dc46f479acf2bfa78ee3916a7daaf
|
098c13f4a3edd3aba879ab097a0d26904a63d8d4
|
/headlines.py
|
29f2a1f7cb653a869788382b91dbea2d7fed2cb1
|
[] |
no_license
|
kooltzh/headlines
|
b13158cd6b321fcf37305ac5ac9f9e59727ee9f1
|
f76e5554199c308c416bd0db7992bbdbd97e296c
|
refs/heads/master
| 2020-04-10T21:56:11.895678
| 2018-12-18T07:32:01
| 2018-12-18T07:32:01
| 161,311,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
import feedparser
from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)
RSS_FEEDS = {'bbc': 'http://newsrss.bbc.co.uk/rss/newsonline_uk_edition/front_page/rss.xml',
'abc': 'http://feeds.abcnews.com/abcnews/topstories',
'cnn': 'http://rss.cnn.com/rss/edition.rss'}
@app.route("/")
def get_news():
query = request.args.get("publication")
if not query or query.lower() not in RSS_FEEDS:
publication = "bbc"
else:
publication = query.lower()
feed = feedparser.parse(RSS_FEEDS[publication])
return render_template("home.html", articles=feed['entries'])
if __name__ == '__main__':
app.run(port=5000, debug=True)
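# Example requests once the server is running (illustrative, derived from the
# routes above): the "publication" query parameter picks the feed, and anything
# unrecognised falls back to BBC.
#   http://localhost:5000/?publication=cnn  -> CNN headlines
#   http://localhost:5000/                  -> BBC headlines (default)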
|
[
"kooltzh@gmail.com"
] |
kooltzh@gmail.com
|
347d93084f1dc8f8135b5a156ffd05976db7f167
|
3bc75306737fe903553249e57657e2d2e3eaf7bc
|
/crawler_bulbapedia.py
|
0999dede5ae9e70c41d4d3c48b1e7a59bb96dc97
|
[] |
no_license
|
ThiagoLira/pkm-ep-generator
|
61d8847124c3f7b9f9f97528ecf16602238e2c69
|
8f88ee918e38ba86a7a0d8775e6778d23191a4f4
|
refs/heads/master
| 2023-04-06T23:14:58.153577
| 2019-12-05T16:08:57
| 2019-12-05T16:08:57
| 216,455,560
| 6
| 1
| null | 2023-03-24T23:19:13
| 2019-10-21T01:47:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,137
|
py
|
import requests,re
from bs4 import BeautifulSoup
import os.path
# skip these, as the crawler messes up their text
problematic_episodes_list = ['AG120','XY124','DP048','DP120']
# We don't need this warning in the middle of our corpus hehe
str_warning = "This article does not yet meet the quality standards of Bulbapedia."
str_warning2 = "Please feel free to edit this article to make it conform to Bulbapedia norms and conventions."
str_warning3 = "quality standards of Bulbapedia"
str_warning4 = "This section does not yet meet the . Please feel free to edit this section to make it conform to Bulbapedia norms and conventions."
def prepareEpisode (link):
html = requests.get(link).text
episode_name = re.search('/wiki/(.*)',link)
episode_name = episode_name.group(1)
soup = BeautifulSoup(html, "lxml")
texto_que_importa = soup.find_all('h2')[2]
s = ''
for node in texto_que_importa.next_elements:
if(hasattr(node,'text')):
if(node.text == 'Major events'):
break
else:
if (len(node.text) > 20):
s = s + node.text
file_name = episode_name + '.txt'
with open('data/pokeCorpusBulba/' + file_name, 'w') as text_file:
if (episode_name not in problematic_episodes_list):
# remove warning if it is present in this particular episode
s = re.sub(str_warning,'',s)
s = re.sub(str_warning2,'',s)
s = re.sub(str_warning3,'',s)
s = re.sub(str_warning4,'',s)
text_file.write(s)
episode_links = []
main_link = 'https://bulbapedia.bulbagarden.net/wiki/'
for suffix,final_epi in [("EP",274),("AG",192),("DP",191),("XY",140),("SM",140)]:
for i in range (1, final_epi + 1):
# pad the episode number to a 3-digit string with leading zeros
ep_number = '{:=03d}'.format(i)
episode_links.append(main_link + suffix + ep_number)
if not os.path.exists('data/pokeCorpusBulba'):
os.makedirs('data/pokeCorpusBulba')
#print (episode_links)
for link in episode_links:
prepareEpisode(link)
print('Saved episode ' + link)
|
[
"thlira15@gmail.com"
] |
thlira15@gmail.com
|
ea923ea511391a51833ed29f1a4a3feb6e8d5795
|
130b2355289dfefc188d6465ae72e24783e7076f
|
/estate_management/estate_management/doctype/contract_breakdown/contract_breakdown.py
|
802b262955fd47846d0314864037ba7dfa133e8c
|
[
"MIT"
] |
permissive
|
brownharryb/estate_management
|
3fe0e4d0c8229463f46aca2af83218769110630a
|
27bfd1ea1856ca3f8988371a69b2fe13368b1899
|
refs/heads/master
| 2021-01-17T15:49:56.288997
| 2016-08-30T22:56:02
| 2016-08-30T22:56:02
| 66,996,783
| 0
| 3
| null | 2016-08-31T02:48:08
| 2016-08-31T02:48:08
| null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Manqala and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ContractBreakdown(Document):
pass
|
[
"chude.osiegbu@manqala.com"
] |
chude.osiegbu@manqala.com
|
50b608a288999c17ad8d1e709908ce57ad2def86
|
dead1da1f4a6b40d746d28eb888aea00ebd734a0
|
/Core/admin.py
|
19bcd45feb9ed79b0d15532be6cb09a81eb2f3ac
|
[] |
no_license
|
jamshi/django-formbuilder
|
7437daa00cd608b6a539a13bd9932f63e17f571b
|
65e97e7164527d3ec1ed10e8ebcccc4dd6356378
|
refs/heads/master
| 2021-01-22T00:52:29.901219
| 2017-09-02T18:55:00
| 2017-09-02T18:55:00
| 102,197,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
from django.contrib import admin
from .models import MyForms
# Register your models here.
admin.site.register(MyForms)
|
[
"jamshi.onnet@gmail.com"
] |
jamshi.onnet@gmail.com
|
2a1e4abd8fc88f0bbc1813bd1cc78d160eddd381
|
99d17ddba93db1105e8941b1b592d9d9e22864fb
|
/superset/explore/schemas.py
|
457c99422a3a6ee05d7bfc0931fa3976cc4d979c
|
[
"Apache-2.0",
"OFL-1.1"
] |
permissive
|
apache-superset/incubator-superset
|
f376dc15d6e2187d0b65a0dc5476d6c6c3378f21
|
0945d4a2f46667aebb9b93d0d7685215627ad237
|
refs/heads/master
| 2023-03-15T04:12:40.478792
| 2022-07-25T14:44:43
| 2022-07-25T14:44:43
| 146,225,581
| 21
| 20
|
Apache-2.0
| 2023-03-13T16:00:14
| 2018-08-26T23:56:08
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 5,622
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# License ); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marshmallow import fields, Schema
class DatasetSchema(Schema):
cache_timeout = fields.Integer(
description="Duration (in seconds) of the caching timeout for this dataset."
)
column_formats = fields.Dict(description="Column formats.")
columns = fields.List(fields.Dict(), description="Columns metadata.")
database = fields.Dict(description="Database associated with the dataset.")
datasource_name = fields.String(description="Dataset name.")
default_endpoint = fields.String(description="Default endpoint for the dataset.")
description = fields.String(description="Dataset description.")
edit_url = fields.String(description="The URL for editing the dataset.")
extra = fields.Dict(
description="JSON string containing extra configuration elements."
)
fetch_values_predicate = fields.String(
description="Predicate used when fetching values from the dataset."
)
filter_select = fields.Bool(description="SELECT filter applied to the dataset.")
filter_select_enabled = fields.Bool(description="If the SELECT filter is enabled.")
granularity_sqla = fields.List(
fields.List(fields.Dict()),
description=(
"Name of temporal column used for time filtering for SQL datasources. "
"This field is deprecated, use `granularity` instead."
),
)
health_check_message = fields.String(description="Health check message.")
id = fields.Integer(description="Dataset ID.")
is_sqllab_view = fields.Bool(description="If the dataset is a SQL Lab view.")
main_dttm_col = fields.String(description="The main temporal column.")
metrics = fields.List(fields.Dict(), description="Dataset metrics.")
name = fields.String(description="Dataset name.")
offset = fields.Integer(description="Dataset offset.")
order_by_choices = fields.List(
fields.List(fields.String()), description="List of order by columns."
)
owners = fields.List(fields.Integer(), description="List of owners identifiers")
params = fields.Dict(description="Extra params for the dataset.")
perm = fields.String(description="Permission expression.")
schema = fields.String(description="Dataset schema.")
select_star = fields.String(description="Select all clause.")
sql = fields.String(description="A SQL statement that defines the dataset.")
table_name = fields.String(
description="The name of the table associated with the dataset."
)
template_params = fields.Dict(description="Table template params.")
time_grain_sqla = fields.List(
fields.List(fields.String()),
description="List of temporal granularities supported by the dataset.",
)
type = fields.String(description="Dataset type.")
uid = fields.String(description="Dataset unique identifier.")
verbose_map = fields.Dict(description="Mapping from raw name to verbose name.")
class SliceSchema(Schema):
cache_timeout = fields.Integer(
description="Duration (in seconds) of the caching timeout for this chart."
)
certification_details = fields.String(description="Details of the certification.")
certified_by = fields.String(
description="Person or group that has certified this dashboard."
)
changed_on = fields.String(description="Timestamp of the last modification.")
changed_on_humanized = fields.String(
description="Timestamp of the last modification in human readable form."
)
datasource = fields.String(description="Datasource identifier.")
description = fields.String(description="Slice description.")
description_markeddown = fields.String(
description="Sanitized HTML version of the chart description."
)
edit_url = fields.String(description="The URL for editing the slice.")
form_data = fields.Dict(description="Form data associated with the slice.")
is_managed_externally = fields.Bool(
description="If the chart is managed outside externally."
)
modified = fields.String(description="Last modification in human readable form.")
owners = fields.List(fields.Integer(), description="Owners identifiers.")
query_context = fields.Dict(description="The context associated with the query.")
slice_id = fields.Integer(description="The slice ID.")
slice_name = fields.String(description="The slice name.")
slice_url = fields.String(description="The slice URL.")
class ExploreContextSchema(Schema):
form_data = fields.Dict(
description=(
"Form data from the Explore controls used to form the "
"chart's data query."
)
)
dataset = fields.Nested(DatasetSchema)
slice = fields.Nested(SliceSchema)
message = fields.String(description="Any message related to the processed request.")
|
[
"noreply@github.com"
] |
apache-superset.noreply@github.com
|
f00a10d6fd459080c0cf67787dcf21ebf3fb633a
|
a508e95a98a0637b83b29e3d265366b20065db73
|
/CollegeProject/faculty/serializer.py
|
cd83ae9cb377c1c9aae55f59ee54fdc40a90f156
|
[] |
no_license
|
aparnabreddy/Django-practice
|
d2cea7a9dcf2d4dd2202ad5d680a7a8075e6264f
|
02dda5293ae3a29a821ecd92d2ed2792bd824404
|
refs/heads/master
| 2023-07-13T07:42:20.224636
| 2021-08-25T17:36:04
| 2021-08-25T17:36:04
| 397,089,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 271
|
py
|
from rest_framework import serializers
from faculty.models import faculty
class facultySerializer(serializers.ModelSerializer):
class Meta:
model=faculty
fields=('faculty_code', 'name', 'department', 'address', 'mobilenumber', 'username', 'password')
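# Hedged usage sketch (assumes a configured Django project with faculty rows present);
# a DRF ModelSerializer can serialize a queryset with many=True:
#
# serialized = facultySerializer(faculty.objects.all(), many=True)
# serialized.data  # -> list of dicts limited to the fields declared above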
|
[
"aparnabreddy26@gmail.com"
] |
aparnabreddy26@gmail.com
|
a88986e2c5ad76c7302d1f0f3ff5ad314edff837
|
6783b7f66e56e88161a6b395c0a039489ec1a94b
|
/ESPCN(YeongBean)/test_image.py
|
ccd23feac1e09ac4b336a21ae25d0fe45c31572d
|
[] |
no_license
|
Dcom-KHU/2019-DeepLearning-SuperResolution
|
25d1df54c269016fa823c10e37b7e78135550e18
|
796ec20781521449bf15c333ee1bebffc73a9b7f
|
refs/heads/master
| 2020-04-17T16:20:40.138003
| 2019-03-01T03:53:05
| 2019-03-01T03:53:05
| 166,736,428
| 7
| 3
| null | 2019-03-01T03:53:06
| 2019-01-21T02:32:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
import argparse
import os
from os import listdir
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import ToTensor
from tqdm import tqdm
from data_utils import is_image_file
from model import Net
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Test Super Resolution')
parser.add_argument('--upscale_factor', default=3, type=int, help='super resolution upscale factor')
parser.add_argument('--model_name', default='epoch_3_100.pt', type=str, help='super resolution model name')
opt = parser.parse_args()
UPSCALE_FACTOR = opt.upscale_factor
MODEL_NAME = opt.model_name
path = 'data/test/SRF_' + str(UPSCALE_FACTOR) + '/data/'
images_name = [x for x in listdir(path) if is_image_file(x)]
model = Net(upscale_factor=UPSCALE_FACTOR)
if torch.cuda.is_available():
model = model.cuda()
model.load_state_dict(torch.load('epochs/' + MODEL_NAME))
out_path = 'results/SRF_' + str(UPSCALE_FACTOR) + '/'
if not os.path.exists(out_path):
os.makedirs(out_path)
for image_name in tqdm(images_name, desc='convert LR images to HR images'):
img = Image.open(path + image_name).convert('YCbCr')
y, cb, cr = img.split()
image = Variable(ToTensor()(y)).view(1, -1, y.size[1], y.size[0])
if torch.cuda.is_available():
image = image.cuda()
out = model(image)
out = out.cpu()
out_img_y = out.data[0].numpy()
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
out_img.save(out_path + image_name)
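# Hedged usage sketch (the flags and defaults come from the argparse setup above; the data
# layout itself is an assumption): with low-resolution images under data/test/SRF_3/data/
# and trained weights under epochs/, the script writes upscaled RGB images to results/SRF_3/:
# python test_image.py --upscale_factor 3 --model_name epoch_3_100.pt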
|
[
"nogadahalf12@naver.com"
] |
nogadahalf12@naver.com
|
32f4feeeb0b49a89b0f88e4fa53d55868da0a579
|
e9444c75691b0aaf9bb68c5c74cf39cd6257db89
|
/repositories/repositories.py
|
5442424fd896a0d13b763bd28e8f7224f8b1f4c3
|
[] |
no_license
|
DyegoMaas/weather-forecast
|
82cef6b2460649cde57bb0f5443051d7721bee2e
|
a98d2a1671ec3d822b986a7bfed836cf822586c2
|
refs/heads/master
| 2022-12-15T19:08:56.631510
| 2019-03-05T11:23:48
| 2019-03-05T11:23:48
| 170,938,024
| 0
| 0
| null | 2022-12-08T13:46:18
| 2019-02-15T22:43:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 438
|
py
|
from repositories.abc import JsonLinesRepository
class CitiesRepository(JsonLinesRepository):
def __init__(self):
super().__init__('cities.jsonl')
def add(self, city):
if self._is_city_saved_already(city.name):
return
super().add(city)
def _is_city_saved_already(self, city_name):
cities_names = [city.name for city in self.get_all()]
return city_name in cities_names
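# Hedged usage sketch: the repository deduplicates by city name before delegating to the
# JSON-lines base class. "City" below is an assumed value object with a .name attribute,
# mirroring how add()/_is_city_saved_already() use it.
#
# repo = CitiesRepository()
# repo.add(City(name="Blumenau"))
# repo.add(City(name="Blumenau"))  # second call is skipped, the name is already stored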
|
[
"dyego.maas@gmail.com"
] |
dyego.maas@gmail.com
|
0d275fa0ee72fd7293b043388f01a5757cdae47b
|
8fdd1256be0d06759def233a1e92347cdeeed7be
|
/pyoslc/vocabularies/data.py
|
5387faa1fa6eba6588ab8db28e305a8f10e7e3e5
|
[
"BSD-3-Clause"
] |
permissive
|
jljohnson/pyoslc
|
504776d9ec9ada54cf53f48e1346b3f369dae1e0
|
10e1895b0ea46fe2b256f991fc612678438ab072
|
refs/heads/master
| 2023-04-30T18:38:32.188430
| 2021-05-21T06:11:31
| 2021-05-21T06:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
from rdflib import URIRef
from rdflib.namespace import ClosedNamespace
OSLCData = ClosedNamespace(
uri=URIRef("http://open-services.net/ns/servicemanagement/1.0/"),
terms=[
# RDFS Classes in this namespace
# RDF Properties in this namespace
]
)
|
[
"mario.carrasco@gmail.com"
] |
mario.carrasco@gmail.com
|
0d444bde55222ac364a5afc0e82a75edaf5e20d6
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-sae/aliyunsdksae/request/v20190506/BindSlbRequest.py
|
0de7751b0f73a44db5da405020a5aa2249dd06d8
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,247
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdksae.endpoint import endpoint_data
class BindSlbRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'sae', '2019-05-06', 'BindSlb','serverless')
self.set_uri_pattern('/pop/v1/sam/app/slb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Intranet(self): # String
return self.get_query_params().get('Intranet')
def set_Intranet(self, Intranet): # String
self.add_query_param('Intranet', Intranet)
def get_IntranetSlbId(self): # String
return self.get_query_params().get('IntranetSlbId')
def set_IntranetSlbId(self, IntranetSlbId): # String
self.add_query_param('IntranetSlbId', IntranetSlbId)
def get_InternetSlbId(self): # String
return self.get_query_params().get('InternetSlbId')
def set_InternetSlbId(self, InternetSlbId): # String
self.add_query_param('InternetSlbId', InternetSlbId)
def get_AppId(self): # String
return self.get_query_params().get('AppId')
def set_AppId(self, AppId): # String
self.add_query_param('AppId', AppId)
def get_Internet(self): # String
return self.get_query_params().get('Internet')
def set_Internet(self, Internet): # String
self.add_query_param('Internet', Internet)
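# Hedged usage sketch (credentials, region and parameter values are placeholders, not part
# of this file); the request object is normally sent through aliyunsdkcore's AcsClient:
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access_key_id>', '<access_key_secret>', 'cn-hangzhou')
# request = BindSlbRequest()
# request.set_AppId('<application-id>')
# request.set_Internet('[{"port":80,"targetPort":8080,"protocol":"TCP"}]')
# response = client.do_action_with_exception(request)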
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
90aa15c9ea57c06a3eb3025848b1e1835f9898fa
|
5f3fc418c6b66667062de361aff1be1115a5045a
|
/PSet7/finance/application.py
|
1086036081cdbe517c67c613bb3802316bc502f1
|
[] |
no_license
|
georgelroberts/Course-Projects
|
fcd26b17063002d1f5c922ac901bd8f7b507794e
|
ea1e9ddadd9561ec360b6d98624c43c9ccd92b53
|
refs/heads/master
| 2023-07-19T22:03:19.354988
| 2017-03-05T10:57:11
| 2017-03-05T10:57:11
| 81,113,997
| 0
| 0
| null | 2023-07-13T11:34:04
| 2017-02-06T17:41:37
|
C
|
UTF-8
|
Python
| false
| false
| 11,603
|
py
|
from sql import SQL
from flask import Flask, redirect, render_template, request, session, url_for
from passlib.apps import custom_app_context as pwd_context
import os
from helpers import *
# configure application
app = Flask(__name__)
# ensure responses aren't cached
if app.config["DEBUG"]:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# custom filter
app.jinja_env.filters["usd"] = usd
# create a random key for the session
app.secret_key = os.urandom(24)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
"""List all of the items from the portfolio database, alongside current funds and the total worth"""
symbols = db.execute("SELECT symbol FROM portfolio WHERE id = :idsession GROUP BY symbol",
idsession=session["user_id"])
indexinfo = []
currentcash = 0
grandtotal=0
if symbols:
counter = 0
for symbol in symbols:
# uses yahoo to search for the company based on the symbol
currentstockinfo = lookup(symbol['symbol'])
noshares = db.execute("SELECT SUM(noShares) FROM portfolio where symbol=:symbol AND id=:idsession",
symbol=symbol["symbol"], idsession=session["user_id"])
if noshares[0]['SUM(noShares)'] > 0:
counter += 1
currentstock = {'symbol': currentstockinfo['symbol'], 'name': currentstockinfo['name'],
'shares': noshares[0]['SUM(noShares)'], 'price': currentstockinfo['price']}
currentstock['total'] = currentstock['price'] * currentstock['shares']
indexinfo.append(currentstock)
usercash = db.execute("SELECT cash FROM users where id=:idsession", idsession=session["user_id"])
currentcash = usercash[0]["cash"]
grandtotal = usercash[0]["cash"]
for i in range(counter):
grandtotal += indexinfo[i]['total']
for i in range(counter):
indexinfo[i]['price'] = usd(indexinfo[i]['price'])
indexinfo[i]['total'] = usd(indexinfo[i]['total'])
return render_template("index.html", holdings=indexinfo, currentCash=usd(currentcash), grandTotal=usd(grandtotal))
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Allow user to purchase shares of real companies"""
if request.method == "POST":
if request.form.get("symbol") == "":
return render_template("apology.html", message="Please enter a symbol!")
if request.form.get("shares") == "" or request.form.get("shares").isdigit() is False or int(
request.form.get("shares")) < 1:
return render_template("apology.html", message="Please enter number of shares!")
else:
sharedetails = lookup(request.form.get("symbol"))
if sharedetails is None:
return render_template("apology.html", message="Enter a correct symbol")
else:
currentcash = db.execute("SELECT cash FROM users WHERE id = :idsession", idsession=session["user_id"])
# Check if the user has the funds
if float(request.form.get("shares")) * sharedetails["price"] < currentcash[0]["cash"]:
db.execute(
"INSERT INTO portfolio (id,symbol,sharePrice,noShares,totalCost) VALUES (:id,:symbol,:sharePrice,:noShares,:totalCost)",
id=session["user_id"], symbol=sharedetails["symbol"], sharePrice=sharedetails["price"],
noShares=float(request.form.get("shares")),
totalCost=float(request.form.get("shares")) * sharedetails["price"])
db.execute("UPDATE users SET cash = cash - :totalCost where id=:idsession",
totalCost=float(request.form.get("shares")) * sharedetails["price"],
idsession=session["user_id"])
return redirect(url_for("index"))
else:
return render_template("apology.html", message="Insufficient funds available")
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Return all user activity"""
userhistory = db.execute("SELECT symbol,noShares,sharePrice,time FROM portfolio WHERE id=:sessionid",
sessionid=session["user_id"])
return render_template("history.html", userHistory=userhistory)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not request.form.get("username"):
return render_template("apology.html", message="Must provide username")
# ensure password was submitted
elif not request.form.get("password"):
return render_template("apology.html", message="must provide password")
# query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
return render_template("apology.html", message="invalid username and/or password")
# remember which user has logged in
session['user_id'] = rows[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("login"))
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
""" Find current share price of company"""
if request.method == "POST":
if request.form.get("symbol") == "":
return render_template("apology.html", message="Please enter a symbol!")
else:
sharedetails = lookup(request.form.get("symbol"))
if sharedetails is None:
return render_template("apology.html", message="Enter a correct symbol")
else:
return render_template("quoted.html", name=sharedetails["name"], symbol=sharedetails["symbol"],
price=sharedetails["price"])
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
""" Register a username and password"""
if request.method == "POST":
if request.form.get("username") == "" or request.form.get("password") == "" or request.form.get(
"confirmation") == "":
return render_template("apology.html", message="Must enter a username, password and confirmation!")
elif request.form.get("password") != request.form.get("confirmation"):
return render_template("apology.html", message="Passwords must match!")
else:
# store a hash of the password for security
passwordhash = pwd_context.encrypt(request.form.get("password"))
rows = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
if len(rows) != 0:
return render_template("apology.html", message="User already exists!")
db.execute("INSERT INTO users (username,hash) VALUES (:username,:passwordHash)",
username=request.form.get("username"), passwordHash=passwordhash)
session["user_id"] = \
db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))[0][
"id"]
session["user_name"] = \
db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))[0][
"username"]
return render_template("register.html")
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
""" Let user sell shares"""
if request.method == "POST":
if request.form.get("symbol") == "":
return render_template("apology.html", message="Please enter a symbol!")
if request.form.get("shares") == "" or request.form.get("shares").isdigit() is False or int(
request.form.get("shares")) < 1:
return render_template("apology.html", message="Please enter number of shares!")
else:
sharedetails = lookup(request.form.get("symbol"))
if sharedetails is None:
return render_template("apology.html", message="This company doesn't exist. Please try again")
else:
noshares = db.execute(
"SELECT SUM(noShares) FROM portfolio where id=:idsession and symbol=:currentSymbol GROUP BY symbol",
idsession=session["user_id"], currentSymbol=sharedetails["symbol"])
if int(request.form.get("shares")) <= noshares[0]["SUM(noShares)"]:
db.execute(
"INSERT INTO portfolio (id,symbol,sharePrice,noShares,totalCost) VALUES (:id,:symbol,:sharePrice,:noShares,:totalCost)",
id=session["user_id"], symbol=sharedetails["symbol"], sharePrice=sharedetails["price"],
noShares=-1 * int(request.form.get("shares")),
totalCost=float(request.form.get("shares")) * sharedetails["price"])
db.execute("UPDATE users SET cash = cash + :totalCost where id=:sessionid",
totalCost=float(request.form.get("shares")) * sharedetails["price"],
sessionid=session["user_id"])
return redirect(url_for("index"))
else:
return render_template("apology.html",
message="You have " + str(noshares[0]["SUM(noShares)"]) + " share(s) in " +
sharedetails["name"] + " and are trying to sell " + request.form.get(
"shares") + " shares. Please try again.")
else:
return render_template("sell.html")
@app.route("/addcash", methods=["GET", "POST"])
@login_required
def addcash():
"""Let user add funds to their account"""
if request.method == "POST":
if request.form.get("money") == "" or request.form.get("money").isnumeric() is False or int(
request.form.get("money")) < 0:
return render_template("apology.html", message="Please enter some money!")
else:
db.execute("UPDATE users SET cash = cash + :money where id=:sessionid",
money=float(request.form.get("money")), sessionid=session["user_id"])
return redirect(url_for("index"))
else:
return render_template("addcash.html")
if __name__ == "__main__":
app.debug = True
app.run()
|
[
"georgelroberts@hotmail.com"
] |
georgelroberts@hotmail.com
|
23bab1390bf7112b2093362ebef06bbf34918d9c
|
ff7a8ed8afea953cb20e64005290f636d43a37b8
|
/gui_view/wxpy_gui.py
|
eff5fedb5638d084cdf651b25f00ddcbaba05642
|
[] |
no_license
|
qxf323/ApkInstallTool
|
32493bf41e300f532ef93d61935fc2c98d06e4e1
|
0a6522b4a9d0efb59c9561048526eeddb5a4b190
|
refs/heads/master
| 2021-01-01T17:30:26.633294
| 2016-11-11T17:23:06
| 2016-11-11T17:23:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
# -*- coding=utf8 -*-
from gui_controller.packageController import PackageController
from gui_controller.apkController import ApkController
from gui_controller.deviceInfo import DeviceInfo
import easygui
import re
import time
import wx
'''
When importing modules, make sure there is an __init__.py file in the package directory and in every sub-directory.
Business logic:
1. First confirm which device to use
2. Choose which apk to install on that device
3. After confirming, perform the installation
'''
class ApkInstallGui(wx.Frame):
def __init__(self):
wx.Frame.__init__(self,None,-1,'Python Application for Android',pos=(400,100),size=(600,550))
panel = wx.Panel(self,-1)
# Step 1: confirm which device to use
dinfoObj = DeviceInfo()
self.deviceInfo = dinfoObj.catch_devices_info()
self.infolist = ["All"]
for i in self.deviceInfo:
a = self.deviceInfo[i]["phone_brand"]
b = self.deviceInfo[i]["phone_model"]
c = self.deviceInfo[i]["os_version"]
d = self.deviceInfo[i]["dpi"]
e = self.deviceInfo[i]["image_resolution"]
f = self.deviceInfo[i]["ip"]
t = a+" :: "+b+" :: "+c+" :: "+d+" :: "+e+" :: "+f
self.infolist.append(t)
self.choise_device_res_obj = wx.CheckListBox(panel,-1,(30,20),(500,100),self.infolist)
# Step 2: choose which apk to install on that device
self.apkObj = ApkController()
apklist = self.apkObj.apk_list()
self.choise_apk_res_obj = wx.RadioBox(panel,-1,"Apk list",(30,140),(500,200),apklist,1,wx.RA_SPECIFY_COLS)
self.button = wx.Button(panel,-1,'立即安装'.decode("utf8"),pos=(440,400))
self.Bind(wx.EVT_BUTTON,self.on_btn_click,self.button)
def on_btn_click(self,event):
choise_apk_res = self.choise_apk_res_obj.GetStringSelection()
apkPath = self.apkObj.apk_abs_path(choise_apk_res)
apkPackageName = self.apkObj.get_apk_package_name(apkPath)
choise_device_res = self.choise_device_res_obj.GetCheckedStrings()
# Step 3: perform the installation
pctrObj = PackageController()
if choise_device_res is None or len(choise_device_res) == 0:
print "all devices will be installed apk"
pctrObj.install_all_devices(apkPath,apkPackageName) #向所有链接的设备安装
elif choise_device_res[0] == 'All' :
print "all devices will be installed apk"
pctrObj.install_all_devices(apkPath,apkPackageName) #向所有链接的设备安装
else:
print "Will install apk on your choise device"
for i in self.deviceInfo:
for y in range(len(choise_device_res)):
if re.search(self.deviceInfo[i]["phone_model"],choise_device_res[y]):
pctrObj.install_one_device(i,apkPath,apkPackageName)
wx.MessageBox('Install work succeeded','Install Result Info',wx.OK|wx.ICON_INFORMATION)
if __name__ == '__main__':
app = wx.App()
ApkInstallGui().Show()
app.MainLoop()
|
[
"jayzhen_testing@163.com"
] |
jayzhen_testing@163.com
|
262372a9a6ec7b443fa7955eca2f632cefb1864f
|
4fa5e1c64d38054d4de8136dade072fd1eb097c4
|
/BaiduSpider/baidu/baidu/pipelines.py
|
8c52bffa880d705e43413eecc2aaae59eb348c1a
|
[] |
no_license
|
atanx/web_crawler
|
1d88950ec496fce169f33813db2cecd53053bce3
|
2ad4f1988e50b6a3bc494a8445edbb4da767d32a
|
refs/heads/master
| 2021-01-11T17:29:34.322770
| 2017-03-25T10:30:57
| 2017-03-25T10:30:57
| 79,782,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class BaiduPipeline(object):
def __init__(self):
super(BaiduPipeline, self).__init__()
print u'关键词,标题,简介,访问链接,网站,推广'.encode('gbk')  # CSV header: keyword, title, abstract, url, site, promoted (ad)
def process_item(self, item, spider):
keys = ('keyword',
'title',
'abstract',
'url',
'show_url',
'is_ad')
str_item = []
for key in keys:
field = item.get(key, '').strip()
if len(field) > 50 and key == 'title':
field = field[0:50]
field = field.replace(',', ';')
if isinstance(field, str):
pass
# field = field.decode('gbk')
str_item.append(field)
str_item = u','.join(str_item)
print str_item.decode('utf-8').encode('gbk', errors='ignore')
return item
|
[
"07jiangbin@163.com"
] |
07jiangbin@163.com
|
aed1361eac2345b873f2d1f1c5129f646abfa7c5
|
41188a72facc51c65d0d58efe127f5e8c8811f5e
|
/剑指offer系列/剑指 Offer 04. 二维数组中的查找 LCOF/Solution.py
|
339652115d85307f4cb36562a82571bf5f874407
|
[
"MIT"
] |
permissive
|
furutuki/LeetCodeSolution
|
74ccebc8335125bbc4cbf1a76eb8d4281802f5b9
|
089d27af04bf81149251787409d1866c7c4390fb
|
refs/heads/master
| 2022-10-31T08:46:15.124759
| 2022-10-25T02:57:54
| 2022-10-25T02:57:54
| 168,449,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
from typing import List
class Solution:
def findNumberIn2DArray(self, matrix: List[List[int]], target: int) -> bool:
if not matrix:
return False
row = len(matrix)
col = len(matrix[0])
if row == 0 or col == 0:
return False
r = 0
c = col - 1
while 0 <= r < row and 0 <= c < col:
if target > matrix[r][c]:
r += 1
elif target < matrix[r][c]:
c -= 1
else:
return True
return False
s = Solution()
print(s.findNumberIn2DArray([[-5]], -10))
|
[
"furutuki@foxmail.com"
] |
furutuki@foxmail.com
|
4a12aebfb18d823b39ae578ee0883c6e273bb18e
|
9ce8761f56058af0be265e6bb37ae24f4472fda3
|
/slack.py
|
30a6bb35fab742ad8482f63e5170cd5965629168
|
[] |
no_license
|
Kunstmaan/docker-gitlab-dind
|
23e76fff951bad323021f0faa0062d1510cf63f0
|
f5effd6085a9bb48e288dd17fe09f1ce8331d0c9
|
refs/heads/master
| 2021-01-18T22:20:23.666443
| 2016-06-08T13:46:54
| 2016-06-08T13:46:54
| 53,861,214
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import sys
from slacker import Slacker
token = sys.argv[1]
channel = sys.argv[2]
message = sys.argv[3]
slack = Slacker(token)
slack.chat.post_message(channel, message)
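# Hedged usage sketch (token/channel/message are placeholders): the script takes three
# positional arguments, e.g.
# python slack.py "xoxb-your-token" "#builds" "Deploy finished"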
|
[
"roderik.van.der.veer@kunstmaan.be"
] |
roderik.van.der.veer@kunstmaan.be
|
6264dd11ae7e0eb16614fe361ceb505785416fdc
|
0266986446077c493033f23a1a26a9253763fd84
|
/Interview Based/Equilibrium Point.py
|
5a857b417992a31f84d2a3a308bf61818357835e
|
[
"MIT"
] |
permissive
|
devangi2000/HacktoberFest2020-1
|
775139900265aa8516ce0eca36536b1e2e0167ff
|
b0545988b28443419053fafde5dede6d6c5aca83
|
refs/heads/master
| 2022-12-29T19:22:23.239317
| 2020-10-19T10:53:08
| 2020-10-19T10:53:08
| 305,351,252
| 0
| 0
|
MIT
| 2020-10-19T10:47:09
| 2020-10-19T10:47:08
| null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
""" Given an array A of N positive numbers. The task is to find the first Equilibium Point in the array.
Equilibrium Point in an array is a position
such that the sum of elements before it is equal to the sum of elements after it.
Example 1:
Input:
N = 1
A[] = {1}
Output: 1
Explanation: Since it is the only
element, it is the only equilibrium
point.
Input:
N = 5
A[] = {1,3,5,2,2}
Output: 3
Explanation: For the second test case the
equilibrium point is at position 3,
as elements before it (1+3) =
elements after it (2+2).
"""
def Sum_before(A,mid):
sum1=0
for j in range(0,mid+1):
sum1+=A[j]
return sum1
def Sum_After(A,mid,l):
sum2=0
for k in range(mid,l+1):
sum2+=A[k]
return sum2
def equilibriumPoint(A, N):
sum = 0
if (N == 1):
return(1)
if N==2:
return(-1)
else:
if N%2==0:
mid = N//2
if(Sum_before(A, mid-1) == Sum_After(A, mid, N-1)):
return mid
else:
return False
else:
mid = N//2
#print(mid)
if(Sum_before(A, mid-1) == Sum_After(A, mid+1, N-1)):
return mid+1
else:
return False
if __name__ == "__main__":
N = int(input())
A = [int(item) for item in input().split()]
print(equilibriumPoint(A,N))
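# Hedged reference sketch (not part of the original submission): the function above only
# tests the middle index, while the general problem allows the equilibrium point anywhere.
# A standard O(N) scan with a running left sum would look like this:
#
# def equilibrium_point_linear(A):
#     total = sum(A)
#     left = 0
#     for i, value in enumerate(A):
#         if left == total - left - value:
#             return i + 1  # 1-based position, as in the problem statement
#         left += value
#     return -1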
|
[
"choudhuryabhishek76@gmail.com"
] |
choudhuryabhishek76@gmail.com
|
8c7fdeb7013a420c27f7521c42724715d80919a2
|
6f802e09cae7ed8f89a9c937fa75af36921c597c
|
/cmd/cmdset.py
|
e3443244d08bf3dd865b178633f7c650b6988714
|
[] |
no_license
|
JodeZer/gegimp
|
653f2e609754135ef2d84b44e0b22f72dc26703b
|
46a19fc57f1b9eb854731dd0d0051a9e63d07fd8
|
refs/heads/master
| 2020-08-30T18:17:59.282994
| 2019-11-13T06:37:56
| 2019-11-13T06:37:56
| 218,455,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
import tempfile
import command
class CmdSet():
def __init__(self, infile, outfile):
self.cmds = []
self.infile = infile
self.outfile = outfile
self.suffix = infile[-4:]
def append(self, cmd):
self.cmds.append(cmd)
def __compact(self):
newcmd = []
aggcmd = []
for cmd in self.cmds:
if cmd.cmd_type == command.Cmd_Type.GEGL:
aggcmd.append(cmd)
if cmd.cmd_type == command.Cmd_Type.GIMP:
if len(aggcmd) != 0:
if len(aggcmd) == 1:
newcmd.append(aggcmd[0])
else:
newcmd.append(command.Cmd_gegl_aggregation(aggcmd))
aggcmd = []
newcmd.append(cmd)
if len(aggcmd) != 0:
newcmd.append(command.Cmd_gegl_aggregation(aggcmd))
self.cmds = newcmd
def exec(self):
if len(self.cmds) == 0:
return 0
self.__compact()
if len(self.cmds) == 1:
cmd = self.cmds[0]
cmd.setIOFile(self.infile, self.outfile)
return cmd.exec()
tmpf = tempfile.NamedTemporaryFile(delete=True, suffix=self.suffix)
self.cmds[0].setIOFile(self.infile, tmpf.name)
assert self.cmds[0].exec() == 0
for cmd in self.cmds[1:-1]:
cmd.setIOFile(tmpf.name, tmpf.name) # run each intermediate command in place on the temp file
assert cmd.exec() == 0
self.cmds[-1].setIOFile(tmpf.name, self.outfile)
assert self.cmds[-1].exec() == 0
tmpf.close()
|
[
"jeffery1993@hotmail.com"
] |
jeffery1993@hotmail.com
|
ec213db897765b6d7b6ee100ebaab12c3b9d4276
|
9cc9994143d6de18b8b9cb7279a21129e6697682
|
/crunchviz/wsgi.py
|
a10ce90b4fc6d2743bd26e5889537792b2fa6614
|
[] |
no_license
|
lebmatter/crunchviz
|
5d9475af772d4a17ef76e1db1c17ae6e821b8566
|
eb79ab9410c2586f58e6f112c7a3dde0348efab2
|
refs/heads/master
| 2021-06-07T03:03:55.521358
| 2016-10-16T06:25:49
| 2016-10-16T06:25:49
| 70,991,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
"""
WSGI config for crunchviz project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crunchviz.settings")
application = get_wsgi_application()
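# Hedged usage note (server choice and bind address are assumptions, not part of this
# project): any WSGI server can load the module-level "application" object, e.g.
# gunicorn crunchviz.wsgi:application --bind 0.0.0.0:8000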
|
[
"mmlabeeb@gmail.com"
] |
mmlabeeb@gmail.com
|
4f3f6bce1e2fcd480f861cbe4e4bd5dc4f5cf643
|
0c24f5f7e20c78cc5bccbbdadb4e45f3c633e798
|
/divergence_all_lines_batch_folder.py
|
b85d3e0b3c3e11257151d61c8c0e178792dca42f
|
[
"MIT"
] |
permissive
|
Similarities/HHG_divergence
|
7597f3429c3fab64e719de4ec993fbf4118d46cc
|
cd1fa0e1655190f35dda19adacc2447f2d58fca0
|
refs/heads/master
| 2021-06-30T17:34:45.658750
| 2020-10-01T15:56:34
| 2020-10-01T15:56:34
| 175,637,403
| 0
| 0
|
MIT
| 2020-10-01T15:56:36
| 2019-03-14T14:23:37
|
Python
|
UTF-8
|
Python
| false
| false
| 8,429
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 16:54:54 2019
@author: similarities
"""
import matplotlib.pyplot as plt
import numpy as np
import os
class FwhmImageProcessing:
def __init__(self, filename, lambda_fundamental, maximum_harmonic, harmonic_number):
self.filename = filename
self.filedescription = self.filename[31:42] + '_' + self.filename[-6:-4]
self.y_min = 0
self.y_max = 2048
self.x_min = 150
self.x_max = 1200
self.picture = np.empty([])
self.harmonic_selected = harmonic_number
self.x_backsubstracted = np.empty([2048, 2048])
self.lambda_fundamental = lambda_fundamental
self.line_out = np.zeros([self.y_max, 1])
self.line_out_x = np.arange(self.x_min, self.x_max)
self.calibration_to_msr = 17.5 / 2048
self.full_divergence = 17.5
self.normalization_factor_mrad = np.zeros([20, 1])
self.border_up, self.border_down = self.energy_range()
self.maximum_harmonic = maximum_harmonic
self.result_array = np.zeros([self.maximum_harmonic, 5])
def open_file(self):
self.picture = plt.imread(self.filename)
return self.picture
def background_y(self):
back_mean = np.mean(self.picture[:, 1780:1948], axis=1)
for x in range(0, self.y_max):
self.x_backsubstracted[::, x] = self.picture[::, x] - back_mean[x]
self.background_x()
plt.figure(1)
# plt.ylim(100, 1000)
plt.imshow(self.x_backsubstracted)
plt.vlines(self.x_min, 0, 2048)
plt.vlines(self.x_max, 0, 2048)
return self.x_backsubstracted
def background_x(self):
back_mean = np.mean(self.picture[1780:1948, :], axis=0)
for x in range(0, 2048):
self.x_backsubstracted[x, ::] = self.picture[x, ::] - back_mean[x]
return self.x_backsubstracted
def energy_range(self):
print(self.harmonic_selected, ':')
previous_harmonic = self.lambda_fundamental / (self.harmonic_selected - 0.3)
next_harmonic = self.lambda_fundamental / (self.harmonic_selected + 0.3)
self.border_up = np.int(self.nm_in_px(previous_harmonic))
self.border_down = np.int(self.nm_in_px(next_harmonic))
print(self.border_up, self.border_down, "ROI in px")
self.pixel_range = np.int(self.border_down - self.border_up)
print(self.pixel_range, 'ROI in pixel range')
self.plot_roi_on_image()
return self.border_up, self.border_down
def nm_in_px(self, wavelength_in):
return int(7.79104482e-01 * wavelength_in ** 2 - 1.24499534e+02 * wavelength_in + 3.38549944e+03)
def plot_roi_on_image(self):
plt.figure(1)
plt.hlines(self.border_up, xmin=0, xmax=2048, color="m", linewidth=0.5)
plt.hlines(self.border_down, xmin=0, xmax=2048, color="w", linewidth=1.)
def sum_over_pixel_range_y(self):
self.line_out = self.x_backsubstracted[self.border_up: self.border_down, ::]
self.line_out = np.sum(self.line_out, axis=0)
self.line_out = self.line_out[self.x_min:self.x_max]
return self.line_out
def correction_background(self, value):
self.line_out[::] = self.line_out[::] - value
return self.line_out
def integrated_signal_in_lineout(self):
integrated = np.sum(self.line_out[::])
return integrated
def plot_x_y(self, x, y, name, plot_number, axis_x_name, axis_y_name):
plt.figure(plot_number)
plt.plot(x, y, label=name)
plt.xlabel(str(axis_x_name))
plt.ylabel(str(axis_y_name))
plt.legend()
def calibrate_px_to_msr(self, array_x):
array_x[::] = array_x[::] * self.calibration_to_msr
return array_x
def prepare_for_stepfunction(self):
self.sum_over_pixel_range_y()
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
if minimum < 0:
self.correction_background(minimum)
maximum = np.amax(self.line_out[::])
minimum = np.amin(self.line_out[::])
half_max = (maximum - minimum) / 2
# self.plot_x_y(self.line_out_x, self.line_out, 'linout_corrected', 2, 'px', 'counts')
self.plot_x_y(self.line_out_x, self.line_out, str(self.harmonic_selected), 2, 'px', 'counts')
return half_max
def step_function_for_fwhm(self):
half_max = self.prepare_for_stepfunction()
# width of step function is FWHM
d = np.sign(half_max - self.line_out[::]) - 1
self.line_out_x = self.calibrate_px_to_msr(self.line_out_x)
self.plot_x_y(self.line_out_x, d, 'stepfunction', 3, 'mrad', 'value')
self.line_out_x = np.arange(self.x_min, self.x_max)
result_FWHM = 1.5 * self.calibration_to_msr * (np.amax(np.nonzero(d)) - np.amin(np.nonzero(d)))
return result_FWHM
def px_in_nm(self, px_number):
return 1.27877896e-06 * px_number ** 2 - 1.37081526e-02 * px_number + 3.46785380e+01
def delta_energy(self):
delta = self.px_in_nm(self.border_up) - self.px_in_nm(self.border_down)
energy_nm = (self.lambda_fundamental / self.harmonic_selected)
delta_vs_energy = delta / energy_nm
return energy_nm, delta_vs_energy
def batch_over_N(self):
for x in range(self.harmonic_selected, self.maximum_harmonic):
self.result_array[x, 0] = x
self.harmonic_selected = x
self.energy_range()
self.result_array[x, 1] = self.step_function_for_fwhm()
self.result_array[x, 2] = np.sum(self.line_out[::])
self.result_array[x, 4], self.result_array[x, 3] = self.delta_energy()
# clean for empty entries
self.result_array = np.delete(self.result_array, np.where(~self.result_array.any(axis=1))[0],
axis=0)
self.plot_scatter(self.result_array[::, 0], self.result_array[::, 1], self.filedescription,
'harmonic number N', 'divergence in mrad', 5)
self.save_data()
return self.result_array
def plot_scatter(self, x, y, name, axis_name_x, axis_name_y, plot_number):
plt.figure(plot_number)
plt.scatter(x, y, label=name)
plt.xlabel(axis_name_x)
plt.ylabel(axis_name_y)
#plt.legend()
def prepare_header(self):
self.integrated_signal_in_lineout()
self.delta_energy()
# insert header line and change index
header_names = (['harmonic_number', 'mrad', 'integrated_counts_in_delta_E', 'harmonic_in_nm', 'delta_E/E'])
parameter_info = (
['fundamental_nm:', str(self.lambda_fundamental), 'pixel_range:', str(self.border_down-self.border_up), 'xxxx'])
return np.vstack((header_names, self.result_array, parameter_info))
def save_data(self):
result = self.prepare_header()
plt.figure(1)
plt.savefig(self.filedescription + "_raw_roi_" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(2)
plt.savefig(self.filedescription + "_integrated_lineout" + ".png", bbox_inches="tight", dpi=1000)
plt.figure(5)
plt.savefig(self.filedescription + "_div_mrad_FWHM" + ".png", bbox_inches="tight", dpi=1000)
print('saved data')
np.savetxt(self.filedescription + ".txt", result, delimiter=' ',
header='string', comments='',
fmt='%s')
def get_file_list(path_picture):
tif_files = []
counter = 0
for file in os.listdir(path_picture):
print(file)
try:
if file.endswith(".tif"):
tif_files.append(str(file))
counter = counter + 1
else:
print("only other files found")
except Exception as e:
raise e
print("no files found here")
return tif_files
def process_files(my_files, path):
for x in range(7, len(my_files)):
file = path +'/'+ my_files[x]
Processing_Picture = FwhmImageProcessing(file, 802, 36, 24)
Processing_Picture.open_file()
Processing_Picture.background_y()
Processing_Picture.batch_over_N()
Processing_Picture.save_data()
plt.close(1)
plt.close(2)
plt.close(5)
my_files = get_file_list('rotated_20190129')
process_files(my_files, 'rotated_20190129')
|
[
"j.braenzel@gmx.net"
] |
j.braenzel@gmx.net
|
e8ee1645f29484e6fd052512aa6e95da7d47cb99
|
577881a3eb187fbadcf0e0518511a91620227ee3
|
/lesson4/less4_task5.py
|
e08e3f9a6a72ea060f2afa94add12f0501559476
|
[] |
no_license
|
Xuhen17/Python_Basic
|
aac9dbc09e55a9f6ac4afc47d37d5f2c27b458f6
|
a491c66b6d57443983eb15c95f4d1bc9a68a8793
|
refs/heads/master
| 2023-03-08T02:55:55.667984
| 2021-02-26T10:39:32
| 2021-02-26T10:39:32
| 331,872,468
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
# 5. Build a list using the range() function and a generator expression.
# The list must contain the even numbers from 100 to 1000 (inclusive).
# Compute the product of all elements of the list.
# Hint: use the reduce() function.
from functools import reduce
def multiply(el, next_el):
return el * next_el
my_list = [i for i in range(100, 1001, 2)]
print(reduce(multiply, my_list))
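# Hedged equivalent (assumes Python 3.8+, which the task itself does not require):
# the same product can be computed without reduce():
#
# from math import prod
# print(prod(range(100, 1001, 2)))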
|
[
"xuhen@yandex.ru"
] |
xuhen@yandex.ru
|
1dd46505106e1cd61a86faf06060ecdc00dc9955
|
37fef592f365194c28579f95abd222cc4e1243ae
|
/streamlit/venv/lib/python3.7/site-packages/matplotlib/axis.py
|
10a6c7f747ba316643bf074012fb33b1acb91362
|
[] |
no_license
|
edimaudo/Python-projects
|
be61e0d3fff63fb7bd00513dbf1401e2c1822cfb
|
85d54badf82a0b653587a02e99daf389df62e012
|
refs/heads/master
| 2023-04-07T03:26:23.259959
| 2023-03-24T12:03:03
| 2023-03-24T12:03:03
| 72,611,253
| 4
| 3
| null | 2022-10-31T18:10:41
| 2016-11-02T06:37:17
| null |
UTF-8
|
Python
| false
| false
| 91,403
|
py
|
"""
Classes for the ticks and x and y axis.
"""
import datetime
import functools
import logging
import numpy as np
import matplotlib as mpl
from matplotlib import _api
import matplotlib.artist as martist
import matplotlib.cbook as cbook
import matplotlib.lines as mlines
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
_log = logging.getLogger(__name__)
GRIDLINE_INTERPOLATION_STEPS = 180
# This list is being used for compatibility with Axes.grid, which
# allows all Line2D kwargs.
_line_inspector = martist.ArtistInspector(mlines.Line2D)
_line_param_names = _line_inspector.get_setters()
_line_param_aliases = [list(d)[0] for d in _line_inspector.aliasd.values()]
_gridline_param_names = ['grid_' + name
for name in _line_param_names + _line_param_aliases]
class Tick(martist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels.
Ticks mark a position on an Axis. They contain two lines as markers and
two labels; one each for the bottom and top positions (in case of an
`.XAxis`) or for the left and right positions (in case of a `.YAxis`).
Attributes
----------
tick1line : `.Line2D`
The left/bottom tick marker.
tick2line : `.Line2D`
The right/top tick marker.
gridline : `.Line2D`
The grid line associated with the label position.
label1 : `.Text`
The left/bottom tick label.
label2 : `.Text`
The right/top tick label.
"""
def __init__(self, axes, loc, *,
size=None, # points
width=None,
color=None,
tickdir=None,
pad=None,
labelsize=None,
labelcolor=None,
zorder=None,
gridOn=None, # defaults to axes.grid depending on
# axes.grid.which
tick1On=True,
tick2On=True,
label1On=True,
label2On=False,
major=True,
labelrotation=0,
grid_color=None,
grid_linestyle=None,
grid_linewidth=None,
grid_alpha=None,
**kw # Other Line2D kwargs applied to gridlines.
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in points
"""
super().__init__()
if gridOn is None:
if major and (mpl.rcParams['axes.grid.which']
in ('both', 'major')):
gridOn = mpl.rcParams['axes.grid']
elif (not major) and (mpl.rcParams['axes.grid.which']
in ('both', 'minor')):
gridOn = mpl.rcParams['axes.grid']
else:
gridOn = False
self.set_figure(axes.figure)
self.axes = axes
self._loc = loc
self._major = major
name = self.__name__
major_minor = "major" if major else "minor"
if size is None:
size = mpl.rcParams[f"{name}.{major_minor}.size"]
self._size = size
if width is None:
width = mpl.rcParams[f"{name}.{major_minor}.width"]
self._width = width
if color is None:
color = mpl.rcParams[f"{name}.color"]
if pad is None:
pad = mpl.rcParams[f"{name}.{major_minor}.pad"]
self._base_pad = pad
if labelcolor is None:
labelcolor = mpl.rcParams[f"{name}.labelcolor"]
if labelcolor == 'inherit':
# inherit from tick color
labelcolor = mpl.rcParams[f"{name}.color"]
if labelsize is None:
labelsize = mpl.rcParams[f"{name}.labelsize"]
self._set_labelrotation(labelrotation)
if zorder is None:
if major:
zorder = mlines.Line2D.zorder + 0.01
else:
zorder = mlines.Line2D.zorder
self._zorder = zorder
if grid_color is None:
grid_color = mpl.rcParams["grid.color"]
if grid_linestyle is None:
grid_linestyle = mpl.rcParams["grid.linestyle"]
if grid_linewidth is None:
grid_linewidth = mpl.rcParams["grid.linewidth"]
if grid_alpha is None:
grid_alpha = mpl.rcParams["grid.alpha"]
grid_kw = {k[5:]: v for k, v in kw.items()}
self.tick1line = mlines.Line2D(
[], [],
color=color, linestyle="none", zorder=zorder, visible=tick1On,
markeredgecolor=color, markersize=size, markeredgewidth=width,
)
self.tick2line = mlines.Line2D(
[], [],
color=color, linestyle="none", zorder=zorder, visible=tick2On,
markeredgecolor=color, markersize=size, markeredgewidth=width,
)
self.gridline = mlines.Line2D(
[], [],
color=grid_color, alpha=grid_alpha, visible=gridOn,
linestyle=grid_linestyle, linewidth=grid_linewidth, marker="",
**grid_kw,
)
self.gridline.get_path()._interpolation_steps = \
GRIDLINE_INTERPOLATION_STEPS
self.label1 = mtext.Text(
np.nan, np.nan,
fontsize=labelsize, color=labelcolor, visible=label1On,
rotation=self._labelrotation[1])
self.label2 = mtext.Text(
np.nan, np.nan,
fontsize=labelsize, color=labelcolor, visible=label2On,
rotation=self._labelrotation[1])
self._apply_tickdir(tickdir)
for artist in [self.tick1line, self.tick2line, self.gridline,
self.label1, self.label2]:
self._set_artist_props(artist)
self.update_position(loc)
@property
@_api.deprecated("3.1", alternative="Tick.label1", pending=True)
def label(self):
return self.label1
def _set_labelrotation(self, labelrotation):
if isinstance(labelrotation, str):
mode = labelrotation
angle = 0
elif isinstance(labelrotation, (tuple, list)):
mode, angle = labelrotation
else:
mode = 'default'
angle = labelrotation
_api.check_in_list(['auto', 'default'], labelrotation=mode)
self._labelrotation = (mode, angle)
def _apply_tickdir(self, tickdir):
"""Set tick direction. Valid values are 'out', 'in', 'inout'."""
# This method is responsible for updating `_pad`, and, in subclasses,
# for setting the tick{1,2}line markers as well. From the user
# perspective this should always be called though _apply_params, which
# further updates ticklabel positions using the new pads.
if tickdir is None:
tickdir = mpl.rcParams[f'{self.__name__}.direction']
_api.check_in_list(['in', 'out', 'inout'], tickdir=tickdir)
self._tickdir = tickdir
self._pad = self._base_pad + self.get_tick_padding()
@_api.deprecated("3.5", alternative="`.Axis.set_tick_params`")
def apply_tickdir(self, tickdir):
self._apply_tickdir(tickdir)
self.stale = True
def get_tickdir(self):
return self._tickdir
def get_tick_padding(self):
"""Get the length of the tick outside of the axes."""
padding = {
'in': 0.0,
'inout': 0.5,
'out': 1.0
}
return self._size * padding[self._tickdir]
def get_children(self):
children = [self.tick1line, self.tick2line,
self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
# docstring inherited
super().set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
self.stale = True
def get_pad_pixels(self):
return self.figure.dpi * self._base_pad / 72
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
return False, {}
def set_pad(self, val):
"""
Set the tick label pad in points
Parameters
----------
val : float
"""
self._apply_params(pad=val)
self.stale = True
def get_pad(self):
"""Get the value of the tick label pad in points."""
return self._base_pad
def _get_text1(self):
"""Get the default Text 1 instance."""
def _get_text2(self):
"""Get the default Text 2 instance."""
def _get_tick1line(self):
"""Get the default line2D instance for tick1."""
def _get_tick2line(self):
"""Get the default line2D instance for tick2."""
def _get_gridline(self):
"""Get the default grid Line2d instance for this tick."""
def get_loc(self):
"""Return the tick location (data coords) as a scalar."""
return self._loc
@martist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
self.stale = False
return
renderer.open_group(self.__name__, gid=self.get_gid())
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
artist.draw(renderer)
renderer.close_group(self.__name__)
self.stale = False
def set_label1(self, s):
"""
Set the label1 text.
Parameters
----------
s : str
"""
self.label1.set_text(s)
self.stale = True
set_label = set_label1
def set_label2(self, s):
"""
Set the label2 text.
Parameters
----------
s : str
"""
self.label2.set_text(s)
self.stale = True
def set_url(self, url):
"""
Set the url of label1 and label2.
Parameters
----------
url : str
"""
super().set_url(url)
self.label1.set_url(url)
self.label2.set_url(url)
self.stale = True
def _set_artist_props(self, a):
a.set_figure(self.figure)
def get_view_interval(self):
"""
Return the view limits ``(min, max)`` of the axis the tick belongs to.
"""
raise NotImplementedError('Derived must override')
def _apply_params(self, **kw):
for name, target in [("gridOn", self.gridline),
("tick1On", self.tick1line),
("tick2On", self.tick2line),
("label1On", self.label1),
("label2On", self.label2)]:
if name in kw:
target.set_visible(kw.pop(name))
if any(k in kw for k in ['size', 'width', 'pad', 'tickdir']):
self._size = kw.pop('size', self._size)
# Width could be handled outside this block, but it is
# convenient to leave it here.
self._width = kw.pop('width', self._width)
self._base_pad = kw.pop('pad', self._base_pad)
# _apply_tickdir uses _size and _base_pad to make _pad, and also
# sets the ticklines markers.
self._apply_tickdir(kw.pop('tickdir', self._tickdir))
for line in (self.tick1line, self.tick2line):
line.set_markersize(self._size)
line.set_markeredgewidth(self._width)
# _get_text1_transform uses _pad from _apply_tickdir.
trans = self._get_text1_transform()[0]
self.label1.set_transform(trans)
trans = self._get_text2_transform()[0]
self.label2.set_transform(trans)
tick_kw = {k: v for k, v in kw.items() if k in ['color', 'zorder']}
if 'color' in kw:
tick_kw['markeredgecolor'] = kw['color']
self.tick1line.set(**tick_kw)
self.tick2line.set(**tick_kw)
for k, v in tick_kw.items():
setattr(self, '_' + k, v)
if 'labelrotation' in kw:
self._set_labelrotation(kw.pop('labelrotation'))
self.label1.set(rotation=self._labelrotation[1])
self.label2.set(rotation=self._labelrotation[1])
label_kw = {k[5:]: v for k, v in kw.items()
if k in ['labelsize', 'labelcolor']}
self.label1.set(**label_kw)
self.label2.set(**label_kw)
grid_kw = {k[5:]: v for k, v in kw.items()
if k in _gridline_param_names}
self.gridline.set(**grid_kw)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
raise NotImplementedError('Derived must override')
def _get_text1_transform(self):
raise NotImplementedError('Derived must override')
def _get_text2_transform(self):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in data coords, y in axes coords
ax = self.axes
self.tick1line.set(
data=([0], [0]), transform=ax.get_xaxis_transform("tick1"))
self.tick2line.set(
data=([0], [1]), transform=ax.get_xaxis_transform("tick2"))
self.gridline.set(
data=([0, 0], [0, 1]), transform=ax.get_xaxis_transform("grid"))
# the y loc is 3 points below the min of y axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=0, y=1,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_xaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_xaxis_text2_transform(self._pad)
def _apply_tickdir(self, tickdir):
# docstring inherited
super()._apply_tickdir(tickdir)
mark1, mark2 = {
'out': (mlines.TICKDOWN, mlines.TICKUP),
'in': (mlines.TICKUP, mlines.TICKDOWN),
'inout': ('|', '|'),
}[self._tickdir]
self.tick1line.set_marker(mark1)
self.tick2line.set_marker(mark2)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_xdata((loc,))
self.tick2line.set_xdata((loc,))
self.gridline.set_xdata((loc,))
self.label1.set_x(loc)
self.label2.set_x(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in axes coords, y in data coords
ax = self.axes
self.tick1line.set(
data=([0], [0]), transform=ax.get_yaxis_transform("tick1"))
self.tick2line.set(
data=([1], [0]), transform=ax.get_yaxis_transform("tick2"))
self.gridline.set(
data=([0, 1], [0, 0]), transform=ax.get_yaxis_transform("grid"))
        # the x loc is 3 points to the left of the min of x axis
trans, va, ha = self._get_text1_transform()
self.label1.set(
x=0, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
trans, va, ha = self._get_text2_transform()
self.label2.set(
x=1, y=0,
verticalalignment=va, horizontalalignment=ha, transform=trans,
)
def _get_text1_transform(self):
return self.axes.get_yaxis_text1_transform(self._pad)
def _get_text2_transform(self):
return self.axes.get_yaxis_text2_transform(self._pad)
def _apply_tickdir(self, tickdir):
# docstring inherited
super()._apply_tickdir(tickdir)
mark1, mark2 = {
'out': (mlines.TICKLEFT, mlines.TICKRIGHT),
'in': (mlines.TICKRIGHT, mlines.TICKLEFT),
'inout': ('_', '_'),
}[self._tickdir]
self.tick1line.set_marker(mark1)
self.tick2line.set_marker(mark2)
def update_position(self, loc):
"""Set the location of tick in data coords with scalar *loc*."""
self.tick1line.set_ydata((loc,))
self.tick2line.set_ydata((loc,))
self.gridline.set_ydata((loc,))
self.label1.set_y(loc)
self.label2.set_y(loc)
self._loc = loc
self.stale = True
def get_view_interval(self):
# docstring inherited
return self.axes.viewLim.intervaly
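# A minimal usage sketch of the tick-direction handling implemented by the
# XTick/YTick ``_apply_tickdir`` overrides above. The helper name below is
# hypothetical, and ``matplotlib.pyplot`` plus the created Axes are assumptions
# of the sketch, not part of this module.
def _example_tick_direction():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # 'in', 'out' and 'inout' select the marker pairs chosen in _apply_tickdir.
    ax.tick_params(axis='x', which='both', direction='inout')
    ax.tick_params(axis='y', which='major', direction='in')
    return fig, ax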
class Ticker:
"""
A container for the objects defining tick position and format.
Attributes
----------
locator : `matplotlib.ticker.Locator` subclass
Determines the positions of the ticks.
formatter : `matplotlib.ticker.Formatter` subclass
Determines the format of the tick labels.
"""
def __init__(self):
self._locator = None
self._formatter = None
self._locator_is_default = True
self._formatter_is_default = True
@property
def locator(self):
return self._locator
@locator.setter
def locator(self, locator):
if not isinstance(locator, mticker.Locator):
raise TypeError('locator must be a subclass of '
'matplotlib.ticker.Locator')
self._locator = locator
@property
def formatter(self):
return self._formatter
@formatter.setter
def formatter(self, formatter):
if not isinstance(formatter, mticker.Formatter):
raise TypeError('formatter must be a subclass of '
'matplotlib.ticker.Formatter')
self._formatter = formatter
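# A small sketch of how the Ticker container above is normally configured
# through the Axis convenience methods; assignment to ``locator``/``formatter``
# is type-checked by the property setters. The helper name is hypothetical and
# ``matplotlib.pyplot`` is an assumption of the sketch.
def _example_ticker_configuration():
    import matplotlib.pyplot as plt
    import matplotlib.ticker as ticker
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(ticker.MultipleLocator(0.25))
    ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.2f}"))
    # Assigning anything that is not a Locator/Formatter raises TypeError
    # via the Ticker property setters.
    return fig, ax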
class _LazyTickList:
"""
A descriptor for lazy instantiation of tick lists.
See comment above definition of the ``majorTicks`` and ``minorTicks``
attributes.
"""
def __init__(self, major):
self._major = major
def __get__(self, instance, cls):
if instance is None:
return self
else:
# instance._get_tick() can itself try to access the majorTicks
# attribute (e.g. in certain projection classes which override
# e.g. get_xaxis_text1_transform). In order to avoid infinite
# recursion, first set the majorTicks on the instance to an empty
# list, then create the tick and append it.
if self._major:
instance.majorTicks = []
tick = instance._get_tick(major=True)
instance.majorTicks.append(tick)
return instance.majorTicks
else:
instance.minorTicks = []
tick = instance._get_tick(major=False)
instance.minorTicks.append(tick)
return instance.minorTicks
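# The descriptor above builds the first Tick only when the attribute is first
# read and then caches the list on the instance, so unused tick lists are never
# constructed. A generic, self-contained sketch of the same caching-descriptor
# pattern follows; the class and names below are illustrative only and are not
# used elsewhere in matplotlib.
class _LazyListSketch:
    def __init__(self, factory_name):
        self._factory_name = factory_name
    def __set_name__(self, owner, name):
        self._attr = name
    def __get__(self, instance, cls):
        if instance is None:
            return self
        # Shadow the (non-data) descriptor with an instance attribute first,
        # so the factory can safely touch the attribute without recursing.
        setattr(instance, self._attr, [])
        getattr(instance, self._attr).append(
            getattr(instance, self._factory_name)())
        return getattr(instance, self._attr)
# e.g. ``items = _LazyListSketch("_make_item")`` as a class attribute, where
# the owning class defines a ``_make_item()`` method.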
class Axis(martist.Artist):
"""
Base class for `.XAxis` and `.YAxis`.
Attributes
----------
isDefault_label : bool
axes : `matplotlib.axes.Axes`
The `~.axes.Axes` to which the Axis belongs.
major : `matplotlib.axis.Ticker`
Determines the major tick positions and their label format.
minor : `matplotlib.axis.Ticker`
Determines the minor tick positions and their label format.
callbacks : `matplotlib.cbook.CallbackRegistry`
label : `.Text`
The axis label.
labelpad : float
The distance between the axis label and the tick labels.
Defaults to :rc:`axes.labelpad` = 4.
offsetText : `.Text`
A `.Text` object containing the data offset of the ticks (if any).
pickradius : float
The acceptance radius for containment tests. See also `.Axis.contains`.
majorTicks : list of `.Tick`
The major ticks.
minorTicks : list of `.Tick`
The minor ticks.
"""
OFFSETTEXTPAD = 3
def __str__(self):
return "{}({},{})".format(
type(self).__name__, *self.axes.transAxes.transform((0, 0)))
def __init__(self, axes, pickradius=15):
"""
Parameters
----------
axes : `matplotlib.axes.Axes`
The `~.axes.Axes` to which the created Axis belongs.
pickradius : float
The acceptance radius for containment tests. See also
`.Axis.contains`.
"""
super().__init__()
self._remove_overlapping_locs = True
self.set_figure(axes.figure)
self.isDefault_label = True
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry()
self._autolabelpos = True
self.label = mtext.Text(
np.nan, np.nan,
fontsize=mpl.rcParams['axes.labelsize'],
fontweight=mpl.rcParams['axes.labelweight'],
color=mpl.rcParams['axes.labelcolor'],
)
self._set_artist_props(self.label)
self.offsetText = mtext.Text(np.nan, np.nan)
self._set_artist_props(self.offsetText)
self.labelpad = mpl.rcParams['axes.labelpad']
self.pickradius = pickradius
# Initialize here for testing; later add API
self._major_tick_kw = dict()
self._minor_tick_kw = dict()
self.clear()
self._set_scale('linear')
@property
def isDefault_majloc(self):
return self.major._locator_is_default
@isDefault_majloc.setter
def isDefault_majloc(self, value):
self.major._locator_is_default = value
@property
def isDefault_majfmt(self):
return self.major._formatter_is_default
@isDefault_majfmt.setter
def isDefault_majfmt(self, value):
self.major._formatter_is_default = value
@property
def isDefault_minloc(self):
return self.minor._locator_is_default
@isDefault_minloc.setter
def isDefault_minloc(self, value):
self.minor._locator_is_default = value
@property
def isDefault_minfmt(self):
return self.minor._formatter_is_default
@isDefault_minfmt.setter
def isDefault_minfmt(self, value):
self.minor._formatter_is_default = value
# During initialization, Axis objects often create ticks that are later
# unused; this turns out to be a very slow step. Instead, use a custom
# descriptor to make the tick lists lazy and instantiate them as needed.
majorTicks = _LazyTickList(major=True)
minorTicks = _LazyTickList(major=False)
def get_remove_overlapping_locs(self):
return self._remove_overlapping_locs
def set_remove_overlapping_locs(self, val):
self._remove_overlapping_locs = bool(val)
remove_overlapping_locs = property(
get_remove_overlapping_locs, set_remove_overlapping_locs,
doc=('If minor ticker locations that overlap with major '
'ticker locations should be trimmed.'))
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label.
By default, the x coordinate of the y label and the y coordinate of the
x label are determined by the tick label bounding boxes, but this can
lead to poor alignment of multiple labels if there are multiple axes.
You can also specify the coordinate system of the label with the
transform. If None, the default coordinate system will be the axes
coordinate system: (0, 0) is bottom left, (0.5, 0.5) is center, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
self.stale = True
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
"""Return this Axis' scale (as a str)."""
return self._scale.name
def _set_scale(self, value, **kwargs):
if not isinstance(value, mscale.ScaleBase):
self._scale = mscale.scale_factory(value, self, **kwargs)
else:
self._scale = value
self._scale.set_default_locators_and_formatters(self)
self.isDefault_majloc = True
self.isDefault_minloc = True
self.isDefault_majfmt = True
self.isDefault_minfmt = True
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
return [self.label, self.offsetText,
*self.get_major_ticks(), *self.get_minor_ticks()]
def _reset_major_tick_kw(self):
self._major_tick_kw.clear()
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
def _reset_minor_tick_kw(self):
self._minor_tick_kw.clear()
self._minor_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
def clear(self):
"""
Clear the axis.
This resets axis properties to their default values:
- the label
- the scale
- locators, formatters and ticks
- major and minor grid
- units
- registered callbacks
"""
self.label.set_text('') # self.set_label_text would change isDefault_
self._set_scale('linear')
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry()
# whether the grids are on
self._major_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'major'))
self._minor_tick_kw['gridOn'] = (
mpl.rcParams['axes.grid'] and
mpl.rcParams['axes.grid.which'] in ('both', 'minor'))
self.reset_ticks()
self.converter = None
self.units = None
self.set_units(None)
self.stale = True
@_api.deprecated("3.4", alternative="`.Axis.clear`")
def cla(self):
"""Clear this axis."""
return self.clear()
def reset_ticks(self):
"""
Re-initialize the major and minor Tick lists.
Each list starts with a single fresh Tick.
"""
# Restore the lazy tick lists.
try:
del self.majorTicks
except AttributeError:
pass
try:
del self.minorTicks
except AttributeError:
pass
try:
self.set_clip_path(self.axes.patch)
except AttributeError:
pass
def set_tick_params(self, which='major', reset=False, **kw):
"""
Set appearance parameters for ticks, ticklabels, and gridlines.
For documentation of keyword arguments, see
:meth:`matplotlib.axes.Axes.tick_params`.
"""
_api.check_in_list(['major', 'minor', 'both'], which=which)
kwtrans = self._translate_tick_kw(kw)
# the kwargs are stored in self._major/minor_tick_kw so that any
# future new ticks will automatically get them
if reset:
if which in ['major', 'both']:
self._reset_major_tick_kw()
self._major_tick_kw.update(kwtrans)
if which in ['minor', 'both']:
self._reset_minor_tick_kw()
self._minor_tick_kw.update(kwtrans)
self.reset_ticks()
else:
if which in ['major', 'both']:
self._major_tick_kw.update(kwtrans)
for tick in self.majorTicks:
tick._apply_params(**kwtrans)
if which in ['minor', 'both']:
self._minor_tick_kw.update(kwtrans)
for tick in self.minorTicks:
tick._apply_params(**kwtrans)
# labelOn and labelcolor also apply to the offset text.
if 'label1On' in kwtrans or 'label2On' in kwtrans:
self.offsetText.set_visible(
self._major_tick_kw.get('label1On', False)
or self._major_tick_kw.get('label2On', False))
if 'labelcolor' in kwtrans:
self.offsetText.set_color(kwtrans['labelcolor'])
self.stale = True
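    # A short usage sketch (``ax`` from ``plt.subplots()`` is an assumption):
    # both spellings below funnel into set_tick_params, with public names such
    # as ``length``/``direction`` translated to the internal ``size``/``tickdir``
    # keys by _translate_tick_kw.
    #
    #     ax.tick_params(axis='x', which='major', length=8, direction='out',
    #                    labelsize=9, colors='0.3')
    #     ax.xaxis.set_tick_params(which='minor', length=3, labelbottom=False)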
@staticmethod
def _translate_tick_kw(kw):
# The following lists may be moved to a more accessible location.
kwkeys = ['size', 'width', 'color', 'tickdir', 'pad',
'labelsize', 'labelcolor', 'zorder', 'gridOn',
'tick1On', 'tick2On', 'label1On', 'label2On',
'length', 'direction', 'left', 'bottom', 'right', 'top',
'labelleft', 'labelbottom', 'labelright', 'labeltop',
'labelrotation'] + _gridline_param_names
kwtrans = {}
if 'length' in kw:
kwtrans['size'] = kw.pop('length')
if 'direction' in kw:
kwtrans['tickdir'] = kw.pop('direction')
if 'rotation' in kw:
kwtrans['labelrotation'] = kw.pop('rotation')
if 'left' in kw:
kwtrans['tick1On'] = kw.pop('left')
if 'bottom' in kw:
kwtrans['tick1On'] = kw.pop('bottom')
if 'right' in kw:
kwtrans['tick2On'] = kw.pop('right')
if 'top' in kw:
kwtrans['tick2On'] = kw.pop('top')
if 'labelleft' in kw:
kwtrans['label1On'] = kw.pop('labelleft')
if 'labelbottom' in kw:
kwtrans['label1On'] = kw.pop('labelbottom')
if 'labelright' in kw:
kwtrans['label2On'] = kw.pop('labelright')
if 'labeltop' in kw:
kwtrans['label2On'] = kw.pop('labeltop')
if 'colors' in kw:
c = kw.pop('colors')
kwtrans['color'] = c
kwtrans['labelcolor'] = c
# Maybe move the checking up to the caller of this method.
for key in kw:
if key not in kwkeys:
raise ValueError(
"keyword %s is not recognized; valid keywords are %s"
% (key, kwkeys))
kwtrans.update(kw)
return kwtrans
def set_clip_path(self, clippath, transform=None):
super().set_clip_path(clippath, transform)
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
self.stale = True
def get_view_interval(self):
"""Return the ``(min, max)`` view limits of this axis."""
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
"""
Set the axis view limits. This method is for internal use; Matplotlib
users should typically use e.g. `~.Axes.set_xlim` or `~.Axes.set_ylim`.
If *ignore* is False (the default), this method will never reduce the
preexisting view limits, only expand them if *vmin* or *vmax* are not
within them. Moreover, the order of *vmin* and *vmax* does not matter;
the orientation of the axis will not change.
If *ignore* is True, the view limits will be set exactly to ``(vmin,
vmax)`` in that order.
"""
raise NotImplementedError('Derived must override')
def get_data_interval(self):
"""Return the ``(min, max)`` data limits of this axis."""
raise NotImplementedError('Derived must override')
def set_data_interval(self, vmin, vmax, ignore=False):
"""
Set the axis data limits. This method is for internal use.
If *ignore* is False (the default), this method will never reduce the
preexisting data limits, only expand them if *vmin* or *vmax* are not
within them. Moreover, the order of *vmin* and *vmax* does not matter;
the orientation of the axis will not change.
If *ignore* is True, the data limits will be set exactly to ``(vmin,
vmax)`` in that order.
"""
raise NotImplementedError('Derived must override')
def get_inverted(self):
"""
Return whether this Axis is oriented in the "inverse" direction.
The "normal" direction is increasing to the right for the x-axis and to
the top for the y-axis; the "inverse" direction is increasing to the
left for the x-axis and to the bottom for the y-axis.
"""
low, high = self.get_view_interval()
return high < low
def set_inverted(self, inverted):
"""
Set whether this Axis is oriented in the "inverse" direction.
The "normal" direction is increasing to the right for the x-axis and to
the top for the y-axis; the "inverse" direction is increasing to the
left for the x-axis and to the bottom for the y-axis.
"""
# Currently, must be implemented in subclasses using set_xlim/set_ylim
# rather than generically using set_view_interval, so that shared
# axes get updated as well.
raise NotImplementedError('Derived must override')
def set_default_intervals(self):
"""
Set the default limits for the axis data and view interval if they
        have not been mutated yet.
"""
# this is mainly in support of custom object plotting. For
# example, if someone passes in a datetime object, we do not
# know automagically how to set the default min/max of the
# data and view limits. The unit conversion AxisInfo
# interface provides a hook for custom types to register
# default limits through the AxisInfo.default_limits
# attribute, and the derived code below will check for that
# and use it if it's available (else just use 0..1)
def _set_artist_props(self, a):
if a is None:
return
a.set_figure(self.figure)
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticks_to_draw = self._update_ticks()
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def _update_ticks(self):
"""
Update ticks (position and labels) using the current data interval of
the axes. Return the list of ticks that will be drawn.
"""
major_locs = self.get_majorticklocs()
major_labels = self.major.formatter.format_ticks(major_locs)
major_ticks = self.get_major_ticks(len(major_locs))
self.major.formatter.set_locs(major_locs)
for tick, loc, label in zip(major_ticks, major_locs, major_labels):
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
minor_locs = self.get_minorticklocs()
minor_labels = self.minor.formatter.format_ticks(minor_locs)
minor_ticks = self.get_minor_ticks(len(minor_locs))
self.minor.formatter.set_locs(minor_locs)
for tick, loc, label in zip(minor_ticks, minor_locs, minor_labels):
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
ticks = [*major_ticks, *minor_ticks]
view_low, view_high = self.get_view_interval()
if view_low > view_high:
view_low, view_high = view_high, view_low
interval_t = self.get_transform().transform([view_low, view_high])
ticks_to_draw = []
for tick in ticks:
try:
loc_t = self.get_transform().transform(tick.get_loc())
except AssertionError:
# transforms.transform doesn't allow masked values but
# some scales might make them, so we need this try/except.
pass
else:
if mtransforms._interval_contains_close(interval_t, loc_t):
ticks_to_draw.append(tick)
return ticks_to_draw
def _get_tick_bboxes(self, ticks, renderer):
"""Return lists of bboxes for ticks' label1's and label2's."""
return ([tick.label1.get_window_extent(renderer)
for tick in ticks if tick.label1.get_visible()],
[tick.label2.get_window_extent(renderer)
for tick in ticks if tick.label2.get_visible()])
def get_tightbbox(self, renderer, *, for_layout_only=False):
"""
        Return a bounding box that encloses the axis. It only accounts for
        tick labels, axis label, and offsetText.
If *for_layout_only* is True, then the width of the label (if this
is an x-axis) or the height of the label (if this is a y-axis) is
collapsed to near zero. This allows tight/constrained_layout to ignore
too-long labels when doing their layout.
"""
if not self.get_visible():
return
ticks_to_draw = self._update_ticks()
self._update_label_position(renderer)
# go back to just this axis's tick labels
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(
ticks_to_draw, renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
bboxes = [
*(a.get_window_extent(renderer)
for a in [self.offsetText]
if a.get_visible()),
*ticklabelBoxes,
*ticklabelBoxes2,
]
# take care of label
if self.label.get_visible():
bb = self.label.get_window_extent(renderer)
# for constrained/tight_layout, we want to ignore the label's
# width/height because the adjustments they make can't be improved.
# this code collapses the relevant direction
if for_layout_only:
if self.axis_name == "x" and bb.width > 0:
bb.x0 = (bb.x0 + bb.x1) / 2 - 0.5
bb.x1 = bb.x0 + 1.0
if self.axis_name == "y" and bb.height > 0:
bb.y0 = (bb.y0 + bb.y1) / 2 - 0.5
bb.y1 = bb.y0 + 1.0
bboxes.append(bb)
bboxes = [b for b in bboxes
if 0 < b.width < np.inf and 0 < b.height < np.inf]
if bboxes:
return mtransforms.Bbox.union(bboxes)
else:
return None
def get_tick_padding(self):
values = []
if len(self.majorTicks):
values.append(self.majorTicks[0].get_tick_padding())
if len(self.minorTicks):
values.append(self.minorTicks[0].get_tick_padding())
return max(values, default=0)
@martist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
# docstring inherited
if not self.get_visible():
return
renderer.open_group(__name__, gid=self.get_gid())
ticks_to_draw = self._update_ticks()
ticklabelBoxes, ticklabelBoxes2 = self._get_tick_bboxes(ticks_to_draw,
renderer)
for tick in ticks_to_draw:
tick.draw(renderer)
        # Scale up the axis label box to also find the neighbors, not just the
        # tick labels that actually overlap. Note that we need a *copy* of the
        # axis label box because we don't want to scale the actual bbox.
self._update_label_position(renderer)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text(self.major.formatter.get_offset())
self.offsetText.draw(renderer)
renderer.close_group(__name__)
self.stale = False
def get_gridlines(self):
r"""Return this Axis' grid lines as a list of `.Line2D`\s."""
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline',
[tick.gridline for tick in ticks])
def get_label(self):
"""Return the axis label as a Text instance."""
return self.label
def get_offset_text(self):
"""Return the axis offsetText as a Text instance."""
return self.offsetText
def get_pickradius(self):
"""Return the depth of the axis used by the picker."""
return self.pickradius
def get_majorticklabels(self):
"""Return this Axis' major tick labels, as a list of `~.text.Text`."""
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]
labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]
return labels1 + labels2
def get_minorticklabels(self):
"""Return this Axis' minor tick labels, as a list of `~.text.Text`."""
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1.get_visible()]
labels2 = [tick.label2 for tick in ticks if tick.label2.get_visible()]
return labels1 + labels2
def get_ticklabels(self, minor=False, which=None):
"""
Get this Axis' tick labels.
Parameters
----------
minor : bool
Whether to return the minor or the major ticklabels.
which : None, ('minor', 'major', 'both')
            Overrides *minor*.
            Selects which ticklabels to return.
Returns
-------
list of `~matplotlib.text.Text`
Notes
-----
The tick label strings are not populated until a ``draw`` method has
been called.
See also: `~.pyplot.draw` and `~.FigureCanvasBase.draw`.
"""
if which is not None:
if which == 'minor':
return self.get_minorticklabels()
elif which == 'major':
return self.get_majorticklabels()
elif which == 'both':
return self.get_majorticklabels() + self.get_minorticklabels()
else:
_api.check_in_list(['major', 'minor', 'both'], which=which)
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
r"""Return this Axis' major tick lines as a list of `.Line2D`\s."""
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
r"""Return this Axis' minor tick lines as a list of `.Line2D`\s."""
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
r"""Return this Axis' tick lines as a list of `.Line2D`\s."""
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"""Return this Axis' major tick locations in data coordinates."""
return self.major.locator()
def get_minorticklocs(self):
"""Return this Axis' minor tick locations in data coordinates."""
# Remove minor ticks duplicating major ticks.
major_locs = self.major.locator()
minor_locs = self.minor.locator()
transform = self._scale.get_transform()
tr_minor_locs = transform.transform(minor_locs)
tr_major_locs = transform.transform(major_locs)
lo, hi = sorted(transform.transform(self.get_view_interval()))
# Use the transformed view limits as scale. 1e-5 is the default rtol
# for np.isclose.
tol = (hi - lo) * 1e-5
if self.remove_overlapping_locs:
minor_locs = [
loc for loc, tr_loc in zip(minor_locs, tr_minor_locs)
if ~np.isclose(tr_loc, tr_major_locs, atol=tol, rtol=0).any()]
return minor_locs
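    # Worked sketch of the overlap filter above (values illustrative): with
    # major locs [0, 1, 2] and minor locs [0.5, 1.0, 1.5] on a linear scale,
    # the minor loc 1.0 falls within *tol* of a major loc and is dropped, so
    # only [0.5, 1.5] survive while remove_overlapping_locs is True, roughly
    #
    #     keep = [m for m in minor_locs
    #             if not np.isclose(m, major_locs, atol=tol, rtol=0).any()]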
def get_ticklocs(self, *, minor=False):
"""Return this Axis' tick locations in data coordinates."""
return self.get_minorticklocs() if minor else self.get_majorticklocs()
def get_ticks_direction(self, minor=False):
"""
Get the tick directions as a numpy array
Parameters
----------
minor : bool, default: False
True to return the minor tick directions,
False to return the major tick directions.
Returns
-------
numpy array of tick directions
"""
if minor:
return np.array(
[tick._tickdir for tick in self.get_minor_ticks()])
else:
return np.array(
[tick._tickdir for tick in self.get_major_ticks()])
def _get_tick(self, major):
"""Return the default tick instance."""
raise NotImplementedError('derived must override')
def _get_tick_label_size(self, axis_name):
"""
Return the text size of tick labels for this Axis.
This is a convenience function to avoid having to create a `Tick` in
`.get_tick_space`, since it is expensive.
"""
tick_kw = self._major_tick_kw
size = tick_kw.get('labelsize',
mpl.rcParams[f'{axis_name}tick.labelsize'])
return mtext.FontProperties(size=size).get_size_in_points()
def _copy_tick_props(self, src, dest):
"""Copy the properties from *src* tick to *dest* tick."""
if src is None or dest is None:
return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
def get_label_text(self):
"""Get the text of the label."""
return self.label.get_text()
def get_major_locator(self):
"""Get the locator of the major ticker."""
return self.major.locator
def get_minor_locator(self):
"""Get the locator of the minor ticker."""
return self.minor.locator
def get_major_formatter(self):
"""Get the formatter of the major ticker."""
return self.major.formatter
def get_minor_formatter(self):
"""Get the formatter of the minor ticker."""
return self.minor.formatter
def get_major_ticks(self, numticks=None):
r"""Return the list of major `.Tick`\s."""
if numticks is None:
numticks = len(self.get_majorticklocs())
while len(self.majorTicks) < numticks:
# Update the new tick label properties from the old.
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
self._copy_tick_props(self.majorTicks[0], tick)
return self.majorTicks[:numticks]
def get_minor_ticks(self, numticks=None):
r"""Return the list of minor `.Tick`\s."""
if numticks is None:
numticks = len(self.get_minorticklocs())
while len(self.minorTicks) < numticks:
# Update the new tick label properties from the old.
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
self._copy_tick_props(self.minorTicks[0], tick)
return self.minorTicks[:numticks]
@_api.rename_parameter("3.5", "b", "visible")
def grid(self, visible=None, which='major', **kwargs):
"""
Configure the grid lines.
Parameters
----------
visible : bool or None
Whether to show the grid lines. If any *kwargs* are supplied, it
is assumed you want the grid on and *visible* will be set to True.
If *visible* is *None* and there are no *kwargs*, this toggles the
visibility of the lines.
which : {'major', 'minor', 'both'}
The grid lines to apply the changes on.
**kwargs : `.Line2D` properties
Define the line properties of the grid, e.g.::
grid(color='r', linestyle='-', linewidth=2)
"""
if kwargs:
if visible is None:
visible = True
elif not visible: # something false-like but not None
_api.warn_external('First parameter to grid() is false, '
'but line properties are supplied. The '
'grid will be enabled.')
visible = True
which = which.lower()
_api.check_in_list(['major', 'minor', 'both'], which=which)
gridkw = {'grid_' + item[0]: item[1] for item in kwargs.items()}
if which in ['minor', 'both']:
gridkw['gridOn'] = (not self._minor_tick_kw['gridOn']
if visible is None else visible)
self.set_tick_params(which='minor', **gridkw)
if which in ['major', 'both']:
gridkw['gridOn'] = (not self._major_tick_kw['gridOn']
if visible is None else visible)
self.set_tick_params(which='major', **gridkw)
self.stale = True
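    # Usage sketch (``ax`` is an assumption): toggling and styling grid lines
    # per axis; extra kwargs are forwarded as ``grid_*`` tick parameters.
    #
    #     ax.yaxis.grid(True, which='major', color='0.8', linestyle='--')
    #     ax.xaxis.grid(False, which='minor')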
def update_units(self, data):
"""
Introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True*
if *data* is registered for unit conversion.
"""
converter = munits.registry.get_converter(data)
if converter is None:
return False
neednew = self.converter != converter
self.converter = converter
default = self.converter.default_units(data, self)
if default is not None and self.units is None:
self.set_units(default)
elif neednew:
self._update_axisinfo()
self.stale = True
return True
def _update_axisinfo(self):
"""
Check the axis converter for the stored units to see if the
axis info needs to be updated.
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units, self)
if info is None:
return
if info.majloc is not None and \
self.major.locator != info.majloc and self.isDefault_majloc:
self.set_major_locator(info.majloc)
self.isDefault_majloc = True
if info.minloc is not None and \
self.minor.locator != info.minloc and self.isDefault_minloc:
self.set_minor_locator(info.minloc)
self.isDefault_minloc = True
if info.majfmt is not None and \
self.major.formatter != info.majfmt and self.isDefault_majfmt:
self.set_major_formatter(info.majfmt)
self.isDefault_majfmt = True
if info.minfmt is not None and \
self.minor.formatter != info.minfmt and self.isDefault_minfmt:
self.set_minor_formatter(info.minfmt)
self.isDefault_minfmt = True
if info.label is not None and self.isDefault_label:
self.set_label_text(info.label)
self.isDefault_label = True
self.set_default_intervals()
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
# If x is natively supported by Matplotlib, doesn't need converting
if munits._is_natively_supported(x):
return x
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
return x
try:
ret = self.converter.convert(x, self.units, self)
except Exception as e:
raise munits.ConversionError('Failed to convert value(s) to axis '
f'units: {x!r}') from e
return ret
def set_units(self, u):
"""
Set the units for axis.
Parameters
----------
u : units tag
Notes
-----
The units of any shared axis will also be updated.
"""
if u == self.units:
return
for name, axis in self.axes._get_axis_map().items():
if self is axis:
shared = [
getattr(ax, f"{name}axis")
for ax
in self.axes._shared_axes[name].get_siblings(self.axes)]
break
else:
shared = [self]
for axis in shared:
axis.units = u
axis._update_axisinfo()
axis.callbacks.process('units')
axis.callbacks.process('units finalize')
axis.stale = True
def get_units(self):
"""Return the units for axis."""
return self.units
def set_label_text(self, label, fontdict=None, **kwargs):
"""
Set the text value of the axis label.
Parameters
----------
label : str
Text string.
fontdict : dict
Text properties.
**kwargs
Merged into fontdict.
"""
self.isDefault_label = False
self.label.set_text(label)
if fontdict is not None:
self.label.update(fontdict)
self.label.update(kwargs)
self.stale = True
return self.label
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker.
In addition to a `~matplotlib.ticker.Formatter` instance,
this also accepts a ``str`` or function.
For a ``str`` a `~matplotlib.ticker.StrMethodFormatter` is used.
The field used for the value must be labeled ``'x'`` and the field used
for the position must be labeled ``'pos'``.
See the `~matplotlib.ticker.StrMethodFormatter` documentation for
more information.
For a function, a `~matplotlib.ticker.FuncFormatter` is used.
The function must take two inputs (a tick value ``x`` and a
position ``pos``), and return a string containing the corresponding
tick label.
See the `~matplotlib.ticker.FuncFormatter` documentation for
more information.
Parameters
----------
formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
"""
self._set_formatter(formatter, self.major)
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker.
In addition to a `~matplotlib.ticker.Formatter` instance,
this also accepts a ``str`` or function.
See `.Axis.set_major_formatter` for more information.
Parameters
----------
formatter : `~matplotlib.ticker.Formatter`, ``str``, or function
"""
self._set_formatter(formatter, self.minor)
def _set_formatter(self, formatter, level):
if isinstance(formatter, str):
formatter = mticker.StrMethodFormatter(formatter)
# Don't allow any other TickHelper to avoid easy-to-make errors,
# like using a Locator instead of a Formatter.
elif (callable(formatter) and
not isinstance(formatter, mticker.TickHelper)):
formatter = mticker.FuncFormatter(formatter)
else:
_api.check_isinstance(mticker.Formatter, formatter=formatter)
if (isinstance(formatter, mticker.FixedFormatter)
and len(formatter.seq) > 0
and not isinstance(level.locator, mticker.FixedLocator)):
_api.warn_external('FixedFormatter should only be used together '
'with FixedLocator')
if level == self.major:
self.isDefault_majfmt = False
else:
self.isDefault_minfmt = False
level.formatter = formatter
formatter.set_axis(self)
self.stale = True
def set_major_locator(self, locator):
"""
Set the locator of the major ticker.
Parameters
----------
locator : `~matplotlib.ticker.Locator`
"""
_api.check_isinstance(mticker.Locator, locator=locator)
self.isDefault_majloc = False
self.major.locator = locator
if self.major.formatter:
self.major.formatter._set_locator(locator)
locator.set_axis(self)
self.stale = True
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker.
Parameters
----------
locator : `~matplotlib.ticker.Locator`
"""
_api.check_isinstance(mticker.Locator, locator=locator)
self.isDefault_minloc = False
self.minor.locator = locator
if self.minor.formatter:
self.minor.formatter._set_locator(locator)
locator.set_axis(self)
self.stale = True
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker.
Parameters
----------
pickradius : float
"""
self.pickradius = pickradius
# Helper for set_ticklabels. Defining it here makes it pickleable.
@staticmethod
def _format_with_dict(tickd, x, pos):
return tickd.get(x, "")
def set_ticklabels(self, ticklabels, *, minor=False, **kwargs):
r"""
Set the text values of the tick labels.
.. admonition:: Discouraged
The use of this method is discouraged, because of the dependency
on tick positions. In most cases, you'll want to use
``set_[x/y]ticks(positions, labels)`` instead.
If you are using this method, you should always fix the tick
positions before, e.g. by using `.Axis.set_ticks` or by explicitly
setting a `~.ticker.FixedLocator`. Otherwise, ticks are free to
move and the labels may end up in unexpected positions.
Parameters
----------
ticklabels : sequence of str or of `.Text`\s
Texts for labeling each tick location in the sequence set by
`.Axis.set_ticks`; the number of labels must match the number of
locations.
minor : bool
If True, set minor ticks instead of major ticks.
**kwargs
Text properties.
Returns
-------
list of `.Text`\s
For each tick, includes ``tick.label1`` if it is visible, then
``tick.label2`` if it is visible, in that order.
"""
ticklabels = [t.get_text() if hasattr(t, 'get_text') else t
for t in ticklabels]
locator = (self.get_minor_locator() if minor
else self.get_major_locator())
if isinstance(locator, mticker.FixedLocator):
# Passing [] as a list of ticklabels is often used as a way to
# remove all tick labels, so only error for > 0 ticklabels
if len(locator.locs) != len(ticklabels) and len(ticklabels) != 0:
raise ValueError(
"The number of FixedLocator locations"
f" ({len(locator.locs)}), usually from a call to"
" set_ticks, does not match"
f" the number of ticklabels ({len(ticklabels)}).")
tickd = {loc: lab for loc, lab in zip(locator.locs, ticklabels)}
func = functools.partial(self._format_with_dict, tickd)
formatter = mticker.FuncFormatter(func)
else:
formatter = mticker.FixedFormatter(ticklabels)
if minor:
self.set_minor_formatter(formatter)
locs = self.get_minorticklocs()
ticks = self.get_minor_ticks(len(locs))
else:
self.set_major_formatter(formatter)
locs = self.get_majorticklocs()
ticks = self.get_major_ticks(len(locs))
ret = []
for pos, (loc, tick) in enumerate(zip(locs, ticks)):
tick.update_position(loc)
tick_label = formatter(loc, pos)
# deal with label1
tick.label1.set_text(tick_label)
tick.label1.update(kwargs)
# deal with label2
tick.label2.set_text(tick_label)
tick.label2.update(kwargs)
# only return visible tick labels
if tick.label1.get_visible():
ret.append(tick.label1)
if tick.label2.get_visible():
ret.append(tick.label2)
self.stale = True
return ret
    # Wrapper around set_ticklabels used to generate Axes.set_x/yticklabels; can
# go away once the API of Axes.set_x/yticklabels becomes consistent.
def _set_ticklabels(self, labels, *, fontdict=None, minor=False, **kwargs):
"""
Set this Axis' labels with list of string labels.
.. warning::
This method should only be used after fixing the tick positions
using `.Axis.set_ticks`. Otherwise, the labels may end up in
unexpected positions.
Parameters
----------
labels : list of str
The label texts.
fontdict : dict, optional
A dictionary controlling the appearance of the ticklabels.
The default *fontdict* is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
minor : bool, default: False
Whether to set the minor ticklabels rather than the major ones.
Returns
-------
list of `.Text`
The labels.
Other Parameters
----------------
**kwargs : `~.text.Text` properties.
"""
if fontdict is not None:
kwargs.update(fontdict)
return self.set_ticklabels(labels, minor=minor, **kwargs)
def _set_tick_locations(self, ticks, *, minor=False):
# see docstring of set_ticks
# XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
for name, axis in self.axes._get_axis_map().items():
if self is axis:
shared = [
getattr(ax, f"{name}axis")
for ax
in self.axes._shared_axes[name].get_siblings(self.axes)]
break
else:
shared = [self]
for axis in shared:
if len(ticks) > 1:
xleft, xright = axis.get_view_interval()
if xright > xleft:
axis.set_view_interval(min(ticks), max(ticks))
else:
axis.set_view_interval(max(ticks), min(ticks))
self.axes.stale = True
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator(mticker.FixedLocator(ticks))
return self.get_major_ticks(len(ticks))
def set_ticks(self, ticks, labels=None, *, minor=False, **kwargs):
"""
Set this Axis' tick locations and optionally labels.
If necessary, the view limits of the Axis are expanded so that all
given ticks are visible.
Parameters
----------
ticks : list of floats
List of tick locations.
labels : list of str, optional
List of tick labels. If not set, the labels show the data value.
minor : bool, default: False
If ``False``, set the major ticks; if ``True``, the minor ticks.
**kwargs
`.Text` properties for the labels. These take effect only if you
pass *labels*. In other cases, please use `~.Axes.tick_params`.
Notes
-----
The mandatory expansion of the view limits is an intentional design
choice to prevent the surprise of a non-visible tick. If you need
other limits, you should set the limits explicitly after setting the
ticks.
"""
result = self._set_tick_locations(ticks, minor=minor)
if labels is not None:
self.set_ticklabels(labels, minor=minor, **kwargs)
return result
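    # Usage sketch (``ax`` is an assumption): fixing positions and labels in
    # one call, which is the pattern the set_ticklabels docstring recommends.
    #
    #     ax.xaxis.set_ticks([0, 0.5, 1.0], labels=['lo', 'mid', 'hi'])
    #     ax.yaxis.set_ticks([0.1, 0.2], minor=True)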
def _get_tick_boxes_siblings(self, renderer):
"""
Get the bounding boxes for this `.axis` and its siblings
as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.
By default it just gets bboxes for self.
"""
# Get the Grouper keeping track of x or y label groups for this figure.
axis_names = [
name for name, axis in self.axes._get_axis_map().items()
if name in self.figure._align_label_groups and axis is self]
if len(axis_names) != 1:
return [], []
axis_name, = axis_names
grouper = self.figure._align_label_groups[axis_name]
bboxes = []
bboxes2 = []
# If we want to align labels from other axes:
for ax in grouper.get_siblings(self.axes):
axis = getattr(ax, f"{axis_name}axis")
ticks_to_draw = axis._update_ticks()
tlb, tlb2 = axis._get_tick_bboxes(ticks_to_draw, renderer)
bboxes.extend(tlb)
bboxes2.extend(tlb2)
return bboxes, bboxes2
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine.
"""
raise NotImplementedError('Derived must override')
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset text position based on the sequence of bounding
boxes of all the ticklabels.
"""
raise NotImplementedError('Derived must override')
def axis_date(self, tz=None):
"""
Set up axis ticks and labels to treat data along this Axis as dates.
Parameters
----------
tz : str or `datetime.tzinfo`, default: :rc:`timezone`
The timezone used to create date labels.
"""
# By providing a sample datetime instance with the desired timezone,
# the registered converter can be selected, and the "units" attribute,
# which is the timezone, can be set.
if isinstance(tz, str):
import dateutil.tz
tz = dateutil.tz.gettz(tz)
self.update_units(datetime.datetime(2009, 1, 1, 0, 0, 0, 0, tz))
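    # Usage sketch (``ax`` is an assumption; the timezone string is purely
    # illustrative):
    #
    #     ax.xaxis.axis_date('Europe/London')  # or pass a datetime.tzinfo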
def get_tick_space(self):
"""Return the estimated number of ticks that can fit on the axis."""
# Must be overridden in the subclass
raise NotImplementedError()
def _get_ticks_position(self):
"""
Helper for `XAxis.get_ticks_position` and `YAxis.get_ticks_position`.
Check the visibility of tick1line, label1, tick2line, and label2 on
the first major and the first minor ticks, and return
- 1 if only tick1line and label1 are visible (which corresponds to
"bottom" for the x-axis and "left" for the y-axis);
- 2 if only tick2line and label2 are visible (which corresponds to
"top" for the x-axis and "right" for the y-axis);
- "default" if only tick1line, tick2line and label1 are visible;
- "unknown" otherwise.
"""
major = self.majorTicks[0]
minor = self.minorTicks[0]
if all(tick.tick1line.get_visible()
and not tick.tick2line.get_visible()
and tick.label1.get_visible()
and not tick.label2.get_visible()
for tick in [major, minor]):
return 1
elif all(tick.tick2line.get_visible()
and not tick.tick1line.get_visible()
and tick.label2.get_visible()
and not tick.label1.get_visible()
for tick in [major, minor]):
return 2
elif all(tick.tick1line.get_visible()
and tick.tick2line.get_visible()
and tick.label1.get_visible()
and not tick.label2.get_visible()
for tick in [major, minor]):
return "default"
else:
return "unknown"
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
Parameters
----------
position : {'top', 'bottom'}
"""
raise NotImplementedError()
def get_minpos(self):
raise NotImplementedError()
def _make_getset_interval(method_name, lim_name, attr_name):
"""
Helper to generate ``get_{data,view}_interval`` and
``set_{data,view}_interval`` implementations.
"""
def getter(self):
# docstring inherited.
return getattr(getattr(self.axes, lim_name), attr_name)
def setter(self, vmin, vmax, ignore=False):
# docstring inherited.
if ignore:
setattr(getattr(self.axes, lim_name), attr_name, (vmin, vmax))
else:
oldmin, oldmax = getter(self)
if oldmin < oldmax:
setter(self, min(vmin, vmax, oldmin), max(vmin, vmax, oldmax),
ignore=True)
else:
setter(self, max(vmin, vmax, oldmin), min(vmin, vmax, oldmax),
ignore=True)
self.stale = True
getter.__name__ = f"get_{method_name}_interval"
setter.__name__ = f"set_{method_name}_interval"
return getter, setter
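# Behavioral sketch of the generated setters (values illustrative): with an
# existing view interval of (0, 10), ``set_view_interval(4, 6)`` keeps (0, 10)
# because limits are only ever expanded, while ``set_view_interval(4, 6,
# ignore=True)`` replaces them with (4, 6). An inverted interval such as
# (10, 0) is expanded toward (max, min) instead, so its orientation is kept.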
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x' #: Read-only name identifying the axis.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in axes coords, y in display coords (to be updated at draw time by
# _update_label_positions and _update_offset_text_position).
self.label.set(
x=0.5, y=0,
verticalalignment='top', horizontalalignment='center',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
)
self.label_position = 'bottom'
self.offsetText.set(
x=1, y=0,
verticalalignment='top', horizontalalignment='right',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
fontsize=mpl.rcParams['xtick.labelsize'],
color=mpl.rcParams['xtick.color'],
)
self.offset_text_position = 'bottom'
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the x axis."""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform((x, y))
except ValueError:
return False, {}
(l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
inaxis = 0 <= xaxes <= 1 and (
b - self.pickradius < y < b or
t < y < t + self.pickradius)
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return XTick(self.axes, 0, major=major, **tick_kw)
def set_label_position(self, position):
"""
Set the label position (top or bottom)
Parameters
----------
position : {'top', 'bottom'}
"""
self.label.set_verticalalignment(_api.check_getitem({
'top': 'baseline', 'bottom': 'top',
}, position=position))
self.label_position = position
self.stale = True
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_xlabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.label.get_position()
if self.label_position == 'bottom':
try:
spine = self.axes.spines['bottom']
spinebbox = spine.get_window_extent()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
bottom = bbox.y0
self.label.set_position(
(x, bottom - self.labelpad * self.figure.dpi / 72)
)
else:
try:
spine = self.axes.spines['top']
spinebbox = spine.get_window_extent()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
top = bbox.y1
self.label.set_position(
(x, top + self.labelpad * self.figure.dpi / 72)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, y = self.offsetText.get_position()
if not hasattr(self, '_tick_position'):
self._tick_position = 'bottom'
if self._tick_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
y = bottom - self.OFFSETTEXTPAD * self.figure.dpi / 72
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
y = top + self.OFFSETTEXTPAD * self.figure.dpi / 72
self.offsetText.set_position((x, y))
def get_text_heights(self, renderer):
"""
Return how much space should be reserved for text above and below the
axes, as a pair of floats.
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position.
Parameters
----------
position : {'top', 'bottom', 'both', 'default', 'none'}
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at bottom. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
"""
_api.check_in_list(['top', 'bottom', 'both', 'default', 'none'],
position=position)
if position == 'top':
self.set_tick_params(which='both', top=True, labeltop=True,
bottom=False, labelbottom=False)
self._tick_position = 'top'
self.offsetText.set_verticalalignment('bottom')
elif position == 'bottom':
self.set_tick_params(which='both', top=False, labeltop=False,
bottom=True, labelbottom=True)
self._tick_position = 'bottom'
self.offsetText.set_verticalalignment('top')
elif position == 'both':
self.set_tick_params(which='both', top=True,
bottom=True)
elif position == 'none':
self.set_tick_params(which='both', top=False,
bottom=False)
elif position == 'default':
self.set_tick_params(which='both', top=True, labeltop=False,
bottom=True, labelbottom=True)
self._tick_position = 'bottom'
self.offsetText.set_verticalalignment('top')
else:
assert False, "unhandled parameter not caught by _check_in_list"
self.stale = True
def tick_top(self):
"""
Move ticks and ticklabels (if present) to the top of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('top')
# If labels were turned off before this was called, leave them off.
self.set_tick_params(which='both', labeltop=label)
def tick_bottom(self):
"""
Move ticks and ticklabels (if present) to the bottom of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('bottom')
# If labels were turned off before this was called, leave them off.
self.set_tick_params(which='both', labelbottom=label)
def get_ticks_position(self):
"""
Return the ticks position ("top", "bottom", "default", or "unknown").
"""
return {1: "bottom", 2: "top",
"default": "default", "unknown": "unknown"}[
self._get_ticks_position()]
get_view_interval, set_view_interval = _make_getset_interval(
"view", "viewLim", "intervalx")
get_data_interval, set_data_interval = _make_getset_interval(
"data", "dataLim", "intervalx")
def get_minpos(self):
return self.axes.dataLim.minposx
def set_inverted(self, inverted):
# docstring inherited
a, b = self.get_view_interval()
# cast to bool to avoid bad interaction between python 3.8 and np.bool_
self.axes.set_xlim(sorted((a, b), reverse=bool(inverted)), auto=None)
def set_default_intervals(self):
# docstring inherited
# only change view if dataLim has not changed and user has
# not changed the view:
if (not self.axes.dataLim.mutatedx() and
not self.axes.viewLim.mutatedx()):
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
xmin, xmax = self.convert_units(info.default_limits)
self.axes.viewLim.intervalx = xmin, xmax
self.stale = True
def get_tick_space(self):
ends = mtransforms.Bbox.from_bounds(0, 0, 1, 1)
ends = ends.transformed(self.axes.transAxes -
self.figure.dpi_scale_trans)
length = ends.width * 72
# There is a heuristic here that the aspect ratio of tick text
# is no more than 3:1
size = self._get_tick_label_size('x') * 3
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
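# Worked example of the heuristic above (numbers illustrative): a 6.4-inch wide
# axes spans 6.4 * 72 = 460.8 points; with a 10 pt tick label size the per-tick
# budget is 3 * 10 = 30 pt, giving floor(460.8 / 30) = 15 ticks.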
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y' #: Read-only name identifying the axis.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# x in display coords, y in axes coords (to be updated at draw time by
# _update_label_positions and _update_offset_text_position).
self.label.set(
x=0, y=0.5,
verticalalignment='bottom', horizontalalignment='center',
rotation='vertical', rotation_mode='anchor',
transform=mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes),
)
self.label_position = 'left'
# x in axes coords, y in display coords(!).
self.offsetText.set(
x=0, y=0.5,
verticalalignment='baseline', horizontalalignment='left',
transform=mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()),
fontsize=mpl.rcParams['ytick.labelsize'],
color=mpl.rcParams['ytick.color'],
)
self.offset_text_position = 'left'
def contains(self, mouseevent):
# docstring inherited
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
x, y = mouseevent.x, mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes, yaxes = trans.transform((x, y))
except ValueError:
return False, {}
(l, b), (r, t) = self.axes.transAxes.transform([(0, 0), (1, 1)])
inaxis = 0 <= yaxes <= 1 and (
l - self.pickradius < x < l or
r < x < r + self.pickradius)
return inaxis, {}
def _get_tick(self, major):
if major:
tick_kw = self._major_tick_kw
else:
tick_kw = self._minor_tick_kw
return YTick(self.axes, 0, major=major, **tick_kw)
def set_label_position(self, position):
"""
Set the label position (left or right)
Parameters
----------
position : {'left', 'right'}
"""
self.label.set_rotation_mode('anchor')
self.label.set_verticalalignment(_api.check_getitem({
'left': 'bottom', 'right': 'top',
}, position=position))
self.label_position = position
self.stale = True
def _update_label_position(self, renderer):
"""
Update the label position based on the bounding box enclosing
all the ticklabels and axis spine
"""
if not self._autolabelpos:
return
# get bounding boxes for this axis and any siblings
# that have been set by `fig.align_ylabels()`
bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer)
x, y = self.label.get_position()
if self.label_position == 'left':
try:
spine = self.axes.spines['left']
spinebbox = spine.get_window_extent()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes + [spinebbox])
left = bbox.x0
self.label.set_position(
(left - self.labelpad * self.figure.dpi / 72, y)
)
else:
try:
spine = self.axes.spines['right']
spinebbox = spine.get_window_extent()
except KeyError:
# use axes if spine doesn't exist
spinebbox = self.axes.bbox
bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox])
right = bbox.x1
self.label.set_position(
(right + self.labelpad * self.figure.dpi / 72, y)
)
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x, _ = self.offsetText.get_position()
if 'outline' in self.axes.spines:
# Special case for colorbars:
bbox = self.axes.spines['outline'].get_window_extent()
else:
bbox = self.axes.bbox
top = bbox.ymax
self.offsetText.set_position(
(x, top + self.OFFSETTEXTPAD * self.figure.dpi / 72)
)
def set_offset_position(self, position):
"""
Parameters
----------
position : {'left', 'right'}
"""
x, y = self.offsetText.get_position()
x = _api.check_getitem({'left': 0, 'right': 1}, position=position)
self.offsetText.set_ha(position)
self.offsetText.set_position((x, y))
self.stale = True
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position.
Parameters
----------
position : {'left', 'right', 'both', 'default', 'none'}
'both' sets the ticks to appear on both positions, but does not
change the tick labels. 'default' resets the tick positions to
the default: ticks on both positions, labels at left. 'none'
can be used if you don't want any ticks. 'none' and 'both'
affect only the ticks, not the labels.
"""
_api.check_in_list(['left', 'right', 'both', 'default', 'none'],
position=position)
if position == 'right':
self.set_tick_params(which='both', right=True, labelright=True,
left=False, labelleft=False)
self.set_offset_position(position)
elif position == 'left':
self.set_tick_params(which='both', right=False, labelright=False,
left=True, labelleft=True)
self.set_offset_position(position)
elif position == 'both':
self.set_tick_params(which='both', right=True,
left=True)
elif position == 'none':
self.set_tick_params(which='both', right=False,
left=False)
elif position == 'default':
self.set_tick_params(which='both', right=True, labelright=False,
left=True, labelleft=True)
else:
assert False, "unhandled parameter not caught by _check_in_list"
self.stale = True
def tick_right(self):
"""
Move ticks and ticklabels (if present) to the right of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('right')
# if labels were turned off before this was called
# leave them off
self.set_tick_params(which='both', labelright=label)
def tick_left(self):
"""
Move ticks and ticklabels (if present) to the left of the axes.
"""
label = True
if 'label1On' in self._major_tick_kw:
label = (self._major_tick_kw['label1On']
or self._major_tick_kw['label2On'])
self.set_ticks_position('left')
# if labels were turned off before this was called
# leave them off
self.set_tick_params(which='both', labelleft=label)
def get_ticks_position(self):
"""
Return the ticks position ("left", "right", "default", or "unknown").
"""
return {1: "left", 2: "right",
"default": "default", "unknown": "unknown"}[
self._get_ticks_position()]
get_view_interval, set_view_interval = _make_getset_interval(
"view", "viewLim", "intervaly")
get_data_interval, set_data_interval = _make_getset_interval(
"data", "dataLim", "intervaly")
def get_minpos(self):
return self.axes.dataLim.minposy
def set_inverted(self, inverted):
# docstring inherited
a, b = self.get_view_interval()
# cast to bool to avoid bad interaction between python 3.8 and np.bool_
self.axes.set_ylim(sorted((a, b), reverse=bool(inverted)), auto=None)
def set_default_intervals(self):
# docstring inherited
# only change view if dataLim has not changed and user has
# not changed the view:
if (not self.axes.dataLim.mutatedy() and
not self.axes.viewLim.mutatedy()):
if self.converter is not None:
info = self.converter.axisinfo(self.units, self)
if info.default_limits is not None:
ymin, ymax = self.convert_units(info.default_limits)
self.axes.viewLim.intervaly = ymin, ymax
self.stale = True
def get_tick_space(self):
ends = mtransforms.Bbox.from_bounds(0, 0, 1, 1)
ends = ends.transformed(self.axes.transAxes -
self.figure.dpi_scale_trans)
length = ends.height * 72
# Having a spacing of at least 2 just looks good.
size = self._get_tick_label_size('y') * 2
if size > 0:
return int(np.floor(length / size))
else:
return 2**31 - 1
|
[
"edimaudo@gmail.com"
] |
edimaudo@gmail.com
|
5da3f71feaeac4a7a571de67e65acb80eb2d2dde
|
a502e9070dfac3237537a4891d2a7e80132bd053
|
/pages/urls.py
|
2dc9c8e4f8f2065a9af7ea984ba35d90fa143ac8
|
[] |
no_license
|
frobledoruiz/altair_project
|
d89c63486e99da71ed1ac1b299f65d821694ed16
|
93ad742b4ccfc6786bcb714e46f8198b77c7424f
|
refs/heads/master
| 2020-04-19T01:46:29.725275
| 2019-02-01T23:11:02
| 2019-02-01T23:11:02
| 167,881,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
from django.urls import path
from pages import views
urlpatterns = [
path('', views.index, name='index'),
]
|
[
"frobledo.email@gmail.com"
] |
frobledo.email@gmail.com
|
6a311dfbbbe9624f0c9bfa5422aeef3bb2e41331
|
d0ca2af0ab117f60a8b756b73b1fd857dbc34dc4
|
/3b.py
|
2ae5982f7840515ee75b099f3314d898c7e5469f
|
[] |
no_license
|
k3rainboe/pr
|
badc69791d170ea46864d175aeb7f7a9aa1af161
|
d606b1758fe058cb37a3f25d7b29361bf5637ccb
|
refs/heads/master
| 2020-08-18T03:41:28.941838
| 2019-10-31T19:01:37
| 2019-10-31T19:01:37
| 215,744,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,376
|
py
|
import random
import time
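# Simulation of a sliding-window ARQ protocol: the sender transmits every frame
# in the current window, while the receiver randomly loses or damages frames and
# acknowledgements, forcing the sender to retransmit after a timeout.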
arr=[]
send=[]
recv=[]
recv_ack=[]
def input1():
n=int(input('enter no. of bits for sequence :'))
m=2**n
fsai=int(m/2)
t=0
for i in range(0,m):
arr.append(t)
t=(t+1)%m
for i in range(0,fsai):
send.append(arr[i])
recv.append(arr[i])
recv_ack.append('n')
sender(fsai,m)
def sender(fsai,m):
for i in range(0,fsai):
if(recv_ack[i]=='n'):
print("SENDER : Frame {} is sent".format(send[i]))
receiver(fsai,m)
def receiver(fsai,m):
rw=sw=fsai
time.sleep(1)
a=[i for i in range(0,10)]
for i in range(0,fsai):
if(recv_ack[i]=='n'):
f=random.choice(a)
if(f!=5):
print('frame correctly received {}'.format(recv[i]))
a1=[k for k in range(0,5)]
f1=random.choice(a1)
if(f1==3):
print("(acknowledgement {} lost)".format(send[i]))
print('sender timeouts-->Resend the frame')
else:
print("(acknowledgement {} recieved)".format(send[i]))
recv_ack[i]='p'
else:
a1=[k for k in range(0,2)]
f2=random.choice(a1)
if(f2==0):
print('frame {} lost'.format(send[i]))
print('RECEIVER : Negative Acknowledgement {} sent'.format(send[i]))
else:
print('frame {} damaged'.format(send[i]))
print('(SENDER TIMEOUTS-->RESEND THE FRAME)')
recv_ack[i]='n'
print('do you want to continue')
a=input()
if(a=='y'):
sender(fsai,m)
else:
return
input1()
|
[
"noreply@github.com"
] |
k3rainboe.noreply@github.com
|
edbe29bfe51002fe53683c262c5b9ebe9709db64
|
9bab378be677cf83a4a70a109af0c58e655503dc
|
/apps/pages/views.py
|
f6ffea58e8550bdcf6b18e0ccaedb8b4f94daae4
|
[
"MIT"
] |
permissive
|
MeirKriheli/debian.org.il
|
a13de0d7fb829bf3035b4b8dd4c01560861544d9
|
90bff955c38f7e6e51647463c6a59701302de8e1
|
refs/heads/master
| 2021-11-09T17:39:42.247589
| 2021-11-02T11:36:04
| 2021-11-02T11:36:04
| 90,523,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
from django.views.generic import DetailView
from django.shortcuts import get_object_or_404
from .models import Page
class PageView(DetailView):
model = Page
def get_object(self, queryset=None):
slug = self.kwargs.get('slug')
if not slug:
slug = 'index'
return get_object_or_404(self.get_queryset(), slug=slug)
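# Illustrative URL wiring (an assumption, not part of this file): mapping both the
# site root and a slug path to this view lets the missing slug fall back to 'index':
#   path('', PageView.as_view(), name='index')
#   path('<slug:slug>/', PageView.as_view(), name='page')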
|
[
"mkriheli@gmail.com"
] |
mkriheli@gmail.com
|
aca3235daa0b017f6399339b3cb5fee47e6737cb
|
68cab239fc647184d8bd53651bb02303fcf3f26c
|
/samples/routing/routing_utils.py
|
60a09360c303bcafdb7d5cd9fa9c932ad250aad5
|
[] |
no_license
|
omnetpp/omnetpp
|
0c91b230bf937701de95f42f2383318ed39add07
|
e42d35366ec3e354a528b17d0fe15ef1f0ac7b55
|
refs/heads/master
| 2023-08-19T22:10:18.311271
| 2023-08-18T09:18:23
| 2023-08-18T13:18:11
| 160,219,599
| 507
| 160
| null | 2023-09-05T18:48:09
| 2018-12-03T16:18:31
|
C
|
UTF-8
|
Python
| false
| false
| 6,612
|
py
|
import cppyy
from omnetpp.runtime import *
def readDataFile(fileName):
with open(fileName, "r") as f:
return [float(s) for s in f.read().split(",")]
def build_networkx_graph(g, nodeType, namePrefix="node", parentModule=None, gateName=None, **channelArgs):
"""
Builds a network using the topology provided in the form of a networkx.Graph.
"""
# resolve nodeType
if type(nodeType) == str:
nodeType = omnetpp.cModuleType.get(nodeType)
elif not isinstance(nodeType, omnetpp.cModuleType):
raise TypeError(f"invalid type {type(nodeType)} for nodeType argument, should be cModuleType or string")
# resolve parentModule
if not parentModule:
parentModule = omnetpp.cSimulation.getActiveSimulation().getSystemModule()
if not parentModule:
raise ValueError(f"simulation must already contain the top-level module")
elif type(parentModule) == str:
parentModule = omnetpp.cSimulation.getActiveSimulation().getModuleByPath(parentModule)
elif not isinstance(parentModule, omnetpp.cModule):
raise TypeError(f"invalid type {type(parentModule)} for parentModule argument, should be cModule, string or None")
# build network
n = g.number_of_nodes()
nodes = [nodeType.create(namePrefix+str(i), parentModule) for i in range(n)]
for edge in g.edges():
srcNode, destNode = nodes[edge[0]], nodes[edge[1]]
edgeAttrs = g.get_edge_data(*edge)
connectNodes(srcNode, destNode, srcGateName=gateName, destGateName=gateName, **(channelArgs | edgeAttrs))
for node in nodes:
node.buildInside()
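# Hypothetical usage sketch (the node type, gate name, and channel delay are assumptions):
#   import networkx as nx
#   g = nx.erdos_renyi_graph(10, 0.3)
#   build_networkx_graph(g, "Node", namePrefix="rte", gateName="port", delay="10ms")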
def connectNodes(srcNode, destNode, srcGateName=None, destGateName=None, **channelArgs):
"""
Connects two nodes in both ways.
"""
connectGates(srcNode, destNode, srcGate=srcGateName, destGate=destGateName, **channelArgs)
connectGates(destNode, srcNode, destGate=destGateName, srcGate=srcGateName, **channelArgs)
def connectGates(srcNode=None, destNode=None, srcGate=None, destGate=None, srcGateIndex=None, destGateIndex=None, **channelArgs):
"""
Connects two gates, figuring out the details.
Parameters:
- `srcNode`, `destNode` (cModule or string): the source and destination of the connection
- `srcGate`, `destGate` (cGate, string or None): the gates or gate names.
- TODO
"""
def resolveModule(module, gate):
if isinstance(module, omnetpp.cModule):
return module
elif type(module) == str:
return omnetpp.cSimulation.getActiveSimulation().getModuleByPath(module)
elif module is None:
# gate parameter MUST contain a cGate object
return gate.getOwnerModule()
else:
raise TypeError(f"invalid type {type(module)} for srcNode/destNode argument, should be cModule, string or None")
srcNode = resolveModule(srcNode, srcGate)
destNode = resolveModule(destNode, destGate)
areSiblings = srcNode.getParentModule() == destNode.getParentModule()
def resolveGate(node, gate, gateIndex, gateType):
if isinstance(gate, omnetpp.cGate):
return gate
elif areSiblings:
return resolveGateToConnect(node, gate, gateIndex, gateType=gateType, inside=False, expand=True)
else:
assert False #TODO
srcGate = resolveGate(srcNode, srcGate, srcGateIndex, omnetpp.cGate.OUTPUT)
destGate = resolveGate(destNode, destGate, destGateIndex, omnetpp.cGate.INPUT)
channel = createChannel(**channelArgs) if channelArgs else cppyy.nullptr
srcGate.connectTo(destGate, channel)
def resolveGateToConnect(module=None, gateName=None, gateIndex=None, gateType=None, inside=False, expand=True):
"""
Utility for connectGates().
"""
if module is None:
raise RuntimeError("module not specified")
gateNameSuffix = 0
if gateName is None:
names = module.getGateNames(gateType)
inoutNames = module.getGateNames(omnetpp.cGate.INOUT)
numNames = len(names) + len(inoutNames)
if numNames == 0:
raise RuntimeError("has no gate of the appropriate type")
elif numNames > 1:
raise RuntimeError("ambiguity: has more than one gate of the appropriate type")
if names:
gateName = names[0]
else:
gateName = inoutNames[0]
gateNameSuffix = "i" if gateType == omnetpp.cGate.INPUT else "o"
if type(gateName) == cppyy.gbl.std.string:
gateName = str(gateName)
if gateIndex is not None and not module.isGateVector(gateName):
raise RuntimeError("gate index specified for scalar gate")
if gateIndex is None and module.isGateVector(gateName):
gateIndex = module.getOrCreateFirstUnconnectedGatePairIndex(gateName, inside, expand) if gateNameSuffix \
else module.getOrCreateFirstUnconnectedGateIndex(gateName, gateNameSuffix, inside, expand)
gateNameWithSuffix = gateName + "$" + gateNameSuffix if gateNameSuffix else gateName
return module.gate(gateNameWithSuffix, gateIndex)
def createChannel(channelType=None, name=None, displayString=None, **channelParams):
"""
Utility for connectGates().
"""
if name is None:
name = "channel"
if type(channelType) == str:
channelType = omnetpp.cChannelType.get(channelType)
if channelType is not None:
channel = channelType.create(name)
elif ("datarate" in channelParams) or ("ber" in channelParams) or ("per" in channelParams):
channel = omnetpp.cDatarateChannel.create(name)
elif "delay" in channelParams:
channel = omnetpp.cDelayChannel.create(name)
else:
channel = omnetpp.cIdealChannel.create(name)
if displayString is not None:
channel.setDisplayString(displayString)
for name, value in channelParams.items():
param = channel.par(name)
if type(value) == str and param.getType() != omnetpp.cPar.STRING:
param.parse(value)
elif param.getType() == omnetpp.cPar.BOOL:
param.setBoolValue(value)
elif param.getType() == omnetpp.cPar.INT:
param.setIntValue(value)
elif param.getType() == omnetpp.cPar.DOUBLE:
param.setDoubleValue(value)
elif param.getType() == omnetpp.cPar.STRING:
param.setStringValue(value)
elif param.getType() == omnetpp.cPar.XML:
param.setXmlValue(value)
elif param.getType() == omnetpp.cPar.OBJECT:
param.setObjectValue(value)
else:
assert False
return channel
|
[
"andras@omnetpp.org"
] |
andras@omnetpp.org
|
757de3e1477d9ee15b1e69143bad027615a786d3
|
25b430d7c67aa351b032ad962c73b836dfd19fe5
|
/prac_05/emails.py
|
db53967fe453f38151782767b6a57082cd66e0b4
|
[] |
no_license
|
brightlee93/cp1404practicals
|
0bc2283886b2bf956e8a0e8b254da25ad6aad771
|
d7a83237a9b6364d55bf01e9a6b4e81b5c19ada3
|
refs/heads/master
| 2023-01-05T14:46:03.808118
| 2020-11-01T08:28:47
| 2020-11-01T08:28:47
| 287,468,002
| 0
| 0
| null | 2020-09-24T08:30:50
| 2020-08-14T07:09:23
|
Python
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
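# Reads email addresses until a blank entry, derives each person's name from the
# address (asking the user to confirm or correct it), then prints "Name (email)" pairs.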
email_to_name = {}
email = input("Email: ")
while email != "":
name, domain = email.split("@")
if "." in name:
first_name, last_name = name.split(".")
name = first_name + " " + last_name
name_check = input("Is your name {0}? (Y/n) ".format(name.title())).lower()
while name_check == "y" or name_check == "yes" or name_check == "":
email_to_name[name.title()] = email
name_check = 0
email = input("Email: ")
while name_check == "n" or name_check == "no":
name = input("Name: ")
email_to_name[name.title()] = email
name_check = 0
email = input("Email: ")
for name, email in email_to_name.items():
print("{0} ({1})".format(name, email))
# for name in email_to_name:
# print("{0} ({1})".format(name, email_to_name[name]))
|
[
"brightlee93@gmail.com"
] |
brightlee93@gmail.com
|
3a8bf29d67ceedcabc0e6c47775fd97a8aa47577
|
c46eeb35b31cb248e30bccf923602a95d6accf67
|
/visualization.py
|
fc214579057cff63212e8ec479f421fe6951e1e5
|
[] |
no_license
|
chunyuanY/MusicGenre
|
d209c725b04712ae89d11253a92d1f740d474b03
|
592a1fe48579b4bc51368626910385a07607db37
|
refs/heads/master
| 2022-02-13T09:13:16.455123
| 2022-01-24T03:08:03
| 2022-01-24T03:08:03
| 220,592,257
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
import pickle
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.preprocessing import Normalizer
path = "./data/douban_music/"
composer_dic = pickle.load(open(path + "composer_dic.pkl", mode='rb'))
audience_dic = pickle.load(open(path + "audience_dic.pkl", mode='rb'))
X_composer = [
'Michael Jackson',
'Avril Lavigne',
'Lady Gaga',
'Justin Bieber',
'Leona Lewis',
'Lana Del Rey',
'Whitney Houston',
'Taylor Swift',
'Tori Amos',
'Mary J Blige',
'Tom Waits',
'Sam Smith',
'Marilyn Manson',
'Mariah Carey',
'YUI',
'Neil Young',
'Keith Urban',
'Justin Timberlake',
'John Mayer',
'Jason Mraz'
]
X_cid = [composer_dic[c] for c in X_composer]
composer_embedding = pickle.load(open(path + "composer_embedding.pkl", mode='rb'))
style_embedding = pickle.load(open(path + "style_embedding.pkl", mode='rb'))
X_c_ = composer_embedding[X_cid]
X_style = style_embedding
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x)) #
return e_x / e_x.sum(axis=1, keepdims=1)
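# Sanity check (illustrative): softmax(np.array([[1.0, 2.0, 3.0]])) is roughly
# [[0.090, 0.245, 0.665]]; every row of the output sums to 1.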
X_c_s = softmax(X_c_.dot(X_style.T))
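# X_c_s: one row per selected composer, one column per style embedding; each row is a
# probability distribution over styles, visualised in the heatmap below.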
# np.random.seed(0)
label_dict = pickle.load(open(path + "label_dic.pkl", 'rb'))
label_name = [0] * len(label_dict)
for k, v in label_dict.items():
label_name[v] = k
# label_dict = [item for item in label_name]
# data = pickle.load(open("label_embedding.pkl", 'rb'))
# data = data.cpu().detach().numpy().T
#
# scaler = Normalizer(norm='l2')
# scaler.fit(data)
# data = scaler.transform(data)
# XX= np.dot(data,data.T)
print(X_c_s.shape)
sns.set()
ax = sns.heatmap(X_c_s, square=True, cmap='Blues', vmin=0, vmax=0.4)
# Set the orientation of the axis tick labels
ax.set_xticklabels(label_name)
ax.set_yticklabels(X_composer)
label_y = ax.get_yticklabels()
plt.setp(label_y, rotation=360, horizontalalignment='right')
label_x = ax.get_xticklabels()
plt.setp(label_x, rotation=90, horizontalalignment='center')
plt.show()
|
[
"chunyuanY93@gmail.com"
] |
chunyuanY93@gmail.com
|
ef33156702d8c126d2a1d8362deb1499e2c197fa
|
de17634e6b149d5828c1c78f7f5f5e1f6c17c4d0
|
/nnvm/tvm/python/tvm/stmt.py
|
1f5fea11a4720e53ffb88878adc1a3087e753d40
|
[
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
starimpact/mxnet_v1.0.0
|
e135cc9e4c2711314d03cf1281a72b755f53144e
|
fcd6f7398ef811c3f8b01e7c9c16fb25c8d202bd
|
refs/heads/bv1.0.0
| 2022-11-10T09:09:11.966942
| 2018-07-13T04:59:30
| 2018-07-13T04:59:30
| 120,399,107
| 8
| 4
|
Apache-2.0
| 2022-11-02T20:24:32
| 2018-02-06T03:54:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
"""Statement AST Node in TVM.
Users do not need to deal with AST nodes directly,
but they can be helpful for developers doing quick prototyping.
While not displayed in the documentation and Python files,
each statement node has subfields that can be visited from the Python side.
.. code-block:: python
x = tvm.var("n")
a = tvm.var("array", tvm.handle)
st = tvm.make.Store(a, x + 1, 1)
assert isinstance(st, tvm.stmt.Store)
assert(st.buffer_var == a)
"""
from __future__ import absolute_import as _abs
from ._ffi.node import NodeBase, register_node
class Stmt(NodeBase):
pass
@register_node
class LetStmt(Stmt):
pass
@register_node
class AssertStmt(Stmt):
pass
@register_node
class ProducerConsumer(Stmt):
pass
@register_node
class For(Stmt):
Serial = 0
Parallel = 1
Vectorized = 2
Unrolled = 3
@register_node
class Store(Stmt):
pass
@register_node
class Provide(Stmt):
pass
@register_node
class Allocate(Stmt):
pass
@register_node
class AttrStmt(Stmt):
pass
@register_node
class Free(Stmt):
pass
@register_node
class Realize(Stmt):
pass
@register_node
class Block(Stmt):
pass
@register_node
class IfThenElse(Stmt):
pass
@register_node
class Evaluate(Stmt):
pass
@register_node
class Prefetch(Stmt):
pass
|
[
"mingzhang@deepglint.com"
] |
mingzhang@deepglint.com
|