blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eb2a00823d54942b073c34b8e71e91ea83f0d77b | a5d82d6da2bc6a950c43cd70ba35cb6e22174a07 | /DjangoRV/urls.py | 99342009c3ad39187aed19f27142ce80ad757fcb | [] | no_license | cryptopotluck/Django-RV | 3f1688d236141bb85e449cd4e0d98280418b584b | d70b3a1b6227aa89075e7607645199ddaf923c09 | refs/heads/master | 2020-04-29T06:49:01.159122 | 2019-03-16T05:34:38 | 2019-03-16T05:34:39 | 175,930,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | """DjangoRV URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# URL routing table for the DjangoRV project.
# NOTE(review): `settings` and `static` are imported above but unused here —
# DEBUG-mode media serving was perhaps intended; confirm with the author.
urlpatterns = [
    # Django admin interface.
    path('admin/', admin.site.urls),
    # Site root: delegate to the staticpages app's URLconf.
    path('', include('staticpages.urls')),
    # RV booking pages, mounted under /booking/.
    path('booking/', include('booking.urls')),
]
| [
"ajkiesewetter@gmail.com"
] | ajkiesewetter@gmail.com |
d0ec0186ae9afc856f78b53b6439831f4865b158 | 4ea43f3f79ad483d83238d88572feb822f451372 | /philo/migrations/0004_auto__del_field_attribute_json_value.py | 2cfc222a490142fcde0e59222b460dd47760eab8 | [
"ISC"
] | permissive | kgodey/philo | c8c433d44b2f31121f13bd0ee101605be11fe9da | c19bf577d44606d2b284e6058d633f4a174b61cc | refs/heads/master | 2020-12-29T02:54:11.746966 | 2011-05-24T21:57:47 | 2011-05-24T21:57:47 | 686,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,789 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: drop the ``json_value`` column."""
        # Deleting field 'Attribute.json_value'
        db.delete_column('philo_attribute', 'json_value')
    def backwards(self, orm):
        """Reverse the migration: re-create ``json_value`` as a TextField.

        ``default=''`` backfills existing rows; ``keep_default=False`` is
        South's generated-migration convention for not persisting the default.
        """
        # Adding field 'Attribute.json_value'
        db.add_column('philo_attribute', 'json_value', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'philo.attribute': {
'Meta': {'unique_together': "(('key', 'entity_content_type', 'entity_object_id'),)", 'object_name': 'Attribute'},
'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('philo.models.fields.JSONField', [], {})
},
'philo.collection': {
'Meta': {'object_name': 'Collection'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.collectionmember': {
'Meta': {'object_name': 'CollectionMember'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['philo.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'member_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'member_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'philo.contentlet': {
'Meta': {'object_name': 'Contentlet'},
'content': ('philo.models.fields.TemplateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentlets'", 'to': "orm['philo.Page']"})
},
'philo.contentreference': {
'Meta': {'object_name': 'ContentReference'},
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentreferences'", 'to': "orm['philo.Page']"})
},
'philo.file': {
'Meta': {'object_name': 'File'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.node': {
'Meta': {'object_name': 'Node'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Node']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'view_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'node_view_set'", 'to': "orm['contenttypes.ContentType']"}),
'view_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'philo.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['philo.Template']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.redirect': {
'Meta': {'object_name': 'Redirect'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_code': ('django.db.models.fields.IntegerField', [], {'default': '302'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'philo.relationship': {
'Meta': {'unique_together': "(('key', 'entity_content_type', 'entity_object_id'),)", 'object_name': 'Relationship'},
'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relationship_entity_set'", 'to': "orm['contenttypes.ContentType']"}),
'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'relationship_value_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'value_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'philo.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'philo.template': {
'Meta': {'object_name': 'Template'},
'code': ('philo.models.fields.TemplateField', [], {}),
'documentation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "'text/html'", 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Template']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['philo']
| [
"stephen.r.burrows@gmail.com"
] | stephen.r.burrows@gmail.com |
e57a2eac0d5aae324db2f1f3da2271d06bba059e | 44f216cc3bb4771c8186349013ff0ed1abc98ea6 | /torchgen/shape_functions/gen_jit_shape_functions.py | d25539d3fa2cd35023e6989e6bb93576a1c69ef5 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | eiphy/pytorch | a8fc21a3c0552b392ed8c3a1d69f7ed8660c56ac | 104f0bf09ec7609d1c5626a7d7953ade4f8c9007 | refs/heads/master | 2022-05-23T02:10:13.158924 | 2022-05-07T21:26:00 | 2022-05-07T21:26:00 | 244,914,898 | 2 | 0 | NOASSERTION | 2020-03-04T14:00:53 | 2020-03-04T14:00:53 | null | UTF-8 | Python | false | false | 3,366 | py | #!/usr/bin/env python3
import os
from pathlib import Path
from torch.jit._shape_functions import shape_compute_graph_mapping
SHAPE_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python
* torchgen/shape_functions/gen_jit_shape_functions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>
#include <torch/csrc/jit/runtime/serialized_shape_function_registry.h>
#include <torch/csrc/jit/runtime/operator.h>
// clang-format off
namespace torch {
namespace jit {
std::string shape_funcs = ""
"""
DECOMP_CENTER = r"""
const std::string& GetSerializedShapeFunctions() {
return shape_funcs;
}
const OperatorMap<std::string>& GetShapeFunctionMappings() {
static const OperatorMap<std::string> shape_mappings {
"""
DECOMP_END = r"""
};
return shape_mappings;
}
// clang-format on
} // namespace jit
} // namespace torch
"""
SERIALIZED_SHAPE_UTIL_FILE_NAME = "serialized_shape_function_registry.cpp"
def gen_serialized_decompisitions() -> str:
    """Build the C++ expression concatenating all serialized shape functions.

    Deduplicates the scripted shape functions by name, then packs their
    source code into raw-string chunks short enough for MSVC's
    string-literal limit, each wrapped in ``+ std::string(R"=====(...)=====")``.
    """
    # MSVC rejects very long string literals; stay under this size.
    # Technically the limit is higher, but keep a buffer because there are
    # weird rules around some characters.
    # TODO: this was the limit found by googling but it seems way too short?
    max_msft_str_len = 2000

    # Keep only the first scripted function seen for each name.
    seen_names = set()
    deduped = []
    for fn in shape_compute_graph_mapping.values():
        if fn.name not in seen_names:
            seen_names.add(fn.name)
            deduped.append(fn)

    # Greedily pack serialized code into chunks below the length limit.
    chunks = []
    current = ""
    for fn in deduped:
        code = fn.code
        if len(current) + len(code) <= max_msft_str_len:
            current += "\n" + code
        else:
            chunks.append(current)
            current = fn.code
    chunks.append(current)

    # The Windows compiler doesn't correctly handle adjacent string
    # literals, so each chunk becomes an explicit std::string(...) addend.
    pieces = []
    for chunk in chunks:
        pieces.append('+ std::string(R"=====(' + chunk + '\n)=====")\n')
    return "".join(pieces) + ";"
def gen_shape_mappings() -> str:
    """Render the schema -> shape-function-name entries of the C++ OperatorMap."""
    entries = [
        ' {"' + schema + '", "' + fn.name + '"},'
        for schema, fn in shape_compute_graph_mapping.items()
    ]
    return "\n".join(entries)
def write_decomposition_util_file(path: str) -> None:
    """Generate ``serialized_shape_function_registry.cpp`` inside *path*.

    Assembles the header template, the serialized shape-function strings,
    the operator-map entries, and the footer, then writes the result as
    UTF-8 bytes.
    """
    serialized = gen_serialized_decompisitions()
    mappings = gen_shape_mappings()
    parts = [
        SHAPE_HEADER,
        serialized,
        DECOMP_CENTER,
        mappings,
        DECOMP_END,
    ]
    print("writing file to : ", path + "/" + SERIALIZED_SHAPE_UTIL_FILE_NAME)
    with open(os.path.join(path, SERIALIZED_SHAPE_UTIL_FILE_NAME), "wb") as fh:
        fh.write("".join(parts).encode("utf-8"))
def main() -> None:
    """Regenerate the serialized shape-function registry in the pytorch tree.

    Locates ``torch/csrc/jit/runtime`` relative to this script (two
    directories above it is the repository root) and writes the file there.
    """
    repo_root = Path(__file__).resolve().parents[2]
    runtime_dir = repo_root / "torch" / "csrc" / "jit" / "runtime"
    write_decomposition_util_file(str(runtime_dir))
# Script entry point:
#   python torchgen/shape_functions/gen_jit_shape_functions.py
if __name__ == "__main__":
    main()
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
f16e57b0781c6ce1aaf648186b412f2f16a75ec9 | 92963d596f263b04d244fe87d1cad149961c7e39 | /caffe2_tutorial/Basics/test_caffe2.py | fadd2f155da66095dbc4383b8de459a6739f61a8 | [] | no_license | zchen0211/ml_system | 1d993c6f481d269013c4193bbe6de2d178f0b3fb | 7a74656eb8fab559890513ee318cf726654ff44a | refs/heads/master | 2021-01-20T05:19:10.393493 | 2019-03-08T22:45:53 | 2019-03-08T22:45:53 | 101,426,846 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | from caffe2.python import workspace, model_helper
import numpy as np
import glog as log
# --- Workspace basics: feed a tensor into the caffe2 workspace and read it back ---

# Create random tensor of three dimensions
x = np.random.rand(4, 3, 2)
print(x)
print(x.shape)

# Store the array under the blob name "my_x", then fetch it back out.
workspace.FeedBlob("my_x", x)
x2 = workspace.FetchBlob("my_x")
print(x2)

### Nets and Operators
# Create the input data: 16 samples of 100 features each.
data = np.random.rand(16, 100).astype(np.float32)
# Create labels for the data as integers [0, 9].
label = (np.random.rand(16) * 10).astype(np.int32)

workspace.FeedBlob("data", data)
workspace.FeedBlob("label", label)

# Create model using a model helper: a single FC layer + sigmoid + softmax loss.
m = model_helper.ModelHelper(name="my first net")
# Parameters live in param_init_net; Xavier init for weights, zeros for bias.
weight = m.param_init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = m.param_init_net.ConstantFill([], 'fc_b', shape=[10, ])
# Forward graph: FC reads the "data" blob plus the parameter blobs by name.
fc_1 = m.net.FC(["data", "fc_w", "fc_b"], "fc1")
pred = m.net.Sigmoid(fc_1, "pred")
[softmax, loss] = m.net.SoftmaxWithLoss([pred, "label"], ["softmax", "loss"])

print(str(m.net.Proto()))

### Executing
# 1. initialization: add gradient ops, then run the parameter-init net once.
m.AddGradientOperators([loss])
workspace.RunNetOnce(m.param_init_net)
# 2. create the actual training net in the workspace.
workspace.CreateNet(m.net)
# 3. Run it
# Run 100 x 10 iterations, feeding fresh random data/labels each outer step.
for j in range(0, 100):
    data = np.random.rand(16, 100).astype(np.float32)
    label = (np.random.rand(16) * 10).astype(np.int32)

    workspace.FeedBlob("data", data)
    workspace.FeedBlob("label", label)

    workspace.RunNet(m.name, 10)  # run for 10 times

# print(workspace.FetchBlob("softmax"))
log.info('The loss of forward running: %f' % workspace.FetchBlob("loss"))
| [
"chenzhuoyuan07@gmail.com"
] | chenzhuoyuan07@gmail.com |
2f790ce03892c3adfa51150861c24ae3011c33be | 41de4210af23a8a8a3ca7dd090bb51faecf4a0c8 | /lib/python3.5/site-packages/statsmodels/stats/tests/test_diagnostic.py | 66be6e0764adde6802f8edd1f3d81d3962122e7a | [
"Python-2.0"
] | permissive | randybrown-github/ziplineMacOS | 42a0c2bfca2a54baa03d2803dc41317647811285 | eb5872c0903d653e19f259f0800fb7aecee0ee5c | refs/heads/master | 2022-11-07T15:51:39.808092 | 2020-06-18T20:06:42 | 2020-06-18T20:06:42 | 272,631,387 | 0 | 1 | null | 2022-11-02T03:21:45 | 2020-06-16T06:48:53 | Python | UTF-8 | Python | false | false | 44,611 | py | # -*- coding: utf-8 -*-
"""Tests for Regression Diagnostics and Specification Tests
Created on Thu Feb 09 13:19:47 2012
Author: Josef Perktold
License: BSD-3
currently all tests are against R
"""
#import warnings
#warnings.simplefilter("default")
# ResourceWarning doesn't exist in python 2
#warnings.simplefilter("ignore", ResourceWarning)
import os
import numpy as np
import pandas as pd
# skipping some parts
from distutils.version import LooseVersion
PD_GE_17 = LooseVersion(pd.__version__) >= '0.17'
from numpy.testing import (assert_, assert_almost_equal, assert_equal,
assert_approx_equal, assert_allclose,
assert_array_equal)
import pytest
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import json
#import statsmodels.sandbox.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
cur_dir = os.path.abspath(os.path.dirname(__file__))
def compare_t_est(sp, sp_dict, decimal=(14, 14)):
    """Assert a (statistic, pvalue) pair matches reference values.

    *sp* is an indexable with the statistic at [0] and the p-value at [1];
    *sp_dict* holds the expected 'statistic' and 'pvalue'; *decimal* gives
    the comparison precision for each, respectively.
    """
    stat_decimal, pval_decimal = decimal
    assert_almost_equal(sp[0], sp_dict['statistic'], decimal=stat_decimal)
    assert_almost_equal(sp[1], sp_dict['pvalue'], decimal=pval_decimal)
def notyet_atst():
    """Scratch pad of not-yet-wired diagnostic checks against R reference values.

    NOTE(review): most locals here (params, cov_hac_*, het_gq_*, harvey_collier*)
    are intentionally unused reference constants awaiting test implementations;
    only the Goldfeld-Quandt block at the end actually asserts anything.
    """
    d = macrodata.load().data

    # Levels regression (original Greene-style data).
    realinv = d['realinv']
    realgdp = d['realgdp']
    realint = d['realint']

    endog = realinv
    exog = add_constant(np.c_[realgdp, realint])
    res_ols1 = OLS(endog, exog).fit()

    #growth rates
    gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
    gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
    lint = d['realint'][:-1]
    tbilrate = d['tbilrate'][:-1]

    endogg = gs_l_realinv
    exogg = add_constant(np.c_[gs_l_realgdp, lint])
    exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])

    res_ols = OLS(endogg, exogg).fit()
    res_ols2 = OLS(endogg, exogg2).fit()

    #the following were done accidentally with res_ols1 in R,
    #with original Greene data
    params = np.array([-272.3986041341653, 0.1779455206941112,
                       0.2149432424658157])
    # HAC covariance reference values from R's NeweyWest (lags 4 and 10).
    cov_hac_4 = np.array([1321.569466333051, -0.2318836566017612,
        37.01280466875694, -0.2318836566017614, 4.602339488102263e-05,
        -0.0104687835998635, 37.012804668757, -0.0104687835998635,
        21.16037144168061]).reshape(3,3, order='F')

    cov_hac_10 = np.array([2027.356101193361, -0.3507514463299015,
        54.81079621448568, -0.350751446329901, 6.953380432635583e-05,
        -0.01268990195095196, 54.81079621448564, -0.01268990195095195,
        22.92512402151113]).reshape(3,3, order='F')

    #goldfeld-quandt
    het_gq_greater = dict(statistic=13.20512768685082, df1=99, df2=98,
                          pvalue=1.246141976112324e-30, distr='f')
    het_gq_less = dict(statistic=13.20512768685082, df1=99, df2=98, pvalue=1.)
    het_gq_2sided = dict(statistic=13.20512768685082, df1=99, df2=98,
                         pvalue=1.246141976112324e-30, distr='f')

    #goldfeld-quandt, fraction = 0.5
    het_gq_greater_2 = dict(statistic=87.1328934692124, df1=48, df2=47,
                            pvalue=2.154956842194898e-33, distr='f')

    # The only live assertions: Goldfeld-Quandt on the levels regression.
    gq = smsdia.het_goldfeldquandt(endog, exog, split=0.5)
    compare_t_est(gq, het_gq_greater, decimal=(13, 14))
    assert_equal(gq[-1], 'increasing')

    harvey_collier = dict(stat=2.28042114041313, df=199,
                          pvalue=0.02364236161988260, distr='t')

    #hc = harvtest(fm, order.by=ggdp , data = list())
    harvey_collier_2 = dict(stat=0.7516918462158783, df=199,
                            pvalue=0.4531244858006127, distr='t')
##################################
class TestDiagnosticG(object):
@classmethod
def setup_class(cls):
d = macrodata.load().data
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))
lint = d['realint'][:-1]
tbilrate = d['tbilrate'][:-1]
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, lint])
exogg2 = add_constant(np.c_[gs_l_realgdp, tbilrate])
exogg3 = add_constant(np.c_[gs_l_realgdp])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
cls.res = res_ols
cls.res2 = res_ols2
cls.res3 = res_ols3
cls.endog = cls.res.model.endog
cls.exog = cls.res.model.exog
def test_basic(self):
#mainly to check I got the right regression
#> mkarray(fm$coefficients, "params")
params = np.array([-9.48167277465485, 4.3742216647032,
-0.613996969478989])
assert_almost_equal(self.res.params, params, decimal=12)
def test_hac(self):
res = self.res
#> nw = NeweyWest(fm, lag = 4, prewhite = FALSE, verbose=TRUE)
#> nw2 = NeweyWest(fm, lag=10, prewhite = FALSE, verbose=TRUE)
#> mkarray(nw, "cov_hac_4")
cov_hac_4 = np.array([1.385551290884014, -0.3133096102522685,
-0.0597207976835705, -0.3133096102522685, 0.1081011690351306,
0.000389440793564336, -0.0597207976835705, 0.000389440793564339,
0.0862118527405036]).reshape(3,3, order='F')
#> mkarray(nw2, "cov_hac_10")
cov_hac_10 = np.array([1.257386180080192, -0.2871560199899846,
-0.03958300024627573, -0.2871560199899845, 0.1049107028987101,
0.0003896205316866944, -0.03958300024627578, 0.0003896205316866961,
0.0985539340694839]).reshape(3,3, order='F')
cov = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov, cov_hac_4, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
cov = sw.cov_hac_simple(res, nlags=10, use_correction=False)
bse_hac = sw.se_cov(cov)
assert_almost_equal(cov, cov_hac_10, decimal=14)
assert_almost_equal(bse_hac, np.sqrt(np.diag(cov)), decimal=14)
def test_het_goldfeldquandt(self):
#TODO: test options missing
#> gq = gqtest(fm, alternative='greater')
#> mkhtest_f(gq, 'het_gq_greater', 'f')
het_gq_greater = dict(statistic=0.5313259064778423,
pvalue=0.9990217851193723,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='less')
#> mkhtest_f(gq, 'het_gq_less', 'f')
het_gq_less = dict(statistic=0.5313259064778423,
pvalue=0.000978214880627621,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided', 'f')
het_gq_two_sided = dict(statistic=0.5313259064778423,
pvalue=0.001956429761255241,
parameters=(98, 98), distr='f')
#> gq = gqtest(fm, fraction=0.1, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_01', 'f')
het_gq_two_sided_01 = dict(statistic=0.5006976835928314,
pvalue=0.001387126702579789,
parameters=(88, 87), distr='f')
#> gq = gqtest(fm, fraction=0.5, alternative='two.sided')
#> mkhtest_f(gq, 'het_gq_two_sided_05', 'f')
het_gq_two_sided_05 = dict(statistic=0.434815645134117,
pvalue=0.004799321242905568,
parameters=(48, 47), distr='f')
endogg, exogg = self.endog, self.exog
#tests
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5)
compare_t_est(gq, het_gq_greater, decimal=(14, 14))
assert_equal(gq[-1], 'increasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='decreasing')
compare_t_est(gq, het_gq_less, decimal=(14, 14))
assert_equal(gq[-1], 'decreasing')
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=0.5,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO: forcing the same split as R 202-90-90-1=21
gq = smsdia.het_goldfeldquandt(endogg, exogg, split=90, drop=21,
alternative='two-sided')
compare_t_est(gq, het_gq_two_sided_01, decimal=(14, 14))
assert_equal(gq[-1], 'two-sided')
#TODO other options ???
def test_het_breusch_pagan(self):
res = self.res
bptest = dict(statistic=0.709924388395087, pvalue=0.701199952134347,
parameters=(2,), distr='f')
bp = smsdia.het_breuschpagan(res.resid, res.model.exog)
compare_t_est(bp, bptest, decimal=(12, 12))
def test_het_white(self):
res = self.res
#TODO: regressiontest, compare with Greene or Gretl or Stata
hw = smsdia.het_white(res.resid, res.model.exog)
hw_values = (33.503722896538441, 2.9887960597830259e-06,
7.7945101228430946, 1.0354575277704231e-06)
assert_almost_equal(hw, hw_values)
def test_het_arch(self):
#test het_arch and indirectly het_lm against R
#> library(FinTS)
#> at = ArchTest(residuals(fm), lags=4)
#> mkhtest(at, 'archtest_4', 'chi2')
archtest_4 = dict(statistic=3.43473400836259,
pvalue=0.487871315392619, parameters=(4,),
distr='chi2')
#> at = ArchTest(residuals(fm), lags=12)
#> mkhtest(at, 'archtest_12', 'chi2')
archtest_12 = dict(statistic=8.648320999014171,
pvalue=0.732638635007718, parameters=(12,),
distr='chi2')
at4 = smsdia.het_arch(self.res.resid, maxlag=4)
at12 = smsdia.het_arch(self.res.resid, maxlag=12)
compare_t_est(at4[:2], archtest_4, decimal=(12, 13))
compare_t_est(at12[:2], archtest_12, decimal=(12, 13))
def test_het_arch2(self):
#test autolag options, this also test het_lm
#unfortunately optimal lag=1 for this data
resid = self.res.resid
res1 = smsdia.het_arch(resid, maxlag=1, autolag=None, store=True)
rs1 = res1[-1]
res2 = smsdia.het_arch(resid, maxlag=5, autolag='aic', store=True)
rs2 = res2[-1]
assert_almost_equal(rs2.resols.params, rs1.resols.params, decimal=13)
assert_almost_equal(res2[:4], res1[:4], decimal=13)
#test that smallest lag, maxlag=1 works
res3 = smsdia.het_arch(resid, maxlag=1, autolag='aic')
assert_almost_equal(res3[:4], res1[:4], decimal=13)
def test_acorr_breusch_godfrey(self):
res = self.res
#bgf = bgtest(fm, order = 4, type="F")
breuschgodfrey_f = dict(statistic=1.179280833676792,
pvalue=0.321197487261203,
parameters=(4,195,), distr='f')
#> bgc = bgtest(fm, order = 4, type="Chisq")
#> mkhtest(bgc, "breuschpagan_c", "chi2")
breuschgodfrey_c = dict(statistic=4.771042651230007,
pvalue=0.3116067133066697,
parameters=(4,), distr='chi2')
bg = smsdia.acorr_breusch_godfrey(res, nlags=4)
bg_r = [breuschgodfrey_c['statistic'], breuschgodfrey_c['pvalue'],
breuschgodfrey_f['statistic'], breuschgodfrey_f['pvalue']]
assert_almost_equal(bg, bg_r, decimal=13)
# check that lag choice works
bg2 = smsdia.acorr_breusch_godfrey(res, nlags=None)
bg3 = smsdia.acorr_breusch_godfrey(res, nlags=14)
assert_almost_equal(bg2, bg3, decimal=13)
def test_acorr_ljung_box(self):
#unit-test which may be useful later
#ddof correction for fitted parameters in ARMA(p,q) fitdf=p+q
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box", fitdf=2)
#> mkhtest(bt, "ljung_box_4df2", "chi2")
# ljung_box_4df2 = dict(statistic=5.23587172795227,
# pvalue=0.0729532930400377,
# parameters=(2,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce", fitdf=2)
#> mkhtest(bt, "ljung_box_bp_4df2", "chi2")
# ljung_box_bp_4df2 = dict(statistic=5.12462932741681,
# pvalue=0.0771260128929921,
# parameters=(2,), distr='chi2')
res = self.res
#general test
#> bt = Box.test(residuals(fm), lag=4, type = "Ljung-Box")
#> mkhtest(bt, "ljung_box_4", "chi2")
ljung_box_4 = dict(statistic=5.23587172795227, pvalue=0.263940335284713,
parameters=(4,), distr='chi2')
#> bt = Box.test(residuals(fm), lag=4, type = "Box-Pierce")
#> mkhtest(bt, "ljung_box_bp_4", "chi2")
ljung_box_bp_4 = dict(statistic=5.12462932741681,
pvalue=0.2747471266820692,
parameters=(4,), distr='chi2')
lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, 4,
boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_4, decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_4, decimal=(13, 13))
def test_acorr_ljung_box_big_default(self):
res = self.res
#test with big dataset and default lag
#> bt = Box.test(residuals(fm), type = "Ljung-Box")
#> mkhtest(bt, "ljung_box_none", "chi2")
ljung_box_none = dict(statistic=51.03724531797195, pvalue=0.11334744923390,
distr='chi2')
#> bt = Box.test(residuals(fm), type = "Box-Pierce")
#> mkhtest(bt, "ljung_box_bp_none", "chi2")
ljung_box_bp_none = dict(statistic=45.12238537034000,
pvalue=0.26638168491464,
distr='chi2')
lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid, boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_none, decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_none, decimal=(13, 13))
def test_acorr_ljung_box_small_default(self):
res = self.res
#test with small dataset and default lag
#> bt = Box.test(residuals(fm), type = "Ljung-Box")
#> mkhtest(bt, "ljung_box_small", "chi2")
ljung_box_small = dict(statistic=9.61503968281915, pvalue=0.72507000996945,
parameters=(0,), distr='chi2')
#> bt = Box.test(residuals(fm), type = "Box-Pierce")
#> mkhtest(bt, "ljung_box_bp_small", "chi2")
ljung_box_bp_small = dict(statistic=7.41692150864936,
pvalue=0.87940785887006,
parameters=(0,), distr='chi2')
lb, lbpval, bp, bppval = smsdia.acorr_ljungbox(res.resid[:30], boxpierce=True)
compare_t_est([lb[-1], lbpval[-1]], ljung_box_small, decimal=(13, 13))
compare_t_est([bp[-1], bppval[-1]], ljung_box_bp_small, decimal=(13, 13))
def test_harvey_collier(self):
#> hc = harvtest(fm, order.by = NULL, data = list())
#> mkhtest_f(hc, 'harvey_collier', 't')
harvey_collier = dict(statistic=0.494432160939874,
pvalue=0.6215491310408242,
parameters=(198), distr='t')
#> hc2 = harvtest(fm, order.by=ggdp , data = list())
#> mkhtest_f(hc2, 'harvey_collier_2', 't')
harvey_collier_2 = dict(statistic=1.42104628340473,
pvalue=0.1568762892441689,
parameters=(198), distr='t')
hc = smsdia.linear_harvey_collier(self.res)
compare_t_est(hc, harvey_collier, decimal=(12, 12))
def test_rainbow(self):
#rainbow test
#> rt = raintest(fm)
#> mkhtest_f(rt, 'raintest', 'f')
raintest = dict(statistic=0.6809600116739604, pvalue=0.971832843583418,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, center=0.4)
#> mkhtest_f(rt, 'raintest_center_04', 'f')
raintest_center_04 = dict(statistic=0.682635074191527,
pvalue=0.971040230422121,
parameters=(101, 98), distr='f')
#> rt = raintest(fm, fraction=0.4)
#> mkhtest_f(rt, 'raintest_fraction_04', 'f')
raintest_fraction_04 = dict(statistic=0.565551237772662,
pvalue=0.997592305968473,
parameters=(122, 77), distr='f')
#> rt = raintest(fm, order.by=ggdp)
#Warning message:
#In if (order.by == "mahalanobis") { :
# the condition has length > 1 and only the first element will be used
#> mkhtest_f(rt, 'raintest_order_gdp', 'f')
raintest_order_gdp = dict(statistic=1.749346160513353,
pvalue=0.002896131042494884,
parameters=(101, 98), distr='f')
rb = smsdia.linear_rainbow(self.res)
compare_t_est(rb, raintest, decimal=(13, 14))
rb = smsdia.linear_rainbow(self.res, frac=0.4)
compare_t_est(rb, raintest_fraction_04, decimal=(13, 14))
def test_compare_lr(self):
res = self.res
res3 = self.res3 #nested within res
#lrtest
#lrt = lrtest(fm, fm2)
#Model 1: ginv ~ ggdp + lint
#Model 2: ginv ~ ggdp
lrtest = dict(loglike1=-763.9752181602237, loglike2=-766.3091902020184,
chi2value=4.66794408358942, pvalue=0.03073069384028677,
df=(4,3,1))
lrt = res.compare_lr_test(res3)
assert_almost_equal(lrt[0], lrtest['chi2value'], decimal=11)
assert_almost_equal(lrt[1], lrtest['pvalue'], decimal=11)
waldtest = dict(fvalue=4.65216373312492, pvalue=0.03221346195239025,
df=(199,200,1))
wt = res.compare_f_test(res3)
assert_almost_equal(wt[0], waldtest['fvalue'], decimal=11)
assert_almost_equal(wt[1], waldtest['pvalue'], decimal=11)
def test_compare_nonnested(self):
res = self.res
res2 = self.res2
#jt = jtest(fm, lm(ginv ~ ggdp + tbilrate))
#Estimate Std. Error t value Pr(>|t|)
jtest = [('M1 + fitted(M2)', 1.591505670785873, 0.7384552861695823,
2.155182176352370, 0.032354572525314450, '*'),
('M2 + fitted(M1)', 1.305687653016899, 0.4808385176653064,
2.715438978051544, 0.007203854534057954, '**')]
jt1 = smsdia.compare_j(res2, res)
assert_almost_equal(jt1, jtest[0][3:5], decimal=13)
jt2 = smsdia.compare_j(res, res2)
assert_almost_equal(jt2, jtest[1][3:5], decimal=14)
#Estimate Std. Error z value Pr(>|z|)
coxtest = [('fitted(M1) ~ M2', -0.782030488930356, 0.599696502782265,
-1.304043770977755, 1.922186587840554e-01, ' '),
('fitted(M2) ~ M1', -2.248817107408537, 0.392656854330139,
-5.727181590258883, 1.021128495098556e-08, '***')]
ct1 = smsdia.compare_cox(res, res2)
assert_almost_equal(ct1, coxtest[0][3:5], decimal=13)
ct2 = smsdia.compare_cox(res2, res)
assert_almost_equal(ct2, coxtest[1][3:5], decimal=12)
#TODO should be approx
# Res.Df Df F Pr(>F)
encomptest = [('M1 vs. ME', 198, -1, 4.644810213266983,
0.032354572525313666, '*'),
('M2 vs. ME', 198, -1, 7.373608843521585,
0.007203854534058054, '**')]
# Estimate Std. Error t value
petest = [('M1 + log(fit(M1))-fit(M2)', -229.281878354594596,
44.5087822087058598, -5.15139, 6.201281252449979e-07),
('M2 + fit(M1)-exp(fit(M2))', 0.000634664704814,
0.0000462387010349, 13.72583, 1.319536115230356e-30)]
def test_cusum_ols(self):
#R library(strucchange)
#> sc = sctest(ginv ~ ggdp + lint, type="OLS-CUSUM")
#> mkhtest(sc, 'cusum_ols', 'BB')
cusum_ols = dict(statistic=1.055750610401214, pvalue=0.2149567397376543,
parameters=(), distr='BB') #Brownian Bridge
k_vars=3
cs_ols = smsdia.breaks_cusumolsresid(self.res.resid, ddof=k_vars) #
compare_t_est(cs_ols, cusum_ols, decimal=(12, 12))
def test_breaks_hansen(self):
#> sc = sctest(ginv ~ ggdp + lint, type="Nyblom-Hansen")
#> mkhtest(sc, 'breaks_nyblom_hansen', 'BB')
breaks_nyblom_hansen = dict(statistic=1.0300792740544484,
pvalue=0.1136087530212015,
parameters=(), distr='BB')
bh = smsdia.breaks_hansen(self.res)
assert_almost_equal(bh[0], breaks_nyblom_hansen['statistic'],
decimal=13)
#TODO: breaks_hansen doesn't return pvalues
def test_recursive_residuals(self):
reccumres_standardize = np.array([-2.151, -3.748, -3.114, -3.096,
-1.865, -2.230, -1.194, -3.500, -3.638, -4.447, -4.602, -4.631, -3.999,
-4.830, -5.429, -5.435, -6.554, -8.093, -8.567, -7.532, -7.079, -8.468,
-9.320, -12.256, -11.932, -11.454, -11.690, -11.318, -12.665, -12.842,
-11.693, -10.803, -12.113, -12.109, -13.002, -11.897, -10.787, -10.159,
-9.038, -9.007, -8.634, -7.552, -7.153, -6.447, -5.183, -3.794, -3.511,
-3.979, -3.236, -3.793, -3.699, -5.056, -5.724, -4.888, -4.309, -3.688,
-3.918, -3.735, -3.452, -2.086, -6.520, -7.959, -6.760, -6.855, -6.032,
-4.405, -4.123, -4.075, -3.235, -3.115, -3.131, -2.986, -1.813, -4.824,
-4.424, -4.796, -4.000, -3.390, -4.485, -4.669, -4.560, -3.834, -5.507,
-3.792, -2.427, -1.756, -0.354, 1.150, 0.586, 0.643, 1.773, -0.830,
-0.388, 0.517, 0.819, 2.240, 3.791, 3.187, 3.409, 2.431, 0.668, 0.957,
-0.928, 0.327, -0.285, -0.625, -2.316, -1.986, -0.744, -1.396, -1.728,
-0.646, -2.602, -2.741, -2.289, -2.897, -1.934, -2.532, -3.175, -2.806,
-3.099, -2.658, -2.487, -2.515, -2.224, -2.416, -1.141, 0.650, -0.947,
0.725, 0.439, 0.885, 2.419, 2.642, 2.745, 3.506, 4.491, 5.377, 4.624,
5.523, 6.488, 6.097, 5.390, 6.299, 6.656, 6.735, 8.151, 7.260, 7.846,
8.771, 8.400, 8.717, 9.916, 9.008, 8.910, 8.294, 8.982, 8.540, 8.395,
7.782, 7.794, 8.142, 8.362, 8.400, 7.850, 7.643, 8.228, 6.408, 7.218,
7.699, 7.895, 8.725, 8.938, 8.781, 8.350, 9.136, 9.056, 10.365, 10.495,
10.704, 10.784, 10.275, 10.389, 11.586, 11.033, 11.335, 11.661, 10.522,
10.392, 10.521, 10.126, 9.428, 9.734, 8.954, 9.949, 10.595, 8.016,
6.636, 6.975])
rr = smsdia.recursive_olsresiduals(self.res, skip=3, alpha=0.95)
assert_equal(np.round(rr[5][1:], 3), reccumres_standardize) #extra zero in front
#assert_equal(np.round(rr[3][4:], 3), np.diff(reccumres_standardize))
assert_almost_equal(rr[3][4:], np.diff(reccumres_standardize),3)
assert_almost_equal(rr[4][3:].std(ddof=1), 10.7242, decimal=4)
#regression number, visually checked with graph from gretl
ub0 = np.array([ 13.37318571, 13.50758959, 13.64199346, 13.77639734,
13.91080121])
ub1 = np.array([ 39.44753774, 39.58194162, 39.7163455 , 39.85074937,
39.98515325])
lb, ub = rr[6]
assert_almost_equal(ub[:5], ub0, decimal=7)
assert_almost_equal(lb[:5], -ub0, decimal=7)
assert_almost_equal(ub[-5:], ub1, decimal=7)
assert_almost_equal(lb[-5:], -ub1, decimal=7)
#test a few values with explicit OLS
endog = self.res.model.endog
exog = self.res.model.exog
params = []
ypred = []
for i in range(3,10):
resi = OLS(endog[:i], exog[:i]).fit()
ypred.append(resi.model.predict(resi.params, exog[i]))
params.append(resi.params)
assert_almost_equal(rr[2][3:10], ypred, decimal=12)
assert_almost_equal(rr[0][3:10], endog[3:10] - ypred, decimal=12)
assert_almost_equal(rr[1][2:9], params, decimal=12)
def test_normality(self):
res = self.res
#> library(nortest) #Lilliefors (Kolmogorov-Smirnov) normality test
#> lt = lillie.test(residuals(fm))
#> mkhtest(lt, "lilliefors", "-")
lilliefors1 = dict(statistic=0.0723390908786589,
pvalue=0.01204113540102896, parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)**2)
#> mkhtest(lt, "lilliefors", "-")
lilliefors2 = dict(statistic=0.301311621898024,
pvalue=1.004305736618051e-51,
parameters=(), distr='-')
#> lt = lillie.test(residuals(fm)[1:20])
#> mkhtest(lt, "lilliefors", "-")
lilliefors3 = dict(statistic=0.1333956004203103,
pvalue=0.20, parameters=(), distr='-')
lf1 = smsdia.lilliefors(res.resid)
lf2 = smsdia.lilliefors(res.resid**2)
lf3 = smsdia.lilliefors(res.resid[:20])
compare_t_est(lf1, lilliefors1, decimal=(14, 14))
compare_t_est(lf2, lilliefors2, decimal=(14, 14)) #pvalue very small
assert_approx_equal(lf2[1], lilliefors2['pvalue'], significant=10)
compare_t_est(lf3, lilliefors3, decimal=(14, 1))
#R uses different approximation for pvalue in last case
#> ad = ad.test(residuals(fm))
#> mkhtest(ad, "ad3", "-")
adr1 = dict(statistic=1.602209621518313, pvalue=0.0003937979149362316,
parameters=(), distr='-')
#> ad = ad.test(residuals(fm)**2)
#> mkhtest(ad, "ad3", "-")
adr2 = dict(statistic=np.inf, pvalue=np.nan, parameters=(), distr='-')
#> ad = ad.test(residuals(fm)[1:20])
#> mkhtest(ad, "ad3", "-")
adr3 = dict(statistic=0.3017073732210775, pvalue=0.5443499281265933,
parameters=(), distr='-')
ad1 = smsdia.normal_ad(res.resid)
compare_t_est(ad1, adr1, decimal=(11, 13))
ad2 = smsdia.normal_ad(res.resid**2)
assert_(np.isinf(ad2[0]))
ad3 = smsdia.normal_ad(res.resid[:20])
compare_t_est(ad3, adr3, decimal=(11, 12))
def test_influence(self):
res = self.res
#this test is slow
infl = oi.OLSInfluence(res)
path = os.path.join(cur_dir, "results", "influence_lsdiag_R.json")
with open(path, 'r') as fp:
lsdiag = json.load(fp)
#basic
assert_almost_equal(np.array(lsdiag['cov.scaled']).reshape(3, 3),
res.cov_params(), decimal=14)
assert_almost_equal(np.array(lsdiag['cov.unscaled']).reshape(3, 3),
res.normalized_cov_params, decimal=14)
c0, c1 = infl.cooks_distance #TODO: what's c1
assert_almost_equal(c0, lsdiag['cooks'], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, lsdiag['hat'], decimal=14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], decimal=14)
#slow:
#infl._get_all_obs() #slow, nobs estimation loop, called implicitly
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], decimal=14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], decimal=14)
import pandas
fn = os.path.join(cur_dir,"results/influence_measures_R.csv")
infl_r = pandas.read_csv(fn, index_col=0)
conv = lambda s: 1 if s=='TRUE' else 0
fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv")
#not used yet:
#infl_bool_r = pandas.read_csv(fn, index_col=0,
# converters=dict(zip(lrange(7),[conv]*7)))
infl_r2 = np.asarray(infl_r)
assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)
#duplicates
assert_almost_equal(dffits, infl_r2[:,3], decimal=14)
assert_almost_equal(c0, infl_r2[:,5], decimal=14)
assert_almost_equal(infl.hat_matrix_diag, infl_r2[:,6], decimal=14)
#Note: for dffits, R uses a threshold around 0.36, mine: dffits[1]=0.24373
#TODO: finish and check thresholds and pvalues
'''
R has
>>> np.nonzero(np.asarray(infl_bool_r["dffit"]))[0]
array([ 6, 26, 63, 76, 90, 199])
>>> np.nonzero(np.asarray(infl_bool_r["cov.r"]))[0]
array([ 4, 26, 59, 61, 63, 72, 76, 84, 91, 92, 94, 95, 108,
197, 198])
>>> np.nonzero(np.asarray(infl_bool_r["hat"]))[0]
array([ 62, 76, 84, 90, 91, 92, 95, 108, 197, 199])
'''
class TestDiagnosticGPandas(TestDiagnosticG):
@classmethod
def setup_class(cls):
d = macrodata.load_pandas().data
#growth rates
d['gs_l_realinv'] = 400 * np.log(d['realinv']).diff()
d['gs_l_realgdp'] = 400 * np.log(d['realgdp']).diff()
d['lint'] = d['realint'].shift(1)
d['tbilrate'] = d['tbilrate'].shift(1)
d = d.dropna()
cls.d = d
endogg = d['gs_l_realinv']
exogg = add_constant(d[['gs_l_realgdp', 'lint']])
exogg2 = add_constant(d[['gs_l_realgdp', 'tbilrate']])
exogg3 = add_constant(d[['gs_l_realgdp']])
res_ols = OLS(endogg, exogg).fit()
res_ols2 = OLS(endogg, exogg2).fit()
res_ols3 = OLS(endogg, exogg3).fit()
cls.res = res_ols
cls.res2 = res_ols2
cls.res3 = res_ols3
cls.endog = cls.res.model.endog
cls.exog = cls.res.model.exog
def grangertest():
#> gt = grangertest(ginv, ggdp, order=4)
#> gt
#Granger causality test
#
#Model 1: ggdp ~ Lags(ggdp, 1:4) + Lags(ginv, 1:4)
#Model 2: ggdp ~ Lags(ggdp, 1:4)
grangertest = dict(fvalue=1.589672703015157, pvalue=0.178717196987075,
df=(198,193))
def test_outlier_influence_funcs():
#smoke test
x = add_constant(np.random.randn(10, 2))
y = x.sum(1) + np.random.randn(10)
res = OLS(y, x).fit()
out_05 = oi.summary_table(res)
# GH3344 : Check alpha has an effect
out_01 = oi.summary_table(res, alpha=0.01)
assert_(np.all(out_01[1][:, 6] <= out_05[1][:, 6]))
assert_(np.all(out_01[1][:, 7] >= out_05[1][:, 7]))
res2 = OLS(y, x[:,0]).fit()
oi.summary_table(res2, alpha=0.05)
infl = res2.get_influence()
infl.summary_table()
def test_influence_wrapped():
from pandas import DataFrame
from pandas.util.testing import assert_series_equal
d = macrodata.load_pandas().data
#growth rates
gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
lint = d['realint'][:-1]
# re-index these because they won't conform to lint
gs_l_realgdp.index = lint.index
gs_l_realinv.index = lint.index
data = dict(const=np.ones_like(lint), lint=lint, lrealgdp=gs_l_realgdp)
#order is important
exog = DataFrame(data, columns=['const','lrealgdp','lint'])
res = OLS(gs_l_realinv, exog).fit()
#basic
# already tested
#assert_almost_equal(lsdiag['cov.scaled'],
# res.cov_params().values.ravel(), decimal=14)
#assert_almost_equal(lsdiag['cov.unscaled'],
# res.normalized_cov_params.values.ravel(), decimal=14)
infl = oi.OLSInfluence(res)
# smoke test just to make sure it works, results separately tested
df = infl.summary_frame()
assert_(isinstance(df, DataFrame))
#this test is slow
path = os.path.join(cur_dir, "results", "influence_lsdiag_R.json")
with open(path, "r") as fp:
lsdiag = json.load(fp)
c0, c1 = infl.cooks_distance #TODO: what's c1, it's pvalues? -ss
#NOTE: we get a hard-cored 5 decimals with pandas testing
assert_almost_equal(c0, lsdiag['cooks'], 14)
assert_almost_equal(infl.hat_matrix_diag, (lsdiag['hat']), 14)
assert_almost_equal(infl.resid_studentized_internal,
lsdiag['std.res'], 14)
#slow:
dffits, dffth = infl.dffits
assert_almost_equal(dffits, lsdiag['dfits'], 14)
assert_almost_equal(infl.resid_studentized_external,
lsdiag['stud.res'], 14)
import pandas
fn = os.path.join(cur_dir,"results/influence_measures_R.csv")
infl_r = pandas.read_csv(fn, index_col=0)
conv = lambda s: 1 if s=='TRUE' else 0
fn = os.path.join(cur_dir,"results/influence_measures_bool_R.csv")
#not used yet:
#infl_bool_r = pandas.read_csv(fn, index_col=0,
# converters=dict(zip(lrange(7),[conv]*7)))
infl_r2 = np.asarray(infl_r)
#TODO: finish wrapping this stuff
assert_almost_equal(infl.dfbetas, infl_r2[:,:3], decimal=13)
assert_almost_equal(infl.cov_ratio, infl_r2[:,4], decimal=14)
def test_influence_dtype():
# see #2148 bug when endog is integer
y = np.ones(20)
np.random.seed(123)
x = np.random.randn(20, 3)
res1 = OLS(y, x).fit()
res2 = OLS(y*1., x).fit()
cr1 = res1.get_influence().cov_ratio
cr2 = res2.get_influence().cov_ratio
assert_allclose(cr1, cr2, rtol=1e-14)
# regression test for values
cr3 = np.array(
[ 1.22239215, 1.31551021, 1.52671069, 1.05003921, 0.89099323,
1.57405066, 1.03230092, 0.95844196, 1.15531836, 1.21963623,
0.87699564, 1.16707748, 1.10481391, 0.98839447, 1.08999334,
1.35680102, 1.46227715, 1.45966708, 1.13659521, 1.22799038])
assert_almost_equal(cr1, cr3, decimal=8)
def test_outlier_test():
# results from R with NA -> 1. Just testing interface here because
# outlier_test is just a wrapper
labels = ['accountant', 'pilot', 'architect', 'author', 'chemist',
'minister', 'professor', 'dentist', 'reporter', 'engineer',
'undertaker', 'lawyer', 'physician', 'welfare.worker', 'teacher',
'conductor', 'contractor', 'factory.owner', 'store.manager',
'banker', 'bookkeeper', 'mail.carrier', 'insurance.agent',
'store.clerk', 'carpenter', 'electrician', 'RR.engineer',
'machinist', 'auto.repairman', 'plumber', 'gas.stn.attendant',
'coal.miner', 'streetcar.motorman', 'taxi.driver',
'truck.driver', 'machine.operator', 'barber', 'bartender',
'shoe.shiner', 'cook', 'soda.clerk', 'watchman', 'janitor',
'policeman', 'waiter']
#Duncan's prestige data from car
exog = [[1.0, 62.0, 86.0], [1.0, 72.0, 76.0], [1.0, 75.0, 92.0],
[1.0, 55.0, 90.0], [1.0, 64.0, 86.0], [1.0, 21.0, 84.0],
[1.0, 64.0, 93.0], [1.0, 80.0, 100.0], [1.0, 67.0, 87.0],
[1.0, 72.0, 86.0], [1.0, 42.0, 74.0], [1.0, 76.0, 98.0],
[1.0, 76.0, 97.0], [1.0, 41.0, 84.0], [1.0, 48.0, 91.0],
[1.0, 76.0, 34.0], [1.0, 53.0, 45.0], [1.0, 60.0, 56.0],
[1.0, 42.0, 44.0], [1.0, 78.0, 82.0], [1.0, 29.0, 72.0],
[1.0, 48.0, 55.0], [1.0, 55.0, 71.0], [1.0, 29.0, 50.0],
[1.0, 21.0, 23.0], [1.0, 47.0, 39.0], [1.0, 81.0, 28.0],
[1.0, 36.0, 32.0], [1.0, 22.0, 22.0], [1.0, 44.0, 25.0],
[1.0, 15.0, 29.0], [1.0, 7.0, 7.0], [1.0, 42.0, 26.0],
[1.0, 9.0, 19.0], [1.0, 21.0, 15.0], [1.0, 21.0, 20.0],
[1.0, 16.0, 26.0], [1.0, 16.0, 28.0], [1.0, 9.0, 17.0],
[1.0, 14.0, 22.0], [1.0, 12.0, 30.0], [1.0, 17.0, 25.0],
[1.0, 7.0, 20.0], [1.0, 34.0, 47.0], [1.0, 8.0, 32.0]]
endog = [ 82., 83., 90., 76., 90., 87., 93., 90., 52., 88., 57.,
89., 97., 59., 73., 38., 76., 81., 45., 92., 39., 34.,
41., 16., 33., 53., 67., 57., 26., 29., 10., 15., 19.,
10., 13., 24., 20., 7., 3., 16., 6., 11., 8., 41.,
10.]
ndarray_mod = OLS(endog, exog).fit()
rstudent = [3.1345185839, -2.3970223990, 2.0438046359, -1.9309187757,
1.8870465798, -1.7604905300, -1.7040324156, 1.6024285876,
-1.4332485037, -1.1044851583, 1.0688582315, 1.0185271840,
-0.9024219332, -0.9023876471, -0.8830953936, 0.8265782334,
0.8089220547, 0.7682770197, 0.7319491074, -0.6665962829,
0.5227352794, -0.5135016547, 0.5083881518, 0.4999224372,
-0.4980818221, -0.4759717075, -0.4293565820, -0.4114056499,
-0.3779540862, 0.3556874030, 0.3409200462, 0.3062248646,
0.3038999429, -0.3030815773, -0.1873387893, 0.1738050251,
0.1424246593, -0.1292266025, 0.1272066463, -0.0798902878,
0.0788467222, 0.0722556991, 0.0505098280, 0.0233215136,
0.0007112055]
unadj_p = [0.003177202, 0.021170298, 0.047432955, 0.060427645, 0.066248120,
0.085783008, 0.095943909, 0.116738318, 0.159368890, 0.275822623,
0.291386358, 0.314400295, 0.372104049, 0.372122040, 0.382333561,
0.413260793, 0.423229432, 0.446725370, 0.468363101, 0.508764039,
0.603971990, 0.610356737, 0.613905871, 0.619802317, 0.621087703,
0.636621083, 0.669911674, 0.682917818, 0.707414459, 0.723898263,
0.734904667, 0.760983108, 0.762741124, 0.763360242, 0.852319039,
0.862874018, 0.887442197, 0.897810225, 0.899398691, 0.936713197,
0.937538115, 0.942749758, 0.959961394, 0.981506948, 0.999435989]
bonf_p = [0.1429741, 0.9526634, 2.1344830, 2.7192440, 2.9811654, 3.8602354,
4.3174759, 5.2532243, 7.1716001, 12.4120180, 13.1123861, 14.1480133,
16.7446822, 16.7454918, 17.2050103, 18.5967357, 19.0453245,
20.1026416, 21.0763395, 22.8943818, 27.1787396, 27.4660532,
27.6257642, 27.8911043, 27.9489466, 28.6479487, 30.1460253,
30.7313018, 31.8336506, 32.5754218, 33.0707100, 34.2442399,
34.3233506, 34.3512109, 38.3543568, 38.8293308, 39.9348989,
40.4014601, 40.4729411, 42.1520939, 42.1892152, 42.4237391,
43.1982627, 44.1678127, 44.9746195]
bonf_p = np.array(bonf_p)
bonf_p[bonf_p > 1] = 1
sorted_labels = ["minister", "reporter", "contractor", "insurance.agent",
"machinist", "store.clerk", "conductor", "factory.owner",
"mail.carrier", "streetcar.motorman", "carpenter", "coal.miner",
"bartender", "bookkeeper", "soda.clerk", "chemist", "RR.engineer",
"professor", "electrician", "gas.stn.attendant", "auto.repairman",
"watchman", "banker", "machine.operator", "dentist", "waiter",
"shoe.shiner", "welfare.worker", "plumber", "physician", "pilot",
"engineer", "accountant", "lawyer", "undertaker", "barber",
"store.manager", "truck.driver", "cook", "janitor", "policeman",
"architect", "teacher", "taxi.driver", "author"]
res2 = np.c_[rstudent, unadj_p, bonf_p]
res = oi.outlier_test(ndarray_mod, method='b', labels=labels, order=True)
np.testing.assert_almost_equal(res.values, res2, 7)
np.testing.assert_equal(res.index.tolist(), sorted_labels) # pylint: disable-msg=E1103
data = pd.DataFrame(np.column_stack((endog, exog)),
columns='y const var1 var2'.split(),
index=labels)
# check `order` with pandas bug in #3971
res_pd = OLS.from_formula('y ~ const + var1 + var2 - 0', data).fit()
res_outl2 = oi.outlier_test(res_pd, method='b', order=True)
assert_almost_equal(res_outl2.values, res2, 7)
assert_equal(res_outl2.index.tolist(), sorted_labels)
if PD_GE_17:
# pandas < 0.17 does not have sort_values method
res_outl1 = res_pd.outlier_test(method='b')
res_outl1 = res_outl1.sort_values(['unadj_p'], ascending=True)
assert_almost_equal(res_outl1.values, res2, 7)
assert_equal(res_outl1.index.tolist(), sorted_labels)
assert_array_equal(res_outl2.index, res_outl1.index)
# additional keywords in method
res_outl3 = res_pd.outlier_test(method='b', order=True)
assert_equal(res_outl3.index.tolist(), sorted_labels)
res_outl4 = res_pd.outlier_test(method='b', order=True, cutoff=0.15)
assert_equal(res_outl4.index.tolist(), sorted_labels[:1])
if __name__ == '__main__':
import pytest
pytest.main([__file__, '-vvs', '-x', '--pdb'])
#t = TestDiagnosticG()
#t.test_basic()
#t.test_hac()
#t.test_acorr_breusch_godfrey()
#t.test_acorr_ljung_box()
#t.test_het_goldfeldquandt()
#t.test_het_breusch_pagan()
#t.test_het_white()
#t.test_compare_lr()
#t.test_compare_nonnested()
#t.test_influence()
##################################################
'''
J test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error t value Pr(>|t|)
M1 + fitted(M2) 1.591505670785873 0.7384552861695823 2.15518 0.0323546 *
M2 + fitted(M1) 1.305687653016899 0.4808385176653064 2.71544 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
= lm(ginv ~ ggdp + tbilrate)
> ct = coxtest(fm, fm3)
> ct
Cox test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Estimate Std. Error z value Pr(>|z|)
fitted(M1) ~ M2 -0.782030488930356 0.599696502782265 -1.30404 0.19222
fitted(M2) ~ M1 -2.248817107408537 0.392656854330139 -5.72718 1.0211e-08 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> et = encomptest(fm, fm3)
> et
Encompassing test
Model 1: ginv ~ ggdp + lint
Model 2: ginv ~ ggdp + tbilrate
Model E: ginv ~ ggdp + lint + tbilrate
Res.Df Df F Pr(>F)
M1 vs. ME 198 -1 4.64481 0.0323546 *
M2 vs. ME 198 -1 7.37361 0.0072039 **
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> fm4 = lm(realinv ~ realgdp + realint, data=d)
> fm5 = lm(log(realinv) ~ realgdp + realint, data=d)
> pet = petest(fm4, fm5)
> pet
PE test
Model 1: realinv ~ realgdp + realint
Model 2: log(realinv) ~ realgdp + realint
Estimate Std. Error t value
M1 + log(fit(M1))-fit(M2) -229.281878354594596 44.5087822087058598 -5.15139
M2 + fit(M1)-exp(fit(M2)) 0.000634664704814 0.0000462387010349 13.72583
Pr(>|t|)
M1 + log(fit(M1))-fit(M2) 6.2013e-07 ***
M2 + fit(M1)-exp(fit(M2)) < 2.22e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
'''
| [
"randybrown18@me.com"
] | randybrown18@me.com |
4786f697c4b5c0fe502134c1c9619662fd57bd4c | 13b70fefe74a4df57c80207a9f5fddb2c2474f1d | /Ui/Ui_FormReceivables.py | db1413407c8c9c455baa3348dcce61139de3c0b0 | [] | no_license | golden7602/zion | 4f8ae947fd754d64de44bb16d9bd2bd7f8c819a5 | 70a5ba13eb1b504f94fdaceba7cc6d0564618c00 | refs/heads/master | 2021-07-01T13:46:03.769916 | 2020-12-01T11:15:01 | 2020-12-01T11:15:01 | 196,413,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,534 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'e:\Zion\zion\Ui\FormReceivables.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(1419, 736)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(False)
font.setWeight(50)
Form.setFont(font)
self.verticalLayout_6 = QtWidgets.QVBoxLayout(Form)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(0)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.splitter_3 = QtWidgets.QSplitter(Form)
self.splitter_3.setOrientation(QtCore.Qt.Vertical)
self.splitter_3.setObjectName("splitter_3")
self.frame_4 = QtWidgets.QFrame(self.splitter_3)
self.frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_4.setObjectName("frame_4")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame_4)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_3 = QtWidgets.QFrame(self.frame_4)
self.frame_3.setMinimumSize(QtCore.QSize(0, 30))
self.frame_3.setMaximumSize(QtCore.QSize(16777215, 30))
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_3)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget = QtWidgets.QWidget(self.frame_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setMinimumSize(QtCore.QSize(500, 0))
self.widget.setObjectName("widget")
self.Layout_Button = QtWidgets.QHBoxLayout(self.widget)
self.Layout_Button.setContentsMargins(0, 0, 0, 0)
self.Layout_Button.setSpacing(2)
self.Layout_Button.setObjectName("Layout_Button")
self.horizontalLayout.addWidget(self.widget)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.label_5 = QtWidgets.QLabel(self.frame_3)
self.label_5.setMinimumSize(QtCore.QSize(0, 0))
self.label_5.setObjectName("label_5")
self.horizontalLayout.addWidget(self.label_5)
self.SelectDate = QtWidgets.QDateEdit(self.frame_3)
self.SelectDate.setMinimumSize(QtCore.QSize(0, 25))
self.SelectDate.setCalendarPopup(True)
self.SelectDate.setObjectName("SelectDate")
self.horizontalLayout.addWidget(self.SelectDate)
spacerItem1 = QtWidgets.QSpacerItem(1226, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.verticalLayout.addWidget(self.frame_3)
self.frame_2 = QtWidgets.QFrame(self.frame_4)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(3)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame_2.sizePolicy().hasHeightForWidth())
self.frame_2.setSizePolicy(sizePolicy)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label = QtWidgets.QLabel(self.frame_2)
self.label.setMinimumSize(QtCore.QSize(0, 15))
self.label.setMaximumSize(QtCore.QSize(16777215, 15))
self.label.setObjectName("label")
self.verticalLayout_2.addWidget(self.label)
self.splitter = QtWidgets.QSplitter(self.frame_2)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.tabCurrentDayRec = QtWidgets.QTableView(self.splitter)
self.tabCurrentDayRec.setMinimumSize(QtCore.QSize(0, 100))
self.tabCurrentDayRec.setMaximumSize(QtCore.QSize(16777214, 500))
self.tabCurrentDayRec.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.tabCurrentDayRec.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tabCurrentDayRec.setObjectName("tabCurrentDayRec")
self.tabCurrentDayRec.verticalHeader().setDefaultSectionSize(25)
self.tabCurrentDayRec.verticalHeader().setMinimumSectionSize(25)
self.SumPaymentMethod = QtWidgets.QTableView(self.splitter)
self.SumPaymentMethod.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.SumPaymentMethod.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.SumPaymentMethod.setObjectName("SumPaymentMethod")
self.verticalLayout_2.addWidget(self.splitter)
self.verticalLayout.addWidget(self.frame_2)
self.frame = QtWidgets.QFrame(self.splitter_3)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.line = QtWidgets.QFrame(self.frame)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_5.addWidget(self.line)
self.splitter_2 = QtWidgets.QSplitter(self.frame)
self.splitter_2.setOrientation(QtCore.Qt.Horizontal)
self.splitter_2.setObjectName("splitter_2")
self.layoutWidget = QtWidgets.QWidget(self.splitter_2)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
self.label_4.setMinimumSize(QtCore.QSize(0, 15))
self.label_4.setMaximumSize(QtCore.QSize(16777215, 15))
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.tabCustomerRecorder = QtWidgets.QTableView(self.layoutWidget)
self.tabCustomerRecorder.setMinimumSize(QtCore.QSize(100, 0))
self.tabCustomerRecorder.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.tabCustomerRecorder.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tabCustomerRecorder.setObjectName("tabCustomerRecorder")
self.tabCustomerRecorder.verticalHeader().setDefaultSectionSize(25)
self.tabCustomerRecorder.verticalHeader().setMinimumSectionSize(25)
self.verticalLayout_3.addWidget(self.tabCustomerRecorder)
self.layoutWidget1 = QtWidgets.QWidget(self.splitter_2)
self.layoutWidget1.setObjectName("layoutWidget1")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.layoutWidget1)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(2)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.line_2 = QtWidgets.QFrame(self.layoutWidget1)
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.horizontalLayout_2.addWidget(self.line_2)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_2 = QtWidgets.QLabel(self.layoutWidget1)
self.label_2.setMinimumSize(QtCore.QSize(0, 15))
self.label_2.setMaximumSize(QtCore.QSize(16777215, 15))
self.label_2.setObjectName("label_2")
self.verticalLayout_4.addWidget(self.label_2)
self.tabCustomerArrearsList = QtWidgets.QTableView(self.layoutWidget1)
self.tabCustomerArrearsList.setMinimumSize(QtCore.QSize(100, 0))
self.tabCustomerArrearsList.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.tabCustomerArrearsList.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tabCustomerArrearsList.setObjectName("tabCustomerArrearsList")
self.tabCustomerArrearsList.verticalHeader().setDefaultSectionSize(25)
self.tabCustomerArrearsList.verticalHeader().setMinimumSectionSize(25)
self.verticalLayout_4.addWidget(self.tabCustomerArrearsList)
self.horizontalLayout_2.addLayout(self.verticalLayout_4)
self.verticalLayout_5.addWidget(self.splitter_2)
self.verticalLayout_6.addWidget(self.splitter_3)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label_5.setText(_translate("Form", " Daily Rreport: "))
self.SelectDate.setDisplayFormat(_translate("Form", "yyyy-MM-dd"))
self.label.setText(_translate("Form", "Receivables"))
self.label_4.setText(_translate("Form", "Customer Recorder:"))
self.label_2.setText(_translate("Form", "Customer Arrears:"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| [
"419331959@qq.com"
] | 419331959@qq.com |
a5267614375d4244c70ef9d22e43775759ce616f | b2abec1469351de38a37b6189fd365be71ac1a5c | /v2/api/assets/user_preferences.py | 9bdefa24e0677c120d05de4c4a0e13925375780f | [] | no_license | stainedart/kdm-manager | 49804eb258ebc22a7679dad8e1e704c997694747 | 3b73fc037be3b2b63c0baf4280e379bdf4e7cb75 | refs/heads/master | 2020-03-07T02:28:49.893626 | 2018-03-25T14:24:47 | 2018-03-25T14:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,676 | py | preferences_dict = {
"beta": {
"type": "General",
"desc": "Enable beta features of the Manager?",
"affirmative": "Enable",
"negative": "Disable",
"patron_level": 2,
},
"preserve_sessions": {
"type": "General",
"desc": "Preserve Sessions?",
"affirmative": "Keep me logged in",
"negative": "Remove sessions after 24 hours",
"patron_level": 1,
},
"random_names_for_unnamed_assets": {
"type": "General",
"desc": "Choose random names for Settlements/Survivors without names?",
"affirmative": "Choose randomly",
"negative": "Use 'Unknown' and 'Anonymous'",
"patron_level": 0,
},
"apply_new_survivor_buffs": {
"type": "Automation",
"desc": "Automatically apply settlement bonuses to new, newborn and current survivors where appropriate?",
"affirmative": "Automatically apply",
"negative": "Do not apply",
"patron_level": 0,
},
"apply_weapon_specialization": {
"type": "Automation",
"desc": "Automatically add weapon specializations if Innovations include the mastery?",
"affirmative": "Add",
"negative": "Do Not Add",
"patron_level": 0,
},
"show_endeavor_token_controls": {
"type": "Campaign Summary",
"desc": "Show Endeavor Token controls on Campaign Summary view?",
"affirmative": "Show controls",
"negative": "Hide controls",
"patron_level": 0,
},
# "update_timeline": {
# "type": "Automation",
# "desc": "Automatically Update Timeline with Milestone Story Events?",
# "affirmative": "Update settlement timelines when milestone conditions are met",
# "negative": "Do not automatically update settlement timelines",
# "patron_level": 0,
# },
"show_epithet_controls": {
"type": "Survivor Sheet",
"desc": "Use survivor epithets?",
"affirmative": "Show controls on Survivor Sheets",
"negative": "Hide controls and survivor epithets on Survivor Sheets",
"patron_level": 0,
},
"show_remove_button": {
"type": "General",
"desc": "Show controls for removing Settlements and Survivors?",
"affirmative": "Show controls on Settlement and Survivor Sheets",
"negative": "Hide controls on Settlement and Survivor Sheets",
"patron_level": 0,
},
"show_ui_tips": {
"type": "General",
"desc": "Display in-line help and user interface tips?",
"affirmative": "Show UI tips",
"negative": "Hide UI tips",
"patron_level": 2,
},
}
| [
"toconnell@tyrannybelle.com"
] | toconnell@tyrannybelle.com |
6392b62f74dce1302bb6f079eac6e731541b0828 | 653c1dcfa2f78491722706c126f69505b750e2f1 | /pyNastran/bdf/mesh_utils/remove_unused.py | 7116d64cb09a05aa2d74367b7fefaa48cfbe7f41 | [] | no_license | lnderuiter/pyNastran | b0a2e20a9555a4f460358136f52dfc1827894a80 | cc596e637b53cf0a997f92e0e09f43222960052c | refs/heads/master | 2020-07-07T15:43:11.885862 | 2019-08-13T23:37:48 | 2019-08-13T23:37:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,908 | py | """
defines some methods for cleaning up a model
- model = remove_unused(bdf_filename, remove_nids=True, remove_cids=True,
remove_pids=True, remove_mids=True)
"""
from pyNastran.bdf.bdf import BDF, read_bdf
#from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber
def remove_unused(bdf_filename, remove_nids=True, remove_cids=True,
                  remove_pids=True, remove_mids=True):
    """
    Takes an uncross-referenced bdf and removes unused data

    removes unused:
     - nodes
     - properties
     - materials
     - coords

    Parameters
    ----------
    bdf_filename : str / BDF
        path to a bdf file or an already loaded (uncross-referenced) model
    remove_nids / remove_cids / remove_pids / remove_mids : bool; default=True
        flags enabling removal of each category of unused card

    .. note:: elements are assumed to always be used, so they are never
              removed; only the ids they *reference* are tracked
    """
    if isinstance(bdf_filename, BDF):
        model = bdf_filename
    else:
        model = read_bdf(bdf_filename, xref=False)

    #nids = model.nodes.keys()
    #cids =
    #nids = set(list(model.nodes.keys()))
    #cids = set(list(model.coords.keys()))
    #pids = set(list(model.properties.keys()))

    # ids referenced anywhere in the deck; anything not collected into one
    # of these sets is a candidate for removal in ``_remove`` at the end
    nids_used = set()
    cids_used = set()
    eids_used = set()
    pids_used = set()
    pids_mass_used = set()
    mids_used = set()
    mids_thermal_used = set()
    sets_used = set()
    desvars_used = set()
    #nsms_used = set()

    #card_types = list(model.card_count.keys())
    #card_map = model.get_card_ids_by_card_types(
        #card_types=card_types,
        #reset_type_to_slot_map=False,
        #stop_on_missing_card=True)

    #for nid, node in model.nodes.items():
        #cids_used.update([node.Cp(), node.Cd()])

    # card types that reference none of the removable ids (or are not
    # handled yet); they fall through the big dispatch below unprocessed
    skip_cards = [
        'ENDDATA', 'PARAM', 'EIGR', 'EIGRL', 'EIGB', 'EIGP', 'EIGC',
        'SPOINT', 'EPOINT', 'DESVAR',
        'SET1', 'FREQ', 'FREQ1', 'FREQ2',
        'TSTEP', 'TSTEPNL', 'NLPCI',
        #'LOAD', 'LSEQ', 'DLOAD', 'LOADCYN',
        'NLPARM', 'ROTORG', 'ROTORD',
        'DAREA', 'DEQATN',
        'DMIG', 'DMI', 'DMIJ', 'DMIK', 'DMIJI',
        'POINT', 'EPOINT',
        'DELAY', 'DPHASE',
        'CBARAO', 'AEPARM',

        # properties
        'PELAS', 'PDAMP', 'PBUSH',
        'PELAST', 'PDAMPT', 'PBUSHT',
        'PGAP', 'PBUSH1D', 'PFAST', 'PVISC', 'PMASS',

        'FLFACT', 'FLUTTER', 'DLINK', 'DDVAL', 'DIVERG', 'GUST',
        'AELINK', 'AELIST', 'TRIM', 'TRIM2', 'PAERO1', 'AEFACT', 'AESTAT',
        'BCTPARA', 'BCRPARA', 'BSURF', 'BSURFS', 'BCTADD',
        'BCTSET',

        # not checked------------------------------------------
        'PHBDY', 'CHBDYG', 'CHBDYP', 'CHBDYE', 'RADBC', 'CONV',
        'QVOL', 'PCONV', 'PCONVM',
        #'PBCOMP', 'PDAMP5', 'CFAST',
        'AECOMP', 'CAERO2', 'CAERO3', 'CAERO4', 'CAERO5',
        'PAERO2', 'PAERO3', 'PAERO4', 'PAERO5',
        'DCONADD',
        'GMCORD',
        'MONPNT1', 'MONPNT2', 'MONPNT3',
        'DSCREEN', 'DTI', 'NSMADD',
        'AESURFS', 'CSSCHD',
        'CGEN', 'NXSTRAT',
    ]
    # SET1/SET3 are handled implicitly (based on context) in other branches
    set_types_simple = [
        'SET1', 'SET3',
    ]
    set_types = [
        'ASET', 'ASET1', 'BSET', 'BSET1', 'CSET', 'CSET1',
        'QSET', 'QSET1', 'USET', 'USET1', 'OMIT', 'OMIT1',
    ]
    seset_types = [
        'SESET',
    ]
    # loads are all dispatched to ``_store_loads``
    load_types = [
        'GRAV', 'RANDPS', 'FORCE', 'FORCE1', 'FORCE2',
        'MOMENT', 'MOMENT1', 'MOMENT2',
        'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4', 'SPCD',
        'GMLOAD', 'RFORCE', 'RFORCE1',
        'TEMP', 'QBDY1', 'QBDY2', 'QBDY3', 'QHBDY',
        'ACCEL', 'PLOADX1', 'SLOAD', 'ACCEL1', 'LOADCYN', 'LOAD',
        'LSEQ', 'DLOAD', 'QVECT', 'RADM', 'TEMPAX', 'DEFORM',
    ]

    # could remove some if we look at the rid_trace
    #for cid, coord in model.coords.items():
        #if coord.type in ['CORD1R', 'CORD1C', 'CORD1S']:
            #nids_used.update(node_ids)
        #elif coord.type in ['CORD1R', 'CORD1C', 'CORD1S']:
            #cids_used.update(coord.Rid())
        #else:
            #raise NotImplementedError(coord)

    # walk every card type present in the deck and record the ids that the
    # cards of that type reference; unknown card types raise at the end
    for card_type, ids in model._type_to_id_map.items():
    #for card_type, ids in card_map.items():
        if card_type in ['CORD1R', 'CORD1C', 'CORD1S']:
            # CORD1* coords are defined by grid points
            for cid in ids:
                coord = model.coords[cid]
                nids_used.update(coord.node_ids)
        elif card_type in ['CORD2R', 'CORD2C', 'CORD2S']:
            # CORD2* coords are defined relative to a reference coord
            for cid in ids:
                coord = model.coords[cid]
                cids_used.add(coord.Rid())

        elif card_type in ['MAT1', 'MAT2', 'MAT3', 'MAT4', 'MAT5',
                           'MAT8', 'MAT9', 'MAT10', 'MAT11']:
            # todo: MATS1, MATT1, etc.
            pass
        elif card_type in ['MATS1', 'MATT1', 'MATT2', 'MATT3', 'MATT4', 'MATT5',
                           'MATT8', 'MATHE', 'MATHP', 'CREEP']:
            # material-dependency cards keep their parent material alive
            mids_used.update(ids)

        elif card_type in ['CTETRA', 'CPENTA', 'CPYRAM', 'CHEXA']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())

        elif card_type in ['CONM1', 'CONM2']:
            for eid in ids:
                elem = model.masses[eid]
                nids_used.add(elem.Nid())
                cids_used.add(elem.Cid())
                #print(elem.object_attributes())
                #print(elem.object_methods())
                #aaa
        elif card_type in ['CMASS1', 'CMASS3']:
            for eid in ids:
                elem = model.masses[eid]
                pids_mass_used.add(elem.Pid())
                nids_used.update(elem.node_ids)
        elif card_type in ['CMASS2', 'CMASS4']:
            # CMASS2/4 have no property; mass is defined on the element
            for eid in ids:
                elem = model.masses[eid]
                nids_used.update(elem.node_ids)

        elif card_type in ['CELAS1', 'CDAMP1', 'CVISC', 'CDAMP5']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
        elif card_type in ['CELAS2', 'CDAMP2']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
        elif card_type in ['CELAS3', 'CDAMP3']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
        elif card_type in ['CELAS4', 'CDAMP4', 'GENEL']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)

        elif card_type in ['CTRIA3', 'CQUAD4', 'CTRIA6', 'CTRIAR', 'CQUAD8', 'CQUADR',
                           'CTRIAX', 'CQUADX', 'CQUAD']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
                # theta_mcid is either a float angle or an integer coord id
                if isinstance(elem.theta_mcid, int):
                    cids_used.add(elem.theta_mcid)
        elif card_type in ['CTRIAX6']:
            # CTRIAX6 references a material directly (no property)
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                mids_used.add(elem.Mid())
        elif card_type in ['CSHEAR', 'CTUBE']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())

        elif card_type in ['CPLSTN3', 'CPLSTN4', 'CPLSTN6', 'CPLSTN8',
                           'CPLSTS3', 'CPLSTS4', 'CPLSTS6', 'CPLSTS8',
                           'CQUADX4', 'CQUADX8', 'CTRIAX6',
                           'CTRAX3', 'CTRAX6']:
            # NOTE(review): 'CTRIAX6' also appears in the branch above, so it
            # can never reach this list; left as-is to preserve behavior
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
        elif card_type == 'PLPLANE':
            for pid in ids:
                prop = model.properties[pid]
                cids_used.add(prop.cid)
                mids_used.add(prop.Mid())
        elif card_type == 'PPLANE':
            for pid in ids:
                prop = model.properties[pid]
                mids_used.add(prop.Mid())

        elif card_type in ['CROD', 'CRAC2D', 'CRAC3D']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
        elif card_type in ['CONROD']:
            # CONROD embeds its material directly (Mid, not Pid)
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Mid())
        elif card_type == 'CCONEAX':
            for eid in ids:
                elem = model.elements[eid]
                pids_used.add(elem.Pid())
        elif card_type in ['PLOTEL']:
            for eid in ids:
                elem = model.plotels[eid]
                nids_used.update(elem.node_ids)

        elif card_type in ['PSOLID', 'PLSOLID', 'PIHEX']:
            for pid in ids:
                prop = model.properties[pid]
                mids_used.add(prop.Mid())
        elif card_type in ['PDAMP5']:
            # PDAMP5 references a *thermal* material
            for pid in ids:
                prop = model.properties[pid]
                mids_thermal_used.add(prop.Mid())
        elif card_type in ['PBAR', 'PBARL', 'PROD', 'PTUBE', 'PBEAM', 'PBEAML', 'PBEAM3',
                           'PSHEAR', 'PRAC2D', 'PRAC3D', 'PBEND']:
            for pid in ids:
                prop = model.properties[pid]
                mids_used.add(prop.Mid())
        elif card_type in ['PSHELL']:
            for pid in ids:
                prop = model.properties[pid]
                mids = [mid for mid in prop.material_ids if mid is not None]
                mids_used.update(mids)
        elif card_type in ['PCOMP', 'PCOMPG']:
            for pid in ids:
                prop = model.properties[pid]
                mids = prop.material_ids
                mids_used.update(mids)
        elif card_type in ['PBCOMP']:
            for pid in ids:
                prop = model.properties[pid]
                mids = prop.Mids()
                mids_used.add(prop.Mid())
                mids_used.update(mids)
        elif card_type in ['PCOMPS']:
            for pid in ids:
                prop = model.properties[pid]
                mids = prop.Mids()
                mids_used.update(mids)
                cids_used.update(prop.cordm)
        elif card_type == 'PCONEAX':
            for pid in ids:
                # MID1 T1 MID2 I MID3 T2 NSM
                prop = model.properties[pid]
                #print(prop.object_methods())
                mids = [mid for mid in prop.Mids() if mid not in (0, None)]
                prop = model.properties[pid]
                mids_used.update(mids)

        elif card_type in ['RBAR', 'RBAR1', 'RBE1', 'RBE2', 'RBE3', 'RROD', 'RSPLINE', 'RSSCON']:
            for eid in ids:
                elem = model.rigid_elements[eid]
                #print(elem.object_attributes())
                #print(elem.object_methods())
                nids_used.update(elem.independent_nodes)
                nids_used.update(elem.dependent_nodes)

        elif card_type in ['TLOAD1', 'TLOAD2', 'RLOAD1', 'RLOAD2', 'ACSRCE']:
            pass
        elif card_type in load_types:
            _store_loads(model, card_type, ids, nids_used, eids_used, cids_used)
        elif card_type == 'TEMPD':
            pass
            #for temp_id in ids:
                #tempd = self.tempds[temp_id]
        elif card_type == 'MPCADD':
            pass
            #for mpcadds in model.mpcadds.values():
                #for mpcadd in mpcadds:
                    #nids_used.update(mpc.node_ids)
        elif card_type == 'MPC':
            for mpcs in model.mpcs.values():
                for mpc in mpcs:
                    nids_used.update(mpc.node_ids)
        elif card_type == 'SPCADD':
            pass
            #for spcadds in model.spcadds.values():
                #for spcadd in spcadds:
                    #nids_used.update(spc.node_ids)
        elif card_type in ['SPC1', 'SPC', 'GMSPC', 'SPCAX']:
            for spcs in model.spcs.values():
                for spc in spcs:
                    if spc.type in ['GMSPC', 'SPCAX']:
                        pass
                    elif spc.type in ['SPC1', 'SPC']:
                        nids_used.update(spc.node_ids)
                    else:
                        raise NotImplementedError(spc)

        elif card_type in ['TABLED1', 'TABLED2', 'TABLED3', 'TABLED4',
                           'TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4',
                           'TABDMP1', 'TABRND1', 'TABLES1',]:
            pass
        elif card_type == 'SUPORT':
            for suport in model.suport:
                nids_used.update(suport.node_ids)
        elif card_type == 'SUPORT1':
            for suport1 in model.suport1.values():
                nids_used.update(suport1.node_ids)
        elif card_type == 'GRID':
            # keep the position (Cp) and displacement (Cd) coords of every node
            for unused_nid, node in model.nodes.items():
                cids_used.update([node.Cp(), node.Cd()])

        elif card_type in ['CBAR', 'CBEAM', 'CBEND']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
                # g0 is the optional orientation node
                if elem.g0 is not None:
                    assert isinstance(elem.g0, int), elem.g0
                    nids_used.add(elem.g0)
        elif card_type == 'CBEAM3':
            for eid in ids:
                elem = model.elements[eid]
                nids_used.add(elem.Ga())
                nids_used.add(elem.Gb())
                if elem.gc is not None:
                    nids_used.add(elem.gc)
                pids_used.add(elem.Pid())
                # NOTE(review): g0 is asserted but never added to nids_used
                # here (unlike CBAR/CBEAM) -- confirm whether intentional
                if elem.g0 is not None:
                    assert isinstance(elem.g0, int), elem.g0
        elif card_type == 'CFAST':
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
        elif card_type == 'CGAP':
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
                if elem.g0 is not None:
                    assert isinstance(elem.G0(), int), elem.G0()
                    nids_used.add(elem.G0())
        elif card_type in ['CBUSH1D', 'CBUSH2D']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
                cids_used.add(elem.Cid())
        elif card_type in ['PBUSH']:
            pass
            #for pid in ids:
                #prop = model.properties[pid]
                #raise RuntimeError(prop)
        elif card_type == 'PBUSHT':
            # tables
            pass
        elif card_type in ['CBUSH']:
            for eid in ids:
                elem = model.elements[eid]
                nids_used.update(elem.node_ids)
                pids_used.add(elem.Pid())
                if elem.g0 is not None:
                    assert isinstance(elem.g0, int), elem.g0
                    nids_used.add(elem.g0)
                # TODO: cid

        elif card_type == 'AESURF':
            #CID1 | ALID1 | CID2 | ALID2
            for aesurf in model.aesurf.values():
                cids_used.add(aesurf.Cid1())
                cid2 = aesurf.Cid2()
                if cid2 is not None:
                    cids_used.add(cid2)
        elif card_type in ['SPLINE1', 'SPLINE2', 'SPLINE3', 'SPLINE4', 'SPLINE5']:
            pass
            #for spline_id in ids:
                #spline = model.splines[spline_id]
                #if card_type in ['SPLINE1', 'SPLINE2', 'SPLINE4', 'SPLINE5']:
                    #sets_used.add(spline.Set())
        elif card_type in ['CAERO1']:
            for eid in ids:
                caero = model.caeros[eid]
                # PID, LSPAN, LCHORD
                cids_used.add(caero.Cp())

        elif card_type in skip_cards:
            pass
        elif card_type in set_types_simple:
            # handled based on context in other blocks
            pass
        elif card_type in ['USET', 'USET1']:
            for set_cards in model.usets.values():
                for set_card in set_cards:
                    nids_used.update(set_card.ids)
        elif card_type in set_types:
            # e.g. 'ASET1' -> model.asets
            obj = card_type[:4].lower() + 's'
            sets = getattr(model, obj)  # list of SETs
            for set_card in sets:
                nids_used.update(set_card.ids)
        elif card_type in seset_types:
            obj = card_type[:6].lower() + 's'
            sets = getattr(model, obj)  # list of SETs
            for set_card in sets:
                nids_used.update(set_card.ids)

        elif card_type in ['DCONSTR']:
            pass
        elif card_type == 'DRESP1':
            _store_dresp1(model, ids, nids_used, pids_used)
        elif card_type == 'DRESP2':
            pass
            #for dresp_id in ids:
                #dresp = model.dresps[dresp_id]
                #dresp.deqatn
                #if dresp.property_type in ['PSHELL', 'PCOMP', 'PBAR', 'PBARL', 'PBEAM', 'PROD']:
                    #pids_used.update(dresp.atti_values())
                #elif dresp.property_type is None:
                    #if dresp.response_type in ['WEIGHT', 'EIGN', 'VOLUME']:
                        #pass
                    #elif dresp.response_type in ['DISP']:
                        #nids_used.update(dresp.atti)
                    #else:
                        #msg = str(dresp) + 'response_type=%r' % dresp.response_type
                        #raise NotImplementedError(msg)
                #else:
                    #raise NotImplementedError(dresp)
                    #msg = str(dresp) + 'response_type=%r' % dresp.response_type
                    #raise NotImplementedError(msg)
        elif card_type == 'DRESP3':
            pass
        elif card_type in ['DVPREL1', 'DVPREL2']:
            for dvprel_id in ids:
                dvprel = model.dvprels[dvprel_id]
                desvars_used.update(dvprel.desvar_ids)
                if dvprel.prop_type in ['PSHELL', 'PCOMP', 'PBAR', 'PBARL', 'PBEAM',
                                        'PROD', 'PELAS', 'PBUSH', 'PDAMP', 'PTUBE',
                                        'PSHEAR', 'PDAMP', 'PMASS', 'PBEAML', 'PCOMPG',
                                        'PVISC', 'PBUSHT', 'PELAST', 'PBUSH1D', 'PGAP']:
                    pids_used.add(dvprel.Pid())
                elif dvprel.prop_type in ['DISP']:
                    msg = str(dvprel) + 'dvprel.prop_type=%r' % dvprel.prop_type
                    raise NotImplementedError(msg)
                else:
                    raise NotImplementedError(dvprel)
        elif card_type in ['DVCREL1', 'DVCREL2']:
            for dvcrel_id in ids:
                dvcrel = model.dvcrels[dvcrel_id]
                desvars_used.update(dvcrel.desvar_ids)
                if dvcrel.element_type in ['CMASS2', 'CMASS4', 'CONM1', 'CONM2',
                                           'CELAS2', 'CELAS4', 'CBUSH',
                                           'CDAMP2', 'CQUAD4', 'CGAP', 'CBAR']:
                    #eids_used.add(dvcrel.Eid())  # we don't remove elements...for now
                    pass
                else:
                    msg = str(dvcrel) + 'element_type=%r' % dvcrel.element_type
                    raise NotImplementedError(msg)
        elif card_type in ['DVMREL1', 'DVMREL2']:
            for dvmrel_id in ids:
                dvmrel = model.dvmrels[dvmrel_id]
                desvars_used.update(dvmrel.desvar_ids)
                if dvmrel.mat_type in ['MAT1', 'MAT2', 'MAT8', 'MAT9', 'MAT10', 'MAT11']:
                    mids_used.add(dvmrel.Mid())
                else:
                    msg = str(dvmrel) + 'mat_type=%r' % dvmrel.mat_type
                    raise NotImplementedError(msg)
        elif card_type == 'DVGRID':
            for dvgrid_id in ids:
                dvgrids = model.dvgrids[dvgrid_id]
                for dvgrid in dvgrids:
                    desvars_used.add(dvgrid.desvar_id)
                    nids_used.add(dvgrid.nid)
                    cids_used.add(dvgrid.cid)
        elif card_type == 'TF':
            for tf_id in ids:
                tfs = model.transfer_functions[tf_id]
                for transfer_function in tfs:
                    nids_used.update(transfer_function.nids)
        elif card_type in ['NSM', 'NSM1', 'NSML', 'NSML1']:
            _store_nsm(model, ids, pids_used)
        elif card_type in ['POINTAX', 'AXIC', 'RINGAX']:
            pass
            #for eid in ids:
                #elem = model.plotels[eid]
                #nids_used.update(elem.node_ids)
        elif card_type in ['PBRSECT', 'PBMSECT']:
            for pid in ids:
                prop = model.properties[pid]
                if prop.outp:
                    sets_used.add(prop.outp)
                if prop.brps:
                    for unused_key, value in prop.brps.items():
                        sets_used.add(value)
                #if prop.cores:
                    #for key, value in prop.cores.items():
                        #pids_used.add(value)
        else:
            raise NotImplementedError(card_type)

    #for pid, prop in model.properties.items():
        #prop = model.properties[pid]
        #if prop.type in no_materials:
            #continue
        #elif prop.type == 'PSHELL':
            #mids_used.extend([mid for mid in prop.material_ids if mid is not None])
        #elif prop.type == 'PCONEAX':
            #mids_used.extend([mid for mid in model.Mids() if mid is not None])
        #elif prop.type in prop_mid:
            #mids_used.append(prop.Mid())
        #elif prop.type in ['PCOMP', 'PCOMPG', 'PCOMPS']:
            #mids_used.extend(prop.Mids())
        #elif prop.type == 'PBCOMP':
            #mids_used.append(prop.Mid())
            #mids_used.extend(prop.Mids())
        #else:
            #raise NotImplementedError(prop)

    # desvar removal is not implemented yet, so it is hard-disabled here
    remove_desvars = False
    _remove(
        model,
        nids_used, cids_used,
        pids_used, pids_mass_used,
        mids_used,
        desvars_used,
        remove_nids=remove_nids,
        remove_cids=remove_cids,
        remove_pids=remove_pids,
        remove_mids=remove_mids,
        unused_remove_desvars=remove_desvars,
    )
def _store_nsm(model, ids, pids_used):
"""helper for ``remove_unused``"""
for nsm_id in ids:
nsms = model.nsms[nsm_id]
for nsm in nsms:
idsi = nsm.ids
if nsm.nsm_type in ['PROD', 'PBARL', 'PBEAML',
'PSHELL', 'PCOMP', ]:
if len(idsi) == 1 and idsi[0] == 'ALL':
idsi = list(model.properties.keys())
#raise NotImplementedError('found ALL...\n%s' % str(nsm))
pids_used.update(idsi)
elif nsm.nsm_type in ['CONROD', 'ELEMENT']:
# we skip this because we assume all elements are used
#if len(idsi) == 1 and idsi[0] == 'ALL':
#raise NotImplementedError('found ALL...\n%s' % str(nsm))
#eids_used.update(idsi)
pass
else:
msg = 'found nsm_type=%r...\n%s' % (nsm.nsm_type, str(nsm))
raise NotImplementedError(msg)
def _store_loads(model, unused_card_type, unused_ids, nids_used, eids_used, cids_used):
    """helper for ``remove_unused``

    Records every node/element/coord id referenced by the loads in the
    model; mutates ``nids_used``, ``eids_used`` and ``cids_used`` in place.
    Note that this walks *all* loads in ``model.loads`` rather than only
    the passed-in ids (``unused_card_type``/``unused_ids`` are intentionally
    unused), so it may run redundantly for multiple load card types.
    """
    for loads in model.loads.values():
        for load in loads:
            if load.type in ['FORCE', 'MOMENT']:
                nids_used.add(load.node_id)
                cids_used.add(load.Cid())
            elif load.type in ['FORCE1', 'FORCE2', 'MOMENT1', 'MOMENT2']:
                nids_used.update(load.node_ids)
            elif load.type == 'GRAV':
                cids_used.add(load.Cid())
            elif load.type == 'RANDPS':
                pass
            elif load.type == 'PLOAD':
                nids_used.update(load.node_ids)
            elif load.type == 'PLOAD1':
                # elements are assumed used, so the eid is not tracked
                #eid = integer(card, 2, 'eid')
                pass
            elif load.type == 'PLOAD2':
                #eids_used.update(load.element_ids)
                pass
            elif load.type == 'PLOAD4':
                # eids, g1, g34
                cids_used.add(load.Cid())
            elif load.type == 'DEFORM':
                eids_used.add(load.Eid())
            elif load.type == 'SPCD':
                nids_used.update(load.node_ids)
            elif load.type == 'GMLOAD':
                cids_used.add(load.Cid())
            elif load.type in ['RFORCE', 'RFORCE1']:
                nids_used.add(load.node_id)
                cids_used.add(load.Cid())
            elif load.type == 'TEMP':
                # the temperature mapping's keys are the loaded node ids
                nids_used.update(list(load.temperatures.keys()))
            elif load.type == 'ACCEL':
                # nids?
                cids_used.add(load.Cid())
            elif load.type == 'ACCEL1':
                # nids?
                cids_used.add(load.Cid())
            elif load.type in ['QBDY1', 'QBDY2', 'QBDY3', 'QHBDY']:
                pass
                #'QBDY1', 'QBDY2', 'QBDY3', 'QHBDY', 'PLOADX1
            elif load.type in ['PLOADX1']:
                nids_used.update(load.node_ids)
            elif load.type in ['SLOAD']:
                nids_used.update(load.node_ids)
            elif load.type in ['LOAD', 'LSEQ', 'LOADCYN']:
                # combination cards reference other loads, not nodes/coords
                pass
            elif load.type in ['QVOL']:
                # eids
                pass
            elif load.type in ['TEMPAX']:
                pass  # not done...
            else:
                raise NotImplementedError(load)
def _store_dresp1(model, ids, nids_used, pids_used):
    """helper for ``remove_unused``

    Tracks the property/node ids referenced by DRESP1 design responses so
    they are not removed; mutates ``nids_used`` and ``pids_used`` in place.
    Raises NotImplementedError for (property_type, response_type)
    combinations that have not been mapped yet.
    """
    for dresp_id in ids:
        dresp = model.dresps[dresp_id]
        if dresp.property_type in ['PSHELL', 'PCOMP', 'PCOMPG', 'PBAR', 'PBARL', 'PBEAM',
                                   'PROD', 'PDAMP', 'PVISC', 'PTUBE', 'PSHEAR', 'PELAS',
                                   'PSOLID', 'PBEAML']:
            # property-based responses: atti values are property ids
            pids_used.update(dresp.atti_values())
        elif dresp.property_type == 'ELEM':
            # element-based responses: elements are assumed used
            if dresp.response_type in ['STRESS', 'FRSTRE',
                                       'CFAILURE',
                                       'TFORC', 'FRFORC']:
                #eids_used.update(dresp.atti_values())
                pass
            else:
                msg = (
                    str(dresp) + 'region=%r property_type=%r response_type=%r, '
                    'atta=%r attb=%s atti=%s' % (
                        dresp.region, dresp.property_type, dresp.response_type,
                        dresp.atta, dresp.attb, dresp.atti))
                raise NotImplementedError(msg)
        #elif dresp.property_type == 'STRESS':
        elif dresp.property_type is None:
            # global responses or responses keyed by node/flutter/trim ids
            if dresp.response_type in ['WEIGHT', 'EIGN', 'VOLUME', 'LAMA', 'CEIG',
                                       'FREQ', 'STABDER']:
                pass
            elif dresp.response_type in ['DISP', 'FRDISP', 'TDISP', 'RMSDISP', 'PSDDISP',
                                         'TVELO', 'FRVELO', 'RMSVELO',
                                         'TACCL', 'FRACCL', 'RMSACCL',
                                         'SPCFORCE', 'TSPCF', 'FRSPCF',
                                         'FORCE', 'TFORC', 'FRFORC']:
                nids_used.update(dresp.atti)
            elif dresp.response_type in ['FLUTTER', 'TRIM', 'DIVERG']:
                # flutter_id / trim_id
                pass
            else:
                msg = (
                    str(dresp) + 'region=%r property_type=%r response_type=%r '
                    'atta=%r attb=%s atti=%s' % (
                        dresp.region, dresp.property_type, dresp.response_type,
                        dresp.atta, dresp.attb, dresp.atti))
                raise NotImplementedError(msg)
        else:
            msg = (
                str(dresp) + 'region=%r property_type=%r response_type=%r '
                'atta=%r attb=%s atti=%s' % (
                    dresp.region, dresp.property_type, dresp.response_type,
                    dresp.atta, dresp.attb, dresp.atti))
            raise NotImplementedError(msg)
def _remove(model,
nids_used, cids_used,
pids_used, pids_mass_used, mids_used,
unused_desvars_used,
remove_nids=True, remove_cids=True,
remove_pids=True, remove_mids=True,
unused_remove_desvars=True):
"""actually removes the cards"""
nids = set(model.nodes.keys())
pids = set(model.properties.keys())
pids_mass = set(model.properties_mass.keys())
cids = set(model.coords.keys())
mids = set(model.materials.keys())
nids_to_remove = list(nids - nids_used)
pids_to_remove = list(pids - pids_used)
pids_mass_to_remove = list(pids_mass - pids_mass_used)
mids_to_remove = list(mids - mids_used)
cids_to_remove = list(cids - cids_used)
if 0 in cids_to_remove:
cids_to_remove.remove(0)
if remove_nids and nids_to_remove:
for nid in nids_to_remove:
del model.nodes[nid]
nids_to_remove.sort()
model.log.debug('removed nodes %s' % nids_to_remove)
if remove_cids and cids_to_remove:
for cid in cids_to_remove:
del model.coords[cid]
cids_to_remove.sort()
model.log.debug('removing coords %s' % cids_to_remove)
if remove_pids and pids_to_remove:
for pid in pids_mass_to_remove:
del model.properties_mass[pid]
pids_mass_to_remove.sort()
model.log.debug('removing properties_mass %s' % pids_mass_to_remove)
for pid in pids_to_remove:
del model.properties[pid]
pids_to_remove.sort()
model.log.debug('removing properties %s' % pids_to_remove)
if remove_mids and mids_to_remove:
for mid in mids_to_remove:
del model.materials[mid]
mids_to_remove.sort()
model.log.debug('removing materials %s' % mids_to_remove)
return model
| [
"mesheb82@gmail.com"
] | mesheb82@gmail.com |
4236e0d6d96b40e5aafd2aa62ded0a3dd03084ea | 0dddc0508138396c740901be4a0f9eebefb8fded | /ax/storage/sqa_store/save.py | e0c902858ec5062e3292a3e03a407a0e851c1bb3 | [
"MIT"
] | permissive | facebook/Ax | 473beb143016f95f4ec381ed1bd95b32c1ca31f8 | 6443cee30cbf8cec290200a7420a3db08e4b5445 | refs/heads/main | 2023-09-01T09:29:13.684709 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 169,880,381 | 2,207 | 315 | MIT | 2023-09-14T21:26:51 | 2019-02-09T15:23:44 | Jupyter Notebook | UTF-8 | Python | false | false | 19,890 | py | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from logging import Logger
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from ax.core.base_trial import BaseTrial
from ax.core.experiment import Experiment
from ax.core.generator_run import GeneratorRun
from ax.core.metric import Metric
from ax.core.outcome_constraint import ObjectiveThreshold, OutcomeConstraint
from ax.core.runner import Runner
from ax.exceptions.core import UserInputError
from ax.exceptions.storage import SQADecodeError
from ax.modelbridge.generation_strategy import GenerationStrategy
from ax.storage.sqa_store.db import session_scope, SQABase
from ax.storage.sqa_store.decoder import Decoder
from ax.storage.sqa_store.encoder import Encoder
from ax.storage.sqa_store.sqa_classes import (
SQAData,
SQAGeneratorRun,
SQAMetric,
SQARunner,
SQATrial,
)
from ax.storage.sqa_store.sqa_config import SQAConfig
from ax.storage.sqa_store.utils import copy_db_ids
from ax.utils.common.base import Base
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast, not_none
logger: Logger = get_logger(__name__)
def save_experiment(experiment: Experiment, config: Optional[SQAConfig] = None) -> None:
    """Save experiment to the database (using default SQAConfig if none given)."""
    # Validate up front: only named Experiment instances can be persisted.
    if not isinstance(experiment, Experiment):
        raise ValueError("Can only save instances of Experiment")
    if not experiment.has_name:
        raise ValueError("Experiment name must be set prior to saving.")
    sqa_config = config if config is not None else SQAConfig()
    _save_experiment(
        experiment=experiment,
        encoder=Encoder(config=sqa_config),
        decoder=Decoder(config=sqa_config),
    )
def _save_experiment(
    experiment: Experiment,
    encoder: Encoder,
    decoder: Decoder,
    return_sqa: bool = False,
    validation_kwargs: Optional[Dict[str, Any]] = None,
) -> Optional[SQABase]:
    """Save experiment, using given Encoder instance.

    1) Convert Ax object to SQLAlchemy object.
    2) Determine if there is an existing experiment with that name in the DB.
    3) If not, create a new one.
    4) If so, update the old one.
    The update works by merging the new SQLAlchemy object into the
    existing SQLAlchemy object, and then letting SQLAlchemy handle the
    actual DB updates.
    """
    sqa_cls = encoder.config.class_to_sqa_class[Experiment]

    # Look up the id of any experiment already stored under this name.
    with session_scope() as session:
        id_row = (
            # pyre-ignore Undefined attribute [16]: `SQABase` has no attribute `id`
            session.query(sqa_cls.id).filter_by(name=experiment.name).one_or_none()
        )
    existing_id = id_row[0] if id_row else None

    encoder.validate_experiment_metadata(
        experiment,
        existing_sqa_experiment_id=existing_id,
        **(validation_kwargs or {}),
    )

    experiment_sqa = _merge_into_session(
        obj=experiment,
        encode_func=encoder.experiment_to_sqa,
        decode_func=decoder.experiment_from_sqa,
    )
    return checked_cast(SQABase, experiment_sqa) if return_sqa else None
def save_generation_strategy(
    generation_strategy: GenerationStrategy, config: Optional[SQAConfig] = None
) -> int:
    """Save generation strategy (using default SQAConfig if no config is
    specified). If the generation strategy has an experiment set, the experiment
    will be saved first.

    Returns:
        The ID of the saved generation strategy.
    """
    sqa_config = config if config is not None else SQAConfig()
    return _save_generation_strategy(
        generation_strategy=generation_strategy,
        encoder=Encoder(config=sqa_config),
        decoder=Decoder(config=sqa_config),
    )
def _save_generation_strategy(
    generation_strategy: GenerationStrategy, encoder: Encoder, decoder: Decoder
) -> int:
    """Merge the generation strategy into the DB session and return its id.

    Raises if the strategy has an experiment that has not been saved yet,
    since the DB row needs the experiment's id for the relationship.
    """
    # A generation strategy only has an experiment once it has generated
    # something; an unattached strategy is saved without an experiment link.
    experiment = generation_strategy._experiment
    experiment_id = None
    if experiment is not None:
        experiment_id = experiment.db_id
        if experiment_id is None:
            raise ValueError(
                f"Experiment {experiment.name} should be saved before "
                "generation strategy."
            )
    _merge_into_session(
        obj=generation_strategy,
        encode_func=encoder.generation_strategy_to_sqa,
        decode_func=decoder.generation_strategy_from_sqa,
        encode_args={"experiment_id": experiment_id},
        decode_args={"experiment": experiment},
    )
    return not_none(generation_strategy.db_id)
def save_or_update_trial(
    experiment: Experiment, trial: BaseTrial, config: Optional[SQAConfig] = None
) -> None:
    """Add new trial to the experiment, or update if already exists
    (using default SQAConfig)."""
    sqa_config = config if config is not None else SQAConfig()
    _save_or_update_trial(
        experiment=experiment,
        trial=trial,
        encoder=Encoder(config=sqa_config),
        decoder=Decoder(config=sqa_config),
    )
def _save_or_update_trial(
    experiment: Experiment,
    trial: BaseTrial,
    encoder: Encoder,
    decoder: Decoder,
    reduce_state_generator_runs: bool = False,
) -> None:
    """Add new trial to the experiment, or update if already exists.

    Thin convenience wrapper that delegates to the batched
    ``_save_or_update_trials`` with a single-trial batch.
    """
    single_trial_batch = [trial]
    _save_or_update_trials(
        experiment=experiment,
        trials=single_trial_batch,
        encoder=encoder,
        decoder=decoder,
        reduce_state_generator_runs=reduce_state_generator_runs,
    )
def save_or_update_trials(
    experiment: Experiment,
    trials: List[BaseTrial],
    config: Optional[SQAConfig] = None,
    batch_size: Optional[int] = None,
    reduce_state_generator_runs: bool = False,
) -> None:
    """Add new trials to the experiment, or update if already exists
    (using default SQAConfig).

    Note that new data objects (whether attached to existing or new trials)
    will also be added to the experiment, but existing data objects in the
    database will *not* be updated or removed.
    """
    sqa_config = config if config is not None else SQAConfig()
    _save_or_update_trials(
        experiment=experiment,
        trials=trials,
        encoder=Encoder(config=sqa_config),
        decoder=Decoder(config=sqa_config),
        batch_size=batch_size,
        reduce_state_generator_runs=reduce_state_generator_runs,
    )
def _save_or_update_trials(
    experiment: Experiment,
    trials: List[BaseTrial],
    encoder: Encoder,
    decoder: Decoder,
    batch_size: Optional[int] = None,
    reduce_state_generator_runs: bool = False,
) -> None:
    """Add new trials to the experiment, or update if they already exist.

    Note that new data objects (whether attached to existing or new trials)
    will also be added to the experiment, but existing data objects in the
    database will *not* be updated or removed.

    When ``reduce_state_generator_runs`` is True, all trials except the most
    recent one are stored with reduced generator-run state (the last trial
    keeps full state).
    """
    experiment_id = experiment._db_id
    if experiment_id is None:
        raise ValueError("Must save experiment first.")

    # links each merged SQA row (trial or data) back to this experiment
    # pyre-fixme[53]: Captured variable `experiment_id` is not annotated.
    # pyre-fixme[3]: Return type must be annotated.
    def add_experiment_id(sqa: Union[SQATrial, SQAData]):
        sqa.experiment_id = experiment_id

    if reduce_state_generator_runs:
        # the newest trial keeps full state; older ones are reduced
        latest_trial = trials[-1]
        trials_to_reduce_state = trials[0:-1]

        # pyre-fixme[3]: Return type must be annotated.
        def trial_to_reduced_state_sqa_encoder(t: BaseTrial):
            return encoder.trial_to_sqa(t, generator_run_reduced_state=True)

        _bulk_merge_into_session(
            objs=trials_to_reduce_state,
            encode_func=trial_to_reduced_state_sqa_encoder,
            decode_func=decoder.trial_from_sqa,
            decode_args_list=[{"experiment": experiment} for _ in range(len(trials))],
            modify_sqa=add_experiment_id,
            batch_size=batch_size,
        )

        _bulk_merge_into_session(
            objs=[latest_trial],
            encode_func=encoder.trial_to_sqa,
            decode_func=decoder.trial_from_sqa,
            decode_args_list=[{"experiment": experiment} for _ in range(len(trials))],
            modify_sqa=add_experiment_id,
            batch_size=batch_size,
        )
    else:
        _bulk_merge_into_session(
            objs=trials,
            encode_func=encoder.trial_to_sqa,
            decode_func=decoder.trial_from_sqa,
            decode_args_list=[{"experiment": experiment} for _ in range(len(trials))],
            modify_sqa=add_experiment_id,
            batch_size=batch_size,
        )

    # persist any data objects attached to the saved trials
    datas = []
    data_encode_args = []
    for trial in trials:
        trial_datas = experiment.data_by_trial.get(trial.index, {})
        for ts, data in trial_datas.items():
            if data.db_id is None:
                # Only need to worry about new data, since it's not really possible
                # or supported to modify or remove existing data.
                datas.append(data)
                data_encode_args.append({"trial_index": trial.index, "timestamp": ts})

    _bulk_merge_into_session(
        objs=datas,
        encode_func=encoder.data_to_sqa,
        decode_func=decoder.data_from_sqa,
        encode_args_list=data_encode_args,
        decode_args_list=[
            {"data_constructor": experiment.default_data_constructor}
            for _ in range(len(datas))
        ],
        modify_sqa=add_experiment_id,
        batch_size=batch_size,
    )
def update_generation_strategy(
    generation_strategy: GenerationStrategy,
    generator_runs: List[GeneratorRun],
    config: Optional[SQAConfig] = None,
    batch_size: Optional[int] = None,
    reduce_state_generator_runs: bool = False,
) -> None:
    """Update generation strategy's current step and attach generator runs
    (using default SQAConfig)."""
    sqa_config = config if config is not None else SQAConfig()
    _update_generation_strategy(
        generation_strategy=generation_strategy,
        generator_runs=generator_runs,
        encoder=Encoder(config=sqa_config),
        decoder=Decoder(config=sqa_config),
        batch_size=batch_size,
        reduce_state_generator_runs=reduce_state_generator_runs,
    )
def _update_generation_strategy(
    generation_strategy: GenerationStrategy,
    generator_runs: List[GeneratorRun],
    encoder: Encoder,
    decoder: Decoder,
    batch_size: Optional[int] = None,
    reduce_state_generator_runs: bool = False,
) -> None:
    """Update generation strategy's current step and attach generator runs.

    Requires both the generation strategy and its experiment to have been
    saved already (both db ids must exist); raises ValueError otherwise.
    """
    gs_sqa_class = encoder.config.class_to_sqa_class[GenerationStrategy]

    gs_id = generation_strategy.db_id
    if gs_id is None:
        raise ValueError("GenerationStrategy must be saved before being updated.")

    experiment_id = generation_strategy.experiment.db_id
    if experiment_id is None:
        raise ValueError(
            f"Experiment {generation_strategy.experiment.name} "
            "should be saved before generation strategy."
        )

    # update the strategy's current step index and experiment link in place
    with session_scope() as session:
        session.query(gs_sqa_class).filter_by(id=gs_id).update(
            {
                "curr_index": generation_strategy._curr.index,
                "experiment_id": experiment_id,
            }
        )

    # links each merged generator-run row back to this generation strategy
    # pyre-fixme[53]: Captured variable `gs_id` is not annotated.
    # pyre-fixme[3]: Return type must be annotated.
    def add_generation_strategy_id(sqa: SQAGeneratorRun):
        sqa.generation_strategy_id = gs_id

    # pyre-fixme[3]: Return type must be annotated.
    def generator_run_to_sqa_encoder(gr: GeneratorRun, weight: Optional[float] = None):
        return encoder.generator_run_to_sqa(
            gr,
            weight=weight,
            reduced_state=reduce_state_generator_runs,
        )

    _bulk_merge_into_session(
        objs=generator_runs,
        encode_func=generator_run_to_sqa_encoder,
        decode_func=decoder.generator_run_from_sqa,
        decode_args_list=[
            {
                "reduced_state": False,
                "immutable_search_space_and_opt_config": False,
            }
            for _ in range(len(generator_runs))
        ],
        modify_sqa=add_generation_strategy_id,
        batch_size=batch_size,
    )
def update_runner_on_experiment(
    experiment: Experiment, runner: Runner, encoder: Encoder, decoder: Decoder
) -> None:
    """Replace the runner stored for an already-saved experiment.

    Deletes any runner rows currently associated with the experiment, then
    merges the given runner into the session tagged with the experiment id.

    Raises:
        ValueError: If the experiment has not yet been saved (no ``db_id``).
    """
    exp_db_id = experiment.db_id
    if exp_db_id is None:
        raise ValueError("Experiment must be saved before being updated.")
    runner_sqa_class = encoder.config.class_to_sqa_class[Runner]

    # An experiment has a single runner; drop the stale row(s) first.
    with session_scope() as session:
        session.query(runner_sqa_class).filter_by(experiment_id=exp_db_id).delete()

    def _attach_experiment_id(sqa: SQARunner) -> None:
        sqa.experiment_id = exp_db_id

    _merge_into_session(
        obj=runner,
        encode_func=encoder.runner_to_sqa,
        decode_func=decoder.runner_from_sqa,
        modify_sqa=_attach_experiment_id,
    )
def update_outcome_constraint_on_experiment(
    experiment: Experiment,
    outcome_constraint: OutcomeConstraint,
    encoder: Encoder,
    decoder: Decoder,
) -> None:
    """Replace a stored outcome constraint (or objective threshold) on an
    already-saved experiment.

    If the constraint was previously persisted, its old metric row is deleted
    before the updated constraint is merged back in, tagged with the
    experiment id.

    Raises:
        UserInputError: If the experiment has not yet been saved.
    """
    exp_db_id = experiment.db_id
    if exp_db_id is None:
        raise UserInputError("Experiment must be saved before being updated.")
    metric_sqa_class = encoder.config.class_to_sqa_class[Metric]

    constraint_db_id = outcome_constraint.db_id
    if constraint_db_id is not None:
        # Remove the stale row; the merge below re-creates it.
        with session_scope() as session:
            query = session.query(metric_sqa_class).filter_by(
                experiment_id=exp_db_id
            )
            query.filter_by(id=constraint_db_id).delete()

    def _attach_experiment_id(sqa: SQAMetric) -> None:
        sqa.experiment_id = exp_db_id

    # Objective thresholds have their own encoder; plain outcome
    # constraints use the generic one.
    if isinstance(outcome_constraint, ObjectiveThreshold):
        encode_func = encoder.objective_threshold_to_sqa
    else:
        encode_func = encoder.outcome_constraint_to_sqa
    _merge_into_session(
        obj=outcome_constraint,
        encode_func=encode_func,
        decode_func=decoder.metric_from_sqa,
        modify_sqa=_attach_experiment_id,
    )
def update_properties_on_experiment(
    experiment_with_updated_properties: Experiment,
    config: Optional[SQAConfig] = None,
) -> None:
    """Persist the ``_properties`` dict of an already-saved experiment.

    Args:
        experiment_with_updated_properties: Experiment whose in-memory
            properties should overwrite the stored ones.
        config: Optional storage config; defaults to a fresh ``SQAConfig``.

    Raises:
        ValueError: If the experiment has not yet been saved.
    """
    sqa_config = config or SQAConfig()
    experiment_sqa_class = sqa_config.class_to_sqa_class[Experiment]
    exp_db_id = experiment_with_updated_properties.db_id
    if exp_db_id is None:
        raise ValueError("Experiment must be saved before being updated.")
    new_properties = experiment_with_updated_properties._properties
    with session_scope() as session:
        session.query(experiment_sqa_class).filter_by(id=exp_db_id).update(
            {"properties": new_properties}
        )
def _merge_into_session(
    obj: Base,
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    encode_func: Callable,
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    decode_func: Callable,
    encode_args: Optional[Dict[str, Any]] = None,
    decode_args: Optional[Dict[str, Any]] = None,
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    modify_sqa: Optional[Callable] = None,
) -> SQABase:
    """Create or update the DB rows backing a single user-facing object.

    Steps:
        1. Encode ``obj`` to a SQA object via ``encode_func``.
        2. Optionally mutate the SQA object via ``modify_sqa`` (e.g. to set
           a foreign key) before merging.
        3. ``session.merge`` it: SQA objects carrying ids update the
           corresponding rows; those without ids create new rows.
        4. Decode the merged (id-annotated) SQA object back to a user-facing
           object and copy its db_ids onto the original ``obj``.

    Returns:
        The merged SQA object, with ids populated.
    """
    sqa = encode_func(obj, **(encode_args or {}))
    if modify_sqa is not None:
        modify_sqa(sqa=sqa)
    with session_scope() as session:
        merged_sqa = session.merge(sqa)
        # Flush so the DB assigns ids before we decode.
        session.flush()
    decoded = decode_func(merged_sqa, **(decode_args or {}))
    _copy_db_ids_if_possible(obj=obj, new_obj=decoded)
    return merged_sqa
def _bulk_merge_into_session(
    objs: Sequence[Base],
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    encode_func: Callable,
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    decode_func: Callable,
    encode_args_list: Optional[Union[List[None], List[Dict[str, Any]]]] = None,
    decode_args_list: Optional[Union[List[None], List[Dict[str, Any]]]] = None,
    # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
    modify_sqa: Optional[Callable] = None,
    batch_size: Optional[int] = None,
) -> List[SQABase]:
    """Bulk version of ``_merge_into_session``.

    Encodes all objects up front, merges them into the session in chunks of
    ``batch_size`` (one session scope per chunk; all at once if no batch
    size is given), then decodes the merged SQA objects and copies db_ids
    back onto the originals.

    Args:
        objs: User-facing objects to persist.
        encode_args_list / decode_args_list: Optional per-object kwargs for
            the encode/decode functions; must align with ``objs``.
        modify_sqa: Optional hook applied to each encoded SQA object.
        batch_size: Number of objects merged per session scope.

    Returns:
        The merged SQA objects, with ids populated.
    """
    total = len(objs)
    if total == 0:
        return []
    # Falsy (None or empty) arg lists become per-object "no kwargs" markers.
    encode_args_list = encode_args_list or [None for _ in range(total)]
    decode_args_list = decode_args_list or [None for _ in range(total)]

    encoded = []
    for obj, encode_args in zip(objs, encode_args_list):
        sqa = encode_func(obj, **(encode_args or {}))
        if modify_sqa is not None:
            modify_sqa(sqa=sqa)
        encoded.append(sqa)

    chunk = batch_size or len(encoded)
    merged: List[SQABase] = []
    # One session scope per chunk keeps transactions bounded.
    for start in range(0, len(encoded), chunk):
        with session_scope() as session:
            for sqa in encoded[start : start + chunk]:
                merged.append(session.merge(sqa))
            # Flush so the DB assigns ids before decoding below.
            session.flush()

    for original, merged_sqa, decode_args in zip(objs, merged, decode_args_list):
        decoded = decode_func(merged_sqa, **(decode_args or {}))
        _copy_db_ids_if_possible(obj=original, new_obj=decoded)
    return merged
# pyre-fixme[2]: Parameter annotation cannot be `Any`.
def _copy_db_ids_if_possible(new_obj: Any, obj: Any) -> None:
    """Best-effort copy of db_ids from ``new_obj`` back onto ``obj``.

    Delegates to ``copy_db_ids``; on ``SQADecodeError`` logs a warning
    instead of failing, except in unit tests (TESTENV set) where the error
    is re-raised so problems surface loudly.
    """
    try:
        copy_db_ids(new_obj, obj, [])
    except SQADecodeError as e:
        if os.environ.get("TESTENV"):
            # Raise these warnings in unittests only.
            raise e
        warning_msg = (
            f"Error encountered when copying db_ids from {new_obj} "
            f"back to user-facing object {obj}. "
            "This might cause issues if you re-save this experiment. "
            f"Exception: {e}"
        )
        logger.warning(warning_msg)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e164c452409ec98b640f0278c6a8b1e41ad50837 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/spacy/lang/en/tokenizer_exceptions.py | c45197771df93cf2b7c2d65795f64d91fe306336 | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 18,495 | py | # coding: utf8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA, TAG, NORM, PRON_LEMMA
# Tokenizer exceptions, keyed by the exact surface string of the token.
_exc = {}
# Real English words that would otherwise be clobbered by the
# apostrophe-less contraction entries generated below ("Ill" vs "I'll",
# "shed" vs "she'd", ...). They are popped from _exc at the end of this
# module.
_exclude = [
    "Ill",
    "ill",
    "Its",
    "its",
    "Hell",
    "hell",
    "Shell",
    "shell",
    "Shed",
    "shed",
    "were",
    "Were",
    "Well",
    "well",
    "Whore",
    "whore",
]
# Pronouns
# "I'm" / "I'ma" contractions (lower- and title-case, with and without
# the apostrophe).
for pron in ["i"]:
    for orth in [pron, pron.title()]:
        _exc[orth + "'m"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'m", LEMMA: "be", NORM: "am", TAG: "VBP"},
        ]
        _exc[orth + "m"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "m", LEMMA: "be", TAG: "VBP", "tenspect": 1, "number": 1},
        ]
        _exc[orth + "'ma"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'m", LEMMA: "be", NORM: "am"},
            {ORTH: "a", LEMMA: "going to", NORM: "gonna"},
        ]
        _exc[orth + "ma"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "m", LEMMA: "be", NORM: "am"},
            {ORTH: "a", LEMMA: "going to", NORM: "gonna"},
        ]
# "'ll" / "'d" contractions for all personal pronouns, including the
# stacked forms "'ll've" and "'d've".
for pron in ["i", "you", "he", "she", "it", "we", "they"]:
    for orth in [pron, pron.title()]:
        _exc[orth + "'ll"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
        ]
        _exc[orth + "ll"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
        ]
        _exc[orth + "'ll've"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "llve"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "'d"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'d", LEMMA: "would", NORM: "would", TAG: "MD"},
        ]
        _exc[orth + "d"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "d", LEMMA: "would", NORM: "would", TAG: "MD"},
        ]
        _exc[orth + "'d've"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'d", LEMMA: "would", NORM: "would", TAG: "MD"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "dve"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "d", LEMMA: "would", NORM: "would", TAG: "MD"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
# "'ve" contractions (only pronouns that take plain "have").
for pron in ["i", "you", "we", "they"]:
    for orth in [pron, pron.title()]:
        _exc[orth + "'ve"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "ve"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
# "'re" contractions (plural / second-person pronouns).
for pron in ["you", "we", "they"]:
    for orth in [pron, pron.title()]:
        _exc[orth + "'re"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'re", LEMMA: "be", NORM: "are"},
        ]
        _exc[orth + "re"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "re", LEMMA: "be", NORM: "are", TAG: "VBZ"},
        ]
# "'s" contractions for third-person singular pronouns; the "'s" token is
# left ambiguous (could be "is", "has" or possessive).
for pron in ["he", "she", "it"]:
    for orth in [pron, pron.title()]:
        _exc[orth + "'s"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "'s", NORM: "'s"},
        ]
        _exc[orth + "s"] = [
            {ORTH: orth, LEMMA: PRON_LEMMA, NORM: pron, TAG: "PRP"},
            {ORTH: "s"},
        ]
# W-words, relative pronouns, prepositions etc.
# Contractions of wh-words / "there" / "that" with 's, 'll, 're, 've, 'd
# (and stacked forms), each in lower- and title-case, with and without
# the apostrophe.
for word in ["who", "what", "when", "where", "why", "how", "there", "that"]:
    for orth in [word, word.title()]:
        _exc[orth + "'s"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'s", NORM: "'s"},
        ]
        _exc[orth + "s"] = [{ORTH: orth, LEMMA: word, NORM: word}, {ORTH: "s"}]
        _exc[orth + "'ll"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
        ]
        _exc[orth + "ll"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
        ]
        _exc[orth + "'ll've"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'ll", LEMMA: "will", NORM: "will", TAG: "MD"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "llve"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "ll", LEMMA: "will", NORM: "will", TAG: "MD"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "'re"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'re", LEMMA: "be", NORM: "are"},
        ]
        _exc[orth + "re"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "re", LEMMA: "be", NORM: "are"},
        ]
        _exc[orth + "'ve"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'ve", LEMMA: "have", TAG: "VB"},
        ]
        _exc[orth + "ve"] = [
            {ORTH: orth, LEMMA: word},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "'d"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'d", NORM: "'d"},
        ]
        _exc[orth + "d"] = [{ORTH: orth, LEMMA: word, NORM: word}, {ORTH: "d"}]
        _exc[orth + "'d've"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "'d", LEMMA: "would", NORM: "would", TAG: "MD"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[orth + "dve"] = [
            {ORTH: orth, LEMMA: word, NORM: word},
            {ORTH: "d", LEMMA: "would", NORM: "would", TAG: "MD"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
# Verbs
# Negated modals/auxiliaries: "can't", "won't", "shouldn't've", ... Each
# entry is generated in lower- and title-case, with and without the
# apostrophe. Note "ca"/"sha"/"wo" are the clipped stems of can/shall/will.
for verb_data in [
    {ORTH: "ca", LEMMA: "can", NORM: "can", TAG: "MD"},
    {ORTH: "could", NORM: "could", TAG: "MD"},
    {ORTH: "do", LEMMA: "do", NORM: "do"},
    {ORTH: "does", LEMMA: "do", NORM: "does"},
    {ORTH: "did", LEMMA: "do", NORM: "do", TAG: "VBD"},
    {ORTH: "had", LEMMA: "have", NORM: "have", TAG: "VBD"},
    {ORTH: "may", NORM: "may", TAG: "MD"},
    {ORTH: "might", NORM: "might", TAG: "MD"},
    {ORTH: "must", NORM: "must", TAG: "MD"},
    {ORTH: "need", NORM: "need"},
    {ORTH: "ought", NORM: "ought", TAG: "MD"},
    {ORTH: "sha", LEMMA: "shall", NORM: "shall", TAG: "MD"},
    {ORTH: "should", NORM: "should", TAG: "MD"},
    {ORTH: "wo", LEMMA: "will", NORM: "will", TAG: "MD"},
    {ORTH: "would", NORM: "would", TAG: "MD"},
]:
    verb_data_tc = dict(verb_data)
    verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
    for data in [verb_data, verb_data_tc]:
        _exc[data[ORTH] + "n't"] = [
            dict(data),
            {ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
        ]
        _exc[data[ORTH] + "nt"] = [
            dict(data),
            {ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
        ]
        _exc[data[ORTH] + "n't've"] = [
            dict(data),
            {ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
            {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
        _exc[data[ORTH] + "ntve"] = [
            dict(data),
            {ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
            {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
        ]
# Modal + "'ve": "could've", "should've", ...
for verb_data in [
    {ORTH: "could", NORM: "could", TAG: "MD"},
    {ORTH: "might", NORM: "might", TAG: "MD"},
    {ORTH: "must", NORM: "must", TAG: "MD"},
    {ORTH: "should", NORM: "should", TAG: "MD"},
    {ORTH: "would", NORM: "would", TAG: "MD"},
]:
    verb_data_tc = dict(verb_data)
    verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
    for data in [verb_data, verb_data_tc]:
        _exc[data[ORTH] + "'ve"] = [dict(data), {ORTH: "'ve", LEMMA: "have", TAG: "VB"}]
        _exc[data[ORTH] + "ve"] = [dict(data), {ORTH: "ve", LEMMA: "have", TAG: "VB"}]
# Negated forms of be/have/dare: "ain't", "isn't", "wasn't", "hasn't", ...
for verb_data in [
    {ORTH: "ai", LEMMA: "be", TAG: "VBP", "number": 2},
    {ORTH: "are", LEMMA: "be", NORM: "are", TAG: "VBP", "number": 2},
    {ORTH: "is", LEMMA: "be", NORM: "is", TAG: "VBZ"},
    {ORTH: "was", LEMMA: "be", NORM: "was"},
    {ORTH: "were", LEMMA: "be", NORM: "were"},
    {ORTH: "have", NORM: "have"},
    {ORTH: "has", LEMMA: "have", NORM: "has"},
    {ORTH: "dare", NORM: "dare"},
]:
    verb_data_tc = dict(verb_data)
    verb_data_tc[ORTH] = verb_data_tc[ORTH].title()
    for data in [verb_data, verb_data_tc]:
        _exc[data[ORTH] + "n't"] = [
            dict(data),
            {ORTH: "n't", LEMMA: "not", NORM: "not", TAG: "RB"},
        ]
        _exc[data[ORTH] + "nt"] = [
            dict(data),
            {ORTH: "nt", LEMMA: "not", NORM: "not", TAG: "RB"},
        ]
# Other contractions with trailing apostrophe
# Colloquial forms ending in a dropped "g" ("doin'", "goin'", ...): each is
# kept as a single token, with and without the trailing apostrophe, in
# lower- and title-case.
for exc_data in [
    {ORTH: "doin", LEMMA: "do", NORM: "doing"},
    {ORTH: "goin", LEMMA: "go", NORM: "going"},
    {ORTH: "nothin", LEMMA: "nothing", NORM: "nothing"},
    {ORTH: "nuthin", LEMMA: "nothing", NORM: "nothing"},
    {ORTH: "ol", LEMMA: "old", NORM: "old"},
    {ORTH: "somethin", LEMMA: "something", NORM: "something"},
]:
    exc_data_tc = dict(exc_data)
    exc_data_tc[ORTH] = exc_data_tc[ORTH].title()
    for data in [exc_data, exc_data_tc]:
        data_apos = dict(data)
        data_apos[ORTH] = data_apos[ORTH] + "'"
        _exc[data[ORTH]] = [dict(data)]
        _exc[data_apos[ORTH]] = [dict(data_apos)]
# Other contractions with leading apostrophe
# ("'cause", "'em", "'ll", "'nuff") — also added without the apostrophe.
for exc_data in [
    {ORTH: "cause", NORM: "because"},
    {ORTH: "em", LEMMA: PRON_LEMMA, NORM: "them"},
    {ORTH: "ll", LEMMA: "will", NORM: "will"},
    {ORTH: "nuff", LEMMA: "enough", NORM: "enough"},
]:
    exc_data_apos = dict(exc_data)
    exc_data_apos[ORTH] = "'" + exc_data_apos[ORTH]
    for data in [exc_data, exc_data_apos]:
        _exc[data[ORTH]] = [data]
# Times
# Clock times like "3am" / "11p.m." split into the hour and the period.
for h in range(1, 12 + 1):
    for period in ["a.m.", "am"]:
        _exc["%d%s" % (h, period)] = [
            {ORTH: "%d" % h},
            {ORTH: period, LEMMA: "a.m.", NORM: "a.m."},
        ]
    for period in ["p.m.", "pm"]:
        _exc["%d%s" % (h, period)] = [
            {ORTH: "%d" % h},
            {ORTH: period, LEMMA: "p.m.", NORM: "p.m."},
        ]
# Rest
# One-off multi-word contractions that don't follow any productive pattern.
_other_exc = {
    "y'all": [{ORTH: "y'", LEMMA: PRON_LEMMA, NORM: "you"}, {ORTH: "all"}],
    "yall": [{ORTH: "y", LEMMA: PRON_LEMMA, NORM: "you"}, {ORTH: "all"}],
    "how'd'y": [
        {ORTH: "how", LEMMA: "how"},
        {ORTH: "'d", LEMMA: "do"},
        {ORTH: "'y", LEMMA: PRON_LEMMA, NORM: "you"},
    ],
    "How'd'y": [
        {ORTH: "How", LEMMA: "how", NORM: "how"},
        {ORTH: "'d", LEMMA: "do"},
        {ORTH: "'y", LEMMA: PRON_LEMMA, NORM: "you"},
    ],
    "not've": [
        {ORTH: "not", LEMMA: "not", TAG: "RB"},
        {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
    ],
    "notve": [
        {ORTH: "not", LEMMA: "not", TAG: "RB"},
        {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
    ],
    "Not've": [
        {ORTH: "Not", LEMMA: "not", NORM: "not", TAG: "RB"},
        {ORTH: "'ve", LEMMA: "have", NORM: "have", TAG: "VB"},
    ],
    "Notve": [
        {ORTH: "Not", LEMMA: "not", NORM: "not", TAG: "RB"},
        {ORTH: "ve", LEMMA: "have", NORM: "have", TAG: "VB"},
    ],
    "cannot": [
        {ORTH: "can", LEMMA: "can", TAG: "MD"},
        {ORTH: "not", LEMMA: "not", TAG: "RB"},
    ],
    "Cannot": [
        {ORTH: "Can", LEMMA: "can", NORM: "can", TAG: "MD"},
        {ORTH: "not", LEMMA: "not", TAG: "RB"},
    ],
    "gonna": [
        {ORTH: "gon", LEMMA: "go", NORM: "going"},
        {ORTH: "na", LEMMA: "to", NORM: "to"},
    ],
    "Gonna": [
        {ORTH: "Gon", LEMMA: "go", NORM: "going"},
        {ORTH: "na", LEMMA: "to", NORM: "to"},
    ],
    "gotta": [{ORTH: "got"}, {ORTH: "ta", LEMMA: "to", NORM: "to"}],
    "Gotta": [{ORTH: "Got", NORM: "got"}, {ORTH: "ta", LEMMA: "to", NORM: "to"}],
    "let's": [{ORTH: "let"}, {ORTH: "'s", LEMMA: PRON_LEMMA, NORM: "us"}],
    "Let's": [
        {ORTH: "Let", LEMMA: "let", NORM: "let"},
        {ORTH: "'s", LEMMA: PRON_LEMMA, NORM: "us"},
    ],
}
_exc.update(_other_exc)
# Single-token exceptions that carry a lemma/norm: possessive markers,
# colloquial -in' forms, month and US-state abbreviations, etc. Each is
# stored as a one-element token list keyed by its surface form.
for exc_data in [
    {ORTH: "'S", LEMMA: "'s", NORM: "'s"},
    {ORTH: "'s", LEMMA: "'s", NORM: "'s"},
    {ORTH: "\u2018S", LEMMA: "'s", NORM: "'s"},
    {ORTH: "\u2018s", LEMMA: "'s", NORM: "'s"},
    {ORTH: "and/or", LEMMA: "and/or", NORM: "and/or", TAG: "CC"},
    {ORTH: "w/o", LEMMA: "without", NORM: "without"},
    {ORTH: "'re", LEMMA: "be", NORM: "are"},
    {ORTH: "'Cause", LEMMA: "because", NORM: "because"},
    {ORTH: "'cause", LEMMA: "because", NORM: "because"},
    {ORTH: "'cos", LEMMA: "because", NORM: "because"},
    {ORTH: "'Cos", LEMMA: "because", NORM: "because"},
    {ORTH: "'coz", LEMMA: "because", NORM: "because"},
    {ORTH: "'Coz", LEMMA: "because", NORM: "because"},
    {ORTH: "'cuz", LEMMA: "because", NORM: "because"},
    {ORTH: "'Cuz", LEMMA: "because", NORM: "because"},
    {ORTH: "'bout", LEMMA: "about", NORM: "about"},
    {ORTH: "ma'am", LEMMA: "madam", NORM: "madam"},
    {ORTH: "Ma'am", LEMMA: "madam", NORM: "madam"},
    {ORTH: "o'clock", LEMMA: "o'clock", NORM: "o'clock"},
    {ORTH: "O'clock", LEMMA: "o'clock", NORM: "o'clock"},
    {ORTH: "lovin'", LEMMA: "love", NORM: "loving"},
    {ORTH: "Lovin'", LEMMA: "love", NORM: "loving"},
    {ORTH: "lovin", LEMMA: "love", NORM: "loving"},
    {ORTH: "Lovin", LEMMA: "love", NORM: "loving"},
    {ORTH: "havin'", LEMMA: "have", NORM: "having"},
    {ORTH: "Havin'", LEMMA: "have", NORM: "having"},
    {ORTH: "havin", LEMMA: "have", NORM: "having"},
    {ORTH: "Havin", LEMMA: "have", NORM: "having"},
    {ORTH: "doin'", LEMMA: "do", NORM: "doing"},
    {ORTH: "Doin'", LEMMA: "do", NORM: "doing"},
    {ORTH: "doin", LEMMA: "do", NORM: "doing"},
    {ORTH: "Doin", LEMMA: "do", NORM: "doing"},
    {ORTH: "goin'", LEMMA: "go", NORM: "going"},
    {ORTH: "Goin'", LEMMA: "go", NORM: "going"},
    {ORTH: "goin", LEMMA: "go", NORM: "going"},
    {ORTH: "Goin", LEMMA: "go", NORM: "going"},
    {ORTH: "Mt.", LEMMA: "Mount", NORM: "Mount"},
    {ORTH: "Ak.", LEMMA: "Alaska", NORM: "Alaska"},
    {ORTH: "Ala.", LEMMA: "Alabama", NORM: "Alabama"},
    {ORTH: "Apr.", LEMMA: "April", NORM: "April"},
    {ORTH: "Ariz.", LEMMA: "Arizona", NORM: "Arizona"},
    {ORTH: "Ark.", LEMMA: "Arkansas", NORM: "Arkansas"},
    {ORTH: "Aug.", LEMMA: "August", NORM: "August"},
    {ORTH: "Calif.", LEMMA: "California", NORM: "California"},
    {ORTH: "Colo.", LEMMA: "Colorado", NORM: "Colorado"},
    {ORTH: "Conn.", LEMMA: "Connecticut", NORM: "Connecticut"},
    {ORTH: "Dec.", LEMMA: "December", NORM: "December"},
    {ORTH: "Del.", LEMMA: "Delaware", NORM: "Delaware"},
    {ORTH: "Feb.", LEMMA: "February", NORM: "February"},
    {ORTH: "Fla.", LEMMA: "Florida", NORM: "Florida"},
    {ORTH: "Ga.", LEMMA: "Georgia", NORM: "Georgia"},
    {ORTH: "Ia.", LEMMA: "Iowa", NORM: "Iowa"},
    {ORTH: "Id.", LEMMA: "Idaho", NORM: "Idaho"},
    {ORTH: "Ill.", LEMMA: "Illinois", NORM: "Illinois"},
    {ORTH: "Ind.", LEMMA: "Indiana", NORM: "Indiana"},
    {ORTH: "Jan.", LEMMA: "January", NORM: "January"},
    {ORTH: "Jul.", LEMMA: "July", NORM: "July"},
    {ORTH: "Jun.", LEMMA: "June", NORM: "June"},
    {ORTH: "Kan.", LEMMA: "Kansas", NORM: "Kansas"},
    {ORTH: "Kans.", LEMMA: "Kansas", NORM: "Kansas"},
    {ORTH: "Ky.", LEMMA: "Kentucky", NORM: "Kentucky"},
    {ORTH: "La.", LEMMA: "Louisiana", NORM: "Louisiana"},
    {ORTH: "Mar.", LEMMA: "March", NORM: "March"},
    {ORTH: "Mass.", LEMMA: "Massachusetts", NORM: "Massachusetts"},
    {ORTH: "May.", LEMMA: "May", NORM: "May"},
    {ORTH: "Mich.", LEMMA: "Michigan", NORM: "Michigan"},
    {ORTH: "Minn.", LEMMA: "Minnesota", NORM: "Minnesota"},
    {ORTH: "Miss.", LEMMA: "Mississippi", NORM: "Mississippi"},
    {ORTH: "N.C.", LEMMA: "North Carolina", NORM: "North Carolina"},
    {ORTH: "N.D.", LEMMA: "North Dakota", NORM: "North Dakota"},
    {ORTH: "N.H.", LEMMA: "New Hampshire", NORM: "New Hampshire"},
    {ORTH: "N.J.", LEMMA: "New Jersey", NORM: "New Jersey"},
    {ORTH: "N.M.", LEMMA: "New Mexico", NORM: "New Mexico"},
    {ORTH: "N.Y.", LEMMA: "New York", NORM: "New York"},
    {ORTH: "Neb.", LEMMA: "Nebraska", NORM: "Nebraska"},
    {ORTH: "Nebr.", LEMMA: "Nebraska", NORM: "Nebraska"},
    {ORTH: "Nev.", LEMMA: "Nevada", NORM: "Nevada"},
    {ORTH: "Nov.", LEMMA: "November", NORM: "November"},
    {ORTH: "Oct.", LEMMA: "October", NORM: "October"},
    {ORTH: "Okla.", LEMMA: "Oklahoma", NORM: "Oklahoma"},
    {ORTH: "Ore.", LEMMA: "Oregon", NORM: "Oregon"},
    {ORTH: "Pa.", LEMMA: "Pennsylvania", NORM: "Pennsylvania"},
    {ORTH: "S.C.", LEMMA: "South Carolina", NORM: "South Carolina"},
    {ORTH: "Sep.", LEMMA: "September", NORM: "September"},
    {ORTH: "Sept.", LEMMA: "September", NORM: "September"},
    {ORTH: "Tenn.", LEMMA: "Tennessee", NORM: "Tennessee"},
    {ORTH: "Va.", LEMMA: "Virginia", NORM: "Virginia"},
    {ORTH: "Wash.", LEMMA: "Washington", NORM: "Washington"},
    {ORTH: "Wis.", LEMMA: "Wisconsin", NORM: "Wisconsin"},
]:
    _exc[exc_data[ORTH]] = [exc_data]
# Abbreviations kept as single tokens without any lemma/norm override —
# mainly titles and Latin abbreviations whose trailing period must not be
# split off as sentence punctuation.
for orth in [
    "'d",
    "a.m.",
    "Adm.",
    "Bros.",
    "co.",
    "Co.",
    "Corp.",
    "D.C.",
    "Dr.",
    "e.g.",
    "E.g.",
    "E.G.",
    "Gen.",
    "Gov.",
    "i.e.",
    "I.e.",
    "I.E.",
    "Inc.",
    "Jr.",
    "Ltd.",
    "Md.",
    "Messrs.",
    "Mo.",
    "Mont.",
    "Mr.",
    "Mrs.",
    "Ms.",
    "p.m.",
    "Ph.D.",
    "Prof.",
    "Rep.",
    "Rev.",
    "Sen.",
    "St.",
    "vs.",
    "v.s.",
]:
    _exc[orth] = [{ORTH: orth}]
# Drop generated entries that collide with real English words (see
# _exclude above), then expose the finished table.
for string in _exclude:
    if string in _exc:
        _exc.pop(string)
TOKENIZER_EXCEPTIONS = _exc
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
0ddbe61e94debc1edc2a81af4086a174761ad71a | 26a257186d90694e51119c388287edbfcbf5d0d3 | /manage.py | fbc5e79b8b5c7718b21f11ccfed541ffb9e2618b | [] | no_license | csxoa/16-2nd-market-ssua-backend | cfe79a29c68188b62b950e7a1da4cc10fee1b0c4 | 09888462ed16471649cace891ac27e52e0a5b8c0 | refs/heads/main | 2023-02-28T20:02:55.957448 | 2021-02-09T03:58:47 | 2021-02-09T03:58:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'market_ssua.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Django missing (or virtualenv not activated): fail with guidance.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    # Dispatch the management command given on the command line.
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"sol35352000@gmail.com"
] | sol35352000@gmail.com |
b4901ff780580eb8733db95e8de4824e965fd50e | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /nox_mesh_4_loop_repro_debug_verbose/interreplay_20_l_5/replay_config.py | 81a227dc97ef5fe3361b91f64be4cda59ae66e9f | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger

# STS replay configuration: a single NOX controller over a 4-switch mesh.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./nox_core -v -v -i ptcp:6635 routing', address='127.0.0.1', port=6635, cwd='nox_classic/build/src')],
                 topology_class=MeshTopology,
                 topology_params="num_switches=4",
                 patch_panel_class=BufferedPatchPanel,
                 multiplex_sockets=False)
# Replay the recorded event trace through the simulation defined above.
control_flow = Replayer(simulation_config, "experiments/nox_mesh_4_loop_repro_debug_verbose/interreplay_20_l_5/events.trace",
                 input_logger=InputLogger(),
                 wait_on_deterministic_values=False)
# Invariant check: 'None'
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
eb26b4d645ca9ad3766a6fcd1e53b646322b4db4 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/sklearn/base.py | 10620bcf6f59c053e9249cfa67230a2ee5e90210 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2b7638a252b70d507fafc022659ee844b25e7e5cf54b0595420920b4a8c42ce7
size 22906
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
8bab9654a13dd0c4fa950b150ecde8c461622a4a | cd4bbecc3f713b0c25508d0c5674d9e103db5df4 | /toontown/estate/houseDesign.py | 2d46639382ae5909289726d39f4f1793cfaafe6c | [] | no_license | peppythegod/ToontownOnline | dce0351cfa1ad8c476e035aa3947fdf53de916a6 | 2e5a106f3027714d301f284721382cb956cd87a0 | refs/heads/master | 2020-04-20T05:05:22.934339 | 2020-01-02T18:05:28 | 2020-01-02T18:05:28 | 168,646,608 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 84,243 | py | from direct.directtools.DirectSelection import *
from direct.directtools.DirectUtil import ROUND_TO
from direct.directtools.DirectGeometry import LineNodePath
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from toontown.catalog import CatalogFurnitureItem
from toontown.catalog import CatalogItemTypes
from direct.showbase import PythonUtil
from toontown.toontowngui import TTDialog
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPLocalizer
# Preset camera configurations for the furniture-arranging view, named by
# camera height. Each tuple is (position, look-from offset, focus point,
# secondary point, flag) consumed elsewhere in this module.
camPos50 = (Point3(0.0, -10.0, 50.0),
            Point3(0.0, -9.6600000000000001, 49.060000000000002),
            Point3(0.0, 1.5, 12.380000000000001),
            Point3(0.0, 1.5, -3.1000000000000001), 1)
camPos40 = (Point3(0.0, -15.0, 40.0), Point3(0.0, -14.5, 39.130000000000003),
            Point3(0.0, 1.5, 12.380000000000001),
            Point3(0.0, 1.5, -3.1000000000000001), 1)
camPos30 = (Point3(0.0, -20.0, 30.0),
            Point3(0.0, -19.289999999999999, 29.289999999999999),
            Point3(0.0, 1.5, 12.380000000000001),
            Point3(0.0, 1.5, -3.1000000000000001), 1)
camPos20 = (Point3(0.0, -20.0, 20.0), Point3(0.0, -19.129999999999999, 19.5),
            Point3(0.0, 1.5, 12.380000000000001),
            Point3(0.0, 1.5, -3.1000000000000001), 1)
# Zoom levels from closest to farthest; DEFAULT_CAM_INDEX selects camPos40.
camPosList = [camPos20, camPos30, camPos40, camPos50]
DEFAULT_CAM_INDEX = 2
# Panel color tuples (r, g, b, a) for the furniture picker buttons.
NormalPickerPanelColor = (1, 0.90000000000000002, 0.745, 1)
DisabledPickerPanelColor = (0.69999999999999996, 0.65000000000000002,
                            0.57999999999999996, 1)
DeletePickerPanelColor = (1, 0.40000000000000002, 0.40000000000000002, 1)
DisabledDeletePickerPanelColor = (0.69999999999999996, 0.29999999999999999,
                                  0.29999999999999999, 1)
class FurnitureItemPanel(DirectButton):
    """A clickable picker-panel button representing one catalog furniture
    item in the house-design UI. Shows the item's picture (or name text),
    supports a red "delete mode" appearance, and reports clicks via the
    supplied command callback as command(item, itemId).

    NOTE(review): this source appears decompiled — the explicitly
    name-mangled method `_FurnitureItemPanel__updateAppearance` is the
    private `__updateAppearance` method; its spelling must be preserved.
    """

    def __init__(self,
                 item,
                 itemId,
                 command=None,
                 deleteMode=0,
                 withinFunc=None,
                 helpCategory=None):
        # item: catalog item to display; itemId: opaque id passed back to
        # `command`; withinFunc: optional hover callback receiving itemId.
        self.item = item
        self.itemId = itemId
        self.command = command
        self.origHelpCategory = helpCategory
        self.deleteMode = deleteMode
        # Delete mode tints the panel red to signal destructive action.
        if self.deleteMode:
            framePanelColor = DeletePickerPanelColor
        else:
            framePanelColor = NormalPickerPanelColor
        DirectButton.__init__(
            self,
            relief=DGG.RAISED,
            frameSize=(-0.25, 0.25, -0.20000000000000001, 0.20000000000000001),
            frameColor=framePanelColor,
            borderWidth=(0.02, 0.02),
            command=self.clicked)
        if self.deleteMode:
            helpCategory = 'FurnitureItemPanelDelete'
        self.bindHelpText(helpCategory)
        if withinFunc:
            self.bind(DGG.WITHIN, lambda event: withinFunc(self.itemId))
        self.initialiseoptions(FurnitureItemPanel)
        self.load()

    def show(self):
        """Show the panel and resume the picture animation, if any."""
        DirectFrame.show(self)
        if self.ival:
            self.ival.resume()

    def hide(self):
        """Hide the panel and pause the picture animation, if any."""
        DirectFrame.hide(self)
        if self.ival:
            self.ival.pause()

    def load(self):
        """Build the panel contents: the item picture (with optional
        animation interval) or a text fallback, plus the name label."""
        panelWidth = 7
        panelCenter = 0
        (self.picture, self.ival) = self.item.getPicture(base.localAvatar)
        if self.picture:
            self.picture.reparentTo(self)
            self.picture.setScale(0.14000000000000001)
            self.picture.setPos(0, 0, -0.02)
            text = self.item.getName()
            text_pos = (0, -0.10000000000000001, 0)
        else:
            # No picture available: show "Type: Name" text instead.
            text = self.item.getTypeName() + ': ' + self.item.getName()
            text_pos = (0, -0.29999999999999999, 0)
        if self.ival:
            # Start the looping animation paused; show()/hide() toggle it.
            self.ival.loop()
            self.ival.pause()
        self.nameLabel = DirectLabel(
            parent=self,
            relief=None,
            pos=(0, 0, 0.17000000000000001),
            scale=0.45000000000000001,
            text=text,
            text_scale=0.14999999999999999,
            text_fg=(0, 0, 0, 1),
            text_pos=text_pos,
            text_font=ToontownGlobals.getInterfaceFont(),
            text_wordwrap=panelWidth)

    def clicked(self):
        """Forward the click to the caller-supplied command callback."""
        self.command(self.item, self.itemId)

    def unload(self):
        """Release the picture, label and animation owned by this panel."""
        if self.item.hasPicture:
            self.item.cleanupPicture()
        del self.item
        self.nameLabel.destroy()
        del self.nameLabel
        if self.ival:
            self.ival.finish()
        del self.ival
        del self.picture
        self.command = None

    def destroy(self):
        """Unload panel resources, then destroy the underlying button."""
        self.unload()
        DirectButton.destroy(self)

    def bindHelpText(self, category):
        """(Re)bind hover help-text callbacks for the given category; a
        None category falls back to the category given at construction."""
        self.unbind(DGG.ENTER)
        self.unbind(DGG.EXIT)
        if category is None:
            category = self.origHelpCategory
        self.bind(
            DGG.ENTER,
            base.cr.objectManager.showHelpText,
            extraArgs=[category, self.item.getName()])
        self.bind(DGG.EXIT, base.cr.objectManager.hideHelpText)

    def setDeleteMode(self, deleteMode):
        """Toggle delete mode and refresh the panel's color/relief."""
        self.deleteMode = deleteMode
        self._FurnitureItemPanel__updateAppearance()

    def enable(self, enabled):
        """Enable or disable the button, updating its appearance."""
        if enabled:
            self['state'] = DGG.NORMAL
        else:
            self['state'] = DGG.DISABLED
        self._FurnitureItemPanel__updateAppearance()

    def _FurnitureItemPanel__updateAppearance(self):
        """Recompute panel color from delete mode and enabled state.

        NOTE(review): `relief` is computed here but never applied — only
        `frameColor` is set at the end; presumably intentional or a
        leftover, confirm before changing.
        """
        color = NormalPickerPanelColor
        relief = DGG.RAISED
        if self.deleteMode:
            if self['state'] == DGG.DISABLED:
                color = DisabledDeletePickerPanelColor
                relief = DGG.SUNKEN
            else:
                color = DeletePickerPanelColor
                relief = DGG.RAISED
        elif self['state'] == DGG.DISABLED:
            color = DisabledPickerPanelColor
            relief = DGG.SUNKEN
        else:
            color = NormalPickerPanelColor
            relief = DGG.RAISED
        self['frameColor'] = color
class MovableObject(NodePath, DirectObject):
    """A furniture item wrapped for in-room drag-and-drop editing.

    Wraps a distributed furniture item (dfitem) NodePath, classifies it from
    its catalog flags (floor/wall/table/rug), precomputes its footprint
    corners and drag point, and builds a collision box so other furniture can
    avoid it while being moved.
    """

    def __init__(self, dfitem, parent=render):
        NodePath.__init__(self)
        # This NodePath aliases the dfitem's node rather than creating one.
        self.assign(dfitem)
        self.dfitem = dfitem
        dfitem.transmitRelativeTo = dfitem.getParent()
        self.reparentTo(parent)
        # Tag so picking code can recognize furniture (see isMovableObject).
        self.setTag('movableObject', '1')
        self.builtInCNodes = self.findAllMatches('**/+CollisionNode')
        self.numBuiltInNodes = self.builtInCNodes.getNumPaths()
        # Hide the model's own collision and shadow geometry so they do not
        # pollute the tight-bounds computation below.
        self.stashBuiltInCollisionNodes()
        shadows = self.findAllMatches('**/*shadow*')
        shadows.addPathsFrom(self.findAllMatches('**/*Shadow*'))
        shadows.stash()
        # Classify the item from its catalog flags.
        flags = self.dfitem.item.getFlags()
        if flags & CatalogFurnitureItem.FLPainting:
            self.setOnFloor(0)
            self.setOnWall(1)
        else:
            self.setOnFloor(1)
            self.setOnWall(0)
        if flags & CatalogFurnitureItem.FLOnTable:
            self.setOnTable(1)
        else:
            self.setOnTable(0)
        if flags & CatalogFurnitureItem.FLRug:
            self.setIsRug(1)
        else:
            self.setIsRug(0)
        if flags & CatalogFurnitureItem.FLIsTable:
            self.setIsTable(1)
        else:
            self.setIsTable(0)
        # Measure the footprint in the item's own frame: zero the transform,
        # take tight bounds, then restore the original transform afterwards.
        m = self.getTransform()
        self.iPosHpr()
        (bMin, bMax) = self.getTightBounds()
        self.bounds = self.getTightBounds()
        # Pad the footprint slightly in X/Y.
        bMin -= Vec3(0.10000000000000001, 0.10000000000000001, 0)
        bMax += Vec3(0.10000000000000001, 0.10000000000000001, 0)
        # Four footprint corners, slightly above the floor, used for the
        # selection outline and segment collision tests.
        self.c0 = Point3(bMin[0], bMin[1], 0.20000000000000001)
        self.c1 = Point3(bMax[0], bMin[1], 0.20000000000000001)
        self.c2 = Point3(bMax[0], bMax[1], 0.20000000000000001)
        self.c3 = Point3(bMin[0], bMax[1], 0.20000000000000001)
        self.center = (bMin + bMax) / 2.0
        if flags & CatalogFurnitureItem.FLPainting:
            # Wall items drag from the wall-facing edge at mid height.
            self.dragPoint = Vec3(self.center[0], bMax[1], self.center[2])
        else:
            # Floor items drag from the center of their base.
            self.dragPoint = Vec3(self.center[0], self.center[1], bMin[2])
        delta = self.dragPoint - self.c0
        self.radius = min(delta[0], delta[1])
        if self.getOnWall():
            self.setWallOffset(0.10000000000000001)
        else:
            self.setWallOffset(self.radius + 0.10000000000000001)
        self.makeCollisionBox()
        self.setTransform(m)
        self.unstashBuiltInCollisionNodes()
        shadows.unstash()

    def resetMovableObject(self):
        """Undo the editing decorations applied in __init__."""
        self.unstashBuiltInCollisionNodes()
        self.collisionNodePath.removeNode()
        self.clearTag('movableObject')

    def setOnFloor(self, fOnFloor):
        self.fOnFloor = fOnFloor

    def getOnFloor(self):
        return self.fOnFloor

    def setOnWall(self, fOnWall):
        self.fOnWall = fOnWall

    def getOnWall(self):
        return self.fOnWall

    def setOnTable(self, fOnTable):
        self.fOnTable = fOnTable

    def getOnTable(self):
        return self.fOnTable

    def setIsRug(self, fIsRug):
        self.fIsRug = fIsRug

    def getIsRug(self):
        return self.fIsRug

    def setIsTable(self, fIsTable):
        self.fIsTable = fIsTable

    def getIsTable(self):
        return self.fIsTable

    def setWallOffset(self, offset):
        self.wallOffset = offset

    def getWallOffset(self):
        return self.wallOffset

    def destroy(self):
        self.removeNode()

    def stashBuiltInCollisionNodes(self):
        self.builtInCNodes.stash()

    def unstashBuiltInCollisionNodes(self):
        self.builtInCNodes.unstash()

    def getFloorBitmask(self):
        """Bitmask of surfaces this item may rest on while dragged."""
        if self.getOnTable():
            return ToontownGlobals.FloorBitmask | ToontownGlobals.FurnitureTopBitmask
        else:
            return ToontownGlobals.FloorBitmask

    def getWallBitmask(self):
        """Bitmask of surfaces this item must avoid (or snap to) sideways."""
        if self.getIsRug() or self.getOnWall():
            return ToontownGlobals.WallBitmask
        else:
            return ToontownGlobals.WallBitmask | ToontownGlobals.FurnitureSideBitmask

    def makeCollisionBox(self):
        """Build side (and, for tables, top) collision polygons around the
        padded footprint so other dragged items collide with this one.
        Rugs and wall items get an empty collision node."""
        self.collisionNodePath = self.attachNewNode('furnitureCollisionNode')
        if self.getIsRug() or self.getOnWall():
            return None
        mx = self.bounds[0][0] - 0.01
        Mx = self.bounds[1][0] + 0.01
        my = self.bounds[0][1] - 0.01
        My = self.bounds[1][1] + 0.01
        mz = self.bounds[0][2]
        Mz = self.bounds[1][2]
        cn = CollisionNode('sideCollisionNode')
        cn.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask)
        self.collisionNodePath.attachNewNode(cn)
        # Four vertical side polygons, wound to face outward.
        cp = CollisionPolygon(
            Point3(mx, My, mz), Point3(mx, my, mz), Point3(mx, my, Mz),
            Point3(mx, My, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(
            Point3(Mx, my, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz),
            Point3(Mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(
            Point3(mx, my, mz), Point3(Mx, my, mz), Point3(Mx, my, Mz),
            Point3(mx, my, Mz))
        cn.addSolid(cp)
        cp = CollisionPolygon(
            Point3(Mx, My, mz), Point3(mx, My, mz), Point3(mx, My, Mz),
            Point3(Mx, My, Mz))
        cn.addSolid(cp)
        if self.getIsTable():
            # Tables also expose a top surface other items can sit on.
            cn = CollisionNode('topCollisionNode')
            cn.setIntoCollideMask(ToontownGlobals.FurnitureTopBitmask)
            self.collisionNodePath.attachNewNode(cn)
            cp = CollisionPolygon(
                Point3(mx, my, Mz), Point3(Mx, my, Mz), Point3(Mx, My, Mz),
                Point3(mx, My, Mz))
            cn.addSolid(cp)
class ObjectManager(NodePath, DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('ObjectManager')
    def __init__(self):
        """Create the furniture-arranging manager.

        Builds the drag-node hierarchy, picking helpers, the floor-drag
        collision plane, and the furniture GUI, then listens for
        enter/exitFurnitureMode events.
        """
        NodePath.__init__(self)
        self.assign(render.attachNewNode('objectManager'))
        # node id -> MovableObject for every item placed in the room.
        self.objectDict = {}
        self.selectedObject = None
        self.movingObject = 0
        self.deselectEvent = None
        self.startPose = render.attachNewNode('startPose')
        # Drag hierarchy: dragPoint -> gridSnap -> collisionOffset; the
        # dragged item is reparented under collisionOffsetNP while moving.
        self.dragPointNP = self.attachNewNode('dragPoint')
        self.gridSnapNP = self.dragPointNP.attachNewNode('gridSnap')
        self.collisionOffsetNP = self.gridSnapNP.attachNewNode(
            'collisionResponse')
        self.iRay = SelectionRay()
        self.iSegment = SelectionSegment(numSegments=6)
        self.iSegment4 = SelectionSegment(numSegments=4)
        self.iSphere = SelectionSphere()
        self.houseExtents = None
        self.doorBlocker = None
        # Large horizontal plane the mouse ray hits while dragging furniture.
        cp = CollisionPolygon(
            Point3(-100, -100, 0), Point3(100, -100, 0), Point3(100, 100, 0),
            Point3(-100, 100, 0))
        cn = CollisionNode('dragCollisionNode')
        cn.addSolid(cp)
        cn.setIntoCollideMask(ToontownGlobals.FurnitureDragBitmask)
        self.collisionNP = NodePath(cn)
        # Line drawer for the selection outline around the chosen item.
        self.lnp = LineNodePath()
        self.fRecenter = 0
        self.gridSpacing = None
        self.firstTime = 0
        guiModels = loader.loadModel('phase_5.5/models/gui/house_design_gui')
        self.createSelectedObjectPanel(guiModels)
        self.createMainControls(guiModels)
        self.furnitureManager = None
        self.atticPicker = None
        self.inRoomPicker = None
        self.inTrashPicker = None
        self.dialog = None
        self.deleteMode = 0
        self.nonDeletableItem = None
        self.verifyFrame = None
        self.deleteItemText = None
        self.okButton = None
        self.cancelButton = None
        self.itemIval = None
        self.itemPanel = None
        self.guiInterval = None
        self.accept('enterFurnitureMode', self.enterFurnitureMode)
        self.accept('exitFurnitureMode', self.exitFurnitureMode)
    def enterFurnitureMode(self, furnitureManager, fDirector):
        """Begin furniture-editing for *furnitureManager*.

        fDirector is falsy when the local avatar is only a spectator; in that
        case any active session is torn down instead. Re-entering with the
        same manager is a no-op; switching managers exits the old one first.
        """
        if not fDirector:
            if self.furnitureManager:
                self.exitFurnitureMode(self.furnitureManager)
            return None
        if furnitureManager == self.furnitureManager:
            return None
        if self.furnitureManager is not None:
            self.exitFurnitureMode(self.furnitureManager)
        self.notify.info('enterFurnitureMode, fDirector = %s' % fDirector)
        self.furnitureManager = furnitureManager
        self.furnitureManager.d_avatarEnter()
        house = furnitureManager.getInteriorObject()
        house.hideExteriorWindows()
        self.setTargetNodePath(house.interior)
        self.createAtticPicker()
        self.initializeDistributedFurnitureItems(furnitureManager.dfitems)
        self.setCamPosIndex(DEFAULT_CAM_INDEX)
        base.localAvatar.setGhostMode(1)
        taskMgr.remove('editModeTransition')
        self.orientCamH(base.localAvatar.getH(self.targetNodePath))
        self.accept('mouse1', self.moveObjectStart)
        self.accept('mouse1-up', self.moveObjectStop)
        self.furnitureGui.show()
        self.deleteMode = 0
        self._ObjectManager__updateDeleteButtons()
        self.showAtticPicker()
        base.localAvatar.laffMeter.stop()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 0)
        # Slide/scale the furniture GUI into its on-screen position.
        if self.guiInterval:
            self.guiInterval.finish()
        self.guiInterval = self.furnitureGui.posHprScaleInterval(
            1.0,
            Point3(-1.1599999999999999, 1, -0.029999999999999999),
            Vec3(0),
            Vec3(0.059999999999999998),
            startPos=Point3(-1.1899999999999999, 1, 0.33000000000000002),
            startHpr=Vec3(0),
            startScale=Vec3(0.040000000000000001),
            blendType='easeInOut',
            name='lerpFurnitureButton')
        self.guiInterval.start()
        taskMgr.add(self.recenterButtonFrameTask, 'recenterButtonFrameTask',
                    10)
        messenger.send('wakeup')
    def exitFurnitureMode(self, furnitureManager):
        """Tear down the editing session started for *furnitureManager*.

        Ignored if a different manager is (or none is) active. Restores the
        camera, avatar UI, and house state, and destroys all pickers.
        """
        if furnitureManager != self.furnitureManager:
            return None
        self.notify.info('exitFurnitureMode')
        house = furnitureManager.getInteriorObject()
        if house:
            house.showExteriorWindows()
        self.furnitureManager.d_avatarExit()
        self.furnitureManager = None
        base.localAvatar.setCameraPositionByIndex(0)
        self.exitDeleteMode()
        self.houseExtents.detachNode()
        self.doorBlocker.detachNode()
        self.deselectObject()
        self.ignore('mouse1')
        self.ignore('mouse1-up')
        if self.atticPicker:
            self.atticPicker.destroy()
            self.atticPicker = None
        if self.inRoomPicker:
            self.inRoomPicker.destroy()
            self.inRoomPicker = None
        if self.inTrashPicker:
            self.inTrashPicker.destroy()
            self.inTrashPicker = None
        self._ObjectManager__cleanupVerifyDelete()
        self.furnitureGui.hide()
        base.setCellsAvailable(base.leftCells + [base.bottomCells[0]], 1)
        base.localAvatar.laffMeter.start()
        taskMgr.remove('recenterButtonFrameTask')
        self.cleanupDialog()
        taskMgr.remove('showHelpTextDoLater')
        messenger.send('wakeup')
def initializeDistributedFurnitureItems(self, dfitems):
self.objectDict = {}
for item in dfitems:
mo = MovableObject(item, parent=self.targetNodePath)
self.objectDict[mo.id()] = mo
def setCamPosIndex(self, index):
self.camPosIndex = index
base.localAvatar.setCameraSettings(camPosList[index])
def zoomCamIn(self):
self.setCamPosIndex(max(0, self.camPosIndex - 1))
messenger.send('wakeup')
def zoomCamOut(self):
self.setCamPosIndex(min(len(camPosList) - 1, self.camPosIndex + 1))
messenger.send('wakeup')
def rotateCamCW(self):
self.orientCamH(base.localAvatar.getH(self.targetNodePath) - 90)
messenger.send('wakeup')
def rotateCamCCW(self):
self.orientCamH(base.localAvatar.getH(self.targetNodePath) + 90)
messenger.send('wakeup')
def orientCamH(self, toonH):
targetH = ROUND_TO(toonH, 90)
base.localAvatar.hprInterval(
duration=1,
hpr=Vec3(targetH, 0, 0),
other=self.targetNodePath,
blendType='easeInOut',
name='editModeTransition').start()
def setTargetNodePath(self, nodePath):
self.targetNodePath = nodePath
if self.houseExtents:
self.houseExtents.removeNode()
if self.doorBlocker:
self.doorBlocker.removeNode()
self.makeHouseExtentsBox()
self.makeDoorBlocker()
self.collisionNP.reparentTo(self.targetNodePath)
def loadObject(self, filename):
mo = MovableObject(filename, parent=self.targetNodePath)
self.objectDict[mo.id()] = mo
self.selectObject(mo)
return mo
def pickObject(self):
self.iRay.setParentNP(base.cam)
entry = self.iRay.pickGeom(
targetNodePath=self.targetNodePath, skipFlags=SKIP_ALL)
if entry:
nodePath = entry.getIntoNodePath()
if self.isMovableObject(nodePath):
self.selectObject(self.findObject(nodePath))
return None
self.deselectObject()
def pickInRoom(self, objectId):
self.selectObject(self.objectDict.get(objectId))
    def selectObject(self, selectedObject):
        """Make *selectedObject* the active selection.

        Deselects any previous selection, draws the footprint outline around
        the new one, and shows the move/rotate controls. Passing a falsy
        value leaves nothing selected.
        """
        messenger.send('wakeup')
        if self.selectedObject:
            self.deselectObject()
        if selectedObject:
            self.selectedObject = selectedObject
            # Auto-deselect if the distributed item disappears out from
            # under us.
            self.deselectEvent = self.selectedObject.dfitem.uniqueName(
                'disable')
            self.acceptOnce(self.deselectEvent, self.deselectObject)
            # Trace the footprint rectangle through the four corners.
            self.lnp.reset()
            self.lnp.reparentTo(selectedObject)
            self.lnp.moveTo(selectedObject.c0)
            self.lnp.drawTo(selectedObject.c1)
            self.lnp.drawTo(selectedObject.c2)
            self.lnp.drawTo(selectedObject.c3)
            self.lnp.drawTo(selectedObject.c0)
            self.lnp.create()
            self.buttonFrame.show()
            self.enableButtonFrameTask()
            self.sendToAtticButton.show()
            self.atticRoof.hide()
    def deselectObject(self):
        """Clear the current selection and hide all selection UI."""
        # End any drag in progress before letting go of the object.
        self.moveObjectStop()
        if self.deselectEvent:
            self.ignore(self.deselectEvent)
            self.deselectEvent = None
        self.selectedObject = None
        self.lnp.detachNode()
        self.buttonFrame.hide()
        self.disableButtonFrameTask()
        self.sendToAtticButton.hide()
        self.atticRoof.show()
def isMovableObject(self, nodePath):
return nodePath.hasNetTag('movableObject')
def findObject(self, nodePath):
np = nodePath.findNetTag('movableObject')
if np.isEmpty():
return None
else:
return self.objectDict.get(np.id(), None)
    def moveObjectStop(self, *args):
        """Finish a drag: drop the object back into the room and restore UI state.

        Safe to call when nothing is being moved. *args absorbs the DirectGUI
        event argument from the B1RELEASE binding.
        """
        if self.movingObject:
            self.movingObject = 0
            taskMgr.remove('moveObjectTask')
            if self.selectedObject:
                # Reparent back to the room, keeping the world transform.
                self.selectedObject.wrtReparentTo(self.targetNodePath)
                self.selectedObject.collisionNodePath.unstash()
                self.selectedObject.dfitem.stopAdjustPosHpr()
            # Re-enable collisions suppressed during the drag.
            for object in self.objectDict.values():
                object.unstashBuiltInCollisionNodes()
            # Restore the three-state (up/down/rollover) grab-hand button.
            self.centerMarker['image'] = [
                self.grabUp, self.grabDown, self.grabRollover
            ]
            self.centerMarker.configure(
                text=['', TTLocalizer.HDMoveLabel],
                text_pos=(0, 1),
                text_scale=0.69999999999999996,
                text_fg=(1, 1, 1, 1),
                text_shadow=(0, 0, 0, 1),
                image_scale=0.29999999999999999)
    def moveObjectStart(self):
        """Handle mouse-down: end any drag in progress, pick the object under
        the cursor, and start dragging it (if one was picked)."""
        self.moveObjectStop()
        self.pickObject()
        self.moveObjectContinue()
    def moveObjectContinue(self, *args):
        """Begin (or resume) dragging the selected object.

        Suppresses all furniture collisions, switches the grab-hand button to
        its pressed image, and starts the per-frame moveObjectTask. *args
        absorbs the DirectGUI event argument from the B1PRESS binding.
        """
        messenger.send('wakeup')
        if self.selectedObject:
            # The dragged item must not collide with itself or siblings.
            for object in self.objectDict.values():
                object.stashBuiltInCollisionNodes()
            self.selectedObject.collisionNodePath.stash()
            self.selectedObject.dfitem.startAdjustPosHpr()
            # moveObjectTask uses these to initialize the drag hierarchy on
            # its first frame.
            self.firstTime = 1
            self.iPosHpr()
            self.startPoseValid = 0
            self.centerMarker['image'] = self.grabDown
            self.centerMarker.configure(
                text=TTLocalizer.HDMoveLabel,
                text_pos=(0, 1),
                text_scale=0.69999999999999996,
                text_fg=(1, 1, 1, 1),
                text_shadow=(0, 0, 0, 1),
                image_scale=0.29999999999999999)
            taskMgr.add(self.moveObjectTask, 'moveObjectTask')
            self.movingObject = 1
def setLnpColor(self, r, g, b):
for i in range(5):
self.lnp.lineSegs.setVertexColor(i, r, g, b)
def markNewPosition(self, isValid):
if not isValid:
if self.startPoseValid:
self.collisionOffsetNP.setPosHpr(self.startPose,
self.selectedObject.dragPoint,
Vec3(0))
else:
self.startPoseValid = 1
    def moveObjectTask(self, state):
        """Per-frame drag task for the selected object.

        Each frame: follow the mouse ray onto the drag plane, optionally
        snap to the grid, try to snap against walls, drop floor items onto
        the surface below, push away from nearby walls, then run the full
        collision test and commit or revert the position.
        """
        so = self.selectedObject
        target = self.targetNodePath
        # Remember this frame's starting pose so we can revert to it.
        self.startPose.iPosHpr(so)
        # Follow the mouse onto the drag collision plane.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask(
            bitMask=ToontownGlobals.FurnitureDragBitmask,
            targetNodePath=target,
            skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            return Task.cont
        self.setPos(base.cam, entry.getSurfacePoint(base.cam))
        if self.firstTime:
            # First frame of the drag: anchor the drag hierarchy.
            self.moveObjectInit()
            self.firstTime = 0
        else:
            self.gridSnapNP.iPos()
            self.collisionOffsetNP.iPosHpr()
        if self.gridSpacing:
            pos = self.dragPointNP.getPos(target)
            self.gridSnapNP.setPos(target, ROUND_TO(pos[0], self.gridSpacing),
                                   ROUND_TO(pos[1], self.gridSpacing), pos[2])
        # Cast toward the camera's near plane to find a wall to snap to.
        self.iRay.setParentNP(base.cam)
        entry = self.iRay.pickBitMask3D(
            bitMask=so.getWallBitmask(),
            targetNodePath=target,
            dir=Vec3(self.getNearProjectionPoint(self.gridSnapNP)),
            skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        fWall = 0
        if not so.getOnTable():
            while entry:
                intoMask = entry.getIntoNodePath().node().getIntoCollideMask()
                # Snap to nearest angle only for furniture sides, not walls.
                fClosest = (intoMask & ToontownGlobals.WallBitmask).isZero()
                if self.alignObject(entry, target, fClosest=fClosest):
                    fWall = 1
                    break
                entry = self.iRay.findNextCollisionEntry(
                    skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if so.getOnWall():
            # Wall items live or die by wall contact alone.
            self.markNewPosition(fWall)
            return Task.cont
        # Drop floor items onto whatever is directly below.
        self.iRay.setParentNP(target)
        entry = self.iRay.pickBitMask3D(
            bitMask=so.getFloorBitmask(),
            targetNodePath=target,
            origin=Point3(self.gridSnapNP.getPos(target) + Vec3(0, 0, 10)),
            dir=Vec3(0, 0, -1),
            skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
        if not entry:
            self.markNewPosition(0)
            return Task.cont
        nodePath = entry.getIntoNodePath()
        if self.isMovableObject(nodePath):
            self.gridSnapNP.setPos(target, Point3(
                entry.getSurfacePoint(target)))
        else:
            # Lift slightly off static floors to avoid z-fighting.
            self.gridSnapNP.setPos(
                target,
                Point3(
                    entry.getSurfacePoint(target) +
                    Vec3(0, 0, ToontownGlobals.FloorOffset)))
        if not fWall:
            # Not snapped to a wall: still align against any wall within
            # 1.25x the object's radius.
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(
                bitMask=so.getWallBitmask(),
                targetNodePath=target,
                skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if entry:
                self.alignObject(entry, target, fClosest=1)
        isValid = self.collisionTest()
        self.markNewPosition(isValid)
        return Task.cont
    def collisionTest(self):
        """Return 1 if the dragged object's current position is acceptable.

        Strategy: collide the object's footprint edge/diagonal segments
        against walls and furniture; accumulate per-obstacle push-out
        offsets, apply the two largest (roughly orthogonal) ones, and retest.
        If still blocked, cast from the frame's start pose toward the corners
        and try one more offset. Returns 0 if every attempt stays blocked.

        NOTE(review): this body shows signs of decompilation — several
        statements below are no-op expressions and a trailing `continue`;
        they are preserved byte-for-byte rather than reinterpreted.
        """
        so = self.selectedObject
        target = self.targetNodePath
        entry = self.segmentCollision()
        if not entry:
            return 1
        # Largest push-out vector seen per colliding solid.
        offsetDict = {}
        while entry:
            offset = self.computeSegmentOffset(entry)
            if offset:
                eid = entry.getInto()
                maxOffsetVec = offsetDict.get(eid, Vec3(0))
                if offset.length() > maxOffsetVec.length():
                    maxOffsetVec.assign(offset)
                offsetDict[eid] = maxOffsetVec
            entry = self.iSegment.findNextCollisionEntry(skipFlags=SKIP_CAMERA
                                                         | SKIP_UNPICKABLE)
        if offsetDict:
            # ortho1/ortho2 end up as the dominant offset and the largest
            # offset roughly perpendicular to it.
            keys = offsetDict.keys()
            ortho1 = offsetDict[keys[0]]
            ortho2 = Vec3(0)
            v1 = Vec3(ortho1)
            v1.normalize()
            for key in keys[1:]:
                offset = offsetDict[key]
                v2 = Vec3(offset)
                v2.normalize()
                dp = v1.dot(v2)
                if abs(dp) > 0.94999999999999996:
                    # Nearly parallel to ortho1: keep the longer of the two.
                    if offset.length() > ortho1.length():
                        ortho1.assign(offset)
                    # NOTE(review): no-op expression, likely decompiler residue.
                    offset.length() > ortho1.length()
                if abs(dp) < 0.050000000000000003:
                    # Nearly perpendicular: track separately as ortho2.
                    if offset.length() > ortho2.length():
                        ortho2.assign(offset)
                    # NOTE(review): no-op expression, likely decompiler residue.
                    offset.length() > ortho2.length()
                # Split the in-between offset into components along and
                # across ortho1 and fold each into the running maxima.
                # NOTE(review): `/ o1Len * o1Len` cancels out; presumably the
                # original read `/ (o1Len * o1Len)` — harmless if ortho1 is
                # unit length. TODO confirm against pre-decompile source.
                o1Len = ortho1.length()
                parallelVec = Vec3(ortho1 * offset.dot(ortho1) / o1Len * o1Len)
                perpVec = Vec3(offset - parallelVec)
                if parallelVec.length() > o1Len:
                    ortho1.assign(parallelVec)
                if perpVec.length() > ortho2.length():
                    ortho2.assign(perpVec)
                continue
            totalOffset = ortho1 + ortho2
            self.collisionOffsetNP.setPos(self.collisionOffsetNP, totalOffset)
        if not self.segmentCollision():
            return 1
        # Still blocked: try pushing along the largest corner-sweep offset
        # between the start pose and the attempted pose.
        m = self.startPose.getMat(so)
        deltaMove = Vec3(m.getRow3(3))
        if deltaMove.length() == 0:
            return 1
        self.iSegment4.setParentNP(so)
        entry = self.iSegment4.pickBitMask(
            bitMask=so.getWallBitmask(),
            targetNodePath=target,
            endPointList=[(so.c0, Point3(m.xformPoint(so.c0))),
                          (so.c1, Point3(m.xformPoint(so.c1))),
                          (so.c2, Point3(m.xformPoint(so.c2))),
                          (so.c3, Point3(m.xformPoint(so.c3)))],
            skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
        maxLen = 0
        maxOffset = None
        while entry:
            offset = Vec3(
                entry.getSurfacePoint(entry.getFromNodePath()) -
                entry.getFrom().getPointA())
            offsetLen = Vec3(offset).length()
            if offsetLen > maxLen:
                maxLen = offsetLen
                maxOffset = offset
            entry = self.iSegment4.findNextCollisionEntry(skipFlags=SKIP_CAMERA
                                                          | SKIP_UNPICKABLE)
        if maxOffset:
            self.collisionOffsetNP.setPos(self.collisionOffsetNP, maxOffset)
            if not self.segmentCollision():
                return 1
        return 0
def segmentCollision(self):
so = self.selectedObject
self.iSegment.setParentNP(so)
entry = self.iSegment.pickBitMask(
bitMask=so.getWallBitmask(),
targetNodePath=self.targetNodePath,
endPointList=[(so.c0, so.c1), (so.c1, so.c2), (so.c2, so.c3),
(so.c3, so.c0), (so.c0, so.c2), (so.c1, so.c3)],
skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
return entry
    def computeSegmentOffset(self, entry):
        """Return a vector that pushes the dragged object out of a collision,
        or None when the hit should be ignored.

        The hit is ignored when the surface normal is unavailable or when the
        hit point already faces away from the object's drag point (we are
        moving out of, not into, the obstacle).
        """
        fromNodePath = entry.getFromNodePath()
        if entry.hasSurfaceNormal():
            normal = entry.getSurfaceNormal(fromNodePath)
        else:
            return None
        hitPoint = entry.getSurfacePoint(fromNodePath)
        m = self.selectedObject.getMat(self.startPose)
        hp = Point3(m.xformPoint(hitPoint))
        hpn = Vec3(m.xformVec(normal))
        hitPointVec = Vec3(hp - self.selectedObject.dragPoint)
        if hitPointVec.dot(hpn) > 0:
            return None
        # Project the penetration depth onto the surface normal; the 1.01
        # factor nudges slightly past the surface to avoid re-colliding.
        # NOTE(review): `/ nLen * nLen` cancels out; presumably the original
        # read `/ (nLen * nLen)` — harmless if the normal is unit length.
        nLen = normal.length()
        offsetVecA = hitPoint - entry.getFrom().getPointA()
        offsetA = normal * offsetVecA.dot(normal) / nLen * nLen
        if offsetA.dot(normal) > 0:
            return offsetA * 1.01
        else:
            # Endpoint A lay on the far side; measure from endpoint B instead.
            offsetVecB = hitPoint - entry.getFrom().getPointB()
            offsetB = normal * offsetVecB.dot(normal) / nLen * nLen
            return offsetB * 1.01
    def alignObject(self, entry, target, fClosest=0, wallOffset=None):
        """Snap the dragged object flush against the surface hit by *entry*.

        Only near-vertical surfaces qualify (|normal.z| < 0.1). With
        fClosest the object keeps its nearest 90-degree heading relative to
        the wall; otherwise it faces the wall squarely. Returns 1 when an
        alignment was applied, else 0.
        """
        if not entry.hasSurfaceNormal():
            return 0
        normal = entry.getSurfaceNormal(target)
        if abs(normal.dot(Vec3(0, 0, 1))) < 0.10000000000000001:
            # Temporary frame oriented to face the wall.
            tempNP = target.attachNewNode('temp')
            normal.setZ(0)
            normal.normalize()
            lookAtNormal = Point3(normal)
            lookAtNormal *= -1
            tempNP.lookAt(lookAtNormal)
            realAngle = ROUND_TO(self.gridSnapNP.getH(tempNP), 90.0)
            if fClosest:
                angle = realAngle
            else:
                angle = 0
            self.gridSnapNP.setHpr(tempNP, angle, 0, 0)
            hitPoint = entry.getSurfacePoint(target)
            tempNP.setPos(hitPoint)
            if wallOffset is None:
                wallOffset = self.selectedObject.getWallOffset()
            # Back off from the wall by the object's standoff distance.
            self.gridSnapNP.setPos(tempNP, 0, -wallOffset, 0)
            tempNP.removeNode()
            if realAngle == 180.0:
                # Objects snapped back-to-front get flipped to face outward.
                self.gridSnapNP.setH(self.gridSnapNP.getH() + 180.0)
            return 1
        return 0
def rotateLeft(self):
if not self.selectedObject:
return None
so = self.selectedObject
so.dfitem.startAdjustPosHpr()
self.iPosHpr(so)
self.moveObjectInit()
if so.getOnWall():
startR = self.gridSnapNP.getR()
newR = ROUND_TO(startR + 22.5, 22.5)
self.gridSnapNP.setR(newR)
else:
startH = self.gridSnapNP.getH(self.targetNodePath)
newH = ROUND_TO(startH - 22.5, 22.5)
self.iSphere.setParentNP(self.gridSnapNP)
self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
entry = self.iSphere.pickBitMask(
bitMask=so.getWallBitmask(),
targetNodePath=self.targetNodePath,
skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
if not entry:
self.gridSnapNP.setHpr(self.targetNodePath, newH, 0, 0)
self.collisionTest()
so.wrtReparentTo(self.targetNodePath)
self.disableButtonFrameTask()
so.dfitem.stopAdjustPosHpr()
    def rotateRight(self):
        """Rotate the selected object clockwise by one 22.5-degree step.

        Mirror image of rotateLeft: wall items spin in roll, floor items
        rotate in heading only when the swept sphere is clear of walls.
        """
        if not self.selectedObject:
            return None
        so = self.selectedObject
        so.dfitem.startAdjustPosHpr()
        self.iPosHpr(so)
        self.moveObjectInit()
        if so.getOnWall():
            startR = self.gridSnapNP.getR()
            newR = ROUND_TO(startR - 22.5, 22.5)
            self.gridSnapNP.setR(newR)
        else:
            startH = self.gridSnapNP.getH(self.targetNodePath)
            newH = ROUND_TO(startH + 22.5, 22.5) % 360.0
            self.iSphere.setParentNP(self.gridSnapNP)
            self.iSphere.setCenterRadius(0, Point3(0), so.radius * 1.25)
            entry = self.iSphere.pickBitMask(
                bitMask=so.getWallBitmask(),
                targetNodePath=self.targetNodePath,
                skipFlags=SKIP_CAMERA | SKIP_UNPICKABLE)
            if not entry:
                # Only commit the new heading when nothing is in the way.
                self.gridSnapNP.setHpr(self.targetNodePath, newH, 0, 0)
            self.collisionTest()
        so.wrtReparentTo(self.targetNodePath)
        self.disableButtonFrameTask()
        so.dfitem.stopAdjustPosHpr()
def moveObjectInit(self):
self.dragPointNP.setPosHpr(self.selectedObject,
self.selectedObject.dragPoint, Vec3(0))
self.gridSnapNP.iPosHpr()
self.collisionOffsetNP.iPosHpr()
self.selectedObject.wrtReparentTo(self.collisionOffsetNP)
def resetFurniture(self):
for o in self.objectDict.values():
o.resetMovableObject()
self.objectDict = {}
self.deselectObject()
self.buttonFrame.hide()
    def destroy(self):
        """Tear down the manager completely: exit any active session, destroy
        all GUI, and remove every scene-graph node it created."""
        self.ignore('enterFurnitureMode')
        self.ignore('exitFurnitureMode')
        if self.guiInterval:
            self.guiInterval.finish()
        if self.furnitureManager:
            # Cleanly leave furniture mode before dismantling anything.
            self.exitFurnitureMode(self.furnitureManager)
        self.cleanupDialog()
        self.resetFurniture()
        self.buttonFrame.destroy()
        self.furnitureGui.destroy()
        if self.houseExtents:
            self.houseExtents.removeNode()
        if self.doorBlocker:
            self.doorBlocker.removeNode()
        self.removeNode()
        if self.verifyFrame:
            self.verifyFrame.destroy()
            self.verifyFrame = None
            self.deleteItemText = None
            self.okButton = None
            self.cancelButton = None
    def createSelectedObjectPanel(self, guiModels):
        """Build the selection overlay: the central grab-hand button and the
        two rotate-arrow buttons, all parented to self.buttonFrame (hidden
        until an object is selected)."""
        self.buttonFrame = DirectFrame(scale=0.5)
        self.grabUp = guiModels.find('**/handup')
        self.grabDown = guiModels.find('**/handdown')
        self.grabRollover = guiModels.find('**/handrollover')
        # Grab-hand: press-and-hold drives the drag via B1PRESS/B1RELEASE.
        self.centerMarker = DirectButton(
            parent=self.buttonFrame,
            text=['', TTLocalizer.HDMoveLabel],
            text_pos=(0, 1),
            text_scale=0.69999999999999996,
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            image=[self.grabUp, self.grabDown, self.grabRollover],
            image_scale=0.29999999999999999,
            relief=None,
            scale=0.12)
        self.centerMarker.bind(DGG.B1PRESS, self.moveObjectContinue)
        self.centerMarker.bind(DGG.B1RELEASE, self.moveObjectStop)
        guiCCWArrowUp = guiModels.find('**/LarrowUp')
        guiCCWArrowDown = guiModels.find('**/LarrowDown')
        guiCCWArrowRollover = guiModels.find('**/LarrowRollover')
        self.rotateLeftButton = DirectButton(
            parent=self.buttonFrame,
            relief=None,
            image=(guiCCWArrowUp, guiCCWArrowDown, guiCCWArrowRollover,
                   guiCCWArrowUp),
            image_pos=(0, 0, 0.10000000000000001),
            image_scale=0.14999999999999999,
            image3_color=Vec4(0.5, 0.5, 0.5, 0.75),
            text=('', TTLocalizer.HDRotateCCWLabel,
                  TTLocalizer.HDRotateCCWLabel, ''),
            text_pos=(0.13500000000000001, -0.10000000000000001),
            text_scale=0.10000000000000001,
            text_align=TextNode.ARight,
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            pos=(-0.125, 0, -0.20000000000000001),
            scale=0.69999999999999996,
            command=self.rotateLeft)
        # Resume re-centering once the pointer leaves the button.
        self.rotateLeftButton.bind(DGG.EXIT, self.enableButtonFrameTask)
        guiCWArrowUp = guiModels.find('**/RarrowUp')
        guiCWArrowDown = guiModels.find('**/RarrowDown')
        guiCWArrowRollover = guiModels.find('**/RarrowRollover')
        self.rotateRightButton = DirectButton(
            parent=self.buttonFrame,
            relief=None,
            image=(guiCWArrowUp, guiCWArrowDown, guiCWArrowRollover,
                   guiCWArrowUp),
            image_pos=(0, 0, 0.10000000000000001),
            image_scale=0.14999999999999999,
            image3_color=Vec4(0.5, 0.5, 0.5, 0.75),
            text=('', TTLocalizer.HDRotateCWLabel, TTLocalizer.HDRotateCWLabel,
                  ''),
            text_pos=(-0.13500000000000001, -0.10000000000000001),
            text_scale=0.10000000000000001,
            text_align=TextNode.ALeft,
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            pos=(0.125, 0, -0.20000000000000001),
            scale=0.69999999999999996,
            command=self.rotateRight)
        self.rotateRightButton.bind(DGG.EXIT, self.enableButtonFrameTask)
        self.buttonFrame.hide()
def recenterButtonFrameTask(self, state):
if self.selectedObject and self.fRecenter:
self.buttonFrame.setPos(self.getSelectedObjectScreenXY())
return Task.cont
    def disableButtonFrameTask(self, event=None):
        """Stop re-centering the button frame (event arg absorbs GUI bindings)."""
        self.fRecenter = 0
    def enableButtonFrameTask(self, event=None):
        """Resume re-centering the button frame (event arg absorbs GUI bindings)."""
        self.fRecenter = 1
def getNearProjectionPoint(self, nodePath):
origin = nodePath.getPos(camera)
if origin[1] != 0.0:
return origin * (base.camLens.getNear() / origin[1])
else:
return Point3(0, base.camLens.getNear(), 0)
def getSelectedObjectScreenXY(self):
tNodePath = self.selectedObject.attachNewNode('temp')
tNodePath.setPos(self.selectedObject.center)
nearVec = self.getNearProjectionPoint(tNodePath)
nearVec *= base.camLens.getFocalLength() / base.camLens.getNear()
render2dX = CLAMP(nearVec[0] / base.camLens.getFilmSize()[0] / 2.0,
-0.90000000000000002, 0.90000000000000002)
aspect2dX = render2dX * base.getAspectRatio()
aspect2dZ = CLAMP(nearVec[2] / base.camLens.getFilmSize()[1] / 2.0,
-0.80000000000000004, 0.90000000000000002)
tNodePath.removeNode()
return Vec3(aspect2dX, 0, aspect2dZ)
    def createMainControls(self, guiModels):
        """Build the main furniture-mode GUI panel (self.furnitureGui):
        attic backdrop, picker tabs, camera controls, send-to-attic button,
        delete-mode toggles, and the rollover help label. Hidden until
        furniture mode is entered."""
        attic = guiModels.find('**/attic')
        self.furnitureGui = DirectFrame(
            relief=None,
            pos=(-1.1899999999999999, 1, 0.33000000000000002),
            scale=0.040000000000000001,
            image=attic)
        # "Done moving furniture" button.
        bMoveStopUp = guiModels.find('**/bu_atticX/bu_attic_up')
        bMoveStopDown = guiModels.find('**/bu_atticX/bu_attic_down')
        bMoveStopRollover = guiModels.find('**/bu_atticX/bu_attic_rollover')
        self.bStopMoveFurniture = DirectButton(
            parent=self.furnitureGui,
            relief=None,
            image=[bMoveStopUp, bMoveStopDown, bMoveStopRollover, bMoveStopUp],
            text=[
                '', TTLocalizer.HDStopMoveFurnitureButton,
                TTLocalizer.HDStopMoveFurnitureButton
            ],
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_font=ToontownGlobals.getInterfaceFont(),
            pos=(-0.29999999999999999, 0, 9.4000000000000004),
            command=base.localAvatar.stopMoveFurniture)
        self.bindHelpText(self.bStopMoveFurniture, 'DoneMoving')
        # Roof tile covers the attic art while an object is selected.
        self.atticRoof = DirectLabel(
            parent=self.furnitureGui,
            relief=None,
            image=guiModels.find('**/rooftile'))
        self.itemBackgroundFrame = DirectFrame(
            parent=self.furnitureGui,
            relief=None,
            image=guiModels.find('**/item_backgroun'),
            image_pos=(0, 0, -22),
            image_scale=(1, 1, 5))
        self.scrollUpFrame = DirectFrame(
            parent=self.furnitureGui,
            relief=None,
            image=guiModels.find('**/scrollup'),
            pos=(0, 0, -0.57999999999999996))
        self.camButtonFrame = DirectFrame(
            parent=self.furnitureGui,
            relief=None,
            image=guiModels.find('**/low'),
            pos=(0, 0, -11.69))
        # Side tabs that switch between the attic / in-room / trash pickers.
        tagUp = guiModels.find('**/tag_up')
        tagDown = guiModels.find('**/tag_down')
        tagRollover = guiModels.find('**/tag_rollover')
        self.inAtticButton = DirectButton(
            parent=self.itemBackgroundFrame,
            relief=None,
            text=TTLocalizer.HDInAtticLabel,
            text_pos=(-0.10000000000000001, -0.25),
            image=[tagUp, tagDown, tagRollover],
            pos=(2.8500000000000001, 0, 4),
            scale=0.80000000000000004,
            command=self.showAtticPicker)
        self.bindHelpText(self.inAtticButton, 'Attic')
        self.inRoomButton = DirectButton(
            parent=self.itemBackgroundFrame,
            relief=None,
            text=TTLocalizer.HDInRoomLabel,
            text_pos=(-0.10000000000000001, -0.25),
            image=[tagUp, tagDown, tagRollover],
            pos=(2.8500000000000001, 0, 1.1000000000000001),
            scale=0.80000000000000004,
            command=self.showInRoomPicker)
        self.bindHelpText(self.inRoomButton, 'Room')
        self.inTrashButton = DirectButton(
            parent=self.itemBackgroundFrame,
            relief=None,
            text=TTLocalizer.HDInTrashLabel,
            text_pos=(-0.10000000000000001, -0.25),
            image=[tagUp, tagDown, tagRollover],
            pos=(2.8500000000000001, 0, -1.8),
            scale=0.80000000000000004,
            command=self.showInTrashPicker)
        self.bindHelpText(self.inTrashButton, 'Trash')
        # Rotate the tab text to run vertically along the tabs.
        for i in range(4):
            self.inAtticButton.component('text%d' % i).setR(-90)
            self.inRoomButton.component('text%d' % i).setR(-90)
            self.inTrashButton.component('text%d' % i).setR(-90)
        backInAtticUp = guiModels.find('**/bu_backinattic_up1')
        backInAtticDown = guiModels.find('**/bu_backinattic_down1')
        backInAtticRollover = guiModels.find('**/bu_backinattic_rollover2')
        # Shown only while an object is selected (see selectObject).
        self.sendToAtticButton = DirectButton(
            parent=self.furnitureGui,
            relief=None,
            pos=(0.40000000000000002, 0, 12.800000000000001),
            text=['', TTLocalizer.HDToAtticLabel],
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_pos=(1.2, -0.29999999999999999),
            image=[backInAtticUp, backInAtticDown, backInAtticRollover],
            command=self.sendItemToAttic)
        self.sendToAtticButton.hide()
        self.bindHelpText(self.sendToAtticButton, 'SendToAttic')
        # Camera zoom and rotate controls.
        zoomInUp = guiModels.find('**/bu_RzoomOut_up')
        zoomInDown = guiModels.find('**/bu_RzoomOut_down')
        zoomInRollover = guiModels.find('**/bu_RzoomOut_rollover')
        self.zoomInButton = DirectButton(
            parent=self.camButtonFrame,
            image=[zoomInUp, zoomInDown, zoomInRollover],
            relief=None,
            pos=(0.90000000000000002, 0, -0.75),
            command=self.zoomCamIn)
        self.bindHelpText(self.zoomInButton, 'ZoomIn')
        zoomOutUp = guiModels.find('**/bu_LzoomIn_up')
        zoomOutDown = guiModels.find('**/bu_LzoomIn_down')
        zoomOutRollover = guiModels.find('**/buLzoomIn_rollover')
        self.zoomOutButton = DirectButton(
            parent=self.camButtonFrame,
            image=[zoomOutUp, zoomOutDown, zoomOutRollover],
            relief=None,
            pos=(-1.3999999999999999, 0, -0.75),
            command=self.zoomCamOut)
        self.bindHelpText(self.zoomOutButton, 'ZoomOut')
        camCCWUp = guiModels.find('**/bu_Rarrow_up1')
        camCCWDown = guiModels.find('**/bu_Rarrow_down1')
        camCCWRollover = guiModels.find('**/bu_Rarrow_orllover')
        self.rotateCamLeftButton = DirectButton(
            parent=self.camButtonFrame,
            image=[camCCWUp, camCCWDown, camCCWRollover],
            relief=None,
            pos=(0.90000000000000002, 0, -3.0),
            command=self.rotateCamCCW)
        self.bindHelpText(self.rotateCamLeftButton, 'RotateLeft')
        camCWUp = guiModels.find('**/bu_Larrow_up1')
        camCWDown = guiModels.find('**/bu_Larrow_down1')
        camCWRollover = guiModels.find('**/bu_Larrow_rollover2')
        self.rotateCamRightButton = DirectButton(
            parent=self.camButtonFrame,
            image=[camCWUp, camCWDown, camCWRollover],
            relief=None,
            pos=(-1.3999999999999999, 0, -3.0),
            command=self.rotateCamCW)
        self.bindHelpText(self.rotateCamRightButton, 'RotateRight')
        # Delete-mode toggle buttons share the same trashcan position; only
        # one of the pair is visible at a time.
        trashcanGui = loader.loadModel('phase_3/models/gui/trashcan_gui')
        trashcanUp = trashcanGui.find('**/TrashCan_CLSD')
        trashcanDown = trashcanGui.find('**/TrashCan_OPEN')
        trashcanRollover = trashcanGui.find('**/TrashCan_RLVR')
        self.deleteEnterButton = DirectButton(
            parent=self.furnitureGui,
            image=(trashcanUp, trashcanDown, trashcanRollover, trashcanUp),
            text=[
                '', TTLocalizer.InventoryDelete, TTLocalizer.InventoryDelete,
                ''
            ],
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_scale=0.10000000000000001,
            text_align=TextNode.ACenter,
            text_pos=(0, -0.12),
            text_font=ToontownGlobals.getInterfaceFont(),
            textMayChange=0,
            relief=None,
            pos=(3.7000000000000002, 0.0, -13.800000000000001),
            scale=7.1299999999999999,
            command=self.enterDeleteMode)
        self.bindHelpText(self.deleteEnterButton, 'DeleteEnter')
        self.deleteExitButton = DirectButton(
            parent=self.furnitureGui,
            image=(trashcanUp, trashcanDown, trashcanRollover, trashcanUp),
            text=('', TTLocalizer.InventoryDone, TTLocalizer.InventoryDone,
                  ''),
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_scale=0.10000000000000001,
            text_align=TextNode.ACenter,
            text_pos=(0, -0.12),
            text_font=ToontownGlobals.getInterfaceFont(),
            textMayChange=0,
            relief=None,
            pos=(3.7000000000000002, 0.0, -13.800000000000001),
            scale=7.1299999999999999,
            command=self.exitDeleteMode)
        self.bindHelpText(self.deleteExitButton, 'DeleteExit')
        self.deleteExitButton.hide()
        self.trashcanBase = DirectLabel(
            parent=self.furnitureGui,
            image=guiModels.find('**/trashcan_base'),
            relief=None,
            pos=(0, 0, -11.640000000000001))
        self.furnitureGui.hide()
        # Rollover help text shared by all of the buttons above.
        self.helpText = DirectLabel(
            parent=self.furnitureGui,
            relief=DGG.SUNKEN,
            frameSize=(-0.5, 10, -3, 0.90000000000000002),
            frameColor=(0.20000000000000001, 0.20000000000000001,
                        0.20000000000000001, 0.5),
            borderWidth=(0.01, 0.01),
            text='',
            text_wordwrap=12,
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_scale=0.80000000000000004,
            pos=(3, 0.0, -7),
            scale=1,
            text_align=TextNode.ALeft)
        self.helpText.hide()
def createAtticPicker(self):
self.atticItemPanels = []
for itemIndex in range(len(self.furnitureManager.atticItems)):
panel = FurnitureItemPanel(
self.furnitureManager.atticItems[itemIndex],
itemIndex,
command=self.bringItemFromAttic,
deleteMode=self.deleteMode,
helpCategory='FurnitureItemPanelAttic')
self.atticItemPanels.append(panel)
self.atticWallpaperPanels = []
for itemIndex in range(len(self.furnitureManager.atticWallpaper)):
panel = FurnitureItemPanel(
self.furnitureManager.atticWallpaper[itemIndex],
itemIndex,
command=self.bringWallpaperFromAttic,
deleteMode=self.deleteMode,
helpCategory='FurnitureItemPanelAttic')
self.atticWallpaperPanels.append(panel)
self.atticWindowPanels = []
for itemIndex in range(len(self.furnitureManager.atticWindows)):
panel = FurnitureItemPanel(
self.furnitureManager.atticWindows[itemIndex],
itemIndex,
command=self.bringWindowFromAttic,
deleteMode=self.deleteMode,
helpCategory='FurnitureItemPanelAttic')
self.atticWindowPanels.append(panel)
self.regenerateAtticPicker()
    def regenerateAtticPicker(self):
        """Rebuild the attic scrolled list from the current panel sets,
        preserving the scroll position and honoring delete mode."""
        selectedIndex = 0
        if self.atticPicker:
            # Detach panels first so destroying the old picker does not
            # destroy them too.
            selectedIndex = self.atticPicker.getSelectedIndex()
            for panel in self.atticItemPanels:
                panel.detachNode()
            for panel in self.atticWallpaperPanels:
                panel.detachNode()
            for panel in self.atticWindowPanels:
                panel.detachNode()
            self.atticPicker.destroy()
            self.atticPicker = None
        itemList = self.atticItemPanels + self.atticWallpaperPanels + self.atticWindowPanels
        if self.deleteMode:
            text = TTLocalizer.HDDeletePickerLabel
        else:
            text = TTLocalizer.HDAtticPickerLabel
        self.atticPicker = self.createScrolledList(
            itemList, text, 'atticPicker', selectedIndex)
        # Only one picker is visible at a time.
        if self.inRoomPicker or self.inTrashPicker:
            self.atticPicker.hide()
        else:
            self.atticPicker.show()
def createInRoomPicker(self):
self.inRoomPanels = []
for (objectId, object) in self.objectDict.items():
panel = FurnitureItemPanel(
object.dfitem.item,
objectId,
command=self.requestReturnToAttic,
deleteMode=self.deleteMode,
withinFunc=self.pickInRoom,
helpCategory='FurnitureItemPanelRoom')
self.inRoomPanels.append(panel)
self.regenerateInRoomPicker()
def regenerateInRoomPicker(self):
selectedIndex = 0
if self.inRoomPicker:
selectedIndex = self.inRoomPicker.getSelectedIndex()
for panel in self.inRoomPanels:
panel.detachNode()
self.inRoomPicker.destroy()
self.inRoomPicker = None
if self.deleteMode:
text = TTLocalizer.HDDeletePickerLabel
else:
text = TTLocalizer.HDInRoomPickerLabel
self.inRoomPicker = self.createScrolledList(
self.inRoomPanels, text, 'inRoomPicker', selectedIndex)
def createInTrashPicker(self):
self.inTrashPanels = []
for itemIndex in range(len(self.furnitureManager.deletedItems)):
panel = FurnitureItemPanel(
self.furnitureManager.deletedItems[itemIndex],
itemIndex,
command=self.requestReturnToAtticFromTrash,
helpCategory='FurnitureItemPanelTrash')
self.inTrashPanels.append(panel)
self.regenerateInTrashPicker()
def regenerateInTrashPicker(self):
selectedIndex = 0
if self.inTrashPicker:
selectedIndex = self.inTrashPicker.getSelectedIndex()
for panel in self.inTrashPanels:
panel.detachNode()
self.inTrashPicker.destroy()
self.inTrashPicker = None
text = TTLocalizer.HDInTrashPickerLabel
self.inTrashPicker = self.createScrolledList(
self.inTrashPanels, text, 'inTrashPicker', selectedIndex)
    def createScrolledList(self, itemList, text, name, selectedIndex):
        """Build a DirectScrolledList of furniture panels under the
        furniture GUI.

        itemList -- FurnitureItemPanel instances to display
        text -- heading label shown above the list
        name -- node name assigned to the new picker
        selectedIndex -- index to scroll to initially
        Returns the new DirectScrolledList.
        """
        gui = loader.loadModel('phase_3.5/models/gui/friendslist_gui')
        picker = DirectScrolledList(
            parent=self.furnitureGui,
            pos=(-0.38, 0.0, 3),
            scale=7.125,
            relief=None,
            items=itemList,
            numItemsVisible=5,
            text=text,
            text_fg=(1, 1, 1, 1),
            text_shadow=(0, 0, 0, 1),
            text_scale=0.10000000000000001,
            text_pos=(0, 0.40000000000000002),
            decButton_image=(gui.find('**/FndsLst_ScrollUp'),
                             gui.find('**/FndsLst_ScrollDN'),
                             gui.find('**/FndsLst_ScrollUp_Rllvr'),
                             gui.find('**/FndsLst_ScrollUp')),
            decButton_relief=None,
            decButton_scale=(1.5, 1.5, 1.5),
            decButton_pos=(0, 0, 0.29999999999999999),
            decButton_image3_color=Vec4(1, 1, 1, 0.10000000000000001),
            incButton_image=(gui.find('**/FndsLst_ScrollUp'),
                             gui.find('**/FndsLst_ScrollDN'),
                             gui.find('**/FndsLst_ScrollUp_Rllvr'),
                             gui.find('**/FndsLst_ScrollUp')),
            incButton_relief=None,
            # Negative Z scale mirrors the same arrow art for the down button.
            incButton_scale=(1.5, 1.5, -1.5),
            incButton_pos=(0, 0, -1.8779999999999999),
            incButton_image3_color=Vec4(1, 1, 1, 0.10000000000000001))
        picker.setName(name)
        picker.scrollTo(selectedIndex)
        return picker
    def reset():
        # NOTE(review): decompiler artifact -- this def is missing the
        # ``self`` parameter, and ``furnitureMenu`` is not defined in this
        # scope, so calling it as a bound method would raise.  It was
        # presumably a nested callback in the original source; confirm
        # before relying on it.
        self.destroy()
        furnitureMenu.destroy()
def showAtticPicker(self):
if self.inRoomPicker:
self.inRoomPicker.destroy()
self.inRoomPicker = None
if self.inTrashPicker:
self.inTrashPicker.destroy()
self.inTrashPicker = None
self.atticPicker.show()
self.inAtticButton['image_color'] = Vec4(1, 1, 1, 1)
self.inRoomButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.inTrashButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.deleteExitButton['state'] = 'normal'
self.deleteEnterButton['state'] = 'normal'
def showInRoomPicker(self):
messenger.send('wakeup')
if not self.inRoomPicker:
self.createInRoomPicker()
self.atticPicker.hide()
if self.inTrashPicker:
self.inTrashPicker.destroy()
self.inTrashPicker = None
self.inAtticButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.inRoomButton['image_color'] = Vec4(1, 1, 1, 1)
self.inTrashButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.deleteExitButton['state'] = 'normal'
self.deleteEnterButton['state'] = 'normal'
def showInTrashPicker(self):
messenger.send('wakeup')
if not self.inTrashPicker:
self.createInTrashPicker()
self.atticPicker.hide()
if self.inRoomPicker:
self.inRoomPicker.destroy()
self.inRoomPicker = None
self.inAtticButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.inRoomButton['image_color'] = Vec4(
0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1)
self.inTrashButton['image_color'] = Vec4(1, 1, 1, 1)
self.deleteExitButton['state'] = 'disabled'
self.deleteEnterButton['state'] = 'disabled'
    def sendItemToAttic(self):
        """Ask the furniture manager to move the currently selected room
        object back into the attic, then clear the selection."""
        if base.config.GetBool('want-qa-regression', 0):
            self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic')
        messenger.send('wakeup')
        if self.selectedObject:
            # Bind the object id now; the manager reply arrives asynchronously.
            callback = PythonUtil.Functor(
                self._ObjectManager__sendItemToAtticCallback,
                self.selectedObject.id())
            self.furnitureManager.moveItemToAttic(self.selectedObject.dfitem,
                                                  callback)
            self.deselectObject()
    def _ObjectManager__sendItemToAtticCallback(self, objectId, retcode, item):
        """Response handler for moveItemToAttic.

        objectId -- id of the room object that was moved
        retcode -- manager result; negative means failure
        item -- the catalog item that was moved
        """
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to send item %s to attic, reason %s.' %
                             (item.getName(), retcode))
            return None
        del self.objectDict[objectId]
        if self.selectedObject is not None and self.selectedObject.id(
        ) == objectId:
            self.selectedObject.detachNode()
            self.deselectObject()
        # The item is now in the attic: append a panel for it.
        itemIndex = len(self.atticItemPanels)
        panel = FurnitureItemPanel(
            item,
            itemIndex,
            command=self.bringItemFromAttic,
            deleteMode=self.deleteMode,
            helpCategory='FurnitureItemPanelAttic')
        self.atticItemPanels.append(panel)
        self.regenerateAtticPicker()
        if self.inRoomPicker:
            # Drop the matching in-room panel.  The trailing ``continue``
            # is a decompiler artifact and has no effect.
            for i in range(len(self.inRoomPanels)):
                if self.inRoomPanels[i].itemId == objectId:
                    del self.inRoomPanels[i]
                    self.regenerateInRoomPicker()
                    return None
                continue
def cleanupDialog(self, buttonValue=None):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
self._ObjectManager__enableItemButtons(1)
    def enterDeleteMode(self):
        """Switch the pickers into delete mode (clicking a panel deletes)."""
        self.deleteMode = 1
        self._ObjectManager__updateDeleteMode()
    def exitDeleteMode(self):
        """Return the pickers to normal (move/place) mode."""
        self.deleteMode = 0
        self._ObjectManager__updateDeleteMode()
def _ObjectManager__updateDeleteMode(self):
if not self.atticPicker:
return None
self.notify.debug('__updateDeleteMode deleteMode=%s' % self.deleteMode)
if self.deleteMode:
framePanelColor = DeletePickerPanelColor
atticText = TTLocalizer.HDDeletePickerLabel
inRoomText = TTLocalizer.HDDeletePickerLabel
helpCategory = 'FurnitureItemPanelDelete'
else:
framePanelColor = NormalPickerPanelColor
atticText = TTLocalizer.HDAtticPickerLabel
inRoomText = TTLocalizer.HDInRoomPickerLabel
helpCategory = None
if self.inRoomPicker:
self.inRoomPicker['text'] = inRoomText
for panel in self.inRoomPicker['items']:
panel.setDeleteMode(self.deleteMode)
panel.bindHelpText(helpCategory)
if self.atticPicker:
self.atticPicker['text'] = atticText
for panel in self.atticPicker['items']:
panel.setDeleteMode(self.deleteMode)
panel.bindHelpText(helpCategory)
self._ObjectManager__updateDeleteButtons()
def _ObjectManager__updateDeleteButtons(self):
if self.deleteMode:
self.deleteExitButton.show()
self.deleteEnterButton.hide()
else:
self.deleteEnterButton.show()
self.deleteExitButton.hide()
    def deleteItemFromRoom(self, dfitem, objectId, itemIndex):
        """Ask the furniture manager to delete a placed object.

        dfitem -- the distributed furniture item to delete
        objectId -- id of the corresponding movable object in the room
        itemIndex -- index of its panel in the in-room picker (may be None)
        """
        messenger.send('wakeup')
        callback = PythonUtil.Functor(
            self._ObjectManager__deleteItemFromRoomCallback, objectId,
            itemIndex)
        self.furnitureManager.deleteItemFromRoom(dfitem, callback)
    def _ObjectManager__deleteItemFromRoomCallback(self, objectId, itemIndex,
                                                   retcode, item):
        """Response handler for deleteItemFromRoom: remove the object and
        its in-room picker panel on success."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to delete item %s from room, reason %s.' %
                             (item.getName(), retcode))
            return None
        del self.objectDict[objectId]
        if self.selectedObject is not None and self.selectedObject.id(
        ) == objectId:
            self.selectedObject.detachNode()
            self.deselectObject()
        if self.inRoomPicker and itemIndex is not None:
            del self.inRoomPanels[itemIndex]
            self.regenerateInRoomPicker()
    def bringItemFromAttic(self, item, itemIndex):
        """Place attic item *itemIndex* into the room (or delete it when in
        delete mode).

        Ordinary items are dropped 2 units in front of the avatar, in room
        space.  Paintings are instead ray-cast onto the nearest wall so
        they hang flat against it.
        """
        if base.config.GetBool('want-qa-regression', 0):
            self.notify.info('QA-REGRESSION: ESTATE: Place Item in Room')
        messenger.send('wakeup')
        self._ObjectManager__enableItemButtons(0)
        if self.deleteMode:
            self.requestDelete(item, itemIndex, self.deleteItemFromAttic)
            return None
        # Default drop spot: just ahead of the avatar, in room coordinates.
        pos = self.targetNodePath.getRelativePoint(base.localAvatar,
                                                   Point3(0, 2, 0))
        hpr = Point3(0, 0, 0)
        if abs(pos[0]) > 3000 and abs(pos[1]) > 3000 or abs(pos[2]) > 300:
            self.notify.warning(
                'bringItemFromAttic extreme pos targetNodePath=%s avatar=%s %s'
                % (repr(self.targetNodePath.getPos(render)),
                   repr(base.localAvatar.getPos(render)), repr(pos)))
        if item.getFlags() & CatalogFurnitureItem.FLPainting:
            # Stash furniture collisions so the pick ray only hits walls.
            for object in self.objectDict.values():
                object.stashBuiltInCollisionNodes()
            self.gridSnapNP.iPosHpr()
            target = self.targetNodePath
            self.iRay.setParentNP(base.localAvatar)
            entry = self.iRay.pickBitMask3D(
                bitMask=ToontownGlobals.WallBitmask,
                targetNodePath=target,
                origin=Point3(0, 0, 6),
                dir=Vec3(0, 1, 0),
                skipFlags=SKIP_BACKFACE | SKIP_CAMERA | SKIP_UNPICKABLE)
            for object in self.objectDict.values():
                object.unstashBuiltInCollisionNodes()
            if entry:
                self.alignObject(
                    entry, target, fClosest=0, wallOffset=0.10000000000000001)
                pos = self.gridSnapNP.getPos(target)
                hpr = self.gridSnapNP.getHpr(target)
            else:
                self.notify.warning('wall not found for painting')
        self.furnitureManager.moveItemFromAttic(
            itemIndex, (pos[0], pos[1], pos[2], hpr[0], hpr[1], hpr[2]),
            self._ObjectManager__bringItemFromAtticCallback)
    def _ObjectManager__bringItemFromAtticCallback(self, retcode, dfitem,
                                                   itemIndex):
        """Response handler for moveItemFromAttic: load the object into the
        room and shift the remaining attic panels' ids down one slot."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info(
                'Unable to bring furniture item %s into room, reason %s.' %
                (itemIndex, retcode))
            return None
        mo = self.loadObject(dfitem)
        objectId = mo.id()
        self.atticItemPanels[itemIndex].destroy()
        del self.atticItemPanels[itemIndex]
        # Panels after the removed one keep their list position in itemId.
        for i in range(itemIndex, len(self.atticItemPanels)):
            self.atticItemPanels[i].itemId -= 1
        self.regenerateAtticPicker()
        if self.inRoomPicker:
            panel = FurnitureItemPanel(
                dfitem.item,
                objectId,
                command=self.requestReturnToAttic,
                helpCategory='FurnitureItemPanelRoom')
            self.inRoomPanels.append(panel)
            self.regenerateInRoomPicker()
    def deleteItemFromAttic(self, item, itemIndex):
        """Ask the furniture manager to delete attic item *itemIndex*."""
        messenger.send('wakeup')
        self.furnitureManager.deleteItemFromAttic(
            item, itemIndex, self._ObjectManager__deleteItemFromAtticCallback)
    def _ObjectManager__deleteItemFromAtticCallback(self, retcode, item,
                                                    itemIndex):
        """Response handler for deleteItemFromAttic: drop the panel and
        renumber the ones after it."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to delete furniture item %s, reason %s.' %
                             (itemIndex, retcode))
            return None
        self.atticItemPanels[itemIndex].destroy()
        del self.atticItemPanels[itemIndex]
        for i in range(itemIndex, len(self.atticItemPanels)):
            self.atticItemPanels[i].itemId -= 1
        self.regenerateAtticPicker()
    def bringWallpaperFromAttic(self, item, itemIndex):
        """Apply attic wallpaper *itemIndex* to the room the avatar is in
        (or delete it when in delete mode)."""
        messenger.send('wakeup')
        self._ObjectManager__enableItemButtons(0)
        if self.deleteMode:
            self.requestDelete(item, itemIndex, self.deleteWallpaperFromAttic)
            return None
        # Presumably the Y threshold splits the two rooms of the house --
        # confirm against the house model.
        if base.localAvatar.getY() < 2.2999999999999998:
            room = 0
        else:
            room = 1
        self.furnitureManager.moveWallpaperFromAttic(
            itemIndex, room,
            self._ObjectManager__bringWallpaperFromAtticCallback)
    def _ObjectManager__bringWallpaperFromAtticCallback(
            self, retcode, itemIndex, room):
        """Response handler for moveWallpaperFromAttic: the attic list still
        has an entry at this index (apparently the swapped-out wallpaper),
        so rebuild the panel in place."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info(
                'Unable to bring wallpaper %s into room %s, reason %s.' %
                (itemIndex, room, retcode))
            return None
        self.atticWallpaperPanels[itemIndex].destroy()
        item = self.furnitureManager.atticWallpaper[itemIndex]
        panel = FurnitureItemPanel(
            item,
            itemIndex,
            command=self.bringWallpaperFromAttic,
            deleteMode=self.deleteMode,
            helpCategory='FurnitureItemPanelAttic')
        self.atticWallpaperPanels[itemIndex] = panel
        self.regenerateAtticPicker()
    def deleteWallpaperFromAttic(self, item, itemIndex):
        """Ask the furniture manager to delete attic wallpaper *itemIndex*."""
        messenger.send('wakeup')
        self.furnitureManager.deleteWallpaperFromAttic(
            item, itemIndex,
            self._ObjectManager__deleteWallpaperFromAtticCallback)
    def _ObjectManager__deleteWallpaperFromAtticCallback(
            self, retcode, item, itemIndex):
        """Response handler for deleteWallpaperFromAttic: drop the panel
        and renumber the ones after it."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to delete wallpaper %s, reason %s.' %
                             (itemIndex, retcode))
            return None
        self.atticWallpaperPanels[itemIndex].destroy()
        del self.atticWallpaperPanels[itemIndex]
        for i in range(itemIndex, len(self.atticWallpaperPanels)):
            self.atticWallpaperPanels[i].itemId -= 1
        self.regenerateAtticPicker()
    def bringWindowFromAttic(self, item, itemIndex):
        """Install attic window *itemIndex* into a window slot (or delete
        it when in delete mode)."""
        messenger.send('wakeup')
        self._ObjectManager__enableItemButtons(0)
        if self.deleteMode:
            self.requestDelete(item, itemIndex, self.deleteWindowFromAttic)
            return None
        # Slot depends on which side of the Y threshold the avatar stands --
        # presumably one slot per room; confirm against the house model.
        if base.localAvatar.getY() < 2.2999999999999998:
            slot = 2
        else:
            slot = 4
        self.furnitureManager.moveWindowFromAttic(
            itemIndex, slot, self._ObjectManager__bringWindowFromAtticCallback)
    def _ObjectManager__bringWindowFromAtticCallback(self, retcode, itemIndex,
                                                     slot):
        """Response handler for moveWindowFromAttic.

        On FM_SwappedItem the attic list apparently still holds an item at
        this index (the swapped-out window), so the panel is rebuilt in
        place; otherwise the panel is removed and later ones renumbered.
        """
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info(
                'Unable to bring window %s into slot %s, reason %s.' %
                (itemIndex, slot, retcode))
            return None
        if retcode == ToontownGlobals.FM_SwappedItem:
            self.atticWindowPanels[itemIndex].destroy()
            item = self.furnitureManager.atticWindows[itemIndex]
            panel = FurnitureItemPanel(
                item,
                itemIndex,
                command=self.bringWindowFromAttic,
                deleteMode=self.deleteMode,
                helpCategory='FurnitureItemPanelAttic')
            self.atticWindowPanels[itemIndex] = panel
        else:
            self.atticWindowPanels[itemIndex].destroy()
            del self.atticWindowPanels[itemIndex]
            for i in range(itemIndex, len(self.atticWindowPanels)):
                self.atticWindowPanels[i].itemId -= 1
        self.regenerateAtticPicker()
    def deleteWindowFromAttic(self, item, itemIndex):
        """Ask the furniture manager to delete attic window *itemIndex*."""
        messenger.send('wakeup')
        self.furnitureManager.deleteWindowFromAttic(
            item, itemIndex,
            self._ObjectManager__deleteWindowFromAtticCallback)
    def _ObjectManager__deleteWindowFromAtticCallback(self, retcode, item,
                                                      itemIndex):
        """Response handler for deleteWindowFromAttic: drop the panel and
        renumber the ones after it."""
        self._ObjectManager__enableItemButtons(1)
        if retcode < 0:
            self.notify.info('Unable to delete window %s, reason %s.' %
                             (itemIndex, retcode))
            return None
        self.atticWindowPanels[itemIndex].destroy()
        del self.atticWindowPanels[itemIndex]
        for i in range(itemIndex, len(self.atticWindowPanels)):
            self.atticWindowPanels[i].itemId -= 1
        self.regenerateAtticPicker()
    def setGridSpacingString(self, spacingStr):
        """Parse a grid-spacing menu string and apply it.

        NOTE(review): eval() on the string is unsafe in general; it is
        acceptable only if spacingStr always comes from hard-coded GUI
        choices -- confirm before exposing this to other input.
        """
        spacing = eval(spacingStr)
        self.setGridSpacing(spacing)
    def setGridSpacing(self, gridSpacing):
        """Record the snap-to-grid spacing used when placing furniture."""
        self.gridSpacing = gridSpacing
def makeHouseExtentsBox(self):
houseGeom = self.targetNodePath.findAllMatches('**/group*')
targetBounds = houseGeom.getTightBounds()
self.houseExtents = self.targetNodePath.attachNewNode(
'furnitureCollisionNode')
mx = targetBounds[0][0]
Mx = targetBounds[1][0]
my = targetBounds[0][1]
My = targetBounds[1][1]
mz = targetBounds[0][2]
Mz = targetBounds[1][2]
cn = CollisionNode('extentsCollisionNode')
cn.setIntoCollideMask(ToontownGlobals.GhostBitmask)
self.houseExtents.attachNewNode(cn)
cp = CollisionPolygon(
Point3(mx, my, mz), Point3(mx, My, mz), Point3(mx, My, Mz),
Point3(mx, my, Mz))
cn.addSolid(cp)
cp = CollisionPolygon(
Point3(Mx, My, mz), Point3(Mx, my, mz), Point3(Mx, my, Mz),
Point3(Mx, My, Mz))
cn.addSolid(cp)
cp = CollisionPolygon(
Point3(Mx, my, mz), Point3(mx, my, mz), Point3(mx, my, Mz),
Point3(Mx, my, Mz))
cn.addSolid(cp)
cp = CollisionPolygon(
Point3(mx, My, mz), Point3(Mx, My, mz), Point3(Mx, My, Mz),
Point3(mx, My, Mz))
cn.addSolid(cp)
    def makeDoorBlocker(self):
        """Attach a collision sphere that keeps furniture out of an area of
        the room -- presumably the doorway, given the node names; confirm
        the position against the house model."""
        self.doorBlocker = self.targetNodePath.attachNewNode('doorBlocker')
        cn = CollisionNode('doorBlockerCollisionNode')
        cn.setIntoCollideMask(ToontownGlobals.FurnitureSideBitmask)
        self.doorBlocker.attachNewNode(cn)
        cs = CollisionSphere(Point3(-12, -33, 0), 7.5)
        cn.addSolid(cs)
    def createVerifyDialog(self, item, verifyText, okFunc, cancelFunc):
        """Show the OK/Cancel confirmation frame with *item*'s name and
        picture, building the frame lazily on first use.

        item -- catalog item being confirmed
        verifyText -- prompt shown in the frame
        okFunc, cancelFunc -- callbacks bound to the two buttons
        """
        if self.verifyFrame is None:
            # First use: build the frame, buttons and label once; later
            # calls just retarget the callbacks and reshow the frame.
            buttons = loader.loadModel(
                'phase_3/models/gui/dialog_box_buttons_gui')
            okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'),
                             buttons.find('**/ChtBx_OKBtn_DN'),
                             buttons.find('**/ChtBx_OKBtn_Rllvr'))
            cancelButtonImage = (buttons.find('**/CloseBtn_UP'),
                                 buttons.find('**/CloseBtn_DN'),
                                 buttons.find('**/CloseBtn_Rllvr'))
            self.verifyFrame = DirectFrame(
                pos=(-0.40000000000000002, 0.10000000000000001,
                     0.29999999999999999),
                scale=0.75,
                relief=None,
                image=DGG.getDefaultDialogGeom(),
                image_color=ToontownGlobals.GlobalDialogColor,
                image_scale=(1.2, 1, 1.3),
                text='',
                text_wordwrap=19,
                text_scale=0.059999999999999998,
                text_pos=(0, 0.5),
                textMayChange=1,
                sortOrder=NO_FADE_SORT_INDEX)
            self.okButton = DirectButton(
                parent=self.verifyFrame,
                image=okButtonImage,
                relief=None,
                text=OTPLocalizer.DialogOK,
                text_scale=0.050000000000000003,
                text_pos=(0.0, -0.10000000000000001),
                textMayChange=0,
                pos=(-0.22, 0.0, -0.5))
            self.cancelButton = DirectButton(
                parent=self.verifyFrame,
                image=cancelButtonImage,
                relief=None,
                text=OTPLocalizer.DialogCancel,
                text_scale=0.050000000000000003,
                text_pos=(0.0, -0.10000000000000001),
                textMayChange=0,
                pos=(0.22, 0.0, -0.5))
            self.deleteItemText = DirectLabel(
                parent=self.verifyFrame,
                relief=None,
                text='',
                text_wordwrap=16,
                pos=(0.0, 0.0, -0.40000000000000002),
                scale=0.089999999999999997)
        self.verifyFrame['text'] = verifyText
        self.deleteItemText['text'] = item.getName()
        self.okButton['command'] = okFunc
        self.cancelButton['command'] = cancelFunc
        self.verifyFrame.show()
        (self.itemPanel, self.itemIval) = item.getPicture(base.localAvatar)
        if self.itemPanel:
            self.itemPanel.reparentTo(self.verifyFrame, -1)
            self.itemPanel.setPos(0, 0, 0.050000000000000003)
            self.itemPanel.setScale(0.34999999999999998)
            self.deleteItemText.setPos(0.0, 0.0, -0.40000000000000002)
        else:
            # No picture available: move the name text up to fill the gap.
            self.deleteItemText.setPos(0, 0, 0.070000000000000007)
        if self.itemIval:
            self.itemIval.loop()
def _ObjectManager__handleVerifyDeleteOK(self):
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: ESTATE: Send Item to Trash')
deleteFunction = self.verifyItems[0]
deleteFunctionArgs = self.verifyItems[1:]
self._ObjectManager__cleanupVerifyDelete()
deleteFunction(*deleteFunctionArgs)
    def _ObjectManager__cleanupVerifyDelete(self, *args):
        """Dismiss all verify-delete UI (warning dialog, frame, item
        picture and its interval) and clear the pending verify state.
        Extra args from GUI callbacks are ignored."""
        if self.nonDeletableItem:
            self.nonDeletableItem.cleanup()
            self.nonDeletableItem = None
        if self.verifyFrame:
            # The frame is reused, so it is hidden rather than destroyed.
            self.verifyFrame.hide()
        if self.itemIval:
            self.itemIval.finish()
            self.itemIval = None
        if self.itemPanel:
            self.itemPanel.destroy()
            self.itemPanel = None
        self.verifyItems = None
    def _ObjectManager__enableItemButtons(self, enabled):
        """Enable or disable the tab buttons and every panel in all three
        pickers (used to lock the UI while a manager request is pending)."""
        self.notify.debug('__enableItemButtons %d' % enabled)
        if enabled:
            buttonState = DGG.NORMAL
        else:
            buttonState = DGG.DISABLED
        # hasattr guards: the tab buttons may not have been built yet.
        if hasattr(self, 'inAtticButton'):
            self.inAtticButton['state'] = buttonState
        if hasattr(self, 'inRoomButton'):
            self.inRoomButton['state'] = buttonState
        if hasattr(self, 'inTrashButton'):
            self.inTrashButton['state'] = buttonState
        pickers = [self.atticPicker, self.inRoomPicker, self.inTrashPicker]
        for picker in pickers:
            if picker:
                # Trailing ``continue`` is a decompiler artifact; no effect.
                for panel in picker['items']:
                    if not panel.isEmpty():
                        panel.enable(enabled)
                    continue
    def _ObjectManager__resetAndCleanup(self, *args):
        """Cancel-path handler: unlock the item buttons and dismiss any
        verify-delete UI.  Extra GUI callback args are ignored."""
        self._ObjectManager__enableItemButtons(1)
        self._ObjectManager__cleanupVerifyDelete()
    def requestDelete(self, item, itemIndex, deleteFunction):
        """Pop up a delete confirmation for an attic item; on OK,
        deleteFunction(item, itemIndex) runs.

        Shows a warning dialog instead if the avatar does not own the
        house or the item is not deletable.
        """
        self._ObjectManager__cleanupVerifyDelete()
        if self.furnitureManager.ownerId != base.localAvatar.doId or not item.isDeletable(
        ):
            self.warnNonDeletableItem(item)
            return None
        self.createVerifyDialog(item, TTLocalizer.HDDeleteItem,
                                self._ObjectManager__handleVerifyDeleteOK,
                                self._ObjectManager__resetAndCleanup)
        self.verifyItems = (deleteFunction, item, itemIndex)
    def requestRoomDelete(self, dfitem, objectId, itemIndex):
        """Pop up a delete confirmation for an object already placed in the
        room; on OK, deleteItemFromRoom runs with the stored arguments."""
        self._ObjectManager__cleanupVerifyDelete()
        item = dfitem.item
        if self.furnitureManager.ownerId != base.localAvatar.doId or not item.isDeletable(
        ):
            self.warnNonDeletableItem(item)
            return None
        self.createVerifyDialog(item, TTLocalizer.HDDeleteItem,
                                self._ObjectManager__handleVerifyDeleteOK,
                                self._ObjectManager__resetAndCleanup)
        self.verifyItems = (self.deleteItemFromRoom, dfitem, objectId,
                            itemIndex)
def warnNonDeletableItem(self, item):
message = TTLocalizer.HDNonDeletableItem
if not item.isDeletable():
if item.getFlags() & CatalogFurnitureItem.FLBank:
message = TTLocalizer.HDNonDeletableBank
elif item.getFlags() & CatalogFurnitureItem.FLCloset:
message = TTLocalizer.HDNonDeletableCloset
elif item.getFlags() & CatalogFurnitureItem.FLPhone:
message = TTLocalizer.HDNonDeletablePhone
elif item.getFlags() & CatalogFurnitureItem.FLTrunk:
message = TTLocalizer.HDNonDeletableTrunk
if self.furnitureManager.ownerId != base.localAvatar.doId:
message = TTLocalizer.HDNonDeletableNotOwner % self.furnitureManager.ownerName
self.nonDeletableItem = TTDialog.TTDialog(
text=message,
style=TTDialog.Acknowledge,
fadeScreen=0,
command=self._ObjectManager__resetAndCleanup)
self.nonDeletableItem.show()
    def requestReturnToAttic(self, item, objectId):
        """Panel command for an in-room item: confirm returning it to the
        attic (or route to room-delete when in delete mode)."""
        self._ObjectManager__cleanupVerifyDelete()
        itemIndex = None
        # Find the panel index for this object id.  The trailing
        # ``continue`` is a decompiler artifact.
        for i in range(len(self.inRoomPanels)):
            if self.inRoomPanels[i].itemId == objectId:
                itemIndex = i
                self._ObjectManager__enableItemButtons(0)
                break
            continue
        if self.deleteMode:
            dfitem = self.objectDict[objectId].dfitem
            self.requestRoomDelete(dfitem, objectId, itemIndex)
            return None
        self.createVerifyDialog(item, TTLocalizer.HDReturnVerify,
                                self._ObjectManager__handleVerifyReturnOK,
                                self._ObjectManager__resetAndCleanup)
        self.verifyItems = (item, objectId)
    def _ObjectManager__handleVerifyReturnOK(self):
        """OK handler for the return-to-attic dialog: select the object in
        the room, then send it to the attic."""
        (item, objectId) = self.verifyItems
        self._ObjectManager__cleanupVerifyDelete()
        self.pickInRoom(objectId)
        self.sendItemToAttic()
    def requestReturnToAtticFromTrash(self, item, itemIndex):
        """Panel command for a trashed item: confirm recovering it back to
        the attic."""
        self._ObjectManager__cleanupVerifyDelete()
        self._ObjectManager__enableItemButtons(0)
        self.createVerifyDialog(
            item, TTLocalizer.HDReturnFromTrashVerify,
            self._ObjectManager__handleVerifyReturnFromTrashOK,
            self._ObjectManager__resetAndCleanup)
        self.verifyItems = (item, itemIndex)
    def _ObjectManager__handleVerifyReturnFromTrashOK(self):
        """OK handler for the recover-from-trash dialog."""
        if base.config.GetBool('want-qa-regression', 0):
            self.notify.info('QA-REGRESSION: ESTATE: Send Item to Attic')
        (item, itemIndex) = self.verifyItems
        self._ObjectManager__cleanupVerifyDelete()
        self.recoverDeletedItem(item, itemIndex)
    def recoverDeletedItem(self, item, itemIndex):
        """Ask the furniture manager to restore trashed item *itemIndex*."""
        messenger.send('wakeup')
        self.furnitureManager.recoverDeletedItem(
            item, itemIndex, self._ObjectManager__recoverDeletedItemCallback)
def _ObjectManager__recoverDeletedItemCallback(self, retcode, item,
itemIndex):
self._ObjectManager__cleanupVerifyDelete()
if retcode < 0:
if retcode == ToontownGlobals.FM_HouseFull:
self.showHouseFullDialog()
self.notify.info('Unable to recover deleted item %s, reason %s.' %
(itemIndex, retcode))
return None
self._ObjectManager__enableItemButtons(1)
self.inTrashPanels[itemIndex].destroy()
del self.inTrashPanels[itemIndex]
for i in range(itemIndex, len(self.inTrashPanels)):
self.inTrashPanels[i].itemId -= 1
self.regenerateInTrashPicker()
itemType = item.getTypeCode()
if itemType == CatalogItemTypes.WALLPAPER_ITEM and itemType == CatalogItemTypes.FLOORING_ITEM and itemType == CatalogItemTypes.MOULDING_ITEM or itemType == CatalogItemTypes.WAINSCOTING_ITEM:
itemIndex = len(self.atticWallpaperPanels)
bringCommand = self.bringWallpaperFromAttic
elif itemType == CatalogItemTypes.WINDOW_ITEM:
itemIndex = len(self.atticWindowPanels)
bringCommand = self.bringWindowFromAttic
else:
itemIndex = len(self.atticItemPanels)
bringCommand = self.bringItemFromAttic
panel = FurnitureItemPanel(
item,
itemIndex,
command=bringCommand,
deleteMode=self.deleteMode,
helpCategory='FurnitureItemPanelAttic')
if itemType == CatalogItemTypes.WALLPAPER_ITEM and itemType == CatalogItemTypes.FLOORING_ITEM and itemType == CatalogItemTypes.MOULDING_ITEM or itemType == CatalogItemTypes.WAINSCOTING_ITEM:
self.atticWallpaperPanels.append(panel)
elif itemType == CatalogItemTypes.WINDOW_ITEM:
self.atticWindowPanels.append(panel)
else:
self.atticItemPanels.append(panel)
self.regenerateAtticPicker()
    def showHouseFullDialog(self):
        """Tell the player the house cannot hold any more furniture."""
        self.cleanupDialog()
        self.dialog = TTDialog.TTDialog(
            style=TTDialog.Acknowledge,
            text=TTLocalizer.HDHouseFull,
            text_wordwrap=15,
            command=self.cleanupDialog)
        self.dialog.show()
    def bindHelpText(self, button, category):
        """Attach rollover help to *button*: show the text for *category*
        on pointer enter, hide it on exit."""
        button.bind(DGG.ENTER, self.showHelpText, extraArgs=[category, None])
        button.bind(DGG.EXIT, self.hideHelpText)
def showHelpText(self, category, itemName, xy):
def showIt(task):
helpText = TTLocalizer.HDHelpDict.get(category)
if helpText:
if itemName:
helpText = helpText % itemName
self.helpText['text'] = helpText
self.helpText.show()
else:
print 'category: %s not found'
taskMgr.doMethodLater(0.75, showIt, 'showHelpTextDoLater')
    def hideHelpText(self, xy):
        """Cancel any pending help balloon and hide the current one.
        *xy* is the ignored mouse position from the GUI event."""
        taskMgr.remove('showHelpTextDoLater')
        self.helpText['text'] = ''
        self.helpText.hide()
| [
"47166977+peppythegod@users.noreply.github.com"
] | 47166977+peppythegod@users.noreply.github.com |
408a09caa0f100dcf5c20e1fdf23a5e47ce33265 | e940e2d5e0696b9f1385962100796c3d990c33d1 | /chapter 3/exercise_3.14.py | fa55234e74e6fed3a7a23a112f1e030b0ee30fb6 | [] | no_license | sfwarnock/python_programming | a84b6c3d18f55e59d50e5299cedd102c265dfc6b | aa45be8984cd80094f685d4fc4d0b9aca9e9eefb | refs/heads/master | 2021-04-12T03:16:49.267143 | 2018-08-06T12:24:00 | 2018-08-06T12:24:00 | 125,944,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 19
@author: Scott Warnock
"""
# Exercise 3.14
#
# Write a program that finds the average of a series of numbers entered by the user.
# First prompt the user for how many numbers are to be entered.
print("This program averages numbers entered by the user.")
print()


def main():
    """Prompt for a count, then that many numbers, and print their mean."""
    # Fixes: replaced unsafe eval() on raw user input with int()/float(),
    # and renamed locals that shadowed the builtin ``sum``.
    count = int(input("How many numbers do you want to average? "))
    total = 0
    for _ in range(count):
        total += float(input("Enter a number: "))
    mean = total / count
    print()
    print("The mean of the numbers you entered is", mean)


main()
"noreply@github.com"
] | sfwarnock.noreply@github.com |
94f25a94f89bd31421515bc201727e6530947fab | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/417 Pacific Atlantic Water Flow.py | e749a8f4525325e00fb1e9c945f5d40b2aff4e87 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 4,163 | py | #!/usr/bin/python3
"""
Given an m x n matrix of non-negative integers representing the height of each
nit cell in a continent, the "Pacific ocean" touches the left and top edges of
the matrix and the "Atlantic ocean" touches the right and bottom edges.
Water can only flow in four directions (up, down, left, or right) from a cell to
another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific and
Atlantic ocean.
Note:
The order of returned grid coordinates does not matter.
Both m and n are less than 150.
Example:
Given the following 5x5 matrix:
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4) (4) *
~ 2 4 (5) 3 1 *
~ (6) (7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with
parentheses in above matrix).
"""
dirs ((0, 1), (0, -1), (1, 0), (-1, 0
c_ Solution:
___ pacificAtlantic matrix
"""
dfs, visisted O(1)
Similar to Trapping Rainwater II (BFS + heap), but no need to record
volume, thus, dfs is enough.
Similar to longest increasing path
Starting from the edge point rather than any point, dfs visit the
possible cell
Complexity analysis, although a cell can be checked multiple times
(at most 4 times); but only perform 1 dfs on each cell; thus
O(mn)
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
__ n.. matrix o. n.. matrix[0]:
r.. # list
m, n l..(matrix), l..(matrix 0 # row, col
# don't do [[False] * n ] * m, memory management, all rows reference the same row
P [[F.. ___ _ __ r..(n)] ___ _ __ r..(m)]
A [[F.. ___ _ __ r..(n)] ___ _ __ r..(m)]
# starting from edge point
___ i __ r..(m
dfs(matrix, i, 0, P)
dfs(matrix, i, n-1, A)
___ j __ r..(n
dfs(matrix, 0, j, P)
dfs(matrix, m-1, j, A)
ret [
[i, j]
___ i __ r..(m)
___ j __ r..(n)
__ P[i][j] a.. A[i][j]
]
r.. ret
___ dfs matrix, i, j, C
# check before dfs (to be consistent)
C[i][j] T..
m, n l..(matrix), l..(matrix 0
___ x, y __ dirs:
I i + x
J j + y
__ 0 <_ I < m a.. 0 <_ J < n a.. matrix[i][j] <_ matrix[I][J]:
__ n.. C[I][J]:
dfs(matrix, I, J, C)
___ pacificAtlantic_error matrix
"""
DP
dfs, visisted O(1)
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
__ n.. matrix o. n.. matrix[0]:
r.. # list
m, n l..(matrix), l..(matrix 0 # row, col
P [[F..] * n ] * m
A [[F..] * n ] * m
visisted [[F..] * n ] * m
___ i __ r..(m
___ j __ r..(n
dfs_error(matrix, i, j, visisted, P, l.... i, j: i < 0 o. j <0)
visisted [[F..] * n ] * m
___ i __ r..(m
___ j __ r..(n
dfs_error(matrix, i, j, visisted, A, l.... i, j: i >_ m o. j >_ n)
ret [
[i, j]
___ i __ r..(m)
___ j __ r..(n)
__ P[i][j] a.. A[i][j]
]
r.. ret
___ dfs_error matrix, i, j, visisted, C, predicate
m, n l..(matrix), l..(matrix 0
__ visisted[i][j]:
r.. C[i][j]
visisted[i][j] T..
___ x, y __ dirs:
i2 i + x
j2= j + y
__ 0 <_ i2 < m a.. 0 <_ j2 < n:
__ dfs_error(matrix, i2, j2, visisted, C, predicate) a.. matrix[i][j] >_ matrix[i2][j2]:
C[i][j] T..
____ predicate(i2, j2
C[i][j] T..
r.. C[i][j]
__ _______ __ _______
... Solution().pacificAtlantic([
[1,2,2,3,5],
[3,2,3,4,4],
[2,4,5,3,1],
[6,7,1,4,5],
[5,1,1,2,4]
]) __ [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3fd0f6ad32c23a1851c4dde901c0d66727e175e1 | e776e45ae9f78765fb11e3e8cf5c87a0a1b9f0da | /OLD/tests/test-websockets.py | a3a2ab909c839346c259af6dd028fc9718d30345 | [
"MIT"
] | permissive | mvandepanne/seamless | 5100b9994b84c83d82815b572b2ee4e1f61931d6 | 1dc9108176cca2d7e2fe57eb1695aec6d39df456 | refs/heads/master | 2020-04-17T10:06:14.829539 | 2019-01-15T19:31:36 | 2019-01-15T19:31:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | import seamless
from seamless import context, cell, reactor, transformer
from seamless.lib.filelink import link
ctx = context()
ctx.server = reactor({"socket": {"pin": "output", "dtype": "int"}})
ctx.servercode = ctx.server.code_start.cell()
link(ctx.servercode, ".", "test-websockets_pycell.py")
ctx.server.code_update.cell().set("")
ctx.server.code_stop.cell().set("""
server.close()
loop.run_until_complete(server.wait_closed())
""")
from seamless.lib.gui.browser import browse
ctx.client_template = cell("text")
link(ctx.client_template, ".", "test-websockets_client.jinja")
tf_params = {"inp":{"pin": "input", "dtype": "text"},
"identifier":{"pin": "input", "dtype": "text"},
"socket":{"pin": "input", "dtype": "int"},
"outp":{"pin": "output", "dtype": ("text", "html")} }
tf_code = """
import jinja2
d = dict(IDENTIFIER=identifier, socket=socket)
return jinja2.Template(inp).render(d)
"""
ctx.client1 = cell(("text", "html"))
ctx.tf_client1 = transformer(tf_params)
ctx.server.socket.cell().connect(ctx.tf_client1.socket)
ctx.client_template.connect(ctx.tf_client1.inp)
ctx.tf_client1.code.cell().set(tf_code)
ctx.tf_client1.identifier.cell().set("First WebSocket client")
ctx.tf_client1.outp.connect(ctx.client1)
browse(ctx.client1)
ctx.client2 = cell(("text", "html"))
ctx.tf_client2 = transformer(tf_params)
ctx.server.socket.cell().connect(ctx.tf_client2.socket)
ctx.client_template.connect(ctx.tf_client2.inp)
ctx.tf_client2.code.cell().set(tf_code)
ctx.tf_client2.identifier.cell().set("Second WebSocket client")
ctx.tf_client2.outp.connect(ctx.client2)
browse(ctx.client2)
if not seamless.ipython:
seamless.mainloop()
| [
"sjdv1982@gmail.com"
] | sjdv1982@gmail.com |
5e91a4dab811e752ac55b1c8c0f67c5649ab163e | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/prompt_toolkit/output/color_depth.py | a6166bacafb2941014a2080438fc330df92d394d | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 1,387 | py | import os
from enum import Enum
from typing import Optional
__all__ = [
"ColorDepth",
]
class ColorDepth(str, Enum):
"""
Possible color depth values for the output.
"""
value: str
#: One color only.
DEPTH_1_BIT = "DEPTH_1_BIT"
#: ANSI Colors.
DEPTH_4_BIT = "DEPTH_4_BIT"
#: The default.
DEPTH_8_BIT = "DEPTH_8_BIT"
#: 24 bit True color.
DEPTH_24_BIT = "DEPTH_24_BIT"
# Aliases.
MONOCHROME = DEPTH_1_BIT
ANSI_COLORS_ONLY = DEPTH_4_BIT
DEFAULT = DEPTH_8_BIT
TRUE_COLOR = DEPTH_24_BIT
@classmethod
def from_env(cls) -> Optional["ColorDepth"]:
"""
Return the color depth if the $PROMPT_TOOLKIT_COLOR_DEPTH environment
variable has been set.
This is a way to enforce a certain color depth in all prompt_toolkit
applications.
"""
# Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable.
all_values = [i.value for i in ColorDepth]
if os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH") in all_values:
return cls(os.environ["PROMPT_TOOLKIT_COLOR_DEPTH"])
return None
@classmethod
def default(cls) -> "ColorDepth":
"""
Return the default color depth for the default output.
"""
from .defaults import create_output
return create_output().get_default_color_depth()
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
91ffadc0956fd77e0b62bd670aa556229fd3ab4a | e33199ecbe80ef7205473bf1ad584bbffd1a24a5 | /test.py | a3fa8b56983b7216b2f94532745867fe0510f4da | [] | no_license | zhangjiulong/Seq2Seq_Chatbot_QA | cba49143df1a20910f687fa7c48b10a99a7127c3 | 0c3ea305615ee572d2b8a4bef654a3182709107a | refs/heads/master | 2021-01-11T01:31:13.841357 | 2016-10-10T15:57:40 | 2016-10-10T15:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | #!/usr/bin/env python3
__author__ = 'qhduan@memect.co'
import sys
import math
import time
import random
import numpy as np
from sklearn.utils import shuffle
import tensorflow as tf
from tqdm import tqdm
import data_util
tf.device(data_util.test_device)
encoder_inputs = [tf.placeholder(tf.int32, [None], name='encoder_inputs_{}'.format(i))
for i in range(data_util.input_len)]
decoder_inputs = [tf.placeholder(tf.int32, [None], name='decoder_inputs_{}'.format(i))
for i in range(data_util.output_len)]
decoder_targets = [tf.placeholder(tf.int32, [None], name='decoder_targets_{}'.format(i))
for i in range(data_util.output_len)]
decoder_weights = [tf.placeholder(tf.float32, [None], name='decoder_weights_{}'.format(i))
for i in range(data_util.output_len)]
outputs, states = data_util.build_model(encoder_inputs, decoder_inputs, True)
loss_func = tf.nn.seq2seq.sequence_loss(
outputs,
decoder_targets,
decoder_weights,
data_util.dim
)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
data_util.load_model(sess)
def test_sentence(s):
s = s.strip()
if len(s) > data_util.input_len:
s = s[:data_util.input_len]
encoder, decoder = data_util.get_sentence(s)
feed_dict = {}
for i in range(len(encoder_inputs)):
feed_dict[encoder_inputs[i]] = encoder[i]
feed_dict[decoder_inputs[0]] = decoder[0]
output = sess.run(outputs, feed_dict)
output = np.asarray(output).argmax(axis=2).T
for o in output:
return data_util.indice_sentence(o)
def test_qa(s):
o = test_sentence(s)
print('Q:', s)
print(o)
print('-' * 10)
def test_example():
t = [
'你好',
'你是谁',
'你从哪来',
'你到哪去'
]
for x in t:
test_qa(x)
def test_db():
asks, answers = data_util.read_db('db/conversation.db')
for _ in range(20):
s = random.choice(asks)
test_qa(s)
if __name__ == '__main__':
while True:
sentence = input('说:')
sentence = sentence.strip()
if sentence in ('quit', 'exit'):
break
if len(sentence) <= 0:
break
recall = test_sentence(sentence)
print(recall)
| [
"mail@qhduan.com"
] | mail@qhduan.com |
ce8b2390d1ca1c2670dca353c0dfdebe4586f810 | 9ba439d691359a6296e182e0b8cea30b89f95530 | /modules/processing/parsers/malwareconfig/Ursnif.py | cb18b7eb4ad29f561d173ce2fb771cb482dcf475 | [] | no_license | naxonez/CAPE | ffed1b8c54199ac149dbe21df25f89db650d4369 | b6295b40de8b9020e4bb25b2c0b09a126736b5f5 | refs/heads/master | 2020-07-02T22:19:41.421961 | 2019-10-01T07:20:44 | 2019-10-01T07:20:44 | 201,684,949 | 1 | 0 | null | 2019-08-10T21:30:36 | 2019-08-10T21:30:36 | null | UTF-8 | Python | false | false | 3,988 | py | # Copyright (C) 2017 Kevin O'Reilly (kevin.oreilly@contextis.co.uk)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import os.path
MAX_STRING_SIZE = 256
def string_from_offset(buffer, offset):
string = buffer[offset:offset+MAX_STRING_SIZE].split("\0")[0]
return string
def get_config_item(config, offset):
config_string = string_from_offset(config, offset)
if ' ' in config_string:
config_list = config_string.split(' ')
return config_list
else:
return config_string
def config(raw_data):
number_of_sections = struct.unpack('I', raw_data[0:4])[0]
section_offset = 8
section_count = 0
config_dict = {}
while section_count < number_of_sections:
section_key = struct.unpack('I', raw_data[section_offset:section_offset+4])[0]
section_type = struct.unpack('I', raw_data[section_offset+4:section_offset+8])[0]
if section_type == 1:
data_offset = struct.unpack('I', raw_data[section_offset+8:section_offset+12])[0]
config_item = get_config_item(raw_data, section_offset + data_offset)
if config_item == None:
continue
if section_key == 0xD0665BF6:
config_dict['Domains'] = config_item
elif section_key == 0x73177345:
config_dict['DGA Base URL'] = config_item
elif section_key == 0xCD850E68:
config_dict['DGA CRC'] = config_item
elif section_key == 0xC61EFA7A:
config_dict['DGA TLDs'] = config_item
elif section_key == 0x510F22D2:
config_dict['TOR Domains'] = config_item
elif section_key == 0xDF351E24:
config_dict['32-bit DLL URLs'] = config_item
elif section_key == 0x4B214F54:
config_dict['64-bit DLL URLs'] = config_item
elif section_key == 0xEC99DF2E:
config_dict['IP Service'] = config_item
elif section_key == 0x11271C7F:
config_dict['Timer'] = config_item
elif section_key == 0xDF2E7488:
config_dict['DGA Season'] = config_item
elif section_key == 0x556AED8F:
config_dict['Server'] = config_item
elif section_key == 0x4FA8693E:
config_dict['Encryption key'] = config_item
elif section_key == 0xD7A003C9:
config_dict['Config Fail Timeout'] = config_item
elif section_key == 0x18A632BB:
config_dict['Config Timeout'] = config_item
elif section_key == 0x31277BD5:
config_dict['Task Timeout'] = config_item
elif section_key == 0x955879A6:
config_dict['Send Timeout'] = config_item
elif section_key == 0xACC79A02:
config_dict['Knocker Timeout'] = config_item
elif section_key == 0x6DE85128:
config_dict['BC Timeout'] = config_item
elif section_key == 0x656B798A:
config_dict['Botnet ID'] = config_item
elif section_key == 0xEFC574AE:
config_dict['Value 11'] = config_item
#elif section_key == 0x584E5925:
# config_dict['EndPointer'] = config_item
section_count += 1
section_offset += 24
return config_dict
| [
"kevoreilly@gmail.com"
] | kevoreilly@gmail.com |
d067a21fa8765b22a0f88cdba5e0a412f49c94ca | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_perilous.py | a1869b74937c931bf046a9fb6d81b59763772b4b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py |
#calss header
class _PERILOUS():
def __init__(self,):
self.name = "PERILOUS"
self.definitions = [u'extremely dangerous: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
13db48c20ac0aeb1293560cbe2a6d3b6db7c0101 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/alertsmanagement/azure-mgmt-alertsmanagement/generated_samples/alerts_summary.py | 8794d5a0df8d764bf286909b59972416f2b2b42d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,547 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.alertsmanagement import AlertsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-alertsmanagement
# USAGE
python alerts_summary.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AlertsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="1e3ff1c0-771a-4119-a03b-be82a51e232d",
)
response = client.alerts.get_summary(
groupby="severity,alertState",
)
print(response)
# x-ms-original-file: specification/alertsmanagement/resource-manager/Microsoft.AlertsManagement/preview/2019-05-05-preview/examples/Alerts_Summary.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
25433c3b9058296a7d47ba555fd4cc166775905f | 0fe11fbe31be719a253c0b2d9e41e20fedc2c40f | /dapper/mods/Lorenz95/boc10.py | e051e268a9250ed1e2e4aa60b8ac50ba136e0b59 | [
"MIT"
] | permissive | lijunde/DAPPER | 148ff5cefb92d1bb01c78bd4a82a6f1ecdebdad2 | dc92a7339932af059967bd9cf0a473ae9b8d7bf9 | refs/heads/master | 2020-12-10T21:44:54.468785 | 2019-09-24T18:18:36 | 2019-09-24T18:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | # From Fig. 1 of Bocquet 2010 "Beyond Gaussian Statistical Modeling
# in Geophysical Data Assimilation".
from dapper import *
from dapper.mods.Lorenz95 import core
t = Chronology(0.05,dkObs=1,T=4**3,BurnIn=20)
Nx = 10
Dyn = {
'M' : Nx,
'model': core.step,
'noise': 0
}
X0 = GaussRV(M=Nx, C=0.001)
jj = arange(0,Nx,2)
Obs = partial_Id_Obs(Nx,jj)
Obs['noise'] = 1.5
HMM = HiddenMarkovModel(Dyn,Obs,t,X0)
####################
# Suggested tuning
####################
# Why are these benchmarks superior to those in the article?
# We use, in the EnKF,
# - inflation instead of additive noise ?
# - Sqrt instead of perturbed obs
# - random orthogonal rotations.
# The particle filters are also probably better tuned:
# - jitter covariance proportional to ensemble (weighted) cov
# - no jitter on unique particles after resampling
#
# For a better "picture" of the relative performances,
# see benchmarks in presentation from SIAM_SEAS.
# Note: They are slightly unrealiable (short runs).
# Expected RMSE_a:
# cfgs += EnKF_N(N=8,rot=True,xN=1.3) # 0.31
# cfgs += PartFilt(N=50 ,NER=0.3 ,reg=1.7) # 1.0
# cfgs += PartFilt(N=100,NER=0.2 ,reg=1.3) # 0.36
# cfgs += PartFilt(N=800,NER=0.2 ,reg=0.8) # 0.25
# cfgs += OptPF( N=50 ,NER=0.25,reg=1.4,Qs=0.4) # 0.61
# cfgs += OptPF( N=100,NER=0.2 ,reg=1.0,Qs=0.3) # 0.37
# cfgs += OptPF( N=800,NER=0.2 ,reg=0.6,Qs=0.1) # 0.25
# cfgs += PFa( N=50 ,alpha=0.4,NER=0.5,reg=1.0) # 0.45
# cfgs += PFa( N=100,alpha=0.3,NER=0.4,reg=1.0) # 0.38
# cfgs += PFxN (N=30, NER=0.4, Qs=1.0,xN=1000) # 0.48
# cfgs += PFxN (N=50, NER=0.3, Qs=1.1,xN=100 ) # 0.43
# cfgs += PFxN (N=100,NER=0.2, Qs=1.0,xN=100 ) # 0.32
# cfgs += PFxN (N=400,NER=0.2, Qs=0.8,xN=100 ) # 0.27
# cfgs += PFxN (N=800,NER=0.2, Qs=0.6,xN=100 ) # 0.25
# cfgs += PFxN_EnKF(N=25 ,NER=0.4 ,Qs=1.5,xN=100) # 0.49
# cfgs += PFxN_EnKF(N=50 ,NER=0.25,Qs=1.5,xN=100) # 0.36
# cfgs += PFxN_EnKF(N=100,NER=0.20,Qs=1.0,xN=100) # 0.32
# cfgs += PFxN_EnKF(N=300,NER=0.10,Qs=1.0,xN=100) # 0.28
| [
"patrick.n.raanes@gmail.com"
] | patrick.n.raanes@gmail.com |
fa89d08d72199fe676941a86fa19de36749c1879 | 3b8013a29b6800f0f15569d74603346cef62e4e7 | /Reinforcement_learning_TUT/5.2_Prioritized_Replay_DQN/run_MountainCar.py | 127e28eff91a62bb7e6ee2a4f27218d1eb4197f4 | [] | no_license | pentium3/tutorials | 3978140a2038a988b6043cb4efcb5cab67e7ca89 | abf2ea80ba0e5c6800701908367aeb4b5ee2369b | refs/heads/master | 2021-01-22T22:28:32.828169 | 2017-03-20T03:05:18 | 2017-03-20T03:05:18 | 85,548,010 | 3 | 0 | null | 2017-03-20T07:38:15 | 2017-03-20T07:38:15 | null | UTF-8 | Python | false | false | 2,039 | py | """
The DQN improvement: Prioritized Experience Replay (based on https://arxiv.org/abs/1511.05952)
View more on 莫烦Python: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import gym
from RL_brain import DQNPrioritizedReplay
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
env = gym.make('MountainCar-v0')
env = env.unwrapped
env.seed(21)
MEMORY_SIZE = 10000
sess = tf.Session()
with tf.variable_scope('natural_DQN'):
RL_natural = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=False,
)
with tf.variable_scope('DQN_with_prioritized_replay'):
RL_prio = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=True, output_graph=True,
)
sess.run(tf.global_variables_initializer())
def train(RL):
total_steps = 0
steps = []
episodes = []
for i_episode in range(20):
observation = env.reset()
while True:
# env.render()
action = RL.choose_action(observation)
observation_, reward, done, info = env.step(action)
if done: reward = 10
RL.store_transition(observation, action, reward, observation_)
if total_steps > MEMORY_SIZE:
RL.learn()
if done:
print('episode ', i_episode, ' finished')
steps.append(total_steps)
episodes.append(i_episode)
break
observation = observation_
total_steps += 1
return np.vstack((episodes, steps))
his_natural = train(RL_natural)
his_prio = train(RL_prio)
plt.plot(his_natural[0, :], his_natural[1, :], c='b', label='natural DQN')
plt.plot(his_prio[0, :], his_prio[1, :], c='r', label='DQN with prioritized replay')
plt.legend(loc='best')
plt.ylabel('total training time')
plt.xlabel('episode')
plt.grid()
plt.show()
| [
"morvanzhou@hotmail.com"
] | morvanzhou@hotmail.com |
af19ca4c8a24e5a881d2b25b64da4fe3e104f5be | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/p12/tuple.py | 6cab125c1070a8dc3fdf2ce36d74f9586479b242 | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | t = ("张三",22,"未婚","有钱","likedog","Ture",'sb')
print(t[3])
print(t[5])
print(t[6])
print(len(t))
print(t.index(22))
print(t.count("Ture"))
print(type(t))
print(t)
print("姓名: %s, \n年龄: %d, \n为什么: %s, \n爱好: %s, \n性别: %s, \n性格: %s, \n相貌: %s " % t )
for a in t:
print(a)
| [
"1083027306@qq.com"
] | 1083027306@qq.com |
dcba45dc6badfffc9ef1b173f06e2e3c525948fc | 0184a8149c063dd7a350dac476ef705304864040 | /rsbeams/rsphysics/decoherence.py | 0c205e51bddb76c15d195d8f4c9b9e1a77bbcb5d | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | radiasoft/rsbeams | 71a8bed396aced7da3f4adc3daebe1a694284c51 | 732b40b233609418b5204b4d865568bf89386005 | refs/heads/master | 2023-06-10T15:11:11.905713 | 2023-05-01T22:11:52 | 2023-05-01T22:11:52 | 56,353,370 | 4 | 24 | NOASSERTION | 2023-05-26T23:03:41 | 2016-04-15T22:28:48 | Python | UTF-8 | Python | false | false | 5,783 | py | import numpy as np
from pathos.multiprocessing import Pool, cpu_count
from scipy.integrate import quad
from scipy.special import fresnel
class CentroidPosition:
"""
Calculate position of an initially offset beam centroid vs turn.
Assumes a waterbag distribution and arbitrary order in tune dependence with amplitude.
Based on SSC-N-360.
"""
def __init__(self, N, Z, nu0, mu):
"""
Set up to perform integrations of centroid positions. Centroid positions can be found after setup by calling
the `calculate_centroid` method.
Note that mu contains the coefficients for the tune amplitude dependence with amplitude:
mu_0 * a**2 + mu_1 * a**4 + ...
Args:
N: (int) Max turn number to calculate out to.
Z: (float) Initial offset normalized by rms beam size at offset position.
nu0: (float)Linear tune.
mu: (floats in iterable object) Iterable containing mu values to desired order.
"""
self.N = N
self.Z = Z
self.nu0 = nu0
self.mu = mu
def _reduced_integrand(self, a, n):
"""
Calculate the integrand. Based on SSC-N-360 eq. 13.
Args:
a: (float or array of floats) Normalized amplitude on range [0, 2*Pi*N].
n: (int) Turn number for calculation.
Returns:
Float
"""
order = 1
advance = 0
for m in self.mu:
advance += m * a ** order / (2. * np.pi * n) ** (order - 1)
order += 1
coeff = self.Z / (2 * n)
const_slip = 2 * np.pi * self.nu0 * n
angular_term = np.cos(const_slip) * np.cos(advance) + np.sin(const_slip) * np.sin(advance)
# Calculate cutoff if a is float or array
try:
maxa = 1. * 2 * np.pi * n
if a <= maxa:
distr = angular_term / 1. / np.pi
else:
distr = 0.
except ValueError:
maxa = np.ones_like(a, dtype='float') * 2 * np.pi * n
distr = angular_term / 1. / np.pi * np.less(a, maxa)
return coeff * distr
def integrate_any_order(self, turn=None):
"""
Performs numerical integration over range [0, 2*Pi*n] for each turn out to N. Up to arbitrary order in a.
Args:
turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
Returns:
Float or array of floats
"""
if turn is not None:
n = turn
else:
n = self.N
if n == 0:
return self.Z
# noinspection PyTupleAssignmentBalance
result, _ = quad(self._reduced_integrand,
0, 2 * np.pi * n,
args=n)
return result
def integrate_first_order(self, turn=None):
"""
Exact value of integral if only a**2 term in tune dependent amplitude is used.
Args:
turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
Returns:
Float or array of floats
"""
if turn is not None:
n = turn
else:
n = self.N
if n == 0:
return self.Z
xN = self.Z / (2. * np.pi * n * self.mu[0]) * \
(np.cos(2 * np.pi * self.nu0 * n) * np.sin(2 * np.pi * n * self.mu[0]) +
2. * np.sin(2 * np.pi * self.nu0 * n) * np.sin(
np.pi * n * self.mu[0]) ** 2)
return xN
def integrate_second_order(self, turn=None):
"""
Exact value of integral if only a**2 and a**4 terms in tune dependent amplitude are used.
Args:
turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
Returns:
Float or array of floats
"""
if turn is not None:
n = turn
else:
n = self.N
if n == 0:
return self.Z
def integrand(u, N):
fS, fC = fresnel((self.mu[0] * N * np.pi + self.mu[1] * u) / np.sqrt(self.mu[1] * N * np.pi**2))
term1 = np.cos(np.pi * self.mu[0]**2 * N / (2. * self.mu[1]) + 2. * np.pi * self.nu0 * N)
term2 = np.sin(np.pi * self.mu[0]**2 * N / (2. * self.mu[1]) + 2. * np.pi * self.nu0 * N)
return fC * term1 + fS * term2
xN = integrand(2 * np.pi * n, n) - integrand(0, n)
return xN * self.Z / np.sqrt(4. * self.mu[1] * n)
def calculate_centroids(self, p=None):
"""
Perform integration to find centroid at all turns up to N. Multiprocessing pool used to calculate independent
turn values.
Will automatically use `integrate_first_order` or `integrate_second_order` if appropriate.
Args:
p: Specify number of processes for pool. If not given then `cpu_count` is used.
Returns:
array of floats
"""
if p:
pool_size = p
else:
pool_size = cpu_count()
pool = Pool(pool_size)
# attempt to speed things up by spreading out difficult integration values at the end of range
# appeared to not work
# x = []
# for i in range(cpu_count()):
# x += range(N)[i::4]
if len(self.mu) == 1:
integration_function = self.integrate_first_order
elif len(self.mu) == 2:
integration_function = self.integrate_second_order
else:
integration_function = self.integrate_any_order
x = range(self.N)
results = pool.map(integration_function, x)
pool.close()
return results
| [
"chall@radiasoft.net"
] | chall@radiasoft.net |
f14a5f507d6c84a401f49bf011da731a8090d3e6 | 82aee3211216f55392d5a757eb57f02c859e9a28 | /Easy/680_validPalindrome_II.py | 69e73cef406dfb2488f769e23635c4306938a0dc | [] | no_license | Yucheng7713/CodingPracticeByYuch | 505d18095d4b9a35c1f3b23632a90a76d811b64a | 1461b10b8910fa90a311939c6df9082a8526f9b1 | refs/heads/master | 2022-05-01T11:51:00.612603 | 2022-04-18T09:46:55 | 2022-04-18T09:46:55 | 198,961,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Solution:
def validPalindrome(self, s):
l_index, r_index = 0, len(s) - 1
while l_index < r_index:
if s[l_index] != s[r_index]:
ld = s[:l_index] + s[l_index + 1:]
rd = s[:r_index] + s[r_index + 1:]
if (ld == ld[::-1]) or (rd == rd[::-1]):
return True
return False
l_index += 1
r_index -= 1
return True
s = Solution()
mystr = "abcdef"
print(s.validPalindrome(mystr)) | [
"yuchengh@usc.edu"
] | yuchengh@usc.edu |
d573c33b815eaa794a8c2ddd9347b9ff3acb8449 | 4491549f0b1bbf5397ae0b56192605a7abcb61b0 | /python/Session-Management-2/models/SQL.py | 9f80310ec3149956a81008768c2ebd2f1a619fae | [
"Apache-2.0"
] | permissive | iNoSec2/skf-labs | 81e9d400ccac1007632add23bd50a094de1f50d5 | 8af9edc83e313be1578c5dee0fd4ecdf7ac18a32 | refs/heads/master | 2023-08-17T00:20:12.274684 | 2023-08-04T13:13:10 | 2023-08-04T13:13:10 | 235,376,119 | 0 | 0 | Apache-2.0 | 2023-08-05T00:31:43 | 2020-01-21T15:33:01 | Python | UTF-8 | Python | false | false | 1,288 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# SKF Labs - Security Knowledge Framework (SKF)
# Copyright (C) 2022, OWASP Foundation, Inc.
#
# This software is provided under a slightly modified version
# of The GNU Affero General Public License. See the accompanying LICENSE
# file for more information.
#
# Description:
# Database layer functionalities including:
# - User credential validation
#
# Author:
# Alex Romero (@NtAlexio2)
#
from config.sqlite import *
import hashlib
class DataAccess:
def validateCredentials(self, username, password):
hash = hashlib.md5(password.encode()).hexdigest().lower()
connection = create_db_connection()
cursor = connection.execute('SELECT username, hash FROM Users WHERE username=? AND hash=?', (username, hash, ))
return cursor.fetchone() is not None
def checkUserExists(self, username):
connection = create_db_connection()
cursor = connection.execute('SELECT username FROM Users WHERE username=?', (username, ))
return cursor.fetchone() is not None
def isAdmin(self, username):
connection = create_db_connection()
cursor = connection.execute('SELECT is_admin FROM Users WHERE username=?', (username, ))
return bool(cursor.fetchone()[0])
| [
"glenntencate@gmail.com"
] | glenntencate@gmail.com |
861b5dc42fe75933ac0cd3d42acd0499ef6f55f1 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/carbon/common/script/entities/audioEmitter.py | 69d4790838f2e6414248d5702ab6b90aa4cedabd | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #Embedded file name: carbon/common/script/entities\audioEmitter.py
"""
Contains a set of available audio components.
"""
INITIAL_EVENT_NAME = 'initialEventName'
INITIAL_SOUND_ID = 'initialSoundID'
EMITTER_GROUP_NAME = 'groupName'
class AudioEmitterComponent:
__guid__ = 'audio.AudioEmitterComponent'
def __init__(self):
self.initialEventName = None
self.initialSoundID = None
self.groupName = None
import carbon.common.script.util.autoexport as autoexport
exports = autoexport.AutoExports('audio', locals())
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
f386041caffaa7e937c6f6caa066b710a265b17a | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/buffer_cache/=+home=+dante=+proyectos=+regis_comp=+regis_compr=+regis_compr=+regis_compr=+urls.py | 65df4785e69abfe3e627ed4a0ab3320b899b6f07 | [] | no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,944 | py | {'coding', 'utf', 'from', 'django', 'conf', 'urls', 'import', 'patterns', 'include', 'url', 'static', 'settings', 'views', 'generic', 'TemplateView', 'compras', 'ComprasHechasList', 'ProveedoresList', 'ProveedoresDetail', 'ProveedoresCreate', 'ProveedoresUpdate', 'ProveedoresDelete', 'ProductProveeCreate', 'ProductProveeList', 'ProductProveeDetail', 'CompradoresCreate', 'CompradoresDetail', 'ComprasHechasCreate', 'ComprasHechasDetail', 'webServices', 'wsProductos', 'contrib', 'admin', 'urlpatterns', 'r', 'as_view', 'template_name', 'base', 'html', 'Examples', 'project_name', 'home', 'name', 'blog', 'grappelli', 'site', 'account', 'Buscar', 'Proveedor', 'por', 'RUC', 'Sessi', 'n', 'index_view', 'vista_principal', 'about', 'about_view', 'vista_about', 'login', 'login_view', 'vista_login', 'registro', 'register_view', 'vista_registro', 'logout', 'logout_view', 'vista_logout', 'ws', 'productos', 'wsProductos_view', 'ws_productos_url', 'App', 'Compras', 'Operaciones', 'add', 'producto', 'add_product_view', 'vista_agregar_producto', 'edit', 'P', 'id_prod', 'edit_product_view', 'vista_editar_producto', 'buy', 'compra_view', 'comprar_producto', 'getcart', 'get_carrito_compras', 'get_carrito', 'clean', 'cart', 'borrar_carrito', 'finish', 'real_compra', 'visualizar_compra', 'topdf', 'to_pdf', 'page', 'pagina', 'productos_view', 'vista_productos', 'singleProduct_view', 'vista_single_producto', 'search', 'ruc', 'search_ruc', 
'proveedor', 'create', 'proveedor_create', 'list', 'proveedor_list', 'pk', 'd', 'Update', 'proveedor_update', 'Delete', 'proveedor_delete', 'proveedor_detail', 'product', 'product_provee_create', 'produc_provee_list', 'producto_proveedor', 'product_provee_detail', 'comprador', 'comprador_create', 'comprador_detail', 'comprashechas', 'comprashechas_list', 'comprashechas_create', 'comprashechas_detail', 'media', 'path', 'serve', 'document_root', 'MEDIA_ROOT', 'if', 'DEBUG', 'debug_toolbar', '__debug__'}
| [
"pacifi.bnr@gmail.com"
] | pacifi.bnr@gmail.com |
e6fe06b32a6c28761c3ce92ab678de60ed034977 | d82e7c3cb7a8c4fe718fb041fcc9533ece6839f3 | /tf/applib/display.py | 72857c3aa8d5e8400cc83f30e96a3da43394a9e0 | [
"MIT"
] | permissive | OsvaldoJ/text-fabric | cb64a0475f229ca299862ce4697423f82948e9b8 | 63c4589ffd71d7ac11c549818ffeb54017a6216f | refs/heads/master | 2020-08-27T13:02:23.702835 | 2019-09-11T13:24:06 | 2019-09-11T13:24:06 | 217,378,170 | 0 | 1 | MIT | 2019-10-24T19:25:27 | 2019-10-24T19:25:27 | null | UTF-8 | Python | false | false | 17,377 | py | import os
import types
from ..parameters import URL_TFDOC, DOWNLOADS
from ..core.helpers import mdEsc, htmlEsc, flattenToSet
from .app import findAppConfig
from .helpers import configure, RESULT, dh
from .links import outLink
from .condense import condense, condenseSet
from .highlight import getTupleHighlights, getHlAtt
LIMIT_SHOW = 100
LIMIT_TABLE = 2000
FONT_BASE = 'https://github.com/annotation/text-fabric/blob/master/tf/server/static/fonts'
CSS_FONT = '''
<link rel="stylesheet" href="/server/static/fonts.css"/>
'''
CSS_FONT_API = f'''
@font-face {{{{
font-family: "{{fontName}}";
src:
local("{{font}}"),
url("{FONT_BASE}/{{fontw}}?raw=true");
}}}}
'''
def displayApi(app, silent, hoist):
    '''Wire the display functions of this module onto `app` as methods.

    Binds export/table/plain/pretty and friends to the app object,
    supplies a default `classNames` mapping when none is configured,
    loads the CSS unless the app runs as the web app (`_asApp`), and,
    if `hoist` is given, hoists the TF API members into that namespace,
    reporting them (unless `silent`).
    '''
    bindings = (
        ('export', export),
        ('table', table),
        ('plainTuple', plainTuple),
        ('plain', plain),
        ('show', show),
        ('prettyTuple', prettyTuple),
        ('pretty', pretty),
        ('loadCss', loadCss),
    )
    for (methodName, func) in bindings:
        setattr(app, methodName, types.MethodType(func, app))

    api = app.api
    if app.classNames is None:
        # default: each node type maps to itself as a CSS class name
        app.classNames = {nType[0]: nType[0] for nType in api.C.levels.data}

    if not app._asApp:
        app.loadCss()

    if hoist:
        docs = api.makeAvailableIn(hoist)
        if not silent:
            groupReps = []
            for (head, ref, entries) in docs:
                entryLinks = ', '.join(
                    outLink(
                        entry,
                        f'{URL_TFDOC}/Api/{head}/#{ref}',
                        title='doc',
                    )
                    for entry in entries
                )
                groupReps.append(entryLinks)
            dh(
                '<details open><summary><b>API members</b>:</summary>\n'
                + '<br/>\n'.join(groupReps)
                + '</details>'
            )
def export(
    app,
    tuples,
    toDir=None,
    toFile='results.tsv',
    **options,
):
    '''Export a sequence of result tuples to a tab-separated (TSV) file.

    toDir: destination directory; defaults to the user's downloads
        directory (DOWNLOADS). Created if it does not yet exist.
    toFile: file name within toDir.
    options: display options, validated as for `table`.

    The file is written with encoding utf_16_le plus an explicit BOM
    (presumably for spreadsheet-import compatibility — the original
    code did the same). Cells holding None become empty strings.
    '''
    display = app.display
    # validate the passed display options against the 'table' operation
    if not display.check('table', options):
        return ''
    d = display.get(options)

    if toDir is None:
        toDir = os.path.expanduser(DOWNLOADS)
    # Fix: also create an explicitly passed toDir. Previously only the
    # default directory was created, so a non-existing user-supplied
    # directory made the open() below fail. exist_ok=True makes a
    # separate existence check unnecessary.
    os.makedirs(toDir, exist_ok=True)

    toPath = f'{toDir}/{toFile}'
    # flatten the node tuples into rows of exportable feature values
    resultsX = getResultsX(
        app,
        tuples,
        d.tupleFeatures,
        d.condenseType or app.condenseType,
        app.noDescendTypes,
        fmt=d.fmt,
    )
    with open(toPath, 'w', encoding='utf_16_le') as fh:
        fh.write(
            '\ufeff' + ''.join(
                ('\t'.join('' if t is None else str(t) for t in tup) + '\n')
                for tup in resultsX
            )
        )
def table(
    app,
    tuples,
    _asString=False,
    **options,
):
    '''Render an iterable of node tuples as a single HTML table.

    Each tuple becomes one row (delegated to `plainTuple`); the first
    tuple also supplies the column headers (the node types of its
    members). If `_asString` is True the HTML string is returned,
    otherwise it is displayed directly via `dh`.
    '''
    display = app.display
    # validate the passed display options against the 'table' operation
    if not display.check('table', options):
        return ''
    d = display.get(options)
    api = app.api
    F = api.F
    fOtype = F.otype.v
    # in condensed mode the tuples are regrouped under container nodes
    # of d.condenseType; otherwise each tuple is labeled as a RESULT
    item = d.condenseType if d.condensed else RESULT
    if d.condensed:
        tuples = condense(api, tuples, d.condenseType, multiple=True)
    # extra header cell for the passage column, if requested
    passageHead = '</th><th class="tf">p' if d.withPassage else ''
    html = []
    one = True  # emit the header row only once, before the first data row
    for (i, tup) in _tupleEnum(tuples, d.start, d.end, LIMIT_TABLE, item):
        if one:
            # header cells: the node type of each member of the first tuple
            heads = '</th><th>'.join(fOtype(n) for n in tup)
            html.append(
                f'''
    <tr class="tf">
      <th class="tf">n{passageHead}</th>
      <th class="tf">{heads}</th>
    </tr>
'''
            )
            one = False
        # one data row per tuple, rendered as plain (non-pretty) HTML
        html.append(
            plainTuple(
                app,
                tup,
                i,
                item=item,
                position=None,
                opened=False,
                _asString=True,
                **options,
            )
        )
    html = '<table>' + '\n'.join(html) + '</table>'
    if _asString:
        return html
    dh(html)
def plainTuple(
    app,
    tup,
    seq,
    item=RESULT,
    position=None,
    opened=False,
    _asString=False,
    **options,
):
  """Render one result tuple in plain (compact) form.

  In the TF browser (app._asApp) this produces a <details> element with
  passage attributes and an optional pretty expansion; in a notebook it
  produces a table row. seq is the 1-based sequence number of the tuple;
  position marks the tuple that currently has focus.
  """
  display = app.display
  if not display.check('plainTuple', options):
    return ''
  d = display.get(options)
  _asApp = app._asApp
  api = app.api
  F = api.F
  T = api.T
  fOtype = F.otype.v
  if d.withPassage:
    # pick the member that determines the passage reference
    passageNode = _getRefMember(app, tup, d.linked, d.condensed)
    passageRef = (
        '' if passageNode is None else
        app._sectionLink(passageNode)
        if _asApp else
        app.webLink(passageNode, _asString=True)
    )
    if passageRef:
      passageRef = f' {passageRef}'
  else:
    passageRef = ''
  # options passed down to nested renderings, with consumed keys removed
  newOptions = display.consume(options, 'withPassage')
  newOptionsH = display.consume(options, 'withPassage', 'highlights')
  highlights = (
      getTupleHighlights(api, tup, d.highlights, d.colorMap, d.condenseType)
  )
  if _asApp:
    # pretty expansion is only computed when the row is opened
    prettyRep = prettyTuple(
        app,
        tup,
        seq,
        withPassage=False,
        **newOptions,
    ) if opened else ''
    current = ' focus' if seq == position else ''
    attOpen = ' open ' if opened else ''
    tupSeq = ','.join(str(n) for n in tup)
    if d.withPassage:
      # sec0/sec1/sec2 attributes let the browser navigate to the passage
      sParts = T.sectionFromNode(passageNode, fillup=True)
      passageAtt = ' '.join(
          f'sec{i}="{sParts[i] if i < len(sParts) else ""}"'
          for i in range(3)
      )
    else:
      passageAtt = ''
    plainRep = ''.join(
        f'''<span>{mdEsc(app.plain(
                    n,
                    isLinked=i == d.linked - 1,
                    withPassage=False,
                    highlights=highlights,
                    **newOptionsH,
                  ))
                }
            </span>
         ''' for (i, n) in enumerate(tup)
    )
    html = (
        f'''
  <details
    class="pretty dtrow{current}"
    seq="{seq}"
    {attOpen}
  >
    <summary>
      <a href="#" class="pq fa fa-solar-panel fa-xs" title="show in context" {passageAtt}></a>
      <a href="#" class="sq" tup="{tupSeq}">{seq}</a>
      {passageRef}
      {plainRep}
    </summary>
    <div class="pretty">{prettyRep}</div>
  </details>
'''
    )
    return html
  # notebook path: build a table row, one cell per tuple member
  html = [str(seq)]
  if passageRef:
    html.append(passageRef)
  for (i, n) in enumerate(tup):
    html.append(
        app.plain(
            n,
            isLinked=i == d.linked - 1,
            _asString=True,
            withPassage=False,
            highlights=highlights,
            **newOptionsH,
        )
    )
  html = '<tr class="tf"><td class="tf">' + ('</td><td class="tf">'.join(html)) + '</td></tr>'
  if _asString:
    return html
  # standalone display: prepend a header row with the node types
  head = [
      '<tr class="tf"><th class="tf">n</th><th class="tf">' +
      ('</th><th class="tf">'.join(fOtype(n) for n in tup)) +
      '</th></tr>'
  ]
  head.append(html)
  dh('\n'.join(head))
def plain(
    app,
    n,
    isLinked=True,
    _asString=False,
    secLabel=True,
    **options,
):
  """Render node n in plain (non-graphical) form.

  Computes an optional passage link (only for nodes that are not
  themselves sections) and normalizes the highlights specification
  before delegating the real work to app._plain.
  """

  display = app.display
  if not display.check('plain', options):
    return ''
  d = display.get(options)
  api = app.api
  nodeType = api.F.otype.v(n)

  # passage link, suppressed for section nodes
  passage = ''
  if d.withPassage and nodeType not in api.T.sectionTypes:
    passage = app.webLink(n, _asString=True)
  if passage:
    passage += ' '

  # a bare set of nodes means: highlight them with the default color
  hls = d.highlights
  if type(hls) is set:
    hls = {m: '' for m in hls}

  return app._plain(
      n,
      passage,
      isLinked,
      _asString,
      secLabel,
      highlights=hls,
      **display.consume(options, 'highlights'),
  )
def show(
    app,
    tuples,
    **options,
):
  """Display a sequence of result tuples with pretty (graphical) rendering.

  In condensed mode the tuples are first merged into container nodes of
  the condense type; the highlights are then computed from the original
  tuples (before condensing) and passed along as rawHighlights.
  """
  display = app.display
  if not display.check('show', options):
    return ''
  d = display.get(options)
  api = app.api
  F = api.F
  item = d.condenseType if d.condensed else RESULT
  if d.condensed:
    # highlights must come from the original tuples, before they
    # are replaced by their containers below
    rawHighlights = getTupleHighlights(
        api, tuples, d.highlights, d.colorMap, d.condenseType, multiple=True
    )
    highlights = {}
    colorMap = None
    tuples = condense(api, tuples, d.condenseType, multiple=True)
  else:
    highlights = d.highlights
    rawHighlights = None
    colorMap = d.colorMap
  for (i, tup) in _tupleEnum(tuples, d.start, d.end, LIMIT_SHOW, item):
    # when condensed, label each item by the type of its container node
    item = F.otype.v(tup[0]) if d.condensed and d.condenseType else RESULT
    prettyTuple(
        app,
        tup,
        i,
        item=item,
        highlights=highlights,
        colorMap=colorMap,
        rawHighlights=rawHighlights,
        **display.consume(options, 'highlights', 'colorMap'),
    )
def prettyTuple(
    app,
    tup,
    seq,
    item=RESULT,
    rawHighlights=None,
    **options,
):
  """Pretty-display the container nodes around one result tuple.

  The containers are the condense-type ancestors of the tuple members
  (or just the first member when already condensed). In the TF browser
  (app._asApp) the rendered HTML is returned; in a notebook a caption
  is displayed and app.pretty displays each container directly.
  """
  display = app.display
  if not display.check('prettyTuple', options):
    return ''
  d = display.get(options)
  _asApp = app._asApp
  if len(tup) == 0:
    # nothing to show; the browser expects a string result
    if _asApp:
      return ''
    else:
      return
  api = app.api
  sortKey = api.sortKey
  containers = {tup[0]} if d.condensed else condenseSet(api, tup, d.condenseType)
  highlights = (
      getTupleHighlights(api, tup, d.highlights, d.colorMap, d.condenseType)
      if rawHighlights is None else rawHighlights
  )
  if not _asApp:
    dh(f'<p><b>{item}</b> <i>{seq}</i></p>')
  if _asApp:
    html = []
  for t in sorted(containers, key=sortKey):
    # in notebook mode app.pretty displays directly (side effect);
    # in browser mode its HTML return value is collected
    h = app.pretty(
        t,
        highlights=highlights,
        **display.consume(options, 'highlights'),
    )
    if _asApp:
      html.append(h)
  if _asApp:
    return '\n'.join(html)
def pretty(
    app,
    n,
    **options,
):
  """Display node n graphically, with feature values and highlights.

  When condensing, the drawing window is the boundary of the container
  node of the condense type that holds n (if any); otherwise it is the
  boundary of n itself. Returns the HTML string in the TF browser,
  displays it in a notebook.
  """
  display = app.display
  if not display.check('pretty', options):
    return ''
  d = display.get(options)
  _asApp = app._asApp
  api = app.api
  F = api.F
  L = api.L
  T = api.T
  fOtype = F.otype.v
  otypeRank = api.otypeRank
  sectionTypes = T.sectionTypes
  containerN = None
  nType = fOtype(n)
  if d.condensed and d.condenseType:
    # locate the container of the condense type that embeds n
    if nType == d.condenseType:
      containerN = n
    elif otypeRank[nType] < otypeRank[d.condenseType]:
      ups = L.u(n, otype=d.condenseType)
      if ups:
        containerN = ups[0]
  # slot window that delimits what gets drawn
  (firstSlot, lastSlot) = (
      getBoundary(api, n) if not d.condensed or not d.condenseType else
      (None, None) if containerN is None else getBoundary(api, containerN)
  )
  html = []
  if d.withPassage:
    # passage link, only for nodes that are not themselves sections
    if nType not in sectionTypes:
      html.append(app.webLink(n, _asString=True))
  # a bare set of highlight nodes means: default highlight color
  highlights = (
      {m: '' for m in d.highlights}
      if type(d.highlights) is set else
      d.highlights
  )
  extraFeatures = sorted(flattenToSet(d.extraFeatures) | flattenToSet(d.tupleFeatures))
  app._pretty(
      n,
      True,
      html,
      firstSlot,
      lastSlot,
      extraFeatures=extraFeatures,
      highlights=highlights,
      **display.consume(options, 'extraFeatures', 'highlights'),
  )
  htmlStr = '\n'.join(html)
  if _asApp:
    return htmlStr
  dh(htmlStr)
def prettyPre(
    app,
    n,
    firstSlot,
    lastSlot,
    withNodes,
    highlights,
):
  """Collect the ingredients needed to pretty-display node n.

  Returns False when n lies completely outside the slot window
  (firstSlot, lastSlot); otherwise a tuple
  (slotType, nType, className, boundaryClass, hlAtt, nodePart,
  myStart, myEnd).
  """

  api = app.api
  otype = api.F.otype
  nType = otype.v(n)

  (myStart, myEnd) = getBoundary(api, n)

  # Annotate nodes that stick out of the display window with the
  # 'rno'/'lno' CSS classes; bail out when there is no overlap at all.
  boundaryCls = ''
  if firstSlot is not None:
    if myEnd < firstSlot:
      return False
    if myStart < firstSlot:
      boundaryCls += ' rno'
  if lastSlot is not None:
    if myStart > lastSlot:
      return False
    if myEnd > lastSlot:
      boundaryCls += ' lno'

  hlAtt = getHlAtt(app, n, highlights)
  nodePart = f'<a href="#" class="nd">{n}</a>' if withNodes else ''
  clsName = app.classNames.get(nType, None)

  return (
      otype.slotType,
      nType,
      clsName.lower() if clsName else clsName,
      boundaryCls.lower() if boundaryCls else boundaryCls,
      hlAtt,
      nodePart,
      myStart,
      myEnd,
  )
# COMPOSE TABLES FOR CSV EXPORT
def getResultsX(app, results, features, condenseType, noDescendTypes, fmt=None):
  """Transform query results into a tuple of rows for tabular export.

  The first row is a header. Each result row contains: the result
  number, the section parts of a reference member, and per tuple member
  its node, type, optional text, and optional extra feature values.
  """
  api = app.api
  F = api.F
  Fs = api.Fs
  T = api.T
  fOtype = F.otype.v
  otypeRank = api.otypeRank
  sectionTypes = set(T.sectionTypes)
  sectionDepth = len(sectionTypes)
  if len(results) == 0:
    return ()
  firstResult = results[0]
  nTuple = len(firstResult)
  # the first non-section member supplies the passage reference
  refColumns = [i for (i, n) in enumerate(firstResult) if fOtype(n) not in sectionTypes]
  refColumn = refColumns[0] if refColumns else nTuple - 1
  header = ['R'] + [f'S{i}' for i in range(1, sectionDepth + 1)]
  emptyA = []
  # per-column extra features; string specs are split on whitespace
  featureDict = {i: tuple(f.split()) if type(f) is str else f for (i, f) in features}
  def withText(nodeType):
    # NOTE(review): when condenseType is None and nodeType is a section
    # type, this evaluates otypeRank[None] — presumably never reached in
    # practice; confirm.
    return (
        condenseType is None and nodeType not in sectionTypes
        or
        otypeRank[nodeType] <= otypeRank[condenseType]
    )
  # build the header from the shape of the first result
  for j in range(nTuple):
    i = j + 1
    n = firstResult[j]
    nType = fOtype(n)
    header.extend([f'NODE{i}', f'TYPE{i}'])
    if withText(nType):
      header.append(f'TEXT{i}')
    header.extend(f'{feature}{i}' for feature in featureDict.get(j, emptyA))
  rows = [tuple(header)]
  for (rm, r) in enumerate(results):
    rn = rm + 1
    row = [rn]
    refN = r[refColumn]
    sParts = T.sectionFromNode(refN)
    nParts = len(sParts)
    # pad the section columns up to the full section depth
    section = sParts + ((None, ) * (sectionDepth - nParts))
    row.extend(section)
    for j in range(nTuple):
      n = r[j]
      nType = fOtype(n)
      row.extend((n, nType))
      if withText(nType):
        text = T.text(n, fmt=fmt, descend=nType not in noDescendTypes)
        row.append(text)
      row.extend(Fs(feature).v(n) for feature in featureDict.get(j, emptyA))
    rows.append(tuple(row))
  return tuple(rows)
def getBoundary(api, n):
  """Return the (first, last) slot numbers covered by node n.

  A slot node is its own boundary. For other nodes the slot sequence is
  taken from the oslots edge data, which is indexed by node number
  offset past the maximal slot.
  """
  otype = api.F.otype
  if otype.v(n) == otype.slotType:
    return (n, n)
  mySlots = api.E.oslots.data[n - otype.maxSlot - 1]
  return (mySlots[0], mySlots[-1])
def getFeatures(
    app,
    n,
    features,
    withName=None,
    o=None,
    givenValue=None,
    plain=False,
    **options,
):
  """Render the feature values of node n as an HTML string.

  features are the standard features for this node type; the extra
  features from the display options are appended (marked with CSS class
  'xft'). withName True/False forces showing/hiding the feature name;
  None shows the name for extra features only. o is an optional second
  node whose differing value is shown after a '|'. givenValue contains
  precomputed values that override the feature lookup. With plain=True
  the wrapping <div class="features"> is only emitted when at least one
  extra feature is rendered.
  """
  # Fix: the default used to be a mutable argument (givenValue={});
  # use a None sentinel so no dict is shared between calls.
  if givenValue is None:
    givenValue = {}
  display = app.display
  d = display.get(options)
  api = app.api
  Fs = api.Fs
  featurePartB = '<div class="features">'
  featurePartE = '</div>'
  givenFeatureSet = set(features)
  # extra features configured in the options, minus the standard ones
  xFeatures = tuple(f for f in d.extraFeatures if f not in givenFeatureSet)
  extraSet = set(xFeatures)
  featureList = tuple(features) + xFeatures
  nFeatures = len(features)
  showWithName = extraSet
  if not plain:
    featurePart = featurePartB
    hasB = True
  else:
    featurePart = ''
    hasB = False
  for (i, name) in enumerate(featureList):
    if name not in d.suppress:
      # explicitly given values win over the feature lookup
      if name in givenValue:
        value = givenValue[name]
      else:
        if Fs(name) is None:
          continue
        value = Fs(name).v(n)
        oValue = None if o is None else Fs(name).v(o)
        valueRep = None if value in d.noneValues else htmlEsc(value)
        oValueRep = None if o is None or oValue in d.noneValues else htmlEsc(oValue)
        if valueRep is None and oValueRep is None:
          value = None
        else:
          # show both values, '|'-separated, when they differ
          sep = '' if valueRep is None or oValueRep is None else '|'
          valueRep = '' if valueRep is None else valueRep
          oValueRep = '' if oValueRep is None else oValueRep
          value = valueRep if valueRep == oValueRep else f'{valueRep}{sep}{oValueRep}'
      if value is not None:
        value = value.replace('\n', '<br/>')
        showName = withName or (withName is None and name in showWithName)
        nameRep = f'<span class="f">{name}=</span>' if showName else ''
        xClass = ' xft' if name in extraSet else ''
        featureRep = f' <span class="{name.lower()}{xClass}">{nameRep}{value}</span>'
        if i >= nFeatures:
          # an extra feature forces the wrapper div, even in plain mode
          if not hasB:
            featurePart += featurePartB
            hasB = True
        featurePart += featureRep
  if hasB:
    featurePart += featurePartE
  return featurePart
def loadCss(app, reload=False):
  '''Look up the CSS and inject it into the notebook.

  In the TF browser (app._asApp) the CSS string is returned instead of
  displayed. With reload=True the app-specific display.css is read again
  from disk (via the app configuration).
  '''
  _asApp = app._asApp
  if _asApp:
    return app.css
  if reload:
    # re-read the app-specific CSS from its config on disk
    config = findAppConfig(app.appName, app.appPath)
    cfg = configure(config, app.version)
    app.css = cfg['css']
  # highlight.css lives next to this package, under server/static
  hlCssFile = (
      f'{os.path.dirname(os.path.dirname(os.path.abspath(__file__)))}'
      '/server/static/highlight.css'
  )
  with open(hlCssFile) as fh:
    hlCss = fh.read()
  # optional @font-face rule for the app's special font
  cssFont = (
      '' if app.fontName is None else CSS_FONT_API.format(
          fontName=app.fontName,
          font=app.font,
          fontw=app.fontw,
      )
  )
  tableCss = '''
tr.tf, td.tf, th.tf {
  text-align: left;
}
'''
  dh(f'<style>{cssFont + app.css + tableCss + hlCss}</style>')
def _getRefMember(app, tup, linked, condensed):
api = app.api
T = api.T
sectionTypes = T.sectionTypes
ln = len(tup)
return (
None
if not tup or any(n in sectionTypes for n in tup) else
tup[0] if condensed else
tup[min((linked, ln - 1))] if linked else
tup[0]
)
def _tupleEnum(tuples, start, end, limit, item):
if start is None:
start = 1
i = -1
if not hasattr(tuples, '__len__'):
if end is None or end - start + 1 > limit:
end = start - 1 + limit
for tup in tuples:
i += 1
if i < start - 1:
continue
if i >= end:
break
yield (i + 1, tup)
else:
if end is None or end > len(tuples):
end = len(tuples)
rest = 0
if end - (start - 1) > limit:
rest = end - (start - 1) - limit
end = start - 1 + limit
for i in range(start - 1, end):
yield (i + 1, tuples[i])
if rest:
dh(
f'<b>{rest} more {item}s skipped</b> because we show a maximum of'
f' {limit} {item}s at a time'
)
| [
"dirk.roorda@dans.knaw.nl"
] | dirk.roorda@dans.knaw.nl |
b275678714d301a028aa868acf30bec68fc76782 | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /trunk/pyformex/examples/SpaceTrussRoof_abq.py | 0216d8a2ac79315d917755356de57ab4bf7795cf | [] | no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | py | #!/usr/bin/env pyformex
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Double Layer Flat Space Truss Roof
level = 'advanced'
topics = ['FEA']
techniques = ['color']
"""
from plugins.properties import *
from plugins.fe_abq import *
import os
####
# Input data (geometry and loading)
###################
dx = 1800 # Modular size [mm]
ht = 900 # Deck height [mm]
nx = 4 # number of bottom deck modules in x direction
ny = 5 # number of bottom deck modules in y direction
q = -0.005 #distributed load [N/mm^2]
#############
# Creating the model: top deck, bottom deck, diagonals
###################
top = (Formex("1").replic2(nx-1,ny,1,1) + Formex("2").replic2(nx,ny-1,1,1)).scale(dx)
top.setProp(3)
bottom = (Formex("1").replic2(nx,ny+1,1,1) + Formex("2").replic2(nx+1,ny,1,1)).scale(dx).translate([-dx/2,-dx/2,-ht])
bottom.setProp(0)
T0 = Formex(4*[[[0,0,0]]]) # 4 times the corner of the top deck
T4 = bottom.select([0,1,nx,nx+1]) # 4 nodes of corner module of bottom deck
dia = connect([T0,T4]).replic2(nx,ny,dx,dx)
dia.setProp(1)
F = (top+bottom+dia)
# Show the structure upright in the viewer
createView('myview1',(0.,-90.,0.))
clear();linewidth(1);draw(F,view='myview1')
############
# Creating the FE model
###################
M = F.toMesh()
###############
# Creating element sets
###################
# Remember: elems are in the same order as elements in F
topbar = where(F.prop==3)[0]
bottombar = where(F.prop==0)[0]
diabar = where(F.prop==1)[0]
###############
# Creating node sets: classify nodes by how many elements attach to them
###################
nnod=M.ncoords()
nlist=arange(nnod)
count = zeros(nnod)
for n in M.elems.flat:
    count[n] += 1
field = nlist[count==8]
topedge = nlist[count==7]
topcorner = nlist[count==6]
bottomedge = nlist[count==5]
bottomcorner = nlist[count==3]
support = concatenate([bottomedge,bottomcorner])
edge = concatenate([topedge,topcorner])
########################
# Defining and assigning the properties
#############################
# nodal load: half the distributed load over one dx*dx module
Q = 0.5*q*dx*dx
P = PropertyDB()
P.nodeProp(set=field,cload = [0,0,Q,0,0,0])
P.nodeProp(set=edge,cload = [0,0,Q/2,0,0,0])
P.nodeProp(set=support,bound = [1,1,1,0,0,0])
# circular truss section (radius 10 mm) in S500 steel
circ20 = ElemSection(section={'name':'circ20','sectiontype':'Circ','radius':10, 'cross_section':314.159}, material={'name':'S500', 'young_modulus':210000, 'shear_modulus':81000, 'poisson_ratio':0.3, 'yield_stress' : 500,'density':0.000007850})
# example of how to set the element type by set
P.elemProp(set=topbar,section=circ20,eltype='T3D2')
P.elemProp(set=bottombar,section=circ20,eltype='T3D2')
# alternatively, we can specify the elements by an index value
# in an array that we will pass in the Abqdata 'eprop' argument
P.elemProp(prop=1,section=circ20,eltype='T3D2')
# Since all elements have the same characteristics, we could just have used:
# P.elemProp(section=circ20,elemtype='T3D2')
# But putting the elems in three sets allows for separate postprocessing
# Print node and element property databases (Python 2 print statements)
for p in P.nprop:
    print p
for p in P.eprop:
    print p
#############
# Writing the Abaqus input file
###################
step = Step()
out = Output(type='field',variable='preselect')
res = [ Result(kind='element',keys=['S']),
        Result(kind='node',keys=['U'])
      ]
model = Model(M.coords,M.elems)
if not checkWorkdir():
    exit()
AbqData(model,P,[step],eprop=F.prop,out=[out],res=res).write('SpaceTruss')
# End
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] | bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
67370eb3a4b958731d7ec128f3d2da56dcf993f9 | 98bebd68f01daa7e328d06e8f6d98042b587995f | /D2/다리를지나는트럭/timecomplexity.py | 99512214389e7583dd4ebd2aa7fb5dd0ea316a05 | [] | no_license | Ysh096/programmers | f189108f2e0cd792697821e806558dea489254f9 | c391ee58df1554af91a7099817b208d6883adca8 | refs/heads/master | 2023-04-13T06:06:48.938489 | 2021-04-15T16:43:30 | 2021-04-15T16:43:30 | 332,446,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import timeit
start_time = timeit.default_timer()
test_list = [1, 2, 3]
for _ in range(10000):
test_list = [0] + test_list
# test_list.insert(0, 0)
terminate_time = timeit.default_timer()
print('덧셈: ', terminate_time - start_time)
start_time = timeit.default_timer()
test_list = [1, 2, 3]
print('insert 전: ', id(test_list))
for _ in range(10000):
# test_list = [0] + test_list
test_list.insert(0, 0)
terminate_time = timeit.default_timer()
print('insert 후: ', id(test_list))
print('insert:', terminate_time - start_time) | [
"skk7541@gmail.com"
] | skk7541@gmail.com |
d6fc338427aacaabf688b490a8a6d5eaf9716e3a | a222e2999251ba7f0d62c428ba8cc170b6d0b3b7 | /AtC_Beg_Con_071-080/ABC075/A.py | db33151934d67c6c93187cf992931dde59650476 | [
"MIT"
] | permissive | yosho-18/AtCoder | 3e1f3070c5eb44f154c8104fbd5449f47446ce14 | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | refs/heads/master | 2020-06-02T10:21:29.458365 | 2020-05-29T12:40:48 | 2020-05-29T12:40:48 | 188,795,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | a,b,c = map(int, input().split())
# Print the value that differs from the other two.
# (Presumably the input guarantees exactly two of a, b, c are equal;
# if all three were equal, every branch would fire.)
if a == b:
    print(c)
if c == b:
    print(a)
if a == c:
    print(b)
| [
"44283410+wato18@users.noreply.github.com"
] | 44283410+wato18@users.noreply.github.com |
65f8dcf11bee03d8da86bda4c16e8fef954c3273 | 52585c8d95cef15199c18ba1a76899d2c31329f0 | /05PythonCookbook/ch12Concurrency/13polling_multiple_thread_queques/pqueue.py | 91396071d9996401e4454a9b17767eed6909d23c | [] | no_license | greatabel/PythonRepository | c7a952257303a21083ed7d535274c339362bd126 | 836fcdd3f5c1b150122302685104fe51b5ebe1a3 | refs/heads/master | 2023-08-30T15:56:05.376391 | 2023-08-26T03:34:14 | 2023-08-26T03:34:14 | 29,392,599 | 33 | 6 | null | 2023-02-14T13:33:21 | 2015-01-17T13:54:58 | Python | UTF-8 | Python | false | false | 1,615 | py | import queue
import socket
import os
class PollableQueue(queue.Queue):
    """A Queue that can be monitored with select()/poll().

    A connected socket pair mirrors the queue state: one byte is sent
    per put() and consumed per get(), so the read-side socket becomes
    readable exactly when items are available.
    """

    def __init__(self):
        super().__init__()
        if os.name == 'posix':
            self._putsocket, self._getsocket = socket.socketpair()
        else:
            # No socketpair() on non-POSIX systems: emulate it with a
            # loopback listener and a one-off client connection.
            listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            listener.bind(('127.0.0.1', 0))
            listener.listen(1)
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect(listener.getsockname())
            self._putsocket = client
            self._getsocket, _ = listener.accept()
            listener.close()

    def fileno(self):
        """File descriptor that select() should watch for readability."""
        return self._getsocket.fileno()

    def put(self, item):
        """Enqueue item, then signal readers by sending one byte."""
        super().put(item)
        self._putsocket.send(b'x')

    def get(self):
        """Consume one signal byte, then dequeue and return an item."""
        self._getsocket.recv(1)
        return super().get()
if __name__ == '__main__':
    import select
    import threading
    import time
    def consumer(queues):
        '''
        Consumer that reads data on multiple queues simultaneously,
        by select()-ing on their underlying socket file descriptors.
        '''
        while True:
            can_read, _, _ = select.select(queues,[],[])
            for r in can_read:
                item = r.get()
                print('Got:', item)
    q1 = PollableQueue()
    q2 = PollableQueue()
    q3 = PollableQueue()
    # daemon thread: it will not keep the process alive at exit
    t = threading.Thread(target=consumer, args=([q1,q2,q3],))
    t.daemon = True
    t.start()
    # Feed data to the queues
    q1.put(1)
    q2.put(10)
    q3.put('hello')
    q2.put(15)
    # Give thread time to run (drain the queues) before the process exits
    time.sleep(1)
"greatabel1@126.com"
] | greatabel1@126.com |
7e6c9b781e4c749a5a0b3bde3a2fb1bfe9d9f012 | 38e0a6aa9df9c968135b348845abfa489cda4031 | /binhaishiPaper/binhaishiPaper/spiders/newsPaperSpider.py | 662ae17c09cb15907a2cbf17c062004b6274cae7 | [] | no_license | AReallyMan/everySpiders | bb923de508bd986bcf158728d17638c4ce608db8 | 19419ae5097a522ed0c88e9ab63aa62419c25b44 | refs/heads/master | 2022-09-20T02:18:06.205480 | 2020-06-03T06:33:47 | 2020-06-03T06:33:47 | 263,788,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | # -*- coding: utf-8 -*-
# @Time : 2020-06-04
# @Author : ZhangYangyang
# @Software: PyCharm
import scrapy
import datetime
import re
import time
from scrapy.spiders import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from ..items import BinhaishipaperItem
from ..settings import ELASTICSEARCH_TYPE
# 滨海时报
class NewpaperSpider(CrawlSpider):
    """Crawl spider for the Binhai Times (滨海时报) news site.

    Follows article links whose URL path contains today's date and
    parses each article page into a BinhaishipaperItem.
    """
    name = 'newpaperSpider'
    # today's date in the URL path format used by the site, e.g. "2020/0604"
    current_time = time.strftime("%Y/%m%d", time.localtime())
    today = datetime.date.today()
    start_urls = ['http://www.tjbhnews.com/finanec/', 'http://www.tjbhnews.com/life/',
                  'http://www.tjbhnews.com/xinwen/', 'http://bhsb.tjbhnews.com/']
    # NOTE(review): rules is a set here; Scrapy conventionally uses a
    # tuple/list. CrawlSpider only iterates it, so a set appears to work.
    rules = {
        Rule(LinkExtractor(allow='/'+current_time+'/\d+\.html'),
             callback='parse_item'),
        Rule(LinkExtractor(allow='/'+current_time+'/\d+_\d+\.html'),
             callback='parse_item')
    }
    def parse_item(self, response):
        """Parse one article page into an item, skipping already-seen URLs."""
        item = BinhaishipaperItem()
        # NOTE(review): self.duplicate is not defined in this file —
        # presumably a Redis-backed dedup helper attached elsewhere
        # (pipeline/extension); confirm before running standalone.
        if self.duplicate.redis_db.hexists(self.duplicate.redis_data_dict, response.url):
            # message: "this link has already been crawled"
            print("该连接已被爬取")
        else:
            item['title'] = response.xpath("//div[@class='contTit']/font/text()").extract_first()
            # NOTE(review): editor uses the same XPath as title — looks
            # like a copy/paste slip; confirm the intended selector.
            editor = response.xpath("//div[@class='contTit']/font/text()").extract_first()
            if editor:
                item['editor'] = editor
            else:
                item['editor'] = ''
            item['publishtime'] = response.xpath("//span[@id='pubtime_baidu']/text()").extract_first()
            content = response.xpath("//div[@class='contTxt']/div").xpath('string(.)').extract_first()
            if content:
                # keep only CJK characters from the article body
                content = re.findall(u"[\u4e00-\u9fa5]+", content)
                item['content'] = ''.join(content)
            else:
                item['content'] = ''
            item['fromwhere'] = response.xpath("//span[@id='source_baidu']/text()").extract_first()
            item['url'] = response.url
            item['spiderName'] = ELASTICSEARCH_TYPE
            item['spiderDesc'] = '滨海时报'
            item['siteType'] = '纸媒'
            item['source'] = '滨海时报'
            item['publicTimeStamp'] = int(time.mktime(self.today.timetuple()))
            item['insertTimeStamp'] = int(time.time() * 1000)
            yield item
| [
"969114624@qq.com"
] | 969114624@qq.com |
8359cc1c8ef4d13f10400870ed08a0427261a636 | 85043cfb5a7cc86e9f6e22c45925df778062efd5 | /rapid7vmconsole/models/disk_total.py | 3ec375fe69e6ae816c17f596f1d1231ad7c29cbc | [
"MIT"
] | permissive | xtenex/vm-console-client-python | 1b33abb6d8c0d368da16dd182f44a3ad20f090ee | 3e04e5aa4a15274ec0bcd8be38d306e42b0c9a59 | refs/heads/master | 2021-04-27T03:01:56.645918 | 2018-02-14T22:46:34 | 2018-02-14T22:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,793 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. 
All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. 
| `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. 
In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. 
The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. 
| | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... 
``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. 
Allowed operations are returned and may be performed on the resource. | | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. 
| Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. 
The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. 
Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` 
| `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` 
` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | 
`cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | ----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `string` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. 
Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. 
The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. 
For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DiskTotal(object):
    """Swagger model describing a total amount of disk space.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'bytes': 'int',
        'formatted': 'str'
    }

    attribute_map = {
        'bytes': 'bytes',
        'formatted': 'formatted'
    }

    def __init__(self, bytes=None, formatted=None):  # noqa: E501
        """DiskTotal - a model defined in Swagger"""  # noqa: E501
        self._bytes = None
        self._formatted = None
        self.discriminator = None
        # Route the initial values through the property setters.
        if bytes is not None:
            self.bytes = bytes
        if formatted is not None:
            self.formatted = formatted

    @property
    def bytes(self):
        """The raw value in bytes.

        :return: The bytes of this DiskTotal.
        :rtype: int
        """
        return self._bytes

    @bytes.setter
    def bytes(self, bytes):
        """Sets the bytes of this DiskTotal (the raw value in bytes).

        :param bytes: The bytes of this DiskTotal.
        :type: int
        """
        self._bytes = bytes

    @property
    def formatted(self):
        """The value formatted in human-readable notation (e.g. GB, MB, KB, bytes).

        :return: The formatted of this DiskTotal.
        :rtype: str
        """
        return self._formatted

    @formatted.setter
    def formatted(self, formatted):
        """Sets the formatted of this DiskTotal (human-readable notation).

        :param formatted: The formatted of this DiskTotal.
        :type: str
        """
        self._formatted = formatted

    def to_dict(self):
        """Returns the model properties as a dict"""
        def serialize(value):
            # Nested swagger models know how to serialize themselves.
            return value.to_dict() if hasattr(value, "to_dict") else value

        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [serialize(item) for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: serialize(item)
                                for key, item in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, DiskTotal) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"zachary_youtz@rapid7.com"
] | zachary_youtz@rapid7.com |
a5f00db22afe958b88f3b951e3100919543dcdf9 | 77c641fd0708b279dddbe01f6af32a8531b93185 | /marketsim/gen/_intrinsic/orderbook/of_trader.py | 3818a80cc72db0e909b4478f6c75154923f96bcb | [] | no_license | abensrhir/marketsimulator | aea286afd2bb2e0c8a547bfa879601aef21c0cd5 | f9f55c72fb34cdbec42b96737ca20839f26c6299 | refs/heads/master | 2020-12-13T20:55:55.795344 | 2014-02-24T22:52:24 | 2014-02-24T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | from marketsim import types
from marketsim.gen._out.trader._singleproxy import SingleProxy
from marketsim import getLabel
class Base(object):
    """Common base for order book proxies.

    Unknown attribute lookups are delegated to the wrapped implementation
    stored in ``_impl`` (a subclass responsibility: either an instance
    attribute or a property).
    """

    _properties = {}

    def __getattr__(self, name):
        """Forward unknown attribute lookups to the wrapped implementation.

        Dunder names and ``_impl`` itself are never forwarded. Forwarding
        ``_impl`` would re-enter ``__getattr__`` and recurse infinitely
        whenever no implementation has been set; raising AttributeError
        immediately is the correct signal in that case.
        """
        if name.startswith('__') or name == '_impl':
            raise AttributeError(name)
        if self._impl:
            return getattr(self._impl, name)
        raise AttributeError(name)

    def __str__(self):
        # Empty label while no implementation is bound.
        return getLabel(self._impl) if self._impl else ''

    def __repr__(self):
        return self.__str__()
class _OfTrader_Impl(Base):
    """Order book proxy that resolves to the book owned by a trader."""

    def __init__(self):
        # A single-proxy trader keeps the placeholder alias; any other
        # trader gets the generic 'OfTrader' label.
        if type(self.Trader) == SingleProxy:
            self._alias = ["$(TraderAsset)"]
        else:
            self._alias = ['OfTrader']
        Base.__init__(self)

    @property
    def _impl(self):
        # The trader may not have an order book bound yet; report None
        # instead of propagating the AttributeError.
        try:
            return self.Trader.orderBook
        except AttributeError:
            return None
class _Proxy_Impl(Base):
    """Late-bound order book proxy: empty until bind() attaches a context."""

    def __init__(self):
        self._impl = None
        Base.__init__(self)

    @property
    def label(self):
        # Placeholder label until an order book is bound.
        if self._impl:
            return self._impl.label
        return '$(OrderBook)'

    def bind(self, ctx):
        # A proxy may be bound exactly once.
        assert self._impl is None
        self._impl = ctx.orderbook
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
53c3f734336bf253cd01c5cc8db9119e31f584a6 | a1aba83b90285def84cc425c0b089dd632a01a51 | /py千峰/day1函数/func10.py | ac2e75f8e338836fa45056bb68612f7bafc0267b | [] | no_license | 15929134544/wangwang | 8ada14acb505576f07f01e37c936500ee95573a0 | 47f9abbf46f8d3cbc0698cb64c043735b06940d4 | refs/heads/master | 2023-05-11T19:59:54.462454 | 2021-05-25T15:19:43 | 2021-05-25T15:19:43 | 328,119,916 | 1 | 1 | null | 2021-05-11T16:13:18 | 2021-01-09T09:33:29 | JavaScript | UTF-8 | Python | false | false | 873 | py | # global 变量的范围
# Global vs. local variables.
# A variable declared outside any function is global: every function can read it.
name = '月月'
def func():
    """Show a local variable next to read-only access to the global `name`."""
    # `text` exists only inside this function.
    text = 'abcd'
    text = text + 'X'
    # Reading a global requires no declaration.
    print(text, name)
def func1():
    """Append to the module-level `name`, demonstrating the `global` keyword."""
    # Reading or printing a global needs no declaration, but rebinding it
    # inside a function requires `global <name>`; afterwards the change is
    # visible at module level.
    global name
    name += '弹吉他的小美女'
    print(name)

# Note: locals may be rebound freely inside a function body, but a global
# cannot be rebound there without the `global` declaration above.
def func2():
    """Shadow the module-level `name` with a local of the same name."""
    # Assignment creates a brand-new local; the global `name` is untouched.
    name = '小月月' + '弹吉他的小美女'
    print(name)
# print(s)  # would raise NameError: `s` is local to func and not visible here.
func1()
func2()
| [
"you@example.com"
] | you@example.com |
326693eabcfe1a9d41f11f7c08ff574a844a8568 | bdb1c323968cd9d5441a187a29ed7e25a2e4f07e | /slave_server/runner/test_scripts/bxtp_ivi_m/operation_lib/base_lib/makelog.py | b80127c92706da5548863284c0dc568cdaff53c0 | [] | no_license | liangzhaowang/automation_system | beee351dd9f09a51e2b81617ac5bee63023ea9b8 | f77ef433c2366253dc9d9fdb7c54911cb38ed3e8 | refs/heads/master | 2022-02-19T11:07:44.047000 | 2019-09-23T02:16:00 | 2019-09-23T02:16:00 | 209,732,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | #!/usr/bin/env python
# coding=utf-8
import os, sys
import logging.config
import logging
import time
class makelog():
    """Configure root logging to write to a file and to the console.

    The log directory is created on demand, then DEBUG-level file and
    stream handlers are attached to the root logger.

    :param filename: log file name, appended directly to ``filepath``.
    :param filepath: directory of the log file (expected to end with '/');
                     created if it does not exist.
    """

    def __init__(self, filename="", filepath=""):
        self.filename = filename
        self.filepath = filepath
        self.makelogfile()
        self.logger = logging.getLogger()  # root logger
        self.write()

    def makelogfile(self):
        """Create the log directory if it does not exist yet."""
        # os.makedirs replaces the previous os.system('mkdir %s') call,
        # which was shell-injection prone, failed on paths containing
        # spaces, could not create nested directories, and relied on an
        # unbounded recursive retry that never terminated on failure.
        if not os.path.exists(self.filepath):
            print(self.filepath)
            os.makedirs(self.filepath)

    def write(self):
        """Attach DEBUG-level file and console handlers to the root logger."""
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] [%(funcName)s] %(message)s")
        fh = logging.FileHandler(self.filepath + self.filename)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        # The previous logging.basicConfig(filename=...) call attached a
        # third handler to the root logger, so every record was written
        # to the log file twice; it has been removed.
        self.logger.addHandler(ch)
        self.logger.addHandler(fh)
self.logger.addHandler(fh)
# Demo usage: build a logger writing to ./log_info/log_message_20170912_075156.txt
file_path = "./log_info/"
file_name = "log_message_20170912_075156.txt"
print file_path
log_info = makelog(filepath = file_path, filename = file_name)
| [
"zhaowangx.liang@intel.com"
] | zhaowangx.liang@intel.com |
45f051b6b85c7aa298756f2cbe0f5c6d051359c2 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/Python_RegEx_metacharacters__exactly_the_specifies_number_of_occurrences.txt.py | 75b978dc9b6e5add1c1041161a9bbb0f41ebfb6f | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 281 | py | import re
txt = "The rain in Spain falls mainly in the plain!"
# Check if the string contains "a" followed by exactly two "l" characters:
x = re.findall("al{2}", txt)
print(x)
if x:
print("Yes, there is at least one match!")
else:
print("No match")
# Author: Bryan G
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
fba115b2085192f099c00764f8bfc49dcc98f713 | 279caab77d0731196b82548a35e6e61334a2141e | /n_gram_segmentation/segmenter.py | e49613368a50556db728e07bc4ae5cd18bf4e66a | [] | no_license | VitalyRomanov/segmented-embeddings | 52d6382feb36c65e12c513535a7bc5f0793d85ce | 9a8d4b897214e73b0ce18621b9ac121085c88e3a | refs/heads/master | 2022-10-19T22:38:08.148913 | 2022-10-03T14:31:53 | 2022-10-03T14:31:53 | 172,964,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | import sys
import pickle
file_path = sys.argv[1]
lang = sys.argv[2]
words = open(file_path, "r").read().strip().split("\n")
char_gram_size_min = 3
char_gram_size_max = 4
char_grams = set()
def segment_recursively(dest, win, word):
    # Emit character n-grams of `word` with lengths between the module-level
    # globals char_gram_size_min and char_gram_size_max, appending them to
    # `dest` in place.
    # Strategy: record the current window (first `win` chars), then
    #  - grow the window while it is below the maximum size,
    #  - at maximum size slide one character to the right,
    #  - near the tail shrink the window back toward the minimum size.
    # NOTE(review): relies on char_gram_size_min/max being defined at module
    # level; callers must pass a fresh list for `dest`.
    dest.append(word[:win])
    if win < char_gram_size_max and len(word) > win:
        segment_recursively(dest, win+1, word)
    elif win == char_gram_size_max and len(word) > win:
        segment_recursively(dest, win, word[1:])
    else:
        if win > char_gram_size_min:
            segment_recursively(dest, win-1, word[1:])
def get_grams(w):
    """Return the character n-grams for token *w*.

    Tokens already wrapped in angle brackets (``w[0] == '<'``) are treated as
    atomic symbols and returned as a single-element list; any other word is
    wrapped as ``<w>`` and decomposed by segment_recursively() into n-grams of
    length char_gram_size_min..char_gram_size_max (module globals).
    """
    if w[0] == '<':
        grams = [w]
    else:
        # BUG FIX: the original wrapped the module-level loop variable `word`
        # instead of the parameter `w`, so the function only behaved correctly
        # when it happened to be called as get_grams(word) from the main loop.
        w = '<' + w + '>'
        grams = []
        segment_recursively(grams, char_gram_size_min, w)
    return grams
# Write "<word>\t<gram1> <gram2> ..." for every vocabulary word, collecting
# the set of all distinct n-grams along the way.
with open("{}_word_{}_grams.txt".format(lang, char_gram_size_min), "w") as word_grams:
    for word in words:
        word_grams.write(word)
        word_grams.write("\t")
        grams = get_grams(word)
        for g in grams:
            word_grams.write(g)
            word_grams.write(" ")
            char_grams.add(g)
        word_grams.write("\n")
# Assign each distinct n-gram a stable integer id (sorted order).
grams = list(char_grams)
grams.sort()
grams_dict = {}
for id_, g in enumerate(grams):
    grams_dict[g] = id_
print(len(grams))
# Map word id -> list of its n-gram ids, then persist the mapping.
word2gram = {}
for id_, word in enumerate(words):
    word2gram[id_] = [grams_dict[g] for g in get_grams(word)]
pickle.dump(word2gram, open("%s_word2segment.pkl" % lang, "wb"))
pickle.dump(grams_dict, open("%s_segment2id.pkl" % lang , "wb")) | [
"mortiv16@gmail.com"
] | mortiv16@gmail.com |
ed08a844307b2d776880dd97684ac94e4920196a | da9c4a9a92d49d2fb2983a54e0f64c2a1ce8aa19 | /symphony/cli/pyinventory/graphql/mutation/edit_equipment_port.py | 43a16d01545cd8442f49d0bcedae9fa6a4d02f20 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | rohan-prasad/magma | 347c370347724488215a0783504788eac41d8ec7 | 2c1f36d2fd04eae90366cc8b314eaab656d7f8ad | refs/heads/master | 2022-10-14T14:08:14.067593 | 2020-06-11T23:52:03 | 2020-06-11T23:54:27 | 271,671,835 | 0 | 0 | NOASSERTION | 2020-06-12T00:20:23 | 2020-06-12T00:17:39 | null | UTF-8 | Python | false | false | 3,035 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.link import LinkFragment, QUERY as LinkFragmentQuery
from ..fragment.property import PropertyFragment, QUERY as PropertyFragmentQuery
from ..input.edit_equipment_port import EditEquipmentPortInput
QUERY: List[str] = LinkFragmentQuery + PropertyFragmentQuery + ["""
mutation EditEquipmentPortMutation($input: EditEquipmentPortInput!) {
editEquipmentPort(input: $input) {
id
properties {
...PropertyFragment
}
definition {
id
name
portType {
id
name
}
}
link {
...LinkFragment
}
}
}
"""]
@dataclass
class EditEquipmentPortMutation(DataClassJsonMixin):
@dataclass
class EditEquipmentPortMutationData(DataClassJsonMixin):
@dataclass
class EquipmentPort(DataClassJsonMixin):
@dataclass
class Property(PropertyFragment):
pass
@dataclass
class EquipmentPortDefinition(DataClassJsonMixin):
@dataclass
class EquipmentPortType(DataClassJsonMixin):
id: str
name: str
id: str
name: str
portType: Optional[EquipmentPortType]
@dataclass
class Link(LinkFragment):
pass
id: str
properties: List[Property]
definition: EquipmentPortDefinition
link: Optional[Link]
editEquipmentPort: EquipmentPort
data: EditEquipmentPortMutationData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, input: EditEquipmentPortInput) -> EditEquipmentPortMutationData.EquipmentPort:
# fmt: off
variables = {"input": input}
try:
network_start = perf_counter()
response_text = client.call(''.join(set(QUERY)), variables=variables)
decode_start = perf_counter()
res = cls.from_json(response_text).data
decode_time = perf_counter() - decode_start
network_time = decode_start - network_start
client.reporter.log_successful_operation("EditEquipmentPortMutation", variables, network_time, decode_time)
return res.editEquipmentPort
except OperationException as e:
raise FailedOperationException(
client.reporter,
e.err_msg,
e.err_id,
"EditEquipmentPortMutation",
variables,
)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
578eae42b892dd158465af5f918e947bc035c9b9 | c4240f5084a4d564e3e49a722458c91c5eef0d35 | /for_3b1b_videos/pi_creature.py | ea688c4677b13b382c8103ac0f32c93997169b13 | [
"MIT"
] | permissive | HdXu/manim | 8619089153e72aa32a6fd0995bde406cc22bc42e | 7c272c6236926bc4e521fa37189d8d1d11a1b59c | refs/heads/master | 2020-03-08T23:33:36.842315 | 2018-04-06T20:09:53 | 2018-04-06T20:09:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,222 | py | import numpy as np
from constants import *
from mobject.mobject import Mobject
from mobject.svg.svg_mobject import SVGMobject
from mobject.svg.tex_mobject import TextMobject
from mobject.types.vectorized_mobject import VGroup
from mobject.types.vectorized_mobject import VMobject
from mobject.svg.drawings import ThoughtBubble
from animation.transform import Transform
from utils.config_ops import digest_config
from utils.rate_functions import squish_rate_func
from utils.rate_functions import there_and_back
# Directory holding the PiCreature SVG designs (one file per mode).
# NOTE(review): `os` is not imported explicitly above -- presumably provided
# by `from constants import *`; confirm.
PI_CREATURE_DIR = os.path.join(MEDIA_DIR, "designs", "PiCreature")
PI_CREATURE_SCALE_FACTOR = 0.5

# Indices of the named body parts inside the parsed SVG's submobject list.
LEFT_EYE_INDEX = 0
RIGHT_EYE_INDEX = 1
LEFT_PUPIL_INDEX = 2
RIGHT_PUPIL_INDEX = 3
BODY_INDEX = 4
MOUTH_INDEX = 5
class PiCreature(SVGMobject):
    """The pi-shaped character used throughout 3b1b videos.

    Loads an SVG named "<file_name_prefix>_<mode>.svg" from PI_CREATURE_DIR
    and exposes its parts (eyes, pupils, body, mouth) as attributes, plus
    helpers for looking, blinking, changing expression and speech bubbles.
    NOTE(review): this file uses Python 2 syntax (see get_arm_copies).
    """
    CONFIG = {
        "color" : BLUE_E,
        "file_name_prefix" : "PiCreatures",
        "stroke_width" : 0,
        "stroke_color" : BLACK,
        "fill_opacity" : 1.0,
        "propagate_style_to_family" : True,
        "height" : 3,
        "corner_scale_factor" : 0.75,
        "flip_at_start" : False,
        "is_looking_direction_purposeful" : False,
        "start_corner" : None,
        #Range of proportions along body where arms are
        "right_arm_range" : [0.55, 0.7],
        "left_arm_range" : [.34, .462],
    }
    def __init__(self, mode = "plain", **kwargs):
        # `mode` selects the facial expression (which SVG file is loaded).
        digest_config(self, kwargs)
        self.parts_named = False
        try:
            svg_file = os.path.join(
                PI_CREATURE_DIR,
                "%s_%s.svg"%(self.file_name_prefix, mode)
            )
            SVGMobject.__init__(self, file_name = svg_file, **kwargs)
        except:
            # NOTE(review): bare except hides unrelated errors; fall back to
            # the plain design when the requested mode's SVG is missing.
            # `warnings` is not visibly imported -- confirm it comes from a
            # wildcard import.
            warnings.warn("No %s design with mode %s"%(self.file_name_prefix, mode))
            svg_file = os.path.join(
                FILE_DIR,
                "PiCreatures_plain.svg",
            )
            SVGMobject.__init__(self, file_name = svg_file, **kwargs)
        if self.flip_at_start:
            self.flip()
        if self.start_corner is not None:
            self.to_corner(self.start_corner)
    def name_parts(self):
        # Bind named attributes to the fixed submobject slots of the SVG.
        self.mouth = self.submobjects[MOUTH_INDEX]
        self.body = self.submobjects[BODY_INDEX]
        self.pupils = VGroup(*[
            self.submobjects[LEFT_PUPIL_INDEX],
            self.submobjects[RIGHT_PUPIL_INDEX]
        ])
        self.eyes = VGroup(*[
            self.submobjects[LEFT_EYE_INDEX],
            self.submobjects[RIGHT_EYE_INDEX]
        ])
        self.eye_parts = VGroup(self.eyes, self.pupils)
        self.parts_named = True
    def init_colors(self):
        # Fill each part with its canonical color (body uses CONFIG color).
        SVGMobject.init_colors(self)
        if not self.parts_named:
            self.name_parts()
        self.mouth.set_fill(BLACK, opacity = 1)
        self.body.set_fill(self.color, opacity = 1)
        self.pupils.set_fill(BLACK, opacity = 1)
        self.eyes.set_fill(WHITE, opacity = 1)
        return self
    def copy(self):
        # Re-run name_parts so the copy's attributes point at ITS submobjects.
        copy_mobject = SVGMobject.copy(self)
        copy_mobject.name_parts()
        return copy_mobject
    def set_color(self, color):
        # Only the body carries the creature's color.
        self.body.set_fill(color)
        return self
    def change_mode(self, mode):
        # Swap in a freshly loaded creature of the new mode, preserving
        # height, flip state, eye position and looking direction.
        new_self = self.__class__(
            mode = mode,
            color = self.color
        )
        new_self.scale_to_fit_height(self.get_height())
        if self.is_flipped() ^ new_self.is_flipped():
            new_self.flip()
        new_self.shift(self.eyes.get_center() - new_self.eyes.get_center())
        if hasattr(self, "purposeful_looking_direction"):
            new_self.look(self.purposeful_looking_direction)
        Transform(self, new_self).update(1)
        return self
    def look(self, direction):
        # Point both pupils along `direction` (normalized), keeping each
        # pupil inside its eye.
        norm = np.linalg.norm(direction)
        if norm == 0:
            return
        direction /= norm
        self.purposeful_looking_direction = direction
        for pupil, eye in zip(self.pupils.split(), self.eyes.split()):
            pupil_radius = pupil.get_width()/2.
            eye_radius = eye.get_width()/2.
            pupil.move_to(eye)
            if direction[1] < 0:
                pupil.shift(pupil_radius*DOWN/3)
            pupil.shift(direction*(eye_radius-pupil_radius))
            bottom_diff = eye.get_bottom()[1] - pupil.get_bottom()[1]
            if bottom_diff > 0:
                pupil.shift(bottom_diff*UP)
            #TODO, how to handle looking up...
            # top_diff = eye.get_top()[1]-pupil.get_top()[1]
            # if top_diff < 0:
            #     pupil.shift(top_diff*UP)
        return self
    def look_at(self, point_or_mobject):
        # Accepts either a mobject (looks at its center) or a raw point.
        if isinstance(point_or_mobject, Mobject):
            point = point_or_mobject.get_center()
        else:
            point = point_or_mobject
        self.look(point - self.eyes.get_center())
        return self
    def change(self, new_mode, look_at_arg = None):
        # Convenience: change expression and optionally redirect the gaze.
        self.change_mode(new_mode)
        if look_at_arg is not None:
            self.look_at(look_at_arg)
        return self
    def get_looking_direction(self):
        # Sign vector of pupil offset relative to the eyes (rounded to 2dp).
        return np.sign(np.round(
            self.pupils.get_center() - self.eyes.get_center(),
            decimals = 2
        ))
    def is_flipped(self):
        # Flipped when the "left" eye sits to the right of the "right" eye.
        return self.eyes.submobjects[0].get_center()[0] > \
               self.eyes.submobjects[1].get_center()[0]
    def blink(self):
        # Flatten both eyes and pupils down to the eye-bottom y coordinate.
        eye_parts = self.eye_parts
        eye_bottom_y = eye_parts.get_bottom()[1]
        eye_parts.apply_function(
            lambda p : [p[0], eye_bottom_y, p[2]]
        )
        return self
    def to_corner(self, vect = None, **kwargs):
        # With no direction given: shrink and park in the lower-left corner.
        if vect is not None:
            SVGMobject.to_corner(self, vect, **kwargs)
        else:
            self.scale(self.corner_scale_factor)
            self.to_corner(DOWN+LEFT, **kwargs)
        return self
    def get_bubble(self, *content, **kwargs):
        # Build a speech/thought bubble pinned to this creature; `content`
        # may be a string sequence (rendered as TextMobject) or one mobject.
        bubble_class = kwargs.get("bubble_class", ThoughtBubble)
        bubble = bubble_class(**kwargs)
        if len(content) > 0:
            if isinstance(content[0], str):
                content_mob = TextMobject(*content)
            else:
                content_mob = content[0]
            bubble.add_content(content_mob)
            if "height" not in kwargs and "width" not in kwargs:
                bubble.resize_to_content()
        bubble.pin_to(self)
        self.bubble = bubble
        return bubble
    def make_eye_contact(self, pi_creature):
        # Both creatures look at each other's eyes.
        self.look_at(pi_creature.eyes)
        pi_creature.look_at(self.eyes)
        return self
    def shrug(self):
        # Switch to the shrug pose and glance along the mouth's tilt.
        self.change_mode("shruggie")
        top_mouth_point, bottom_mouth_point = [
            self.mouth.points[np.argmax(self.mouth.points[:,1])],
            self.mouth.points[np.argmin(self.mouth.points[:,1])]
        ]
        self.look(top_mouth_point - bottom_mouth_point)
        return self
    def get_arm_copies(self):
        # Partial copies of the body outline covering the arm proportions.
        # NOTE(review): `for x in a, b` inside a comprehension is Python 2
        # ONLY syntax; this line is a SyntaxError under Python 3.
        body = self.body
        return VGroup(*[
            body.copy().pointwise_become_partial(body, *alpha_range)
            for alpha_range in self.right_arm_range, self.left_arm_range
        ])
def get_all_pi_creature_modes():
    """List every mode name for which a PiCreature SVG exists on disk."""
    prefix = "%s_" % PiCreature.CONFIG["file_name_prefix"]
    suffix = ".svg"
    return [
        file_name[len(prefix):-len(suffix)]
        for file_name in os.listdir(PI_CREATURE_DIR)
        if file_name.startswith(prefix) and file_name.endswith(suffix)
    ]
class Randolph(PiCreature):
    """Alternative name for the default (blue) PiCreature."""
    pass #Nothing more than an alternative name
class Mortimer(PiCreature):
    # Brown creature that faces the opposite way (flipped at creation).
    CONFIG = {
        "color" : GREY_BROWN,
        "flip_at_start" : True,
    }
class Mathematician(PiCreature):
    # Grey variant of the standard creature.
    CONFIG = {
        "color" : GREY,
    }
class BabyPiCreature(PiCreature):
    """Half-size creature with proportionally larger eyes and pupils."""
    CONFIG = {
        "scale_factor" : 0.5,
        "eye_scale_factor" : 1.2,
        "pupil_scale_factor" : 1.3
    }
    def __init__(self, *args, **kwargs):
        PiCreature.__init__(self, *args, **kwargs)
        # Shrink the whole creature and park it near the bottom edge.
        self.scale(self.scale_factor)
        self.shift(LEFT)
        self.to_edge(DOWN, buff = LARGE_BUFF)
        # Enlarge the eyes while keeping their bottom edge fixed.
        eyes = VGroup(self.eyes, self.pupils)
        eyes_bottom = eyes.get_bottom()
        eyes.scale(self.eye_scale_factor)
        eyes.move_to(eyes_bottom, aligned_edge = DOWN)
        # Enlarge pupils in place, then restore the original gaze direction.
        looking_direction = self.get_looking_direction()
        for pupil in self.pupils:
            pupil.scale_in_place(self.pupil_scale_factor)
        self.look(looking_direction)
class TauCreature(PiCreature):
    # Same behavior, but loads the "TauCreatures_<mode>.svg" designs.
    CONFIG = {
        "file_name_prefix" : "TauCreatures"
    }
class ThreeLeggedPiCreature(PiCreature):
    # Same behavior, but loads the "ThreeLeggedPiCreatures_<mode>.svg" designs.
    CONFIG = {
        "file_name_prefix" : "ThreeLeggedPiCreatures"
    }
class Eyes(VMobject):
    """A detached pair of pi-creature eyes placed atop an arbitrary mobject.

    The eyes are borrowed from a throwaway Randolph of the given mode; the
    *_anim helpers return Transform animations for expression changes,
    gaze redirection and blinking.
    """
    CONFIG = {
        "height" : 0.3,
        "thing_looked_at" : None,
        "mode" : "plain",
    }
    def __init__(self, mobject, **kwargs):
        VMobject.__init__(self, **kwargs)
        self.mobject = mobject
        self.submobjects = self.get_eyes().submobjects
    def get_eyes(self, mode = None, thing_to_look_at = None):
        # Build a fresh eye pair; defaults come from the current state.
        mode = mode or self.mode
        if thing_to_look_at is None:
            thing_to_look_at = self.thing_looked_at
        pi = Randolph(mode = mode)
        eyes = VGroup(pi.eyes, pi.pupils)
        pi.scale(self.height/eyes.get_height())
        # First call: sit on top of the host mobject; later: stay in place.
        if self.submobjects:
            eyes.move_to(self, DOWN)
        else:
            eyes.move_to(self.mobject.get_top(), DOWN)
        if thing_to_look_at is not None:
            pi.look_at(thing_to_look_at)
        return eyes
    def change_mode_anim(self, mode, **kwargs):
        # Animation morphing to the given expression.
        self.mode = mode
        return Transform(self, self.get_eyes(mode = mode), **kwargs)
    def look_at_anim(self, point_or_mobject, **kwargs):
        # Animation redirecting the gaze toward a point or mobject.
        self.thing_looked_at = point_or_mobject
        return Transform(
            self, self.get_eyes(thing_to_look_at = point_or_mobject),
            **kwargs
        )
    def blink_anim(self, **kwargs):
        # Flatten to the bottom edge and (by default) bounce back.
        target = self.copy()
        bottom_y = self.get_bottom()[1]
        for submob in target:
            submob.apply_function(
                lambda p : [p[0], bottom_y, p[2]]
            )
        if "rate_func" not in kwargs:
            kwargs["rate_func"] = squish_rate_func(there_and_back)
        return Transform(self, target, **kwargs)
| [
"grant@3blue1brown.com"
] | grant@3blue1brown.com |
ed3b3e49d2373541f1c4a08baaea4b9b8235163d | b03497e9c38e27aac47792c30ad0e2945ed2fca9 | /mqtt.py | ca15170b9e14dc176b9a957e70000f7c57d3ba22 | [] | no_license | ThomasMoellerR/11_02_rpi_cube | c92522e0d2dd910a383c83dd49d55ddb06b0c1b4 | 54c2a8ea6e24a7fa358773a72dade8c1354d1b37 | refs/heads/master | 2020-04-17T11:41:15.158843 | 2019-11-02T13:26:15 | 2019-11-02T13:26:15 | 166,550,814 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | import paho.mqtt.client as pmc
import time
import queue
class c_mqtt:
    """Thin wrapper around a paho-mqtt client.

    Connection state is tracked explicitly so that loop() can keep retrying,
    and every received message is pushed onto an internal queue as a
    (topic, payload) tuple for the application to consume via empty()/get().
    """

    def __init__(self, hostname="192.168.178.52", port="1880", sub_list=None):
        """Create the client; the actual connection is deferred to loop().

        BUG FIX: the original signature used the mutable default
        ``sub_list=[]``, which is shared between every instance constructed
        without an explicit list, so sub() calls leaked across instances.
        """
        self.hostname = hostname
        self.port = port
        self.try_to_connect = True
        self.sub_list = [] if sub_list is None else sub_list
        self.connected = False
        self.q = queue.Queue()
        self.was_connected = False
        self.client = pmc.Client()
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message

    def on_connect(self, client, userdata, flags, rc):
        # rc (result code) 0 means the broker accepted the connection.
        if rc == 0:
            print("Successfully connected to broker")
            self.connected = True
        else:
            print("Error while trying to connect to broker")
            self.connected = False
        # (Re-)subscribe to every requested topic after each connect attempt.
        for topic in self.sub_list:
            self.client.subscribe(topic)

    def on_message(self, client, userdata, msg):
        # Decode the payload and hand (topic, message) to the consumer queue.
        self.q.put((msg.topic, msg.payload.decode("utf-8")))

    def loop(self):
        """Connect (retrying on each call) and then block in the MQTT loop."""
        if self.try_to_connect:
            if self.was_connected:
                time.sleep(1)  # back off a little before reconnecting
            print("Try to connect to broker", self.hostname, int(self.port))
            try:
                self.client.connect(self.hostname, int(self.port), 60)
                self.try_to_connect = False
                self.connected = True
                self.was_connected = True
            except Exception as e:
                print(e)
                self.connected = False
        if self.connected:
            try:
                self.client.loop_forever()
            except Exception as e:
                # Network drop etc.: schedule a reconnect on the next loop() call.
                print(e)
                self.try_to_connect = True
                self.connected = False

    def pub(self, topic, msg):
        # Publish only while connected; silently drop otherwise (original behavior).
        if self.connected:
            self.client.publish(topic, msg, qos=0, retain=False)

    def set_connection_state(self, state):
        self.connected = state

    def get_connection_state(self):
        return self.connected

    def sub(self, topic):
        # Topics added here are subscribed on the next (re)connect.
        self.sub_list.append(topic)

    def empty(self):
        # True when no received message is waiting.
        return self.q.empty()

    def get(self):
        # Blocking fetch of the next (topic, payload) tuple.
        return self.q.get()
| [
"test"
] | test |
53fa7be54b523395a65b6f0b0053527f50bfa22f | 4015291afebfd346da3fee4b1d5a775882b5b461 | /packages/service-library/src/servicelib/rest_constants.py | e03667f15f28a500dff9aaa58b93cb6ee3f2a129 | [
"MIT"
] | permissive | pcrespov/osparc-simcore | 3a8a6b5252038542f515c7e90d983ac6f1fb4de7 | eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596 | refs/heads/master | 2023-08-06T04:33:38.594066 | 2023-07-12T09:47:00 | 2023-07-12T09:47:00 | 130,357,545 | 0 | 1 | MIT | 2023-04-18T08:04:27 | 2018-04-20T12:10:41 | Python | UTF-8 | Python | false | false | 212 | py | # SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeldict
RESPONSE_MODEL_POLICY = {
"by_alias": True,
"exclude_unset": True,
"exclude_defaults": False,
"exclude_none": False,
}
| [
"noreply@github.com"
] | pcrespov.noreply@github.com |
16c41ea1c7c2797237e891f1ac38e86347e93b15 | d59bf974dd42d74dae62f58c0272ceb246d935c9 | /7.2.py | 9ef0a1a85e8beb93d1c39e39aa74b67020b39afb | [] | no_license | Serega1000rr/Ser | 423d2de1ba1fcc1f3f684363b90f05d018fb1306 | e349cb8b5c7aea333a78e448e7edfaa6c13edd61 | refs/heads/main | 2023-05-14T02:39:47.855859 | 2021-06-07T10:46:56 | 2021-06-07T10:46:56 | 374,630,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | things=['mozarella','cinderella','salmonella']
print(things[0].capitalize())
things[1]=things[1].upper()
print(things)
del things[2]
print(things) | [
"unknown@example.com"
] | unknown@example.com |
2ee91074d8f40b5b85c430e2d87d4936587af0df | 177d7066f6a0326ed937a56174d7e2241653929a | /Array&String/lc4.py | 4921ef92374c02b329b3c45acd8dc4842c62be5f | [] | no_license | jasonusaco/Leetcode-Practice | 276bcdb62b28806b3d297338882f4b1eef56cc13 | 91dc73202eb9952a6064013ef4ed20dfa4137c01 | refs/heads/master | 2020-07-06T08:29:09.419062 | 2019-10-10T01:43:03 | 2019-10-10T01:43:03 | 202,955,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
| [
"yangyx@raysdata.com"
] | yangyx@raysdata.com |
09b95d91f2931902d3ccab96b894edd1818d2827 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/common/gun_rotation_shared.py | 04cdb63b1822d5294990244854eebc8db0ae3d35 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,744 | py | # 2017.02.03 21:54:59 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/gun_rotation_shared.py
import BigWorld
import Math
from math import pi
from constants import IS_CLIENT, IS_CELLAPP
from debug_utils import *
if IS_CELLAPP:
from server_constants import MAX_VEHICLE_RADIUS
def calcPitchLimitsFromDesc(turretYaw, pitchLimitsDesc):
    """Resolve the gun pitch limits at the given turret yaw via the engine helper."""
    limits = pitchLimitsDesc
    return BigWorld.wg_calcGunPitchLimits(turretYaw, limits['minPitch'], limits['maxPitch'])
def encodeAngleToUint(angle, bits):
    """Quantize an angle in [-pi, pi) onto an unsigned integer of *bits* bits."""
    buckets = 1 << bits
    code = int(round(buckets * (angle + pi) / (pi * 2.0)))
    # Wrap +pi back onto the 0 bucket.
    return code & (buckets - 1)
def decodeAngleFromUint(code, bits):
    """Inverse of encodeAngleToUint: map an unsigned code back into [-pi, pi)."""
    span = 1 << bits
    return pi * 2.0 * code / span - pi
def encodeRestrictedValueToUint(angle, bits, minBound, maxBound):
    """Quantize *angle*, restricted to [minBound, maxBound], onto *bits* bits."""
    if maxBound == minBound:
        t = 0
    else:
        t = (angle - minBound) / (maxBound - minBound)
    # Clamp the normalized position into [0, 1] (was a _clamp() call).
    if t < 0.0:
        t = 0.0
    elif t > 1.0:
        t = 1.0
    mask = (1 << bits) - 1
    return int(round(mask * t)) & mask
def decodeRestrictedValueFromUint(code, bits, minBound, maxBound):
    """Inverse of encodeRestrictedValueToUint: map a code back into [minBound, maxBound]."""
    mask = (1 << bits) - 1
    t = float(code) / mask
    return minBound + t * (maxBound - minBound)
def encodeGunAngles(yaw, pitch, pitchLimits):
    """Pack yaw (10 bits) and pitch (6 bits, restricted to pitchLimits) into one int."""
    yawCode = encodeAngleToUint(yaw, 10)
    pitchCode = encodeRestrictedValueToUint(pitch, 6, *pitchLimits)
    return (yawCode << 6) | pitchCode
def decodeGunAngles(code, pitchLimits):
    """Unpack the (yaw, pitch) pair packed by encodeGunAngles."""
    yaw = decodeAngleFromUint((code >> 6) & 1023, 10)
    pitch = decodeRestrictedValueFromUint(code & 63, 6, *pitchLimits)
    return (yaw, pitch)
def _clamp(minBound, value, maxBound):
if value < minBound:
return minBound
if value > maxBound:
return maxBound
return value
def isShootPositionInsideOtherVehicle(vehicle, turretPosition, shootPosition):
    # True when the muzzle/shoot position sits inside some OTHER vehicle's
    # hull, i.e. the turret->shoot segment collides with a nearby vehicle.
    # The neighbour lookup differs between client and cell (server) builds;
    # the appropriate helper is defined fresh on every call.
    # NOTE(review): decompiled Python 2 code (iterkeys); `id` shadows the
    # builtin. If neither IS_CLIENT nor IS_CELLAPP is set, getNearVehicles
    # is undefined and the call below raises NameError.
    if IS_CLIENT:
        def getNearVehicles(vehicle, shootPosition):
            # Client side: scan all arena entities, skipping the player's own.
            nearVehicles = []
            arenaVehicles = BigWorld.player().arena.vehicles
            for id in arenaVehicles.iterkeys():
                v = BigWorld.entities.get(id)
                if v and not v.isPlayerVehicle:
                    nearVehicles.append(v)
            return nearVehicles
    elif IS_CELLAPP:
        def getNearVehicles(vehicle, shootPosition):
            # Server side: engine-provided spatial query around shootPosition.
            return vehicle.entitiesInRange(MAX_VEHICLE_RADIUS, 'Vehicle', shootPosition)
    nearVehicles = getNearVehicles(vehicle, shootPosition)
    for v in nearVehicles:
        # Cheap bounding-radius rejection before the precise segment test.
        if shootPosition.distTo(v.position) < v.typeDescriptor.boundingRadius and isSegmentCollideWithVehicle(v, turretPosition, shootPosition):
            return True
    return False
def isSegmentCollideWithVehicle(vehicle, startPoint, endPoint):
    # True when the world-space segment startPoint->endPoint hits any attached
    # component of `vehicle` (its gun excluded). Points are transformed into
    # vehicle space, then into each component's local space for the hit test.
    # Client and cell builds read the vehicle matrix from different places.
    if IS_CLIENT:
        def getVehicleSpaceMatrix(vehicle):
            # World -> vehicle space (inverse of the model matrix).
            toVehSpace = Math.Matrix(vehicle.model.matrix)
            toVehSpace.invert()
            return toVehSpace
        def getVehicleComponents(vehicle):
            return vehicle.getComponents()
    elif IS_CELLAPP:
        def getVehicleSpaceMatrix(vehicle):
            toVehSpace = Math.Matrix(vehicle.mover.matrix)
            toVehSpace.invert()
            return toVehSpace
        def getVehicleComponents(vehicle):
            # Server needs the current gun angles to pose the components.
            return vehicle.getComponents(vehicle.gunAngles)
    toVehSpace = getVehicleSpaceMatrix(vehicle)
    vehStartPoint = toVehSpace.applyPoint(startPoint)
    vehEndPoint = toVehSpace.applyPoint(endPoint)
    for compDescr, toCompSpace, isAttached in getVehicleComponents(vehicle):
        # Skip detached parts and the gun itself (a shot starts at the gun).
        if not isAttached or compDescr.get('itemTypeName') == 'vehicleGun':
            continue
        compStartPoint = toCompSpace.applyPoint(vehStartPoint)
        compEndPoint = toCompSpace.applyPoint(vehEndPoint)
        collisions = compDescr['hitTester'].localAnyHitTest(compStartPoint, compEndPoint)
        if collisions is not None:
            return True
    return False
def getLocalAimPoint(vehicleDescriptor):
    # Compute the vehicle-local point at which to aim: the hull's horizontal
    # centre plus half the (z-limited) turret offset. Returns the origin for
    # a missing descriptor.
    if vehicleDescriptor is None:
        return Math.Vector3(0.0, 0.0, 0.0)
    else:
        hullBox = vehicleDescriptor.hull['hitTester'].bbox
        hullPosition = vehicleDescriptor.chassis['hullPosition']
        # Hull bbox centre in x/z, shifted by the hull mounting position.
        middleX = (hullBox[0].x + hullBox[1].x) * 0.5 + hullPosition.x
        middleZ = (hullBox[0].z + hullBox[1].z) * 0.5 + hullPosition.z
        calculatedHullPosition = (middleX, hullPosition.y, middleZ)
        turretPosition = vehicleDescriptor.hull['turretPositions'][0] * 0.5
        # Keep the turret's z contribution within 20% of the hull length.
        maxZOffset = abs(hullBox[1].z - hullBox[0].z) * 0.2
        turretPosition.z = max(-maxZOffset, min(maxZOffset, turretPosition.z))
        # NOTE(review): tuple + vector -- presumably Math.Vector3 supports
        # addition with a 3-tuple; confirm against the engine API.
        localAimPoint = calculatedHullPosition + turretPosition
        return localAimPoint
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\gun_rotation_shared.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:54:59 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
4913ad4bd0547ee7fe66485d15f1970ac3f1ce06 | ea90a06e4f953f51aeb97b6ab93e89f8ea9ffce0 | /backend/manage.py | c65cac1ad0c9ad3290ee24f664fed922bd10e718 | [] | no_license | crowdbotics-apps/roots-africa-28471 | 787b179413afa60ada1553462b1ef90f0c402183 | 7d1271a5b2496d98cfea919441824448e0790983 | refs/heads/master | 2023-06-15T19:11:02.174439 | 2021-07-05T23:44:09 | 2021-07-05T23:44:09 | 383,278,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for this project."""
    # Point Django at this project's settings unless the caller already
    # configured DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "roots_africa_28471.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
24f1ef6fd36b61bb20469a5bfc7613033a19d292 | c411c5513ec5d58eb0e0edab0b6a697974d638fb | /model/DeepLabV3.py | 0e3406abccfff10e00297a28aed6ff7b0ce8b37f | [] | no_license | blue88blue/Segmentation | ab7f9dec4ab1ab4cdb4b8ca5af0cb9e1a560e20f | 69c4db1897a550a08a63811ffbb817754c20fbf2 | refs/heads/master | 2023-03-01T06:58:49.405779 | 2021-01-27T02:07:56 | 2021-01-27T02:07:56 | 296,049,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,784 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from model.segbase import SegBaseModel
from model.model_utils import init_weights, _FCNHead
class DeepLabV3(SegBaseModel):
    """DeepLabV3 semantic segmentation network.

    An ASPP-based prediction head (_DeepLabHead) is attached to the deepest
    stage of a (optionally dilated) ResNet backbone; with ``aux=True`` an
    extra FCN head on the stage-3 features provides an auxiliary loss.

    Parameters
    ----------
    n_class : int
        Number of output classes.
    backbone : str
        Backbone identifier passed to SegBaseModel (e.g. 'resnet34').
    aux : bool
        Whether to build the auxiliary FCN head.
    pretrained_base, dilated : bool
        Forwarded to SegBaseModel.

    Reference:
        Chen et al., "Rethinking atrous convolution for semantic image
        segmentation", arXiv:1706.05587 (2017).
    """

    def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False, dilated=False, **kwargs):
        super(DeepLabV3, self).__init__(backbone, pretrained_base=pretrained_base, dilated=dilated, **kwargs)
        # Head consumes the channel count of the last backbone stage.
        self.head = _DeepLabHead(self.base_channel[-1], n_class, **kwargs)
        self.aux = aux
        if self.aux:
            self.auxlayer = _FCNHead(256, n_class, **kwargs)

    def forward(self, x):
        in_size = x.size()[2:]
        _, _, stage3, stage4 = self.base_forward(x)
        logits = self.head(stage4)
        # Upsample logits back to the input resolution.
        logits = F.interpolate(logits, in_size, mode='bilinear', align_corners=True)
        result = {"main_out": logits}
        if self.aux:
            aux_logits = self.auxlayer(stage3)
            aux_logits = F.interpolate(aux_logits, in_size, mode='bilinear', align_corners=True)
            result["auxout"] = [aux_logits]
        return result
class _DeepLabHead(nn.Module):
    """ASPP followed by a 3x3 conv classifier producing per-pixel class logits."""

    def __init__(self, in_channel, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_DeepLabHead, self).__init__()
        self.aspp = _ASPP(in_channel, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
        nk = {} if norm_kwargs is None else norm_kwargs
        self.block = nn.Sequential(
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            norm_layer(256, **nk),
            nn.ReLU(True),
            nn.Dropout(0.1),
            nn.Conv2d(256, nclass, 1),
        )

    def forward(self, x):
        return self.block(self.aspp(x))
class _ASPPConv(nn.Module):
def __init__(self, in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs):
super(_ASPPConv, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False),
norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
def forward(self, x):
return self.block(x)
class _AsppPooling(nn.Module):
def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs):
super(_AsppPooling, self).__init__()
self.gap = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
def forward(self, x):
size = x.size()[2:]
pool = self.gap(x)
out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
return out
class _ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling.

    Five parallel branches — a 1x1 conv, three dilated 3x3 convs (one per
    atrous rate) and a global-pooling branch — are concatenated along the
    channel axis and projected back to *out_channels* with dropout.
    """

    def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs, out_channels=256, **kwargs):
        super(_ASPP, self).__init__()
        nk = {} if norm_kwargs is None else norm_kwargs
        self.b0 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels, **nk),
            nn.ReLU(True),
        )
        rate1, rate2, rate3 = tuple(atrous_rates)
        self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs)
        self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs)
        self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs)
        self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            norm_layer(out_channels, **nk),
            nn.ReLU(True),
            nn.Dropout(0.5),
        )

    def forward(self, x):
        branches = [self.b0(x), self.b1(x), self.b2(x), self.b3(x), self.b4(x)]
        return self.project(torch.cat(branches, dim=1))
| [
"805207107@qq.com"
] | 805207107@qq.com |
f2190533c3b9802af9fa0c749b96108bf2036c1a | 74a1b51082e18a152626eb8044ab5d598283dacb | /easy/leetCode1646.py | 63c66927a02405ce8571af38833036cebcea6577 | [] | no_license | git874997967/LeetCode_Python | 6eb7d869d3737e946a8c6f0c51899a80bf03d650 | 1248cd19ab0d9d8aba503c487e163808c1d107cb | refs/heads/master | 2023-08-22T13:24:15.612040 | 2021-09-20T05:53:07 | 2021-09-20T05:53:07 | 340,973,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | #1646. Get Maximum in Generated Array
def getMaximumGenerated(n):
arr = []
arr.append(0)
arr.append(1)
for i in range(2, n+ 1):
print(i)
if i % 2 == 0:
arr.append(i // 2)
else:
arr.append(arr[(i+1)//2] + arr[(i -1)//2])
return max(arr) if n >= 1 else 0
getMaximumGenerated(7)
getMaximumGenerated(2)
getMaximumGenerated(3)
| [
"g8749979677@gmail.com"
] | g8749979677@gmail.com |
e7748d6bb1278b3c1a57344f2797748c13590dfa | 0d399688fdd3568b2bbf209e573ccdbb4f9fb276 | /trainer.py | d109c50b22a55276bd446403553aafed4ac8ccdb | [] | no_license | woaksths/Weak-Supervision-Based-Self-Training | 08ff838b090430e071d228b0912ac847b6457fa9 | 1d8162a5c941a36d891effed62e27869b660dc49 | refs/heads/master | 2023-04-12T15:17:11.371147 | 2021-05-09T17:19:54 | 2021-05-09T17:19:54 | 348,579,632 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,240 | py | import os
import torch
from evaluator import Evaluator
from torch.utils.data import DataLoader
from util.early_stopping import EarlyStopping
from util.dataset import Dataset
from util.lexicon_util import stop_words
from transformers import BertTokenizer, BertForSequenceClassification
from weak_supervision import guide_pseudo_labeling
from nltk.stem import WordNetLemmatizer
from model import BERT_ATTN
from util.augment import *
import random
import copy
class Trainer(object):
def __init__(self, config, model, criterion, optimizer,
save_path, dev_dataset, test_dataset, model_type, do_augment):
self.config = config
self.loss = criterion
self.evaluator = Evaluator(loss=self.loss, batch_size=self.config.test_batch_size)
self.optimizer = optimizer
self.device = self.config.device
self.model = model.to(self.device)
self.model_type = model_type
self.do_augment = do_augment
if self.model_type != 'baseline':
self.lexicon = {label:{} for label in range(self.config.class_num)}
self.lexicon_temp = {label:{} for label in range(self.config.class_num)}
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.lemmatizer = WordNetLemmatizer()
self.train_loader = None
self.valid_loader = DataLoader(dev_dataset, **self.config.valid_params)
self.test_loader = DataLoader(test_dataset, **self.config.test_params)
self.early_stopping = None
self.save_path = save_path
self.sup_path = self.save_path +'/sup'
self.ssl_path = self.save_path +'/ssl'
if not os.path.isabs(self.sup_path):
self.sup_path = os.path.join(os.getcwd(), self.sup_path)
if not os.path.exists(self.sup_path):
os.makedirs(self.sup_path)
if not os.path.isabs(self.ssl_path):
self.ssl_path = os.path.join(os.getcwd(), self.ssl_path)
if not os.path.exists(self.ssl_path):
os.makedirs(self.ssl_path)
def calculate_accu(self, big_idx, targets):
n_correct = (big_idx==targets).sum().item()
return n_correct
def train_epoch(self, epoch):
tr_loss = 0
n_correct = 0
nb_tr_steps = 0
nb_tr_examples = 0
self.model.train()
print('train_epoch', epoch)
for _, batch in enumerate(self.train_loader):
ids = batch['input_ids'].to(self.device, dtype=torch.long)
attention_mask = batch['attention_mask'].to(self.device, dtype=torch.long)
token_type_ids = batch['token_type_ids'].to(self.device, dtype=torch.long)
targets = batch['labels'].to(self.device, dtype=torch.long)
outputs = self.model(ids, attention_mask, token_type_ids, labels=targets)
loss, logits = outputs[0], outputs[1]
attn = None
if self.model_type != 'baseline':
attn = outputs[2]
self.build_lexicon(ids, targets, attn)
tr_loss += loss.item()
scores = torch.softmax(logits, dim=-1)
big_val, big_idx = torch.max(scores.data, dim=-1)
n_correct += self.calculate_accu(big_idx, targets)
nb_tr_steps += 1
nb_tr_examples += targets.size(0)
if _ % 1000 == 0:
loss_step = tr_loss/nb_tr_steps
accu_step = (n_correct*100)/nb_tr_examples
print(f"Training Loss per 1000 steps: {loss_step}")
print(f"Training Accuracy per 1000 steps: {accu_step}")
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
epoch_loss = tr_loss/nb_tr_steps
epoch_accu = (n_correct*100)/nb_tr_examples
print(f"Training Loss Epoch: {epoch_loss}")
print(f"Training Accuracy Epoch: {epoch_accu}")
def initial_train(self, label_dataset):
print('initial train module')
self.train_loader = DataLoader(label_dataset, **self.config.train_params)
self.early_stopping = EarlyStopping(patience=5, verbose=True)
best_dev_acc = -1
for epoch in range(self.config.epochs):
self.train_epoch(epoch)
dev_loss, dev_acc = self.evaluator.evaluate(self.model, self.valid_loader)
self.early_stopping(dev_loss)
if best_dev_acc <= dev_acc:
best_dev_acc = dev_acc
if self.model_type == 'baseline':
self.model.save_pretrained(self.sup_path)
else:
# replcae lexicon with ones generated by best epochs
self.lexicon = copy.deepcopy(self.lexicon_temp)
torch.save({'model_state_dict':self.model.state_dict(),
'optimizer_state_dict':self.optimizer.state_dict(),'epoch':epoch},
self.sup_path +'/checkpoint.pt')
if epoch % 1 == 0:
test_loss, test_acc = self.evaluator.evaluate(self.model, self.test_loader, is_test=True)
if self.model_type != 'baseline':
self.lexicon_temp = {label:{} for label in range(self.config.class_num)}
if self.early_stopping.early_stop:
print("Eearly Stopping!")
break
def self_train(self, labeled_dataset, unlabeled_dataset, guide_type=None, confidence_threshold=0.9):
best_accuracy = -1
min_dev_loss = 987654321
print(len(unlabeled_dataset))
print(type(unlabeled_dataset))
for outer_epoch in range(self.config.epochs):
sampled_num = len(unlabeled_dataset) // 2
random.shuffle(unlabeled_dataset)
sampled_unlabeled = unlabeled_dataset[:sampled_num]
sampled_text = [data[0] for data in sampled_unlabeled]
sampled_labels = [data[1] for data in sampled_unlabeled]
sampled_encodings = self.tokenizer(sampled_text, truncation=True, padding=True)
sampled_unlabeled_dataset = Dataset(sampled_encodings, sampled_labels)
print('outer_epoch {} sampled unlabeled dataset {}'.format(outer_epoch, len(sampled_unlabeled_dataset)))
# pseudo-labeling
new_dataset = self.pseudo_labeling(sampled_unlabeled_dataset, confidence_threshold, guide_type)
# add pseudo-label into labeled data
combined_dataset, new_dataset = self.add_dataset(labeled_dataset, new_dataset)
# remove pseudo-label from unlabeled data
# unlabeled_dataset = self.remove_dataset(unlabeled_dataset, new_dataset)
self.train_loader = DataLoader(combined_dataset, **self.config.train_params)
self.early_stopping = EarlyStopping(patience=5, verbose=True)
# re-initialize the student model from scratch
del self.model, self.optimizer
if self.model_type =='baseline':
self.model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=self.config.class_num).to(self.config.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=2e-5)
else:
self.model = BERT_ATTN(num_labels=self.config.class_num).to(self.config.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=2e-5)
# retrain model with labeled data + pseudo-labeled data
best_dev_acc = -1
for inner_epoch in range(self.config.epochs):
print('outer_epoch {} inner_epoch {} best_accuracy {}'.format(outer_epoch, inner_epoch, best_accuracy))
self.train_epoch(inner_epoch)
dev_loss, dev_acc = self.evaluator.evaluate(self.model, self.valid_loader)
self.early_stopping(dev_loss)
# save model when current dev_acc is greater than best_dev_acc
if dev_acc > best_dev_acc:
best_dev_acc = dev_acc
if self.model_type =='baseline':
self.model.save_pretrained(self.ssl_path)
else:
self.lexicon = copy.deepcopy(self.lexicon_temp)
torch.save({'model_state_dict':self.model.state_dict(),
'optimizer_state_dict':self.optimizer.state_dict(),
'epoch': {'outer_epoch':outer_epoch, 'inner_epoch':inner_epoch}},
self.ssl_path +'/checkpoint.pt')
if inner_epoch % 1 == 0:
test_loss, test_acc = self.evaluator.evaluate(self.model, self.test_loader, is_test=True)
if best_accuracy < test_acc:
best_accuracy = test_acc
if self.model_type != 'baseline':
self.lexicon_temp = {label:{} for label in range(self.config.class_num)}
if self.early_stopping.early_stop:
print("Early Stopping!")
break
print('Best accuracy {}'.format(best_accuracy))
def pseudo_labeling(self, unlabeled_dataset, confidence_threshold, guide_type=None):
unlabeled_loader = DataLoader(unlabeled_dataset, **self.config.unlabeled_params)
self.model.eval()
new_dataset = {label:[] for label in range(self.config.class_num)}
with torch.no_grad():
for _, batch in enumerate(unlabeled_loader):
ids = batch['input_ids'].to(self.device, dtype=torch.long)
attention_mask = batch['attention_mask'].to(self.device, dtype=torch.long)
token_type_ids = batch['token_type_ids'].to(self.device, dtype=torch.long)
targets = batch['labels'].to(self.device, dtype=torch.long)
outputs = self.model(ids, attention_mask, token_type_ids, labels=targets)
loss, logits = outputs[0], outputs[1]
confidences = torch.softmax(logits, dim=-1)
big_val, big_idx = torch.max(confidences.data, dim=-1)
for text_id, label, conf_val, target in zip(ids, big_idx, big_val, targets):
pred_label, conf_val, target = label.item(), conf_val.item(), target.item()
if conf_val >= confidence_threshold:
decoded_text = self.tokenizer.decode(text_id, skip_special_tokens=True)
new_dataset[pred_label].append((text_id, decoded_text, pred_label, target, conf_val))
if guide_type == 'predefined_lexicon_pl':
new_dataset = guide_pseudo_labeling(new_dataset, guide_type)
elif guide_type =='lexicon_pl':
new_dataset = guide_pseudo_labeling(new_dataset, guide_type, self.lexicon)
elif guide_type == 'weigthed_lexicon_pl':
pass
# make new_dataset being balanced
num_of_min_dataset = 987654321
for label, dataset in new_dataset.items():
print('label:{} len:{}'.format(label, len(dataset)))
if num_of_min_dataset > len(dataset):
num_of_min_dataset = len(dataset)
print('num_of_min_dataset', num_of_min_dataset)
num_of_min_dataset = num_of_min_dataset // 2
total, correct = 0, 0
balanced_dataset = []
for label in new_dataset.keys():
# sort by confidence
new_dataset[label].sort(key=lambda x:x[4], reverse=True)
balanced_dataset.extend(new_dataset[label][:num_of_min_dataset])
for data in balanced_dataset:
text_id, decoded_text, pred_label, target, confidence = data[0], data[1], data[2], data[3], data[4]
if pred_label == target:
correct+=1
total+=1
print('#'*100)
print(' pseduo-label {}/{}'.format(correct, total))
return balanced_dataset
def build_lexicon(self, input_ids, targets, attns):
top_k = 3
values, indices = torch.topk(attns, top_k, dim=-1)
decoded_inputs = self.tokenizer.batch_decode(input_ids)
for input_id, sent, seq_idxs, attn, label in zip(input_ids, decoded_inputs, indices, attns, targets):
words = self.tokenizer.tokenize(sent)
cleaned_words = self.tokenizer.decode(input_id, skip_special_tokens=True)
label = label.item()
if len(self.tokenizer.tokenize(cleaned_words)) <= top_k:
# choose top one
vocab_id = input_id[seq_idxs[0].item()].item()
word = self.tokenizer.convert_ids_to_tokens(vocab_id)
if '#' in word or len(word) <=2 or word in stop_words:
continue
word = self.lemmatizer.lemmatize(word)
if word in self.lexicon_temp[label]:
self.lexicon_temp[label][word] +=1
else:
self.lexicon_temp[label][word] = 1
else:
# choose top three
vocab_ids = [input_id[idx.item()].item() for idx in seq_idxs]
words = self.tokenizer.convert_ids_to_tokens(vocab_ids)
for word in words:
if '#' in word or len(word) <=2 or word in stop_words:
continue
word = self.lemmatizer.lemmatize(word)
if word in self.lexicon_temp[label]:
self.lexicon_temp[label][word] += 1
else:
self.lexicon_temp[label][word] = 1
def add_dataset(self, labeled_dataset, new_dataset):
labeled_texts, labeled_labels = self.decode_dataset(labeled_dataset)
new_texts = []
new_labels = []
for idx in range(len(new_dataset)):
decoded_text = new_dataset[idx][1]
pred_label = new_dataset[idx][2]
new_texts.append(decoded_text)
new_labels.append(pred_label)
combined_texts = labeled_texts + new_texts
combined_labels = labeled_labels + new_labels
combined_dataset = self.encode_dataset(combined_texts, combined_labels)
return combined_dataset, list(zip(new_texts, new_labels))
def remove_dataset(self, unlabeled_dataset, new_dataset):
unlabeled_texts = [data[0] for data in unlabeled_dataset]
unlabeled_labels = [data[1] for data in unlabeled_dataset]
new_texts = [data[0] for data in new_dataset]
new_labels = [data[1] for data in new_dataset]
# remove pseudo-labeled from unlabeled dataset
for text in new_texts:
idx = unlabeled_texts.index(text)
unlabeled_texts.pop(idx)
unlabeled_labels.pop(idx)
return list(zip(unlabeled_texts, unlabeled_labels))
def encode_dataset(self, texts, labels):
encodings = self.tokenizer(texts, truncation=True, padding=True)
dataset = Dataset(encodings, labels)
return dataset
def decode_dataset(self, dataset):
decoded_texts = []
labels = []
for idx in range(len(dataset)):
text_id = dataset[idx]['input_ids']
label = dataset[idx]['labels'].item()
decoded_text = self.tokenizer.decode(text_id, skip_special_tokens=True)
decoded_texts.append(decoded_text)
labels.append(label)
return decoded_texts, labels
| [
"woaksths@gmail.com"
] | woaksths@gmail.com |
403d845645672867f12a3e55739cd6c86e6594d3 | b31c0f0d1e8a3bf575e6b86591ec1071cd9a8a3d | /mlonmcu/platform/microtvm/microtvm_zephyr_target.py | c70668791002be5ca39259d7331daa22b90d3ca3 | [
"Apache-2.0"
] | permissive | tum-ei-eda/mlonmcu | e75238cd7134771217153c740301a8327a7b93b1 | f1b934d5bd42b5471d21bcf257bf88c055698918 | refs/heads/main | 2023-08-07T15:12:13.466944 | 2023-07-15T13:26:21 | 2023-07-15T13:26:21 | 448,808,394 | 22 | 4 | Apache-2.0 | 2023-06-09T23:00:19 | 2022-01-17T08:20:05 | Python | UTF-8 | Python | false | false | 2,969 | py | #
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from mlonmcu.target.target import Target
from mlonmcu.logging import get_logger
from .microtvm_template_target import TemplateMicroTvmPlatformTarget
logger = get_logger()
class ZephyrMicroTvmPlatformTarget(TemplateMicroTvmPlatformTarget):
FEATURES = Target.FEATURES + []
DEFAULTS = {
**Target.DEFAULTS,
"extra_files_tar": None,
"project_type": "host_driven",
"zephyr_board": "",
# "zephyr_base": "?",
# "west_cmd": "?",
"verbose": False,
"warning_as_error": True,
"compile_definitions": "",
# "config_main_stack_size": None,
"config_main_stack_size": "16384",
"gdbserver_port": None,
"nrfjprog_snr": None,
"openocd_serial": None,
"port": None, # Workaround to overwrite esptool detection
}
REQUIRED = Target.REQUIRED + ["zephyr.install_dir", "zephyr.sdk_dir"]
def __init__(self, name=None, features=None, config=None):
super().__init__(name=name, features=features, config=config)
self.template_path = None
self.option_names = [
"extra_files_tar",
"project_type",
"zephyr_board",
# "verbose",
"warning_as_error",
"compile_definitions",
"config_main_stack_size",
"gdbserver_port",
"nrfjprog_snr",
"openocd_serial",
]
# self.platform = platform
# self.template = name2template(name)
@property
def zephyr_install_dir(self):
return Path(self.config["zephyr.install_dir"])
@property
def port(self):
return self.config["port"]
@property
def zephyr_sdk_dir(self):
return Path(self.config["zephyr.sdk_dir"])
def get_project_options(self):
ret = super().get_project_options()
ret.update({"zephyr_base": self.zephyr_install_dir / "zephyr"})
return ret
def update_environment(self, env):
super().update_environment(env)
env["ZEPHYR_BASE"] = str(self.zephyr_install_dir / "zephyr")
env["ZEPHYR_SDK_INSTALL_DIR"] = str(self.zephyr_sdk_dir)
if self.port:
env["ESPTOOL_PORT"] = self.port
| [
"philipp.van-kempen@tum.de"
] | philipp.van-kempen@tum.de |
0a363bd967dc4a9e87872bd8e62484085cbd6476 | cc2df07a053b0ee13b05fe53ea9463033dd70c36 | /app/models.py | cad45bdeca5f36778a00f25f80049c6608156b71 | [] | no_license | sinjorjob/django-progress-bar | 5d0263d0cacc867fcc4ac6e5d07b37833ab7c849 | d948663231859b3485b7a35608f1c97246f952b7 | refs/heads/master | 2023-07-01T19:19:30.712511 | 2021-08-07T00:19:28 | 2021-08-07T00:19:28 | 393,536,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.db import models
class FileUpload(models.Model):
title = models.CharField(verbose_name="画像のタイトル", max_length=100)
image = models.ImageField(verbose_name="画像",upload_to="images/upload_files/")
def __str__(self):
return self.title | [
"sinforjob@gmail.com"
] | sinforjob@gmail.com |
edaccd77699ca8d4dccc010070da555c9528b148 | c05ab2b704fd779f0ea76f4bd69ee58b68ab4bb7 | /resale_market_place/accounts/migrations/0001_initial.py | c619d03fb096b2a9f94af9a8cb8d9b58a3d08cbd | [] | no_license | cmrajib/django_restaurant | 8aaaa73937fe76768c88149e58417b21bacacba7 | e09a6d6855eb79d30ae5adfa2720e9c86960ecd0 | refs/heads/main | 2023-02-28T18:30:47.415293 | 2021-01-12T20:03:35 | 2021-01-12T20:03:35 | 329,099,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Generated by Django 3.1.4 on 2021-01-08 08:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log in the site', verbose_name='Staff')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treatea as active', verbose_name='active')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(blank=True, max_length=264, null=True)),
('full_name', models.CharField(blank=True, max_length=264, null=True)),
('address_1', models.TextField(blank=True, max_length=300, null=True)),
('city', models.CharField(blank=True, max_length=40, null=True)),
('zipcode', models.CharField(blank=True, max_length=10, null=True)),
('country', models.CharField(blank=True, max_length=20, null=True)),
('phone', models.CharField(blank=True, max_length=20, null=True)),
('date_joined', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"cmrajib@gmail.com"
] | cmrajib@gmail.com |
a96def7f2dc97e51d0ec0b74e35ba3f491feb1d4 | 312d8dbbf980bf164f210e7935b17dc08d64ff87 | /Model/repeat3_attribute_prediction_exist_PTS_utilize_all/Baseline/main.py | f40523340de517fcd318799d311d421cece624ef | [] | no_license | hsack6/OWGP_NBA | 27dafbd6e59c17ce4a66e92132ee56782e2126bf | 56656efb5884cd9f806e476a92c5e6485c71adeb | refs/heads/master | 2023-02-25T09:52:05.165494 | 2021-02-03T12:44:04 | 2021-02-03T12:44:04 | 288,363,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | import argparse
import random
import pandas as pd
from utils.inference import inference
from utils.data.dataset import BADataset
from utils.data.dataloader import BADataloader
import sys
import os
current_dir = os.path.dirname(os.path.abspath("__file__"))
sys.path.append( str(current_dir) + '/../../../' )
from setting_param import Model_repeat3_attribute_prediction_exist_PTS_utilize_all_InputDir as InputDir
from setting_param import Model_repeat3_attribute_prediction_exist_PTS_utilize_all_Baseline_OutputDir as OutputDir
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_worker
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_batchSize
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_init_L
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_state_dim
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_output_dim
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_idx as Attribute_idx
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, help='number of data loading workers', default=repeat3_attribute_prediction_exist_PTS_utilize_all_worker)
parser.add_argument('--batchSize', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_batchSize, help='input batch size')
parser.add_argument('--state_dim', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_state_dim, help='GGNN hidden state size')
parser.add_argument('--output_dim', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_output_dim, help='Model output state size')
parser.add_argument('--init_L', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_init_L, help='number of observation time step')
opt = parser.parse_args()
print(opt)
opt.dataroot = InputDir
opt.L = opt.init_L
def main(opt):
all_dataset = BADataset(opt.dataroot, opt.L, False, False, False)
all_dataloader = BADataloader(all_dataset, batch_size=opt.batchSize, \
shuffle=False, num_workers=opt.workers, drop_last=False)
opt.annotation_dim = 10
opt.n_edge_types = all_dataset.n_edge_types
opt.n_node = all_dataset.n_node
inference(all_dataloader, opt, OutputDir, Attribute_idx)
if __name__ == "__main__":
main(opt)
| [
"yamasaki.shohei@ist.osaka-u.ac.jp"
] | yamasaki.shohei@ist.osaka-u.ac.jp |
a501d1205297f9df8f05e56b27a9b3f0ea2f6122 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-redis/azure/mgmt/redis/models/redis_linked_server_with_properties.py | d4982b6a62d0bc2d014e157738b34cc984be1ebf | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,830 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class RedisLinkedServerWithProperties(ProxyResource):
"""Response to put/get linked server (with properties) for Redis cache.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param linked_redis_cache_id: Required. Fully qualified resourceId of the
linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Required. Location of the linked redis
cache.
:type linked_redis_cache_location: str
:param server_role: Required. Role of the linked server. Possible values
include: 'Primary', 'Secondary'
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
:ivar provisioning_state: Terminal state of the link between primary and
secondary redis cache.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'linked_redis_cache_id': {'required': True},
'linked_redis_cache_location': {'required': True},
'server_role': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'linked_redis_cache_id': {'key': 'properties.linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'properties.linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'properties.serverRole', 'type': 'ReplicationRole'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RedisLinkedServerWithProperties, self).__init__(**kwargs)
self.linked_redis_cache_id = kwargs.get('linked_redis_cache_id', None)
self.linked_redis_cache_location = kwargs.get('linked_redis_cache_location', None)
self.server_role = kwargs.get('server_role', None)
self.provisioning_state = None
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
3f8dbbb445a2fdbe9505bb3a6007467a1dca973c | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPDP1P2_20210714171109.py | 252ccb2b4185bbc2c4376719387e8bd06591fab3 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,499 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
from scipy.optimize import curve_fit
"""
The unit use is IS standard
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun:
    """Power-based master constraints analysis (Mattingly method) for the
    turbofan share of a hybrid, distributed-propulsion power train.

    The electric motors supply a fraction ``Hp`` of total power, so the
    turbofan only provides the remaining ``(1 - Hp)`` share.  Each constraint
    method returns the required power loading P/W_TO for the given wing
    loading, except :meth:`stall_speed`, which returns a limiting wing
    loading W/S instead.
    """

    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
        """
        :param altitude: geometric altitude [m]
        :param velocity: true airspeed [m/s]
        :param beta: weight fraction W/W_TO at this flight condition
        :param wing_load: wing loading W_TO/S
        :param Hp: hybridization ratio P_motor/P_total (electric share)
        :param number_of_motor: number of distributed electric motors
        :param C_DR: additional drag coefficient caused, for example, by
            external stores, braking parachutes or flaps, or temporary
            external hardware
        :return:
            power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        self.beta = beta
        self.hp = Hp
        self.n = number_of_motor

        # thrust (power) lapse ratio of the turbofan at this flight condition
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()

        # drag polar coefficients: CD = K1*CL^2 + K2*CL + CD0 (+ CDR)
        self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
        self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
        self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
        self.cdr = C_DR

        self.w_s = wing_load
        self.g0 = 9.80665

        # the turbofan supplies only the (1 - Hp) share of total power
        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha

        # Estimation of ΔCL and ΔCD from distributed propulsion
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        self.cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(self.cl)
        self.delta_cd0 = pd.delta_CD_0()

    def master_equation(self, n, dh_dt, dV_dt):
        """Mattingly master equation: required P/W_TO for load factor ``n``,
        rate of climb ``dh_dt`` [m/s] and acceleration ``dV_dt`` [m/s^2]."""
        cl = self.cl * n + self.delta_cl
        cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
        return self.coefficient * (self.q / (self.beta * self.w_s) * cd
                                   + dh_dt / self.v + dV_dt / self.g0)

    def cruise(self):
        """Steady level flight: n = 1, no climb, no acceleration."""
        return self.master_equation(n=1, dh_dt=0, dV_dt=0)

    def climb(self, roc):
        """Steady climb at rate of climb ``roc`` [m/s]."""
        return self.master_equation(n=1, dh_dt=roc, dV_dt=0)

    def level_turn(self, turn_rate=3, v=100):
        """Sustained level turn.

        Defaults assume a 360-degree turn in 2 minutes (3 deg/s), flown at
        roughly 100 m/s; the load factor follows from the turn kinematics.
        """
        load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
        return self.master_equation(n=load_factor, dh_dt=0, dV_dt=0)

    def take_off(self):
        """Take-off ground-run constraint.

        A320neo-like numbers: take-off speed about 150 knots (~75 m/s) and a
        ground run s_G = 1266 m.  K_TO = V_TO / V_stall is a constant greater
        than one, set to 1.2 (generally specified by flying regulations).
        """
        Cl_max_to = 2.3
        K_TO = 1.2  # V_TO / V_stall
        s_G = 1266
        return (2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2
                / (s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (3 / 2))

    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Maximum wing loading W/S allowed by the take-off and landing
        stall speeds (the more restrictive of the two is returned).

        The distributed-propulsion lift increment ΔCL depends on W/S itself,
        so W/S is found by fixed-point iteration.  The loop is capped so a
        non-converging case cannot hang the analysis.
        """
        V_stall_ld = 62
        Cl_max_ld = 2.87
        w_s = 6000  # initial guess
        for _ in range(100):  # safety cap on the fixed-point iteration
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            if abs(w_s - W_S) < 1:  # converged to within 1 N/m^2
                return W_S
            w_s = W_S
        return W_S

    def service_ceiling(self, roc=0.5):
        """Service ceiling: sustain a residual 0.5 m/s rate of climb."""
        return self.master_equation(n=1, dh_dt=roc, dV_dt=0)

    allFuncs = [stall_speed, take_off, cruise,
                service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Mattingly_Method_with_DP_electric:
"""This is a power-based master constraints analysis
the difference between turbofun and electric for constrains analysis:
1. assume the thrust_lapse = 1 for electric propution
2. hp = 1 - hp_turbofun
"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp # this is the difference part compare with turbofun
self.n = number_of_motor
# power lapse ratio
self.alpha = 0.75 # this is the difference part compare with turbofun
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = self.hp * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(
self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * \
(self.q / (self.beta * self.w_s) *
cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180)
* v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Return the stall-limited wing loading (N/m^2).

        Iterates because the distributed-propulsion lift increment
        delta_cl itself depends on the wing loading; the loop is a
        fixed-point iteration that stops once successive wing-loading
        estimates differ by less than 1.
        """
        V_stall_ld = 62   # landing stall speed (m/s)
        Cl_max_ld = 2.87  # maximum lift coefficient, landing configuration
        a = 10            # convergence residual, start above the threshold
        w_s = 6000        # initial wing-loading guess
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            # candidate wing loadings from the take-off and landing stall limits
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP_electric.master_equation(
self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [stall_speed, take_off, cruise,
service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun:
    """This is a power-based master constraints analysis based on Gudmundsson_method

    Each public method returns the required sea-level power loading
    P_SL/W_TO (W/N) for one flight condition, for a turbofan with
    distributed propulsion (DP); the turbofan carries (1 - Hp) of the
    power, see ``self.coefficient`` below.
    """
    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param altitude: geometric altitude (m)
        :param velocity: true airspeed (m/s)
        :param beta: weight fraction
        :param wing_load: wing loading W_TO/S (N/m^2)
        :param Hp: degree of hybridization carried by the electric motors
        :param number_of_motor: number of distributed-propulsion motors
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665  # standard gravity (m/s^2)
        self.hp = Hp
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        # power lapse ratio of the turbofan at this altitude/speed
        self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
                                                           velocity=self.v).high_bypass_ratio_turbofan()
        h = 2.43  # height of winglets (m)
        b = 35.8  # wing span (m)
        # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        ar_corr = AR * (1 + 1.9 * h / b)
        self.k = 1 / (np.pi * ar_corr * e)  # induced-drag factor
        # (1 - hp): share of the power delivered by the turbofan
        self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
        # Estimation of the DP lift and zero-lift-drag increments
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()
        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8
        self.v_to = 68   # take-off speed (m/s)
        self.s_g = 1480  # ground-roll distance (m)
        self.mu = 0.04   # rolling-friction coefficient
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl
    def cruise(self):
        """Power loading for steady level cruise."""
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient
    def climb(self, roc):
        """Power loading for a climb at rate-of-climb ``roc`` (m/s)."""
        # NOTE(review): last term is k*cl rather than q*k*cl^2/w_s --
        # confirm against the corresponding Gudmundsson climb equation.
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient
    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2
        p_w = q / self.w_s * (self.cd_min + self.k *
                              (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient
    def take_off(self):
        """Power loading for the take-off ground run (Gudmundsson T/W relation)."""
        q = self.q / 2  # mean dynamic pressure during the ground roll
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
            1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient
    def service_ceiling(self, roc=0.5):
        """Power loading to sustain ``roc`` (m/s) at the best-climb speed vy."""
        vy = (2 / self.rho * self.w_s *
              (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2
        p_w = roc / vy + q / self.w_s * \
            (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #     self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient
    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Stall-limited wing loading; fixed-point iteration because
        delta_cl depends on the wing loading itself."""
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10      # convergence residual
        w_s = 6000  # initial guess
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S
    # Ordered constraint callables; index order is relied on by the
    # plotting script at the bottom of the file.
    allFuncs = [stall_speed, take_off, cruise,
                service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric:
    """This is a power-based master constraints analysis based on Gudmundsson_method
    the difference between turbofun and electric for constrains analysis:
    1. assume the thrust_lapse = 1 for electric propution
    2. hp = 1 - hp_turbofun

    Each public method returns the required power loading P_SL/W_TO (W/N)
    of the electric side, which carries Hp of the total power.
    """
    def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
        """
        :param altitude: geometric altitude (m)
        :param velocity: true airspeed (m/s)
        :param beta: weight fraction
        :param wing_load: wing loading W_TO/S (N/m^2)
        :param Hp: degree of hybridization carried by the electric motors
        :param number_of_motor: number of distributed-propulsion motors
        :param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
        :param AR: wing aspect ratio, normally between 7 and 10
        :return:
        power load: P_WTO
        """
        self.h = altitude
        self.v = velocity
        self.beta = beta
        self.w_s = wing_load
        self.g0 = 9.80665  # standard gravity (m/s^2)
        self.hp = Hp  # this is the difference part compare with turbofun
        self.n = number_of_motor
        self.rho = atm.atmosphere(geometric_altitude=self.h).density()
        # power lapse ratio; fixed motor efficiency-like factor instead of
        # an altitude-dependent turbofan lapse
        self.alpha = 0.75  # this is the difference part compare with turbofun
        h = 2.43  # height of winglets (m)
        b = 35.8  # wing span (m)
        # equation 9-88, If the wing has winglets the aspect ratio should be corrected
        ar_corr = AR * (1 + 1.9 * h / b)
        self.k = 1 / (np.pi * ar_corr * e)  # induced-drag factor
        # hp: share of the power delivered by the electric motors
        self.coefficient = self.hp*self.beta * self.v / self.alpha
        # Estimation of the DP lift and zero-lift-drag increments
        pd = ad.aerodynamics_with_pd(
            self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
        self.q = 0.5 * self.rho * self.v ** 2  # dynamic pressure
        cl = self.beta * self.w_s / self.q
        self.delta_cl = pd.delta_lift_coefficient(cl)
        self.delta_cd0 = pd.delta_CD_0()
        # TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
        cd_min = 0.02
        cd_to = 0.03
        cl_to = 0.8
        self.v_to = 68   # take-off speed (m/s)
        self.s_g = 1480  # ground-roll distance (m)
        self.mu = 0.04   # rolling-friction coefficient
        self.cd_min = cd_min + self.delta_cd0
        self.cl = cl + self.delta_cl
        self.cd_to = cd_to + self.delta_cd0
        self.cl_to = cl_to + self.delta_cl
    def cruise(self):
        """Power loading for steady level cruise."""
        p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
        return p_w * self.coefficient
    def climb(self, roc):
        """Power loading for a climb at rate-of-climb ``roc`` (m/s)."""
        # NOTE(review): same k*cl tail term as the turbofan variant --
        # confirm against the corresponding Gudmundsson climb equation.
        p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
        return p_w * self.coefficient
    def level_turn(self, turn_rate=3, v=100):
        """
        assume 2 min for 360 degree turn, which is 3 degree/seconds
        assume turn at 100 m/s
        """
        load_factor = (1 + ((turn_rate * np.pi / 180)
                            * v / self.g0) ** 2) ** 0.5
        q = 0.5 * self.rho * v ** 2
        p_w = q / self.w_s * (self.cd_min + self.k *
                              (load_factor / q * self.w_s + self.delta_cl) ** 2)
        return p_w * self.coefficient
    def take_off(self):
        """Power loading for the take-off ground run (Gudmundsson T/W relation)."""
        q = self.q / 2  # mean dynamic pressure during the ground roll
        p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
            1 - q * self.cl_to / self.w_s)
        return p_w * self.coefficient
    def service_ceiling(self, roc=0.5):
        """Power loading to sustain ``roc`` (m/s) at the best-climb speed vy."""
        vy = (2 / self.rho * self.w_s *
              (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
        q = 0.5 * self.rho * vy ** 2
        p_w = roc / vy + q / self.w_s * \
            (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
        # p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
        #     self.k * self.cd_min / 3) ** 0.5
        return p_w * self.coefficient
    def stall_speed(self, V_stall_to=65, Cl_max_to=2.32):
        """Stall-limited wing loading; fixed-point iteration because
        delta_cl depends on the wing loading itself."""
        V_stall_ld = 62
        Cl_max_ld = 2.87
        a = 10      # convergence residual
        w_s = 6000  # initial guess
        while a >= 1:
            cl = self.beta * w_s / self.q
            delta_cl = ad.aerodynamics_with_pd(
                self.h, self.v, Hp=self.hp, n=self.n, W_S=w_s).delta_lift_coefficient(cl)
            W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + delta_cl)
            W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + delta_cl)
            W_S = min(W_S_1, W_S_2)
            a = abs(w_s-W_S)
            w_s = W_S
        return W_S
    # Ordered constraint callables; index order is relied on by the
    # plotting script at the bottom of the file.
    allFuncs = [stall_speed, take_off, cruise,
                service_ceiling, level_turn, climb]
if __name__ == "__main__":
    # Sweep wing loading and plot every constraint's required power
    # loading for the four method/propulsion combinations.
    n = 250
    w_s = np.linspace(100, 9000, n)
    constrains_name = ['stall speed', 'take off', 'cruise', 'service ceiling', 'level turn @3000m',
                       'climb @S-L', 'climb @3000m', 'climb @7000m', 'feasible region-hybrid', 'feasible region-conventional']
    # Each row: [altitude (m), velocity (m/s), beta, hp] for one constraint.
    constrains = np.array([[0, 80, 1, 0.2], [0, 68, 0.988, 0.5], [11300, 230, 0.948, 0.8],
                           [11900, 230, 0.78, 0.8], [3000, 100, 0.984, 0.8], [0, 100, 0.984, 0.5],
                           [3000, 200, 0.975, 0.6], [7000, 230, 0.96, 0.7]])
    color = ['k', 'c', 'b', 'g', 'y', 'plum', 'violet', 'm']
    # k = 0..3: hybrid (turbofan/electric x Mattingly/Gudmundsson);
    # k = 4,5: combined hybrid totals; k = 6,7: conventional (Hp=0) baselines.
    methods = [ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
               ConstrainsAnalysis_Mattingly_Method_with_DP_electric,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric,
               ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun,
               ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun,
               ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun]
    m = constrains.shape[0]
    p_w = np.zeros([m, n, 8])  # [constraint, wing-loading sample, scenario]
    # plots
    fig, ax = plt.subplots(3, 2, sharex=True, figsize=(10, 10))
    ax = ax.flatten()
    for k in range(8):
        for i in range(m):
            for j in range(n):
                h = constrains[i, 0]
                v = constrains[i, 1]
                beta = constrains[i, 2]
                hp = constrains[i, 3]
                # calculate p_w; rows >= 5 are the three climb cases and
                # reuse allFuncs[-1] (climb) with decreasing rate of climb.
                if k < 4:
                    problem = methods[k](h, v, beta, w_s[j], hp)
                    if i >= 5:
                        p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
                    else:
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                elif k > 5:
                    # conventional baseline: no hybridization
                    problem = methods[k](h, v, beta, w_s[j], Hp=0)
                    if i >= 5:
                        p_w[i, j, k] = problem.allFuncs[-1](problem, roc=15 - 5 * (i - 5))
                    else:
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                elif k == 4:
                    # hybrid total = turbofan part + electric part (Mattingly)
                    if i == 0:
                        problem = methods[k](h, v, beta, w_s[j], hp)
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                    else:
                        p_w[i, j, k] = p_w[i, j, 0] + p_w[i, j, 2]
                else:
                    # hybrid total = turbofan part + electric part (Gudmundsson)
                    if i == 0:
                        problem = methods[k](h, v, beta, w_s[j], hp)
                        p_w[i, j, k] = problem.allFuncs[i](problem)
                    else:
                        p_w[i, j, k] = p_w[i, j, 1] + p_w[i, j, 3]
            if k <= 5:
                if i == 0:
                    # stall speed is a vertical line: swap the axes.
                    # NOTE(review): ``p_w[i, :]`` is 2-D here -- this looks
                    # like it should be ``p_w[i, :, k]``; confirm the plot
                    # actually renders as intended.
                    ax[k].plot(p_w[i, :], np.linspace(0, 50, n),
                               linewidth=1, color=color[i], label=constrains_name[i])
                else:
                    ax[k].plot(w_s, p_w[i, :, k], color=color[i],
                               linewidth=1, alpha=1, label=constrains_name[i])
            else:
                if i == 1:
                    ax[k-2].plot(p_w[i, :, k], np.linspace(
                        0, 150, n), color=color[i], linewidth=1, alpha=0.5, linestyle='--')
                else:
                    ax[k-2].plot(w_s, p_w[i, :, k], color=color[i],
                                 linewidth=1, alpha=0.5, linestyle='--')
        if k <= 5:
            # shade the feasible region above the binding constraint
            ax[k].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0),
                               200, color='b', alpha=0.5, label=constrains_name[-2])
            ax[k].set_xlim(200, 9000)
            ax[k].grid()
            if k <= 3:
                ax[k].set_ylim(0, 50)
            else:
                ax[k].set_ylim(0, 150)
        else:
            # NOTE(review): ad-hoc rescaling of the take-off row before
            # shading the conventional feasible region -- confirm intent.
            p_w[1, :, k] = 200 / (p_w[1, -1, k] - p_w[1, 20, k]) * (w_s - p_w[1, 2, k])
            ax[k-2].fill_between(w_s, np.amax(p_w[0:m, :, k], axis=0),
                                 200, color='r', alpha=0.5, label=constrains_name[-1])
    handles, labels = plt.gca().get_legend_handles_labels()
    fig.legend(handles, labels, bbox_to_anchor=(0.125, 0.02, 0.75, 0.25), loc="lower left",
               mode="expand", borderaxespad=0, ncol=4, frameon=False)
    hp = constrains[:, 3]
    plt.setp(ax[0].set_title(r'$\bf{Mattingly Method}$'))
    plt.setp(ax[1].set_title(r'$\bf{Gudmundsson Method}$'))
    plt.setp(ax[4:6], xlabel='Wing Load: $W_{TO}$/S (N/${m^2}$)')
    plt.setp(ax[0], ylabel=r'$\bf{Turbofun}$''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.setp(ax[2], ylabel=r'$\bf{Motor}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.setp(ax[4], ylabel=r'$\bf{Turbofun+Motor}$' '\n' r'$\bf{vs.Conventional}$ ''\n $P_{SL}$/$W_{TO}$ (W/N)')
    plt.subplots_adjust(bottom=0.15)
    plt.suptitle(r'$\bf{Component}$' ' ' r'$\bf{P_{SL}/W_{TO}}$' ' ' r'$\bf{Diagrams}$'
                 ' ' r'$\bf{After}$' ' ' r'$\bf{Adjust}$' ' ' r'$\bf{Degree-of-Hybridization}$'
                 '\n hp: take-off=' +
                 str(hp[0]) + ' stall-speed=' +
                 str(hp[1]) + ' cruise=' +
                 str(hp[2]) + ' service-ceiling='+
                 str(hp[3]) + '\n level-turn=@3000m' +
                 str(hp[4]) + ' climb@S-L=' +
                 str(hp[5]) + ' climb@3000m=' +
                 str(hp[6]) + ' climb@7000m=' + str(hp[7]))
    plt.show()
| [
"libao@gatech.edu"
] | libao@gatech.edu |
3e70b5fbab2fd9affbed3b12e2d134f46d64fbcc | 33524b5c049f934ce27fbf046db95799ac003385 | /Дистанционная_подготовка/Программирование_на_python/6_Строки/zadache_I.py | 3d675f1a488dad1ae893370c2cef39a39693b5f2 | [] | no_license | mgbo/My_Exercise | 07b5f696d383b3b160262c5978ad645b46244b70 | 53fb175836717493e2c813ecb45c5d5e9d28dd23 | refs/heads/master | 2022-12-24T14:11:02.271443 | 2020-10-04T04:44:38 | 2020-10-04T04:44:38 | 291,413,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py |
# Read one line from stdin and print it with every '1' spelled out as 'one'.
line = input()
print(line.replace('1', 'one'))
"mgbo433@gmail.com"
] | mgbo433@gmail.com |
37be7de681f708069186af725c1ab6a772547fcf | 1985e545df5ddfee396e87af6501fe517661cc77 | /bin/make_slides_github_action | 0cb25a135267d1a417524d79005623c2d1f311f1 | [
"MIT"
] | permissive | blester125/dotfiles | 46e657966582ba0b4552317107c85a44426ce9fd | 03b6856552040246b4d60330d0af6f37b440024d | refs/heads/master | 2023-07-24T11:13:11.989638 | 2023-07-12T13:30:15 | 2023-07-12T14:50:18 | 127,983,262 | 1 | 0 | null | 2022-02-12T23:09:41 | 2018-04-04T00:10:30 | Emacs Lisp | UTF-8 | Python | false | false | 2,132 | #!/usr/bin/python3
import os
import argparse
import textwrap
def slides_action(title):
    """Return the GitHub Actions workflow YAML that builds the LaTeX slides.

    Spaces in *title* are replaced with dashes so the title can double as
    the PDF/artifact file name inside the workflow.
    """
    artifact = title.replace(" ", "-")
    template = textwrap.dedent(r"""
    name: Slides
    on: [push]
    jobs:
      build:
        runs-on: ubuntu-latest
        container: blester125/beamer-image:latest
        steps:
          - uses: actions/checkout@v2
          - name: Build Slides
            run: |
              make clean
              make release
          - uses: actions/upload-artifact@v1
            if: success()
            with:
              name: artifacts
              path: %s.pdf
      commit:
        needs: build
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v2
          - name: Delete slides
            run: |
              rm -rf %s.pdf
          - uses: actions/download-artifact@v1
            with:
              name: artifacts
              path: tmp
          - name: Move artifacts
            run: |
              mv tmp/* .
              rm -rf tmp
          - name: Commit Files
            shell: bash
            run: |
              git add -A
              git diff-index --quiet HEAD \
                || git -c user.name="GitHub" -c user.email="noreply@github.com" commit \
                --author="github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>" \
                -m "Built Slides"
          - name: Push changes
            uses: ad-m/github-push-action@master
            with:
              github_token: ${{ secrets.GITHUB_TOKEN }}
    """.lstrip("\n"))
    return template % (artifact, artifact)
def main():
    """Parse ``--title`` and write .github/workflows/slides.yml."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--title", required=True)
    args = parser.parse_args()
    target_dir = os.path.join(".github", "workflows")
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    target_path = os.path.join(target_dir, "slides.yml")
    with open(target_path, "w") as handle:
        handle.write(slides_action(args.title))
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"blester125@gmail.com"
] | blester125@gmail.com | |
07224a9608ee55b656a3fe877c7771a8a6bd459d | 0eb0657ad8262952c2ec87e7605246d1bebb9cd0 | /storops/vnx/resource/mirror_view.py | 41275de1ffc7ba7358cda4a4dabb7be540b919d3 | [
"Apache-2.0"
] | permissive | cdailing/storops | b666d204bf5fc8a561c436a927e72de3f3d9d64f | d24c48b3cb58f02dce1f131e7448b5400904f8ee | refs/heads/master | 2021-01-19T10:02:01.132218 | 2017-03-29T08:01:55 | 2017-03-29T08:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops.exception import raise_if_err, \
VNXMirrorException, VNXMirrorImageNotFoundError
from storops.lib.common import check_text, instance_cache
from storops.vnx.enums import VNXMirrorViewRecoveryPolicy
from storops.vnx.enums import VNXMirrorViewSyncRate
import storops.vnx.resource.lun
from storops.vnx.resource import VNXCliResource, VNXCliResourceList
__author__ = 'Cedric Zhuang'
class VNXMirrorViewImage(VNXCliResource):
    """One image (side) of a VNX MirrorView replication pair."""

    @staticmethod
    def get_id(image):
        """Normalize *image* (instance or raw id) to a validated text id."""
        uid = image.uid if isinstance(image, VNXMirrorViewImage) else image
        try:
            return check_text(uid)
        except ValueError:
            raise ValueError('invalid image id supplied: {}'.format(uid))

    @property
    def wwn(self):
        """WWN of this image; alias of ``uid``."""
        return self.uid
class VNXMirrorViewImageList(VNXCliResourceList):
    # Typed list wrapper: tells the base list which element class to build.
    @classmethod
    def get_resource_class(cls):
        return VNXMirrorViewImage
class VNXMirrorView(VNXCliResource):
    """A VNX MirrorView replication session, driven through naviseccli."""
    def __init__(self, name=None, cli=None):
        super(VNXMirrorView, self).__init__()
        self._cli = cli
        self._name = name
    def _get_raw_resource(self):
        # Fetch the backing CLI output for this named mirror view.
        return self._cli.get_mirror_view(name=self._name, poll=self.poll)
    @classmethod
    def create(cls, cli, name, src_lun, use_write_intent_log=True):
        """Create a mirror view on *src_lun* and return its wrapper object.

        Raises VNXMirrorException when the CLI reports an error.
        """
        lun_clz = storops.vnx.resource.lun.VNXLun
        lun_id = lun_clz.get_id(src_lun)
        out = cli.create_mirror_view(name, lun_id, use_write_intent_log)
        raise_if_err(out, default=VNXMirrorException)
        return VNXMirrorView(name, cli=cli)
    @classmethod
    def get(cls, cli, name=None):
        """Return a single mirror view by name, or the full list when
        *name* is None."""
        if name is None:
            ret = VNXMirrorViewList(cli)
        else:
            ret = VNXMirrorView(name, cli)
        return ret
    def add_image(self, sp_ip, lun_id,
                  recovery_policy=VNXMirrorViewRecoveryPolicy.AUTO,
                  sync_rate=VNXMirrorViewSyncRate.HIGH):
        """Add a secondary image on the remote system identified by *sp_ip*
        (an IP string or an object exposing ``spa_ip``)."""
        if hasattr(sp_ip, 'spa_ip'):
            sp_ip = sp_ip.spa_ip
        lun_clz = storops.vnx.resource.lun.VNXLun
        lun_id = lun_clz.get_id(lun_id)
        out = self._cli.add_mirror_view_image(self._get_name(), sp_ip, lun_id,
                                              recovery_policy, sync_rate,
                                              poll=self.poll)
        raise_if_err(out, default=VNXMirrorException)
    def get_image(self, image_id):
        """Return the image with uid *image_id*; raise
        VNXMirrorImageNotFoundError when absent."""
        for image in self.images:
            if image.uid == image_id:
                ret = image
                break
        else:
            raise VNXMirrorImageNotFoundError(
                'image {} not found in mirror view {}.'.format(
                    image_id, self._get_name()))
        return ret
    @staticmethod
    def _get_image_id(image_id):
        # Accept either an image object or a raw id string.
        return VNXMirrorViewImage.get_id(image_id)
    @property
    @instance_cache
    def primary_image(self):
        """The primary-side image, or None when not present."""
        for image in self.images:
            if image.is_primary:
                ret = image
                break
        else:
            ret = None
        return ret
    @property
    @instance_cache
    def secondary_image(self):
        """The secondary-side image, or None when not present."""
        for image in self.images:
            if not image.is_primary:
                ret = image
                break
        else:
            ret = None
        return ret
    @property
    def is_primary(self):
        # This array owns the primary image when the remote status reads
        # 'Mirrored'.
        return self.remote_mirror_status == 'Mirrored'
    @property
    def primary_image_id(self):
        return self.primary_image.uid
    @property
    def secondary_image_id(self):
        """uid of the secondary image; raises when no secondary exists."""
        image = self.secondary_image
        if image is None:
            raise VNXMirrorImageNotFoundError(
                'no secondary image exists for this mirror view.')
        return image.uid
    def remove_image(self, image_id=None):
        """Remove an image (defaults to the secondary image)."""
        if image_id is None:
            image_id = self.secondary_image_id
        image_id = self._get_image_id(image_id)
        out = self._cli.delete_mirror_view_image(self._get_name(), image_id,
                                                 poll=self.poll)
        raise_if_err(out, default=VNXMirrorException)
    def fracture_image(self, image_id=None):
        """Fracture (pause replication to) an image; defaults to secondary."""
        if image_id is None:
            image_id = self.secondary_image_id
        image_id = self._get_image_id(image_id)
        out = self._cli.mirror_view_fracture_image(self._get_name(), image_id,
                                                   poll=self.poll)
        raise_if_err(out, default=VNXMirrorException)
    def sync_image(self, image_id=None):
        """Start synchronizing an image; defaults to secondary."""
        if image_id is None:
            image_id = self.secondary_image_id
        image_id = self._get_image_id(image_id)
        out = self._cli.mirror_view_sync_image(self._get_name(), image_id,
                                               poll=self.poll)
        raise_if_err(out, default=VNXMirrorException)
    def promote_image(self, image_id=None):
        """Promote an image to primary; defaults to secondary."""
        if image_id is None:
            image_id = self.secondary_image_id
        image_id = self._get_image_id(image_id)
        out = self._cli.mirror_view_promote_image(self._get_name(), image_id,
                                                  poll=self.poll)
        raise_if_err(out, default=VNXMirrorException)
    def delete(self, force=False):
        """Delete the mirror view; with force=True, first remove the
        secondary image so the deletion cannot be rejected."""
        if force:
            if self.secondary_image:
                self.remove_image()
        out = self._cli.delete_mirror_view(self._get_name())
        raise_if_err(out, default=VNXMirrorException)
class VNXMirrorViewList(VNXCliResourceList):
    """List of mirror views, optionally filtered by source/target LUN."""
    @classmethod
    def get_resource_class(cls):
        return VNXMirrorView
    def __init__(self, cli=None, src_lun=None, tgt_lun=None):
        super(VNXMirrorViewList, self).__init__()
        self._cli = cli
        self._src_lun = src_lun
        self._tgt_lun = tgt_lun
    def _filter(self, item):
        # Keep everything when no filter is set; otherwise keep items whose
        # primary matches src_lun or whose secondary matches tgt_lun (OR).
        if self._src_lun is None and self._tgt_lun is None:
            ret = True
        else:
            ret = False
            pi = item.primary_image
            si = item.secondary_image
            if self._src_lun is not None:
                ret |= self._src_lun.wwn == pi.logical_unit_uid
            if self._tgt_lun is not None and si is not None:
                ret |= self._tgt_lun.wwn == si.logical_unit_uid
        return ret
    def _get_raw_resource(self):
        return self._cli.get_mirror_view(poll=self.poll)
| [
"cedric.zhuang@emc.com"
] | cedric.zhuang@emc.com |
b945dff1986dd6fd177224f743126fcb7d69fa73 | 826bffcd468f0979d05251e6578de13fff1029d4 | /tests/utils/_duplicate_console_output_check.py | 28e4ed0277ddd521f04db7319c8e9740b1c82837 | [
"MIT"
] | permissive | korepwx/madoka | 58b9b65cb1e3edf476d741ee1b5ffc67a9793a48 | 56675bd8220935c6a9c1571a886a84bed235fd3b | refs/heads/master | 2021-01-12T03:18:46.261909 | 2017-01-06T06:36:21 | 2017-01-06T06:36:21 | 78,190,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import os
import subprocess
import sys
# Make the repository root importable so ``madoka`` resolves when this
# check script is run directly from tests/utils/.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from madoka.utils import duplicate_console_output
# Exercise every output path (print, direct stream writes, child
# processes) while console output is duplicated to the file given as the
# first command-line argument.
with duplicate_console_output(sys.argv[1]):
    print('from print')
    sys.stdout.flush()
    sys.stdout.write('from stdout.write\n')
    sys.stdout.flush()
    sys.stderr.write('from stderr.write\n')
    sys.stderr.flush()
    # stdout produced by a subshell
    os.system('echo os.system+stdout')
    # stderr produced by a child Python process
    subprocess.check_call([
        sys.executable,
        '-c',
        'import sys; sys.stderr.write("os.system+stderr\\n");'
        'sys.stderr.flush()'
    ])
"public@korepwx.com"
] | public@korepwx.com |
bdc7e6c501652abf318d816b800f9404c0ac8d58 | 30ac2f9831ebd33885a6f48d153356c2e3731c26 | /Python_Stack/django/django_orm/project_marcela/app_marcela/models.py | 287fbc16c76b573eca3b93d52409f5d482b42977 | [] | no_license | pharaoht/Coding-Dojo-Projects | 192cfd8c36b6dadb049e81d31bd780c7ab340d1e | 504f71acbac3c006cf866a08aea0566058f81ce2 | refs/heads/master | 2023-05-11T21:09:17.316257 | 2021-06-08T00:54:09 | 2021-06-08T00:54:09 | 334,003,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | from django.db import models
import re
import bcrypt
# Create your models here.
class UserManager(models.Manager):
    """Form validation helpers for registration and login.

    Both validators return a dict of error-key -> user-facing message;
    an empty dict means the form is valid. The keys and message text
    (including their typos) are runtime data consumed by templates, so
    they are left untouched here.
    """
    def register_validator(self, formInfo):
        """Validate a registration form (username, email, password,
        cpassword)."""
        errors = {}
        EMAIL_REGEX = re.compile(
            r'[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        emailChecker = User.objects.filter(email=formInfo['email'])
        if len(formInfo['username']) == 0:
            errors['usernamelenCheck'] = "User name field is required"
        elif len(formInfo['username']) < 4:
            errors['usernamelenCheck2'] = "User name my be at least 4 characters"
        if len(formInfo['email']) == 0:
            errors['emailLenCheck'] = "Email field is required"
        elif not EMAIL_REGEX.match(formInfo['email']):
            errors['emailnotmatch'] = 'Invalid email'
        elif len(emailChecker) > 0:
            # email uniqueness check against existing users
            errors['emailtaken'] = 'Sorry, that email is already resgistered'
        if len(formInfo['password']) == 0:
            errors['passworcheck'] = "A password is required"
        elif len(formInfo['password']) < 8:
            errors['passwordlengthcheck'] = "Password must be 8 characters long"
        if formInfo['password'] != formInfo['cpassword']:
            errors['psmatch'] = "Your Password must be the same as confirmed password"
        return errors
    def login_validator(self, formInfo):
        """Validate a login form (email, password) including the bcrypt
        password check."""
        errors = {}
        emailChecker = User.objects.filter(email=formInfo['email'])
        if len(formInfo['email']) == 0:
            errors['emallencheck'] = "Email field can not be empty"
        elif len(emailChecker) == 0:
            errors['emailcheck'] = "Sorry that email, could not be found."
        if len(formInfo['password']) == 0:
            errors['passwordcheck'] = "Password field can not be empty"
        if len(emailChecker) != 0:
            # compare submitted password against the stored bcrypt hash
            if not bcrypt.checkpw(formInfo['password'].encode(), emailChecker[0].password.encode()):
                errors['errorpassword'] = "Incorrect password"
        return errors
class PostManager(models.Manager):
    # Placeholder manager: no custom queries/validators defined yet.
    pass
class User(models.Model):
    """Registered user; passwords are stored as bcrypt hashes (see
    UserManager.login_validator)."""
    user_name = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    updated_at = models.DateTimeField(auto_now=True, null=True)
    objects = UserManager()
class Post(models.Model):
    """An image post with an uploader and many-to-many likes."""
    title = models.CharField(max_length=255)
    img = models.CharField(max_length=255)
    posted_at = models.DateField()
    desc = models.TextField()
    # uploader (one user) vs. likers (many users) use distinct related_names
    posted_by = models.ForeignKey(
        User, related_name="uploader", on_delete=models.CASCADE)
    liked_by = models.ManyToManyField(User, related_name='likes')
    created_at = models.DateTimeField(auto_now_add=True, null=True)
| [
"pharaohmanson@gmail.com"
] | pharaohmanson@gmail.com |
c5fc50bd9c9a74dc99617e3b0491bb8b90d339a0 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /hacker_rank/domains/algorithms/implementation/kaprekar-numbers_sunghyo.jung.py | fd4ff909df1f992a146f1752423d6d81c3c36433 | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | __author__ = 'sunghyo.jung'
p, q = int(raw_input()), int(raw_input())
def is_kaprekar(n):
if n == 1:
return True
d = len(str(n))
s = str(n * n)
d = len(s) - d
a = int(s[:d] if len(s[:d]) > 0 else '0')
b = int(s[d:] if len(s[d:]) > 0 else '0')
return n == a + b and b > 0
flag = False
for i in range(p, q + 1):
if is_kaprekar(i):
flag = True
print i,
if flag:
print ''
else:
print 'INVALID RANGE' | [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
5fb4190fbcf940f1f1faea22e2a81c53b7e0a41d | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/api/nn/test_hardshrink.py | e608bf97d4639e51495891a2b9089ff59b265844 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 1,951 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test_hardshrink
"""
from apibase import APIBase
from apibase import randtool
import paddle
import pytest
import numpy as np
class TestNNHardshrink(APIBase):
    """
    Test harness configuration for paddle.nn.Hardshrink.
    """
    def hook(self):
        """
        Configure the dtype sweep; optional debug/static/backward toggles
        are left commented out.
        """
        self.types = [np.float32, np.float64]
        # self.debug = True
        # self.static = True
        # enable check grad
        # self.enable_backward = True
# Shared harness instance used by every test function below.
obj = TestNNHardshrink(paddle.nn.Hardshrink)
@pytest.mark.api_nn_Hardshrink_vartype
def test_hardshrink_base():
    """
    Default threshold (0.5): 0.3 lies inside [-0.5, 0.5] so it is zeroed.
    Runs the dtype sweep via obj.base.
    """
    x = np.array([-1, 0.3, 2.5])
    res = np.array([-1, 0, 2.5])
    obj.base(res=res, data=x)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink():
    """
    Default threshold, dynamic/static run via obj.run.
    """
    x = np.array([-1, 0.3, 2.5])
    res = np.array([-1, 0, 2.5])
    obj.run(res=res, data=x)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink1():
    """
    threshold = 0: nothing inside (-0, 0), so the input passes through.
    """
    x = np.array([-1, 0.3, 2.5])
    threshold = 0
    res = np.array([-1, 0.3, 2.5])
    obj.run(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink2():
    """
    threshold = 0 with a small negative value (-0.01): still passes through.
    """
    x = np.array([-1, -0.01, 2.5])
    threshold = 0
    res = np.array([-1, -0.01, 2.5])
    obj.run(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_vartype
def test_hardshrink3():
    """
    threshold = -1: negative threshold, input expected unchanged.
    """
    x = np.array([-1, -0.01, 2.5])
    threshold = -1
    res = np.array([-1, -0.01, 2.5])
    obj.base(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_exception
def test_hardshrink4():
    """
    threshold = "1" (wrong type): expect InvalidArgumentError.
    """
    x = np.array([-1, -0.01, 2.5])
    threshold = "1"
    # res = np.array([-1, -0.01, 2.5])
    obj.exception(etype="InvalidArgumentError", data=x, threshold=threshold)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
0002086686eef8eef58f00ddfaa9a3c6b02f158c | 6a4d30fc4850a04f7370f30dbe1b6ffa9f616392 | /intake/migrations/0003_fillablepdf_name.py | a2eb63e24a5799a885567b47ac1da2dc90379735 | [
"MIT"
] | permissive | codeforamerica/intake | bb88d63ae914ce6eebeb957b26451dbb9cfaeb88 | 8755e64c13e2b6f9bef9bbee47011253f20e7e0d | refs/heads/master | 2021-11-26T09:51:10.084263 | 2021-11-15T19:37:23 | 2021-11-15T19:37:23 | 57,333,621 | 51 | 24 | MIT | 2021-09-29T00:07:26 | 2016-04-28T21:03:55 | Python | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-09 00:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a human-readable ``name`` to FillablePdf. The default is applied
    # once to back-fill existing rows and then dropped from the schema
    # (preserve_default=False).
    dependencies = [
        ('intake', '0002_fillablepdf'),
    ]
    operations = [
        migrations.AddField(
            model_name='fillablepdf',
            name='name',
            field=models.CharField(default='Sample pdf', max_length=50),
            preserve_default=False,
        ),
    ]
| [
"bgolder@codeforamerica.org"
] | bgolder@codeforamerica.org |
d97b68cb2186fd512abb20603927df0360996948 | 2d6a2539055e1efd67f4252d11adfaf7ccd2720b | /principal/models/users.py | aa5c85bbaa81af1f8ba7fa27e4c174724c1fb67d | [] | no_license | r202-coe-psu/principal | 96997818073e6dd74df3590caef90ffec16709cc | 1781eacb880ecdf6dbe35cd1433530708eb29875 | refs/heads/master | 2021-09-16T13:56:28.940394 | 2018-06-21T13:52:53 | 2018-06-21T13:52:53 | 111,189,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | import mongoengine as me
import datetime
from passlib.hash import bcrypt
from flask_login import UserMixin
class DataSource(me.EmbeddedDocument):
    """External identity-provider record embedded in a User document."""
    # provider name; presumably an OAuth provider id -- confirm with callers
    provider = me.StringField(required=True)
    data = me.DictField()
    created_date = me.DateTimeField(required=True,
                                    default=datetime.datetime.utcnow)
    # NOTE(review): mongoengine's DateTimeField does not document an
    # ``auto_now`` kwarg -- confirm this argument is actually honored.
    updated_date = me.DateTimeField(required=True,
                                    default=datetime.datetime.utcnow,
                                    auto_now=True)
class User(me.Document, UserMixin):
    """Application user document ('users' collection), Flask-Login aware."""
    username = me.StringField(required=True, unique=True)
    password = me.StringField()  # bcrypt hash produced by set_password
    email = me.StringField()
    first_name = me.StringField(required=True)
    last_name = me.StringField(required=True)
    status = me.StringField(required=True, default='disactive')
    roles = me.ListField(me.StringField(), default=['user'])
    created_date = me.DateTimeField(required=True,
                                    default=datetime.datetime.utcnow)
    # NOTE(review): ``auto_now`` is not a documented mongoengine
    # DateTimeField kwarg -- confirm it is honored.
    updated_date = me.DateTimeField(required=True,
                                    default=datetime.datetime.utcnow,
                                    auto_now=True)
    data_sources = me.EmbeddedDocumentListField(DataSource)
    meta = {'collection': 'users'}
    def get_user_id(self):
        """Return the document's primary key."""
        return self.id
    def __get_salt(self, salt):
        # bcrypt salts must be 22 chars from its base64 alphabet: replace
        # spaces with '.' and left-justify/truncate to exactly 22 chars.
        token = salt.replace(' ', '.')
        return '{:.<22.22}'.format(token)
    def set_password(self, password, salt=''):
        """Store ``password`` as a bcrypt hash (16 rounds, derived salt)."""
        self.password = bcrypt.using(rounds=16).hash(
            password,
            salt=self.__get_salt(salt))
    def verify_password(self, password, salt=''):
        # NOTE(review): ``salt`` is unused -- bcrypt embeds the salt in the
        # stored hash, so verification never needs it; consider removing.
        return bcrypt.verify(password,
                             self.password)
    def has_roles(self, roles):
        """Return True when the user holds at least one of *roles*."""
        for role in roles:
            if role in self.roles:
                return True
        return False
| [
"boatkrap@gmail.com"
] | boatkrap@gmail.com |
471c75482c71e69c35c6564ab841c25fb6e2fcd5 | c3df3ded1da5b86c06643955edf6d75863018c55 | /Block Schematic Swapper/BlockSchematicSwapper_v2.py | 0027207ccc5390839c1b29d8d37cef0376b2e642 | [] | no_license | abrightmoore/ScriptsForMCEdit | 064d0e3ec7b686df78317f0d5792f74728250418 | abb6ac80eb6e3c1778f3910e4d8b07bdb8130edb | refs/heads/master | 2022-11-29T16:00:53.676704 | 2020-08-13T05:59:14 | 2020-08-13T05:59:14 | 287,128,480 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,954 | py | # This filter provides a way to find a block and replace that block with a schematic.
# Requested by james22402 on the forums: http://www.minecraftforum.net/topic/213853-mcedit-filter-scripts/page__st__300#entry22658577
# abrightmoore@yahoo.com.au
# http://brightmoore.net
#
import time # for timing
from math import sqrt, tan, sin, cos, pi, ceil, floor, acos, atan, asin, degrees, radians, log, atan2
from random import *
from numpy import *
from pymclevel import alphaMaterials
from pymclevel import alphaMaterials, MCSchematic, MCLevel, BoundingBox
from mcplatform import *
from os import listdir
from os.path import isfile, join
import glob
# MCSchematic access method @TexelElf
# Texelelf's guidance:
# from pymclevel import MCSchematic, mclevel
# deformation = pymclevel.MCSchematic((width, height, length), mats=self.editor.level.materials)
# deformation.setBlockAt(x,y,z,blockID)
# deformation.setBlockDataAt(x,y,z,blockData)
# deformation.Blocks[::4] = 57
# schematic_file = mcplatform.askSaveFile(mcplatform.lastSchematicsDir? or mcplatform.schematicsDir, "Save Schematic As...", "", "Schematic\0*.schematic\0\0", ".schematic")
# deformation.saveToFile(schematic_file)
# And from Codewarrior0's filterdemo.py:
# level.copyBlocksFrom(temp, temp.bounds, box.origin)
# Global constants
# MCEdit filter UI definition: each entry is (label text, widget spec).
# The label strings double as the keys used to read values back out of
# the ``options`` dict inside perform()/blockSchematicSwapper().
inputs = (
	("BlockSchematicSwapper", "label"),
	("Choose the block to locate:", "blocktype"),
	("What should I look for?", ("Match Block Type Only", "Match Block Data") ),
	("What is the schematic to use?", ("string","value=BlockSchematicSwapper_CowSpawner.schematic")),
	("Random Schematics?", False),
	("Schematic Set:", ("string","value=")),
	("abrightmoore@yahoo.com.au", "label"),
	("http://brightmoore.net", "label"),
)
# Utility methods
def setBlockIfEmpty(level, block_and_data, x, y, z):
	'''Place the block at (x, y, z) only when that position is currently air (id 0).

	``block_and_data`` is a ``(blockID, blockData)`` pair.  Unpacking it in
	the body instead of with a Python-2-only tuple parameter keeps the call
	signature unchanged while making the definition valid Python 3 syntax.
	'''
	tempBlock = level.blockAt(x,y,z)
	if tempBlock == 0:
		setBlock(level, block_and_data, x, y, z)
def setBlock(level, block_and_data, x, y, z):
	'''Write the block id and its data value at (x, y, z) on *level*.

	``block_and_data`` is a ``(blockID, blockData)`` pair; the Python-2-only
	tuple parameter was replaced by an explicit unpack (call-compatible).
	'''
	(block, data) = block_and_data
	level.setBlockAt(x, y, z, block)
	level.setBlockDataAt(x, y, z, data)
def setBlockToGround(level, block_and_data, x, y, z, ymin):
	'''Fill the column at (x, z) from *ymin* up to (but excluding) *y*, air cells only.

	``block_and_data`` is a ``(blockID, blockData)`` pair (tuple parameter
	replaced by a plain argument for Python 3 compatibility; xrange was
	replaced by range, which behaves identically here under Python 2).
	'''
	for iterY in range(ymin, y):
		setBlockIfEmpty(level, block_and_data, x, iterY, z)
def getBoxSize(box):
	'''Return the (width, height, depth) extents of a bounding box.'''
	width = box.maxx - box.minx
	height = box.maxy - box.miny
	depth = box.maxz - box.minz
	return (width, height, depth)
def fix(angle):
	'''Wrap *angle* (radians) into the range [-pi, pi].'''
	wrapped = angle
	while wrapped > pi:
		wrapped = wrapped - 2 * pi
	while wrapped < -pi:
		wrapped = wrapped + 2 * pi
	return wrapped
def drawLine(scratchpad, block_and_data, start, end):
	'''Draw an unconstrained straight line of blocks from *start* to *end*.

	``block_and_data`` is ``(blockID, blockData)``; *start* and *end* are
	(x, y, z) tuples.  The Python-2-only tuple parameters were replaced by
	plain arguments — callers pass the same tuples as before.
	'''
	drawLineConstrained(scratchpad, block_and_data, start, end, 0)
def drawLineConstrained(scratchpad, block_and_data, p0, p1, maxLength):
	'''Draw a straight line of blocks from *p0* to *p1* on *scratchpad*.

	Nothing is drawn when the line is longer than *maxLength*; a
	*maxLength* below 1 disables the length check.  The line is sampled
	every half block to avoid gaps.  ``block_and_data`` is
	``(blockID, blockData)``; *p0*/*p1* are (x, y, z) tuples (Python-2-only
	tuple parameters replaced by plain arguments, call-compatible).
	'''
	(blockID, blockData) = block_and_data
	(x, y, z) = p0
	(x1, y1, z1) = p1
	dx = x1 - x
	dy = y1 - y
	dz = z1 - z
	distHoriz = dx*dx + dz*dz
	distance = sqrt(dy*dy + distHoriz)
	if distance < maxLength or maxLength < 1:
		phi = atan2(dy, sqrt(distHoriz))    # elevation angle
		theta = atan2(dz, dx)               # heading in the x/z plane
		iter = 0
		while iter <= distance:
			px = x + iter * cos(theta) * cos(phi)
			py = y + iter * sin(phi)
			pz = z + iter * sin(theta) * cos(phi)
			scratchpad.setBlockAt(px, py, pz, blockID)
			scratchpad.setBlockDataAt(px, py, pz, blockData)
			iter = iter + 0.5  # slightly oversample because I lack faith.
def analyse(level):
	''' Examine the object in the schematic for min, max non-empty co-ordinates so we can pack-them-in! '''
	# Scan every cell of the schematic and track the smallest/largest
	# non-air coordinate on each axis, i.e. clip the surrounding empty
	# space.  Only the Python-2-only print statements were changed to the
	# parenthesised form (valid under Python 2 and 3); logic is unchanged.
	method = "Analyse schematic contents for the object dimensions"
	print('%s: Started at %s' % (method, time.ctime()))
	(width, height, depth) = getBoxSize(level.bounds)
	print('ANALYSE %s %s %s' % (width, height, depth))
	# Start the minima at the far edges and the maxima at 0 so the first
	# hit pulls them into range.
	minX = width
	minY = height
	minZ = depth
	maxX = 0
	maxY = 0
	maxZ = 0
	found = False
	for iterY in range(0, height):
		for iterX in range(0, width):
			for iterZ in range(0, depth):
				if level.blockAt(iterX, iterY, iterZ) != 0:
					print('ANALYSING %s %s %s' % (iterX, iterY, iterZ))
					if iterX > maxX:
						maxX = iterX
					if iterY > maxY:
						maxY = iterY
					if iterZ > maxZ:
						maxZ = iterZ
					if iterX < minX:
						minX = iterX
					if iterY < minY:
						minY = iterY
					if iterZ < minZ:
						minZ = iterZ
					found = True
	print('ANALYSE RESULT %s %s %s %s %s %s' % (minX, minY, minZ, maxX, maxY, maxZ))
	print('%s: Ended at %s' % (method, time.ctime()))
	if found == False:
		# Completely empty schematic: fall back to the full extent.
		return BoundingBox((0, 0, 0), (width, height, depth))
	else:
		# NOTE(review): the origin uses 0 rather than minY, and the second
		# BoundingBox argument looks like a max corner rather than a size.
		# Preserved as-is; confirm against pymclevel.BoundingBox semantics.
		return BoundingBox((minX, 0, minZ), (maxX+1, maxY+1, maxZ+1))
def findSurface(x, y, z, level, box, options):
	'''Return the y of the highest non-air block in the column at (x, z).

	Scans straight down from y=250 and stops at the first non-air block
	above *y*; returns *y* itself when the whole column above it is air.
	*box* and *options* are unused, kept only for interface parity with
	the other helpers.
	'''
	for scanY in range(250, y, -1):
		if level.blockAt(x, scanY, z) != 0:  # 0 == air
			return scanY
	return y
def perform(level, box, options):
	''' Feedback to abrightmoore@yahoo.com.au '''
	# MCEdit filter entry point: run the block-for-schematic swap over the
	# current selection, then mark the selection dirty so the editor
	# re-lights and saves the touched chunks.
	blockSchematicSwapper(level, box, options)
	level.markDirtyBox(box)
def blockSchematicSwapper(level, box, options):
	'''Scan the selection for the target block and paste a schematic at every hit.

	For each matching block (optionally matching its data value too) the
	configured schematic - or a random pick from the "Schematic Set"
	folder - is copied into the world at that position.  Only the
	Python-2-only print statements were changed to the parenthesised form
	(valid under both Python 2 and 3); the logic is unchanged.
	'''
	# CONSTANTS
	method = "blockSchematicSwapper"
	print('%s: Started at %s' % (method, time.ctime()))
	(width, height, depth) = getBoxSize(box)
	centreWidth = width / 2    # (unused, kept from the original template)
	centreHeight = height / 2  # (unused)
	centreDepth = depth / 2    # (unused)
	AIR = (0,0)                # (unused)
	SHAPE = (200,200,200)
	# END CONSTANTS
	baseBlock = options["Choose the block to locate:"].ID
	baseBlockData = options["Choose the block to locate:"].blockData
	theFileName = "filters/"+options["What is the schematic to use?"]
	randomSchemas = options["Random Schematics?"]
	DIRPATH = options["Schematic Set:"]
	StartSchematicFiles = []
	if randomSchemas == True:
		# Prefill a list of schematic file names which we will choose from later on
		StartSchematicFiles = glob.glob("filters/"+DIRPATH+"/*.schematic")
		for fileName in StartSchematicFiles:
			print(fileName)
		print('Found %s start schematic files' % (len(StartSchematicFiles)))
		# NOTE(review): if this list is empty, the modulo below raises
		# ZeroDivisionError on the first hit - behaviour preserved as-is.
	else:
		# Load once up front so a bad filename fails before the long scan.
		print('Loading schematic from file - %s' % (theFileName))
		# NOTE(review): `os` is only in scope here via the star imports
		# above (mcplatform/numpy); confirm before reusing elsewhere.
		print(os.getcwd())
		charSchematic = MCSchematic(shape=SHAPE,filename=theFileName)
	modeMatchBlockData = False
	if options["What should I look for?"] == "Match Block Data":
		modeMatchBlockData = True
	# First pass - search down-up for the block of interest.  On each hit,
	# import the schematic at that spot and keep searching.
	found = 0
	counter = 0
	for x in range(box.minx, box.maxx):
		for z in range(box.minz, box.maxz):
			for y in range(box.miny, box.maxy):
				counter = counter +1
				if counter%10000 == 0:  # progress heartbeat for big selections
					print('%s %s: Searching at x=%s y=%s z=%s' % (method, time.ctime(), x, y, z))
				if modeMatchBlockData == True:
					if level.blockAt(x,y,z) == baseBlock and level.blockDataAt(x,y,z) == baseBlockData:
						print('I found your block %s at %s %s %s with data value %s' % (baseBlock, x, y, z, baseBlockData))
						if randomSchemas == False:
							placeASchematic(x,y,z, theFileName, level, box, options)
						else:
							# randint() is inclusive of len(); the modulo folds
							# that one-past-the-end value back into range.
							chosenSchematic = randint(0,len(StartSchematicFiles)) % len(StartSchematicFiles)
							placeASchematic(x,y,z, StartSchematicFiles[chosenSchematic], level, box, options)
						found = found +1
				else:
					if level.blockAt(x,y,z) == baseBlock:
						print('I found your block %s at %s %s %s' % (baseBlock, x, y, z))
						if randomSchemas == False:
							placeASchematic(x,y,z, theFileName, level, box, options)
						else:
							chosenSchematic = randint(0,len(StartSchematicFiles)) % len(StartSchematicFiles)
							placeASchematic(x,y,z, StartSchematicFiles[chosenSchematic], level, box, options)
						found = found +1
	print('%s: %s. Found %s' % (method, time.ctime(), found))
	print('%s: Ended at %s' % (method, time.ctime()))
def placeASchematic(x,y,z, theFileName, level, box, options):
	'''Load the schematic file and copy its non-empty core into the level at (x, y, z).

	analyse() is used to clip the surrounding air so only the object itself
	is pasted.  Only the Python-2-only print statements were changed to the
	parenthesised form (valid under both Python 2 and 3).
	'''
	# CONSTANTS AND GLOBAL VARIABLES
	method = "placeASchematic"
	print('%s: Started at %s' % (method, time.ctime()))
	(width, height, depth) = getBoxSize(box)
	centreWidth = width / 2    # (unused, kept from the original template)
	centreHeight = height / 2  # (unused)
	centreDepth = depth / 2    # (unused)
	SHAPE = (32,32,32)
	# END CONSTANTS
	print('Loading schematic from file - %s' % (theFileName))
	charSchematic = MCSchematic(shape=SHAPE,filename=theFileName)
	cursorPosn = (x, y, z)
	bb = analyse(charSchematic)
	level.copyBlocksFrom(charSchematic, bb, cursorPosn)
	print('%s: Ended at %s' % (method, time.ctime()))
| [
"noreply@github.com"
] | abrightmoore.noreply@github.com |
01112d45a464ff83d5d7b67ff843feaee71e4958 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/5c70ccf26bba78eb4d1b/snippet.py | 599476d4f4e0a616826b54de9f63877e1de77cae | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 159 | py | import sys
def exec_(code, globals, locals):
    """Execute *code* in the given namespaces on both Python 2 and 3.

    On Python 3 ``exec`` is a builtin function and can be called directly.
    On Python 2 it is a statement, so the statement form is itself wrapped
    in an exec'd string — otherwise this module would not even parse under
    Python 3.
    """
    if sys.version_info < (3, 0):
        exec("exec code in globals, locals")
    else:
        exec(code, globals, locals)
"gistshub@gmail.com"
] | gistshub@gmail.com |
790e27357a9eeb93997620d266585d71e33aed53 | d0d1e07c984651f96bd9386d546c85c0341e46b2 | /timedata/control/action.py | 0af1dc5757f38f1299f42cfed2bc37a071cdf313 | [
"MIT"
] | permissive | timedata-org/timedata | 61cde905b1fe9eb60ac83ecbf5a5a2114793c45d | 3faac7450678aaccd4a283d0d41ca3e7f113f51b | refs/heads/master | 2020-04-11T12:03:57.962646 | 2019-06-09T10:05:16 | 2019-06-09T10:05:52 | 51,217,217 | 5 | 3 | null | 2016-09-18T16:20:43 | 2016-02-06T19:13:43 | C++ | UTF-8 | Python | false | false | 1,802 | py | from .ops import Ops
from .editor import Editor
from .receiver import Receiver
class Action(Receiver):
    """
    An Action takes an incoming message, applies Ops to it, and then
    uses it to set a value on a Editor.
    """

    def __init__(self, address, ops=()):
        self.address = Editor(address)
        self.ops = Ops(*ops)

    def set_project(self, project):
        """Attach *project* to the underlying Editor address."""
        self.address.set_project(project)

    def receive(self, values):
        """Apply ops (single-value messages only) and forward to the Editor."""
        if self.ops:
            if len(values) == 1:
                values = [self.ops(values[0])]
            else:
                # TODO: They specified ops, but we can't use it.
                # Should we warn here? Can we use the ops somehow?
                pass
        return self.address.receive(values)

    def __bool__(self):
        return bool(self.address or self.ops)

    def __str__(self):
        if self.ops:
            # Bug fix: this used to read `'%s->%s' % self.address, self.ops`,
            # i.e. a two-placeholder format applied to a single argument
            # (TypeError at runtime) and, had it worked, a tuple expression
            # rather than a string.  Parenthesise the format arguments.
            return '%s->%s' % (self.address, self.ops)
        return str(self.address)

    @classmethod
    def make(cls, action):
        """Build an Action from a string address, a kwargs dict, or an args sequence."""
        if isinstance(action, str):
            return cls(action)
        if isinstance(action, dict):
            return cls(**action)
        return cls(*action)
class ActionList(Receiver):
    """A list of Actions.

    Accepts either a single action spec (string or dict) or a sequence of
    specs; each spec is turned into an Action via Action.make.
    """

    def __init__(self, actions=None):
        # A bare string or dict is shorthand for a one-element list.
        if isinstance(actions, (str, dict)):
            actions = [actions]
        specs = actions or ()
        self.actions = tuple(Action.make(spec) for spec in specs)

    def set_project(self, project):
        """Forward the project to every contained Action."""
        for action in self.actions:
            action.set_project(project)

    def receive(self, msg):
        """Broadcast the message's values to every contained Action."""
        payload = tuple(msg.values())
        for action in self.actions:
            action.receive(payload)

    def __bool__(self):
        return bool(self.actions)

    def __str__(self):
        return ' + '.join(str(action) for action in self.actions)
| [
"tom@swirly.com"
] | tom@swirly.com |
f54ba2b9e658843fa70413a13b059f89900ab3dd | 0fe0ffe29ca6f76c6f15c85c8d82b09beaada246 | /third_party/catapult/tracing/tracing_build/strip_memory_infra_trace.py | e4c0cabb5366abaa54d428d65fbac289f6414a55 | [
"BSD-3-Clause"
] | permissive | hanpfei/chromium-net | 4dc8fd48cf3b05d89b11dc121f9c3abdd3ba962e | 9df8ce98c2a14fb60c2f581853011e32eb4bed0f | refs/heads/master | 2023-07-08T15:28:01.033104 | 2023-06-14T13:02:39 | 2023-06-14T13:02:39 | 65,541,033 | 297 | 73 | null | 2022-11-02T23:33:48 | 2016-08-12T09:25:34 | C++ | UTF-8 | Python | false | false | 3,059 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Filters a big trace keeping only the last memory-infra dumps."""
import collections
import gzip
import json
def FormatBytes(value):
  """Return *value* (a byte count) as a human-readable string, e.g. '1.5 MB'.

  Bug fixes relative to the original:
  - `units = units.pop(0)` replaced the list with the popped *string*, so
    the unit label never advanced past 'B'; now the list is just popped.
  - `value /= 1000` truncated under Python 2 integer division; dividing by
    1000.0 keeps the fractional part.
  """
  units = ['B', 'kB', 'MB', 'GB']
  while abs(value) >= 1000 and len(units) > 1:
    value /= 1000.0
    units.pop(0)
  return '%3.1f %s' % (value, units[0])
def Main(argv):
  """Read a (possibly gzipped) trace, drop memory-infra noise, keep only the
  last two global memory dumps, and write <name>-filtered.json.

  Returns 1 on usage error, implicitly None on success.  Python 2 only
  (print statements, dict.itervalues).
  """
  if len(argv) < 2:
    print 'Usage: %s trace.json[.gz]' % argv[0]
    return 1
  in_path = argv[1]
  if in_path.lower().endswith('.gz'):
    fin = gzip.open(in_path, 'rb')
  else:
    fin = open(in_path, 'r')
  with fin:
    print 'Loading trace (can take 1 min on a z620 for a 1GB trace)...'
    trace = json.load(fin)
    print 'Done. Read ' + FormatBytes(fin.tell())
  print 'Filtering events'
  phase_count = collections.defaultdict(int)
  out_events = []
  global_dumps = collections.OrderedDict()
  # Traces come either as {"traceEvents": [...]} or as a bare event list.
  # NOTE(review): if `trace` is neither shape, in_events stays unbound and
  # the loop below raises NameError - confirm whether that can happen.
  if isinstance(trace, dict):
    in_events = trace.get('traceEvents', [])
  elif isinstance(trace, list) and isinstance(trace[0], dict):
    in_events = trace
  for evt in in_events:
    phase = evt.get('ph', '?')
    phase_count[phase] += 1
    # Drop all diagnostic events for memory-infra debugging.
    if phase not in ('v', 'V') and evt.get('cat', '').endswith('memory-infra'):
      continue
    # pass-through all the other non-memory-infra events
    if phase != 'v':
      out_events.append(evt)
      continue
    # Recreate the global dump groups: 'v' events sharing an id belong to
    # one global dump (apparently one event per process - see max_procs).
    event_id = evt['id']
    global_dumps.setdefault(event_id, [])
    global_dumps[event_id].append(evt)
  print 'Detected %d memory-infra global dumps' % len(global_dumps)
  if global_dumps:
    max_procs = max(len(x) for x in global_dumps.itervalues())
    print 'Max number of processes seen: %d' % max_procs
  ndumps = 2
  print 'Preserving the last %d memory-infra dumps' % ndumps
  # Split dumps by level of detail; prefer the detailed ones.
  detailed_dumps = []
  non_detailed_dumps = []
  for global_dump in global_dumps.itervalues():
    try:
      level_of_detail = global_dump[0]['args']['dumps']['level_of_detail']
    except KeyError:
      level_of_detail = None
    if level_of_detail == 'detailed':
      detailed_dumps.append(global_dump)
    else:
      non_detailed_dumps.append(global_dump)
  dumps_to_preserve = detailed_dumps[-ndumps:]
  ndumps -= len(dumps_to_preserve)
  # The guard matters: [-0:] would take the whole list, not zero elements.
  if ndumps:
    dumps_to_preserve += non_detailed_dumps[-ndumps:]
  for global_dump in dumps_to_preserve:
    out_events += global_dump
  print '\nEvents histogram for the original trace (count by phase)'
  print '--------------------------------------------------------'
  for phase, count in sorted(phase_count.items(), key=lambda x: x[1]):
    print '%s %d' % (phase, count)
  out_path = in_path.split('.json')[0] + '-filtered.json'
  print '\nWriting filtered trace to ' + out_path,
  with open(out_path, 'w') as fout:
    json.dump({'traceEvents': out_events}, fout)
    num_bytes_written = fout.tell()
  print ' (%s written)' % FormatBytes(num_bytes_written)
| [
"hanpfei@gmail.com"
] | hanpfei@gmail.com |
46cdc653fc60973ef289314fc703e4b92617f736 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/I_to_M_Gk3_no_pad/pyramid_tight_crop_size256_pad60/pyr_3s/bce_s001_tv_s0p1_L7/step10_a.py | 233c9845f52842f89df06df38f3c389a591562fa | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53,295 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the script currently being executed (step10_b.py)
code_exe_path_element = code_exe_path.split("\\") ### split the path so we can find how deep kong_model2 sits
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### find which path component is kong_model2
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 in the middle converts a length into an index
# print("        kong_to_py_layer:", kong_to_py_layer)
if  (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the "step1x_" prefix; later keeping a meaningful name seemed fine, so it became 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip "mask_" (added because python module names cannot start with a digit); the automatic ordering turned out acceptable, so it became 0
elif(kong_to_py_layer >  3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_3side_L7 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
# NOTE(review): the triple-quoted block below is the original (Chinese)
# explanation, kept verbatim.  It says: exp_dir is the *parent* folder name
# that determines result_dir, and nesting is fine — e.g. with
# exp_dir = "6_mask_unet/<your name>" the results land under
# 6_mask_unet/<your name>/result_a, .../result_b, ...
'''
exp_dir 是 決定 result_dir 的 "上一層"資料夾 名字喔! exp_dir要巢狀也沒問題~
比如:exp_dir = "6_mask_unet/自己命的名字",那 result_dir 就都在:
    6_mask_unet/自己命的名字/result_a
    6_mask_unet/自己命的名字/result_b
    6_mask_unet/自己命的名字/...
'''
use_db_obj = type9_mask_flow_have_bg_dtd_hdr_mix_and_paper    # dataset object — presumably provided by the step06_a_datas_obj star import; TODO confirm
use_loss_obj = [G_bce_s001_loss_info_builder.set_loss_target("UNet_Mask").copy()]    ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### Every experiment below differs only in the pyramid-strategy object it
### wraps, so the common Exp_builder call chain is factored into one private
### helper.  The helper performs exactly the same chain the inline version
### did: set_basic(...) -> set_train_args(epochs=60)
### -> set_train_in_gt_use_range(Range(0, 1) for both in and gt)
### -> set_result_name(result_name).
def _build_exp(pyramid_strategy, result_name=""):
    """Return a fully-configured Exp_builder wrapping *pyramid_strategy*."""
    builder = Exp_builder().set_basic(
        "train", use_db_obj, pyramid_strategy, use_loss_obj,
        exp_dir=exp_dir, code_exe_path=code_exe_path,
        describe_end=pyramid_strategy.kong_model.model_describe)
    builder = builder.set_train_args(epochs=60)
    builder = builder.set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1))
    return builder.set_result_name(result_name=result_name)

### Blank Exp_builder used only so result_analyze can draw an empty plot.
empty = _build_exp(ch032_pyramid_1side_1__2side_1__3side_1, result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
ch032_1side_1__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_1__2side_1__3side_1)
ch032_1side_2__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_2__2side_1__3side_1)
ch032_1side_2__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_1)
ch032_1side_2__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_2__2side_2__3side_2)
ch032_1side_3__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_3__2side_1__3side_1)
ch032_1side_3__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_1)
ch032_1side_3__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_3__2side_2__3side_2)
ch032_1side_3__2side_3__3side_1 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_1)
ch032_1side_3__2side_3__3side_2 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_2)
ch032_1side_3__2side_3__3side_3 = _build_exp(ch032_pyramid_1side_3__2side_3__3side_3)
ch032_1side_4__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_4__2side_1__3side_1)
ch032_1side_4__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_1)
ch032_1side_4__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_4__2side_2__3side_2)
ch032_1side_4__2side_3__3side_1 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_1)
ch032_1side_4__2side_3__3side_2 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_2)
ch032_1side_4__2side_3__3side_3 = _build_exp(ch032_pyramid_1side_4__2side_3__3side_3)
ch032_1side_4__2side_4__3side_1 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_1)
ch032_1side_4__2side_4__3side_2 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_2)
ch032_1side_4__2side_4__3side_3 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_3)
ch032_1side_4__2side_4__3side_4 = _build_exp(ch032_pyramid_1side_4__2side_4__3side_4)
ch032_1side_5__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_5__2side_1__3side_1)
ch032_1side_5__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_5__2side_2__3side_1)
ch032_1side_5__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_5__2side_2__3side_2)
ch032_1side_5__2side_3__3side_1 = _build_exp(ch032_pyramid_1side_5__2side_3__3side_1)
ch032_1side_5__2side_3__3side_2 = _build_exp(ch032_pyramid_1side_5__2side_3__3side_2)
ch032_1side_5__2side_3__3side_3 = _build_exp(ch032_pyramid_1side_5__2side_3__3side_3)
ch032_1side_5__2side_4__3side_1 = _build_exp(ch032_pyramid_1side_5__2side_4__3side_1)
ch032_1side_5__2side_4__3side_2 = _build_exp(ch032_pyramid_1side_5__2side_4__3side_2)
ch032_1side_5__2side_4__3side_3 = _build_exp(ch032_pyramid_1side_5__2side_4__3side_3)
ch032_1side_5__2side_4__3side_4 = _build_exp(ch032_pyramid_1side_5__2side_4__3side_4)
ch032_1side_5__2side_5__3side_1 = _build_exp(ch032_pyramid_1side_5__2side_5__3side_1)
ch032_1side_5__2side_5__3side_2 = _build_exp(ch032_pyramid_1side_5__2side_5__3side_2)
ch032_1side_5__2side_5__3side_3 = _build_exp(ch032_pyramid_1side_5__2side_5__3side_3)
ch032_1side_5__2side_5__3side_4 = _build_exp(ch032_pyramid_1side_5__2side_5__3side_4)
ch032_1side_5__2side_5__3side_5 = _build_exp(ch032_pyramid_1side_5__2side_5__3side_5)
ch032_1side_6__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_1__3side_1)
ch032_1side_6__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_2__3side_1)
ch032_1side_6__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_6__2side_2__3side_2)
ch032_1side_6__2side_3__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_3__3side_1)
ch032_1side_6__2side_3__3side_2 = _build_exp(ch032_pyramid_1side_6__2side_3__3side_2)
ch032_1side_6__2side_3__3side_3 = _build_exp(ch032_pyramid_1side_6__2side_3__3side_3)
ch032_1side_6__2side_4__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_4__3side_1)
ch032_1side_6__2side_4__3side_2 = _build_exp(ch032_pyramid_1side_6__2side_4__3side_2)
ch032_1side_6__2side_4__3side_3 = _build_exp(ch032_pyramid_1side_6__2side_4__3side_3)
ch032_1side_6__2side_4__3side_4 = _build_exp(ch032_pyramid_1side_6__2side_4__3side_4)
ch032_1side_6__2side_5__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_5__3side_1)
ch032_1side_6__2side_5__3side_2 = _build_exp(ch032_pyramid_1side_6__2side_5__3side_2)
ch032_1side_6__2side_5__3side_3 = _build_exp(ch032_pyramid_1side_6__2side_5__3side_3)
ch032_1side_6__2side_5__3side_4 = _build_exp(ch032_pyramid_1side_6__2side_5__3side_4)
ch032_1side_6__2side_5__3side_5 = _build_exp(ch032_pyramid_1side_6__2side_5__3side_5)
ch032_1side_6__2side_6__3side_1 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_1)
ch032_1side_6__2side_6__3side_2 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_2)
ch032_1side_6__2side_6__3side_3 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_3)
ch032_1side_6__2side_6__3side_4 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_4)
ch032_1side_6__2side_6__3side_5 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_5)
ch032_1side_6__2side_6__3side_6 = _build_exp(ch032_pyramid_1side_6__2side_6__3side_6)
ch032_1side_7__2side_1__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_1__3side_1)
ch032_1side_7__2side_2__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_2__3side_1)
ch032_1side_7__2side_2__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_2__3side_2)
ch032_1side_7__2side_3__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_3__3side_1)
ch032_1side_7__2side_3__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_3__3side_2)
ch032_1side_7__2side_3__3side_3 = _build_exp(ch032_pyramid_1side_7__2side_3__3side_3)
ch032_1side_7__2side_4__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_4__3side_1)
ch032_1side_7__2side_4__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_4__3side_2)
ch032_1side_7__2side_4__3side_3 = _build_exp(ch032_pyramid_1side_7__2side_4__3side_3)
ch032_1side_7__2side_4__3side_4 = _build_exp(ch032_pyramid_1side_7__2side_4__3side_4)
ch032_1side_7__2side_5__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_5__3side_1)
ch032_1side_7__2side_5__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_5__3side_2)
ch032_1side_7__2side_5__3side_3 = _build_exp(ch032_pyramid_1side_7__2side_5__3side_3)
ch032_1side_7__2side_5__3side_4 = _build_exp(ch032_pyramid_1side_7__2side_5__3side_4)
ch032_1side_7__2side_5__3side_5 = _build_exp(ch032_pyramid_1side_7__2side_5__3side_5)
ch032_1side_7__2side_6__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_1)
ch032_1side_7__2side_6__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_2)
ch032_1side_7__2side_6__3side_3 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_3)
ch032_1side_7__2side_6__3side_4 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_4)
ch032_1side_7__2side_6__3side_5 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_5)
ch032_1side_7__2side_6__3side_6 = _build_exp(ch032_pyramid_1side_7__2side_6__3side_6)
ch032_1side_7__2side_7__3side_1 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_1)
ch032_1side_7__2side_7__3side_2 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_2)
ch032_1side_7__2side_7__3side_3 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_3)
ch032_1side_7__2side_7__3side_4 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_4)
ch032_1side_7__2side_7__3side_5 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_5)
ch032_1side_7__2side_7__3side_6 = _build_exp(ch032_pyramid_1side_7__2side_7__3side_6)
ch032_1side_7__2side_7__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_7__2side_7__3side_7.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_1__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_1__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_1__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_2__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_2__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_2__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_2__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_2__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_2__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_3__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_3__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_3__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_3__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_3__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_4__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_4__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_4__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_4__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_4__3side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_4__3side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5__3side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_5__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_5__3side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_5__3side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_6__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_6__3side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_6__3side_6.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_6.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_7__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_7__3side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_7__3side_7.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_6.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_7, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_7.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_8__2side_8__3side_8 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_8__2side_8__3side_8, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_8__2side_8__3side_8.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
    # Building all the Exp_builder objects above takes a while; report how long.
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run directly (press F5, or `python step10_b1_exp_obj_load_and_train_and_test.py` with no
        ### extra arguments) -- this branch runs one hand-picked experiment and exits, so the
        ### subprocess-dispatch code below (meant for step10_b_subprocss.py) is never reached.
        ch032_1side_4__2side_3__3side_2.build().run()
        # print('no argument')
        sys.exit()
    ### The line below serves step10_b_subprocess.py: it is equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` from cmd.
    # HACK: eval() of a command-line argument executes arbitrary code. Acceptable only because
    # this is an internal tool invoked by trusted sibling scripts -- do not expose it further.
    eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
4d8406d067af9e8bc7b63302376f873e80d00c09 | 2b9289c6348a58a839501f3088030061046e2b6c | /local_dm_control_suite/hopper.py | fe253ac364d772509e4d399753d88138d689dcda | [
"MIT"
] | permissive | rohitkuk/mtenv | 2a612e03cfcb8f373ae2a7e7fb21e7fe108bbe1f | 4a6d9d6fdfb321f1b51f890ef36b5161359e972d | refs/heads/main | 2023-04-21T09:26:57.666390 | 2021-05-30T00:29:02 | 2021-05-30T00:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,194 | py | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hopper domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from . import base
from . import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
import numpy as np
# Registry that the @SUITE.add(...) decorators below populate with tasks.
SUITE = containers.TaggedTasks()

# Interval between agent actions, in seconds.
_CONTROL_TIMESTEP = 0.02  # (Seconds)

# Default duration of an episode, in seconds.
_DEFAULT_TIME_LIMIT = 20

# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 0.6

# Hopping speed above which hop reward is 1.
_HOP_SPEED = 2
def get_model_and_assets():
    """Returns a tuple containing the model XML string and a dict of assets."""
    model_xml = common.read_model("hopper.xml")
    return model_xml, common.ASSETS
@SUITE.add("benchmarking")
def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to stand upright, balancing its pose."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs
)
@SUITE.add("benchmarking")
def hop(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs
)
class Physics(mujoco.Physics):
    """Physics simulation with additional features for the Hopper domain."""

    def height(self):
        """Returns height of torso with respect to foot."""
        xipos = self.named.data.xipos
        return xipos["torso", "z"] - xipos["foot", "z"]

    def speed(self):
        """Returns horizontal speed of the Hopper."""
        torso_linvel = self.named.data.sensordata["torso_subtreelinvel"]
        # Only the x (forward) component matters.
        return torso_linvel[0]

    def touch(self):
        """Returns the signals from two foot touch sensors."""
        raw_touch = self.named.data.sensordata[["touch_toe", "touch_heel"]]
        # log1p compresses the large dynamic range of contact forces.
        return np.log1p(raw_touch)
class Hopper(base.Task):
    """A Hopper's `Task` to train a standing and a jumping Hopper."""

    def __init__(self, hopping, random=None):
        """Initialize an instance of `Hopper`.

        Args:
          hopping: Boolean, if True the task is to hop forwards, otherwise it
            is to balance upright.
          random: Optional, either a `numpy.random.RandomState` instance, an
            integer seed for creating a new `RandomState`, or None to select a
            seed automatically (default).
        """
        self._hopping = hopping
        super(Hopper, self).__init__(random=random)

    def initialize_episode(self, physics):
        """Sets the state of the environment at the start of each episode."""
        # Randomize joint configuration for episode diversity.
        randomizers.randomize_limited_and_rotational_joints(physics, self.random)
        self._timeout_progress = 0
        super(Hopper, self).initialize_episode(physics)

    def get_observation(self, physics):
        """Returns an observation of positions, velocities and touch sensors."""
        # qpos[0] (horizontal position) is dropped to keep the observation
        # invariant to translation.
        observation = collections.OrderedDict(
            [
                ("position", physics.data.qpos[1:].copy()),
                ("velocity", physics.velocity()),
                ("touch", physics.touch()),
            ]
        )
        return observation

    def get_reward(self, physics):
        """Returns a reward applicable to the performed task."""
        upright = rewards.tolerance(physics.height(), (_STAND_HEIGHT, 2))
        if not self._hopping:
            # Standing task: additionally reward small actuator commands.
            control_reward = rewards.tolerance(
                physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic"
            ).mean()
            control_reward = (control_reward + 4) / 5
            return upright * control_reward
        # Hopping task: reward forward speed at or above _HOP_SPEED.
        forward = rewards.tolerance(
            physics.speed(),
            bounds=(_HOP_SPEED, float("inf")),
            margin=_HOP_SPEED / 2,
            value_at_margin=0.5,
            sigmoid="linear",
        )
        return upright * forward
| [
"sodhani@fb.com"
] | sodhani@fb.com |
19478359aa8460360b3dff1eb95b58f9100ec364 | 7e6f17a6015ad3d20052562c142fa29a6e88e927 | /Dictionary/names-f.py | 00509e3fcd7ec90a87021fb04c4ad70ecd2a416f | [
"BSD-3-Clause"
] | permissive | voussoir/else | 9a6ce00c374cf254c7be5ebb741773ae735899b7 | 296cf5550aab13ceca5177f3357fbcebbe4a041f | refs/heads/master | 2023-07-09T15:08:51.649293 | 2023-07-04T18:37:50 | 2023-07-04T18:37:50 | 25,059,251 | 14 | 4 | null | null | null | null | UTF-8 | Python | false | false | 49,997 | py | words=['Aaren',
'Aarika',
'Abagael',
'Abagail',
'Abbe',
'Abbey',
'Abbi',
'Abbie',
'Abby',
'Abbye',
'Abigael',
'Abigail',
'Abigale',
'Abra',
'Ada',
'Adah',
'Adaline',
'Adan',
'Adara',
'Adda',
'Addi',
'Addia',
'Addie',
'Addy',
'Adel',
'Adela',
'Adelaida',
'Adelaide',
'Adele',
'Adelheid',
'Adelice',
'Adelina',
'Adelind',
'Adeline',
'Adella',
'Adelle',
'Adena',
'Adey',
'Adi',
'Adiana',
'Adina',
'Adora',
'Adore',
'Adoree',
'Adorne',
'Adrea',
'Adria',
'Adriaens',
'Adrian',
'Adriana',
'Adriane',
'Adrianna',
'Adrianne',
'Adriena',
'Adrienne',
'Aeriel',
'Aeriela',
'Aeriell',
'Afton',
'Ag',
'Agace',
'Agata',
'Agatha',
'Agathe',
'Aggi',
'Aggie',
'Aggy',
'Agna',
'Agnella',
'Agnes',
'Agnese',
'Agnesse',
'Agneta',
'Agnola',
'Agretha',
'Aida',
'Aidan',
'Aigneis',
'Aila',
'Aile',
'Ailee',
'Aileen',
'Ailene',
'Ailey',
'Aili',
'Ailina',
'Ailis',
'Ailsun',
'Ailyn',
'Aime',
'Aimee',
'Aimil',
'Aindrea',
'Ainslee',
'Ainsley',
'Ainslie',
'Ajay',
'Alaine',
'Alameda',
'Alana',
'Alanah',
'Alane',
'Alanna',
'Alayne',
'Alberta',
'Albertina',
'Albertine',
'Albina',
'Alecia',
'Aleda',
'Aleece',
'Aleen',
'Alejandra',
'Alejandrina',
'Alena',
'Alene',
'Alessandra',
'Aleta',
'Alethea',
'Alex',
'Alexa',
'Alexandra',
'Alexandrina',
'Alexi',
'Alexia',
'Alexina',
'Alexine',
'Alexis',
'Alfi',
'Alfie',
'Alfreda',
'Alfy',
'Ali',
'Alia',
'Alica',
'Alice',
'Alicea',
'Alicia',
'Alida',
'Alidia',
'Alie',
'Alika',
'Alikee',
'Alina',
'Aline',
'Alis',
'Alisa',
'Alisha',
'Alison',
'Alissa',
'Alisun',
'Alix',
'Aliza',
'Alla',
'Alleen',
'Allegra',
'Allene',
'Alli',
'Allianora',
'Allie',
'Allina',
'Allis',
'Allison',
'Allissa',
'Allix',
'Allsun',
'Allx',
'Ally',
'Allyce',
'Allyn',
'Allys',
'Allyson',
'Alma',
'Almeda',
'Almeria',
'Almeta',
'Almira',
'Almire',
'Aloise',
'Aloisia',
'Aloysia',
'Alta',
'Althea',
'Alvera',
'Alverta',
'Alvina',
'Alvinia',
'Alvira',
'Alyce',
'Alyda',
'Alys',
'Alysa',
'Alyse',
'Alysia',
'Alyson',
'Alyss',
'Alyssa',
'Amabel',
'Amabelle',
'Amalea',
'Amalee',
'Amaleta',
'Amalia',
'Amalie',
'Amalita',
'Amalle',
'Amanda',
'Amandi',
'Amandie',
'Amandy',
'Amara',
'Amargo',
'Amata',
'Amber',
'Amberly',
'Ambur',
'Ame',
'Amelia',
'Amelie',
'Amelina',
'Ameline',
'Amelita',
'Ami',
'Amie',
'Amii',
'Amil',
'Amitie',
'Amity',
'Ammamaria',
'Amy',
'Amye',
'Ana',
'Anabal',
'Anabel',
'Anabella',
'Anabelle',
'Analiese',
'Analise',
'Anallese',
'Anallise',
'Anastasia',
'Anastasie',
'Anastassia',
'Anatola',
'Andee',
'Andeee',
'Anderea',
'Andi',
'Andie',
'Andra',
'Andrea',
'Andreana',
'Andree',
'Andrei',
'Andria',
'Andriana',
'Andriette',
'Andromache',
'Andy',
'Anestassia',
'Anet',
'Anett',
'Anetta',
'Anette',
'Ange',
'Angel',
'Angela',
'Angele',
'Angelia',
'Angelica',
'Angelika',
'Angelina',
'Angeline',
'Angelique',
'Angelita',
'Angelle',
'Angie',
'Angil',
'Angy',
'Ania',
'Anica',
'Anissa',
'Anita',
'Anitra',
'Anjanette',
'Anjela',
'Ann',
'Ann-Marie',
'Anna',
'Anna-Diana',
'Anna-Diane',
'Anna-Maria',
'Annabal',
'Annabel',
'Annabela',
'Annabell',
'Annabella',
'Annabelle',
'Annadiana',
'Annadiane',
'Annalee',
'Annaliese',
'Annalise',
'Annamaria',
'Annamarie',
'Anne',
'Anne-Corinne',
'Anne-Marie',
'Annecorinne',
'Anneliese',
'Annelise',
'Annemarie',
'Annetta',
'Annette',
'Anni',
'Annice',
'Annie',
'Annis',
'Annissa',
'Annmaria',
'Annmarie',
'Annnora',
'Annora',
'Anny',
'Anselma',
'Ansley',
'Anstice',
'Anthe',
'Anthea',
'Anthia',
'Anthiathia',
'Antoinette',
'Antonella',
'Antonetta',
'Antonia',
'Antonie',
'Antonietta',
'Antonina',
'Anya',
'Appolonia',
'April',
'Aprilette',
'Ara',
'Arabel',
'Arabela',
'Arabele',
'Arabella',
'Arabelle',
'Arda',
'Ardath',
'Ardeen',
'Ardelia',
'Ardelis',
'Ardella',
'Ardelle',
'Arden',
'Ardene',
'Ardenia',
'Ardine',
'Ardis',
'Ardisj',
'Ardith',
'Ardra',
'Ardyce',
'Ardys',
'Ardyth',
'Aretha',
'Ariadne',
'Ariana',
'Aridatha',
'Ariel',
'Ariela',
'Ariella',
'Arielle',
'Arlana',
'Arlee',
'Arleen',
'Arlen',
'Arlena',
'Arlene',
'Arleta',
'Arlette',
'Arleyne',
'Arlie',
'Arliene',
'Arlina',
'Arlinda',
'Arline',
'Arluene',
'Arly',
'Arlyn',
'Arlyne',
'Aryn',
'Ashely',
'Ashia',
'Ashien',
'Ashil',
'Ashla',
'Ashlan',
'Ashlee',
'Ashleigh',
'Ashlen',
'Ashley',
'Ashli',
'Ashlie',
'Ashly',
'Asia',
'Astra',
'Astrid',
'Astrix',
'Atalanta',
'Athena',
'Athene',
'Atlanta',
'Atlante',
'Auberta',
'Aubine',
'Aubree',
'Aubrette',
'Aubrey',
'Aubrie',
'Aubry',
'Audi',
'Audie',
'Audra',
'Audre',
'Audrey',
'Audrie',
'Audry',
'Audrye',
'Audy',
'Augusta',
'Auguste',
'Augustina',
'Augustine',
'Aundrea',
'Aura',
'Aurea',
'Aurel',
'Aurelea',
'Aurelia',
'Aurelie',
'Auria',
'Aurie',
'Aurilia',
'Aurlie',
'Auroora',
'Aurora',
'Aurore',
'Austin',
'Austina',
'Austine',
'Ava',
'Aveline',
'Averil',
'Averyl',
'Avie',
'Avis',
'Aviva',
'Avivah',
'Avril',
'Avrit',
'Ayn',
'Bab',
'Babara',
'Babb',
'Babbette',
'Babbie',
'Babette',
'Babita',
'Babs',
'Bambi',
'Bambie',
'Bamby',
'Barb',
'Barbabra',
'Barbara',
'Barbara-Anne',
'Barbaraanne',
'Barbe',
'Barbee',
'Barbette',
'Barbey',
'Barbi',
'Barbie',
'Barbra',
'Barby',
'Bari',
'Barrie',
'Barry',
'Basia',
'Bathsheba',
'Batsheva',
'Bea',
'Beatrice',
'Beatrisa',
'Beatrix',
'Beatriz',
'Bebe',
'Becca',
'Becka',
'Becki',
'Beckie',
'Becky',
'Bee',
'Beilul',
'Beitris',
'Bekki',
'Bel',
'Belia',
'Belicia',
'Belinda',
'Belita',
'Bell',
'Bella',
'Bellanca',
'Belle',
'Bellina',
'Belva',
'Belvia',
'Bendite',
'Benedetta',
'Benedicta',
'Benedikta',
'Benetta',
'Benita',
'Benni',
'Bennie',
'Benny',
'Benoite',
'Berenice',
'Beret',
'Berget',
'Berna',
'Bernadene',
'Bernadette',
'Bernadina',
'Bernadine',
'Bernardina',
'Bernardine',
'Bernelle',
'Bernete',
'Bernetta',
'Bernette',
'Berni',
'Bernice',
'Bernie',
'Bernita',
'Berny',
'Berri',
'Berrie',
'Berry',
'Bert',
'Berta',
'Berte',
'Bertha',
'Berthe',
'Berti',
'Bertie',
'Bertina',
'Bertine',
'Berty',
'Beryl',
'Beryle',
'Bess',
'Bessie',
'Bessy',
'Beth',
'Bethanne',
'Bethany',
'Bethena',
'Bethina',
'Betsey',
'Betsy',
'Betta',
'Bette',
'Bette-Ann',
'Betteann',
'Betteanne',
'Betti',
'Bettina',
'Bettine',
'Betty',
'Bettye',
'Beulah',
'Bev',
'Beverie',
'Beverlee',
'Beverley',
'Beverlie',
'Beverly',
'Bevvy',
'Bianca',
'Bianka',
'Bibbie',
'Bibby',
'Bibbye',
'Bibi',
'Biddie',
'Biddy',
'Bidget',
'Bili',
'Bill',
'Billi',
'Billie',
'Billy',
'Billye',
'Binni',
'Binnie',
'Binny',
'Bird',
'Birdie',
'Birgit',
'Birgitta',
'Blair',
'Blaire',
'Blake',
'Blakelee',
'Blakeley',
'Blanca',
'Blanch',
'Blancha',
'Blanche',
'Blinni',
'Blinnie',
'Blinny',
'Bliss',
'Blisse',
'Blithe',
'Blondell',
'Blondelle',
'Blondie',
'Blondy',
'Blythe',
'Bobbe',
'Bobbee',
'Bobbette',
'Bobbi',
'Bobbie',
'Bobby',
'Bobbye',
'Bobette',
'Bobina',
'Bobine',
'Bobinette',
'Bonita',
'Bonnee',
'Bonni',
'Bonnibelle',
'Bonnie',
'Bonny',
'Brana',
'Brandais',
'Brande',
'Brandea',
'Brandi',
'Brandice',
'Brandie',
'Brandise',
'Brandy',
'Breanne',
'Brear',
'Bree',
'Breena',
'Bren',
'Brena',
'Brenda',
'Brenn',
'Brenna',
'Brett',
'Bria',
'Briana',
'Brianna',
'Brianne',
'Bride',
'Bridget',
'Bridgette',
'Bridie',
'Brier',
'Brietta',
'Brigid',
'Brigida',
'Brigit',
'Brigitta',
'Brigitte',
'Brina',
'Briney',
'Brinn',
'Brinna',
'Briny',
'Brit',
'Brita',
'Britney',
'Britni',
'Britt',
'Britta',
'Brittan',
'Brittaney',
'Brittani',
'Brittany',
'Britte',
'Britteny',
'Brittne',
'Brittney',
'Brittni',
'Brook',
'Brooke',
'Brooks',
'Brunhilda',
'Brunhilde',
'Bryana',
'Bryn',
'Bryna',
'Brynn',
'Brynna',
'Brynne',
'Buffy',
'Bunni',
'Bunnie',
'Bunny',
'Cacilia',
'Cacilie',
'Cahra',
'Cairistiona',
'Caitlin',
'Caitrin',
'Cal',
'Calida',
'Calla',
'Calley',
'Calli',
'Callida',
'Callie',
'Cally',
'Calypso',
'Cam',
'Camala',
'Camel',
'Camella',
'Camellia',
'Cami',
'Camila',
'Camile',
'Camilla',
'Camille',
'Cammi',
'Cammie',
'Cammy',
'Candace',
'Candi',
'Candice',
'Candida',
'Candide',
'Candie',
'Candis',
'Candra',
'Candy',
'Caprice',
'Cara',
'Caralie',
'Caren',
'Carena',
'Caresa',
'Caressa',
'Caresse',
'Carey',
'Cari',
'Caria',
'Carie',
'Caril',
'Carilyn',
'Carin',
'Carina',
'Carine',
'Cariotta',
'Carissa',
'Carita',
'Caritta',
'Carla',
'Carlee',
'Carleen',
'Carlen',
'Carlene',
'Carley',
'Carlie',
'Carlin',
'Carlina',
'Carline',
'Carlita',
'Carlota',
'Carlotta',
'Carly',
'Carlye',
'Carlyn',
'Carlynn',
'Carlynne',
'Carma',
'Carmel',
'Carmela',
'Carmelia',
'Carmelina',
'Carmelita',
'Carmella',
'Carmelle',
'Carmen',
'Carmencita',
'Carmina',
'Carmine',
'Carmita',
'Carmon',
'Caro',
'Carol',
'Carol-Jean',
'Carola',
'Carolan',
'Carolann',
'Carole',
'Carolee',
'Carolin',
'Carolina',
'Caroline',
'Caroljean',
'Carolyn',
'Carolyne',
'Carolynn',
'Caron',
'Carree',
'Carri',
'Carrie',
'Carrissa',
'Carroll',
'Carry',
'Cary',
'Caryl',
'Caryn',
'Casandra',
'Casey',
'Casi',
'Casie',
'Cass',
'Cassandra',
'Cassandre',
'Cassandry',
'Cassaundra',
'Cassey',
'Cassi',
'Cassie',
'Cassondra',
'Cassy',
'Catarina',
'Cate',
'Caterina',
'Catha',
'Catharina',
'Catharine',
'Cathe',
'Cathee',
'Catherin',
'Catherina',
'Catherine',
'Cathi',
'Cathie',
'Cathleen',
'Cathlene',
'Cathrin',
'Cathrine',
'Cathryn',
'Cathy',
'Cathyleen',
'Cati',
'Catie',
'Catina',
'Catlaina',
'Catlee',
'Catlin',
'Catrina',
'Catriona',
'Caty',
'Caye',
'Cayla',
'Cecelia',
'Cecil',
'Cecile',
'Ceciley',
'Cecilia',
'Cecilla',
'Cecily',
'Ceil',
'Cele',
'Celene',
'Celesta',
'Celeste',
'Celestia',
'Celestina',
'Celestine',
'Celestyn',
'Celestyna',
'Celia',
'Celie',
'Celina',
'Celinda',
'Celine',
'Celinka',
'Celisse',
'Celka',
'Celle',
'Cesya',
'Chad',
'Chanda',
'Chandal',
'Chandra',
'Channa',
'Chantal',
'Chantalle',
'Charil',
'Charin',
'Charis',
'Charissa',
'Charisse',
'Charita',
'Charity',
'Charla',
'Charlean',
'Charleen',
'Charlena',
'Charlene',
'Charline',
'Charlot',
'Charlotta',
'Charlotte',
'Charmain',
'Charmaine',
'Charmane',
'Charmian',
'Charmine',
'Charmion',
'Charo',
'Charyl',
'Chastity',
'Chelsae',
'Chelsea',
'Chelsey',
'Chelsie',
'Chelsy',
'Cher',
'Chere',
'Cherey',
'Cheri',
'Cherianne',
'Cherice',
'Cherida',
'Cherie',
'Cherilyn',
'Cherilynn',
'Cherin',
'Cherise',
'Cherish',
'Cherlyn',
'Cherri',
'Cherrita',
'Cherry',
'Chery',
'Cherye',
'Cheryl',
'Cheslie',
'Chiarra',
'Chickie',
'Chicky',
'Chiquia',
'Chiquita',
'Chlo',
'Chloe',
'Chloette',
'Chloris',
'Chris',
'Chrissie',
'Chrissy',
'Christa',
'Christabel',
'Christabella',
'Christal',
'Christalle',
'Christan',
'Christean',
'Christel',
'Christen',
'Christi',
'Christian',
'Christiana',
'Christiane',
'Christie',
'Christin',
'Christina',
'Christine',
'Christy',
'Christye',
'Christyna',
'Chrysa',
'Chrysler',
'Chrystal',
'Chryste',
'Chrystel',
'Cicely',
'Cicily',
'Ciel',
'Cilka',
'Cinda',
'Cindee',
'Cindelyn',
'Cinderella',
'Cindi',
'Cindie',
'Cindra',
'Cindy',
'Cinnamon',
'Cissiee',
'Cissy',
'Clair',
'Claire',
'Clara',
'Clarabelle',
'Clare',
'Claresta',
'Clareta',
'Claretta',
'Clarette',
'Clarey',
'Clari',
'Claribel',
'Clarice',
'Clarie',
'Clarinda',
'Clarine',
'Clarissa',
'Clarisse',
'Clarita',
'Clary',
'Claude',
'Claudelle',
'Claudetta',
'Claudette',
'Claudia',
'Claudie',
'Claudina',
'Claudine',
'Clea',
'Clem',
'Clemence',
'Clementia',
'Clementina',
'Clementine',
'Clemmie',
'Clemmy',
'Cleo',
'Cleopatra',
'Clerissa',
'Clio',
'Clo',
'Cloe',
'Cloris',
'Clotilda',
'Clovis',
'Codee',
'Codi',
'Codie',
'Cody',
'Coleen',
'Colene',
'Coletta',
'Colette',
'Colleen',
'Collen',
'Collete',
'Collette',
'Collie',
'Colline',
'Colly',
'Con',
'Concettina',
'Conchita',
'Concordia',
'Conni',
'Connie',
'Conny',
'Consolata',
'Constance',
'Constancia',
'Constancy',
'Constanta',
'Constantia',
'Constantina',
'Constantine',
'Consuela',
'Consuelo',
'Cookie',
'Cora',
'Corabel',
'Corabella',
'Corabelle',
'Coral',
'Coralie',
'Coraline',
'Coralyn',
'Cordelia',
'Cordelie',
'Cordey',
'Cordi',
'Cordie',
'Cordula',
'Cordy',
'Coreen',
'Corella',
'Corenda',
'Corene',
'Coretta',
'Corette',
'Corey',
'Cori',
'Corie',
'Corilla',
'Corina',
'Corine',
'Corinna',
'Corinne',
'Coriss',
'Corissa',
'Corliss',
'Corly',
'Cornela',
'Cornelia',
'Cornelle',
'Cornie',
'Corny',
'Correna',
'Correy',
'Corri',
'Corrianne',
'Corrie',
'Corrina',
'Corrine',
'Corrinne',
'Corry',
'Cortney',
'Cory',
'Cosetta',
'Cosette',
'Costanza',
'Courtenay',
'Courtnay',
'Courtney',
'Crin',
'Cris',
'Crissie',
'Crissy',
'Crista',
'Cristabel',
'Cristal',
'Cristen',
'Cristi',
'Cristie',
'Cristin',
'Cristina',
'Cristine',
'Cristionna',
'Cristy',
'Crysta',
'Crystal',
'Crystie',
'Cthrine',
'Cyb',
'Cybil',
'Cybill',
'Cymbre',
'Cynde',
'Cyndi',
'Cyndia',
'Cyndie',
'Cyndy',
'Cynthea',
'Cynthia',
'Cynthie',
'Cynthy',
'Dacey',
'Dacia',
'Dacie',
'Dacy',
'Dael',
'Daffi',
'Daffie',
'Daffy',
'Dagmar',
'Dahlia',
'Daile',
'Daisey',
'Daisi',
'Daisie',
'Daisy',
'Dale',
'Dalenna',
'Dalia',
'Dalila',
'Dallas',
'Daloris',
'Damara',
'Damaris',
'Damita',
'Dana',
'Danell',
'Danella',
'Danette',
'Dani',
'Dania',
'Danica',
'Danice',
'Daniela',
'Daniele',
'Daniella',
'Danielle',
'Danika',
'Danila',
'Danit',
'Danita',
'Danna',
'Danni',
'Dannie',
'Danny',
'Dannye',
'Danya',
'Danyelle',
'Danyette',
'Daphene',
'Daphna',
'Daphne',
'Dara',
'Darb',
'Darbie',
'Darby',
'Darcee',
'Darcey',
'Darci',
'Darcie',
'Darcy',
'Darda',
'Dareen',
'Darell',
'Darelle',
'Dari',
'Daria',
'Darice',
'Darla',
'Darleen',
'Darlene',
'Darline',
'Darlleen',
'Daron',
'Darrelle',
'Darryl',
'Darsey',
'Darsie',
'Darya',
'Daryl',
'Daryn',
'Dasha',
'Dasi',
'Dasie',
'Dasya',
'Datha',
'Daune',
'Daveen',
'Daveta',
'Davida',
'Davina',
'Davine',
'Davita',
'Dawn',
'Dawna',
'Dayle',
'Dayna',
'Ddene',
'De',
'Deana',
'Deane',
'Deanna',
'Deanne',
'Deb',
'Debbi',
'Debbie',
'Debby',
'Debee',
'Debera',
'Debi',
'Debor',
'Debora',
'Deborah',
'Debra',
'Dede',
'Dedie',
'Dedra',
'Dee',
'Dee Dee',
'Deeann',
'Deeanne',
'Deedee',
'Deena',
'Deerdre',
'Deeyn',
'Dehlia',
'Deidre',
'Deina',
'Deirdre',
'Del',
'Dela',
'Delcina',
'Delcine',
'Delia',
'Delila',
'Delilah',
'Delinda',
'Dell',
'Della',
'Delly',
'Delora',
'Delores',
'Deloria',
'Deloris',
'Delphine',
'Delphinia',
'Demeter',
'Demetra',
'Demetria',
'Demetris',
'Dena',
'Deni',
'Denice',
'Denise',
'Denna',
'Denni',
'Dennie',
'Denny',
'Deny',
'Denys',
'Denyse',
'Deonne',
'Desdemona',
'Desirae',
'Desiree',
'Desiri',
'Deva',
'Devan',
'Devi',
'Devin',
'Devina',
'Devinne',
'Devon',
'Devondra',
'Devonna',
'Devonne',
'Devora',
'Di',
'Diahann',
'Dian',
'Diana',
'Diandra',
'Diane',
'Diane-Marie',
'Dianemarie',
'Diann',
'Dianna',
'Dianne',
'Diannne',
'Didi',
'Dido',
'Diena',
'Dierdre',
'Dina',
'Dinah',
'Dinnie',
'Dinny',
'Dion',
'Dione',
'Dionis',
'Dionne',
'Dita',
'Dix',
'Dixie',
'Dniren',
'Dode',
'Dodi',
'Dodie',
'Dody',
'Doe',
'Doll',
'Dolley',
'Dolli',
'Dollie',
'Dolly',
'Dolores',
'Dolorita',
'Doloritas',
'Domeniga',
'Dominga',
'Domini',
'Dominica',
'Dominique',
'Dona',
'Donella',
'Donelle',
'Donetta',
'Donia',
'Donica',
'Donielle',
'Donna',
'Donnamarie',
'Donni',
'Donnie',
'Donny',
'Dora',
'Doralia',
'Doralin',
'Doralyn',
'Doralynn',
'Doralynne',
'Dore',
'Doreen',
'Dorelia',
'Dorella',
'Dorelle',
'Dorena',
'Dorene',
'Doretta',
'Dorette',
'Dorey',
'Dori',
'Doria',
'Dorian',
'Dorice',
'Dorie',
'Dorine',
'Doris',
'Dorisa',
'Dorise',
'Dorita',
'Doro',
'Dorolice',
'Dorolisa',
'Dorotea',
'Doroteya',
'Dorothea',
'Dorothee',
'Dorothy',
'Dorree',
'Dorri',
'Dorrie',
'Dorris',
'Dorry',
'Dorthea',
'Dorthy',
'Dory',
'Dosi',
'Dot',
'Doti',
'Dotti',
'Dottie',
'Dotty',
'Dre',
'Dreddy',
'Dredi',
'Drona',
'Dru',
'Druci',
'Drucie',
'Drucill',
'Drucy',
'Drusi',
'Drusie',
'Drusilla',
'Drusy',
'Dulce',
'Dulcea',
'Dulci',
'Dulcia',
'Dulciana',
'Dulcie',
'Dulcine',
'Dulcinea',
'Dulcy',
'Dulsea',
'Dusty',
'Dyan',
'Dyana',
'Dyane',
'Dyann',
'Dyanna',
'Dyanne',
'Dyna',
'Dynah',
'Eachelle',
'Eada',
'Eadie',
'Eadith',
'Ealasaid',
'Eartha',
'Easter',
'Eba',
'Ebba',
'Ebonee',
'Ebony',
'Eda',
'Eddi',
'Eddie',
'Eddy',
'Ede',
'Edee',
'Edeline',
'Eden',
'Edi',
'Edie',
'Edin',
'Edita',
'Edith',
'Editha',
'Edithe',
'Ediva',
'Edna',
'Edwina',
'Edy',
'Edyth',
'Edythe',
'Effie',
'Eileen',
'Eilis',
'Eimile',
'Eirena',
'Ekaterina',
'Elaina',
'Elaine',
'Elana',
'Elane',
'Elayne',
'Elberta',
'Elbertina',
'Elbertine',
'Eleanor',
'Eleanora',
'Eleanore',
'Electra',
'Eleen',
'Elena',
'Elene',
'Eleni',
'Elenore',
'Eleonora',
'Eleonore',
'Elfie',
'Elfreda',
'Elfrida',
'Elfrieda',
'Elga',
'Elianora',
'Elianore',
'Elicia',
'Elie',
'Elinor',
'Elinore',
'Elisa',
'Elisabet',
'Elisabeth',
'Elisabetta',
'Elise',
'Elisha',
'Elissa',
'Elita',
'Eliza',
'Elizabet',
'Elizabeth',
'Elka',
'Elke',
'Ella',
'Elladine',
'Elle',
'Ellen',
'Ellene',
'Ellette',
'Elli',
'Ellie',
'Ellissa',
'Elly',
'Ellyn',
'Ellynn',
'Elmira',
'Elna',
'Elnora',
'Elnore',
'Eloisa',
'Eloise',
'Elonore',
'Elora',
'Elsa',
'Elsbeth',
'Else',
'Elset',
'Elsey',
'Elsi',
'Elsie',
'Elsinore',
'Elspeth',
'Elsy',
'Elva',
'Elvera',
'Elvina',
'Elvira',
'Elwira',
'Elyn',
'Elyse',
'Elysee',
'Elysha',
'Elysia',
'Elyssa',
'Em',
'Ema',
'Emalee',
'Emalia',
'Emelda',
'Emelia',
'Emelina',
'Emeline',
'Emelita',
'Emelyne',
'Emera',
'Emilee',
'Emili',
'Emilia',
'Emilie',
'Emiline',
'Emily',
'Emlyn',
'Emlynn',
'Emlynne',
'Emma',
'Emmalee',
'Emmaline',
'Emmalyn',
'Emmalynn',
'Emmalynne',
'Emmeline',
'Emmey',
'Emmi',
'Emmie',
'Emmy',
'Emmye',
'Emogene',
'Emyle',
'Emylee',
'Engracia',
'Enid',
'Enrica',
'Enrichetta',
'Enrika',
'Enriqueta',
'Eolanda',
'Eolande',
'Eran',
'Erda',
'Erena',
'Erica',
'Ericha',
'Ericka',
'Erika',
'Erin',
'Erina',
'Erinn',
'Erinna',
'Erma',
'Ermengarde',
'Ermentrude',
'Ermina',
'Erminia',
'Erminie',
'Erna',
'Ernaline',
'Ernesta',
'Ernestine',
'Ertha',
'Eryn',
'Esma',
'Esmaria',
'Esme',
'Esmeralda',
'Essa',
'Essie',
'Essy',
'Esta',
'Estel',
'Estele',
'Estell',
'Estella',
'Estelle',
'Ester',
'Esther',
'Estrella',
'Estrellita',
'Ethel',
'Ethelda',
'Ethelin',
'Ethelind',
'Etheline',
'Ethelyn',
'Ethyl',
'Etta',
'Etti',
'Ettie',
'Etty',
'Eudora',
'Eugenia',
'Eugenie',
'Eugine',
'Eula',
'Eulalie',
'Eunice',
'Euphemia',
'Eustacia',
'Eva',
'Evaleen',
'Evangelia',
'Evangelin',
'Evangelina',
'Evangeline',
'Evania',
'Evanne',
'Eve',
'Eveleen',
'Evelina',
'Eveline',
'Evelyn',
'Evey',
'Evie',
'Evita',
'Evonne',
'Evvie',
'Evvy',
'Evy',
'Eyde',
'Eydie',
'Ezmeralda',
'Fae',
'Faina',
'Faith',
'Fallon',
'Fan',
'Fanchette',
'Fanchon',
'Fancie',
'Fancy',
'Fanechka',
'Fania',
'Fanni',
'Fannie',
'Fanny',
'Fanya',
'Fara',
'Farah',
'Farand',
'Farica',
'Farra',
'Farrah',
'Farrand',
'Faun',
'Faunie',
'Faustina',
'Faustine',
'Fawn',
'Fawne',
'Fawnia',
'Fay',
'Faydra',
'Faye',
'Fayette',
'Fayina',
'Fayre',
'Fayth',
'Faythe',
'Federica',
'Fedora',
'Felecia',
'Felicdad',
'Felice',
'Felicia',
'Felicity',
'Felicle',
'Felipa',
'Felisha',
'Felita',
'Feliza',
'Fenelia',
'Feodora',
'Ferdinanda',
'Ferdinande',
'Fern',
'Fernanda',
'Fernande',
'Fernandina',
'Ferne',
'Fey',
'Fiann',
'Fianna',
'Fidela',
'Fidelia',
'Fidelity',
'Fifi',
'Fifine',
'Filia',
'Filide',
'Filippa',
'Fina',
'Fiona',
'Fionna',
'Fionnula',
'Fiorenze',
'Fleur',
'Fleurette',
'Flo',
'Flor',
'Flora',
'Florance',
'Flore',
'Florella',
'Florence',
'Florencia',
'Florentia',
'Florenza',
'Florette',
'Flori',
'Floria',
'Florida',
'Florie',
'Florina',
'Florinda',
'Floris',
'Florri',
'Florrie',
'Florry',
'Flory',
'Flossi',
'Flossie',
'Flossy',
'Flss',
'Fran',
'Francene',
'Frances',
'Francesca',
'Francine',
'Francisca',
'Franciska',
'Francoise',
'Francyne',
'Frank',
'Frankie',
'Franky',
'Franni',
'Frannie',
'Franny',
'Frayda',
'Fred',
'Freda',
'Freddi',
'Freddie',
'Freddy',
'Fredelia',
'Frederica',
'Fredericka',
'Frederique',
'Fredi',
'Fredia',
'Fredra',
'Fredrika',
'Freida',
'Frieda',
'Friederike',
'Fulvia',
'Gabbey',
'Gabbi',
'Gabbie',
'Gabey',
'Gabi',
'Gabie',
'Gabriel',
'Gabriela',
'Gabriell',
'Gabriella',
'Gabrielle',
'Gabriellia',
'Gabrila',
'Gaby',
'Gae',
'Gael',
'Gail',
'Gale',
'Gale',
'Galina',
'Garland',
'Garnet',
'Garnette',
'Gates',
'Gavra',
'Gavrielle',
'Gay',
'Gaye',
'Gayel',
'Gayla',
'Gayle',
'Gayleen',
'Gaylene',
'Gaynor',
'Gelya',
'Gena',
'Gene',
'Geneva',
'Genevieve',
'Genevra',
'Genia',
'Genna',
'Genni',
'Gennie',
'Gennifer',
'Genny',
'Genovera',
'Genvieve',
'George',
'Georgeanna',
'Georgeanne',
'Georgena',
'Georgeta',
'Georgetta',
'Georgette',
'Georgia',
'Georgiana',
'Georgianna',
'Georgianne',
'Georgie',
'Georgina',
'Georgine',
'Geralda',
'Geraldine',
'Gerda',
'Gerhardine',
'Geri',
'Gerianna',
'Gerianne',
'Gerladina',
'Germain',
'Germaine',
'Germana',
'Gerri',
'Gerrie',
'Gerrilee',
'Gerry',
'Gert',
'Gerta',
'Gerti',
'Gertie',
'Gertrud',
'Gertruda',
'Gertrude',
'Gertrudis',
'Gerty',
'Giacinta',
'Giana',
'Gianina',
'Gianna',
'Gigi',
'Gilberta',
'Gilberte',
'Gilbertina',
'Gilbertine',
'Gilda',
'Gilemette',
'Gill',
'Gillan',
'Gilli',
'Gillian',
'Gillie',
'Gilligan',
'Gilly',
'Gina',
'Ginelle',
'Ginevra',
'Ginger',
'Ginni',
'Ginnie',
'Ginnifer',
'Ginny',
'Giorgia',
'Giovanna',
'Gipsy',
'Giralda',
'Gisela',
'Gisele',
'Gisella',
'Giselle',
'Giuditta',
'Giulia',
'Giulietta',
'Giustina',
'Gizela',
'Glad',
'Gladi',
'Gladys',
'Gleda',
'Glen',
'Glenda',
'Glenine',
'Glenn',
'Glenna',
'Glennie',
'Glennis',
'Glori',
'Gloria',
'Gloriana',
'Gloriane',
'Glory',
'Glyn',
'Glynda',
'Glynis',
'Glynnis',
'Gnni',
'Godiva',
'Golda',
'Goldarina',
'Goldi',
'Goldia',
'Goldie',
'Goldina',
'Goldy',
'Grace',
'Gracia',
'Gracie',
'Grata',
'Gratia',
'Gratiana',
'Gray',
'Grayce',
'Grazia',
'Greer',
'Greta',
'Gretal',
'Gretchen',
'Grete',
'Gretel',
'Grethel',
'Gretna',
'Gretta',
'Grier',
'Griselda',
'Grissel',
'Guendolen',
'Guenevere',
'Guenna',
'Guglielma',
'Gui',
'Guillema',
'Guillemette',
'Guinevere',
'Guinna',
'Gunilla',
'Gus',
'Gusella',
'Gussi',
'Gussie',
'Gussy',
'Gusta',
'Gusti',
'Gustie',
'Gusty',
'Gwen',
'Gwendolen',
'Gwendolin',
'Gwendolyn',
'Gweneth',
'Gwenette',
'Gwenneth',
'Gwenni',
'Gwennie',
'Gwenny',
'Gwenora',
'Gwenore',
'Gwyn',
'Gwyneth',
'Gwynne',
'Gypsy',
'Hadria',
'Hailee',
'Haily',
'Haleigh',
'Halette',
'Haley',
'Hali',
'Halie',
'Halimeda',
'Halley',
'Halli',
'Hallie',
'Hally',
'Hana',
'Hanna',
'Hannah',
'Hanni',
'Hannie',
'Hannis',
'Hanny',
'Happy',
'Harlene',
'Harley',
'Harli',
'Harlie',
'Harmonia',
'Harmonie',
'Harmony',
'Harri',
'Harrie',
'Harriet',
'Harriett',
'Harrietta',
'Harriette',
'Harriot',
'Harriott',
'Hatti',
'Hattie',
'Hatty',
'Hayley',
'Hazel',
'Heath',
'Heather',
'Heda',
'Hedda',
'Heddi',
'Heddie',
'Hedi',
'Hedvig',
'Hedvige',
'Hedwig',
'Hedwiga',
'Hedy',
'Heida',
'Heidi',
'Heidie',
'Helaina',
'Helaine',
'Helen',
'Helen-Elizabeth',
'Helena',
'Helene',
'Helenka',
'Helga',
'Helge',
'Helli',
'Heloise',
'Helsa',
'Helyn',
'Hendrika',
'Henka',
'Henrie',
'Henrieta',
'Henrietta',
'Henriette',
'Henryetta',
'Hephzibah',
'Hermia',
'Hermina',
'Hermine',
'Herminia',
'Hermione',
'Herta',
'Hertha',
'Hester',
'Hesther',
'Hestia',
'Hetti',
'Hettie',
'Hetty',
'Hilary',
'Hilda',
'Hildagard',
'Hildagarde',
'Hilde',
'Hildegaard',
'Hildegarde',
'Hildy',
'Hillary',
'Hilliary',
'Hinda',
'Holli',
'Hollie',
'Holly',
'Holly-Anne',
'Hollyanne',
'Honey',
'Honor',
'Honoria',
'Hope',
'Horatia',
'Hortense',
'Hortensia',
'Hulda',
'Hyacinth',
'Hyacintha',
'Hyacinthe',
'Hyacinthia',
'Hyacinthie',
'Hynda',
'Ianthe',
'Ibbie',
'Ibby',
'Ida',
'Idalia',
'Idalina',
'Idaline',
'Idell',
'Idelle',
'Idette',
'Ileana',
'Ileane',
'Ilene',
'Ilise',
'Ilka',
'Illa',
'Ilsa',
'Ilse',
'Ilysa',
'Ilyse',
'Ilyssa',
'Imelda',
'Imogen',
'Imogene',
'Imojean',
'Ina',
'Indira',
'Ines',
'Inesita',
'Inessa',
'Inez',
'Inga',
'Ingaberg',
'Ingaborg',
'Inge',
'Ingeberg',
'Ingeborg',
'Inger',
'Ingrid',
'Ingunna',
'Inna',
'Iolande',
'Iolanthe',
'Iona',
'Iormina',
'Ira',
'Irena',
'Irene',
'Irina',
'Iris',
'Irita',
'Irma',
'Isa',
'Isabel',
'Isabelita',
'Isabella',
'Isabelle',
'Isadora',
'Isahella',
'Iseabal',
'Isidora',
'Isis',
'Isobel',
'Issi',
'Issie',
'Issy',
'Ivett',
'Ivette',
'Ivie',
'Ivonne',
'Ivory',
'Ivy',
'Izabel',
'Jacenta',
'Jacinda',
'Jacinta',
'Jacintha',
'Jacinthe',
'Jackelyn',
'Jacki',
'Jackie',
'Jacklin',
'Jacklyn',
'Jackquelin',
'Jackqueline',
'Jacky',
'Jaclin',
'Jaclyn',
'Jacquelin',
'Jacqueline',
'Jacquelyn',
'Jacquelynn',
'Jacquenetta',
'Jacquenette',
'Jacquetta',
'Jacquette',
'Jacqui',
'Jacquie',
'Jacynth',
'Jada',
'Jade',
'Jaime',
'Jaimie',
'Jaine',
'Jami',
'Jamie',
'Jamima',
'Jammie',
'Jan',
'Jana',
'Janaya',
'Janaye',
'Jandy',
'Jane',
'Janean',
'Janeczka',
'Janeen',
'Janel',
'Janela',
'Janella',
'Janelle',
'Janene',
'Janenna',
'Janessa',
'Janet',
'Janeta',
'Janetta',
'Janette',
'Janeva',
'Janey',
'Jania',
'Janice',
'Janie',
'Janifer',
'Janina',
'Janine',
'Janis',
'Janith',
'Janka',
'Janna',
'Jannel',
'Jannelle',
'Janot',
'Jany',
'Jaquelin',
'Jaquelyn',
'Jaquenetta',
'Jaquenette',
'Jaquith',
'Jasmin',
'Jasmina',
'Jasmine',
'Jayme',
'Jaymee',
'Jayne',
'Jaynell',
'Jazmin',
'Jean',
'Jeana',
'Jeane',
'Jeanelle',
'Jeanette',
'Jeanie',
'Jeanine',
'Jeanna',
'Jeanne',
'Jeannette',
'Jeannie',
'Jeannine',
'Jehanna',
'Jelene',
'Jemie',
'Jemima',
'Jemimah',
'Jemmie',
'Jemmy',
'Jen',
'Jena',
'Jenda',
'Jenelle',
'Jeni',
'Jenica',
'Jeniece',
'Jenifer',
'Jeniffer',
'Jenilee',
'Jenine',
'Jenn',
'Jenna',
'Jennee',
'Jennette',
'Jenni',
'Jennica',
'Jennie',
'Jennifer',
'Jennilee',
'Jennine',
'Jenny',
'Jeralee',
'Jere',
'Jeri',
'Jermaine',
'Jerrie',
'Jerrilee',
'Jerrilyn',
'Jerrine',
'Jerry',
'Jerrylee',
'Jess',
'Jessa',
'Jessalin',
'Jessalyn',
'Jessamine',
'Jessamyn',
'Jesse',
'Jesselyn',
'Jessi',
'Jessica',
'Jessie',
'Jessika',
'Jessy',
'Jewel',
'Jewell',
'Jewelle',
'Jill',
'Jillana',
'Jillane',
'Jillayne',
'Jilleen',
'Jillene',
'Jilli',
'Jillian',
'Jillie',
'Jilly',
'Jinny',
'Jo',
'Jo Ann',
'Jo-Ann',
'Jo-Anne',
'Joan',
'Joana',
'Joane',
'Joanie',
'Joann',
'Joanna',
'Joanne',
'Joannes',
'Jobey',
'Jobi',
'Jobie',
'Jobina',
'Joby',
'Jobye',
'Jobyna',
'Jocelin',
'Joceline',
'Jocelyn',
'Jocelyne',
'Jodee',
'Jodi',
'Jodie',
'Jody',
'Joeann',
'Joela',
'Joelie',
'Joell',
'Joella',
'Joelle',
'Joellen',
'Joelly',
'Joellyn',
'Joelynn',
'Joete',
'Joey',
'Johanna',
'Johannah',
'Johna',
'Johnath',
'Johnette',
'Johnna',
'Joice',
'Jojo',
'Jolee',
'Joleen',
'Jolene',
'Joletta',
'Joli',
'Jolie',
'Joline',
'Joly',
'Jolyn',
'Jolynn',
'Jonell',
'Joni',
'Jonie',
'Jonis',
'Jordain',
'Jordan',
'Jordana',
'Jordanna',
'Jorey',
'Jori',
'Jorie',
'Jorrie',
'Jorry',
'Joscelin',
'Josee',
'Josefa',
'Josefina',
'Josepha',
'Josephina',
'Josephine',
'Josey',
'Josi',
'Josie',
'Josselyn',
'Josy',
'Jourdan',
'Joy',
'Joya',
'Joyan',
'Joyann',
'Joyce',
'Joycelin',
'Joye',
'Jsandye',
'Juana',
'Juanita',
'Judi',
'Judie',
'Judith',
'Juditha',
'Judy',
'Judye',
'Juieta',
'Julee',
'Juli',
'Julia',
'Juliana',
'Juliane',
'Juliann',
'Julianna',
'Julianne',
'Julie',
'Julienne',
'Juliet',
'Julieta',
'Julietta',
'Juliette',
'Julina',
'Juline',
'Julissa',
'Julita',
'June',
'Junette',
'Junia',
'Junie',
'Junina',
'Justina',
'Justine',
'Justinn',
'Jyoti',
'Kacey',
'Kacie',
'Kacy',
'Kaela',
'Kai',
'Kaia',
'Kaila',
'Kaile',
'Kailey',
'Kaitlin',
'Kaitlyn',
'Kaitlynn',
'Kaja',
'Kakalina',
'Kala',
'Kaleena',
'Kali',
'Kalie',
'Kalila',
'Kalina',
'Kalinda',
'Kalindi',
'Kalli',
'Kally',
'Kameko',
'Kamila',
'Kamilah',
'Kamillah',
'Kandace',
'Kandy',
'Kania',
'Kanya',
'Kara',
'Kara-Lynn',
'Karalee',
'Karalynn',
'Kare',
'Karee',
'Karel',
'Karen',
'Karena',
'Kari',
'Karia',
'Karie',
'Karil',
'Karilynn',
'Karin',
'Karina',
'Karine',
'Kariotta',
'Karisa',
'Karissa',
'Karita',
'Karla',
'Karlee',
'Karleen',
'Karlen',
'Karlene',
'Karlie',
'Karlotta',
'Karlotte',
'Karly',
'Karlyn',
'Karmen',
'Karna',
'Karol',
'Karola',
'Karole',
'Karolina',
'Karoline',
'Karoly',
'Karon',
'Karrah',
'Karrie',
'Karry',
'Kary',
'Karyl',
'Karylin',
'Karyn',
'Kasey',
'Kass',
'Kassandra',
'Kassey',
'Kassi',
'Kassia',
'Kassie',
'Kat',
'Kata',
'Katalin',
'Kate',
'Katee',
'Katerina',
'Katerine',
'Katey',
'Kath',
'Katha',
'Katharina',
'Katharine',
'Katharyn',
'Kathe',
'Katherina',
'Katherine',
'Katheryn',
'Kathi',
'Kathie',
'Kathleen',
'Kathlin',
'Kathrine',
'Kathryn',
'Kathryne',
'Kathy',
'Kathye',
'Kati',
'Katie',
'Katina',
'Katine',
'Katinka',
'Katleen',
'Katlin',
'Katrina',
'Katrine',
'Katrinka',
'Katti',
'Kattie',
'Katuscha',
'Katusha',
'Katy',
'Katya',
'Kay',
'Kaycee',
'Kaye',
'Kayla',
'Kayle',
'Kaylee',
'Kayley',
'Kaylil',
'Kaylyn',
'Keeley',
'Keelia',
'Keely',
'Kelcey',
'Kelci',
'Kelcie',
'Kelcy',
'Kelila',
'Kellen',
'Kelley',
'Kelli',
'Kellia',
'Kellie',
'Kellina',
'Kellsie',
'Kelly',
'Kellyann',
'Kelsey',
'Kelsi',
'Kelsy',
'Kendra',
'Kendre',
'Kenna',
'Keri',
'Keriann',
'Kerianne',
'Kerri',
'Kerrie',
'Kerrill',
'Kerrin',
'Kerry',
'Kerstin',
'Kesley',
'Keslie',
'Kessia',
'Kessiah',
'Ketti',
'Kettie',
'Ketty',
'Kevina',
'Kevyn',
'Ki',
'Kiah',
'Kial',
'Kiele',
'Kiersten',
'Kikelia',
'Kiley',
'Kim',
'Kimberlee',
'Kimberley',
'Kimberli',
'Kimberly',
'Kimberlyn',
'Kimbra',
'Kimmi',
'Kimmie',
'Kimmy',
'Kinna',
'Kip',
'Kipp',
'Kippie',
'Kippy',
'Kira',
'Kirbee',
'Kirbie',
'Kirby',
'Kiri',
'Kirsten',
'Kirsteni',
'Kirsti',
'Kirstin',
'Kirstyn',
'Kissee',
'Kissiah',
'Kissie',
'Kit',
'Kitti',
'Kittie',
'Kitty',
'Kizzee',
'Kizzie',
'Klara',
'Klarika',
'Klarrisa',
'Konstance',
'Konstanze',
'Koo',
'Kora',
'Koral',
'Koralle',
'Kordula',
'Kore',
'Korella',
'Koren',
'Koressa',
'Kori',
'Korie',
'Korney',
'Korrie',
'Korry',
'Kris',
'Krissie',
'Krissy',
'Krista',
'Kristal',
'Kristan',
'Kriste',
'Kristel',
'Kristen',
'Kristi',
'Kristien',
'Kristin',
'Kristina',
'Kristine',
'Kristy',
'Kristyn',
'Krysta',
'Krystal',
'Krystalle',
'Krystle',
'Krystyna',
'Kyla',
'Kyle',
'Kylen',
'Kylie',
'Kylila',
'Kylynn',
'Kym',
'Kynthia',
'Kyrstin',
'La Verne',
'Lacee',
'Lacey',
'Lacie',
'Lacy',
'Ladonna',
'Laetitia',
'Laina',
'Lainey',
'Lana',
'Lanae',
'Lane',
'Lanette',
'Laney',
'Lani',
'Lanie',
'Lanita',
'Lanna',
'Lanni',
'Lanny',
'Lara',
'Laraine',
'Lari',
'Larina',
'Larine',
'Larisa',
'Larissa',
'Lark',
'Laryssa',
'Latashia',
'Latia',
'Latisha',
'Latrena',
'Latrina',
'Laura',
'Lauraine',
'Laural',
'Lauralee',
'Laure',
'Lauree',
'Laureen',
'Laurel',
'Laurella',
'Lauren',
'Laurena',
'Laurene',
'Lauretta',
'Laurette',
'Lauri',
'Laurianne',
'Laurice',
'Laurie',
'Lauryn',
'Lavena',
'Laverna',
'Laverne',
'Lavina',
'Lavinia',
'Lavinie',
'Layla',
'Layne',
'Layney',
'Lea',
'Leah',
'Leandra',
'Leann',
'Leanna',
'Leanor',
'Leanora',
'Lebbie',
'Leda',
'Lee',
'Leeann',
'Leeanne',
'Leela',
'Leelah',
'Leena',
'Leesa',
'Leese',
'Legra',
'Leia',
'Leigh',
'Leigha',
'Leila',
'Leilah',
'Leisha',
'Lela',
'Lelah',
'Leland',
'Lelia',
'Lena',
'Lenee',
'Lenette',
'Lenka',
'Lenna',
'Lenora',
'Lenore',
'Leodora',
'Leoine',
'Leola',
'Leoline',
'Leona',
'Leonanie',
'Leone',
'Leonelle',
'Leonie',
'Leonora',
'Leonore',
'Leontine',
'Leontyne',
'Leora',
'Leshia',
'Lesley',
'Lesli',
'Leslie',
'Lesly',
'Lesya',
'Leta',
'Lethia',
'Leticia',
'Letisha',
'Letitia',
'Letizia',
'Letta',
'Letti',
'Lettie',
'Letty',
'Lexi',
'Lexie',
'Lexine',
'Lexis',
'Lexy',
'Leyla',
'Lezlie',
'Lia',
'Lian',
'Liana',
'Liane',
'Lianna',
'Lianne',
'Lib',
'Libbey',
'Libbi',
'Libbie',
'Libby',
'Licha',
'Lida',
'Lidia',
'Liesa',
'Lil',
'Lila',
'Lilah',
'Lilas',
'Lilia',
'Lilian',
'Liliane',
'Lilias',
'Lilith',
'Lilla',
'Lilli',
'Lillian',
'Lillis',
'Lilllie',
'Lilly',
'Lily',
'Lilyan',
'Lin',
'Lina',
'Lind',
'Linda',
'Lindi',
'Lindie',
'Lindsay',
'Lindsey',
'Lindsy',
'Lindy',
'Linea',
'Linell',
'Linet',
'Linette',
'Linn',
'Linnea',
'Linnell',
'Linnet',
'Linnie',
'Linzy',
'Lira',
'Lisa',
'Lisabeth',
'Lisbeth',
'Lise',
'Lisetta',
'Lisette',
'Lisha',
'Lishe',
'Lissa',
'Lissi',
'Lissie',
'Lissy',
'Lita',
'Liuka',
'Liv',
'Liva',
'Livia',
'Livvie',
'Livvy',
'Livvyy',
'Livy',
'Liz',
'Liza',
'Lizabeth',
'Lizbeth',
'Lizette',
'Lizzie',
'Lizzy',
'Loella',
'Lois',
'Loise',
'Lola',
'Loleta',
'Lolita',
'Lolly',
'Lona',
'Lonee',
'Loni',
'Lonna',
'Lonni',
'Lonnie',
'Lora',
'Lorain',
'Loraine',
'Loralee',
'Loralie',
'Loralyn',
'Loree',
'Loreen',
'Lorelei',
'Lorelle',
'Loren',
'Lorena',
'Lorene',
'Lorenza',
'Loretta',
'Lorette',
'Lori',
'Loria',
'Lorianna',
'Lorianne',
'Lorie',
'Lorilee',
'Lorilyn',
'Lorinda',
'Lorine',
'Lorita',
'Lorna',
'Lorne',
'Lorraine',
'Lorrayne',
'Lorri',
'Lorrie',
'Lorrin',
'Lorry',
'Lory',
'Lotta',
'Lotte',
'Lotti',
'Lottie',
'Lotty',
'Lou',
'Louella',
'Louisa',
'Louise',
'Louisette',
'Loutitia',
'Lu',
'Luce',
'Luci',
'Lucia',
'Luciana',
'Lucie',
'Lucienne',
'Lucila',
'Lucilia',
'Lucille',
'Lucina',
'Lucinda',
'Lucine',
'Lucita',
'Lucky',
'Lucretia',
'Lucy',
'Ludovika',
'Luella',
'Luelle',
'Luisa',
'Luise',
'Lula',
'Lulita',
'Lulu',
'Lura',
'Lurette',
'Lurleen',
'Lurlene',
'Lurline',
'Lusa',
'Luz',
'Lyda',
'Lydia',
'Lydie',
'Lyn',
'Lynda',
'Lynde',
'Lyndel',
'Lyndell',
'Lyndsay',
'Lyndsey',
'Lyndsie',
'Lyndy',
'Lynea',
'Lynelle',
'Lynett',
'Lynette',
'Lynn',
'Lynna',
'Lynne',
'Lynnea',
'Lynnell',
'Lynnelle',
'Lynnet',
'Lynnett',
'Lynnette',
'Lynsey',
'Lyssa',
'Mab',
'Mabel',
'Mabelle',
'Mable',
'Mada',
'Madalena',
'Madalyn',
'Maddalena',
'Maddi',
'Maddie',
'Maddy',
'Madel',
'Madelaine',
'Madeleine',
'Madelena',
'Madelene',
'Madelin',
'Madelina',
'Madeline',
'Madella',
'Madelle',
'Madelon',
'Madelyn',
'Madge',
'Madlen',
'Madlin',
'Madonna',
'Mady',
'Mae',
'Maegan',
'Mag',
'Magda',
'Magdaia',
'Magdalen',
'Magdalena',
'Magdalene',
'Maggee',
'Maggi',
'Maggie',
'Maggy',
'Mahala',
'Mahalia',
'Maia',
'Maible',
'Maiga',
'Maighdiln',
'Mair',
'Maire',
'Maisey',
'Maisie',
'Maitilde',
'Mala',
'Malanie',
'Malena',
'Malia',
'Malina',
'Malinda',
'Malinde',
'Malissa',
'Malissia',
'Mallissa',
'Mallorie',
'Mallory',
'Malorie',
'Malory',
'Malva',
'Malvina',
'Malynda',
'Mame',
'Mamie',
'Manda',
'Mandi',
'Mandie',
'Mandy',
'Manon',
'Manya',
'Mara',
'Marabel',
'Marcela',
'Marcelia',
'Marcella',
'Marcelle',
'Marcellina',
'Marcelline',
'Marchelle',
'Marci',
'Marcia',
'Marcie',
'Marcile',
'Marcille',
'Marcy',
'Mareah',
'Maren',
'Marena',
'Maressa',
'Marga',
'Margalit',
'Margalo',
'Margaret',
'Margareta',
'Margarete',
'Margaretha',
'Margarethe',
'Margaretta',
'Margarette',
'Margarita',
'Margaux',
'Marge',
'Margeaux',
'Margery',
'Marget',
'Margette',
'Margi',
'Margie',
'Margit',
'Margo',
'Margot',
'Margret',
'Marguerite',
'Margy',
'Mari',
'Maria',
'Mariam',
'Marian',
'Mariana',
'Mariann',
'Marianna',
'Marianne',
'Maribel',
'Maribelle',
'Maribeth',
'Marice',
'Maridel',
'Marie',
'Marie-Ann',
'Marie-Jeanne',
'Marieann',
'Mariejeanne',
'Mariel',
'Mariele',
'Marielle',
'Mariellen',
'Marietta',
'Mariette',
'Marigold',
'Marijo',
'Marika',
'Marilee',
'Marilin',
'Marillin',
'Marilyn',
'Marin',
'Marina',
'Marinna',
'Marion',
'Mariquilla',
'Maris',
'Marisa',
'Mariska',
'Marissa',
'Marita',
'Maritsa',
'Mariya',
'Marj',
'Marja',
'Marje',
'Marji',
'Marjie',
'Marjorie',
'Marjory',
'Marjy',
'Marketa',
'Marla',
'Marlane',
'Marleah',
'Marlee',
'Marleen',
'Marlena',
'Marlene',
'Marley',
'Marlie',
'Marline',
'Marlo',
'Marlyn',
'Marna',
'Marne',
'Marney',
'Marni',
'Marnia',
'Marnie',
'Marquita',
'Marrilee',
'Marris',
'Marrissa',
'Marsha',
'Marsiella',
'Marta',
'Martelle',
'Martguerita',
'Martha',
'Marthe',
'Marthena',
'Marti',
'Martica',
'Martie',
'Martina',
'Martita',
'Marty',
'Martynne',
'Mary',
'Marya',
'Maryann',
'Maryanna',
'Maryanne',
'Marybelle',
'Marybeth',
'Maryellen',
'Maryjane',
'Maryjo',
'Maryl',
'Marylee',
'Marylin',
'Marylinda',
'Marylou',
'Marylynne',
'Maryrose',
'Marys',
'Marysa',
'Masha',
'Matelda',
'Mathilda',
'Mathilde',
'Matilda',
'Matilde',
'Matti',
'Mattie',
'Matty',
'Maud',
'Maude',
'Maudie',
'Maura',
'Maure',
'Maureen',
'Maureene',
'Maurene',
'Maurine',
'Maurise',
'Maurita',
'Maurizia',
'Mavis',
'Mavra',
'Max',
'Maxi',
'Maxie',
'Maxine',
'Maxy',
'May',
'Maybelle',
'Maye',
'Mead',
'Meade',
'Meagan',
'Meaghan',
'Meara',
'Mechelle',
'Meg',
'Megan',
'Megen',
'Meggi',
'Meggie',
'Meggy',
'Meghan',
'Meghann',
'Mehetabel',
'Mei',
'Mel',
'Mela',
'Melamie',
'Melania',
'Melanie',
'Melantha',
'Melany',
'Melba',
'Melesa',
'Melessa',
'Melicent',
'Melina',
'Melinda',
'Melinde',
'Melisa',
'Melisande',
'Melisandra',
'Melisenda',
'Melisent',
'Melissa',
'Melisse',
'Melita',
'Melitta',
'Mella',
'Melli',
'Mellicent',
'Mellie',
'Mellisa',
'Mellisent',
'Melloney',
'Melly',
'Melodee',
'Melodie',
'Melody',
'Melonie',
'Melony',
'Melosa',
'Melva',
'Mercedes',
'Merci',
'Mercie',
'Mercy',
'Meredith',
'Meredithe',
'Meridel',
'Meridith',
'Meriel',
'Merilee',
'Merilyn',
'Meris',
'Merissa',
'Merl',
'Merla',
'Merle',
'Merlina',
'Merline',
'Merna',
'Merola',
'Merralee',
'Merridie',
'Merrie',
'Merrielle',
'Merrile',
'Merrilee',
'Merrili',
'Merrill',
'Merrily',
'Merry',
'Mersey',
'Meryl',
'Meta',
'Mia',
'Micaela',
'Michaela',
'Michaelina',
'Michaeline',
'Michaella',
'Michal',
'Michel',
'Michele',
'Michelina',
'Micheline',
'Michell',
'Michelle',
'Micki',
'Mickie',
'Micky',
'Midge',
'Mignon',
'Mignonne',
'Miguela',
'Miguelita',
'Mikaela',
'Mil',
'Mildred',
'Mildrid',
'Milena',
'Milicent',
'Milissent',
'Milka',
'Milli',
'Millicent',
'Millie',
'Millisent',
'Milly',
'Milzie',
'Mimi',
'Min',
'Mina',
'Minda',
'Mindy',
'Minerva',
'Minetta',
'Minette',
'Minna',
'Minnaminnie',
'Minne',
'Minni',
'Minnie',
'Minnnie',
'Minny',
'Minta',
'Miof Mela',
'Miquela',
'Mira',
'Mirabel',
'Mirabella',
'Mirabelle',
'Miran',
'Miranda',
'Mireielle',
'Mireille',
'Mirella',
'Mirelle',
'Miriam',
'Mirilla',
'Mirna',
'Misha',
'Missie',
'Missy',
'Misti',
'Misty',
'Mitzi',
'Modesta',
'Modestia',
'Modestine',
'Modesty',
'Moina',
'Moira',
'Moll',
'Mollee',
'Molli',
'Mollie',
'Molly',
'Mommy',
'Mona',
'Monah',
'Monica',
'Monika',
'Monique',
'Mora',
'Moreen',
'Morena',
'Morgan',
'Morgana',
'Morganica',
'Morganne',
'Morgen',
'Moria',
'Morissa',
'Morna',
'Moselle',
'Moyna',
'Moyra',
'Mozelle',
'Muffin',
'Mufi',
'Mufinella',
'Muire',
'Mureil',
'Murial',
'Muriel',
'Murielle',
'Myra',
'Myrah',
'Myranda',
'Myriam',
'Myrilla',
'Myrle',
'Myrlene',
'Myrna',
'Myrta',
'Myrtia',
'Myrtice',
'Myrtie',
'Myrtle',
'Nada',
'Nadean',
'Nadeen',
'Nadia',
'Nadine',
'Nadiya',
'Nady',
'Nadya',
'Nalani',
'Nan',
'Nana',
'Nananne',
'Nance',
'Nancee',
'Nancey',
'Nanci',
'Nancie',
'Nancy',
'Nanete',
'Nanette',
'Nani',
'Nanice',
'Nanine',
'Nannette',
'Nanni',
'Nannie',
'Nanny',
'Nanon',
'Naoma',
'Naomi',
'Nara',
'Nari',
'Nariko',
'Nat',
'Nata',
'Natala',
'Natalee',
'Natalie',
'Natalina',
'Nataline',
'Natalya',
'Natasha',
'Natassia',
'Nathalia',
'Nathalie',
'Natividad',
'Natka',
'Natty',
'Neala',
'Neda',
'Nedda',
'Nedi',
'Neely',
'Neila',
'Neile',
'Neilla',
'Neille',
'Nelia',
'Nelie',
'Nell',
'Nelle',
'Nelli',
'Nellie',
'Nelly',
'Nerissa',
'Nerita',
'Nert',
'Nerta',
'Nerte',
'Nerti',
'Nertie',
'Nerty',
'Nessa',
'Nessi',
'Nessie',
'Nessy',
'Nesta',
'Netta',
'Netti',
'Nettie',
'Nettle',
'Netty',
'Nevsa',
'Neysa',
'Nichol',
'Nichole',
'Nicholle',
'Nicki',
'Nickie',
'Nicky',
'Nicol',
'Nicola',
'Nicole',
'Nicolea',
'Nicolette',
'Nicoli',
'Nicolina',
'Nicoline',
'Nicolle',
'Nikaniki',
'Nike',
'Niki',
'Nikki',
'Nikkie',
'Nikoletta',
'Nikolia',
'Nina',
'Ninetta',
'Ninette',
'Ninnetta',
'Ninnette',
'Ninon',
'Nissa',
'Nisse',
'Nissie',
'Nissy',
'Nita',
'Nixie',
'Noami',
'Noel',
'Noelani',
'Noell',
'Noella',
'Noelle',
'Noellyn',
'Noelyn',
'Noemi',
'Nola',
'Nolana',
'Nolie',
'Nollie',
'Nomi',
'Nona',
'Nonah',
'Noni',
'Nonie',
'Nonna',
'Nonnah',
'Nora',
'Norah',
'Norean',
'Noreen',
'Norene',
'Norina',
'Norine',
'Norma',
'Norri',
'Norrie',
'Norry',
'Novelia',
'Nydia',
'Nyssa',
'Octavia',
'Odele',
'Odelia',
'Odelinda',
'Odella',
'Odelle',
'Odessa',
'Odetta',
'Odette',
'Odilia',
'Odille',
'Ofelia',
'Ofella',
'Ofilia',
'Ola',
'Olenka',
'Olga',
'Olia',
'Olimpia',
'Olive',
'Olivette',
'Olivia',
'Olivie',
'Oliy',
'Ollie',
'Olly',
'Olva',
'Olwen',
'Olympe',
'Olympia',
'Olympie',
'Ondrea',
'Oneida',
'Onida',
'Oona',
'Opal',
'Opalina',
'Opaline',
'Ophelia',
'Ophelie',
'Ora',
'Oralee',
'Oralia',
'Oralie',
'Oralla',
'Oralle',
'Orel',
'Orelee',
'Orelia',
'Orelie',
'Orella',
'Orelle',
'Oriana',
'Orly',
'Orsa',
'Orsola',
'Ortensia',
'Otha',
'Othelia',
'Othella',
'Othilia',
'Othilie',
'Ottilie',
'Page',
'Paige',
'Paloma',
'Pam',
'Pamela',
'Pamelina',
'Pamella',
'Pammi',
'Pammie',
'Pammy',
'Pandora',
'Pansie',
'Pansy',
'Paola',
'Paolina',
'Papagena',
'Pat',
'Patience',
'Patrica',
'Patrice',
'Patricia',
'Patrizia',
'Patsy',
'Patti',
'Pattie',
'Patty',
'Paula',
'Paule',
'Pauletta',
'Paulette',
'Pauli',
'Paulie',
'Paulina',
'Pauline',
'Paulita',
'Pauly',
'Pavia',
'Pavla',
'Pearl',
'Pearla',
'Pearle',
'Pearline',
'Peg',
'Pegeen',
'Peggi',
'Peggie',
'Peggy',
'Pen',
'Penelopa',
'Penelope',
'Penni',
'Pennie',
'Penny',
'Pepi',
'Pepita',
'Peri',
'Peria',
'Perl',
'Perla',
'Perle',
'Perri',
'Perrine',
'Perry',
'Persis',
'Pet',
'Peta',
'Petra',
'Petrina',
'Petronella',
'Petronia',
'Petronilla',
'Petronille',
'Petunia',
'Phaedra',
'Phaidra',
'Phebe',
'Phedra',
'Phelia',
'Phil',
'Philipa',
'Philippa',
'Philippe',
'Philippine',
'Philis',
'Phillida',
'Phillie',
'Phillis',
'Philly',
'Philomena',
'Phoebe',
'Phylis',
'Phyllida',
'Phyllis',
'Phyllys',
'Phylys',
'Pia',
'Pier',
'Pierette',
'Pierrette',
'Pietra',
'Piper',
'Pippa',
'Pippy',
'Polly',
'Pollyanna',
'Pooh',
'Poppy',
'Portia',
'Pris',
'Prisca',
'Priscella',
'Priscilla',
'Prissie',
'Pru',
'Prudence',
'Prudi',
'Prudy',
'Prue',
'Queenie',
'Quentin',
'Querida',
'Quinn',
'Quinta',
'Quintana',
'Quintilla',
'Quintina',
'Rachael',
'Rachel',
'Rachele',
'Rachelle',
'Rae',
'Raeann',
'Raf',
'Rafa',
'Rafaela',
'Rafaelia',
'Rafaelita',
'Rahal',
'Rahel',
'Raina',
'Raine',
'Rakel',
'Ralina',
'Ramona',
'Ramonda',
'Rana',
'Randa',
'Randee',
'Randene',
'Randi',
'Randie',
'Randy',
'Ranee',
'Rani',
'Rania',
'Ranice',
'Ranique',
'Ranna',
'Raphaela',
'Raquel',
'Raquela',
'Rasia',
'Rasla',
'Raven',
'Ray',
'Raychel',
'Raye',
'Rayna',
'Raynell',
'Rayshell',
'Rea',
'Reba',
'Rebbecca',
'Rebe',
'Rebeca',
'Rebecca',
'Rebecka',
'Rebeka',
'Rebekah',
'Rebekkah',
'Ree',
'Reeba',
'Reena',
'Reeta',
'Reeva',
'Regan',
'Reggi',
'Reggie',
'Regina',
'Regine',
'Reiko',
'Reina',
'Reine',
'Remy',
'Rena',
'Renae',
'Renata',
'Renate',
'Rene',
'Renee',
'Renell',
'Renelle',
'Renie',
'Rennie',
'Reta',
'Retha',
'Revkah',
'Rey',
'Reyna',
'Rhea',
'Rheba',
'Rheta',
'Rhetta',
'Rhiamon',
'Rhianna',
'Rhianon',
'Rhoda',
'Rhodia',
'Rhodie',
'Rhody',
'Rhona',
'Rhonda',
'Riane',
'Riannon',
'Rianon',
'Rica',
'Ricca',
'Rici',
'Ricki',
'Rickie',
'Ricky',
'Riki',
'Rikki',
'Rina',
'Risa',
'Rita',
'Riva',
'Rivalee',
'Rivi',
'Rivkah',
'Rivy',
'Roana',
'Roanna',
'Roanne',
'Robbi',
'Robbie',
'Robbin',
'Robby',
'Robbyn',
'Robena',
'Robenia',
'Roberta',
'Robin',
'Robina',
'Robinet',
'Robinett',
'Robinetta',
'Robinette',
'Robinia',
'Roby',
'Robyn',
'Roch',
'Rochell',
'Rochella',
'Rochelle',
'Rochette',
'Roda',
'Rodi',
'Rodie',
'Rodina',
'Rois',
'Romola',
'Romona',
'Romonda',
'Romy',
'Rona',
'Ronalda',
'Ronda',
'Ronica',
'Ronna',
'Ronni',
'Ronnica',
'Ronnie',
'Ronny',
'Roobbie',
'Rora',
'Rori',
'Rorie',
'Rory',
'Ros',
'Rosa',
'Rosabel',
'Rosabella',
'Rosabelle',
'Rosaleen',
'Rosalia',
'Rosalie',
'Rosalind',
'Rosalinda',
'Rosalinde',
'Rosaline',
'Rosalyn',
'Rosalynd',
'Rosamond',
'Rosamund',
'Rosana',
'Rosanna',
'Rosanne',
'Rose',
'Roseann',
'Roseanna',
'Roseanne',
'Roselia',
'Roselin',
'Roseline',
'Rosella',
'Roselle',
'Rosemaria',
'Rosemarie',
'Rosemary',
'Rosemonde',
'Rosene',
'Rosetta',
'Rosette',
'Roshelle',
'Rosie',
'Rosina',
'Rosita',
'Roslyn',
'Rosmunda',
'Rosy',
'Row',
'Rowe',
'Rowena',
'Roxana',
'Roxane',
'Roxanna',
'Roxanne',
'Roxi',
'Roxie',
'Roxine',
'Roxy',
'Roz',
'Rozalie',
'Rozalin',
'Rozamond',
'Rozanna',
'Rozanne',
'Roze',
'Rozele',
'Rozella',
'Rozelle',
'Rozina',
'Rubetta',
'Rubi',
'Rubia',
'Rubie',
'Rubina',
'Ruby',
'Ruperta',
'Ruth',
'Ruthann',
'Ruthanne',
'Ruthe',
'Ruthi',
'Ruthie',
'Ruthy',
'Ryann',
'Rycca',
'Saba',
'Sabina',
'Sabine',
'Sabra',
'Sabrina',
'Sacha',
'Sada',
'Sadella',
'Sadie',
'Sadye',
'Saidee',
'Sal',
'Salaidh',
'Sallee',
'Salli',
'Sallie',
'Sally',
'Sallyann',
'Sallyanne',
'Saloma',
'Salome',
'Salomi',
'Sam',
'Samantha',
'Samara',
'Samaria',
'Sammy',
'Sande',
'Sandi',
'Sandie',
'Sandra',
'Sandy',
'Sandye',
'Sapphira',
'Sapphire',
'Sara',
'Sara-Ann',
'Saraann',
'Sarah',
'Sarajane',
'Saree',
'Sarena',
'Sarene',
'Sarette',
'Sari',
'Sarina',
'Sarine',
'Sarita',
'Sascha',
'Sasha',
'Sashenka',
'Saudra',
'Saundra',
'Savina',
'Sayre',
'Scarlet',
'Scarlett',
'Sean',
'Seana',
'Seka',
'Sela',
'Selena',
'Selene',
'Selestina',
'Selia',
'Selie',
'Selina',
'Selinda',
'Seline',
'Sella',
'Selle',
'Selma',
'Sena',
'Sephira',
'Serena',
'Serene',
'Shae',
'Shaina',
'Shaine',
'Shalna',
'Shalne',
'Shana',
'Shanda',
'Shandee',
'Shandeigh',
'Shandie',
'Shandra',
'Shandy',
'Shane',
'Shani',
'Shanie',
'Shanna',
'Shannah',
'Shannen',
'Shannon',
'Shanon',
'Shanta',
'Shantee',
'Shara',
'Sharai',
'Shari',
'Sharia',
'Sharity',
'Sharl',
'Sharla',
'Sharleen',
'Sharlene',
'Sharline',
'Sharon',
'Sharona',
'Sharron',
'Sharyl',
'Shaun',
'Shauna',
'Shawn',
'Shawna',
'Shawnee',
'Shay',
'Shayla',
'Shaylah',
'Shaylyn',
'Shaylynn',
'Shayna',
'Shayne',
'Shea',
'Sheba',
'Sheela',
'Sheelagh',
'Sheelah',
'Sheena',
'Sheeree',
'Sheila',
'Sheila-Kathryn',
'Sheilah',
'Shel',
'Shela',
'Shelagh',
'Shelba',
'Shelbi',
'Shelby',
'Shelia',
'Shell',
'Shelley',
'Shelli',
'Shellie',
'Shelly',
'Shena',
'Sher',
'Sheree',
'Sheri',
'Sherie',
'Sherill',
'Sherilyn',
'Sherline',
'Sherri',
'Sherrie',
'Sherry',
'Sherye',
'Sheryl',
'Shina',
'Shir',
'Shirl',
'Shirlee',
'Shirleen',
'Shirlene',
'Shirley',
'Shirline',
'Shoshana',
'Shoshanna',
'Siana',
'Sianna',
'Sib',
'Sibbie',
'Sibby',
'Sibeal',
'Sibel',
'Sibella',
'Sibelle',
'Sibilla',
'Sibley',
'Sibyl',
'Sibylla',
'Sibylle',
'Sidoney',
'Sidonia',
'Sidonnie',
'Sigrid',
'Sile',
'Sileas',
'Silva',
'Silvana',
'Silvia',
'Silvie',
'Simona',
'Simone',
'Simonette',
'Simonne',
'Sindee',
'Siobhan',
'Sioux',
'Siouxie',
'Sisely',
'Sisile',
'Sissie',
'Sissy',
'Siusan',
'Sofia',
'Sofie',
'Sondra',
'Sonia',
'Sonja',
'Sonni',
'Sonnie',
'Sonnnie',
'Sonny',
'Sonya',
'Sophey',
'Sophi',
'Sophia',
'Sophie',
'Sophronia',
'Sorcha',
'Sosanna',
'Stace',
'Stacee',
'Stacey',
'Staci',
'Stacia',
'Stacie',
'Stacy',
'Stafani',
'Star',
'Starla',
'Starlene',
'Starlin',
'Starr',
'Stefa',
'Stefania',
'Stefanie',
'Steffane',
'Steffi',
'Steffie',
'Stella',
'Stepha',
'Stephana',
'Stephani',
'Stephanie',
'Stephannie',
'Stephenie',
'Stephi',
'Stephie',
'Stephine',
'Stesha',
'Stevana',
'Stevena',
'Stoddard',
'Storm',
'Stormi',
'Stormie',
'Stormy',
'Sue',
'Suellen',
'Sukey',
'Suki',
'Sula',
'Sunny',
'Sunshine',
'Susan',
'Susana',
'Susanetta',
'Susann',
'Susanna',
'Susannah',
'Susanne',
'Susette',
'Susi',
'Susie',
'Susy',
'Suzann',
'Suzanna',
'Suzanne',
'Suzette',
'Suzi',
'Suzie',
'Suzy',
'Sybil',
'Sybila',
'Sybilla',
'Sybille',
'Sybyl',
'Sydel',
'Sydelle',
'Sydney',
'Sylvia',
'Tabatha',
'Tabbatha',
'Tabbi',
'Tabbie',
'Tabbitha',
'Tabby',
'Tabina',
'Tabitha',
'Taffy',
'Talia',
'Tallia',
'Tallie',
'Tallou',
'Tallulah',
'Tally',
'Talya',
'Talyah',
'Tamar',
'Tamara',
'Tamarah',
'Tamarra',
'Tamera',
'Tami',
'Tamiko',
'Tamma',
'Tammara',
'Tammi',
'Tammie',
'Tammy',
'Tamqrah',
'Tamra',
'Tana',
'Tandi',
'Tandie',
'Tandy',
'Tanhya',
'Tani',
'Tania',
'Tanitansy',
'Tansy',
'Tanya',
'Tara',
'Tarah',
'Tarra',
'Tarrah',
'Taryn',
'Tasha',
'Tasia',
'Tate',
'Tatiana',
'Tatiania',
'Tatum',
'Tawnya',
'Tawsha',
'Ted',
'Tedda',
'Teddi',
'Teddie',
'Teddy',
'Tedi',
'Tedra',
'Teena',
'TEirtza',
'Teodora',
'Tera',
'Teresa',
'Terese',
'Teresina',
'Teresita',
'Teressa',
'Teri',
'Teriann',
'Terra',
'Terri',
'Terrie',
'Terrijo',
'Terry',
'Terrye',
'Tersina',
'Terza',
'Tess',
'Tessa',
'Tessi',
'Tessie',
'Tessy',
'Thalia',
'Thea',
'Theadora',
'Theda',
'Thekla',
'Thelma',
'Theo',
'Theodora',
'Theodosia',
'Theresa',
'Therese',
'Theresina',
'Theresita',
'Theressa',
'Therine',
'Thia',
'Thomasa',
'Thomasin',
'Thomasina',
'Thomasine',
'Tiena',
'Tierney',
'Tiertza',
'Tiff',
'Tiffani',
'Tiffanie',
'Tiffany',
'Tiffi',
'Tiffie',
'Tiffy',
'Tilda',
'Tildi',
'Tildie',
'Tildy',
'Tillie',
'Tilly',
'Tim',
'Timi',
'Timmi',
'Timmie',
'Timmy',
'Timothea',
'Tina',
'Tine',
'Tiphani',
'Tiphanie',
'Tiphany',
'Tish',
'Tisha',
'Tobe',
'Tobey',
'Tobi',
'Toby',
'Tobye',
'Toinette',
'Toma',
'Tomasina',
'Tomasine',
'Tomi',
'Tommi',
'Tommie',
'Tommy',
'Toni',
'Tonia',
'Tonie',
'Tony',
'Tonya',
'Tonye',
'Tootsie',
'Torey',
'Tori',
'Torie',
'Torrie',
'Tory',
'Tova',
'Tove',
'Tracee',
'Tracey',
'Traci',
'Tracie',
'Tracy',
'Trenna',
'Tresa',
'Trescha',
'Tressa',
'Tricia',
'Trina',
'Trish',
'Trisha',
'Trista',
'Trix',
'Trixi',
'Trixie',
'Trixy',
'Truda',
'Trude',
'Trudey',
'Trudi',
'Trudie',
'Trudy',
'Trula',
'Tuesday',
'Twila',
'Twyla',
'Tybi',
'Tybie',
'Tyne',
'Ula',
'Ulla',
'Ulrica',
'Ulrika',
'Ulrikaumeko',
'Ulrike',
'Umeko',
'Una',
'Ursa',
'Ursala',
'Ursola',
'Ursula',
'Ursulina',
'Ursuline',
'Uta',
'Val',
'Valaree',
'Valaria',
'Vale',
'Valeda',
'Valencia',
'Valene',
'Valenka',
'Valentia',
'Valentina',
'Valentine',
'Valera',
'Valeria',
'Valerie',
'Valery',
'Valerye',
'Valida',
'Valina',
'Valli',
'Vallie',
'Vally',
'Valma',
'Valry',
'Van',
'Vanda',
'Vanessa',
'Vania',
'Vanna',
'Vanni',
'Vannie',
'Vanny',
'Vanya',
'Veda',
'Velma',
'Velvet',
'Venita',
'Venus',
'Vera',
'Veradis',
'Vere',
'Verena',
'Verene',
'Veriee',
'Verile',
'Verina',
'Verine',
'Verla',
'Verna',
'Vernice',
'Veronica',
'Veronika',
'Veronike',
'Veronique',
'Vevay',
'Vi',
'Vicki',
'Vickie',
'Vicky',
'Victoria',
'Vida',
'Viki',
'Vikki',
'Vikky',
'Vilhelmina',
'Vilma',
'Vin',
'Vina',
'Vinita',
'Vinni',
'Vinnie',
'Vinny',
'Viola',
'Violante',
'Viole',
'Violet',
'Violetta',
'Violette',
'Virgie',
'Virgina',
'Virginia',
'Virginie',
'Vita',
'Vitia',
'Vitoria',
'Vittoria',
'Viv',
'Viva',
'Vivi',
'Vivia',
'Vivian',
'Viviana',
'Vivianna',
'Vivianne',
'Vivie',
'Vivien',
'Viviene',
'Vivienne',
'Viviyan',
'Vivyan',
'Vivyanne',
'Vonni',
'Vonnie',
'Vonny',
'Vyky',
'Wallie',
'Wallis',
'Walliw',
'Wally',
'Waly',
'Wanda',
'Wandie',
'Wandis',
'Waneta',
'Wanids',
'Wenda',
'Wendeline',
'Wendi',
'Wendie',
'Wendy',
'Wendye',
'Wenona',
'Wenonah',
'Whitney',
'Wileen',
'Wilhelmina',
'Wilhelmine',
'Wilie',
'Willa',
'Willabella',
'Willamina',
'Willetta',
'Willette',
'Willi',
'Willie',
'Willow',
'Willy',
'Willyt',
'Wilma',
'Wilmette',
'Wilona',
'Wilone',
'Wilow',
'Windy',
'Wini',
'Winifred',
'Winna',
'Winnah',
'Winne',
'Winni',
'Winnie',
'Winnifred',
'Winny',
'Winona',
'Winonah',
'Wren',
'Wrennie',
'Wylma',
'Wynn',
'Wynne',
'Wynnie',
'Wynny',
'Xaviera',
'Xena',
'Xenia',
'Xylia',
'Xylina',
'Yalonda',
'Yasmeen',
'Yasmin',
'Yelena',
'Yetta',
'Yettie',
'Yetty',
'Yevette',
'Ynes',
'Ynez',
'Yoko',
'Yolanda',
'Yolande',
'Yolane',
'Yolanthe',
'Yoshi',
'Yoshiko',
'Yovonnda',
'Ysabel',
'Yvette',
'Yvonne',
'Zabrina',
'Zahara',
'Zandra',
'Zaneta',
'Zara',
'Zarah',
'Zaria',
'Zarla',
'Zea',
'Zelda',
'Zelma',
'Zena',
'Zenia',
'Zia',
'Zilvia',
'Zita',
'Zitella',
'Zoe',
'Zola',
'Zonda',
'Zondra',
'Zonnya',
'Zora',
'Zorah',
'Zorana',
'Zorina',
'Zorine',
'Zsa Zsa',
'Zsazsa',
'Zulema',
'Zuzana'] | [
"edalool@yahoo.com"
] | edalool@yahoo.com |
20a1b173592c601afd39cfed31ef20cabac2e687 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_03_22/aio/operations/_certificates_operations.py | cb3ee9059642cc0a1b57affe8c5fa30dbe7387b1 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 23,419 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
    """CertificatesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.iothub.v2019_03_22.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    # Every operation in this group targets the same service API version.
    _API_VERSION = "2019-03-22"

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    # ------------------------------------------------------------------
    # Shared request-building helpers. All six public operations previously
    # duplicated this boilerplate inline; it is factored out here so each
    # operation only states what is specific to it (verb, body, statuses).
    # ------------------------------------------------------------------

    def _format_url(self, template_url, resource_group_name, resource_name, certificate_name=None):
        """Fill the common ARM path parameters into *template_url*."""
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
        }
        if certificate_name is not None:
            # Service-side naming constraint: 1-64 chars from a limited alphabet.
            path_format_arguments['certificateName'] = self._serialize.url(
                "certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$')
        return self._client.format_url(template_url, **path_format_arguments)

    def _query_parameters(self):
        """Query string shared by every operation (only the api-version)."""
        return {'api-version': self._serialize.query("api_version", self._API_VERSION, 'str')}

    def _header_parameters(self, if_match=None, content_type=None):
        """Common headers; If-Match and Content-Type are added only when supplied."""
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        if content_type is not None:
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", "application/json", 'str')
        return header_parameters

    @staticmethod
    def _error_map(kwargs):
        """Default ARM error map, merged with a caller-supplied ``error_map`` popped from *kwargs*."""
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        return error_map

    async def _run(self, request, error_map, expected_statuses, **kwargs):
        """Send *request* through the pipeline; raise ``HttpResponseError`` on unexpected status."""
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in expected_statuses:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    # ------------------------------------------------------------------
    # Public operations
    # ------------------------------------------------------------------

    async def list_by_iot_hub(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs
    ) -> "_models.CertificateListDescription":
        """Get the certificate list.

        Returns the list of certificates.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateListDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2019_03_22.models.CertificateListDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateListDescription"]
        error_map = self._error_map(kwargs)

        url = self._format_url(self.list_by_iot_hub.metadata['url'], resource_group_name, resource_name)
        request = self._client.get(url, self._query_parameters(), self._header_parameters())
        pipeline_response = await self._run(request, error_map, [200], **kwargs)

        deserialized = self._deserialize('CertificateListDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_by_iot_hub.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        **kwargs
    ) -> "_models.CertificateDescription":
        """Get the certificate.

        Returns the certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2019_03_22.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateDescription"]
        error_map = self._error_map(kwargs)

        url = self._format_url(self.get.metadata['url'], resource_group_name, resource_name, certificate_name)
        request = self._client.get(url, self._query_parameters(), self._header_parameters())
        pipeline_response = await self._run(request, error_map, [200], **kwargs)

        deserialized = self._deserialize('CertificateDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        certificate_description: "_models.CertificateBodyDescription",
        if_match: Optional[str] = None,
        **kwargs
    ) -> "_models.CertificateDescription":
        """Upload the certificate to the IoT hub.

        Adds new or replaces existing certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param certificate_description: The certificate body.
        :type certificate_description: ~azure.mgmt.iothub.v2019_03_22.models.CertificateBodyDescription
        :param if_match: ETag of the Certificate. Do not specify for creating a brand new certificate.
         Required to update an existing certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2019_03_22.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateDescription"]
        error_map = self._error_map(kwargs)
        content_type = kwargs.pop("content_type", "application/json")

        url = self._format_url(self.create_or_update.metadata['url'], resource_group_name, resource_name, certificate_name)
        header_parameters = self._header_parameters(if_match=if_match, content_type=content_type)
        body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')
        request = self._client.put(url, self._query_parameters(), header_parameters, content=body_content)
        # 200 (replaced) and 201 (created) carry the same payload shape.
        pipeline_response = await self._run(request, error_map, [200, 201], **kwargs)

        deserialized = self._deserialize('CertificateDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def delete(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        **kwargs
    ) -> None:
        """Delete an X509 certificate.

        Deletes an existing X509 certificate or does nothing if it does not exist.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = self._error_map(kwargs)

        url = self._format_url(self.delete.metadata['url'], resource_group_name, resource_name, certificate_name)
        request = self._client.delete(url, self._query_parameters(), self._header_parameters(if_match=if_match))
        pipeline_response = await self._run(request, error_map, [200, 204], **kwargs)

        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'}  # type: ignore

    async def generate_verification_code(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        **kwargs
    ) -> "_models.CertificateWithNonceDescription":
        """Generate verification code for proof of possession flow.

        Generates verification code for proof of possession flow. The verification code will be used to
        generate a leaf certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateWithNonceDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2019_03_22.models.CertificateWithNonceDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateWithNonceDescription"]
        error_map = self._error_map(kwargs)

        url = self._format_url(
            self.generate_verification_code.metadata['url'], resource_group_name, resource_name, certificate_name)
        request = self._client.post(url, self._query_parameters(), self._header_parameters(if_match=if_match))
        pipeline_response = await self._run(request, error_map, [200], **kwargs)

        deserialized = self._deserialize('CertificateWithNonceDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    generate_verification_code.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/generateVerificationCode'}  # type: ignore

    async def verify(
        self,
        resource_group_name: str,
        resource_name: str,
        certificate_name: str,
        if_match: str,
        certificate_verification_body: "_models.CertificateVerificationDescription",
        **kwargs
    ) -> "_models.CertificateDescription":
        """Verify certificate's private key possession.

        Verifies the certificate's private key possession by providing the leaf cert issued by the
        verifying pre uploaded certificate.

        :param resource_group_name: The name of the resource group that contains the IoT hub.
        :type resource_group_name: str
        :param resource_name: The name of the IoT hub.
        :type resource_name: str
        :param certificate_name: The name of the certificate.
        :type certificate_name: str
        :param if_match: ETag of the Certificate.
        :type if_match: str
        :param certificate_verification_body: The name of the certificate.
        :type certificate_verification_body: ~azure.mgmt.iothub.v2019_03_22.models.CertificateVerificationDescription
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CertificateDescription, or the result of cls(response)
        :rtype: ~azure.mgmt.iothub.v2019_03_22.models.CertificateDescription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateDescription"]
        error_map = self._error_map(kwargs)
        content_type = kwargs.pop("content_type", "application/json")

        url = self._format_url(self.verify.metadata['url'], resource_group_name, resource_name, certificate_name)
        header_parameters = self._header_parameters(if_match=if_match, content_type=content_type)
        body_content = self._serialize.body(certificate_verification_body, 'CertificateVerificationDescription')
        request = self._client.post(url, self._query_parameters(), header_parameters, content=body_content)
        pipeline_response = await self._run(request, error_map, [200], **kwargs)

        deserialized = self._deserialize('CertificateDescription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    verify.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/verify'}  # type: ignore
"noreply@github.com"
] | scbedd.noreply@github.com |
6a175ea3d24d0ba13c9ba9188e4c07e166cac602 | a777170c979214015df511999f5f08fc2e0533d8 | /claf/factory/tokens.py | 0f6f9d5845d72d7970d5be980590319398151d30 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | srlee-ai/claf | 210b2d51918cf210683e7489ccb8347cb8b1f146 | 89b3e5c5ec0486886876ea3bac381508c6a6bf58 | refs/heads/master | 2021-02-13T04:38:36.198288 | 2020-03-03T15:01:01 | 2020-03-03T15:01:01 | 244,661,892 | 0 | 0 | MIT | 2020-03-03T14:45:52 | 2020-03-03T14:45:52 | null | UTF-8 | Python | false | false | 3,202 | py |
from overrides import overrides
from claf.config.registry import Registry
from claf.config.utils import convert_config2dict
from claf.tokens import tokenizer
from .base import Factory
def make_tokenizer(tokenizer_cls, tokenizer_config, parent_tokenizers=None):
    """Instantiate ``tokenizer_cls`` from a raw tokenizer config dict.

    The config's ``name`` key selects a tokenizer package; the sub-dict stored
    under that package name is moved to the generic ``config`` key before the
    whole dict is splatted into the constructor.

    Args:
        tokenizer_cls: class to instantiate (called as ``tokenizer_cls(**cfg)``).
        tokenizer_config: raw config dict; mutated in place. Must contain
            ``name`` or the function returns ``None``.
        parent_tokenizers: extra keyword arguments (parent tokenizer instances)
            merged into the constructor call. Defaults to no parents.

    Returns:
        A ``tokenizer_cls`` instance, or ``None`` when the config is absent or
        has no ``name`` key.
    """
    if tokenizer_config is None or "name" not in tokenizer_config:
        return None

    # Avoid the shared-mutable-default-argument pitfall: build a fresh dict
    # instead of declaring `parent_tokenizers={}` in the signature.
    if parent_tokenizers is None:
        parent_tokenizers = {}

    package_name = tokenizer_config["name"]
    package_config = tokenizer_config.get(package_name, {})

    # Normalize: package-specific options live under "config".
    tokenizer_config["config"] = package_config
    if package_name in tokenizer_config:
        del tokenizer_config[package_name]

    tokenizer_config.update(parent_tokenizers)
    return tokenizer_cls(**tokenizer_config)
def make_all_tokenizers(all_tokenizer_config):
    """Build every tokenizer level from a single config dict.

    Tokenizers are shared resources used by all tokens together. The levels
    form a small dependency chain: the sentence tokenizer feeds the word
    tokenizer, which in turn is the parent of the sub-word and character
    tokenizers; the BPE tokenizer stands alone.
    """
    # Sentence level defaults to NLTK's punkt; every other level is optional.
    sent = make_tokenizer(
        tokenizer.SentTokenizer, all_tokenizer_config.get("sent", {"name": "punkt"})
    )
    word = make_tokenizer(
        tokenizer.WordTokenizer,
        all_tokenizer_config.get("word", None),
        parent_tokenizers={"sent_tokenizer": sent},
    )

    word_parent = {"word_tokenizer": word}
    subword = make_tokenizer(
        tokenizer.SubwordTokenizer,
        all_tokenizer_config.get("subword", None),
        parent_tokenizers=word_parent,
    )
    char = make_tokenizer(
        tokenizer.CharTokenizer,
        all_tokenizer_config.get("char", None),
        parent_tokenizers=word_parent,
    )
    bpe = make_tokenizer(
        tokenizer.BPETokenizer,
        all_tokenizer_config.get("bpe", None),
    )

    return {
        "bpe": bpe,
        "char": char,
        "subword": subword,
        "word": word,
        "sent": sent,
    }
class TokenMakersFactory(Factory):
    """
    TokenMakers Factory Class

    Assembles one token maker (tokenizers, indexer, embedding, vocab configs)
    per entry of ``config.names`` / ``config.types``.

    * Args:
        config: token config from argument (config.token)
    """

    LANGS = ["eng", "kor"]

    def __init__(self):
        self.registry = Registry()

    @overrides
    def create(self, config):
        tokenizer_config = getattr(config, "tokenizer", None)
        tokenizers = (
            make_all_tokenizers(convert_config2dict(tokenizer_config))
            if tokenizer_config
            else {}
        )

        token_names, token_types = config.names, config.types
        if len(token_names) != len(token_types):
            raise ValueError("token_names and token_types must be same length.")

        token_makers = {"tokenizers": tokenizers}
        # Deterministic construction order: iterate (name, type) pairs sorted by name.
        for name, token_type in sorted(zip(token_names, token_types)):
            raw_config = getattr(config, name, {})
            if raw_config != {}:
                raw_config = convert_config2dict(raw_config)
            # Token (tokenizer, indexer, embedding, vocab)
            token_makers[name] = self.registry.get(f"token:{token_type}")(
                tokenizers=tokenizers,
                indexer_config=raw_config.get("indexer", {}),
                embedding_config=raw_config.get("embedding", {}),
                vocab_config=raw_config.get("vocab", {}),
            )
        return token_makers
| [
"humanbrain.djlee@gmail.com"
] | humanbrain.djlee@gmail.com |
ccb3efc9358bc44e6f4d99ee6cd99ba7342e7f28 | 4a48593a04284ef997f377abee8db61d6332c322 | /python/dbm/python2/test_dbm.py | 9104e09525b70761eee62c3643b98bb9ef3753c2 | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 1,614 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dbm
import whichdb
def main():
    """Create a dbm database, report which backend wrote it, then read it back.
    NOTE: Python 2 code (print statements, legacy `whichdb` module)."""
    # WRITE #######
    db = dbm.open('foo_dbm', 'c')  # 'c': open read/write, creating the file if needed
    db['one'] = 'un'
    db['two'] = 'dos'
    db['three'] = 'tres'
    db.close()
    # WHICH DBM ###
    # whichdb sniffs the on-disk format chosen by dbm.open above
    print "whichdb:", whichdb.whichdb('foo_dbm')
    print
    # READ ########
    db = dbm.open('foo_dbm', 'r')  # 'r': read-only
    for k in db.keys():
        print k, ':', db[k]
    db.close()
if __name__ == '__main__':
    main()
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
76f40dbe916e27ef75c91cef03d606f26fd73a67 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_0_2_aMAN_plus.py | e499fe2553e862196dbf07b73f6585542cbcf6da | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,187 | py | t = int(input())
arr = []   # working stack for the current case: 1 = '+', 0 = '-'
s = ""     # raw input string for the current case
times = 0  # number of flips performed for the current case
def rev(x): # index of last from 0
    """Flip the prefix arr[0..x]: reverse it and invert every element."""
    global arr
    global times
    times = times + 1
    # Reversing and complementing in one pass is equivalent to the original
    # swap-and-flip loop, including the odd-length middle element.
    arr[:x + 1] = [1 - v for v in reversed(arr[:x + 1])]
def check(n):
    """Return the first index where adjacent entries of arr[0:n] differ, or -1
    if the prefix is uniform."""
    global arr
    for idx, (cur, nxt) in enumerate(zip(arr, arr[1:n])):
        if cur != nxt:
            return idx
    return -1
def ini():
    """Load the global string s into arr as bits: '+' -> 1, anything else -> 0."""
    global s
    global arr
    for ch in s:
        arr.append(1 if ch == '+' else 0)
# Per-case driver: flip a maximal uniform prefix until the stack is uniform;
# if it ends as all '-' (no 1s), one final whole-stack flip finishes the job.
# (Appears to be Code Jam 2016 qualification "Revenge of the Pancakes" — per file path.)
for i in range(t):
    # `global` at module level is a no-op; kept as in the original source.
    global arr
    global s
    global times
    s = input()
    ini()
    boo = True
    while(boo):
        j = check(len(s))
        if(j== (-1)):
            boo = False
        else:
            rev(j) # index
        # When arr is still mixed, neither branch below fires and the loop continues.
        if(1 not in arr):
            rev(len(s)-1)
            boo = False
        elif(0 not in arr):
            boo = False
    #######################
    print("Case #"+str(i+1)+": "+str(times))
    # reset per-case state
    arr = []
    s = ""
    times = 0
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
6d45841a1bc911599365d6efe618b8bd10ce654d | fd85e5320da3e0dae5ffc270c54caa8f85d20af7 | /user_analytics/views.py | c1164b992a4c427f2472395f8cdc5ad598a66611 | [
"Apache-2.0"
] | permissive | madre/analytics_nvd3 | 9a657937c91c9acd4b60e4ff33daecdf75a78c49 | 052f775c12f04e0e3a9fd321ee05de1fbceec09a | refs/heads/master | 2021-01-10T18:26:29.051575 | 2015-04-13T10:26:30 | 2015-04-13T10:26:30 | 33,605,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | # -*- coding: utf-8 -*-
# !/usr/local/bin/python
__version__ = "1.0"
__license__ = "Copyright (c) 2014-2010, levp-inc, All rights reserved."
__author__ = "madeling <madeling@letvpicture.com>"
from django.views.generic import TemplateView
from utils.redis_cache import REDIS_INS
class UserBasicTemplate(TemplateView):
    """Render wifi device/user totals from Redis as an nvd3 discrete bar chart."""

    template_name = "device.html"

    def get_context_data(self, **kwargs):
        context = super(UserBasicTemplate, self).get_context_data(**kwargs)

        # Fetch the three totals from the Redis hash and expose each in the context.
        fields = ("device_wifi_total", "user_wifi_total", "user_wifi_origin_total")
        totals = []
        for field in fields:
            value = REDIS_INS.hget("analytics_wifi_user_", field)
            context[field] = value
            totals.append(value)

        # Chart data
        xdata = ["设备", "用户", "独立用户"]
        ydata = totals
        extra_serie1 = {"tooltip": {"y_start": "", "y_end": " cal"}}
        chartdata = {
            'x': xdata, 'name1': '', 'y1': ydata, 'extra1': extra_serie1,
        }
        context.update({
            'charttype': "discreteBarChart",
            'chartdata': chartdata,
            'chartcontainer': 'discretebarchart_container',  # container name
            'extra': {
                'x_is_date': False,
                'x_axis_format': '',
                'tag_script_js': True,
                'jquery_on_ready': True,
            },
        })
        return context
| [
"lingnck@gmail.com"
] | lingnck@gmail.com |
56d1a355702247f5513deef778923b1b68ad26fb | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/145/D.py | e13c534bf3fde50c178423dff2b3ee1085432ceb | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def cmb(n, r, MOD, g1, g2):
if ( r<0 or r>n ):
return 0
r = min(r, n-r)
return g1[n] * g2[r] * g2[n-r] % MOD
def main():
    """ABC145 D: count lattice paths from (0, 0) to (X, Y) using steps (+1, +2)
    and (+2, +1), i.e. C(m + n, n) mod 1e9+7 where 2m + n = X and m + 2n = Y."""
    X, Y = map(int, input().split())
    MOD = 10 ** 9 + 7
    # Every step adds 3 to X + Y, so the target must have X + Y divisible by 3.
    if (X + Y) % 3 != 0:
        print(0)
        exit()
    m = (2 * X - Y) // 3
    n = (2 * Y - X) // 3
    N = 10**6
    g1 = [1, 1] # factorial table
    g2 = [1, 1] # inverse-factorial table
    inverse = [0, 1] # modular inverses, used only to build g2 incrementally
    for i in range( 2, N + 1 ):
        g1.append( ( g1[-1] * i ) % MOD )
        # Standard recurrence: inv(i) = -inv(MOD % i) * (MOD // i) mod MOD
        inverse.append( ( -inverse[MOD % i] * (MOD//i) ) % MOD )
        g2.append( (g2[-1] * inverse[-1]) % MOD )
    # cmb returns 0 when m or n is negative (unreachable target).
    ans = cmb(n + m, n, MOD, g1, g2)
    print(ans)
if __name__ == "__main__":
    main()
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
0ac3ce67a375b998817489ff3c11903d0feb0220 | 804ce3c2897a8720a27e0d86ac3b868ebd41cd20 | /archive/admin.py | e4f227d12c67b1dd80f1b36e7d55cdbcb8853546 | [] | no_license | hoboland21/mango | 383359aa85b685bfe77c6336974600038454cf80 | be8bf3398612a0c3dbb4498eb5eb18407c574ce3 | refs/heads/master | 2023-07-13T06:25:39.508434 | 2021-08-25T03:25:37 | 2021-08-25T03:25:37 | 399,520,705 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | from django.contrib import admin
from rsvn.models import *
# Register your models here.
#---------------------------------------------------------
class RateHeadingAdmin(admin.ModelAdmin) :
    # Changelist columns/ordering for rate headings (registration commented out below).
    list_display = ('title','descr',)
    ordering = ('title',)
#---------------------------------------------------------
class RateAtomAdmin(admin.ModelAdmin) :
    # Changelist columns/ordering for individual rates (registration commented out below).
    list_display = ('rateHeading','rateName','rateType','rateDays','lowSeason','highSeason','peakSeason',)
    ordering = ('rateName',)
#---------------------------------------------------------
class RoomInfoAdmin(admin.ModelAdmin) :
    # Rooms listed by type, then room number.
    list_display = ('type', 'number', 'beds','connect', 'notes')
    ordering = ('type','number')
#---------------------------------------------------------
class SeasonAdmin(admin.ModelAdmin) :
    # Seasons listed chronologically by start date.
    list_display = ('name','beginDate','endDate')
    ordering = ('beginDate',)
admin.site.register(RoomInfo,RoomInfoAdmin)
admin.site.register(Season,SeasonAdmin)
#admin.site.register(RateAtom,RateAtomAdmin)
#admin.site.register(RateHeading,RateHeadingAdmin)
#admin.site.register(ServiceRate,ServiceRateAdmin)
| [
"jc@saipantech.com"
] | jc@saipantech.com |
48b265ee6ff2631ca78f8a2252c5ec978f7961fd | 4be56098894a95da5964622fc4102b69e4530ab6 | /题库/100305.二叉搜索树与双向链表.py | 9e2ff50e4126f7779f567dc905bf18390c74e9d1 | [] | no_license | ACENDER/LeetCode | 7c7c7ecc8d0cc52215272f47ec34638637fae7ac | 3383b09ab1246651b1d7b56ab426a456f56a4ece | refs/heads/master | 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 100305.二叉搜索树与双向链表.py
| [
"1641429327@qq.com"
] | 1641429327@qq.com |
01712697928ec9ebd687a93b160d3d87fd2b3bec | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/pandas/genome_calculation.py | 74384393563d0a41da51f305348efca0a30d59db | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 662 | py | import pandas as pd
import numpy as np
import datetime
import sys
# Input spreadsheet; a single command-line argument overrides the default.
filename = 'raw_data.xlsx'
if len(sys.argv) == 2:
    filename = sys.argv[1]
def calculate_averages(row):
    """Log2 ratio of the mean of entries 0-2 to the mean of entries 3-5 of *row*.
    Assumes the row's first six positions are two triplicate measurements — TODO confirm."""
    first_mean = row.iloc[:3].mean()
    second_mean = row.iloc[3:6].mean()
    return np.log2(first_mean / second_mean)
# Time the Excel load separately from the downstream computation.
start_time = datetime.datetime.now()
df = pd.read_excel(filename, index_col='genome name')
load_time = datetime.datetime.now()
print(load_time - start_time)
print(df.head())
# Per-row log2 ratio of the first triplicate's mean to the second's.
calculated_value = df.apply(calculate_averages, axis=1)
threshold = 0.2
# Keep only rows whose log2 ratio exceeds the threshold.
filtered_df = df[calculated_value > threshold]
print(filtered_df.head())
calculate_time = datetime.datetime.now()
print(calculate_time - load_time)
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
8d83837460b7f44dea09a8afbd1b6c73e717ecef | eb204640c0d941d83636973ed47cebad72d9fee3 | /examples/scanvi/scanvi.py | 3ddd7e404bd4a0b79dc63d7bc1ceca8c411e1c0b | [
"Apache-2.0"
] | permissive | vishalbelsare/pyro | 708bcda2b6783caf10f74be78fac055d1b575cb5 | b64c15e2c2c1b94897c90aa032cf7156ca3ff531 | refs/heads/dev | 2023-09-02T21:43:50.380769 | 2022-03-16T23:20:11 | 2022-03-16T23:20:11 | 137,397,138 | 0 | 0 | Apache-2.0 | 2022-03-17T20:06:11 | 2018-06-14T18:56:42 | Python | UTF-8 | Python | false | false | 16,796 | py | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
We use a semi-supervised deep generative model of transcriptomics data to propagate labels
from a small set of labeled cells to a larger set of unlabeled cells. In particular we
use a dataset of peripheral blood mononuclear cells (PBMC) from 10x Genomics and
(approximately) reproduce Figure 6 in reference [1].
Note that for simplicity we do not reproduce every aspect of the scANVI pipeline. For
example, we do not use dropout in our neural network encoders/decoders, nor do we include
batch/dataset annotations in our model.
References:
[1] "Harmonization and Annotation of Single-cell Transcriptomics data with Deep Generative Models,"
Chenling Xu, Romain Lopez, Edouard Mehlman, Jeffrey Regier, Michael I. Jordan, Nir Yosef.
[2] https://github.com/YosefLab/scvi-tutorials/blob/50dd3269abfe0c375ec47114f2c20725a016736f/seed_labeling.ipynb
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from matplotlib.patches import Patch
from torch.distributions import constraints
from torch.nn.functional import softmax, softplus
from torch.optim import Adam
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.contrib.examples.scanvi_data import get_data
from pyro.distributions.util import broadcast_shape
from pyro.infer import SVI, TraceEnum_ELBO, config_enumerate
from pyro.optim import MultiStepLR
# Helper for making fully-connected neural networks
# Helper for making fully-connected neural networks
def make_fc(dims):
    """Build an MLP of Linear -> BatchNorm1d -> ReLU stages, one per consecutive
    pair in *dims*, with the trailing ReLU removed so the final output is
    unconstrained."""
    modules = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        modules += [nn.Linear(in_dim, out_dim), nn.BatchNorm1d(out_dim), nn.ReLU()]
    # Exclude the final non-linearity
    return nn.Sequential(*modules[:-1])
# Splits a tensor in half along the final dimension
# Splits a tensor into two equal halves along the final dimension
def split_in_half(t):
    """Return (first_half, second_half) of *t* split along its last dimension.
    The last dimension must be even (the reshape fails otherwise)."""
    paired = t.reshape(*t.shape[:-1], 2, -1)
    return paired.unbind(-2)
# Helper for broadcasting inputs to neural net
# Helper for broadcasting inputs to neural net
def broadcast_inputs(input_args):
    """Expand every tensor in *input_args* to a common batch shape, leaving the
    final (feature) dimension of each tensor untouched."""
    common = broadcast_shape(*(t.shape[:-1] for t in input_args)) + (-1,)
    return [t.expand(common) for t in input_args]
# Used in parameterizing p(z2 | z1, y)
# Used in parameterizing p(z2 | z1, y)
class Z2Decoder(nn.Module):
    """Maps (z1, y) to the (loc, scale) of a diagonal Gaussian over z2."""

    def __init__(self, z1_dim, y_dim, z2_dim, hidden_dims):
        super().__init__()
        self.fc = make_fc([z1_dim + y_dim] + hidden_dims + [2 * z2_dim])

    def forward(self, z1, y):
        joint = torch.cat([z1, y], dim=-1)
        # Flatten to 2D so nn.BatchNorm1d sees (batch, features), then restore
        # the original leading shape.
        flat = self.fc(joint.reshape(-1, joint.size(-1)))
        hidden = flat.reshape(joint.shape[:-1] + flat.shape[-1:])
        loc, scale = split_in_half(hidden)
        # softplus keeps the scale positive and is generally more numerically
        # stable than exp.
        return loc, softplus(scale)
# Used in parameterizing p(x | z2)
# Used in parameterizing p(x | z2)
class XDecoder(nn.Module):
    """Maps z2 to ZINB gate logits and a normalized per-gene mean."""

    def __init__(self, num_genes, z2_dim, hidden_dims):
        super().__init__()
        self.fc = make_fc([z2_dim] + hidden_dims + [2 * num_genes])

    def forward(self, z2):
        gate_logits, mu = split_in_half(self.fc(z2))
        # mu sums to one across genes; the total count scale comes from `l`.
        return gate_logits, softmax(mu, dim=-1)
# Used in parameterizing q(z2 | x) and q(l | x)
# Used in parameterizing q(z2 | x) and q(l | x)
class Z2LEncoder(nn.Module):
    """Maps raw counts x to Gaussian parameters for z2 and LogNormal-style
    parameters for the log-count variable l."""

    def __init__(self, num_genes, z2_dim, hidden_dims):
        super().__init__()
        self.fc = make_fc([num_genes] + hidden_dims + [2 * z2_dim + 2])

    def forward(self, x):
        # Work in log space for numerical stability; the model's observation
        # distribution still sees a proper count distribution.
        h1, h2 = split_in_half(self.fc(torch.log(1 + x)))
        # The final unit of each half parameterizes `l`; the rest is z2.
        z2_loc, l_loc = h1[..., :-1], h1[..., -1:]
        z2_scale, l_scale = softplus(h2[..., :-1]), softplus(h2[..., -1:])
        return z2_loc, z2_scale, l_loc, l_scale
# Used in parameterizing q(z1 | z2, y)
# Used in parameterizing q(z1 | z2, y)
class Z1Encoder(nn.Module):
    """Maps (z2, y) to the (loc, scale) of a diagonal Gaussian over z1."""

    def __init__(self, num_labels, z1_dim, z2_dim, hidden_dims):
        super().__init__()
        self.fc = make_fc([num_labels + z2_dim] + hidden_dims + [2 * z1_dim])

    def forward(self, z2, y):
        # Pyro expands y during enumeration (but not z2), so broadcast both to a
        # common batch shape before concatenating.
        joint = torch.cat(broadcast_inputs([z2, y]), dim=-1)
        # Flatten to 2D so nn.BatchNorm1d behaves correctly, then restore shape.
        flat = self.fc(joint.reshape(-1, joint.size(-1)))
        hidden = flat.reshape(joint.shape[:-1] + flat.shape[-1:])
        loc, scale = split_in_half(hidden)
        return loc, softplus(scale)
# Used in parameterizing q(y | z2)
# Used in parameterizing q(y | z2)
class Classifier(nn.Module):
    """Maps z2 to unnormalized class logits."""

    def __init__(self, z2_dim, hidden_dims, num_labels):
        super().__init__()
        self.fc = make_fc([z2_dim] + hidden_dims + [num_labels])

    def forward(self, x):
        return self.fc(x)
# Encompasses the scANVI model and guide as a PyTorch nn.Module
# Encompasses the scANVI model and guide as a PyTorch nn.Module
class SCANVI(nn.Module):
    """Semi-supervised scANVI generative model (``model``) and amortized
    variational distribution (``guide``), packaged as a single nn.Module so
    that all encoder/decoder networks share one parameter registry."""
    def __init__(
        self,
        num_genes,
        num_labels,
        l_loc,
        l_scale,
        latent_dim=10,
        alpha=0.01,
        scale_factor=1.0,
    ):
        assert isinstance(num_genes, int)
        self.num_genes = num_genes
        assert isinstance(num_labels, int) and num_labels > 1
        self.num_labels = num_labels
        # This is the dimension of both z1 and z2
        assert isinstance(latent_dim, int) and latent_dim > 0
        self.latent_dim = latent_dim
        # The next two hyperparameters determine the prior over the log_count latent variable `l`
        assert isinstance(l_loc, float)
        self.l_loc = l_loc
        assert isinstance(l_scale, float) and l_scale > 0
        self.l_scale = l_scale
        # This hyperparameter controls the strength of the auxiliary classification loss
        assert isinstance(alpha, float) and alpha > 0
        self.alpha = alpha
        assert isinstance(scale_factor, float) and scale_factor > 0
        self.scale_factor = scale_factor
        # nn.Module.__init__ must run before the submodules below are assigned.
        super().__init__()
        # Setup the various neural networks used in the model and guide
        self.z2_decoder = Z2Decoder(
            z1_dim=self.latent_dim,
            y_dim=self.num_labels,
            z2_dim=self.latent_dim,
            hidden_dims=[50],
        )
        self.x_decoder = XDecoder(
            num_genes=num_genes, hidden_dims=[100], z2_dim=self.latent_dim
        )
        self.z2l_encoder = Z2LEncoder(
            num_genes=num_genes, z2_dim=self.latent_dim, hidden_dims=[100]
        )
        self.classifier = Classifier(
            z2_dim=self.latent_dim, hidden_dims=[50], num_labels=num_labels
        )
        self.z1_encoder = Z1Encoder(
            num_labels=num_labels,
            z1_dim=self.latent_dim,
            z2_dim=self.latent_dim,
            hidden_dims=[50],
        )
        # Small constant added inside logs below for numerical stability.
        self.epsilon = 5.0e-3
    def model(self, x, y=None):
        """Generative model p(x, z1, z2, l, y); y is observed when given."""
        # Register various nn.Modules with Pyro
        pyro.module("scanvi", self)
        # This gene-level parameter modulates the variance of the observation distribution
        theta = pyro.param(
            "inverse_dispersion",
            10.0 * x.new_ones(self.num_genes),
            constraint=constraints.positive,
        )
        # We scale all sample statements by scale_factor so that the ELBO is normalized
        # wrt the number of datapoints and genes
        with pyro.plate("batch", len(x)), poutine.scale(scale=self.scale_factor):
            z1 = pyro.sample(
                "z1", dist.Normal(0, x.new_ones(self.latent_dim)).to_event(1)
            )
            # Note that if y is None (i.e. y is unobserved) then y will be sampled;
            # otherwise y will be treated as observed.
            y = pyro.sample(
                "y", dist.OneHotCategorical(logits=x.new_zeros(self.num_labels)), obs=y
            )
            z2_loc, z2_scale = self.z2_decoder(z1, y)
            z2 = pyro.sample("z2", dist.Normal(z2_loc, z2_scale).to_event(1))
            l_scale = self.l_scale * x.new_ones(1)
            l = pyro.sample("l", dist.LogNormal(self.l_loc, l_scale).to_event(1))
            # Note that by construction mu is normalized (i.e. mu.sum(-1) == 1) and the
            # total scale of counts for each cell is determined by `l`
            gate_logits, mu = self.x_decoder(z2)
            # TODO revisit this parameterization if torch.distributions.NegativeBinomial changes
            # from failure to success parametrization;
            # see https://github.com/pytorch/pytorch/issues/42449
            nb_logits = (l * mu + self.epsilon).log() - (theta + self.epsilon).log()
            x_dist = dist.ZeroInflatedNegativeBinomial(
                gate_logits=gate_logits, total_count=theta, logits=nb_logits
            )
            # Observe the datapoint x using the observation distribution x_dist
            pyro.sample("x", x_dist.to_event(1), obs=x)
    # The guide specifies the variational distribution
    def guide(self, x, y=None):
        """Variational distribution q(z1, z2, l, y | x); adds an auxiliary
        classification term (weighted by alpha) when y is observed."""
        pyro.module("scanvi", self)
        with pyro.plate("batch", len(x)), poutine.scale(scale=self.scale_factor):
            z2_loc, z2_scale, l_loc, l_scale = self.z2l_encoder(x)
            pyro.sample("l", dist.LogNormal(l_loc, l_scale).to_event(1))
            z2 = pyro.sample("z2", dist.Normal(z2_loc, z2_scale).to_event(1))
            y_logits = self.classifier(z2)
            y_dist = dist.OneHotCategorical(logits=y_logits)
            if y is None:
                # x is unlabeled so sample y using q(y|z2)
                y = pyro.sample("y", y_dist)
            else:
                # x is labeled so add a classification loss term
                # (this way q(y|z2) learns from both labeled and unlabeled data)
                classification_loss = y_dist.log_prob(y)
                # Note that the negative sign appears because we're adding this term in the guide
                # and the guide log_prob appears in the ELBO as -log q
                pyro.factor(
                    "classification_loss",
                    -self.alpha * classification_loss,
                    has_rsample=False,
                )
            z1_loc, z1_scale = self.z1_encoder(z2, y)
            pyro.sample("z1", dist.Normal(z1_loc, z1_scale).to_event(1))
def main(args):
    """Train scANVI on the selected dataset, then (for pbmc with --plot)
    visualize UMAP embeddings colored by seed labels and inferred cell-type
    probabilities, saving the figure to scanvi.pdf."""
    # Fix random number seed
    pyro.util.set_rng_seed(args.seed)
    # Enable optional validation warnings
    # Load and pre-process data
    dataloader, num_genes, l_mean, l_scale, anndata = get_data(
        dataset=args.dataset, batch_size=args.batch_size, cuda=args.cuda
    )
    # Instantiate instance of model/guide and various neural networks
    # scale_factor normalizes the ELBO per datapoint and per gene.
    scanvi = SCANVI(
        num_genes=num_genes,
        num_labels=4,
        l_loc=l_mean,
        l_scale=l_scale,
        scale_factor=1.0 / (args.batch_size * num_genes),
    )
    if args.cuda:
        scanvi.cuda()
    # Setup an optimizer (Adam) and learning rate scheduler.
    # By default we start with a moderately high learning rate (0.005)
    # and reduce by a factor of 5 after 20 epochs.
    scheduler = MultiStepLR(
        {
            "optimizer": Adam,
            "optim_args": {"lr": args.learning_rate},
            "milestones": [20],
            "gamma": 0.2,
        }
    )
    # Tell Pyro to enumerate out y when y is unobserved
    guide = config_enumerate(scanvi.guide, "parallel", expand=True)
    # Setup a variational objective for gradient-based learning.
    # Note we use TraceEnum_ELBO in order to leverage Pyro's machinery
    # for automatic enumeration of the discrete latent variable y.
    elbo = TraceEnum_ELBO(strict_enumeration_warning=False)
    svi = SVI(scanvi.model, guide, scheduler, elbo)
    # Training loop
    for epoch in range(args.num_epochs):
        losses = []
        for x, y in dataloader:
            if y is not None:
                y = y.type_as(x)
            loss = svi.step(x, y)
            losses.append(loss)
        # Tell the scheduler we've done one epoch.
        scheduler.step()
        print("[Epoch %04d]  Loss: %.5f" % (epoch, np.mean(losses)))
    # Put neural networks in eval mode (needed for batchnorm)
    scanvi.eval()
    # Now that we're done training we'll inspect the latent representations we've learned
    if args.plot and args.dataset == "pbmc":
        import scanpy as sc
        # Compute latent representation (z2_loc) for each cell in the dataset
        latent_rep = scanvi.z2l_encoder(dataloader.data_x)[0]
        # Compute inferred cell type probabilities for each cell
        y_logits = scanvi.classifier(latent_rep)
        y_probs = softmax(y_logits, dim=-1).data.cpu().numpy()
        # Use scanpy to compute 2-dimensional UMAP coordinates using our
        # learned 10-dimensional latent representation z2
        anndata.obsm["X_scANVI"] = latent_rep.data.cpu().numpy()
        sc.pp.neighbors(anndata, use_rep="X_scANVI")
        sc.tl.umap(anndata)
        umap1, umap2 = anndata.obsm["X_umap"][:, 0], anndata.obsm["X_umap"][:, 1]
        # Construct plots; all plots are scatterplots depicting the two-dimensional UMAP embedding
        # and only differ in how points are colored
        # The topmost plot depicts the 200 hand-curated seed labels in our dataset
        fig, axes = plt.subplots(3, 2)
        seed_marker_sizes = anndata.obs["seed_marker_sizes"]
        axes[0, 0].scatter(
            umap1,
            umap2,
            s=seed_marker_sizes,
            c=anndata.obs["seed_colors"],
            marker=".",
            alpha=0.7,
        )
        axes[0, 0].set_title("Hand-Curated Seed Labels")
        # Legend-only panel: color patches naming the four cell types.
        patch1 = Patch(color="lightcoral", label="CD8-Naive")
        patch2 = Patch(color="limegreen", label="CD4-Naive")
        patch3 = Patch(color="deepskyblue", label="CD4-Memory")
        patch4 = Patch(color="mediumorchid", label="CD4-Regulatory")
        axes[0, 1].legend(loc="center left", handles=[patch1, patch2, patch3, patch4])
        axes[0, 1].get_xaxis().set_visible(False)
        axes[0, 1].get_yaxis().set_visible(False)
        axes[0, 1].set_frame_on(False)
        # The remaining plots depict the inferred cell type probability for each of the four cell types
        s10 = axes[1, 0].scatter(
            umap1, umap2, s=1, c=y_probs[:, 0], marker=".", alpha=0.7
        )
        axes[1, 0].set_title("Inferred CD8-Naive probability")
        fig.colorbar(s10, ax=axes[1, 0])
        s11 = axes[1, 1].scatter(
            umap1, umap2, s=1, c=y_probs[:, 1], marker=".", alpha=0.7
        )
        axes[1, 1].set_title("Inferred CD4-Naive probability")
        fig.colorbar(s11, ax=axes[1, 1])
        s20 = axes[2, 0].scatter(
            umap1, umap2, s=1, c=y_probs[:, 2], marker=".", alpha=0.7
        )
        axes[2, 0].set_title("Inferred CD4-Memory probability")
        fig.colorbar(s20, ax=axes[2, 0])
        s21 = axes[2, 1].scatter(
            umap1, umap2, s=1, c=y_probs[:, 3], marker=".", alpha=0.7
        )
        axes[2, 1].set_title("Inferred CD4-Regulatory probability")
        fig.colorbar(s21, ax=axes[2, 1])
        fig.tight_layout()
        plt.savefig("scanvi.pdf")
if __name__ == "__main__":
    # Hard version pin: this script was written against Pyro 1.8.0.
    assert pyro.__version__.startswith("1.8.0")
    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description="single-cell ANnotation using Variational Inference"
    )
    parser.add_argument("-s", "--seed", default=0, type=int, help="rng seed")
    parser.add_argument(
        "-n", "--num-epochs", default=60, type=int, help="number of training epochs"
    )
    parser.add_argument(
        "-d",
        "--dataset",
        default="pbmc",
        type=str,
        help="which dataset to use",
        choices=["pbmc", "mock"],
    )
    parser.add_argument(
        "-bs", "--batch-size", default=100, type=int, help="mini-batch size"
    )
    parser.add_argument(
        "-lr", "--learning-rate", default=0.005, type=float, help="learning rate"
    )
    parser.add_argument(
        "--cuda", action="store_true", default=False, help="whether to use cuda"
    )
    parser.add_argument(
        "--plot", action="store_true", default=False, help="whether to make a plot"
    )
    args = parser.parse_args()
    main(args)
| [
"noreply@github.com"
] | vishalbelsare.noreply@github.com |
60d08f29afa6ffc2672f0e31e78ffda838221d70 | 3fd47598050ab6098088eddc79624dfa855c2143 | /djangoRest/settings.py | 5a18da26ac279010d5a33f80c85edab2135a8091 | [] | no_license | sajibuzzaman/djangoRest_Framework | cf6be098744e2506cea089ebc8f9e0dc21c0162f | a90b571f2c6dc1b9f832a0e0dda5f08b1724d9cc | refs/heads/master | 2023-04-10T04:28:49.872941 | 2021-04-21T18:41:07 | 2021-04-21T18:41:07 | 359,920,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | """
Django settings for djangoRest project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): development key committed to source control — load from an
# environment variable and rotate before any real deployment.
SECRET_KEY = 'django-insecure-^l$vw5bdp-f7zk0m^s2f8xe&38l)6k-_9lh$(80fet%86q+sor'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is accepted while DEBUG=True; add served hostnames before going live.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # apps
    'djangoRestApp',
    'articleApp',
    # Rest Framework
    'rest_framework',
    'rest_framework.authtoken',  # DB-backed Token model for TokenAuthentication
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoRest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoRest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS =[
BASE_DIR / 'static',
]
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"muhammadsajibuzzaman1998@gmail.com"
] | muhammadsajibuzzaman1998@gmail.com |
f01c048210b678c812f3bb6b87718e5bd62b2199 | 43d0413d129d997b41cd87a740010f889ab0c646 | /dataset_balancing/balance_dataset.py | a159cf9add6b0a13dae208db000a3772fbf4e165 | [] | no_license | LTTTDH/WebVision | 467aa46f63c2b95332f83504feb73f8628382c26 | 4a133071441b7412638382c4465dc2925b87235f | refs/heads/master | 2023-03-15T21:59:29.473087 | 2017-09-27T10:12:06 | 2017-09-27T10:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import numpy as np
# Source list: one "<image path> <class id>" entry per line, for 1000 classes.
path = "../../../datasets/WebVision/info/train_filelist_all.txt"
dest_path = "../../../datasets/WebVision/info/train_balanced_filelist.txt"
file = open(path, "r")
print("Loading data ...")
print(path)
listofclasses = {}
for c in range(0,1000):
    listofclasses[c] = []
# Load data
for line in file:
    d = line.split()
    listofclasses[int(d[1])].append(d[0])
file.close()
# Count number per class
numxclass = np.zeros((1000,1))
for c in range(0,1000):
    numxclass[c] = len(listofclasses[c])
maxxclass = max(numxclass)
print "Max per class: " + str(maxxclass)
# Oversampling target: half of the largest class size.
minxclass = int(maxxclass - maxxclass * 0.5)
print "Min per class: " + str(minxclass)
print "Writing data"
# Write data balancing
file = open(dest_path, "w")
for c in range(0,1000):
    elements_writed = 0
    # Repeat the class list until at least minxclass entries are written
    # (oversamples small classes; larger classes are written once in full).
    # NOTE(review): if a class has zero samples this while loop never
    # terminates — verify every class id in [0, 1000) occurs in the source.
    while elements_writed <= minxclass:
        for el in listofclasses[c]:
            file.write(el + " " + str(c) + "\n")
            elements_writed += 1
            if elements_writed > minxclass and elements_writed > numxclass[c]: break
    print "Class " + str(c) + " : " + str(elements_writed)
file.close()
print "DONE"
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
bcb8f52bdf77dee5e83cf7ccb9a921a9caba190e | 2befb6f2a5f1fbbd5340093db43a198abdd5f53b | /pythonProject/customAuth/CustomAuthApp/migrations/0001_initial.py | 5e92563f7b3e1ae564639280aafad3ca484fe3c3 | [] | no_license | JanardanPandey/RestAPI | 1956d3529782d18ef2118961f6286e3213665aad | 654933a4d9687076a00c6f4c57fc3dfee1a2c567 | refs/heads/master | 2023-06-14T07:02:31.702000 | 2021-07-02T07:50:59 | 2021-07-02T07:50:59 | 382,357,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 3.2.3 on 2021-06-12 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration for this app: creates the initial schema.
    initial = True
    # No prior migrations to depend on.
    dependencies = [
    ]
    operations = [
        # Creates the Student table: auto big-int PK plus name/city/roll columns.
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('city', models.CharField(max_length=20)),
                ('roll', models.IntegerField()),
            ],
        ),
    ]
| [
"janardanpandey0510@gmail.com"
] | janardanpandey0510@gmail.com |
392544400066482d508c6d31e0c3c975481bbfab | cacac33bd0ff7bd1024c00f907d87a2750a4118f | /radiopadre_client/backends/backend_utils.py | d126c5cb8fac1275d0be18d7814310d14ba24a18 | [
"MIT"
] | permissive | ratt-ru/radiopadre-client | b96abda62e4ad6c26d45f6468e61dbf333806530 | ef138860d22523bf08a847317f3daca363db65a3 | refs/heads/master | 2023-03-07T09:23:22.237526 | 2021-12-05T06:32:00 | 2021-12-05T06:32:00 | 234,801,909 | 4 | 0 | MIT | 2021-07-14T13:05:03 | 2020-01-18T21:44:18 | Python | UTF-8 | Python | false | false | 2,173 | py | import socket, time, os, os.path
import iglesia
from iglesia.utils import message, bye, ff, shell
from radiopadre_client import config
def update_server_from_repository():
    """
    Updates the radiopadre git working directory, if necessary.

    Acts only when --update was requested, a server install path is
    configured, and that path is actually a git checkout.  Aborts the whole
    program (via bye()) if the git update fails.
    :return: None
    """
    if config.UPDATE and config.SERVER_INSTALL_PATH and os.path.isdir(config.SERVER_INSTALL_PATH + "/.git"):
        # NOTE: ff() interpolates the {config....} / {cmd} expressions into
        # the string at call time, so these commands reflect current config.
        if config.SERVER_INSTALL_BRANCH:
            cmd = ff("cd {config.SERVER_INSTALL_PATH} && git fetch origin && git checkout {config.SERVER_INSTALL_BRANCH} && git pull")
        else:
            cmd = ff("cd {config.SERVER_INSTALL_PATH} && git pull")
        message(ff(
            "--update specified, --server-install-path at {config.SERVER_INSTALL_PATH} will be updated via"))
        message(ff("  {cmd}"))
        # shell() returns a non-zero exit status on failure
        if shell(cmd):
            bye("update failed")
def await_server_startup(port, process=None, server_name="jupyter notebook server", init_wait=2, wait=60):
    """
    Waits for a server process to start up, tries to connect to the specified port,
    returns when successful
    :param port: port number
    :param process: if not None, waits on the process and checks its return code
    :param server_name: human-readable name used in the progress message
    :param init_wait: number of seconds to wait before trying to connect
    :param wait: total number of seconds to wait before giving up
    :return: number of seconds elapsed before connection, or None if failed
    """
    t0 = time.time()
    # pause to let the server spin up before the first connection attempt
    time.sleep(init_wait)
    # poll roughly every 0.1s for up to `wait` seconds
    for retry in range(int(wait / .1)):
        # BUGFIX: the old code created one socket and reused it after a failed
        # connect(), which is unreliable on many platforms, and never closed
        # it (fd leak).  Create a fresh socket per attempt and always close it.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(("localhost", port))
            return time.time() - t0
        except socket.error:
            pass
        finally:
            sock.close()
        if not retry:
            message(ff("Waiting for up to {wait} secs for the {server_name} to come up"))
        # if we're watching a process and it has already exited, give up early
        if process is not None:
            process.poll()
            if process.returncode is not None:
                return None
        time.sleep(.1)
    return None
| [
"osmirnov@gmail.com"
] | osmirnov@gmail.com |
b2530a028a58fb2c70429d5a4c290a7924804c48 | 4618ab5db3bae5417eca7e0bcb054d1707402d5c | /test/test_nnapi.py | bd990033594a7ba2689a1604f91e133d0ec74ad4 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | Mogul5306/pytorch | 460322f36678fbd715d25f20b25df4fc032b9f58 | 2e49c5dc37bce5ce8ba463a59aacb5d3e4a638b6 | refs/heads/master | 2023-06-16T19:46:08.490529 | 2021-07-13T07:55:56 | 2021-07-13T07:57:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,232 | py | #!/usr/bin/env python3
import os
import ctypes
import torch
from typing import Tuple
from torch.backends._nnapi.prepare import convert_model_to_nnapi
from torch.testing._internal.common_utils import TestCase, run_tests
def qpt(t, scale, zero_point, dtype=torch.quint8):
    """Build a per-tensor-quantized tensor from any tensor-like value."""
    dense = torch.tensor(t)
    return torch.quantize_per_tensor(dense, scale, zero_point, dtype)
def nhwc(t):
    """Return a channels-last (NHWC) clone of ``t``, tagged for NNAPI."""
    clone = t.clone().contiguous(memory_format=torch.channels_last)
    # marker attribute consumed by the NNAPI converter
    clone.nnapi_nhwc = True
    return clone
class TestNNAPI(TestCase):
    def setUp(self):
        """Select the qnnpack quantized engine and detect an NNAPI runtime.

        If LIBNEURALNETWORKS_PATH points at a libneuralnetworks shared
        library, it is loaded and converted models will actually be executed;
        otherwise only model conversion is exercised (see check()).
        """
        # Avoid saturation in fbgemm
        torch.backends.quantized.engine = 'qnnpack'

        libneuralnetworks_path = os.environ.get("LIBNEURALNETWORKS_PATH")
        if libneuralnetworks_path:
            ctypes.cdll.LoadLibrary(libneuralnetworks_path)
            print("Will attempt to run NNAPI models.")
            self.can_run_nnapi = True
        else:
            self.can_run_nnapi = False
    def check(
            self,
            module,
            arg_or_args,
            *,
            trace_args=None,
            convert_args=None,
            atol_rtol=None,
            limit=None,
    ):
        """Trace ``module``, convert it to NNAPI, and compare outputs.

        :param module: the eager nn.Module under test (put into eval mode).
        :param arg_or_args: a single tensor or a list of tensors; used for
            tracing/conversion by default and always for the comparison run.
        :param trace_args: optional alternative args for torch.jit.trace.
        :param convert_args: optional alternative args for the NNAPI
            conversion (e.g. zero-sized templates for flexible shapes).
        :param atol_rtol: optional (atol, rtol) pair for the comparison.
        :param limit: for quantized outputs, the maximum number of element
            mismatches tolerated; if exceeded, the comparison is re-run with
            zero tolerance to produce a readable failure message.

        If no NNAPI runtime was detected in setUp(), only conversion is
        checked and the function returns early.
        """
        with torch.no_grad():
            if isinstance(arg_or_args, torch.Tensor):
                args = [arg_or_args]
            else:
                args = arg_or_args
            module.eval()
            traced = torch.jit.trace(module, trace_args or args)
            nnapi_module = convert_model_to_nnapi(traced, convert_args or args)
            if not self.can_run_nnapi:
                # Only test that the model was converted successfully.
                return
            eager_output = module(*args)
            nnapi_output = nnapi_module(*args)
            kwargs = {}
            if atol_rtol is not None:
                kwargs["atol"] = atol_rtol[0]
                kwargs["rtol"] = atol_rtol[1]
            self.assertEqual(eager_output, nnapi_output, **kwargs)
            if limit is not None:
                # count how many quantized elements differ between backends
                mismatches = \
                    eager_output.int_repr().to(torch.int32) - \
                    nnapi_output.int_repr().to(torch.int32)
                if mismatches.count_nonzero() > limit:
                    # Too many mismatches. Re-run the check with no tolerance
                    # to get a nice message.
                    self.assertEqual(eager_output, nnapi_output, atol=0, rtol=0)
def float_and_quant_and_nhwc(self, inp_float, scale, zero_point):
torch.manual_seed(29)
inp_quant = qpt(inp_float, 0.03, 128)
return [
("float", inp_float),
("float-nhwc", nhwc(inp_float)),
("quant", inp_quant),
("quant-nhwc", nhwc(inp_quant)),
]
def test_prelu(self):
arg = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
single_a = torch.nn.PReLU()
self.check(single_a, arg)
multi_a = torch.nn.PReLU(4)
with torch.no_grad():
multi_a.weight.copy_(torch.tensor([.1, .2, .3, .4]))
self.check(multi_a, nhwc(arg))
# Test flexible size
self.check(
multi_a,
arg,
trace_args=[torch.zeros(1, 4, 3, 3)],
convert_args=[nhwc(torch.zeros(1, 4, 0, 0))],
)
def test_quantize(self):
self.check(
torch.nn.quantized.Quantize(0.25, 2, torch.quint8),
nhwc(torch.tensor([[[[1.0]], [[2.0]]]])))
def test_dequantize(self):
self.check(
torch.nn.quantized.DeQuantize(),
nhwc(qpt([[[[1.0]], [[2.0]]]], 0.25, 2)))
def test_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, arg):
return arg.unsqueeze(self.dim)
self.check(UnsqueezeModule(-2), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(-1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(0), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(1), torch.randn(4, 2, 2))
self.check(UnsqueezeModule(2), torch.randn(4, 2, 2))
def test_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, shape):
super().__init__()
self.shape = shape
def forward(self, arg):
return arg.reshape(self.shape)
self.check(
ReshapeModule((2, 4)),
torch.randn(4, 2, 1, 1))
self.check(
ReshapeModule((8, -1)),
nhwc(torch.randn(4, 2, 1, 1)))
with self.assertRaisesRegex(Exception, "target size"):
self.check(
ReshapeModule((2, 4)),
nhwc(torch.randn(4, 2, 1, 1)))
def test_flatten(self):
for mod in [
torch.nn.Flatten(),
torch.nn.Flatten(start_dim=2, end_dim=3),
torch.nn.Flatten(start_dim=2, end_dim=4),
torch.nn.Flatten(start_dim=0, end_dim=-2),
torch.nn.Flatten(start_dim=0, end_dim=4)
]:
self.check(mod, torch.randn(4, 2, 1, 3, 7))
self.check(
torch.nn.Flatten(),
torch.randn(4, 2, 1, 3, 7),
convert_args=[torch.zeros(0, 2, 1, 3, 7)]
)
with self.assertRaisesRegex(Exception, "dims can't be flexible"):
self.check(torch.nn.Flatten(), torch.randn(4, 2, 0, 0, 7))
with self.assertRaisesRegex(Exception, "Only 1 dim"):
self.check(
torch.nn.Flatten(start_dim=1, end_dim=-2),
torch.randn(0, 2, 1, 3, 0))
def test_slice(self):
class SliceModule(torch.nn.Module):
def __init__(self, start, stop, step):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, t):
return t[1:, self.start:self.stop:self.step, :]
class SliceModule2(torch.nn.Module):
def forward(self, t):
return t[3:]
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2)
)
self.check(
SliceModule2(),
torch.randn(5)
)
# flex inputs
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(4, 6, 0)]
)
with self.assertRaisesRegex(Exception, "slice with flexible shape"):
self.check(
SliceModule(1, 5, 2),
torch.randn(4, 6, 2),
convert_args=[torch.zeros(0, 0, 0)]
)
def test_cat(self):
class CatModule(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, t1, t2):
return torch.cat([t1, t2], self.dim)
self.check(
CatModule(0),
[
torch.randn(1, 2, 3, 3),
torch.randn(2, 2, 3, 3),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
])
self.check(
CatModule(1),
[
nhwc(torch.randn(1, 2, 3, 3)),
nhwc(torch.randn(1, 4, 3, 3)),
])
self.check(
CatModule(1),
[
torch.randn(1, 2, 3, 3),
torch.randn(1, 4, 3, 3),
],
convert_args=[
torch.zeros(0, 0, 0, 0),
torch.zeros(0, 0, 0, 0)
])
def test_pointwise_unary(self):
for op in ["relu", "sigmoid"]:
with self.subTest(op):
class UnaryModule(torch.nn.Module):
def forward(self, arg):
if op == "relu":
return torch.nn.functional.relu(arg)
if op == "sigmoid":
return torch.sigmoid(arg)
raise Exception("Bad op")
self.check(UnaryModule(), torch.tensor([-1.0, 1.0]))
def test_pointwise_binary(self):
for op in ["add", "sub", "mul", "div"]:
with self.subTest(op):
class BinaryModule(torch.nn.Module):
def forward(self, lhs, rhs):
if op == "add":
return lhs + rhs
if op == "sub":
return lhs - rhs
if op == "mul":
return lhs * rhs
if op == "div":
return lhs / rhs
raise Exception("Bad op")
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([3.0, 4.0]),
])
self.check(
BinaryModule(),
[
torch.tensor([[1.0, 2.0]]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
with self.assertRaisesRegex(Exception, "Non-equal-rank broadcast"):
self.check(
BinaryModule(),
[
torch.tensor([1.0, 2.0]),
torch.tensor([[3.0, 4.0], [5.0, 6.0]]),
])
def test_hardtanh(self):
inp = torch.tensor([-2.0, -0.5, 0.5, 2.0, 7.0])
self.check(torch.nn.Hardtanh(), inp)
self.check(torch.nn.Hardtanh(0.0, 6.0), inp)
with self.assertRaisesRegex(Exception, "hardtanh with args"):
self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
def test_softmax(self):
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
self.check(torch.nn.Softmax(), inp)
self.check(torch.nn.Softmax(dim=0), inp)
# Test flexible size
self.check(
torch.nn.Softmax(),
inp,
convert_args=[torch.zeros(0, 0)],
)
def test_to(self):
class ToCPU(torch.nn.Module):
def __init__(self):
super().__init__()
self.prelu = torch.nn.PReLU()
def forward(self, x):
y = x.to("cpu")
# add prelu since input operand can't be output
return self.prelu(y)
arg = torch.randn(1, 2, 3, 3)
self.check(ToCPU(), arg)
# Test flexible size
self.check(
ToCPU(),
arg,
convert_args=[torch.zeros(1, 2, 0, 0)],
)
def test_detach(self):
class DetachModule(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x.detach()
return torch.nn.functional.relu(y)
self.check(DetachModule(), torch.randn(1, 2, 3, 3))
self.check(
DetachModule(), torch.randn(1, 2, 3, 3),
convert_args=[torch.zeros(1, 2, 0, 0)])
def test_log_softmax(self):
inp = torch.randn(3, 10)
self.check(torch.nn.LogSoftmax(), inp)
self.check(torch.nn.LogSoftmax(0), inp)
def test_mean(self):
class MeanModule(torch.nn.Module):
def __init__(self, dim, keep=False):
super().__init__()
self.dim = dim
self.keep = keep
def forward(self, t):
return torch.mean(t, dim=self.dim, keepdim=self.keep)
self.check(MeanModule(0), torch.randn(2, 3))
self.check(MeanModule(1), torch.randn(2, 3))
self.check(MeanModule([2, 3]), torch.randn(2, 3, 6, 6))
self.check(MeanModule([2, 3]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2]), nhwc(torch.randn(2, 3, 6, 6)))
self.check(MeanModule([-1, -2], keep=True), nhwc(torch.randn(2, 3, 6, 6)))
def test_max_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.MaxPool2d(2), inp)
self.check(torch.nn.MaxPool2d((3, 4)), inp)
self.check(torch.nn.MaxPool2d((3, 4), (1, 2)), inp)
def test_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
atol_rtol = None
limit = None
convert_dims = (2, 3, 0, 0)
convert_arg = torch.zeros(*convert_dims)
for model in (
torch.nn.AvgPool2d(2),
torch.nn.AvgPool2d((3, 4)),
torch.nn.AvgPool2d((3, 4), (1, 2))):
if "quant" in name:
atol_rtol = (1, 0)
limit = model(inp).numel()
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in name:
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_adaptive_avg_pool2d(self):
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.AdaptiveAvgPool2d((1, 1)), inp)
with self.assertRaisesRegex(Exception, "with output size"):
self.check(torch.nn.AdaptiveAvgPool2d((2, 2)), inp)
def test_upsample_nearest2d(self):
convert_args = dict(self.float_and_quant_and_nhwc(torch.randn(2, 3, 0, 0), 0.3, 128))
for (name, inp) in self.float_and_quant_and_nhwc(torch.randn(2, 3, 12, 16), 0.3, 128):
with self.subTest(name):
self.check(torch.nn.UpsamplingNearest2d(size=(16, 20)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(24, 32)), inp)
self.check(torch.nn.UpsamplingNearest2d(size=(36, 48)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(1.5, 1.5)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp)
self.check(torch.nn.UpsamplingNearest2d(scale_factor=(3.0, 3.0)), inp)
self.check(
torch.nn.UpsamplingNearest2d(size=(24, 32)), inp,
convert_args=[convert_args[name]]
)
self.check(
torch.nn.UpsamplingNearest2d(scale_factor=(2.0, 2.0)), inp,
convert_args=[convert_args[name]]
)
def test_linear(self):
torch.manual_seed(29)
self.check(torch.nn.Linear(16, 32), torch.randn(2, 16))
self.check(
torch.nn.Linear(16, 32), torch.randn(2, 16),
convert_args=[torch.zeros(0, 16)])
def test_conv2d(self):
cases = [
# in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name
( 4, 8, (3, 3), 1, 0, 1, 1, (2, 4, 16, 16), "3x3"), # noqa: E201,E241
( 4, 8, (3, 3), 1, 0, 1, 0, (2, 4, 16, 16), "3x3nobias"), # noqa: E201,E241
( 4, 16, (3, 3), 1, 1, 1, 1, (2, 4, 16, 16), "3x3p1"), # noqa: E201,E241
( 8, 8, (3, 3), 2, 0, 1, 1, (2, 8, 16, 16), "3x3s2"), # noqa: E201,E241
( 4, 8, (5, 5), 1, 0, 1, 1, (2, 4, 16, 16), "5x5"), # noqa: E201,E241
( 4, 4, (3, 3), 1, 0, 4, 1, (2, 4, 16, 16), "3x3dw"), # noqa: E201,E241
( 8, 4, (1, 1), 1, 0, 1, 1, (2, 8, 16, 16), "1x1"), # noqa: E201,E241
]
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
for case in cases:
in_ch, out_ch, kernel, stride, padding, groups, bias, input_dim, name = case
with self.subTest("{}-{}".format(kind, name)):
inp = torch.randn(input_dim)
model = torch.nn.Conv2d(in_ch, out_ch, kernel, stride, padding, groups=groups, bias=bool(bias))
output_size = model(inp).numel()
atol_rtol = None
limit = None
convert_dims = (0, in_ch, 0, 0)
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
model = torch.nn.Sequential(model)
model.eval()
model.qconfig = torch.quantization.get_default_qconfig('qnnpack')
model = torch.quantization.prepare(model)
model(inp)
model = torch.quantization.convert(model)
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~1% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.03
convert_arg = qpt(torch.zeros(*convert_dims), 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_conv2d_transpose(self):
in_ch, out_ch, kernel = (5, 7, (2, 2))
input_dim = (4, 5, 3, 3)
inp = torch.randn(input_dim)
convert_dims = input_dim[:2] + (0, 0)
for kind in ["float", "float-nhwc", "quant", "quant-nhwc"]:
with self.subTest(kind):
model = torch.nn.ConvTranspose2d(in_ch, out_ch, kernel)
output_size = model(inp).numel()
atol_rtol = (0.0002, 0)
limit = None
convert_arg = torch.zeros(*convert_dims)
if "quant" in kind:
# FIXME 'aten::slow_conv_transpose2d' with arguments from the 'QuantizedCPU' backend
continue
model = torch.nn.Sequential(model)
model.eval()
model.qconfig = torch.quantization.get_default_qconfig('qnnpack')
model = torch.quantization.prepare(model)
model(inp)
model = torch.quantization.convert(model)
inp = qpt(inp, 1.0 / 16, 128)
# I've seen numerical differences between QNNPACK and NNAPI,
# but never more than 1 quantum, and never more than ~1% of
# the output in this test.
atol_rtol = (1, 0)
limit = output_size * 0.03
convert_arg = qpt(convert_arg, 1.0 / 16, 128)
if "nhwc" in kind:
inp = nhwc(inp)
convert_arg = nhwc(convert_arg)
self.check(model, inp, atol_rtol=atol_rtol, limit=limit)
self.check(
model,
inp,
convert_args=[convert_arg],
atol_rtol=atol_rtol,
limit=limit
)
def test_qadd(self):
func = torch.nn.quantized.QFunctional()
func.scale = 0.5
func.zero_point = 120
class AddMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add(lhs, rhs)
class AddReluMod(torch.nn.Module):
def forward(self, lhs, rhs):
return func.add_relu(lhs, rhs)
for (name, mod) in [("add", AddMod), ("add_relu", AddReluMod)]:
with self.subTest(name):
self.check(
mod(),
[
qpt([1.0, 2.0], 0.25, 128),
qpt([3.0, 4.0], 0.25, 128),
])
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt([[1.0, 2.0]], 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
]
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
]
)
self.check(
mod(),
[
qpt([[1.0, 2.0]], 0.25, 128),
qpt([[3.0, 4.0]], 0.25, 128),
],
convert_args=[
qpt(torch.zeros((1, 2)), 0.25, 128),
qpt(torch.zeros((1, 2)), 0.25, 128),
]
)
# NOTE: NNAPI qadd supports broadcast, but PT does not.
def test_qlinear(self):
torch.manual_seed(29)
weight = qpt(torch.randn(16, 32), 0.125, 0, torch.qint8)
bias = torch.randn(16)
mod = torch.nn.quantized.Linear(32, 16)
mod.set_weight_bias(weight, bias)
inp = qpt(torch.randn(2, 32), 0.05, 130, torch.quint8)
self.check(mod, inp)
def test_seblock_mul(self):
class MulModel(torch.nn.Module):
def forward(self, lhs, rhs):
return lhs * rhs
self.check(
MulModel(),
[
nhwc(torch.randn(2, 3, 4, 4)),
torch.randn(1, 3, 1, 1),
])
def test_multi_output(self):
class MultiModel(torch.nn.Module):
def forward(self, lhs, rhs) -> Tuple[torch.Tensor, torch.Tensor]:
the_sum = lhs + rhs
the_diff = lhs - rhs
return the_sum, the_diff
self.check(MultiModel(), [torch.tensor([1.0, 2.0]), torch.tensor([1.0, 3.0])])
if __name__ == '__main__':
run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
77c75a0b7749c3c5c8338bde04b1bb61e93bb78f | 3e4bb5b4036a66d25a72793c1deaa4f5572d37bf | /apps/dashboard/views.py | 2f723b17ac30dd70dec007dcbcc52e9db4be3f89 | [
"MIT"
] | permissive | hbussell/pinax-tracker | f7f7eb0676d01251d7d8832557be14665755844d | 4f6538324b2e1f7a8b14c346104d2f1bd8e1556b | refs/heads/master | 2021-01-20T12:06:29.630850 | 2010-02-03T00:39:05 | 2010-02-03T00:39:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py |
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from tasks.models import Task
from tasks.forms import TaskDashboardForm
from tasks.filters import TaskProjectFilter
from projects.models import Project
from django.contrib import messages
from django.utils.translation import ugettext
from django.template.defaultfilters import slugify
import re
from tagging.models import Tag
from pinax.utils.importlib import import_module
from django.conf import settings
workflow = import_module(getattr(settings, "TASKS_WORKFLOW_MODULE", "tasks.workflow"))
def dashboard(request, template_name="dashboard/dashboard.html"):
    """Main dashboard view: quick-add bars for tasks/projects plus a
    filtered task list.

    POST requests are first offered to the task/project quick-add handlers;
    if either consumes the request we redirect back (POST-redirect-GET).
    Otherwise the filtered task list is rendered.
    """
    if _handle_taskbar(request):
        return HttpResponseRedirect('/')
    if _handle_projects(request):
        return HttpResponseRedirect('/')

    task_form = TaskDashboardForm(request.user)

    group_by = request.GET.get("group_by")

    # join assignees up front to avoid one query per row in the template
    tasks = Task.objects.all().select_related("assignee")

    # default filtering: hide states "2" and "3" unless the querystring
    # explicitly overrides the state filter
    state_keys = dict(workflow.STATE_CHOICES).keys()
    default_states = set(state_keys).difference(
        # don't show these states
        set(["2", "3"])
    )

    filter_data = {"state": list(default_states)}
    filter_data.update(request.GET)
    task_filter = TaskProjectFilter(request.user, filter_data, queryset=tasks)

    # querystring with group_by stripped, so templates can re-link grouping
    # while preserving other filters (same convention as all_tasks)
    group_by_querydict = request.GET.copy()
    group_by_querydict.pop("group_by", None)
    group_by_querystring = group_by_querydict.urlencode()

    return render_to_response(template_name, {
        'projects': Project.objects.all(),
        'task_form': task_form,
        'task_filter': task_filter,
        'tasks': task_filter.qs,
        "group_by": group_by,
        # BUGFIX: previously computed but never passed to the template,
        # unlike the sibling all_tasks view
        "gbqs": group_by_querystring,
        "group": None,
    }, context_instance=RequestContext(request))
def _handle_taskbar(request):
    """Handle the dashboard quick-add task form.

    Extracts "@tag" tokens from the submitted task name ("@me"/"@my" are
    pseudo-tags assigning the task to the requesting user), creates the Task
    via TaskDashboardForm, and returns True when the request was consumed so
    the caller can redirect.  Returns None for anonymous users and
    non-matching requests.
    """
    if not request.user.is_authenticated():
        return
    if request.method == 'POST':
        if request.POST.get('add_task'):
            name = request.POST.get('task_name')
            # BUGFIX: project must default to None -- previously a missing or
            # empty 'task_project' field left it unbound, and the references
            # below raised UnboundLocalError.
            project = None
            project_id = request.POST.get('task_project', None)
            if project_id:
                try:
                    project = Project.objects.get(pk=project_id)
                except Project.DoesNotExist:
                    project = None
            # pull "@word" tokens out of the free-form task name
            regex = re.compile(r"(?P<word>@\w+.?)")
            tags = []
            for match in regex.findall(name):
                name = name.replace(match, '')
                tag = match.strip('@').strip(' ')
                tags.append(tag)
            name = name.strip(' ')
            task_form = TaskDashboardForm(request.user, data=request.POST)
            task_form.group = project
            if task_form.is_valid():
                task = task_form.save(commit=False)
                task.summary = name
                task.creator = request.user
                # "@me" / "@my" mean "assign this task to me"
                if 'me' in tags:
                    tags.remove('me')
                    task.assignee = request.user
                elif 'my' in tags:
                    tags.remove('my')
                    task.assignee = request.user
                task.group = project
                if hasattr(workflow, "initial_state"):
                    task.state = workflow.initial_state(task, request.user)
                task.tags = ' '.join(tags)
                task.save()
                task.save_history()
                messages.add_message(request, messages.SUCCESS,
                    ugettext("added task '%s'") % task.summary
                )
            return True
def _handle_projects(request):
    """Handle the dashboard quick-add project form.

    Creates a project with the submitted name (and a slugified slug) if one
    does not already exist, and returns True when the request was consumed.
    Returns None for anonymous users and non-matching requests.
    """
    if not request.user.is_authenticated():
        return
    if request.method == 'POST':
        if request.POST.get('add_project'):
            name = request.POST.get('project_name')
            # BUGFIX: get_or_create makes the existence check and insert a
            # single atomic operation, avoiding the duplicate-create race of
            # the previous get()-then-save() pattern.
            project, created = Project.objects.get_or_create(
                name=name,
                defaults={'slug': slugify(name), 'creator': request.user},
            )
            if created:
                messages.add_message(request, messages.SUCCESS,
                    ugettext("added project '%s'") % project.name
                )
            return True
def all_tasks(request, template_name="dashboard/all_tasks.html"):
    """Render every task (across all projects) with filtering and grouping.

    Mirrors ``dashboard`` but removes the milestone filter, since milestones
    are project-specific and do not apply to a cross-project listing.
    """
    # NOTE(review): these local imports shadow the module-level ones; in
    # particular ``from tasks import workflow`` bypasses the configurable
    # TASKS_WORKFLOW_MODULE import at the top of this file -- confirm this
    # is intentional.
    from tasks.models import Task
    from tasks import workflow
    from tasks.filters import TaskProjectFilter
    if not request.user.is_authenticated():
        is_member = False
    else:
        is_member = True
    # (is_member is currently unused)

    group_by = request.GET.get("group_by")

    tasks = Task.objects.all()

    tasks = tasks.select_related("assignee")

    # default filtering: hide states "2" and "3" unless overridden
    state_keys = dict(workflow.STATE_CHOICES).keys()
    default_states = set(state_keys).difference(
        # don"t show these states
        set(["2", "3"])
    )
    # milestones = [(m.id, m.title) for m in Milestone.objects.all()]
    filter_data = {"state": list(default_states)}
    #"milestone":
    #milestones}
    filter_data.update(request.GET)
    task_filter = TaskProjectFilter(request.user, filter_data, queryset=tasks)
    # task_filter.filter('milestone', milestone.id)

    # querystring with group_by stripped so templates can re-link grouping
    group_by_querydict = request.GET.copy()
    group_by_querydict.pop("group_by", None)
    group_by_querystring = group_by_querydict.urlencode()

    # milestones do not apply on the cross-project view
    del task_filter.filters['milestone']
    return render_to_response(template_name, {
        "group_by": group_by,
        "gbqs": group_by_querystring,
        "task_filter": task_filter,
        "tasks": task_filter.qs,
        "querystring": request.GET.urlencode(),
    }, context_instance=RequestContext(request))
| [
"harley@harley-desktop.(none)"
] | harley@harley-desktop.(none) |
789b05e2076e5b7f7ffca11a36057f33810e1c88 | 04f4cc1b9e5420968df75c362ffad7ed78bdb86c | /yt/finance/binomial.py | 7a395d45d7c8246d4753988f471dd077a06c7f9e | [] | no_license | toumorokoshi/yt.finance | 6f4b22fa042d86751f41c3cd30f1065bf8f6f30f | 7c12a6f09c30afc28dbdee1e50f9ff32c62bebe8 | refs/heads/master | 2021-01-17T03:13:06.396961 | 2013-04-05T08:03:09 | 2013-04-05T08:03:09 | 8,792,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,808 | py | """
binomial.py
A set of pricing tools for various options using the binomial model
"""
__author__ = 'yusuke tsutsumi'
import math
from yt.finance.lib import precision
class Binomial(object):
    """
    Price European/American options with the binomial (lattice) model.

    All pricing methods take the model parameters explicitly; the doctest
    examples use an instance ``b``.  The ``@precision`` decorator (project
    helper) appears to accept a ``precision=`` keyword and round results --
    see the doctests.
    """
    # NOTE(review): these class attributes document the model's parameters
    # but are never read by the methods below (everything is passed as
    # explicit arguments); ``periods`` is even assigned twice.
    periods = 1  # number of periods to evaluate with the binomial model
    initial_price = 0  # initial price of a stock
    strike_price = None  # price of stock at the end expiration of the option
    periods = 1  # number of periods in the pricing range
    return_price = None  # return on investment
    security_volatility = 0  # volatility of the security of the one period
    dividend = 0  # the dividend paid, in proportion of the price

    def __init__(self, **kwargs):
        # kwargs are accepted for forward compatibility but currently ignored
        pass

    def convert_black_sholes_params(self, periods, maturity, interest_rate,
                                    strike_price, volatility, dividend_yield):
        """
        Returns parameters to the binomial model from parameters for
        the black sholes model

        returns the market_return, gain, and the dividend

        >>> b.convert_black_sholes_params(15, 0.25, 0.02, 110, 0.3, 0.01)
        (1.0003333888950623, 1.0394896104013376, 0.00016670833873502509)
        """
        # per-period gross return, up-move factor, and dividend amount
        market_return = math.e ** (1.0 * interest_rate * maturity / periods)
        gain = math.e ** (volatility * math.sqrt(1.0 * maturity / periods))
        dividend = market_return * (1.0 - (math.e ** - (1.0 * dividend_yield * maturity / periods)))
        return (market_return, gain, dividend)

    @precision
    def price_american_put(self, periods, strike_price, market_return,
                           security_volatility, stock_lattice, dividend=0):
        """
        Get price of an american put.

        >>> lattice = b.generate_stock_lattice(3, 110, 1.07)
        >>> b.price_american_put(3, 100, 1.01, 1.07, lattice, precision=2)
        [[0.86], [0.0, 1.96], [0.0, 0.0, 4.48], [0, 0, 0, 10.21]]
        """
        # first initialize a matrix to house the results
        return_values = []
        # value for the last column starts at (strike_price - price_matrix_value)
        return_values.append(
            [(strike_price - x if strike_price - x > 0 else 0) \
             for x in stock_lattice[periods]])
        # work backwards from expiry; at each node take the greater of the
        # continuation value and the immediate-exercise value
        for i in range(periods):
            return_column = []
            for j in range(periods - i):
                price = self._calculate_security_pricing(
                    market_return,
                    security_volatility,
                    return_values[0][j],
                    return_values[0][j + 1],
                    dividend=dividend)
                excersize_now_price = strike_price - stock_lattice[periods - 1 - i][j]
                if price < excersize_now_price:
                    price = excersize_now_price
                return_column.append(price)
            return_values.insert(0, return_column)
        return return_values

    @precision
    def price_european_put(self, periods, strike_price, market_return,
                           security_volatility, stock_lattice, dividend=0):
        """
        Get price of a european put. Unlike an American put, a holder
        is not able to excersize early.

        >>> lattice = b.generate_stock_lattice(3, 110, 1.07)
        >>> b.price_european_put(3, 100, 1.01, 1.07, lattice, precision=2)
        [[0.86], [0.0, 1.96], [0.0, 0.0, 4.48], [0, 0, 0, 10.21]]
        """
        return_values = []
        return_values.append(
            [(strike_price - x if strike_price - x > 0 else 0) \
             for x in stock_lattice[periods]])
        # backward induction using only the continuation value (no early
        # exercise check, unlike the American put)
        for i in range(periods):
            return_column = []
            for j in range(periods - i):
                price = self._calculate_security_pricing(
                    market_return,
                    security_volatility,
                    return_values[0][j],
                    return_values[0][j + 1],
                    dividend=dividend)
                return_column.append(price)
            return_values.insert(0, return_column)
        return return_values

    @precision
    def price_call(self, periods, strike_price, market_return,
                   security_volatility, stock_lattice, dividend=0):
        """
        Get price of a call. As the optimal strategy in a call for
        American and european don't differ, there's no distinction
        with this method.

        This utilizes the binomial model to calculate the expirations
        of various prices, and uses dynamic programming to solve the
        call prices and periods from periods to period zero.

        if precision is greater than 0, the result is rounded to precision decimals.

        >>> lattice = b.generate_stock_lattice(3, 100, 1.07)
        >>> b.price_call(3, 100, 1.01, 1.07, lattice, precision=2)
        [[6.57], [10.23, 2.13], [15.48, 3.86, 0.0], [22.5, 7.0, 0, 0]]
        """
        # starting at the end, work backwards to find the proper values of the matrix.
        return_values = []
        # value for the last column starts at (price_matrix_value - strike_price)
        return_values.append(
            [(x - strike_price if x - strike_price > 0 else 0) \
             for x in stock_lattice[periods]])
        for i in range(periods):
            return_column = []
            for j in range(periods - i):
                price = self._calculate_security_pricing(
                    market_return,
                    security_volatility,
                    return_values[0][j],
                    return_values[0][j + 1],
                    dividend=dividend)
                return_column.append(price)
            return_values.insert(0, return_column)
        return return_values

    @precision
    def generate_stock_lattice(self, periods, initial_price, security_volatility):
        """
        Generate a price matrix of the security in various conditions,
        at each possible outcome.

        Outcome is rounded to accurracy digits

        >>> b.generate_stock_lattice(3, 100, 1.07, precision=2)
        [[100.0], [107.0, 93.46], [114.49, 100.0, 87.34], [122.5, 107.0, 93.46, 81.63]]
        """
        # column i holds the i+1 possible prices after i periods; entry j in
        # column i corresponds to (i - j) up-moves and j down-moves
        return_values = []
        for i in range(periods + 1):
            return_column = []
            for j in range(i):
                price = self._calculate_price(initial_price, security_volatility, i - j, j)
                return_column.append(price)
            price = self._calculate_price(initial_price, security_volatility, 0, i)
            return_column.append(price)
            return_values.append(return_column)
        return return_values

    @precision
    def _calculate_price(self, initial_price, security_volatility,
                         positive_changes, negative_changes):
        """
        Calculate and return the price of an underlying security after
        positive_changes price increases and negative_changes price
        decreases.

        >>> round(b._calculate_price(100, 1.10, 5, 3), 2)
        121.0
        """
        return initial_price * (security_volatility ** positive_changes) * \
            ((1.0 / security_volatility) ** negative_changes)

    @precision
    def _calculate_security_pricing(self, market_return, security_volatility,
                                    gain_price, loss_price, dividend=0):
        """
        This calculates the price of a security at the start of time, with risk-neutral pricing.

        We start with these values:
        * The standard return of the market R = market_return
        * The possible gain proportion u = security_volatility
        * The possible loss proportion d = 1 / security_volatility
        * The value of the security in the case of a gain Cu = gain_price
        * The value of the security in the case of a loss Cd = loss_price

        And try to find the security price C0

        By the arbitrage principle, we must ensure that the price of
        the security reflects the profit it provides over other means
        of investments. Thus, we find the total amount that needs to
        be invested in the security and other investments:

        u*s0*x + R*y = Cu
        d*s0*x + R*y = Cd
        C0 = x*s0 + y

        solving, we end up with:

        C0 = (1/R)*((R-d)/(u-d)*Cu + (u-R)/(u-d)*Cd)

        >>> round(b._calculate_security_pricing(1.01, 1.07, 5, 0), 2)
        2.76
        """
        security_probability = self._risk_neutral_probability(market_return,
                                                             security_volatility,
                                                             dividend=dividend)
        # discounted risk-neutral expectation of the two successor values
        return (1 / market_return) * ((security_probability * gain_price) +
                                      ((1 - security_probability) * loss_price))

    @precision
    def _risk_neutral_probability(self, market_return, security_volatility, dividend=0):
        """
        Return the probabilities that emerge from a perfectly
        competitive market.

        I.E. given the provided market return,
        possible gain ratio, and possible loss ratio of a security, the returned value
        is the probability of a gain required to ensure that the security
        provides the same risk as any other investment in the market.

        >>> round(b._risk_neutral_probability(1.01, 1.07), 3)
        0.557
        >>> round(b._risk_neutral_probability(1.0, 1.039, 0.0001), 4)
        0.4891
        """
        # q = (R - 1/u - dividend) / (u - 1/u), with d = 1/u
        return (1.0 * market_return - (1 / security_volatility) - dividend) \
            / (security_volatility - (1 / security_volatility))
if __name__ == '__main__':
    # Run the doctests above; they reference a shared instance ``b``.
    import doctest
    doctest.testmod(extraglobs={
        'b': Binomial()
    })
| [
"tsutsumi.yusuke@gmail.com"
] | tsutsumi.yusuke@gmail.com |
bbaabec616048487232d14e3d7df6c3f072d1f0e | 536a59c31d9e7d56b91a1c49f814e1b6ab513b27 | /webserver/dependencies/SQLAlchemy-0.5.6/test/orm/test_eager_relations.py | aaba9bbe5d00ca47c15de1aa000f7f8c6356de42 | [
"MIT"
] | permissive | hughperkins/ailadder | 46bf4f32e837bcf831cfa0eaaba15d3aec561bce | 2dd6c07c2e5d8709be483917b3b120322fdbc80e | refs/heads/master | 2021-01-01T19:24:58.964859 | 2009-10-23T09:12:10 | 2009-10-23T09:12:10 | 313,108 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 62,150 | py | """basic tests of eager loaded attributes"""
from sqlalchemy.test.testing import eq_
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy.orm import eagerload, deferred, undefer
from sqlalchemy import Integer, String, Date, ForeignKey, and_, select, func
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, relation, create_session, lazyload, aliased
from sqlalchemy.test.testing import eq_
from sqlalchemy.test.assertsql import CompiledSQL
from test.orm import _base, _fixtures
import datetime
class EagerTest(_fixtures.FixtureTest):
    """Tests of eager (joined) loading of relations against the standard
    User/Address/Order/Item/Keyword fixtures, asserting both the loaded
    object graphs and the number of SQL statements emitted."""
    # Fixture rows are inserted once for the whole class and never deleted.
    run_inserts = 'once'
    run_deletes = None
    # Simple one-to-many eager load; both filtered and full result sets.
    @testing.resolve_artifact_names
    def test_basic(self):
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), lazy=False, order_by=Address.id)
        })
        sess = create_session()
        q = sess.query(User)
        assert [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])] == q.filter(User.id==7).all()
        eq_(self.static.user_address_result, q.order_by(User.id).all())
    # A relation added via add_property() after the mapper was already
    # compiled/used still eager loads in a single statement.
    @testing.resolve_artifact_names
    def test_late_compile(self):
        m = mapper(User, users)
        sess = create_session()
        sess.query(User).all()
        m.add_property("addresses", relation(mapper(Address, addresses)))
        sess.expunge_all()
        def go():
            eq_(
                [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])],
                sess.query(User).options(eagerload('addresses')).filter(User.id==7).all()
            )
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_no_orphan(self):
        """An eagerly loaded child object is not marked as an orphan"""
        mapper(User, users, properties={
            'addresses':relation(Address, cascade="all,delete-orphan", lazy=False)
        })
        mapper(Address, addresses)
        sess = create_session()
        user = sess.query(User).get(7)
        assert getattr(User, 'addresses').hasparent(sa.orm.attributes.instance_state(user.addresses[0]), optimistic=True)
        assert not sa.orm.class_mapper(Address)._is_orphan(sa.orm.attributes.instance_state(user.addresses[0]))
    # Eager-loaded collections honor a single-column order_by.
    @testing.resolve_artifact_names
    def test_orderby(self):
        mapper(User, users, properties = {
            'addresses':relation(mapper(Address, addresses), lazy=False, order_by=addresses.c.email_address),
        })
        q = create_session().query(User)
        assert [
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ] == q.order_by(User.id).all()
    # Eager-loaded collections honor a multi-column order_by list.
    @testing.resolve_artifact_names
    def test_orderby_multi(self):
        mapper(User, users, properties = {
            'addresses':relation(mapper(Address, addresses), lazy=False, order_by=[addresses.c.email_address, addresses.c.id]),
        })
        q = create_session().query(User)
        assert [
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ] == q.order_by(User.id).all()
    @testing.resolve_artifact_names
    def test_orderby_related(self):
        """A regular mapper select on a single table can order by a relation to a second table"""
        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relation(Address, lazy=False, order_by=addresses.c.id),
        ))
        q = create_session().query(User)
        l = q.filter(User.id==Address.user_id).order_by(Address.email_address).all()
        assert [
            User(id=8, addresses=[
                Address(id=2, email_address='ed@wood.com'),
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=7, addresses=[
                Address(id=1)
            ]),
        ] == l
    # Descending order_by expressions on the eager-loaded collection.
    @testing.resolve_artifact_names
    def test_orderby_desc(self):
        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relation(Address, lazy=False,
                                 order_by=[sa.desc(addresses.c.email_address)]),
        ))
        sess = create_session()
        assert [
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=2, email_address='ed@wood.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=3, email_address='ed@bettyboop.com'),
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ] == sess.query(User).order_by(User.id).all()
    # A deferred() foreign key column interacting with eager loads: the
    # eager loader may populate the FK value, or leave it deferred,
    # depending on which side of the relation is deferred.
    @testing.resolve_artifact_names
    def test_deferred_fk_col(self):
        User, Address, Dingaling = self.classes.get_all(
            'User', 'Address', 'Dingaling')
        users, addresses, dingalings = self.tables.get_all(
            'users', 'addresses', 'dingalings')
        mapper(Address, addresses, properties={
            'user_id':deferred(addresses.c.user_id),
            'user':relation(User, lazy=False)
        })
        mapper(User, users)
        sess = create_session()
        for q in [
            sess.query(Address).filter(Address.id.in_([1, 4, 5])),
            sess.query(Address).filter(Address.id.in_([1, 4, 5])).limit(3)
        ]:
            sess.expunge_all()
            eq_(q.all(),
                [Address(id=1, user=User(id=7)),
                 Address(id=4, user=User(id=8)),
                 Address(id=5, user=User(id=9))]
            )
        a = sess.query(Address).filter(Address.id==1).first()
        def go():
            eq_(a.user_id, 7)
        # assert that the eager loader added 'user_id' to the row and deferred
        # loading of that col was disabled
        self.assert_sql_count(testing.db, go, 0)
        # do the mapping in reverse
        # (we would have just used an "addresses" backref but the test
        # fixtures then require the whole backref to be set up, lazy loaders
        # trigger, etc.)
        sa.orm.clear_mappers()
        mapper(Address, addresses, properties={
            'user_id':deferred(addresses.c.user_id),
        })
        mapper(User, users, properties={
            'addresses':relation(Address, lazy=False)})
        for q in [
            sess.query(User).filter(User.id==7),
            sess.query(User).filter(User.id==7).limit(1)
        ]:
            sess.expunge_all()
            eq_(q.all(),
                [User(id=7, addresses=[Address(id=1)])]
            )
        sess.expunge_all()
        u = sess.query(User).get(7)
        def go():
            assert u.addresses[0].user_id==7
        # assert that the eager loader didn't have to affect 'user_id' here
        # and that its still deferred
        self.assert_sql_count(testing.db, go, 1)
        sa.orm.clear_mappers()
        mapper(User, users, properties={
            'addresses':relation(Address, lazy=False)})
        mapper(Address, addresses, properties={
            'user_id':deferred(addresses.c.user_id),
            'dingalings':relation(Dingaling, lazy=False)})
        mapper(Dingaling, dingalings, properties={
            'address_id':deferred(dingalings.c.address_id)})
        sess.expunge_all()
        def go():
            u = sess.query(User).get(8)
            eq_(User(id=8,
                     addresses=[Address(id=2, dingalings=[Dingaling(id=1)]),
                                Address(id=3),
                                Address(id=4)]),
                u)
        self.assert_sql_count(testing.db, go, 1)
    # Many-to-many eager loads stay at one statement, including when
    # combined with plain and aliased joins against the same relation.
    @testing.resolve_artifact_names
    def test_many_to_many(self):
        Keyword, Item = self.Keyword, self.Item
        keywords, item_keywords, items = self.tables.get_all(
            'keywords', 'item_keywords', 'items')
        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
                keywords = relation(Keyword, secondary=item_keywords,
                                    lazy=False, order_by=keywords.c.id)))
        q = create_session().query(Item).order_by(Item.id)
        def go():
            assert self.static.item_keyword_result == q.all()
        self.assert_sql_count(testing.db, go, 1)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                q.join('keywords').filter(Keyword.name == 'red').all())
        self.assert_sql_count(testing.db, go, 1)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                (q.join('keywords', aliased=True).
                 filter(Keyword.name == 'red')).all())
        self.assert_sql_count(testing.db, go, 1)
    # eagerload() option upgrades a lazy relation to eager for one query.
    @testing.resolve_artifact_names
    def test_eager_option(self):
        Keyword, Item = self.Keyword, self.Item
        keywords, item_keywords, items = self.tables.get_all(
            'keywords', 'item_keywords', 'items')
        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
                keywords = relation(Keyword, secondary=item_keywords, lazy=True,
                                    order_by=keywords.c.id)))
        q = create_session().query(Item)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                (q.options(eagerload('keywords')).
                 join('keywords').filter(keywords.c.name == 'red')).order_by(Item.id).all())
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_cyclical(self):
        """A circular eager relationship breaks the cycle with a lazy loader"""
        User, Address = self.User, self.Address
        users, addresses = self.tables.get_all('users', 'addresses')
        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relation(Address, lazy=False,
                                 backref=sa.orm.backref('user', lazy=False), order_by=Address.id)
        ))
        assert sa.orm.class_mapper(User).get_property('addresses').lazy is False
        assert sa.orm.class_mapper(Address).get_property('user').lazy is False
        sess = create_session()
        eq_(self.static.user_address_result, sess.query(User).order_by(User.id).all())
    @testing.resolve_artifact_names
    def test_double(self):
        """Eager loading with two relations simultaneously, from the same table, using aliases."""
        User, Address, Order = self.classes.get_all(
            'User', 'Address', 'Order')
        users, addresses, orders = self.tables.get_all(
            'users', 'addresses', 'orders')
        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')
        mapper(Address, addresses)
        mapper(Order, orders)
        # non-primary mappers against the two aliases let the same Order
        # class be loaded through two differently-filtered relations
        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)
        mapper(User, users, properties = dict(
            addresses = relation(Address, lazy=False, order_by=addresses.c.id),
            open_orders = relation(
                open_mapper,
                primaryjoin=sa.and_(openorders.c.isopen == 1,
                                 users.c.id==openorders.c.user_id),
                lazy=False, order_by=openorders.c.id),
            closed_orders = relation(
                closed_mapper,
                primaryjoin=sa.and_(closedorders.c.isopen == 0,
                                 users.c.id==closedorders.c.user_id),
                lazy=False, order_by=closedorders.c.id)))
        q = create_session().query(User).order_by(User.id)
        def go():
            assert [
                User(
                    id=7,
                    addresses=[Address(id=1)],
                    open_orders = [Order(id=3)],
                    closed_orders = [Order(id=1), Order(id=5)]
                ),
                User(
                    id=8,
                    addresses=[Address(id=2), Address(id=3), Address(id=4)],
                    open_orders = [],
                    closed_orders = []
                ),
                User(
                    id=9,
                    addresses=[Address(id=5)],
                    open_orders = [Order(id=4)],
                    closed_orders = [Order(id=2)]
                ),
                User(id=10)
            ] == q.all()
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_double_same_mappers(self):
        """Eager loading with two relations simulatneously, from the same table, using aliases."""
        User, Address, Order = self.classes.get_all(
            'User', 'Address', 'Order')
        users, addresses, orders = self.tables.get_all(
            'users', 'addresses', 'orders')
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relation(Item, secondary=order_items, lazy=False,
                              order_by=items.c.id)})
        mapper(Item, items)
        mapper(User, users, properties=dict(
            addresses=relation(Address, lazy=False, order_by=addresses.c.id),
            open_orders=relation(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 1,
                                 users.c.id==orders.c.user_id),
                lazy=False, order_by=orders.c.id),
            closed_orders=relation(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 0,
                                 users.c.id==orders.c.user_id),
                lazy=False, order_by=orders.c.id)))
        q = create_session().query(User).order_by(User.id)
        def go():
            assert [
                User(id=7,
                     addresses=[
                       Address(id=1)],
                     open_orders=[Order(id=3,
                                        items=[
                                          Item(id=3),
                                          Item(id=4),
                                          Item(id=5)])],
                     closed_orders=[Order(id=1,
                                          items=[
                                            Item(id=1),
                                            Item(id=2),
                                            Item(id=3)]),
                                    Order(id=5,
                                          items=[
                                            Item(id=5)])]),
                User(id=8,
                     addresses=[
                       Address(id=2),
                       Address(id=3),
                       Address(id=4)],
                     open_orders = [],
                     closed_orders = []),
                User(id=9,
                     addresses=[
                       Address(id=5)],
                     open_orders=[
                       Order(id=4,
                             items=[
                               Item(id=1),
                               Item(id=5)])],
                     closed_orders=[
                       Order(id=2,
                             items=[
                               Item(id=1),
                               Item(id=2),
                               Item(id=3)])]),
                User(id=10)
            ] == q.all()
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_no_false_hits(self):
        """Eager loaders don't interpret main table columns as part of their eager load."""
        User, Address, Order = self.classes.get_all(
            'User', 'Address', 'Order')
        users, addresses, orders = self.tables.get_all(
            'users', 'addresses', 'orders')
        mapper(User, users, properties={
            'addresses':relation(Address, lazy=False),
            'orders':relation(Order, lazy=False)
        })
        mapper(Address, addresses)
        mapper(Order, orders)
        allusers = create_session().query(User).all()
        # using a textual select, the columns will be 'id' and 'name'.  the
        # eager loaders have aliases which should not hit on those columns,
        # they should be required to locate only their aliased/fully table
        # qualified column name.
        noeagers = create_session().query(User).from_statement("select * from users").all()
        assert 'orders' not in noeagers[0].__dict__
        assert 'addresses' not in noeagers[0].__dict__
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_limit(self):
        """Limit operations combined with lazy-load relationships."""
        User, Item, Address, Order = self.classes.get_all(
            'User', 'Item', 'Address', 'Order')
        users, items, order_items, orders, addresses = self.tables.get_all(
            'users', 'items', 'order_items', 'orders', 'addresses')
        mapper(Item, items)
        mapper(Order, orders, properties={
            'items':relation(Item, secondary=order_items, lazy=False, order_by=items.c.id)
        })
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), lazy=False, order_by=addresses.c.id),
            'orders':relation(Order, lazy=True)
        })
        sess = create_session()
        q = sess.query(User)
        # NOTE(review): MySQL branch avoids OFFSET here — presumably a
        # dialect limitation at the time; the expected slice differs.
        if testing.against('mysql'):
            l = q.limit(2).all()
            assert self.static.user_all_result[:2] == l
        else:
            l = q.order_by(User.id).limit(2).offset(1).all()
            print self.static.user_all_result[1:3]
            print l
            assert self.static.user_all_result[1:3] == l
    @testing.resolve_artifact_names
    def test_distinct(self):
        # this is an involved 3x union of the users table to get a lot of rows.
        # then see if the "distinct" works its way out.  you actually get the same
        # result with or without the distinct, just via less or more rows.
        u2 = users.alias('u2')
        s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias('u')
        mapper(User, users, properties={
            'addresses':relation(mapper(Address, addresses), lazy=False, order_by=addresses.c.id),
        })
        sess = create_session()
        q = sess.query(User)
        def go():
            l = q.filter(s.c.u2_id==User.id).distinct().order_by(User.id).all()
            eq_(self.static.user_address_result, l)
        self.assert_sql_count(testing.db, go, 1)
    # LIMIT combined with an OR-filtered query on a many-to-many eager load.
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_limit_2(self):
        mapper(Keyword, keywords)
        mapper(Item, items, properties = dict(
                keywords = relation(Keyword, secondary=item_keywords, lazy=False, order_by=[keywords.c.id]),
            ))
        sess = create_session()
        q = sess.query(Item)
        l = q.filter((Item.description=='item 2') | (Item.description=='item 5') | (Item.description=='item 3')).\
            order_by(Item.id).limit(2).all()
        assert self.static.item_keyword_result[1:3] == l
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_limit_3(self):
        """test that the ORDER BY is propagated from the inner select to the outer select, when using the
        'wrapped' select statement resulting from the combination of eager loading and limit/offset clauses."""
        mapper(Item, items)
        mapper(Order, orders, properties = dict(
                items = relation(Item, secondary=order_items, lazy=False)
        ))
        mapper(Address, addresses)
        mapper(User, users, properties = dict(
            addresses = relation(Address, lazy=False, order_by=addresses.c.id),
            orders = relation(Order, lazy=False, order_by=orders.c.id),
        ))
        sess = create_session()
        q = sess.query(User)
        if not testing.against('maxdb', 'mssql'):
            l = q.join('orders').order_by(Order.user_id.desc()).limit(2).offset(1)
            assert [
                User(id=9,
                     orders=[Order(id=2), Order(id=4)],
                     addresses=[Address(id=5)]
                ),
                User(id=7,
                     orders=[Order(id=1), Order(id=3), Order(id=5)],
                     addresses=[Address(id=1)]
                )
            ] == l.all()
        l = q.join('addresses').order_by(Address.email_address.desc()).limit(1).offset(0)
        assert [
            User(id=7,
                 orders=[Order(id=1), Order(id=3), Order(id=5)],
                 addresses=[Address(id=1)]
            )
        ] == l.all()
    @testing.resolve_artifact_names
    def test_limit_4(self):
        # tests the LIMIT/OFFSET aliasing on a mapper against a select.   original issue from ticket #904
        sel = sa.select([users, addresses.c.email_address], users.c.id==addresses.c.user_id).alias('useralias')
        mapper(User, sel, properties={
            'orders':relation(Order, primaryjoin=sel.c.id==orders.c.user_id, lazy=False)
        })
        mapper(Order, orders)
        sess = create_session()
        eq_(sess.query(User).first(),
            User(name=u'jack',orders=[
                Order(address_id=1,description=u'order 1',isopen=0,user_id=7,id=1),
                Order(address_id=1,description=u'order 3',isopen=1,user_id=7,id=3),
                Order(address_id=None,description=u'order 5',isopen=0,user_id=7,id=5)],
            email_address=u'jack@bean.com',id=7)
        )
    # uselist=False turns the eager-loaded collection into a scalar.
    @testing.resolve_artifact_names
    def test_one_to_many_scalar(self):
        mapper(User, users, properties = dict(
            address = relation(mapper(Address, addresses), lazy=False, uselist=False)
        ))
        q = create_session().query(User)
        def go():
            l = q.filter(users.c.id == 7).all()
            assert [User(id=7, address=Address(id=1))] == l
        self.assert_sql_count(testing.db, go, 1)
    # Many-to-one eager load; loaded parent is identity-mapped.
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_many_to_one(self):
        mapper(Address, addresses, properties = dict(
            user = relation(mapper(User, users), lazy=False)
        ))
        sess = create_session()
        q = sess.query(Address)
        def go():
            a = q.filter(addresses.c.id==1).one()
            assert a.user is not None
            u1 = sess.query(User).get(7)
            assert a.user is u1
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_many_to_one_null(self):
        """test that a many-to-one eager load which loads None does
        not later trigger a lazy load.
        """
        # use a primaryjoin intended to defeat SA's usage of
        # query.get() for a many-to-one lazyload
        mapper(Order, orders, properties = dict(
            address = relation(mapper(Address, addresses),
                               primaryjoin=and_(
                                   addresses.c.id==orders.c.address_id,
                                   addresses.c.email_address != None
                               ),
                               lazy=False)
        ))
        sess = create_session()
        def go():
            o1 = sess.query(Order).options(lazyload('address')).filter(Order.id==5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 2)
        sess.expunge_all()
        def go():
            o1 = sess.query(Order).filter(Order.id==5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_one_and_many(self):
        """tests eager load for a parent object with a child object that
        contains a many-to-many relationship to a third object."""
        mapper(User, users, properties={
            'orders':relation(Order, lazy=False, order_by=orders.c.id)
        })
        mapper(Item, items)
        mapper(Order, orders, properties = dict(
                items = relation(Item, secondary=order_items, lazy=False, order_by=items.c.id)
            ))
        q = create_session().query(User)
        l = q.filter("users.id in (7, 8, 9)").order_by("users.id")
        def go():
            assert self.static.user_order_result[0:3] == l.all()
        self.assert_sql_count(testing.db, go, 1)
    # Two eager relations where one targets a non-primary mapper built
    # over an aggregate (MAX) subquery.
    @testing.resolve_artifact_names
    def test_double_with_aggregate(self):
        max_orders_by_user = sa.select([sa.func.max(orders.c.id).label('order_id')], group_by=[orders.c.user_id]).alias('max_orders_by_user')
        max_orders = orders.select(orders.c.id==max_orders_by_user.c.order_id).alias('max_orders')
        mapper(Order, orders)
        mapper(User, users, properties={
               'orders':relation(Order, backref='user', lazy=False),
               'max_order':relation(mapper(Order, max_orders, non_primary=True), lazy=False, uselist=False)
               })
        q = create_session().query(User)
        def go():
            assert [
                User(id=7, orders=[
                        Order(id=1),
                        Order(id=3),
                        Order(id=5),
                    ],
                    max_order=Order(id=5)
                ),
                User(id=8, orders=[]),
                User(id=9, orders=[Order(id=2),Order(id=4)],
                    max_order=Order(id=4)
                ),
                User(id=10),
            ] == q.all()
        self.assert_sql_count(testing.db, go, 1)
    # Three simultaneous eager relations (o2m, o2m, m2m) in one query.
    @testing.resolve_artifact_names
    def test_wide(self):
        mapper(Order, orders, properties={'items':relation(Item, secondary=order_items, lazy=False, order_by=items.c.id)})
        mapper(Item, items)
        mapper(User, users, properties = dict(
            addresses = relation(mapper(Address, addresses), lazy = False, order_by=addresses.c.id),
            orders = relation(Order, lazy = False, order_by=orders.c.id),
        ))
        q = create_session().query(User)
        l = q.all()
        assert self.static.user_all_result == q.order_by(User.id).all()
    @testing.resolve_artifact_names
    def test_against_select(self):
        """test eager loading of a mapper which is against a select"""
        s = sa.select([orders], orders.c.isopen==1).alias('openorders')
        mapper(Order, s, properties={
            'user':relation(User, lazy=False)
        })
        mapper(User, users)
        mapper(Item, items)
        q = create_session().query(Order)
        assert [
            Order(id=3, user=User(id=7)),
            Order(id=4, user=User(id=9))
        ] == q.all()
        q = q.select_from(s.join(order_items).join(items)).filter(~Item.id.in_([1, 2, 5]))
        assert [
            Order(id=3, user=User(id=7)),
        ] == q.all()
    @testing.resolve_artifact_names
    def test_aliasing(self):
        """test that eager loading uses aliases to insulate the eager load from regular criterion against those tables."""
        mapper(User, users, properties = dict(
            addresses = relation(mapper(Address, addresses), lazy=False, order_by=addresses.c.id)
        ))
        q = create_session().query(User)
        l = q.filter(addresses.c.email_address == 'ed@lala.com').filter(Address.user_id==User.id).order_by(User.id)
        assert self.static.user_address_result[1:2] == l.all()
class AddEntityTest(_fixtures.FixtureTest):
    """Eager loading when the query selects multiple entities
    (e.g. ``query(User, Order)``), including aliased secondary entities."""
    run_inserts = 'once'
    run_deletes = None
    # Shared expected result: (User, Order) tuples with both sides'
    # collections eagerly populated.
    @testing.resolve_artifact_names
    def _assert_result(self):
        return [
            (
                User(id=7,
                    addresses=[Address(id=1)]
                ),
                Order(id=1,
                    items=[Item(id=1), Item(id=2), Item(id=3)]
                ),
            ),
            (
                User(id=7,
                    addresses=[Address(id=1)]
                ),
                Order(id=3,
                    items=[Item(id=3), Item(id=4), Item(id=5)]
                ),
            ),
            (
                User(id=7,
                    addresses=[Address(id=1)]
                ),
                Order(id=5,
                    items=[Item(id=5)]
                ),
            ),
            (
                User(id=9,
                    addresses=[Address(id=5)]
                ),
                Order(id=2,
                    items=[Item(id=1), Item(id=2), Item(id=3)]
                ),
            ),
            (
                User(id=9,
                    addresses=[Address(id=5)]
                ),
                Order(id=4,
                    items=[Item(id=1), Item(id=5)]
                ),
            )
        ]
    # Eager relations configured directly on the mappers: a two-entity
    # query against an aliased Order still loads everything in one pass.
    @testing.resolve_artifact_names
    def test_mapper_configured(self):
        mapper(User, users, properties={
            'addresses':relation(Address, lazy=False),
            'orders':relation(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items':relation(Item, secondary=order_items, lazy=False, order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).join(('orders', oalias)).order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
    # Same result driven by eagerload() options; without an option on the
    # aliased entity's 'items' the per-row lazy loads push the count to 6.
    @testing.resolve_artifact_names
    def test_options(self):
        mapper(User, users, properties={
            'addresses':relation(Address),
            'orders':relation(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items':relation(Item, secondary=order_items, order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).options(eagerload('addresses')).join(('orders', oalias)).order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 6)
        sess.expunge_all()
        def go():
            ret = sess.query(User, oalias).options(eagerload('addresses'), eagerload(oalias.items)).join(('orders', oalias)).order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
class OrderBySecondaryTest(_base.MappedTest):
    """An eager many-to-many collection can be ordered by a column on the
    association (secondary) table rather than on the target table."""
    @classmethod
    def define_tables(cls, metadata):
        Table('m2m', metadata,
              Column('id', Integer, primary_key=True),
              Column('aid', Integer, ForeignKey('a.id')),
              Column('bid', Integer, ForeignKey('b.id')))
        Table('a', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50)))
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50)))
    @classmethod
    def fixtures(cls):
        # m2m ids are deliberately out of order relative to aid/bid so
        # that ordering by m2m.id is observable in the loaded collections.
        return dict(
            a=(('id', 'data'),
               (1, 'a1'),
               (2, 'a2')),
            b=(('id', 'data'),
               (1, 'b1'),
               (2, 'b2'),
               (3, 'b3'),
               (4, 'b4')),
            m2m=(('id', 'aid', 'bid'),
                 (2, 1, 1),
                 (4, 2, 4),
                 (1, 1, 3),
                 (6, 2, 2),
                 (3, 1, 2),
                 (5, 2, 3)))
    @testing.resolve_artifact_names
    def test_ordering(self):
        class A(_base.ComparableEntity):pass
        class B(_base.ComparableEntity):pass
        mapper(A, a, properties={
            'bs':relation(B, secondary=m2m, lazy=False, order_by=m2m.c.id)
        })
        mapper(B, b)
        sess = create_session()
        # for a1, m2m ids 1,2,3 map to b3,b1,b2; for a2, ids 4,5,6 -> b4,b3,b2
        eq_(sess.query(A).all(), [A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]), A(bs=[B(data='b4'), B(data='b3'), B(data='b2')])])
class SelfReferentialEagerTest(_base.MappedTest):
    """Eager loading of a self-referential (adjacency-list) relation,
    exercising ``join_depth`` and the lazy-load fallback past that depth."""
    @classmethod
    def define_tables(cls, metadata):
        Table('nodes', metadata,
            Column('id', Integer, sa.Sequence('node_id_seq', optional=True),
                   primary_key=True),
            Column('parent_id', Integer, ForeignKey('nodes.id')),
            Column('data', String(30)))
    # With join_depth=3 the whole 3-level tree loads in one statement.
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_basic(self):
        class Node(_base.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children':relation(Node, lazy=False, join_depth=3, order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').all()[0]
            assert Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]) == d
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').first()
            assert Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]) == d
        self.assert_sql_count(testing.db, go, 1)
    @testing.resolve_artifact_names
    def test_lazy_fallback_doesnt_affect_eager(self):
        class Node(_base.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children':relation(Node, lazy=False, join_depth=1, order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        # eager load with join depth 1.  when eager load of 'n1' hits the
        # children of 'n12', no columns are present, eager loader degrades to
        # lazy loader; fine.  but then, 'n12' is *also* in the first level of
        # columns since we're loading the whole table.  when those rows
        # arrive, now we *can* eager load its children and an eager collection
        # should be initialized.  essentially the 'n12' instance is present in
        # not just two different rows but two distinct sets of columns in this
        # result set.
        def go():
            allnodes = sess.query(Node).order_by(Node.data).all()
            n12 = allnodes[2]
            assert n12.data == 'n12'
            assert [
                Node(data='n121'),
                Node(data='n122'),
                Node(data='n123')
            ] == list(n12.children)
        self.assert_sql_count(testing.db, go, 1)
    # Deferred 'data' column plus eager children: undeferring at each
    # level progressively reduces the statement count down to one.
    @testing.resolve_artifact_names
    def test_with_deferred(self):
        class Node(_base.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children':relation(Node, lazy=False, join_depth=3, order_by=nodes.c.id),
            'data':deferred(nodes.c.data)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            eq_(
                Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).order_by(Node.id).first(),
            )
        self.assert_sql_count(testing.db, go, 4)
        sess.expunge_all()
        def go():
            assert Node(data='n1', children=[Node(data='n11'), Node(data='n12')]) == sess.query(Node).options(undefer('data')).order_by(Node.id).first()
        self.assert_sql_count(testing.db, go, 3)
        sess.expunge_all()
        def go():
            assert Node(data='n1', children=[Node(data='n11'), Node(data='n12')]) == sess.query(Node).options(undefer('data'), undefer('children.data')).first()
        self.assert_sql_count(testing.db, go, 1)
    # eagerload('children.children') option on an otherwise-lazy relation;
    # also checks the statement is not wrapped in a LIMIT subquery.
    @testing.resolve_artifact_names
    def test_options(self):
        class Node(_base.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children':relation(Node, lazy=True, order_by=nodes.c.id)
        }, order_by=nodes.c.id)
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').options(eagerload('children.children')).first()
            assert Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]) == d
        self.assert_sql_count(testing.db, go, 2)
        def go():
            d = sess.query(Node).filter_by(data='n1').options(eagerload('children.children')).first()
        # test that the query isn't wrapping the initial query for eager loading.
        self.assert_sql_execution(testing.db, go,
            CompiledSQL(
                "SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data FROM nodes "
                "WHERE nodes.data = :data_1 ORDER BY nodes.id LIMIT 1 OFFSET 0",
                {'data_1': 'n1'}
            )
        )
    # Without join_depth the eager load self-terminates, falling back to
    # lazy loads for the deeper levels (3 statements total).
    @testing.fails_on('maxdb', 'FIXME: unknown')
    @testing.resolve_artifact_names
    def test_no_depth(self):
        class Node(_base.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children':relation(Node, lazy=False)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').first()
            assert Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]) == d
        self.assert_sql_count(testing.db, go, 3)
class MixedSelfReferentialEagerTest(_base.MappedTest):
    """Two self-referential relations on the same table (via two distinct
    FK columns) plus a plain many-to-one, all eager loaded together."""
    @classmethod
    def define_tables(cls, metadata):
        Table('a_table', metadata,
           Column('id', Integer, primary_key=True)
        )
        Table('b_table', metadata,
           Column('id', Integer, primary_key=True),
           Column('parent_b1_id', Integer, ForeignKey('b_table.id')),
           Column('parent_a_id', Integer, ForeignKey('a_table.id')),
           Column('parent_b2_id', Integer, ForeignKey('b_table.id')))
    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        class A(_base.ComparableEntity):
            pass
        class B(_base.ComparableEntity):
            pass
        mapper(A,a_table)
        # two self-referential m2o relations distinguished by explicit
        # primaryjoins; 'parent_z' is a lazy m2o to A upgraded per-query
        mapper(B,b_table,properties = {
            'parent_b1': relation(B,
                remote_side = [b_table.c.id],
                primaryjoin = (b_table.c.parent_b1_id ==b_table.c.id),
                order_by = b_table.c.id
            ),
            'parent_z': relation(A,lazy = True),
            'parent_b2': relation(B,
                remote_side = [b_table.c.id],
                primaryjoin = (b_table.c.parent_b2_id ==b_table.c.id),
                order_by = b_table.c.id
            )
        });
    @classmethod
    @testing.resolve_artifact_names
    def insert_data(cls):
        a_table.insert().execute(dict(id=1), dict(id=2), dict(id=3))
        b_table.insert().execute(
            dict(id=1, parent_a_id=2, parent_b1_id=None, parent_b2_id=None),
            dict(id=2, parent_a_id=1, parent_b1_id=1, parent_b2_id=None),
            dict(id=3, parent_a_id=1, parent_b1_id=1, parent_b2_id=2),
            dict(id=4, parent_a_id=3, parent_b1_id=1, parent_b2_id=None),
            dict(id=5, parent_a_id=3, parent_b1_id=None, parent_b2_id=2),
            dict(id=6, parent_a_id=1, parent_b1_id=1, parent_b2_id=3),
            dict(id=7, parent_a_id=2, parent_b1_id=None, parent_b2_id=3),
            dict(id=8, parent_a_id=2, parent_b1_id=1, parent_b2_id=2),
            dict(id=9, parent_a_id=None, parent_b1_id=1, parent_b2_id=None),
            dict(id=10, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
            dict(id=11, parent_a_id=3, parent_b1_id=1, parent_b2_id=8),
            dict(id=12, parent_a_id=2, parent_b1_id=5, parent_b2_id=2),
            dict(id=13, parent_a_id=3, parent_b1_id=4, parent_b2_id=4),
            dict(id=14, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
        )
    # All three relations eagerloaded via options: still one statement.
    @testing.resolve_artifact_names
    def test_eager_load(self):
        session = create_session()
        def go():
            eq_(
                session.query(B).options(eagerload('parent_b1'),eagerload('parent_b2'),eagerload('parent_z')).
                        filter(B.id.in_([2, 8, 11])).order_by(B.id).all(),
                [
                    B(id=2, parent_z=A(id=1), parent_b1=B(id=1), parent_b2=None),
                    B(id=8, parent_z=A(id=2), parent_b1=B(id=1), parent_b2=B(id=2)),
                    B(id=11, parent_z=A(id=3), parent_b1=B(id=1), parent_b2=B(id=8))
                ]
            )
        self.assert_sql_count(testing.db, go, 1)
class SelfReferentialM2MEagerTest(_base.MappedTest):
    """Eager load across a self-referential many-to-many: 'widget' joined
    to itself through the 'widget_rel' association table."""

    @classmethod
    def define_tables(cls, metadata):
        Table('widget', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', sa.Unicode(40), nullable=False, unique=True),
        )

        Table('widget_rel', metadata,
            Column('parent_id', Integer, ForeignKey('widget.id')),
            Column('child_id', Integer, ForeignKey('widget.id')),
            sa.UniqueConstraint('parent_id', 'child_id'),
        )

    @testing.resolve_artifact_names
    def test_basic(self):
        class Widget(_base.ComparableEntity):
            pass

        mapper(Widget, widget, properties={
            # non-lazy self-referential m2m; join_depth=1 keeps the eager
            # join from recursing indefinitely
            'children': relation(Widget, secondary=widget_rel,
                primaryjoin=widget_rel.c.parent_id == widget.c.id,
                secondaryjoin=widget_rel.c.child_id == widget.c.id,
                lazy=False, join_depth=1,
            )
        })

        sess = create_session()
        w1 = Widget(name=u'w1')
        w2 = Widget(name=u'w2')
        w1.children.append(w2)
        sess.add(w1)
        sess.flush()
        sess.expunge_all()

        # round trip: the child collection comes back eagerly loaded
        assert [Widget(name='w1', children=[Widget(name='w2')])] == sess.query(Widget).filter(Widget.name == u'w1').all()
class MixedEntitiesTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
    """Eager loading when queries return tuples of multiple entities
    (User/Order pairs), including aliased entities."""

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    @testing.resolve_artifact_names
    def setup_mappers(cls):
        mapper(User, users, properties={
            'addresses': relation(Address, backref='user'),
            'orders': relation(Order, backref='user'),  # o2m, m2o
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relation(Item, secondary=order_items, order_by=items.c.id),  # m2m
        })
        mapper(Item, items, properties={
            'keywords': relation(Keyword, secondary=item_keywords)  # m2m
        })
        mapper(Keyword, keywords)

    @testing.resolve_artifact_names
    def test_two_entities(self):
        """Two-entity tuples with eager options on both entities resolve
        in a single SELECT, joined or not."""
        sess = create_session()

        # two FROM clauses
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).filter(User.id==Order.user_id).\
                    options(eagerload(User.addresses), eagerload(Order.items)).filter(User.id==9).\
                    order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

        # one FROM clause
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).join(User.orders).options(eagerload(User.addresses), eagerload(Order.items)).filter(User.id==9).\
                    order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    @testing.exclude('sqlite', '>', (0, 0, 0), "sqlite flat out blows it on the multiple JOINs")
    @testing.resolve_artifact_names
    def test_two_entities_with_joins(self):
        """Four-entity tuples (two aliased pairs), each pair joined and
        eager-optioned independently -- still one SELECT."""
        sess = create_session()

        # two FROM clauses where there's a join on each one
        def go():
            u1 = aliased(User)
            o1 = aliased(Order)
            eq_(
                [
                    (
                        User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'),
                        Order(description=u'order 2', isopen=0, items=[Item(description=u'item 1'), Item(description=u'item 2'), Item(description=u'item 3')]),
                        User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'),
                        Order(description=u'order 3', isopen=1, items=[Item(description=u'item 3'), Item(description=u'item 4'), Item(description=u'item 5')])
                    ),
                    (
                        User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'),
                        Order(description=u'order 2', isopen=0, items=[Item(description=u'item 1'), Item(description=u'item 2'), Item(description=u'item 3')]),
                        User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'),
                        Order(address_id=None, description=u'order 5', isopen=0, items=[Item(description=u'item 5')])
                    ),
                    (
                        User(addresses=[Address(email_address=u'fred@fred.com')], name=u'fred'),
                        Order(description=u'order 4', isopen=1, items=[Item(description=u'item 1'), Item(description=u'item 5')]),
                        User(addresses=[Address(email_address=u'jack@bean.com')], name=u'jack'),
                        Order(address_id=None, description=u'order 5', isopen=0, items=[Item(description=u'item 5')])
                    ),
                ],
                sess.query(User, Order, u1, o1).\
                    join((Order, User.orders)).options(eagerload(User.addresses), eagerload(Order.items)).filter(User.id==9).\
                    join((o1, u1.orders)).options(eagerload(u1.addresses), eagerload(o1.items)).filter(u1.id==7).\
                    filter(Order.id<o1.id).\
                    order_by(User.id, Order.id, u1.id, o1.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    @testing.resolve_artifact_names
    def test_aliased_entity(self):
        """Eager options applied against an aliased entity in the tuple."""
        sess = create_session()
        oalias = sa.orm.aliased(Order)

        # two FROM clauses
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).filter(User.id==oalias.user_id).\
                    options(eagerload(User.addresses), eagerload(oalias.items)).filter(User.id==9).\
                    order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

        # one FROM clause
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]), Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).join((User.orders, oalias)).options(eagerload(User.addresses), eagerload(oalias.items)).filter(User.id==9).\
                    order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

        from sqlalchemy.engine.default import DefaultDialect

        # improper setup: oalias in the columns clause but join to usual
        # orders alias. this should create two FROM clauses even though the
        # query has a from_clause set up via the join
        self.assert_compile(sess.query(User, oalias).join(User.orders).options(eagerload(oalias.items)).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, orders_1.id AS orders_1_id, "\
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS orders_1_address_id, "\
            "orders_1.description AS orders_1_description, orders_1.isopen AS orders_1_isopen, items_1.id AS items_1_id, "\
            "items_1.description AS items_1_description FROM users JOIN orders ON users.id = orders.user_id, "\
            "orders AS orders_1 LEFT OUTER JOIN order_items AS order_items_1 ON orders_1.id = order_items_1.order_id "\
            "LEFT OUTER JOIN items AS items_1 ON items_1.id = order_items_1.item_id ORDER BY items_1.id",
            dialect=DefaultDialect()
        )
class CyclicalInheritingEagerTest(_base.MappedTest):
    """Regression test: eager join setup must not loop endlessly when two
    inherited mappers reference each other with non-lazy relations."""

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
            Column('c1', Integer, primary_key=True),
            Column('c2', String(30)),
            Column('type', String(30))
        )

        Table('t2', metadata,
            Column('c1', Integer, primary_key=True),
            Column('c2', String(30)),
            Column('type', String(30)),
            Column('t1.id', Integer, ForeignKey('t1.c1')))

    @testing.resolve_artifact_names
    def test_basic(self):
        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1')
        mapper(SubT, None, inherits=T, polymorphic_identity='subt1', properties={
            # both sides eager (lazy=False) -> potential cyclical eager join
            't2s': relation(SubT2, lazy=False, backref=sa.orm.backref('subt', lazy=False))
        })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        mapper(SubT2, None, inherits=T2, polymorphic_identity='subt2')

        # testing a particular endless loop condition in eager join setup
        create_session().query(SubT).all()
class SubqueryTest(_base.MappedTest):
    """Eager loading combined with column_property() subqueries whose labels
    may collide with labels on the parent query."""

    @classmethod
    def define_tables(cls, metadata):
        Table('users_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(16))
        )

        Table('tags_table', metadata,
            Column('id', Integer, primary_key=True),
            Column('user_id', Integer, ForeignKey("users_table.id")),
            Column('score1', sa.Float),
            Column('score2', sa.Float),
        )

    @testing.resolve_artifact_names
    def test_label_anonymizing(self):
        """Eager loading works with subqueries with labels,

        Even if an explicit labelname which conflicts with a label on the
        parent.

        There's not much reason a column_property() would ever need to have a
        label of a specific name (and they don't even need labels these days),
        unless you'd like the name to line up with a name that you may be
        using for a straight textual statement used for loading instances of
        that type.
        """
        class User(_base.ComparableEntity):
            @property
            def prop_score(self):
                # Python-side equivalent of the SQL 'query_score' property
                return sum([tag.prop_score for tag in self.tags])

        class Tag(_base.ComparableEntity):
            @property
            def prop_score(self):
                return self.score1 * self.score2

        # exercise labeled-with-name, labeled-anonymous and scalar variants
        for labeled, labelname in [(True, 'score'), (True, None), (False, None)]:
            sa.orm.clear_mappers()

            tag_score = (tags_table.c.score1 * tags_table.c.score2)
            user_score = sa.select([sa.func.sum(tags_table.c.score1 *
                                                tags_table.c.score2)],
                                   tags_table.c.user_id == users_table.c.id)

            if labeled:
                tag_score = tag_score.label(labelname)
                user_score = user_score.label(labelname)
            else:
                user_score = user_score.as_scalar()

            mapper(Tag, tags_table, properties={
                'query_score': sa.orm.column_property(tag_score),
            })

            mapper(User, users_table, properties={
                'tags': relation(Tag, backref='user', lazy=False),
                'query_score': sa.orm.column_property(user_score),
            })

            session = create_session()
            session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0), Tag(score1=55.0, score2=1.0)]))
            session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0), Tag(score1=50.0, score2=1.0), Tag(score1=15.0, score2=2.0)]))
            session.flush()
            session.expunge_all()

            # SQL-computed and Python-computed scores must agree
            for user in session.query(User).all():
                eq_(user.query_score, user.prop_score)

            def go():
                u = session.query(User).filter_by(name='joe').one()
                eq_(u.query_score, u.prop_score)
            self.assert_sql_count(testing.db, go, 1)

            # reset table contents for the next loop iteration
            for t in (tags_table, users_table):
                t.delete().execute()
class CorrelatedSubqueryTest(_base.MappedTest):
    """tests for #946, #947, #948.

    The "users" table is joined to "stuff", and the relation
    would like to pull only the "stuff" entry with the most recent date.

    Exercises a variety of ways to configure this.
    """

    @classmethod
    def define_tables(cls, metadata):
        users = Table('users', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', String(50))
        )

        stuff = Table('stuff', metadata,
            Column('id', Integer, primary_key=True),
            Column('date', Date),
            Column('user_id', Integer, ForeignKey('users.id')))

    @classmethod
    @testing.resolve_artifact_names
    def insert_data(cls):
        users.insert().execute(
            {'id':1, 'name':'user1'},
            {'id':2, 'name':'user2'},
            {'id':3, 'name':'user3'},
        )

        stuff.insert().execute(
            {'id':1, 'user_id':1, 'date':datetime.date(2007, 10, 15)},
            {'id':2, 'user_id':1, 'date':datetime.date(2007, 12, 15)},
            {'id':3, 'user_id':1, 'date':datetime.date(2007, 11, 15)},
            {'id':4, 'user_id':2, 'date':datetime.date(2008, 1, 15)},
            {'id':5, 'user_id':3, 'date':datetime.date(2007, 6, 15)},
            {'id':6, 'user_id':3, 'date':datetime.date(2007, 3, 15)},
        )

    # The twelve tests below enumerate every combination of
    # (label / scalar / plain) x (join-on-max-date / join-on-limited-id)
    # x (aliased subquery table or not).
    def test_labeled_on_date_noalias(self):
        self._do_test('label', True, False)

    def test_scalar_on_date_noalias(self):
        self._do_test('scalar', True, False)

    def test_plain_on_date_noalias(self):
        self._do_test('none', True, False)

    def test_labeled_on_limitid_noalias(self):
        self._do_test('label', False, False)

    def test_scalar_on_limitid_noalias(self):
        self._do_test('scalar', False, False)

    def test_plain_on_limitid_noalias(self):
        self._do_test('none', False, False)

    def test_labeled_on_date_alias(self):
        self._do_test('label', True, True)

    def test_scalar_on_date_alias(self):
        self._do_test('scalar', True, True)

    def test_plain_on_date_alias(self):
        self._do_test('none', True, True)

    def test_labeled_on_limitid_alias(self):
        self._do_test('label', False, True)

    def test_scalar_on_limitid_alias(self):
        self._do_test('scalar', False, True)

    def test_plain_on_limitid_alias(self):
        self._do_test('none', False, True)

    @testing.resolve_artifact_names
    def _do_test(self, labeled, ondate, aliasstuff):
        """Build the mapping per the given flags and verify eager vs lazy
        SQL statement counts and row results."""
        class User(_base.ComparableEntity):
            pass

        class Stuff(_base.ComparableEntity):
            pass

        mapper(Stuff, stuff)

        if aliasstuff:
            salias = stuff.alias()
        else:
            # if we don't alias the 'stuff' table within the correlated subquery,
            # it gets aliased in the eager load along with the "stuff" table to "stuff_1".
            # but it's a scalar subquery, and this doesn't actually matter
            salias = stuff

        if ondate:
            # the more 'relational' way to do this, join on the max date
            stuff_view = select([func.max(salias.c.date).label('max_date')]).where(salias.c.user_id==users.c.id).correlate(users)
        else:
            # a common method with the MySQL crowd, which actually might perform better in some
            # cases - subquery does a limit with order by DESC, join on the id
            stuff_view = select([salias.c.id]).where(salias.c.user_id==users.c.id).correlate(users).order_by(salias.c.date.desc()).limit(1)

        if labeled == 'label':
            stuff_view = stuff_view.label('foo')
        elif labeled == 'scalar':
            stuff_view = stuff_view.as_scalar()

        if ondate:
            mapper(User, users, properties={
                'stuff':relation(Stuff, primaryjoin=and_(users.c.id==stuff.c.user_id, stuff.c.date==stuff_view))
            })
        else:
            mapper(User, users, properties={
                'stuff':relation(Stuff, primaryjoin=and_(users.c.id==stuff.c.user_id, stuff.c.id==stuff_view))
            })

        # full eager load -- one statement
        sess = create_session()
        def go():
            eq_(
                sess.query(User).order_by(User.name).options(eagerload('stuff')).all(),
                [
                    User(name='user1', stuff=[Stuff(id=2)]),
                    User(name='user2', stuff=[Stuff(id=4)]),
                    User(name='user3', stuff=[Stuff(id=5)])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

        # lazy load -- two statements
        sess = create_session()
        def go():
            eq_(
                sess.query(User).order_by(User.name).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 2)

        # eager load with first() -- still one statement
        sess = create_session()
        def go():
            eq_(
                sess.query(User).order_by(User.name).options(eagerload('stuff')).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()
        def go():
            eq_(
                sess.query(User).filter(User.id==2).options(eagerload('stuff')).one(),
                User(name='user2', stuff=[Stuff(id=4)])
            )
        self.assert_sql_count(testing.db, go, 1)
| [
"hughperkins@gmail.com"
] | hughperkins@gmail.com |
525d87c0196a42e75f55f89d743b31765ba68d48 | 63811ad4592793a8028ab973e254ba59a4205fe5 | /src/modules/storage_evernote/__init__.py | b30826ea497661139b30177a332751794def6cb6 | [] | no_license | AlexWoroschilow/AOD-Notes | d124fe9206278cae3c57b146883081bfaaaf1ff5 | aa43c58291e2f9175a456c156ebc78aaf61cac1e | refs/heads/master | 2022-11-30T10:42:16.405161 | 2020-08-05T20:28:05 | 2020-08-05T20:28:05 | 130,315,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # -*- coding: utf-8 -*-
# Copyright 2015 Alex Woroschilow (alex.woroschilow@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inject
class Loader(object):
    """Module loader for the Evernote storage backend.

    Usable as a context manager; currently there is nothing to set up
    or tear down, so the hooks are no-ops.
    """

    def __enter__(self):
        # Nothing to acquire -- hand the loader itself back.
        return self

    def __exit__(self, type, value, traceback):
        # Nothing to release; returning None does not suppress exceptions.
        return None

    def configure(self, binder, options, args):
        """
        Configure service container for the dependency injections
        :param binder:
        :param options:
        :param args:
        :return:
        """
        return None
| [
"alex.woroschilow@gmail.com"
] | alex.woroschilow@gmail.com |
842f17c7aeae65c3e435a8bef7373d36475fcad4 | 48408a93a358e09526e8f8b9cf560cfede086d9f | /tests/test_plot_acc_signal.py | 675b8400125a71a0064af1afd0215602f6460a83 | [
"MIT"
] | permissive | eng-tools/engformat | aa4c137854f05706feceee136e0601508c4ea4f1 | 8cc3937327eb4e7b52e0b5d486248bf25894ec0d | refs/heads/master | 2023-03-28T23:07:54.075599 | 2021-03-30T21:47:27 | 2021-03-30T21:47:27 | 111,424,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | import numpy as np
import matplotlib
matplotlib.use('agg')
from eqsig import AccSignal
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from bwplot import cbox
from engformat import plot_acc_signal
from tests.conftest import TEST_DATA_DIR
@image_comparison(baseline_images=['plot_acc_sig_as_response_spectrum'], extensions=['png'])
def test_plot_acc_sig_as_response_spectrum():
record_path = TEST_DATA_DIR
record_filename = 'test_motion_dt0p01.txt'
motion_step = 0.01
rec = np.loadtxt(record_path + record_filename)
acc_sig = AccSignal(rec, motion_step)
plot_acc_signal.plot_acc_sig_as_response_spectrum(acc_sig)
@image_comparison(baseline_images=['plot_acc_sig_as_time_series'], extensions=['png'])
def test_plot_acc_sig_as_time_series():
    """Compare the time-series plot of the reference motion against the
    stored baseline image."""
    time_step = 0.01  # sampling interval of the stored record [s]
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_time_series(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_fa_spectrum'], extensions=['png'])
def test_plot_acc_sig_as_fa_spectrum():
    """Compare the Fourier-amplitude-spectrum plot of the reference motion
    against the stored baseline image."""
    time_step = 0.01  # sampling interval of the stored record [s]
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_fa_spectrum(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_avd'], extensions=['png'])
def test_plot_acc_sig_as_avd():
    """Compare the acceleration/velocity/displacement plot of the reference
    motion against the stored baseline image."""
    time_step = 0.01  # sampling interval of the stored record [s]
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_avd(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_transfer_function'], extensions=['png'])
def test_plot_acc_sig_as_transfer_function():
    """Compare the transfer-function plot (signal vs itself) against the
    stored baseline image."""
    time_step = 0.01  # sampling interval of the stored record [s]
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    signal = AccSignal(values, time_step)
    plot_acc_signal.plot_acc_sig_as_transfer_function(signal, [signal])
if __name__ == '__main__':
    # Manual entry point for eyeballing a single plot outside of pytest.
    test_plot_acc_sig_as_response_spectrum()
"maxim.millen@gmail.com"
] | maxim.millen@gmail.com |
8e3d54e893943143b258daaa50207961b795f69d | d9e277dc46c9ed02d339db0fc1c4ebaed9d15e12 | /ingest/spoor_xml.py | 735227883f6ef3b55d3da3731f44056cba250eff | [
"MIT"
] | permissive | O-C-R/intotheokavango | b7ea700a178610ce6154eaf8bd423ff7acf5d522 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | refs/heads/master | 2020-04-10T20:13:52.547817 | 2017-10-31T16:26:17 | 2017-10-31T16:26:17 | 32,355,238 | 1 | 3 | MIT | 2018-06-15T15:43:23 | 2015-03-16T21:51:32 | JavaScript | UTF-8 | Python | false | false | 2,105 | py | import json, xmltodict, os, base64
from ingest import ingest_json_body, save_files, process_image, ingest_data, ingest_plain_body
from housepy import config, log, util, strings
from ingest.sighting import get_taxonomy
def parse(request):
    """Parse a Device Magic XML sighting submission into a feature dict.

    Returns the feature dict on success; on failure returns a
    ``(None, message)`` tuple.  NOTE(review): the mixed return shape
    (dict vs tuple) is preserved from the original -- callers must
    handle both.
    """
    log.info("spoor_xml.parse")
    try:
        content = ingest_plain_body(request)
        data = xmltodict.parse(content)
    except Exception as e:
        log.error(log.exc(e))
        return None, "Parsing error"
    try:
        log.info("--> parsing XML")
        # Device Magic wraps the whole form in an <instance> element
        data = data['instance']
        feature = {'FeatureType': "sighting", 'Delivery': "devicemagic"}
        log.debug(json.dumps(data, indent=4, default=lambda x: str(x)))
        # feature['Member'] = data['@dm:submitting_user'].split(' ')[0]    # let TeamMember override this
        # default timestamp is the device write time; a datetime question
        # in the form (if present) overrides it below
        dt = util.parse_date(data['@writeTime'])
        data = data['inputs']
        for alias in ['Date___Time_Question', 'Date___Time']:
            if alias in data:
                dt = util.parse_date(data[alias])
                del data[alias]
        feature['t_utc'] = util.timestamp(dt)
        # different form versions name the location question differently;
        # normalize them all to 'Location'
        for alias in ['Current_Location', 'LocationQuestion', 'Location_Question', 'GPSLocation']:
            if alias in data:
                data['Location'] = data[alias]
                del data[alias]
        if 'Location' in data:
            try:
                # assumes format "lat=<x>, long=<y>, alt=<z>, ..." -- TODO confirm
                feature['Latitude'] = data['Location'].split(',')[0].replace("lat=", '').strip()
                feature['Longitude'] = data['Location'].split(',')[1].replace("long=", '').strip()
                feature['Altitude'] = data['Location'].split(',')[2].replace("alt=", '').strip()
                del data['Location']
            except Exception as e:
                # best-effort: a malformed location is logged, not fatal
                log.error(log.exc(e))
        # copy the remaining answers, stripping underscores from key names
        for key, value in data.items():
            feature[key.replace('_', '')] = value
        # purge blanks
        feature = {key: value for (key, value) in feature.items() if type(value) != str or len(value.strip())}
    except Exception as e:
        log.error(log.exc(e))
        return None, "Unexpected fields"
    return feature
| [
"brian.house@gmail.com"
] | brian.house@gmail.com |
4b2850b6ce34f9c85f6c0c7634f32b5c9cf5fcff | aef9d6b8bb21957fa8b2235872bca51f64e7b5ff | /petstagram/petstagram/pets/urls.py | 2473e643a2fe767b5674d65d09873b6da5c9eacb | [] | no_license | dreadlordow/Softuni-Python-Web | 3cf9cc234960bb47f1c3c2a91a1a80d0fc499fd6 | 784faccbe15023536917d610384222d839a63bae | refs/heads/master | 2023-08-28T19:39:57.149514 | 2021-02-23T16:28:55 | 2021-02-23T16:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.urls import path
from petstagram.pets.views import list_pets, details_or_comment_pet, like_pet, create, edit_pet, delete_pet
# URL routes for the pets app: list, detail/comment, like, and CRUD views.
# All parameterized routes carry a trailing slash for consistency with
# Django's APPEND_SLASH convention (previously 'edit' and 'delete' were
# missing it, unlike every other route in this list).
urlpatterns = [
    path('', list_pets, name='list pets'),
    path('details/<int:pk>/', details_or_comment_pet, name='pet details'),
    path('like/<int:pk>/', like_pet, name='like pet'),
    path('create/', create, name='create pet'),
    path('edit/<int:pk>/', edit_pet, name='edit pet'),
    path('delete/<int:pk>/', delete_pet, name='delete pet'),
]
"georgipavlov1913@gmail.com"
] | georgipavlov1913@gmail.com |
87c33bb6777835d09d524a7349f95644d682d200 | fa7e75212e9f536eed7a78237a5fa9a4021a206b | /python/smqtk/tests/algorithms/nn_index/test_NNI_itq.py | e9cbb9ca8295459ddafa1635f0cd73c710e71e13 | [] | no_license | kod3r/SMQTK | 3d40730c956220a3d9bb02aef65edc8493bbf527 | c128e8ca38c679ee37901551f4cc021cc43d00e6 | refs/heads/master | 2020-12-03T09:12:41.163643 | 2015-10-19T14:56:55 | 2015-10-19T14:56:55 | 44,916,678 | 1 | 0 | null | 2015-10-25T15:47:35 | 2015-10-25T15:47:35 | null | UTF-8 | Python | false | false | 7,630 | py | import json
import os
import random
import unittest
import nose.tools as ntools
import numpy
from smqtk.representation.code_index.memory import MemoryCodeIndex
from smqtk.representation.descriptor_element.local_elements import \
DescriptorMemoryElement
from smqtk.algorithms.nn_index.lsh.itq import ITQNearestNeighborsIndex
from smqtk.utils.file_utils import make_tempfile
__author__ = "paul.tunison@kitware.com"
class TestIqrSimilarityIndex (unittest.TestCase):
    """Tests for ITQNearestNeighborsIndex (ITQ-based LSH nearest-neighbor
    index) covering configuration and euclidean / histogram-intersection
    distance behavior."""

    # Temp-file paths for the ITQ rotation matrix / mean vector caches;
    # (re)assigned per test and removed in tearDown.
    ITQ_ROTATION_MAT = None
    ITQ_MEAN_VEC = None
    RANDOM_SEED = 42

    @classmethod
    def _clean_cache_files(cls):
        # Remove any model cache files currently on disk.
        for fp in [cls.ITQ_ROTATION_MAT, cls.ITQ_MEAN_VEC]:
            if fp and os.path.isfile(fp):
                os.remove(fp)

    @classmethod
    def _make_cache_files(cls):
        # Allocate fresh temp paths for the mean vector and rotation matrix.
        cls._clean_cache_files()
        cls.ITQ_MEAN_VEC = make_tempfile(suffix='.npy')
        cls.ITQ_ROTATION_MAT = make_tempfile(suffix='.npy')

    def _make_inst(self, dist_method, bits=8):
        """Build a fresh index with the given distance method / bit length."""
        self._make_cache_files()
        # don't want the files to actually exist
        self._clean_cache_files()
        # Initialize with a fresh code index instance every time, otherwise the
        # same code index is maintained between constructions
        return ITQNearestNeighborsIndex(self.ITQ_MEAN_VEC, self.ITQ_ROTATION_MAT,
                                        code_index=MemoryCodeIndex(),
                                        bit_length=bits,
                                        distance_method=dist_method,
                                        random_seed=self.RANDOM_SEED)

    def tearDown(self):
        self._clean_cache_files()

    def test_configuration(self):
        """Default config sanity, JSON round-trip, and from_config wiring."""
        c = ITQNearestNeighborsIndex.get_default_config()

        # Default code index should be memory based
        ntools.assert_equal(c['code_index']['type'], 'MemoryCodeIndex')
        ntools.assert_true(c['mean_vec_filepath'] is None)
        ntools.assert_true(c['rotation_filepath'] is None)
        ntools.assert_true(c['random_seed'] is None)

        # Conversion to JSON and back is idempotent
        ntools.assert_equal(json.loads(json.dumps(c)), c)

        # Make some changes to deviate from defaults
        c['bit_length'] = 256
        c['itq_iterations'] = 25
        c['mean_vec_filepath'] = 'vec.npy'
        c['rotation_filepath'] = 'rot.npy'

        # Make instance
        index = ITQNearestNeighborsIndex.from_config(c)
        ntools.assert_equal(index._mean_vec_cache_filepath,
                            c['mean_vec_filepath'])
        ntools.assert_equal(index._rotation_cache_filepath,
                            c['rotation_filepath'])
        ntools.assert_is_instance(index._code_index, MemoryCodeIndex)
        ntools.assert_equal(index._bit_len, c['bit_length'])
        ntools.assert_equal(index._itq_iter_num, c['itq_iterations'])
        ntools.assert_equal(index._dist_method, c['distance_method'])
        ntools.assert_equal(index._rand_seed, c['random_seed'])

    def test_known_descriptors_euclidean_unit(self):
        """Unit-corner vectors are all distance 1.0 from the origin."""
        dim = 5

        ###
        # Unit vectors -- Equal distance
        #
        index = self._make_inst('euclidean')
        test_descriptors = []
        for i in xrange(dim):
            v = numpy.zeros(dim, float)
            v[i] = 1.
            d = DescriptorMemoryElement('unit', i)
            d.set_vector(v)
            test_descriptors.append(d)
        index.build_index(test_descriptors)

        # query descriptor -- zero vector
        # -> all modeled descriptors should be equally distance (unit corners)
        q = DescriptorMemoryElement('query', 0)
        q.set_vector(numpy.zeros(dim, float))

        # All dists should be 1.0, r order doesn't matter
        r, dists = index.nn(q, dim)
        for d in dists:
            ntools.assert_equal(d, 1.)

    def test_known_descriptors_euclidean_ordered(self):
        """Descriptors built at increasing distance from (0,0) come back in
        index order for an origin query."""
        index = self._make_inst('euclidean')

        # make vectors to return in a known euclidean distance order
        i = 1000
        test_descriptors = []
        for j in xrange(i):
            d = DescriptorMemoryElement('ordered', j)
            d.set_vector(numpy.array([j, j*2], float))
            test_descriptors.append(d)
        random.shuffle(test_descriptors)
        index.build_index(test_descriptors)

        # Since descriptors were build in increasing distance from (0,0),
        # returned descriptors for a query of [0,0] should be in index order.
        q = DescriptorMemoryElement('query', i)
        q.set_vector(numpy.array([0, 0], float))

        # top result should have UUID == 0 (nearest to query)
        r, dists = index.nn(q, 5)
        ntools.assert_equal(r[0].uuid(), 0)
        ntools.assert_equal(r[1].uuid(), 1)
        ntools.assert_equal(r[2].uuid(), 2)
        ntools.assert_equal(r[3].uuid(), 3)
        ntools.assert_equal(r[4].uuid(), 4)

        # global search should be in complete order
        r, dists = index.nn(q, i)
        for j, d, dist in zip(range(i), r, dists):
            ntools.assert_equal(d.uuid(), j)

    def test_random_descriptors_euclidean(self):
        """Random high-dim descriptors: exact-match query, near-match query,
        and distance-ordering of results."""
        # make random descriptors
        i = 1000
        dim = 256
        bits = 32
        td = []
        for j in xrange(i):
            d = DescriptorMemoryElement('random', j)
            d.set_vector(numpy.random.rand(dim))
            td.append(d)

        index = self._make_inst('euclidean', bits)
        index.build_index(td)

        # test query from build set -- should return same descriptor when k=1
        q = td[255]
        r, dists = index.nn(q, 1)
        ntools.assert_equal(r[0], q)

        # test query very near a build vector
        td_q = td[0]
        q = DescriptorMemoryElement('query', i)
        v = numpy.array(td_q.vector())  # copy
        v_min = max(v.min(), 0.1)
        v[0] += v_min
        v[dim-1] -= v_min
        q.set_vector(v)
        r, dists = index.nn(q, 1)
        ntools.assert_false(numpy.array_equal(q.vector(), td_q.vector()))
        ntools.assert_equal(r[0], td_q)

        # random query
        q = DescriptorMemoryElement('query', i+1)
        q.set_vector(numpy.random.rand(dim))

        # for any query of size k, results should at least be in distance order
        r, dists = index.nn(q, 10)
        for j in xrange(1, len(dists)):
            ntools.assert_greater(dists[j], dists[j-1])
        r, dists = index.nn(q, i)
        for j in xrange(1, len(dists)):
            ntools.assert_greater(dists[j], dists[j-1])

    def test_known_descriptors_hik_unit(self):
        """Histogram-intersection distance: zero vector has maximum distance
        (1.0) from all unit corners; an indexed element is distance 0 from
        itself."""
        dim = 5

        ###
        # Unit vectors - Equal distance
        #
        index = self._make_inst('hik')
        test_descriptors = []
        for i in xrange(dim):
            v = numpy.zeros(dim, float)
            v[i] = 1.
            d = DescriptorMemoryElement('unit', i)
            d.set_vector(v)
            test_descriptors.append(d)
        index.build_index(test_descriptors)

        # query with zero vector
        # -> all modeled descriptors have no intersection, dists should be 1.0,
        #    or maximum distance by histogram intersection
        q = DescriptorMemoryElement('query', 0)
        q.set_vector(numpy.zeros(dim, float))
        r, dists = index.nn(q, dim)
        # All dists should be 1.0, r order doesn't matter
        for d in dists:
            ntools.assert_equal(d, 1.)

        # query with index element
        q = test_descriptors[3]
        r, dists = index.nn(q, 1)
        ntools.assert_equal(r[0], q)
        ntools.assert_equal(dists[0], 0.)

        r, dists = index.nn(q, dim)
        ntools.assert_equal(r[0], q)
        ntools.assert_equal(dists[0], 0.)
| [
"paul.tunison@kitware.com"
] | paul.tunison@kitware.com |
815e6293e7b50bf45be49abf34aa8aa462497005 | ad553dd718a8df51dabc9ba636040da740db57cf | /.history/app_20181208041346.py | f81adf16e88f6eaee743e5ea52a166492686ae44 | [] | no_license | NergisAktug/E-Commerce-PythonWithFlask-Sqlite3 | 8e67f12c28b11a7a30d13788f8dc991f80ac7696 | 69ff4433aa7ae52ef854d5e25472dbd67fd59106 | refs/heads/main | 2023-01-01T14:03:40.897592 | 2020-10-19T20:36:19 | 2020-10-19T20:36:19 | 300,379,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,022 | py | import datetime
from flask import Flask, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
class ConfigClass(object):
    """Flask application settings (Flask, SQLAlchemy, Flask-Mail, Flask-User).

    SECURITY NOTE(review): the secret key and live SMTP credentials are
    hard-coded below; they should be loaded from environment variables and
    the exposed password rotated.
    """
    SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
    # SQLite database file used by SQLAlchemy
    SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # Flask-Mail SMTP settings (Gmail over SSL)
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 465
    MAIL_USE_SSL = True
    MAIL_USE_TLS = False
    MAIL_USERNAME = 'nergis.aktug2014@gmail.com'
    MAIL_PASSWORD = '05383896877'  # NOTE(review): plaintext credential committed to source
    MAIL_DEFAULT_SENDER = '"MyApp" <xyz@gmail.com>'

    # Flask-User: authenticate by email only, no usernames
    USER_ENABLE_EMAIL = True
    USER_ENABLE_USERNAME = False
    USER_EMAIL_SENDER_EMAIL = "noreply@example.com"
def create_app():
""" Flask application factory """
# Create Flask app load app.config
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
tarih = db.Column(db.DateTime())
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
def __init__(self, email, sifre):
self.email = email
self.sifre = sifre
self.rolId = 0
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
kategori_id = db.Column(db.Integer(), db.ForeignKey('kategori.kategoriId', ondelete='CASCADE'))
urunresmi = db.Column(db.String(80))
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, kategori_id, urun_ozellikleri, urun_fiyati):
self.kategori_id = kategori_id
self.urun_ozellikleri = urun_ozellikleri
self.urun_fiyati = urun_fiyati
class kategori(db.Model):
__tablename__ = 'kategori'
kategoriId = db.Column(db.Integer, primary_key=True)
kategori_adi = db.Column(db.String(80))
def __init__(self, kategori_adi):
self.kategori_adi = kategori_adi
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
    """Order model ("siparis" = order)."""

    __tablename__ = 'siparis'

    siparisId = db.Column(db.Integer, primary_key=True)
    musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
    urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
    siparisno = db.Column(db.Integer)      # order number
    siparisTarihi = db.Column(db.Integer)  # order date (stored as an integer)
    # NOTE(review): odemeId (payment id) has no ForeignKey — confirm whether
    # a payments table exists that it should reference.
    odemeId = db.Column(db.Integer())

    def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
        self.musteriId = musteriId
        self.urunId = urunId
        self.siparisno = siparisno
        self.siparisTarihi = siparisTarihi
        self.odemeId = odemeId
# ---------------------------------------------------------------------------
# Flask-User setup and one-time seed data.
# NOTE(review): the indentation of this fragment was lost in extraction; the
# `return app` further below suggests these statements live inside an
# application-factory function — confirm against the original file.
# ---------------------------------------------------------------------------
user_manager = UserManager(app, db, Kullanici)
db.create_all()
# NOTE(review): `request.form` is only available inside an active request
# context; evaluating it at app-setup time raises RuntimeError — confirm
# whether this block was meant to live inside a view function.
if not Kullanici.query.filter(Kullanici.email == request.form['email']).first():
kullanici = Kullanici(
email=request.form['email'],
# NOTE(review): Kullanici.__init__ (defined above) accepts only
# (email, sifre); the `tarih` keyword would raise TypeError — confirm.
tarih=datetime.datetime.utcnow(),
sifre=user_manager.hash_password(request.form['sifre']),
)
# Create 'admin@example.com' user with 'Admin' and 'Agent' roles
# NOTE(review): `User` is not defined in this file (the model is named
# `Kullanici`), and Kullanici has no `email_confirmed_at`/`password`
# parameters — this looks like leftover Flask-User example code.
if not Kullanici.query.filter(Kullanici.email == 'admin@example.com').first():
user = User(
email='admin@example.com',
email_confirmed_at=datetime.datetime.utcnow(),
password=user_manager.hash_password('Password1'),
)
@app.route('/')
def anasayfa():
    """Render the home page ("anasayfa" = home page)."""
    return render_template('index.html')
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
    """User registration view ("kayit" = registration).

    GET  -> render the registration form.
    POST -> create a Kullanici from the submitted email/password, commit it,
            and render the home page with a success message.
    """
    if request.method == 'POST':
        mail = request.form['email']
        parola = request.form['sifre']
        # SECURITY(review): the password is persisted as submitted; it should
        # be hashed (e.g. via user_manager.hash_password) before saving.
        yeni_kullanici = Kullanici(email=mail, sifre=parola)
        db.session.add(yeni_kullanici)
        db.session.commit()
        # The original guarded this with `if yeniKullanici is not None`, which
        # is always true for a freshly constructed object — dead branch removed.
        mesaj = "Kayıt Başarıyla Sağlanmıştır."
        return render_template("index.html", mesaj=mesaj)
    return render_template('kayit.html')
@app.route('/uye', methods=['GET', 'POST'])
def uye():
    """Render the member login page ("uye" = member)."""
    return render_template("uyeGirisi.html")
@app.route('/giris', methods=['GET', 'POST'])
def giris():
# Login view ("giris" = login).
# NOTE(review): this function is truncated in this copy of the file — the
# `try:` below has no `except`, and the successful-login branch has no
# body. Recover the full implementation before relying on it.
# NOTE(review): `session['giris_yap']` is set to False unconditionally and
# then immediately compared to True, so the redirect branch is unreachable.
session['giris_yap']=False
if request.method=='GET':
if(session['giris_yap']==True):
return redirect(url_for('index'))
else:
return render_template('uyeGirisi.html')
else:
email=request.form['email']
parola=request.form['sifre']
active=0
# SECURITY(review): the query below compares the submitted password
# directly against the stored `sifre`, implying plaintext storage.
try:
if Kullanici.query.filter_by(email=email,sifre=parola,active=1).first():
@app.route('/admin')
@roles_required('admin')
def admin():
    """Admin-only page; Flask-User's roles_required guards access.

    NOTE(review): requires a role named 'admin' to exist — confirm it is
    seeded in the `rol` table.
    """
    return "naber selin ya"
# NOTE(review): this `return app` belongs to an application-factory function
# (`create_app`, called below) whose `def` line is missing from this copy;
# the fragment's indentation was lost in extraction.
return app
if __name__ == '__main__':
# Build the Flask app via the factory and serve it on localhost only.
app = create_app()
# app.run(host='0.0.0.0', port=5000, debug=True)
# NOTE(review): debug=True must not be enabled in production.
app.run(host='127.0.0.1', port=5000, debug=True)
"nergis.aktug2014@gmail.com"
] | nergis.aktug2014@gmail.com |
e02728d9fc94a43001308defdd5483846398a4de | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ime/models/ar_net.py | 218ee2b12134f1c56ce580873cc51c45a26c1c8e | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 3,614 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that implement ARNet."""
import torch
import torch.nn as nn
class ARNet(nn.Module):
  """Auto Regressive model as described in https://arxiv.org/abs/1911.12436.

  A single bias-free linear layer maps the last `n_lags` observations to the
  next value. Multi-step forecasts slide the lag window forward, reading past
  values first from the input and then from a feedback sequence: the ground
  truth during training (teacher forcing, `forward`) or the model's own
  predictions at inference time (`predict`).
  """

  def __init__(self, n_forecasts, n_lags, device):
    """Initializes a ARNet instance.

    Args:
      n_forecasts: Number of time steps to forecast
      n_lags: Lags (past time steps) used to make forecast
      device: Device used by the model
    """
    super().__init__()
    self.n_lags = n_lags
    self.device = device
    self.n_forecasts = n_forecasts
    # The learned weights of this layer are the AR coefficients over the
    # last n_lags values.
    self.fc = nn.Linear(n_lags, 1, bias=False)
    nn.init.kaiming_normal_(self.fc.weight, mode="fan_in")

  def _step(self, window):
    """Applies the AR layer to one `(batch_size, n_lags)` window.

    Returns a `(batch_size,)` tensor. `squeeze(-1)` (rather than a bare
    `squeeze()`) keeps the batch dimension intact even when batch_size == 1.
    """
    return self.fc(window).squeeze(-1)

  def _rollout(self, x, teacher=None):
    """Generates `n_forecasts` steps by sliding the lag window forward.

    Shared implementation for `forward` and `predict` (the originals
    duplicated this loop, differing only in the feedback source).

    Args:
      x: A tensor of shape `(batch_size, n_lags)` with the known history.
      teacher: Optional `(batch_size, n_forecasts)` tensor of true future
        values (teacher forcing). When None, the model feeds back its own
        predictions; this is safe because `output[:, :i]` is always fully
        written before step `i` reads it.

    Returns:
      output: Forecast, a tensor of shape `(batch_size, n_forecasts)`.
    """
    output = torch.zeros((x.shape[0], self.n_forecasts), device=self.device)
    feedback = output if teacher is None else teacher
    output[:, 0] = self._step(x)
    if self.n_forecasts > self.n_lags:
      # Shift the original inputs out of the window one step at a time...
      for i in range(1, self.n_lags):
        output[:, i] = self._step(
            torch.cat((x[:, i:], feedback[:, :i]), dim=1))
      # ...then forecast purely from feedback values.
      for i in range(self.n_forecasts - self.n_lags):
        output[:, self.n_lags + i] = self._step(feedback[:, i:i + self.n_lags])
    else:
      for i in range(1, self.n_forecasts):
        output[:, i] = self._step(
            torch.cat((x[:, i:], feedback[:, :i]), dim=1))
    return output

  def forward(self, x, true_output):
    """Forward pass for ARNet with teacher forcing (training).

    Args:
      x: A tensor of shape `(batch_size, n_lags)`
      true_output: Actual forecast; used for teacher forcing during training

    Returns:
      output: Forecast, a tensor of shape `(batch_size, n_forecasts)`
    """
    return self._rollout(x, teacher=true_output)

  def predict(self, x):
    """Makes predictions in an auto-regressive style (testing).

    Args:
      x: A tensor of shape `(batch_size, n_lags)`

    Returns:
      output: Forecast, a tensor of shape `(batch_size, n_forecasts)`
    """
    return self._rollout(x)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.