blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2011bd335f06239f1071ddd091443dcdbbc2baf | 087a0cfe910992bbf1913a1cc8ddb47eae53c81d | /browsercompat/bcauth/oauth2p/urls.py | fe6f542748de5c4cb6208b609ed0d0a39c453f85 | [] | no_license | WeilerWebServices/MDN-Web-Docs | a1e4716ce85ee6a7548819bcb19e78f6d1c14dfa | bc092964153b03381aaff74a4d80f43a2b2dec19 | refs/heads/master | 2023-01-29T17:47:06.730214 | 2020-12-09T05:53:29 | 2020-12-09T05:53:29 | 259,744,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | """
Overrides for oauth2_provider.urls.
Same as default, but uses our AuthorizationView.
"""
from __future__ import absolute_import
from django.conf.urls import url
from oauth2_provider import views
from .views import MyAuthorizationView
# OAuth2 provider endpoints.  Same routes as oauth2_provider.urls, except the
# authorization endpoint is served by our MyAuthorizationView subclass.
urlpatterns = (
    url(r'^authorize/$', MyAuthorizationView.as_view(), name='authorize'),
    url(r'^token/$', views.TokenView.as_view(), name='token'),
    url(r'^revoke_token/$', views.RevokeTokenView.as_view(),
        name='revoke-token'),
)
# Application management views
urlpatterns += (
    url(r'^applications/$', views.ApplicationList.as_view(), name='list'),
    url(r'^applications/register/$', views.ApplicationRegistration.as_view(),
        name='register'),
    url(r'^applications/(?P<pk>\d+)/$', views.ApplicationDetail.as_view(),
        name='detail'),
    url(r'^applications/(?P<pk>\d+)/delete/$',
        views.ApplicationDelete.as_view(), name='delete'),
    url(r'^applications/(?P<pk>\d+)/update/$',
        views.ApplicationUpdate.as_view(), name='update'),
)
# Views for listing and revoking the tokens a user has authorized.
urlpatterns += (
    url(r'^authorized_tokens/$', views.AuthorizedTokensListView.as_view(),
        name='authorized-token-list'),
    url(r'^authorized_tokens/(?P<pk>\d+)/delete/$',
        views.AuthorizedTokenDeleteView.as_view(),
        name='authorized-token-delete'),
)
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
d0ec0186ae9afc856f78b53b6439831f4865b158 | 4ea43f3f79ad483d83238d88572feb822f451372 | /philo/migrations/0004_auto__del_field_attribute_json_value.py | 2cfc222a490142fcde0e59222b460dd47760eab8 | [
"ISC"
] | permissive | kgodey/philo | c8c433d44b2f31121f13bd0ee101605be11fe9da | c19bf577d44606d2b284e6058d633f4a174b61cc | refs/heads/master | 2020-12-29T02:54:11.746966 | 2011-05-24T21:57:47 | 2011-05-24T21:57:47 | 686,009 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,789 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the obsolete ``Attribute.json_value`` column.

    The JSON payload now lives in the ``value`` field (a ``JSONField``), so the
    old text column is removed on ``forwards`` and re-added on ``backwards``.
    """
    def forwards(self, orm):
        """Apply the migration: remove the redundant text column."""
        # Deleting field 'Attribute.json_value'
        db.delete_column('philo_attribute', 'json_value')
    def backwards(self, orm):
        """Revert the migration: restore the column with an empty default."""
        # Adding field 'Attribute.json_value'
        # keep_default=False: the default only backfills existing rows.
        db.add_column('philo_attribute', 'json_value', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
    # Frozen ORM snapshot generated by South; it is used to build the fake
    # `orm` object passed to forwards()/backwards().  Do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'philo.attribute': {
            'Meta': {'unique_together': "(('key', 'entity_content_type', 'entity_object_id'),)", 'object_name': 'Attribute'},
            'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'value': ('philo.models.fields.JSONField', [], {})
        },
        'philo.collection': {
            'Meta': {'object_name': 'Collection'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'philo.collectionmember': {
            'Meta': {'object_name': 'CollectionMember'},
            'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['philo.Collection']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'member_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'member_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'philo.contentlet': {
            'Meta': {'object_name': 'Contentlet'},
            'content': ('philo.models.fields.TemplateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentlets'", 'to': "orm['philo.Page']"})
        },
        'philo.contentreference': {
            'Meta': {'object_name': 'ContentReference'},
            'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentreferences'", 'to': "orm['philo.Page']"})
        },
        'philo.file': {
            'Meta': {'object_name': 'File'},
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'philo.node': {
            'Meta': {'object_name': 'Node'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Node']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
            'view_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'node_view_set'", 'to': "orm['contenttypes.ContentType']"}),
            'view_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        'philo.page': {
            'Meta': {'object_name': 'Page'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['philo.Template']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'philo.redirect': {
            'Meta': {'object_name': 'Redirect'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status_code': ('django.db.models.fields.IntegerField', [], {'default': '302'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'philo.relationship': {
            'Meta': {'unique_together': "(('key', 'entity_content_type', 'entity_object_id'),)", 'object_name': 'Relationship'},
            'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'relationship_entity_set'", 'to': "orm['contenttypes.ContentType']"}),
            'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'value_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'relationship_value_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'value_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'philo.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
        },
        'philo.template': {
            'Meta': {'object_name': 'Template'},
            'code': ('philo.models.fields.TemplateField', [], {}),
            'documentation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'default': "'text/html'", 'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Template']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
        }
    }
    complete_apps = ['philo']
| [
"stephen.r.burrows@gmail.com"
] | stephen.r.burrows@gmail.com |
e57a2eac0d5aae324db2f1f3da2271d06bba059e | 44f216cc3bb4771c8186349013ff0ed1abc98ea6 | /torchgen/shape_functions/gen_jit_shape_functions.py | d25539d3fa2cd35023e6989e6bb93576a1c69ef5 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | eiphy/pytorch | a8fc21a3c0552b392ed8c3a1d69f7ed8660c56ac | 104f0bf09ec7609d1c5626a7d7953ade4f8c9007 | refs/heads/master | 2022-05-23T02:10:13.158924 | 2022-05-07T21:26:00 | 2022-05-07T21:26:00 | 244,914,898 | 2 | 0 | NOASSERTION | 2020-03-04T14:00:53 | 2020-03-04T14:00:53 | null | UTF-8 | Python | false | false | 3,366 | py | #!/usr/bin/env python3
import os
from pathlib import Path
from torch.jit._shape_functions import shape_compute_graph_mapping
SHAPE_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python
* torchgen/shape_functions/gen_jit_shape_functions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>
#include <torch/csrc/jit/runtime/serialized_shape_function_registry.h>
#include <torch/csrc/jit/runtime/operator.h>
// clang-format off
namespace torch {
namespace jit {
std::string shape_funcs = ""
"""
DECOMP_CENTER = r"""
const std::string& GetSerializedShapeFunctions() {
return shape_funcs;
}
const OperatorMap<std::string>& GetShapeFunctionMappings() {
static const OperatorMap<std::string> shape_mappings {
"""
DECOMP_END = r"""
};
return shape_mappings;
}
// clang-format on
} // namespace jit
} // namespace torch
"""
SERIALIZED_SHAPE_UTIL_FILE_NAME = "serialized_shape_function_registry.cpp"
def gen_serialized_decompisitions() -> str:
    """Serialize every unique shape function into chunked C++ string literals.

    Duplicate functions (one graph mapped under several schemas) are emitted
    only once.  The serialized code is packed into chunks so that no single
    C++ string literal exceeds the MSVC-safe length, and each chunk becomes
    its own `+ std::string(R"=====(...)=====")` term.
    """
    # technically its higher but give a buffer bc there are weird rules
    # around some characters
    # TODO: this was the limit I found by googling but it seems way
    # too short ?
    max_literal_len = 2000

    # Keep only the first occurrence of each named scripted function.
    seen_names = set()
    unique_funcs = []
    for scripted_func in shape_compute_graph_mapping.values():
        if scripted_func.name not in seen_names:
            seen_names.add(scripted_func.name)
            unique_funcs.append(scripted_func)

    # Greedily pack serialized bodies into chunks below the literal limit.
    chunks = []
    current = ""
    for scripted_func in unique_funcs:
        serialized_code = scripted_func.code
        if len(current) + len(serialized_code) <= max_literal_len:
            current = current + "\n" + serialized_code
        else:
            chunks.append(current)
            current = serialized_code
    chunks.append(current)

    # Windows compiler doesnt correctly handle adjacent string literals, so
    # every chunk is wrapped in its own explicit std::string term.
    pieces = [
        '+ std::string(R"=====(' + chunk + '\n)=====")\n'
        for chunk in chunks
    ]
    return "".join(pieces) + ";"
def gen_shape_mappings() -> str:
    """Render the schema -> shape-function-name entries of the C++ OperatorMap."""
    entries = [
        ' {"' + schema + '", "' + scripted_func.name + '"},'
        for schema, scripted_func in shape_compute_graph_mapping.items()
    ]
    return "\n".join(entries)
def write_decomposition_util_file(path: str) -> None:
    """Assemble serialized_shape_function_registry.cpp and write it under *path*.

    The file is the concatenation of: header boilerplate, the serialized shape
    function bodies, the accessor boilerplate, the schema->name mapping table,
    and the closing boilerplate.
    """
    contents = "".join(
        [
            SHAPE_HEADER,
            gen_serialized_decompisitions(),
            DECOMP_CENTER,
            gen_shape_mappings(),
            DECOMP_END,
        ]
    )
    print("writing file to : ", path + "/" + SERIALIZED_SHAPE_UTIL_FILE_NAME)
    destination = os.path.join(path, SERIALIZED_SHAPE_UTIL_FILE_NAME)
    # Write UTF-8 bytes directly so the generated file is encoding-stable
    # regardless of the platform's default text encoding.
    with open(destination, "wb") as out_file:
        out_file.write(contents.encode("utf-8"))
def main() -> None:
    """Entry point: regenerate the serialized registry inside the pytorch tree."""
    # This file lives at torchgen/shape_functions/..., so parents[2] is the
    # repository root.
    pytorch_dir = Path(__file__).resolve().parents[2]
    upgrader_path = pytorch_dir / "torch" / "csrc" / "jit" / "runtime"
    write_decomposition_util_file(str(upgrader_path))
if __name__ == "__main__":
    main()
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
f16e57b0781c6ce1aaf648186b412f2f16a75ec9 | 92963d596f263b04d244fe87d1cad149961c7e39 | /caffe2_tutorial/Basics/test_caffe2.py | fadd2f155da66095dbc4383b8de459a6739f61a8 | [] | no_license | zchen0211/ml_system | 1d993c6f481d269013c4193bbe6de2d178f0b3fb | 7a74656eb8fab559890513ee318cf726654ff44a | refs/heads/master | 2021-01-20T05:19:10.393493 | 2019-03-08T22:45:53 | 2019-03-08T22:45:53 | 101,426,846 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | from caffe2.python import workspace, model_helper
import numpy as np
import glog as log
# Create random tensor of three dimensions
x = np.random.rand(4, 3, 2)
print(x)
print(x.shape)
# Round-trip the tensor through the caffe2 workspace blob store.
workspace.FeedBlob("my_x", x)
x2 = workspace.FetchBlob("my_x")
print(x2)
### Nets and Operators
# Create the input data
data = np.random.rand(16, 100).astype(np.float32)
# Create labels for the data as integers [0, 9].
label = (np.random.rand(16) * 10).astype(np.int32)
workspace.FeedBlob("data", data)
workspace.FeedBlob("label", label)
# Create model using a model helper
m = model_helper.ModelHelper(name="my first net")
# Single fully-connected layer (100 -> 10) followed by a sigmoid and a
# softmax-with-loss head.
weight = m.param_init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = m.param_init_net.ConstantFill([], 'fc_b', shape=[10, ])
fc_1 = m.net.FC(["data", "fc_w", "fc_b"], "fc1")
pred = m.net.Sigmoid(fc_1, "pred")
[softmax, loss] = m.net.SoftmaxWithLoss([pred, "label"], ["softmax", "loss"])
print(str(m.net.Proto()))
### Executing
# 1. initialization
m.AddGradientOperators([loss])
workspace.RunNetOnce(m.param_init_net)
# 2. create the actual training
workspace.CreateNet(m.net)
# 3. Run it
# Run 100 x 10 iterations
for j in range(0, 100):
    # Fresh random batch each outer iteration; labels are random, so the
    # loss is not expected to converge -- this only exercises the pipeline.
    data = np.random.rand(16, 100).astype(np.float32)
    label = (np.random.rand(16) * 10).astype(np.int32)
    workspace.FeedBlob("data", data)
    workspace.FeedBlob("label", label)
    workspace.RunNet(m.name, 10) # run for 10 times
    # print(workspace.FetchBlob("softmax"))
    log.info('The loss of forward running: %f' % workspace.FetchBlob("loss"))
print(str(m.net.Proto()))
| [
"chenzhuoyuan07@gmail.com"
] | chenzhuoyuan07@gmail.com |
a5267614375d4244c70ef9d22e43775759ce616f | b2abec1469351de38a37b6189fd365be71ac1a5c | /v2/api/assets/user_preferences.py | 9bdefa24e0677c120d05de4c4a0e13925375780f | [] | no_license | stainedart/kdm-manager | 49804eb258ebc22a7679dad8e1e704c997694747 | 3b73fc037be3b2b63c0baf4280e379bdf4e7cb75 | refs/heads/master | 2020-03-07T02:28:49.893626 | 2018-03-25T14:24:47 | 2018-03-25T14:24:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,676 | py | preferences_dict = {
"beta": {
"type": "General",
"desc": "Enable beta features of the Manager?",
"affirmative": "Enable",
"negative": "Disable",
"patron_level": 2,
},
"preserve_sessions": {
"type": "General",
"desc": "Preserve Sessions?",
"affirmative": "Keep me logged in",
"negative": "Remove sessions after 24 hours",
"patron_level": 1,
},
"random_names_for_unnamed_assets": {
"type": "General",
"desc": "Choose random names for Settlements/Survivors without names?",
"affirmative": "Choose randomly",
"negative": "Use 'Unknown' and 'Anonymous'",
"patron_level": 0,
},
"apply_new_survivor_buffs": {
"type": "Automation",
"desc": "Automatically apply settlement bonuses to new, newborn and current survivors where appropriate?",
"affirmative": "Automatically apply",
"negative": "Do not apply",
"patron_level": 0,
},
"apply_weapon_specialization": {
"type": "Automation",
"desc": "Automatically add weapon specializations if Innovations include the mastery?",
"affirmative": "Add",
"negative": "Do Not Add",
"patron_level": 0,
},
"show_endeavor_token_controls": {
"type": "Campaign Summary",
"desc": "Show Endeavor Token controls on Campaign Summary view?",
"affirmative": "Show controls",
"negative": "Hide controls",
"patron_level": 0,
},
# "update_timeline": {
# "type": "Automation",
# "desc": "Automatically Update Timeline with Milestone Story Events?",
# "affirmative": "Update settlement timelines when milestone conditions are met",
# "negative": "Do not automatically update settlement timelines",
# "patron_level": 0,
# },
"show_epithet_controls": {
"type": "Survivor Sheet",
"desc": "Use survivor epithets?",
"affirmative": "Show controls on Survivor Sheets",
"negative": "Hide controls and survivor epithets on Survivor Sheets",
"patron_level": 0,
},
"show_remove_button": {
"type": "General",
"desc": "Show controls for removing Settlements and Survivors?",
"affirmative": "Show controls on Settlement and Survivor Sheets",
"negative": "Hide controls on Settlement and Survivor Sheets",
"patron_level": 0,
},
"show_ui_tips": {
"type": "General",
"desc": "Display in-line help and user interface tips?",
"affirmative": "Show UI tips",
"negative": "Hide UI tips",
"patron_level": 2,
},
}
| [
"toconnell@tyrannybelle.com"
] | toconnell@tyrannybelle.com |
b4901ff780580eb8733db95e8de4824e965fd50e | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /nox_mesh_4_loop_repro_debug_verbose/interreplay_20_l_5/replay_config.py | 81a227dc97ef5fe3361b91f64be4cda59ae66e9f | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS replay configuration: a 4-switch mesh topology driven by a NOX
# controller running the `routing` module on 127.0.0.1:6635.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./nox_core -v -v -i ptcp:6635 routing', address='127.0.0.1', port=6635, cwd='nox_classic/build/src')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=4",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False)
# Replay the previously recorded event trace; inputs are re-logged, and the
# replayer does not block waiting for deterministic values.
control_flow = Replayer(simulation_config, "experiments/nox_mesh_4_loop_repro_debug_verbose/interreplay_20_l_5/events.trace",
                input_logger=InputLogger(),
                wait_on_deterministic_values=False)
# Invariant check: 'None'
| [
"cs@cs.berkeley.edu"
] | cs@cs.berkeley.edu |
eb26b4d645ca9ad3766a6fcd1e53b646322b4db4 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/sklearn/base.py | 10620bcf6f59c053e9249cfa67230a2ee5e90210 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:2b7638a252b70d507fafc022659ee844b25e7e5cf54b0595420920b4a8c42ce7
size 22906
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
408a09caa0f100dcf5c20e1fdf23a5e47ce33265 | e940e2d5e0696b9f1385962100796c3d990c33d1 | /chapter 3/exercise_3.14.py | fa55234e74e6fed3a7a23a112f1e030b0ee30fb6 | [] | no_license | sfwarnock/python_programming | a84b6c3d18f55e59d50e5299cedd102c265dfc6b | aa45be8984cd80094f685d4fc4d0b9aca9e9eefb | refs/heads/master | 2021-04-12T03:16:49.267143 | 2018-08-06T12:24:00 | 2018-08-06T12:24:00 | 125,944,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 19
@author: Scott Warnock
"""
# Exercise 3.14
#
# Write a program that finds the average of a series of numbers entered by the user.
# First prompt the user for how many numbers are to be entered..
print("This program averages numbers entered by the user.")
print()
def main():
tn = eval(input("How many numbers do you want to average? "))
sum = 0
for n in range(tn):
n = eval(input("Enter a number: "))
sum = sum + n
mean = sum / tn
print()
print ("The mean of the numbers you entered is", mean)
main() | [
"noreply@github.com"
] | sfwarnock.noreply@github.com |
94f25a94f89bd31421515bc201727e6530947fab | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetCode_with_solution/417 Pacific Atlantic Water Flow.py | e749a8f4525325e00fb1e9c945f5d40b2aff4e87 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 4,163 | py | #!/usr/bin/python3
"""
Given an m x n matrix of non-negative integers representing the height of each
nit cell in a continent, the "Pacific ocean" touches the left and top edges of
the matrix and the "Atlantic ocean" touches the right and bottom edges.
Water can only flow in four directions (up, down, left, or right) from a cell to
another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific and
Atlantic ocean.
Note:
The order of returned grid coordinates does not matter.
Both m and n are less than 150.
Example:
Given the following 5x5 matrix:
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4) (4) *
~ 2 4 (5) 3 1 *
~ (6) (7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with
parentheses in above matrix).
"""
dirs ((0, 1), (0, -1), (1, 0), (-1, 0
c_ Solution:
___ pacificAtlantic matrix
"""
dfs, visisted O(1)
Similar to Trapping Rainwater II (BFS + heap), but no need to record
volume, thus, dfs is enough.
Similar to longest increasing path
Starting from the edge point rather than any point, dfs visit the
possible cell
Complexity analysis, although a cell can be checked multiple times
(at most 4 times); but only perform 1 dfs on each cell; thus
O(mn)
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
__ n.. matrix o. n.. matrix[0]:
r.. # list
m, n l..(matrix), l..(matrix 0 # row, col
# don't do [[False] * n ] * m, memory management, all rows reference the same row
P [[F.. ___ _ __ r..(n)] ___ _ __ r..(m)]
A [[F.. ___ _ __ r..(n)] ___ _ __ r..(m)]
# starting from edge point
___ i __ r..(m
dfs(matrix, i, 0, P)
dfs(matrix, i, n-1, A)
___ j __ r..(n
dfs(matrix, 0, j, P)
dfs(matrix, m-1, j, A)
ret [
[i, j]
___ i __ r..(m)
___ j __ r..(n)
__ P[i][j] a.. A[i][j]
]
r.. ret
___ dfs matrix, i, j, C
# check before dfs (to be consistent)
C[i][j] T..
m, n l..(matrix), l..(matrix 0
___ x, y __ dirs:
I i + x
J j + y
__ 0 <_ I < m a.. 0 <_ J < n a.. matrix[i][j] <_ matrix[I][J]:
__ n.. C[I][J]:
dfs(matrix, I, J, C)
___ pacificAtlantic_error matrix
"""
DP
dfs, visisted O(1)
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
__ n.. matrix o. n.. matrix[0]:
r.. # list
m, n l..(matrix), l..(matrix 0 # row, col
P [[F..] * n ] * m
A [[F..] * n ] * m
visisted [[F..] * n ] * m
___ i __ r..(m
___ j __ r..(n
dfs_error(matrix, i, j, visisted, P, l.... i, j: i < 0 o. j <0)
visisted [[F..] * n ] * m
___ i __ r..(m
___ j __ r..(n
dfs_error(matrix, i, j, visisted, A, l.... i, j: i >_ m o. j >_ n)
ret [
[i, j]
___ i __ r..(m)
___ j __ r..(n)
__ P[i][j] a.. A[i][j]
]
r.. ret
___ dfs_error matrix, i, j, visisted, C, predicate
m, n l..(matrix), l..(matrix 0
__ visisted[i][j]:
r.. C[i][j]
visisted[i][j] T..
___ x, y __ dirs:
i2 i + x
j2= j + y
__ 0 <_ i2 < m a.. 0 <_ j2 < n:
__ dfs_error(matrix, i2, j2, visisted, C, predicate) a.. matrix[i][j] >_ matrix[i2][j2]:
C[i][j] T..
____ predicate(i2, j2
C[i][j] T..
r.. C[i][j]
__ _______ __ _______
... Solution().pacificAtlantic([
[1,2,2,3,5],
[3,2,3,4,4],
[2,4,5,3,1],
[6,7,1,4,5],
[5,1,1,2,4]
]) __ [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
3fd0f6ad32c23a1851c4dde901c0d66727e175e1 | e776e45ae9f78765fb11e3e8cf5c87a0a1b9f0da | /OLD/tests/test-websockets.py | a3a2ab909c839346c259af6dd028fc9718d30345 | [
"MIT"
] | permissive | mvandepanne/seamless | 5100b9994b84c83d82815b572b2ee4e1f61931d6 | 1dc9108176cca2d7e2fe57eb1695aec6d39df456 | refs/heads/master | 2020-04-17T10:06:14.829539 | 2019-01-15T19:31:36 | 2019-01-15T19:31:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,682 | py | import seamless
from seamless import context, cell, reactor, transformer
from seamless.lib.filelink import link
# Build the seamless context: one websocket server reactor plus two
# browser-based clients rendered from the same Jinja template.
ctx = context()
ctx.server = reactor({"socket": {"pin": "output", "dtype": "int"}})
ctx.servercode = ctx.server.code_start.cell()
# Server start-up code is edited live through a file link.
link(ctx.servercode, ".", "test-websockets_pycell.py")
ctx.server.code_update.cell().set("")
ctx.server.code_stop.cell().set("""
server.close()
loop.run_until_complete(server.wait_closed())
""")
from seamless.lib.gui.browser import browse
ctx.client_template = cell("text")
link(ctx.client_template, ".", "test-websockets_client.jinja")
# Transformer pins: template text + identifier + server port in, HTML out.
tf_params = {"inp":{"pin": "input", "dtype": "text"},
 "identifier":{"pin": "input", "dtype": "text"},
 "socket":{"pin": "input", "dtype": "int"},
 "outp":{"pin": "output", "dtype": ("text", "html")} }
# Transformer body: render the Jinja template with the client identifier
# and the websocket port.
tf_code = """
import jinja2
d = dict(IDENTIFIER=identifier, socket=socket)
return jinja2.Template(inp).render(d)
"""
# First client: template -> transformer -> HTML cell -> browser.
ctx.client1 = cell(("text", "html"))
ctx.tf_client1 = transformer(tf_params)
ctx.server.socket.cell().connect(ctx.tf_client1.socket)
ctx.client_template.connect(ctx.tf_client1.inp)
ctx.tf_client1.code.cell().set(tf_code)
ctx.tf_client1.identifier.cell().set("First WebSocket client")
ctx.tf_client1.outp.connect(ctx.client1)
browse(ctx.client1)
# Second client: identical wiring, different identifier.
ctx.client2 = cell(("text", "html"))
ctx.tf_client2 = transformer(tf_params)
ctx.server.socket.cell().connect(ctx.tf_client2.socket)
ctx.client_template.connect(ctx.tf_client2.inp)
ctx.tf_client2.code.cell().set(tf_code)
ctx.tf_client2.identifier.cell().set("Second WebSocket client")
ctx.tf_client2.outp.connect(ctx.client2)
browse(ctx.client2)
# Outside IPython there is no event loop yet, so start one explicitly.
if not seamless.ipython:
    seamless.mainloop()
| [
"sjdv1982@gmail.com"
] | sjdv1982@gmail.com |
5e91a4dab811e752ac55b1c8c0f67c5649ab163e | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/prompt_toolkit/output/color_depth.py | a6166bacafb2941014a2080438fc330df92d394d | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 1,387 | py | import os
from enum import Enum
from typing import Optional
__all__ = [
"ColorDepth",
]
class ColorDepth(str, Enum):
    """Supported color depths for terminal output."""

    value: str

    #: Monochrome output.
    DEPTH_1_BIT = "DEPTH_1_BIT"
    #: The classic 16 ANSI colors.
    DEPTH_4_BIT = "DEPTH_4_BIT"
    #: 256 colors (the default).
    DEPTH_8_BIT = "DEPTH_8_BIT"
    #: 24 bit true color.
    DEPTH_24_BIT = "DEPTH_24_BIT"

    # Friendlier aliases for the bit-depth members above.
    MONOCHROME = DEPTH_1_BIT
    ANSI_COLORS_ONLY = DEPTH_4_BIT
    DEFAULT = DEPTH_8_BIT
    TRUE_COLOR = DEPTH_24_BIT

    @classmethod
    def from_env(cls) -> Optional["ColorDepth"]:
        """Return the depth forced via $PROMPT_TOOLKIT_COLOR_DEPTH, or None.

        Setting this environment variable is a way to enforce a certain color
        depth in all prompt_toolkit applications.
        """
        requested = os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH")
        if requested in {member.value for member in cls}:
            return cls(requested)
        return None

    @classmethod
    def default(cls) -> "ColorDepth":
        """Return the default color depth for the default output."""
        from .defaults import create_output

        return create_output().get_default_color_depth()
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
91ffadc0956fd77e0b62bd670aa556229fd3ab4a | e33199ecbe80ef7205473bf1ad584bbffd1a24a5 | /test.py | a3fa8b56983b7216b2f94532745867fe0510f4da | [] | no_license | zhangjiulong/Seq2Seq_Chatbot_QA | cba49143df1a20910f687fa7c48b10a99a7127c3 | 0c3ea305615ee572d2b8a4bef654a3182709107a | refs/heads/master | 2021-01-11T01:31:13.841357 | 2016-10-10T15:57:40 | 2016-10-10T15:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,301 | py | #!/usr/bin/env python3
__author__ = 'qhduan@memect.co'
import sys
import math
import time
import random
import numpy as np
from sklearn.utils import shuffle
import tensorflow as tf
from tqdm import tqdm
import data_util
# Build the seq2seq inference graph on the configured test device.
tf.device(data_util.test_device)
# One int32 placeholder per time step for the encoder and decoder sides.
encoder_inputs = [tf.placeholder(tf.int32, [None], name='encoder_inputs_{}'.format(i))
 for i in range(data_util.input_len)]
decoder_inputs = [tf.placeholder(tf.int32, [None], name='decoder_inputs_{}'.format(i))
 for i in range(data_util.output_len)]
decoder_targets = [tf.placeholder(tf.int32, [None], name='decoder_targets_{}'.format(i))
 for i in range(data_util.output_len)]
# Per-step float weights for the sequence loss.
decoder_weights = [tf.placeholder(tf.float32, [None], name='decoder_weights_{}'.format(i))
 for i in range(data_util.output_len)]
# Third argument True: build the model in feed-previous/decode mode
# (assumption based on the training script's convention -- confirm in data_util).
outputs, states = data_util.build_model(encoder_inputs, decoder_inputs, True)
loss_func = tf.nn.seq2seq.sequence_loss(
    outputs,
    decoder_targets,
    decoder_weights,
    data_util.dim
)
# Start a session and restore the trained weights.
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
data_util.load_model(sess)
def test_sentence(s):
    """Feed one sentence through the trained model and decode the reply.

    The text is stripped and truncated to the model's input length.  Only the
    first sequence of the resulting batch is decoded; None is returned when
    the model yields an empty batch.
    """
    question = s.strip()[:data_util.input_len]
    encoder, decoder = data_util.get_sentence(question)
    # One feed entry per encoder time step, plus the first decoder input.
    feed_dict = {placeholder: encoder[i]
                 for i, placeholder in enumerate(encoder_inputs)}
    feed_dict[decoder_inputs[0]] = decoder[0]
    logits = sess.run(outputs, feed_dict)
    # Per-step argmax over the vocabulary axis, transposed to batch-major.
    decoded = np.asarray(logits).argmax(axis=2).T
    for indices in decoded:
        return data_util.indice_sentence(indices)
def test_qa(s):
    """Ask the model one question and pretty-print the exchange."""
    answer = test_sentence(s)
    for line_parts in (('Q:', s), (answer,), ('-' * 10,)):
        print(*line_parts)
def test_example():
    """Run the model over a few canned greeting questions."""
    canned_questions = ('你好', '你是谁', '你从哪来', '你到哪去')
    for question in canned_questions:
        test_qa(question)
def test_db():
    """Sample 20 random questions from the conversation DB and answer them."""
    asks, answers = data_util.read_db('db/conversation.db')
    for _ in range(20):
        s = random.choice(asks)
        test_qa(s)
if __name__ == '__main__':
while True:
sentence = input('说:')
sentence = sentence.strip()
if sentence in ('quit', 'exit'):
break
if len(sentence) <= 0:
break
recall = test_sentence(sentence)
print(recall)
| [
"mail@qhduan.com"
] | mail@qhduan.com |
ce8b2390d1ca1c2670dca353c0dfdebe4586f810 | 9ba439d691359a6296e182e0b8cea30b89f95530 | /modules/processing/parsers/malwareconfig/Ursnif.py | cb18b7eb4ad29f561d173ce2fb771cb482dcf475 | [] | no_license | naxonez/CAPE | ffed1b8c54199ac149dbe21df25f89db650d4369 | b6295b40de8b9020e4bb25b2c0b09a126736b5f5 | refs/heads/master | 2020-07-02T22:19:41.421961 | 2019-10-01T07:20:44 | 2019-10-01T07:20:44 | 201,684,949 | 1 | 0 | null | 2019-08-10T21:30:36 | 2019-08-10T21:30:36 | null | UTF-8 | Python | false | false | 3,988 | py | # Copyright (C) 2017 Kevin O'Reilly (kevin.oreilly@contextis.co.uk)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import os.path
MAX_STRING_SIZE = 256
def string_from_offset(buffer, offset):
    """Read the NUL-terminated string (at most MAX_STRING_SIZE chars) at *offset*."""
    window = buffer[offset:offset + MAX_STRING_SIZE]
    terminator = window.find("\0")
    return window if terminator < 0 else window[:terminator]
def get_config_item(config, offset):
    """Return the string at *offset*, split into a list when it holds spaces."""
    value = string_from_offset(config, offset)
    return value.split(' ') if ' ' in value else value
def config(raw_data):
    """Parse an Ursnif configuration blob into a dict of named settings.

    Layout: a 4-byte section count, then 24-byte section records starting
    at offset 8.  Type-1 sections carry an offset (relative to the record)
    to a NUL-terminated string value; the section key (a CRC) selects
    which configuration field the value belongs to.
    """
    # Section CRC key -> human-readable configuration field name.
    key_names = {
        0xD0665BF6: 'Domains',
        0x73177345: 'DGA Base URL',
        0xCD850E68: 'DGA CRC',
        0xC61EFA7A: 'DGA TLDs',
        0x510F22D2: 'TOR Domains',
        0xDF351E24: '32-bit DLL URLs',
        0x4B214F54: '64-bit DLL URLs',
        0xEC99DF2E: 'IP Service',
        0x11271C7F: 'Timer',
        0xDF2E7488: 'DGA Season',
        0x556AED8F: 'Server',
        0x4FA8693E: 'Encryption key',
        0xD7A003C9: 'Config Fail Timeout',
        0x18A632BB: 'Config Timeout',
        0x31277BD5: 'Task Timeout',
        0x955879A6: 'Send Timeout',
        0xACC79A02: 'Knocker Timeout',
        0x6DE85128: 'BC Timeout',
        0x656B798A: 'Botnet ID',
        0xEFC574AE: 'Value 11',
        # 0x584E5925: 'EndPointer',
    }
    number_of_sections = struct.unpack('I', raw_data[0:4])[0]
    section_offset = 8
    config_dict = {}
    for _ in range(number_of_sections):
        section_key = struct.unpack('I', raw_data[section_offset:section_offset + 4])[0]
        section_type = struct.unpack('I', raw_data[section_offset + 4:section_offset + 8])[0]
        if section_type == 1:
            data_offset = struct.unpack('I', raw_data[section_offset + 8:section_offset + 12])[0]
            config_item = get_config_item(raw_data, section_offset + data_offset)
            # Bug fix: the original did `continue` on a None item *before*
            # advancing section_offset/section_count, which would loop
            # forever if it ever triggered.  Advancing now always happens
            # in the loop footer; a missing item is simply skipped.
            if config_item is not None and section_key in key_names:
                config_dict[key_names[section_key]] = config_item
        section_offset += 24
    return config_dict
| [
"kevoreilly@gmail.com"
] | kevoreilly@gmail.com |
d067a21fa8765b22a0f88cdba5e0a412f49c94ca | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_perilous.py | a1869b74937c931bf046a9fb6d81b59763772b4b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py |
#calss header
class _PERILOUS():
    """Auto-generated word-data record for the adjective "perilous"."""

    def __init__(self):
        # Lexical metadata for this dictionary entry.
        self.name = "PERILOUS"
        self.definitions = [u'extremely dangerous: ']
        self.specie = 'adjectives'
        # Graph links and accumulated facts start out empty.
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

    def run(self, obj1, obj2):
        """Record this word (lower-cased) as the 'properties' of *obj2*."""
        entry = {'properties': self.name.lower()}
        self.jsondata[obj2] = entry
        return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
13db48c20ac0aeb1293560cbe2a6d3b6db7c0101 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/alertsmanagement/azure-mgmt-alertsmanagement/generated_samples/alerts_summary.py | 8794d5a0df8d764bf286909b59972416f2b2b42d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,547 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.alertsmanagement import AlertsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-alertsmanagement
# USAGE
python alerts_summary.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print an alert summary grouped by severity and alert state."""
    credential = DefaultAzureCredential()
    client = AlertsManagementClient(
        credential=credential,
        subscription_id="1e3ff1c0-771a-4119-a03b-be82a51e232d",
    )
    summary = client.alerts.get_summary(groupby="severity,alertState")
    print(summary)
# x-ms-original-file: specification/alertsmanagement/resource-manager/Microsoft.AlertsManagement/preview/2019-05-05-preview/examples/Alerts_Summary.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
25433c3b9058296a7d47ba555fd4cc166775905f | 0fe11fbe31be719a253c0b2d9e41e20fedc2c40f | /dapper/mods/Lorenz95/boc10.py | e051e268a9250ed1e2e4aa60b8ac50ba136e0b59 | [
"MIT"
] | permissive | lijunde/DAPPER | 148ff5cefb92d1bb01c78bd4a82a6f1ecdebdad2 | dc92a7339932af059967bd9cf0a473ae9b8d7bf9 | refs/heads/master | 2020-12-10T21:44:54.468785 | 2019-09-24T18:18:36 | 2019-09-24T18:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | # From Fig. 1 of Bocquet 2010 "Beyond Gaussian Statistical Modeling
# in Geophysical Data Assimilation".
from dapper import *
from dapper.mods.Lorenz95 import core
# Time axis: dt = 0.05, total length T = 4**3 = 64; dkObs=1 presumably
# means an observation every model step -- confirm with dapper docs.
t = Chronology(0.05,dkObs=1,T=4**3,BurnIn=20)

# State dimension of the Lorenz-95 model.
Nx = 10

# Dynamics operator: Lorenz-95 step with no model noise.
Dyn = {
    'M' : Nx,
    'model': core.step,
    'noise': 0
}

# Initial state distribution: small isotropic Gaussian.
X0 = GaussRV(M=Nx, C=0.001)

# Observe every second state component, with observation noise 1.5.
jj = arange(0,Nx,2)
Obs = partial_Id_Obs(Nx,jj)
Obs['noise'] = 1.5

HMM = HiddenMarkovModel(Dyn,Obs,t,X0)
####################
# Suggested tuning
####################
# Why are these benchmarks superior to those in the article?
# We use, in the EnKF,
# - inflation instead of additive noise ?
# - Sqrt instead of perturbed obs
# - random orthogonal rotations.
# The particle filters are also probably better tuned:
# - jitter covariance proportional to ensemble (weighted) cov
# - no jitter on unique particles after resampling
#
# For a better "picture" of the relative performances,
# see benchmarks in presentation from SIAM_SEAS.
# Note: They are slightly unrealiable (short runs).
# Expected RMSE_a:
# cfgs += EnKF_N(N=8,rot=True,xN=1.3) # 0.31
# cfgs += PartFilt(N=50 ,NER=0.3 ,reg=1.7) # 1.0
# cfgs += PartFilt(N=100,NER=0.2 ,reg=1.3) # 0.36
# cfgs += PartFilt(N=800,NER=0.2 ,reg=0.8) # 0.25
# cfgs += OptPF( N=50 ,NER=0.25,reg=1.4,Qs=0.4) # 0.61
# cfgs += OptPF( N=100,NER=0.2 ,reg=1.0,Qs=0.3) # 0.37
# cfgs += OptPF( N=800,NER=0.2 ,reg=0.6,Qs=0.1) # 0.25
# cfgs += PFa( N=50 ,alpha=0.4,NER=0.5,reg=1.0) # 0.45
# cfgs += PFa( N=100,alpha=0.3,NER=0.4,reg=1.0) # 0.38
# cfgs += PFxN (N=30, NER=0.4, Qs=1.0,xN=1000) # 0.48
# cfgs += PFxN (N=50, NER=0.3, Qs=1.1,xN=100 ) # 0.43
# cfgs += PFxN (N=100,NER=0.2, Qs=1.0,xN=100 ) # 0.32
# cfgs += PFxN (N=400,NER=0.2, Qs=0.8,xN=100 ) # 0.27
# cfgs += PFxN (N=800,NER=0.2, Qs=0.6,xN=100 ) # 0.25
# cfgs += PFxN_EnKF(N=25 ,NER=0.4 ,Qs=1.5,xN=100) # 0.49
# cfgs += PFxN_EnKF(N=50 ,NER=0.25,Qs=1.5,xN=100) # 0.36
# cfgs += PFxN_EnKF(N=100,NER=0.20,Qs=1.0,xN=100) # 0.32
# cfgs += PFxN_EnKF(N=300,NER=0.10,Qs=1.0,xN=100) # 0.28
| [
"patrick.n.raanes@gmail.com"
] | patrick.n.raanes@gmail.com |
fa89d08d72199fe676941a86fa19de36749c1879 | 3b8013a29b6800f0f15569d74603346cef62e4e7 | /Reinforcement_learning_TUT/5.2_Prioritized_Replay_DQN/run_MountainCar.py | 127e28eff91a62bb7e6ee2a4f27218d1eb4197f4 | [] | no_license | pentium3/tutorials | 3978140a2038a988b6043cb4efcb5cab67e7ca89 | abf2ea80ba0e5c6800701908367aeb4b5ee2369b | refs/heads/master | 2021-01-22T22:28:32.828169 | 2017-03-20T03:05:18 | 2017-03-20T03:05:18 | 85,548,010 | 3 | 0 | null | 2017-03-20T07:38:15 | 2017-03-20T07:38:15 | null | UTF-8 | Python | false | false | 2,039 | py | """
The DQN improvement: Prioritized Experience Replay (based on https://arxiv.org/abs/1511.05952)
View more on 莫烦Python: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.8.0
"""
import gym
from RL_brain import DQNPrioritizedReplay
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
env = gym.make('MountainCar-v0')
env = env.unwrapped
env.seed(21)
MEMORY_SIZE = 10000
sess = tf.Session()
with tf.variable_scope('natural_DQN'):
RL_natural = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=False,
)
with tf.variable_scope('DQN_with_prioritized_replay'):
RL_prio = DQNPrioritizedReplay(
n_actions=3, n_features=2, memory_size=MEMORY_SIZE,
e_greedy_increment=0.00005, sess=sess, prioritized=True, output_graph=True,
)
sess.run(tf.global_variables_initializer())
def train(RL):
    """Run 20 episodes with agent *RL*; return a 2xN array stacking the
    episode indices over the cumulative step count at each episode's end."""
    total_steps = 0
    steps, episodes = [], []
    for i_episode in range(20):
        observation = env.reset()
        done = False
        while not done:
            action = RL.choose_action(observation)
            observation_, reward, done, info = env.step(action)
            if done:
                # Bonus reward for reaching the terminal state.
                reward = 10
            RL.store_transition(observation, action, reward, observation_)
            # Start learning only once the replay memory has filled up.
            if total_steps > MEMORY_SIZE:
                RL.learn()
            if not done:
                observation = observation_
                total_steps += 1
        print('episode ', i_episode, ' finished')
        steps.append(total_steps)
        episodes.append(i_episode)
    return np.vstack((episodes, steps))
his_natural = train(RL_natural)
his_prio = train(RL_prio)
plt.plot(his_natural[0, :], his_natural[1, :], c='b', label='natural DQN')
plt.plot(his_prio[0, :], his_prio[1, :], c='r', label='DQN with prioritized replay')
plt.legend(loc='best')
plt.ylabel('total training time')
plt.xlabel('episode')
plt.grid()
plt.show()
| [
"morvanzhou@hotmail.com"
] | morvanzhou@hotmail.com |
af19ca4c8a24e5a881d2b25b64da4fe3e104f5be | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/p12/tuple.py | 6cab125c1070a8dc3fdf2ce36d74f9586479b242 | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | t = ("张三",22,"未婚","有钱","likedog","Ture",'sb')
# Indexing, length, search and formatting operations on the fixed tuple `t`.
print(t[3])
print(t[5])
print(t[6])
print(len(t))
# index() returns the position of the first matching value.
print(t.index(22))
print(t.count("Ture"))
print(type(t))
print(t)
# %-formatting consumes the tuple's seven items in declaration order.
print("姓名: %s, \n年龄: %d, \n为什么: %s, \n爱好: %s, \n性别: %s, \n性格: %s, \n相貌: %s " % t )
for a in t:
    print(a)
| [
"1083027306@qq.com"
] | 1083027306@qq.com |
dcba45dc6badfffc9ef1b173f06e2e3c525948fc | 0184a8149c063dd7a350dac476ef705304864040 | /rsbeams/rsphysics/decoherence.py | 0c205e51bddb76c15d195d8f4c9b9e1a77bbcb5d | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | radiasoft/rsbeams | 71a8bed396aced7da3f4adc3daebe1a694284c51 | 732b40b233609418b5204b4d865568bf89386005 | refs/heads/master | 2023-06-10T15:11:11.905713 | 2023-05-01T22:11:52 | 2023-05-01T22:11:52 | 56,353,370 | 4 | 24 | NOASSERTION | 2023-05-26T23:03:41 | 2016-04-15T22:28:48 | Python | UTF-8 | Python | false | false | 5,783 | py | import numpy as np
from pathos.multiprocessing import Pool, cpu_count
from scipy.integrate import quad
from scipy.special import fresnel
class CentroidPosition:
    """
    Calculate position of an initially offset beam centroid vs turn.
    Assumes a waterbag distribution and arbitrary order in tune dependence with amplitude.
    Based on SSC-N-360.
    """
    def __init__(self, N, Z, nu0, mu):
        """
        Set up to perform integrations of centroid positions. Centroid positions can be found after setup by calling
        the `calculate_centroids` method.
        Note that mu contains the coefficients for the tune amplitude dependence with amplitude:
        mu_0 * a**2 + mu_1 * a**4 + ...
        Args:
            N: (int) Max turn number to calculate out to.
            Z: (float) Initial offset normalized by rms beam size at offset position.
            nu0: (float) Linear tune.
            mu: (floats in iterable object) Iterable containing mu values to desired order.
        """
        self.N = N
        self.Z = Z
        self.nu0 = nu0
        self.mu = mu

    def _reduced_integrand(self, a, n):
        """
        Calculate the integrand. Based on SSC-N-360 eq. 13.
        Args:
            a: (float or array of floats) Normalized amplitude on range [0, 2*Pi*N].
            n: (int) Turn number for calculation.
        Returns:
            Float
        """
        # Accumulate the phase advance: mu_0*a + mu_1*a^2/(2*pi*n) + ...
        # (powers rescaled by the 2*pi*n normalization of `a`).
        order = 1
        advance = 0
        for m in self.mu:
            advance += m * a ** order / (2. * np.pi * n) ** (order - 1)
            order += 1
        coeff = self.Z / (2 * n)
        const_slip = 2 * np.pi * self.nu0 * n
        angular_term = np.cos(const_slip) * np.cos(advance) + np.sin(const_slip) * np.sin(advance)
        # Calculate cutoff if a is float or array: the scalar comparison
        # raises ValueError for arrays, so fall back to np.less there.
        try:
            maxa = 1. * 2 * np.pi * n
            if a <= maxa:
                distr = angular_term / 1. / np.pi
            else:
                distr = 0.
        except ValueError:
            maxa = np.ones_like(a, dtype='float') * 2 * np.pi * n
            distr = angular_term / 1. / np.pi * np.less(a, maxa)
        return coeff * distr

    def integrate_any_order(self, turn=None):
        """
        Performs numerical integration over range [0, 2*Pi*n] for each turn out to N. Up to arbitrary order in a.
        Args:
            turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
        Returns:
            Float or array of floats
        """
        if turn is not None:
            n = turn
        else:
            n = self.N
        # Turn 0: centroid is still at its initial offset.
        if n == 0:
            return self.Z
        # noinspection PyTupleAssignmentBalance
        result, _ = quad(self._reduced_integrand,
                         0, 2 * np.pi * n,
                         args=n)
        return result

    def integrate_first_order(self, turn=None):
        """
        Exact value of integral if only a**2 term in tune dependent amplitude is used.
        Args:
            turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
        Returns:
            Float or array of floats
        """
        if turn is not None:
            n = turn
        else:
            n = self.N
        if n == 0:
            return self.Z
        xN = self.Z / (2. * np.pi * n * self.mu[0]) * \
            (np.cos(2 * np.pi * self.nu0 * n) * np.sin(2 * np.pi * n * self.mu[0]) +
             2. * np.sin(2 * np.pi * self.nu0 * n) * np.sin(
                np.pi * n * self.mu[0]) ** 2)
        return xN

    def integrate_second_order(self, turn=None):
        """
        Exact value of integral if only a**2 and a**4 terms in tune dependent amplitude are used.
        Args:
            turn: [None] (Int) If not None then specify a single turn to calculate the centroid position at.
        Returns:
            Float or array of floats
        """
        if turn is not None:
            n = turn
        else:
            n = self.N
        if n == 0:
            return self.Z
        # Closed form in terms of Fresnel integrals (scipy.special.fresnel).
        def integrand(u, N):
            fS, fC = fresnel((self.mu[0] * N * np.pi + self.mu[1] * u) / np.sqrt(self.mu[1] * N * np.pi**2))
            term1 = np.cos(np.pi * self.mu[0]**2 * N / (2. * self.mu[1]) + 2. * np.pi * self.nu0 * N)
            term2 = np.sin(np.pi * self.mu[0]**2 * N / (2. * self.mu[1]) + 2. * np.pi * self.nu0 * N)
            return fC * term1 + fS * term2
        xN = integrand(2 * np.pi * n, n) - integrand(0, n)
        return xN * self.Z / np.sqrt(4. * self.mu[1] * n)

    def calculate_centroids(self, p=None):
        """
        Perform integration to find centroid at all turns up to N. Multiprocessing pool used to calculate independent
        turn values.
        Will automatically use `integrate_first_order` or `integrate_second_order` if appropriate.
        Args:
            p: Specify number of processes for pool. If not given then `cpu_count` is used.
        Returns:
            array of floats
        """
        if p:
            pool_size = p
        else:
            pool_size = cpu_count()
        pool = Pool(pool_size)
        # attempt to speed things up by spreading out difficult integration values at the end of range
        # appeared to not work
        # x = []
        # for i in range(cpu_count()):
        #    x += range(N)[i::4]
        # Prefer the exact closed forms when the expansion order allows it.
        if len(self.mu) == 1:
            integration_function = self.integrate_first_order
        elif len(self.mu) == 2:
            integration_function = self.integrate_second_order
        else:
            integration_function = self.integrate_any_order
        x = range(self.N)
        results = pool.map(integration_function, x)
        pool.close()
        return results
| [
"chall@radiasoft.net"
] | chall@radiasoft.net |
f14a5f507d6c84a401f49bf011da731a8090d3e6 | 82aee3211216f55392d5a757eb57f02c859e9a28 | /Easy/680_validPalindrome_II.py | 69e73cef406dfb2488f769e23635c4306938a0dc | [] | no_license | Yucheng7713/CodingPracticeByYuch | 505d18095d4b9a35c1f3b23632a90a76d811b64a | 1461b10b8910fa90a311939c6df9082a8526f9b1 | refs/heads/master | 2022-05-01T11:51:00.612603 | 2022-04-18T09:46:55 | 2022-04-18T09:46:55 | 198,961,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | class Solution:
def validPalindrome(self, s):
    """Return True if *s* is (or can become, by deleting at most one
    character) a palindrome."""
    lo, hi = 0, len(s) - 1
    while lo < hi:
        if s[lo] == s[hi]:
            lo += 1
            hi -= 1
            continue
        # First mismatch: the string can only be repaired by dropping one
        # of the two mismatching characters; test both candidates.
        drop_left = s[:lo] + s[lo + 1:]
        drop_right = s[:hi] + s[hi + 1:]
        return drop_left == drop_left[::-1] or drop_right == drop_right[::-1]
    return True
# Quick manual smoke test: "abcdef" needs more than one deletion -> False.
s = Solution()
mystr = "abcdef"
print(s.validPalindrome(mystr))
"yuchengh@usc.edu"
] | yuchengh@usc.edu |
d573c33b815eaa794a8c2ddd9347b9ff3acb8449 | 4491549f0b1bbf5397ae0b56192605a7abcb61b0 | /python/Session-Management-2/models/SQL.py | 9f80310ec3149956a81008768c2ebd2f1a619fae | [
"Apache-2.0"
] | permissive | iNoSec2/skf-labs | 81e9d400ccac1007632add23bd50a094de1f50d5 | 8af9edc83e313be1578c5dee0fd4ecdf7ac18a32 | refs/heads/master | 2023-08-17T00:20:12.274684 | 2023-08-04T13:13:10 | 2023-08-04T13:13:10 | 235,376,119 | 0 | 0 | Apache-2.0 | 2023-08-05T00:31:43 | 2020-01-21T15:33:01 | Python | UTF-8 | Python | false | false | 1,288 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# SKF Labs - Security Knowledge Framework (SKF)
# Copyright (C) 2022, OWASP Foundation, Inc.
#
# This software is provided under a slightly modified version
# of The GNU Affero General Public License. See the accompanying LICENSE
# file for more information.
#
# Description:
# Database layer functionalities including:
# - User credential validation
#
# Author:
# Alex Romero (@NtAlexio2)
#
from config.sqlite import *
import hashlib
class DataAccess:
    """SQLite data-access layer for user lookup and credential checks."""

    def validateCredentials(self, username, password):
        """Return True if *username*/*password* match a stored user."""
        # NOTE(review): MD5 is a broken password hash; presumably intentional
        # here since this is a deliberately vulnerable SKF lab -- confirm.
        hash = hashlib.md5(password.encode()).hexdigest().lower()
        connection = create_db_connection()
        # Parameterized query -- not vulnerable to SQL injection.
        cursor = connection.execute('SELECT username, hash FROM Users WHERE username=? AND hash=?', (username, hash, ))
        return cursor.fetchone() is not None

    def checkUserExists(self, username):
        """Return True if a user with *username* exists."""
        connection = create_db_connection()
        cursor = connection.execute('SELECT username FROM Users WHERE username=?', (username, ))
        return cursor.fetchone() is not None

    def isAdmin(self, username):
        """Return True if the user's is_admin flag is set.

        Assumes the user exists; fetchone() returning None would raise here.
        """
        connection = create_db_connection()
        cursor = connection.execute('SELECT is_admin FROM Users WHERE username=?', (username, ))
        return bool(cursor.fetchone()[0])
| [
"glenntencate@gmail.com"
] | glenntencate@gmail.com |
861b5dc42fe75933ac0cd3d42acd0499ef6f55f1 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/carbon/common/script/entities/audioEmitter.py | 69d4790838f2e6414248d5702ab6b90aa4cedabd | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #Embedded file name: carbon/common/script/entities\audioEmitter.py
"""
Contains a set of available audio components.
"""
INITIAL_EVENT_NAME = 'initialEventName'
INITIAL_SOUND_ID = 'initialSoundID'
EMITTER_GROUP_NAME = 'groupName'
class AudioEmitterComponent:
    """Component data for an entity's audio emitter.

    All settings start unset (None) and are filled in by the entity system.
    """
    __guid__ = 'audio.AudioEmitterComponent'

    def __init__(self):
        self.initialEventName = self.initialSoundID = self.groupName = None
import carbon.common.script.util.autoexport as autoexport
exports = autoexport.AutoExports('audio', locals())
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
b275678714d301a028aa868acf30bec68fc76782 | 76de4fc4f00a04c8c9acc1e9e4a5fae12cf0c08a | /trunk/pyformex/examples/SpaceTrussRoof_abq.py | 0216d8a2ac79315d917755356de57ab4bf7795cf | [] | no_license | BackupTheBerlios/pyformex-svn | ec2361b1b9967918be65e892217a691a6f8b145d | f5404809095711334bbb938d9d119a69ad8fc260 | refs/heads/master | 2020-12-24T13:20:47.422165 | 2011-11-15T11:52:23 | 2011-11-15T11:52:23 | 40,749,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | py | #!/usr/bin/env pyformex
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""Double Layer Flat Space Truss Roof
level = 'advanced'
topics = ['FEA']
techniques = ['color']
"""
from plugins.properties import *
from plugins.fe_abq import *
import os
####
#Data
###################
dx = 1800 # Modular size [mm]
ht = 900 # Deck height [mm]
nx = 4 # number of bottom deck modules in x direction
ny = 5 # number of bottom deck modules in y direction
q = -0.005 #distributed load [N/mm^2]
#############
#Creating the model
###################
top = (Formex("1").replic2(nx-1,ny,1,1) + Formex("2").replic2(nx,ny-1,1,1)).scale(dx)
top.setProp(3)
bottom = (Formex("1").replic2(nx,ny+1,1,1) + Formex("2").replic2(nx+1,ny,1,1)).scale(dx).translate([-dx/2,-dx/2,-ht])
bottom.setProp(0)
T0 = Formex(4*[[[0,0,0]]]) # 4 times the corner of the top deck
T4 = bottom.select([0,1,nx,nx+1]) # 4 nodes of corner module of bottom deck
dia = connect([T0,T4]).replic2(nx,ny,dx,dx)
dia.setProp(1)
F = (top+bottom+dia)
# Show upright
createView('myview1',(0.,-90.,0.))
clear();linewidth(1);draw(F,view='myview1')
############
#Creating FE-model
###################
M = F.toMesh()
###############
#Creating elemsets
###################
# Remember: elems are in the same order as elements in F
topbar = where(F.prop==3)[0]
bottombar = where(F.prop==0)[0]
diabar = where(F.prop==1)[0]
###############
#Creating nodesets
###################
nnod=M.ncoords()
nlist=arange(nnod)
# Count how many elements meet at each node; this valence is used below to
# classify where in the truss grid each node sits.
count = zeros(nnod)
for n in M.elems.flat:
    count[n] += 1
# Valence-based node sets -- presumably: 8 = interior (field) nodes,
# 7/6 = top-deck edge/corner, 5/3 = bottom-deck edge/corner; confirm
# against the mesh topology.
field = nlist[count==8]
topedge = nlist[count==7]
topcorner = nlist[count==6]
bottomedge = nlist[count==5]
bottomcorner = nlist[count==3]
# Supports are the bottom-deck boundary; loads go on the top-deck boundary.
support = concatenate([bottomedge,bottomcorner])
edge = concatenate([topedge,topcorner])
########################
#Defining and assigning the properties
#############################
Q = 0.5*q*dx*dx
P = PropertyDB()
P.nodeProp(set=field,cload = [0,0,Q,0,0,0])
P.nodeProp(set=edge,cload = [0,0,Q/2,0,0,0])
P.nodeProp(set=support,bound = [1,1,1,0,0,0])
circ20 = ElemSection(section={'name':'circ20','sectiontype':'Circ','radius':10, 'cross_section':314.159}, material={'name':'S500', 'young_modulus':210000, 'shear_modulus':81000, 'poisson_ratio':0.3, 'yield_stress' : 500,'density':0.000007850})
# example of how to set the element type by set
P.elemProp(set=topbar,section=circ20,eltype='T3D2')
P.elemProp(set=bottombar,section=circ20,eltype='T3D2')
# alternatively, we can specify the elements by an index value
# in an array that we will pass in the Abqdata 'eprop' argument
P.elemProp(prop=1,section=circ20,eltype='T3D2')
# Since all elements have same characteristics, we could just have used:
# P.elemProp(section=circ20,elemtype='T3D2')
# But putting the elems in three sets allows for separate postprocessing
# Print node and element property databases
for p in P.nprop:
print p
for p in P.eprop:
print p
#############
#Writing the inputfile
###################
step = Step()
out = Output(type='field',variable='preselect')
res = [ Result(kind='element',keys=['S']),
Result(kind='node',keys=['U'])
]
model = Model(M.coords,M.elems)
if not checkWorkdir():
exit()
AbqData(model,P,[step],eprop=F.prop,out=[out],res=res).write('SpaceTruss')
# End
| [
"bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35"
] | bverheg@8d6f1305-3bde-0310-9e88-884b4813ce35 |
67370eb3a4b958731d7ec128f3d2da56dcf993f9 | 98bebd68f01daa7e328d06e8f6d98042b587995f | /D2/다리를지나는트럭/timecomplexity.py | 99512214389e7583dd4ebd2aa7fb5dd0ea316a05 | [] | no_license | Ysh096/programmers | f189108f2e0cd792697821e806558dea489254f9 | c391ee58df1554af91a7099817b208d6883adca8 | refs/heads/master | 2023-04-13T06:06:48.938489 | 2021-04-15T16:43:30 | 2021-04-15T16:43:30 | 332,446,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import timeit
# Timing demo: prepending by rebuilding the list ([0] + lst) vs. in-place insert.
start_time = timeit.default_timer()
test_list = [1, 2, 3]
for _ in range(10000):
    # Builds a brand-new list every iteration and rebinds the name.
    test_list = [0] + test_list
    # test_list.insert(0, 0)
terminate_time = timeit.default_timer()
print('덧셈: ', terminate_time - start_time)

start_time = timeit.default_timer()
test_list = [1, 2, 3]
print('insert 전: ', id(test_list))
for _ in range(10000):
    # test_list = [0] + test_list
    # Mutates the same list object in place (same id before and after).
    test_list.insert(0, 0)
terminate_time = timeit.default_timer()
print('insert 후: ', id(test_list))
print('insert:', terminate_time - start_time)
"skk7541@gmail.com"
] | skk7541@gmail.com |
65f8dcf11bee03d8da86bda4c16e8fef954c3273 | 52585c8d95cef15199c18ba1a76899d2c31329f0 | /05PythonCookbook/ch12Concurrency/13polling_multiple_thread_queques/pqueue.py | 91396071d9996401e4454a9b17767eed6909d23c | [] | no_license | greatabel/PythonRepository | c7a952257303a21083ed7d535274c339362bd126 | 836fcdd3f5c1b150122302685104fe51b5ebe1a3 | refs/heads/master | 2023-08-30T15:56:05.376391 | 2023-08-26T03:34:14 | 2023-08-26T03:34:14 | 29,392,599 | 33 | 6 | null | 2023-02-14T13:33:21 | 2015-01-17T13:54:58 | Python | UTF-8 | Python | false | false | 1,615 | py | import queue
import socket
import os
class PollableQueue(queue.Queue):
    """A Queue that can be handed to select()/poll().

    Every put() writes one byte into a private socket pair and every get()
    consumes one byte, so the read end is "readable" exactly when the
    queue is non-empty.
    """

    def __init__(self):
        super().__init__()
        self._putsocket, self._getsocket = self._make_socketpair()

    @staticmethod
    def _make_socketpair():
        # Native socketpair() on POSIX; emulate it over loopback elsewhere.
        if os.name == 'posix':
            return socket.socketpair()
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(('127.0.0.1', 0))
        listener.listen(1)
        writer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        writer.connect(listener.getsockname())
        reader, _ = listener.accept()
        listener.close()
        return writer, reader

    def fileno(self):
        # Lets select()/poll() wait on the queue directly.
        return self._getsocket.fileno()

    def put(self, item):
        super().put(item)
        self._putsocket.send(b'x')

    def get(self):
        self._getsocket.recv(1)
        return super().get()
if __name__ == '__main__':
    import select
    import threading
    import time

    def consumer(queues):
        '''
        Consumer that reads data on multiple queues simultaneously
        '''
        # Block until at least one queue is readable, then drain one item
        # from each ready queue.
        while True:
            can_read, _, _ = select.select(queues,[],[])
            for r in can_read:
                item = r.get()
                print('Got:', item)

    q1 = PollableQueue()
    q2 = PollableQueue()
    q3 = PollableQueue()
    # Daemon thread: terminates automatically with the main program.
    t = threading.Thread(target=consumer, args=([q1,q2,q3],))
    t.daemon = True
    t.start()

    # Feed data to the queues
    q1.put(1)
    q2.put(10)
    q3.put('hello')
    q2.put(15)

    # Give thread time to run
    time.sleep(1)
"greatabel1@126.com"
] | greatabel1@126.com |
7e6c9b781e4c749a5a0b3bde3a2fb1bfe9d9f012 | 38e0a6aa9df9c968135b348845abfa489cda4031 | /binhaishiPaper/binhaishiPaper/spiders/newsPaperSpider.py | 662ae17c09cb15907a2cbf17c062004b6274cae7 | [] | no_license | AReallyMan/everySpiders | bb923de508bd986bcf158728d17638c4ce608db8 | 19419ae5097a522ed0c88e9ab63aa62419c25b44 | refs/heads/master | 2022-09-20T02:18:06.205480 | 2020-06-03T06:33:47 | 2020-06-03T06:33:47 | 263,788,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | # -*- coding: utf-8 -*-
# @Time : 2020-06-04
# @Author : ZhangYangyang
# @Software: PyCharm
import scrapy
import datetime
import re
import time
from scrapy.spiders import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from ..items import BinhaishipaperItem
from ..settings import ELASTICSEARCH_TYPE
# 滨海时报
class NewpaperSpider(CrawlSpider):
    """Crawl spider for today's articles of the Binhai Times (滨海时报)."""
    name = 'newpaperSpider'
    # Today's date in the "YYYY/MMDD" form used in the site's article URLs.
    current_time = time.strftime("%Y/%m%d", time.localtime())
    today = datetime.date.today()
    start_urls = ['http://www.tjbhnews.com/finanec/', 'http://www.tjbhnews.com/life/',
                  'http://www.tjbhnews.com/xinwen/', 'http://bhsb.tjbhnews.com/']
    # Follow only article links published today (with or without a suffix id).
    rules = {
        Rule(LinkExtractor(allow='/'+current_time+'/\d+\.html'),
             callback='parse_item'),
        Rule(LinkExtractor(allow='/'+current_time+'/\d+_\d+\.html'),
             callback='parse_item')
    }

    def parse_item(self, response):
        """Extract one article into an item; skip URLs already seen in Redis."""
        item = BinhaishipaperItem()
        # NOTE(review): self.duplicate is not defined in this class --
        # presumably injected by a base class or middleware; confirm.
        if self.duplicate.redis_db.hexists(self.duplicate.redis_data_dict, response.url):
            print("该连接已被爬取")
        else:
            item['title'] = response.xpath("//div[@class='contTit']/font/text()").extract_first()
            # NOTE(review): identical xpath to the title above -- looks like
            # a copy-paste slip for the editor field; confirm against the page.
            editor = response.xpath("//div[@class='contTit']/font/text()").extract_first()
            if editor:
                item['editor'] = editor
            else:
                item['editor'] = ''
            item['publishtime'] = response.xpath("//span[@id='pubtime_baidu']/text()").extract_first()
            content = response.xpath("//div[@class='contTxt']/div").xpath('string(.)').extract_first()
            if content:
                # Keep only the CJK characters of the article body.
                content = re.findall(u"[\u4e00-\u9fa5]+", content)
                item['content'] = ''.join(content)
            else:
                item['content'] = ''
            item['fromwhere'] = response.xpath("//span[@id='source_baidu']/text()").extract_first()
            item['url'] = response.url
            item['spiderName'] = ELASTICSEARCH_TYPE
            item['spiderDesc'] = '滨海时报'
            item['siteType'] = '纸媒'
            item['source'] = '滨海时报'
            item['publicTimeStamp'] = int(time.mktime(self.today.timetuple()))
            item['insertTimeStamp'] = int(time.time() * 1000)
            yield item
| [
"969114624@qq.com"
] | 969114624@qq.com |
a5f00db22afe958b88f3b951e3100919543dcdf9 | 77c641fd0708b279dddbe01f6af32a8531b93185 | /marketsim/gen/_intrinsic/orderbook/of_trader.py | 3818a80cc72db0e909b4478f6c75154923f96bcb | [] | no_license | abensrhir/marketsimulator | aea286afd2bb2e0c8a547bfa879601aef21c0cd5 | f9f55c72fb34cdbec42b96737ca20839f26c6299 | refs/heads/master | 2020-12-13T20:55:55.795344 | 2014-02-24T22:52:24 | 2014-02-24T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | from marketsim import types
from marketsim.gen._out.trader._singleproxy import SingleProxy
from marketsim import getLabel
class Base(object):
    """Delegating facade: unknown attribute access is forwarded to ``self._impl``.

    Subclasses supply ``_impl`` (the wrapped order book, or a falsy value while
    the target has not been resolved yet).
    """
    _properties = {}
    def __getattr__(self, name):
        # Never delegate dunder lookups, and give up while _impl is unresolved.
        if name.startswith('__') or not self._impl:
            raise AttributeError
        return getattr(self._impl, name)
    def __str__(self):
        impl = self._impl
        if not impl:
            return ''
        return getLabel(impl)
    def __repr__(self):
        return str(self)
class _OfTrader_Impl(Base):
    """Resolves the order book of a concrete trader (``self.Trader``).

    ``Trader`` is expected to be injected by the surrounding typed-object
    machinery before ``_impl`` is first evaluated.
    """
    def __init__(self):
        # Aliased as the trader's asset when bound to a SingleProxy trader.
        self._alias = ["$(TraderAsset)"] if type(self.Trader) == SingleProxy else ['OfTrader']
        Base.__init__(self)
    @property
    def _impl(self):
        # The trader may not expose an order book yet; report "unresolved"
        # (None) instead of propagating AttributeError to callers.
        try:
            return self.Trader.orderBook
        except AttributeError:
            return None
class _Proxy_Impl(Base):
    """Late-bound order-book reference, resolved from the context in bind()."""
    def __init__(self):
        self._impl = None  # unresolved until bind() is called
        Base.__init__(self)
    @property
    def label(self):
        return self._impl.label if self._impl else '$(OrderBook)'
    def bind(self, ctx):
        # bind() must be called exactly once per proxy instance.
        assert self._impl is None
        self._impl = ctx.orderbook
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
53c3f734336bf253cd01c5cc8db9119e31f584a6 | a1aba83b90285def84cc425c0b089dd632a01a51 | /py千峰/day1函数/func10.py | ac2e75f8e338836fa45056bb68612f7bafc0267b | [] | no_license | 15929134544/wangwang | 8ada14acb505576f07f01e37c936500ee95573a0 | 47f9abbf46f8d3cbc0698cb64c043735b06940d4 | refs/heads/master | 2023-05-11T19:59:54.462454 | 2021-05-25T15:19:43 | 2021-05-25T15:19:43 | 328,119,916 | 1 | 1 | null | 2021-05-11T16:13:18 | 2021-01-09T09:33:29 | JavaScript | UTF-8 | Python | false | false | 873 | py | # global 变量的范围
# Global vs. local variables (scope demo).
# A variable declared outside any function is global: every function can read it.
name = '月月'
def func():
    # A variable declared inside a function is local, usable only within it.
    s = 'abcd'
    s += 'X'
    print(s, name)
def func1():
    global name  # Reading or printing a global needs no declaration, but to
    # REBIND it inside a function you must first declare it: global <name>.
    # After the rebinding below, the global value itself changes.
    # print(s, name)
    name += '弹吉他的小美女'
    print(name)
# Local variables may be reassigned freely inside a function,
# but a global cannot be rebound in a function body without `global`.
def func2():
    name = '小月月'  # a local that shadows the global of the same name
    name += '弹吉他的小美女'
    print(name)
# print(s) would raise NameError: s is local to func()
func1()
func2()
| [
"you@example.com"
] | you@example.com |
326693eabcfe1a9d41f11f7c08ff574a844a8568 | bdb1c323968cd9d5441a187a29ed7e25a2e4f07e | /slave_server/runner/test_scripts/bxtp_ivi_m/operation_lib/base_lib/makelog.py | b80127c92706da5548863284c0dc568cdaff53c0 | [] | no_license | liangzhaowang/automation_system | beee351dd9f09a51e2b81617ac5bee63023ea9b8 | f77ef433c2366253dc9d9fdb7c54911cb38ed3e8 | refs/heads/master | 2022-02-19T11:07:44.047000 | 2019-09-23T02:16:00 | 2019-09-23T02:16:00 | 209,732,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | #!/usr/bin/env python
# coding=utf-8
import os, sys
import logging.config
import logging
import time
class makelog():
    """Create a log directory/file and attach DEBUG console + file handlers.

    NOTE(review): this module is Python 2 (`print` statement below), and
    `filepath` is assumed to end with a path separator because it is joined
    to `filename` by plain string concatenation.
    """
    def __init__(self, filename="", filepath=""):
        self.filename = filename
        self.filepath = filepath
        self.makelogfile()
        self.logger = logging.getLogger()
        self.write()
    def makelogfile(self):
        # Ensure the log directory exists; create it via `mkdir` then retry.
        # NOTE(review): `mkdir` creates a single level only; os.makedirs would
        # be the portable equivalent.
        if(os.path.exists(self.filepath)):
            pass
            # cmd = 'gedit %s/%s'%(self.filepath, self.filename)
            # os.system(cmd)
        else:
            print self.filepath
            cmd = 'mkdir %s'%(self.filepath)
            os.system(cmd)
            self.makelogfile()
    def write(self):
        # Configure the root logger with DEBUG-level console and file handlers.
        # NOTE(review): handlers are appended on every construction, so building
        # several makelog instances duplicates output lines.
        logging.basicConfig(filename =self.filepath + self.filename)
        self.logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(self.filepath + self.filename)
        fh.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter("%(asctime)s [%(levelname)s] [%(funcName)s] %(message)s")
        ch.setFormatter(formatter)
        fh.setFormatter(formatter)
        self.logger.addHandler(ch)
        self.logger.addHandler(fh)
# Module-level smoke test: build a logger writing to ./log_info/<file_name>.
file_path = "./log_info/"
file_name = "log_message_20170912_075156.txt"
print file_path
log_info = makelog(filepath = file_path, filename = file_name)
| [
"zhaowangx.liang@intel.com"
] | zhaowangx.liang@intel.com |
45f051b6b85c7aa298756f2cbe0f5c6d051359c2 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_PYTHON/DATA_STRUC_PYTHON_NOTES/python-prac/mini-scripts/Python_RegEx_metacharacters__exactly_the_specifies_number_of_occurrences.txt.py | 75b978dc9b6e5add1c1041161a9bbb0f41ebfb6f | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 281 | py | import re
txt = "The rain in Spain falls mainly in the plain!"
# Look for every "a" that is immediately followed by exactly two "l" characters.
matches = re.findall("al{2}", txt)
print(matches)
# re.findall returns a (possibly empty) list, so its truthiness answers
# "did anything match at all?".
if matches:
    print("Yes, there is at least one match!")
else:
    print("No match")
# Author: Bryan G
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
fba115b2085192f099c00764f8bfc49dcc98f713 | 279caab77d0731196b82548a35e6e61334a2141e | /n_gram_segmentation/segmenter.py | e49613368a50556db728e07bc4ae5cd18bf4e66a | [] | no_license | VitalyRomanov/segmented-embeddings | 52d6382feb36c65e12c513535a7bc5f0793d85ce | 9a8d4b897214e73b0ce18621b9ac121085c88e3a | refs/heads/master | 2022-10-19T22:38:08.148913 | 2022-10-03T14:31:53 | 2022-10-03T14:31:53 | 172,964,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | import sys
import pickle
# CLI: python segmenter.py <word-list-file> <language-tag>
file_path = sys.argv[1]
lang = sys.argv[2]
# One word per line in the input file.
words = open(file_path, "r").read().strip().split("\n")
# Character n-gram window bounds (inclusive).
char_gram_size_min = 3
char_gram_size_max = 4
char_grams = set()
def segment_recursively(dest, win, word):
    """Recursively collect character n-grams of `word` into `dest`.

    Starting with a window of size `win`, the window first grows to
    char_gram_size_max at the head of the word, then slides right one
    character at a time at maximal size, and finally shrinks again near the
    tail — so head and tail contribute shorter grams than the middle.
    """
    dest.append(word[:win])
    if win < char_gram_size_max and len(word) > win:
        # Grow the window at the current start position.
        segment_recursively(dest, win+1, word)
    elif win == char_gram_size_max and len(word) > win:
        # Maximal window: slide one character to the right.
        segment_recursively(dest, win, word[1:])
    else:
        if win > char_gram_size_min:
            # Near the end of the word: shrink the window while sliding.
            segment_recursively(dest, win-1, word[1:])
def get_grams(w):
    """Return the character n-grams of word `w`.

    A word is wrapped in '<'/'>' boundary markers before segmentation; a
    word that already starts with '<' is treated as a single pre-marked gram.
    """
    if w[0] == '<':
        grams = [w]
    else:
        # BUG FIX: the original read the module-level loop variable `word`
        # here instead of the parameter `w`; it only worked by accident when
        # called from the `for word in words` loops below.
        w = '<' + w + '>'
        grams = []
        segment_recursively(grams, char_gram_size_min, w)
    return grams
# Write "<word>\t<gram gram ...>" per line and collect the global gram set.
with open("{}_word_{}_grams.txt".format(lang, char_gram_size_min), "w") as word_grams:
    for word in words:
        word_grams.write(word)
        word_grams.write("\t")
        grams = get_grams(word)
        for g in grams:
            word_grams.write(g)
            word_grams.write(" ")
            char_grams.add(g)
        word_grams.write("\n")
# Deterministic gram ids: sort the gram vocabulary, then number it.
grams = list(char_grams)
grams.sort()
grams_dict = {}
for id_, g in enumerate(grams):
    grams_dict[g] = id_
print(len(grams))
# Map each word id (its index in `words`) to the ids of its grams.
word2gram = {}
for id_, word in enumerate(words):
    word2gram[id_] = [grams_dict[g] for g in get_grams(word)]
pickle.dump(word2gram, open("%s_word2segment.pkl" % lang, "wb"))
pickle.dump(word2gram, open("%s_word2segment.pkl" % lang, "wb"))
pickle.dump(grams_dict, open("%s_segment2id.pkl" % lang , "wb")) | [
"mortiv16@gmail.com"
] | mortiv16@gmail.com |
ed08a844307b2d776880dd97684ac94e4920196a | da9c4a9a92d49d2fb2983a54e0f64c2a1ce8aa19 | /symphony/cli/pyinventory/graphql/mutation/edit_equipment_port.py | 43a16d01545cd8442f49d0bcedae9fa6a4d02f20 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | rohan-prasad/magma | 347c370347724488215a0783504788eac41d8ec7 | 2c1f36d2fd04eae90366cc8b314eaab656d7f8ad | refs/heads/master | 2022-10-14T14:08:14.067593 | 2020-06-11T23:52:03 | 2020-06-11T23:54:27 | 271,671,835 | 0 | 0 | NOASSERTION | 2020-06-12T00:20:23 | 2020-06-12T00:17:39 | null | UTF-8 | Python | false | false | 3,035 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.link import LinkFragment, QUERY as LinkFragmentQuery
from ..fragment.property import PropertyFragment, QUERY as PropertyFragmentQuery
from ..input.edit_equipment_port import EditEquipmentPortInput
QUERY: List[str] = LinkFragmentQuery + PropertyFragmentQuery + ["""
mutation EditEquipmentPortMutation($input: EditEquipmentPortInput!) {
editEquipmentPort(input: $input) {
id
properties {
...PropertyFragment
}
definition {
id
name
portType {
id
name
}
}
link {
...LinkFragment
}
}
}
"""]
@dataclass
class EditEquipmentPortMutation(DataClassJsonMixin):
    """Typed wrapper for the EditEquipmentPort GraphQL mutation.

    NOTE: this file is marked @generated — regenerate rather than hand-edit.
    The nested dataclasses mirror the shape of the GraphQL response.
    """
    @dataclass
    class EditEquipmentPortMutationData(DataClassJsonMixin):
        @dataclass
        class EquipmentPort(DataClassJsonMixin):
            @dataclass
            class Property(PropertyFragment):
                pass
            @dataclass
            class EquipmentPortDefinition(DataClassJsonMixin):
                @dataclass
                class EquipmentPortType(DataClassJsonMixin):
                    id: str
                    name: str
                id: str
                name: str
                portType: Optional[EquipmentPortType]
            @dataclass
            class Link(LinkFragment):
                pass
            id: str
            properties: List[Property]
            definition: EquipmentPortDefinition
            link: Optional[Link]
        editEquipmentPort: EquipmentPort
    data: EditEquipmentPortMutationData
    @classmethod
    # fmt: off
    def execute(cls, client: GraphqlClient, input: EditEquipmentPortInput) -> EditEquipmentPortMutationData.EquipmentPort:
        # fmt: off
        """Run the mutation, report timings, and return the edited port.

        The query document is the de-duplicated join of QUERY; fragment order
        within a GraphQL document is not significant, so set() is safe here.
        Raises FailedOperationException (after logging) on OperationException.
        """
        variables = {"input": input}
        try:
            network_start = perf_counter()
            response_text = client.call(''.join(set(QUERY)), variables=variables)
            decode_start = perf_counter()
            res = cls.from_json(response_text).data
            decode_time = perf_counter() - decode_start
            network_time = decode_start - network_start
            client.reporter.log_successful_operation("EditEquipmentPortMutation", variables, network_time, decode_time)
            return res.editEquipmentPort
        except OperationException as e:
            raise FailedOperationException(
                client.reporter,
                e.err_msg,
                e.err_id,
                "EditEquipmentPortMutation",
                variables,
            )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
ed3b3e49d2373541f1c4a08baaea4b9b8235163d | b03497e9c38e27aac47792c30ad0e2945ed2fca9 | /mqtt.py | ca15170b9e14dc176b9a957e70000f7c57d3ba22 | [] | no_license | ThomasMoellerR/11_02_rpi_cube | c92522e0d2dd910a383c83dd49d55ddb06b0c1b4 | 54c2a8ea6e24a7fa358773a72dade8c1354d1b37 | refs/heads/master | 2020-04-17T11:41:15.158843 | 2019-11-02T13:26:15 | 2019-11-02T13:26:15 | 166,550,814 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | import paho.mqtt.client as pmc
import time
import queue
class c_mqtt:
    """paho-mqtt wrapper: reconnecting client plus a thread-safe inbound queue.

    Received messages are enqueued as (topic, payload-string) tuples and
    consumed via empty()/get().
    """
    def __init__(self, hostname = "192.168.178.52", port = "1880", sub_list = []):
        # NOTE(review): the mutable default `sub_list=[]` is shared by every
        # instance constructed without an explicit list; sub() mutates it.
        self.hostname = hostname
        self.port = port
        self.try_to_connect = True
        self.sub_list = sub_list
        self.connected = False
        self.q = queue.Queue()
        self.was_connected = False
        self.client = pmc.Client()
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
    def on_connect(self, client, userdata, flags, rc):
        # rc = result code (0 means success per the MQTT CONNACK spec)
        if rc == 0:
            print("Successfully connected to broker")
            self.connected = True
        else:
            print("Error while trying to connect to broker")
            self.connected = False
        # (Re-)subscribe to all requested topics after every connect.
        for topic in self.sub_list:
            self.client.subscribe(topic)
    def on_message(self, client, userdata, msg):
        t = msg.topic
        m = msg.payload.decode("utf-8")
        #print("Received", t + " "+ m)
        self.q.put((t, m))
    def loop(self):
        """Blocking driver: (re)connect when needed, then run the MQTT loop."""
        if self.try_to_connect:
            if self.was_connected == True:
                time.sleep(1)  # back off before a reconnect attempt
            print("Try to connect to broker", self.hostname, int(self.port))
            try:
                self.client.connect(self.hostname, int(self.port), 60)
                self.try_to_connect = False
                self.connected = True
                self.was_connected = True
            except Exception as e:
                print(e)
                self.connected = False
        if self.connected:
            try:
                # Blocks until the network loop fails; then schedule a reconnect.
                self.client.loop_forever()
            except Exception as e:
                print(e)
                self.try_to_connect = True
                self.connected = False
    def pub(self, topic, msg):
        if self.connected:
            self.client.publish(topic, msg, qos=0, retain=False)
    def set_connection_state(self, state):
        self.connected = state
    def get_connection_state(self):
        return self.connected
    def sub(self, topic):
        # NOTE(review): appended topics take effect on the next (re)connect,
        # when on_connect replays the subscription list.
        self.sub_list.append(topic)
    def empty(self):
        return self.q.empty()
    def get(self):
        return self.q.get()
| [
"test"
] | test |
53fa7be54b523395a65b6f0b0053527f50bfa22f | 4015291afebfd346da3fee4b1d5a775882b5b461 | /packages/service-library/src/servicelib/rest_constants.py | e03667f15f28a500dff9aaa58b93cb6ee3f2a129 | [
"MIT"
] | permissive | pcrespov/osparc-simcore | 3a8a6b5252038542f515c7e90d983ac6f1fb4de7 | eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596 | refs/heads/master | 2023-08-06T04:33:38.594066 | 2023-07-12T09:47:00 | 2023-07-12T09:47:00 | 130,357,545 | 0 | 1 | MIT | 2023-04-18T08:04:27 | 2018-04-20T12:10:41 | Python | UTF-8 | Python | false | false | 212 | py | # SEE https://pydantic-docs.helpmanual.io/usage/exporting_models/#modeldict
# Keyword arguments applied when serializing pydantic response models:
# emit field aliases, drop fields never set, but keep explicit defaults
# and None values in the payload.
RESPONSE_MODEL_POLICY = {
    "by_alias": True,
    "exclude_unset": True,
    "exclude_defaults": False,
    "exclude_none": False,
}
| [
"noreply@github.com"
] | pcrespov.noreply@github.com |
16c41ea1c7c2797237e891f1ac38e86347e93b15 | d59bf974dd42d74dae62f58c0272ceb246d935c9 | /7.2.py | 9ef0a1a85e8beb93d1c39e39aa74b67020b39afb | [] | no_license | Serega1000rr/Ser | 423d2de1ba1fcc1f3f684363b90f05d018fb1306 | e349cb8b5c7aea333a78e448e7edfaa6c13edd61 | refs/heads/main | 2023-05-14T02:39:47.855859 | 2021-06-07T10:46:56 | 2021-06-07T10:46:56 | 374,630,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | things=['mozarella','cinderella','salmonella']
print(things[0].capitalize())  # prints a capitalized copy; the list is unchanged
things[1]=things[1].upper()  # rebinding the element does mutate the list in place
print(things)
del things[2]  # remove the last element by index
print(things) | [
"unknown@example.com"
] | unknown@example.com |
09b95d91f2931902d3ccab96b894edd1818d2827 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/common/gun_rotation_shared.py | 04cdb63b1822d5294990244854eebc8db0ae3d35 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,744 | py | # 2017.02.03 21:54:59 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/gun_rotation_shared.py
import BigWorld
import Math
from math import pi
from constants import IS_CLIENT, IS_CELLAPP
from debug_utils import *
if IS_CELLAPP:
from server_constants import MAX_VEHICLE_RADIUS
def calcPitchLimitsFromDesc(turretYaw, pitchLimitsDesc):
    """Return the gun pitch limits at the given turret yaw.

    Delegates to the native BigWorld helper, passing the min/max pitch tables
    from the vehicle descriptor.
    """
    minPitch = pitchLimitsDesc['minPitch']
    maxPitch = pitchLimitsDesc['maxPitch']
    return BigWorld.wg_calcGunPitchLimits(turretYaw, minPitch, maxPitch)
def encodeAngleToUint(angle, bits):
    """Quantize an angle in [-pi, pi) onto a `bits`-wide unsigned integer.

    Codes wrap modulo 2**bits, so +pi encodes to the same code as -pi.
    """
    levels = 1 << bits
    code = int(round(levels * (angle + pi) / (pi * 2.0)))
    return code & (levels - 1)
def decodeAngleFromUint(code, bits):
    """Inverse of encodeAngleToUint: map a `bits`-wide code back into [-pi, pi)."""
    levels = 1 << bits
    return pi * 2.0 * code / levels - pi
def encodeRestrictedValueToUint(angle, bits, minBound, maxBound):
    """Quantize `angle` within [minBound, maxBound] onto a `bits`-wide code.

    Out-of-range values saturate at 0 / 2**bits - 1; a degenerate range
    (minBound == maxBound) always encodes to 0.
    """
    if maxBound == minBound:
        t = 0
    else:
        # Normalized position in the range, clamped to [0, 1] (inlined _clamp).
        t = (angle - minBound) / (maxBound - minBound)
        t = min(max(t, 0.0), 1.0)
    mask = (1 << bits) - 1
    return int(round(mask * t)) & mask
def decodeRestrictedValueFromUint(code, bits, minBound, maxBound):
    """Inverse of encodeRestrictedValueToUint: map a code into [minBound, maxBound]."""
    span = maxBound - minBound
    t = float(code) / ((1 << bits) - 1)
    return minBound + t * span
def encodeGunAngles(yaw, pitch, pitchLimits):
    """Pack yaw (10 bits, full circle) and pitch (6 bits, restricted range)
    into one 16-bit code."""
    return encodeAngleToUint(yaw, 10) << 6 | encodeRestrictedValueToUint(pitch, 6, *pitchLimits)
def decodeGunAngles(code, pitchLimits):
    """Unpack a 16-bit gun-angle code into a (yaw, pitch) tuple."""
    return (decodeAngleFromUint(code >> 6 & 1023, 10), decodeRestrictedValueFromUint((code & 63), 6, *pitchLimits))
def _clamp(minBound, value, maxBound):
if value < minBound:
return minBound
if value > maxBound:
return maxBound
return value
def isShootPositionInsideOtherVehicle(vehicle, turretPosition, shootPosition):
    """True if the turret->shoot segment ends inside another nearby vehicle.

    The candidate-vehicle query differs per process: the client scans the
    arena's entity table, the cell app uses a spatial range query.
    (Python 2 code: note iterkeys below.)
    """
    if IS_CLIENT:
        def getNearVehicles(vehicle, shootPosition):
            nearVehicles = []
            arenaVehicles = BigWorld.player().arena.vehicles
            for id in arenaVehicles.iterkeys():
                v = BigWorld.entities.get(id)
                if v and not v.isPlayerVehicle:
                    nearVehicles.append(v)
            return nearVehicles
    elif IS_CELLAPP:
        def getNearVehicles(vehicle, shootPosition):
            return vehicle.entitiesInRange(MAX_VEHICLE_RADIUS, 'Vehicle', shootPosition)
    nearVehicles = getNearVehicles(vehicle, shootPosition)
    for v in nearVehicles:
        # Cheap bounding-sphere rejection before the precise segment test.
        if shootPosition.distTo(v.position) < v.typeDescriptor.boundingRadius and isSegmentCollideWithVehicle(v, turretPosition, shootPosition):
            return True
    return False
def isSegmentCollideWithVehicle(vehicle, startPoint, endPoint):
    """True if the world-space segment start->end hits any attached component
    of `vehicle` (the gun itself is excluded).

    The segment is transformed into vehicle space once, then into each
    component's local space for the hit test.
    """
    if IS_CLIENT:
        def getVehicleSpaceMatrix(vehicle):
            toVehSpace = Math.Matrix(vehicle.model.matrix)
            toVehSpace.invert()
            return toVehSpace
        def getVehicleComponents(vehicle):
            return vehicle.getComponents()
    elif IS_CELLAPP:
        def getVehicleSpaceMatrix(vehicle):
            toVehSpace = Math.Matrix(vehicle.mover.matrix)
            toVehSpace.invert()
            return toVehSpace
        def getVehicleComponents(vehicle):
            return vehicle.getComponents(vehicle.gunAngles)
    toVehSpace = getVehicleSpaceMatrix(vehicle)
    vehStartPoint = toVehSpace.applyPoint(startPoint)
    vehEndPoint = toVehSpace.applyPoint(endPoint)
    for compDescr, toCompSpace, isAttached in getVehicleComponents(vehicle):
        # Skip detached parts, and never collide against the gun component.
        if not isAttached or compDescr.get('itemTypeName') == 'vehicleGun':
            continue
        compStartPoint = toCompSpace.applyPoint(vehStartPoint)
        compEndPoint = toCompSpace.applyPoint(vehEndPoint)
        collisions = compDescr['hitTester'].localAnyHitTest(compStartPoint, compEndPoint)
        if collisions is not None:
            return True
    return False
def getLocalAimPoint(vehicleDescriptor):
    """Return the vehicle-local aim point: hull bbox center plus half the
    turret offset, with the turret's Z offset capped at 20% of hull length.

    Falls back to the origin when no descriptor is available.
    """
    if vehicleDescriptor is None:
        return Math.Vector3(0.0, 0.0, 0.0)
    else:
        hullBox = vehicleDescriptor.hull['hitTester'].bbox
        hullPosition = vehicleDescriptor.chassis['hullPosition']
        middleX = (hullBox[0].x + hullBox[1].x) * 0.5 + hullPosition.x
        middleZ = (hullBox[0].z + hullBox[1].z) * 0.5 + hullPosition.z
        calculatedHullPosition = (middleX, hullPosition.y, middleZ)
        turretPosition = vehicleDescriptor.hull['turretPositions'][0] * 0.5
        # Cap the turret's longitudinal offset to 20% of the hull's Z extent.
        maxZOffset = abs(hullBox[1].z - hullBox[0].z) * 0.2
        turretPosition.z = max(-maxZOffset, min(maxZOffset, turretPosition.z))
        localAimPoint = calculatedHullPosition + turretPosition
        return localAimPoint
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\common\gun_rotation_shared.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:54:59 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
24f1ef6fd36b61bb20469a5bfc7613033a19d292 | c411c5513ec5d58eb0e0edab0b6a697974d638fb | /model/DeepLabV3.py | 0e3406abccfff10e00297a28aed6ff7b0ce8b37f | [] | no_license | blue88blue/Segmentation | ab7f9dec4ab1ab4cdb4b8ca5af0cb9e1a560e20f | 69c4db1897a550a08a63811ffbb817754c20fbf2 | refs/heads/master | 2023-03-01T06:58:49.405779 | 2021-01-27T02:07:56 | 2021-01-27T02:07:56 | 296,049,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,784 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from model.segbase import SegBaseModel
from model.model_utils import init_weights, _FCNHead
class DeepLabV3(SegBaseModel):
    r"""DeepLabV3

    Parameters
    ----------
    nclass : int
        Number of categories for the training dataset.
    backbone : string
        Pre-trained dilated backbone network type (default:'resnet50'; 'resnet50',
        'resnet101' or 'resnet152').
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`nn.BatchNorm`;
        for Synchronized Cross-GPU BachNormalization).
    aux : bool
        Auxiliary loss.
    Reference:
        Chen, Liang-Chieh, et al. "Rethinking atrous convolution for semantic image segmentation."
        arXiv preprint arXiv:1706.05587 (2017).
    """

    def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False, dilated=False, **kwargs):
        super(DeepLabV3, self).__init__(backbone, pretrained_base=pretrained_base, dilated=dilated, **kwargs)
        # Head consumes the deepest backbone stage (c4).
        self.head = _DeepLabHead(self.base_channel[-1], n_class, **kwargs)
        self.aux = aux
        if self.aux:
            # Auxiliary FCN head on c3 (256 channels for the resnet34 default).
            self.auxlayer = _FCNHead(256, n_class, **kwargs)

    def forward(self, x):
        size = x.size()[2:]  # input spatial size, for upsampling back
        _, _, c3, c4 = self.base_forward(x)
        outputs = dict()
        x = self.head(c4)
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        outputs.update({"main_out": x})
        if self.aux:
            auxout = self.auxlayer(c3)
            auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
            outputs.update({"auxout": [auxout]})
        return outputs
class _DeepLabHead(nn.Module):
    """Segmentation head: ASPP context module, then a 3x3 conv classifier."""
    def __init__(self, in_channel, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_DeepLabHead, self).__init__()
        self.aspp = _ASPP(in_channel, [12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
        self.block = nn.Sequential(
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            norm_layer(256, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True),
            nn.Dropout(0.1),
            nn.Conv2d(256, nclass, 1)
        )
    def forward(self, x):
        x = self.aspp(x)
        return self.block(x)
class _ASPPConv(nn.Module):
def __init__(self, in_channels, out_channels, atrous_rate, norm_layer, norm_kwargs):
super(_ASPPConv, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=atrous_rate, dilation=atrous_rate, bias=False),
norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
nn.ReLU(True)
)
def forward(self, x):
return self.block(x)
class _AsppPooling(nn.Module):
    """Image-level ASPP branch: global average pool -> 1x1 conv -> upsample
    back to the input resolution."""
    def __init__(self, in_channels, out_channels, norm_layer, norm_kwargs, **kwargs):
        super(_AsppPooling, self).__init__()
        self.gap = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
    def forward(self, x):
        size = x.size()[2:]
        pool = self.gap(x)
        # Broadcast the 1x1 global context back to the feature-map size.
        out = F.interpolate(pool, size, mode='bilinear', align_corners=True)
        return out
class _ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: a 1x1 conv, three atrous 3x3 convs and
    an image-pooling branch, concatenated and projected to `out_channels`."""
    def __init__(self, in_channels, atrous_rates, norm_layer, norm_kwargs, out_channels=256, **kwargs):
        super(_ASPP, self).__init__()
        self.b0 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True)
        )
        rate1, rate2, rate3 = tuple(atrous_rates)
        self.b1 = _ASPPConv(in_channels, out_channels, rate1, norm_layer, norm_kwargs)
        self.b2 = _ASPPConv(in_channels, out_channels, rate2, norm_layer, norm_kwargs)
        self.b3 = _ASPPConv(in_channels, out_channels, rate3, norm_layer, norm_kwargs)
        self.b4 = _AsppPooling(in_channels, out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            norm_layer(out_channels, **({} if norm_kwargs is None else norm_kwargs)),
            nn.ReLU(True),
            nn.Dropout(0.5)
        )
    def forward(self, x):
        feat1 = self.b0(x)
        feat2 = self.b1(x)
        feat3 = self.b2(x)
        feat4 = self.b3(x)
        feat5 = self.b4(x)
        # Fuse all five branches along channels, then project back down.
        x = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
        x = self.project(x)
        return x
| [
"805207107@qq.com"
] | 805207107@qq.com |
f2190533c3b9802af9fa0c749b96108bf2036c1a | 74a1b51082e18a152626eb8044ab5d598283dacb | /easy/leetCode1646.py | 63c66927a02405ce8571af38833036cebcea6577 | [] | no_license | git874997967/LeetCode_Python | 6eb7d869d3737e946a8c6f0c51899a80bf03d650 | 1248cd19ab0d9d8aba503c487e163808c1d107cb | refs/heads/master | 2023-08-22T13:24:15.612040 | 2021-09-20T05:53:07 | 2021-09-20T05:53:07 | 340,973,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | #1646. Get Maximum in Generated Array
def getMaximumGenerated(n):
    """LeetCode 1646: return max of the generated array nums of length n + 1.

    nums[0] = 0, nums[1] = 1, nums[2*i] = nums[i],
    nums[2*i + 1] = nums[i] + nums[i + 1].

    BUG FIX: the even case appended the index `i // 2` itself instead of the
    value `arr[i // 2]`, giving wrong answers for n >= 4 (e.g. 4 instead of 3
    for n == 7). Also removes the leftover debug print.
    """
    if n == 0:
        return 0
    arr = [0, 1]
    for i in range(2, n + 1):
        if i % 2 == 0:
            arr.append(arr[i // 2])
        else:
            arr.append(arr[(i + 1) // 2] + arr[(i - 1) // 2])
    return max(arr)
# Ad-hoc smoke calls; results are discarded (nothing is printed or asserted).
getMaximumGenerated(7)
getMaximumGenerated(2)
getMaximumGenerated(3)
| [
"g8749979677@gmail.com"
] | g8749979677@gmail.com |
403d845645672867f12a3e55739cd6c86e6594d3 | b31c0f0d1e8a3bf575e6b86591ec1071cd9a8a3d | /mlonmcu/platform/microtvm/microtvm_zephyr_target.py | c70668791002be5ca39259d7331daa22b90d3ca3 | [
"Apache-2.0"
] | permissive | tum-ei-eda/mlonmcu | e75238cd7134771217153c740301a8327a7b93b1 | f1b934d5bd42b5471d21bcf257bf88c055698918 | refs/heads/main | 2023-08-07T15:12:13.466944 | 2023-07-15T13:26:21 | 2023-07-15T13:26:21 | 448,808,394 | 22 | 4 | Apache-2.0 | 2023-06-09T23:00:19 | 2022-01-17T08:20:05 | Python | UTF-8 | Python | false | false | 2,969 | py | #
# Copyright (c) 2022 TUM Department of Electrical and Computer Engineering.
#
# This file is part of MLonMCU.
# See https://github.com/tum-ei-eda/mlonmcu.git for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
from mlonmcu.target.target import Target
from mlonmcu.logging import get_logger
from .microtvm_template_target import TemplateMicroTvmPlatformTarget
logger = get_logger()
class ZephyrMicroTvmPlatformTarget(TemplateMicroTvmPlatformTarget):
    """microTVM platform target for Zephyr boards.

    Exposes the Zephyr project-API options and injects the Zephyr/SDK
    environment variables needed by west builds.
    """
    FEATURES = Target.FEATURES + []
    # Per-target configuration defaults; keys mirror the microTVM Zephyr
    # template's project options.
    DEFAULTS = {
        **Target.DEFAULTS,
        "extra_files_tar": None,
        "project_type": "host_driven",
        "zephyr_board": "",
        # "zephyr_base": "?",
        # "west_cmd": "?",
        "verbose": False,
        "warning_as_error": True,
        "compile_definitions": "",
        # "config_main_stack_size": None,
        "config_main_stack_size": "16384",
        "gdbserver_port": None,
        "nrfjprog_snr": None,
        "openocd_serial": None,
        "port": None,  # Workaround to overwrite esptool detection
    }
    REQUIRED = Target.REQUIRED + ["zephyr.install_dir", "zephyr.sdk_dir"]
    def __init__(self, name=None, features=None, config=None):
        super().__init__(name=name, features=features, config=config)
        self.template_path = None
        # Subset of DEFAULTS forwarded to the project API as options.
        self.option_names = [
            "extra_files_tar",
            "project_type",
            "zephyr_board",
            # "verbose",
            "warning_as_error",
            "compile_definitions",
            "config_main_stack_size",
            "gdbserver_port",
            "nrfjprog_snr",
            "openocd_serial",
        ]
        # self.platform = platform
        # self.template = name2template(name)
    @property
    def zephyr_install_dir(self):
        return Path(self.config["zephyr.install_dir"])
    @property
    def port(self):
        # Serial port override (see DEFAULTS["port"] workaround above).
        return self.config["port"]
    @property
    def zephyr_sdk_dir(self):
        return Path(self.config["zephyr.sdk_dir"])
    def get_project_options(self):
        ret = super().get_project_options()
        ret.update({"zephyr_base": self.zephyr_install_dir / "zephyr"})
        return ret
    def update_environment(self, env):
        # Environment required by Zephyr's build system (west/CMake).
        super().update_environment(env)
        env["ZEPHYR_BASE"] = str(self.zephyr_install_dir / "zephyr")
        env["ZEPHYR_SDK_INSTALL_DIR"] = str(self.zephyr_sdk_dir)
        if self.port:
            env["ESPTOOL_PORT"] = self.port
| [
"philipp.van-kempen@tum.de"
] | philipp.van-kempen@tum.de |
0a363bd967dc4a9e87872bd8e62484085cbd6476 | cc2df07a053b0ee13b05fe53ea9463033dd70c36 | /app/models.py | cad45bdeca5f36778a00f25f80049c6608156b71 | [] | no_license | sinjorjob/django-progress-bar | 5d0263d0cacc867fcc4ac6e5d07b37833ab7c849 | d948663231859b3485b7a35608f1c97246f952b7 | refs/heads/master | 2023-07-01T19:19:30.712511 | 2021-08-07T00:19:28 | 2021-08-07T00:19:28 | 393,536,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from django.db import models
class FileUpload(models.Model):
    """An uploaded image plus a display title (verbose names are Japanese)."""
    title = models.CharField(verbose_name="画像のタイトル", max_length=100)
    image = models.ImageField(verbose_name="画像",upload_to="images/upload_files/")
def __str__(self):
return self.title | [
"sinforjob@gmail.com"
] | sinforjob@gmail.com |
edaccd77699ca8d4dccc010070da555c9528b148 | c05ab2b704fd779f0ea76f4bd69ee58b68ab4bb7 | /resale_market_place/accounts/migrations/0001_initial.py | c619d03fb096b2a9f94af9a8cb8d9b58a3d08cbd | [] | no_license | cmrajib/django_restaurant | 8aaaa73937fe76768c88149e58417b21bacacba7 | e09a6d6855eb79d30ae5adfa2720e9c86960ecd0 | refs/heads/main | 2023-02-28T18:30:47.415293 | 2021-01-12T20:03:35 | 2021-01-12T20:03:35 | 329,099,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Generated by Django 3.1.4 on 2021-01-08 08:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the accounts app: a custom User
    # (email login, no username field) and its one-to-one Profile.
    # Applied migrations should not be hand-edited; add a new migration
    # for schema changes.
    initial = True
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log in the site', verbose_name='Staff')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treatea as active', verbose_name='active')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(blank=True, max_length=264, null=True)),
                ('full_name', models.CharField(blank=True, max_length=264, null=True)),
                ('address_1', models.TextField(blank=True, max_length=300, null=True)),
                ('city', models.CharField(blank=True, max_length=40, null=True)),
                ('zipcode', models.CharField(blank=True, max_length=10, null=True)),
                ('country', models.CharField(blank=True, max_length=20, null=True)),
                ('phone', models.CharField(blank=True, max_length=20, null=True)),
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"cmrajib@gmail.com"
] | cmrajib@gmail.com |
a96def7f2dc97e51d0ec0b74e35ba3f491feb1d4 | 312d8dbbf980bf164f210e7935b17dc08d64ff87 | /Model/repeat3_attribute_prediction_exist_PTS_utilize_all/Baseline/main.py | f40523340de517fcd318799d311d421cece624ef | [] | no_license | hsack6/OWGP_NBA | 27dafbd6e59c17ce4a66e92132ee56782e2126bf | 56656efb5884cd9f806e476a92c5e6485c71adeb | refs/heads/master | 2023-02-25T09:52:05.165494 | 2021-02-03T12:44:04 | 2021-02-03T12:44:04 | 288,363,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | import argparse
import random
import pandas as pd
from utils.inference import inference
from utils.data.dataset import BADataset
from utils.data.dataloader import BADataloader
import sys
import os
current_dir = os.path.dirname(os.path.abspath("__file__"))
sys.path.append( str(current_dir) + '/../../../' )
from setting_param import Model_repeat3_attribute_prediction_exist_PTS_utilize_all_InputDir as InputDir
from setting_param import Model_repeat3_attribute_prediction_exist_PTS_utilize_all_Baseline_OutputDir as OutputDir
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_worker
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_batchSize
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_init_L
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_state_dim
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_output_dim
from setting_param import repeat3_attribute_prediction_exist_PTS_utilize_all_idx as Attribute_idx
parser = argparse.ArgumentParser()
parser.add_argument('--workers', type=int, help='number of data loading workers', default=repeat3_attribute_prediction_exist_PTS_utilize_all_worker)
parser.add_argument('--batchSize', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_batchSize, help='input batch size')
parser.add_argument('--state_dim', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_state_dim, help='GGNN hidden state size')
parser.add_argument('--output_dim', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_output_dim, help='Model output state size')
parser.add_argument('--init_L', type=int, default=repeat3_attribute_prediction_exist_PTS_utilize_all_init_L, help='number of observation time step')
opt = parser.parse_args()
print(opt)
opt.dataroot = InputDir
opt.L = opt.init_L
def main(opt):
all_dataset = BADataset(opt.dataroot, opt.L, False, False, False)
all_dataloader = BADataloader(all_dataset, batch_size=opt.batchSize, \
shuffle=False, num_workers=opt.workers, drop_last=False)
opt.annotation_dim = 10
opt.n_edge_types = all_dataset.n_edge_types
opt.n_node = all_dataset.n_node
inference(all_dataloader, opt, OutputDir, Attribute_idx)
if __name__ == "__main__":
main(opt)
| [
"yamasaki.shohei@ist.osaka-u.ac.jp"
] | yamasaki.shohei@ist.osaka-u.ac.jp |
a501d1205297f9df8f05e56b27a9b3f0ea2f6122 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-redis/azure/mgmt/redis/models/redis_linked_server_with_properties.py | d4982b6a62d0bc2d014e157738b34cc984be1ebf | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,830 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class RedisLinkedServerWithProperties(ProxyResource):
"""Response to put/get linked server (with properties) for Redis cache.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param linked_redis_cache_id: Required. Fully qualified resourceId of the
linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Required. Location of the linked redis
cache.
:type linked_redis_cache_location: str
:param server_role: Required. Role of the linked server. Possible values
include: 'Primary', 'Secondary'
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
:ivar provisioning_state: Terminal state of the link between primary and
secondary redis cache.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'linked_redis_cache_id': {'required': True},
'linked_redis_cache_location': {'required': True},
'server_role': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'linked_redis_cache_id': {'key': 'properties.linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'properties.linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'properties.serverRole', 'type': 'ReplicationRole'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RedisLinkedServerWithProperties, self).__init__(**kwargs)
self.linked_redis_cache_id = kwargs.get('linked_redis_cache_id', None)
self.linked_redis_cache_location = kwargs.get('linked_redis_cache_location', None)
self.server_role = kwargs.get('server_role', None)
self.provisioning_state = None
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
37be7de681f708069186af725c1ab6a772547fcf | 1985e545df5ddfee396e87af6501fe517661cc77 | /bin/make_slides_github_action | 0cb25a135267d1a417524d79005623c2d1f311f1 | [
"MIT"
] | permissive | blester125/dotfiles | 46e657966582ba0b4552317107c85a44426ce9fd | 03b6856552040246b4d60330d0af6f37b440024d | refs/heads/master | 2023-07-24T11:13:11.989638 | 2023-07-12T13:30:15 | 2023-07-12T14:50:18 | 127,983,262 | 1 | 0 | null | 2022-02-12T23:09:41 | 2018-04-04T00:10:30 | Emacs Lisp | UTF-8 | Python | false | false | 2,132 | #!/usr/bin/python3
import os
import argparse
import textwrap
def slides_action(title):
title = title.replace(" ", "-")
return textwrap.dedent(r"""
name: Slides
on: [push]
jobs:
build:
runs-on: ubuntu-latest
container: blester125/beamer-image:latest
steps:
- uses: actions/checkout@v2
- name: Build Slides
run: |
make clean
make release
- uses: actions/upload-artifact@v1
if: success()
with:
name: artifacts
path: %s.pdf
commit:
needs: build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Delete slides
run: |
rm -rf %s.pdf
- uses: actions/download-artifact@v1
with:
name: artifacts
path: tmp
- name: Move artifacts
run: |
mv tmp/* .
rm -rf tmp
- name: Commit Files
shell: bash
run: |
git add -A
git diff-index --quiet HEAD \
|| git -c user.name="GitHub" -c user.email="noreply@github.com" commit \
--author="github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>" \
-m "Built Slides"
- name: Push changes
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
""".lstrip("\n")) % (title, title)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--title", required=True)
args = parser.parse_args()
workflow_dir = os.path.join(".github", "workflows")
if not os.path.exists(workflow_dir):
os.makedirs(workflow_dir)
with open(os.path.join(workflow_dir, "slides.yml"), "w") as wf:
wf.write(slides_action(args.title))
if __name__ == "__main__":
main()
| [
"blester125@gmail.com"
] | blester125@gmail.com | |
07224a9608ee55b656a3fe877c7771a8a6bd459d | 0eb0657ad8262952c2ec87e7605246d1bebb9cd0 | /storops/vnx/resource/mirror_view.py | 41275de1ffc7ba7358cda4a4dabb7be540b919d3 | [
"Apache-2.0"
] | permissive | cdailing/storops | b666d204bf5fc8a561c436a927e72de3f3d9d64f | d24c48b3cb58f02dce1f131e7448b5400904f8ee | refs/heads/master | 2021-01-19T10:02:01.132218 | 2017-03-29T08:01:55 | 2017-03-29T08:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops.exception import raise_if_err, \
VNXMirrorException, VNXMirrorImageNotFoundError
from storops.lib.common import check_text, instance_cache
from storops.vnx.enums import VNXMirrorViewRecoveryPolicy
from storops.vnx.enums import VNXMirrorViewSyncRate
import storops.vnx.resource.lun
from storops.vnx.resource import VNXCliResource, VNXCliResourceList
__author__ = 'Cedric Zhuang'
class VNXMirrorViewImage(VNXCliResource):
@staticmethod
def get_id(image):
if isinstance(image, VNXMirrorViewImage):
image = image.uid
try:
image = check_text(image)
except ValueError:
raise ValueError('invalid image id supplied: {}'
.format(image))
return image
@property
def wwn(self):
return self.uid
class VNXMirrorViewImageList(VNXCliResourceList):
@classmethod
def get_resource_class(cls):
return VNXMirrorViewImage
class VNXMirrorView(VNXCliResource):
def __init__(self, name=None, cli=None):
super(VNXMirrorView, self).__init__()
self._cli = cli
self._name = name
def _get_raw_resource(self):
return self._cli.get_mirror_view(name=self._name, poll=self.poll)
@classmethod
def create(cls, cli, name, src_lun, use_write_intent_log=True):
lun_clz = storops.vnx.resource.lun.VNXLun
lun_id = lun_clz.get_id(src_lun)
out = cli.create_mirror_view(name, lun_id, use_write_intent_log)
raise_if_err(out, default=VNXMirrorException)
return VNXMirrorView(name, cli=cli)
@classmethod
def get(cls, cli, name=None):
if name is None:
ret = VNXMirrorViewList(cli)
else:
ret = VNXMirrorView(name, cli)
return ret
def add_image(self, sp_ip, lun_id,
recovery_policy=VNXMirrorViewRecoveryPolicy.AUTO,
sync_rate=VNXMirrorViewSyncRate.HIGH):
if hasattr(sp_ip, 'spa_ip'):
sp_ip = sp_ip.spa_ip
lun_clz = storops.vnx.resource.lun.VNXLun
lun_id = lun_clz.get_id(lun_id)
out = self._cli.add_mirror_view_image(self._get_name(), sp_ip, lun_id,
recovery_policy, sync_rate,
poll=self.poll)
raise_if_err(out, default=VNXMirrorException)
def get_image(self, image_id):
for image in self.images:
if image.uid == image_id:
ret = image
break
else:
raise VNXMirrorImageNotFoundError(
'image {} not found in mirror view {}.'.format(
image_id, self._get_name()))
return ret
@staticmethod
def _get_image_id(image_id):
return VNXMirrorViewImage.get_id(image_id)
@property
@instance_cache
def primary_image(self):
for image in self.images:
if image.is_primary:
ret = image
break
else:
ret = None
return ret
@property
@instance_cache
def secondary_image(self):
for image in self.images:
if not image.is_primary:
ret = image
break
else:
ret = None
return ret
@property
def is_primary(self):
return self.remote_mirror_status == 'Mirrored'
@property
def primary_image_id(self):
return self.primary_image.uid
@property
def secondary_image_id(self):
image = self.secondary_image
if image is None:
raise VNXMirrorImageNotFoundError(
'no secondary image exists for this mirror view.')
return image.uid
def remove_image(self, image_id=None):
if image_id is None:
image_id = self.secondary_image_id
image_id = self._get_image_id(image_id)
out = self._cli.delete_mirror_view_image(self._get_name(), image_id,
poll=self.poll)
raise_if_err(out, default=VNXMirrorException)
def fracture_image(self, image_id=None):
if image_id is None:
image_id = self.secondary_image_id
image_id = self._get_image_id(image_id)
out = self._cli.mirror_view_fracture_image(self._get_name(), image_id,
poll=self.poll)
raise_if_err(out, default=VNXMirrorException)
def sync_image(self, image_id=None):
if image_id is None:
image_id = self.secondary_image_id
image_id = self._get_image_id(image_id)
out = self._cli.mirror_view_sync_image(self._get_name(), image_id,
poll=self.poll)
raise_if_err(out, default=VNXMirrorException)
def promote_image(self, image_id=None):
if image_id is None:
image_id = self.secondary_image_id
image_id = self._get_image_id(image_id)
out = self._cli.mirror_view_promote_image(self._get_name(), image_id,
poll=self.poll)
raise_if_err(out, default=VNXMirrorException)
def delete(self, force=False):
if force:
if self.secondary_image:
self.remove_image()
out = self._cli.delete_mirror_view(self._get_name())
raise_if_err(out, default=VNXMirrorException)
class VNXMirrorViewList(VNXCliResourceList):
@classmethod
def get_resource_class(cls):
return VNXMirrorView
def __init__(self, cli=None, src_lun=None, tgt_lun=None):
super(VNXMirrorViewList, self).__init__()
self._cli = cli
self._src_lun = src_lun
self._tgt_lun = tgt_lun
def _filter(self, item):
if self._src_lun is None and self._tgt_lun is None:
ret = True
else:
ret = False
pi = item.primary_image
si = item.secondary_image
if self._src_lun is not None:
ret |= self._src_lun.wwn == pi.logical_unit_uid
if self._tgt_lun is not None and si is not None:
ret |= self._tgt_lun.wwn == si.logical_unit_uid
return ret
def _get_raw_resource(self):
return self._cli.get_mirror_view(poll=self.poll)
| [
"cedric.zhuang@emc.com"
] | cedric.zhuang@emc.com |
b945dff1986dd6fd177224f743126fcb7d69fa73 | 826bffcd468f0979d05251e6578de13fff1029d4 | /tests/utils/_duplicate_console_output_check.py | 28e4ed0277ddd521f04db7319c8e9740b1c82837 | [
"MIT"
] | permissive | korepwx/madoka | 58b9b65cb1e3edf476d741ee1b5ffc67a9793a48 | 56675bd8220935c6a9c1571a886a84bed235fd3b | refs/heads/master | 2021-01-12T03:18:46.261909 | 2017-01-06T06:36:21 | 2017-01-06T06:36:21 | 78,190,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import os
import subprocess
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from madoka.utils import duplicate_console_output
with duplicate_console_output(sys.argv[1]):
print('from print')
sys.stdout.flush()
sys.stdout.write('from stdout.write\n')
sys.stdout.flush()
sys.stderr.write('from stderr.write\n')
sys.stderr.flush()
os.system('echo os.system+stdout')
subprocess.check_call([
sys.executable,
'-c',
'import sys; sys.stderr.write("os.system+stderr\\n");'
'sys.stderr.flush()'
])
| [
"public@korepwx.com"
] | public@korepwx.com |
bdc7e6c501652abf318d816b800f9404c0ac8d58 | 30ac2f9831ebd33885a6f48d153356c2e3731c26 | /Python_Stack/django/django_orm/project_marcela/app_marcela/models.py | 287fbc16c76b573eca3b93d52409f5d482b42977 | [] | no_license | pharaoht/Coding-Dojo-Projects | 192cfd8c36b6dadb049e81d31bd780c7ab340d1e | 504f71acbac3c006cf866a08aea0566058f81ce2 | refs/heads/master | 2023-05-11T21:09:17.316257 | 2021-06-08T00:54:09 | 2021-06-08T00:54:09 | 334,003,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,796 | py | from django.db import models
import re
import bcrypt
# Create your models here.
class UserManager(models.Manager):
def register_validator(self, formInfo):
errors = {}
EMAIL_REGEX = re.compile(
r'[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
emailChecker = User.objects.filter(email=formInfo['email'])
if len(formInfo['username']) == 0:
errors['usernamelenCheck'] = "User name field is required"
elif len(formInfo['username']) < 4:
errors['usernamelenCheck2'] = "User name my be at least 4 characters"
if len(formInfo['email']) == 0:
errors['emailLenCheck'] = "Email field is required"
elif not EMAIL_REGEX.match(formInfo['email']):
errors['emailnotmatch'] = 'Invalid email'
elif len(emailChecker) > 0:
errors['emailtaken'] = 'Sorry, that email is already resgistered'
if len(formInfo['password']) == 0:
errors['passworcheck'] = "A password is required"
elif len(formInfo['password']) < 8:
errors['passwordlengthcheck'] = "Password must be 8 characters long"
if formInfo['password'] != formInfo['cpassword']:
errors['psmatch'] = "Your Password must be the same as confirmed password"
return errors
def login_validator(self, formInfo):
errors = {}
emailChecker = User.objects.filter(email=formInfo['email'])
if len(formInfo['email']) == 0:
errors['emallencheck'] = "Email field can not be empty"
elif len(emailChecker) == 0:
errors['emailcheck'] = "Sorry that email, could not be found."
if len(formInfo['password']) == 0:
errors['passwordcheck'] = "Password field can not be empty"
if len(emailChecker) != 0:
if not bcrypt.checkpw(formInfo['password'].encode(), emailChecker[0].password.encode()):
errors['errorpassword'] = "Incorrect password"
return errors
class PostManager(models.Manager):
pass
class User(models.Model):
user_name = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add=True, null=True)
updated_at = models.DateTimeField(auto_now=True, null=True)
objects = UserManager()
class Post(models.Model):
title = models.CharField(max_length=255)
img = models.CharField(max_length=255)
posted_at = models.DateField()
desc = models.TextField()
posted_by = models.ForeignKey(
User, related_name="uploader", on_delete=models.CASCADE)
liked_by = models.ManyToManyField(User, related_name='likes')
created_at = models.DateTimeField(auto_now_add=True, null=True)
| [
"pharaohmanson@gmail.com"
] | pharaohmanson@gmail.com |
c5fc50bd9c9a74dc99617e3b0491bb8b90d339a0 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /hacker_rank/domains/algorithms/implementation/kaprekar-numbers_sunghyo.jung.py | fd4ff909df1f992a146f1752423d6d81c3c36433 | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | __author__ = 'sunghyo.jung'
p, q = int(raw_input()), int(raw_input())
def is_kaprekar(n):
if n == 1:
return True
d = len(str(n))
s = str(n * n)
d = len(s) - d
a = int(s[:d] if len(s[:d]) > 0 else '0')
b = int(s[d:] if len(s[d:]) > 0 else '0')
return n == a + b and b > 0
flag = False
for i in range(p, q + 1):
if is_kaprekar(i):
flag = True
print i,
if flag:
print ''
else:
print 'INVALID RANGE' | [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
5fb4190fbcf940f1f1faea22e2a81c53b7e0a41d | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/api/nn/test_hardshrink.py | e608bf97d4639e51495891a2b9089ff59b265844 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 1,951 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
test_hardshrink
"""
from apibase import APIBase
from apibase import randtool
import paddle
import pytest
import numpy as np
class TestNNHardshrink(APIBase):
"""
test
"""
def hook(self):
"""
implement
"""
self.types = [np.float32, np.float64]
# self.debug = True
# self.static = True
# enable check grad
# self.enable_backward = True
obj = TestNNHardshrink(paddle.nn.Hardshrink)
@pytest.mark.api_nn_Hardshrink_vartype
def test_hardshrink_base():
"""
base
"""
x = np.array([-1, 0.3, 2.5])
res = np.array([-1, 0, 2.5])
obj.base(res=res, data=x)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink():
"""
default
"""
x = np.array([-1, 0.3, 2.5])
res = np.array([-1, 0, 2.5])
obj.run(res=res, data=x)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink1():
"""
threshold = 0
"""
x = np.array([-1, 0.3, 2.5])
threshold = 0
res = np.array([-1, 0.3, 2.5])
obj.run(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_parameters
def test_hardshrink2():
"""
threshold = 0 x contains 0.01
"""
x = np.array([-1, -0.01, 2.5])
threshold = 0
res = np.array([-1, -0.01, 2.5])
obj.run(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_vartype
def test_hardshrink3():
"""
threshold = -1
"""
x = np.array([-1, -0.01, 2.5])
threshold = -1
res = np.array([-1, -0.01, 2.5])
obj.base(res=res, data=x, threshold=threshold)
@pytest.mark.api_nn_Hardshrink_exception
def test_hardshrink4():
"""
threshold = "1"
"""
x = np.array([-1, -0.01, 2.5])
threshold = "1"
# res = np.array([-1, -0.01, 2.5])
obj.exception(etype="InvalidArgumentError", data=x, threshold=threshold)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
0002086686eef8eef58f00ddfaa9a3c6b02f158c | 6a4d30fc4850a04f7370f30dbe1b6ffa9f616392 | /intake/migrations/0003_fillablepdf_name.py | a2eb63e24a5799a885567b47ac1da2dc90379735 | [
"MIT"
] | permissive | codeforamerica/intake | bb88d63ae914ce6eebeb957b26451dbb9cfaeb88 | 8755e64c13e2b6f9bef9bbee47011253f20e7e0d | refs/heads/master | 2021-11-26T09:51:10.084263 | 2021-11-15T19:37:23 | 2021-11-15T19:37:23 | 57,333,621 | 51 | 24 | MIT | 2021-09-29T00:07:26 | 2016-04-28T21:03:55 | Python | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-09 00:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intake', '0002_fillablepdf'),
]
operations = [
migrations.AddField(
model_name='fillablepdf',
name='name',
field=models.CharField(default='Sample pdf', max_length=50),
preserve_default=False,
),
]
| [
"bgolder@codeforamerica.org"
] | bgolder@codeforamerica.org |
d97b68cb2186fd512abb20603927df0360996948 | 2d6a2539055e1efd67f4252d11adfaf7ccd2720b | /principal/models/users.py | aa5c85bbaa81af1f8ba7fa27e4c174724c1fb67d | [] | no_license | r202-coe-psu/principal | 96997818073e6dd74df3590caef90ffec16709cc | 1781eacb880ecdf6dbe35cd1433530708eb29875 | refs/heads/master | 2021-09-16T13:56:28.940394 | 2018-06-21T13:52:53 | 2018-06-21T13:52:53 | 111,189,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,887 | py | import mongoengine as me
import datetime
from passlib.hash import bcrypt
from flask_login import UserMixin
class DataSource(me.EmbeddedDocument):
provider = me.StringField(required=True)
data = me.DictField()
created_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow)
updated_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow,
auto_now=True)
class User(me.Document, UserMixin):
username = me.StringField(required=True, unique=True)
password = me.StringField()
email = me.StringField()
first_name = me.StringField(required=True)
last_name = me.StringField(required=True)
status = me.StringField(required=True, default='disactive')
roles = me.ListField(me.StringField(), default=['user'])
created_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow)
updated_date = me.DateTimeField(required=True,
default=datetime.datetime.utcnow,
auto_now=True)
data_sources = me.EmbeddedDocumentListField(DataSource)
meta = {'collection': 'users'}
def get_user_id(self):
return self.id
def __get_salt(self, salt):
token = salt.replace(' ', '.')
return '{:.<22.22}'.format(token)
def set_password(self, password, salt=''):
self.password = bcrypt.using(rounds=16).hash(
password,
salt=self.__get_salt(salt))
def verify_password(self, password, salt=''):
return bcrypt.verify(password,
self.password)
def has_roles(self, roles):
for role in roles:
if role in self.roles:
return True
return False
| [
"boatkrap@gmail.com"
] | boatkrap@gmail.com |
01112d45a464ff83d5d7b67ff843feaee71e4958 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/5c70ccf26bba78eb4d1b/snippet.py | 599476d4f4e0a616826b54de9f63877e1de77cae | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 159 | py | import sys
def exec_(code, globals, locals):
if sys.version_info >= (3, 0):
exec(code, globals, locals)
else:
exec("exec code in globals, locals") | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
790e27357a9eeb93997620d266585d71e33aed53 | d0d1e07c984651f96bd9386d546c85c0341e46b2 | /timedata/control/action.py | 0af1dc5757f38f1299f42cfed2bc37a071cdf313 | [
"MIT"
] | permissive | timedata-org/timedata | 61cde905b1fe9eb60ac83ecbf5a5a2114793c45d | 3faac7450678aaccd4a283d0d41ca3e7f113f51b | refs/heads/master | 2020-04-11T12:03:57.962646 | 2019-06-09T10:05:16 | 2019-06-09T10:05:52 | 51,217,217 | 5 | 3 | null | 2016-09-18T16:20:43 | 2016-02-06T19:13:43 | C++ | UTF-8 | Python | false | false | 1,802 | py | from .ops import Ops
from .editor import Editor
from .receiver import Receiver
class Action(Receiver):
"""
An Action takes an incoming message, applies Ops to it, and then
uses it to set a value on a Editor.
"""
def __init__(self, address, ops=()):
self.address = Editor(address)
self.ops = Ops(*ops)
def set_project(self, project):
self.address.set_project(project)
def receive(self, values):
if self.ops:
if len(values) == 1:
values = [self.ops(values[0])]
else:
# TODO: They specified ops, but we can't use it.
# Should we warn here? Can we use the ops somehow?
pass
return self.address.receive(values)
def __bool__(self):
return bool(self.address or self.ops)
def __str__(self):
if self.ops:
return '%s->%s' % self.address, self.ops
return str(self.address)
@classmethod
def make(cls, action):
if isinstance(action, str):
return cls(action)
if isinstance(action, dict):
return cls(**action)
return cls(*action)
class ActionList(Receiver):
"""A list of Actions."""
def __init__(self, actions=None):
if isinstance(actions, (str, dict)):
actions = [actions]
self.actions = tuple(Action.make(a) for a in actions or ())
def set_project(self, project):
for a in self.actions:
a.set_project(project)
def receive(self, msg):
values = tuple(msg.values())
for action in self.actions:
action.receive(values)
def __bool__(self):
return bool(self.actions)
def __str__(self):
return ' + '.join(str(a) for a in self.actions)
| [
"tom@swirly.com"
] | tom@swirly.com |
f54ba2b9e658843fa70413a13b059f89900ab3dd | 0fe0ffe29ca6f76c6f15c85c8d82b09beaada246 | /third_party/catapult/tracing/tracing_build/strip_memory_infra_trace.py | e4c0cabb5366abaa54d428d65fbac289f6414a55 | [
"BSD-3-Clause"
] | permissive | hanpfei/chromium-net | 4dc8fd48cf3b05d89b11dc121f9c3abdd3ba962e | 9df8ce98c2a14fb60c2f581853011e32eb4bed0f | refs/heads/master | 2023-07-08T15:28:01.033104 | 2023-06-14T13:02:39 | 2023-06-14T13:02:39 | 65,541,033 | 297 | 73 | null | 2022-11-02T23:33:48 | 2016-08-12T09:25:34 | C++ | UTF-8 | Python | false | false | 3,059 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Filters a big trace keeping only the last memory-infra dumps."""
import collections
import gzip
import json
def FormatBytes(value):
units = ['B', 'kB', 'MB', 'GB']
while abs(value) >= 1000 and len(units) > 1:
value /= 1000
units = units.pop(0)
return '%3.1f %s' % (value, units[0])
def Main(argv):
if len(argv) < 2:
print 'Usage: %s trace.json[.gz]' % argv[0]
return 1
in_path = argv[1]
if in_path.lower().endswith('.gz'):
fin = gzip.open(in_path, 'rb')
else:
fin = open(in_path, 'r')
with fin:
print 'Loading trace (can take 1 min on a z620 for a 1GB trace)...'
trace = json.load(fin)
print 'Done. Read ' + FormatBytes(fin.tell())
print 'Filtering events'
phase_count = collections.defaultdict(int)
out_events = []
global_dumps = collections.OrderedDict()
if isinstance(trace, dict):
in_events = trace.get('traceEvents', [])
elif isinstance(trace, list) and isinstance(trace[0], dict):
in_events = trace
for evt in in_events:
phase = evt.get('ph', '?')
phase_count[phase] += 1
# Drop all diagnostic events for memory-infra debugging.
if phase not in ('v', 'V') and evt.get('cat', '').endswith('memory-infra'):
continue
# pass-through all the other non-memory-infra events
if phase != 'v':
out_events.append(evt)
continue
# Recreate the global dump groups
event_id = evt['id']
global_dumps.setdefault(event_id, [])
global_dumps[event_id].append(evt)
print 'Detected %d memory-infra global dumps' % len(global_dumps)
if global_dumps:
max_procs = max(len(x) for x in global_dumps.itervalues())
print 'Max number of processes seen: %d' % max_procs
ndumps = 2
print 'Preserving the last %d memory-infra dumps' % ndumps
detailed_dumps = []
non_detailed_dumps = []
for global_dump in global_dumps.itervalues():
try:
level_of_detail = global_dump[0]['args']['dumps']['level_of_detail']
except KeyError:
level_of_detail = None
if level_of_detail == 'detailed':
detailed_dumps.append(global_dump)
else:
non_detailed_dumps.append(global_dump)
dumps_to_preserve = detailed_dumps[-ndumps:]
ndumps -= len(dumps_to_preserve)
if ndumps:
dumps_to_preserve += non_detailed_dumps[-ndumps:]
for global_dump in dumps_to_preserve:
out_events += global_dump
print '\nEvents histogram for the original trace (count by phase)'
print '--------------------------------------------------------'
for phase, count in sorted(phase_count.items(), key=lambda x: x[1]):
print '%s %d' % (phase, count)
out_path = in_path.split('.json')[0] + '-filtered.json'
print '\nWriting filtered trace to ' + out_path,
with open(out_path, 'w') as fout:
json.dump({'traceEvents': out_events}, fout)
num_bytes_written = fout.tell()
print ' (%s written)' % FormatBytes(num_bytes_written)
| [
"hanpfei@gmail.com"
] | hanpfei@gmail.com |
4d8406d067af9e8bc7b63302376f873e80d00c09 | 2b9289c6348a58a839501f3088030061046e2b6c | /local_dm_control_suite/hopper.py | fe253ac364d772509e4d399753d88138d689dcda | [
"MIT"
] | permissive | rohitkuk/mtenv | 2a612e03cfcb8f373ae2a7e7fb21e7fe108bbe1f | 4a6d9d6fdfb321f1b51f890ef36b5161359e972d | refs/heads/main | 2023-04-21T09:26:57.666390 | 2021-05-30T00:29:02 | 2021-05-30T00:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,194 | py | # Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Hopper domain."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control import mujoco
from dm_control.rl import control
from . import base
from . import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
import numpy as np
SUITE = containers.TaggedTasks()
_CONTROL_TIMESTEP = 0.02 # (Seconds)
# Default duration of an episode, in seconds.
_DEFAULT_TIME_LIMIT = 20
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 0.6
# Hopping speed above which hop reward is 1.
_HOP_SPEED = 2
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
return common.read_model("hopper.xml"), common.ASSETS
@SUITE.add("benchmarking")
def stand(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to stand upright, balancing its pose."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=False, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs
)
@SUITE.add("benchmarking")
def hop(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns a Hopper that strives to hop forward."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Hopper(hopping=True, random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(
physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs
)
class Physics(mujoco.Physics):
    """Physics simulation with additional features for the Hopper domain."""

    def height(self):
        """Height of the torso measured relative to the foot."""
        torso_z = self.named.data.xipos["torso", "z"]
        foot_z = self.named.data.xipos["foot", "z"]
        return torso_z - foot_z

    def speed(self):
        """Horizontal speed of the Hopper (first component of the torso
        subtree linear velocity sensor)."""
        return self.named.data.sensordata["torso_subtreelinvel"][0]

    def touch(self):
        """Log-scaled signals from the toe and heel touch sensors."""
        readings = self.named.data.sensordata[["touch_toe", "touch_heel"]]
        return np.log1p(readings)
class Hopper(base.Task):
    """A Hopper's `Task` to train a standing and a jumping Hopper."""

    def __init__(self, hopping, random=None):
        """Initializes an instance of `Hopper`.

        Args:
          hopping: Boolean, if True the task is to hop forwards, otherwise it
            is to balance upright.
          random: Optional, either a `numpy.random.RandomState` instance, an
            integer seed for creating a new `RandomState`, or None to select
            a seed automatically (default).
        """
        self._hopping = hopping
        super(Hopper, self).__init__(random=random)

    def initialize_episode(self, physics):
        """Randomizes the joint configuration at the start of each episode."""
        randomizers.randomize_limited_and_rotational_joints(physics, self.random)
        self._timeout_progress = 0
        super(Hopper, self).initialize_episode(physics)

    def get_observation(self, physics):
        """Returns an observation of positions, velocities and touch sensors."""
        obs = collections.OrderedDict()
        # Drop the horizontal root coordinate so the observation is
        # invariant to translation.
        obs["position"] = physics.data.qpos[1:].copy()
        obs["velocity"] = physics.velocity()
        obs["touch"] = physics.touch()
        return obs

    def get_reward(self, physics):
        """Stand reward scaled either by hop speed or by a control bonus."""
        standing = rewards.tolerance(physics.height(), (_STAND_HEIGHT, 2))
        if not self._hopping:
            small_control = rewards.tolerance(
                physics.control(), margin=1, value_at_margin=0, sigmoid="quadratic"
            ).mean()
            small_control = (small_control + 4) / 5
            return standing * small_control
        hopping = rewards.tolerance(
            physics.speed(),
            bounds=(_HOP_SPEED, float("inf")),
            margin=_HOP_SPEED / 2,
            value_at_margin=0.5,
            sigmoid="linear",
        )
        return standing * hopping
| [
"sodhani@fb.com"
] | sodhani@fb.com |
6a175ea3d24d0ba13c9ba9188e4c07e166cac602 | a777170c979214015df511999f5f08fc2e0533d8 | /claf/factory/tokens.py | 0f6f9d5845d72d7970d5be980590319398151d30 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | srlee-ai/claf | 210b2d51918cf210683e7489ccb8347cb8b1f146 | 89b3e5c5ec0486886876ea3bac381508c6a6bf58 | refs/heads/master | 2021-02-13T04:38:36.198288 | 2020-03-03T15:01:01 | 2020-03-03T15:01:01 | 244,661,892 | 0 | 0 | MIT | 2020-03-03T14:45:52 | 2020-03-03T14:45:52 | null | UTF-8 | Python | false | false | 3,202 | py |
from overrides import overrides
from claf.config.registry import Registry
from claf.config.utils import convert_config2dict
from claf.tokens import tokenizer
from .base import Factory
def make_tokenizer(tokenizer_cls, tokenizer_config, parent_tokenizers=None):
    """Instantiate `tokenizer_cls` from a raw tokenizer config dict.

    The config must contain a "name" key; the sub-dict stored under that
    name (if present) is moved to the "config" key. Note that
    `tokenizer_config` is modified in place, as before.

    Args:
        tokenizer_cls: tokenizer class to instantiate.
        tokenizer_config: raw config dict for one tokenizer, or None.
        parent_tokenizers: extra constructor kwargs (e.g. parent tokenizer
            instances) merged into the config before instantiation.

    Returns:
        A `tokenizer_cls` instance, or None when no usable config was given.
    """
    # Fix: the default used to be a shared mutable dict ({}) -- the classic
    # mutable-default-argument pitfall. Use None as the sentinel instead.
    if parent_tokenizers is None:
        parent_tokenizers = {}
    if tokenizer_config is None or "name" not in tokenizer_config:
        return None

    package_name = tokenizer_config["name"]
    package_config = tokenizer_config.get(package_name, {})
    # Move the package-specific sub-config under the generic "config" key.
    tokenizer_config["config"] = package_config
    if package_name in tokenizer_config:
        del tokenizer_config[package_name]

    tokenizer_config.update(parent_tokenizers)
    return tokenizer_cls(**tokenizer_config)
def make_all_tokenizers(all_tokenizer_config):
    """Build the shared tokenizer hierarchy: sent -> word -> {subword, char},
    plus an independent bpe tokenizer."""
    get = all_tokenizer_config.get

    sent_tokenizer = make_tokenizer(
        tokenizer.SentTokenizer, get("sent", {"name": "punkt"})
    )
    word_tokenizer = make_tokenizer(
        tokenizer.WordTokenizer, get("word", None),
        parent_tokenizers={"sent_tokenizer": sent_tokenizer},
    )
    subword_tokenizer = make_tokenizer(
        tokenizer.SubwordTokenizer, get("subword", None),
        parent_tokenizers={"word_tokenizer": word_tokenizer},
    )
    char_tokenizer = make_tokenizer(
        tokenizer.CharTokenizer, get("char", None),
        parent_tokenizers={"word_tokenizer": word_tokenizer},
    )
    bpe_tokenizer = make_tokenizer(tokenizer.BPETokenizer, get("bpe", None))

    return {
        "bpe": bpe_tokenizer,
        "char": char_tokenizer,
        "subword": subword_tokenizer,
        "word": word_tokenizer,
        "sent": sent_tokenizer,
    }
class TokenMakersFactory(Factory):
    """Factory that assembles the tokenizers and token makers described by
    the token section of the experiment config (config.token)."""

    LANGS = ["eng", "kor"]

    def __init__(self):
        self.registry = Registry()

    @overrides
    def create(self, config):
        """Build {token_name: token_maker} plus the shared tokenizers."""
        tokenizer_section = getattr(config, "tokenizer", None)
        if tokenizer_section:
            tokenizers = make_all_tokenizers(convert_config2dict(tokenizer_section))
        else:
            tokenizers = {}

        token_names, token_types = config.names, config.types
        if len(token_names) != len(token_types):
            raise ValueError("token_names and token_types must be same length.")

        token_makers = {"tokenizers": tokenizers}
        for token_name, token_type in sorted(zip(token_names, token_types)):
            raw_config = getattr(config, token_name, {})
            if raw_config != {}:
                raw_config = convert_config2dict(raw_config)

            # Token (tokenizer, indexer, embedding, vocab)
            maker_kwargs = {
                "tokenizers": tokenizers,
                "indexer_config": raw_config.get("indexer", {}),
                "embedding_config": raw_config.get("embedding", {}),
                "vocab_config": raw_config.get("vocab", {}),
            }
            token_makers[token_name] = self.registry.get(f"token:{token_type}")(**maker_kwargs)
        return token_makers
| [
"humanbrain.djlee@gmail.com"
] | humanbrain.djlee@gmail.com |
ccb3efc9358bc44e6f4d99ee6cd99ba7342e7f28 | 4a48593a04284ef997f377abee8db61d6332c322 | /python/dbm/python2/test_dbm.py | 9104e09525b70761eee62c3643b98bb9ef3753c2 | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 1,614 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import dbm
import whichdb
def main():
    """Create a small dbm database, report its backend type, then read it back.

    Python 2 script: uses print statements and the legacy `whichdb` module.
    """
    # WRITE #######
    # Mode 'c' opens read/write and creates the file if it does not exist.
    db = dbm.open('foo_dbm', 'c')
    db['one'] = 'un'
    db['two'] = 'dos'
    db['three'] = 'tres'
    db.close()
    # WHICH DBM ###
    # whichdb guesses which dbm backend produced the file on disk.
    print "whichdb:", whichdb.whichdb('foo_dbm')
    print
    # READ ########
    # Mode 'r' reopens the same file read-only.
    db = dbm.open('foo_dbm', 'r')
    for k in db.keys():
        print k, ':', db[k]
    db.close()
if __name__ == '__main__':
    main()
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
76f40dbe916e27ef75c91cef03d606f26fd73a67 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/CJ/16_0_2_aMAN_plus.py | e499fe2553e862196dbf07b73f6585542cbcf6da | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,187 | py | t = int(input())
# Per-case mutable state shared by the helpers below; reset after each case.
arr = []
s = ""
times = 0
def rev(x): # index of last from 0
    # Invert-and-reverse the prefix arr[0..x] in place; each call counts as
    # one flip in the global `times` counter.
    global arr
    global times
    times = times +1
    half = (x+1)//2
    for i in range(half):
        temp = 1 - arr[i]
        arr[i] = 1 - arr[x-i]
        arr[x-i] = temp
    if((x+1)%2 != 0):
        # Odd-length prefix: the middle element is its own mirror, so it is
        # only inverted.
        arr[half] = 1 - arr[half]
def check(n):
    # Index of the first position where adjacent values differ,
    # or -1 when arr[0..n-1] is uniform.
    global arr
    for i in range(n-1):
        if(arr[i]!=arr[i+1]):
            return i
    return -1
def ini():
    # Decode the '+'/'-' input string into 1/0 values appended to `arr`.
    global s
    global arr
    for i in range(len(s)):
        if(s[i] == '+'):
            arr.append(1)
        else:
            arr.append(0)
for i in range(t):
    # `global` at module level has no effect; kept exactly as in the original.
    global arr
    global s
    global times
    s = input()
    ini()
    boo = True
    while(boo):
        # Repeatedly flip the prefix up to the first differing boundary
        # until the whole array is uniform.
        j = check(len(s))
        if(j== (-1)):
            boo = False
        else:
            rev(j) # index
    if(1 not in arr):
        # Uniformly 0 ('-'): one final whole-array flip turns everything to 1.
        rev(len(s)-1)
        boo = False
    elif(0 not in arr):
        boo = False
    #######################
    print("Case #"+str(i+1)+": "+str(times))
    arr = []
    s = ""
    times = 0
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
6d45841a1bc911599365d6efe618b8bd10ce654d | fd85e5320da3e0dae5ffc270c54caa8f85d20af7 | /user_analytics/views.py | c1164b992a4c427f2472395f8cdc5ad598a66611 | [
"Apache-2.0"
] | permissive | madre/analytics_nvd3 | 9a657937c91c9acd4b60e4ff33daecdf75a78c49 | 052f775c12f04e0e3a9fd321ee05de1fbceec09a | refs/heads/master | 2021-01-10T18:26:29.051575 | 2015-04-13T10:26:30 | 2015-04-13T10:26:30 | 33,605,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | # -*- coding: utf-8 -*-
# !/usr/local/bin/python
__version__ = "1.0"
__license__ = "Copyright (c) 2014-2010, levp-inc, All rights reserved."
__author__ = "madeling <madeling@letvpicture.com>"
from django.views.generic import TemplateView
from utils.redis_cache import REDIS_INS
class UserBasicTemplate(TemplateView):
    """Dashboard view rendering wifi device/user totals as an nvd3 bar chart."""

    template_name = "device.html"

    def get_context_data(self, **kwargs):
        """Read the cached totals from redis and build the chart payload."""
        context = super(UserBasicTemplate, self).get_context_data(**kwargs)

        device_wifi_total = REDIS_INS.hget("analytics_wifi_user_", "device_wifi_total")
        user_wifi_total = REDIS_INS.hget("analytics_wifi_user_", "user_wifi_total")
        user_wifi_origin_total = REDIS_INS.hget("analytics_wifi_user_", "user_wifi_origin_total")
        context['device_wifi_total'] = device_wifi_total
        context['user_wifi_total'] = user_wifi_total
        context['user_wifi_origin_total'] = user_wifi_origin_total

        # Chart data (bar labels: device / user / unique user).
        xdata = ["设备", "用户", "独立用户"]
        ydata = [device_wifi_total, user_wifi_total, user_wifi_origin_total]
        extra_serie1 = {"tooltip": {"y_start": "", "y_end": " cal"}}
        chartdata = {
            'x': xdata, 'name1': '', 'y1': ydata, 'extra1': extra_serie1,
        }
        context.update({
            'charttype': "discreteBarChart",
            'chartdata': chartdata,
            'chartcontainer': 'discretebarchart_container',  # container name
            'extra': {
                'x_is_date': False,
                'x_axis_format': '',
                'tag_script_js': True,
                'jquery_on_ready': True,
            },
        })
        return context
| [
"lingnck@gmail.com"
] | lingnck@gmail.com |
56d1a355702247f5513deef778923b1b68ad26fb | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/145/D.py | e13c534bf3fde50c178423dff2b3ee1085432ceb | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def cmb(n, r, MOD, g1, g2):
    # Binomial coefficient C(n, r) mod MOD; g1 holds factorials and g2
    # inverse factorials (both precomputed by the caller).
    if ( r<0 or r>n ):
        return 0
    # Symmetry C(n, r) == C(n, n-r): use the smaller index.
    r = min(r, n-r)
    return g1[n] * g2[r] * g2[n-r] % MOD
def main():
    """Read X, Y from stdin and print the number of knight-step paths,
    computed as C(n + m, n) mod 1e9+7 (0 when (X + Y) is not divisible by 3).
    """
    X, Y = map(int, input().split())
    MOD = 10 ** 9 + 7
    if (X + Y) % 3 != 0:
        print(0)
        exit()
    m = (2 * X - Y) // 3
    n = (2 * Y - X) // 3
    N = 10 ** 6
    fact = [1, 1]      # factorial table
    inv_fact = [1, 1]  # inverse-factorial table
    inv = [0, 1]       # modular inverses, built iteratively
    for i in range(2, N + 1):
        fact.append(fact[-1] * i % MOD)
        inv.append(-inv[MOD % i] * (MOD // i) % MOD)
        inv_fact.append(inv_fact[-1] * inv[-1] % MOD)
    print(cmb(n + m, n, MOD, fact, inv_fact))
if __name__ == "__main__":
main() | [
"sososo1333@gmail.com"
] | sososo1333@gmail.com |
0ac3ce67a375b998817489ff3c11903d0feb0220 | 804ce3c2897a8720a27e0d86ac3b868ebd41cd20 | /archive/admin.py | e4f227d12c67b1dd80f1b36e7d55cdbcb8853546 | [] | no_license | hoboland21/mango | 383359aa85b685bfe77c6336974600038454cf80 | be8bf3398612a0c3dbb4498eb5eb18407c574ce3 | refs/heads/master | 2023-07-13T06:25:39.508434 | 2021-08-25T03:25:37 | 2021-08-25T03:25:37 | 399,520,705 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | from django.contrib import admin
from rsvn.models import *
# Register your models here.
#---------------------------------------------------------
class RateHeadingAdmin(admin.ModelAdmin) :
    """Admin options for RateHeading (registration is commented out below)."""
    # Columns shown in the admin change-list.
    list_display = ('title','descr',)
    # Sort the change-list by title.
    ordering = ('title',)
#---------------------------------------------------------
class RateAtomAdmin(admin.ModelAdmin) :
    """Admin options for RateAtom (registration is commented out below)."""
    # Columns shown in the admin change-list.
    list_display = ('rateHeading','rateName','rateType','rateDays','lowSeason','highSeason','peakSeason',)
    # Sort the change-list by rate name.
    ordering = ('rateName',)
#---------------------------------------------------------
class RoomInfoAdmin(admin.ModelAdmin) :
    """Admin options for RoomInfo."""
    # Columns shown in the admin change-list.
    list_display = ('type', 'number', 'beds','connect', 'notes')
    # Sort by room type, then room number.
    ordering = ('type','number')
#---------------------------------------------------------
class SeasonAdmin(admin.ModelAdmin) :
    """Admin options for Season."""
    # Columns shown in the admin change-list.
    list_display = ('name','beginDate','endDate')
    # Chronological ordering by season start date.
    ordering = ('beginDate',)
# Only RoomInfo and Season are exposed in the admin; the remaining
# registrations are left commented out.
admin.site.register(RoomInfo,RoomInfoAdmin)
admin.site.register(Season,SeasonAdmin)
#admin.site.register(RateAtom,RateAtomAdmin)
#admin.site.register(RateHeading,RateHeadingAdmin)
#admin.site.register(ServiceRate,ServiceRateAdmin)
| [
"jc@saipantech.com"
] | jc@saipantech.com |
01712697928ec9ebd687a93b160d3d87fd2b3bec | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/pandas/genome_calculation.py | 74384393563d0a41da51f305348efca0a30d59db | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 662 | py | import pandas as pd
import numpy as np
import datetime
import sys
# Input workbook; a single optional CLI argument overrides the default.
filename = 'raw_data.xlsx'
if len(sys.argv) == 2:
    filename = sys.argv[1]
def calculate_averages(row):
    """Log2 ratio of the mean of columns 0-2 to the mean of columns 3-5."""
    first_mean = row.iloc[:3].mean()
    second_mean = row.iloc[3:6].mean()
    return np.log2(first_mean / second_mean)
# Time the spreadsheet load separately from the per-row computation.
start_time = datetime.datetime.now()
df = pd.read_excel(filename, index_col='genome name')
load_time = datetime.datetime.now()
print(load_time - start_time)
print(df.head())
# Per-row log2 ratio of the two triplicate means.
calculated_value = df.apply(calculate_averages, axis=1)
threshold = 0.2
# Keep only rows whose log2 ratio exceeds the threshold.
filtered_df = df[calculated_value > threshold]
print(filtered_df.head())
calculate_time = datetime.datetime.now()
print(calculate_time - load_time)
| [
"gabor@szabgab.com"
] | gabor@szabgab.com |
60d08f29afa6ffc2672f0e31e78ffda838221d70 | 3fd47598050ab6098088eddc79624dfa855c2143 | /djangoRest/settings.py | 5a18da26ac279010d5a33f80c85edab2135a8091 | [] | no_license | sajibuzzaman/djangoRest_Framework | cf6be098744e2506cea089ebc8f9e0dc21c0162f | a90b571f2c6dc1b9f832a0e0dda5f08b1724d9cc | refs/heads/master | 2023-04-10T04:28:49.872941 | 2021-04-21T18:41:07 | 2021-04-21T18:41:07 | 359,920,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,539 | py | """
Django settings for djangoRest project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-^l$vw5bdp-f7zk0m^s2f8xe&38l)6k-_9lh$(80fet%86q+sor'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# apps
'djangoRestApp',
'articleApp',
# Rest Framework
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoRest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoRest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_ROOT = BASE_DIR / 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS =[
BASE_DIR / 'static',
]
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"muhammadsajibuzzaman1998@gmail.com"
] | muhammadsajibuzzaman1998@gmail.com |
f01c048210b678c812f3bb6b87718e5bd62b2199 | 43d0413d129d997b41cd87a740010f889ab0c646 | /dataset_balancing/balance_dataset.py | a159cf9add6b0a13dae208db000a3772fbf4e165 | [] | no_license | LTTTDH/WebVision | 467aa46f63c2b95332f83504feb73f8628382c26 | 4a133071441b7412638382c4465dc2925b87235f | refs/heads/master | 2023-03-15T21:59:29.473087 | 2017-09-27T10:12:06 | 2017-09-27T10:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import numpy as np
# Python 2 script (print statements below): oversample each class's file
# list so every class appears at least `minxclass` times in the output.
path = "../../../datasets/WebVision/info/train_filelist_all.txt"
dest_path = "../../../datasets/WebVision/info/train_balanced_filelist.txt"
file = open(path, "r")
print("Loading data ...")
print(path)
listofclasses = {}
for c in range(0,1000):
    listofclasses[c] = []
# Load data
# Each input line is "<filepath> <class_id>".
for line in file:
    d = line.split()
    listofclasses[int(d[1])].append(d[0])
file.close()
# Count number per class
numxclass = np.zeros((1000,1))
for c in range(0,1000):
    numxclass[c] = len(listofclasses[c])
maxxclass = max(numxclass)
print "Max per class: " + str(maxxclass)
# Target: at least half the size of the largest class.
minxclass = int(maxxclass - maxxclass * 0.5)
print "Min per class: " + str(minxclass)
print "Writing data"
# Write data balancing
file = open(dest_path, "w")
for c in range(0,1000):
    elements_writed = 0
    # NOTE(review): if a class has zero samples, the inner for-loop never
    # runs and this while-loop never terminates -- confirm every class id
    # in 0..999 is populated in the input file.
    while elements_writed <= minxclass:
        for el in listofclasses[c]:
            file.write(el + " " + str(c) + "\n")
            elements_writed += 1
            if elements_writed > minxclass and elements_writed > numxclass[c]: break
    print "Class " + str(c) + " : " + str(elements_writed)
file.close()
print "DONE"
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
bcb8f52bdf77dee5e83cf7ccb9a921a9caba190e | 2befb6f2a5f1fbbd5340093db43a198abdd5f53b | /pythonProject/customAuth/CustomAuthApp/migrations/0001_initial.py | 5e92563f7b3e1ae564639280aafad3ca484fe3c3 | [] | no_license | JanardanPandey/RestAPI | 1956d3529782d18ef2118961f6286e3213665aad | 654933a4d9687076a00c6f4c57fc3dfee1a2c567 | refs/heads/master | 2023-06-14T07:02:31.702000 | 2021-07-02T07:50:59 | 2021-07-02T07:50:59 | 382,357,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # Generated by Django 3.2.3 on 2021-06-12 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Student table."""

    # First migration of the app.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('city', models.CharField(max_length=20)),
                ('roll', models.IntegerField()),
            ],
        ),
    ]
| [
"janardanpandey0510@gmail.com"
] | janardanpandey0510@gmail.com |
392544400066482d508c6d31e0c3c975481bbfab | cacac33bd0ff7bd1024c00f907d87a2750a4118f | /radiopadre_client/backends/backend_utils.py | d126c5cb8fac1275d0be18d7814310d14ba24a18 | [
"MIT"
] | permissive | ratt-ru/radiopadre-client | b96abda62e4ad6c26d45f6468e61dbf333806530 | ef138860d22523bf08a847317f3daca363db65a3 | refs/heads/master | 2023-03-07T09:23:22.237526 | 2021-12-05T06:32:00 | 2021-12-05T06:32:00 | 234,801,909 | 4 | 0 | MIT | 2021-07-14T13:05:03 | 2020-01-18T21:44:18 | Python | UTF-8 | Python | false | false | 2,173 | py | import socket, time, os, os.path
import iglesia
from iglesia.utils import message, bye, ff, shell
from radiopadre_client import config
def update_server_from_repository():
    """
    Updates the radiopadre git working directory, if necessary:
    pulls (and optionally checks out a branch) when --update was given
    and the server install path is a git checkout.
    :return:
    """
    install_path = config.SERVER_INSTALL_PATH
    if not (config.UPDATE and install_path and os.path.isdir(install_path + "/.git")):
        return
    if config.SERVER_INSTALL_BRANCH:
        cmd = ff("cd {config.SERVER_INSTALL_PATH} && git fetch origin && git checkout {config.SERVER_INSTALL_BRANCH} && git pull")
    else:
        cmd = ff("cd {config.SERVER_INSTALL_PATH} && git pull")
    message(ff(
        "--update specified, --server-install-path at {config.SERVER_INSTALL_PATH} will be updated via"))
    message(ff("  {cmd}"))
    if shell(cmd):
        bye("update failed")
def await_server_startup(port, process=None, server_name="jupyter notebook server", init_wait=2, wait=60):
    """
    Waits for a server process to start up, tries to connect to the specified port,
    returns when successful
    :param port: port number
    :param process: if not None, polls the process and aborts if it has exited
    :param server_name: human-readable name used in the status message
    :param init_wait: number of seconds to wait before trying to connect
    :param wait: total number of seconds to wait before giving up
    :return: number of seconds elapsed before connection, or None if failed
    """
    # pause to let the server spin up before the first connection attempt
    t0 = time.time()
    time.sleep(init_wait)
    for retry in range(int(wait / .1)):
        # Use a fresh socket for every attempt and always close it: the old
        # code reused one socket object for repeated connect() calls (not
        # guaranteed to work after a failed connect) and leaked the
        # descriptor via `del sock`, which does not close it.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(("localhost", port))
            return time.time() - t0
        except socket.error:
            pass
        finally:
            sock.close()
        if not retry:
            message(ff("Waiting for up to {wait} secs for the {server_name} to come up"))
        # sleep, then check whether the server process has died
        if process is not None:
            process.poll()
            if process.returncode is not None:
                return None
        time.sleep(.1)
    return None
"osmirnov@gmail.com"
] | osmirnov@gmail.com |
77c75a0b7749c3c5c8338bde04b1bb61e93bb78f | 3e4bb5b4036a66d25a72793c1deaa4f5572d37bf | /apps/dashboard/views.py | 2f723b17ac30dd70dec007dcbcc52e9db4be3f89 | [
"MIT"
] | permissive | hbussell/pinax-tracker | f7f7eb0676d01251d7d8832557be14665755844d | 4f6538324b2e1f7a8b14c346104d2f1bd8e1556b | refs/heads/master | 2021-01-20T12:06:29.630850 | 2010-02-03T00:39:05 | 2010-02-03T00:39:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,788 | py |
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from tasks.models import Task
from tasks.forms import TaskDashboardForm
from tasks.filters import TaskProjectFilter
from projects.models import Project
from django.contrib import messages
from django.utils.translation import ugettext
from django.template.defaultfilters import slugify
import re
from tagging.models import Tag
from pinax.utils.importlib import import_module
from django.conf import settings
workflow = import_module(getattr(settings, "TASKS_WORKFLOW_MODULE", "tasks.workflow"))
def dashboard(request, template_name="dashboard/dashboard.html"):
    """Render the main dashboard: quick-add forms plus a filtered task list.

    POSTed quick-add submissions are delegated to _handle_taskbar and
    _handle_projects; when either reports success the view redirects to '/'.
    """
    if _handle_taskbar(request):
        return HttpResponseRedirect('/')
    if _handle_projects(request):
        return HttpResponseRedirect('/')
    form_class = TaskDashboardForm
    task_form = form_class(request.user)
    group_by = request.GET.get("group_by")
    tasks = Task.objects.filter()
    # NOTE: group_base is assigned here but never read in this function.
    group_base = None
    tasks = tasks.select_related("assignee")
    # default filtering
    state_keys = dict(workflow.STATE_CHOICES).keys()
    default_states = set(state_keys).difference(
        # don"t show these states
        set(["2", "3"])
    )
    # GET parameters override the default state filter.
    filter_data = {"state": list(default_states)}
    filter_data.update(request.GET)
    task_filter = TaskProjectFilter(request.user, filter_data, queryset=tasks)
    # Rebuild the querystring without "group_by" so the template can append
    # its own group_by value to the remaining parameters.
    group_by_querydict = request.GET.copy()
    group_by_querydict.pop("group_by", None)
    group_by_querystring = group_by_querydict.urlencode()
    return render_to_response(template_name, {
        'projects':Project.objects.all()
        ,'task_form':task_form
        ,'task_filter':task_filter
        ,'tasks':task_filter.qs,
        "group_by": group_by,
        "group": None
    }, context_instance=RequestContext(request))
def _handle_taskbar(request):
    """Handle the dashboard quick-add task form.

    Creates a Task from the POSTed ``task_name``; ``@word`` tokens are
    stripped from the name and used as tags, with ``@me``/``@my`` meaning
    "assign to the current user". Returns True when a task was created
    (so the caller redirects); returns None otherwise.
    """
    if not request.user.is_authenticated():
        return
    if request.method == 'POST':
        if request.POST.get('add_task'):
            name = request.POST.get('task_name')
            project_id = request.POST.get('task_project', None)
            # Bug fix: 'project' was previously only bound inside the
            # 'if project_id:' branch, so submitting the form without a
            # project raised UnboundLocalError at 'task_form.group = project'.
            project = None
            if project_id:
                try:
                    project = Project.objects.get(pk=project_id)
                except Project.DoesNotExist:
                    project = None
            # Extract '@tag' tokens from the task name.
            # NOTE(review): the trailing '.?' in the pattern also swallows one
            # arbitrary character after each tag word -- confirm intended.
            regex = re.compile("(?P<word>@\w+.?)")
            tags = []
            for match in regex.findall(name):
                name = name.replace(match,'')
                tag = match.strip('@').strip(' ')
                tags.append(tag)
            name = name.strip(' ')
            form_class = TaskDashboardForm
            task_form = form_class(request.user, data=request.POST)
            task_form.group = project
            if task_form.is_valid():
                task = task_form.save(commit=False)
                task.summary = name
                task.creator = request.user
                # '@me' / '@my' are pseudo-tags meaning "assign to me".
                if 'me' in tags:
                    tags.remove('me')
                    task.assignee = request.user
                elif 'my' in tags:
                    tags.remove('my')
                    task.assignee = request.user
                task.group = project
                if hasattr(workflow, "initial_state"):
                    task.state = workflow.initial_state(task, request.user)
                task.tags = ' '.join(tags)
                task.save()
                task.save_history()
                messages.add_message(request, messages.SUCCESS,
                    ugettext("added task '%s'") % task.summary
                )
                return True
def _handle_projects(request):
    """Handle the dashboard quick-add project form.

    Creates a Project from the POSTed ``project_name`` unless one with that
    name already exists. Returns True when a project was created; returns
    None otherwise.
    """
    if not request.user.is_authenticated():
        return
    if request.method != 'POST':
        return
    if not request.POST.get('add_project'):
        return
    name = request.POST.get('project_name')
    try:
        # An existing project with this name means nothing to do.
        Project.objects.get(name=name)
    except Project.DoesNotExist:
        project = Project(name=name, slug=slugify(name), creator=request.user)
        project.save()
        messages.add_message(request, messages.SUCCESS,
            ugettext("added project '%s'") % project.name
        )
        return True
def all_tasks(request, template_name="dashboard/all_tasks.html"):
    """Render the full task list with the same default state filtering as
    the dashboard view, minus the milestone filter."""
    from tasks.models import Task
    from tasks import workflow
    from tasks.filters import TaskProjectFilter
    # NOTE: is_member is computed here but never used in this function.
    if not request.user.is_authenticated():
        is_member = False
    else:
        is_member = True
    group_by = request.GET.get("group_by")
    tasks = Task.objects.all()
    tasks = tasks.select_related("assignee")
    # default filtering
    state_keys = dict(workflow.STATE_CHOICES).keys()
    default_states = set(state_keys).difference(
        # don"t show these states
        set(["2", "3"])
    )
    # milestones = [(m.id, m.title) for m in Milestone.objects.all()]
    filter_data = {"state": list(default_states)}
    #"milestone":
    #milestones}
    filter_data.update(request.GET)
    task_filter = TaskProjectFilter(request.user, filter_data, queryset=tasks)
    # task_filter.filter('milestone', milestone.id)
    # Rebuild the querystring without "group_by" for the template's links.
    group_by_querydict = request.GET.copy()
    group_by_querydict.pop("group_by", None)
    group_by_querystring = group_by_querydict.urlencode()
    # Drop the milestone filter widget from this view's filter form.
    del task_filter.filters['milestone']
    return render_to_response(template_name, {
        "group_by": group_by,
        "gbqs": group_by_querystring,
        "task_filter": task_filter,
        "tasks": task_filter.qs,
        "querystring": request.GET.urlencode(),
    }, context_instance=RequestContext(request))
| [
"harley@harley-desktop.(none)"
] | harley@harley-desktop.(none) |
525d87c0196a42e75f55f89d743b31765ba68d48 | 63811ad4592793a8028ab973e254ba59a4205fe5 | /src/modules/storage_evernote/__init__.py | b30826ea497661139b30177a332751794def6cb6 | [] | no_license | AlexWoroschilow/AOD-Notes | d124fe9206278cae3c57b146883081bfaaaf1ff5 | aa43c58291e2f9175a456c156ebc78aaf61cac1e | refs/heads/master | 2022-11-30T10:42:16.405161 | 2020-08-05T20:28:05 | 2020-08-05T20:28:05 | 130,315,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # -*- coding: utf-8 -*-
# Copyright 2015 Alex Woroschilow (alex.woroschilow@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import inject
class Loader(object):
    """No-op module loader, usable as a context manager."""

    def __enter__(self):
        # Nothing to acquire; expose the loader itself.
        return self

    def __exit__(self, type, value, traceback):
        # Nothing to release; returning None lets exceptions propagate.
        return None

    def configure(self, binder, options, args):
        """
        Configure service container for the dependency injections
        :param binder:
        :param options:
        :param args:
        :return:
        """
        return None
| [
"alex.woroschilow@gmail.com"
] | alex.woroschilow@gmail.com |
842f17c7aeae65c3e435a8bef7373d36475fcad4 | 48408a93a358e09526e8f8b9cf560cfede086d9f | /tests/test_plot_acc_signal.py | 675b8400125a71a0064af1afd0215602f6460a83 | [
"MIT"
] | permissive | eng-tools/engformat | aa4c137854f05706feceee136e0601508c4ea4f1 | 8cc3937327eb4e7b52e0b5d486248bf25894ec0d | refs/heads/master | 2023-03-28T23:07:54.075599 | 2021-03-30T21:47:27 | 2021-03-30T21:47:27 | 111,424,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | import numpy as np
import matplotlib
matplotlib.use('agg')
from eqsig import AccSignal
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from bwplot import cbox
from engformat import plot_acc_signal
from tests.conftest import TEST_DATA_DIR
@image_comparison(baseline_images=['plot_acc_sig_as_response_spectrum'], extensions=['png'])
def test_plot_acc_sig_as_response_spectrum():
    """Regression-check the response-spectrum plot against its baseline image."""
    time_step = 0.01
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_response_spectrum(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_time_series'], extensions=['png'])
def test_plot_acc_sig_as_time_series():
    """Regression-check the time-series plot against its baseline image."""
    time_step = 0.01
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_time_series(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_fa_spectrum'], extensions=['png'])
def test_plot_acc_sig_as_fa_spectrum():
    """Regression-check the Fourier-amplitude plot against its baseline image."""
    time_step = 0.01
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_fa_spectrum(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_avd'], extensions=['png'])
def test_plot_acc_sig_as_avd():
    """Regression-check the acc/vel/disp plot against its baseline image."""
    time_step = 0.01
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    plot_acc_signal.plot_acc_sig_as_avd(AccSignal(values, time_step))
@image_comparison(baseline_images=['plot_acc_sig_as_transfer_function'], extensions=['png'])
def test_plot_acc_sig_as_transfer_function():
    """Regression-check the transfer-function plot against its baseline image."""
    time_step = 0.01
    values = np.loadtxt(TEST_DATA_DIR + 'test_motion_dt0p01.txt')
    signal = AccSignal(values, time_step)
    plot_acc_signal.plot_acc_sig_as_transfer_function(signal, [signal])
if __name__ == '__main__':
test_plot_acc_sig_as_response_spectrum() | [
"maxim.millen@gmail.com"
] | maxim.millen@gmail.com |
8e3d54e893943143b258daaa50207961b795f69d | d9e277dc46c9ed02d339db0fc1c4ebaed9d15e12 | /ingest/spoor_xml.py | 735227883f6ef3b55d3da3731f44056cba250eff | [
"MIT"
] | permissive | O-C-R/intotheokavango | b7ea700a178610ce6154eaf8bd423ff7acf5d522 | 4006940ddead3f31eea701efb9b9dcdc7b19402e | refs/heads/master | 2020-04-10T20:13:52.547817 | 2017-10-31T16:26:17 | 2017-10-31T16:26:17 | 32,355,238 | 1 | 3 | MIT | 2018-06-15T15:43:23 | 2015-03-16T21:51:32 | JavaScript | UTF-8 | Python | false | false | 2,105 | py | import json, xmltodict, os, base64
from ingest import ingest_json_body, save_files, process_image, ingest_data, ingest_plain_body
from housepy import config, log, util, strings
from ingest.sighting import get_taxonomy
def parse(request):
log.info("spoor_xml.parse")
try:
content = ingest_plain_body(request)
data = xmltodict.parse(content)
except Exception as e:
log.error(log.exc(e))
return None, "Parsing error"
try:
log.info("--> parsing XML")
data = data['instance']
feature = {'FeatureType': "sighting", 'Delivery': "devicemagic"}
log.debug(json.dumps(data, indent=4, default=lambda x: str(x)))
# feature['Member'] = data['@dm:submitting_user'].split(' ')[0] # let TeamMember override this
dt = util.parse_date(data['@writeTime'])
data = data['inputs']
for alias in ['Date___Time_Question', 'Date___Time']:
if alias in data:
dt = util.parse_date(data[alias])
del data[alias]
feature['t_utc'] = util.timestamp(dt)
for alias in ['Current_Location', 'LocationQuestion', 'Location_Question', 'GPSLocation']:
if alias in data:
data['Location'] = data[alias]
del data[alias]
if 'Location' in data:
try:
feature['Latitude'] = data['Location'].split(',')[0].replace("lat=", '').strip()
feature['Longitude'] = data['Location'].split(',')[1].replace("long=", '').strip()
feature['Altitude'] = data['Location'].split(',')[2].replace("alt=", '').strip()
del data['Location']
except Exception as e:
log.error(log.exc(e))
for key, value in data.items():
feature[key.replace('_', '')] = value
# purge blanks
feature = {key: value for (key, value) in feature.items() if type(value) != str or len(value.strip())}
except Exception as e:
log.error(log.exc(e))
return None, "Unexpected fields"
return feature
| [
"brian.house@gmail.com"
] | brian.house@gmail.com |
4b2850b6ce34f9c85f6c0c7634f32b5c9cf5fcff | aef9d6b8bb21957fa8b2235872bca51f64e7b5ff | /petstagram/petstagram/pets/urls.py | 2473e643a2fe767b5674d65d09873b6da5c9eacb | [] | no_license | dreadlordow/Softuni-Python-Web | 3cf9cc234960bb47f1c3c2a91a1a80d0fc499fd6 | 784faccbe15023536917d610384222d839a63bae | refs/heads/master | 2023-08-28T19:39:57.149514 | 2021-02-23T16:28:55 | 2021-02-23T16:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.urls import path
from petstagram.pets.views import list_pets, details_or_comment_pet, like_pet, create, edit_pet, delete_pet
urlpatterns =[
path('', list_pets, name='list pets'),
path('details/<int:pk>/', details_or_comment_pet, name='pet details'),
path('like/<int:pk>/', like_pet, name='like pet'),
path('create/', create, name='create pet'),
path('edit/<int:pk>', edit_pet, name='edit pet'),
path('delete/<int:pk>', delete_pet, name='delete pet'),
] | [
"georgipavlov1913@gmail.com"
] | georgipavlov1913@gmail.com |
87c33bb6777835d09d524a7349f95644d682d200 | fa7e75212e9f536eed7a78237a5fa9a4021a206b | /python/smqtk/tests/algorithms/nn_index/test_NNI_itq.py | e9cbb9ca8295459ddafa1635f0cd73c710e71e13 | [] | no_license | kod3r/SMQTK | 3d40730c956220a3d9bb02aef65edc8493bbf527 | c128e8ca38c679ee37901551f4cc021cc43d00e6 | refs/heads/master | 2020-12-03T09:12:41.163643 | 2015-10-19T14:56:55 | 2015-10-19T14:56:55 | 44,916,678 | 1 | 0 | null | 2015-10-25T15:47:35 | 2015-10-25T15:47:35 | null | UTF-8 | Python | false | false | 7,630 | py | import json
import os
import random
import unittest
import nose.tools as ntools
import numpy
from smqtk.representation.code_index.memory import MemoryCodeIndex
from smqtk.representation.descriptor_element.local_elements import \
DescriptorMemoryElement
from smqtk.algorithms.nn_index.lsh.itq import ITQNearestNeighborsIndex
from smqtk.utils.file_utils import make_tempfile
__author__ = "paul.tunison@kitware.com"
class TestIqrSimilarityIndex (unittest.TestCase):
ITQ_ROTATION_MAT = None
ITQ_MEAN_VEC = None
RANDOM_SEED = 42
@classmethod
def _clean_cache_files(cls):
for fp in [cls.ITQ_ROTATION_MAT, cls.ITQ_MEAN_VEC]:
if fp and os.path.isfile(fp):
os.remove(fp)
@classmethod
def _make_cache_files(cls):
cls._clean_cache_files()
cls.ITQ_MEAN_VEC = make_tempfile(suffix='.npy')
cls.ITQ_ROTATION_MAT = make_tempfile(suffix='.npy')
def _make_inst(self, dist_method, bits=8):
self._make_cache_files()
# don't want the files to actually exist
self._clean_cache_files()
# Initialize with a fresh code index instance every time, otherwise the
# same code index is maintained between constructions
return ITQNearestNeighborsIndex(self.ITQ_MEAN_VEC, self.ITQ_ROTATION_MAT,
code_index=MemoryCodeIndex(),
bit_length=bits,
distance_method=dist_method,
random_seed=self.RANDOM_SEED)
def tearDown(self):
self._clean_cache_files()
def test_configuration(self):
c = ITQNearestNeighborsIndex.get_default_config()
# Default code index should be memory based
ntools.assert_equal(c['code_index']['type'], 'MemoryCodeIndex')
ntools.assert_true(c['mean_vec_filepath'] is None)
ntools.assert_true(c['rotation_filepath'] is None)
ntools.assert_true(c['random_seed'] is None)
# Conversion to JSON and back is idempotent
ntools.assert_equal(json.loads(json.dumps(c)), c)
# Make some changes to deviate from defaults
c['bit_length'] = 256
c['itq_iterations'] = 25
c['mean_vec_filepath'] = 'vec.npy'
c['rotation_filepath'] = 'rot.npy'
# Make instance
index = ITQNearestNeighborsIndex.from_config(c)
ntools.assert_equal(index._mean_vec_cache_filepath,
c['mean_vec_filepath'])
ntools.assert_equal(index._rotation_cache_filepath,
c['rotation_filepath'])
ntools.assert_is_instance(index._code_index, MemoryCodeIndex)
ntools.assert_equal(index._bit_len, c['bit_length'])
ntools.assert_equal(index._itq_iter_num, c['itq_iterations'])
ntools.assert_equal(index._dist_method, c['distance_method'])
ntools.assert_equal(index._rand_seed, c['random_seed'])
def test_known_descriptors_euclidean_unit(self):
dim = 5
###
# Unit vectors -- Equal distance
#
index = self._make_inst('euclidean')
test_descriptors = []
for i in xrange(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
d = DescriptorMemoryElement('unit', i)
d.set_vector(v)
test_descriptors.append(d)
index.build_index(test_descriptors)
# query descriptor -- zero vector
# -> all modeled descriptors should be equally distance (unit corners)
q = DescriptorMemoryElement('query', 0)
q.set_vector(numpy.zeros(dim, float))
# All dists should be 1.0, r order doesn't matter
r, dists = index.nn(q, dim)
for d in dists:
ntools.assert_equal(d, 1.)
def test_known_descriptors_euclidean_ordered(self):
index = self._make_inst('euclidean')
# make vectors to return in a known euclidean distance order
i = 1000
test_descriptors = []
for j in xrange(i):
d = DescriptorMemoryElement('ordered', j)
d.set_vector(numpy.array([j, j*2], float))
test_descriptors.append(d)
random.shuffle(test_descriptors)
index.build_index(test_descriptors)
# Since descriptors were build in increasing distance from (0,0),
# returned descriptors for a query of [0,0] should be in index order.
q = DescriptorMemoryElement('query', i)
q.set_vector(numpy.array([0, 0], float))
# top result should have UUID == 0 (nearest to query)
r, dists = index.nn(q, 5)
ntools.assert_equal(r[0].uuid(), 0)
ntools.assert_equal(r[1].uuid(), 1)
ntools.assert_equal(r[2].uuid(), 2)
ntools.assert_equal(r[3].uuid(), 3)
ntools.assert_equal(r[4].uuid(), 4)
# global search should be in complete order
r, dists = index.nn(q, i)
for j, d, dist in zip(range(i), r, dists):
ntools.assert_equal(d.uuid(), j)
def test_random_descriptors_euclidean(self):
# make random descriptors
i = 1000
dim = 256
bits = 32
td = []
for j in xrange(i):
d = DescriptorMemoryElement('random', j)
d.set_vector(numpy.random.rand(dim))
td.append(d)
index = self._make_inst('euclidean', bits)
index.build_index(td)
# test query from build set -- should return same descriptor when k=1
q = td[255]
r, dists = index.nn(q, 1)
ntools.assert_equal(r[0], q)
# test query very near a build vector
td_q = td[0]
q = DescriptorMemoryElement('query', i)
v = numpy.array(td_q.vector()) # copy
v_min = max(v.min(), 0.1)
v[0] += v_min
v[dim-1] -= v_min
q.set_vector(v)
r, dists = index.nn(q, 1)
ntools.assert_false(numpy.array_equal(q.vector(), td_q.vector()))
ntools.assert_equal(r[0], td_q)
# random query
q = DescriptorMemoryElement('query', i+1)
q.set_vector(numpy.random.rand(dim))
# for any query of size k, results should at least be in distance order
r, dists = index.nn(q, 10)
for j in xrange(1, len(dists)):
ntools.assert_greater(dists[j], dists[j-1])
r, dists = index.nn(q, i)
for j in xrange(1, len(dists)):
ntools.assert_greater(dists[j], dists[j-1])
def test_known_descriptors_hik_unit(self):
dim = 5
###
# Unit vectors - Equal distance
#
index = self._make_inst('hik')
test_descriptors = []
for i in xrange(dim):
v = numpy.zeros(dim, float)
v[i] = 1.
d = DescriptorMemoryElement('unit', i)
d.set_vector(v)
test_descriptors.append(d)
index.build_index(test_descriptors)
# query with zero vector
# -> all modeled descriptors have no intersection, dists should be 1.0,
# or maximum distance by histogram intersection
q = DescriptorMemoryElement('query', 0)
q.set_vector(numpy.zeros(dim, float))
r, dists = index.nn(q, dim)
# All dists should be 1.0, r order doesn't matter
for d in dists:
ntools.assert_equal(d, 1.)
# query with index element
q = test_descriptors[3]
r, dists = index.nn(q, 1)
ntools.assert_equal(r[0], q)
ntools.assert_equal(dists[0], 0.)
r, dists = index.nn(q, dim)
ntools.assert_equal(r[0], q)
ntools.assert_equal(dists[0], 0.)
| [
"paul.tunison@kitware.com"
] | paul.tunison@kitware.com |
815e6293e7b50bf45be49abf34aa8aa462497005 | ad553dd718a8df51dabc9ba636040da740db57cf | /.history/app_20181208041346.py | f81adf16e88f6eaee743e5ea52a166492686ae44 | [] | no_license | NergisAktug/E-Commerce-PythonWithFlask-Sqlite3 | 8e67f12c28b11a7a30d13788f8dc991f80ac7696 | 69ff4433aa7ae52ef854d5e25472dbd67fd59106 | refs/heads/main | 2023-01-01T14:03:40.897592 | 2020-10-19T20:36:19 | 2020-10-19T20:36:19 | 300,379,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,022 | py | import datetime
from flask import Flask, request, render_template_string, render_template
from flask import Flask, url_for, render_template, request, redirect, session, escape, render_template_string
from flask_babelex import Babel
from flask_sqlalchemy import SQLAlchemy
from flask_user import current_user, login_required, roles_required
from sqlalchemy.sql import table, column, select
from sqlalchemy import MetaData, create_engine
from flask_user import login_required, roles_required, UserManager, UserMixin
class ConfigClass(object):
SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = 'nergis.aktug2014@gmail.com'
MAIL_PASSWORD = '05383896877'
MAIL_DEFAULT_SENDER = '"MyApp" <xyz@gmail.com>'
USER_ENABLE_EMAIL = True
USER_ENABLE_USERNAME = False
USER_EMAIL_SENDER_EMAIL = "noreply@example.com"
def create_app():
""" Flask application factory """
# Create Flask app load app.config
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
tarih = db.Column(db.DateTime())
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1')
def __init__(self, email, sifre):
self.email = email
self.sifre = sifre
self.rolId = 0
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
kategori_id = db.Column(db.Integer(), db.ForeignKey('kategori.kategoriId', ondelete='CASCADE'))
urunresmi = db.Column(db.String(80))
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer(), db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, kategori_id, urun_ozellikleri, urun_fiyati):
self.kategori_id = kategori_id
self.urun_ozellikleri = urun_ozellikleri
self.urun_fiyati = urun_fiyati
class kategori(db.Model):
__tablename__ = 'kategori'
kategoriId = db.Column(db.Integer, primary_key=True)
kategori_adi = db.Column(db.String(80))
def __init__(self, kategori_adi):
self.kategori_adi = kategori_adi
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
__tablename__ = 'siparis'
siparisId = db.Column(db.Integer, primary_key=True)
musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
siparisno = db.Column(db.Integer)
siparisTarihi = db.Column(db.Integer)
odemeId = db.Column(db.Integer())
def __init__(self, musteriId, urunId, siparisno, siparisTarihi, odemeId):
self.musteriId = musteriId
self.urunId = urunId
self.siparisno = siparisno
self.siparisTarihi = siparisTarihi
self.odemeId = odemeId
user_manager = UserManager(app, db, Kullanici)
db.create_all()
if not Kullanici.query.filter(Kullanici.email == request.form['email']).first():
kullanici = Kullanici(
email=request.form['email'],
tarih=datetime.datetime.utcnow(),
sifre=user_manager.hash_password(request.form['sifre']),
)
# Create 'admin@example.com' user with 'Admin' and 'Agent' roles
if not Kullanici.query.filter(Kullanici.email == 'admin@example.com').first():
user = User(
email='admin@example.com',
email_confirmed_at=datetime.datetime.utcnow(),
password=user_manager.hash_password('Password1'),
)
@app.route('/')
def anasayfa():
return render_template('index.html')
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
if request.method == 'POST':
mail = request.form['email']
parola = request.form['sifre']
yeniKullanici = Kullanici(email=mail, sifre=parola)
db.session.add(yeniKullanici)
db.session.commit()
if yeniKullanici is not None:
mesaj = "Kayıt Başarıyla Sağlanmıştır."
return render_template("index.html", mesaj=mesaj)
else:
return render_template('kayit.html')
@app.route('/uye', methods=['GET', 'POST'])
def uye():
return render_template("uyeGirisi.html")
@app.route('/giris', methods=['GET', 'POST'])
def giris():
session['giris_yap']=False
if request.method=='GET':
if(session['giris_yap']==True):
return redirect(url_for('index'))
else:
return render_template('uyeGirisi.html')
else:
email=request.form['email']
parola=request.form['sifre']
active=0
try:
if Kullanici.query.filter_by(email=email,sifre=parola,active=1).first():
@app.route('/admin')
@roles_required('admin')
def admin():
return "naber selin ya"
return app
if __name__ == '__main__':
app = create_app()
# app.run(host='0.0.0.0', port=5000, debug=True)
app.run(host='127.0.0.1', port=5000, debug=True) | [
"nergis.aktug2014@gmail.com"
] | nergis.aktug2014@gmail.com |
e02728d9fc94a43001308defdd5483846398a4de | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ime/models/ar_net.py | 218ee2b12134f1c56ce580873cc51c45a26c1c8e | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 3,614 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class that implement ARNet."""
import torch
import torch.nn as nn
class ARNet(nn.Module):
"""Auto Regressive model as described in https://arxiv.org/abs/1911.12436.
"""
def __init__(self, n_forecasts, n_lags, device):
"""Initializes a ARNet instance.
Args:
n_forecasts: Number of time steps to forecast
n_lags: Lags (past time steps) used to make forecast
device: Device used by the model
"""
super(ARNet, self).__init__()
self.n_lags = n_lags
self.device = device
self.n_forecasts = n_forecasts
self.fc = nn.Linear(n_lags, 1, bias=False)
nn.init.kaiming_normal_(self.fc.weight, mode="fan_in")
def forward(self, x, true_output):
"""Forward pass for ARNet.
Args:
x: A tensor of shape `(batch_size, n_lags)
true_output: Actual forecast this is used for teacher forcing during
training
Returns:
output: Forecast a tensor of shape `(batch_size, n_forecasts)`
"""
output = torch.zeros((x.shape[0], self.n_forecasts)).to(self.device)
output[:, 0] = self.fc(x).squeeze()
if self.n_forecasts > self.n_lags:
# If the forecast larger the lags than use orignal input and shift untill
# the orginal inputs are done than use true output (teacher forecing).
for i in range(1, self.n_lags):
output[:,
i] = self.fc(torch.cat((x[:, i:], true_output[:, :i]),
dim=1)).squeeze()
for i in range(0, self.n_forecasts - self.n_lags):
output[:, self.n_lags + i] = self.fc(
true_output[:, i:i + self.n_lags]).squeeze()
else:
for i in range(1, self.n_forecasts):
output[:,
i] = self.fc(torch.cat((x[:, i:], true_output[:, :i]),
dim=1)).squeeze()
return output
def predict(self, x):
"""Function used during testing to make predictions in an auto regressive style.
Args:
x : A tensor of shape `(batch_size, n_lags)
Returns:
output: Forecast a tensor of shape `(batch_size, n_forecasts)`
"""
output = torch.zeros((x.shape[0], self.n_forecasts)).to(self.device)
output[:, 0] = self.fc(x).squeeze()
if self.n_forecasts > self.n_lags:
# If the forecast larger the lags than use orignal input and shift untill
# the orginal inputs are done than the input will only contain forecasted
# values
for i in range(1, self.n_lags):
output[:, i] = self.fc(torch.cat((x[:, i:], output[:, :i]),
dim=1)).squeeze()
for i in range(0, self.n_forecasts - self.n_lags):
output[:,
self.n_lags + i] = self.fc(output[:,
i:i + self.n_lags]).squeeze()
else:
for i in range(1, self.n_forecasts):
output[:, i] = self.fc(torch.cat((x[:, i:], output[:, :i]),
dim=1)).squeeze()
return output
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f9c7055b43709d3b9c8e75815970faffbc2bdfd7 | e7f7a4688c587978129f6e95a4735ba99b44028e | /python/aocrecs/logic/users.py | 30ea6147f129eeaaea8e232983a432bc9d972827 | [] | no_license | Jester-5115/aocrecs.com | 97eb521e0006a54e25c2984062134140fb680976 | d6e60a0211f0d8aa6a81f30f2153da1947da9078 | refs/heads/master | 2022-06-11T06:42:26.967342 | 2020-05-06T00:57:04 | 2020-05-06T00:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,254 | py | """Users."""
import asyncio
from aocrecs.cache import cached
@cached(warm=True, ttl=86400)
async def get_people(database):
"""Get all people."""
query = """
select
people.id, people.name, people.country, count(distinct match_id) as match_count,
min(extract(year from matches.played)) as first_year, max(extract(year from matches.played)) as last_year
from people join users on people.id=users.person_id
join players on users.id=players.user_id and players.platform_id=users.platform_id
join matches on players.match_id=matches.id
where players.human=true
group by people.id, people.name, people.country
order by people.name
"""
return list(map(dict, await database.fetch_all(query)))
@cached(ttl=86400)
async def get_person(context, person_id):
"""Get a person."""
person_query = """
select id, name, country, aoeelo_rank, aoeelo_rate, earnings, first_name, last_name,
aoeelo_id, esportsearnings_id, case when portrait is not null then true else false end as has_portrait,
twitch, mixer, douyu, youtube, discord
from people
where id=:person_id
"""
account_query = """
select users.id, users.platform_id, max(players.name) as name, platforms.name as platform_name
from users join players on players.user_id=users.id and players.platform_id=users.platform_id
join platforms on users.platform_id=platforms.id
where person_id=:person_id and players.human=true
group by users.id, users.platform_id, platforms.name
order by platforms.name, max(players.name)
"""
event_query = """
select distinct events.id, events.name, events.year
from people join users on people.id=users.person_id
join players on users.id=players.user_id and players.platform_id=users.platform_id
join matches on players.match_id=matches.id
join events on events.id=matches.event_id
where person_id=:person_id and players.human=true
order by events.year desc
"""
alias_query = """
select distinct players.name, players.user_name
from users join players on players.user_id=users.id and players.platform_id=users.platform_id
where person_id=:person_id and players.human=true
"""
person, accounts, aliases, events = await asyncio.gather(
context.database.fetch_one(person_query, values=dict(person_id=person_id)),
context.database.fetch_all(account_query, values=dict(person_id=person_id)),
context.database.fetch_all(alias_query, values=dict(person_id=person_id)),
context.database.fetch_all(event_query, values=dict(person_id=person_id))
)
aliases_set = set()
for row in aliases:
if row['name']:
aliases_set.add(row['name'])
if row['user_name']:
aliases_set.add(row['user_name'])
return dict(
person,
portrait_link=context.request.url_for('portrait', person_id=person['id']) if person['has_portrait'] else None,
accounts=[
dict(
id=a['id'],
name=a['name'],
platform_id=a['platform_id'],
platform=dict(id=a['platform_id'], name=a['platform_name'])
) for a in accounts
],
aliases=list(aliases_set),
events=[dict(e) for e in events]
)
@cached(ttl=86400)
async def get_user(database, user_id, platform_id):
"""Get user."""
query = """
select u.user_id, u.name, u.user_name, people.id as person_id, people.name as person_name, people.country
from (
select user_name, name, user_id
from players join matches on players.match_id=matches.id
where players.user_id=:user_id and players.platform_id=:platform_id and players.human=true
order by matches.played desc limit 1
) as u join users on u.user_id=users.id
left join people on users.person_id=people.id
"""
user = await database.fetch_one(query, values={'user_id': user_id, 'platform_id': platform_id})
person = None
if user['person_name']:
person = dict(
id=user['person_id'],
name=user['person_name'],
country=user['country']
)
return dict(
id=user_id,
platform_id=platform_id,
name=user['user_name'] or user['name'],
person=person
)
@cached(ttl=86400)
async def get_top_map(database, user_id, platform_id):
"""Get top map for user."""
query = """
select map_name as name
from players join matches on players.match_id=matches.id
where user_id=:id and matches.platform_id=:platform_id and winner=true and human=true
group by map_name
order by count(id) desc limit 1
"""
top = await database.fetch_one(query, values={'id': user_id, 'platform_id': platform_id})
if top:
return dict(top)
return None
@cached(ttl=86400)
async def get_top_civilization(database, user_id, platform_id):
"""Get top civilizations for user."""
query = """
select civilization_id as id, civilizations.name, civilizations.dataset_id
from players join civilizations on players.dataset_id=civilizations.dataset_id and players.civilization_id=civilizations.id
where user_id=:id and platform_id=:platform_id and winner=true and human=true
group by civilization_id, civilizations.name, civilizations.dataset_id
order by count(match_id) desc limit 1
"""
top = await database.fetch_one(query, values={'id': user_id, 'platform_id': platform_id})
if top:
return dict(top)
return None
@cached(ttl=86400)
async def get_top_dataset(database, user_id, platform_id):
"""Get top dataset for user."""
query = """
select dataset_id as id, datasets.name
from players join datasets on players.dataset_id=datasets.id
where user_id=:id and platform_id=:platform_id and human=true
group by dataset_id, datasets.name
order by count(match_id) desc limit 1
"""
return dict(await database.fetch_one(query, values={'id': user_id, 'platform_id': platform_id}))
| [
"happyleaves.tfr@gmail.com"
] | happyleaves.tfr@gmail.com |
133719c18752571f8bdb0264e27fc9d332272cf0 | b0eef0efd10556a4b054574fdd2d43124cb0856b | /npbench/benchmarks/polybench/gemm/gemm_dace.py | 60a2a8faeadf250f679ec0ac4e94b79a20fadd19 | [
"BSD-3-Clause"
] | permissive | learning-chip/npbench | 140d38be2095b54393de6e0008264b54b7cf686b | f2f545afe3603d5c8f1771f26d660f25ce4a3cda | refs/heads/main | 2023-05-10T09:54:52.719759 | 2021-05-31T12:09:48 | 2021-05-31T12:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | import numpy as np
import dace as dc
NI, NJ, NK = (dc.symbol(s, dtype=dc.int64) for s in ('NI', 'NJ', 'NK'))
@dc.program
def kernel(alpha: dc.float64, beta: dc.float64, C: dc.float64[NI, NJ],
A: dc.float64[NI, NK], B: dc.float64[NK, NJ]):
C[:] = alpha * A @ B + beta * C
| [
"alexandros.ziogas@inf.ethz.ch"
] | alexandros.ziogas@inf.ethz.ch |
9ed4a38cd1b16b119a215d4a84dccafd921ba499 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /bpmn/models/timer_event_definition.py | 5840f64a909841c9a4b1037fac983ef652b1ef2f | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 341 | py | from dataclasses import dataclass
from .t_timer_event_definition import TTimerEventDefinition
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TimerEventDefinition(TTimerEventDefinition):
class Meta:
name = "timerEventDefinition"
namespace = "http://www.omg.org/spec/BPMN/20100524/MODEL"
| [
"chris@komposta.net"
] | chris@komposta.net |
993491362cac36aa4d43c4583fbe256a8c1b0a1b | 06b06ce31d4369dcb2a998a80fb7e5a3349803ce | /pm4pyws/handlers/xes/process_schema/indbpmn_freq/get_vis.py | 4ad8061aed08cc453266324ca1be1ff655f08767 | [
"AGPL-3.0-only"
] | permissive | Javert899/pm4py-ws | f3df4dea442ff0e46fc5ee6df427520c580c96b5 | 78fa062df449d3e5076df87f094f9d5461684f1a | refs/heads/master | 2021-07-07T22:59:35.024414 | 2020-06-02T06:00:57 | 2020-06-02T06:00:57 | 175,439,265 | 0 | 1 | Apache-2.0 | 2019-03-13T14:39:16 | 2019-03-13T14:39:16 | null | UTF-8 | Python | false | false | 5,229 | py | from pm4py.algo.discovery.inductive.versions.dfg import imdfb as inductive_miner
from pm4py.objects.petri.exporter.pnml import export_petri_as_string
from pm4py.visualization.common.utils import get_base64_from_gviz, get_base64_from_file
from pm4py.visualization.petrinet import factory as pn_vis_factory
from pm4py.algo.filtering.log.auto_filter import auto_filter
from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.algo.conformance.tokenreplay.versions import token_replay
from pm4py.util import constants as pm4_constants
from pm4py.objects.log.util import xes
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4pyws.util import get_graph
from pm4py.visualization.petrinet.versions import token_decoration
from pm4pybpmn.visualization.bpmn.util import convert_performance_map
from pm4pybpmn.objects.bpmn.exporter import bpmn20 as bpmn_exporter
import base64
from pm4pyws.util import constants
from pm4pybpmn.objects.conversion.petri_to_bpmn import factory as petri_to_bpmn
from pm4pybpmn.visualization.bpmn import factory as bpmn_vis_factory
from pm4pybpmn.visualization.bpmn.util import bpmn_embedding
from pm4pybpmn.objects.bpmn.util import bpmn_diagram_layouter
from pm4pybpmn.visualization.bpmn.util import convert_performance_map
from pm4py.algo.filtering.dfg.dfg_filtering import clean_dfg_based_on_noise_thresh
from pm4py.algo.discovery.dfg import factory as dfg_factory
def apply(log, parameters=None):
"""
Gets the Petri net through Inductive Miner, decorated by frequency metric
Parameters
------------
log
Log
parameters
Parameters of the algorithm
Returns
------------
base64
Base64 of an SVG representing the model
model
Text representation of the model
format
Format of the model
"""
if parameters is None:
parameters = {}
decreasingFactor = parameters[
"decreasingFactor"] if "decreasingFactor" in parameters else constants.DEFAULT_DEC_FACTOR
activity_key = parameters[
pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
# reduce the depth of the search done by token-based replay
token_replay.MAX_REC_DEPTH = 1
token_replay.MAX_IT_FINAL1 = 1
token_replay.MAX_IT_FINAL2 = 1
token_replay.MAX_REC_DEPTH_HIDTRANSENABL = 1
log = attributes_filter.filter_log_on_max_no_activities(log, max_no_activities=constants.MAX_NO_ACTIVITIES,
parameters=parameters)
filtered_log = auto_filter.apply_auto_filter(log, parameters=parameters)
activities_count = attributes_filter.get_attribute_values(filtered_log, activity_key)
activities = list(activities_count.keys())
start_activities = list(start_activities_filter.get_start_activities(filtered_log, parameters=parameters).keys())
end_activities = list(end_activities_filter.get_end_activities(filtered_log, parameters=parameters).keys())
dfg = dfg_factory.apply(filtered_log, parameters=parameters)
dfg = clean_dfg_based_on_noise_thresh(dfg, activities, decreasingFactor * constants.DEFAULT_DFG_CLEAN_MULTIPLIER,
parameters=parameters)
net, im, fm = inductive_miner.apply_dfg(dfg, parameters=parameters, activities=activities,
start_activities=start_activities, end_activities=end_activities)
# parameters["format"] = "svg"
# gviz = pn_vis_factory.apply(net, im, fm, log=log, variant="frequency", parameters=parameters)
bpmn_graph, el_corr, inv_el_corr, el_corr_keys_map = petri_to_bpmn.apply(net, im, fm)
aggregated_statistics = token_decoration.get_decorations(filtered_log, net, im, fm,
parameters=parameters, measure="frequency")
bpmn_aggreg_statistics = convert_performance_map.convert_performance_map_to_bpmn(aggregated_statistics,
inv_el_corr)
# bpmn_graph = bpmn_embedding.embed_info_into_bpmn(bpmn_graph, bpmn_aggreg_statistics, "frequency")
bpmn_graph = bpmn_diagram_layouter.apply(bpmn_graph)
bpmn_string = bpmn_exporter.get_string_from_bpmn(bpmn_graph)
gviz = bpmn_vis_factory.apply_petri(net, im, fm, aggregated_statistics=aggregated_statistics, variant="frequency",
parameters={"format": "svg"})
gviz2 = bpmn_vis_factory.apply_petri(net, im, fm, aggregated_statistics=aggregated_statistics, variant="frequency",
parameters={"format": "dot"})
svg = get_base64_from_file(gviz.name)
gviz_base64 = get_base64_from_file(gviz2.name)
ret_graph = get_graph.get_graph_from_petri(net, im, fm)
return svg, export_petri_as_string(net, im,
fm), ".pnml", "xes", activities, start_activities, end_activities, gviz_base64, ret_graph, "indbpmn", "freq", bpmn_string, ".bpmn", activity_key
| [
"a.berti@pads.rwth-aachen.de"
] | a.berti@pads.rwth-aachen.de |
741025911f1f089732b7ae56e651f09b18d60dee | 0909dd4fd63f093022369948622c2627a5ddc47c | /data/root-.pyload-config/userplugins/hoster/PotloadCom.py | a197a1370e1f3a24cde538a76ef14c790fa660bc | [] | no_license | kurtiss/htpc | 3c4f523f0b12e878211d51c1ea63ec1645d4f62c | ef6d859b92dbcace76abef04ef251ee0bf09cf8b | refs/heads/master | 2021-01-18T13:46:47.932073 | 2015-07-01T21:51:18 | 2015-07-01T21:51:18 | 34,362,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | # -*- coding: utf-8 -*-
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo
class PotloadCom(DeadHoster):
    """Stub plugin marking the defunct Potload.com hoster as dead."""
    # Plugin metadata consumed by the pyLoad plugin loader.
    __name__ = "PotloadCom"
    __type__ = "hoster"
    __version__ = "0.02"
    # Download-link pattern this plugin claims (12-character file id).
    __pattern__ = r'http://(?:www\.)?potload\.com/\w{12}'
    __description__ = """Potload.com hoster plugin"""
    __author_name__ = "stickell"
    __author_mail__ = "l.stickell@yahoo.it"


# Module-level info hook generated for the dead hoster.
getInfo = create_getInfo(PotloadCom)
| [
"kurtiss@gmail.com"
] | kurtiss@gmail.com |
0a0f62ffcda8415ae96d81cc0d92adf29ef4e134 | 45df3588d0ec1a2bd7dbe4af104a49aa5775d034 | /login/migrations/0006_auto_20150704_0050.py | 6f6528eefa50ad415c8b0b0d31a033845f338a3a | [] | no_license | wittawin/DB_Project | 043db7eb3d70ef32c9c97d51a242775b3e115f73 | 1cc1fe84c75906d670f7bb4dd130093bc15035b8 | refs/heads/master | 2020-04-06T03:43:21.516583 | 2015-07-13T05:47:09 | 2015-07-13T05:47:09 | 37,700,817 | 0 | 1 | null | 2015-06-19T04:06:07 | 2015-06-19T04:06:06 | JavaScript | UTF-8 | Python | false | false | 1,549 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    """Add one-char choice fields to Teacher and UserProfile.

    Choice labels are Thai strings stored as escaped UTF-8 byte literals.
    """

    dependencies = [
        ('login', '0005_auto_20150702_1652'),
    ]

    operations = [
        # Teacher.academic_position: single-character code for academic rank.
        # NOTE(review): default=django.utils.timezone.now on a CharField looks
        # like a one-off makemigrations answer -- confirm the intended default.
        migrations.AddField(
            model_name='teacher',
            name='academic_position',
            field=models.CharField(default=django.utils.timezone.now, max_length=1, choices=[(b'0', b''), (b'1', b'\xe0\xb8\x9c\xe0\xb8\xb9\xe0\xb9\x89\xe0\xb8\x8a\xe0\xb9\x88\xe0\xb8\xa7\xe0\xb8\xa2\xe0\xb8\xa8\xe0\xb8\xb2\xe0\xb8\xaa\xe0\xb8\x95\xe0\xb8\xa3\xe0\xb8\xb2\xe0\xb8\x88\xe0\xb8\xb2\xe0\xb8\xa3\xe0\xb8\xa2\xe0\xb9\x8c'), (b'2', b'\xe0\xb8\xa3\xe0\xb8\xad\xe0\xb8\x87\xe0\xb8\xa8\xe0\xb8\xb2\xe0\xb8\xaa\xe0\xb8\x95\xe0\xb8\xa3\xe0\xb8\xb2\xe0\xb8\x88\xe0\xb8\xb2\xe0\xb8\xa3\xe0\xb8\xa2\xe0\xb9\x8c'), (b'3', b'\xe0\xb8\xa8\xe0\xb8\xb2\xe0\xb8\xaa\xe0\xb8\x95\xe0\xb8\xa3\xe0\xb8\xb2\xe0\xb8\x88\xe0\xb8\xb2\xe0\xb8\xa3\xe0\xb8\xa2\xe0\xb9\x8c')]),
            preserve_default=False,
        ),
        # UserProfile.prefix_name: single-character code for honorific prefix.
        migrations.AddField(
            model_name='userprofile',
            name='prefix_name',
            field=models.CharField(default=django.utils.timezone.now, max_length=1, choices=[(b'0', b'\xe0\xb8\x99\xe0\xb8\xb2\xe0\xb8\xa2'), (b'1', b'\xe0\xb8\x99\xe0\xb8\xb2\xe0\xb8\x87'), (b'2', b'\xe0\xb8\x99\xe0\xb8\xb2\xe0\xb8\x87\xe0\xb8\xaa\xe0\xb8\xb2\xe0\xb8\xa7'), (b'3', b'\xe0\xb8\x94\xe0\xb8\xa3.')]),
            preserve_default=False,
        ),
    ]
| [
"o_k_t@hotmail.com"
] | o_k_t@hotmail.com |
d4e6b2b4adda45acf4e45b2520d5c9f3185ba272 | a9386fd8a14e66c27b5059f562dc239f2c4b0ff7 | /MARC/scripts/identify_main_records.py | 9f920cb300e3828fb4c594f1247bf856a943d6e5 | [] | no_license | bentley-historical-library/vandura | 20f93e2f9cf2370e40537f863da9f2f19db329a0 | 0fefc0bf92c2487987a9c23e70187718c3b949f0 | refs/heads/master | 2021-01-17T00:54:08.023435 | 2016-11-04T20:00:04 | 2016-11-04T20:00:04 | 37,206,505 | 0 | 18 | null | 2016-11-04T20:00:05 | 2015-06-10T15:45:33 | Python | UTF-8 | Python | false | false | 808 | py | from vandura.config import marc_dir
from lxml import etree
import os
from os.path import join
# Python 2 script: scan joined MARCXML exports and report files where the
# "main" record cannot be identified.
# MARC21 XML namespace used by all xpath queries below.
ns = {'marc': 'http://www.loc.gov/MARC21/slim'}
marcxml_dir = join(marc_dir, "marcxml_no_ead_joined")
no_main_record = []
# A multi-record file has a unique main record when all but exactly one
# record carry a linking field (580, 773, or the Aleph LKR tag).
for filename in os.listdir(marcxml_dir):
    print filename
    tree = etree.parse(join(marcxml_dir, filename))
    records = tree.xpath("//marc:record", namespaces=ns)
    if len(records) > 1:
        five80s = tree.xpath("//marc:datafield[@tag='580']", namespaces=ns)
        seven73s = tree.xpath("//marc:datafield[@tag='773']", namespaces=ns)
        LKRs = tree.xpath("//marc:datafield[@tag='LKR']", namespaces=ns)
        # If no linking-field count leaves exactly one unlinked record,
        # the file is flagged as having no identifiable main record.
        if (len(records) - len(five80s) != 1) and (len(records) - len(seven73s) != 1) and (len(records) - len(LKRs) != 1):
            no_main_record.append(filename)
print "Unid main records: ", no_main_record
| [
"djpillen@umich.edu"
] | djpillen@umich.edu |
53d329f2547fb0ca91031e8381921444d11ea1ef | 4392b40a932619bf8168364cc1df5695069d7de0 | /company/migrations/0001_initial.py | 388fb36a0a4c18497a7718512fa722d44b12f193 | [] | no_license | kashul/python-employee-management | b704aa5276029da55286d9b731d8c306403c77c8 | 99669c889bfafe1a6aa430e88cc947768a34d0ed | refs/heads/master | 2021-01-05T07:53:04.894932 | 2020-02-16T20:18:24 | 2020-02-16T20:18:24 | 240,941,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Generated by Django 2.2.9 on 2020-02-12 08:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: create the Company table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                # Optional contact / branding columns (nullable).
                ('email', models.EmailField(max_length=100, null=True)),
                ('logo', models.ImageField(null=True, upload_to='logos')),
                ('website', models.CharField(max_length=100, null=True)),
            ],
        ),
    ]
| [
"you@example.com"
] | you@example.com |
483fe42f132aa6d928108f3433fc12b490fb879c | 84b266bbe18394196ee64cad190b2550189e46a6 | /catkin_carto/build/cartographer_ros/catkin_generated/generate_cached_setup.py | cf9094534137c2e280e5272a056ce5509eeffcfd | [] | no_license | Asher-1/Robots | 4d3a2f9938720a116a3eb749e36decb878b47aa5 | 8056144d1a677584b92db084704b32c540dd6ce8 | refs/heads/master | 2022-03-17T09:10:12.012984 | 2019-09-30T07:36:37 | 2019-09-30T07:36:37 | 209,523,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
# Generated by catkin: locate catkin's Python package (either next to the
# installed CMake config or inside one of the chained workspaces), then
# write the cached environment script and mark it executable.
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/yons/develop/AI/V_Slam/catkin_carto/devel;/home/yons/develop/AI/V_Slam/cubeslam_ws/devel;/home/yons/develop/AI/V_Slam/sim_platform/devel;/opt/ros/melodic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Render the cached environment for this package's private devel space.
code = generate_environment_script('/home/yons/develop/AI/V_Slam/catkin_carto/devel/.private/cartographer_ros/env.sh')

output_filename = '/home/yons/develop/AI/V_Slam/catkin_carto/build/cartographer_ros/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# chmod u+x so the generated setup script can be sourced/executed directly.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"ludahai19@163.com"
] | ludahai19@163.com |
3c3ecd6cf0faf3fc4b6e48066c32bd06f5121123 | a802c639bd7af799c6089a6ccda671a7f2436952 | /Code/palindromes-and-strings/palindromes.py | 04474ab25877392f5242f74190ab17078aaedc4e | [] | no_license | franklin-phan/cs1.3-code | 45a2cf045b5c4543def86d71b1cf89a3c8e880b5 | 793238b42e5164cf35bc4d66d3126e07763f67f5 | refs/heads/master | 2022-07-18T19:40:57.559754 | 2020-05-16T15:35:33 | 2020-05-16T15:35:33 | 261,888,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,556 | py | #!python
import string
# Hint: Use these string constants to ignore capitalization and/or punctuation
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
def is_palindrome(text):
    """A string of characters is a palindrome if it reads the same forwards and
    backwards, ignoring punctuation, whitespace, and letter casing."""
    # implement is_palindrome_iterative and is_palindrome_recursive below, then
    # change this to call your implementation to verify it passes all tests
    assert isinstance(text, str), 'input is not a string: {}'.format(text)
    # Dispatches to the iterative implementation; swap the two lines below
    # to exercise the recursive one instead.
    return is_palindrome_iterative(text)
    # return is_palindrome_recursive(text)
def is_palindrome_iterative(text):
    """Return True if text is a palindrome, ignoring case and non-letters.

    Uses a two-pointer scan from both ends of the filtered string.
    An empty (or letter-free) string counts as a palindrome.
    """
    # Keep only letters and normalize case so punctuation, digits and
    # whitespace are ignored. (Removed a leftover debug print of the
    # filtered text.)
    letters = ''.join(ch for ch in text if ch.isalpha()).lower()
    left, right = 0, len(letters) - 1
    while left < right:
        if letters[left] != letters[right]:
            return False
        left += 1
        right -= 1
    return True
def is_palindrome_recursive(text, left=None, right=None):
    """Return True if text is a palindrome, ignoring case and non-letters.

    left/right are 0-based indices into the filtered (letters-only,
    lowercased) string and default to the full range.

    Fixes: removed a dead `pass` and misplaced scaffold comments, and the
    string is now filtered once up front instead of on every recursion
    level (the old version re-filtered per call, costing O(n) per level).
    """
    letters = ''.join(ch for ch in text if ch.isalpha()).lower()
    if left is None:
        left = 0
    if right is None:
        right = len(letters) - 1
    return _is_palindrome_range(letters, left, right)


def _is_palindrome_range(letters, left, right):
    # Compare the outermost pair, then recurse inward.
    if left >= right:
        return True
    if letters[left] != letters[right]:
        return False
    return _is_palindrome_range(letters, left + 1, right - 1)
def main():
    """CLI driver: print PASS/FAIL palindrome status for each argument."""
    import sys
    args = sys.argv[1:]  # Ignore script file name
    if len(args) > 0:
        for arg in args:
            is_pal = is_palindrome(arg)
            result = 'PASS' if is_pal else 'FAIL'
            is_str = 'is' if is_pal else 'is not'
            print('{}: {} {} a palindrome'.format(result, repr(arg), is_str))
    else:
        # No arguments given: show usage help instead.
        print('Usage: {} string1 string2 ... stringN'.format(sys.argv[0]))
        print(' checks if each argument given is a palindrome')


if __name__ == '__main__':
    main()
| [
"franklin.phan123@gmail.com"
] | franklin.phan123@gmail.com |
cab64dfea12c82e4e1dee006551d8ca5cd935379 | 958685165bfeb4122cc3473659a6d0c89c5cae95 | /crea8s_document/__openerp__.py | d30ebd982f07258e0590d64507368c1fd10d7b7e | [] | no_license | tringuyen17588/OpenERP-7.0 | 44efee7735af65d960c5adb4b03a1a329f5c4a57 | 2486261e4d351d4f444ec31e74c6b0e36ed2fb82 | refs/heads/master | 2021-01-10T02:45:24.320726 | 2016-02-19T06:05:21 | 2016-02-19T06:05:21 | 52,064,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: module metadata read by the server when the
# addon is discovered and installed.
{
    'name': 'Document Management Crea8s',
    'version': '1.0',
    'category': 'Document Management',
    'sequence': 14,
    # Fixed user-visible typo: "Magagement" -> "Management".
    'summary': 'Document Management Module created by Crea8s',
    'description': """ Document Management Module created by Crea8s """,
    'author': 'Crea8s',
    'website': 'http://www.crea8s.com',
    'images': [],
    'depends': ['base', 'document', 'crm'],
    # Data files loaded on install/update: views and access-control rules.
    'data': ["res_partner_view.xml",
             "security/security.xml",
             "security/ir.model.access.csv"],
    'demo': [],
    'test': [],
    'installable': True,
    'auto_install': False,
    'application': True,
}
| [
"tri@crea8s.com"
] | tri@crea8s.com |
e58dc2d0c83ac782f98bdc93e308cfaaf1cf99dc | 11e62879d16539494d49a25da66f70c79a390809 | /apps_data/courseevent/migrations/0011_auto_20150917_1206.py | d6d5b8ce371ad8f31197ac07af012de3424331f4 | [] | no_license | sabinem/mentoki | cdf558912f5811d9c78081a0e37c16d016fcb445 | 947881b5100d516a36cdff2bb629b2252b313c1b | refs/heads/master | 2021-04-30T12:15:14.831327 | 2018-02-22T12:25:42 | 2018-02-22T12:25:42 | 121,265,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Widen ClassroomMenuItem.item_type to the full set of menu-entry kinds.

    Choice labels are German strings stored with escaped umlauts.
    """

    dependencies = [
        ('courseevent', '0010_auto_20150917_1151'),
    ]

    operations = [
        migrations.AlterField(
            model_name='classroommenuitem',
            name='item_type',
            field=models.CharField(help_text='Welcher Art ist der Men\xfceintrag: \xdcberschrift, Link, etc?', max_length=15, verbose_name='Typ des Men\xfcpunkts', choices=[('forum', 'Forum: Forum wird publiziert'), ('lesson', 'Unterricht: Lektion wird publiziert '), ('announcements', 'Link zu Ank\xfcndigungsliste'), ('last_posts', 'Link zu den neuesten Beitr\xe4ge'), ('private', 'Link zum Privatbereich der Kursteilnehmer'), ('header', '\xdcberschrift'), ('participants', 'Link zur Teilnehmerliste'), ('lessonstep', 'Link zu einem Lernschritt')]),
        ),
    ]
| [
"sabine.maennel@gmail.com"
] | sabine.maennel@gmail.com |
536d55634727551f9b11918fe071badd22904a29 | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /1343. Maximum Product of Splitted Binary Tree.py | ff1ec6afa6cdba2b325dc5a13f9fd2aaafb48399 | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# postorder DFS, time/space O(n), space can be reduced to O(1)
# if we traverse the tree twice
class Solution(object):
    """LeetCode 1343: max product of the two subtree sums after one cut."""

    def maxProduct(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        MOD = 10 ** 9 + 7
        self.nums = []  # postorder list of every subtree sum
        self.postorder(root)
        # The final entry is the sum over the whole tree; cutting the edge
        # above a subtree with sum s leaves parts s and total - s.
        total = self.nums[-1]
        best = max(s * (total - s) for s in self.nums)
        return best % MOD

    def postorder(self, root):
        """Record the sum of every subtree; return the sum rooted here."""
        if not root:
            return 0
        here = root.val + self.postorder(root.left) + self.postorder(root.right)
        self.nums.append(here)
        return here
"""
Given a binary tree root. Split the binary tree into two subtrees by removing 1 edge such that the product of the sums of the subtrees are maximized.
Since the answer may be too large, return it modulo 10^9 + 7.
Example 1:
Input: root = [1,2,3,4,5,6]
Output: 110
Explanation: Remove the red edge and get 2 binary trees with sum 11 and 10. Their product is 110 (11*10)
Example 2:
Input: root = [1,null,2,3,4,null,null,5,6]
Output: 90
Explanation: Remove the red edge and get 2 binary trees with sum 15 and 6.Their product is 90 (15*6)
Example 3:
Input: root = [2,3,9,10,7,8,6,5,4,11,1]
Output: 1025
Example 4:
Input: root = [1,1]
Output: 1
Constraints:
Each tree has at most 50000 nodes and at least 2 nodes.
Each node's value is between [1, 10000].
Accepted
"""
| [
"wzhou007@ucr.edu"
] | wzhou007@ucr.edu |
f98c01a95f2a08c3020f7e5fde118ba80e8c4c0a | 0724a1443d36f43d7c65d53b4a382a8b4d4ddbdc | /fastreid/modeling/meta_arch/baseline.py | 25002ffa342bf2c90d28939a8d7dcf393e1790de | [] | no_license | zhaoyang10/fast-reid | cb36f8c331f4a1597b59146ca225fa339398ee81 | 8458bece5e66e5760db10bd79482fd5129080d77 | refs/heads/master | 2022-09-02T14:10:21.739881 | 2020-05-23T02:42:31 | 2020-05-23T02:42:31 | 266,256,158 | 2 | 0 | null | 2020-05-23T03:33:50 | 2020-05-23T03:33:49 | null | UTF-8 | Python | false | false | 1,886 | py | # encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from torch import nn
from fastreid.layers import GeneralizedMeanPoolingP
from fastreid.modeling.backbones import build_backbone
from fastreid.modeling.heads import build_reid_heads
from fastreid.modeling.losses import reid_losses
from .build import META_ARCH_REGISTRY
@META_ARCH_REGISTRY.register()
class Baseline(nn.Module):
    """Standard re-ID meta-architecture: backbone -> pooling -> head."""

    def __init__(self, cfg):
        super().__init__()
        self._cfg = cfg

        # Convolutional feature extractor.
        self.backbone = build_backbone(cfg)

        # Pooling layer feeding the head; any unrecognized name falls back
        # to the identity (no pooling).
        pool_name = cfg.MODEL.HEADS.POOL_LAYER
        if pool_name == 'avgpool':
            pool_layer = nn.AdaptiveAvgPool2d(1)
        elif pool_name == 'maxpool':
            pool_layer = nn.AdaptiveMaxPool2d(1)
        elif pool_name == 'gempool':
            pool_layer = GeneralizedMeanPoolingP()
        else:
            pool_layer = nn.Identity()

        self.heads = build_reid_heads(cfg, cfg.MODEL.HEADS.IN_FEAT,
                                      cfg.MODEL.HEADS.NUM_CLASSES, pool_layer)

    def forward(self, inputs):
        images = inputs["images"]

        if not self.training:
            embeddings = self.inference(images)
            # Evaluation batches may also carry labels and camera ids;
            # pass them through when present.
            try:
                return embeddings, inputs["targets"], inputs["camid"]
            except KeyError:
                return embeddings

        # Training path: look up targets before running the backbone so a
        # malformed batch fails fast.
        targets = inputs["targets"]
        feature_maps = self.backbone(images)  # (bs, 2048, 16, 8)
        return self.heads(feature_maps, targets)

    def inference(self, images):
        assert not self.training
        feature_maps = self.backbone(images)  # (bs, 2048, 16, 8)
        return self.heads(feature_maps)

    def losses(self, outputs):
        logits, feat, targets = outputs
        return reid_losses(self._cfg, logits, feat, targets)
| [
"sherlockliao01@gmail.com"
] | sherlockliao01@gmail.com |
bea7cc57bbbfcff2a1a64b8322a3ab6f10e6ee2f | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/RPG Quest Generator/World/Types/__init__.py | 19ee6e62f5111f3720e456ead6ae4fa5b8560e0c | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:860c31adaf073aea021ccfed493a9f4723e137aecec7153d79e47daede57d296
size 603
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
35bff181e53c2792d25843ffc5c9c62a576ce855 | 7bead245354e233f76fff4608938bf956abb84cf | /test/test_docx_to_jpg_result.py | a930dd574706fee13420d148e2031d88c030addb | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.Convert | 5ba499937b9664f37cb2700509a4ba93952e9d6c | dba2fe7257229ebdacd266531b3724552c651009 | refs/heads/master | 2021-10-28T23:12:42.698951 | 2021-10-18T03:44:49 | 2021-10-18T03:44:49 | 138,449,321 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | # coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_to_jpg_result import DocxToJpgResult # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxToJpgResult(unittest.TestCase):
    """DocxToJpgResult unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        pass

    def testDocxToJpgResult(self):
        """Test DocxToJpgResult"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudmersive_convert_api_client.models.docx_to_jpg_result.DocxToJpgResult() # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"35204726+Cloudmersive@users.noreply.github.com"
] | 35204726+Cloudmersive@users.noreply.github.com |
353f1336028ba1040a8d2d9147d2a48c7f9191fa | 5774101105b47d78adb7a57eefdfa21502bbd70c | /python 语法基础/d14_tkinter_python图形开发界面库/tkinter/4.Entry输入框控件.py | 9bf10d812b0679dcee7e06b82b7d000faf7f46ef | [] | no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 695 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: zhl
"""
Entry is the single-line text-input widget.
It can also be used to display simple text content.
"""
import tkinter

win=tkinter.Tk()
win.title("zhl")
win.geometry("400x400+200+0")  # 400x400 window placed at screen offset (200, 0)

# Plain input box.
entry1=tkinter.Entry(win)
entry1.pack()
entry2=tkinter.Entry(win,show="*")## show: character echoed instead of the input, e.g. for passwords
entry2.pack()

## Binding a variable:
e=tkinter.Variable() ## create the variable object
entry3=tkinter.Entry(win,textvariable=e) ## bind the variable to the input box
entry3.pack()
# `e` now stands for this input box's content.
# Set the value:
e.set("zhl is good man")
## Read the input box's value:
print(e.get()) ## way 1: through the bound variable
print(entry3.get()) ## way 2: through the widget itself
win.mainloop() | [
"zhlthunder@163.com"
] | zhlthunder@163.com |
9f4870d0d0b21b619f73baa217ea590c3fd450da | 493e117a8366b4cde04d4d9946aa785cc0192ecb | /Student/Collegeinfo/Myproject.py | e3f39ab293925e390dece1f110c1e56418428ba6 | [] | no_license | Srinivasareddymediboina/Web-Development-Srinivasa-Reddy- | adf59be95f656fd04823ab44db662c90f6ee22c9 | 715b58a0453fdd738c24b0045ed873e9254b097e | refs/heads/master | 2020-06-22T11:46:08.844365 | 2019-07-19T13:58:07 | 2019-07-19T13:58:07 | 197,709,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | from flask import Flask,render_template,url_for,request
from flask_sqlalchemy import SQLAlchemy
app=Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']="sqlite:///collegeinfo.db"
mydb=SQLAlchemy(app)
#database connection
class Signup(mydb.Model):
    """ORM model: one student signup row in the collegeinfo database."""
    # Surrogate primary key.
    id=mydb.Column(mydb.Integer,primary_key=True)
    s_name=mydb.Column(mydb.String(200))
    roll_no=mydb.Column(mydb.String(50))
    mail_id=mydb.Column(mydb.String(50))
    phone_no=mydb.Column(mydb.String(50))
    branch=mydb.Column(mydb.String(50))
    def __init__(self,name,rollno,emailid,phno,branch):
        # Map constructor arguments onto the mapped columns.
        self.s_name=name
        self.roll_no=rollno
        self.mail_id=emailid
        self.phone_no=phno
        self.branch=branch
@app.route('/myportal/signup',methods=['POST','GET'])
def signup():
    """Show the signup form on GET; on POST persist the submitted student."""
    if request.method != "POST":
        return render_template("signup.html")
    form = request.form
    new_student = Signup(form['sname'], form['rollno'], form['email'],
                         form['phno'], form['branch'])
    mydb.session.add(new_student)
    mydb.session.commit()
    return render_template('status.html')
@app.route('/myportal/studentList',methods=['POST','GET'])
def display():
    """Render every signup row via the showDetails template."""
    return render_template('showDetails.html',data=Signup.query.all())
if __name__=="__main__":
mydb.create_all()
app.run(debug=True) | [
"nivas0803@gmail.com"
] | nivas0803@gmail.com |
e8358392affc756e228a44fc5f8e9622e3308900 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_091/ch19_2020_09_09_20_16_20_148677.py | 8b1d6477e3882b89861c988bdaf5494780422e46 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | def classifica_triangulo(a,b,c):
def classifica_triangulo(a, b, c):
    """Classifica um triângulo pelos lados: "equilátero", "escaleno" ou "isósceles".

    Correção: `a != b != c` só testa a != b e b != c, então um triângulo
    isósceles com a == c (ex.: 2, 3, 2) era classificado como "escaleno".
    """
    if a == b == c:
        return "equilátero"
    # Escaleno exige os três lados todos diferentes.
    if a != b and b != c and a != c:
        return "escaleno"
    return "isósceles"
x=10
y=10
z=10
print(classifica_triangulo(x,y,z) | [
"you@example.com"
] | you@example.com |
05e15eaff68be2de7c993cd556581c4ef317d9ab | 7d406f258fb0023d8af653c8640925ea16b0d655 | /example/commands/ex_commands.py | 0eb87bccf3c9be3af867cd143a922d5f12a3bb50 | [] | no_license | volitilov/Docker_learn | d0923b8434132203112077de2de9ef1c66972a60 | 458132701554d2b63513d5840cf0a233314b487c | refs/heads/master | 2023-04-16T02:15:06.284505 | 2023-04-01T05:39:55 | 2023-04-01T05:39:55 | 148,593,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,850 | py | # ex_commands.py
# Примеры консольных команд для Docker
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
$ sudo chmod ug+s /usr/bin/docker
# Убирает постоянные запросы на права sudo
$ docker run -it ubuntu /bin/bash
# Флаг -i оставляет STDIN открытым, даже, когда вы не присоединены к
# контейнеру. Флаг -t назначает псевдо-tty контейнеру. Таким образом
# создается интерактивный интерфейс к контейнеру. Так же мы указываем
# название образа (ubuntu — базовый образ) и шелл /bin/bash.
$ exit
# Выйти из контейнера
$ docker ps -a
# Показывает список всех контейнеров, включая остановленные
$ docker run --name habrahabr -ti ubuntu
# Указывает другое имя контейнера при создании
$ docker start ubuntu
# Запуск контейнера ubuntu
# Обращаться к контейнеру можно не только по ID, но и по имени.
$ docker attach ubuntu
# Подключения к контейнеру ubuntu
$ docker run -v /tmp:/root -ti <имя образа>
# Подмонтировать папку хоста в контейнер при создании
# Где /tmp – путь к папке на хосте, а /root – путь к папке на сервере.
# Таким образом можно работать из контейнера с данными на хосте и исключить
# необходимость копирования данных в обе стороны.
$ docker run -it -p 80:80 --name nginx ubuntu:trusty
# Создаёт чистый контейнер с Ubuntu 14.04 с открытыми 80 и 443 портами
$ docker build -t volitilov/nginx ~/project
# Строит образ из Docker файла где volitilov – название репозитория, где
# будет храниться образ, nginx – имя образа. Последний параметр — путь к
# папке с Dockerfile. Если вы не укажете название образа, он автоматически
# получит название lastest.
$ docker build -t volitilov/nginx \ git@github.com:volitilov/nginx
# Указываем git репозиторий, где находится Dockerfile.
$ docker run -it 066b799ea548 /bin/bash
# Если инструкция не исполнится, мы можем создать контейнер из
# предпоследнего шага с ID образа 066b799ea548
$ docker run -d centos tail -f /dev/null
# Запускает образ в фоновом режиме
$ docker build --no-cache -t volitilov/nginx .
# По умолчанию Docker кеширует каждый шаг, формируя кеш сборок. Чтобы
# отключить кеш, например для использования последнего apt-get update,
# используйте флаг --no-cache.
$ docker pull nginx
# - Скачиваем образ nginx
$ docker run --name test_nginx2 -p 80:80
-v /home/x/html_files/:/usr/share/nginx/html:ro -d nginx
# - запускаем контейнер с именем (--name) test_nginx2
# - (-p) делаем проброс портов хоста и контейнера
# - (-v) указываем дерикторию для монтирования с дерикторией контейнера nginx
# - (-d) указывает докеру запустить контейнер в фоновом режиме
| [
"volitilov@gmail.com"
] | volitilov@gmail.com |
1d815389eb5d5cc4f8dfd679b3ab2b2b281e1ae2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200609191132.py | 95ca5d989d437a421a34401aef5d7ebfbf8f353f | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | import json
def Strings(str):
    """Merge "KEY:VALUE" entries, summing the values per key.

    Prints and returns the aggregate as "K1:V1,K2:V2,..." with keys in
    ascending order, e.g. ["Z:1","B:3","C:3","Z:4","B:2"] -> "B:5,C:3,Z:5"
    (the expected output noted below the original call).

    The previous version was syntactically broken (an `if` whose entire
    body was commented out); this implements the intended aggregation.
    The parameter shadows the builtin `str` but is kept for interface
    compatibility with existing callers.
    """
    totals = {}
    for entry in str:
        key, value = entry.split(":")
        totals[key] = totals.get(key, 0) + int(value)
    result = ",".join("{}:{}".format(key, totals[key]) for key in sorted(totals))
    print(result)
    return result
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
# "B:5,C:3,Z:5"
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
2ff51a591a4dd0ef1dbae1c23f2d680ecd694e88 | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/gnome/ui/RestartStyle.py | a71cd108a8b9be92cb98a6eb21bd9b784f5ea662 | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | # encoding: utf-8
# module gnome.ui
# from /usr/lib64/python2.6/site-packages/gtk-2.0/gnome/ui.so
# by generator 1.136
# no doc
# imports
import gnome.canvas as __gnome_canvas
import gobject as __gobject
import gobject._gobject as __gobject__gobject
import gtk as __gtk
# Auto-generated IDE stub describing the gnome.ui RestartStyle GEnum; the
# real implementation lives in the compiled extension module.
class RestartStyle(__gobject.GEnum):
    # no doc
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""

    __dict__ = None # (!) real value is ''
    # Four enum members (0-3); member names are not recoverable from the stub.
    __enum_values__ = {
        0: 0,
        1: 1,
        2: 2,
        3: 3,
    }
    __gtype__ = None # (!) real value is ''
| [
"pkalita@princeton.edu"
] | pkalita@princeton.edu |
5e2106eeff6d1bd89c7cca7cf86ee5f22713f038 | 8f77a1ae843c3ea650cabcacbc89c142f77feebd | /visualize.py | e0e68450d1c119689cc900081d58c50b112594b9 | [] | no_license | bpachev/cs640 | 57d114c11be89d8e4de31f388dbd3e57f54cb06e | 6f4481dbb9b5906cd9ad346ce7711aa7f9deab68 | refs/heads/master | 2020-08-05T12:12:14.623723 | 2019-10-14T21:04:37 | 2019-10-14T21:04:37 | 212,499,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | import numpy as np
import argparse as ap
import matplotlib.pyplot as plt
def ordered_balanced_sample(y, samples_per_class=20):
    """
    y -- a numpy array with k unique values
    samples_per_class -- an integer specifying how many samples to draw for each unique value
    Returns:
    inds -- indices to get the samples in order (grouped by class)
    Raises ValueError when y is too small to supply samples_per_class
    entries for every class.

    Fix: replaced Python-2-only ``xrange`` (and ``int(i/n)``) with
    ``range`` / ``//`` so the function also runs under Python 3.
    """
    sorted_inds = np.argsort(y)
    # unique() on the sorted labels gives, per class, the offset of its
    # first occurrence inside sorted_inds.
    vals, inds = np.unique(y[sorted_inds], return_index=True)
    num_classes = len(vals)
    if samples_per_class * num_classes > len(y):
        raise ValueError("Too many samples required {}*{} > {} !".format(samples_per_class, num_classes, len(y)))
    res = np.zeros(samples_per_class * num_classes, dtype=np.int64)
    # NOTE(review): only the total size is checked; a single class with
    # fewer than samples_per_class members would spill into the next class.
    for i in range(0, num_classes * samples_per_class, samples_per_class):
        j = inds[i // samples_per_class]
        res[i:i + samples_per_class] = sorted_inds[j:j + samples_per_class]
    return res
def visualize_histograms(ark):
    """Show per-sample feature columns (left) beside per-class means (right).

    ark -- npz-style mapping with 'features' (n x d array) and 'labels'.
    Python 2 code (uses xrange); blocks on plt.show().
    """
    samples = 20
    mask = ordered_balanced_sample(ark['labels'],samples_per_class=samples)
    # Columns are samples, rows are feature dimensions.
    mat = ark['features'][mask].T
    plt.subplot(121)
    plt.imshow(mat)
    plt.subplot(122)
    total_classes = int(mat.shape[1]/samples)
    for i in xrange(total_classes):
        # Replace each class's columns with their mean column.
        # NOTE(review): the (400, 1) reshape hard-codes 400-dim features -- confirm.
        mat[:,i*samples:(i+1)*samples] = np.mean(mat[:,i*samples:(i+1)*samples], axis=1).reshape((400,1))
    plt.imshow(mat)
    plt.show()
def plot_patches(mat, patch_size):
    """Display 10 patches from `mat` in a 2x5 grid (Python 2 code).

    After the transpose, every patch occupies patch_size consecutive rows.
    NOTE(review): the loop runs i = 1..10, so patch 0 is skipped and
    i = 10 reads rows [10*patch_size:11*patch_size], one patch beyond
    what the guard below verifies -- looks like an off-by-one; confirm.
    """
    mat = mat.T
    if patch_size * 10 > mat.shape[0]:
        print "Less than 10 patches, not plotting"
        return
    for i in xrange(1,11):
        plt.subplot(2,5,i)
        plt.imshow(mat[i*patch_size:(i+1)*patch_size])
        # Hide axis ticks; these are image patches, not charts.
        plt.yticks([])
        plt.xticks([])
    plt.show()
if __name__ == "__main__":
parser = ap.ArgumentParser()
parser.add_argument("infile", type=ap.FileType('r'))
parser.add_argument("--mode", type=str, nargs="?", default="histograms")
parser.add_argument("--words", type=int, nargs="+")
args = parser.parse_args()
ark = np.load(args.infile)
if args.mode == "histograms":
visualize_histograms(ark)
elif args.mode == "words":
print np.argsort(ark['patch_sizes'])[-20:-10]
for word in args.words:
plot_patches(ark['patches_'+str(word)], ark['patch_sizes'][word])
else:
raise ValueError("Unrecognized visualization {}".format(args.mode))
| [
"benjaminpachev@gmail.com"
] | benjaminpachev@gmail.com |
4d513a0de2b90b6a4ad593424332864a3b945a96 | 14484978017d08be00c45acd10c4d10869f31b16 | /10-photoresistor/photoresistor.py | ddb63a923b855e8b71cdd31348042bc1bc51546b | [] | permissive | raspberrypi-tw/gpio-game-console | 991f1517ae60cf19c0dbce24fa5919ea46776425 | 5319addec034dae72bf829e5873626b00b69e3d5 | refs/heads/master | 2021-06-18T11:42:23.674188 | 2021-03-23T08:38:15 | 2021-03-23T08:38:15 | 47,773,598 | 15 | 17 | BSD-3-Clause | 2019-07-01T05:29:10 | 2015-12-10T16:23:42 | Python | UTF-8 | Python | false | false | 1,248 | py | #!/usr/bin/python3
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#|R|a|s|p|b|e|r|r|y|P|i|.|c|o|m|.|t|w|
#+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# photoresistor.py
# Sense the light by photoresistor
#
# Author : RaspberryPi-spy.co.uk
# Date : 06/22/2014
# Origin : http://www.raspberrypi-spy.co.uk/2013/10/analogue-sensors-on-the-raspberry-pi-using-an-mcp3008/
import spidev
import time
import os  # NOTE(review): unused in this script
# Open SPI bus 0, chip-select 0 -- the MCP3008 ADC.
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz = 1800000  # 1.8 MHz SPI clock
def ReadChannel(channel):
    """Return the raw 10-bit reading (0-1023) from MCP3008 channel 0-7."""
    # Single-ended conversion: start bit, then (8 + channel) in the high
    # nibble of the second byte, plus one padding byte to clock the
    # result out.  The 10-bit answer spans the low 2 bits of reply[1]
    # and all of reply[2].
    reply = spi.xfer2([1, (8 + channel) << 4, 0])
    return ((reply[1] & 3) << 8) + reply[2]
def ConvertVolts(data, places, vref=3.3):
    """Convert a raw 10-bit ADC reading to volts.

    data   -- raw reading in the range 0-1023
    places -- number of decimal places to round to
    vref   -- ADC reference voltage; defaults to 3.3 V (the Pi's rail),
              preserving the original hard-coded behaviour
    """
    volts = (data * vref) / float(1023)
    return round(volts, places)
# MCP3008 channel wired to the photoresistor voltage divider
light_channel = 0
# seconds between readings
delay = 1
try:
    # Poll the sensor forever, printing the raw value and volts once per
    # delay interval; Ctrl-C terminates.
    while True:
        light_level = ReadChannel(light_channel)
        light_volts = ConvertVolts(light_level, 2)
        print("--------------------------------------------")
        print("Light: {} ({}V)".format(light_level,light_volts))
        #resistor_ohms = int(light_volts/(3.3 - light_volts) * 1000)
        #print("Light: {} ({}V), Resistor: {}(ohms)".format(light_level,light_volts, resistor_ohms))
        time.sleep(delay)
except KeyboardInterrupt:
    # Clean exit on Ctrl-C.
    print("Exception: KeyboardInterrupt")
"sosorry@raspberrypi.com.tw"
] | sosorry@raspberrypi.com.tw |
12ee5ec32428fcb5d2cd8286d92efb4e5a28ada7 | a5747577f1f4b38823f138ec0fbb34a0380cd673 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisST_t-channel_antitop_4f_inclusiveDecays.py | ee7e0b4a08f8c621e19fb53592e5c6b27702b852 | [] | no_license | xdlyu/fullRunII_ntuple | 346fc1da4cec9da4c404aa1ec0bfdaece6df1526 | aa00ca4ce15ae050c3096d7af779de44fc59141e | refs/heads/master | 2020-08-03T07:52:29.544528 | 2020-01-22T14:18:12 | 2020-01-22T14:18:12 | 211,673,739 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,395 | py | from WMCore.Configuration import Configuration
# CRAB3 task configuration: ntuple production on the 2016 (80X, Moriond17)
# ST t-channel antitop inclusive-decays MC sample.
config = Configuration()
config.section_("General")
# CRAB task name; kept in sync with Data.outputDatasetTag below.
config.General.requestName = 'ST_t-channel_antitop_4f_inclusiveDecays'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Summer16 jet-energy-correction text files shipped with every job:
# AK4/AK8 jets, CHS and Puppi, L1FastJet/L2Relative/L3Absolute levels.
config.JobType.inputFiles = ['Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt','Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.txt','Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi.txt','Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.sendExternalFolder = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'
config.Data.inputDBS = 'global'
#config.Data.inputDBS = 'phys03'
# Split by input files, 5 files per job, process the whole dataset (-1).
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
# Components of the EOS output path: .../STEAM/<steam_dir>/<name>/
name = 'WWW'
steam_dir = 'xulyu'
config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'ST_t-channel_antitop_4f_inclusiveDecays'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
"XXX@cern.ch"
] | XXX@cern.ch |
a8a2b17652d06ce3c5c3f7dcb3f4ccdadcb4f203 | 32226e72c8cbaa734b2bdee081c2a2d4d0322702 | /railrl/torch/vae/dataset/sawyer_door_push_and_reach_data.py | 84da7999020d7b64fe0be7e0b8977003e3597c01 | [
"MIT"
] | permissive | Asap7772/rail-rl-franka-eval | 2b1cbad7adae958b3b53930a837df8a31ab885dc | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | refs/heads/master | 2022-11-15T07:08:33.416025 | 2020-07-12T22:05:32 | 2020-07-12T22:05:32 | 279,155,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | import time
import numpy as np
import os.path as osp
import pickle
from gym.spaces import Box
from multiworld.envs.mujoco.sawyer_xyz.sawyer_door import SawyerDoorPushOpenEnv, SawyerDoorPushOpenEnv, SawyerDoorPushOpenAndReachEnv
from multiworld.core.image_env import ImageEnv
from railrl.exploration_strategies.base import PolicyWrappedWithExplorationStrategy
from railrl.exploration_strategies.ou_strategy import OUStrategy
from railrl.images.camera import sawyer_door_env_camera, sawyer_door_env_camera
import cv2
from railrl.misc.asset_loader import local_path_from_s3_or_local_path, sync_down
from railrl.policies.simple import RandomPolicy
from railrl.torch import pytorch_util as ptu
def generate_vae_dataset(
        N=10000, test_p=0.9, use_cached=True, imsize=84, show=False,
        dataset_path=None, env_class=None, env_kwargs=None, init_camera=sawyer_door_env_camera,
):
    """Build (or load) a dataset of flattened door-env images for VAE training.

    N            -- total number of images; the first half comes from oracle
                    goal sampling, the second half from random exploration
    test_p       -- fraction of rows placed in the *training* split
    imsize       -- square image side; each row holds imsize*imsize*3 floats
    use_cached   -- reuse the /tmp cache file for this N, if present
    dataset_path -- optional explicit .npy path; overrides the cache
    env_class / env_kwargs / init_camera -- environment construction

    Returns (train_dataset, test_dataset, info).
    """
    filename = "/tmp/sawyer_door_push_open_and_reach" + str(N) + ".npy"
    info = {}
    if dataset_path is not None:
        # An explicitly supplied dataset takes priority over the cache.
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        print("loaded data from saved file", filename)
    else:
        env = env_class(**env_kwargs)
        env = ImageEnv(
            env, imsize,
            transpose=True,
            init_camera=init_camera,
            normalize=True,
        )
        oracle_sampled_data = int(N / 2)
        dataset = np.zeros((N, imsize * imsize * 3))
        # First half: set the env directly to sampled goals and render.
        print('Goal Space Sampling')
        for i in range(oracle_sampled_data):
            goal = env.sample_goal()
            env.set_to_goal(goal)
            img = env._get_flat_img()
            dataset[i, :] = img
            if show:
                # NOTE(review): display reshape hard-codes 84; only
                # correct when imsize == 84.
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        # Second half: roll out a random policy (an OU wrapper with
        # theta=0 adds no mean reversion to the random actions).
        env._wrapped_env.min_y_pos = .6
        policy = RandomPolicy(env.action_space)
        es = OUStrategy(action_space=env.action_space, theta=0)
        exploration_policy = PolicyWrappedWithExplorationStrategy(
            exploration_strategy=es,
            policy=policy,
        )
        print('Random Sampling')
        for i in range(oracle_sampled_data, N):
            if i % 20 == 0:
                env.reset()
                exploration_policy.reset()
            for _ in range(10):
                action = exploration_policy.get_action()[0]
                env.wrapped_env.step(
                    action
                )
            img = env._get_flat_img()
            dataset[i, :] = img
            if show:
                cv2.imshow('img', img.reshape(3, 84, 84).transpose())
                cv2.waitKey(1)
            print(i)
        # Bug fix: persist the freshly generated dataset so that the
        # use_cached branch above can actually find it on the next run.
        np.save(filename, dataset)
    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
if __name__ == "__main__":
    # Smoke test: generate a small 1000-image dataset from scratch
    # (ignoring any cache) and display each frame as it is rendered.
    generate_vae_dataset(
        1000,
        use_cached=False,
        show=True,
        env_class=SawyerDoorPushOpenAndReachEnv,
        env_kwargs=dict(
            max_x_pos=.1,
            max_y_pos=.8,
            frame_skip=50,
        ),
    )
| [
"asap7772@berkeley.edu"
] | asap7772@berkeley.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.