hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b166dad82fde9d6a3518b1f26a85b2e2546d77b9 | 16,633 | py | Python | files/models.py | AdrianoCahete/website | 114156e24b37e5f2293aeac3c29ab4d5cd8311cd | [
"MIT"
] | null | null | null | files/models.py | AdrianoCahete/website | 114156e24b37e5f2293aeac3c29ab4d5cd8311cd | [
"MIT"
] | null | null | null | files/models.py | AdrianoCahete/website | 114156e24b37e5f2293aeac3c29ab4d5cd8311cd | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 Michal Cihar <michal@cihar.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import urllib2
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.utils import timezone
import os.path
from data.themes import CSSMAP
from markupfield.fields import MarkupField
from pmaweb.cdn import purge_cdn, purge_all_cdn
# Naming of versions
VERSION_INFO = (
('alpha1', ' First alpha version.'),
('alpha2', ' Second alpha version.'),
('alpha3', ' Third alpha version.'),
('alpha4', ' Fourth alpha version.'),
('beta1', ' First beta version.'),
('beta2', ' Second beta version.'),
('beta3', ' Third beta version.'),
('beta4', ' Fourth beta version.'),
('beta', ' Beta version.'),
('rc1', ' First release candidate.'),
('rc2', ' Second release candidate.'),
('rc3', ' Third release candidate.'),
('rc4', ' Fourth release candidate.'),
('rc', ' Release candidate.'),
)
DOCKER_TRIGGER = \
'https://registry.hub.docker.com/u/phpmyadmin/phpmyadmin/trigger/{0}/'
def get_absolute_url(self):
    """Return the canonical download URL on files.phpmyadmin.net.

    The relative path comes from the model's ``__unicode__`` rendering.
    """
    relative_path = self.__unicode__()
    return 'https://files.phpmyadmin.net{0}'.format(relative_path)
def get_signed_url(self):
    """Return the URL of the detached PGP signature (``.asc``).

    Returns an empty string when the download carries no signature
    (``self.signed`` is falsy).
    """
    if self.signed:
        return 'https://files.phpmyadmin.net{0}.asc'.format(self.__unicode__())
    return ''
def get_checksum_url(self):
    """Return the URL of the SHA-256 checksum file for this download."""
    relative_path = self.__unicode__()
    return 'https://files.phpmyadmin.net{0}.sha256'.format(relative_path)
def get_alternate_url(self):
    """Return the mirror URL of this download on the CDN77 edge host."""
    relative_path = self.__unicode__()
    return 'https://1126968067.rsc.cdn77.org{0}'.format(relative_path)
def dockerhub_trigger(tag):
    """Trigger a Docker Hub automated build for the given image tag.

    Posts ``{'docker_tag': tag}`` to the build-trigger endpoint derived
    from ``settings.DOCKERHUB_TOKEN``. Silently does nothing when no
    token is configured. The response body is read and discarded; we
    only care that the trigger request completed.
    """
    if settings.DOCKERHUB_TOKEN is None:
        return
    request = urllib2.Request(
        DOCKER_TRIGGER.format(settings.DOCKERHUB_TOKEN),
        json.dumps({'docker_tag': tag}),
        {'Content-Type': 'application/json'}
    )
    handle = urllib2.urlopen(request)
    try:
        handle.read()
    finally:
        # Close the HTTP connection explicitly instead of leaking the
        # underlying socket until garbage collection.
        handle.close()
| 32.486328 | 94 | 0.554199 |
b166eaf0f74796997babad39184ea07ba1f3c842 | 948 | py | Python | main/models/sign.py | fakegit/gxgk-wechat-server | 89ad21bcd2dcd1c28e43d4b230d47207e78098b3 | [
"MIT"
] | 1,564 | 2015-09-01T13:11:02.000Z | 2022-03-29T08:44:56.000Z | main/models/sign.py | fakegit/gxgk-wechat-server | 89ad21bcd2dcd1c28e43d4b230d47207e78098b3 | [
"MIT"
] | 11 | 2015-12-13T05:04:15.000Z | 2019-09-10T06:14:03.000Z | main/models/sign.py | fakegit/gxgk-wechat-server | 89ad21bcd2dcd1c28e43d4b230d47207e78098b3 | [
"MIT"
] | 649 | 2015-12-11T09:23:09.000Z | 2022-03-04T17:31:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import db
| 26.333333 | 70 | 0.619198 |
b167bd125d417e4efdcc02611c67219208d449ac | 2,579 | py | Python | pinax/projects/sample_group_project/urls.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | 1 | 2019-02-12T04:45:09.000Z | 2019-02-12T04:45:09.000Z | pinax/projects/sample_group_project/urls.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | null | null | null | pinax/projects/sample_group_project/urls.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | 1 | 2019-02-12T04:45:40.000Z | 2019-02-12T04:45:40.000Z | from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib import admin
admin.autodiscover()
from account.openid_consumer import PinaxConsumer
# Custom 500 handler provided by Pinax (dotted-path form, old Django style).
handler500 = "pinax.views.server_error"
# Open registration uses the plain signup view; otherwise signups require
# an invitation code handled by the signup_codes app.
if settings.ACCOUNT_OPEN_SIGNUP:
    signup_view = "account.views.signup"
else:
    signup_view = "signup_codes.views.signup"
# Root URLconf (legacy Django `patterns()` syntax): homepage, account
# handling, and one include per bundled Pinax application.
urlpatterns = patterns("",
    url(r"^$", direct_to_template, {
        "template": "homepage.html",
    }, name="home"),
    url(r"^admin/invite_user/$", "signup_codes.views.admin_invite_user", name="admin_invite_user"),
    url(r"^account/signup/$", signup_view, name="acct_signup"),
    (r"^about/", include("about.urls")),
    (r"^account/", include("account.urls")),
    (r"^openid/(.*)", PinaxConsumer()),
    (r"^profiles/", include("basic_profiles.urls")),
    (r"^notices/", include("notification.urls")),
    (r"^announcements/", include("announcements.urls")),
    (r"^tagging_utils/", include("tagging_utils.urls")),
    (r"^comments/", include("threadedcomments.urls")),
    (r"^attachments/", include("attachments.urls")),
    (r"^groups/", include("basic_groups.urls")),
    (r"^tribes/", include("tribes.urls")),
    (r"^projects/", include("projects.urls")),
    (r"^flag/", include("flag.urls")),
    (r"^admin/", include(admin.site.urls)),
)
from tagging.models import TaggedItem
from projects.models import Project
from tasks.models import Task
from topics.models import Topic
from wiki.models import Article as WikiArticle
# Models browsable through the tagging_ext views: each entry maps a section
# title to a callable returning the TaggedItem queryset for a given tag.
tagged_models = (
    dict(title="Projects",
        query=lambda tag: TaggedItem.objects.get_by_model(Project, tag),
    ),
    dict(title="Topics",
        query=lambda tag: TaggedItem.objects.get_by_model(Topic, tag),
    ),
    dict(title="Project Tasks",
        query=lambda tag: TaggedItem.objects.get_by_model(Task, tag),
    ),
    dict(title="Wiki Articles",
        query=lambda tag: TaggedItem.objects.get_by_model(WikiArticle, tag),
    ),
)
# Extra kwargs passed verbatim to every tagging_ext view below.
tagging_ext_kwargs = {
    'tagged_models':tagged_models,
}
# Tag browsing URLs: per-model listing, per-tag listing, and the tag index.
urlpatterns += patterns('',
    url(r'^tags/(?P<tag>.+)/(?P<model>.+)$', 'tagging_ext.views.tag_by_model',
        kwargs=tagging_ext_kwargs, name='tagging_ext_tag_by_model'),
    url(r'^tags/(?P<tag>.+)/$', 'tagging_ext.views.tag',
        kwargs=tagging_ext_kwargs, name='tagging_ext_tag'),
    url(r'^tags/$', 'tagging_ext.views.index', name='tagging_ext_index'),
)
# In development, let Django serve static/media files itself.
if settings.SERVE_MEDIA:
    urlpatterns += patterns("",
        (r"", include("staticfiles.urls")),
    )
| 29.306818 | 99 | 0.669252 |
b1684a8441dca67ce07724eebd55d0e4be2809be | 3,060 | py | Python | synapse/storage/schema/delta/50/make_event_content_nullable.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 2 | 2020-04-30T18:38:02.000Z | 2020-07-08T21:38:28.000Z | synapse/storage/schema/delta/50/make_event_content_nullable.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 4 | 2020-03-04T23:47:05.000Z | 2021-12-09T21:41:44.000Z | synapse/storage/schema/delta/50/make_event_content_nullable.py | Cadair/synapse | 466866a1d9dd1fcf82348a36c0532cb0c6614767 | [
"Apache-2.0"
] | 2 | 2020-03-03T18:34:52.000Z | 2022-03-31T11:06:18.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
We want to stop populating 'event.content', so we need to make it nullable.
If this has to be rolled back, then the following should populate the missing data:
Postgres:
UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
WHERE ej.event_id = events.event_id AND
stream_ordering < (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering LIMIT 1
);
UPDATE events SET content=(ej.json::json)->'content' FROM event_json ej
WHERE ej.event_id = events.event_id AND
stream_ordering > (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering DESC LIMIT 1
);
SQLite:
UPDATE events SET content=(
SELECT json_extract(json,'$.content') FROM event_json ej
WHERE ej.event_id = events.event_id
)
WHERE
stream_ordering < (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering LIMIT 1
)
OR stream_ordering > (
SELECT stream_ordering FROM events WHERE content IS NOT NULL
ORDER BY stream_ordering DESC LIMIT 1
);
"""
import logging
from synapse.storage.engines import PostgresEngine
logger = logging.getLogger(__name__)
| 31.546392 | 84 | 0.671242 |
b169661dd2e123c3c4e9fd3e7fd531b5b79cc52c | 1,822 | py | Python | tools/applause_detection/applause_detection.py | AudiovisualMetadataPlatform/amp_mgms | 593d4f4d40b597a7753cd152cd233976e6b28c75 | [
"Apache-2.0"
] | null | null | null | tools/applause_detection/applause_detection.py | AudiovisualMetadataPlatform/amp_mgms | 593d4f4d40b597a7753cd152cd233976e6b28c75 | [
"Apache-2.0"
] | 1 | 2022-02-16T16:21:03.000Z | 2022-02-16T16:21:03.000Z | tools/applause_detection/applause_detection.py | AudiovisualMetadataPlatform/amp_mgms | 593d4f4d40b597a7753cd152cd233976e6b28c75 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import argparse
import amp.utils
if __name__ == "__main__":
    # Script entry point; main() is presumably defined earlier in the full
    # source file (not visible in this excerpt) — TODO confirm upstream.
    main()
| 36.44 | 120 | 0.687157 |
b16a393bb50e48e50f448e75e1aa34a864d369d1 | 226 | py | Python | 5_Pham_Ngo_Tien_Dung/3.1.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 5_Pham_Ngo_Tien_Dung/3.1.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 5_Pham_Ngo_Tien_Dung/3.1.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | 8 | 2020-07-10T14:13:54.000Z | 2020-08-03T08:17:50.000Z | """Write a program that allow user enter a file name (path) then content, allow user to save it"""
filename = input("Please input filename")
f= open(filename,"w+")
content = input("Please input content")
f.write(content) | 37.666667 | 99 | 0.712389 |
b16c522c8657dbedfb8cc24e18349f5784c77002 | 8,203 | py | Python | 2019/intcode/intcode/tests/test_intcode.py | Ganon11/AdventCode | eebf3413c8e73c45d0e0a65a80e57eaf594baead | [
"MIT"
] | null | null | null | 2019/intcode/intcode/tests/test_intcode.py | Ganon11/AdventCode | eebf3413c8e73c45d0e0a65a80e57eaf594baead | [
"MIT"
] | null | null | null | 2019/intcode/intcode/tests/test_intcode.py | Ganon11/AdventCode | eebf3413c8e73c45d0e0a65a80e57eaf594baead | [
"MIT"
] | null | null | null | import intcode
if __name__ == "__main__":
    # When run directly, execute only the reddit-sourced regression case;
    # test_reddit is presumably defined elsewhere in the full test module
    # (not visible in this excerpt) — TODO confirm upstream.
    test_reddit()
| 31.30916 | 85 | 0.720224 |
b16cd2c50420d1e6d132def2948468675ae9b60d | 720 | py | Python | tests/test_DataAugmenterExternally.py | AlexKay28/zarnitsa | c7e93423dcc1f000849f8c1e1f685e8a91b90f9c | [
"Apache-2.0"
] | 8 | 2021-07-19T18:25:03.000Z | 2021-10-05T15:25:20.000Z | tests/test_DataAugmenterExternally.py | AlexKay28/zarnitsa | c7e93423dcc1f000849f8c1e1f685e8a91b90f9c | [
"Apache-2.0"
] | 22 | 2021-07-26T19:13:32.000Z | 2021-10-09T18:56:07.000Z | tests/test_DataAugmenterExternally.py | AlexKay28/zarnitsa | c7e93423dcc1f000849f8c1e1f685e8a91b90f9c | [
"Apache-2.0"
] | 1 | 2021-08-10T12:24:00.000Z | 2021-08-10T12:24:00.000Z | import os
import sys
import pytest
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
sys.path.append("zarnitsa/")
from zarnitsa.stats import DataAugmenterExternally
N_TO_CHECK = 500
SIG = 0.5
def test_augment_column_permute(dae, normal_data):
    """
    Augment column with normal distribution
    """
    # NOTE(review): the function name says "permute" but the body augments
    # with a normal distribution — presumably copied from a sibling test;
    # confirm before renaming. `dae` and `normal_data` are pytest fixtures
    # defined outside this excerpt.
    normal_data_aug = dae.augment_distrib_random(
        aug_type="normal", size=N_TO_CHECK, loc=0, scale=SIG * 3
    )
    # Kolmogorov–Smirnov two-sample test: the augmented sample should be
    # statistically indistinguishable from the reference normal sample.
    assert ks_2samp(normal_data, normal_data_aug).pvalue > 0.01, "KS criteria"
| 20 | 84 | 0.730556 |
b16d517f951d0f5516bebdb100e3d55e1e838a34 | 22,314 | py | Python | cgc/Collision.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | [
"MIT"
] | null | null | null | cgc/Collision.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | [
"MIT"
] | null | null | null | cgc/Collision.py | Jfeatherstone/ColorGlass | f242541df614a8eea97c43d3480c779e92660ebb | [
"MIT"
] | null | null | null | from .Wavefunction import Wavefunction
import numpy as np
from scipy.fft import ifft2, fft2
import numba
CACHE_OPTIMIZATIONS = True
# Using custom functions within other jitted functions can cause some issues,
# so we define the signatures explicitly for these two functions.
# Because of the same issue described above, we can't cache this function
# This function gives a warning because numba only experimentally supports
# treating functions as objects (the list derivs).
| 37.314381 | 202 | 0.63691 |
b1716479f1c26f49cf955c116938436d2e898588 | 21 | py | Python | fastagram/tags/models/__init__.py | dobestan/fastagram | 8c57401512d7621890a4f160d4b27c6e0d3ab326 | [
"MIT"
] | 1 | 2016-03-27T10:36:01.000Z | 2016-03-27T10:36:01.000Z | fastagram/tags/models/__init__.py | dobestan/django-101-fastagram | 8c57401512d7621890a4f160d4b27c6e0d3ab326 | [
"MIT"
] | 3 | 2016-03-25T05:32:39.000Z | 2016-03-28T04:59:17.000Z | fastagram/tags/models/__init__.py | dobestan/django-101-fastagram | 8c57401512d7621890a4f160d4b27c6e0d3ab326 | [
"MIT"
] | 1 | 2016-03-28T16:35:36.000Z | 2016-03-28T16:35:36.000Z | from .tag import Tag
| 10.5 | 20 | 0.761905 |
b1724ba73246edc325129a0b1a56c982075f8024 | 8,346 | py | Python | tensorflow/contrib/model_pruning/python/learning.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/model_pruning/python/learning.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/model_pruning/python/learning.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.compat.v1.train.MomentumOptimizer(FLAGS.learning_rate,
FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
_USE_DEFAULT = 0
train_step = _slim.learning.train_step
def train(train_op,
          logdir,
          mask_update_op,
          train_step_fn=train_step,
          train_step_kwargs=_USE_DEFAULT,
          log_every_n_steps=1,
          graph=None,
          master='',
          is_chief=True,
          global_step=None,
          number_of_steps=None,
          init_op=_USE_DEFAULT,
          init_feed_dict=None,
          local_init_op=_USE_DEFAULT,
          init_fn=None,
          ready_op=_USE_DEFAULT,
          summary_op=_USE_DEFAULT,
          save_summaries_secs=600,
          summary_writer=_USE_DEFAULT,
          startup_delay_steps=0,
          saver=None,
          save_interval_secs=600,
          sync_optimizer=None,
          session_config=None,
          trace_every_n_steps=None):
  """Wrapper around tf-slim's train function.
  Runs a training loop using a TensorFlow supervisor.
  When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronous.
  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where training logs are written to. If None, model
      checkpoints and summaries will not be written.
    mask_update_op: Operation that upon execution updates the weight masks and
      thresholds.
    train_step_fn: The function to call in order to execute a single gradient
      step. The function must have take exactly four arguments: the current
      session, the `train_op` `Tensor`, a global step `Tensor` and a
      dictionary.
    train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
      default, two `Boolean`, scalar ops called "should_stop" and "should_log"
      are provided.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step and logged.
    graph: The graph to pass to the supervisor. If no graph is supplied the
      default graph is used.
    master: The address of the tensorflow master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    global_step: The `Tensor` representing the global step. If left as `None`,
      then slim.variables.get_or_create_global_step() is used.
    number_of_steps: The max number of gradient steps to take during training,
      as measured by 'global_step': training will stop if global_step is greater
      than 'number_of_steps'. If the value is left as None, training proceeds
      indefinitely.
    init_op: The initialization operation. If left to its default value, then
      the session is initialized by calling
      `tf.compat.v1.global_variables_initializer()`.
    init_feed_dict: A feed dictionary to use when executing the `init_op`.
    local_init_op: The local initialization operation. If left to its default
      value, then the session is initialized by calling
      `tf.compat.v1.local_variables_initializer()` and
      `tf.compat.v1.tables_initializer()`.
    init_fn: An optional callable to be executed after `init_op` is called. The
      callable must accept one argument, the session being initialized.
    ready_op: Operation to check if the model is ready to use. If left to its
      default value, then the session checks for readiness by calling
      `tf.compat.v1.report_uninitialized_variables()`.
    summary_op: The summary operation.
    save_summaries_secs: How often, in seconds, to save summaries.
    summary_writer: `SummaryWriter` to use. Can be `None` to indicate that no
      summaries should be written. If unset, we create a SummaryWriter.
    startup_delay_steps: The number of steps to wait for before beginning. Note
      that this must be 0 if a sync_optimizer is supplied.
    saver: Saver to save checkpoints. If None, a default one will be created and
      used.
    save_interval_secs: How often, in seconds, to save the model to `logdir`.
    sync_optimizer: an instance of tf.compat.v1.train.SyncReplicasOptimizer, or
      a list of them. If the argument is supplied, gradient updates will be
      synchronous. If left as `None`, gradient updates will be asynchronous.
    session_config: An instance of `tf.compat.v1.ConfigProto` that will be used
      to configure the `Session`. If left as `None`, the default will be used.
    trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
      and add it to the summaries every `trace_every_n_steps`. If None, no trace
      information will be produced or saved.
  Returns:
    the value of the loss function after training.
  Raises:
    ValueError: if `train_op` is empty or if `startup_delay_steps` is
      non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
      negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
      provided.
  """
  # NOTE(review): `train_step_with_pruning_fn` is not defined in this excerpt.
  # In the upstream file it is presumably a closure that wraps `train_step_fn`
  # and additionally runs `mask_update_op` after each gradient step — its
  # definition appears to have been stripped from this copy; confirm against
  # tensorflow/contrib/model_pruning before relying on this code.
  # Delegate the actual training loop to tf-slim, substituting the
  # pruning-aware step function for the user-supplied one.
  total_loss, _ = _slim.learning.train(
      train_op,
      logdir,
      train_step_fn=train_step_with_pruning_fn,
      train_step_kwargs=train_step_kwargs,
      log_every_n_steps=log_every_n_steps,
      graph=graph,
      master=master,
      is_chief=is_chief,
      global_step=global_step,
      number_of_steps=number_of_steps,
      init_op=init_op,
      init_feed_dict=init_feed_dict,
      local_init_op=local_init_op,
      init_fn=init_fn,
      ready_op=ready_op,
      summary_op=summary_op,
      save_summaries_secs=save_summaries_secs,
      summary_writer=summary_writer,
      startup_delay_steps=startup_delay_steps,
      saver=saver,
      save_interval_secs=save_interval_secs,
      sync_optimizer=sync_optimizer,
      session_config=session_config,
      trace_every_n_steps=trace_every_n_steps)
  return total_loss
| 42.581633 | 81 | 0.684999 |
b172a5ff4bd5c2830f5d2332f4e30cc2a061bc37 | 306 | py | Python | run2.py | akuz/deep-gen-mnist | 13d4d350a0dc9dc7f0111c839fb7158654f048c4 | [
"MIT"
] | null | null | null | run2.py | akuz/deep-gen-mnist | 13d4d350a0dc9dc7f0111c839fb7158654f048c4 | [
"MIT"
] | null | null | null | run2.py | akuz/deep-gen-mnist | 13d4d350a0dc9dc7f0111c839fb7158654f048c4 | [
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import model
if __name__ == "__main__":
    # Build the per-level model configuration, then create the matching
    # filter variables on TensorFlow's default graph. Only graph
    # construction happens here; no session is run.
    print("Making level configs...")
    level_configs = model.default_level_configs()
    print("Making filter variables...")
    filters = model.make_filters(tf.get_default_graph(), level_configs)
    print("Done")
| 19.125 | 71 | 0.70915 |
b17399bedc9351d3452e2254d35db67407d43d19 | 11,201 | py | Python | payload_templates/lin_shell_payload.py | ahirejayeshbapu/python-shell | 3560fe03f89557c1189255ca2737accdeda48faf | [
"MIT"
] | 4 | 2018-09-20T13:37:28.000Z | 2022-02-23T00:36:55.000Z | payload_templates/lin_shell_payload.py | ahirejayeshbapu/python-shell | 3560fe03f89557c1189255ca2737accdeda48faf | [
"MIT"
] | null | null | null | payload_templates/lin_shell_payload.py | ahirejayeshbapu/python-shell | 3560fe03f89557c1189255ca2737accdeda48faf | [
"MIT"
] | null | null | null | import subprocess, os, socket, re, pickle, docx, urllib2
from platform import platform
from getpass import getuser
from time import sleep
from datetime import datetime
port = !!!!!
ip_addr = @@@@@
lkey = #####
End = $$$$$
skey = %%%%%
time_to_sleep = ^^^^^
type_of_scout = 'Command Shell'
try:
operating_sys = platform()
except:
operating_sys = '?????'
try:
hostname = socket.gethostname()
except:
hostname = '?????'
try:
username = getuser()
except:
username = '?????'
userinfo = hostname + '/' + username
scout_data = [skey, lkey, userinfo, type_of_scout, operating_sys]
shell_type = '/bin/bash'
s = None
help_menu = '''\nCommand Shell Menu
==================
Global Commands :
banner Display a banner
clear Clear the screen
help Show the help menu
local <shell command> Locally execute a shell command
python Enter the system python interpreter
quit Quit the framework
Connection commands :
disconnect Make the scout disconnect and try to reconnect
terminate Kill the scout process
sleep <seconds> Disconnect the scout and make it sleep for some time
Handler commands :
back Move back to scout handler
Command Shell Commands :
exec <shell command> Executes shell command and returns output
exec_file <shell command> Executes a shell command with no output(use this to run files and avoid blocking)
swap <shell path> Switch the type of shell used, default is "/bin/bash"
File Commands :
download <filepath> Download file
dump <filepath> Dump and view file content(supports .docx file)
upload <filepath> Upload a file
web_download <url> Download a file through a url\n'''
main()
| 37.713805 | 130 | 0.44871 |
b17454e4938df93dd6729a10260ca6df34c9564c | 84 | py | Python | scripts/python/make-dist-cfg.py | brakmic/cm3 | b99e280eca00c322e04e0586951de50108e51343 | [
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | 2 | 2015-03-02T17:01:32.000Z | 2021-12-29T14:34:46.000Z | scripts/python/make-dist-cfg.py | ganeshbabuNN/cm3 | 9fb432d44a2ba89575febb38f7c1eb3dca6a3879 | [
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-3-Clause"
] | 1 | 2015-07-23T07:51:22.000Z | 2015-07-23T07:51:22.000Z | scripts/python/make-dist-cfg.py | RodneyBates/M3Devel | 7b8dd3fc8f5b05d1c69774d92234ea50d143a692 | [
"BSD-4-Clause-UC",
"BSD-4-Clause"
] | 1 | 2021-12-29T14:35:47.000Z | 2021-12-29T14:35:47.000Z | #! /usr/bin/env python
from pylib import *
CopyConfigForDistribution(InstallRoot)
| 14 | 38 | 0.785714 |
b175213c84777ec0e61947cb929e05305bf328ad | 17,813 | py | Python | bench.py | citorva/verificateur_defis_leviathan | 98cd7280253a541d94b34c120879556585ef814c | [
"CC0-1.0"
] | null | null | null | bench.py | citorva/verificateur_defis_leviathan | 98cd7280253a541d94b34c120879556585ef814c | [
"CC0-1.0"
] | null | null | null | bench.py | citorva/verificateur_defis_leviathan | 98cd7280253a541d94b34c120879556585ef814c | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pygame
import threading
import time
import math
import sys
import argparse
import bench_core
import multiprocessing
# Couleurs du programme. Peut tre modifi tout moment
couleur_txt = (0xc0, 0xc0, 0xc0) # Couleur gris clair pour le texte commun
couleur_vic = (0x28, 0xa7, 0x45) # Couleur verte pour la gauge et le texte associ
couleur_arp = (0x18, 0x18, 0x18) # Couleur de l'arrire plan gnral de l'application (Gris fonc)
couleur_gar = (0x21, 0x21, 0x21) # Couleur de l'arrire plan de la gauge en mode barre de chargement (Nuance de l'AP)
couleurs_echec = [
(0xf5, 0xe8, 0x00), # Couleur jaune pour signaler une exeption (un plantage de l'algorithme)
(0xff, 0x80, 0x3c), # Couleur orange pour signaler un puit (Le personnage prend une corniche avec un puit)
(0xf7, 0x40, 0x3b), # Couleur rouge pour signaler Lviathan (Le personnage se fait manger par ce dernier)
(0x7f, 0x7f, 0x7f), # Couleur grise pour signaler une manque d'nergie (cd le personnage tourne en rond)
(0xff, 0x00, 0x00) # Couleur rouge vif pour signaler une non rponse (L'algorithme prennds trop de temps)
]
# Modles de texte
texte_modeles = [
"%0.00f%% cause d'une exeption (%d, %d%% des checs)%s",
"%0.00f%% tomb dans un puit (%d, %d%% des checs)%s",
"%0.00f%% mang par leviathan (%d, %d%% des checs)%s",
"%0.00f%% par manque d'nergie (%d, %d%% des checs)%s",
"%0.00f%% ne rpondant pas (%d, %d%% des checs)%s"
]
# Constantes de mise en page (Metriques)
metrique_mm = 8 # Marges de l'application (entre les bords de la fentre et le contenu ainsi que entre les lments)
metrique_hg = 24 # Hauteur de la gauge en pixels
metrique_pt = 25 # Taille du texte de titre en points
metrique_pp = 12 # Taille du texte gnral en points
# Variables de benchmark (NE PAS MODIFIER)
# Variable de control de l'IHM
affichage_absolu = False
arret_demande = False
# Systme de comptage du temps
heure_depart = 0
heure_fin = 0
# Initialisation de pygame (NE PAS MODIFIER)
pygame.font.init()
pygame.display.init()
# Initialisation des lments graphiques (NE PAS MODIFIER)
ecran = None
police_titre = pygame.font.Font(pygame.font.get_default_font(), metrique_pt)
police = pygame.font.Font(pygame.font.get_default_font(), metrique_pp)
def cree_jauge(surface, donnees, couleur, rect):
    """Draw a horizontal stacked gauge inside ``rect``.

    Each value of ``donnees`` becomes a segment whose width is
    proportional to its share of the total, filled with the matching
    entry of ``couleur``. The last segment absorbs the integer-rounding
    remainder so the segments always cover the full width of ``rect``.

    :param surface: drawing target exposing ``fill(color, rect_tuple)``
    :param donnees: list of N values (segment weights)
    :param couleur: list of N colours, parallel to ``donnees``
    :param rect: box (x, y, width, height) in which the gauge is drawn
    :return: None
    """
    somme = sum(donnees)
    nombre = len(donnees)
    # Truncated proportional width for every segment but the last.
    largeurs = [0] * nombre
    for indice in range(nombre - 1):
        largeurs[indice] = int(rect.width * donnees[indice] / somme)
    # The last segment takes whatever is left, so rounding never leaves a gap.
    largeurs[-1] = rect.width - sum(largeurs[:-1])
    # Paint the segments left to right.
    abscisse = rect.x
    for indice in range(nombre):
        surface.fill(couleur[indice], (abscisse, rect.y, largeurs[indice], rect.height))
        abscisse += largeurs[indice]
def rendu_temps(temps):
    """Render the order of magnitude of a remaining duration.

    :param temps: remaining time in seconds
    :return: a short text giving the magnitude in days/hours/minutes,
             e.g. "~2 heures", or "<1 minute" below one minute
    """
    restant = temps // 60
    nb_minutes = restant % 60
    restant //= 60
    nb_heures = restant % 24
    nb_jours = restant // 24
    # Report only the most significant non-zero unit.
    if nb_jours:
        return "~%d jour%s" % (nb_jours, "s" if nb_jours != 1 else "")
    if nb_heures:
        return "~%d heure%s" % (nb_heures, "s" if nb_heures != 1 else "")
    if nb_minutes:
        return "~%d minute%s" % (nb_minutes, "s" if nb_minutes != 1 else "")
    return "<1 minute"
def format_duree(duree):
    """Format a duration as days/hours/minutes/seconds.

    Leading zero-valued units are omitted entirely; units after the
    first non-zero one are kept and zero-padded, e.g. ``3700`` gives
    ``"01h01min40s"`` and ``5`` gives ``"05s"``.

    :param duree: duration in seconds (may be fractional; it is floored)
    :return: formatted text of the form ``<j>j<hh>h<mm>min<ss>s``
    """
    secondes = int(math.floor(duree))
    texte = ""
    if secondes // 86400 != 0:
        texte += "{}j".format(secondes // 86400)
    if secondes // 3600 != 0:
        texte += "{:02d}h".format(secondes // 3600 % 24)
    if secondes // 60 != 0:
        texte += "{:02d}min".format(secondes // 60 % 60)
    # Seconds are always shown, zero-padded.
    texte += "{:02d}s".format(secondes % 60)
    return texte
def afficher_graine(graine):
    """Build the random-seed suffix for a status line.

    :param graine: seed to display, or None for no suffix
    :return: "" when graine is None, otherwise the suffix
             ". Graine alatoire: <graine>"
    """
    return "" if graine is None else ". Graine alatoire: %d" % graine
# TODO: Clean up this function (original author note: "Nettoyer et documenter").
def fonction_affichage():
    """
    Display routine; runs in its own thread until a stop is requested.

    Periodically refreshes the benchmark data, redraws the pygame window,
    and handles window events (close, click toggling the display mode).
    :return: None
    """
    global arret_demande, affichage_absolu, ecran, heure_fin
    temps_mise_a_jour = 0  # timestamp of the last redraw
    duree_mise_a_jour = 1/args.update_frequency  # seconds between redraws
    debut_clic = False  # True while a mouse button press is pending release
    while not arret_demande:
        # Throttle redraws to the requested update frequency.
        if time.time() - temps_mise_a_jour >= duree_mise_a_jour:
            bench.mise_a_jour_donnees()
            if bench.total_compteur != 0:
                if bench.total_compteur < args.number:
                    # Benchmark still running: keep pushing the end time forward.
                    heure_fin = time.time()
                surface = affichage_donnees()
                # Recreate the window whenever the rendered surface changes size.
                if ecran is None or surface.get_width() != ecran.get_width() or surface.get_height() != ecran.get_height():
                    ecran = pygame.display.set_mode((surface.get_width(), surface.get_height()))
                ecran.blit(surface, (0, 0, ecran.get_width(), ecran.get_height()))
                pygame.display.flip()
            temps_mise_a_jour = time.time()
        if ecran is not None:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    # Window closed: stop the benchmark and exit the loop.
                    bench.arret()
                    arret_demande = True
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    debut_clic = True
                elif event.type == pygame.MOUSEBUTTONUP and debut_clic:
                    # A completed click toggles absolute/relative display.
                    affichage_absolu = not affichage_absolu
                    debut_clic = False
# Object driving the AI benchmark (created after argument parsing below).
bench = None

# Command-line option parsing.
parser = argparse.ArgumentParser(
    description="Effectue de nombreux tests dans le but de vrifier le comportement de l'IA pour le dfi python "
                "du Leviathan dans des cas alatoires. Voir "
                "https://tiplanet.org/forum/viewtopic.php?f=49&t=24387&p=257174#p257172 pour plus d'informations "
                "sur le dfi."
)
# Positional argument: the AI script under test.
parser.add_argument("ia", help="Fichier de l'IA tester")
parser.add_argument('-n', "--number", default=100000, type=int, help="Nombre de tests effectuer")
parser.add_argument('-s', "--seed", default=0xc0ffee, type=int, help="Graine alatoire du benchmark")
parser.add_argument('-w', "--web-dim", default=36, type=int, help="Nombre de corniches")
parser.add_argument("-d", "--web-density", default=0.05, type=float,
                    help="Densit moyenne de voisine chaque corniche")
parser.add_argument("-b", "--bats-density", default=0.1, type=float, help="Densit de chauve souris par parties")
parser.add_argument("-p", "--pit-density", default=0.15, type=float, help="Densit de puit par parties")
parser.add_argument("-m", "--max-duration", default=20, type=float, help="Dure maximum d'une partie en seconde")
parser.add_argument("-t", "--threads", default=1, type=int, help="Nombre de fils d'excution pour les tests")
parser.add_argument("-f", "--update-frequency", default=24, type=int, help="Frquence de rafraichssement de l'interface")
args = parser.parse_args(sys.argv[1:])
# Validate the parsed arguments, accumulating every error message so the
# user sees all problems at once (reported further below).
err = False
err_text = "\n"
if args.web_density >= 1 or args.web_density <= 0:
    err_text += "La densit de corniche voisine doit tre comprise entre 0 et 1, non inclu\n"
    err = True
if args.bats_density >= 1 or args.bats_density <= 0:
    err_text += "La densit de chauve souris doit tre comprise entre 0 et 1, non inclu\n"
    err = True
if args.pit_density >= 1 or args.pit_density <= 0:
    err_text += "La densit de puit doit tre comprise entre 0 et 1, non inclu\n"
    err = True
if args.max_duration <= 0:
    err_text += "La dure maximum d'une partie doit tre strictement suprieure 0\n"
    err = True
if args.threads <= 0:
    err_text += "Le nombre de fils d'excution doit tre suprieur 0\n"
    err = True
if args.web_dim <= 3:
    err_text += "Un nombre raisonnable de corniche doit tre fourni pour le bon fonctionnement de l'algorithme\n"
    err = True
if args.number <= 0:
    err_text += "Il faut au minimum un test pour pouvoir avoir des donnes exploitables\n"
    err = True
if args.update_frequency <= 0:
    err_text += "La frquence de rafraichissement de l'interface doit tre strictement positive"
    err = True
# Non-fatal warnings about allowed but questionable settings.
if args.update_frequency > 60:
    print("Alerte: La frquence de rafraichissement choisi est trs leve. Cela pourra impacter ngativement la vitesse du test")
if args.threads >= multiprocessing.cpu_count():
    print("Alerte: Le nombre de fils d'excution demand est suprieur au nombre de processeurs disponibles. Cela risque d'impacter les performance totales de votre ordinateur")
"""
try:
bench = bench_core.Bench(
args.threads,
args.seed,
args.number,
args.ia,
args.max_duration,
args.web_dim,
args.web_density,
args.pit_density,
args.bats_density
)
except Exception as _:
err_text += "L'ia spcifi ne peut tre ouvert en tant que script. Il se peut que ce dernier n'existe pas ou ne " \
"soit pas un script python valide\n"
err = True
"""
bench = bench_core.Bench(
args.threads,
args.seed,
args.number,
args.ia,
args.max_duration,
args.web_dim,
args.web_density,
args.pit_density,
args.bats_density
)
if err:
parser.print_usage()
print(err_text)
quit()
del parser
# Main program: start the UI thread, then run the benchmark loops.
fil_exec_interface_utilisateur = threading.Thread(target=fonction_affichage)
heure_depart = time.time()
fil_exec_interface_utilisateur.start()
# Start the test loops (the UI thread sets arret_demande when the window closes).
bench.demarre()
fil_exec_interface_utilisateur.join()
bench.arret()
pygame.quit()
# Final report: aggregate the counters collected by the benchmark and
# print the overall statistics (only when at least one test ran).
if bench.total_compteur != 0:
    total_tst = bench.total_compteur
    total_vic = bench.compteur[bench_core.PARAMETRE_TOTAL_REUSSITE]
    total_ech = total_tst - total_vic
    total_lvt = bench.compteur[bench_core.PARAMETRE_ECHEC_LEVIATHAN]
    total_pit = bench.compteur[bench_core.PARAMETRE_ECHEC_PUIT]
    total_nrj = bench.compteur[bench_core.PARAMETRE_ECHEC_ENERGIE]
    total_exc = bench.compteur[bench_core.PARAMETRE_ECHEC_EXEPTION]
    total_nrp = bench.compteur[bench_core.PARAMETRE_ECHEC_NON_REPONSE]
    graine_lvt = bench.graines[bench_core.PARAMETRE_ECHEC_LEVIATHAN]
    graine_pit = bench.graines[bench_core.PARAMETRE_ECHEC_PUIT]
    graine_nrj = bench.graines[bench_core.PARAMETRE_ECHEC_ENERGIE]
    graine_exc = bench.graines[bench_core.PARAMETRE_ECHEC_EXEPTION]
    graine_nrp = bench.graines[bench_core.PARAMETRE_ECHEC_NON_REPONSE]
    # Guard against a division by zero when every game was won: the failure
    # breakdown percentages are all 0 in that case, so dividing by 1 is exact.
    diviseur_ech = total_ech if total_ech != 0 else 1
    score = (1000 * (total_tst - 2 * total_nrp - total_exc // 2) - bench.trajet_moyen) * args.web_dim / bench.total_compteur
    print(
        "Statistiques finales:\n\tNombre total test: %d\n\n"
        "Score final: %d\n"
        "%d succs (%0.00f%%) avec un trajet moyen de %d\n"
        "%d checs (%0.00f%%) avec comme dtails:\n"
        "\t%d dues un lviathan (%0.00f%%)%s\n"
        "\t%d dues un puit (%0.00f%%)%s\n"
        "\t%d dues un manque d'nergie (%0.00f%%)%s\n"
        "\t%d dues une exeption (%0.00f%%)%s\n"
        "\t%d dues un temps de rponse trop lev (%0.00f%%)%s\n"
        "" % (
            total_tst,
            score,
            total_vic, 100 * total_vic / bench.total_compteur, bench.trajet_moyen,
            total_ech, 100 * total_ech / bench.total_compteur,
            # BUG FIX: these ratios used `bench.total_ech`, but `total_ech`
            # is the local computed above, not an attribute of the Bench.
            total_lvt, 100 * total_lvt / diviseur_ech, afficher_graine(graine_lvt),
            total_pit, 100 * total_pit / diviseur_ech, afficher_graine(graine_pit),
            total_nrj, 100 * total_nrj / diviseur_ech, afficher_graine(graine_nrj),
            total_exc, 100 * total_exc / diviseur_ech, afficher_graine(graine_exc),
            total_nrp, 100 * total_nrp / diviseur_ech, afficher_graine(graine_nrp)
        )
    )
| 41.233796 | 242 | 0.662999 |
b17694133578e1b1a9c1c195cbd91ca5e72b6295 | 181 | py | Python | test/conftest.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 6 | 2019-05-10T18:31:05.000Z | 2021-09-08T16:59:46.000Z | test/conftest.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 2 | 2018-06-04T23:28:16.000Z | 2022-03-08T14:20:14.000Z | test/conftest.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T10:12:44.000Z | 2019-06-13T10:12:44.000Z |
# Register the custom Greenplum dialect with SQLAlchemy before the test
# plugin loads, so tests can use `greenplum://` database URLs.
from sqlalchemy.dialects import registry
registry.register("greenplum", "sqlalchemy_greenplum.dialect", "GreenplumDialect")
# Activate SQLAlchemy's standard pytest plugin hooks in this conftest.
from sqlalchemy.testing.plugin.pytestplugin import *
| 22.625 | 82 | 0.823204 |
b177b1d71b976403fe1dab8da5d47925b29da724 | 10,319 | py | Python | xclim/core/locales.py | bzah/xclim | 18ceee3f1db2d39355913c1c60ec32ddca6baccc | [
"Apache-2.0"
] | null | null | null | xclim/core/locales.py | bzah/xclim | 18ceee3f1db2d39355913c1c60ec32ddca6baccc | [
"Apache-2.0"
] | 2 | 2021-06-23T09:26:54.000Z | 2021-07-26T19:28:41.000Z | xclim/core/locales.py | bzah/xclim | 18ceee3f1db2d39355913c1c60ec32ddca6baccc | [
"Apache-2.0"
] | 1 | 2021-03-02T20:12:28.000Z | 2021-03-02T20:12:28.000Z | # -*- coding: utf-8 -*-
# noqa: D205,D400
"""
Internationalization
====================
Defines methods and object to help the internationalization of metadata for the
climate indicators computed by xclim.
All the methods and objects in this module use localization data given in json files.
These files are expected to be defined as in this example for french:
.. code-block::
{
"attrs_mapping" : {
"modifiers": ["", "f", "mpl", "fpl"],
"YS" : ["annuel", "annuelle", "annuels", "annuelles"],
"AS-*" : ["annuel", "annuelle", "annuels", "annuelles"],
... and so on for other frequent parameters translation...
},
"DTRVAR": {
"long_name": "Variabilit de l'amplitude de la temprature diurne",
"description": "Variabilit {freq:f} de l'amplitude de la temprature diurne (dfinie comme la moyenne de la variation journalire de l'amplitude de temprature sur une priode donne)",
"title": "Variation quotidienne absolue moyenne de l'amplitude de la temprature diurne",
"comment": "",
"abstract": "La valeur absolue de la moyenne de l'amplitude de la temprature diurne."
},
... and so on for other indicators...
}
Indicators are named by subclass identifier, the same as in the indicator registry (`xclim.core.indicators.registry`),
but which can differ from the callable name. In this case, the indicator is called through
`atmos.daily_temperature_range_variability`, but its identifier is `DTRVAR`.
Use the `ind.__class__.__name__` accessor to get its registry name.
Here, the usual parameter passed to the formatting of "description" is "freq" and is usually
translated from "YS" to "annual". However, in french and in this sentence, the feminine
form should be used, so the "f" modifier is added by the translator so that the
formatting function knows which translation to use. Acceptable entries for the mappings
are limited to what is already defined in `xclim.core.indicators.utils.default_formatter`.
For user-provided internationalization dictionaries, only the "attrs_mapping" and
its "modifiers" key are mandatory, all other entries (translations of frequent parameters
and all indicator entries) are optional. For xclim-provided translations (for now only french),
all indicators must have en entry and the "attrs_mapping" entries must match exactly the default formatter.
Those default translations are found in the `xclim/locales` folder.
Attributes
----------
TRANSLATABLE_ATTRS
List of attributes to consider translatable when generating locale dictionaries.
"""
import json
import warnings
from importlib.resources import contents, open_text
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
from .formatting import AttrFormatter, default_formatter
# Attribute names whose values may be translated in locale files
# (see the module docstring above for the expected json layout).
TRANSLATABLE_ATTRS = [
    "long_name",
    "description",
    "comment",
    "title",
    "abstract",
    "keywords",
]
def list_locales():
    """Return a list of available locales in xclim."""
    # Locales are shipped as `<tag>.json` files inside the xclim.data package.
    return [
        entry.split(".")[0]
        for entry in contents("xclim.data")
        if entry.endswith(".json")
    ]
def get_best_locale(locale: str):
    """Get the best fitting available locale.

    For existing locales ['fr', 'fr-BE', 'en-US']:
    'fr-CA' returns 'fr', 'en' -> 'en-US' and 'en-GB' -> 'en-US'.

    Parameters
    ----------
    locale : str
        The requested locale, as an IETF language tag (lang or lang-territory)

    Returns
    -------
    str or None:
        The best available locale. None if none are available.
    """
    available = list_locales()
    # Exact match first.
    if locale in available:
        return locale
    # Then the bare language part of the requested tag.
    langue = locale.split("-")[0]
    if langue in available:
        return langue
    # Finally, any available locale sharing the same language.
    for candidat in available:
        if candidat.split("-")[0] == langue:
            return candidat
    return None
def get_local_dict(locale: Union[str, Sequence[str], Tuple[str, dict]]):
    """Return all translated metadata for a given locale.

    Parameters
    ----------
    locale : str or sequence of str
        IETF language tag or a tuple of the language tag and a translation dict, or
        a tuple of the language tag and a path to a json file defining translation
        of attributes.

    Raises
    ------
    UnavailableLocaleError
        If the given locale is not available.

    Returns
    -------
    str
        The best fitting locale string
    dict
        The available translations in this locale.
    """
    if isinstance(locale, str):
        best = get_best_locale(locale)
        # BUG FIX: the error was previously raised with the reassigned value
        # (always None at this point); report the requested tag instead.
        if best is None:
            raise UnavailableLocaleError(locale)
        return (
            best,
            json.load(open_text("xclim.data", f"{best}.json")),
        )
    if isinstance(locale[1], dict):
        # (tag, translations-dict) pair: already loaded.
        return locale
    # (tag, path) pair: load the translations from the json file.
    with open(locale[1], encoding="utf-8") as locf:
        return locale[0], json.load(locf)
def get_local_attrs(
    indicator: str,
    *locales: Union[str, Sequence[str], Tuple[str, dict]],
    names: Optional[Sequence[str]] = None,
    append_locale_name: bool = True,
) -> dict:
    """Get all attributes of an indicator in the requested locales.

    Parameters
    ----------
    indicator : str
        Indicator's class name, usually the same as in `xc.core.indicator.registry`.
    *locales : str
        IETF language tag, or a tuple of the tag and a translation dict, or a
        tuple of the tag and a path to a json file with the translations.
    names : Optional[Sequence[str]]
        If given, only return translations for the attributes in this list.
    append_locale_name : bool
        If True (default), append the language tag (as "{attr_name}_{locale}")
        to the returned attribute names.

    Raises
    ------
    ValueError
        If `append_locale_name` is False and multiple `locales` are requested.

    Returns
    -------
    dict
        All translatable attributes available for the given indicator and
        locales. Warns and returns an empty dict if none were available.
    """
    if not append_locale_name and len(locales) > 1:
        raise ValueError(
            "`append_locale_name` cannot be False if multiple locales are requested."
        )

    attrs = {}
    for locale in locales:
        loc_name, loc_dict = get_local_dict(locale)
        suffix = f"_{loc_name}" if append_locale_name else ""
        local_attrs = loc_dict.get(indicator)
        if local_attrs is None:
            warnings.warn(
                f"Attributes of indicator {indicator} in language {locale} were requested, but none were found."
            )
            continue
        for name in TRANSLATABLE_ATTRS:
            if (names is None or name in names) and name in local_attrs:
                attrs[f"{name}{suffix}"] = local_attrs[name]
    return attrs
def get_local_formatter(
    locale: Union[str, Sequence[str], Tuple[str, dict]]
) -> AttrFormatter:
    """Return an AttrFormatter instance for the given locale.

    Parameters
    ----------
    locale : str or tuple of str
        IETF language tag, or a tuple of the tag and a translation dict,
        or a tuple of the tag and a path to a json translation file.
    """
    _, loc_dict = get_local_dict(locale)
    # Shallow-copy so popping "modifiers" does not mutate the cached dict.
    mapping = dict(loc_dict["attrs_mapping"])
    modifiers = mapping.pop("modifiers")
    return AttrFormatter(mapping, modifiers)
def generate_local_dict(locale: str, init_english: bool = False):
    """Generate a dictionary with keys for each indicators and translatable attributes.

    Parameters
    ----------
    locale : str
        Locale in the IETF format
    init_english : bool
        If True, fills the initial dictionary with the english versions of the attributes.
        Defaults to False.
    """
    # Imported here to avoid a circular import at module load time.
    from xclim.core.indicator import registry

    best_locale = get_best_locale(locale)
    if best_locale is not None:
        # Start from the existing translation, dropping entries for
        # indicators that no longer exist in the registry.
        locname, attrs = get_local_dict(best_locale)
        for ind_name in attrs.copy().keys():
            if ind_name != "attrs_mapping" and ind_name not in registry:
                attrs.pop(ind_name)
    else:
        attrs = {}

    # Ensure the "attrs_mapping" scaffolding exists, seeded with the first
    # (default-modifier) form of each default translation.
    attrs_mapping = attrs.setdefault("attrs_mapping", {})
    attrs_mapping.setdefault("modifiers", [""])
    for key, value in default_formatter.mapping.items():
        attrs_mapping.setdefault(key, [value[0]])

    eng_attr = ""
    for ind_name, indicator in registry.items():
        ind_attrs = attrs.setdefault(ind_name, {})
        # Indicator-level attributes: those NOT handled per output variable.
        for translatable_attr in set(TRANSLATABLE_ATTRS).difference(
            set(indicator._cf_names)
        ):
            if init_english:
                eng_attr = getattr(indicator, translatable_attr)
                if not isinstance(eng_attr, str):
                    eng_attr = ""
            ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
        for var_attrs in indicator.cf_attrs:
            # In the case of single output, put var attrs in main dict
            if len(indicator.cf_attrs) > 1:
                ind_attrs = attrs.setdefault(f"{ind_name}.{var_attrs['var_name']}", {})
            # Per-output CF attributes.
            for translatable_attr in set(TRANSLATABLE_ATTRS).intersection(
                set(indicator._cf_names)
            ):
                if init_english:
                    eng_attr = var_attrs.get(translatable_attr)
                    if not isinstance(eng_attr, str):
                        eng_attr = ""
                ind_attrs.setdefault(f"{translatable_attr}", eng_attr)
    return attrs
| 35.582759 | 198 | 0.64609 |
b1784fe113bca2d558cd14a80d284029cd03a532 | 92 | py | Python | tests/samples/importing/nested/base.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 23 | 2020-02-28T14:29:04.000Z | 2021-12-23T20:50:54.000Z | tests/samples/importing/nested/base.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 172 | 2020-02-24T12:12:11.000Z | 2022-03-29T03:08:24.000Z | tests/samples/importing/nested/base.py | machinable-org/machinable | 9d96e942dde05d68699bc7bc0c3d062ee18652ad | [
"MIT"
] | 1 | 2020-11-23T22:42:20.000Z | 2020-11-23T22:42:20.000Z | from machinable import Component
| 15.333333 | 32 | 0.75 |
b17898d3cc02bf7ea9e57ca3010adf0a3b3916ab | 435 | py | Python | source/blockchain_backup/config/gunicorn.conf.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | source/blockchain_backup/config/gunicorn.conf.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | source/blockchain_backup/config/gunicorn.conf.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | # See
# The configuration file should be a valid Python source file with a python extension (e.g. gunicorn.conf.py).
# https://docs.gunicorn.org/en/stable/configure.html
# Gunicorn server settings (see the documentation link above).
bind='127.0.0.1:8962'  # listen on localhost only
timeout=75
daemon=True
user='user'
accesslog='/var/local/log/user/blockchain_backup.gunicorn.access.log'
errorlog='/var/local/log/user/blockchain_backup.gunicorn.error.log'
# BUG FIX: gunicorn's setting is spelled `loglevel`; the previous
# `log_level` name was silently ignored, leaving the default level.
loglevel='debug'
capture_output=True
max_requests=3
workers=1
| 29 | 113 | 0.777011 |
b1791920593f4e50adb1ee5900ad47f68783a7d1 | 211 | py | Python | code_snippets/api-monitor-schedule-downtime.py | brettlangdon/documentation | 87c23cb1d5e3e877bb37a19f7231b5d9239509dc | [
"BSD-3-Clause"
] | null | null | null | code_snippets/api-monitor-schedule-downtime.py | brettlangdon/documentation | 87c23cb1d5e3e877bb37a19f7231b5d9239509dc | [
"BSD-3-Clause"
] | null | null | null | code_snippets/api-monitor-schedule-downtime.py | brettlangdon/documentation | 87c23cb1d5e3e877bb37a19f7231b5d9239509dc | [
"BSD-3-Clause"
] | null | null | null | from datadog import initialize, api
options = {
'api_key': 'api_key',
'app_key': 'app_key'
}
initialize(**options)
# Schedule downtime
api.Downtime.create(scope='env:staging', start=int(time.time()))
| 17.583333 | 64 | 0.691943 |
b17998122b0c9414fb547e0a5c5bf8d5f8b4473a | 63 | py | Python | src/oscar/apps/customer/__init__.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | [
"BSD-3-Clause"
] | 4,639 | 2015-01-01T00:42:33.000Z | 2022-03-29T18:32:12.000Z | src/oscar/apps/customer/__init__.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | [
"BSD-3-Clause"
] | 2,215 | 2015-01-02T22:32:51.000Z | 2022-03-29T12:16:23.000Z | src/oscar/apps/customer/__init__.py | QueoLda/django-oscar | 8dd992d82e31d26c929b3caa0e08b57e9701d097 | [
"BSD-3-Clause"
] | 2,187 | 2015-01-02T06:33:31.000Z | 2022-03-31T15:32:36.000Z | default_app_config = 'oscar.apps.customer.apps.CustomerConfig'
| 31.5 | 62 | 0.84127 |
b179f01fa470edabbb25665461efb486ca6b1128 | 795 | py | Python | modnotes/converters.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | modnotes/converters.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | modnotes/converters.py | jack1142/SinbadCogs-1 | e0f24c0dbc3f845aa7a37ca96d00ee59494911ca | [
"BSD-Source-Code"
] | null | null | null | import contextlib
import re
from typing import NamedTuple, Optional
import discord
from redbot.core.commands import BadArgument, Context, MemberConverter
_discord_member_converter_instance = MemberConverter()
_id_regex = re.compile(r"([0-9]{15,21})$")
_mention_regex = re.compile(r"<@!?([0-9]{15,21})>$")
| 27.413793 | 79 | 0.693082 |
b179ff426e1a26e74d3b6cc6592435b4bf9294c3 | 224 | py | Python | face_api/admin.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | face_api/admin.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | face_api/admin.py | glen-s-abraham/face-detection-api | ce671a9750065c0fc82d0dd668299738f1c07508 | [
"MIT"
] | null | null | null | from django.contrib import admin
from face_api.models import KnowledgeDatabase
from face_api.models import ImageUploads
# Register your models here.
admin.site.register(KnowledgeDatabase)
admin.site.register(ImageUploads)
| 24.888889 | 45 | 0.848214 |
b17a29c0eb42919a5d5dc662a31db12c22531561 | 4,596 | py | Python | plugins/base/views.py | adlerosn/corpusslayer | d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58 | [
"MIT"
] | null | null | null | plugins/base/views.py | adlerosn/corpusslayer | d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58 | [
"MIT"
] | 1 | 2019-07-06T20:43:45.000Z | 2019-07-06T20:43:45.000Z | plugins/base/views.py | adlerosn/corpusslayer | d3dea2e2d15e911d048a39f6ef6cb2d5f7b33e58 | [
"MIT"
] | null | null | null | # Copyright (c) 2017 Adler Neves <adlerosn@gmail.com>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
pluginName = os.path.abspath(__file__).split(os.path.sep)[-2]
importline1 = 'import '+('.'.join(['plugins',pluginName,'models'])+' as models')
importline2 = 'import '+('.'.join(['plugins',pluginName,'forms'])+' as forms')
exec(importline1) #import plugins.thisplugin.models as models
exec(importline2) #import plugins.thisplugin.forms as forms
import application.forms as app_forms
import application.models as app_models
import application.business as app_ctrl
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import View
from django.views.generic import TemplateView
from django.template.response import TemplateResponse
from django.http import Http404
from django.urls import reverse
from django.core.paginator import Paginator
from urllib.parse import urlencode
from view.pages.views import SoonView, TemplateViewLoggedIn, UserPartEditFormView
from view.pages.views import CrudDeleteView, CrudEditView, CrudListView
import re
import json
import base64
# Create your views here.
| 38.3 | 104 | 0.698216 |
b17abbe2c8f968394190d9316ec3a085ca24ece7 | 197 | py | Python | addons/stats/scripts/predictors/abstract_predictor.py | Kait-tt/tacowassa | 7e71c6ef6b5f939a99a3600025b26d459ebc0233 | [
"MIT"
] | null | null | null | addons/stats/scripts/predictors/abstract_predictor.py | Kait-tt/tacowassa | 7e71c6ef6b5f939a99a3600025b26d459ebc0233 | [
"MIT"
] | 141 | 2016-08-23T03:44:17.000Z | 2017-10-08T02:39:36.000Z | addons/stats/scripts/predictors/abstract_predictor.py | Kait-tt/tacowassa | 7e71c6ef6b5f939a99a3600025b26d459ebc0233 | [
"MIT"
] | 1 | 2019-04-05T15:19:43.000Z | 2019-04-05T15:19:43.000Z | # coding:utf-8
from abc import ABCMeta, abstractmethod
| 19.7 | 45 | 0.720812 |
b17ac66814a8b6950eb9f7e8278e334fa9498901 | 216 | py | Python | day11/eqatri.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day11/eqatri.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | day11/eqatri.py | nikhilsamninan/python-files | 15198459081097058a939b40b5e8ef754e578fe0 | [
"Apache-2.0"
] | null | null | null | size = 5
m = (2 * size)-2
for i in range(0, size):
for j in range(0, m):
print(end=" ")
m = m - 1
for j in range(0, i + 1):
if(m%2!=0):
print("*", end=" ")
print("") | 12.705882 | 31 | 0.388889 |
b17bb1524daf129418a0726643402df5cb23be6d | 691 | py | Python | tests/test_constants.py | 9cat/dydx-v3-python | c222f3d0b1a870e63fcceaf19b42109c9558a6df | [
"Apache-2.0"
] | null | null | null | tests/test_constants.py | 9cat/dydx-v3-python | c222f3d0b1a870e63fcceaf19b42109c9558a6df | [
"Apache-2.0"
] | null | null | null | tests/test_constants.py | 9cat/dydx-v3-python | c222f3d0b1a870e63fcceaf19b42109c9558a6df | [
"Apache-2.0"
] | null | null | null | from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET
| 40.647059 | 107 | 0.688857 |
b17beb716bfd95140964574b9d48ea04c12d770d | 5,802 | py | Python | src/cogs/invasion.py | calsf/codex-prime | c651d4c2f34581babc8078d01fe84dc95f3b7c36 | [
"MIT"
] | null | null | null | src/cogs/invasion.py | calsf/codex-prime | c651d4c2f34581babc8078d01fe84dc95f3b7c36 | [
"MIT"
] | null | null | null | src/cogs/invasion.py | calsf/codex-prime | c651d4c2f34581babc8078d01fe84dc95f3b7c36 | [
"MIT"
] | null | null | null | #INVASION COMMANDS:
# !invasions // !atinvasions <reward> // !rminvasions
import discord
from discord.ext import commands
import asyncio
from src import sess
| 42.977778 | 122 | 0.54757 |
b17cbc82703ac9fc882cd99a409335fa53853226 | 226 | py | Python | samples-python/datalayer.calc/calculations/__init__.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | 16 | 2021-08-23T13:07:12.000Z | 2022-02-21T13:29:21.000Z | samples-python/datalayer.calc/calculations/__init__.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | null | null | null | samples-python/datalayer.calc/calculations/__init__.py | bracoe/ctrlx-automation-sdk | 6b2e61e146c557488125baf941e4d64c6fa6d0fb | [
"MIT"
] | 10 | 2021-09-29T09:58:33.000Z | 2022-01-13T07:20:00.000Z | __version__ = '2.0.0'
__description__ = 'Sample for calculations with data from the ctrlX Data Layer'
__author__ = 'Fantastic Python Developers'
__licence__ = 'MIT License'
__copyright__ = 'Copyright (c) 2021 Bosch Rexroth AG' | 45.2 | 79 | 0.778761 |
b17e60242b5d5da25f1f85bc29429ee00fd48f19 | 320 | py | Python | sqlalchemist/models/definitions.py | pmav99/sqlalchemist | af784f8d6e7c6c7298ad273c481af748cc0332d5 | [
"BSD-3-Clause"
] | 7 | 2019-09-06T21:58:42.000Z | 2021-12-02T21:48:35.000Z | sqlalchemist/models/definitions.py | pmav99/sqlalchemy_playground | af784f8d6e7c6c7298ad273c481af748cc0332d5 | [
"BSD-3-Clause"
] | null | null | null | sqlalchemist/models/definitions.py | pmav99/sqlalchemy_playground | af784f8d6e7c6c7298ad273c481af748cc0332d5 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T03:23:21.000Z | 2021-01-22T03:23:21.000Z | import sqlalchemy as sa
from .meta import Base
__all__ = [
"Person",
]
| 16.842105 | 48 | 0.6625 |
b17eab4940677c2202b0aa8a880f82fca874b795 | 2,732 | py | Python | examples/example_hello_world.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | 67 | 2016-06-03T20:37:56.000Z | 2022-03-08T19:05:06.000Z | examples/example_hello_world.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | 56 | 2016-05-23T17:44:04.000Z | 2021-11-18T19:23:52.000Z | examples/example_hello_world.py | clbarnes/figurefirst | ed38e246a96f28530bf663eb6920da1c3ccee610 | [
"MIT"
] | 11 | 2017-07-13T14:25:08.000Z | 2021-12-01T00:15:01.000Z | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
plt.ion()
from figurefirst import FigureLayout
layout = FigureLayout('example_hello_world_layout.svg')
layout.make_mplfigures()
d = np.array([[144, 57], [138, 57], [138, 59], [141, 61], [141, 82], [138, 84], [138, 85], [142, 85], [147, 85], [147, 84], [144, 82], [144, 57], [144, 57], [155, 57], [149, 57], [149, 59], [152, 61], [152, 82], [149, 84], [149, 85], [153, 85], [158, 85], [158, 84], [155, 82], [155, 57], [155, 57], [273, 57], [267, 57], [267, 59], [270, 61], [270, 82], [267, 84], [267, 85], [271, 85], [276, 85], [276, 84], [273, 82], [273, 57], [273, 57], [295, 57], [289, 57], [289, 59], [292, 61], [292, 70], [287, 67], [278, 76], [287, 85], [292, 83], [292, 85], [298, 85], [298, 84], [295, 81], [295, 57], [295, 57], [90, 57], [90, 59], [91, 59], [94, 61], [94, 82], [91, 84], [90, 84], [90, 85], [96, 85], [102, 85], [102, 84], [101, 84], [98, 82], [98, 71], [110, 71], [110, 82], [107, 84], [106, 84], [106, 85], [112, 85], [118, 85], [118, 84], [117, 84], [113, 82], [113, 61], [117, 59], [118, 59], [118, 57], [112, 58], [106, 57], [106, 59], [107, 59], [110, 61], [110, 70], [98, 70], [98, 61], [101, 59], [102, 59], [102, 57], [96, 58], [90, 57], [90, 57], [193, 57], [193, 59], [197, 60], [205, 85], [205, 86], [206, 85], [213, 65], [219, 85], [220, 86], [221, 85], [229, 61], [233, 59], [233, 57], [229, 58], [224, 57], [224, 59], [228, 61], [227, 62], [221, 80], [215, 60], [215, 60], [218, 59], [218, 57], [213, 58], [208, 57], [208, 59], [211, 60], [212, 63], [207, 80], [200, 60], [200, 60], [203, 59], [203, 57], [198, 58], [193, 57], [193, 57], [128, 67], [120, 76], [129, 85], [135, 80], [135, 80], [134, 80], [129, 84], [125, 82], [123, 76], [134, 76], [135, 75], [128, 67], [128, 67], [169, 67], [160, 76], [169, 85], [178, 76], [169, 67], [169, 67], [240, 67], [231, 76], [240, 85], [249, 76], [240, 67], [240, 67], [257, 67], [251, 68], [251, 69], [254, 71], [254, 82], [251, 84], [251, 85], [256, 85], [261, 85], [261, 84], [260, 84], [257, 82], [257, 75], [262, 68], [262, 68], [261, 70], [263, 71], [265, 70], [262, 67], [257, 71], [257, 67], [257, 67], [128, 68], [133, 75], [123, 75], 
[128, 68], [128, 68], [169, 68], [173, 70], [174, 76], [173, 81], [169, 84], [164, 82], [163, 76], [164, 70], [169, 68], [169, 68], [240, 68], [244, 70], [246, 76], [245, 81], [240, 84], [235, 82], [234, 76], [235, 70], [240, 68], [240, 68], [287, 68], [292, 70], [292, 72], [292, 80], [292, 82], [287, 84], [283, 82], [281, 76], [283, 71], [287, 68], [287, 68]])
ax = layout.axes['ax_name']['axis']
ax.plot(d[:,0], -d[:,1], lw=4)
layout.insert_figures('target_layer_name')
layout.write_svg('example_hello_world_output.svg')
| 143.789474 | 2,363 | 0.493411 |
b17fee2e7308f25f04ee5daea15a5c921b98ff99 | 2,009 | py | Python | cifar_exps/metric/local_config.py | maestrojeong/Deep-Hash-Table-ICML18- | 0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5 | [
"MIT"
] | 70 | 2018-06-03T04:19:13.000Z | 2021-11-08T10:40:46.000Z | cifar_exps/metric/local_config.py | maestrojeong/Deep-Hash-Table-ICML18- | 0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5 | [
"MIT"
] | null | null | null | cifar_exps/metric/local_config.py | maestrojeong/Deep-Hash-Table-ICML18- | 0c7efa230f950d5a2cd1928ac9f5d99f4276d2b5 | [
"MIT"
] | 14 | 2018-06-03T16:34:55.000Z | 2020-09-09T17:02:30.000Z | import sys
sys.path.append("../../configs")
#../../configs
from path import EXP_PATH
import numpy as np
DECAY_PARAMS_DICT =\
{
'stair' :
{
128 :{
'a1': {'initial_lr' : 1e-5, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a2' : {'initial_lr' : 3e-4, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a3' : {'initial_lr' : 1e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a4' : {'initial_lr' : 3e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
'a5' : {'initial_lr' : 1e-2, 'decay_steps' : 50000, 'decay_rate' : 0.3}
}
},
'piecewise' :
{
128 : {
'a1' : {'boundaries' : [10000, 20000], 'values' : [1e-4, 3e-5, 1e-5]},
'a2' : {'boundaries' : [10000, 20000], 'values' : [3e-4, 1e-4, 3e-5]},
'a3' : {'boundaries' : [10000, 20000], 'values' : [1e-3, 3e-4, 1e-4]},
'a4' : {'boundaries' : [10000, 20000], 'values' : [3e-3, 1e-3, 3e-4]},
'a5' : {'boundaries' : [10000, 20000], 'values' : [1e-2, 3e-3, 1e-3]},
'b1' : {'boundaries' : [20000, 35000], 'values' : [1e-4, 3e-5, 1e-5]},
'b2' : {'boundaries' : [20000, 35000], 'values' : [3e-4, 1e-4, 3e-5]},
'b3' : {'boundaries' : [20000, 35000], 'values' : [1e-3, 3e-4, 1e-4]},
'b4' : {'boundaries' : [20000, 35000], 'values' : [3e-3, 1e-3, 3e-4]},
'b5' : {'boundaries' : [20000, 35000], 'values' : [1e-2, 3e-3, 1e-3]}
}
}
}
ACTIVATE_K_SET = np.arange(1, 5)
K_SET = [1,4,16]
RESULT_DIR = EXP_PATH+"cifar_exps/"
#========================PARAM============================#
DATASET= 'cifar'
GPU_ID = 0
BATCH_SIZE = 128
EPOCH = 300
NSCLASS = 16
# model
EMBED_M= 64
CONV_NAME = 'conv1'
# metric loss
LOSS_TYPE = 'triplet'
MARGIN_ALPHA = 0.3
LAMBDA = 0.003 # regularization for npair
# learning
DECAY_TYPE = 'stair'
DECAY_PARAM_TYPE = 'a3'
| 36.527273 | 88 | 0.47337 |
b18129f45c367129cdadaeeefa97748f7c44101b | 1,133 | py | Python | POO punto 2/ManagerUsers.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null | POO punto 2/ManagerUsers.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null | POO punto 2/ManagerUsers.py | nan0te/Python-Algorithm-And-DataStructure | 7b7802b56d397c38f230f5efb687cedc6cc263f3 | [
"MIT"
] | null | null | null |
from Profesional import Profesional
from Particular import Particular
from Comercial import Comercial
|
| 28.325 | 68 | 0.620477 |
b1826d4965ab04b828a39c0aa6af7cd8e92a7f3e | 10,419 | py | Python | src/ggrc/models/mixins/with_action.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-12T23:46:00.000Z | 2019-01-12T23:46:00.000Z | src/ggrc/models/mixins/with_action.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/models/mixins/with_action.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Contains WithAction mixin.
A mixin for processing actions on an object in the scope of put request .
"""
from collections import namedtuple, defaultdict
import werkzeug.exceptions as wzg_exceptions
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.comment import Comment
from ggrc.models.document import Document
from ggrc.models.evidence import Evidence
from ggrc.models.snapshot import Snapshot
from ggrc.models.exceptions import ValidationError
from ggrc.models.reflection import ApiAttributes
from ggrc.models.reflection import Attribute
from ggrc.models.relationship import Relationship
from ggrc.rbac import permissions
| 33.501608 | 79 | 0.613783 |
b183550bc53fd30c394fa716585596aa04c10f32 | 99 | py | Python | tests/__init__.py | Fokko/example-library-python | b20b69c6dae93c32cd3d2c86a644abbf6b85199b | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | Fokko/example-library-python | b20b69c6dae93c32cd3d2c86a644abbf6b85199b | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | Fokko/example-library-python | b20b69c6dae93c32cd3d2c86a644abbf6b85199b | [
"Apache-2.0"
] | null | null | null | import sys, os
path = os.path.dirname(__file__)
if path not in sys.path:
sys.path.append(path)
| 19.8 | 32 | 0.717172 |
b184dd55b715329d1a0d130a5cfdba08a4a14ccb | 3,457 | py | Python | GAN_discriminator.py | SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs | 6dce1447e140f5724638ac576bbf913af4e8a0e6 | [
"MIT"
] | null | null | null | GAN_discriminator.py | SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs | 6dce1447e140f5724638ac576bbf913af4e8a0e6 | [
"MIT"
] | null | null | null | GAN_discriminator.py | SEE-MOF/Generation_of_atmospheric_cloud_fields_using_GANs | 6dce1447e140f5724638ac576bbf913af4e8a0e6 | [
"MIT"
] | 1 | 2020-12-11T15:03:36.000Z | 2020-12-11T15:03:36.000Z | import torch
| 37.576087 | 92 | 0.582586 |
b1867ef42ce297b26321e0a3ab432ed29359ffca | 7,770 | py | Python | statuspage_io.py | spyder007/pi-monitoring | fab660adcf6ed89a591a6ed2060d653369843e6e | [
"MIT"
] | null | null | null | statuspage_io.py | spyder007/pi-monitoring | fab660adcf6ed89a591a6ed2060d653369843e6e | [
"MIT"
] | null | null | null | statuspage_io.py | spyder007/pi-monitoring | fab660adcf6ed89a591a6ed2060d653369843e6e | [
"MIT"
] | null | null | null | import logging
import statuspage_io_client
import configuration
from enums import OpLevel
logger = logging.getLogger(__name__)
| 38.85 | 223 | 0.674775 |
b188895e8bd69c46255cb2668635f56b60539874 | 14,875 | py | Python | tests/test_gpath.py | ConductorTechnologies/ciopath | 574bfc38859cc68a80b98f8b0cf0d9aeddb646e5 | [
"MIT"
] | 1 | 2020-10-13T07:50:19.000Z | 2020-10-13T07:50:19.000Z | tests/test_gpath.py | ConductorTechnologies/ciopath | 574bfc38859cc68a80b98f8b0cf0d9aeddb646e5 | [
"MIT"
] | null | null | null | tests/test_gpath.py | ConductorTechnologies/ciopath | 574bfc38859cc68a80b98f8b0cf0d9aeddb646e5 | [
"MIT"
] | null | null | null | """ test gpath
isort:skip_file
"""
import os
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
SRC = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src")
if SRC not in sys.path:
sys.path.insert(0, SRC)
from ciopath.gpath import Path
sys.modules["glob"] = __import__("mocks.glob", fromlist=["dummy"])
if __name__ == "__main__":
unittest.main()
| 32.620614 | 93 | 0.604034 |
b188abfaae0783909143fd3975f59d921af7acbd | 3,513 | py | Python | linter.py | KidkArolis/SublimeLinter-contrib-healthier | 5b912af5f9afca85de86d709c46d3e566057823f | [
"MIT"
] | null | null | null | linter.py | KidkArolis/SublimeLinter-contrib-healthier | 5b912af5f9afca85de86d709c46d3e566057823f | [
"MIT"
] | 3 | 2019-01-25T15:21:38.000Z | 2019-01-30T23:52:11.000Z | linter.py | KidkArolis/SublimeLinter-contrib-healthier | 5b912af5f9afca85de86d709c46d3e566057823f | [
"MIT"
] | null | null | null | """This module exports the Healthier plugin class."""
import json
import logging
import re
import shlex
from SublimeLinter.lint import NodeLinter
logger = logging.getLogger('SublimeLinter.plugin.healthier')
| 32.831776 | 79 | 0.545687 |
b188c34a63c4e8f52180a384c6fb116f6a431c46 | 7,184 | py | Python | model_compression_toolkit/gptq/pytorch/quantization_facade.py | ofirgo/model_optimization | 18be895a35238df128913183b05e60550c2b6e6b | [
"Apache-2.0"
] | 42 | 2021-10-31T10:17:49.000Z | 2022-03-21T08:51:46.000Z | model_compression_toolkit/gptq/pytorch/quantization_facade.py | ofirgo/model_optimization | 18be895a35238df128913183b05e60550c2b6e6b | [
"Apache-2.0"
] | 6 | 2021-10-31T15:06:03.000Z | 2022-03-31T10:32:53.000Z | model_compression_toolkit/gptq/pytorch/quantization_facade.py | ofirgo/model_optimization | 18be895a35238df128913183b05e60550c2b6e6b | [
"Apache-2.0"
] | 18 | 2021-11-01T12:16:43.000Z | 2022-03-25T16:52:37.000Z | # Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Callable
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common import Logger
from model_compression_toolkit.core.common.constants import PYTORCH
from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
from model_compression_toolkit.core.common.target_platform import TargetPlatformCapabilities
from model_compression_toolkit.core.common.mixed_precision.kpi import KPI
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit import CoreConfig
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfigV2
from model_compression_toolkit.core.common.post_training_quantization import post_training_quantization
import importlib
if importlib.util.find_spec("torch") is not None:
from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
from model_compression_toolkit.core.pytorch.constants import DEFAULT_TP_MODEL
from torch.nn import Module
from model_compression_toolkit import get_target_platform_capabilities
DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
def pytorch_gradient_post_training_quantization_experimental(in_module: Module,
representative_data_gen: Callable,
target_kpi: KPI = None,
core_config: CoreConfig = CoreConfig(),
fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
gptq_config: GradientPTQConfig = None,
target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
"""
Quantize a trained Pytorch module using post-training quantization.
By default, the module is quantized using a symmetric constraint quantization thresholds
(power of two) as defined in the default TargetPlatformCapabilities.
The module is first optimized using several transformations (e.g. BatchNormalization folding to
preceding layers). Then, using a given dataset, statistics (e.g. min/max, histogram, etc.) are
being collected for each layer's output (and input, depends on the quantization configuration).
Thresholds are then being calculated using the collected statistics and the module is quantized
(both coefficients and activations by default).
If gptq_config is passed, the quantized weights are optimized using gradient based post
training quantization by comparing points between the float and quantized modules, and minimizing the
observed loss.
Args:
in_module (Module): Pytorch module to quantize.
representative_data_gen (Callable): Dataset used for calibration.
target_kpi (KPI): KPI object to limit the search of the mixed-precision configuration as desired.
core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
fw_info (FrameworkInfo): Information needed for quantization about the specific framework (e.g., kernel channels indices, groups of layers by how they should be quantized, etc.). `Default PyTorch info <https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/core/pytorch/default_framework_info.py>`_
gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to. `Default PyTorch TPC <https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/core/tpc_models/pytorch_tp_models/pytorch_default.py>`_
Returns:
A quantized module and information the user may need to handle the quantized module.
Examples:
Import a Pytorch module:
>>> import torchvision.models.mobilenet_v2 as models
>>> module = models.mobilenet_v2()
Create a random dataset generator:
>>> import numpy as np
>>> def repr_datagen(): return [np.random.random((1,224,224,3))]
Import mct and pass the module with the representative dataset generator to get a quantized module:
>>> import model_compression_toolkit as mct
>>> quantized_module, quantization_info = mct.pytorch_post_training_quantization(module, repr_datagen)
"""
if core_config.mixed_precision_enable:
if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
common.Logger.error("Given quantization config to mixed-precision facade is not of type "
"MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,"
"or pass a valid mixed precision configuration.")
common.Logger.info("Using experimental mixed-precision quantization. "
"If you encounter an issue please file a bug.")
return post_training_quantization(in_module,
representative_data_gen,
core_config,
fw_info,
PytorchImplementation(),
target_platform_capabilities,
gptq_config,
target_kpi=target_kpi)
else:
# If torch is not installed,
# we raise an exception when trying to use these functions.
| 60.369748 | 334 | 0.680122 |
b188f10ec381323c6265f65bdee66f4fcf49a96c | 11,472 | py | Python | transformer/dataset/graph.py | tmpaul06/dgl | 8f458464b0e14c78978db4b91590e8ca718c5ec6 | [
"Apache-2.0"
] | 1 | 2019-03-15T07:25:09.000Z | 2019-03-15T07:25:09.000Z | transformer/dataset/graph.py | tmpaul06/dgl | 8f458464b0e14c78978db4b91590e8ca718c5ec6 | [
"Apache-2.0"
] | null | null | null | transformer/dataset/graph.py | tmpaul06/dgl | 8f458464b0e14c78978db4b91590e8ca718c5ec6 | [
"Apache-2.0"
] | null | null | null | import dgl
import torch as th
import numpy as np
import itertools
import time
from collections import *
Graph = namedtuple('Graph',
['g', 'src', 'tgt', 'tgt_y', 'nids', 'eids', 'nid_arr', 'n_nodes', 'n_edges', 'n_tokens', 'layer_eids'])
# We need to create new graph pools for relative position attention (ngram style)
| 44.638132 | 174 | 0.539139 |
b189f5ce6dc38c0cbcc1102caf8a791a932e5870 | 12,747 | py | Python | tests/asgi/test_configuration.py | mrmilu/ariadne | cba577bd4befd16e0ec22701a5ac68f719661a9a | [
"BSD-3-Clause"
] | 1 | 2020-05-28T01:48:58.000Z | 2020-05-28T01:48:58.000Z | tests/asgi/test_configuration.py | mrmilu/ariadne | cba577bd4befd16e0ec22701a5ac68f719661a9a | [
"BSD-3-Clause"
] | null | null | null | tests/asgi/test_configuration.py | mrmilu/ariadne | cba577bd4befd16e0ec22701a5ac68f719661a9a | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=not-context-manager
from unittest.mock import ANY, Mock
from starlette.testclient import TestClient
from ariadne.asgi import (
GQL_CONNECTION_ACK,
GQL_CONNECTION_INIT,
GQL_DATA,
GQL_ERROR,
GQL_START,
GraphQL,
)
from ariadne.types import Extension
| 36.524355 | 87 | 0.672394 |
b18afbecdd582dccbd726f5d982378f6fc6adc50 | 7,056 | py | Python | OpenAI-Gym/agents/ddpg.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | 2 | 2017-09-26T04:39:04.000Z | 2017-10-12T08:57:51.000Z | OpenAI-Gym/agents/ddpg.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | null | null | null | OpenAI-Gym/agents/ddpg.py | stmobo/Machine-Learning | 83f69c7afb0a4bc1dc94482b8d23805e8ab2acde | [
"MIT"
] | null | null | null | import tensorflow as tf
import prettytensor as pt
import numpy as np
import gym
import math
import random
from collections import deque
from agents import mixed_network, spaces, replay_buffer
tensorType = tf.float32
"""
Implements a Deep Deterministic Policy Gradient agent.
Adjustable parameters:
- Actor / Critic learning rates
- Temporal Difference discount factor
- Experience Replay buffer / batch sizes
"""
| 45.230769 | 150 | 0.658872 |
b18b42a0184f3b3519a30ad5c379fbaef6c9cbc7 | 14,426 | py | Python | tests/unit/test_door.py | buxx/rolling | ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5 | [
"MIT"
] | 14 | 2019-11-16T18:51:51.000Z | 2022-01-15T17:50:34.000Z | tests/unit/test_door.py | buxx/rolling | ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5 | [
"MIT"
] | 148 | 2018-12-10T09:07:45.000Z | 2022-03-08T10:51:04.000Z | tests/unit/test_door.py | buxx/rolling | ef1268fe6ddabe768a125c3ce8b37e0b9cbad4a5 | [
"MIT"
] | 1 | 2020-08-05T14:25:48.000Z | 2020-08-05T14:25:48.000Z | from aiohttp.test_utils import TestClient
import pytest
import typing
import unittest.mock
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.model.character import MINIMUM_BEFORE_EXHAUSTED
from rolling.server.document.affinity import AffinityDirectionType
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.affinity import CHIEF_STATUS
from rolling.server.document.affinity import MEMBER_STATUS
from rolling.server.document.build import BuildDocument
from rolling.server.document.build import DOOR_MODE_LABELS
from rolling.server.document.build import DOOR_MODE__CLOSED
from rolling.server.document.build import DOOR_MODE__CLOSED_EXCEPT_FOR
from rolling.server.document.build import DoorDocument
class TestDoor:
def _place_door(self, kernel: Kernel) -> DoorDocument:
build = kernel.build_lib.place_build(
world_row_i=1,
world_col_i=1,
zone_row_i=10,
zone_col_i=10,
build_id="DOOR",
under_construction=False,
)
return build
| 32.272931 | 93 | 0.643768 |
b18c5f15c9a68336330b6b76a56071233826bf51 | 1,311 | py | Python | myWeather2_github.py | RCElectronic/weatherlight | 5d70b5bdbb67396620c211399c502b801878667f | [
"MIT"
] | null | null | null | myWeather2_github.py | RCElectronic/weatherlight | 5d70b5bdbb67396620c211399c502b801878667f | [
"MIT"
] | null | null | null | myWeather2_github.py | RCElectronic/weatherlight | 5d70b5bdbb67396620c211399c502b801878667f | [
"MIT"
] | null | null | null | # myWeather.py for inkyphat and RPiZW
print('Starting')
try:
import requests
print('requests module imported')
except:
print('Sorry, need to install requests module')
exit()
wx_url = 'api.openweathermap.org/data/2.5/weather?'
wx_city = 'q=Quispamsis,CA&units=metric'
wx_cityID = 'id=6115383&units=metric'
api_key = '&APPID='+'ENTER YOUR API KEY HERE'
try:
resp = requests.get('http://'+wx_url+wx_cityID+api_key)
print('got data')
except:
print('Cannot connect to service...')
exit()
if resp.status_code != 200:
raise ApiError('GET /weather/ {}'.format(resp.status_code))
try:
city=resp.json()["name"]
temperature=resp.json()["main"]["temp"] # in celcius
pressure=resp.json()["main"]["pressure"] # in hPa
humidity=resp.json()["main"]["humidity"] # in %
windSpeed = resp.json()["wind"]["speed"] # in m/s
windDeg = resp.json()["wind"]["deg"]
print('got json info')
except:
print('Cannot read data in api call...')
exit()
print('Weather in', city+':')
print('\tTemperature:\t',str(temperature)+'C')
print('\tPressure:\t',pressure,'hPa')
print('\tWind:\t\t',windSpeed,'m/s from',str(windDeg)+'')
print('\tWind:\t\t',
round(windSpeed*0.277778,1),'km/h from',str(windDeg)+'')
| 30.488372 | 64 | 0.617849 |
b18cdd01036a990db77da457c825c577e134e9df | 4,526 | py | Python | push-to-gee.py | Servir-Mekong/sentinel-1-pipeline | 79ccba65d974aa5c337adc4d72fa1df8ef75d20c | [
"MIT"
] | 16 | 2020-04-19T12:54:55.000Z | 2022-03-24T18:59:32.000Z | push-to-gee.py | Servir-Mekong/sentinel-1-pipeline | 79ccba65d974aa5c337adc4d72fa1df8ef75d20c | [
"MIT"
] | 2 | 2021-04-30T21:14:14.000Z | 2021-06-02T01:39:56.000Z | push-to-gee.py | Servir-Mekong/sentinel-1-pipeline | 79ccba65d974aa5c337adc4d72fa1df8ef75d20c | [
"MIT"
] | 1 | 2021-04-21T08:58:12.000Z | 2021-04-21T08:58:12.000Z | # -*- coding: utf-8 -*-
from dotenv import load_dotenv
load_dotenv('.env')
import logging
logging.basicConfig(filename='logs/push-2-gee.log', level=logging.INFO)
import ast
import glob
import json
import os
import subprocess
from datetime import datetime
from dbio import *
scale_factor = 10000
output_path = os.getenv('OUTPUT_PATH')
final_output = os.getenv('POST_PROCESS_OUTPUT_PATH')
gdal_path = os.getenv('GDAL_PATH')
manifest_dir = os.getenv('MANIFESTS_PATH')
cloud_path = os.getenv('GCS_PATH')
gee_asset_path = os.getenv('GEE_ASSET_PATH')
calc = '{0}gdal_calc.py -A %s --calc="A*{1}" --outfile={2}%s --type=UInt16'.format(gdal_path, scale_factor, final_output)
_cp_to_gs = 'gsutil cp {0}%s {1}'.format(final_output, cloud_path)
_upload_to_gee = 'earthengine upload image --manifest "{0}%s.json"'.format(manifest_dir)
properties = ['acquisitiontype', 'lastorbitnumber', 'lastrelativeorbitnumber', 'missiondatatakeid', 'orbitdirection',
'orbitnumber', 'platformidentifier', 'polarisationmode', 'producttype', 'relativeorbitnumber',
'sensoroperationalmode', 'swathidentifier']
if __name__ == '__main__':
main()
| 33.776119 | 122 | 0.592134 |
b18dfbe911fad785c5c6176e1eec4c5f85de7b49 | 938 | py | Python | rabbitai/tasks/celery_app.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/tasks/celery_app.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/tasks/celery_app.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | """
This is the main entrypoint used by Celery workers. As such,
it needs to call create_app() in order to initialize things properly
"""
from typing import Any
from celery.signals import worker_process_init
# Rabbitai framework imports
from rabbitai import create_app
from rabbitai.extensions import celery_app, db
# Init the Flask app / configure everything
flask_app = create_app()
# Need to import late, as the celery_app will have been setup by "create_app()"
# pylint: disable=wrong-import-position, unused-import
from . import cache, schedules, scheduler # isort:skip
# Export the celery app globally for Celery (as run on the cmd line) to find
app = celery_app
| 32.344828 | 87 | 0.765458 |
b18ee92e764bf93ddc723331ee49b72f1366542a | 4,403 | py | Python | adapters/adapter.py | ChristfriedBalizou/jeamsql | abd7735831b572f1f1a2d8e47b0759801fd5881c | [
"MIT"
] | null | null | null | adapters/adapter.py | ChristfriedBalizou/jeamsql | abd7735831b572f1f1a2d8e47b0759801fd5881c | [
"MIT"
] | null | null | null | adapters/adapter.py | ChristfriedBalizou/jeamsql | abd7735831b572f1f1a2d8e47b0759801fd5881c | [
"MIT"
] | null | null | null | from tabulate.tabulate import tabulate
import subprocess
import sys
import os
import re
import csv
import io
import json
| 24.461111 | 81 | 0.539859 |
b18f1a4acb87b8bb932241fcbf259f84c3dba954 | 3,000 | py | Python | MyCrypto/dsa/sm2_dsa.py | hiyouga/cryptography-experiment | d76abc56d6c09c96dd93abcd51d3c9e38fc8787c | [
"MIT"
] | 8 | 2019-11-30T14:45:13.000Z | 2022-03-16T10:09:34.000Z | MyCrypto/dsa/sm2_dsa.py | hiyouga/Cryptographic-Algorithms-Python | d76abc56d6c09c96dd93abcd51d3c9e38fc8787c | [
"MIT"
] | null | null | null | MyCrypto/dsa/sm2_dsa.py | hiyouga/Cryptographic-Algorithms-Python | d76abc56d6c09c96dd93abcd51d3c9e38fc8787c | [
"MIT"
] | null | null | null | import sys
sys.path.append("../..")
import random
from MyCrypto.utils.bitarray import bitarray
from MyCrypto.algorithms.exgcd import inverse
from MyCrypto.ecc.sm2 import SM2
if __name__ == '__main__':
message = b'message'
uid = b'ID:A'
sm2_dsa = SM2_DSA()
sk, pk = sm2_dsa.generate_keys()
sign = sm2_dsa.sign(message, uid, sk)
print(sign)
print(sm2_dsa.verify(message, sign, uid, pk))
''' file test '''
sm2_dsa.sign_file('../testdata/text.txt', uid, sk)
print(sm2_dsa.verify_file('../testdata/text.txt', '../testdata/text.txt.sign', uid, pk))
| 35.714286 | 92 | 0.549667 |
b18f8ac4ca91a60fabe49e7603be45706caf3334 | 52 | py | Python | chatbot/component/__init__.py | zgj0607/ChatBot | 3c6126754b9d037a04bd80d13874e2ae16b2c421 | [
"Apache-2.0"
] | null | null | null | chatbot/component/__init__.py | zgj0607/ChatBot | 3c6126754b9d037a04bd80d13874e2ae16b2c421 | [
"Apache-2.0"
] | null | null | null | chatbot/component/__init__.py | zgj0607/ChatBot | 3c6126754b9d037a04bd80d13874e2ae16b2c421 | [
"Apache-2.0"
] | null | null | null | __all__ = (
'readonly_admin',
'singleton'
)
| 10.4 | 21 | 0.576923 |
b18f8f8fa2a426987f403aea37090ba3d3fc94d4 | 5,103 | py | Python | calculadora.py | LucasCouto22/calculadoraPython | 84426c8d71f2c2186ae500245423516000e19ec0 | [
"Apache-2.0"
] | null | null | null | calculadora.py | LucasCouto22/calculadoraPython | 84426c8d71f2c2186ae500245423516000e19ec0 | [
"Apache-2.0"
] | null | null | null | calculadora.py | LucasCouto22/calculadoraPython | 84426c8d71f2c2186ae500245423516000e19ec0 | [
"Apache-2.0"
] | null | null | null |
controller = 0
fim = 0
while controller != 2:
if controller == 1 or controller == 0:
e = int(input('Digite um nmero para escolher: \n'
' 1 para soma \n'
' 2 para subtrao \n'
' 3 para multiplicao \n'
' 4 para diviso inteira \n'
' 5 para diviso real \n '
'6 para porcentagem \n'
' 7 para exponencial \n'
' 8 para raiz quadrada: '))
if e == 1:
if controller == 0:
h = int(input('Digite um valor: '))
t = int(input('Digite um valor para somar: '))
c = somar(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Digite um valor para somar: '))
c = somar(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 2:
if controller == 0:
h = int(input('Digite um valor: '))
t = int(input('Digite um valor para subtrair: '))
c = subtrair(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Digite um valor para subtrair: '))
c = subtrair(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 3:
if controller == 0:
h = int(input('Digite o primeiro valor: '))
t = int(input('Digite o segundo valor: '))
c = multiplicar(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Digite um valor para multiplicar: '))
c = multiplicar(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 4:
if controller == 0:
h = int(input('Digite o valor a ser dividido: '))
t = int(input('Digite o valor divisor: '))
c = dividirInteiro(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Digite um valor para divisor: '))
c = dividirInteiro(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 5:
if controller == 0:
h = int(input('Digite o valor a ser dividido: '))
t = int(input('Digite o valor divisor: '))
c = dividir(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Digite um valor para divisor: '))
c = dividir(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 6:
if controller == 0:
h = int(input('Digite o valor: '))
t = int(input('Digite a porcentagem: '))
c = porcentagem(h, t)
fim = c
print('Resultado final: ', fim,'%')
break;
elif controller == 1:
t = int(input('Digite o valor para descobrir porcentagem: '))
c = porcentagem(fim, t)
fim = c
print('Resultado final: ', fim,'%')
break;
elif e == 7:
if controller == 0:
h = int(input('Digite o valor: '))
t = int(input('Elevado a: '))
c = exponencial(h, t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
t = int(input('Elevado a: '))
c = exponencial(fim, t)
fim = c
print('Resultado: ', fim)
elif e == 8:
if controller == 0:
t = int(input('Nmero para descobrir raiz quadrada: '))
c = raizQuadrada(t)
fim = c
print('Resultado: ', fim)
elif controller == 1:
c = raizQuadrada(fim)
fim = c
print('Resultado: ', fim)
controller = int(input('Deseja continuar? \n'
'Se sim digite 1, se no digite 2: '))
if controller == 2:
print('Valor Final: ',fim)
break;
| 26.440415 | 77 | 0.406232 |
b190d1c3b154f53e7b40cd2cb8a33782b7ce1f7f | 1,982 | py | Python | prime_issue_spoilage/main.py | NicholasSynovic/ssl-metrics-github-issue-spoilage | 05711b6103aa6b6b935d02aa92fbcaf735a63cea | [
"BSD-3-Clause"
] | null | null | null | prime_issue_spoilage/main.py | NicholasSynovic/ssl-metrics-github-issue-spoilage | 05711b6103aa6b6b935d02aa92fbcaf735a63cea | [
"BSD-3-Clause"
] | null | null | null | prime_issue_spoilage/main.py | NicholasSynovic/ssl-metrics-github-issue-spoilage | 05711b6103aa6b6b935d02aa92fbcaf735a63cea | [
"BSD-3-Clause"
] | null | null | null | from argparse import Namespace
from datetime import datetime
import pandas
from dateutil.parser import parse as dateParse
from intervaltree import IntervalTree
from pandas import DataFrame
from prime_issue_spoilage.utils.primeIssueSpoilageArgs import mainArgs
if __name__ == "__main__":
main()
| 28.314286 | 77 | 0.706862 |
b192038591712556b2d6695f9b0d3ac03bfac07f | 4,544 | py | Python | IFR/mmseg/datasets/pipelines/semi/loading.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | 3 | 2022-03-09T13:15:15.000Z | 2022-03-21T06:59:10.000Z | IFR/mmseg/datasets/pipelines/semi/loading.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | null | null | null | IFR/mmseg/datasets/pipelines/semi/loading.py | jfzhuang/IFR | d6ffdd0c0810d7bb244f102ba8cc19c12f61e102 | [
"MIT"
] | null | null | null | import os.path as osp
import mmcv
import numpy as np
from mmseg.datasets.builder import PIPELINES
| 40.571429 | 116 | 0.650088 |
b192ffd8dc0dbef0c193761ff4f0641070958f09 | 3,384 | py | Python | topologies/dc_t1.py | andriymoroz/sai-challenger | 665f5dbff8c797cfd55cc0c13b03a77aefdb9977 | [
"Apache-2.0"
] | 11 | 2021-04-23T05:54:05.000Z | 2022-03-29T16:37:42.000Z | topologies/dc_t1.py | andriymoroz/sai-challenger | 665f5dbff8c797cfd55cc0c13b03a77aefdb9977 | [
"Apache-2.0"
] | 4 | 2021-06-02T11:05:31.000Z | 2021-11-26T14:39:50.000Z | topologies/dc_t1.py | andriymoroz/sai-challenger | 665f5dbff8c797cfd55cc0c13b03a77aefdb9977 | [
"Apache-2.0"
] | 14 | 2021-02-27T15:17:31.000Z | 2021-11-01T10:15:51.000Z | from contextlib import contextmanager
import pytest
from sai import SaiObjType
| 40.285714 | 110 | 0.637411 |
b193f13f0d572526822d816991b5f3105ef56820 | 7,045 | py | Python | asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 3 | 2021-05-20T02:36:30.000Z | 2021-11-28T16:00:15.000Z | asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | null | null | null | asynchronous_qiwi/models/QIWIWallet/master_m/list_qvc.py | LexLuthorReal/asynchronous_qiwi | 5847a8d4008493656e973e5283888a4e57234962 | [
"MIT"
] | 1 | 2021-11-28T16:00:20.000Z | 2021-11-28T16:00:20.000Z | from loguru import logger
import datetime
from pydantic.fields import ModelField
from typing import Optional, List, Union, Any
from ....utils.tools.str_datetime import convert
from pydantic import BaseModel, Field, validator, ValidationError
from ....data_types.QIWIWallet.list_qvc import ReleasedCardStatus, CardType, CardAlias
| 41.686391 | 107 | 0.628957 |
b194d8469a9b5649a06d4a8f9eab020579871edb | 818 | py | Python | src/mciso/visualize.py | lancechua/mciso | 2fd406b7c54f9cb6b331ae8ad3470d1f47696494 | [
"MIT"
] | 2 | 2021-08-06T14:20:37.000Z | 2022-03-29T16:13:10.000Z | src/mciso/visualize.py | lancechua/mciso | 2fd406b7c54f9cb6b331ae8ad3470d1f47696494 | [
"MIT"
] | null | null | null | src/mciso/visualize.py | lancechua/mciso | 2fd406b7c54f9cb6b331ae8ad3470d1f47696494 | [
"MIT"
] | 1 | 2021-08-06T14:21:13.000Z | 2021-08-06T14:21:13.000Z | import matplotlib.pyplot as plt
import pandas as pd
def scenarios_by_product(
X: "np.ndarray", indices: list, products: list, ax: plt.Axes = None
) -> plt.Axes:
"""Plot generated scenarios, with a subplot for each product"""
if ax is None:
_, ax = plt.subplots(X.shape[-1], 1, figsize=(8, X.shape[-1] * 2), sharex=True)
try:
iter(ax)
except TypeError:
ax = [ax]
for i, prod_i in enumerate(products):
pd.DataFrame(
X[:, :, i],
index=indices,
).plot(ax=ax[i], alpha=0.05, linewidth=3, legend=None, color="gray")
pd.DataFrame(X[:, :, i].mean(axis=1), index=indices, columns=["avg"]).plot(
ax=ax[i], alpha=0.8, linewidth=1, legend=None, color="blue"
)
ax[i].set_ylabel(prod_i)
return ax
| 27.266667 | 87 | 0.57335 |
b194dd14e51803a9d2a228b8e98a09f53e6b31cf | 26,160 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/_api_management_client_enums.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
| 36.536313 | 123 | 0.718425 |
b197033d00037d8ccf26822dfa92949370b97250 | 308 | py | Python | lcd_rom_small.py | rhubarbdog/microbit-LCD-driver | d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e | [
"MIT"
] | 2 | 2020-11-23T20:27:03.000Z | 2021-11-04T12:08:10.000Z | lcd_rom_small.py | rhubarbdog/microbit-LCD-driver | d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e | [
"MIT"
] | 1 | 2021-12-14T10:47:00.000Z | 2021-12-14T12:02:08.000Z | lcd_rom_small.py | rhubarbdog/microbit-LCD-driver | d1a7f5cf3c4cfe825da873ae1a25b5765fe8ca3e | [
"MIT"
] | null | null | null | from microbit import *
import microbit_i2c_lcd as lcd
i2c.init(sda=pin15,scl=pin13)
display = lcd.lcd(i2c)
display.lcd_display_string(str(chr(247)), 1)
print("this will display a pi symbol for ROM A00 japaneese\n"+\
"display a divide symbol for the A02 ROM european")
i2c.init(sda=pin20,scl=pin19)
| 23.692308 | 63 | 0.746753 |
b19975a6c0f70cdf1b6594a54b946673ec51a754 | 11,349 | py | Python | benchmarks/benchmarks.py | alanefl/vdf-competition | 84efc3aec180c43582c9421c6fb7fb2e22000635 | [
"Apache-2.0"
] | 97 | 2018-10-04T18:10:42.000Z | 2021-08-23T10:37:06.000Z | benchmarks/benchmarks.py | alanefl/vdf-competition | 84efc3aec180c43582c9421c6fb7fb2e22000635 | [
"Apache-2.0"
] | 4 | 2018-10-04T18:20:49.000Z | 2021-05-03T07:13:14.000Z | benchmarks/benchmarks.py | alanefl/vdf-competition | 84efc3aec180c43582c9421c6fb7fb2e22000635 | [
"Apache-2.0"
] | 17 | 2018-10-08T18:08:21.000Z | 2022-01-12T00:54:32.000Z | import time
import textwrap
import math
import binascii
from inkfish.create_discriminant import create_discriminant
from inkfish.classgroup import ClassGroup
from inkfish.iterate_squarings import iterate_squarings
from inkfish import proof_wesolowski
from inkfish.proof_of_time import (create_proof_of_time_nwesolowski,
check_proof_of_time_nwesolowski,
generate_r_value)
from inkfish import proof_pietrzak
from tests.int_mod_n import int_mod_n
start_t = 0
time_multiplier = 1000 # Use milliseconds
if __name__ == '__main__':
bench_main()
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| 38.602041 | 114 | 0.707639 |
b19995883a43664eea79cdbbf4ebcc8afcf1f9f2 | 2,415 | py | Python | ccl_dask_blizzard.py | michaelleerilee/CCL-M2BLIZZARD | ff936647d69c5e83553b55d84d7b3a0636290c77 | [
"BSD-3-Clause"
] | null | null | null | ccl_dask_blizzard.py | michaelleerilee/CCL-M2BLIZZARD | ff936647d69c5e83553b55d84d7b3a0636290c77 | [
"BSD-3-Clause"
] | null | null | null | ccl_dask_blizzard.py | michaelleerilee/CCL-M2BLIZZARD | ff936647d69c5e83553b55d84d7b3a0636290c77 | [
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from load_for_ccl_inputs import load_for_ccl_inputs
from ccl_marker_stack import ccl_dask
base = '/home/mrilee/nobackup/tmp/others/'
fnames = None
if False:
fnames = ['ccl-inputs-globe-122736+23.csv.gz']
if False:
fnames = ['ccl-inputs-globe-122736+23.csv.gz'
,'ccl-inputs-globe-122760+23.csv.gz']
if True:
fnames = ['ccl-inputs-globe-122736+23.csv.gz'
,'ccl-inputs-globe-122760+23.csv.gz'
,'ccl-inputs-globe-122784+23.csv.gz'
,'ccl-inputs-globe-122808+23.csv.gz'
,'ccl-inputs-globe-122832+23.csv.gz'
,'ccl-inputs-globe-122856+23.csv.gz'
,'ccl-inputs-globe-122880+23.csv.gz'
,'ccl-inputs-globe-122904+23.csv.gz']
file_fpnames = [base+fname for fname in fnames]
print 'file_fpnames: ',file_fpnames
# quit()
###########################################################################
# Load
# precsno_arr, visibility_arr = load_for_ccl_inputs(file_name)
# For extinction, 1/visibility.
thresh_mnmx = (1.0e-3,1.0)
# The calculation
if True:
ccl_dask_object = ccl_dask()
ccl_dask_object.load_data_segments_with_loader(load_for_ccl_inputs,file_fpnames,[('visibility_i',np.nan,np.float)])
# Diagnostics
if False:
print 'ccl_dask_object.data_segs',ccl_dask_object.data_segs
print 'execute'
ccl_dask_object.data_segs[0].result()
print 'ccl_dask_object.data_segs',ccl_dask_object.data_segs
if True:
ccl_dask_object.make_stacks(thresh_mnmx)
ccl_dask_object.shift_labels()
ccl_dask_object.make_translations()
ccl_dask_object.apply_translations()
if False:
print 'ccl_dask_object.data_segs[0].results()[0]\n'\
,ccl_dask_object.data_segs[0].result()[0]
if True:
np.set_printoptions(threshold=5000,linewidth=600)
print 'ccl_dask_object.ccl_results[0].m_results_translated[0][0:60,0:60]\n'\
,ccl_dask_object.ccl_results[0].m_results_translated[0][0:60,0:60]
np.set_printoptions(threshold=1000,linewidth=75)
ccl_dask_object.close()
# Note, if we have to do the 3-hour blizzard calculation w/o CCL, then we can monkey with the load_data_segments to
# have files loaded onto separate cluster nodes, like ghost cells. Alternatively, we can Dask it by client.submitting
# tasks with dependencies on those two adjacent futures.
| 30.56962 | 119 | 0.670807 |
b19b15001ce2daedc7edc47219f748a11fbd096b | 3,108 | py | Python | setup.py | RiS3-Lab/polytracker | 2ea047738717ff0c22e3b157934667c9ed84fa6f | [
"Apache-2.0"
] | null | null | null | setup.py | RiS3-Lab/polytracker | 2ea047738717ff0c22e3b157934667c9ed84fa6f | [
"Apache-2.0"
] | 1 | 2020-09-01T15:58:13.000Z | 2021-01-18T16:24:56.000Z | setup.py | RiS3-Lab/polytracker | 2ea047738717ff0c22e3b157934667c9ed84fa6f | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
from setuptools import setup, find_packages
from typing import Optional, Tuple
SETUP_DIR = os.path.dirname(os.path.realpath(__file__))
POLYTRACKER_HEADER = os.path.join(SETUP_DIR, 'polytracker', 'include', 'polytracker', 'polytracker.h')
if not os.path.exists(POLYTRACKER_HEADER):
sys.stderr.write(f"Error loading polytracker.h!\nIt was expected to be here:\n{POLYTRACKER_HEADER}\n\n")
exit(1)
setup(
name='polytracker',
description='API and Library for operating and interacting with PolyTracker',
url='https://github.com/trailofbits/polytracker',
author='Trail of Bits',
version=polytracker_version_string(),
packages=find_packages(),
python_requires='>=3.7',
install_requires=[
'graphviz',
'matplotlib',
'networkx',
'pygraphviz',
'pydot',
'tqdm',
'typing_extensions'
],
extras_require={
"dev": ["black", "mypy", "pytest"]
},
entry_points={
'console_scripts': [
'polyprocess = polytracker.polyprocess.__main__:main'
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Utilities'
]
)
| 35.318182 | 120 | 0.604247 |
b19b3f7c4a68fc939bc0e963cc37d4583121c7aa | 111 | py | Python | Game22/modules/online/__init__.py | ttkaixin1998/pikachupythongames | 609a3a5a2be3f5a187c332c7980bb5bb14548f02 | [
"MIT"
] | 4,013 | 2018-06-16T08:00:02.000Z | 2022-03-30T11:48:14.000Z | Game22/modules/online/__init__.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 22 | 2018-10-18T00:15:50.000Z | 2022-01-13T08:16:15.000Z | Game22/modules/online/__init__.py | pigbearcat/Games | b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2 | [
"MIT"
] | 2,172 | 2018-07-20T04:03:14.000Z | 2022-03-31T14:18:29.000Z | ''''''
from .server import gobangSever
from .client import gobangClient
from .playOnline import playOnlineUI | 27.75 | 36 | 0.810811 |
b19b4f05269a9c0a51ba854a6b3f0bd1816a6911 | 9,317 | py | Python | gazette_processor/gazette.py | GabrielTrettel/DiariesProcessor | 817b4d8d1bbf0fe88b315b159e949fe49a2324f7 | [
"MIT"
] | 2 | 2020-10-04T19:45:45.000Z | 2020-10-28T20:21:08.000Z | gazette_processor/gazette.py | GabrielTrettel/DiariesProcessor | 817b4d8d1bbf0fe88b315b159e949fe49a2324f7 | [
"MIT"
] | 6 | 2020-09-25T14:31:12.000Z | 2020-09-28T13:37:37.000Z | gazette_processor/gazette.py | GabrielTrettel/DiariesProcessor | 817b4d8d1bbf0fe88b315b159e949fe49a2324f7 | [
"MIT"
] | null | null | null | import os,sys, re
from math import ceil, floor
if __name__ == "__main__":
input_f = sys.argv[1]
output_f = sys.argv[2]
# g = Gazette(input_f, "", "")
# g.__split_cols()
# print(g.linear_text)
for file in os.listdir(input_f):
g = Gazette(input_f + '/' + file,"", "")
print(f"Parsing {file}")
with open( output_f + "/" + file, 'w') as f:
f.write(g.linear_text)
| 30.152104 | 114 | 0.605452 |
b19b6144712313556ed4af7f1913f9e90750f30c | 1,065 | py | Python | homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py | YellowRainBoots/2.0 | bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5 | [
"MIT"
] | 1 | 2021-01-19T00:48:10.000Z | 2021-01-19T00:48:10.000Z | homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py | YellowRainBoots/2.0 | bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5 | [
"MIT"
] | 17 | 2020-01-23T05:51:18.000Z | 2020-06-16T02:33:41.000Z | homepairs/HomepairsApp/Apps/Tenants/migrations/0001_initial.py | YellowRainBoots/2.0 | bf215350c2da0ab28ad2ec6f9338fb1b73b3f2e5 | [
"MIT"
] | 1 | 2020-08-06T02:10:58.000Z | 2020-08-06T02:10:58.000Z | # Generated by Django 3.0.2 on 2020-03-03 21:48
from django.db import migrations, models
import django.db.models.deletion
| 35.5 | 139 | 0.611268 |
b19cab2172cb675aff98cad37d3038a9d288244b | 21,295 | py | Python | edk2toolext/image_validation.py | cfernald/edk2-pytool-extensions | 3452e781a021e9b736fb10dbd3e7645a2efc400f | [
"BSD-2-Clause-Patent"
] | null | null | null | edk2toolext/image_validation.py | cfernald/edk2-pytool-extensions | 3452e781a021e9b736fb10dbd3e7645a2efc400f | [
"BSD-2-Clause-Patent"
] | null | null | null | edk2toolext/image_validation.py | cfernald/edk2-pytool-extensions | 3452e781a021e9b736fb10dbd3e7645a2efc400f | [
"BSD-2-Clause-Patent"
] | null | null | null | # @file image_validation.py
# This tool allows a user validate an PE/COFF file
# against specific requirements
##
# Copyright (c) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
from datetime import datetime
import os
from pefile import PE, SECTION_CHARACTERISTICS, MACHINE_TYPE, SUBSYSTEM_TYPE
import logging
import argparse
import sys
from edk2toolext import edk2_logging
########################
# Helper Functions #
########################
###########################
# TESTS START #
###########################
###########################
# TESTS END #
###########################
#
# Command Line Interface configuration
#
def get_cli_args(args):
parser = argparse.ArgumentParser(description='A Image validation tool for memory mitigation')
parser.add_argument('-i', '--file',
type=str,
required=True,
help='path to the image that needs validated.')
parser.add_argument('-d', '--debug',
action='store_true',
default=False)
parser.add_argument('-p', '--profile',
type=str,
default=None,
help='the profile config to be verified against. \
Will use the default, if not provided')
group = parser.add_mutually_exclusive_group()
group.add_argument('--set-nx-compat',
action='store_true',
default=False,
help='sets the NX_COMPAT flag')
group.add_argument('--clear-nx-compat',
action='store_true',
default=False,
help='clears the NX_COMPAT flag')
group.add_argument('--get-nx-compat',
action='store_true',
default=False,
help='returns the value of the NX_COMPAT flag')
return parser.parse_args(args)
def main():
# setup main console as logger
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
console = edk2_logging.setup_console_logging(False)
logger.addHandler(console)
args = get_cli_args(sys.argv[1:])
if args.debug is True:
console.setLevel(logging.DEBUG)
logging.info("Log Started: " + datetime.strftime(
datetime.now(), "%A, %B %d, %Y %I:%M%p"))
# pe.write(filename=f'{basename[0]}_nx_clear.{basename[1]}'
# Set the nx compatability flag and exit
if args.set_nx_compat is not None:
pe = PE(args.file)
set_nx_compat_flag(pe)
os.remove(args.file)
pe.write(args.file)
exit(0)
# clear the nx compatability flag and exit
if args.clear_nx_compat is not None:
pe = PE(args.file)
clear_nx_compat_flag(pe)
os.remove(args.file)
pe.write(args.file)
exit(0)
# exit with status equal to if nx compatability is present or not
if args.get_nx_compat is True:
exit(get_nx_compat_flag(args.file))
test_manager = TestManager()
test_manager.add_test(TestWriteExecuteFlags())
test_manager.add_test(TestSectionAlignment())
test_manager.add_test(TestSubsystemValue())
pe = PE(args.file)
if not args.profile:
result = test_manager.run_tests(pe)
else:
result = test_manager.run_tests(pe, args.profile)
logging.info(f'Overall Result: {result}')
if result == Result.SKIP:
logging.info('No Test requirements in the config file for this file.')
elif result == Result.PASS or result == Result.WARN:
sys.exit(0)
else:
sys.exit(1)
if __name__ == '__main__':
main()
| 34.795752 | 112 | 0.505565 |
b19eba8650f17954158c7ab292c05abfa2a4065c | 44 | py | Python | src/basics/files/delete_fichero.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
] | null | null | null | src/basics/files/delete_fichero.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
] | null | null | null | src/basics/files/delete_fichero.py | FoxNeo/MyPythonProjects | 3499ef0853f0087f6f143e1633b0a88a3d7b9818 | [
"MIT"
] | null | null | null | import os
os.remove("fichero_generado.txt") | 14.666667 | 33 | 0.795455 |
b19fd8f1c6f4a820c1d3db28aa85e5f3c1020cae | 31,290 | py | Python | Canon-M10.py | emanuelelaface/Canon-M10 | bd4559b2e528fbaa9559a92c4e752ce5f96c1053 | [
"MIT"
] | 3 | 2019-12-06T22:32:31.000Z | 2022-02-13T00:35:55.000Z | Canon-M10.py | emanuelelaface/Canon-M10 | bd4559b2e528fbaa9559a92c4e752ce5f96c1053 | [
"MIT"
] | null | null | null | Canon-M10.py | emanuelelaface/Canon-M10 | bd4559b2e528fbaa9559a92c4e752ce5f96c1053 | [
"MIT"
] | 5 | 2019-12-06T22:32:23.000Z | 2021-12-26T20:46:56.000Z | # -*- coding: utf-8 -*-
from remi.gui import *
from remi import start, App
import cv2
import numpy
import chdkptp
import time
import threading
import rawpy
if __name__ == "__main__":
start(M10GUI, address='0.0.0.0', port=8081, multiple_instance=False, enable_file_cache=True, start_browser=False, debug=False, update_interval = 0.01)
| 58.376866 | 703 | 0.577245 |
b1a00da7893518e48125fe8f8ffac5ec512f86f7 | 781 | py | Python | server/utils/exception/exception.py | mnichangxin/blog-server | 44544c53542971e4ba31b7d1a58d2a7fe55bfe06 | [
"MIT"
] | null | null | null | server/utils/exception/exception.py | mnichangxin/blog-server | 44544c53542971e4ba31b7d1a58d2a7fe55bfe06 | [
"MIT"
] | null | null | null | server/utils/exception/exception.py | mnichangxin/blog-server | 44544c53542971e4ba31b7d1a58d2a7fe55bfe06 | [
"MIT"
] | null | null | null | from werkzeug.exceptions import HTTPException | 32.541667 | 74 | 0.627401 |
b1a21975ae4f7b1e5e6eec59130eae251c21b5f0 | 2,159 | py | Python | backend/fetch_tweet.py | phuens/Tweet_Analysis | 8d5fca79107bd4af5278a4530ea1131482f49b42 | [
"MIT"
] | null | null | null | backend/fetch_tweet.py | phuens/Tweet_Analysis | 8d5fca79107bd4af5278a4530ea1131482f49b42 | [
"MIT"
] | null | null | null | backend/fetch_tweet.py | phuens/Tweet_Analysis | 8d5fca79107bd4af5278a4530ea1131482f49b42 | [
"MIT"
] | null | null | null | import json
import csv
import tweepy
from textblob import TextBlob
import nltk
from nltk.tokenize import word_tokenize
if __name__ == '__main__':
consumer_key =
consumer_secret =
access_token =
access_token_secret =
hashtag_phrase = 'geocode:27.466079,89.639010,30km'
search_for_hashtags(consumer_key, consumer_secret,
access_token, access_token_secret, hashtag_phrase)
| 38.553571 | 131 | 0.637332 |
b1a2e9e876bf7788f4968b9eb3b29a91a90c21c3 | 9,585 | py | Python | umich_daily.py | mpars0ns/scansio-sonar-es | ea7b1928277317b97c84443812da01af99ef0feb | [
"BSD-3-Clause"
] | 36 | 2015-10-14T21:17:16.000Z | 2022-01-21T16:34:24.000Z | umich_daily.py | mpars0ns/scansio-sonar-es | ea7b1928277317b97c84443812da01af99ef0feb | [
"BSD-3-Clause"
] | 5 | 2015-10-19T13:47:55.000Z | 2017-06-21T07:12:41.000Z | umich_daily.py | mpars0ns/scansio-sonar-es | ea7b1928277317b97c84443812da01af99ef0feb | [
"BSD-3-Clause"
] | 8 | 2016-04-28T09:34:20.000Z | 2022-01-21T16:34:23.000Z | import argparse
import sys
from multiprocessing import cpu_count, Process, Queue
import json
import logging
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk, scan
import hashlib
from helpers.certparser import process_cert
from helpers.hostparser import proccess_host
logger = logging.getLogger('SSLImporter')
logger_format = logging.Formatter('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():%(lineno)d %(asctime)s\033[0m| '
'%(message)s')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logger_format)
logger.addHandler(stream_handler)
elastic_logger = logging.getLogger('elasticsearch')
elastic_logger.addHandler(stream_handler)
DEFAULT_SERVER = u'localhost'
DEFAULT_PORT = 9200
def process_scan_certs(q, es):
"""
:param q: The Queue object that certs should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
:return:
"""
bulk_certs = []
while True:
certs = q.get()
if certs == "DONE":
bulk(es, bulk_certs)
return True
for cert in certs['certs']:
newcert = process_cert(cert)
if newcert:
newcert['import_date'] = certs['time']
newcert['source'] = 'umich'
newcert_action = {"_index": "passive-ssl-certs-umich", "_type": "cert", '_id': newcert['hash_id'],
'_source': newcert}
bulk_certs.append(newcert_action)
if len(bulk_certs) == 500:
bulk(es, bulk_certs)
bulk_certs = []
def process_hosts(q, es, initial):
"""
:param q: The Queue object that hosts should be pulled off of
:param es: An Elasticsearch connection. This way each worker has its own connection and you don't have to share it
across multiple workers/processes
:param initial: If this is the initial upload then we set the first_seen = last_seen. Other wise first_seen is left
blank and will be cleaned up later
:return:
"""
bulk_hosts = []
while True:
line = q.get()
if line == "DONE":
bulk(es, bulk_hosts)
return True
host = proccess_host(line)
cert_hash = hashlib.sha1(host['host']+host['hash']+host['source'])
cert_hash = cert_hash.hexdigest()
if initial:
host['first_seen'] = host['last_seen']
action = {"_op_type": "update", "_index": 'passive-ssl-hosts-umich', "_type": "host", "_id": cert_hash,
"doc": line, "doc_as_upsert": "true"}
bulk_hosts.append(action)
if len(bulk_hosts) == 500:
bulk(es, bulk_hosts)
bulk_hosts = []
def parse_scanfile(f, host_queue, cert_queue):
"""
:param f: json file from University of Michigan that has been lz4 decompressed.
:param host_queue: Queue to send host info to
:param cert_queue: Queue to send cert info to
:return:
"""
certs_set = set()
with open(f) as scan_file:
for line in scan_file:
item = json.loads(line)
item['log'].pop(0)
for entry in item['log']:
if entry['data']:
if 'server_certificates' in entry['data'] and entry['data']['server_certificates'] is not None:
if entry['data']['server_certificates']['certificate'] is not None:
if 'fingerprint_sha1' in entry['data']['server_certificates']['certificate']:
server_cert = entry['data']['server_certificates']['certificate']['fingerprint_sha1']
doc = {'host': item['host'], 'source': 'umich', 'last_seen': item['time'],
'hash': server_cert}
host_queue.put(doc)
if server_cert in certs_set:
pass # We already have this sha1 and we don't need to attempt parsing it
else:
if entry['data']['server_certificates']['certificate'] is not None:
if 'raw' in entry['data']['server_certificates']:
raw_cert = dict()
raw_cert['time'] = item['time']
raw_cert['certs'] = entry['data']['server_certificates']['raw']
else:
raw_cert = None
if raw_cert:
cert_queue.put(raw_cert)
certs_set.add(server_cert) # We have added this hash to be processed so we
# don't need to process it again
print "Finished processing file....now printing the length of the certs set"
print len(certs_set)
if __name__ == "__main__":
main(sys.argv)
| 45.212264 | 119 | 0.605842 |
b1a35e06a9245c638232ac973c3cdcca21d276f6 | 980 | py | Python | project/tests/scripts/system_vars.py | LeDron12/c2eo | 4f0dc6ed79df0739bd834eda6a0f77f3caf4292c | [
"MIT"
] | 12 | 2021-08-05T12:12:09.000Z | 2022-03-08T13:33:53.000Z | project/tests/scripts/system_vars.py | LeDron12/c2eo | 4f0dc6ed79df0739bd834eda6a0f77f3caf4292c | [
"MIT"
] | 26 | 2021-08-23T10:25:37.000Z | 2022-03-30T12:56:08.000Z | project/tests/scripts/system_vars.py | LeDron12/c2eo | 4f0dc6ed79df0739bd834eda6a0f77f3caf4292c | [
"MIT"
] | 12 | 2021-08-17T09:20:07.000Z | 2022-03-31T13:37:28.000Z | integer = [
['lld', 'long long', 9223372036854775807, -9223372036854775808],
['ld', 'long', 9223372036854775807, -9223372036854775808],
['lu', 'unsigned long', 18446744073709551615, 0],
['d', 'signed', 2147483647, -2147483648],
['u', 'unsigned', 4294967295, 0],
['hd', 'short', 32767, -32768],
['hu', 'unsigned short', 65535, 0],
['c', 'char', 127, -128],
['c', 'unsigned char', 255, 0],
['d', '_Bool', 1, 0],
]
real = [
['f', 'float', 3.40282e+38, -3.40282e+38],
['f', 'double', 1.79769e+308, -1.79769e+308],
['Lf', 'long double', 1.79769e+308, -1.79769e+308]
]
# todo: fix path
path = ''
directory = 'env'
filename1 = f'{directory}/code1.c'
filename2 = f'{directory}/code2.c'
logfile1 = f'{directory}/log1.txt'
logfile2 = f'{directory}/log2.txt'
eo_out = f'{directory}/eo_out.txt'
c_out = f'{directory}/c_out.txt'
c_bin = f'{directory}/a.out'
launcher = '../../bin/launcher.py'
full_log = None
resultDir = '../../../result'
| 29.69697 | 68 | 0.596939 |
b1a435a669f2409d097f7f74a5d9ca3c12d7e85f | 1,944 | py | Python | isaactest/tests/recieve_verify_emails.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | null | null | null | isaactest/tests/recieve_verify_emails.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | 1 | 2016-01-15T11:28:06.000Z | 2016-01-25T17:09:18.000Z | isaactest/tests/recieve_verify_emails.py | jsharkey13/isaac-selenium-testing | fc57ec57179cf7d9f0bb5ef46d759792b2af3bc8 | [
"MIT"
] | 1 | 2019-05-14T16:53:49.000Z | 2019-05-14T16:53:49.000Z | from ..utils.log import log, INFO, ERROR, PASS
from ..utils.i_selenium import assert_tab, image_div
from ..tests import TestWithDependency
__all__ = ["recieve_verify_emails"]
#####
# Test : Recieve Verification Emails
#####
| 44.181818 | 149 | 0.718107 |
b1a4e4ea2b00add4c4b415ad7ce218f992351283 | 536 | py | Python | setup.py | msabramo/grr | 4b13392528d61a3d42e6c3baa14fa74cc920c055 | [
"CC0-1.0"
] | null | null | null | setup.py | msabramo/grr | 4b13392528d61a3d42e6c3baa14fa74cc920c055 | [
"CC0-1.0"
] | null | null | null | setup.py | msabramo/grr | 4b13392528d61a3d42e6c3baa14fa74cc920c055 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from setuptools import setup
import sys
setup(
name='grr',
version='0.2',
author='Kunal Mehta',
author_email='legoktm@gmail.com',
url='https://github.com/legoktm/grr/',
license='CC-0',
description='A command-line utility to work with Gerrit',
long_description=open('README.rst').read(),
packages=['grr'],
install_requires=['configparser'] if sys.version_info[0] == 2 else [],
entry_points={
'console_scripts': [
'grr = grr:main'
],
}
)
| 24.363636 | 74 | 0.613806 |
b1a5144b5a072c013aabc225925d03cb09f975fc | 11,553 | py | Python | runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py | zelda3721/wenet | f41555469b93bcc055a95432dd14fd1400522964 | [
"Apache-2.0"
] | null | null | null | runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py | zelda3721/wenet | f41555469b93bcc055a95432dd14fd1400522964 | [
"Apache-2.0"
] | null | null | null | runtime/server/x86_gpu/model_repo_stateful/wenet/1/wenet_onnx_model.py | zelda3721/wenet | f41555469b93bcc055a95432dd14fd1400522964 | [
"Apache-2.0"
] | 1 | 2022-02-08T07:39:13.000Z | 2022-02-08T07:39:13.000Z | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import numpy as np
import os
import torch
import triton_python_backend_utils as pb_utils
from torch.utils.dlpack import to_dlpack, from_dlpack
from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch
| 42.947955 | 87 | 0.528348 |
b1a58559665e94514cdf1de5372c35158b389ecc | 7,254 | py | Python | stuojchaques.py | sunlupeng2020/stuoj | f8c109894e7a7118dc632fef34c55a01fe116f9a | [
"Apache-2.0"
] | null | null | null | stuojchaques.py | sunlupeng2020/stuoj | f8c109894e7a7118dc632fef34c55a01fe116f9a | [
"Apache-2.0"
] | null | null | null | stuojchaques.py | sunlupeng2020/stuoj | f8c109894e7a7118dc632fef34c55a01fe116f9a | [
"Apache-2.0"
] | null | null | null | #
#
# stuojstuquestionbh
from selenium import webdriver
# from selenium.webdriver.common.by import By
import pymysql
import re
from bs4 import BeautifulSoup
import connsql
# import loginzznuoj
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import ojtmxx
import time
# driver_path = "D:\\ChromeCoreDownloads\\chromedriver_win32\\chromedriver.exe"
# driver = webdriver.Chrome()
driver = webdriver.PhantomJS()
cur = connsql.conn.cursor()
# OJ
# getstudentxuehao()#
# getOjQuesNo('204215091001', '1003')
# cur.close()
# ,
if __name__ == '__main__':
# cur = connsql.conn.cursor() # connsql conn
# getBanjiChallengeNum(1)
# getStuChallengenum('204215091001')
# getOjQuesNo('204215091001', '1003')
# print(getbanjistuojusername(1))
# cur.close()
loginzznuoj() # OJ
time.sleep(2)
# codes = getsubmitcode(1000,1063215)
# print(codes)
# options.addArguments(
# "--user-data-dir=" + System.getenv("USERPROFILE") + "/AppData/Local/Google/Chrome/User Data/Default");
# driver.get("http://47.95.10.46/problemsubmit.php?sid=1063215&pid=1000")
results = getstuchallenge()
for result in results:
print(result)
# url = "http://47.95.10.46/problemsubmit.php?sid=" + str(result[1]) + "&pid=" + str(result[0]) #
# # driver.get(url)
codes = getsubmitcode(result[0], result[1])
if len(codes) > 5000:
codes = codes[0:5000]
# print(codes)
updatequescode(result[0], result[1], str(codes).strip()) #
# getsubmitcode('1003', '1068443')
# 2021.1.17
# stuxhlist = getbanjistuojusername(1) # 1
# questionnolist= ojtmxx.getojallquesnofromdatabase() # ID
# print(questionnolist)
# for stuno in stuxhlist:
# stuno1 = stuno[0] #
# for i in range(33, 35):
# stuno1='2042150910'+str(i)
# # if int(stuno1) > 204215091003:
# for questionno in questionnolist:
# questionno0 = questionno[0]
# print((stuno1, questionno0))
# getOjQuesNo(stuno1, questionno0)
# stuno1 = '204215091032'
# for questionno0 in range(1000, 2200):
# print((stuno1, questionno0))
# getOjQuesNo(stuno1, questionno0)
cur.close()
driver.close()
| 35.043478 | 114 | 0.666391 |
b1a5a19351b24a513cab2db62b55e27e8f29e1d1 | 3,899 | py | Python | tests/test_core.py | TheCheapestPixels/panda3d-stageflow | 7a049d939dec39e3ac780872bbaba5c25f309397 | [
"BSD-3-Clause"
] | 3 | 2020-10-04T18:52:37.000Z | 2022-02-21T13:21:45.000Z | tests/test_core.py | TheCheapestPixels/panda3d-stageflow | 7a049d939dec39e3ac780872bbaba5c25f309397 | [
"BSD-3-Clause"
] | 2 | 2020-05-28T03:33:47.000Z | 2020-05-28T03:38:30.000Z | tests/test_core.py | TheCheapestPixels/panda3d-stageflow | 7a049d939dec39e3ac780872bbaba5c25f309397 | [
"BSD-3-Clause"
] | null | null | null | from stageflow import Flow
from stageflow import Stage
# FIXME: Now add the ways that Flow *shouldn't* be usable:
# * transitioning to non-existent stages
# * passing invalid objects to Flow(stages=...)
| 24.36875 | 58 | 0.616055 |
b1a639ae9556a6f333b9ef26546b354a0f37d7a5 | 1,925 | py | Python | Yukki/__main__.py | nezukorobot/YUUKI | 7589acbb7db1e52710ee9fce1bdc6df5cb924be6 | [
"MIT"
] | null | null | null | Yukki/__main__.py | nezukorobot/YUUKI | 7589acbb7db1e52710ee9fce1bdc6df5cb924be6 | [
"MIT"
] | null | null | null | Yukki/__main__.py | nezukorobot/YUUKI | 7589acbb7db1e52710ee9fce1bdc6df5cb924be6 | [
"MIT"
] | 1 | 2021-12-01T10:17:55.000Z | 2021-12-01T10:17:55.000Z | import asyncio
import time
import uvloop
import importlib
from pyrogram import Client as Bot, idle
from .config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID
from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession
from Yukki.YukkiUtilities.database.functions import clean_restart_stage
from Yukki.YukkiUtilities.database.queue import (get_active_chats, remove_active_chat)
from .YukkiUtilities.tgcallsrun import run
from pyrogram import Client, idle
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
import time
Bot(
':yukki:',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
plugins={'root': 'Yukki.Plugins'},
).start()
print(f"[INFO]: BOT STARTED AS {BOT_NAME}!")
print(f"[INFO]: ASSISTANT STARTED AS {ASSNAME}!")
loop = asyncio.get_event_loop()
loop.run_until_complete(load_start())
run()
loop.close()
print("[LOG] CLOSING BOT")
| 29.166667 | 90 | 0.662338 |
b1a71b362a63e180bb73d60affe130cb3f02f9e9 | 3,180 | py | Python | loopchain/blockchain/transactions/transaction_builder.py | metalg0su/loopchain | dd27f8f42a350d1b22b0985749b1e821c053fe49 | [
"Apache-2.0"
] | null | null | null | loopchain/blockchain/transactions/transaction_builder.py | metalg0su/loopchain | dd27f8f42a350d1b22b0985749b1e821c053fe49 | [
"Apache-2.0"
] | 7 | 2019-08-28T00:19:28.000Z | 2020-07-31T07:07:53.000Z | loopchain/blockchain/transactions/transaction_builder.py | metalg0su/loopchain | dd27f8f42a350d1b22b0985749b1e821c053fe49 | [
"Apache-2.0"
] | null | null | null | import hashlib
from abc import abstractmethod, ABC
from typing import TYPE_CHECKING
from .. import Signature, ExternalAddress, Hash32
from loopchain.crypto.hashing import build_hash_generator
if TYPE_CHECKING:
from secp256k1 import PrivateKey
from . import Transaction, TransactionVersioner
| 32.783505 | 92 | 0.659119 |
b1a7a9bcfc93410c2986fe9c347507c8fbff9db4 | 1,132 | py | Python | PyBank/main.py | yongjinjiang/python-challenge | 4b266976baf8339186fae7140024ae5a3af3bc76 | [
"ADSL"
] | null | null | null | PyBank/main.py | yongjinjiang/python-challenge | 4b266976baf8339186fae7140024ae5a3af3bc76 | [
"ADSL"
] | null | null | null | PyBank/main.py | yongjinjiang/python-challenge | 4b266976baf8339186fae7140024ae5a3af3bc76 | [
"ADSL"
] | null | null | null | import csv
import csv
import os

# PyBank budget analysis: read monthly profit/loss rows from budget_data.csv
# and print total months, net total, average month-over-month change, and the
# months with the greatest increase/decrease in profits.
resource_dir = "/Users/jyj/OneDrive/A_A_Data_Analysis/MINSTP201808DATA2/03-Python/Homework/PyBank/Resources"
file_path = os.path.join(resource_dir, "budget_data.csv")

months = []   # column 0: month labels, e.g. "Jan-2010"
profits = []  # column 1: profit/loss for that month
with open(file_path, newline="") as data_file:
    csvreader = csv.reader(data_file, delimiter=",")
    next(csvreader)  # skip the header row
    for row in csvreader:
        months.append(row[0])
        profits.append(float(row[1]))
num_months = len(months)

print("Financial Analysis")
print("____________________")
print("Total Months:{}".format(num_months))
print("Total:${}".format(sum(profits)))

# Month-over-month changes: entry k is the change from month k to month k+1,
# so a change at index k happened *in* month k+1 (hence the +1 below).
changes = [later - earlier for earlier, later in zip(profits, profits[1:])]
print("Average change:${}".format(sum(changes) / (num_months - 1)))
print("Greatest increase in Profits :{} (${})".format(months[changes.index(max(changes)) + 1], max(changes)))
print("Greatest Decrease in Profits :{} (${})".format(months[changes.index(min(changes)) + 1], min(changes)))
| 31.444444 | 106 | 0.626325 |
b1a801667f7526e28011c5f08b7558d194b2a413 | 3,508 | py | Python | demo.py | sshopov/pyconau2017 | e492e284a5afa5115f81fddf83546168b128591c | [
"MIT"
] | 21 | 2018-01-09T15:55:44.000Z | 2020-03-22T06:27:52.000Z | demo.py | sshopov/pyconau2017 | e492e284a5afa5115f81fddf83546168b128591c | [
"MIT"
] | null | null | null | demo.py | sshopov/pyconau2017 | e492e284a5afa5115f81fddf83546168b128591c | [
"MIT"
] | 9 | 2017-08-08T10:19:09.000Z | 2019-03-01T12:12:30.000Z | #!/usr/bin/env python3
'''
Source name: demo.py
Author(s): Stoyan Shopov
Python Version: 3.* 32-bit or 64-bit
License: LGPL
Description:
This program was demoed on EV3D4 at PyCon Australia 2017.
It kicks off 2 threads a move thread and a feel thread.
The move thread drives the bot forward until the feel thread
detects an obstacle.
Then the move thread makes the bot move around in a circle
until the feel thread detects a touch on the touch sensor.
Preconditions:
The program has been loaded on to EV3 running ev3dev
Postcoditions:
Program exits cleanly.
References:
https://github.com/sshopov/pyconau2017
https://github.com/rhempel/ev3dev-lang-python
Release history:
----------------------------------------------------
0.0.1 - 06/08/2017:
Initial release
'''
import sys
import time
import threading
import signal
from ev3dev import ev3
# The 'done' event will be used to signal the threads to stop:
done = threading.Event()
# We also need to catch SIGINT (keyboard interrupt) and SIGTERM (termination
# signal from brickman) and exit gracefully.
# NOTE(review): `signal_handler` is not visible in this chunk — presumably it
# sets `done` and/or exits; confirm against the full file.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Now that we have the worker functions defined, let's run those in separate
# threads. `move` and `feel` are defined elsewhere in this file; both take the
# shared 'done' event so they can stop cooperatively.
move_thread = threading.Thread(target=move, args=(done,))
feel_thread = threading.Thread(target=feel, args=(done,))
move_thread.start()
feel_thread.start()
# The main thread will wait for the 'back' button to be pressed. When that
# happens, it will signal the worker threads to stop and wait for their completion.
btn = ev3.Button()
while not btn.backspace and not done.is_set():
    time.sleep(1)
done.set()
# Wait for both workers to finish before saying goodbye.
move_thread.join()
feel_thread.join()
ev3.Sound.speak('Farewell and good bye!').wait()
ev3.Leds.all_off() | 26.37594 | 84 | 0.643672 |
b1a8412c74612f899302b6781aec760fcfd3dd6d | 21,742 | py | Python | Game/story.py | starc52/GDE-Project | 50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f | [
"MIT"
] | null | null | null | Game/story.py | starc52/GDE-Project | 50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f | [
"MIT"
] | null | null | null | Game/story.py | starc52/GDE-Project | 50ee4055e26c1873b1c21dcb2a8c2d05f7bca40f | [
"MIT"
] | 1 | 2021-07-06T03:38:24.000Z | 2021-07-06T03:38:24.000Z |
from Game.player import Player
from pygame import *
from Game.const import *
| 39.966912 | 250 | 0.664474 |
b1a88bc7e1241c7e280f5c4ac943fa677100e8e2 | 7,651 | py | Python | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | [
"Apache-2.0"
] | 1 | 2022-03-31T15:25:16.000Z | 2022-03-31T15:25:16.000Z | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | [
"Apache-2.0"
] | 4 | 2022-03-31T16:16:00.000Z | 2022-03-31T23:24:33.000Z | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
from sh import git, ErrorReturnCode
import argparse
import sys
import os
import atexit
import re
def exit_handler(repo):
    """Return the repository to the branch that was checked out initially.

    Registered via atexit so the working tree is restored no matter how the
    script terminates.

    Parameters:
        repo: An sh.Command baked for git on the working repository.
    """
    print('Rolling back to starting branch')
    # 'git checkout -' switches back to the previously checked-out branch.
    repo.checkout('-')
def validate_version(version: str) -> str:
    """
    Validates the provided version is in the form 'M.m' (major.minor).

    Parameters:
        version: The version string to check, e.g. '3.8'.
    Returns:
        The same string provided as input if the format is valid.
    Raises:
        ValueError: If the provided version does not match the expected pattern.
    """
    # BUG FIX: the original pattern was r'(:?^\d+\.\d+$)' — '(:?' is a group
    # containing an *optional literal colon*, not the intended non-capturing
    # group '(?:'. It happened to accept the same strings, but the typo is
    # corrected here for clarity and robustness.
    version_re = re.compile(r'^\d+\.\d+$')
    if not version_re.match(version):
        # Include the offending value so argparse surfaces a useful message.
        raise ValueError(f"invalid version '{version}', expected 'MAJOR.MINOR'")
    return version
def get_repo_handle(path: str):
    """
    Provides a sh.Command baked to run git commands on a repository.
    Parameters:
        path: A path to a repository, if it is empty, the returned handle points to the directory this script lives in.
    Returns:
        An sh.Command ready to run git commands.
    """
    # '--no-pager' prevents git from invoking an interactive pager; the baked
    # '-C <path>' makes every subsequent invocation run inside that repository.
    if path != '':
        return git.bake('--no-pager', C=path)
    # Empty path: default to the directory containing this script.
    return git.bake('--no-pager', C=os.path.dirname(os.path.realpath(__file__)))
def get_release_branch(version: str) -> str:
    """Build the release branch name for the given version.

    Parameters:
        version: A string with a valid version, e.g. '3.8'.
    Returns:
        The name of the corresponding release branch, e.g. 'release/3.8.x'.
    """
    return 'release/' + version + '.x'
def fetch_all(repo):
    """
    Fetches all branches and tags from all remotes configured in the repository.
    Parameters:
        repo: An sh.Command baked for git on the working repository.
    """
    try:
        repo.fetch('--all', '--tags')
    except ErrorReturnCode as e:
        # Without up-to-date refs the rest of the script cannot proceed,
        # so a failed fetch aborts immediately.
        print(f'Failed to fetch remote. {e}')
        sys.exit(1)
def get_branch(repo, version: str) -> str:
    """
    Validates the release branch exists and returns a string with its name.
    Parameters:
        repo: An sh.Command baked for git on the working repository.
        version: A string with a valid version.
    Returns:
        A string with the name of the release branch.
    """
    release_branch = get_release_branch(version)
    try:
        # 'git rev-parse --verify <ref>' fails (non-zero exit) when the ref
        # does not exist; sh surfaces that as ErrorReturnCode.
        repo('rev-parse', '--verify', release_branch)
    except ErrorReturnCode as e:
        print(f'The branch {release_branch} does not exist. {e}')
        sys.exit(1)
    return release_branch
def checkout_release_branch(repo, version: str):
    """
    Checks out the release branch for the provided version.
    Parameters:
        repo: An sh.Command baked for git on the working repository.
        version: A string with a valid version.
    """
    # get_branch() exits the process if the release branch does not exist,
    # so at this point `branch` is guaranteed to be a real ref.
    branch = get_branch(repo, version)
    print(f'Checking out {branch}')
    try:
        repo.checkout(branch).wait()
    except ErrorReturnCode as e:
        print(f'Failed to checkout release branch {branch}. {e}')
        sys.exit(1)
def find_tag_version(repo, version: str) -> str:
    """
    Finds the next patch tag for the provided version.

    Iterates over all tags in the repository, keeps the highest patch number
    among tags matching '<version>.<patch>', and returns a tag with that patch
    number incremented by one.

    Parameters:
        repo: An sh.Command baked for git on the working repository.
        version: The major and minor versions we want to create a new tag for in the format 'M.m'
    Returns:
        The new tag to be created, e.g. '3.8.5'.
    """
    patch_version = -1
    # re.escape protects the '.' in the version from being a regex wildcard.
    version_regex = re.compile(fr'^{re.escape(version)}\.(\d+)$')
    for tag in repo.tag().splitlines():
        matched = version_regex.match(tag)
        if matched:
            patch_version = max(patch_version, int(matched[1]))
    if patch_version == -1:
        # BUG FIX: the original printed f'{".".join(version)}', which joins the
        # *characters* of the version string (e.g. '3.8' -> '3...8'); print the
        # version as-is instead.
        print(f'Failed to find an existing tag for {version}')
        sys.exit(1)
    return f'{version}.{patch_version + 1}'
def create_empty_commit(repo):
    """
    Creates an empty commit on the current branch. Uses defaults for author, signature, etc.

    Parameters:
        repo: An sh.Command baked for git on the working repository.
    """
    print('Creating empty commit.')
    try:
        # '--allow-empty' lets git record a commit with no tree changes;
        # this gives the new tag a distinct commit to point at.
        repo.commit('--allow-empty', '-m', 'Empty commit')
    except ErrorReturnCode as e:
        print(f'Failed to create empty commit: {e}')
        sys.exit(1)
def create_new_tag(repo, new_tag: str):
    """
    Creates a new tag on the current commit of the working repository.

    Parameters:
        repo: An sh.Command baked for git on the working repository.
        new_tag: The new tag to be created. i.e: 3.8.5
    """
    print(f'Creating new tag: {new_tag}')
    try:
        # BUG FIX: the original called the module-level `git.tag(...)`, which
        # ignores the '-C <path>' baked into `repo` and therefore tags
        # whatever repository the current working directory happens to be in.
        repo.tag(new_tag)
    except ErrorReturnCode as e:
        print(f'Failed to create new tag {new_tag}. {e}')
        sys.exit(1)
def push_branch(repo):
    """
    Executes a push on the current branch.
    Parameters:
        repo: An sh.Command baked for git on the working repository.
    """
    print('Pushing release branch...')
    try:
        # Pushes the current branch to its configured upstream.
        repo.push()
    except ErrorReturnCode as e:
        print(f'Failed to push empty commit to release branch. {e}')
        sys.exit(1)
def push_tag(repo, new_tag: str, remote: str):
    """
    Push a new tag to the provided remote.
    Parameters:
        repo: An sh.Command baked for git on the working repository.
        new_tag: The new tag to be pushed. i.e: 3.8.5
        remote: The remote in the repository the tag will be pushed to. i.e: origin
    """
    print(f'Pushing {new_tag} to {remote}...')
    try:
        repo.push(remote, new_tag)
    except ErrorReturnCode as e:
        # NOTE(review): unlike the other helpers, this one does not call
        # sys.exit(1) on failure — the script continues after a failed tag
        # push. Confirm whether that is intentional (best-effort push).
        print(f'Failed to push tag {new_tag} to {remote}. {e}')
if __name__ == "__main__":
    description = """Creates a new patch tag with an empty commit.
    Useful when we need to simply rebuild a collector image."""
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawTextHelpFormatter)
    # validate_version raises ValueError on bad input, which argparse turns
    # into a usage error.
    parser.add_argument('version', help='Version to bump in the format X.Y', type=validate_version)
    parser.add_argument('-d', '--dry-run', help='Run all checks without actually modifying the repo',
                        default=False, action='store_true')
    parser.add_argument('-p', '--push', help="Push the newly created tag", default=False, action='store_true')
    parser.add_argument('-C', '--cwd',
                        help='Path to the repository to run in, defaults to the directory this script is in',
                        default='')
    # BUG FIX: the help text promised a default of 'origin', but no default was
    # set, so args.remote was None unless '-r' was passed explicitly.
    parser.add_argument('-r', '--remote', help="Remote repository to push tags to, defaults to 'origin'",
                        default='origin')
    args = parser.parse_args()

    main(args.version, args.dry_run, args.push, args.cwd, args.remote)
| 29.091255 | 119 | 0.640439 |
b1a911035784142a39959873000505c8b7d79b40 | 2,455 | py | Python | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | [
"Apache-2.0"
] | null | null | null | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | [
"Apache-2.0"
] | null | null | null | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from kubernetes.client import models as k8s_models
from kubernetes.client import apis as k8s_apis
from kubernetes.client.rest import ApiException
from urllib3.exceptions import MaxRetryError
from . import VERSION_RX
from .. import config
from ..client import models as openshift_models
from ..client import apis as openshift_apis
from ..client import ApiClient, ConfigurationObject
from .base import BaseObjectHelper
from .exceptions import OpenShiftException
| 35.071429 | 127 | 0.712424 |
b1a93d370fc62aa987aa9250ab1bac4da3444f9c | 35 | py | Python | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | [
"MIT"
] | null | null | null | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | [
"MIT"
] | 8 | 2020-11-12T16:42:23.000Z | 2021-03-04T19:00:09.000Z | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | [
"MIT"
] | null | null | null | """Unit test package for nhdpy."""
| 17.5 | 34 | 0.657143 |
b1a94a4b34655e087c8464bf5e2ca43f8d328eaa | 10,423 | py | Python | ltcl/modules/lvae_nonlinear.py | anonymous-authors-iclr2022-481/ltcl | 0d8902228fa6c37f875bb60c4d16988462a9655a | [
"MIT"
] | 8 | 2021-10-16T08:35:37.000Z | 2022-02-10T09:25:50.000Z | leap/modules/lvae_nonlinear.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | null | null | null | leap/modules/lvae_nonlinear.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | 1 | 2021-11-30T04:06:43.000Z | 2021-11-30T04:06:43.000Z | """Temporal VAE with gaussian margial and laplacian transition prior"""
import torch
import numpy as np
import ipdb as pdb
import torch.nn as nn
import pytorch_lightning as pl
import torch.distributions as D
from torch.nn import functional as F
from .components.beta import BetaVAE_MLP
from .metrics.correlation import compute_mcc
from .components.base import GroupLinearLayer
from .components.transforms import ComponentWiseSpline
| 40.399225 | 111 | 0.570469 |
b1a94cda8b0a8f59129a19a7e19f329084618c94 | 7,196 | py | Python | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | [
"Apache-2.0"
] | null | null | null | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | [
"Apache-2.0"
] | null | null | null | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | [
"Apache-2.0"
] | null | null | null | import arcade
from cargame.globals import conf
from cargame import util
# This math is for getting the ratio from zoom. I honestly
# don't know what it is called, i just constructed it by hand
# Long form is 1 - (x - 1) / 2
zoom_multiplexer = lambda x : (3 - x)/2
# TODO: Implement anchor | 34.932039 | 153 | 0.591857 |
b1ac9e7af9abde201568a2b9eff7f851241bb02a | 168 | py | Python | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | [
"Apache-2.0"
] | null | null | null | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | [
"Apache-2.0"
] | null | null | null | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/models/tsm_r50-d8.py', '../_base_/datasets/cityscapes_769x769.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
] | 42 | 81 | 0.684524 |
b1aca6b126eaf2078a24e5384b735f4060abd7a2 | 1,883 | py | Python | abc/104/c_retry2.py | 515hikaru/solutions | 9fb3e4600f9a97b78211a5736c98354d4cbebc38 | [
"MIT"
] | null | null | null | abc/104/c_retry2.py | 515hikaru/solutions | 9fb3e4600f9a97b78211a5736c98354d4cbebc38 | [
"MIT"
] | 9 | 2019-12-29T17:57:39.000Z | 2020-02-16T16:36:04.000Z | abc104/c_retry2.py | 515hikaru/abc-sandbox | 6445dd9d6583bd48a285d6e5693173529933da51 | [
"MIT"
] | null | null | null | from itertools import combinations
# Script entry point; `main` is defined elsewhere in this file
# (not visible in this chunk).
if __name__ == '__main__':
    main()
| 29.888889 | 86 | 0.495486 |
b1acfa5bf6bd71ea82cf922fd4900527c2980874 | 4,418 | py | Python | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | [
"MIT"
] | null | null | null | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | [
"MIT"
] | null | null | null | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | [
"MIT"
] | null | null | null | ###############################################################################
# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by the Merlin dev team, listed in the CONTRIBUTORS file.
# <merlin@llnl.gov>
#
# LLNL-CODE-797170
# All rights reserved.
# This file is part of Merlin, Version: 1.5.0.
#
# For details, see https://github.com/LLNL/merlin.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Updated celery configuration."""
from __future__ import (
absolute_import,
print_function,
)
import logging
import os
import billiard
import psutil
from celery import Celery
from celery.signals import worker_process_init
import merlin.common.security.encrypt_backend_traffic
from merlin.config import (
broker,
results_backend,
)
from merlin.log_formatter import FORMATS
from merlin.router import route_for_task
LOG = logging.getLogger(__name__)
# SSL defaults; overridden below from the broker / results-backend config
# when a connection string is available.
broker_ssl = True
results_ssl = False
try:
    BROKER_URI = broker.get_connection_string()
    LOG.info(f"broker: {broker.get_connection_string(include_password=False)}")
    broker_ssl = broker.get_ssl_config()
    LOG.info(f"broker_ssl = {broker_ssl}")
    RESULTS_BACKEND_URI = results_backend.get_connection_string()
    results_ssl = results_backend.get_ssl_config(celery_check=True)
    LOG.info(
        f"results: {results_backend.get_connection_string(include_password=False)}"
    )
    LOG.info(f"results: redis_backed_use_ssl = {results_ssl}")
except ValueError:
    # These variables won't be set if running with '--local'.
    BROKER_URI = None
    RESULTS_BACKEND_URI = None
# The Celery application all merlin tasks register against.
app = Celery(
    "merlin",
    broker=BROKER_URI,
    backend=RESULTS_BACKEND_URI,
    broker_use_ssl=broker_ssl,
    redis_backend_use_ssl=results_ssl,
)
# Tasks exchange pickled Python objects (trusted internal traffic only).
app.conf.update(
    task_serializer="pickle", accept_content=["pickle"], result_serializer="pickle"
)
app.autodiscover_tasks(["merlin.common"])
# Acknowledge tasks only after completion and requeue them if a worker dies,
# so work is not lost on worker failure.
app.conf.update(
    task_acks_late=True,
    task_reject_on_worker_lost=True,
    task_publish_retry_policy={
        "interval_start": 10,
        "interval_step": 10,
        "interval_max": 60,
    },
    redis_max_connections=100000,
)
# Give workers two hours (7200 s) to acknowledge a task before it becomes
# available to grab again. (Comment previously said "one hour" — the value
# is 7200 seconds.)
app.conf.broker_transport_options = {"visibility_timeout": 7200, "max_connections": 100}
app.conf.update(broker_pool_limit=0)
# Task routing: call our default queue merlin
app.conf.task_routes = (route_for_task,)
app.conf.task_default_queue = "merlin"
# Log formatting
app.conf.worker_log_color = True
app.conf.worker_log_format = FORMATS["DEFAULT"]
app.conf.worker_task_log_format = FORMATS["WORKER"]
| 33.218045 | 88 | 0.715708 |
b1ad3f5981efc006ce7e36a91015794cd61586bc | 648 | py | Python | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | [
"MIT"
] | null | null | null | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | [
"MIT"
] | null | null | null | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | [
"MIT"
] | null | null | null | import random
import pandas as pd
import json
| 20.903226 | 108 | 0.634259 |
b1ad704b385cea93f718a905833492ee873ae1bf | 1,332 | py | Python | migrations/versions/e91e2508f055_.py | ifat-mohit/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | [
"MIT"
] | 1 | 2021-02-13T23:47:46.000Z | 2021-02-13T23:47:46.000Z | migrations/versions/e91e2508f055_.py | ifat-mohit/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | [
"MIT"
] | 2 | 2021-02-14T17:04:53.000Z | 2021-06-02T00:35:49.000Z | migrations/versions/e91e2508f055_.py | mohidex/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | [
"MIT"
] | 1 | 2020-04-07T11:56:22.000Z | 2020-04-07T11:56:22.000Z | """empty message
Revision ID: e91e2508f055
Revises: a064e677a1f1
Create Date: 2019-11-04 22:59:00.701304
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e91e2508f055'
down_revision = 'a064e677a1f1'
branch_labels = None
depends_on = None
| 31.714286 | 89 | 0.682432 |
490a5d4dee030077442db885609423fe0007703e | 758 | py | Python | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | [
"MIT"
] | null | null | null | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | [
"MIT"
] | null | null | null | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | [
"MIT"
] | null | null | null | from menu import Menu, MenuEntry
from logic.cloudformation import CloudFormation
| 25.266667 | 71 | 0.604222 |
490a7e4e927bf1f9002b7ce41d2b092342ed19da | 3,107 | py | Python | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | [
"MIT"
] | null | null | null | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | [
"MIT"
] | null | null | null | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField
from bot.data.config import STATIC_DIR
from bot.utils.logging import logger
# SQLite database file lives under the static assets directory.
db = SqliteDatabase(f"{STATIC_DIR}/db.sqlite3")
# NOTE(review): the `User` and `Archive` peewee model classes are defined
# elsewhere in this module (not visible in this chunk); confirm they bind to
# this `db` instance. `safe=True` makes create_table a no-op if it exists.
User.create_table(safe=True)
Archive.create_table(safe=True)
# Module-level model instances, presumably shared as lightweight accessors.
user = User()
archive = Archive()
| 30.762376 | 111 | 0.620856 |
490c3a09e90ac7741bc5df730d26dac2764368fc | 40,373 | py | Python | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | quic-ykota/aimet | c897bd4c360e3a0fb7a329c6bb98b569f66bace1 | [
"BSD-3-Clause"
] | 945 | 2020-04-30T02:23:55.000Z | 2022-03-31T08:44:32.000Z | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | [
"BSD-3-Clause"
] | 563 | 2020-05-01T03:07:22.000Z | 2022-03-30T05:35:58.000Z | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | [
"BSD-3-Clause"
] | 186 | 2020-04-30T00:55:26.000Z | 2022-03-30T09:54:51.000Z | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for fused batchnorm op """
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils import constants
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
_BN_STRUCTURE_ERROR_MSG = "BN op doesn't have the expected structure"
| 43.552319 | 118 | 0.626112 |
490d54319f77117f33898d0f301f950c860478c3 | 4,878 | py | Python | flaskex.py | DesuDeluxe/simple_rest_api | c9bed666269882adae97db974c29f9f8e406ce80 | [
"MIT"
] | null | null | null | flaskex.py | DesuDeluxe/simple_rest_api | c9bed666269882adae97db974c29f9f8e406ce80 | [
"MIT"
] | null | null | null | flaskex.py | DesuDeluxe/simple_rest_api | c9bed666269882adae97db974c29f9f8e406ce80 | [
"MIT"
] | null | null | null | import os
from flask import Flask, Response, render_template, redirect
from flask_restful import reqparse,request, abort, Api, Resource, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
import sqlite3
app = Flask(__name__)
# SQLite database file 'notes.db' lives next to this script.
p_dir = os.path.dirname(os.path.abspath(__file__))
db_file = "sqlite:///{}".format(os.path.join(p_dir, "notes.db"))
app.config['SQLALCHEMY_DATABASE_URI'] = db_file
api = Api(app)
db = SQLAlchemy(app)
# Request parser for note creation: both fields are mandatory.
parser = reqparse.RequestParser(bundle_errors=True)
#parser.add_argument('id', required=False,help='No id provided')
parser.add_argument('title', required=True, help='No title provided')
parser.add_argument('content', required=True, help='No content provided')
# Separate parser for PUT (update): only the content may change.
parserPut = reqparse.RequestParser(bundle_errors=True)
parserPut.add_argument('content', required=True, help='No content provided')
## sqlalchemy classes to be mapped to db
## fields needed for json output
# Marshalling schema for a note as returned to clients.
note_fields = {
    'id': fields.Integer,
    'title': fields.String,
    'content': fields.String,
    'created_date': fields.DateTime,
    'modified_date': fields.DateTime
}
# History rows additionally reference the note they belong to.
noteH_fields = dict(note_fields)
noteH_fields.update({
    'note_id': fields.Integer,
} )
# Deleted rows additionally record when the note was deleted.
noteD_fields = dict(noteH_fields)
noteD_fields.update({
    'deletion_date': fields.DateTime,
} )
##flask classes for routing
##setup the Api resource routing
# NOTE(review): the resource classes (Home, Note, NotesHistory, NotesList,
# NotesDeleted) are defined elsewhere in this file — not visible in this chunk.
api.add_resource(Home, '/')
api.add_resource(Note, '/note/<int:number>')
api.add_resource(NotesHistory, '/note/<int:number>/history')
api.add_resource(NotesList, '/notes')
api.add_resource(NotesDeleted, '/deleted')
if __name__ == '__main__':
    app.run(debug=False)
| 32.304636 | 193 | 0.693727 |
491193c73d24c3c74876c7aa66287f19f2f09a60 | 6,203 | py | Python | backend/server/device_legacy/routes.py | kristof-g/TempHum-Supervisor-Sys | aa7343c5dab5941b905333fd0172b688f8b4896f | [
"MIT"
] | null | null | null | backend/server/device_legacy/routes.py | kristof-g/TempHum-Supervisor-Sys | aa7343c5dab5941b905333fd0172b688f8b4896f | [
"MIT"
] | null | null | null | backend/server/device_legacy/routes.py | kristof-g/TempHum-Supervisor-Sys | aa7343c5dab5941b905333fd0172b688f8b4896f | [
"MIT"
] | null | null | null | import sys
import os
import json
import csv
from time import strftime
from datetime import timedelta, date, datetime
from flask import Blueprint, render_template, redirect, request, url_for, flash
import server.configuration as cfg
from server.postalservice import checkTemp
from server.helpers import LoginRequired, pwIsValid, resource_path
from server.models import SzenzorAdatok
app = sys.modules['__main__']
device_bp = Blueprint('device_bp', __name__, template_folder='templates')
| 44.307143 | 120 | 0.59036 |
4912467ee29fbe811c78fea1ef046cb9707fcd7e | 2,507 | py | Python | gdsfactory/components/resistance_sheet.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/components/resistance_sheet.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/components/resistance_sheet.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | from functools import partial
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.components.via_stack import via_stack_slab_npp_m3
from gdsfactory.types import ComponentSpec, Floats, LayerSpecs, Optional
# Preconfigured 80x80 um pad variant of the n++ slab via stack.
pad_via_stack_slab_npp = partial(via_stack_slab_npp_m3, size=(80, 80))
# Demo entry point: build a single resistance sheet and display it.
# NOTE(review): `resistance_sheet` is defined elsewhere in this file — not
# visible in this chunk.
if __name__ == "__main__":
    # import gdsfactory as gf
    # sweep = [resistance_sheet(width=width, layers=((1,0), (1,1))) for width in [1, 10, 100]]
    # c = gf.pack(sweep)[0]
    c = resistance_sheet(width=40)
    c.show()
    # import gdsfactory as gf
    # sweep_resistance = list(map(resistance_sheet, (5, 10, 80)))
    # c = gf.grid(sweep_resistance)
    # c.show()
| 28.816092 | 94 | 0.643797 |
4912d26a22acac060d471e8872438c7e944e8077 | 17,008 | py | Python | cogdl/trainers/sampled_trainer.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/trainers/sampled_trainer.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | cogdl/trainers/sampled_trainer.py | zhangdan0602/cogdl | 35a338f29066e4b1a5d7f46217f09ebceaf13106 | [
"MIT"
] | null | null | null | from abc import abstractmethod
import argparse
import copy
import numpy as np
import torch
from tqdm import tqdm
from cogdl.data import Dataset
from cogdl.data.sampler import (
SAINTSampler,
NeighborSampler,
ClusteredLoader,
)
from cogdl.models.supervised_model import SupervisedModel
from cogdl.trainers.base_trainer import BaseTrainer
from . import register_trainer
    def train(self):
        """Run the training loop with periodic evaluation and early stopping.

        Trains for up to ``self.max_epoch`` epochs, evaluating every
        ``self.eval_step`` epochs, and returns a deep copy of the model that
        achieved the lowest validation loss seen so far.
        """
        epoch_iter = tqdm(range(self.max_epoch))
        patience = 0  # consecutive evaluations without improvement
        max_score = 0  # best validation accuracy/F1 observed so far
        min_loss = np.inf  # lowest validation loss observed so far
        best_model = copy.deepcopy(self.model)
        for epoch in epoch_iter:
            self._train_step()
            # Evaluate only every `eval_step` epochs to amortize the cost.
            if (epoch + 1) % self.eval_step == 0:
                acc, loss = self._test_step()
                train_acc = acc["train"]
                val_acc = acc["val"]
                # assumes val_loss is a torch tensor (`.cpu()` below) — TODO confirm
                val_loss = loss["val"]
                epoch_iter.set_description(
                    f"Epoch: {epoch:03d}, Train Acc/F1: {train_acc:.4f}, Val Acc/F1: {val_acc:.4f}"
                )
                self.model = self.model.to(self.device)
                # Improvement on either metric resets patience, but the
                # `best_model` snapshot is refreshed only on a loss improvement.
                if val_loss <= min_loss or val_acc >= max_score:
                    if val_loss <= min_loss:
                        best_model = copy.deepcopy(self.model)
                    min_loss = np.min((min_loss, val_loss.cpu()))
                    max_score = np.max((max_score, val_acc))
                    patience = 0
                else:
                    patience += 1
                    # Stop once `self.patience` consecutive evaluations fail
                    # to improve either metric.
                    if patience == self.patience:
                        epoch_iter.close()
                        break
        return best_model
| 36.893709 | 127 | 0.602305 |
491377c3b97184cf6e4325a1301a6746ac433ea2 | 7,448 | py | Python | sample-input/sph-factors/pin-cell/sph-factors.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 97 | 2015-01-02T02:13:45.000Z | 2022-03-09T14:12:45.000Z | sample-input/sph-factors/pin-cell/sph-factors.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 325 | 2015-01-07T17:43:14.000Z | 2022-02-21T17:22:00.000Z | sample-input/sph-factors/pin-cell/sph-factors.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 73 | 2015-01-17T19:11:58.000Z | 2022-03-24T16:31:37.000Z | import openmoc
import openmc.openmoc_compatible
import openmc.mgxs
import numpy as np
import matplotlib
# Enable Matplotlib to work for headless nodes: the Agg (file-only) backend
# must be selected before pyplot is imported for the choice to take effect.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()  # disable interactive mode; figures are saved to disk, never shown
# Parse runtime options (num_azim, azim_spacing, tolerance, threads, max_iters)
# used throughout the script below
opts = openmoc.options.Options()
openmoc.log.set_log_level('NORMAL')
###############################################################################
# Eigenvalue Calculation w/o SPH Factors
###############################################################################
# Initialize 2-group OpenMC multi-group cross section library for a pin cell
mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='.')
# Create an OpenMOC Geometry from the OpenMC Geometry
openmoc_geometry = \
    openmc.openmoc_compatible.get_openmoc_geometry(mgxs_lib.geometry)
# Load the cross section data into OpenMOC Materials attached to the geometry
openmoc_materials = \
    openmoc.materialize.load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)
# Initialize flat source regions (FSRs)
openmoc_geometry.initializeFlatSourceRegions()
# Initialize an OpenMOC TrackGenerator and ray-trace the geometry
track_generator = openmoc.TrackGenerator(
    openmoc_geometry, opts.num_azim, opts.azim_spacing)
track_generator.generateTracks()
# Initialize an OpenMOC Solver with user-specified convergence/thread options
solver = openmoc.CPUSolver(track_generator)
solver.setConvergenceThreshold(opts.tolerance)
solver.setNumThreads(opts.num_omp_threads)
# Run an eigenvalue calculation with the (uncorrected) MGXS from OpenMC
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_no_sph = solver.getKeff()
# Extract the OpenMOC scalar fluxes for later comparison against OpenMC
fluxes_no_sph = openmoc.process.get_scalar_fluxes(solver)
###############################################################################
# Eigenvalue Calculation with SPH Factors
###############################################################################
# Compute SPH factors that adjust the MGXS so OpenMOC reproduces OpenMC's
# reaction rates
sph, sph_mgxs_lib, sph_indices = \
    openmoc.materialize.compute_sph_factors(
        mgxs_lib, azim_spacing=opts.azim_spacing,
        num_azim=opts.num_azim, num_threads=opts.num_omp_threads)
# Load the SPH-corrected MGXS library data
# NOTE(review): the solver above is reused as-is; presumably this call updates
# the geometry's Materials in place so the next solve sees the corrected
# MGXS -- confirm against openmoc.materialize documentation
materials = \
    openmoc.materialize.load_openmc_mgxs_lib(sph_mgxs_lib, openmoc_geometry)
# Run an eigenvalue calculation with the SPH-corrected modified MGXS library
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_with_sph = solver.getKeff()
# Report the OpenMC and OpenMOC eigenvalues
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/o SPH: \t%1.5f', keff_no_sph)
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/ SPH: \t%1.5f', keff_with_sph)
openmoc.log.py_printf('RESULT', 'OpenMC keff: \t\t1.17574 +/- 0.00086')
###############################################################################
# Extracting Scalar Fluxes
###############################################################################
openmoc.log.py_printf('NORMAL', 'Plotting data...')
# Plot the cells
openmoc.plotter.plot_cells(openmoc_geometry)
# Extract the OpenMOC scalar fluxes and apply the SPH correction factors
fluxes_sph = openmoc.process.get_scalar_fluxes(solver)
fluxes_sph *= sph
# Allocate arrays for the OpenMC fluxes and nu-fission cross sections:
# one row per flat source region (FSR), one column per energy group
num_fsrs = openmoc_geometry.getNumFSRs()
num_groups = openmoc_geometry.getNumEnergyGroups()
openmc_fluxes = np.zeros((num_fsrs, num_groups), dtype=np.float64)
nufission_xs = np.zeros((num_fsrs, num_groups), dtype=np.float64)
# Get the OpenMC flux in each FSR
for fsr in range(num_fsrs):
    # Find the OpenMOC cell and volume for this FSR
    openmoc_cell = openmoc_geometry.findCellContainingFSR(fsr)
    cell_id = openmoc_cell.getId()
    fsr_volume = track_generator.getFSRVolume(fsr)
    # Store the volume-averaged flux; np.flipud reverses the group ordering
    # (presumably to match OpenMOC's energy-group convention -- confirm)
    mgxs = mgxs_lib.get_mgxs(cell_id, 'nu-fission')
    flux = mgxs.tallies['flux'].mean.flatten()
    flux = np.flipud(flux) / fsr_volume
    openmc_fluxes[fsr, :] = flux
    nufission_xs[fsr, :] = mgxs.get_xs(nuclide='all')
# Extract energy group edges
group_edges = mgxs_lib.energy_groups.group_edges
group_edges += 1e-3  # shift ALL edges by 1e-3 eV so the lowest bound is nonzero (loglog scaling)
# Reverse the edge ordering to match the flipped per-group flux arrays
group_edges = np.flipud(group_edges)
# Normalize all three flux sets to a unit fission source
openmc_fluxes /= np.sum(openmc_fluxes * nufission_xs)
fluxes_sph /= np.sum(fluxes_sph * nufission_xs)
fluxes_no_sph /= np.sum(fluxes_no_sph * nufission_xs)
###############################################################################
# Plot the OpenMC, OpenMOC Scalar Fluxes
###############################################################################
# Duplicate each spectrum's first column so matplotlib's step plot has one
# value per group edge and spans the full energy range
openmc_fluxes = np.insert(openmc_fluxes, 0, openmc_fluxes[:,0], axis=1)
fluxes_no_sph = np.insert(fluxes_no_sph, 0, fluxes_no_sph[:,0], axis=1)
fluxes_sph = np.insert(fluxes_sph, 0, fluxes_sph[:,0], axis=1)
# Plot OpenMOC and OpenMC fluxes in each FSR
for fsr in range(num_fsrs):
    # Get the OpenMOC cell and material for this FSR (used in title/filename)
    cell = openmoc_geometry.findCellContainingFSR(fsr)
    material_name = cell.getFillMaterial().getName()
    # Step plot comparing OpenMC, uncorrected OpenMOC, and SPH-corrected OpenMOC
    fig = plt.figure()
    plt.plot(group_edges, openmc_fluxes[fsr,:],
             drawstyle='steps', color='r', linewidth=2)
    plt.plot(group_edges, fluxes_no_sph[fsr,:],
             drawstyle='steps', color='b', linewidth=2)
    plt.plot(group_edges, fluxes_sph[fsr,:],
             drawstyle='steps', color='g', linewidth=2)
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel('Energy [eV]')
    plt.ylabel('Flux')
    plt.title('Normalized Flux ({0})'.format(material_name))
    plt.xlim((min(group_edges), max(group_edges)))
    plt.legend(['openmc', 'openmoc w/o sph', 'openmoc w/ sph'], loc='best')
    plt.grid()
    # NOTE(review): assumes a 'plots/' directory already exists -- confirm
    filename = 'plots/flux-{0}.png'.format(material_name.replace(' ', '-'))
    plt.savefig(filename, bbox_inches='tight')
    plt.close()
###############################################################################
# Plot OpenMC-to-OpenMOC Scalar Flux Errors
###############################################################################
# Compute the percent relative error in the flux, group-wise per FSR,
# taking the OpenMC flux as the reference
rel_err_no_sph = np.zeros(openmc_fluxes.shape)
rel_err_sph = np.zeros(openmc_fluxes.shape)
for fsr in range(num_fsrs):
    delta_flux_no_sph = fluxes_no_sph[fsr,:] - openmc_fluxes[fsr,:]
    delta_flux_sph = fluxes_sph[fsr,:] - openmc_fluxes[fsr,:]
    rel_err_no_sph[fsr,:] = delta_flux_no_sph / openmc_fluxes[fsr,:] * 100.
    rel_err_sph[fsr,:] = delta_flux_sph / openmc_fluxes[fsr,:] * 100.
# Plot OpenMOC relative flux errors in each FSR
for fsr in range(num_fsrs):
    # Get the OpenMOC cell and material for this FSR (used in title/filename)
    cell = openmoc_geometry.findCellContainingFSR(fsr)
    material_name = cell.getFillMaterial().getName()
    # Step plot of the relative error with and without the SPH correction
    fig = plt.figure()
    plt.plot(group_edges, rel_err_no_sph[fsr,:],
             drawstyle='steps', color='r', linewidth=2)
    plt.plot(group_edges, rel_err_sph[fsr,:],
             drawstyle='steps', color='b', linewidth=2)
    plt.xscale('log')
    plt.xlabel('Energy [eV]')
    plt.ylabel('Relative Error [%]')
    plt.title('OpenMOC-to-OpenMC Flux Rel. Err. ({0})'.format(material_name))
    plt.xlim((min(group_edges), max(group_edges)))
    plt.legend(['openmoc w/o sph', 'openmoc w/ sph'], loc='best')
    plt.grid()
    # NOTE(review): assumes a 'plots/' directory already exists -- confirm
    filename = 'plots/rel-err-{0}.png'.format(material_name.replace(' ', '-'))
    plt.savefig(filename, bbox_inches='tight')
    plt.close()
4913c3ea285b469820f3898e3feff4274634fe9e | 494 | py | Python | VerifyServer.py | ACueva/Avi-Playground | cb1768999630ed884cff5d40c0faa86d24802754 | [
"Apache-2.0"
] | null | null | null | VerifyServer.py | ACueva/Avi-Playground | cb1768999630ed884cff5d40c0faa86d24802754 | [
"Apache-2.0"
] | null | null | null | VerifyServer.py | ACueva/Avi-Playground | cb1768999630ed884cff5d40c0faa86d24802754 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import urllib2, json
from urlparse import urlparse | 21.478261 | 31 | 0.61336 |