blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9b723091bf86c7c061fc6eceeef607fde9cef2a1 | b10e3b9e797af8fd100746a520541bf5bf11d707 | /SciGraph/wsgi.py | c844e5759fe084b73fbb677c879efe83af98d268 | [] | no_license | done520/SciGraph | 20adf85f2359e9e9b18b16ff3868388fdc4ba295 | e2bcff74bb46a00ce5b9b528857f5b6e8a4d491f | refs/heads/master | 2023-08-25T08:50:47.659532 | 2021-10-15T08:17:21 | 2021-10-15T08:17:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for SciGraph project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SciGraph.settings')
application = get_wsgi_application()
| [
"1240723224@qq.com"
] | 1240723224@qq.com |
ec4b251935be7880dbdd7fd3803ed95f553826b8 | f07c7e3966de00005230ebe31ab0579b92b66872 | /tests/test_conversions.py | fabd5966ca8f99127821f8602853abe727b673c4 | [
"Apache-2.0"
] | permissive | Algomorph/LevelSetFusion-Python | 30d990228e3d63a40668ade58e7879ae6e581719 | 46625cd185da4413f9afaf201096203ee72d3803 | refs/heads/master | 2021-06-25T11:30:44.672555 | 2020-11-11T14:47:33 | 2020-11-11T14:47:33 | 152,263,399 | 12 | 2 | Apache-2.0 | 2019-05-30T23:12:33 | 2018-10-09T14:15:03 | Python | UTF-8 | Python | false | false | 2,949 | py | # ================================================================
# Created by Gregory Kramida on 2/6/19.
# Copyright (c) 2019 Gregory Kramida
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ================================================================
# stdlib
from unittest import TestCase
# libraries
import numpy as np
# local
# C++ extension
import level_set_fusion_optimization as cpp_extension
class CoonversionTest(TestCase):
def test_tensor_f3_basic(self):
t = np.array([[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]], dtype=np.float32)
t2 = cpp_extension.return_input_f3(t)
self.assertTrue(np.allclose(t, t2))
t3 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
[[13, 14, 15], [16, 17, 18], [19, 20, 21], [22, 23, 24]],
[[25, 26, 27], [28, 29, 30], [31, 32, 33], [34, 35, 36]]]
, dtype=np.float32)
t4 = cpp_extension.return_input_f3(t3)
self.assertTrue(np.allclose(t3, t4))
def test_tensor_f4_basic(self):
t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3))
t2 = cpp_extension.return_input_f4(t)
self.assertTrue(np.allclose(t, t2))
def test_tensor_f3rm_basic(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
t2 = cpp_extension.return_tensor_f3rm()
self.assertTrue(np.allclose(t, t2))
def test_tensor_f4rm_basic(self):
t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3))
t2 = cpp_extension.return_tensor_f4rm()
self.assertTrue(np.allclose(t, t2))
def test_tensor_f3_scale(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
factor = 2.5
t2 = cpp_extension.scale(t, factor)
self.assertTrue(np.allclose(t * factor, t2))
def test_tensor_f3_add_constant(self):
t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
constant = 95.2
t2 = cpp_extension.add_constant(t, constant)
self.assertTrue(np.allclose(t + constant, t2))
def test_tensor_f3_add_2_tensors(self):
t1 = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3))
t2 = np.random.rand(2, 4, 3).astype(np.float32) * 15.0
t3 = cpp_extension.add_tensors(t1, t2)
self.assertTrue(np.allclose(t1 + t2, t3))
| [
"algomorph@gmail.com"
] | algomorph@gmail.com |
0b18e9c90c6d35661c619353909e746c7833e730 | 8272944ef520d9f013e7e5083ac201a148f11728 | /src/examples/regression/sklearn_lasso_diabetes.py | 0097200b76aa2196441d3a285851bb2bb66637fd | [] | no_license | alecordev/data-science | 4709a46ee31e21286913548317bdbffba1b51fd3 | 9b152fa8c03cca33abcc65cc572d15815917bd05 | refs/heads/master | 2023-04-09T17:56:29.336037 | 2023-03-25T13:15:42 | 2023-03-25T13:15:42 | 157,546,453 | 1 | 0 | null | 2022-07-23T20:21:51 | 2018-11-14T12:35:55 | Jupyter Notebook | UTF-8 | Python | false | false | 1,807 | py | from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Lasso
from sklearn.metrics import make_scorer, r2_score, mean_squared_error
from joblib import dump, load
import numpy as np
# Load the diabetes dataset
diabetes = load_diabetes()
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
diabetes.data, diabetes.target, test_size=0.2, random_state=42
)
# Define the pipeline
pipeline = Pipeline(
[("imputer", SimpleImputer()), ("scaler", StandardScaler()), ("lasso", Lasso())]
)
# Define the hyperparameter grid
param_grid = {"lasso__alpha": np.logspace(-4, 4, 20)}
# Define the evaluation metrics
scoring = {"r2": make_scorer(r2_score), "mse": make_scorer(mean_squared_error)}
# Define the cross-validation strategy
cv = KFold(n_splits=5, shuffle=True, random_state=42)
# Define the grid search
grid_search = GridSearchCV(
pipeline, param_grid=param_grid, cv=cv, scoring=scoring, refit="r2"
)
# Fit the grid search to the training data
grid_search.fit(X_train, y_train)
# Print the best hyperparameters and metrics
print("Best hyperparameters:", grid_search.best_params_)
print("Best r2 score:", grid_search.best_score_)
print("Best MSE:", grid_search.cv_results_["mean_test_mse"][grid_search.best_index_])
# Save the best model
dump(grid_search.best_estimator_, "diabetes_model.joblib")
# Load the best model and use it to predict a new observation
loaded_model = load("diabetes_model.joblib")
new_observation = X_test[0, :].reshape(1, -1)
print("Predicted value:", loaded_model.predict(new_observation)[0])
| [
"alecor.dev@gmail.com"
] | alecor.dev@gmail.com |
eab369dbdac56b2df51579a6ff167856be574579 | 97caa124ffa5da9819c39a16c734165176d90349 | /archive/2016/week6/tasks/figures.py | bdb3f1677fcede9381c778c3f8ccd4270e3351ce | [
"Apache-2.0"
] | permissive | YAtOff/python0 | dd684731065321fd52d475fd2b2105db59f5c19c | b5af5004131d64dd52d42746eddb72b6c43a13c7 | refs/heads/master | 2021-01-18T21:19:11.990434 | 2019-05-29T20:14:23 | 2019-05-29T20:14:23 | 44,601,010 | 6 | 7 | Apache-2.0 | 2019-10-31T22:45:21 | 2015-10-20T11:13:11 | Jupyter Notebook | UTF-8 | Python | false | false | 420 | py | """
Напишета програма, която с помощта на Turtle рисува следните фитури:
- триъгълник
- квадрат
- петоъгълник
- шестоъгълник
Използвайте цикли.
Може ли да напишета една функция, която да може да рисува всяка едно от фигурите.
"""
| [
"yavor.atov@gmail.com"
] | yavor.atov@gmail.com |
a67ec9e602d48416932753f037925e48bd6d91cb | 2916dd05f6c67958d4ad71392f8c093ed6710016 | /app1/migrations/0015_auto_20180326_1039.py | 0f52518c963545d78a706a8d0fa95fc72539fa1b | [] | no_license | firchatn/WorkOrderMangerDjango | a05d6bbfdcc6111da813aca56676ea12b8a4c1d0 | f546e8db24e8ab9a0465e09bb17bd9190570a018 | refs/heads/master | 2020-04-04T10:16:52.840589 | 2018-11-02T10:12:17 | 2018-11-02T10:12:17 | 155,849,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-03-26 08:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app1', '0014_auto_20180326_0008'),
]
operations = [
migrations.AlterField(
model_name='woconsommable',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woeqpinspecter',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woequipement',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wologistique',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='womethodeinspection',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wopersonnel',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='woservice',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
migrations.AlterField(
model_name='wovehicule',
name='codWo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.WorkOrdre'),
),
]
| [
"firaschaabencss@gmail.com"
] | firaschaabencss@gmail.com |
01f0fbe0765bca56a059bfe8dcb23fba59cfa74f | b739654b057190041d3f82d035874fe10e4825d4 | /qtpynodeeditor/base.py | ec2455a0de51d78cf186d5399a2e9c8f5dde762c | [
"BSD-3-Clause"
] | permissive | klauer/qtpynodeeditor | 708cec70ae51cdbf52262e1cdf0d0bd33bf5e137 | 523e76e15ef26edc73fdad6fdd65df9babbde73b | refs/heads/master | 2023-08-16T06:35:08.051000 | 2023-08-04T16:50:21 | 2023-08-04T16:50:21 | 175,901,436 | 141 | 50 | NOASSERTION | 2023-08-15T06:23:03 | 2019-03-15T22:55:45 | Python | UTF-8 | Python | false | false | 342 | py | class Serializable:
'Interface for a serializable class'
def save(self) -> dict:
"""
Save
Returns
-------
value : dict
"""
...
def restore(self, state: dict):
"""
Restore
Parameters
----------
state : dict
"""
...
| [
"klauer@slac.stanford.edu"
] | klauer@slac.stanford.edu |
dc78542a96e486cffbe3bd259698a41a9b92db77 | f9acdde88dbb70a2844e058f6c53c016fc8407c1 | /lfc/utils/images.py | ea55297329c22e839417157a4753b8d6ebccdc1f | [] | no_license | yzl11/django-lfc | 536daccae82351af66f3894c38c8f2702691af75 | 75c900d672b4d36705fb8fa4833c446bbb78efea | refs/heads/master | 2021-01-15T13:14:37.192773 | 2015-05-03T15:03:12 | 2015-05-03T15:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,916 | py | # PIL imports
import PIL.ImageFile
import PIL
def scale_to_min_size(image, min_width, min_height):
"""Returns an image, that isn't smaller than min_width and min_height.
That means one side is exactly given value and the other is greater.
This may only makes sense if the image is cut after it is scaled.
"""
# resize proportinal
width, height = image.size
prop_x = float(min_width) / width
prop_y = float(min_height) / height
# TODO: Translate to english
# Die groessere Proportion (oder Faktor oder Quotient) zwischen Soll-Groesse
# und Ist-Groesse kommt fuer beide Kanten (da proportional) zur Anwendung.
# Das bedeutet die uebrige Kante ist auf jeden Fall groesser als gewuenscht
# (da Multiplikation mit Faktor).
if prop_x > prop_y:
height = int(prop_x * height)
image = image.resize((min_width, height), PIL.Image.ANTIALIAS)
else:
width = int(prop_y * width)
image = image.resize((width, min_height), PIL.Image.ANTIALIAS)
return image
def scale_to_max_size(image, max_width, max_height):
"""Returns an image, that isn't bigger than max_width and max_height.
That means one side is exactly given value and the other is smaller. In
other words the image fits at any rate in the given box max_width x
max_height.
"""
# resize proportinal
width, height = image.size
# TODO: Translate to english
# Erechne Proportionen zwischen Soll-Weite und Ist-Weite und zwischen
# Soll-Hoehe und Ist-Hoehe
prop_width = float(max_width) / width
prop_height = float(max_height) / height
# TODO: Translate to english
# Die kleinere Proportion (oder Faktor oder Quotient) der beiden kommt fuer
# beide Kanten (da Proportional) zur Anwendung. Das bedeutet die uebrige
# Kante ist auf jeden Fall kleiner als gewuenscht (da Multiplikation mit
# Faktor).
if prop_height < prop_width:
width = int(prop_height * width)
image = image.resize((width, max_height), PIL.Image.ANTIALIAS)
else:
height = int(prop_width * height)
image = image.resize((max_width, height), PIL.Image.ANTIALIAS)
return image
def scale_to_width(image, target_width):
"""Returns an image that has the exactly given width and scales height
proportional.
"""
width, height = image.size
prop_width = float(target_width) / width
new_height = int(prop_width * height)
image = image.resize((target_width, new_height), PIL.Image.ANTIALIAS)
return image
def scale_to_height(image, target_height):
"""Returns an image that has the exactly given height and scales width
proportional.
"""
width, height = image.size
prop_height = float(target_height) / height
new_height = int(prop_height * width)
image = image.resize((new_height, target_height), PIL.Image.ANTIALIAS)
return image
| [
"kai.diefenbach@iqpp.de"
] | kai.diefenbach@iqpp.de |
a5b4d3210895376b4fef4c8e422ea7effa1ebb54 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/5224673/snippet.py | cea9424811bd53898a7138f8a2e829391080caa2 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 153 | py | def java_string_hashcode(s):
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000 | [
"gistshub@gmail.com"
] | gistshub@gmail.com |
f8696bb50b6a66fd6c4d390c0935db0f6299ab42 | 914626bf92d528766bf4b9402f5f120caccbe5cf | /탐욕법_단속카메라.py | 7292bbc0d89ca0e7879785a8ec8bdc31684e24ab | [] | no_license | LINDBURG/Programmers | 64ee0a193f407c9802f7fbda64c27c6adb4a26d3 | adf94767ebe2d4d63aa17bf52ece0c74873aec5c | refs/heads/master | 2020-12-27T06:47:59.062955 | 2020-11-28T07:29:08 | 2020-11-28T07:29:08 | 237,799,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | def solution(routes):
answer = 0
routes.sort()
while routes:
answer += 1
now = routes.pop(0)
end = now[1]
for i in range(len(routes)):
if routes[i][1] < end:
end = routes[i][1]
elif routes[i][0] > end:
break
for route in routes[:]:
if route[0] > end:
break
routes.pop(0)
return answer
| [
"monc2170@gmail.com"
] | monc2170@gmail.com |
7c318280fb4418094a2b04b53172893d5d9bba08 | 037d31bfad9c9da19f6fa8046fa575b59bdfd97b | /app/language/models.py | 88494ac6f043a96a8491fa81c4c46c5965f46765 | [] | no_license | luoyun/homepress | d8cfa58ea8de81bc559da9da3e2eb44d537e5df6 | 8c97579bd7b523dbbcc4a11f378bc4b56407de7c | refs/heads/master | 2020-12-30T10:23:09.672711 | 2013-08-05T10:24:02 | 2013-08-05T10:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import datetime
from yweb.orm import ORMBase
from sqlalchemy import Column, Integer, String, \
Sequence, DateTime, Table, ForeignKey, Boolean, Text
from sqlalchemy.orm import relationship, backref
class Language(ORMBase):
''' Language system '''
__tablename__ = 'language'
id = Column(Integer, Sequence('language_id_seq'), primary_key=True)
name = Column(String(32))
name_en = Column(String(32))
codename = Column(String(6))
def __str__(self):
return '<%s>' % self.codename
| [
"lijian.gnu@gmail.com"
] | lijian.gnu@gmail.com |
a485108da57b34bdbdf518806f23a799754dfbc5 | b6db9a5bdbe84ad9b53407635a8a054a6af8e779 | /dataportal/wsgi.py | c1bb5fc00883504bac7ff98b591a9e1dd404c49f | [] | no_license | gagon/dataportal_kpo_django | 6cca0b03d1d82e20fb5fa4db6430ae616ec91b96 | 6f862a026c0b9fa02d31452e29525021159de58d | refs/heads/master | 2021-01-10T11:12:58.426186 | 2018-04-04T12:41:17 | 2018-04-04T12:41:17 | 48,659,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for dataportal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dataportal.settings")
application = get_wsgi_application()
| [
"you@example.com"
] | you@example.com |
32d6e24f2fe834017f93833a66735566ed3b82a1 | a46b480eefa54d0aa8a1df7158cdf61def351724 | /tests/bcm/cli/test_cli_util.py | 54e34f55fb487e301b00143be0be6e220865aa51 | [
"MIT"
] | permissive | achillesrasquinha/biomodels-cellcollective-migrator | 3ed3a575c32343f0b94115af7a67db40ea40953f | 7c9b50986a6fa8cdfc7d6ec2b434a7b2be999a5b | refs/heads/develop | 2021-06-14T16:34:52.468209 | 2019-06-28T20:59:06 | 2019-06-28T20:59:06 | 194,329,396 | 0 | 1 | MIT | 2021-06-02T00:05:12 | 2019-06-28T20:16:57 | Python | UTF-8 | Python | false | false | 1,914 | py | # imports - module imports
from bcm import cli
# imports - test imports
from testutils import assert_input, assert_stdout
def test_confirm(capfd):
query = "foobar"
stdout = "{} [Y/n/q]: ".format(query)
def _assert_confirm(stdout):
assert_input(capfd, query, "Y", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "y", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query,"\n", expected = True, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "n", expected = False, input_ = cli.confirm, stdout = stdout)
assert_input(capfd, query, "1", expected = False, input_ = cli.confirm, stdout = stdout)
_assert_confirm(stdout)
stdout = "{} [Y/n]: ".format(query)
# assert_input(capfd, query, "Y", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "y", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query,"\n", expected = True, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "n", expected = False, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
# assert_input(capfd, query, "1", expected = False, input_ = cli.confirm, stdout = stdout, input_args = { 'quit_': False })
def test_format():
string = "foobar"
def _assert_format(string, type_):
assert cli.format(string, type_) == "{}{}{}".format(type_, string, cli.CLEAR)
_assert_format(string, cli.GREEN)
_assert_format(string, cli.RED)
_assert_format(string, cli.BOLD)
def test_echo(capfd):
query = "foobar"
cli.echo(query, nl = False)
assert_stdout(capfd, query)
cli.echo(query, nl = True)
assert_stdout(capfd, "{}\n".format(query)) | [
"achillesrasquinha@gmail.com"
] | achillesrasquinha@gmail.com |
c0798e517a6501161acbc0b853fb0d04fba4d25a | d3dcbda2c798f24b43bd35ecc8ea40b2e494e646 | /games/views.py | f0159630d481b4778ad81fd4acbf7828e1e22b19 | [] | no_license | milu-buet/milu_test3 | 48e560d778a61c44fadfacf2adcad3374797cdf6 | 09790d829fddd09cdf9fd9525c8b6829b58d67e5 | refs/heads/master | 2021-01-18T22:10:58.079326 | 2015-06-24T09:15:55 | 2015-06-24T09:15:55 | 19,772,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | from django.shortcuts import render
from django.http import HttpResponse
import simplejson as json
# Create your views here.
def home(request):
return HttpResponse("game home works")
def game1(request):
return render(request,"games/game1.html")
def game2(request):
return render(request,"games/game2.html")
| [
"milu.buet@gmail.com"
] | milu.buet@gmail.com |
9e51ee20084954c66c9648d096b95fc2a2f2f697 | 0f2112a0e198cb0275c002826854c836bbfb5bdf | /utils/gui/benchmark/images_list_model.py | e63ce3f72c2efd4ef74cc8157899d49a2c31fe7a | [
"MIT"
] | permissive | jeremiedecock/pywi-cta | a7f98ae59beb1adecb25623153c13e5bc70e5560 | 1185f7dfa48d60116472c12ffc423be78a250fc9 | refs/heads/master | 2021-04-15T12:06:03.723786 | 2019-03-21T02:33:15 | 2019-03-21T02:33:15 | 126,397,380 | 0 | 1 | MIT | 2018-10-16T12:17:52 | 2018-03-22T21:31:45 | Python | UTF-8 | Python | false | false | 4,280 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from gi.repository import Gtk as gtk
import json
import math
import os
from pywicta.io import images
class ImagesListModel(object):
def __init__(self, input_directory_path):
self.input_directory_path = input_directory_path
# Parse the input directory
self.fits_file_name_list = get_fits_files_list(input_directory_path)
# Parse FITS files
self.fits_metadata_list = parse_fits_files(self.input_directory_path, self.fits_file_name_list)
# Creating the gtk.ListStore model
self.liststore = gtk.ListStore(int, # Event ID
int, # Tel ID
float, # MC energy
float, # NPE
str) # File name
for image_metadata_dict in self.fits_metadata_list:
event_id = image_metadata_dict["event_id"]
tel_id = image_metadata_dict["tel_id"]
mc_energy = image_metadata_dict["mc_energy"]
npe = image_metadata_dict["npe"]
file_name = image_metadata_dict["file_name"]
self.liststore.append([event_id, tel_id, mc_energy, npe, file_name])
def get_fits_files_list(directory_path):
# Parse the input directory
print("Parsing", directory_path)
fits_file_name_list = [file_name
for file_name
in os.listdir(directory_path)
if os.path.isfile(os.path.join(directory_path, file_name))
and file_name.endswith((".fits", ".fit"))]
return fits_file_name_list
def parse_fits_files(dir_name, fits_file_name_list):
fits_metadata_list = []
# Parse the input files
mc_energy_unit = None
for file_index, file_name in enumerate(fits_file_name_list):
metadata_dict = {}
# Read the input file #########
fits_images_dict, fits_metadata_dict = images.load_benchmark_images(os.path.join(dir_name, file_name))
# Fill the dict ###############
if mc_energy_unit is None:
mc_energy_unit = fits_metadata_dict["mc_energy_unit"] # TODO
else:
if mc_energy_unit != fits_metadata_dict["mc_energy_unit"]:
raise Exception("Inconsistent data")
metadata_dict["event_id"] = fits_metadata_dict["event_id"]
metadata_dict["tel_id"] = fits_metadata_dict["tel_id"]
metadata_dict["mc_energy"] = fits_metadata_dict["mc_energy"]
metadata_dict["npe"] = fits_metadata_dict["npe"]
metadata_dict["file_name"] = file_name
fits_metadata_list.append(metadata_dict)
# Progress bar ################
num_files = len(fits_file_name_list)
relative_steps = math.ceil(num_files / 100.)
if (file_index % relative_steps) == 0:
progress_str = "{:.2f}% ({}/{})".format((file_index + 1)/num_files * 100,
file_index + 1,
num_files)
print(progress_str)
return fits_metadata_list
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
8e2b508ffb965952aba2ad74a9c3b8bcedda1017 | db7aac75e31d35c4a18c966170b46f269d015d0b | /webgl_canvas_gadget/apps/projects/migrations/0010_auto_20160621_0147.py | 16a5fabbca8c7c236c5a7224f993ed4aebefe01d | [] | no_license | jjpastprojects/Django | 12fbf3cf27a9230db98a21cc1013216aeadaae1e | c55562be7226f29b4ec213f8f018b6c2dd50c420 | refs/heads/master | 2022-12-12T22:15:49.493289 | 2017-09-05T12:51:20 | 2017-09-05T12:51:20 | 101,995,798 | 0 | 0 | null | 2022-12-07T23:21:46 | 2017-08-31T11:55:00 | JavaScript | UTF-8 | Python | false | false | 1,191 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-20 19:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0009_auto_20160619_2250'),
]
operations = [
migrations.AddField(
model_name='lensflare',
name='band_1',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='band_2',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='hexigon_shape',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
migrations.AddField(
model_name='lensflare',
name='main_flare',
field=models.ImageField(default='', upload_to='projects/lens_flare'),
preserve_default=False,
),
]
| [
"sam.noreaksey@outlook.com"
] | sam.noreaksey@outlook.com |
c7406959efccab9a16ed49ab84285896a04da6ec | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_pants_casual_16.py | 8d9d8eacf01fe496da90925628e0d43e97005813 | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 466 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_ith_pants_casual_16.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"rwl3564@rit.edu"
] | rwl3564@rit.edu |
fd7e4b0e092aae7c5122f922cc9f62076b885df2 | 1e43fd5e134157e6f034327ffbf3e6501c67275d | /mlps/core/apeflow/interface/model/SKLModel.py | 2e656276a0b2fe1d5080e2af279877a487b2445e | [
"Apache-2.0"
] | permissive | sone777/automl-mlps | f15780e23142e0f3f368815678959c7954966e71 | a568b272333bc22dc979ac3affc9762ac324efd8 | refs/heads/main | 2023-08-24T10:07:30.834883 | 2021-11-03T07:41:15 | 2021-11-03T07:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # -*- coding: utf-8 -*-
# Author : Manki Baek
# e-mail : Manki.Baek@seculayer.co.kr
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
from mlps.core.apeflow.interface.model.ModelAbstract import ModelAbstract
class SKLModel(ModelAbstract):
def __init__(self, param_dict, ext_data=None):
super(SKLModel, self).__init__(param_dict, ext_data)
self.model = self._build()
| [
"bmg8551@naver.com"
] | bmg8551@naver.com |
5b4f27a3f438a63076f482c06b59452a9bcf8501 | 7f698acfc0655fb0978c46f7c79a1f66fd0f4af0 | /users/migrations/0003_auto_20210716_1850.py | a3614b7bd00bd907051b24bf0629f48b409918d7 | [] | no_license | bunnycast/bird_bnb | a53c67c1fd117bf459dd36062a63d0b10aceda7b | 2bd43f0be4f9873028c278c735633cee990ca372 | refs/heads/master | 2023-06-19T18:27:49.151543 | 2021-07-16T10:05:50 | 2021-07-16T10:05:50 | 386,560,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # Generated by Django 3.1.6 on 2021-07-16 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_bio'),
]
operations = [
migrations.AddField(
model_name='user',
name='avatar',
field=models.ImageField(null=True, upload_to=''),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(max_length=10, null=True),
),
]
| [
"berzzubunny@gmail.com"
] | berzzubunny@gmail.com |
2e85756c40f453e7ad77be898d16d5598a94feab | ed915e9ac23875688b35734b3ffd42b23f00f626 | /tools/make_response.py | 5f3e89567324172be6ac15a900205f3180e28b8a | [
"MIT"
] | permissive | rcbyron/hey-athena-client | 4df2b25cf4aa05b65fa359836609afa2f5c50224 | 703e2184610a1718923bf60bc2ef6ec18e126148 | refs/heads/demo-branch | 2023-01-02T00:30:03.948348 | 2018-01-15T03:51:10 | 2018-01-15T03:51:10 | 40,776,319 | 391 | 136 | MIT | 2020-10-15T05:32:33 | 2015-08-15T18:08:20 | Python | UTF-8 | Python | false | false | 423 | py | '''
Created on Feb 11, 2016
@author: Connor
'''
from athena import tts
print('~ Enter \'q\' at any time to quit')
while True:
fname = input('\n~ Unique Filename: ')
if len(fname) is 0 or 'q' in fname[0].lower():
break
phrase = input('~ Phrase: ')
if len(phrase) is 0 or 'q' in phrase[0].lower():
break
tts.speak(phrase, cache=True, filename=fname) | [
"rcbyron@utexas.edu"
] | rcbyron@utexas.edu |
92d0ca24d92136a27f7cb54a84c65c5b885630e9 | 6b77241ff82ca0ac1293e971276c87a1294dd878 | /tea/msg/__init__.py | 891e39ed5fcb2c379bf2986daa07cc97f45af264 | [] | no_license | SenadI/tea | 356639d2d24b6f56fad69adeba90bbadacacd10b | abed19ecd5274ac05b825d8b83c3f448db197e9a | refs/heads/master | 2020-12-30T10:23:51.880549 | 2017-06-12T16:40:18 | 2017-06-12T16:40:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | __author__ = 'Viktor Kerkez <alefnula@gmail.com>'
__date__ = '27 November 2009'
__copyright__ = 'Copyright (c) 2009 Viktor Kerkez'
__all__ = ['send_mail', 'send_mass_mail']
from tea.msg.mail import send_mail, send_mass_mail
| [
"alefnula@gmail.com"
] | alefnula@gmail.com |
27eb01bd63140d723b41b4dd57ec4807f308f050 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/fragile.py | 3dde43aea23363a04aaa23d1ebff9541e7fa44da | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint for the "fragile" section; every route it gains is mounted
# under the /fragile URL prefix.
controller = Blueprint('fragile', __name__, url_prefix='/fragile')
# Route skeleton kept for a later exercise step (TODO 2): look up a title
# and render its template.
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
b3c7e233bc4c37c1c0d23b146680f7bea823069f | cdf8b0df0b22f18e3e31e59946dadfbf50074e67 | /dockless/qr/views.py | 5f2a4a7c0ca835ed8548a38fd9719e30d65b3689 | [] | no_license | deshiyan1010/Dockless-Bike-Service | a5bd3bf92c837053f05835f9ebdfe291389a9e30 | a7df45ed19a2f93c02abc101b19f5aca5d42337d | refs/heads/main | 2023-07-01T05:00:20.135427 | 2021-08-10T11:12:54 | 2021-08-10T11:12:54 | 382,597,432 | 0 | 0 | null | 2021-07-04T18:34:05 | 2021-07-03T11:23:12 | CSS | UTF-8 | Python | false | false | 1,568 | py | from django.shortcuts import render
import geocoder
from maps import staticdb
import math
from django.http import HttpResponseRedirect, HttpResponse
from django.urls import reverse
from django.core.files.storage import FileSystemStorage
from PIL import Image
from pyzbar.pyzbar import decode
import cv2
import pyzbar
def euc(c1, c2):
    """Euclidean distance between two 2-D points (lat/lng pairs).

    Uses math.hypot instead of sqrt(dx**2 + dy**2): same value for normal
    inputs but numerically more robust (no intermediate overflow/underflow
    when squaring).
    """
    return math.hypot(c1[0] - c2[0], c1[1] - c2[1])
def read_barcodes(frame):
    """Decode every barcode/QR code found in an image frame.

    Prints the frame shape and each decoded payload, and returns the last
    payload as a UTF-8 string, or None when the frame contains no readable
    code.  The original referenced `barcode_info` after the loop without
    initializing it, so an empty frame raised NameError.
    """
    barcodes = decode(frame)
    print(frame.shape)
    barcode_info = None
    for barcode in barcodes:
        # barcode.rect (x, y, w, h) is available for drawing overlays; the
        # bounding box is not needed for validation itself, so the old
        # commented-out drawing code was dropped.
        barcode_info = barcode.data.decode('utf-8')
        print("Barcode:", barcode_info)
    return barcode_info
def valQR(path):
    """Validate the QR image at *path*.

    Returns 1 when the decoded payload equals staticdb.QR_DATA and the
    user's IP-geolocated position lies within 0.4 (degrees, Euclidean on
    lat/lng) of the closest known QR location; 0 otherwise.  The
    module-level fallthrough `return 0` (unchanged, below) covers the
    non-matching-payload case.
    """
    data = read_barcodes(cv2.imread(path, 1))
    if data == staticdb.QR_DATA:
        here = geocoder.ip('me').latlng
        # min() over a generator replaces the original manual loop, which
        # shadowed the builtin `min`; the leftover debug print was removed.
        nearest = min(euc(coords, here) for coords in staticdb.QR_LOCATIONS)
        if nearest < 0.4:
            return 1
        else:
            return 0
return 0 | [
"vinayakamikkal@gmail.com"
] | vinayakamikkal@gmail.com |
f6195f60d2d18a5fec319ddf775067291fe7e7e8 | 32b076374481b10e9ba67209d677eb6be0b3d440 | /bin/wp-get-access-logs | a746b956a88fffacfa1bef2b613a75ce2587e88b | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | defconcepts/quac | 7f8cca7aebf2fcdc1382fbbf329d03ad3dd86f59 | 4c279ef6ff0fcd51f389ae1817d3104d38740276 | refs/heads/master | 2021-01-18T18:53:31.227441 | 2015-11-03T21:23:49 | 2015-11-03T21:23:49 | 46,738,274 | 1 | 0 | null | 2015-11-23T17:58:21 | 2015-11-23T17:58:20 | null | UTF-8 | Python | false | false | 2,466 | #!/usr/bin/env python3
'Update local copy of Wikimedia access logs.'
# Copyright (c) Los Alamos National Security, LLC, and others.
help_epilogue = '''
Logs only go to stdout, so as to capture the rsync chatter as well.
Note that we use rsync to download Wikipedia stuff, which isn't supported by
the Wikimedia site, so you have to use a mirror. See the list of mirrors:
<http://meta.wikimedia.org/wiki/Mirroring_Wikimedia_project_XML_dumps>
Notes:
* If a log file is changed, your local copy will be updated to match. This
probably shouldn't happen, so investigate the situation if it does.
(However, deleted log files will not also be deleted locally; e.g., you are
OK if the mirror you are using goes into a zombie state.)
* This script will download several terabytes of data. Be patient and be
courteous.
* --verify also takes a long time and gives no output if all's well. If you're
worried about progress, you can use lsof to see what file is currently
being checked.
'''
import os.path
import subprocess
import quacpath
import rsync
import testable
import u
# Shorthand aliases used throughout QUAC scripts: l is the shared logger,
# c the parsed configuration object (both provided by the u module).
l = u.l
c = u.c
### Setup ###
# CLI: module docstring is the description, the epilogue gives usage notes;
# --verify switches from download mode to local checksum verification.
ap = u.ArgumentParser(description=__doc__, epilog=help_epilogue)
gr = ap.add_argument_group('arguments')
gr.add_argument('--verify',
                action='store_true',
                help='verify MD5sums of existing files instead of downloading')
### Main ###
def main():
    """Verify mode: md5-check existing files; update mode: rsync new logs."""
    l.info('Wikimedia access logs in %s' % (log_dir))
    if (args.verify):
        l.info('mode: verify')
        # Spawn a shell `find` that runs md5sum --check in each directory
        # containing an md5sums.txt.  NOTE(review): log_dir is interpolated
        # unquoted into a shell command; assumes the configured path holds
        # no shell metacharacters -- confirm before hardening.
        subprocess.call('find %s -name md5sums.txt -exec sh -c "cd \$(dirname {}) && md5sum --check --quiet md5sums.txt || echo MD5 error in {}" \;' % (log_dir), shell=True)
    else:
        l.info('mode: update')
        l.info('bandwidth limit is %d KB/s' % (bwlimit))
        # FIXME: awkward to specify --include * simply to override --exclude *.
        rsync.fetch(mirror_url, log_dir + '/raw', bwlimit,
                    ['--exclude', 'projectcounts*'], args.verbose)
    l.info('done')
### Bootstrap ###
try:
    # Parse CLI + config; these module globals are read by main().
    args = u.parse_args(ap)
    u.configure(args.config)
    u.logging_init('wpacc')
    bwlimit = c.getint('wkpd', 'bandwidth_limit')
    mirror_url = c.get('wkpd', 'access_log_url')
    log_dir = c.getpath('wkpd', 'access_log_dir')
    if (not os.path.isdir(log_dir)):
        u.abort('%s is not a directory or does not exist' % (log_dir))
    if (__name__ == '__main__'):
        main()
except testable.Unittests_Only_Exception:
    # Raised by the QUAC test harness when importing scripts only to
    # collect their tests; register this module's (empty) doctests.
    testable.register('')
| [
"reidpr@lanl.gov"
] | reidpr@lanl.gov | |
b01d86596c1d50ddad7135ffa21518b523697c01 | e99985945b956698dbe430b61250607efeae0fe7 | /blog/migrations/0001_initial.py | 152678f4693b11cfadff686c1658bf8d693cfb70 | [] | no_license | anthonylauly/Project-Blog----Django-2-By-Example | 5a01b2083122d3f26eb72b4b16b9177d9e5f8715 | b7bb60bb80de11dceedeb1edd100e2d78ba83cee | refs/heads/main | 2023-01-21T17:55:49.736625 | 2020-12-08T05:52:29 | 2020-12-08T05:52:29 | 319,536,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,369 | py | # Generated by Django 2.2.2 on 2019-08-22 15:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the blog Post table with a
    # draft/published workflow and an FK to the configured user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('slug', models.SlugField(max_length=250, unique_for_date='publish')),
                ('body', models.TextField()),
                ('publish', models.DateTimeField(default=django.utils.timezone.now)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('status', models.CharField(choices=[('draft', 'Draft'), ('published', 'Published')], default='draft', max_length=10)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by publication date.
                'ordering': ('-publish',),
            },
        ),
    ]
| [
"anthony.lauly1833@gmail.com"
] | anthony.lauly1833@gmail.com |
5c619b914e03131689488198a31009c0eb874c03 | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch16/ch16_13.py | effa4076052f01a7b1da11b467c3cc048caa2602 | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # ch16_13.py
import re

# Match "John" followed by one of three suffixes; the first hit in the
# sentence wins and group(1) carries the suffix alone.
msg = 'Johnson, Johnnason and Johnnathan will attend my party tonight.'
pattern = 'John(son|nason|nathan)'
txt = re.compile(pattern).search(msg)  # first match object
print(txt.group())    # the whole matched name
print(txt.group(1))   # the captured suffix
| [
"terranandes@gmail.com"
] | terranandes@gmail.com |
81c444ea32e12b01a3349ba80b9ada0162c9b2de | c646eda22844eb3aadc832a55dc8a7a8d8b28656 | /LintCode/Problems/Python3/1010. 维持城市轮廓的最大增量.py | 6f6e037a68900ddc9879fccf078174560d88550a | [] | no_license | daidai21/ExerciseProblem | 78f41f20f6d12cd71c510241d5fe829af676a764 | cdc526fdb4ee1ca8e0d6334fecc4932d55019cea | refs/heads/master | 2021-11-22T21:54:13.106707 | 2021-11-14T10:54:37 | 2021-11-14T10:54:37 | 213,108,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | class Solution:
"""
@param grid: a 2D array
@return: the maximum total sum that the height of the buildings can be increased
"""
def maxIncreaseKeepingSkyline(self, grid):
# Write your code here
# row high list
row_high = []
for i in range(len(grid[0])):
high = 0
for j in range(len(grid)):
high = max(high, grid[j][i])
row_high.append(high)
# column high list
column_high = []
for i in range(len(grid)):
high = 0
for j in range(len(grid[0])):
high = max(high, grid[i][j])
column_high.append(high)
# ans
ans = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
ans += min(row_high[i], column_high[j]) - grid[i][j]
return ans
| [
"daidai4269@aliyun.com"
] | daidai4269@aliyun.com |
661e388c326d7f7fcae917f8bbd046c6126f2242 | 7792b03540784a0d28073899dd4ad78689e9a9fb | /char_map/all_char.py | 9ff04bc9c121310a5b6bd6c7e57b7b628212d1ae | [] | no_license | ayiis/coding | 3b1362f813a22a7246af3725162cfb53dea2f175 | c73e4622e1811cc3fd8729a92df6537bd73dc802 | refs/heads/master | 2021-06-02T14:55:38.451288 | 2021-04-26T08:39:16 | 2021-04-26T08:39:16 | 134,660,001 | 0 | 0 | null | 2020-06-05T04:03:58 | 2018-05-24T04:14:14 | CSS | UTF-8 | Python | false | false | 439 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "ayiis"
# create on 2020/05/06
# All 95 printable ASCII characters: chr(32) ' ' through chr(126) '~'.
char_set = {chr(code) for code in range(32, 127)}
# print("char_set:", sorted(char_set))
r"""
! " # $ % & ' ( ) * + , - .
/ 0 1 2 3 4 5 6 7 8 9 : ; < =
> ? @ A B C D E F G H I J K L
M N O P Q R S T U V W X Y Z [
\ ] ^ _ ` a b c d e f g h i j
k l m n o p q r s t u v w x y
z { | } ~
"""
| [
"ayiis@126.com"
] | ayiis@126.com |
76570c11245c3e2210c7d78bd57b32ff5fe7b088 | e58aa9d46f06d091cc0be6259996efed238529cd | /tests/test_extension.py | 058c583c5f83ede5286c46d6c1f3d51d25c97d2e | [
"Apache-2.0"
] | permissive | kingosticks/mopidy-pidi | d446390c84de511c4f2f522d1f267b1f1f90e44a | 7a091ea4597b313b59082c8b7494395e8f9e49ea | refs/heads/master | 2020-09-16T13:35:06.825810 | 2019-11-15T13:53:05 | 2019-11-15T13:53:05 | 223,785,395 | 0 | 0 | Apache-2.0 | 2019-11-24T17:48:10 | 2019-11-24T17:48:09 | null | UTF-8 | Python | false | false | 499 | py | from __future__ import unicode_literals
from mopidy_pidi import Extension, frontend as frontend_lib
def test_get_default_config():
    """The default config must declare the [pidi] section and enable it."""
    default_conf = Extension().get_default_config()
    assert "[pidi]" in default_conf
    assert "enabled = true" in default_conf
def test_get_config_schema():
    """The config schema should be constructible without raising."""
    extension = Extension()
    schema = extension.get_config_schema()
    # TODO: assert on schema contents once fields are defined, e.g.
    #   assert "username" in schema and "password" in schema
# TODO Write more tests
| [
"phil@gadgetoid.com"
] | phil@gadgetoid.com |
04194d3259bddb1b85dd569f81d42adde8bd6519 | b64c45e75aa215ddcf7249fb92e047f3e7731187 | /mainapp/migrations/0002_auto_20200217_2307.py | 3ff8952ddbbcd3d0d5fc5ed54ba586369c9da0b1 | [] | no_license | johngaitho05/CohMat | 6731b4dfb94475c75f1cd1d2ec55cc810729f939 | ff5b8e5eb877f68a0477f4f19b78c6e7c407af2c | refs/heads/master | 2022-12-12T15:55:53.363782 | 2021-04-04T13:17:05 | 2021-04-04T13:17:05 | 239,868,710 | 1 | 0 | null | 2022-11-04T19:31:50 | 2020-02-11T21:31:47 | Python | UTF-8 | Python | false | false | 537 | py | # Generated by Django 2.2.6 on 2020-02-17 20:07
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: changes Cohort.sub_groups into a 2-D (nested) integer
    # ArrayField -- PostgreSQL-specific, unbounded in both dimensions.
    dependencies = [
        ('mainapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cohort',
            name='sub_groups',
            field=django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None), size=None),
        ),
    ]
| [
"johngaitho05@gmail.com"
] | johngaitho05@gmail.com |
9c3aba948575031c83d273d634d1d6a34c7d502a | c70ac0b6d3ec292ab95626cbd519dee56a70289a | /embedded-software/mcu-hal/STM32F4xx_HAL_Driver/wscript | 8daa2e183e3e623cacd3047f1f461cdf043ac7d9 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | dv1990/foxbms | c4b28fea533f681c04fae5bc4f004fd2f6bcb498 | 9176f75e8ebf42da0581d82be3db9ebcdfea4f0e | refs/heads/master | 2020-04-17T08:27:07.168099 | 2018-12-14T12:43:17 | 2018-12-14T12:43:17 | 166,412,545 | 1 | 0 | NOASSERTION | 2019-01-18T13:52:42 | 2019-01-18T13:52:42 | null | UTF-8 | Python | false | false | 3,967 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright © 2010 - 2018, Fraunhofer-Gesellschaft zur Foerderung der
# angewandten Forschung e.V. All rights reserved.
#
# BSD 3-Clause License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# ″This product uses parts of foxBMS®″
#
# ″This product includes parts of foxBMS®″
#
# ″This product is derived from foxBMS®″
import os
def build(bld):
    """waf build step: compile the ST STM32F4 HAL sources into the static
    library ``foxbms-stmhal``.

    *bld* is the waf BuildContext.  Source and include paths below are
    relative to this wscript's directory.
    """
    # HAL/LL translation units to compile (only the peripherals used here).
    srcs = ' '.join([
        os.path.join('Src', 'stm32f4xx_hal.c'),
        os.path.join('Src', 'stm32f4xx_hal_adc.c'),
        os.path.join('Src', 'stm32f4xx_hal_adc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_can.c'),
        os.path.join('Src', 'stm32f4xx_hal_cortex.c'),
        os.path.join('Src', 'stm32f4xx_hal_dma.c'),
        os.path.join('Src', 'stm32f4xx_ll_fmc.c'),
        os.path.join('Src', 'stm32f4xx_hal_flash.c'),
        os.path.join('Src', 'stm32f4xx_hal_flash_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_gpio.c'),
        os.path.join('Src', 'stm32f4xx_hal_iwdg.c'),
        os.path.join('Src', 'stm32f4xx_hal_pwr.c'),
        os.path.join('Src', 'stm32f4xx_hal_pwr_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_rcc.c'),
        os.path.join('Src', 'stm32f4xx_hal_rcc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_rtc.c'),
        os.path.join('Src', 'stm32f4xx_hal_rtc_ex.c'),
        os.path.join('Src', 'stm32f4xx_hal_sdram.c'),
        os.path.join('Src', 'stm32f4xx_hal_spi.c'),
        os.path.join('Src', 'stm32f4xx_hal_tim.c'),
        os.path.join('Src', 'stm32f4xx_hal_uart.c')])
    # Include search path: build dir, CMSIS + HAL headers, then the
    # project's general config directories (CPU-specific variant last).
    includes = os.path.join(bld.bldnode.abspath()) + ' '
    includes += ' '.join([
        '.',
        os.path.join('..', 'CMSIS', 'Device', 'ST', 'STM32F4xx', 'Include'),
        os.path.join('..', 'CMSIS', 'Include'),
        os.path.join('Inc'),
        os.path.join('Inc', 'Legacy'),
        os.path.join('Src'),
        os.path.join(bld.top_dir, bld.env.__sw_dir, bld.env.__bld_project, 'src', 'general', 'config'),
        os.path.join(bld.top_dir, bld.env.__sw_dir, bld.env.__bld_project, 'src', 'general', 'config', bld.env.CPU_MAJOR)])
    bld.stlib(target='foxbms-stmhal',
              source=srcs,
              includes=includes)
| [
"info@foxbms.org"
] | info@foxbms.org | |
5a288ad3997042da67edb9b18baba20de636c05b | 8cb6cba90622021549b94e62a7fd5ae9ebc3f55f | /simplemooc/simplemooc/courses/views.py | 99291abeafc9581c1987db05ff5212db16ecc237 | [] | no_license | thiagorossener/course-udemy-django-simplemooc | 4c319e23fb2d4dae2c7b152179f4f700904d3271 | f23303f27712149722747dc3f6bcc8361acc1698 | refs/heads/master | 2021-06-16T03:11:10.527563 | 2017-05-11T14:35:16 | 2017-05-11T14:35:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,213 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from simplemooc.courses.models import (Course, Enrollment, Announcement, Lesson,
Material)
from simplemooc.courses.forms import ContactCourse, CommentForm
from .decorators import enrollment_required
def index(request):
    """List every course on the site."""
    all_courses = Course.objects.all()
    return render(request, 'courses/index.html', {'courses': all_courses})
def details(request, slug):
    """Course detail page with a contact form; a valid POST sends an e-mail."""
    course = get_object_or_404(Course, slug=slug)
    context = {}
    if request.method == 'POST':
        form = ContactCourse(request.POST)
        if form.is_valid():
            context['is_valid'] = True
            form.send_mail(course)
            # Re-instantiate to present a blank form after a successful send.
            form = ContactCourse()
    else:
        form = ContactCourse()
    context['form'] = form
    context['course'] = course
    template_name = 'courses/details.html'
    return render(request, template_name, context)
@login_required
def enrollment(request, slug):
    """Enroll the logged-in user in the course (idempotent via get_or_create)."""
    course = get_object_or_404(Course, slug=slug)
    enrollment, created = Enrollment.objects.get_or_create(
        user=request.user, course=course
    )
    if created:
        # Brand-new enrollment: activate it immediately.
        enrollment.active()
        messages.success(request, 'Você foi inscrito no curso com sucesso')
    else:
        messages.info(request, 'Você já está inscrito no curso')
    return redirect('accounts:dashboard')
@login_required
def undo_enrollment(request, slug):
    """GET: confirmation page; POST: delete the user's enrollment."""
    course = get_object_or_404(Course, slug=slug)
    enrollment = get_object_or_404(
        Enrollment, user=request.user, course=course
    )
    if request.method == 'POST':
        enrollment.delete()
        messages.success(request, 'Sua inscrição foi cancelada com sucesso')
        return redirect('accounts:dashboard')
    template_name = 'courses/undo_enrollment.html'
    context = {
        'enrollment': enrollment,
        'course': course,
    }
    return render(request, template_name, context)
@login_required
@enrollment_required
def announcements(request, slug):
    """List all announcements of the course resolved by @enrollment_required."""
    course = request.course
    return render(request, 'courses/announcements.html', {
        'course': course,
        'announcements': course.announcements.all(),
    })
@login_required
@enrollment_required
def show_announcement(request, slug, pk):
    """Announcement detail page; a valid POST adds a comment by the user."""
    course = request.course
    announcement = get_object_or_404(course.announcements.all(), pk=pk)
    # Bound form on POST, unbound (empty) form on GET.
    form = CommentForm(request.POST or None)
    if form.is_valid():
        comment = form.save(commit=False)
        comment.user = request.user
        comment.announcement = announcement
        comment.save()
        # Present a fresh, empty form after a successful save.
        form = CommentForm()
        messages.success(request, 'Seu comentário foi salvo com sucesso')
    template_name = 'courses/show_announcement.html'
    context = {
        'course': course,
        'announcement': announcement,
        'form': form,
    }
    return render(request, template_name, context)
@login_required
@enrollment_required
def lessons(request, slug):
    """List the lessons a user may see; staff members see every lesson."""
    course = request.course
    visible_lessons = course.release_lessons()
    if request.user.is_staff:
        visible_lessons = course.lessons.all()
    return render(request, 'courses/lessons.html', {
        'course': course,
        'lessons': visible_lessons,
    })
@login_required
@enrollment_required
def lesson(request, slug, pk):
    """Display a single lesson.

    Fixes two defects: `message.error` referenced an undefined name
    (NameError; should be `messages`), and the guard used `or`, which
    blocked every non-staff user even from available lessons — access
    should be denied only when the user is NOT staff AND the lesson is
    NOT available.
    """
    course = request.course
    lesson = get_object_or_404(Lesson, pk=pk, course=course)
    if not request.user.is_staff and not lesson.is_available():
        messages.error(request, 'Esta aula não está disponível')
        return redirect('courses:lessons', slug=course.slug)
    template_name = 'courses/lesson.html'
    context = {
        'course': course,
        'lesson': lesson,
    }
    return render(request, template_name, context)
@login_required
@enrollment_required
def material(request, slug, pk):
    """Serve a lesson material: embedded ones render a page, others redirect
    to the stored file.

    Fixes two defects: `messages` replaces the undefined name `message`
    (NameError on the denial path), and the availability guard uses `and`
    instead of `or` so staff OR an available lesson grants access.
    """
    course = request.course
    material = get_object_or_404(Material, pk=pk, lesson__course=course)
    lesson = material.lesson
    if not request.user.is_staff and not lesson.is_available():
        messages.error(request, 'Este material não está disponível')
        return redirect('courses:lesson', slug=course.slug, pk=lesson.pk)
    if not material.is_embedded():
        # Non-embeddable files are served directly from storage.
        return redirect(material.file.url)
    template_name = 'courses/material.html'
    context = {
        'course': course,
        'lesson': lesson,
        'material': material,
    }
    return render(request, template_name, context)
| [
"thiago.rossener@gmail.com"
] | thiago.rossener@gmail.com |
342f75521d3abaf866851722cdfd35ec72b29d01 | 2befb6f2a5f1fbbd5340093db43a198abdd5f53b | /pythonProject/modelviewset/modelviewsetApp/migrations/0001_initial.py | f0099f330374e6813c4bfd515b8bb93cdc7d1383 | [] | no_license | JanardanPandey/RestAPI | 1956d3529782d18ef2118961f6286e3213665aad | 654933a4d9687076a00c6f4c57fc3dfee1a2c567 | refs/heads/master | 2023-06-14T07:02:31.702000 | 2021-07-02T07:50:59 | 2021-07-02T07:50:59 | 382,357,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # Generated by Django 3.2.3 on 2021-06-06 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated first migration of the app: creates the StudentModel table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='StudentModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=100)),
                ('roll', models.IntegerField()),
            ],
        ),
    ]
| [
"janardanpandey0510@gmail.com"
] | janardanpandey0510@gmail.com |
b035d4c55236efb3af1c591684b28900e1f9eca8 | 3267fb38696d7b114a22f476f2c60425d6ee349a | /src/api/exceptions.py | 6eab930e8268c1c57c0cb4c9d602481895dd8f7e | [] | no_license | marcinowski/github-adapter | c0092e3f817f9dc1d97691e81b1c247ae281b2c7 | 2d7c6b9601da082de246450cc840412f0c4331b5 | refs/heads/master | 2022-12-10T00:53:39.386198 | 2017-09-06T10:57:09 | 2017-09-06T10:57:09 | 100,716,960 | 0 | 0 | null | 2021-06-01T22:02:20 | 2017-08-18T13:55:02 | Python | UTF-8 | Python | false | false | 1,602 | py | """
:created on: 2017-08-21
:author: Marcin Muszynski
:contact: marcinowski007@gmail.com
"""
# Template for building adapter exception class names from a status code,
# e.g. RESPONSE_EXCEPTION_NAME_FORMAT.format(404) -> 'GitHubAdapter404Error'.
# NOTE(review): presumably used to look these classes up dynamically --
# confirm at the call sites.
RESPONSE_EXCEPTION_NAME_FORMAT = 'GitHubAdapter{}Error'
class GitHubAdapterBaseError(Exception):
    """ Base Exception for GitHub adapter"""
class GitHubAdapterHTTPError(GitHubAdapterBaseError):
    """ Base HTTP Error Exception"""
    status_code = 400
    reason = ''
class GitHubAdapter400Error(GitHubAdapterHTTPError):
    """ Exception to raise for Bad Request """
    status_code = 400
    reason = 'Bad Request'
class GitHubAdapter401Error(GitHubAdapterHTTPError):
    """ Exception to raise when authentication error """
    status_code = 401
    reason = 'Authentication error'
class GitHubAdapter403Error(GitHubAdapterHTTPError):
    """ Exception to raise when access is forbidden """
    status_code = 403
    reason = 'Access denied'
class GitHubAdapter404Error(GitHubAdapterHTTPError):
    """ Exception to raise when resource is not found """
    status_code = 404
    reason = 'Page not found'
class GitHubAdapter405Error(GitHubAdapterHTTPError):
    """ Exception to raise when method is not allowed """
    status_code = 405
    reason = 'Method not allowed'
class GitHubAdapter422Error(GitHubAdapterHTTPError):
    """ Exception to raise when the entity cannot be processed (invalid fields) """
    status_code = 422
    reason = 'Unprocessable Entity - invalid fields received'
class GitHubAdapter500Error(GitHubAdapterHTTPError):
    """ Exception to raise on a server-side error """
    status_code = 500
    reason = 'Server Error'
class GitHubAdapter501Error(GitHubAdapterHTTPError):
    """ Exception to raise when the error is not recognized """
    status_code = 501
    reason = 'Unrecognized Error'
| [
"muszynskimarcin@wp.pl"
] | muszynskimarcin@wp.pl |
31a9d3dc24bee09b099c63b756f0a4157ae58716 | ba949e02c0f4a7ea0395a80bdc31ed3e5f5fcd54 | /problems/dp/Solution727.py | 398306eae193d167eb1b975cf6e85d29184f1f7b | [
"MIT"
] | permissive | akaliutau/cs-problems-python | 6bc0a74064f6e9687fe58b13763da1fdf2e1f626 | 9b1bd8e3932be62135a38a77f955ded9a766b654 | refs/heads/master | 2023-05-11T22:19:06.711001 | 2021-06-04T11:14:42 | 2021-06-04T11:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | """ Given strings S and T, find the minimum (contiguous) substring W of S, so
that T is a subsequence of W.
If there is no such window in S that covers all characters in T, return the
empty string "". If there are multiple such minimum-length windows, return
the one with the left-most starting index.
Example 1:
Input: S = "abcdebdde", T = "bde" Output: "bcde" Explanation: "bcde" is the
answer because it occurs before "bdde" which has the same length. "deb" is
not a smaller window because the elements of T in the window must occur in
order
"""
class Solution727:
    """LeetCode 727 - Minimum Window Subsequence (implements the empty stub)."""

    def minWindow(self, S, T):
        """Return the shortest substring W of S such that T is a subsequence
        of W; '' when no such window exists; leftmost window on ties.

        Two-pointer scan: for each forward match of T ending at `end`, walk
        backward to the latest possible `start`, record the window, then
        resume just past `start`.  O(len(S) * len(T)) time, O(1) space.
        """
        n, m = len(S), len(T)
        if m == 0 or n == 0:
            return ""
        best_start, best_len = -1, n + 1
        i = 0
        while i < n:
            # Forward pass: earliest end of a window beginning at/after i.
            t = 0
            while i < n:
                if S[i] == T[t]:
                    t += 1
                    if t == m:
                        break
                i += 1
            if i == n:
                break
            end = i
            # Backward pass: shrink to the latest possible start.
            t = m - 1
            start = end
            while t >= 0:
                if S[start] == T[t]:
                    t -= 1
                start -= 1
            start += 1
            if end - start + 1 < best_len:
                best_start, best_len = start, end - start + 1
            i = start + 1
        return "" if best_start < 0 else S[best_start:best_start + best_len]
| [
"aliaksei.kaliutau@gmail.com"
] | aliaksei.kaliutau@gmail.com |
dcd430ba643880ad3d9694ed4cc1931961efdb1c | b9eb496c4551fd091954675a61382636fc68e715 | /src/ABC1xx/ABC17x/ABC172/ABC172D.py | 22b78ec009546408cba2d57d41d58a2f5f8fe13c | [] | no_license | kttaroha/AtCoder | af4c5783d89a61bc6a40f59be5e0992980cc8467 | dc65ce640954da8c2ad0d1b97580da50fba98a55 | refs/heads/master | 2021-04-17T16:52:09.508706 | 2020-11-22T05:45:08 | 2020-11-22T05:45:08 | 249,460,649 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | def main():
N = int(input())
nums = [1] * (N + 1)
s = 0
for i in range(2, N+1):
tmp = i
while tmp <= N:
nums[tmp] += 1
tmp += i
for i in range(1, N+1):
s += i * nums[i]
print(s)
# Run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"kthamano1994@gmail.com"
] | kthamano1994@gmail.com |
2e582b4c164d4d1f684a12aed00aa4268b122446 | e6d862a9df10dccfa88856cf16951de8e0eeff2b | /Core/worker/python/test/test_worker_performance_event_evaluation.py | b0bcb28658398e498344033f0e7a1a1d032bdb69 | [] | no_license | AllocateSoftware/API-Stubs | c3de123626f831b2bd37aba25050c01746f5e560 | f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0 | refs/heads/master | 2022-06-01T07:26:53.264948 | 2020-01-09T13:44:41 | 2020-01-09T13:44:41 | 232,816,845 | 0 | 0 | null | 2022-05-20T21:23:09 | 2020-01-09T13:34:35 | C# | UTF-8 | Python | false | false | 1,014 | py | # coding: utf-8
"""
Workers
## Workers and events # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@allocatesoftware.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import api_server
from api_server.models.worker_performance_event_evaluation import WorkerPerformanceEventEvaluation # noqa: E501
from api_server.rest import ApiException
class TestWorkerPerformanceEventEvaluation(unittest.TestCase):
    """WorkerPerformanceEventEvaluation unit test stubs"""
    def setUp(self):
        # No fixtures needed yet; placeholder from the OpenAPI generator template.
        pass
    def tearDown(self):
        pass
    def testWorkerPerformanceEventEvaluation(self):
        """Test WorkerPerformanceEventEvaluation"""
        # FIXME: construct object with mandatory attributes with example values
        # model = api_server.models.worker_performance_event_evaluation.WorkerPerformanceEventEvaluation()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"nigel.magnay@gmail.com"
] | nigel.magnay@gmail.com |
c0f1fb47b6faa9935dd1ef06dd48a6f9ebe66315 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/distutils/versionpredicate.py | 1ceb1d76a7d86286983c5fc23abde3ae1bbb4694 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,425 | py | #Embedded file name: distutils/versionpredicate.py
import re
import distutils.version
import operator
# Dotted package name (case-insensitive); group 2 captures whatever follows
# (an optional parenthesized predicate list).
re_validPackage = re.compile('(?i)^\\s*([a-z_]\\w*(?:\\.[a-z_]\\w*)*)(.*)')
# Contents of a parenthesized list, e.g. '(>= 1.0, != 1.5)'.
re_paren = re.compile('^\\s*\\((.*)\\)\\s*$')
# A single comparison such as '>= 1.0'.
re_splitComparison = re.compile('^\\s*(<=|>=|<|>|!=|==)\\s*([^\\s,]+)\\s*$')
def splitUp(pred):
    """Parse one predicate like '>= 1.0' into (comparison string,
    StrictVersion); raise ValueError on malformed input."""
    res = re_splitComparison.match(pred)
    if not res:
        raise ValueError('bad package restriction syntax: %r' % pred)
    comp, verStr = res.groups()
    return (comp, distutils.version.StrictVersion(verStr))
# Map comparison strings to the corresponding comparison callables.
compmap = {'<': operator.lt,
 '<=': operator.le,
 '==': operator.eq,
 '>': operator.gt,
 '>=': operator.ge,
 '!=': operator.ne}
class VersionPredicate:
    """A predicate such as 'pkg_name (>= 1.0, < 2.0)': a package name plus
    zero or more version restrictions (decompiled distutils code; logic
    kept byte-identical)."""
    def __init__(self, versionPredicateStr):
        """Parse the predicate string; raises ValueError on syntax errors."""
        versionPredicateStr = versionPredicateStr.strip()
        if not versionPredicateStr:
            raise ValueError('empty package restriction')
        match = re_validPackage.match(versionPredicateStr)
        if not match:
            raise ValueError('bad package name in %r' % versionPredicateStr)
        self.name, paren = match.groups()
        paren = paren.strip()
        if paren:
            match = re_paren.match(paren)
            if not match:
                raise ValueError('expected parenthesized list: %r' % paren)
            # NOTE(review): 'str' shadows the builtin here (faithful to the
            # original source); the shadowing is local to this method.
            str = match.groups()[0]
            self.pred = [ splitUp(aPred) for aPred in str.split(',') ]
            if not self.pred:
                raise ValueError('empty parenthesized list in %r' % versionPredicateStr)
        else:
            self.pred = []
    def __str__(self):
        if self.pred:
            seq = [ cond + ' ' + str(ver) for cond, ver in self.pred ]
            return self.name + ' (' + ', '.join(seq) + ')'
        else:
            return self.name
    def satisfied_by(self, version):
        """True iff *version* satisfies every stored restriction."""
        for cond, ver in self.pred:
            if not compmap[cond](version, ver):
                return False
        return True
# Lazily-compiled regex cache for split_provision().
_provision_rx = None
def split_provision(value):
    """Parse a 'provides' item of the form 'name' or 'name (version)'.

    Returns (name, StrictVersion or None).  The regex is compiled on first
    call and cached in the module global _provision_rx.
    """
    global _provision_rx
    if _provision_rx is None:
        _provision_rx = re.compile('([a-zA-Z_]\\w*(?:\\.[a-zA-Z_]\\w*)*)(?:\\s*\\(\\s*([^)\\s]+)\\s*\\))?$')
    value = value.strip()
    m = _provision_rx.match(value)
    if not m:
        raise ValueError('illegal provides specification: %r' % value)
    ver = m.group(2) or None
    if ver:
        ver = distutils.version.StrictVersion(ver)
    return (m.group(1), ver)
| [
"bizonix@me.com"
] | bizonix@me.com |
f20f8c824979e86de13da915aadb5f80de1ba16f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_157/750.py | 40dcc08356dad6634828062c24ac8c82edb056f4 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | #!/usr/bin/python
import sys
char2q={'i':0, 'j':1, 'k':2}
def solve_case():
    # One test case: L (length of the base string) and X (repeat count),
    # followed by a string over the letters {i, j, k} repeated X times.
    (L,X)=[int(n) for n in sys.stdin.readline().split(" ")]
    string=sys.stdin.readline()[:-1]
    string=X*string
    #print string
    # letter: how many of the target factors i, j, k (0, 1, 2) have been
    # peeled off so far.  Q is the current partial quaternion product
    # (0/1/2 for i/j/k, 3 for a pure +-1); minus tracks its sign.
    letter=0
    Q=3
    minus=False
    for c in string:
        #print c,
        q=char2q[c]
        if q==Q:
            # Same unit squared: i*i = j*j = k*k = -1.
            Q=3
            minus=not minus
        elif Q==3:
            Q=q
        else:
            # Product of two distinct units is +- the third; diff==1 is the
            # cyclic order (i*j = k, ...), the other order flips the sign.
            diff=(3+q-Q)%3
            if diff==1:
                Q=(Q+2)%3
            else:
                Q=(Q+1)%3
                minus=not minus
        if not minus and Q==letter and letter!=3:
            # The running product equals the next needed factor: cut here.
            letter+=1
            Q=3
    #print
    # Success iff the string split into exactly i, j, k and the remaining
    # product after the last cut is +1.
    if letter==3 and not minus and Q==3:
        return "YES"
    else:
        return "NO"
cases_count=int(sys.stdin.readline())
for i in xrange(cases_count):
    print "Case #"+`i+1`+": "+solve_case()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
0b9b9d7c58619def344ec1bb8490039754f21dff | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v4/services/reach_plan_service_client_config.py | 7da209ded44302ee1db8c586bc0d53ba3cb561df | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | false | 1,331 | py | config = {
    # gRPC client defaults for ReachPlanService (auto-generated config).
    "interfaces": {
        "google.ads.googleads.v4.services.ReachPlanService": {
            # Status codes each retry class is allowed to retry on.
            "retry_codes": {
                "idempotent": [
                    "DEADLINE_EXCEEDED",
                    "UNAVAILABLE"
                ],
                "non_idempotent": []
            },
            # Exponential backoff / timeout schedule shared by all methods.
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 5000,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 3600000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 3600000,
                    "total_timeout_millis": 3600000
                }
            },
            # Per-RPC settings: every method is non-idempotent, 60 s timeout.
            "methods": {
                "ListPlannableLocations": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default"
                },
                "ListPlannableProducts": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default"
                },
                "GenerateProductMixIdeas": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default"
                },
                "GenerateReachForecast": {
                    "timeout_millis": 60000,
                    "retry_codes_name": "non_idempotent",
                    "retry_params_name": "default"
                }
            }
        }
    }
}
| [
"noreply@github.com"
] | fiboknacky.noreply@github.com |
487e993470611bf28744dc80b3488e471f00e3a1 | 23db36a9e19a48aa660444dfc32d50ebc6c88a42 | /doc/_code/inline_keyboard.py | 2d29d42301312f031228dbcdbd429134c5d299c8 | [
"MIT"
] | permissive | cpcchengt/telepot | fab206ce6ea4149e0dce9619adc9075b59c06a66 | eb2050fbb36b142a9746533e738322d0cc0d4631 | refs/heads/master | 2023-06-20T02:35:36.644708 | 2021-07-21T02:55:44 | 2021-07-21T02:55:44 | 384,059,417 | 2 | 0 | MIT | 2021-07-08T08:44:57 | 2021-07-08T08:44:57 | null | UTF-8 | Python | false | false | 936 | py | import sys
import time
import telepot
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
def on_chat_message(msg):
    """Reply to any incoming chat message with a one-button inline keyboard."""
    # Only the chat id is needed here; glance() also yields content/chat type.
    _, _, chat_id = telepot.glance(msg)
    button = InlineKeyboardButton(text='Press me', callback_data='press')
    markup = InlineKeyboardMarkup(inline_keyboard=[[button]])
    bot.sendMessage(chat_id, 'Use inline keyboard', reply_markup=markup)
def on_callback_query(msg):
    """Log a button press and acknowledge it so the client stops spinning."""
    query_id, sender_id, payload = telepot.glance(msg, flavor='callback_query')
    print('Callback Query:', query_id, sender_id, payload)
    bot.answerCallbackQuery(query_id, text='Got it')
TOKEN = sys.argv[1]  # bot token supplied as the first command-line argument
bot = telepot.Bot(TOKEN)
# Dispatch chat messages and callback queries to their handlers on a
# background thread; the main thread only sleeps to keep the process alive.
MessageLoop(bot, {'chat': on_chat_message,
                  'callback_query': on_callback_query}).run_as_thread()
print('Listening ...')
while 1:
    time.sleep(10)
| [
"lee1nick@yahoo.ca"
] | lee1nick@yahoo.ca |
69f414c19e8cafff59fec7dc9c7e05ad0644f490 | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Leetcode/Algorithm/python/2000/01865-Finding Pairs With a Certain Sum.py | 66b88abfc548b4814351a7770db1ce304c3a3b3d | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from collections import defaultdict
class FindSumPairs(object):
    """LeetCode 1865: count index pairs (i, j) with nums1[i] + nums2[j] == tot.

    nums1 is read-only; nums2 supports point updates, so a value -> frequency
    map of nums2 (self.d) is maintained alongside it and kept in sync by add().
    """

    def __init__(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        """
        self.d = defaultdict(int)
        self.nums1 = nums1
        self.nums2 = nums2
        for item in nums2:
            self.d[item] += 1

    def add(self, index, val):
        """Add val to nums2[index] and update the frequency map accordingly."""
        pre = self.nums2[index]
        cur = pre + val
        self.d[pre] -= 1
        self.d[cur] += 1
        self.nums2[index] = cur

    def count(self, tot):
        """Return the number of pairs summing to tot, in O(len(nums1)).

        Uses .get() rather than self.d[...]: indexing a defaultdict inserts a
        zero entry for every missing key, so repeated count() calls with
        unmatched sums would grow the map without bound.
        """
        return sum(self.d.get(tot - a, 0) for a in self.nums1)
# Your FindSumPairs object will be instantiated and called as such:
# obj = FindSumPairs(nums1, nums2)
# obj.add(index,val)
# param_2 = obj.count(tot)
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
6022f801423acba4dc6fba931bf79e8128f0fd72 | e30a578e2467b67d82dc8529c2e8107579496d01 | /ML/3.py | 3cdaebea2b295d3905e602dbc0e4bad537e6ab9e | [
"MIT"
] | permissive | rednithin/7thSemLabs | 7636ad55e02d7f4fbf56e2b4fbc73ff79868006e | 5bd4102627aa068afd16c55c02b18e51ec5cba4c | refs/heads/master | 2020-04-06T13:01:15.576852 | 2018-12-09T06:49:12 | 2018-12-09T06:49:12 | 157,480,151 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | import numpy as np
from csv import reader
from math import log2
from collections import Counter
from pprint import pprint
YES, NO = "Y", "N"
class Node:
    """A decision-tree node: a label plus a mapping attribute-value -> child."""

    def __init__(self, label):
        self.branches = {}   # attribute value -> child Node (empty at a leaf)
        self.label = label   # attribute name, or the class value at a leaf
def entropy(data):
    """Shannon entropy of the class column (last column) of `data`."""
    total = len(data)
    result = 0
    # Accumulate -p*log2(p) for each class label, skipping absent classes
    # (positive first, then negative, matching the original evaluation order).
    for label in (YES, NO):
        count = (data[:, -1] == label).sum()
        if count:
            share = count / total
            result -= share * log2(share)
    return result
def gain(s, data, column):
    """Information gain of splitting `data` on `column`, given parent entropy s."""
    n = len(data)
    result = s
    # Subtract each child's entropy weighted by the fraction of rows it gets.
    for value in set(data[:, column]):
        subset = data[data[:, column] == value]
        result -= len(subset) / n * entropy(subset)
    return result
def bestAttribute(data):
    """Index of the attribute column with the highest information gain.

    Ties go to the lowest column index (max returns the first maximum).
    """
    parent = entropy(data)
    gains = [gain(parent, data, col) for col in range(len(data[0]) - 1)]
    best_col, _ = max(enumerate(gains), key=lambda cg: cg[1])
    return best_col
def id3(data, labels):
    """Recursively build an ID3 decision tree.

    data   -- 2-D array: attribute columns followed by a class column.
    labels -- attribute names matching the columns of `data`.
    Returns the root Node of the (sub)tree.
    """
    # Pure partition: every row has the same class -> leaf with that class.
    if entropy(data) == 0:
        return Node(data[0, -1])
    # No attributes left to split on -> leaf with the majority class.
    if len(data[0]) == 1:
        majority = Counter(data[:, -1]).most_common()[0][0]
        return Node(majority)
    # Otherwise split on the most informative attribute and recurse, removing
    # the chosen column from both the data and the label list.
    column = bestAttribute(data)
    node = Node(labels[column])
    for value in set(data[:, column]):
        rows = data[data[:, column] == value]
        node.branches[value] = id3(np.delete(rows, column, axis=1),
                                   np.delete(labels, column))
    return node
def getRules(root, rule, rules):
    """Append one 'attr=value ^ ... => class' string per leaf to `rules`."""
    if not root.branches:
        # Leaf: trim the trailing "^ " from the accumulated condition path.
        rules.append(rule[:-2] + "=> " + root.label)
        return
    for value, child in root.branches.items():
        getRules(child, rule + root.label + "=" + value + " ^ ", rules)
def predict(tree, tup):
    """Walk the tree using attribute values from dict `tup`; return the class."""
    node = tree
    # Descend until a leaf (a node with no outgoing branches) is reached.
    while node.branches:
        node = node.branches[tup[node.label]]
    return node.label
labels = np.array(['Outlook', 'Temperature', 'Humidity', 'Wind', 'PlayTennis'])
# Load the training table: attribute columns followed by the class column.
with open('3-dataset.csv') as f:
    data = np.array(list(reader(f)))
tree = id3(data, labels)
rules = []
getRules(tree, "", rules)
# Print every decision rule of the learned tree in sorted order.
pprint(sorted(rules))
# Prompt for one value per attribute and classify the resulting tuple.
tup = {}
for label in labels[:-1]:
    tup[label] = input(label + ": ")
print(predict(tree, tup))
| [
"reddy.nithinpg@live.com"
] | reddy.nithinpg@live.com |
0440b1fff9d5435ce1e25dee4e860b2f50ea83b1 | 06569ec06be697beffdc5eeff1e51ca0e69ee388 | /naengpa/migrations/0001_initial.py | 1b2e597cd43aee0be84f29f42d34ba72acf44faf | [] | no_license | Areum0921/naengpa | fbb88f962c03a0a6222942e81726fb883986861a | ebf6f27aaa78c8ea06d269410bfc9a179c3f4bcc | refs/heads/master | 2023-07-14T15:55:36.524558 | 2021-08-19T07:50:26 | 2021-08-19T07:50:26 | 388,108,445 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | # Generated by Django 3.2.5 on 2021-07-22 08:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # First migration of the app: no dependencies, creates the Recipe table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('time', models.IntegerField()),
                ('difficulty', models.IntegerField()),
                ('need_ingredient', models.CharField(max_length=200)),
                ('content', models.TextField(default='')),
                ('create_date', models.DateTimeField()),
            ],
        ),
    ]
| [
"a90907@gmail.com"
] | a90907@gmail.com |
a5a6566ee6f951719d7bae58963b4f8df750761f | ca2782824a16b7b9dc1284ff058776ca62809787 | /exercise_library/basic_navigation/api.py | 6c2a9ddcf12418c07ad9055ba07a07cfb90228b0 | [] | no_license | captbaritone/exercise-library | 9ddc838f4ffe24c65fd381be6d421f5084e48392 | c01274c05a35e47b140846ffa06f3a42cc4dc291 | refs/heads/master | 2020-12-24T12:47:23.645268 | 2015-02-09T08:05:34 | 2015-02-09T08:05:34 | 30,525,565 | 1 | 0 | null | 2015-02-09T08:21:29 | 2015-02-09T08:21:28 | null | UTF-8 | Python | false | false | 555 | py | import json
from django.http import Http404
from django.http import HttpResponse
def render_to_json(response_obj, context=None, content_type="application/json", status=200):
    """Serialize `response_obj` to indented JSON and wrap it in an HttpResponse.

    `context` is accepted for call-site compatibility but is unused here; its
    default was a shared mutable `{}`, replaced by None to avoid the
    mutable-default-argument pitfall.
    """
    json_str = json.dumps(response_obj, indent=4)
    return HttpResponse(json_str, content_type=content_type, status=status)
def requires_post(fn):
def inner(request, *args, **kwargs):
if request.method != "POST":
return Http404
# post_data = request.POST or json.loads(request.body)
return fn(request, *args, **kwargs)
return inner
| [
"slobdell@hearsaycorp.com"
] | slobdell@hearsaycorp.com |
5b6670e8acd44e1c3c70765eccf7f95c6d7a4463 | ee89c84c5b2f48d447b7005299b409d61cc4d807 | /venv/Scripts/rst2html.py | d82a8d31a3a9267b435b9d556c0d9e90b2f8e873 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | songweiwei/rasa_run | 342075cc645725a042acf273ab6508c5da55cbee | 0cfc0a280b9efea344bacf5f2df5800c32d0b3a8 | refs/heads/master | 2023-05-31T03:23:26.490925 | 2020-04-22T07:56:07 | 2020-04-22T07:56:07 | 257,218,895 | 2 | 2 | null | 2023-05-22T23:23:45 | 2020-04-20T08:31:42 | Python | UTF-8 | Python | false | false | 641 | py | #!c:\users\sonny\pycharmprojects\rasa_run\venv\scripts\python.exe
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
# Best-effort: switch to the user's default locale so output is locale-aware;
# any failure (missing or unsupported locale) is deliberately ignored.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Narrowed from a bare `except:`, which would also swallow
    # SystemExit and KeyboardInterrupt.
    pass
from docutils.core import publish_cmdline, default_description
# Command-line description shown by --help; extends the stock Docutils text.
description = ('Generates (X)HTML documents from standalone reStructuredText '
               'sources. ' + default_description)
# Reads argv/stdin and writes HTML to stdout or the named output file.
publish_cmdline(writer_name='html', description=description)
| [
"1194488130@qq.com"
] | 1194488130@qq.com |
2f53fe70840c17f5e6eb67b3f761b26aa5105eb1 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/hdinsight/azure-mgmt-hdinsight/generated_samples/get_linux_hadoop_script_action.py | c384d8c553f822eb33b468a8920289e2e0df61e7 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,580 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.hdinsight import HDInsightManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-hdinsight
# USAGE
python get_linux_hadoop_script_action.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List and print every script action on the sample HDInsight cluster."""
    credential = DefaultAzureCredential()
    client = HDInsightManagementClient(
        credential=credential,
        subscription_id="subid",
    )
    # list_by_cluster returns a pageable iterable of script actions.
    actions = client.script_actions.list_by_cluster(
        resource_group_name="rg1",
        cluster_name="cluster1",
    )
    for action in actions:
        print(action)


# x-ms-original-file: specification/hdinsight/resource-manager/Microsoft.HDInsight/stable/2021-06-01/examples/GetLinuxHadoopScriptAction.json
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
4390260f8e58d5e4672f6e75b6cd7aa1bc72ce21 | e40bf504eda3413074cc719a421cdab222ece729 | /chip.py | 4bb26d0260a51c141f095838551ef442146d101b | [] | no_license | bcrafton/noc | 93b20725b53890d0ee405ee51a53fa04cf448e3d | 4191eca66a4498eb5f3e4069b92a3e7c006e868b | refs/heads/master | 2023-02-16T12:46:22.729027 | 2021-01-11T17:09:29 | 2021-01-11T17:09:29 | 328,193,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,619 | py |
import numpy as np
from modules import *
from transform_weights import transform_weights
from transform_inputs import transform_inputs
############################
def bits(model, inputs):
    """Per-layer transformed weight views plus input feature-map shapes.

    Walks the layers in order, flattening each conv kernel to a (k*k*c, n)
    integer matrix for transform_weights(), and recording the
    (height, width, channels) of the activations feeding each layer.
    """
    height, _, _, _ = np.shape(inputs)  # assumes (h, w, c, n) layout — TODO confirm
    size = height
    weight_views = []
    shapes = []
    for name in model.keys():
        layer = model[name]
        k, _, c, n = np.shape(layer['f'])
        flat = np.reshape(layer['f'], (k * k * c, n)).astype(int)
        weight_views.append(transform_weights(flat))
        shapes.append((size, size, c))
        # Conv output-size arithmetic with stride s and asymmetric pads p1/p2.
        size = (size - k + layer['s'] + layer['p1'] + layer['p2']) // layer['s']
    return weight_views, shapes
############################
def malloc(grid, w, x):
    """Allocate grid cells for one layer's weight tiles (WORK IN PROGRESS).

    NOTE(review): this is a stub — it derives a rough rectangular layout for
    the nwl weight tiles but never writes into `grid` and returns None.
    """
    nwl, _, nbl, _ = np.shape(w)
    nd = 8 // nbl  # NOTE(review): unused so far — presumably duplicates per 8-bit word; confirm
    # allocate [nwl] grid cells.
    col = np.floor(np.sqrt(nwl))
    row = nwl // col
    rem = nwl % col
    # nwl=3 -> [col=1, row=3, rem=0]
    # pick a start address
    # this should be an optimization problem
    # create clusters, then figure out how to route them.
    #
############################
class chip:
    """A 4x4 grid of tiles, each pairing a PCM array with an SRAM buffer."""
    def __init__(self):
        self.grid = [ [None for _ in range(4)] for _ in range(4) ]
        # generate all [SRAM, PCRAM] pairs
        for i in range(4):
            for j in range(4):
                self.grid[i][j] = {'PCM': PCM(), 'SRAM': SRAM()}
    def step(self):
        # NOTE(review): simulation step is not implemented yet.
        pass
    '''
    def map(self, model, inputs):
        # set PCM and SRAM to hold specific parts of our model and activations.
        # this is probably a whole field we dont know about.
        #
        ws, xs = bits(model, inputs)
        grid = np.zeros(shape=(4, 4))
        for (w, x) in zip(ws, xs):
            # malloc - where to place w and x ?
            malloc(grid, w, x)
        # what about when we cant store the whole input in the SRAM ?
        # need to "orchestrate" the transfer to all adjacent nodes.
        #
        # allocate
        # placement / mapping
        # routing
        #
        # think problem is small enough such that we can find optimal solution
        #
        # we already did allocation with breaking barriers.
        # but now we have to do the other parts.
        #
        #
    '''
    def map(self, model, inputs):
        # NOTE(review): placement() and routing() are not defined in this
        # file, and malloc() is called with 2 args though it takes 3 — this
        # method cannot run as written; confirm against the rest of the repo.
        ws, xs = bits(model, inputs)
        alloc = malloc(ws, xs) # breaking barriers
        place = placement(alloc)
        route = routing(place)
| [
"crafton.b@husky.neu.edu"
] | crafton.b@husky.neu.edu |
e0e0877c3ae3262b43646cf6272ffe758b1fc454 | c17133ea0f7bbad98ef17ce2c0213b8c95a4630f | /Design_serach_autocomplete_system.py | 8971f354fa6ab79c1247ea332d87cc11c02a0754 | [] | no_license | Mohan110594/Design-6 | e8be1935bfa9a95537846face73f5f7d8943bf3f | 36faa1203276e58f8b52470e1e09fa60f388f1e3 | refs/heads/master | 2022-04-16T09:25:59.967115 | 2020-04-11T16:06:36 | 2020-04-11T16:06:36 | 254,902,314 | 0 | 0 | null | 2020-04-11T15:55:00 | 2020-04-11T15:54:59 | null | UTF-8 | Python | false | false | 3,575 | py | // Did this code successfully run on Leetcode : Yes
// Any problem you faced while coding this : None
// Your code here along with comments explaining your approach
In this problem we create a trie with all the given sentences and at every letter we maintain the word frequency at each and every letter.
when inputing we check the trie and if the sentence is not present we add it to the trie and make changes in the frequency map accordingly.Then we fetch the frequency map and then we extract the 3 most frequency lexiographically greater sentences and then we send them as output at each input.
# Time complexity --> o(l*n) where n is the number of sentences and l is the length of each sentence
import heapq
from collections import deque
# Heap entry ordered so a size-3 min-heap retains the hottest sentences:
# lower frequency sorts first (and is evicted first); on equal frequency the
# lexicographically LARGER sentence sorts first, so smaller ones survive.
class lexico:
    def __init__(self, key, value):
        self.key = key       # sentence text
        self.value = value   # sentence frequency

    def __lt__(self, other):
        if self.value != other.value:
            return self.value < other.value
        return self.key > other.key
# One trie node per sentence character; `freqmap` at each node records every
# complete sentence routed through this node together with its frequency.
class TrieNode:
    def __init__(self):
        self.children = dict()   # char -> TrieNode
        self.freqmap = dict()    # sentence -> frequency
        self.isEnd = False       # True when some sentence terminates here
class AutocompleteSystem(object):
    """LeetCode 642-style autocomplete backed by a trie of TrieNode objects.

    Each trie node's freqmap holds sentence -> frequency for every sentence
    sharing that prefix, so the top-3 lookup only inspects the current node.
    """
    def __init__(self, sentences, times):
        """
        :type sentences: List[str]
        :type times: List[int]
        """
        # self.sentences=sentences
        # self.times=times
        self.str1 = ''               # characters typed since the last '#'
        self.root = TrieNode()
        self.cursor = self.root      # trie node matching the current prefix
        # max length of the heap to be maintained (top-3 suggestions)
        self.k = 3
        # seed the trie with the historical sentences and their frequencies
        for i in range(len(sentences)):
            sent = sentences[i]
            freq = times[i]
            self.insert(sent, freq)
    # Insert `word` with frequency `freq`, recording the frequency in the
    # freqmap of every node along the word's path (enables prefix lookup).
    def insert(self, word, freq):
        root1=self.root
        for i in range(len(word)):
            if word[i] not in root1.children:
                root1.children[word[i]] = TrieNode()
            root1 = root1.children[word[i]]
            root1.freqmap[word] = freq
        root1.isEnd = True
    def input(self, c):
        """
        :type c: str
        :rtype: List[str]
        NOTE(review): returns a deque of up to 3 suggestions, or None when
        c == '#' — confirm callers accept both.
        """
        # '#' terminates the sentence: store it (new sentences start at
        # frequency 1, known ones are bumped by 1) and reset the cursor.
        if c == '#':
            if self.str1 not in self.cursor.freqmap:
                self.insert(self.str1, 1)
            else:
                self.insert(self.str1,self.cursor.freqmap[self.str1]+1)
            self.str1 = ''
            self.cursor = self.root
            return
        self.str1 = self.str1 + c
        # Extend the trie lazily so the cursor can always advance.
        if c not in self.cursor.children:
            self.cursor.children[c] = TrieNode()
        # frequency map of all sentences sharing the new, longer prefix
        freqcursor=self.cursor.children[c].freqmap
        self.cursor = self.cursor.children[c]
        pq = []
        # size-k min-heap with the custom comparator: evicts the entry with
        # the lowest frequency (ties: lexicographically larger sentence).
        for key,value in freqcursor.items():
            val1=lexico(key,value)
            heapq.heappush(pq, val1)
            if len(pq) > self.k:
                heapq.heappop(pq)
        # pop ascending and prepend, yielding best-first suggestion order
        out=deque()
        for i in range(len(pq)):
            ele=heapq.heappop(pq)
            out.appendleft(ele.key)
return out | [
"anu110594@gmail.com"
] | anu110594@gmail.com |
1c01d6b6bfd004dd2a32aa4a0929d97689dc22af | 625108dc5a9b90d0f22609788ff52aff155a2c99 | /selection/randomized/bootstrap/bayes_boot_randomX_gn.py | 0c2f94b79a6c8c437a2055d00767c26536c46cd3 | [] | no_license | guhjy/Python-software | 0169f6618a570bb5a5e3aaf29e895d2251ca791c | 061a050bd17ca6f276296dbfa51573f001c320b1 | refs/heads/master | 2021-01-09T20:55:01.417131 | 2016-09-12T20:43:43 | 2016-09-12T20:43:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,078 | py | import numpy as np
from scipy.stats import laplace, probplot, uniform
#from selection.algorithms.lasso import instance
from instances import instance, bootstrap_covariance
import selection.sampling.randomized.api as randomized
#from pvalues_bayes_randomX import pval
from pvalues_bayes_ranX_gn import pval
from matplotlib import pyplot as plt
import regreg.api as rr
import selection.sampling.randomized.losses.lasso_randomX as lasso_randomX
import statsmodels.api as sm
def test_lasso(s=0, n=100, p=20, weights = "neutral",
               randomization_dist = "logistic", randomization_scale = 1,
               Langevin_steps = 10000, burning = 2000, X_scaled = True,
               covariance_estimate = "nonparametric", noise = "uniform"):
    """ weights: exponential, gamma, normal, gumbel
        randomization_dist: logistic, laplace """
    step_size = 1./p
    # Simulate a sparse regression instance with s true signals.
    X, y, true_beta, nonzero, sigma = instance(n=n, p=p, random_signs=True, s=s, sigma=1.,rho=0, scale=X_scaled, noise=noise)
    print 'true beta', true_beta
    lam_frac = 1.
    # Draw the randomization vector from the requested distribution.
    if randomization_dist == "laplace":
        randomization = laplace(loc=0, scale=1.)
        random_Z = randomization.rvs(p)
    if randomization_dist == "logistic":
        random_Z = np.random.logistic(loc=0, scale = 1, size = p)
    if randomization_dist== "normal":
        random_Z = np.random.standard_normal(p)
    print 'randomization', random_Z*randomization_scale
    loss = lasso_randomX.lasso_randomX(X, y)
    epsilon = 1./np.sqrt(n)
    #epsilon = 1.
    # Monte-Carlo tuning of the lasso penalty level lambda.
    lam = sigma * lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.standard_normal((n, 10000)))+randomization_scale*np.random.logistic(size=(p,10000))).max(0))
    lam_scaled = lam.copy()
    random_Z_scaled = random_Z.copy()
    epsilon_scaled = epsilon
    # When X is not column-scaled, rescale the tuning quantities by sqrt(n).
    if (X_scaled == False):
        random_Z_scaled *= np.sqrt(n)
        lam_scaled *= np.sqrt(n)
        epsilon_scaled *= np.sqrt(n)
    penalty = randomized.selective_l1norm_lan(p, lagrange=lam_scaled)
    # initial solution: solve the randomized lasso program
    problem = rr.simple_problem(loss, penalty)
    random_term = rr.identity_quadratic(epsilon_scaled, 0, -randomization_scale*random_Z_scaled, 0)
    solve_args = {'tol': 1.e-10, 'min_its': 100, 'max_its': 500}
    initial_soln = problem.solve(random_term, **solve_args)
    print 'initial solution', initial_soln
    active = (initial_soln != 0)
    # Nothing selected: signal failure to the caller with sentinel lists.
    if np.sum(active)==0:
        return [-1], [-1]
    inactive = ~active
    betaE = initial_soln[active]
    signs = np.sign(betaE)
    initial_grad = -np.dot(X.T, y - np.dot(X, initial_soln))
    if (X_scaled==False):
        initial_grad /= np.sqrt(n)
    print 'initial_gradient', initial_grad
    # Recover the subgradient of the l1 penalty from the KKT conditions.
    subgradient = random_Z - initial_grad - epsilon * initial_soln
    cube = np.divide(subgradient[inactive], lam)
    nactive = betaE.shape[0]
    ninactive = cube.shape[0]
    # Unpenalized least squares restricted to the selected set E.
    beta_unpenalized = np.linalg.lstsq(X[:, active], y)[0]
    print 'beta_OLS onto E', beta_unpenalized
    obs_residuals = y - np.dot(X[:, active], beta_unpenalized)  # y-X_E\bar{\beta}^E
    N = np.dot(X[:, inactive].T, obs_residuals)  # X_{-E}^T(y-X_E\bar{\beta}_E), null statistic
    full_null = np.zeros(p)
    full_null[nactive:] = N
    # parametric coveriance estimate
    if covariance_estimate == "parametric":
        XE_pinv = np.linalg.pinv(X[:, active])
        mat = np.zeros((nactive+ninactive, n))
        mat[:nactive,:] = XE_pinv
        mat[nactive:,:] = X[:, inactive].T.dot(np.identity(n)-X[:, active].dot(XE_pinv))
        Sigma_full = mat.dot(mat.T)
    else:
        Sigma_full = bootstrap_covariance(X,y,active, beta_unpenalized)
    # Initial Langevin state: [bootstrap weights alpha | betaE | cube].
    init_vec_state = np.zeros(n+nactive+ninactive)
    if weights =="exponential":
        init_vec_state[:n] = np.ones(n)
    else:
        init_vec_state[:n] = np.zeros(n)
    #init_vec_state[:n] = np.random.standard_normal(n)
    #init_vec_state[:n] = np.ones(n)
    init_vec_state[n:(n+nactive)] = betaE
    init_vec_state[(n+nactive):] = cube
    # Projection onto the selection-event constraints used by the sampler:
    # alpha stays in the weight distribution's support, betaE keeps its signs,
    # and the subgradient coordinates stay inside the unit cube.
    def full_projection(vec_state, signs = signs,
                        nactive=nactive, ninactive = ninactive):
        alpha = vec_state[:n].copy()
        betaE = vec_state[n:(n+nactive)].copy()
        cube = vec_state[(n+nactive):].copy()
        projected_alpha = alpha.copy()
        projected_betaE = betaE.copy()
        projected_cube = np.zeros_like(cube)
        if weights == "exponential":
            projected_alpha = np.clip(alpha, 0, np.inf)
        if weights == "gamma":
            projected_alpha = np.clip(alpha, -2+1./n, np.inf)
        for i in range(nactive):
            if (projected_betaE[i] * signs[i] < 0):
                projected_betaE[i] = 0
        projected_cube = np.clip(cube, -1, 1)
        return np.concatenate((projected_alpha, projected_betaE, projected_cube), 0)
    Sigma = np.linalg.inv(np.dot(X[:, active].T, X[:, active]))
    # Run the Langevin sampler and compute null/alternative p-values.
    null, alt = pval(init_vec_state, full_projection, X, obs_residuals, beta_unpenalized, full_null,
                     signs, lam, epsilon,
                     nonzero, active, Sigma,
                     weights, randomization_dist, randomization_scale,
                     Langevin_steps, step_size, burning,
                     X_scaled)
    # Sigma_full[:nactive, :nactive])
    return null, alt
if __name__ == "__main__":
np.random.seed(1)
plt.figure()
plt.ion()
P0, PA = [], []
for i in range(50):
print "iteration", i
p0, pA = test_lasso()
if np.sum(p0)>-1:
P0.extend(p0); PA.extend(pA)
plt.clf()
plt.xlim([0, 1])
plt.ylim([0, 1])
ecdf = sm.distributions.ECDF(P0)
x = np.linspace(min(P0), max(P0))
y = ecdf(x)
plt.plot(x, y, lw=2)
plt.plot([0, 1], [0, 1], 'k-', lw=1)
#probplot(P0, dist=uniform, sparams=(0, 1), plot=plt,fit=False)
#plt.plot([0, 1], color='k', linestyle='-', linewidth=2)
plt.pause(0.01)
print "done! mean: ", np.mean(P0), "std: ", np.std(P0)
while True:
plt.pause(0.05)
plt.savefig('bayes.pdf')
| [
"jonathan.taylor@stanford.edu"
] | jonathan.taylor@stanford.edu |
7fdcfd43aa5a87d2ab8a4c5a2f506ea8afb52965 | ec6a55345b2e3358a99113369c62ab4622ab527c | /src/Utilities/date_helper.py | ed424b9f133177c5b881b0a20ea2e7243aef4c7c | [] | no_license | cloew/PersonalAccountingSoftware | 566a92e127bc060cd18470e35e2f6decf94f1aa5 | 57c909c8581bef3b66388038a1cf5edda426ecf9 | refs/heads/master | 2016-08-11T16:16:30.179032 | 2015-05-12T20:09:19 | 2015-05-12T20:09:19 | 8,558,121 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from dateutil import parser
def DateToString(date):
""" Returns a string format of the given date """
return "{0:%m/%d/%Y}".format(date)
def StringToDate(dateString):
""" Converts a String to a date """
return parser.parse(dateString) | [
"cloew123@gmail.com"
] | cloew123@gmail.com |
c88654782e0687cc493dc2eb6dbdfc7506ce426d | cf025ea3bf079748472557304a290593c753b884 | /Algorithm/SWEA/연습문제_큐_마이쮸(list).py | 05a9a3af9874fc2a67cb7097febeae47d7ab0ed7 | [] | no_license | Silentsoul04/my_software_study | 7dbb035ceea74f42c7ce2051b2320f6cae75ed88 | c27d33c57f59fe5244a1041c11bbd826dd481546 | refs/heads/master | 2023-03-29T02:43:40.861045 | 2019-07-10T08:09:55 | 2019-07-10T08:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | front=-1
# Candy queue simulation: each turn a new person (id = idx + 1) joins the
# queue, then the person at the front takes one more candy than on their
# previous visit. Whoever takes the last candy is printed as the winner.
# (`front` is initialised to -1 earlier in the file.)
rear = -1
total = 20          # candies remaining
Q = [0] * 100000    # queue slots: [person id, candies taken on last visit]
idx = 0
while True:
    # A fresh person enters the queue, having taken nothing yet.
    rear += 1
    Q[rear] = [idx + 1, 0]
    # Serve the front of the queue; an n-th visit costs n candies.
    front += 1
    person, taken = Q[front]
    total -= taken + 1
    if total <= 0:
        print(person)
        break
    # Candy remains: the served person re-queues with the visit count bumped.
    rear += 1
    Q[rear] = [person, taken + 1]
    idx += 1
"pok_winter@naver.com"
] | pok_winter@naver.com |
a5214df67c7e06950cf5a6f59334fcf6b5e51b8b | c4764283f6d3eb9ee77c05d489ec0763a40c9925 | /Plot_Graphs.py | c125326400c76ff415a368500231cf4e8cfc89c6 | [] | no_license | Arseni1919/simulator_dcop_mst | 09475b6369a28120efabf4cfa29a973c80846f9d | 7f2c8a3066e1760df773ed9a92fdaab67942b20c | refs/heads/master | 2020-08-06T20:32:12.091581 | 2020-05-11T09:02:06 | 2020-05-11T09:02:06 | 213,143,452 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | from main_help_functions import *
# ---------------------------
# ------INPUT SETTINGS-------
# ---------------------------
file_name = "08.01.2020-22_54_47__DSA__Max_sum_MST_vanilla__DSA_HPA__Max_sum_HPA__Max_sum_TAC_DSA_vs_MS_file.data"
need_to_plot_variance = False
need_to_plot_min_max = False
# ---------------------------
file_name = 'data/%s' % file_name
file_name = file_name[:-5] + '.info'
with open(file_name, 'rb') as fileObject:
# load the object from the file into var b
info = pickle.load(fileObject)
for k, v in info['collisions'].items():
print(k, 'collisions mean: ', (statistics.mean(v)/2), 'std:', (statistics.stdev(v)/2))
pprint(info)
file_name = file_name[:-5] + '.data'
with open(file_name, 'rb') as fileObject:
# load the object from the file into var b
graphs = pickle.load(fileObject)
algorithms = list(graphs.keys())
plot_results_if(True, need_to_plot_variance, need_to_plot_min_max, graphs, algorithms, alpha=0.025)
# '.'
# ','
# 'o'
# 'v'
# '^'
# '<'
# '>'
# '1'
# '2'
# '3'
# '4'
# 's'
# 'p'
# '*'
# 'h'
# 'H'
# '+'
# 'x'
# 'D'
# 'd'
# '|'
# '_'
| [
"1919ars@gmail.com"
] | 1919ars@gmail.com |
5283b261f46dde76d6167da8cb2c5dd383eb6a7e | ea5762e8754d6b039963b0125822afb261844cc8 | /docs/_examples/drx_numpy-numba.py | 210d21040bafbc04be9cf0bee76fef45d132e451 | [
"MIT"
] | permissive | gonzalocasas/compas | 787977a4712fbfb9e230c4f433b6e2be509e4855 | 2fabc7e5c966a02d823fa453564151e1a1e7e3c6 | refs/heads/master | 2020-03-23T20:17:55.126856 | 2018-07-24T22:30:08 | 2018-07-24T22:30:08 | 142,033,431 | 0 | 0 | MIT | 2018-07-31T14:54:52 | 2018-07-23T15:27:19 | Python | UTF-8 | Python | false | false | 1,838 | py | """A dynamic relaxation example comparing NumPy with Numba."""
from compas_blender.geometry import BlenderMesh
from compas_blender.helpers import network_from_bmesh
from compas_blender.utilities import clear_layer
from compas_blender.utilities import draw_plane
from compas.numerical import drx_numpy
from compas.hpc import drx_numba
from time import time
from matplotlib import pyplot as plt
__author__ = ['Andrew Liew <liew@arch.ethz.ch>']
__copyright__ = 'Copyright 2017, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'liew@arch.ethz.ch'
data = {'numpy': [], 'numba': [], 'nodes': []}
for m in range(10, 71, 5):
clear_layer(layer=0)
# Set-up Network
bmesh = draw_plane(dx=1/m, dy=1/m)
blendermesh = BlenderMesh(object=bmesh)
network = network_from_bmesh(bmesh=bmesh)
Pz = 100 / network.number_of_vertices()
network.update_default_vertex_attributes({'B': [0, 0, 1], 'P': [0, 0, Pz]})
network.update_default_edge_attributes({'E': 10, 'A': 1, 'ct': 't', 'l0': 1/m})
corners = [key for key in network.vertices() if network.vertex_degree(key) == 2]
network.set_vertices_attributes(corners, {'B': [0, 0, 0]})
data['nodes'].append(network.number_of_vertices())
# Numpy-SciPy
tic = time()
X, f, l = drx_numpy(network=network, tol=0.01)
data['numpy'].append(time() - tic)
blendermesh.update_vertices(X)
# Numba
tic = time()
X, f, l = drx_numba(network=network, tol=0.01)
data['numba'].append(time() - tic)
blendermesh.update_vertices(X)
# Plot data
plt.plot(data['nodes'], data['numpy'])
plt.plot(data['nodes'], data['numba'])
plt.ylabel('Analysis time [s]')
plt.xlabel('No. nodes')
plt.legend(['NumPy-SciPy', 'Numba'])
plt.xlim([0, data['nodes'][-1]])
plt.ylim([0, max(data['numpy'])])
plt.show()
| [
"vanmelet@ethz.ch"
] | vanmelet@ethz.ch |
759e6bda7235ac11b4f4d331bff779a33d0164b5 | 875c597ab0bb0af1fe3a78944b1424f0defd164b | /flask/flaskr/blog/blog.py | 424621b8a1a12abb1458adcc59a6d6d2988a7dc9 | [] | no_license | alls77/lectures | 20dcbb0e69a7a2db0eaed2f8c858e8c1d1483c77 | 4695c35b811176a2ad66c9417708158bf5052d23 | refs/heads/master | 2022-05-22T12:35:04.131269 | 2020-01-27T16:31:15 | 2020-01-27T16:31:15 | 221,741,618 | 0 | 0 | null | 2022-04-22T23:01:54 | 2019-11-14T16:33:28 | Python | UTF-8 | Python | false | false | 3,102 | py | from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from flaskr.auth.auth import login_required
from flaskr.db import get_db
from flaskr.blog.queries import (
create_post, delete_post, get_post, update_post, post_list
)
bp = Blueprint("blog", __name__)
@bp.route("/")
def index():
"""Show all the posts, most recent first."""
db = get_db()
posts = post_list(db)
return render_template("blog/index.html", posts=posts)
def check_post(id, check_author=True):
"""Get a post and its author by id.
Checks that the id exists and optionally that the current user is
the author.
:param id: id of post to get
:param check_author: require the current user to be the author
:return: the post with author information
:raise 404: if a post with the given id doesn't exist
:raise 403: if the current user isn't the author
"""
post = get_post(get_db(), id)
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post["author_id"] != g.user["id"]:
abort(403)
return post
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
"""Create a new post for the current user."""
if request.method == "POST":
error = None
# TODO: достать title и body из формы
title = request.form['title']
body = request.form['body']
# TODO: title обязательное поле. Если его нет записать ошибку
if title is None:
error = 'Title is required'
if error is not None:
flash(error)
else:
db = get_db()
create_post(db, title, body, g.user["id"])
return redirect(url_for("blog.index"))
return render_template("blog/create.html")
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
"""Update a post if the current user is the author."""
post = check_post(id)
if request.method == "POST":
error = None
# TODO: достать title и body из формы
title = request.form['title']
body = request.form['body']
# TODO: title обязательное поле. Если его нет записать ошибку
if title is None:
error = 'Title is None'
if error is not None:
flash(error)
else:
db = get_db()
update_post(db, title, body, id)
return redirect(url_for("blog.index"))
return render_template("blog/update.html", post=post)
@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
check_post(id)
db = get_db()
delete_post(db, id)
return redirect(url_for("blog.index"))
| [
"alina_selina@bk.ru"
] | alina_selina@bk.ru |
41006f95fe2e28137f3887419d27eca666de4098 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/ListHistoricalAgentReportRequest.py | 4772cc3adbe8a9211facc64e47f783fba1e7272c | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,409 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class ListHistoricalAgentReportRequest(RpcRequest):
	"""RPC request wrapper for the CCC ``ListHistoricalAgentReport`` API (version 2020-07-01).

	Auto-generated Aliyun SDK class: each get_/set_ pair maps one API
	parameter.  Note: AgentIdList travels in the request *body*; every
	other parameter here is a query parameter.
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'CCC', '2020-07-01', 'ListHistoricalAgentReport','CCC')
		self.set_method('POST')
		# Endpoint resolution data is attached only when the core class
		# exposes the corresponding attributes (SDK-version dependent).
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_StartTime(self): # Long
		return self.get_query_params().get('StartTime')
	def set_StartTime(self, StartTime): # Long
		self.add_query_param('StartTime', StartTime)
	def get_StopTime(self): # Long
		return self.get_query_params().get('StopTime')
	def set_StopTime(self, StopTime): # Long
		self.add_query_param('StopTime', StopTime)
	def get_PageNumber(self): # Integer
		return self.get_query_params().get('PageNumber')
	def set_PageNumber(self, PageNumber): # Integer
		self.add_query_param('PageNumber', PageNumber)
	def get_InstanceId(self): # String
		return self.get_query_params().get('InstanceId')
	def set_InstanceId(self, InstanceId): # String
		self.add_query_param('InstanceId', InstanceId)
	def get_AgentIdList(self): # String
		return self.get_body_params().get('AgentIdList')
	def set_AgentIdList(self, AgentIdList): # String
		self.add_body_params('AgentIdList', AgentIdList)
	def get_PageSize(self): # Integer
		return self.get_query_params().get('PageSize')
	def set_PageSize(self, PageSize): # Integer
		self.add_query_param('PageSize', PageSize)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
c600cab231ae189afad7e9c793c31698f1d1dda0 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/locale/COMMON.py | 6af510f834e815784c03e95821358733067dd94e | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 637 | py | # 2017.05.04 15:24:58 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/locale/COMMON.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
class COMMON(object):
    """Scaleform localization keys for common punctuation strings.

    Decompiled, generated file -- the constants are lookup keys into the
    '#common' localization catalog, not display strings themselves.
    """
    COMMON_COLON = '#common:common/colon'
    COMMON_PERCENT = '#common:common/percent'
    COMMON_DASH = '#common:common/dash'
    COMMON_SLASH = '#common:common/slash'
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\locale\COMMON.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:58 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
19f41e85f4206a6192a3e3a9be678142773af90e | 0f4e69f6620ce3bd35b0090ed97b0d520dac2775 | /build/kobuki_safety_controller/catkin_generated/pkg.develspace.context.pc.py | e8c8e32c2906ad95b607e2e0dba31bcfe5203745 | [] | no_license | yosoy2/turtlebot2 | 277c44fe63bb808ac3ff1b050388f35e7e9aca5d | d3052cc648b617c43b6190cbfc8d08addbb8f9de | refs/heads/master | 2021-03-16T12:40:54.660254 | 2020-03-12T18:52:15 | 2020-03-12T18:52:15 | 246,908,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/edu/turtlebot2/src/kobuki_safety_controller/include".split(';') if "/home/edu/turtlebot2/src/kobuki_safety_controller/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;nodelet;pluginlib;std_msgs;geometry_msgs;kobuki_msgs;yocs_controllers;ecl_threads".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkobuki_safety_controller_nodelet".split(';') if "-lkobuki_safety_controller_nodelet" != "" else []
PROJECT_NAME = "kobuki_safety_controller"
PROJECT_SPACE_DIR = "/home/edu/turtlebot2/devel"
PROJECT_VERSION = "0.7.6"
| [
"edotercero66.el@gmail.com"
] | edotercero66.el@gmail.com |
cb4095ce914cad8e2421522ea8914167096ef584 | 219d7cf7cf00b778ff1a5709406c144fcf2132f3 | /exam prep/04. Food for Pets.py | 16055a3550f1f098f577347be2a5aa08d4590c62 | [] | no_license | SilviaKoynova/Softuni-Programming-Basics-Python | e8e175419383815c65c4e110fdb2b752d940e887 | 0dfef0850f2cb8471dfee1af89f137be4e887cb8 | refs/heads/main | 2023-07-13T00:35:09.389302 | 2021-08-27T07:43:45 | 2021-08-27T07:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | from math import floor
# Reads the number of days and the grams of food bought, then for every day
# the portions eaten by a dog and a cat; every third day 10% of that day's
# food is also handed out as biscuits.  Finally prints totals/percentages.
days = int(input())
bought_food = float(input())

total_food_dog = 0
total_eaten_cat = 0
biscuits = 0

for day in range(1, days + 1):
    dog_eaten = int(input())
    cat_eaten = int(input())
    total_food_dog += dog_eaten
    total_eaten_cat += cat_eaten
    if day % 3 == 0:
        # every third day the pets also get 10% of that day's food as biscuits
        biscuits += (dog_eaten + cat_eaten) * 0.1

total_food = total_food_dog + total_eaten_cat
final = total_food / bought_food * 100
dog = total_food_dog / total_food * 100
cat = total_eaten_cat / total_food * 100

print(f'Total eaten biscuits: {floor(biscuits)}gr.')
print(f'{final:.2f}% of the food has been eaten.')
print(f'{dog:.2f}% eaten from the dog.')
print(f'{cat:.2f}% eaten from the cat.')
| [
"noreply@github.com"
] | SilviaKoynova.noreply@github.com |
16fdd87db79816926ec1b2ace8eb8c4e396aa4a0 | dd31b966b1fbbd129b8657aa35b1dd460cbd1675 | /sandbox/python/wcpan.ftp/wcpan/ftp/network.py | 2af7106f582b54be4a85b0f0c0a63dae00995630 | [] | no_license | legnaleurc/junkcode | 9e48608d3ecafef0d7103240458f622aee87e077 | f63b04d04853fb7d3ae0002b24657a9fd781b648 | refs/heads/master | 2023-07-08T04:27:01.934100 | 2023-06-26T05:14:18 | 2023-06-26T05:14:39 | 27,518 | 5 | 5 | null | 2019-05-12T17:22:15 | 2008-06-22T19:17:04 | C++ | UTF-8 | Python | false | false | 5,345 | py | import socket
import subprocess
import os.path
import tornado.tcpserver as tts
import tornado.ioloop
import tornado.iostream
import tornado.locks as tl
import tornado.netutil as tn
class FTPServer(tts.TCPServer):
    """Accepts FTP control connections and spawns one ControlChannel per client."""

    def __init__(self, path):
        super().__init__()
        self.path = path  # root directory served to clients
        self._loop = tornado.ioloop.IOLoop.current()

    async def handle_stream(self, stream, address):
        # Hand the new client off to its own control-channel session; the
        # session runs as a scheduled callback, not inline here.
        channel = ControlChannel(self, stream, address)
        self._loop.add_callback(channel.start)
class ControlChannel(object):
    """One FTP control-channel session: parses commands, sends RFC 959 replies.

    A PASV data connection, when opened, is tracked in self.data_connection
    (a PassiveListener).  REST stores a byte offset that the next RETR honours.
    """

    def __init__(self, server, stream, address):
        self.server = server          # owning FTPServer (provides .path)
        self.stream = stream          # tornado IOStream for the control socket
        self.address = address        # (host, port) of the connected client
        self.encoding = "utf-8"
        self._cwd = '/'
        self._transfer_mode = 'binary'
        self.start_position = 0       # offset set by REST, consumed by RETR

    async def writeline(self, value):
        """Send one CRLF-terminated reply line and echo it for debugging."""
        value += "\r\n"
        await self.stream.write(value.encode(self.encoding))
        print('->', value)

    async def readline(self):
        """Read one CRLF-terminated command line from the client."""
        value = await self.stream.read_until(b"\r\n")
        value = value.decode(self.encoding)
        value = value.rstrip("\r\n")
        return value

    async def start(self):
        """Greet the client, then loop handling commands until disconnect."""
        print("Incoming connection from {}".format(self.address))
        await self.writeline("220 Service ready for new user.")
        self.running = True
        while self.running:
            try:
                await self.handle_command()
            except tornado.iostream.StreamClosedError:
                self.stop()

    def stop(self):
        """Terminate the session and close the control socket."""
        print("Closing connection from {}".format(self.address))
        self.running = False
        self.stream.close()

    async def handle_command(self):
        """Read one command, dispatch on its verb, and reply."""
        command = await self.readline()
        print("Received command: " + command)
        command = command.split(" ", 1)
        if len(command) == 1:
            command = command[0]
            parameters = ""
        else:
            command, parameters = command
        if command == "USER":
            # No authentication: any user is accepted immediately.
            await self.writeline("230 User logged in, proceed.")
        elif command == "SYST":
            await self.writeline("215 UNIX Type: L8")
        elif command == "FEAT":
            await self.writeline("211-")
            await self.writeline(" PASV")
            await self.writeline(" REST")
            await self.writeline("211 ")
        elif command == "PWD":
            await self.writeline('257 "{}"'.format(self._cwd))
        elif command == "CWD":
            # NOTE(review): the path is stored verbatim -- no normalization
            # or existence check is performed.
            self._cwd = parameters
            await self.writeline('250 Requested file action okay, completed.')
        elif command == "TYPE":
            await self.writeline('200 __TYPE_OK__')
        elif command == "PASV":
            self.data_connection = PassiveListener(self.address[0])
            await self.writeline("227 Entering Passive Mode " + self.data_connection.format_host() + ".")
            await self.data_connection.wait_for_ready()
        elif command == "LIST":
            await self.writeline("150 File status okay; about to open data connection.")
            await self.data_connection.send(
                subprocess.check_output(["ls", "-l", self.server.path]))
            await self.writeline("226 Closing data connection.")
        elif command == "RETR":
            await self.writeline("150")
            filename = os.path.basename(parameters)
            # Wait for opened data connection ?
            # Fix: read inside a context manager so the handle is closed even
            # if the data connection fails mid-transfer (previously leaked).
            with open(os.path.join(self.server.path, filename), "rb") as fh:
                fh.seek(self.start_position)  # honour a preceding REST
                data = fh.read()
            await self.data_connection.send(data)
            self.start_position = 0
            await self.writeline("226")
        elif command == "REST":
            self.start_position = int(parameters)
            await self.writeline("350")
        elif command == "QUIT":
            await self.writeline("221 Service closing control connection.")
            # Fix: the original called self.close(), which does not exist on
            # this class (AttributeError on QUIT); stop() is the shutdown path.
            self.stop()
        else:
            await self.writeline("502 Command not implemented.")
class ChannelHandler(object):
    """Helper tying a local host address to passive data-channel listeners."""
    def __init__(self, host):
        # Local address new PassiveListener instances will bind on.
        self._host = host
    def create_passive_listener(self):
        return PassiveListener(self._host)
    async def send_passive_port(self, response):
        # NOTE(review): self._control is never assigned anywhere in this
        # class, so calling this raises AttributeError.  Presumably a
        # control-channel object was meant to be injected -- confirm intent.
        await self._control.send_line(response)
class PassiveListener(tts.TCPServer):
    """One-shot listener for a PASV data connection.

    Binds an ephemeral port immediately in __init__; the first accepted
    stream is stored and the listener then stops accepting.
    """

    def __init__(self, host):
        super().__init__()
        self._host = host
        self._stream = None
        self._ready_lock = tl.Condition()
        # Fix: the original referenced ti.IOLoop, but no "ti" alias is ever
        # imported (the module imports tornado.ioloop unaliased).
        self._loop = tornado.ioloop.IOLoop.current()
        # TODO support IPv6?
        socket_list = tn.bind_sockets(0, address=self._host,
                                      family=socket.AF_INET)
        self.add_sockets(socket_list)
        self.start()

    def get_socket(self):
        # Exactly one socket is bound above; return it.
        return list(self._sockets.values())[0]

    def get_address(self):
        """Return "h1,h2,h3,h4,p1,p2" for the bound address, per RFC 959 PASV."""
        addr = socket.gethostbyname(socket.gethostname())
        port = self.get_socket().getsockname()[1]
        result = addr.replace(".", ",")
        result += "," + str(port // 256)
        result += "," + str(port % 256)
        return result

    def format_host(self):
        # Added: ControlChannel's 227 reply calls this; RFC 959 wraps the
        # host/port tuple in parentheses.
        return "(" + self.get_address() + ")"

    async def send(self, data):
        """Added: write *data* on the data connection, then close it.

        ControlChannel's LIST/RETR branches call this.  Waits for the client
        to connect if it has not done so yet.
        """
        stream = self._stream
        if stream is None:
            stream = await self.wait_for_ready()
        await stream.write(data)
        stream.close()

    async def handle_stream(self, stream, addr):
        self._stream = stream
        self._ready_lock.notify()
        self._ready_lock = None
        # Stop accepting: a PASV listener serves exactly one connection.
        self._loop.add_callback(self.stop)

    async def wait_for_ready(self):
        # NOTE(review): must be awaited before the client connects --
        # handle_stream clears _ready_lock and a notify with no waiter is
        # lost, so callers racing the connection can hang or hit None here.
        await self._ready_lock.wait()
        return self._stream
| [
"legnaleurc@gmail.com"
] | legnaleurc@gmail.com |
e59c90e5865491891f0f6300d06a803ab6505488 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03646/s038304175.py | 851d87f751819207824900501d7ddeaf0e2ff6a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | k = int(input())
# Competitive-programming construction (k read on the previous line):
# distribute k over a fixed sequence of 50 values.
rest = k % 50
# The output sequence always has exactly 50 elements.
print(50)
# First `rest` entries get 50 + k//50, the rest get 50 + (k//50 - 1) - rest.
# NOTE(review): appears to invert a "subtract from the max" operation count
# for an AtCoder problem -- confirm against the problem statement.
lst = [50 + k // 50 if i < rest else 50 + (k // 50 - 1) - rest for i in range(50)]
print(*lst)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c052aa6a6a116e3f4cf56a227c2d8fe9cfdc2b9e | 1419418226b6ba0f510649daaf62b71554cc2284 | /clawtools/plot_coast_chiapas.py | e3af2a89b43214556f155501d3fdea2e3cbbc522 | [] | no_license | shineusn/mylife | 2ef48a777e39be2ef746c3dad16ea963d5b23e5e | 61dfa72d9047551746d26b7fe01fb5c2f1f0657a | refs/heads/master | 2020-03-22T13:44:42.422127 | 2018-02-13T18:09:43 | 2018-02-13T18:09:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | # -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from numpy import genfromtxt,unique,zeros,where,array,nan,c_,r_,argmin,squeeze,isnan
from obspy import read
from matplotlib import rcParams
# NOTE(review): etaclip=50 is immediately superseded by etaclip=4 below;
# the first assignment is dead.
etaclip=50
rcParams.update({'font.size': 22})
# GeoClaw fixed-grid output: per-point maxima and the auxiliary bathymetry.
fgmax_file=u'/Users/dmelgar/Tsunamis/tehuantepec/_output/fort.FG1.valuemax'
aux_file='/Users/dmelgar/Tsunamis/tehuantepec/_output/fort.FG1.aux1'
# wet_tol is not used in this section -- presumably leftover or used later.
wet_tol=0.001
etaclip=4
# Minimum AMR refinement level accepted when computing eta.
minamr=3
#Get maximum amplitude first
lon=genfromtxt(fgmax_file,usecols=0)
lat=genfromtxt(fgmax_file,usecols=1)
amr=genfromtxt(fgmax_file,usecols=2)
H_in=genfromtxt(fgmax_file,usecols=3)
b_in=genfromtxt(aux_file)
unique_amr=unique(amr)
#i=where(unique_amr>0)[0]
#unique_amr=unique_amr[i]
eta=zeros(len(H_in))
H=zeros(len(H_in))
b=zeros(len(H_in))
# For each AMR level, eta = flow depth + bathymetry from the column matching
# that level; points below minamr are zeroed out.
for k in range(len(unique_amr)):
    i=where(amr==unique_amr[k])[0]
    if unique_amr[k]<minamr:
        eta[i]=0
    else:
        eta[i]=H_in[i]+b_in[i,int(unique_amr[k]+1)]
        H[i]=H_in[i]
        b[i]=b_in[i,int(unique_amr[k]+1)]
#i=where(b<0)[0]
#lon=lon[i]
#lat=lat[i]
#eta=eta[i]
#H=H[i]
#b=b[i]
#
#i=where(H<10)[0]
#lon=lon[i]
#lat=lat[i]
#eta=eta[i]
#H=H[i]
#b=b[i]
#remove onshore points
#Wphase and slip inversion clipping
#iclip=where(lat>-25)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
#PGD clipping
#etaclip=0.6
#iclip=where(lat>-28.6)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
#iclip=where(lat<-34.7)[0]
#i=where(eta[iclip]>etaclip)[0]
#eta[iclip[i]]=nan
#i=where(isnan(eta)==False)[0]
#eta=eta[i]
#lat=lat[i]
| [
"dmelgar@berkeley.edu"
] | dmelgar@berkeley.edu |
8db418acd2d08188e41e2f04b93c36f5e140c62c | c701dbdd743aa807d772bad99a4d903088282fb3 | /Proj_Centroid_Loss_LeNet/LeNet_plus_centerloss/network.py | 56b351535335b2b388cb9fcf18aa0451c726b7d0 | [] | no_license | Beerkay/deep_learning_notes | 6d5230c95cf67e6330f0d5ff87186515b346d157 | 7f8c7312ddf8ed1e46bf0e6971565b911eb8bc92 | refs/heads/master | 2021-05-04T05:54:42.628731 | 2016-10-13T20:27:55 | 2016-10-13T20:27:55 | 71,084,243 | 5 | 6 | null | 2016-10-16T23:43:51 | 2016-10-16T23:43:51 | null | UTF-8 | Python | false | false | 7,285 | py | import math
import tensorflow as tf
from termcolor import colored as c, cprint
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# Downloads/loads MNIST at import time (module-level side effect); one-hot labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import helpers
### helper functions
from functools import reduce
def fc_layer(x, weight_shape, bias_shape, layer_name):
    """Fully-connected layer; returns (weight, bias, x @ weight + bias).

    Weights are drawn from a truncated normal whose stddev is scaled by
    1/sqrt(prod(weight_shape)) so the layer does not start at zero.
    """
    with tf.name_scope(layer_name):
        # initializing at 0 is no-good.
        fan = reduce(lambda acc, dim: acc * dim, weight_shape)
        scale = 1.0 / math.sqrt(float(fan))
        weight = tf.Variable(
            tf.truncated_normal(weight_shape, mean=0.5, stddev=scale),
            name='weight')
        bias = tf.Variable(tf.zeros(bias_shape), name='bias')
        return weight, bias, tf.matmul(x, weight) + bias
# main network build stages
def inference():
    """Build the LeNet++-style feature extractor.

    Returns (input placeholder [None, 784], 2-D deep-feature tensor); the
    2-D embedding can be visualized directly and fed to the loss heads.
    """
    x = tf.placeholder(tf.float32, shape=[None, 784], name='input')
    image = tf.reshape(x, [-1, 28, 28, 1])
    # Stage 1: two 5x5 PReLU convolutions at 32 channels, then 2x2 max-pool.
    with tf.name_scope('conv_layer_1'):
        W_conv1 = helpers.weight_variable([5, 5, 1, 32], 'W_conv1')
        b_conv1 = helpers.bias_variable([32], 'bias_conv1')
        alphas_conv1 = helpers.bias_variable([32], 'alpha_conv1')
        layer_conv_1 = helpers.prelu(helpers.conv2d(image, W_conv1) + b_conv1, alphas_conv1)
        W_conv1_b = helpers.weight_variable([5, 5, 32, 32], 'W_conv1_b')
        b_conv1_b = helpers.bias_variable([32], 'bias_conv1_b')
        alphas_conv1_b = helpers.bias_variable([32], 'alpha_conv1_b')
        layer_conv_1_b = helpers.prelu(helpers.conv2d(layer_conv_1, W_conv1_b) + b_conv1_b, alphas_conv1_b)
        stage_1_pool = helpers.max_pool_2x2(layer_conv_1_b)
    # Stage 2: two 5x5 PReLU convolutions at 64 channels, then 2x2 max-pool.
    with tf.name_scope('conv_layer_2'):
        W_conv2 = helpers.weight_variable([5, 5, 32, 64], "W_conv2")
        b_conv2 = helpers.bias_variable([64], 'bias_conv2')
        alphas_conv2 = helpers.bias_variable([64], 'alpha_conv2')
        layer_conv_2 = helpers.prelu(helpers.conv2d(stage_1_pool, W_conv2) + b_conv2, alphas_conv2)
        W_conv2_b = helpers.weight_variable([5, 5, 64, 64], "W_conv2_b")
        b_conv2_b = helpers.bias_variable([64], 'bias_conv2_b')
        alphas_conv2_b = helpers.bias_variable([64], 'alpha_conv2_b')
        layer_conv_2_b = helpers.prelu(helpers.conv2d(layer_conv_2, W_conv2_b) + b_conv2_b, alphas_conv2_b)
        stage_2_pool = helpers.max_pool_2x2(layer_conv_2_b)
        # stage_2_pool_flat = tf.reshape(stage_2_pool, [-1, 7 * 7 * 64])
    # Stage 3: two 5x5 PReLU convolutions at 128 channels, pool, then flatten.
    with tf.name_scope('conv_layer_3'):
        W_conv3 = helpers.weight_variable([5, 5, 64, 128], "W_conv3")
        b_conv3 = helpers.bias_variable([128], 'bias_conv3')
        alphas_conv3 = helpers.bias_variable([128], 'alpha_conv3')
        layer_conv_3 = helpers.prelu(helpers.conv2d(stage_2_pool, W_conv3) + b_conv3, alphas_conv3)
        # stage_3_pool = helpers.max_pool_2x2(layer_conv_3)
        # stage_3_pool_flat = tf.reshape(stage_3_pool, [-1, 4 * 4 * 256])
        W_conv3_b = helpers.weight_variable([5, 5, 128, 128], "W_conv3_b")
        b_conv3_b = helpers.bias_variable([128], 'bias_conv3_b')
        alphas_conv3_b = helpers.bias_variable([128], 'alpha_conv3_b')
        layer_conv_3_b = helpers.prelu(helpers.conv2d(layer_conv_3, W_conv3_b) + b_conv3_b, alphas_conv3_b)
        stage_3_pool = helpers.max_pool_2x2(layer_conv_3_b)
        stage_3_pool_flat = tf.reshape(stage_3_pool, [-1, 4 * 4 * 128])
    # Projection down to the 2-D embedding used by the loss heads.
    with tf.name_scope('fc_layer_1'):
        W_fc1 = helpers.weight_variable([4 * 4 * 128, 2], "W_fc1")
        # W_fc1 = helpers.weight_variable([7 * 7 * 64, 2], "W_fc1")
        b_fc1 = helpers.bias_variable([2], 'bias_fc1')
        alphas_fc1 = helpers.bias_variable([2], 'alpha_conv3')
        output = helpers.prelu(tf.matmul(stage_3_pool_flat, W_fc1) + b_fc1, alphas_fc1)
    # with tf.name_scope('fc_output'):
    #     W_output = helpers.weight_variable([500, 10], "W_putput")
    #     b_output = helpers.bias_variable([10], 'bias_output')
    #     output = tf.nn.relu(tf.matmul(h_fc1, W_output) + b_output)
    # with tf.name_scope('output'):
    #     W_output = helpers.weight_variable([2, 10], "W_output")
    #     b_output = helpers.bias_variable([10])
    #     output = tf.nn.relu(tf.matmul(h_fc2, W_output) + b_output)
    return x, output
def loss(deep_features):
    """Softmax cross-entropy head on the 2-D deep features.

    Returns (labels placeholder, logits, mean cross-entropy scalar).
    """
    with tf.name_scope('softmax_loss'):
        batch_labels = tf.placeholder(tf.float32, name='labels')
        W_loss = helpers.weight_variable([2, 10], "W_loss")
        bias_loss = tf.Variable(
            tf.truncated_normal(shape=[10], stddev=1e-4, mean=1e-1), 'bias_loss')
        # NOTE(review): an earlier comment claimed the bias is unused, but
        # bias_loss *is* added into the logits below.
        logits = tf.matmul(deep_features, W_loss) + bias_loss
        # Manual cross-entropy: -mean(labels * log_softmax(logits)) per row.
        cross_entropy = - tf.reduce_mean(
            tf.mul(batch_labels, tf.nn.log_softmax(logits)),
            reduction_indices=[1]
        )
        xentropy_mean = tf.reduce_mean(cross_entropy, name="xentropy_mean")
        tf.scalar_summary(xentropy_mean.op.name, xentropy_mean)
    return batch_labels, logits, xentropy_mean
def center_loss(deep_features, labels):
    """Center loss: mean squared distance of each feature to its class centroid.

    deep_features: [batch, 2] embeddings; labels: [batch, 10] one-hot.
    Returns (spread, centroids, spread) -- first and last are the same tensor.
    """
    with tf.name_scope('center_loss'):
        features_expanded = tf.reshape(deep_features, shape=[-1, 2, 1])
        labels_expanded = tf.reshape(labels, shape=[-1, 1, 10])
        # Per-class sample counts within the batch.
        samples_per_label = tf.reduce_sum(
            labels_expanded,
            reduction_indices=[0]
        )
        # Per-class mean of the features: masked sum divided by count.
        # NOTE(review): a class absent from the batch yields a 0/0 division
        # here (NaN centroid) -- confirm batches always contain all classes.
        centroids = \
            tf.reduce_sum(
                tf.reshape(deep_features, shape=[-1, 2, 1]) * \
                labels_expanded,
                reduction_indices=[0]
            ) / samples_per_label
        centroids_expanded = tf.reshape(centroids, shape=[1, 2, 10]) * labels_expanded
        # Halved mean squared distance of each sample to its own centroid.
        spread = \
            tf.reduce_mean(
                tf.reduce_sum(
                    tf.square(
                        features_expanded * labels_expanded - centroids_expanded
                    ),
                    reduction_indices=[1, 2]
                )
            ) / 2.0
        tf.scalar_summary(spread.op.name, spread)
        return spread, centroids, spread
def training(loss):
    """Build SGD training ops for *loss*.

    Returns (learning-rate placeholder, train op, global-step variable).
    """
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    with tf.name_scope('training'):
        global_step = tf.Variable(0, name='global_step', trainable=False)
        sgd = tf.train.GradientDescentOptimizer(learning_rate)
        train_op = sgd.minimize(loss, global_step=global_step)
    return learning_rate, train_op, global_step
def evaluation(logits, labels):
    """Top-1 accuracy of *logits* against one-hot *labels*, with a summary."""
    targets = tf.cast(tf.argmax(labels, dimension=1), dtype=tf.int32)
    correct = tf.nn.in_top_k(logits, targets, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float64), name='accuracy')
    tf.scalar_summary(accuracy.op.name, accuracy)
    return accuracy
| [
"yangge1987@gmail.com"
] | yangge1987@gmail.com |
0692a79c84ee0e748f2693731a6624ae00bcf533 | 41586d36dd07c06860b9808c760e2b0212ed846b | /network/dns/openresolv/actions.py | 67d9d61544066f741f397767005544aa40d70f26 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) YEAR, YOUR NAME
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from inary.actionsapi import autotools
from inary.actionsapi import inarytools
from inary.actionsapi import get
def setup():
    # Run the autotools configure step with inary's default flags.
    autotools.configure()
def build():
    # Compile the package with make.
    autotools.make()
def install():
    # Stage the build into the packaging sandbox directory.
    autotools.rawInstall("DESTDIR={}".format(get.installDIR()))
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
ef210239883c7e07621deacc3740c78187b1cc96 | 897914e8acf6e14f9dc42f43f342e42e38bcf5c5 | /pigeon_app/asgi.py | 4957b9f14d49662efc818c175dfdf0e5f585e8ab | [] | no_license | ShazeRx/pigeon_app | aa1f5694419db723ba39d7eb0ef1a01cdcd22464 | 70d0ba1f019ebd322695a7b322af85554118a51e | refs/heads/master | 2023-08-17T02:01:36.796184 | 2021-06-18T19:52:32 | 2021-06-18T19:52:32 | 412,397,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for pigeon_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pigeon_app.settings')
# Module-level ASGI callable that servers (daphne/uvicorn) import.
application = get_asgi_application()
| [
"you@example.com"
] | you@example.com |
fdbbf3441857f1b3067480de3b89e390ccfce659 | 0b93015540df1aa52435d4a7d24a6f7ddf69e60f | /libreria.py | 32e5073ac1f6afe6322c9b43441da8fda0017d40 | [] | no_license | smith-sanchez/t10_viilavicencio.carrion | f1c9a54838594c4e0a2aa6c2f16c2bec5576b857 | d761db7e6685a99ef3312aad2817ecf0d23cecfb | refs/heads/master | 2020-12-08T21:04:52.484495 | 2020-01-10T17:33:40 | 2020-01-10T17:33:40 | 233,095,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,559 | py | # libreria
# funcion validar talla
def validar_talla(num):
    """Return True when *num* is an int (note: bools count as ints too)."""
    return isinstance(num, int)
# funcion validar rango
def validar_rango(num, ri, rf):
    """True when *num* is an int inside the closed range [ri, rf].

    The integer check (formerly delegated to validar_talla) is inlined.
    """
    if not isinstance(num, int):
        return False
    return ri <= num <= rf
# funcion pedir rango
def pedir_rango(msg, ri, rf):
    """Prompt with *msg* until the user types an integer inside [ri, rf]."""
    valor = -1
    while not validar_rango(valor, ri, rf):
        valor = input(msg)
        if valor.isdigit():
            valor = int(valor)
    return valor
# funcion pedir edad
def pedir_edad(msg, ri, rf):
    """Prompt until a valid integer age inside [ri, rf] is entered.

    Behaviour is identical to pedir_rango(); the duplicated prompt loop is
    replaced by delegation, keeping this name as an alias for callers.
    """
    return pedir_rango(msg, ri, rf)
# funcion pedir nombre
def pedir_nombre(msg):
    """Prompt until a string of length >= 3 is entered (reuses validar_color)."""
    nombre = ""
    while not validar_color(nombre):
        nombre = input(msg)
    return nombre
# funcion validar color
def validar_color(color):
    """True when *color* is a str with at least 3 characters."""
    return isinstance(color, str) and len(color) >= 3
# funcion pedir solo dos colores
# solo puede elejir una--> blanco / negro
def pedir_color(msg):
    """Prompt until the answer is exactly "blanco" or "negro"."""
    eleccion = ""
    while not validar_color(eleccion):
        eleccion = input(msg)
    while eleccion not in ("blanco", "negro"):
        eleccion = input(msg)
    return eleccion
# fumcion pedir colores
# solo se puede ingresar colores seleccionadas segun el programador
def pedir_colores(msg):
    """Prompt until the answer is one of the colours accepted by the program."""
    permitidos = ("blanco", "negro", "amarillo", "rojo", "rosado",
                  "dorado", "verde", "celeste", "azul")
    eleccion = ""
    while not validar_color(eleccion):
        eleccion = input(msg)
    while eleccion not in permitidos:
        eleccion = input(msg)
    return eleccion
# funciom validar marca
def validar_marca(sapatilla):
    """True when *sapatilla* is a str longer than 2 characters."""
    return isinstance(sapatilla, str) and len(sapatilla) > 2
# funcion pedir marca
def pedir_marca(marca):
    """Prompt with *marca* until a string longer than 2 characters is entered."""
    respuesta = ""
    while not validar_marca(respuesta):
        respuesta = input(marca)
    return respuesta
# fin_def
# funcion pedir postre
# condicion ==> ( gelatina / mazamorra )
def pedir_marcas(marca):
    """Prompt until the dessert is exactly "gelatina" or "mazamorra"."""
    postre = ""
    while not validar_marca(postre):
        while postre not in ("gelatina", "mazamorra"):
            postre = input(marca)
    return postre
# fin_def
# funcion pedir elejir menu con la condicion
# si solo es ( desayuno / almuerzo / cena )
def pedir_marcas_condic(marca):
    """Prompt until the menu choice is "desayuno", "almuerzo" or "cena"."""
    menu = ""
    while not validar_marca(menu):
        while menu not in ("desayuno", "almuerzo", "cena"):
            menu = input(marca)
    return menu
# fin_def
# funcion validar entero
def validar_entero(numero):
    """True when *numero* is a non-negative int."""
    return isinstance(numero, int) and numero >= 0
# funcion pedir entero
def pedir_entero(msg):
    """Prompt with *msg* until a non-negative integer is typed."""
    valor = -1
    while not validar_entero(valor):
        valor = input(msg)
        if valor.isdigit():
            valor = int(valor)
    return valor
# funcion validar tamaño
def validar_tamano(tamano):
    """True when *tamano* is a str longer than 3 characters."""
    return isinstance(tamano, str) and len(tamano) > 3
def pedir_tamano(msg):
    """Prompt until the size is exactly "grande" or "pequeño"."""
    talla = ""
    while not validar_tamano(talla):
        talla = input(msg)
        while talla not in ("grande", "pequeño"):
            talla = input(msg)
    return talla
# funcion validar mes
def validar_mes(mes):
    """True when *mes* is a str longer than 4 characters.

    NOTE: "mayo" (4 letters) fails this check even though it is a month.
    """
    return isinstance(mes, str) and len(mes) > 4
def pedir_peli(msg):
    """Prompt until one of the known film/series titles is typed."""
    titulos = ("star wars", "stand de besos", "naruto",
               "dragon ball", "el barco", "la casa de papel")
    eleccion = ""
    while not validar_mes(eleccion):
        while eleccion not in titulos:
            eleccion = input(msg)
    return eleccion
def pedir_mes(msg):
    """Prompt until a valid lowercase Spanish month name is entered.

    Fix: the original pre-validated with validar_mes (len > 4), which made
    "mayo" (4 letters) impossible to accept even though it is in the month
    list -- typing "mayo" re-prompted forever.  Membership in the month
    tuple is now the only acceptance criterion.
    """
    meses = ("enero", "febrero", "marzo", "abril", "mayo", "junio",
             "julio", "agosto", "septiembre", "octubre", "noviembre",
             "diciembre")
    n = ""
    while n not in meses:
        n = input(msg)
    return n
# funcion pedir escuela
def pedir_escuela(msg):
    """Prompt until one of the listed degree programmes is typed."""
    escuelas = ("ing. electronica", "matematica", "ing. informatica",
                "estadistica", "fisica")
    n = ""
    while not validar_mes(n):
        n = input(msg)
        while n not in escuelas:
            n = input(msg)
    return n
def pedir_dia(msg):
    """Prompt until a lowercase Spanish weekday name is typed."""
    dias = ("lunes", "martes", "miercoles", "jueves", "viernes",
            "sabado", "domingo")
    n = ""
    while not validar_mes(n):
        n = input(msg)
        while n not in dias:
            n = input(msg)
    return n
def pedir_curso(msg):
    """Prompt until the course is matematica, programacion or analisis."""
    cursos = ("matematica", "programacion", "analisis")
    n = ""
    while not validar_mes(n):
        n = input(msg)
        while n not in cursos:
            n = input(msg)
    return n
# funcion validar año
def validar_ano(ano):
    """True when *ano* is an int between 1950 and 2020 inclusive."""
    return isinstance(ano, int) and 1950 <= ano <= 2020
# funcion pedir año
def pedir_ano(msg):
    """Prompt until an integer year between 1950 and 2020 is typed."""
    anio = 0
    while not validar_ano(anio):
        anio = input(msg)
        if anio.isdigit():
            anio = int(anio)
    return anio
# funcion validar telefono
def validar_telefono(telefono):
    """True when *telefono* is a 9-digit int starting with 9 (e.g. 925780779)."""
    return isinstance(telefono, int) and 900000000 <= telefono <= 999999999
# funcion pedir telefono
def pedir_telefono(msg):
    """Prompt until a valid 9-digit phone number starting with 9 is typed."""
    numero = -1
    while not validar_telefono(numero):
        numero = input(msg)
        if numero.isdigit():
            numero = int(numero)
    return numero
# funcion validar dni
def validar_dni(dni):
    """True when *dni* is an 8-digit int (e.g. 74286646)."""
    return isinstance(dni, int) and 10000000 <= dni <= 99999999
# funcion pedir dni
def pedir_dni(msg):
    """Prompt until a valid 8-digit DNI number is typed."""
    documento = -1
    while not validar_dni(documento):
        documento = input(msg)
        if documento.isdigit():
            documento = int(documento)
    return documento
def guardar_d(nombre_archivo, contenido, modo):
    """Write *contenido* to *nombre_archivo* opened with mode *modo*.

    Uses a context manager so the handle is closed even if write() raises
    (the original leaked the handle on error).
    """
    with open(nombre_archivo, modo) as archivo:
        archivo.write(contenido)
def obtener_datos(nombre_archivos):
    """Return the full text content of the file *nombre_archivos*.

    Uses a context manager so the handle is closed even if read() raises.
    """
    with open(nombre_archivos, "r") as archivo:
        return archivo.read()
| [
"noreply@github.com"
] | smith-sanchez.noreply@github.com |
ee47f2e5c8e8db84a4778c986ab56b7a70348e2f | 7c1df6de8b6eb64f941a189d6015161713efd194 | /weather/forms.py | 7ffd4874167a717459eef52aadb9df4cbddc8336 | [] | no_license | phuclhv/weather_app_django | ad3cbd1d6d46a5bc41cd1c1f57e998e456faaa9f | 01587369538c873ab7d3a3550dc8ca2093ea5236 | refs/heads/master | 2020-09-06T21:36:56.957805 | 2019-11-09T01:13:37 | 2019-11-09T01:13:37 | 220,561,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django.forms import ModelForm, TextInput
from .models import City
class Cityform(ModelForm):
    """Form for creating a City, rendering the name field as a styled text input."""

    class Meta:
        model = City
        fields = ['name']
        # Fix: Django's ModelForm option is "widgets" (plural); the original
        # "widget" key was silently ignored, so the CSS class and placeholder
        # never reached the rendered <input>.
        widgets = {'name': TextInput(attrs={'class': 'input', 'placeholder': 'City name'})}
"you@example.com"
] | you@example.com |
e290a946b03dfac33f19285e2b51bec5e6bd5377 | f719dc32c437a15c0eb7a229adc2848e4646a172 | /billy/tests/functional/test_company.py | 2a6ab678f297833c0c7ecc07b9f20901429996db | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | grang5/billy | db3a88b650962f25b8bdea80a81c5efa5d80dec0 | a723c3aca18f817829ae088f469fabc5bea9d538 | refs/heads/master | 2021-04-18T19:36:05.586549 | 2014-06-16T21:47:37 | 2014-06-16T21:47:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,232 | py | from __future__ import unicode_literals
import json
import mock
from freezegun import freeze_time
from billy.utils.generic import utc_now
from billy.tests.functional.helper import ViewTestCase
@freeze_time('2013-08-16')
class TestCompanyViews(ViewTestCase):
    """Functional tests for the /v1/companies endpoints (create, get, callbacks).

    Time is frozen so created_at/updated_at assertions are deterministic.
    Authentication uses HTTP basic auth via the REMOTE_USER environ key.
    """
    @mock.patch('billy.tests.fixtures.processor.DummyProcessor.register_callback')
    def test_create_company(self, register_callback_method):
        """Creation returns guid/api_key, hides processor_key, registers the callback URL."""
        processor_key = 'MOCK_PROCESSOR_KEY'
        now = utc_now()
        now_iso = now.isoformat()
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        self.failUnless('processor_key' not in res.json)
        self.failUnless('guid' in res.json)
        self.failUnless('api_key' in res.json)
        self.assertEqual(res.json['created_at'], now_iso)
        self.assertEqual(res.json['updated_at'], now_iso)
        company = self.company_model.get(res.json['guid'])
        expected_url = 'http://localhost/v1/companies/{}/callbacks/{}/'.format(
            company.guid, company.callback_key,
        )
        register_callback_method.assert_called_once_with(company, expected_url)
    def test_create_company_with_random_callback_keys(self):
        """Callback keys of freshly created companies never collide."""
        times = 100
        callback_keys = set()
        for _ in range(times):
            res = self.testapp.post(
                '/v1/companies',
                dict(processor_key='MOCK_PROCESSOR_KEY'),
                status=200
            )
            company = self.company_model.get(res.json['guid'])
            callback_keys.add(company.callback_key)
        # ensure callback keys won't repeat
        self.assertEqual(len(callback_keys), times)
    @mock.patch('billy.tests.fixtures.processor.DummyProcessor.callback')
    def test_callback(self, callback_method, slash=False):
        """POSTing JSON to the callback URL forwards the payload to the processor."""
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key='MOCK_PROCESSOR_KEY'),
        )
        guid = res.json['guid']
        payload = dict(foo='bar')
        company = self.company_model.get(guid)
        url = '/v1/companies/{}/callbacks/{}'.format(guid, company.callback_key)
        if slash:
            url = url + '/'
        res = self.testapp.post(
            url,
            json.dumps(payload),
            headers=[(b'content-type', b'application/json')],
        )
        self.assertEqual(res.json['code'], 'ok')
        callback_method.assert_called_once_with(company, payload)
    @mock.patch('billy.tests.fixtures.processor.DummyProcessor.callback')
    def test_callback_with_slash_ending(self, callback_method):
        """Same as test_callback but with a trailing slash on the URL."""
        self.test_callback(slash=True)
    def test_create_company_with_bad_parameters(self):
        """Missing processor_key yields 400."""
        self.testapp.post(
            '/v1/companies',
            status=400,
        )
    def test_get_company(self):
        """GET with the company's own api_key returns the creation payload."""
        processor_key = 'MOCK_PROCESSOR_KEY'
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        created_company = res.json
        guid = created_company['guid']
        api_key = str(created_company['api_key'])
        res = self.testapp.get(
            '/v1/companies/{}'.format(guid),
            extra_environ=dict(REMOTE_USER=api_key),
            status=200,
        )
        self.assertEqual(res.json, created_company)
    def test_get_company_with_bad_api_key(self):
        """Wrong or missing api_key is rejected with 403."""
        processor_key = 'MOCK_PROCESSOR_KEY'
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        created_company = res.json
        guid = created_company['guid']
        self.testapp.get(
            '/v1/companies/{}'.format(guid),
            extra_environ=dict(REMOTE_USER=b'BAD_API_KEY'),
            status=403,
        )
        self.testapp.get(
            '/v1/companies/{}'.format(guid),
            status=403,
        )
    def test_get_non_existing_company(self):
        """Unknown company guid yields 404 even with valid credentials."""
        processor_key = 'MOCK_PROCESSOR_KEY'
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        api_key = str(res.json['api_key'])
        self.testapp.get(
            '/v1/companies/NON_EXIST',
            extra_environ=dict(REMOTE_USER=api_key),
            status=404
        )
    def test_get_other_company(self):
        """A company's api_key cannot read another company's record (403 both ways)."""
        processor_key = 'MOCK_PROCESSOR_KEY'
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        api_key1 = str(res.json['api_key'])
        guid1 = res.json['guid']
        res = self.testapp.post(
            '/v1/companies',
            dict(processor_key=processor_key),
            status=200
        )
        api_key2 = str(res.json['api_key'])
        guid2 = res.json['guid']
        self.testapp.get(
            '/v1/companies/{}'.format(guid2),
            extra_environ=dict(REMOTE_USER=api_key1),
            status=403,
        )
        self.testapp.get(
            '/v1/companies/{}'.format(guid1),
            extra_environ=dict(REMOTE_USER=api_key2),
            status=403,
        )
| [
"bornstub@gmail.com"
] | bornstub@gmail.com |
c7875cc5e8a302db04c8bdc305771a81583e3d0e | bf426f52cf7462ba4b8b583f0fbd3f5585a73491 | /Internet/Web/cgi-bin/tutor3.py | 8f7c848d5df8a49b7532ae06c74fe9e851df00a5 | [] | no_license | CodedQuen/Programming-Python-by-Mark-Lutz | 337b309f8ba98be3ac7585d0de0fc8d7ee4697f5 | 0397b29973ab24d476308b1f4f3c9befb3169a25 | refs/heads/master | 2022-06-09T21:19:01.891651 | 2020-05-01T01:57:13 | 2020-05-01T01:57:13 | 260,358,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!/usr/bin/python
"""
runs on the server, reads form input, prints HTML;
url=http://server-name/cgi-bin/tutor3.py
"""
import cgi
form = cgi.FieldStorage() # parse form data
print('Content-type: text/html') # plus blank line
html = """
<TITLE>tutor3.py</TITLE>
<H1>Greetings</H1>
<HR>
<P>%s</P>
<HR>"""
if not 'user' in form:
print(html % 'Who are you?')
else:
print(html % ('Hello, %s.' % form['user'].value))
| [
"noreply@github.com"
] | CodedQuen.noreply@github.com |
40ecff4fd8323752bb84797d6c98d85d52bd3e40 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02269/s412170301.py | 7790eefd3630490518a1dc914ece731d74e805f4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | se = set([])
# Python 2 script (raw_input / print statement): simple string set with
# "insert" and "find" commands, one per input line.  The target set `se`
# is created just above this block.
n = int(raw_input())
for i in range(n):
    s = raw_input().split()
    if s[0] == 'insert':
        se.add(s[1])
    elif s[0] == 'find':
        # Membership test against the set; prints one answer per query.
        if s[1] in se:
            print 'yes'
        else:
            print 'no'
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
ae2861b0b2ff85c3be0f336934ba4427b074f31d | 711a99404fe7e540f2c23f3b28b894921ec8a679 | /System_Test/Test_scripts/Click_on_Clusters.py | 6ab19409f34b459f3d2df36c0c7523d4c9ca09ac | [] | no_license | chetandg123/cQube_Testing | 14a8683799f65b1ad45ff768efb7101cb0be6389 | f4fa01111e7958740c73b3bea6dc54b2241c83d9 | refs/heads/master | 2022-10-02T06:21:38.563225 | 2020-06-04T11:42:18 | 2020-06-04T11:42:18 | 268,982,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | import time
import unittest
from selenium import webdriver
from Data.parameters import Data
from TS.reuse_func import cqube
from get_dir import pwd
class test_cluster(unittest.TestCase):
    """Selenium system test: after opening the student report in cQube,
    no cluster marker elements ("dots") should be present on the page."""
    def setUp(self):
        # Launch Chrome with the bundled driver, log in to the cQube
        # application and navigate to the student report page.
        driver_path = pwd()
        self.driver = webdriver.Chrome(executable_path=driver_path.get_driver_path())
        driver = cqube(self.driver)
        driver.open_cqube_appln()
        driver = cqube(self.driver)
        driver.login_cqube()
        driver.navigate_to_student_report()
    def test_url(self):
        time.sleep(5)
        # self.driver.find_element_by_xpath(Data.Clusters).click()
        # time.sleep(15)
        # Count elements matching the "dots" XPath; the page is expected
        # to show none of them at this point.
        dots = self.driver.find_elements_by_xpath(Data.dots)
        count = len(dots)
        self.assertEqual(0,count,msg="Failed ")
    def tearDown(self):
        time.sleep(5)
        self.driver.close()
if __name__ == "__main__":
    unittest.main()
"chetan.goudar@tibilsolutions.com"
] | chetan.goudar@tibilsolutions.com |
328911b18176b0e78a1e7b37cb8d91b1fe1a12fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03496/s065615411.py | 36ffdb7558e56bc715947a4d7a8023899a98c2e3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | from operator import itemgetter
# AtCoder ABC081/AGC-style solution: make the array non-decreasing using at
# most 2N "add A[i] to A[j]" operations.  Strategy: find the element with the
# largest absolute value, add it to every element twice (making everything
# uniformly signed), then fix order with a prefix/suffix sweep.
N = int(input())
A = list(map(int, input().split()))
# Index of the element with maximum |A[i]|.
i = max(enumerate(map(abs, A)), key=itemgetter(1))[0]
print(2*N)
if A[i]<0:
    # Dominant element is negative: propagate it from the right end,
    # making the array non-increasing in absolute terms, sweep right-to-left.
    print(i+1, N)
    print(i+1, N)
    for j in range(N, 1, -1) :
        print(j, j-1)
        print(j, j-1)
else :
    # Dominant element is non-negative: propagate from the left end,
    # then sweep left-to-right to enforce A[j] <= A[j+1].
    print(i+1, 1)
    print(i+1, 1)
    for j in range(1, N) :
        print(j, j+1)
        print(j, j+1)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d18f189a6e54f4324082f1387a36081bbc37aee3 | 5b71e2952f34dd3bb20148874d952fee06d31857 | /app/mf/crud/migrations/0114_auto_20210213_1157.py | a34fbcd3016d1fb31dcd178b570e5cf365728f97 | [] | no_license | isela1998/facebook | a937917cddb9ef043dd6014efc44d59d034102b1 | a0f2f146eb602b45c951995a5cb44409426250c5 | refs/heads/master | 2023-07-18T02:14:50.293774 | 2021-08-28T03:26:06 | 2021-08-28T03:26:06 | 400,613,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # Generated by Django 3.1.1 on 2021-02-13 16:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud', '0113_auto_20210213_1006'),
]
operations = [
migrations.AddField(
model_name='deliveryorder',
name='date_end',
field=models.DateField(default='2021-02-13', max_length=10, verbose_name='Fecha|Vencimiento'),
),
migrations.AlterField(
model_name='deliveryorder',
name='datejoined',
field=models.DateField(default='2021-02-13', max_length=10, verbose_name='Fecha|Traslado'),
),
]
| [
"infantefernandezisela@gmail.com"
] | infantefernandezisela@gmail.com |
200c2e434c8baba1cca508ed6aeeac33b6aa710d | b0f1acbe5cd30c2ade801465924c12403ab7e585 | /Corda_Api_Library/openapi_client/model/net_corda_core_contracts_state_ref.py | 190e98ef92804385fb048410022c9a0d8a6a81e0 | [] | no_license | TanzimAzadNishan/Blockchain-Based-Online-Ticketing-Platform | 94ea0f06a7761f9033f7a1dc61548ade6f6ff499 | d04a2696cab4c41743c7c5999c623002d0e57f80 | refs/heads/main | 2023-03-09T14:34:27.148340 | 2021-02-24T11:49:26 | 2021-02-24T11:49:26 | 338,845,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | """
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class NetCordaCoreContractsStateRef(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model has no enum-restricted fields and no extra validations.
    allowed_values = {
    }
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
            and the value is attribute type.
        """
        return {
            'txhash': (str,),  # noqa: E501
            'index': (int,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # Python attribute name -> JSON key on the wire (identical here).
    attribute_map = {
        'txhash': 'txhash',  # noqa: E501
        'index': 'index',  # noqa: E501
    }
    _composed_schemas = {}
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, txhash, index, *args, **kwargs):  # noqa: E501
        """NetCordaCoreContractsStateRef - a model defined in OpenAPI
        Args:
            txhash (str): Base 58 Encoded Secure Hash
            index (int):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-internal keyword arguments before treating the
        # remainder as model fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.txhash = txhash
        self.index = index
        # Remaining kwargs become dynamic attributes; unknown keys are
        # dropped when the configuration asks to discard them.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"64527153+TanzimAzadNishan@users.noreply.github.com"
] | 64527153+TanzimAzadNishan@users.noreply.github.com |
8ba3542efe1c21d14f7e692d5f0fd03380f2f97c | 4bb1a23a62bf6dc83a107d4da8daefd9b383fc99 | /contests/agc032/b.py | c8b8c5aeb0753d30b8b877cc4b14c6817e83b4a7 | [] | no_license | takushi-m/atcoder-work | 0aeea397c85173318497e08cb849efd459a9f6b6 | f6769f0be9c085bde88129a1e9205fb817bb556a | refs/heads/master | 2021-09-24T16:52:58.752112 | 2021-09-11T14:17:10 | 2021-09-11T14:17:10 | 144,509,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | # -*- coding: utf-8 -*-
from itertools import permutations
n = int(input())
res = []
for r in permutations(range(1,n+1)):
c = r[0]+r[2]
flag = True
for i in range(1,n-1):
if r[i-1]+r[i+1]!=c:
flag = False
break
if r[1]==r[n-2]==c:
pass
elif r[1]+r[n-1]==c:
res.append((r[1],r[n-1]))
else:
flag = False
if flag:
for i in range(n-1):
res.append((r[i],r[i+1]))
break
print(len(res))
for r in res:
print(r[0],r[1])
| [
"takushi-m@users.noreply.github.com"
] | takushi-m@users.noreply.github.com |
8517363ceefdfaab31040b419d1493f2f053b671 | 1c58aef845d5dc1398249d784450c3825a1a75a5 | /LeetCode/Easy/Strings/383_ransome_note.py | 38aeefec3e3d6f05551f4931a49b5b9643b88e90 | [] | no_license | AmitKulkarni23/Leet_HackerRank | b1c1d7e5915397fd971d777baf75bb0f6fd27c78 | 047b167311d2fb93a53998a20d73533a4cae2ab8 | refs/heads/master | 2021-06-01T20:24:40.659530 | 2020-02-06T22:10:14 | 2020-02-06T22:10:14 | 123,007,444 | 0 | 0 | null | 2018-07-12T18:42:40 | 2018-02-26T17:58:28 | Python | UTF-8 | Python | false | false | 1,159 | py | # Given an arbitrary ransom note string and another string containing letters from all the magazines,
# write a function that will return true if the ransom note can be constructed from the magazines ; otherwise, it will return false.
#
# Each letter in the magazine string can only be used once in your ransom note.
#
# Note:
# You may assume that both strings contain only lowercase letters.
#
# canConstruct("a", "b") -> false
# canConstruct("aa", "ab") -> false
# canConstruct("aa", "aab") -> true
def canConstruct(ransomNote, magazine):
    """Return True if ransomNote can be built from magazine's letters.

    Each magazine letter may be used at most once, so character counts
    matter, not just membership.

    :type ransomNote: str
    :type magazine: str
    :rtype: bool
    """
    from collections import Counter
    # Counter subtraction keeps only positive counts: any leftover entry is
    # a character the magazine cannot supply.  O(n + m) instead of the
    # original O(n * m) repeated list.remove() scan.
    return not (Counter(ransomNote) - Counter(magazine))
def best_leetcode_sol(ransomNote, magazine):
    """Count-comparison solution: the note is constructible iff no character
    appears more often in the note than in the magazine.

    :type ransomNote: str
    :type magazine: str
    :rtype: bool
    """
    return all(
        ransomNote.count(ch) <= magazine.count(ch)
        for ch in set(ransomNote)
    )
# Examples:
# Expected output: False, False, True
print(best_leetcode_sol("a", "b"))
print(best_leetcode_sol("aa", "ab"))
print(best_leetcode_sol("aa", "aab"))
| [
"amitrkulkarni232@gmail.com"
] | amitrkulkarni232@gmail.com |
8907112478d9caeabd453553b72d4e927ce3d745 | 31780af7a5558523def1aae5f25df3e0b084be9b | /Ex67.py | ffc1f975db4f03a3ed514384bdd5a1865506d385 | [] | no_license | sevilaybayatli/PYTHS19 | 1796615ff939f2e98ce657feeaa3efd47a2e66c6 | ae0607e215a0d8205475d124c0362c39881e5eda | refs/heads/master | 2020-07-23T16:12:17.922548 | 2020-03-23T22:03:00 | 2020-03-23T22:03:00 | 207,624,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,164 | py | import sys
import math
import matplotlib.pyplot as plt
##def histogram(items):
# for n in items:
# output=''
# times=n
# while times>0:
# output+='@'
# times-=1
# print(output)
#histogram([9,2,4,5,3])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to concatenate all elements in a list into a string and return it
#def concatenate(lisst):
# stringg=''
# for n in lisst:
# stringg+=n
#print(stringg)
#ll=input("enter the list of letters: ")
#lisst=ll.split(",")
#concatenate(lisst)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to print all even numbers from a given numbers list in the same order and stop the printing if any numbers that come after 237 in the sequence.
#numbers = [
# 386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
# 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
# 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
# 958,743, 527
# ]
#def printing(numbers):
# for n in numbers:
# if n==237:
# print(n)
# break;
# elif (n%2==0):
# print(n)
#numbers = [386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345, 399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217, 815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717, 958,743, 527]
#printing(numbers)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Write a Python program to print out a set containing all the colors from color_list_1 which are not present in color_list_2.
#def colorlist(tset):
#color_list_1 = set(["White", "Black", "Red"])
#color_list_2 = set(["Red", "Green"])
# for n in color_list_1:
# if n not in color_list_2:
# tset.append(n)
#tset=set()
#print(color_list_1.difference(color_list_2))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to compute the greatest common divisor (GCD) of two positive integers.
#def gcd(x, y):
# gcd = 1
# if x % y == 0:
# return y
# for k in range(int(y / 2), 0, -1):
# if x % k == 0 and y % k == 0:
# gcd = k
# break
# return gcd
#print(gcd(12, 17))
#print(gcd(4, 6))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to get the least common multiple (LCM) of two positive integers.
#def lcd(x,y):
# if x>y:
# z=x
# else:
# z=y
# while(True):
# if (z%x==0 and z%y==0):
# lcd=z
# return lcd
#z+=1
#print(lcd(4,6))
#print(lcd(15,17))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to sum of three given integers. However, if two values are equal sum will be zero
#x=int(input("enter a n1"))
#y=int(input("enter a n2"))
#z=int(input("enter a n3"))
#k=0
#if (x==y or x==z or y==z):
# k=x+y+z
# k=0
# print(k)
#else:
# print(x+y+z)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program to sum of two given integers. However, if the sum is between 15 to 20 it will return 20.
#def summ(x,y):
# z=x+y
# if z in range (15,20):
# sum=20
# return sum
# else:
# return z
#x=int(input("enter a number: "))
#y=int(input("enter a number: "))
#print(summ(x,y))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Write a Python program that will return true if the two given integer values are equal or their sum or difference is 5
def values(x, y):
    """Return True if x and y are equal, or their sum is 5, or they
    differ by 5; otherwise return False.

    Fixes two defects in the original: it fell off the end and returned
    None instead of False when no condition held, and it only checked
    the one-sided difference x - y == 5 (the spec's "difference is 5"
    is symmetric, so abs() is used).
    """
    return x == y or x + y == 5 or abs(x - y) == 5
# Read two integers from the user and report whether they satisfy the
# values() condition (equal, sum to 5, or differ by 5).
x=int(input("enter a number: "))
y=int(input("enter a number: "))
print(values(x,y))
| [
"sevilaybayatli@gmail.com"
] | sevilaybayatli@gmail.com |
896ef150b0486cb58818cfdfc7767977826fb19d | 435360144d233c85ea23e50e05ee6edbfb4e4dd2 | /python网络数据采集/图像识别与文字处理/clean-image.py | 6308ecf105a1f49e07a86cb81607bf69597f5bb4 | [] | no_license | liazylee/python | 5d56e1a60b6472660c30a3e7548da873f5ee3ebd | f90b5d18ad8375b2b0c951fa68e7e0041b016cc6 | refs/heads/master | 2020-04-03T18:24:39.434080 | 2018-10-30T13:00:27 | 2018-10-30T13:00:27 | 155,482,955 | 1 | 0 | null | 2018-10-31T01:56:47 | 2018-10-31T01:56:47 | null | UTF-8 | Python | false | false | 715 | py | '''
利用 Pillow 库,我们可以创建一个 阈值过滤器来去掉渐变的背景色,只把文字留下来,从而让图片更加清晰
'''
from PIL import Image
import subprocess
def cleanFile(filePath, newFilePath):
image = Image.open(filePath)
#Set a threshold value for the image, and save
image = image.point(lambda x: 0 if x<143 else 255)
image.save(newFilePath)
#子进程调用tesseract Tesseract 最大的缺点是对渐变背景色的处理
subprocess.call(["tesseract", newFilePath, "test"])
#Open and read the resulting data file
outputFile = open("test.txt", 'r')
print(outputFile.read())
outputFile.close()
cleanFile("text.png", "text_clean.png") | [
"superonesfazai@gmail.com"
] | superonesfazai@gmail.com |
85952d0e2cac31f79d9c8daae2b41a3d0e24e218 | 194313096f9b7a520a3ce21a5778b4b49b384932 | /src/idleobject.py | 0ce1c6e0e416fd8563c7e49f71c2dc5997fb01c5 | [] | no_license | atareao/imagedownloader | 4af9fe1f78c09000c844dcfebd4a8dfdd434876e | d5f6c8b5240d606626dead2bc00ad29d953f335d | refs/heads/master | 2021-01-17T06:49:29.134980 | 2016-07-08T18:46:41 | 2016-07-08T18:46:41 | 51,201,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of PushBullet-Commons
#
# Copyright (C) 2014-2016
# Lorenzo Carbonell Cerezo <lorenzo.carbonell.cerezo@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GObject
from gi.repository import GLib
class IdleObject(GObject.GObject):
"""
Override GObject.GObject to always emit signals in the main thread
by emmitting on an idle handler
"""
def __init__(self):
GObject.GObject.__init__(self)
def emit(self, *args):
GLib.idle_add(GObject.GObject.emit, self, *args)
| [
"lorenzo.carbonell.cerezo@gmail.com"
] | lorenzo.carbonell.cerezo@gmail.com |
39001bd1a26f2714673d019cbd303670f8a82b2f | f03bd5bd7873c5cc33b4ef5199f219539f3a340e | /CAAPR/CAAPR_AstroMagic/PTS/pts/do/core/plotprogress.py | 30874beb519285c495bb670c954f6c5d3f02651c | [
"GPL-1.0-or-later",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-philippe-de-muyter",
"MIT"
] | permissive | Stargrazer82301/CAAPR | 5f8a7033b16792f23abd5d07021b53b9228a5db4 | 62b2339beb2eb956565e1605d44d92f934361ad7 | refs/heads/master | 2022-08-29T02:53:33.658022 | 2022-08-05T19:06:46 | 2022-08-05T19:06:46 | 49,977,601 | 8 | 1 | MIT | 2022-08-05T19:06:47 | 2016-01-19T19:32:42 | Python | UTF-8 | Python | false | false | 3,694 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.do.core.plotprogress Plot progress for the various phases of a SKIRT simulation.
#
# This script plots the progress in function of time for certain phases of a SKIRT simulation, based on the log
# messages. A seperate PDF plot is created for each of the following phases, if present in the simulation:
# - shooting photons for stellar emission ("prefix_progress_stellar_photons.pdf");
# - calculating dust emission spectra ("prefix_progress_dust_spectra.pdf");
# - shooting photons for dust emission ("prefix_progress_dust_photons.pdf");
#
# The dust self-absorption phase, if present, is ignored in the current implementation of the script.
#
# For multi-process (MPI) simulations with verbose logging (i.e. with a separate log file per process),
# the progress for all processes is displayed on the same plot.
#
# The script expects the complete output of a SKIRT simulation to be present (including log file etc.).
# If there are no arguments, the script processes all simulation output sets residing in the current directory.
# If the first argument contains a slash, the script processes all simulation output sets in the indicated directory.
# If the first argument does not contain a slash, the script processes just the simulation in the current directory
# with the indicated prefix.
#
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from pts.core.simulation.simulation import createsimulations
from pts.core.extract.progress import ProgressExtractor, ProgressTable
from pts.core.plot.progress import ProgressPlotter
from pts.core.tools import filesystem as fs
from pts.core.basics.configuration import ConfigurationDefinition, ConfigurationReader
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
# Add flags
definition.add_flag("table", "save the extracted progress table")
# Get configuration
reader = ConfigurationReader("plotprogress")
config = reader.read(definition)
# -----------------------------------------------------------------
# Look for a file in the current working directory that contains extracted progress information
progress_table_path = fs.join(fs.cwd(), "progress.dat")
if fs.is_file(progress_table_path): table = ProgressTable.from_file(progress_table_path)
# If extracted progress information is not present, first perform the extraction
else:
    # Create a SkirtSimulation object based on a log file present in the current working directory
    simulation = createsimulations(single=True)
    # Create a new ProgressExtractor instance
    extractor = ProgressExtractor()
    # Run the extractor and get the table
    table = extractor.run(simulation)
# -----------------------------------------------------------------
# Persist the extracted table when requested (only if it was freshly extracted,
# i.e. progress.dat does not already exist).
if config.table and not fs.is_file(progress_table_path): table.saveto(progress_table_path)
# -----------------------------------------------------------------
# Determine the path to the plotting directory
plot_path = fs.join(fs.cwd())
# Create a ProgressPlotter instance
plotter = ProgressPlotter()
# Run the progress plotter
plotter.run(table, plot_path)
# -----------------------------------------------------------------
| [
"cjrc88@gmail.com"
] | cjrc88@gmail.com |
6dd21581be8808e6af84d54ea4876e29955d7868 | 97e557d328b89adbd1459f8988a12ec3a9f4adc7 | /trino/datadog_checks/trino/config_models/defaults.py | dd72d075a339c504cf5a54565c63a0a9ed0e45d6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | DataDog/integrations-extras | 1b9c9928af4e5a146b9611aed29db206f420710c | 1d20f8d945ef802fa6e01b41b6ba99148ac508a4 | refs/heads/master | 2023-08-31T04:01:33.555722 | 2023-08-30T18:51:09 | 2023-08-30T18:51:09 | 51,574,196 | 221 | 667 | BSD-3-Clause | 2023-09-14T16:07:56 | 2016-02-12T07:55:28 | Python | UTF-8 | Python | false | false | 2,456 | py | # This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
# Autogenerated default-value accessors (see the header note: regenerate via
# assets/configuration/spec.yaml, do not hand-edit).  shared_* functions cover
# init_config options; instance_* functions cover per-instance options.
def shared_collect_default_metrics(field, value):
    return False
def shared_conf(field, value):
    return get_default_field_value(field, value)
def shared_new_gc_metrics(field, value):
    return False
def shared_service(field, value):
    return get_default_field_value(field, value)
def shared_service_check_prefix(field, value):
    return get_default_field_value(field, value)
def instance_collect_default_jvm_metrics(field, value):
    return True
def instance_empty_default_hostname(field, value):
    return False
def instance_is_jmx(field, value):
    return False
def instance_java_bin_path(field, value):
    return get_default_field_value(field, value)
def instance_java_options(field, value):
    return get_default_field_value(field, value)
def instance_jmx_url(field, value):
    return get_default_field_value(field, value)
def instance_key_store_password(field, value):
    return get_default_field_value(field, value)
def instance_key_store_path(field, value):
    return get_default_field_value(field, value)
def instance_min_collection_interval(field, value):
    return 15
def instance_name(field, value):
    return get_default_field_value(field, value)
def instance_password(field, value):
    return get_default_field_value(field, value)
def instance_process_name_regex(field, value):
    return get_default_field_value(field, value)
def instance_rmi_client_timeout(field, value):
    return 15000
def instance_rmi_connection_timeout(field, value):
    return 20000
def instance_rmi_registry_ssl(field, value):
    return False
def instance_service(field, value):
    return get_default_field_value(field, value)
def instance_tags(field, value):
    return get_default_field_value(field, value)
def instance_tools_jar_path(field, value):
    return get_default_field_value(field, value)
def instance_trust_store_password(field, value):
    return get_default_field_value(field, value)
def instance_trust_store_path(field, value):
    return get_default_field_value(field, value)
def instance_user(field, value):
    return get_default_field_value(field, value)
"noreply@github.com"
] | DataDog.noreply@github.com |
223af6cc25b1fbb1b1897dd9fc56907beca41a5f | 1d75146a66245dc046dc216bb602129208e00733 | /closed/Intel/code/rnnt/pytorch-cpu/pytorch/model_separable_rnnt.py | f0ef252130cdddba876ef1e6e96538b72431c578 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | georgelyuan/inference_results_v1.1 | febf287bd5967bf7f087355a81f06a2bd298cbfe | 3196a5587887c39203ee3ac246fa5dbe789d9085 | refs/heads/main | 2023-08-16T08:49:45.274284 | 2021-09-23T20:57:17 | 2021-09-23T20:57:17 | 409,773,141 | 0 | 0 | NOASSERTION | 2021-09-23T23:36:37 | 2021-09-23T23:36:37 | null | UTF-8 | Python | false | false | 7,227 | py | from typing import Optional, Tuple
import numpy as np
import torch
from rnn import rnn
from rnn import StackTime
class RNNT(torch.nn.Module):
    """RNN Transducer: wires up the encoder (transcription network), the
    prediction network and the joint network from a config dict.

    forward() runs only the encoder; prediction and joint are invoked
    separately by the decoder during beam/greedy search.
    """
    def __init__(self, rnnt=None, num_classes=1, **kwargs):
        super().__init__()
        # Input feature size either comes directly from kwargs or is derived
        # from the featurizer config (features * frame_splicing).
        if kwargs.get("no_featurizer", False):
            in_features = kwargs.get("in_features")
        else:
            feat_config = kwargs.get("feature_config")
            # This may be useful in the future, for MLPerf
            # configuration.
            in_features = feat_config['features'] * \
                feat_config.get("frame_splicing", 1)

        self.encoder = Encoder(in_features,
                               rnnt["encoder_n_hidden"],
                               rnnt["encoder_pre_rnn_layers"],
                               rnnt["encoder_post_rnn_layers"],
                               rnnt["forget_gate_bias"],
                               None if "norm" not in rnnt else rnnt["norm"],
                               rnnt["rnn_type"],
                               rnnt["encoder_stack_time_factor"],
                               rnnt["dropout"],
                               )

        self.prediction = Prediction(
            num_classes,
            rnnt["pred_n_hidden"],
            rnnt["pred_rnn_layers"],
            rnnt["forget_gate_bias"],
            None if "norm" not in rnnt else rnnt["norm"],
            rnnt["rnn_type"],
            rnnt["dropout"],
            -1, #_SOS
        )

        self.joint = Joint(
            num_classes,
            rnnt["pred_n_hidden"],
            rnnt["encoder_n_hidden"],
            rnnt["joint_n_hidden"],
            rnnt["dropout"],
        )

    def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Encoder-only forward; see class docstring.
        return self.encoder(x_padded, x_lens)
class Encoder(torch.nn.Module):
    """Transcription network: pre-RNN -> time stacking (frame downsampling
    by encoder_stack_time_factor) -> post-RNN."""
    def __init__(self, in_features, encoder_n_hidden,
                 encoder_pre_rnn_layers, encoder_post_rnn_layers,
                 forget_gate_bias, norm, rnn_type, encoder_stack_time_factor,
                 dropout):
        super().__init__()
        self.pre_rnn = rnn(
            rnn=rnn_type,
            input_size=in_features,
            hidden_size=encoder_n_hidden,
            num_layers=encoder_pre_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            dropout=dropout,
        )
        self.stack_time = StackTime(factor=encoder_stack_time_factor)
        # Stacking concatenates `factor` consecutive frames, hence the
        # widened input size of the post-RNN.
        self.post_rnn = rnn(
            rnn=rnn_type,
            input_size=encoder_stack_time_factor * encoder_n_hidden,
            hidden_size=encoder_n_hidden,
            num_layers=encoder_post_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            norm_first_rnn=True,
            dropout=dropout,
        )

    def forward(self, x_padded: torch.Tensor, x_lens: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # NOTE(review): input appears to be time-major (T, B, H) — confirm
        # against the feature pipeline.
        x_padded, _ = self.pre_rnn(x_padded, None)
        x_padded, x_lens = self.stack_time(x_padded, x_lens)
        # (T, B, H)
        x_padded, _ = self.post_rnn(x_padded, None)
        # (B, T, H)
        x_padded = x_padded.transpose_(0, 1)
        return x_padded, x_lens
class Prediction(torch.nn.Module):
    """Prediction (decoder) network: label embedding followed by an RNN.

    The start-of-sequence symbol (sos_val, -1 by convention here) has no row
    in the embedding table; it is emulated by zeroing its embedding output.
    """
    def __init__(self, vocab_size, n_hidden, pred_rnn_layers,
                 forget_gate_bias, norm, rnn_type, dropout, sos_val):
        super().__init__()
        # vocab_size - 1: the blank/SOS symbol is excluded from the table.
        self.embed = torch.nn.Embedding(vocab_size - 1, n_hidden)
        self.n_hidden = n_hidden
        self.dec_rnn = rnn(
            rnn=rnn_type,
            input_size=n_hidden,
            hidden_size=n_hidden,
            num_layers=pred_rnn_layers,
            norm=norm,
            forget_gate_bias=forget_gate_bias,
            dropout=dropout,
        )
        self._SOS = sos_val

    def forward(self, y: torch.Tensor,
                state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
                b: int = 1) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        """
        B - batch size
        U - label length
        H - Hidden dimension size
        L - Number of decoder layers = 2
        Args:
            y: (B, U)
        Returns:
            Tuple (g, hid) where:
                g: (B, U + 1, H)
                hid: (h, c) where h is the final sequence hidden state and c is
                    the final cell state:
                        h (tensor), shape (L, B, H)
                        c (tensor), shape (L, B, H)
        """
        # SOS hack, there is no SOS, and SOS should as if embedding give 0.0
        # So identify SOS and fill lookup result with 0.0
        # If embedding table contains SOS token this would save a lot of
        # trouble
        # NOTE(review): masked_fill_ mutates the caller's y tensor in place —
        # confirm callers never reuse the label tensor after this call.
        y_mask = y.eq(self._SOS)
        y.masked_fill_(y_mask, 0)
        y = self.embed(y)
        y.masked_fill_(y_mask.unsqueeze(2), 0.0)
        # if state is None:
        #    batch = y.size(0)
        #    state = [
        #        (torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device),
        #         torch.zeros(batch, self.pred_n_hidden, dtype=y.dtype, device=y.device))
        #        for _ in range(self.pred_rnn_layers)
        #    ]
        y = y.transpose_(0, 1)  # .contiguous()     # (U + 1, B, H)
        g, hid = self.dec_rnn(y, state)
        g = g.transpose_(0, 1)  # .contiguous()     # (B, U + 1, H)
        # del y, state
        return g, hid
class Joint(torch.nn.Module):
def __init__(self, vocab_size, pred_n_hidden, enc_n_hidden,
joint_n_hidden, dropout):
super().__init__()
layers = [
torch.nn.Linear(pred_n_hidden + enc_n_hidden, joint_n_hidden),
torch.nn.ReLU(),
] + ([torch.nn.Dropout(p=dropout), ] if dropout else []) + [
torch.nn.Linear(joint_n_hidden, vocab_size)
]
self.net = torch.nn.Sequential(
*layers
)
def forward(self, f: torch.Tensor, g: torch.Tensor):
"""
f should be shape (B, T, H)
g should be shape (B, U + 1, H)
returns:
logits of shape (B, T, U, K + 1)
"""
# Combine the input states and the output states
B, T, H = f.shape
B, U_, H2 = g.shape
f = f.unsqueeze(dim=2) # (B, T, 1, H)
f = f.expand((B, T, U_, H))
g = g.unsqueeze(dim=1) # (B, 1, U + 1, H)
g = g.expand((B, T, U_, H2))
inp = torch.cat([f, g], dim=3) # (B, T, U, 2H)
res = self.net(inp)
# del f, g, inp
return res
def label_collate(labels):
"""Collates the label inputs for the rnn-t prediction network.
If `labels` is already in torch.Tensor form this is a no-op.
Args:
labels: A torch.Tensor List of label indexes or a torch.Tensor.
Returns:
A padded torch.Tensor of shape (batch, max_seq_len).
"""
if isinstance(labels, torch.Tensor):
return labels.type(torch.int64)
if not isinstance(labels, (list, tuple)):
raise ValueError(
f"`labels` should be a list or tensor not {type(labels)}"
)
batch_size = len(labels)
max_len = max(len(l) for l in labels)
cat_labels = np.full((batch_size, max_len), fill_value=0.0, dtype=np.int32)
for e, l in enumerate(labels):
cat_labels[e, :len(l)] = l
labels = torch.LongTensor(cat_labels)
return labels
| [
"tjablin@google.com"
] | tjablin@google.com |
215d4e5fe25b13aa2b68168297d79b1ec68bbcc4 | 95b29e6ce83320a8e2368efd104a5db1f2af697e | /vel/openai/baselines/common/vec_env/subproc_vec_env.py | ebe55e4a848cab67dc37944e6dd69bc5c04c0a6c | [
"MIT"
] | permissive | tigerwlin/vel | 9b237c0a2ebb0fc6285db13e404c596907eb9107 | 00e4fbb7b612e888e2cbb5d8455146664638cd0b | refs/heads/master | 2020-04-17T07:06:58.759152 | 2019-04-18T01:43:57 | 2019-04-18T01:43:57 | 166,354,546 | 0 | 0 | MIT | 2019-05-01T11:17:59 | 2019-01-18T06:20:18 | Python | UTF-8 | Python | false | false | 3,363 | py | import numpy as np
from multiprocessing import Process, Pipe
from vel.openai.baselines.common.vec_env import VecEnv, CloudpickleWrapper
from vel.openai.baselines.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:,:,::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
| [
"jerry@millionintegrals.com"
] | jerry@millionintegrals.com |
38e465b29c5974e1c2697b29528910e655c026ee | c78bc2364a22e84b8c39f8b1aeb889d2376ea669 | /tests/test_core/test_compile.py | 28a111ad89b5ae093c1811f98ef36980fc406b7f | [
"BSD-3-Clause"
] | permissive | funkelab/spimagine | ee5262f4698eee56bb8bd5640ebae24829337a1c | d7fb0aac8986421df339486e1f0d33d0ba1c820c | refs/heads/master | 2020-04-23T17:29:12.209161 | 2019-02-18T18:24:40 | 2019-02-18T18:24:40 | 171,333,799 | 0 | 0 | BSD-3-Clause | 2019-02-18T18:21:13 | 2019-02-18T18:21:11 | null | UTF-8 | Python | false | false | 182 | py | """
mweigert@mpi-cbg.de
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import os
os.environ["PYOPENCL_COMPILER_OUTPUT"]="1"
import spimagine
| [
"mweigert@mpi-cbg.de"
] | mweigert@mpi-cbg.de |
e59d923bae404186dae6b1d5495e1017b6bdcb90 | f484afd10f622babaa0a38197cb7fa5f4da99630 | /news/models.py | 9414cc1ae1f13e9040a4cad961d53ab91f1e34a5 | [] | permissive | brayomumo/Django-mini-project | 95bc68ddf41f47bace23a34f6bacbc60cb3ea895 | 47a9731e5f319c7225e1e1451b6fb5b20d7ff752 | refs/heads/master | 2021-10-30T10:51:42.807899 | 2021-10-24T19:05:14 | 2021-10-24T19:05:14 | 203,002,911 | 0 | 0 | MIT | 2021-09-08T01:13:45 | 2019-08-18T12:54:21 | Python | UTF-8 | Python | false | false | 1,761 | py | from django.db import models
import datetime as dt
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
class Editor(models.Model):
first_name = models.CharField(max_length = 30)
last_name = models.CharField(max_length = 30)
email = models.EmailField()
phone_number = models.CharField(max_length = 10, blank = True)
def __str__(self):
return self.first_name
def save_editor(self):
self.save()
class Meta:
ordering = ['first_name']
class Tags(models.Model):
name = models.CharField(max_length = 30)
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length = 60)
post = HTMLField(default="post")
editor = models.ForeignKey(User,on_delete=models.CASCADE)
tags = models.ManyToManyField(Tags)
pub_date = models.DateTimeField(auto_now_add=True)
article_image = models.ImageField(upload_to='articles/',default="")
def __str__(self):
return self.title
def save_article(self):
self.save()
# def delete_editor(self):
# self.delete()
def delete_Article(self):
self.delete()
@classmethod
def todays_news(cls):
today = dt.date.today()
news = cls.objects.filter(pub_date__date = today)
return news
@classmethod
def days_news(cls, date):
news = cls.objects.filter(pub_date__date = date)
return news
@classmethod
def search_by_title(cls,search_term):
news = cls.objects.filter(title__icontains=search_term)
return news
class NewsLetterRecepients(models.Model):
name = models.CharField(max_length = 30)
email = models.EmailField() | [
"brayomumo5@gmail.com"
] | brayomumo5@gmail.com |
5f071a543f2c99f4a357cd5d63caef4e9a02a7fe | c1655d6c6c11dafc1c7fa9f771b8e1f99cf7f123 | /venv/lib/python3.6/site-packages/pyomo/core/base/check.py | f09834fb6a0999a7f44f8d6080960c7c6864325d | [] | no_license | igorsowa9/vpp | a27520f19a54d7490534016ded9cd66f4ef5385b | ea91e3b2db921e7b1a450d243f39dbcf61231107 | refs/heads/master | 2021-04-30T03:28:56.642244 | 2019-09-16T09:01:49 | 2019-09-16T09:01:49 | 121,514,524 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
__all__ = ['BuildCheck']
import logging
import types
from pyomo.util.timing import ConstructionTimer
from pyomo.core.base.plugin import register_component
from pyomo.core.base.indexed_component import IndexedComponent
from pyomo.core.base.misc import apply_indexed_rule
logger = logging.getLogger('pyomo.core')
class BuildCheck(IndexedComponent):
"""
A build check, which executes a rule for all valid indices. If
the function returns False an exception is raised.
Constructor arguments:
rule The rule that is executed for every indice.
Private class attributes:
_rule The rule that is executed for every indice.
"""
def __init__(self, *args, **kwd):
self._rule = kwd.pop('rule', None)
kwd['ctype'] = BuildCheck
IndexedComponent.__init__(self, *args, **kwd)
#
if not type(self._rule) is types.FunctionType:
raise ValueError("BuildCheck must have an 'rule' option specified whose value is a function")
def _pprint(self):
return ([], None, None, None)
def construct(self, data=None):
""" Apply the rule to construct values in this set """
if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover
logger.debug("Constructing Check, name="+self.name)
#
if self._constructed: #pragma:nocover
return
timer = ConstructionTimer(self)
self._constructed=True
#
if not self.is_indexed():
# Scalar component
res = self._rule(self._parent())
if not res:
raise ValueError("BuildCheck %r identified error" % self.name)
else:
# Indexed component
for index in self._index:
res = apply_indexed_rule(self, self._rule, self._parent(), index)
if not res:
raise ValueError("BuildCheck %r identified error with index %r" % (self.name, str(index)))
timer.report()
register_component(BuildCheck, "A component that performs tests during model construction. The action rule is applied to every index value.")
| [
"iso@ubuntu.ubuntu-domain"
] | iso@ubuntu.ubuntu-domain |
199d2bf06aabfed4e29852c18c294ea926cd1786 | 78d9827b9a7d8789c2d266adebe3a05fc1ed474b | /read_file.py | 1a696994b66117ceeaf8fb60429cd1f722f9479c | [
"MIT"
] | permissive | t4d-classes/python_03012021 | 8a62331d3b742097de56d334e9de9a8f82f5ed9e | 48b970d58e2e7b08f619be21154e8ec199155a50 | refs/heads/master | 2023-03-19T06:42:52.554737 | 2021-03-04T22:08:11 | 2021-03-04T22:08:11 | 341,255,879 | 0 | 1 | null | 2021-03-03T16:36:38 | 2021-02-22T16:01:51 | Python | UTF-8 | Python | false | false | 575 | py |
# with open("colors.txt", "r") as colors_file:
# colors_file.seek(30)
# colors_data = colors_file.read(30)
# print(colors_data)
# start_line = 3
# stop_line = 6
# with open("colors.txt", "r") as colors_file:
# lines = []
# for line_count, color in enumerate(colors_file):
# if line_count >= start_line and line_count < stop_line:
# lines.append(color.strip())
# if line_count >= stop_line:
# break
# print(lines)
with open("colors.txt", "r") as colors_file:
lines = colors_file.readlines()
print(lines)
| [
"eric@t4d.io"
] | eric@t4d.io |
19dd6237396dfe491b5f242c1848a0b5e0599d84 | c3b3eb44d1f4c6a17f095d46ba12adc7eb535e22 | /src/cube2stress/protocol/client_write_helper.py | 260d37fb7f88443f26afa75af28cdb77045d27af | [] | no_license | fdChasm/cube2stress | 52512c3eb7664612ea8b462838c9924f83c9cb4d | 38b74493716595482708e931a3fbe487b930249b | refs/heads/master | 2020-03-30T06:47:19.134982 | 2013-10-12T07:36:12 | 2013-10-13T16:56:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,206 | py | from cube2common.constants import message_types, DMF, DVELF, empty_material_types, material_types
from cube2common.ivec import ivec
from cube2common.vec import vec
from cube2common.utils.clamp import clamp
from cube2common.utils.vectoyawpitch import vectoyawpitch
def lookupmaterial(feetpos):
#I don't want to implement this right now so just pretend we're in the air
return empty_material_types.MAT_AIR
class cwh(object):
@staticmethod
def put_connect(cds, name, playermodel, pwdhash, authdomain, authname):
cds.putint(message_types.N_CONNECT)
cds.putstring(name)
cds.putint(playermodel)
cds.putstring(pwdhash)
cds.putstring(authdomain)
cds.putstring(authname)
@staticmethod
def put_spawn(cds, lifesequence, gunselect):
cds.putint(message_types.N_SPAWN)
cds.putint(lifesequence)
cds.putint(gunselect)
@staticmethod
def put_pos(cds, cn, physics_state):
d = physics_state
cds.putint(message_types.N_POS)
cds.putuint(cn)
# 3 bits phys state, 1 bit life sequence, 2 bits move, 2 bits strafe
physstate = d.physstate | ((d.lifesequence & 1) << 3) | ((d.move & 3) << 4) | ((d.strafe & 3) << 6)
cds.putbyte(physstate)
o = ivec(vec(d.o.x, d.o.y, d.o.z - d.eyeheight))
vel = min(int(d.vel.magnitude() * DVELF), 0xFFFF)
fall = min(int(d.falling.magnitude() * DVELF), 0xFFFF)
# 3 bits position, 1 bit velocity, 3 bits falling, 1 bit material
flags = 0;
if (o.x < 0 or o.x > 0xFFFF): flags |= 1 << 0
if (o.y < 0 or o.y > 0xFFFF): flags |= 1 << 1
if (o.z < 0 or o.z > 0xFFFF): flags |= 1 << 2
if (vel > 0xFF): flags |= 1 << 3
if fall > 0:
flags |= 1 << 4
if fall > 0xFF:
flags |= 1 << 5
if d.falling.x or d.falling.y or d.falling.z > 0:
flags |= 1 << 6
if lookupmaterial(d.feetpos()) & material_types.MATF_CLIP == empty_material_types.MAT_GAMECLIP:
flags |= 1 << 7
cds.putuint(flags)
for k in xrange(3):
cds.putbyte(o[k] & 0xFF)
cds.putbyte((o[k] >> 8) & 0xFF)
if o[k] < 0 or o[k] > 0xFFFF:
cds.putbyte((o[k] >> 16) & 0xFF)
if d.yaw < 0:
dir = 360 + int(d.yaw) % 360
else:
dir = int(d.yaw) % 360
dir += clamp(int(d.pitch + 90), 0, 180) * 360
cds.putbyte(dir & 0xFF)
cds.putbyte((dir >> 8) & 0xFF)
cds.putbyte(clamp(int(d.roll + 90), 0, 180))
cds.putbyte(vel & 0xFF)
if vel > 0xFF:
cds.putbyte((vel >> 8) & 0xFF)
velyaw, velpitch = vectoyawpitch(d.vel)
if velyaw < 0:
veldir = 360 + int(velyaw) % 360
else:
veldir = int(velyaw) % 360
veldir += clamp(int(velpitch + 90), 0, 180) * 360
cds.putbyte(veldir & 0xFF)
cds.putbyte((veldir >> 8) & 0xFF)
if fall > 0:
cds.putbyte(fall & 0xFF)
if fall > 0xFF:
cds.putbyte((fall >> 8) & 0xFF)
if d.falling.x or d.falling.y or d.falling.z > 0:
fallyaw, fallpitch = vectoyawpitch(d.falling)
if fallyaw < 0:
falldir = 360 + int(fallyaw) % 360
else:
falldir = int(fallyaw) % 360
falldir += clamp(int(fallpitch + 90), 0, 180) * 360
cds.putbyte(falldir & 0xFF)
cds.putbyte((falldir >> 8) & 0xFF)
@staticmethod
def put_clientdata(data_stream, client, data):
data_stream.putint(message_types.N_CLIENT)
data_stream.putint(client.cn)
data_stream.putuint(len(data))
data_stream.write(data)
@staticmethod
def put_text(cds, text):
cds.putint(message_types.N_TEXT)
cds.putstring(text)
@staticmethod
def put_switchname(cds, name):
cds.putint(message_types.N_SWITCHNAME)
cds.putstring(name)
@staticmethod
def put_tryspawn(cds):
cds.putint(message_types.N_TRYSPAWN)
| [
"fd.chasm@gmail.com"
] | fd.chasm@gmail.com |
aabdc28245a847b6a4e05ae366760caa283b0ed9 | 38bd99c72ca2521489ce1eb02b7604095b02b585 | /src/2211-CountCollisionsOnARoad.py | 23452618552f63035a2aab03ebe988c3d8207fa7 | [
"MIT"
] | permissive | Jiezhi/myleetcode | eadbd7d9f1f0ea6a0ee15c2da9040dcfbd28b522 | 4dd1e54d8d08f7e6590bc76abd08ecaacaf775e5 | refs/heads/master | 2023-03-16T15:52:21.833622 | 2023-03-09T14:33:03 | 2023-03-09T14:33:03 | 139,965,948 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | #!/usr/bin/env python
"""
CREATED AT: 2022/3/20
Des:
https://leetcode.com/problems/count-collisions-on-a-road/
GITHUB: https://github.com/Jiezhi/myleetcode
Difficulty: Medium
Tag: Weekly Contest 285
See:
"""
class Solution:
def countCollisions2(self, s: str) -> int:
"""
See https://leetcode-cn.com/problems/count-collisions-on-a-road/solution/zui-duan-dai-ma-bu-jie-shi-by-freeyourmi-6o0r/
:param s:
:return:
"""
return len(s.lstrip('L').rstrip('R')) - s.count('S')
def countCollisions(self, directions: str) -> int:
"""
1 <= directions.length <= 10^5
directions[i] is either 'L', 'R', or 'S'.
"""
if not directions:
return 0
start = 0
while start < len(directions) and directions[start] == 'L':
start += 1
end = len(directions) - 1
while end > 0 and directions[end] == 'R':
end -= 1
if start >= end:
return 0
ds = directions[start: end + 1]
if len(ds) == 1:
return 0
pre = ds[0]
ret = 0
i = 1
while i < len(ds):
d = ds[i]
if pre == 'S':
if d == 'L':
ret += 1
elif d == 'R':
pre = 'R'
elif pre == 'R':
if d == 'S':
ret += 1
pre = 'S'
elif d == 'L':
ret += 2
pre = 'S'
elif d == 'R':
pos = 1
while i + pos < len(ds) and ds[i + pos] == 'R':
pos += 1
if ds[i + pos] == 'L':
ret += pos + 2
i += pos + 1
pre = 'S'
continue
elif ds[i + pos] == 'S':
ret += pos + 1
i += pos + 1
pre = 'S'
continue
i += 1
return ret
def test():
assert Solution().countCollisions("RLRSLL") == 5
assert Solution().countCollisions2("RLRSLL") == 5
assert Solution().countCollisions("LLRR") == 0
if __name__ == '__main__':
test()
| [
"Jiezhi@users.noreply.github.com"
] | Jiezhi@users.noreply.github.com |
76da260cb85dac15d43e6462305ec7633cbaf61f | f37d4992d322e703b31c42dbc2a5b5d52cedb80a | /Ship-detector/ship_model.py | 36e04955e6a08af51a2db9ec297f25c493269a76 | [] | no_license | Jimut123/SATELLITE | a9eff4ebcf2890576a857fcb1c5b086e43831c75 | 97d948b823b701831b3e6cf78f0b23274046db9f | refs/heads/master | 2023-04-06T17:47:23.038610 | 2021-04-24T14:37:55 | 2021-04-24T14:37:55 | 281,049,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,190 | py | import json, sys, random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten, Activation
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils.np_utils import to_categorical
from keras.utils import np_utils
from keras.optimizers import SGD, Nadam
import keras.callbacks
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt
# download dataset from json object
f = open(r'shipsnet.json')
dataset = json.load(f)
f.close()
input_data = np.array(dataset['data']).astype('uint8')
output_data = np.array(dataset['labels']).astype('uint8')
n_spectrum = 3 # color chanel (RGB)
weight = 80
height = 80
X = input_data.reshape([-1, n_spectrum, weight, height])
X[0].shape
# get one chanel
pic = X[0]
rad_spectrum = pic[0]
green_spectrum = pic[1]
blue_spectum = pic[2]
# output encoding
y = to_categorical(output_data, 2)
# shuffle all indexes
indexes = np.arange(2800)
np.random.shuffle(indexes)
X_train = X[indexes].transpose([0,2,3,1])
y_train = y[indexes]
# normalization
X_train = X_train / 255
np.random.seed(42)
from tensorflow.keras import layers
from tensorflow.keras import initializers
he_initializer = initializers.HeNormal()
inputs = keras.Input(shape=(80, 80, 3), name="img")
x = layers.Conv2D(32, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(inputs)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #40x40
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(32, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #20x20
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(64, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #10x10
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(64, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #5x5
x = layers.Dropout(0.25)(x)
x = layers.Conv2D(128, (3, 3), padding='same',activation='relu',kernel_initializer=he_initializer,
bias_initializer="zeros")(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x) #5x5
x = layers.Dropout(0.25)(x)
x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs, name="My_model")
from tensorflow.keras.utils import plot_model as pm #plotting the model structure
pm(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True,dpi=60)
# augmentation
# example of horizontal shift image augmentation
from numpy import expand_dims
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import ImageDataGenerator
# aug = ImageDataGenerator(
# featurewise_center=True,
# samplewise_center=True,
# featurewise_std_normalization=True,
# samplewise_std_normalization=True,
# #zca_whitening=True,
# #zca_epsilon=1e-06,
# rotation_range=360,
# width_shift_range=0.25,
# height_shift_range=0.25,
# brightness_range=(150,255),
# shear_range=0.45,
# zoom_range=0.35,
# #channel_shift_range=0.35,
# fill_mode="nearest",
# #cval=0.0,
# horizontal_flip=True,
# vertical_flip=True,
# rescale=0.35,
# #preprocessing_function=None,
# #data_format=None,
# validation_split=0.35,
# )
aug = ImageDataGenerator(
rotation_range=360,
#zoom_range=0.2,
width_shift_range=0.10,
height_shift_range=0.10,
#brightness_range=[0.7,1.0],
shear_range=0.10,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
# for storing logs into tensorboard
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
callbacks = [
ModelCheckpoint("./model_checkpoint", monitor='val_loss'),
keras.callbacks.TensorBoard(log_dir=logdir)
]
# optimization setup
# sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
nadam = Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07#, name="Nadam"#, **kwargs
)
model.compile(
loss='categorical_crossentropy',
optimizer=nadam, #sgd,
metrics=['accuracy'])
# # training
# history = model.fit(
# X_train,
# y_train,
# batch_size=32,
# callbacks=callbacks,
# epochs=18,
# #steps_per_epoch=len(X_train) // 32,
# validation_split=0.2,
# shuffle=True,
# verbose=1)
history = model.fit(
x=aug.flow(X_train, y_train, batch_size=64),
validation_data=(X_train, y_train),
steps_per_epoch=len(X_train) // 64,
callbacks=callbacks,
epochs=1000,
verbose=1)
model.save('satseg1000e_nadam.h5')
from keras.models import load_model
load_model('satseg1000e_nadam.h5')
with open('history.json', 'w') as f:
json.dump(history.history, f)
with open('history.json') as f:
d = json.load(f)
#print(d)
| [
"jimutbahanpal@yahoo.com"
] | jimutbahanpal@yahoo.com |
b560cc41d956fc94b579f1df47d17c8cc742c9d5 | 5f9243fc8282cdc15e0aba8c3bb4e4a9ae55d88d | /lib/apikey/ncloud_apikey/properties_parser.py | a25dae3a68d20aa8211f1dd2820a962e6ad894e6 | [
"MIT"
] | permissive | KidongSohn/ncloud-sdk-py | ab7c743e4628d9498bda79e74bd87a7a4ba11a0d | 1c62471a9bd320d77164ed3193a0ebb9f64229ff | refs/heads/master | 2020-03-21T21:46:49.895974 | 2018-06-29T03:39:04 | 2018-06-29T03:39:04 | 139,083,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # coding: utf-8
class PropertiesParser(object):
def parse(self, filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
prop_list = []
for line in lines:
if line == '' or len(line) < 3 or line[0] == "#" :
continue
split_str = line.strip().split('=')
if len(split_str) < 2 :
continue
prop_list.append((split_str[0].strip(), split_str[1].strip()))
return prop_list
#print(PropertiesParser().parse("/Users/user/.ncloud/configure"))
| [
"kidong.sohn@navercorp.com"
] | kidong.sohn@navercorp.com |
c39faacad300340acf5afbc1dc9638d135b33ac7 | aa5e9defea373d64d75336fc6c5a03124e24abbd | /mwel/xml2mwel | a38bc73f0f316136adbc60e91e01f311502a0660 | [
"MIT"
] | permissive | esayui/mworks | e8ae5d8b07d36d5bbdec533a932d29641f000eb9 | 0522e5afc1e30fdbf1e67cedd196ee50f7924499 | refs/heads/master | 2022-02-18T03:47:49.858282 | 2019-09-04T16:42:52 | 2019-09-05T13:55:06 | 208,943,825 | 0 | 0 | MIT | 2019-09-17T02:43:38 | 2019-09-17T02:43:38 | null | UTF-8 | Python | false | false | 167 | #!/usr/bin/python
from __future__ import division, print_function, unicode_literals
import sys
import mwel
if __name__ == '__main__':
sys.exit(mwel.fromxml())
| [
"cstawarz@mit.edu"
] | cstawarz@mit.edu | |
e8ea04934ba2fd9e709df7aacea5088ce1de1a5f | 4111ca5a73a22174f189361bef654c3f91c3b7ed | /Lintcode/Ladder_37_BB/medium/831. 3Sum II.py | a2a340f0781320c786d23c4fbcaf343c3acbd7e9 | [
"MIT"
] | permissive | ctc316/algorithm-python | 58b541b654509ecf4e9eb8deebfcbdf785699cc4 | ac4580d55e05e93e407c6156c9bb801808027d60 | refs/heads/master | 2020-03-16T06:09:50.130146 | 2019-08-02T02:50:49 | 2019-08-02T02:50:49 | 132,548,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | class Solution:
"""
@param n: an integer
@return: the number of solutions
"""
def threeSum2(self, n):
res = 0
for z in range(0, int(n ** 0.5) + 1):
x, y = 0, z
target = n - z ** 2
while x <= y:
summ = x ** 2 + y ** 2
if summ > target:
y -= 1
elif summ < target:
x += 1
else:
y -= 1
res += 1
return res | [
"mike.tc.chen101@gmail.com"
] | mike.tc.chen101@gmail.com |
d676d645eb1adbaf3bf29bfaeda688d91a7d5206 | 50de54517ef5e157b43598e412c477fd66890a3e | /Assignment 01/Problem 16.py | 217e6ac4a6272a81683f430b2354c5f51928e3ef | [] | no_license | Shihabsarker93/BRACU-CSE111 | f530be247bebaaee9cc5e85948dc070adae0c6ae | 17c95c76f84abffe9d9bdcb5861fbacbc510b5a6 | refs/heads/main | 2023-08-13T15:33:57.331850 | 2021-10-07T10:56:09 | 2021-10-07T10:56:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | def count_dict(string):
"""
Given a string, this function returns a dictionary containing
the characters of the string as "key" and how many times the
character repeated itself as a value.
"""
dictionary = {}
for char in string:
if char == " ":
continue
dictionary[char] = dictionary.get(char, 0) + 1
return dictionary
user_input_1 = input()
user_input_2 = input()
dict_1 = count_dict(user_input_1)
dict_2 = count_dict(user_input_2)
if user_input_1 != user_input_2:
if dict_1 == dict_2:
print("Those strings are anagrams.")
else:
print("Those strings are not anagrams.")
else:
print("Those strings are not anagrams.")
| [
"mirzamahrabhossain@gmail.com"
] | mirzamahrabhossain@gmail.com |
a50740f0b46094ea02a198d3e53ffeca7ee7bd49 | 94ff68c2bf2a231584e8434a9d4363c56ea3af46 | /apps/goods/filter.py | 578d28d583582dc3f217f989340e04fa74f5301c | [] | no_license | Asunqingwen/MxShop | c95eae8d36273148220cfe31796e560a43b99c31 | eb4730f7c6921aa2a9099f210c7c914d8adfc3aa | refs/heads/master | 2022-12-23T19:22:13.226669 | 2019-12-13T08:03:32 | 2019-12-13T08:03:32 | 226,297,474 | 0 | 0 | null | 2022-12-08T03:19:38 | 2019-12-06T09:54:43 | JavaScript | UTF-8 | Python | false | false | 1,406 | py | # -*- coding: utf-8 -*-
# @Time : 2019/12/5 0005 16:09
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: filter.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
# @WebSite : labixiaoxin.me
from django.db.models import Q
from django_filters import rest_framework as filters
from .models import Goods
class GoodsFilter(filters.FilterSet):
"""
商品过滤类
"""
# 两个参数,name是要过滤的字段,lookup是执行的行为,’小于等于本店价格‘
price_min = filters.NumberFilter(field_name='shop_price', lookup_expr='gte', help_text='大于等于本店价格')
price_max = filters.NumberFilter(field_name='shop_price', lookup_expr='lte', help_text='小于等于本店价格')
# 行为: 名称中包含某字符,且字符不区分大小写
# name = filters.CharFilter(field_name="name" ,lookup_expr="icontains")
top_category = filters.NumberFilter(field_name="category", method='top_category_filter')
def top_category_filter(self, queryset, name, value):
# 不管当前点击的是一级目录二级目录还是三级目录。
return queryset.filter(Q(category_id=value) | Q(category__parent_category_id=value) | Q(
category__parent_category__parent_category_id=value))
class Meta:
model = Goods
fields = ['price_min', 'price_max', 'name', 'is_hot', 'is_new']
| [
"sqw123az@sina.com"
] | sqw123az@sina.com |
7e64c40255aea30895b1a4ba08e789acc97b8949 | ad4952035a8ea3116a18c346d5d8cbfa26fb8e98 | /PalindromePartitioning.py | 823396b73c932b11a64f0eb3fef963e7683f5dd0 | [] | no_license | AbhiniveshP/Backtracking-2 | 48908072a8f186dfb0bc77d7107e8229172d6a67 | 8ad2faaa93d100a47a37e3d453626acdadf4febe | refs/heads/master | 2021-03-14T06:44:12.538985 | 2020-03-13T11:29:31 | 2020-03-13T11:29:31 | 246,747,056 | 0 | 0 | null | 2020-03-12T04:56:15 | 2020-03-12T04:56:14 | null | UTF-8 | Python | false | false | 1,954 | py | '''
Solution
1. Using Backtracking, we check whether the substring considered is palindrome or not.
2. If palindrome, we check the remaining possible substrings
3. If not palindrome, we backtrack to the previous state and check for other possible substrings from that state.
Time Complexity: O(n * 2^n)
Space Complexity: O(n)
--- Passed all testcases on Leetcode successfully
'''
class PalindromePartitioning(object):
def __init__(self):
self.finalList = []
self.tempList = []
def __isPalindrome(self, s, fromId, toId):
# check for palindrome using 2 pointers
if (toId >= len(s)):
return False
while (fromId <= toId):
if (s[fromId] != s[toId]):
return False
fromId += 1; toId -= 1
return True
def __backtracking(self, s, fromIndex):
# base case
if (fromIndex == len(s)):
self.finalList.append(list(self.tempList))
return
# from current index to total length
for toIndex in range(fromIndex, len(s)):
# only if palindrome, do the following
if self.__isPalindrome(s, fromIndex, toIndex):
# action -- appending the current substring to the list
self.tempList.append(s[fromIndex: toIndex + 1])
# recursion -- just to check whether the partition can be valid or not
self.__backtracking(s, toIndex + 1)
# backtrack -- removing the current substring from the list
self.tempList.pop()
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
# edge case check
if (s == None or len(s) == 0):
return self.finalList
# main call to the helper function
self.__backtracking(s, 0)
# return the final list
return self.finalList | [
"pabhinivesh@gmail.com"
] | pabhinivesh@gmail.com |
a05788207b0351d1b7a36360cd29cc39e8273709 | 0ca0dfcdbd6e07280f401aa447d659cc04233398 | /db_migrate.py | 044f0247f9cfac4c523bcfbe9b55be7dada94a93 | [] | no_license | pace-noge/apt-assessment | 775530ca50bb95f3a34892477b0e8e794be9be98 | 113a0d645a69e3c66d7efd1e21e023b19751af38 | refs/heads/master | 2021-01-09T20:41:22.144393 | 2016-07-21T10:56:38 | 2016-07-21T10:56:38 | 63,752,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | import imp
import imp  # NOTE(review): `imp` is deprecated (removed in Python 3.12); kept for sqlalchemy-migrate compatibility

from app import db
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO

# Current schema version as tracked by the migration repository.
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))

# Build an in-memory module holding the model as it currently exists in the
# database, so it can be diffed against the application's SQLAlchemy metadata.
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)

# Generate the upgrade script that migrates old model -> current db.metadata.
script = api.make_update_script_for_model(
    SQLALCHEMY_DATABASE_URI,
    SQLALCHEMY_MIGRATE_REPO,
    tmp_module.meta,
    db.metadata
)

# Bug fix: use a context manager so the migration file is flushed and closed
# deterministically (the original relied on garbage collection to close it).
with open(migration, "wt") as migration_file:
    migration_file.write(script)

# Apply the new migration and report the resulting version.
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('[+] New migration created as ' + migration)
print('[+] Current db version: ' + str(v))
"nasa.freaks@gmail.com"
] | nasa.freaks@gmail.com |
03ef89e2525d97d86133e194f46b15b129dd712f | 316c45c7900c2440d4c72ec96c3e41358611585e | /test/CSVSL/MVATrainer_PseudoVertexNoSoftLepton_B_C_cfg.py | bc48465f474314f4c2c4a3c0f35adc4168805bf7 | [] | no_license | cms-btv-pog/RecoBTau-JetTagMVALearning | 49c52529774762c44a6eb8a9f6e130c4c0f01df3 | 691937f31d7c2f1865c555623ab4027362235d6e | refs/heads/master | 2020-12-24T13:20:49.115617 | 2015-10-07T08:39:14 | 2015-10-07T08:39:14 | 12,036,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("IPTrainer")
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.combinedSVTrainer = cms.EDAnalyzer("JetTagMVATreeTrainer",
useCategories = cms.bool(False),
calibrationRecord = cms.string("CombinedSVPseudoVertexNoSoftLepton"),
ignoreFlavours = cms.vint32(0, 1, 2, 3, 21),
signalFlavours = cms.vint32(5, 7),
minimumTransverseMomentum = cms.double(15.0),
minimumPseudoRapidity = cms.double(0),
maximumPseudoRapidity = cms.double(2.5),
fileNames = cms.vstring(
"/user/pvmulder/NewEraOfDataAnalysis/BTagServiceWork/DEVELOPMENT/SuperTaggerDev/CMSSW_5_3_14/src/RootFiles/SkimmedRootFiles/skimmed_max20k_eachptetabin_CombinedSVPseudoVertexNoSoftLepton_B.root",
"/user/pvmulder/NewEraOfDataAnalysis/BTagServiceWork/DEVELOPMENT/SuperTaggerDev/CMSSW_5_3_14/src/RootFiles/SkimmedRootFiles/skimmed_max20k_eachptetabin_CombinedSVPseudoVertexNoSoftLepton_C.root"
)
)
process.looper = cms.Looper("JetTagMVATrainerLooper",
trainers = cms.VPSet(
cms.PSet(
calibrationRecord = cms.string("CombinedSVPseudoVertexNoSoftLepton"),
trainDescription = cms.untracked.string("Save_PseudoVertexNoSoftLepton_B_C.xml"),
loadState = cms.untracked.bool(False),
saveState = cms.untracked.bool(False)
)
)
)
process.p = cms.Path(process.combinedSVTrainer)
| [
"pvmulder@cern.ch"
] | pvmulder@cern.ch |
b163cc7eb9a6f0762cd988763d79bf757e4f7f35 | 644c10493df293ba492133140ca6b6153802c75b | /{{cookiecutter.project_slug}}/{{cookiecutter.main_app}}/migrations/0001_initial.py | 50dd6e4583cac35236265bdbc4b2a8a3c65b9076 | [
"BSD-3-Clause"
] | permissive | huogerac/cookiecutter-djangofloppyforms | cc27aec961a3d339d390dba6deb791676650aab4 | 0a2c1d7fe506a5df13aaefde0f716373dbb8194e | refs/heads/main | 2023-04-30T21:34:14.822736 | 2021-05-15T18:20:10 | 2021-05-15T18:20:10 | 334,623,255 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | # Generated by Django 3.1.5 on 2021-01-26 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='{{ cookiecutter.main_model }}',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=264)),
('done', models.BooleanField(default=False)),
('due_to', models.DateTimeField()),
],
),
]
| [
"huogerac@gmail.com"
] | huogerac@gmail.com |
5e88ca3857afb692a59c3fe77b58248c23b0726a | 6ae5affdffdf5f18161c90e400623a823976330d | /codejam/R1C2020/b.py | 5fb0bb6da5c871ec28f36f116957b6f4e1818618 | [] | no_license | organization-lab/codejam | 01b7544600f421f8878dcc8ca275d521d4db5984 | 92e2b1cc25e1dd31a9cddf3ebb34c0e4e0c38568 | refs/heads/master | 2022-08-21T12:28:03.557676 | 2022-08-18T02:21:46 | 2022-08-18T02:21:46 | 121,612,804 | 0 | 2 | null | 2022-08-18T02:21:47 | 2018-02-15T09:48:39 | Python | UTF-8 | Python | false | false | 647 | py | # author: mofhu@github
ncase = int(input())
for case in range(1, ncase+1):
u = int(input())
s = {}
z0 = {}
for i in range(10000):
qi, si = input().split(' ')
if si[0] not in s:
s[si[0]] = 1
else:
s[si[0]] += 1
if len(z0) < 10:
if si[-1] not in z0:
z0[si[-1]] = 1
else:
z0[si[-1]] += 1
t = [(s[i], i) for i in s]
t.sort(key=lambda x:x[0], reverse=True)
for i in z0:
if i not in s:
ans = [i]
for i in t:
ans.append(i[1])
print("Case #{}: {}".format(case, ''.join(ans)))
| [
"mofrankhu@gmail.com"
] | mofrankhu@gmail.com |
7ccd37964a1cd6c0813afc434697d4c7f77c866c | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/startup/tila_OP_ToggleOverlay.py | d3d02affd772876cd82f8dc6e49843aec7341875 | [
"MIT"
] | permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 2,687 | py | import bpy
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Tila : Toggle Overlay",
    "author": "Tilapiatsu",
    "version": (1, 0, 0, 0),
    "blender": (2, 80, 0),
    "location": "View3D",
    "category": "View3D",
}
class TILA_ToggleOverlay(bpy.types.Operator):
    """Toggle the 3D viewport overlay.

    mode == 'TOGGLE' flips the whole overlay on/off;
    mode == 'SOFT' flips only the curated subset of overlay elements
    listed in soft_parameters (annotations, bones, grid, axes, ...).
    """
    bl_idname = "view3d.toggle_overlay"
    bl_label = "TILA: Toggle overlay"
    bl_options = {'REGISTER', 'UNDO'}

    mode : bpy.props.EnumProperty(items=[("TOGGLE", "Toggle", ""), ("SOFT", "Soft", "")])

    # Overlay attributes that are switched together in SOFT mode.
    soft_parameters = ['show_annotation',
                       'show_extras',
                       'show_bones',
                       # 'show_relationship_lines',
                       'show_motion_paths',
                       'show_outline_selected',
                       'show_object_origins',
                       'show_floor',
                       'show_axis_x',
                       'show_axis_y',
                       'show_face_orientation',
                       'show_faces']

    def toggle_state(self, state=None):
        # Flip the cached enabled flag, or force it to an explicit value.
        if state is None:
            self.enabled = not (self.enabled)
        else:
            self.enabled = state

    def toggle_soft(self, state=None):
        # Apply `state` to every soft parameter the current overlay exposes;
        # a None state is a no-op.
        if state is None:
            return
        for p in self.soft_parameters:
            if p in dir(bpy.context.space_data.overlay):
                setattr(bpy.context.space_data.overlay, p, state)

    def is_enable(self):
        if self.mode == 'TOGGLE':
            # NOTE(review): this returns the overlay struct itself, which is
            # always truthy; `overlay.show_overlays` was probably intended.
            # Kept as-is to preserve existing behavior -- confirm with author.
            return bpy.context.space_data.overlay
        elif self.mode == 'SOFT':
            return self.enabled

    def get_state(self):
        # Refresh self.enabled from the viewport's current overlay settings.
        if self.mode == 'TOGGLE':
            self.enabled = self.is_enable()
        elif self.mode == 'SOFT':
            # Enabled only if every available soft parameter is currently on.
            state = True
            for p in self.soft_parameters:
                if p in dir(bpy.context.space_data.overlay):
                    state = state and getattr(bpy.context.space_data.overlay, p)
            self.enabled = state

    def execute(self, context):
        self.get_state()
        if self.mode == 'TOGGLE':
            bpy.ops.wm.context_toggle(data_path='space_data.overlay.show_overlays')
            # Bug fix: the original tested `self.is_enable` -- a bound method,
            # which is always truthy -- so the else branch was unreachable.
            # The method must be called.
            if self.is_enable():
                self.toggle_state(state=False)
            else:
                self.toggle_state(state=True)
        elif self.mode == 'SOFT':
            spaces = context.area.spaces
            for s in spaces:
                if s.type == 'VIEW_3D':
                    self.toggle_soft(not self.enabled)
            self.toggle_state()
        return {'FINISHED'}
# Classes handled by the registration factory.
classes = (TILA_ToggleOverlay,)

# Blender generates matching register()/unregister() callables for us.
register, unregister = bpy.utils.register_classes_factory(classes)

if __name__ == "__main__":
    # Support running the add-on directly from Blender's text editor.
    register()
| [
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.